index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,000 | 5d1388b204c13ef7a9aef92d5fe10bcb436904dd | one = '../one'
one_num = 40
two = '../two'
two_num = 20
three = '../three'
three_num = 20
i = 1
# test = 'for'
# test_num = 20 |
983,001 | fbd92846d879e5ee5759591d9864cc7209e8ca7d | import datetime
import pickle
import glob
import os
import pandas as pd
import pymongo
from pymongo import MongoClient
from pandas import DataFrame
# Load per-subreddit pickled comment DataFrames, drop bot authors,
# and bulk-insert the posts into MongoDB (Python 2 script).
client = MongoClient("localhost", 27017)
db = client["reddit_polarization"]
data_path = "/home/jichao/MongoDB/reddit"
# CSV of known bot accounts whose posts are filtered out below.
bot_file = os.path.join(data_path, "bot_authors_2015_05.csv")
author_bot = pd.read_csv(bot_file)
subreddits = ("MensRights", "Feminism", "Cooking")
for subreddit in subreddits:
    collection = db[subreddit]
    fn_wildcard = os.path.join(data_path, subreddit + "_RC_*.pickle")
    filenames = glob.glob(fn_wildcard)
    for fn in filenames:
        print fn
        df = pickle.load(open(fn))
        df["author"] = df["author"].astype(str)
        df["subreddit"] = df["subreddit"].astype(str)
        df["created_utc"] = df["created_utc"].astype(int)
        # Remove posts from Bots
        # NOTE(review): DataFrame.ix is removed in modern pandas; .loc is
        # the replacement — confirm the pinned pandas version before upgrading.
        df = df.ix[~df["author"].isin(author_bot["author"]), :]
        # Convert epoch seconds to datetime (local timezone of the host).
        df["created_utc"] = df["created_utc"].map(lambda x: datetime.datetime.fromtimestamp(x))
        # One dict per row for insert_many.
        posts = df.T.to_dict().values()
        if len(posts) > 0:
            collection.insert_many(posts)
    # Index the collection by post time for range queries.
    collection.create_index([("created_utc", pymongo.ASCENDING)])
client.close()
|
983,002 | f601fab5d87e99d3466840a640a24957b2e8b624 | """empty message
Revision ID: aa880472dd75
Revises: 8020d161821e
Create Date: 2018-09-20 14:43:13.876308
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'aa880472dd75'
down_revision = '8020d161821e'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``click_nums`` BigInteger column to ``article``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('article', sa.Column('click_nums', sa.BigInteger(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``click_nums`` column from ``article``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('article', 'click_nums')
    # ### end Alembic commands ###
|
983,003 | 626ffe6228a61034ee6b167172276a431c7d4e1b | import scraperwiki
import lxml.html
from mechanize import ParseResponse, urlopen, urljoin
import mechanize
import lxml.html
uri="http://www.censusindia.gov.in/Census_Data_2001/Census_Data_Online/Area_Profile/Town_Profile.aspx?cki=6QHuVhlb10a"
response= urlopen(uri)
forms = ParseResponse(response, backwards_compat=False)
print forms
form = forms[0]
print form
statecode=[]
serial=1
st=[]
dt=[]
sb=[]
for item in form.find_control("drpState").items:
if item.name!='':
statecode.append(item.name)
control=form.find_control("drpState")
if control.type == "select" and control.name=="drpState": # means it's class ClientForm.SelectControl
for item in control.items:
st.append(([label.text for label in item.get_labels()]))
print statecode
st=st[1:]
v1=0
v2=0
print st
for i in statecode:
if v1>len(st):
break
m1=0
m2=0
if(i==""):
continue
else:
districtcode=[]
form.set_value([i], name="drpState")
content=urlopen(form.click())
forms=ParseResponse(content, backwards_compat=False)
form=forms[0]
for item in form.find_control("drpDistrict").items:
districtcode.append(item.name)
if len(dt)==0:
control=form.find_control("drpDistrict")
if control.type == "select" and control.name=="drpDistrict" : # means it's class ClientForm.SelectControl
for item in control.items:
dt.append(([label.text for label in item.get_labels()]))
dt=dt[1:]
print dt
print dt
for j in districtcode:
if m1>len(dt):
break
b1=0
b2=0
if(j==""):
continue
else:
subdistrictcode=[]
form.set_value([j], name="drpDistrict")
content=urlopen(form.click())
forms=ParseResponse(content, backwards_compat=False)
form=forms[0]
for item in form.find_control("drpTown").items:
subdistrictcode.append(item.name)
if len(sb)==0:
control=form.find_control("drpTown")
if control.type == "select" and control.name=="drpTown" : # means it's class ClientForm.SelectControl
for item in control.items:
sb.append(([label.text for label in item.get_labels()]))
sb=sb[1:]
print sb
for l in subdistrictcode:
if b1>len(sb):
break
if(l==""):
continue
else:
form.set_value([l],name="drpTown")
content=urlopen(form.click())
response=lxml.html.fromstring(content.read())
row=[]
data=[]
l_c=0
s_no=serial
for k in response.cssselect("tr.GridRows td"):
if l_c<3:
row.append(k.text_content())
l_c+=1
else:
row.append(k.text_content())
l_c=0
data.append(row)
scraperwiki.sqlite.save(unique_keys=["S_no"],data={"S_no":s_no,"Column1":row[0],"Column2":row[1],"Column3":row[2],"Column4":row[3],"State":st[v1][v2],"district":dt[m1][m2],"subdistrict":sb[b1][b2]})
s_no+=2
row=[]
s_no=serial+1
for k in response.cssselect("tr.GridAlternativeRows td"):
if l_c<3:
row.append(k.text_content())
l_c+=1
else:
row.append(k.text_content())
l_c=0
data.append(row)
scraperwiki.sqlite.save(unique_keys=["S_no"],data={"S_no":s_no,"Column1":row[0],"Column2":row[1],"Column3":row[2],"Column4":row[3],"State":st[v1][v2],"district":dt[m1][m2],"subdistrict":sb[b1][b2]})
s_no+=2
row=[]
#st=[]
serial=s_no-1
b1+=1
sb=[]
m1+=1
dt=[]
v1+=1
st=[]
import scraperwiki
import lxml.html
from mechanize import ParseResponse, urlopen, urljoin
import mechanize
import lxml.html
uri="http://www.censusindia.gov.in/Census_Data_2001/Census_Data_Online/Area_Profile/Town_Profile.aspx?cki=6QHuVhlb10a"
response= urlopen(uri)
forms = ParseResponse(response, backwards_compat=False)
print forms
form = forms[0]
print form
statecode=[]
serial=1
st=[]
dt=[]
sb=[]
for item in form.find_control("drpState").items:
if item.name!='':
statecode.append(item.name)
control=form.find_control("drpState")
if control.type == "select" and control.name=="drpState": # means it's class ClientForm.SelectControl
for item in control.items:
st.append(([label.text for label in item.get_labels()]))
print statecode
st=st[1:]
v1=0
v2=0
print st
for i in statecode:
if v1>len(st):
break
m1=0
m2=0
if(i==""):
continue
else:
districtcode=[]
form.set_value([i], name="drpState")
content=urlopen(form.click())
forms=ParseResponse(content, backwards_compat=False)
form=forms[0]
for item in form.find_control("drpDistrict").items:
districtcode.append(item.name)
if len(dt)==0:
control=form.find_control("drpDistrict")
if control.type == "select" and control.name=="drpDistrict" : # means it's class ClientForm.SelectControl
for item in control.items:
dt.append(([label.text for label in item.get_labels()]))
dt=dt[1:]
print dt
print dt
for j in districtcode:
if m1>len(dt):
break
b1=0
b2=0
if(j==""):
continue
else:
subdistrictcode=[]
form.set_value([j], name="drpDistrict")
content=urlopen(form.click())
forms=ParseResponse(content, backwards_compat=False)
form=forms[0]
for item in form.find_control("drpTown").items:
subdistrictcode.append(item.name)
if len(sb)==0:
control=form.find_control("drpTown")
if control.type == "select" and control.name=="drpTown" : # means it's class ClientForm.SelectControl
for item in control.items:
sb.append(([label.text for label in item.get_labels()]))
sb=sb[1:]
print sb
for l in subdistrictcode:
if b1>len(sb):
break
if(l==""):
continue
else:
form.set_value([l],name="drpTown")
content=urlopen(form.click())
response=lxml.html.fromstring(content.read())
row=[]
data=[]
l_c=0
s_no=serial
for k in response.cssselect("tr.GridRows td"):
if l_c<3:
row.append(k.text_content())
l_c+=1
else:
row.append(k.text_content())
l_c=0
data.append(row)
scraperwiki.sqlite.save(unique_keys=["S_no"],data={"S_no":s_no,"Column1":row[0],"Column2":row[1],"Column3":row[2],"Column4":row[3],"State":st[v1][v2],"district":dt[m1][m2],"subdistrict":sb[b1][b2]})
s_no+=2
row=[]
s_no=serial+1
for k in response.cssselect("tr.GridAlternativeRows td"):
if l_c<3:
row.append(k.text_content())
l_c+=1
else:
row.append(k.text_content())
l_c=0
data.append(row)
scraperwiki.sqlite.save(unique_keys=["S_no"],data={"S_no":s_no,"Column1":row[0],"Column2":row[1],"Column3":row[2],"Column4":row[3],"State":st[v1][v2],"district":dt[m1][m2],"subdistrict":sb[b1][b2]})
s_no+=2
row=[]
#st=[]
serial=s_no-1
b1+=1
sb=[]
m1+=1
dt=[]
v1+=1
st=[]
|
983,004 | ac087ee119aa8c5d73d5f2079bcc2aefd9120cec | #Calcule el valor de π a partir de la serie infinita:
#Imprima una tabla que muestre el valor aproximado de π,
#calculando un término de esta serie, dos términos, tres, etcétera.
# ¿Cuántos términos de esta serie tiene que utilizar para obtener 3.14? ¿3.141? ¿3.1415? ¿3.14159?
"""
n=2
i=3
contador = 0
pi = 4-((4/i)+(4/i+n)-(4/i+n))
while pi <= 3.14159:
contador +=1
print(pi) """
pi= 0
contador = 1
iteracione = 1
a = [3.14, 3.141, 3.1415, 3.14159]
iteracionDelFor = 2
for versionPi in a:
contador = 1
iteraciones = 1
while True:
if iteraciones%2==1:
pi +=4/contador
else:
pi -= 4/contador
if round(pi,iteracionDelFor) == versionPi:
print("Pi: ", pi, "Aproximacion", round(pi,iteracionDelFor))
break
contador +=2
iteraciones +=1
iteracionDelFor+=1
|
983,005 | 471ce9bdb6677785292eb30e2f7990980753d8f4 | from flask import Blueprint, render_template, redirect, url_for, request, flash
from flask_login import current_user
from models.image import Image
from models.user import User
from models.donation import Donation
from instagram_web.util.braintree import generate_client_token
from instagram_web.util.braintree import complete_transaction
from instagram_web.util.sendgrid import send_email
donations_blueprint = Blueprint(
'donations', __name__, template_folder='templates')
@donations_blueprint.route('/<image_id>/new', methods=['GET'])
def new(image_id):
    """Render the donation form for *image_id*, or bounce home if missing."""
    image = Image.get_or_none(Image.id == image_id)
    client_token = generate_client_token()
    if image:
        return render_template('donations/new.html', image=image, client_token=client_token)
    flash('Unable to find image with the provided id.')
    return redirect(url_for('home'))
@donations_blueprint.route('/<image_id>/checkout', methods=['POST'])
def create(image_id):
    """Process a Braintree donation for *image_id*, email the image owner
    and record the donation; redirects with a flash message on every path.
    """
    payment_nonce = request.form.get('payment_nonce')
    amount = request.form.get('donation_amount')
    image = Image.get_or_none(Image.id == image_id)
    if not image:
        flash('Unable to find image. Please try again.')
        return redirect(url_for('home'))
    # Bug fix: read the owner's email only after the existence check above;
    # previously this ran first and raised AttributeError for an unknown id.
    email = image.user.email
    # round(int(x), 2) is a no-op on an int, so compare the int directly.
    if not amount or int(amount) == 0:
        flash('Please insert a proper amount')
        return redirect(url_for('donations.new', image_id=image.id))
    if not payment_nonce:
        flash('Error with payment system. Please try again.')
        return redirect(url_for('users.show', username=image.user.username))
    if not complete_transaction(payment_nonce, amount):
        flash('Something went wrong')
        return redirect(url_for('donations.new', image_id=image.id))
    #SEND EMAIL#
    send_email(email)
    #SAVING DONATIONS TO THE DATABASE#
    new_donation = Donation(
        user_id=current_user.id,
        amount=amount,
        image_id=image.id
    )
    if not new_donation.save():
        flash('Unable to complete the transaction!')
        return redirect(url_for('donations.new', image_id=image.id))
    flash('Donation successful!')
    return redirect(url_for('users.show', username=image.user.username))
|
983,006 | 8c8b5e5eb40dcbdedb66f8f016a88750ac623f3a | from discord.ext import commands
from Chat.chat_bot import ChatBot
from ERBS.erbs_bot import ERBSBot
# from Music.music_bot import MusicBot
from Game.game_bot import GameBot
from Point.point_bot import PointBot
from Basic.basic_bot import BasicBot
from Log.infoLog import logger as log
from Settings import debug
from Stock.stock_bot import StockBot
class MyBot(commands.Bot):
    """Top-level GreenRain Discord bot: wires every feature cog into one Bot."""
    def __init__(self):
        # Production builds listen on "$", debug builds on "!" (both also
        # respond when the bot is @mentioned).
        if not debug:
            prefix = commands.when_mentioned_or("$")
        else:
            prefix = commands.when_mentioned_or("!")
        desc = 'GreenRain discord bot 3.5'
        super(MyBot, self).__init__(command_prefix=prefix, description=desc)
        # create bot
        self.pointBot = PointBot(self)
        self.erbsBot = ERBSBot(self)
        # BasicBot is handed the other cogs it cross-calls.
        self.basicBot = BasicBot(self, pointBot=self.pointBot, erbsBot=self.erbsBot)
        self.gameBot = GameBot(self)
        self.chatBot = ChatBot(self)
        self.stockBot = StockBot(self)
        # add bot
        self.add_cog(self.pointBot)
        self.add_cog(self.erbsBot)
        self.add_cog(self.basicBot)
        self.add_cog(self.gameBot)
        self.add_cog(self.chatBot)
        self.add_cog(self.stockBot)
    async def on_message(self, message):
        """Log every message, run moderation, dispatch commands, then the
        daily point check — in that order."""
        log.info('{0.author}: {0.content}'.format(message))
        await self.chatBot.checkBlock(message)
        # Let commands.Bot process any command contained in the message.
        await super(MyBot, self).on_message(message)
        await self.pointBot.dailyCheck(message)
|
983,007 | abeba380fb4953fff076615c806601ece5bdec7b | # -*- coding: utf-8 -*-
# @Time : 2019/2/22 21:36
# @Author : lemon_huahua
# @Email : 204893985@qq.com
# @File : do_excel.py
#写一个类 类的作用是完成Excel数据的读写 新建表单的操作
#函数一:读取指定表单的数据,
#有一个列表row_list,把每一行的每一个单元格的数据存到row_list里面去。
#每一行都有 一个单独的row_list [[1,2,3],[4,5,6]]
#每一行数据读取完毕后,把row_list存到大列表all_row_list
#函数二:在指定的单元格写入指定的数据,并保存到当前Excel
#函数三:新建一个Excel
from openpyxl import workbook
from openpyxl import load_workbook
from class_0227.read_config import ReadConfig#用这个模块 要用我们刚刚写的类
from class_0227.my_log import MyLog
logger=MyLog()
class DoExcel:
    """Read and write Excel test-case data, and create new workbooks."""

    # Number of data columns per test-case row:
    # CaseId, Title, Module, TestData, ExpectedResult, ActualResult, TestResult
    NUM_COLUMNS = 7

    def __init__(self, file_name, sheet_name):
        """Remember the workbook path and the sheet to operate on."""
        self.file_name = file_name
        self.sheet_name = sheet_name

    def read_excel(self, button):
        """Return test-case rows as a list of row lists (one list per row).

        ``button == 1`` reads every data row (row 2 onwards); otherwise
        ``button`` is a string such as ``"[1, 3]"`` naming case numbers,
        which map to sheet rows ``case + 1`` (row 1 is the header).
        """
        wb = load_workbook(self.file_name)
        sheet = wb[self.sheet_name]
        logger.info('开始读取数据了啦!')
        if button == 1:
            rows = range(2, sheet.max_row + 1)
        else:
            # NOTE(review): eval() on configuration text — the config file is
            # assumed trusted; ast.literal_eval would be the safer choice.
            rows = [case + 1 for case in eval(button)]
        # The original repeated one append per column; a comprehension reads
        # the same NUM_COLUMNS cells for each selected row.
        test_data = [
            [sheet.cell(r, c).value for c in range(1, self.NUM_COLUMNS + 1)]
            for r in rows
        ]
        logger.info('读取数据完毕!')
        return test_data

    def write_excel(self, row, col, value):
        """Write ``value`` into cell (row, col) and save the workbook."""
        wb = load_workbook(self.file_name)
        sheet = wb[self.sheet_name]
        logger.info('开始往Excel里面写数据')
        try:
            sheet.cell(row, col).value = value
            wb.save(self.file_name)
            wb.close()  # always release the workbook handle after writing
        except Exception as e:
            logger.error(e)
        logger.info('Excel里面数据写入完毕')

    def create_excel(self):
        """Create a new workbook containing a sheet named ``self.sheet_name``."""
        wb = workbook.Workbook()
        wb.create_sheet(self.sheet_name)
        wb.save(self.file_name)
if __name__ == '__main__':
    # Smoke test: read the case selector from case.conf and dump the rows.
    button=ReadConfig('case.conf').get_data('CASE','button')
    print(type(button))
    test_data=DoExcel('python_14.xlsx','test_cases').read_excel(button)
print(test_data) |
983,008 | 3f29362db059e1db3cc812682e3b57788a4d1ff8 | import pandas as pd
import numpy as np
import seaborn as sns
# ----------------------------------------- 연습문제 1
# key1값을 기준으로 data1값을 분류해서 합계를 구하고 결과를 데이터프레임으로 구한다
\
# a = df2.groupby(df2.key1).sum()['data1']
# a = pd.DataFrame(a)
# print(type(a))
# ----------------------------------------- 연습문제 2
# species별로 꽃잎길이(sepal_length), 꽃잎 폭(sepal_width)평균 구하기
# 종이 표시 되지 않을결루 종을 찾아 낼수 있는가?
# mean, median, min, max - 그룹데이터의 평균, 중앙값, 최소, 최대
# sum, prod, std, var, quantile - 그룹데이터의 합계, 곱, 표준편차, 분산, 사분위수
#
# iris = sns.load_dataset('iris')
# print(iris)
# ir = iris.groupby(iris.species).mean()
# print(ir[['sepal_width','sepal_length']])
# ----------------------------------------- 연습문제 3
# tips = sns.load_dataset('tips')
# 1. 요일, 점심/저녁/인원수의 영향을 받는지 확인하기
# print(tips.groupby(['day','time'])[['size']].describe())
# 2. 어떠한 요인이 가장 크게 작용하는 판단 방법있는가
# describe로 전체 통계값을 확인하고 day,time별로 묶는다
#
# ----------------------------------------- 연습문제 4
# 타이타닉 승객 분석
titanic = sns.load_dataset('titanic')
# 1. Build age groups with qcut (three equal-sized bins).
df = titanic['who'].unique() # inspect the column's distinct values
titanic['age_group']= pd.qcut(titanic.age, 3, labels=['child','mid','old'])
print(titanic.head())
# 2. Compute survival rate by sex, cabin class and age group as a DataFrame:
#    rows - sex, age group; columns - cabin class
# 3. Survival rate by sex and class in pivot_table form
|
983,009 | fdb0d165dc7547d27821012da167641784f50ade | from .Component import Component
class Movable(Component):
    """Base class shared by motors and servos: a port plus a speed value.

    ``speed`` must lie in the closed interval [0.0, 1.0], where 0.0 is
    slowest and 1.0 is fastest; anything outside raises ``ValueError``.
    """

    speed: float

    def __init__(self, port: int, speed: float):
        super().__init__(port)
        # Same comparisons as the original, just reordered.
        if speed > 1.0 or speed < 0.0:
            raise ValueError("Speed must be between 0.0 and 1.0")
        self.speed = speed
|
983,010 | d45beff2b2935d80d1ad8d1057a161308818615d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 ft=python
# author : Prakash [प्रकाश]
# date : 2019-09-19 19:30
import os
from pathlib import Path
import scrapy
from scrapy.crawler import CrawlerProcess
from .spiders import Kantipur
class NewsCrawler:
    """Drive a scrapy spider and write each scraped article to a text file
    under ``opath/<date>/<category>/<id>.txt``, skipping duplicates."""
    def __init__(self,opath):
        self.output = []   # every article dict handed back by the spider
        self.process = CrawlerProcess(settings={'LOG_ENABLED': False}) # Enables/disables log
        self.path = opath  # root output directory
        self.exist = []    # urls whose file already existed (duplicates)
    def get_result(self):
        """Return all article dicts collected so far."""
        return self.output
    def yield_output(self, data):
        """Spider callback: record ``data`` and persist it to disk.

        ``data`` is expected to carry url/id/date/urlcat/miti/title/content/
        category/author keys (set by the spider — confirm against Kantipur).
        """
        self.output.append(data)
        url = data['url']
        newsid = data['id']
        date = data['date']
        urlcat = data['urlcat']
        miti = data['miti']
        title = data['title']
        news = data['content']
        nepcat = data['category']
        author = data['author']
        pathdir = os.path.join(self.path,date,urlcat)
        filename = os.path.join(pathdir,newsid+'.txt')
        os.makedirs(pathdir, exist_ok=True)
        #print(f'making directory {pathdir}')
        #print(f'writing file (unknown)')
        print(f'<- [{date}] :: {url} ... ')
        my_file = Path(filename)
        # Only write once per article; an existing file counts as a duplicate.
        if not my_file.is_file():
            with open(filename,'w') as ofl:
                # Metadata header, then the article body.
                print(f'# url: {url}\n# title : {title}\n# date: {miti}\n# category: {nepcat}\n# author:{author}',file=ofl,end='\n')
                ofl.write(news)
        else:
            print(f'DUP: [####]:: {url}')
            self.exist.append(url)
    def crawl_news(self, spider,start_date=None,end_date=None):
        """Run ``spider`` over the date range; blocks until the crawl ends."""
        self.process.crawl(spider, start_date, end_date, callback_func=self.yield_output )
        self.process.start()
        print(f'there wer {len(self.exist)} duplicates ')
if __name__ == '__main__':
    # Example run: scrape a single day of Kantipur news into ../scrapped.
    start_date = '2019/01/01'
    end_date = '2019/01/01'
    opath = '../scrapped/kantipur'
    NC = NewsCrawler(opath)
    NC.crawl_news(Kantipur,start_date,end_date)
    op = NC.output
|
983,011 | 3c348014d9520a59b331bbaaa4d95957f345ee30 | from .learning import (
LearningRule,
MSTDPET,
FedeSTDP,
)
|
983,012 | a625c18543bf5e4a91f1230e299eeafa551f60f3 | #!/usr/bin/env python
import rospy
import array
import time
from std_msgs.msg import String
import serial
import struct
ser = serial.Serial('/dev/ttyACM0', baudrate=115200, timeout=0)
ready_to_read = True
def SerialOutCallback(msg):
    """ROS subscriber callback: forward a velocity command string to the
    motor controller over the shared serial port."""
    global ser
    ser.write(bytes(msg.data))
    rospy.loginfo("velCmd")
def serialNode():
    """Bridge the motor controller's serial port and ROS.

    Publishes two-byte encoder readings on ``leftEncoder_SerialIn`` and
    relays ``leftMotorVel_SerialOut`` messages to the controller via
    SerialOutCallback.  Runs until ROS shuts down.
    """
    global ser
    time.sleep(2) #Delay to allow serial comms to open up
    pub = rospy.Publisher('leftEncoder_SerialIn', String, queue_size = 1000)
    # Keep a reference so the subscription stays alive for the node's lifetime.
    sub = rospy.Subscriber('leftMotorVel_SerialOut', String, SerialOutCallback)
    rospy.init_node('serialNode_Motor', anonymous=True)
    # Removed unused locals from the original: bytecount, writeserial,
    # NotStartFlag and StopValReturn were assigned but never read.
    StartVal = struct.pack("b",127)   # controller "start" opcode
    StopVal = struct.pack("b",126)    # controller "stop" opcode
    readByte = b''
    ser.write(StopVal) #Send stop command
    time.sleep(0.3)
    ser.flushInput()
    time.sleep(0.1)
    ser.write(StartVal) #Start command
    rate = rospy.Rate(100)
    #Process incoming encoder values: accumulate bytes and publish each pair.
    while not rospy.is_shutdown():
        readByte += ser.read(1)
        if len(readByte) == 2:
            pub.publish(str(readByte))
            readByte = b''
        rate.sleep()
if __name__ == '__main__':
    try:
        serialNode()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node (e.g. Ctrl-C).
        pass
|
983,013 | 4843b42ccea64fa2813260d4c82e884670d1fb52 | """Package for loading and running the nuclei and cell segmentation models programmaticly."""
import os
import sys
import cv2
import imageio
import numpy as np
import torch
import torch.nn
import torch.nn.functional as F
from skimage import transform, util
from torch.utils.data import Dataset
from hpacellseg.constants import (MULTI_CHANNEL_CELL_MODEL_URL,
NUCLEI_MODEL_URL, TWO_CHANNEL_CELL_MODEL_URL)
from hpacellseg.utils import download_with_url
from config import CFG
NORMALIZE = {"mean": [124 / 255, 117 / 255, 104 / 255], "std": [1 / (0.0167 * 255)] * 3}
class CellSegmentator(object):
    """Uses pretrained DPN-Unet models to segment cells from images."""
    def __init__(
        self,
        nuclei_model="./nuclei_model.pth",
        cell_model="./cell_model.pth",
        model_width_height=None,
        device="cuda",
        multi_channel_model=True,
        return_without_scale_restore=False,
        scale_factor=0.25,
        padding=False
    ):
        """Load (downloading if necessary) the nuclei and cell models.

        nuclei_model / cell_model may be a checkpoint path or an already
        loaded torch model.  When model_width_height is set, inputs are
        resized to that square size; otherwise they are rescaled by
        scale_factor.  padding adds a 32px reflect border before inference.
        """
        if device != "cuda" and device != "cpu" and "cuda" not in device:
            raise ValueError(f"{device} is not a valid device (cuda/cpu)")
        if device != "cpu":
            try:
                assert torch.cuda.is_available()
            except AssertionError:
                # Fall back gracefully when CUDA is requested but absent.
                print("No GPU found, using CPU.", file=sys.stderr)
                device = "cpu"
        self.device = device
        if isinstance(nuclei_model, str):
            if not os.path.exists(nuclei_model):
                print(
                    f"Could not find {nuclei_model}. Downloading it now",
                    file=sys.stderr,
                )
                download_with_url(NUCLEI_MODEL_URL, nuclei_model)
            nuclei_model = torch.load(
                nuclei_model, map_location=torch.device(self.device)
            )
        # Unwrap DataParallel checkpoints when running on CPU.
        if isinstance(nuclei_model, torch.nn.DataParallel) and device == "cpu":
            nuclei_model = nuclei_model.module
        self.nuclei_model = nuclei_model.to(self.device)
        self.multi_channel_model = multi_channel_model
        if isinstance(cell_model, str):
            if not os.path.exists(cell_model):
                print(
                    f"Could not find {cell_model}. Downloading it now", file=sys.stderr
                )
                # The 3- and 2-channel cell models live at different URLs.
                if self.multi_channel_model:
                    download_with_url(MULTI_CHANNEL_CELL_MODEL_URL, cell_model)
                else:
                    download_with_url(TWO_CHANNEL_CELL_MODEL_URL, cell_model)
            cell_model = torch.load(cell_model, map_location=torch.device(self.device))
        self.cell_model = cell_model.to(self.device)
        self.model_width_height = model_width_height
        self.return_without_scale_restore = return_without_scale_restore
        self.scale_factor = scale_factor
        self.padding = padding
    def _image_conversion(self, images):
        """Validate the (microtubule, er, nuclei) channel lists and stack them
        into per-image 3-channel arrays; string entries are read from disk.
        For the two-channel model the er channel must be None and is zeroed.
        """
        microtubule_imgs, er_imgs, nuclei_imgs = images
        if self.multi_channel_model:
            if not isinstance(er_imgs, list):
                raise ValueError("Please speicify the image path(s) for er channels!")
        else:
            if not er_imgs is None:
                raise ValueError(
                    "second channel should be None for two channel model predition!"
                )
        if not isinstance(microtubule_imgs, list):
            raise ValueError("The microtubule images should be a list")
        if not isinstance(nuclei_imgs, list):
            raise ValueError("The microtubule images should be a list")
        if er_imgs:
            if not len(microtubule_imgs) == len(er_imgs) == len(nuclei_imgs):
                raise ValueError("The lists of images needs to be the same length")
        else:
            if not len(microtubule_imgs) == len(nuclei_imgs):
                raise ValueError("The lists of images needs to be the same length")
        # If entries are not arrays they are treated as file paths and loaded.
        if not all(isinstance(item, np.ndarray) for item in microtubule_imgs):
            microtubule_imgs = [
                os.path.expanduser(item) for _, item in enumerate(microtubule_imgs)
            ]
            nuclei_imgs = [
                os.path.expanduser(item) for _, item in enumerate(nuclei_imgs)
            ]
            microtubule_imgs = list(
                map(lambda item: imageio.imread(item), microtubule_imgs)
            )
            nuclei_imgs = list(map(lambda item: imageio.imread(item), nuclei_imgs))
            if er_imgs:
                er_imgs = [os.path.expanduser(item) for _, item in enumerate(er_imgs)]
                er_imgs = list(map(lambda item: imageio.imread(item), er_imgs))
        if not er_imgs:
            # Two-channel model: substitute an all-zero er channel.
            er_imgs = [
                np.zeros(item.shape, dtype=item.dtype)
                for _, item in enumerate(microtubule_imgs)
            ]
        cell_imgs = list(
            map(
                lambda item: np.dstack((item[0], item[1], item[2])),
                list(zip(microtubule_imgs, er_imgs, nuclei_imgs)),
            )
        )
        return cell_imgs
    def _pad(self, image):
        """Reflect-pad so inference sees dimensions compatible with the net;
        records the pre-pad shape so _restore_scaling can crop it back."""
        rows, cols = image.shape[:2]
        self.scaled_shape = rows, cols
        img_pad= cv2.copyMakeBorder(
            image,
            32,
            (32 - rows % 32),
            32,
            (32 - cols % 32),
            cv2.BORDER_REFLECT,
        )
        return img_pad
    def pred_nuclei(self, images):
        """Run the nuclei model over a list of images (numpy arrays) and
        return per-image uint8 prediction maps, rescaled unless
        return_without_scale_restore is set."""
        def _preprocess(images):
            if isinstance(images[0], str):
                raise NotImplementedError('Currently the model requires images as numpy arrays, not paths.')
            # images = [imageio.imread(image_path) for image_path in images]
            self.target_shapes = [image.shape for image in images]
            #print(images.shape)
            #resize like in original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
            if self.model_width_height:
                images = np.array([transform.resize(image, (self.model_width_height,self.model_width_height))
                                   for image in images])
            else:
                images = [transform.rescale(image, self.scale_factor) for image in images]
            if self.padding:
                images = [self._pad(image) for image in images]
            # The nuclei net expects the blue channel replicated three times.
            nuc_images = np.array([np.dstack((image[..., 2], image[..., 2], image[..., 2])) if len(image.shape) >= 3
                                   else np.dstack((image, image, image)) for image in images])
            nuc_images = nuc_images.transpose([0, 3, 1, 2])  # NHWC -> NCHW
            #print("nuc", nuc_images.shape)
            return nuc_images
        def _segment_helper(imgs):
            # Normalize with the hard-coded dataset statistics, then softmax
            # over the class dimension.
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                imgs = self.nuclei_model(imgs)
                imgs = F.softmax(imgs, dim=1)
                return imgs
        preprocessed_imgs = _preprocess(images)
        predictions = _segment_helper(preprocessed_imgs)
        predictions = predictions.to("cpu").numpy()
        #dont restore scaling, just save and scale later ...
        predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
                       for pred, target_shape in zip(predictions, self.target_shapes)]
        return predictions
    def _restore_scaling(self, n_prediction, target_shape):
        """Restore an image from scaling and padding.
        This method is intended for internal use.
        It takes the output from the nuclei model as input.
        """
        n_prediction = n_prediction.transpose([1, 2, 0])  # CHW -> HWC
        if self.padding:
            n_prediction = n_prediction[
                32 : 32 + self.scaled_shape[0], 32 : 32 + self.scaled_shape[1], ...
            ]
        # Channel 0 (background class) is zeroed before returning.
        n_prediction[..., 0] = 0
        if not self.return_without_scale_restore:
            # NOTE(review): cv2.resize takes (width, height); passing
            # shape[0], shape[1] (rows, cols) is only correct for square
            # images — confirm inputs are square.
            n_prediction = cv2.resize(
                n_prediction,
                (target_shape[0], target_shape[1]),
                #try INTER_NEAREST_EXACT
                interpolation=cv2.INTER_AREA,
            )
        return n_prediction
    def pred_cells(self, images, precombined=False):
        """Run the cell model.  ``images`` is either the 3-tuple of channel
        lists accepted by _image_conversion, or (precombined=True) a list of
        already stacked 3-channel arrays."""
        def _preprocess(images):
            self.target_shapes = [image.shape for image in images]
            for image in images:
                if not len(image.shape) == 3:
                    raise ValueError("image should has 3 channels")
            #resize like in original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
            if self.model_width_height:
                images = np.array([transform.resize(image, (self.model_width_height,self.model_width_height))
                                   for image in images])
            else:
                images = np.array([transform.rescale(image, self.scale_factor, multichannel=True) for image in images])
            if self.padding:
                images = np.array([self._pad(image) for image in images])
            cell_images = images.transpose([0, 3, 1, 2])  # NHWC -> NCHW
            return cell_images
        def _segment_helper(imgs):
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                imgs = self.cell_model(imgs)
                imgs = F.softmax(imgs, dim=1)
                return imgs
        if not precombined:
            images = self._image_conversion(images)
        preprocessed_imgs = _preprocess(images)
        predictions = _segment_helper(preprocessed_imgs)
        predictions = predictions.to("cpu").numpy()
        predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
                       for pred, target_shape in zip(predictions, self.target_shapes)]
        return predictions
class Yield_Images_Dataset(Dataset):
    """Dataset yielding (blue_image, ryb_image, original_size, id) tuples for
    HPA images stored as <ID>_red/_yellow/_blue.png under ``root``."""
    def __init__(self, csv_file, root=CFG.PATH_TEST, transform=None):
        # csv_file is a DataFrame with an "ID" column (despite the name).
        self.images_df = csv_file
        self.transform = transform  # accepted but not applied in __getitem__
        self.root = root
    def __len__(self):
        return len(self.images_df)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        _id = self.images_df["ID"].iloc[idx]
        r = os.path.join(self.root, f'{_id}_red.png')
        y = os.path.join(self.root, f'{_id}_yellow.png')
        b = os.path.join(self.root, f'{_id}_blue.png')
        # Read each channel as a single-channel (grayscale) image.
        r = cv2.imread(r, 0)
        y = cv2.imread(y, 0)
        b = cv2.imread(b, 0)
        #don't resize
        # NOTE(review): assumes the source images are square (only shape[0]
        # is recorded as "size") — confirm.
        size = r.shape[0]
        if CFG.size_seg == None:
            ryb_image = np.stack((r, y, b), axis=2)/255.
            blue_image = b/255.
            return blue_image, ryb_image, size, _id
        if size != CFG.size_seg:
            # Resize to the segmentation size, normalizing to [0, 1].
            blue_image = cv2.resize(b, (CFG.size_seg, CFG.size_seg))/255.
            ryb_image = np.stack((r, y, b), axis=2)
            ryb_image = cv2.resize(ryb_image, (CFG.size_seg, CFG.size_seg))/255.
        else:
            ryb_image = np.stack((r, y, b), axis=2)/255.
            blue_image = b/255.
        return blue_image, ryb_image, size, _id
983,014 | 7b2de201a71b3bc02d5f1dca74c8dce7121e2e57 | def min_Coins(target,coins,known_tar):
minCoins=target
if target in coins:
known_tar[target]=1
if known_tar[target]>0:
return known_tar[target]
for i in [c for c in coins if c<=target]:
numcoins=1+min_Coins(target-i,coins,known_tar)
if numcoins<minCoins:
minCoins=numcoins
known_tar[target]=minCoins
return known_tar[target]
# Driver: minimum coins to make 14 from denominations [1, 2, 5].
target=14
coins=[1,2,5]
coin_arr=[0]*(target+1) # memo table indexed by amount, 0 = not yet computed
print min_Coins(target,coins,coin_arr)
|
983,015 | 4777ba965aa3780c31fe9465fae5c494c127fdfa |
for i in range(1001):
sum = 0
for j in range(i+1,1001):
sum += j
if sum == 1000:
print(range(i,j+1))
if sum > 1000:
break
|
983,016 | d9f4cfe829d7afe1569403d873815cffbd9d2d7b | """This module defines the Status class, which represents statuses a pokemon can be
afflicted with.
"""
# pyre-ignore-all-errors[45]
from enum import Enum, auto, unique
@unique
class Status(Enum):
    """Enumeration of the status conditions a pokemon can be afflicted with."""

    BRN = auto()
    FNT = auto()
    FRZ = auto()
    PAR = auto()
    PSN = auto()
    SLP = auto()
    TOX = auto()

    def __str__(self) -> str:
        return "{} (status) object".format(self.name)
|
983,017 | 5a56e0e1722f3cee00621551138a59074d3ea2c2 | from flask import request, jsonify, g, Blueprint
from flask_security import login_required
from uuid import UUID
projects_info = Blueprint('api_projects_info', __name__)
@projects_info.route('/api/projects/authors', methods=['GET'])
@login_required
def get_all_authors():
    """ Returns a JSON list of all email addresses used in projects "authors"
    field, sorted case-insensitively.
    """
    try:
        authors = g.projects.distinct('authors')
        all_authors = sorted(authors, key=lambda k: str(k).lower()) if authors else []
        return jsonify(all_authors)
    except Exception as err:
        # NOTE(review): ApiException is not imported anywhere in this module,
        # so this line raises NameError instead — confirm the intended import.
        raise ApiException(str(err), 500)
@projects_info.route('/api/projects/tags', methods=['GET'])
@login_required
def get_all_tags():
    """ Returns a JSON list of all tags used in projects "tags" field,
    sorted case-insensitively.
    """
    try:
        tags = g.projects.distinct('tags')
        return jsonify(sorted(tags, key=str.lower))
    except Exception as err:
        # NOTE(review): ApiException is not imported anywhere in this module,
        # so this line raises NameError instead — confirm the intended import.
        raise ApiException(str(err), 500)
|
983,018 | ad800e7bb9dc6c0a1045b6dc02a13357e95cf1ca | #!/usr/bin/env python
# Copyright (C) 2010 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from oxml2xhtml_utils import SaxContentHandler, saxParseString
class DocRels(object, SaxContentHandler):
    """SAX parser for word/_rels/document.xml.rels (OOXML relationships).

    Each <Relationship Id=... Target=... Type=... [TargetMode=...]/> element
    is recorded as Id -> (Target, Type, TargetMode); the accessors below read
    one field of that tuple, returning None for unknown ids.
    """

    def __init__(self, xmlStr):
        SaxContentHandler.__init__(self)
        self._rels = {}  # Id -> (Target, Type, TargetMode)
        if xmlStr is not None:
            saxParseString(xmlStr, self)

    def _field(self, id, index):
        # Shared lookup used by the three public accessors.
        return self._rels.get(id, (None, None, None))[index]

    def getTarget(self, id):
        return self._field(id, 0)

    def getType(self, id):
        return self._field(id, 1)

    def getTargetMode(self, id):
        return self._field(id, 2)

    # Sax event handlers
    def startElement(self, name, _attrs):
        attrs = dict((key, _attrs.get(key)) for key in _attrs.keys())
        if name == "Relationship":
            self._rels[attrs.get("Id")] = (
                attrs.get("Target"), attrs.get("Type"), attrs.get("TargetMode"))

    def endElement(self, name):
        pass

    def characters(self, data):
        pass
# def processingInstruction(self, target, data):
# pass
#
# def setDocumentLocator(self, locator):
# pass
#
# def startDocument(self):
# pass
#
# def endDocument(self):
# pass
#
# def startPrefixMapping(self, *args):
# pass
#
# def endPrefixMapping(self, *args):
# pass
|
983,019 | 21c3f927517729e05c959298c2ece671b7d19e88 | import train
import logistic_regression
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
le = preprocessing.LabelEncoder()
def calculate_trend(df, lag_list, column):
    """Add relative-change ("trend") columns of `column` for each lag in `lag_list`.

    Trend_<column>_<lag> = (value - value shifted by lag) / (value shifted by lag).
    Mutates `df` in place and returns it.

    NOTE(review): the numerator shift uses fill_value=-999 while the denominator
    shift uses fill_value=0, so the first `lag` rows divide by zero (inf/NaN) —
    confirm this asymmetry is intended.
    """
    for lag in lag_list:
        trend_column_lag = "Trend_" + column + "_" + str(lag)
        df[trend_column_lag] = (df[column]-df[column].shift(lag, fill_value=-999))/df[column].shift(lag, fill_value=0)
    return df
def calculate_lag(df, lag_list, column):
    """Add one shifted copy of `column` per lag in `lag_list` (vacated rows get 0).

    The new columns are named "<column>_<lag>". Mutates `df` in place and
    returns it.
    """
    for shift_by in lag_list:
        lagged_name = "{}_{}".format(column, shift_by)
        df[lagged_name] = df[column].shift(shift_by, fill_value=0)
    return df
# Run the model for Spain
def main():
    """Fit lagged linear-regression COVID models for one country and plot results.

    Reads TRAINING_DATA, TEST_DATA and COUNTRY from environment variables and
    delegates preprocessing/modeling/plotting to the `train` and
    `logistic_regression` helper modules.
    """
    TRAINING_DATA_DIR = os.environ.get("TRAINING_DATA")
    TEST_DATA = os.environ.get("TEST_DATA")
    train_data = pd.read_csv(TRAINING_DATA_DIR)
    test = pd.read_csv(TEST_DATA)
    add_columns = train.addingColumns(train_data, test)
    data, country_dict, all_data = train.addingWolrd(add_columns)
    # Dates used as x-axis labels in the comparison plots below.
    dates_list = ['2020-03-01', '2020-03-02', '2020-03-03', '2020-03-04', '2020-03-05', '2020-03-06', '2020-03-07', '2020-03-08', '2020-03-09',
                  '2020-03-10', '2020-03-11', '2020-03-12', '2020-03-13', '2020-03-14', '2020-03-15', '2020-03-16', '2020-03-17', '2020-03-18',
                  '2020-03-19', '2020-03-20', '2020-03-21', '2020-03-22', '2020-03-23', '2020-03-24']
    country_name = os.environ.get("COUNTRY")
    # country_name = 'Spain'
    day_start = 35
    lag_size = 30
    data = logistic_regression.lin_reg_with_lags_country(all_data, country_name, day_start, lag_size, country_dict)
    logistic_regression.plot_real_vs_prediction_country(data, train_data, country_name, 39, dates_list)
    logistic_regression.plot_real_vs_prediction_country_fatalities(data, train_data, country_name, 39, dates_list)
    # ts = time.time()
    # Inputs
    # country_name = "Italy"
    # day_start = 35
    # lag_size = 30
    # data = lin_reg_with_lags_country(all_data, country_name, day_start, lag_size, country_dict)
    # plot_real_vs_prediction_country(data, train, country_name, 39, dates_list)
    # plot_real_vs_prediction_country_fatalities(data, train, country_name, 39, dates_list)
if __name__ == "__main__":
    main()
|
983,020 | 430ac4598a66d08fb64d6bb6bb7b8fb0a4c038b0 | # Generated by Django 2.1.7 on 2019-03-29 07:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('module1', '0003_delete_login'),
]
operations = [
migrations.RenameField(
model_name='signup',
old_name='repassword',
new_name='gender',
),
]
|
983,021 | 7c8ba62980137e55dff6188e25fabbb49c0ba7bc | '''
Using a "for-loop", print out all odd numbers from 1-100.
'''
# Walk every number up to 100 and print only the odd ones.
for number in range(1, 100):
    if number % 2 == 1:
        print(number)
|
983,022 | 1d08c233a936cf3527001091cc9d0250ad3657e1 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 19:16:38 2020
@author: Mikko
"""
from tkinter import filedialog
from tkinter import *
from tkinter import Frame, Canvas, Label, Button, LEFT, RIGHT, ALL, Tk, Entry, BOTH, S
from random import randint
import tkinter as tk
from tkinter import ttk
import numpy as np
from PIL import Image, ImageTk
import PIL
import time
#root = Tk()
#root.filename = filedialog.asksaveasfilename(initialdir = "/",title = "Select file",filetypes = (("jpeg files","*.jpg"),("all files","*.*")))
#print (root.filename)
def get_filename_dialog(root):
    """Open a file-picker dialog (jpg filter), store the choice on `root` and return it."""
    chosen = filedialog.askopenfilename(
        initialdir="/", title="Select file",
        filetypes=(("jpeg files", "*.jpg"), ("all files", "*.*")))
    root.filename = chosen
    return chosen
class AppGUI:
    """Tk GUI for building a glitch/shift animation from a picture.

    Users add parameter rows (sign, shift length, channel), load a picture,
    and "Execute" feeds the rows to an Animate1 cycle builder that writes a gif.
    """

    def __init__(self, root, windowsize):
        # Two-tab layout: "Create animation" (controls) and an empty "Preview".
        self.root = root
        _tabs = ttk.Notebook(root, width=900, height=900)
        leaf = Frame(_tabs)
        _tabs.add(leaf, text="Create animation")
        self.windowsize = windowsize
        self.xpic, self.ypic = 0, 0
        # Mouse events are wired but callback() is currently a no-op.
        self.root.bind("<B1-Motion>", self.callback)
        self.root.bind("<Button-1>", self.callback)
        _f1 = Frame(leaf)
        _f1.pack(fill=BOTH)
        f1 = Frame(_f1)
        f1.pack(fill=BOTH)
        self.photoframe = Frame(f1)
        # Toolbar buttons.
        getfilebtn = Button(f1, width=15, text="Get picture file", command=self.get_picture)
        getfilebtn.pack(side=LEFT)
        createbtn = Button(f1, width=15, text="Execute", command=self.create)
        createbtn.pack(side=LEFT)
        defaultsbtn = Button(f1, width=15, text="Set defaults", command=self.set_defaults)
        defaultsbtn.pack(side=LEFT)
        clearbtn = Button(f1, width=15, text="Clear rows", command=self.clear_rows)
        clearbtn.pack(side=LEFT)
        f2 = Frame(_f1)
        f2.pack(side=TOP)
        # Label showing the currently selected picture path.
        self.fnamevar = StringVar()
        self.filename = Label(f2, textvariable=self.fnamevar)
        self.filename.pack(side=TOP)
        addrow = Button(f2, width=15, text="Add row", command=self.set_option_row)
        addrow.pack(side=BOTTOM)
        # Container for dynamically added parameter rows.
        self.rows = Frame(_f1, borderwidth=2)
        self.rows.pack(side=TOP)
        self.row_frames = []
        self.rows_empty = True
        #s1 = ttk.Separator(_f1, orient="horizontal")
        #s2 = ttk.Separator(_f1, orient="horizontal")
        self.index = 0
        self.arr_loaded = False
        self.checkvars = []  # IntVars backing the per-row "negative" checkboxes
        _f2 = Frame(_tabs)
        _tabs.add(_f2, text="Preview")
        _tabs.grid(row=0, column=0)

    def callback(self, event):
        # Placeholder for mouse interaction.
        pass

    def set_option_row(self):
        """Append one parameter row: negative-checkbox, shift-length scale, channel combo."""
        row = Frame(self.rows)
        row.pack(side=TOP)
        radlabel = Label(row, text="shift length", height=1, compound=LEFT)
        radlabel.pack(side=LEFT)
        var = IntVar()
        cneg = Checkbutton(row, text="negative", variable=var)
        cneg.pack(side=LEFT)
        self.checkvars.append(var)
        radius = tk.Scale(row, orient=tk.HORIZONTAL, length=200)
        radius.pack(side=LEFT)
        radius.set(25)
        combolabel = Label(row, text="# Channel", height=1, compound=LEFT)
        combolabel.pack(side=LEFT)
        channels = ttk.Combobox(row, values=[str(i) for i in range(3)])
        channels.pack(side=LEFT)
        channels.set(0)
        self.row_frames.append(row)
        self.rows_empty = False

    def get_picture(self):
        """Ask for a picture file, load it and cache it as an array."""
        photo_name = get_filename_dialog(self.root)
        self.fnamevar.set(photo_name)
        load = Image.open(photo_name)
        # NOTE(review): to_array is not defined in this file — confirm it is
        # provided by a sibling module.
        self.arr = to_array(load)
        self.arr_loaded = True

    def get_row_parameters(self):
        """Collect (negative_flag, shift_length, channel) from every parameter row.

        Widget order per row is fixed by set_option_row: index 2 is the scale,
        index 4 is the channel combobox.
        """
        self.parameters = []
        i = 0
        for fra in self.row_frames:
            rowwid = [widget for widget in fra.winfo_children()]
            self.parameters.append((self.checkvars[i].get(), rowwid[2].get(), int(rowwid[4].get())))
            i += 1
            del rowwid
    def save_parameters(self):
        # Not implemented yet.
        pass

    def load_parameters(self):
        # Not implemented yet.
        pass

    def clear_rows(self):
        """Remove every parameter row and reset the row-related state."""
        for widget in self.rows.winfo_children():
            widget.destroy()
        self.row_frames = []
        self.parameters = []
        self.checkvars = []
        self.rows_empty = True

    def set_defaults(self):
        # Not implemented yet.
        pass

    def create(self):
        """Run the animation pipeline over the loaded picture with the row parameters."""
        if not self.rows_empty:
            self.get_row_parameters()
            print(self.parameters)
            if self.arr_loaded:
                # NOTE(review): Animate1 is not defined in this file — confirm
                # it is provided by a sibling module.
                self.animation = Animate1(self.arr)
                for par in self.parameters:
                    p1, p2, p3 = par
                    # Checkbox set -> use a negative shift direction.
                    sign = [1, -1][p1]
                    self.animation.create_cycle1(maxshift=p2*sign, channel=p3)
                self.animation.stack_cycles()
                print("starting")
                # Timestamp as counter keeps output gif names unique.
                self.animation.history_to_gif(counter=int(time.time()))
                print("done")
if True:
    # Build the main window and hand control to Tk's event loop.
    window = Tk()
    window.title("...")
    lx, ly = 900, 900
    size = "{}x{}".format(lx, ly)
    window.geometry(size)
    # Fixed width, resizable height.
    window.resizable(0, 1)
    framex, framey = 900, 900
    gui1 = AppGUI(window, windowsize=(framex, framey))
    #gui2 = App2(root)
    window.mainloop()
|
983,023 | 60f5c387b2ecc74da885b83b7f9cd8ebc3717fcf | from PIL import Image
import pyocr
import translation
def imageToString(image):
    """Run OCR on `image` (English) and return the Japanese translation of the text."""
    # Get an OCR engine (first available pyocr backend).
    tools = pyocr.get_available_tools()
    tool = tools[0]
    print(type(image))
    # Run OCR on the given image.
    txt = tool.image_to_string(
        # Image.open(image), # use this form instead when reading from an image file
        image,
        lang='eng',
        builder=pyocr.builders.TextBuilder()
    )
    # Translate the recognized text from English to Japanese.
    translatedTxt = translation.translateEngToJa(txt)
    print(translatedTxt)
    # Return the translation result.
    return translatedTxt
983,024 | f753681a666ec56ef75078e9d15378621636d0f9 | # -*- coding: utf-8 -*-
from django.db.models import Q
from questions.questions import TeamRelationalQuestion
from questions.models import Question
from football.models import Team
fields = ["venue_city", "founded", "logo"]
def create_relational_questions():
    """Create and persist one relational question per field for each fully-populated team.

    Teams missing any of venue_city/founded/logo are skipped entirely.
    """
    teams = Team.objects.all()
    for team in teams:
        #import pdb; pdb.set_trace()
        if team.venue_city and team.founded and team.logo:
            for field in fields:
                # `origin` records which team/field the question was built from.
                origin = {
                    "team_id": team.id,
                    "field": field
                }
                quest = TeamRelationalQuestion(origin)
                question = quest.to_model()
                question.save()
                print(">>>> Salva pergunta '%s'" % question.statement)
def set_questions_dificulty():
    """Copy each team's popularity onto its type-"0" questions as the difficulty."""
    for question in Question.objects.all():
        if question.type != "0":
            continue
        team = Team.objects.get(id=question.origin["team_id"])
        question.dificulty = team.popularity
        question.save()
        print(">>>> Salva dificuldade %s para pergunta '%s'" % (str(question.dificulty), question.statement))
|
983,025 | 043c19e76db06fd6aff1624a3e03c07b67a7dddb |
# Here we are setting up our connection to google sheets
# and google drive. Taking this out of the main file helps clean up the main
# app
import os
from oauth2client.service_account import ServiceAccountCredentials
# Service-account key assembled from environment variables, so the JSON key
# file never has to live in the repository.
gData = {
    "type": os.environ.get('type'),
    "project_id": os.environ.get('project_id'),
    "private_key_id": os.environ.get('private_key_id'),
    "private_key": os.environ.get('private_key'),
    "client_email": os.environ.get('client_email'),
    "client_id": os.environ.get('client_id'),
    "auth_uri": os.environ.get('auth_uri'),
    "token_uri": os.environ.get('token_uri'),
    "auth_provider_x509_cert_url": os.environ.get('auth_provider_x509_cert_url'),
    "client_x509_cert_url": os.environ.get('client_x509_cert_url')
}
# OAuth scopes: Google Sheets feed plus Drive access.
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_dict(gData, scope)
983,026 | 0e16c0e85b8c3d8d32605d5c03d803e0e63945ee | '''
Define useful variables for chart styling applied in Python-land.
'''
BAR_DEFAULT = '#004c76'    # default bar fill color (dark blue)
BAR_HIGHLIGHT = '#fff200'  # fill color for the highlighted bar (yellow)
DISTRIBUTION_MAX = 200000  # upper bound of the distribution axis
DISTRIBUTION_BIN_NUM = 20  # number of histogram bins for the distribution chart
|
983,027 | f16259244ab5b907aa62e7a364f9846072b2299e | import shelve
import os
import send2trash
import shutil
import sys
# TO ADD EXTRA ITEMS
# Persistent word store. NOTE(review): assumes the 'try2' shelf already
# contains a 'things' list — a first run would raise KeyError; confirm the
# shelf is initialized elsewhere.
xen = shelve.open('try2')
note = xen['things']
def mklist():
    """Prompt for a word, append it (kept sorted, persisted to the shelf), and loop.

    Recurses while the user answers 'y'; otherwise returns to the main menu.
    """
    global note, item, xen
    print('\nEnter Word')
    item = input()
    note.append(item)
    note.sort(key=str.lower)
    # Persist the updated list back to the shelf.
    xen['things'] = note
    print('\nAdd another Word, y/n ?')
    choice = input()
    if choice == 'y':
        mklist()
    else:
        menu()
# TO VIEW THE DICTIONARY
def viewlist():
    """Print every stored word as a 1-based numbered list, then show the menu."""
    global note
    print('' + ' My Words \n')
    note.sort(key=str.lower)
    for i in range(len(note)):
        print('\n' + str(i + 1) + '.) ' + note[i])
    menu()
# TO DELETE WORDS
def delitem():
    """Delete a word chosen either by name or by its 1-based list number.

    Shows the numbered list, reads input, and first tries to remove the input
    as a word; if that fails, it is treated as a list number. The shelf is
    updated after a successful deletion, and control returns to the menu.

    Bug fix: the original ValueError branch evaluated `note[(ch) - 1]` (a
    str minus an int -> TypeError) and, even with int(), would have printed
    the wrong word because the element was already deleted. It also crashed
    with ValueError when the input was neither a stored word nor a number.
    """
    global note
    for i in range(len(note)):
        print('\n' + str(i + 1) + '.) ' + note[i])
    print('Which Word do you want to delete ?')
    ch = input()
    try:
        # First interpretation: the input is the word itself.
        note.remove(ch)
        removed = ch
    except ValueError:
        # Fallback: the input is a 1-based index into the list.
        try:
            removed = note.pop(int(ch) - 1)
        except (ValueError, IndexError):
            print(ch + ' was not found\n')
            menu()
            return
    xen['things'] = note
    print(removed + ' was Deleted\n')
    menu()
# MAIN MENU
def menu():
    """Display the main menu, dispatch the chosen action, or exit.

    Any input other than 1/2/3 exits the program.
    """
    print('' + '\n PERSONAL DICTIONARY')
    print('\n1. Add Word')
    print('2. Delete Word')
    print('3. View Dictionary')
    print('4. Exit\n')
    opt = input()
    if opt == '1':
        mklist()
    elif opt == '3':
        viewlist()
    elif opt == '2':
        delitem()
    else:
        print('Thank You For Using LIST')
        sys.exit()
# Entry point: show the menu immediately on startup.
menu()
|
983,028 | 3d752fb981c55f7fc39fc8deeadc328b37eef0b6 | # Generated by Django 2.0.1 on 2018-02-01 16:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('votaciones', '0003_auto_20180201_1531'),
]
operations = [
migrations.RemoveField(
model_name='consulta',
name='autor',
),
]
|
983,029 | 31c0baf889f94443c65d8a653779d8199dd81b28 | import json, os
from testDataToUnitTest import generate_unit_test
if __name__ == '__main__':
    # Resolve testData.json relative to this script so the CWD does not matter.
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "testData.json"), encoding = "utf-8") as json_file:
        test_data = json.load(json_file)
    generate_unit_test(test_data)
|
983,030 | 405ff6f810685952d6eb652e8d1ead7376289fc9 | import paddle
from paddle.metric import Accuracy
from paddle.nn import CrossEntropyLoss
from paddle.vision.datasets import Cifar10
from paddle.vision.transforms import RandomHorizontalFlip, RandomResizedCrop, Compose, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation
import math
import time
import logging
import argparse
import numpy as np
from model import WideResNet
# Module logger: INFO-level messages are mirrored to log.txt with timestamps.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def config():
    """Parse the CLI hyper-parameters for CIFAR training and return the namespace."""
    arg_parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
    # Optimization schedule.
    arg_parser.add_argument('--epoch', default=200, type=int, help='epoch of model')
    arg_parser.add_argument('--batchsize', default=128, type=int, help='epoch of model')
    arg_parser.add_argument('--lr', default=0.1, type=float, help='learning_rate')
    # Network architecture.
    arg_parser.add_argument('--net_type', default='wide-resnet', type=str, help='model')
    arg_parser.add_argument('--depth', default=28, type=int, help='depth of model')
    arg_parser.add_argument('--widen_factor', default=20, type=int, help='width of model')
    arg_parser.add_argument('--dropout', default=0.3, type=float, help='dropout_rate')
    # Data and checkpointing.
    arg_parser.add_argument('--dataset', default='cifar10', type=str, help='dataset = [cifar10/cifar100]')
    arg_parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    return arg_parser.parse_args()
def learning_rate(init, epoch):
    """Step-decay schedule: multiply `init` by 0.2 for each milestone passed.

    Milestones are epochs 60, 120 and 160 (strictly greater than).
    """
    decay_steps = sum(epoch > milestone for milestone in (60, 120, 160))
    return init * math.pow(0.2, decay_steps)
class ToArray(object):
    """Convert an H x W x C image (PIL.Image or ndarray) to a float32 CHW array.

    Pixel values are scaled from [0, 255] into [0.0, 1.0]. The input must have
    a channel axis (rank 3); the output shape is (C, H, W).
    """

    def __call__(self, img):
        chw = np.transpose(np.array(img), [2, 0, 1])
        scaled = chw / 255.
        return scaled.astype('float32')
def build_transform():
    """Build the (train, test) transform pipelines for CIFAR-10.

    Training applies crop / color-jitter / flip / rotation augmentation; both
    pipelines convert to CHW float32 in [0, 1] and normalize with CIFAR
    channel statistics.
    """
    CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
    CIFAR_STD = [0.2023, 0.1994, 0.2010]
    train_transforms = Compose([
        RandomCrop(32, padding=4),
        ContrastTransform(0.1),
        BrightnessTransform(0.1),
        RandomHorizontalFlip(),
        RandomRotation(15),
        ToArray(),
        Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    # Evaluation gets no augmentation, only tensor conversion + normalization.
    test_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
    return train_transforms, test_transforms
# Training
def train(epoch, model, train_loader, criterion, cfg):
    """Run one training epoch and return (mean loss, mean accuracy).

    Args:
        epoch: current epoch index; drives the step-decay learning rate.
        model: the network to train.
        train_loader: iterable of (image, label) batches.
        criterion: loss function.
        cfg: parsed CLI arguments (cfg.lr is the base learning rate).

    Returns:
        tuple: (epoch_loss / num_batches, epoch_acc / num_batches).
    """
    epoch_loss = 0
    epoch_acc = 0
    metric = Accuracy()
    model.train()
    # Bug fix: optimize the parameters of the `model` argument — the original
    # referenced the module-level global `net` instead.
    opt = paddle.optimizer.SGD(learning_rate=learning_rate(cfg.lr, epoch),
                               parameters=model.parameters())
    for batch_id, (img, label) in enumerate(train_loader):
        logits = model(img)
        loss = criterion(logits, label)
        acc = metric.update(metric.compute(logits, label))
        if batch_id % 10 == 0:
            logger.info("epoch: {}, batch_id: {}, train_loss: {}, train_acc: {}".format(epoch, batch_id, loss.item(), acc))
        loss.backward()
        opt.step()
        opt.clear_grad()
        epoch_loss += loss.item()
        epoch_acc += acc
    return epoch_loss / len(train_loader), epoch_acc / len(train_loader)
def test(epoch,model,val_loader,criterion):
    """Evaluate `model` over `val_loader` and return (mean loss, mean accuracy).

    No gradient updates are performed; the model is switched to eval mode.
    """
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    metric = Accuracy()
    for batch_id,(img, label) in enumerate(val_loader):
        logits = model(img)
        loss = criterion(logits, label)
        acc = metric.update(metric.compute(logits, label))
        if batch_id % 10 == 0:
            logger.info("epoch: {}, batch_id: {}, val_loss: {}, val_acc: {}".format(epoch, batch_id, loss.item(),acc))
        epoch_loss += loss.item()
        epoch_acc += acc
    return epoch_loss / len(val_loader), epoch_acc / len(val_loader)
if __name__ == '__main__':
    # Load CLI parameters.
    cfg = config()
    # Load the data.
    train_transforms,val_transforms = build_transform()
    train_set = Cifar10(mode='train', transform=train_transforms,download=True)
    test_set = Cifar10(mode='test', transform=val_transforms)
    train_loader = paddle.io.DataLoader(train_set,batch_size=cfg.batchsize,num_workers=2,return_list=True)
    val_loader = paddle.io.DataLoader(test_set,batch_size=cfg.batchsize)
    # Build the model.
    net = WideResNet(depth=cfg.depth, widen_factor=cfg.widen_factor, dropout_rate=cfg.dropout,num_classes=10)
    criterion = CrossEntropyLoss()
    # Training loop; keeps the checkpoint with the best validation accuracy.
    best_acc = 0
    use_gpu = True
    paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')
    # NOTE(review): only 2 epochs are run here despite cfg.epoch — confirm
    # whether this is a leftover smoke-test setting.
    for epoch in range(2):
        start_time = time.time()
        train_loss, train_acc = train(epoch,net,train_loader,criterion,cfg)
        valid_loss, valid_acc = test(epoch,net,val_loader,criterion)
        if best_acc < valid_acc:
            best_acc = valid_acc
            paddle.save(net.state_dict(), './checkpoint/best.pdparams')
        logger.info(f'Epoch: {epoch:02}, Best Acc: {best_acc * 100:.2f}%')
|
983,031 | b5cb88b512434980c4eb2eeb4f7f5696e3582cf7 | # Jameel H. Khan
# Module 8 Assignment - LFSR
class LFSR:
    """A linear-feedback shift register over a '0'/'1' bit string.

    Each step XORs the leftmost bit with the tap bit (counted from the right),
    shifts the register left by one and appends the feedback bit on the right.
    """

    # create an LFSR with initial state 'seed' and tap 'tap'
    def __init__(self, seed: str, tap: int):
        self.seed = seed
        self.tap = tap

    def length(self) -> int:
        """Return the number of bits in the LFSR."""
        return len(self.seed)

    def bit(self, i: int) -> str:
        """Return the bit at index `i` as a '0'/'1' character."""
        return self.seed[i]

    def step(self) -> str:
        """Execute one LFSR iteration and return the new (rightmost) bit.

        The feedback bit is seed[0] XOR seed[-tap]; it is returned as a
        '0'/'1' character, matching what callers concatenate into output.
        """
        new_bit = str(int(self.seed[0]) ^ int(self.seed[-self.tap]))
        self.seed = self.seed[1:] + new_bit
        return new_bit

    def __str__(self) -> str:
        # Bug fix: the original printed the seed and returned None, so
        # str(lfsr) raised "TypeError: __str__ returned non-string".
        return self.seed
def main():
    """Step five sample LFSRs once each and print the new state with its output bit."""
    reg1 = LFSR("0110100111", 2)
    reg2 = LFSR("0100110010", 8)
    reg3 = LFSR("1001011101", 5)
    reg4 = LFSR("0001001100", 1)
    reg5 = LFSR("1010011101", 7)
    regList = [reg1, reg2, reg3, reg4, reg5]
    for i in range(len(regList)):
        # step() mutates the register and returns the new rightmost bit.
        n = regList[i].step()
        print(regList[i].seed + " " + n)
if __name__ == "__main__":
    main()
|
983,032 | 238f7b10709995282208c2440cd29fc16c1798d3 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from ui.views import HomepageView, IbanView
admin.autodiscover()
# Routes: homepage, the IBAN lookup endpoint, and the Django admin; static
# files are additionally served from STATIC_ROOT.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'apiban.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', HomepageView.as_view(), name="homepage"),
    url(r'^get-iban$', IbanView.as_view(), name="get_iban"),
    url(r'^admin/', include(admin.site.urls)),
)+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
983,033 | 10c54eb9ce28b8bd15409f72a93fd30ffd5341e3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-08 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter submit.filepath to a blank-able CharField (auto-generated migration)."""
    dependencies = [("old_submit", "0003_externalsubmittoken")]
    operations = [
        migrations.AlterField(
            model_name="submit",
            name="filepath",
            field=models.CharField(blank=True, max_length=512, verbose_name="s\xfabor"),
        )
    ]
|
983,034 | d8eacfb703ab2bad97f82f16876809f34b31d896 | #
# Collective Knowledge (CK)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin
#
import sys
import os
##############################################################################
def load_json_file(i):
    """Load json from file into dict

    Target audience: end users

    Args:
        json_file (str): name of a json file

    Returns:
        (dict): Unified CK dictionary:
            return (int): 0 on success, 16 if the file could not be opened,
                          1 on read/parse problems
            (error) (str): error text if return > 0
            dict (dict or list): parsed content of the json file
    """
    import json

    fn = i['json_file']

    # Python 3 needs an explicit encoding; Python 2 passes it to json.loads.
    py3 = sys.version_info[0] > 2
    try:
        f = open(fn, 'r', encoding='utf8') if py3 else open(fn, 'r')
    except Exception as e:
        return {'return': 16, 'error': 'problem opening json file='+fn+' ('+format(e)+')'}

    try:
        s = f.read()
    except Exception as e:
        f.close()
        return {'return': 1, 'error': 'problem reading json file='+fn+' ('+format(e)+')'}
    f.close()

    try:
        d = json.loads(s) if py3 else json.loads(s, encoding='utf8')
    except Exception as e:
        return {'return': 1, 'error': 'problem parsing json from file='+fn+' ('+format(e)+')'}

    return {'return': 0, 'dict': d}
##############################################################################
def save_json_to_file(i):
    """Save dict to a json file

    Target audience: end users

    Args:
        json_file (str): filename to save dictionary
        dict (dict): dict to save
        (sort_keys) (str): if 'yes', sort keys
        (safe) (str): if 'yes', ignore non-JSON values (only for Debugging - changes original dict!)

    Returns:
        (dict): Unified CK dictionary:
            return (int): return code = 0, if successful
                          > 0, if error
            (error) (str): error text if return > 0
    """
    import json
    import ck.strings

    fn = i['json_file']

    if i.get('safe', '') == 'yes':
        d = i['dict']
        sd = {}
        # Keep only top-level keys whose values are JSON-serializable.
        for k in d:
            try:
                json.dumps(d[k])
            except Exception as e:
                pass
            else:
                sd[k] = d[k]
        i['dict'] = sd

    r = ck.strings.dump_json(i)
    if r['return'] > 0:
        return r
    # Normalize line endings and ensure a trailing newline before writing.
    s = r['string'].replace('\r', '')+'\n'
    return save_text_file({'text_file': fn, 'string': s})
##############################################################################
def load_yaml_file(i):
    """Load YAML file to dict

    Target audience: end users

    Args:
        yaml_file (str): name of a YAML file

    Returns:
        (dict): Unified CK dictionary:
            return (int): 0 on success, 16 if the file could not be opened,
                          1 on read/parse problems
            (error) (str): error text if return > 0
            dict (dict): dict from a YAML file
    """
    import yaml

    fn = i['yaml_file']

    try:
        if sys.version_info[0] > 2:
            f = open(fn, 'r', encoding='utf8')
        else:
            f = open(fn, 'r')
    except Exception as e:
        return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}

    try:
        s = f.read()
    except Exception as e:
        f.close()
        return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}
    f.close()

    try:
        # FullLoader avoids arbitrary object construction from untrusted YAML.
        d = yaml.load(s, Loader=yaml.FullLoader)
    except Exception as e:
        return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}

    return {'return': 0, 'dict': d}
##############################################################################
def save_yaml_to_file(i):
    """Save dict to a YAML file

    Target audience: end users

    Args:
        yaml_file (str): name of a YAML file
        dict (dict): dict to save

    Returns:
        (dict): Unified CK dictionary:
            return (int): return code = 0, if successful
                          > 0, if error
            (error) (str): error text if return > 0
    """
    import yaml

    fn = i['yaml_file']
    d = i['dict']

    try:
        # If using just dump and keys are in unicode,
        # pyyaml adds warning and makes produced yaml unparsable
        s = yaml.safe_dump(d)
    except Exception as e:
        return {'return': 1, 'error': 'problem converting dict to YAML ('+format(e)+')'}

    return save_text_file({'text_file': fn, 'string': s})
##############################################################################
def load_text_file(i):
    """Load a text file to a string or list

    Target audience: end users

    Args:
        text_file (str): name of a text file
        (keep_as_bin) (str): if 'yes', return only bin
        (encoding) (str): by default 'utf8', however sometimes we use utf16
        (split_to_list) (str): if 'yes', split to list
        (convert_to_dict) (str): if 'yes', split to list and convert to dict
        (str_split) (str): if !='', use as separator of keys/values when converting to dict
        (remove_quotes) (str): if 'yes', remove quotes from values when converting to dict
        (delete_after_read) (str): if 'yes', delete file after read (useful when reading tmp files)

    Returns:
        (dict): Unified CK dictionary:
            return (int): return code = 0, if successful
                          > 0, if error
            (error) (str): error text if return > 0
            bin (byte): loaded text file as byte array
            (string) (str): loaded text as string with removed \r
            (lst) (list): if split_to_list=='yes', split text to list
            (dict) (dict): if convert_to_dict=='yes', return as dict
    """
    fn = i['text_file']

    en = i.get('encoding', '')
    if en == '' or en == None:
        en = 'utf8'

    # Read raw bytes first; decoding happens later unless keep_as_bin=='yes'.
    try:
        f = open(fn, 'rb')
    except Exception as e:
        return {'return': 16, 'error': 'problem opening text file='+fn+' ('+format(e)+')'}

    try:
        b = f.read()
    except Exception as e:
        f.close()
        return {'return': 1, 'error': 'problem reading text file='+fn+' ('+format(e)+')'}
    f.close()

    r = {'return': 0, 'bin': b}

    if i.get('delete_after_read', '') == 'yes':
        import os
        os.remove(fn)

    if i.get('keep_as_bin', '') != 'yes':
        try:
            # decode into Python string (unicode in Python3)
            s = b.decode(en).replace('\r', '')
        except Exception as e:
            return {'return': 1, 'error': 'problem decoding content from file "'+fn+'" ('+format(e)+')'}

        r['string'] = s

        cl = i.get('split_to_list', '')
        cd = i.get('convert_to_dict', '')

        if cl == 'yes' or cd == 'yes':
            lst = s.split('\n')
            r['lst'] = lst

            if cd == 'yes':
                dd = {}
                ss = i.get('str_split', '')
                rq = i.get('remove_quotes', '')
                if ss == '':
                    ss = ':'
                for q in lst:
                    qq = q.strip()
                    ix = qq.find(ss)
                    # ix > 0 skips separator-first lines (empty keys).
                    if ix > 0:
                        k = qq[0:ix].strip()
                        v = ''
                        if ix+1 < len(qq):
                            v = qq[ix+1:].strip()
                        # Optionally strip one leading and one trailing quote.
                        if v != '' and rq == 'yes':
                            if v.startswith('"'):
                                v = v[1:]
                            if v.endswith('"'):
                                v = v[:-1]
                        dd[k] = v
                r['dict'] = dd

    return r
##############################################################################
def save_text_file(i):
    """Save string to a text file with all \r removed

    Target audience: end users

    Args:
        text_file (str): name of a text file
        string (str): string to write to a file (all \r will be removed)
        (append) (str): if 'yes', append to a file

    Returns:
        (dict): Unified CK dictionary:
            return (int): return code = 0, if successful
                          > 0, if error
            (error) (str): error text if return > 0
    """
    fn = i['text_file']
    s = i['string']

    # Strip \r whichever type the payload is (str or bytes); the variant that
    # does not match the type simply raises and is ignored.
    try:
        s = s.replace('\r', '')
    except Exception as e:
        pass
    try:
        s = s.replace(b'\r', b'')
    except Exception as e:
        pass

    m = 'w'
    if i.get('append', '') == 'yes':
        m = 'a'

    # Encode to bytes when possible so the file can be written in binary mode.
    try:
        s = s.encode('utf8')
    except Exception as e:
        pass

    try:
        # Bug fix: use a context manager so the handle is closed even when
        # write() fails — the original leaked the file object on write errors.
        with open(fn, m+'b') as f:
            f.write(s)
    except Exception as e:
        return {'return': 1, 'error': 'problem writing text file='+fn+' ('+format(e)+')'}

    return {'return': 0}
|
983,035 | 90586f9bfd38936bd6ba995cb6890013f7413a75 | # Das war die Übung zu Datentypen
# Exercise: mixed arithmetic/string expressions and their resulting types.
x = 3 * 4 + 1
print(x * len("Python") < 5)
print(3/2)
print(type(3/2))
print(type ("Hallo " + "Welt"))
print(type(3/3))
print("\n Ho " * 3 + "\n Das ist ja witzig :-) \n")
# Ask the user for their name and greet them with
# "Hallo <name>!" (or a greeting of your choice).
UserName = input("Bitte Namen eingeben: ")
NumberoffChoise = 0
NumberoffChoise = int(input ("Hallo " + UserName + ", bitte gib einen Integer ein, Werte Größer 10 brechen das Programm ab: "))
# TODO: validate the input type (non-integer input raises ValueError)!
# Keep asking until the user enters a value of at least 10.
while NumberoffChoise < 10:
    NumberoffChoise = int(input ("Bitte gib einen Integer ein:"))
def f(pNumberoffChoise):
    """Map the numbers 1-3 to their German message; anything else gets a fallback."""
    messages = {
        1: "\n Eins - Langweilig gib mir mehr! ",
        2: "\n Zwei" * 2,
        3: "\n Drei" * 3,
    }
    return messages.get(pNumberoffChoise, "Ich kann nur bis 3 zählen.\n")
# Echo the chosen message, then say goodbye: the farewell carries one '!'
# per letter of the user's name and is underlined with dashes.
print (f(NumberoffChoise) + "\n. \n..\n..."+ "\nnochmal?")
Ausgabe = "\n Programmende erreicht. - Machs Gut " + UserName + "!" * len(UserName)
print(Ausgabe)
print ("-" * len(Ausgabe))
|
983,036 | 7eda4a7c426d45b7d7ccae404ce7ee663c0dd51f | import pandas as pd
import numpy as np
import features_compute
import linearRegression as lg
import myKmeans as mk
def feature_eng(data,label,time_range=600):
    """Aggregate raw sensor samples into per-window mean/std features.

    Args:
        data: raw sample DataFrame; columns are addressed by position after
            Den_G and Difference are appended.
        label: calibration label DataFrame with a 'Duration' column.
        time_range: number of samples per aggregation window (default 600).

    Returns:
        (mean_features, duration_list): one feature row per window plus the
        list of calibration durations.

    NOTE(review): iloc indices 1..7 assume a fixed input column order
    (p, DP1, DP2, t, WLR, then appended Den_G, Difference) — confirm against
    the data loader.
    """
    duration_list = list(label['Duration'])
    den_G = features_compute.compute_density(data)
    data['Den_G'] = den_G
    # Difference between the front and rear differential pressures.
    data['Difference'] = data.DP1 - data.DP2
    mean_features = pd.DataFrame()
    for i in range(0, len(data), time_range):
        newfeatures = pd.DataFrame({
            'p_mean': [data.iloc[i:i + time_range, 1].mean()],
            't_mean': [data.iloc[i:i + time_range, 4].mean()],
            'DP1_mean': [data.iloc[i:i + time_range, 2].mean()],
            'DP2_mean': [data.iloc[i:i + time_range, 3].mean()],
            'DP1_std': [data.iloc[i:i + time_range, 2].std()],
            'DP2_std': [data.iloc[i:i + time_range, 3].std()],
            'WLR_mean': [data.iloc[i:i + time_range, 5].mean()],
            'Den_G_mean': [data.iloc[i:i + time_range, 6].mean()],
            'Diff_mean': [data.iloc[i:i + time_range, 7].mean()]})
        mean_features = pd.concat([mean_features, newfeatures], ignore_index=True)
    return mean_features,duration_list
def select_categories(label):
    """Choose the number of classification categories from the sample count.

    Args:
        label: sized collection of calibration labels.

    Returns:
        int: category count (5/8/10/15/20), growing with len(label).

    Raises:
        ValueError: if fewer than 10 labels are available.
    """
    n = len(label)
    if n < 10:
        raise ValueError('Labels are not enough!')
    if n < 20:
        return 5
    if n < 40:
        return 8
    # Bug fix: the original tested `len(label) > 40`, so exactly 40 samples
    # fell through every branch and returned 20 instead of 10.
    if n <= 60:
        return 10
    if n <= 100:
        return 15
    return 20
def classificationPointList(data, n):
    """Return `n` equal-width upper boundaries spanning the range of DP1_mean.

    The k-th boundary (1-based) is min + (max - min) / n * k, so the last
    boundary equals the column maximum.
    """
    low = data.DP1_mean.min()
    span = data.DP1_mean.max() - low
    return [low + span / n * (k + 1) for k in range(n)]
def myClassification(data,n,model='default'):
    """Assign a class label to every row, written into data['Label'].

    model='default': equal-width binning of DP1_mean into n classes; returns
        the list of bin upper boundaries.
    model='kmeans': k-means over columns 2:4; returns the cluster centroids.

    NOTE(review): in the default branch a row whose DP1_mean exceeds every
    boundary gets no label (the commented-out fallback below handled it) —
    confirm the boundaries always cover the maximum.
    """
    labels = []
    if model=='default':
        cpl = classificationPointList(data, n)
        for i in range(len(data)):
            for k in range(len(cpl)):
                if data.DP1_mean[i]<=cpl[k]:
                    labels.append(k)
                    break
        # if data.DP1_mean[i]>=cpl[-1]:
        #     labels.append(k+1)
        data['Label'] = labels
        return cpl
    elif model=='kmeans':
        dataset = np.array(data.iloc[:,2:4])
        myCentroids, clustAssing = mk.KMeans(dataset,n)
        # First column of the assignment matrix is the cluster index.
        data['Label'] = clustAssing[:,0]
        return myCentroids
def compute_general_Q(p, t, dp1, dp2, std1, std2, den_g, model='single_or', beta=0.480769, A=0.000491):
    """Compute "virtual-height" liquid/gas flow rates from orifice pressure drops.

    Args:
        p, t: mean pressure and temperature series.
        dp1, dp2: mean differential pressures of the front/rear orifice.
        std1, std2: standard deviations of dp1/dp2, used to detect no-flow.
        den_g: gas density series.
        model: 'single_or' uses only dp1 (single virtual-height model);
               'dual_or' additionally computes rates from dp2.
        beta: orifice diameter ratio.
        A: orifice cross-section area.

    Returns:
        (q_L, q_G) for 'single_or', or (q_L1, q_L2, q_G1, q_G2) for 'dual_or'.

    NOTE(review): units are not stated in SOURCE — the *1000 factor and the
    293/0.1/(t+273) gas correction suggest kPa/°C with normalization to
    standard conditions; confirm against the sensor spec.
    """
    if model == 'single_or':
        q_L = np.zeros(len(dp1), )
        q_G = np.zeros(len(dp1), )
        for i in range(len(dp1)):
            # Tiny, stable differential pressures are treated as no flow.
            if ((dp1[i] < 0.1) and (dp2[i] < 0.1)) and (std1[i] < 0.01 and std2[i] < 0.01):
                q_L[i] = 0
                q_G[i] = 0
            elif dp1[i] <= 0:
                q_L[i] = 0
                q_G[i] = 0
            else:
                q_L[i] = (0.99 / (1 - beta ** 4) ** 0.5 * A * (2 / 880 * dp1[i] * 1000) ** 0.5 * 3600)
                q_G[i] = (0.99 / (1 - beta ** 4) ** 0.5 * A * (2 / den_g[i] * dp1[i] * 1000) ** 0.5 * 3600) * 293 * (
                        p[i] + 0.1) / 0.1 / (t[i] + 273)
        return q_L, q_G
    elif model == 'dual_or':
        q_L1 = np.zeros(len(dp1), )
        q_G1 = np.zeros(len(dp1), )
        q_L2 = np.zeros(len(dp1), )
        q_G2 = np.zeros(len(dp1), )
        for i in range(len(dp1)):
            if ((dp1[i] < 0.1) and (dp2[i] < 0.1)) and (std1[i] < 0.01 and std2[i] < 0.01):
                q_L1[i] = 0
                q_G1[i] = 0
                q_L2[i] = 0
                q_G2[i] = 0
            elif dp1[i] <= 0:
                q_L1[i] = 0
                q_G1[i] = 0
                q_L2[i] = 0
                q_G2[i] = 0
            else:
                q_L1[i] = (0.99 / (1 - beta ** 4) ** 0.5 * A * (2 / 880 * dp1[i] * 1000) ** 0.5 * 3600)
                q_G1[i] = (0.99 / (1 - beta ** 4) ** 0.5 * A * (2 / den_g[i] * dp1[i] * 1000) ** 0.5 * 3600) * 293 * (
                        p[i] + 0.1) / 0.1 / (t[i] + 273)
                q_L2[i] = (0.99 / (1 - beta ** 4) ** 0.5 * A * (2 / 880 * dp2[i] * 1000) ** 0.5 * 3600)
                q_G2[i] = (0.99 / (1 - beta ** 4) ** 0.5 * A * (2 / den_g[i] * dp2[i] * 1000) ** 0.5 * 3600) * 293 * (
                        p[i] + 0.1) / 0.1 / (t[i] + 273)
        return q_L1, q_L2, q_G1, q_G2
def fitting_liquid_data(features_per_min, K, counts, targets, model='single_or', learningRate=0.00005, alpha=1.0):
    """Fit per-class liquid-flow weights from calibration data.

    Args:
        features_per_min: per-minute feature DataFrame for the calibration
            periods; must carry a 'Label' column plus the mean/std columns
            consumed by compute_general_Q.
        K: number of classes the calibration data was partitioned into.
        counts: list of the number of minutes in each calibration period.
        targets: array of true (reference) flow values, one per period.
        model: 'single_or' or 'dual_or' over-reading model.
        learningRate: gradient step size forwarded to lg.liner_Regression.
        alpha: regularisation factor forwarded to lg.liner_Regression.

    Returns:
        (Weights_or, regression_fit, loss): fitted per-class weight vector,
        the per-period per-class mean virtual-flow matrix, and the final loss.
    """
    if model == 'single_or':
        n = len(counts)
        ql_train_gmodel_or = compute_general_Q(features_per_min.p_mean.values,
                                               features_per_min.t_mean.values,
                                               features_per_min.DP1_mean.values,
                                               features_per_min.DP2_mean.values,
                                               features_per_min.DP1_std.values,
                                               features_per_min.DP2_std.values,
                                               features_per_min.Den_G_mean.values,
                                               model=model)[0]
        ql_per_min = ql_train_gmodel_or
        all_labels = features_per_min['Label'].values
        regression_fit = np.zeros((n, K))
        for i in range(n):
            # Consume this period's minutes from the front of the arrays,
            # then delete them so the next period starts at index 0.
            range_data = ql_per_min[:counts[i]]
            labels = all_labels[:counts[i]]
            ql_per_min = np.delete(ql_per_min, [j for j in range(counts[i])])
            all_labels = np.delete(all_labels, [j for j in range(counts[i])])
            # Accumulate each minute's virtual flow into its class column.
            for kk in range(len(range_data)):
                for label in range(K):
                    if labels[kk] == label:
                        regression_fit[i][label] += range_data[kk]
            # Average over the period length.
            regression_fit[i] = regression_fit[i] / counts[i]
        Weights_or,loss = lg.liner_Regression \
            (regression_fit, targets.reshape(n, 1), learningRate=learningRate, Loopnum=2000000, alpha=alpha)
        return Weights_or, regression_fit,loss
    # =============================================================================
    # '''
    # Dual over-reading model: DP1- and DP2-based flows occupy columns
    # [0, K) and [K, 2K) respectively.
    # '''
    # =============================================================================
    elif model == 'dual_or':
        n = len(counts)
        ql_train_gmodel_or1, ql_train_gmodel_or2 = compute_general_Q(features_per_min.p_mean.values,
                                                                     features_per_min.t_mean.values,
                                                                     features_per_min.DP1_mean.values,
                                                                     features_per_min.DP2_mean.values,
                                                                     features_per_min.DP1_std.values,
                                                                     features_per_min.DP2_std.values,
                                                                     features_per_min.Den_G_mean.values,
                                                                     model=model)[0:2]
        ql_per_min1, ql_per_min2 = ql_train_gmodel_or1, ql_train_gmodel_or2
        all_labels = features_per_min['Label'].values
        regression_fit = np.zeros((n, 2 * K))
        for i in range(n):
            range_data1 = ql_per_min1[:counts[i]]
            range_data2 = ql_per_min2[:counts[i]]
            labels = all_labels[:counts[i]]
            ql_per_min1 = np.delete(ql_per_min1, [j for j in range(counts[i])])
            ql_per_min2 = np.delete(ql_per_min2, [j for j in range(counts[i])])
            all_labels = np.delete(all_labels, [j for j in range(counts[i])])
            for kk in range(len(range_data1)):
                for label in range(K):
                    if labels[kk] == label:
                        regression_fit[i][label] += range_data1[kk]
                        regression_fit[i][label + K] += range_data2[kk]
            regression_fit[i] = regression_fit[i] / counts[i]
        Weights_or,loss = lg.liner_Regression(regression_fit, targets.reshape(n, 1), learningRate=learningRate,
                                              Loopnum=2000000, alpha=alpha)
        return Weights_or, regression_fit,loss
def fitting_gas_data(features_per_min, K, counts, targets, model='single_or', learningRate=0.0000005, alpha=1.0):
    """Fit per-class gas-flow weights from calibration data.

    Args:
        features_per_min: per-minute feature DataFrame for the calibration
            periods; must carry a 'Label' column plus the mean/std columns
            consumed by compute_general_Q.
        K: number of classes the calibration data was partitioned into.
        counts: list of the number of minutes in each calibration period.
        targets: array of true (reference) gas-flow values, one per period.
        model: 'single_or' or 'dual_or' over-reading model.
        learningRate: gradient step size forwarded to lg.liner_Regression.
        alpha: regularisation factor forwarded to lg.liner_Regression.

    Returns:
        (Weights_or, regression_fit, loss): fitted per-class weight vector,
        the per-period per-class mean virtual-flow matrix, and the final loss.
    """
    if model == 'single_or':
        n = len(counts)
        # Index [1] selects the gas flow from compute_general_Q's (q_L, q_G).
        qg_train_gmodel_or = compute_general_Q(features_per_min.p_mean.values,
                                               features_per_min.t_mean.values,
                                               features_per_min.DP1_mean.values,
                                               features_per_min.DP2_mean.values,
                                               features_per_min.DP1_std.values,
                                               features_per_min.DP2_std.values,
                                               features_per_min.Den_G_mean.values,
                                               model=model)[1]
        qg_per_min = qg_train_gmodel_or
        all_labels = features_per_min['Label'].values
        regression_fit = np.zeros((n, K))
        for i in range(n):
            # Consume this period's minutes from the front, then delete them.
            range_data = qg_per_min[:counts[i]]
            labels = all_labels[:counts[i]]
            qg_per_min = np.delete(qg_per_min, [j for j in range(counts[i])])
            all_labels = np.delete(all_labels, [j for j in range(counts[i])])
            for kk in range(len(range_data)):
                for label in range(K):
                    if labels[kk] == label:
                        regression_fit[i][label] += range_data[kk]
            # Average over the period length.
            regression_fit[i] = regression_fit[i] / counts[i]
        Weights_or,loss = lg.liner_Regression(regression_fit, targets.reshape(n, 1), learningRate=learningRate,
                                              Loopnum=2000000, alpha=alpha)
        return Weights_or, regression_fit,loss
    elif model == 'dual_or':
        n = len(counts)
        # Indices [2:4] select (q_G1, q_G2) from the dual-orifice result.
        qg_train_gmodel_or1, qg_train_gmodel_or2 = compute_general_Q(features_per_min.p_mean.values,
                                                                     features_per_min.t_mean.values,
                                                                     features_per_min.DP1_mean.values,
                                                                     features_per_min.DP2_mean.values,
                                                                     features_per_min.DP1_std.values,
                                                                     features_per_min.DP2_std.values,
                                                                     features_per_min.Den_G_mean.values,
                                                                     model=model)[2:4]
        qg_per_min1, qg_per_min2 = qg_train_gmodel_or1, qg_train_gmodel_or2
        all_labels = features_per_min['Label'].values
        regression_fit = np.zeros((n, 2 * K))
        for i in range(n):
            range_data1 = qg_per_min1[:counts[i]]
            range_data2 = qg_per_min2[:counts[i]]
            labels = all_labels[:counts[i]]
            qg_per_min1 = np.delete(qg_per_min1, [j for j in range(counts[i])])
            qg_per_min2 = np.delete(qg_per_min2, [j for j in range(counts[i])])
            all_labels = np.delete(all_labels, [j for j in range(counts[i])])
            for kk in range(len(range_data1)):
                for label in range(K):
                    if labels[kk] == label:
                        regression_fit[i][label] += range_data1[kk]
                        regression_fit[i][label + K] += range_data2[kk]
            regression_fit[i] = regression_fit[i] / counts[i]
        Weights_or,loss = lg.liner_Regression(regression_fit, targets.reshape(n, 1), learningRate=learningRate,
                                              Loopnum=2000000, alpha=alpha)
        return Weights_or, regression_fit,loss
# def my_smooth(dst, span):
# """
# smooth函数python实现
# """
# src = dst.copy()
# if span <= 0:
# ex = Exception('输入非法区间值')
# raise ex
# # 如果输入的区间数为偶数,将区间值减一变为奇数
# if (span % 2 == 0):
# span -= 1
#
# if (span > len(dst)):
# ex = Exception('输入区间值大于列表长度')
# raise ex
#
# for i in range(len(dst)):
# r = int((span - 1) / 2)
#
# # 对两端元素不够区间窗口长度的,减小窗口半径
# while (i - r < 0 or i + r >= len(dst)):
# r -= 1
#
# src[i] = sum(dst[i - r:i + r + 1]) / (2 * r + 1)
# return src
# def compute_features(data):
# """
# data = pd.read_csv(filename,header=None,delimiter=' ',names=['P','DP1','DP2','Temp'],engine='python')
# 计算标定值时间范围内的特征值
# data为该段时间范围内的原始信号(命名规则:['P','DP1','DP2','Temp'])
# """
# den_G = features_compute.compute_density(data)
# data['Den_G'] = den_G
# data['Difference'] = data.DP1 - data.DP2
# # data['smooth'] = my_smooth(data.DP1, 10)
# # data['ratio'] = data.DP1 / data.DP2
# # data.drop(columns=['P', 'Temp'], inplace=True)
#
# DP1_p, DP1_f = features_compute.compute_freq_power(data.DP1)
# DP2_p, DP2_f = features_compute.compute_freq_power(data.DP2)
# Den_p, Den_f = features_compute.compute_freq_power(data.Den_G)
# Diff_p, Diff_f = features_compute.compute_freq_power(data.Difference)
# # print(DP1_p)
# # if 'features' not in locals():
# features = pd.DataFrame(
# {'p_mean': [data.P.mean()],
# 't_mean': [data.Temp.mean()],
# 'DP1_mean': [data.DP1.mean()],
# 'DP1_std': [data.DP1.std()],
# 'DP1_var': [data.DP1.var()],
# # 'DP1_zrcs': [features_compute.compute_zrcs(data.DP1)],
# # 'DP1_avgcs': [features_compute.compute_avgcs(data.DP1)],
# 'DP1_skew': [features_compute.compute_skew(data.DP1)],
# 'DP1_kurt': [features_compute.compute_kurt(data.DP1)],
# 'DP1_f1': [features_compute.compute_f1(DP1_p, DP1_f)],
# 'DP1_f2': [features_compute.compute_f2(DP1_p, DP1_f)],
# # 'DP1_E': [features_compute.compute_Entropy(DP1_p)],
# 'DP1_SF': [features_compute.compute_SF(DP1_p)],
# 'DP2_mean': [data.DP2.mean()],
# 'DP2_std': [data.DP2.std()],
# 'DP2_var': [data.DP2.var()],
# # 'DP2_zrcs': [features_compute.compute_zrcs(data.DP2)],
# # 'DP2_avgcs': [features_compute.compute_avgcs(data.DP2)],
# 'DP2_skew': [features_compute.compute_skew(data.DP2)],
# 'DP2_kurt': [features_compute.compute_kurt(data.DP2)],
# 'DP2_f1': [features_compute.compute_f1(DP2_p, DP2_f)],
# 'DP2_f2': [features_compute.compute_f2(DP2_p, DP2_f)],
# # 'DP2_E': [features_compute.compute_Entropy(DP2_p)],
# 'DP2_SF': [features_compute.compute_SF(DP2_p)],
# 'Den_mean': [data.Den_G.mean()],
# 'Den_std': [data.Den_G.std()],
# 'Den_var': [data.Den_G.var()],
# # 'Den_zrcs': [features_compute.compute_zrcs(data.Den_G)],
# 'Den_skew': [features_compute.compute_skew(data.Den_G)],
# 'Den_kurt': [features_compute.compute_kurt(data.Den_G)],
# 'Den_f1': [features_compute.compute_f1(Den_p, Den_f)],
# 'Den_f2': [features_compute.compute_f2(Den_p, Den_f)],
# # 'Den_E': [features_compute.compute_Entropy(Den_p)],
# 'Den_SF': [features_compute.compute_SF(Den_p)],
# 'Diff_mean': [data.Difference.mean()],
# 'Diff_std': [data.Difference.std()],
# 'Diff_var': [data.Difference.var()],
# # 'Diff_zrcs': [features_compute.compute_zrcs(data.Difference)],
# 'Diff_skew': [features_compute.compute_skew(data.Difference)],
# 'Diff_kurt': [features_compute.compute_kurt(data.Difference)],
# 'Diff_f1': [features_compute.compute_f1(Diff_p, Diff_f)],
# 'Diff_f2': [features_compute.compute_f2(Diff_p, Diff_f)],
# # 'Diff_E': [features_compute.compute_Entropy(Diff_p)],
# 'Diff_SF': [features_compute.compute_SF(Diff_p)],
# 'pulse': [abs(data.DP1 - data.smooth).mean()],
# 'ratio_mean': [data.ratio.mean()]})
#
# # else:
# # newfeature = pd.DataFrame(
# # {'DP1_mean': [data.DP1.mean()], 'DP1_std': [data.DP1.std()], 'DP1_var': [data.DP1.var()], 'DP1_zrcs': \
# # [features_compute.compute_zrcs(data.DP1)], 'DP1_avgcs': [features_compute.compute_avgcs(data.DP1)],
# # 'DP1_skew': [features_compute.compute_skew(data.DP1)],
# # 'DP1_kurt': [features_compute.compute_kurt(data.DP1)], \
# # 'DP1_f1': [features_compute.compute_f1(DP1_p, DP1_f)],
# # 'DP1_f2': [features_compute.compute_f2(DP1_p, DP1_f)], 'DP1_E': [features_compute.compute_Entropy(DP1_p)], \
# # 'DP1_SF': [features_compute.compute_SF(DP1_p)], 'DP2_mean': [data.DP2.mean()], 'DP2_std': [data.DP2.std()],
# # 'DP2_var': \
# # [data.DP2.var()], 'DP2_zrcs': [features_compute.compute_zrcs(data.DP2)],
# # 'DP2_avgcs': [features_compute.compute_avgcs(data.DP2)],
# # 'DP2_skew': [features_compute.compute_skew(data.DP2)], 'DP2_kurt': \
# # [features_compute.compute_kurt(data.DP2)], 'DP2_f1': [features_compute.compute_f1(DP2_p, DP2_f)],
# # 'DP2_f2': [features_compute.compute_f2(DP2_p, DP2_f)], 'DP2_E': \
# # [features_compute.compute_Entropy(DP2_p)], 'DP2_SF': [features_compute.compute_SF(DP2_p)],
# # 'Den_mean': [data.Den_G.mean()], 'Den_std': \
# # [data.Den_G.std()], 'Den_var': [data.Den_G.var()],
# # 'Den_zrcs': [features_compute.compute_zrcs(data.Den_G)], 'Den_skew': \
# # [features_compute.compute_skew(data.Den_G)], 'Den_kurt': [features_compute.compute_kurt(data.Den_G)],
# # 'Den_f1': [features_compute.compute_f1(Den_p, Den_f)], \
# # 'Den_f2': [features_compute.compute_f2(Den_p, Den_f)], 'Den_E': [features_compute.compute_Entropy(Den_p)],
# # 'Den_SF': [features_compute.compute_SF(Den_p)], \
# # 'Diff_mean': [data.Difference.mean()], 'Diff_std': [data.Difference.std()],
# # 'Diff_var': [data.Difference.var()], \
# # 'Diff_zrcs': [features_compute.compute_zrcs(data.Difference)],
# # 'Diff_skew': [features_compute.compute_skew(data.Difference)], 'Diff_kurt': \
# # [features_compute.compute_kurt(data.Difference)],
# # 'Diff_f1': [features_compute.compute_f1(Diff_p, Diff_f)],
# # 'Diff_f2': [features_compute.compute_f2(Diff_p, Diff_f)], \
# # 'Diff_E': [features_compute.compute_Entropy(Diff_p)], 'Diff_SF': [features_compute.compute_SF(Diff_p)], \
# # 'pulse': [abs(data.DP1 - data.smooth).mean()], 'ratio_mean': [data.ratio.mean()]})
# # features = features.append(newfeature, ignore_index=True)
# return features
#
#
# def compute_features_by_time(data_all, time_range=600):
# """
# data_all = pd.read_csv(filename,header=None,delimiter=' ',names=['P','DP1','DP2','Temp'],engine='python')
# 计算标定值时间范围内time_range时间段的平均特征值(默认为600帧,即一分钟)
# data_all为该段时间范围内的原始信号(命名规则:['P','DP1','DP2','Temp'])
# """
# # den_G = features_compute.compute_density(data_all)
# # data_all['Den_G'] = den_G
# # data_all['Difference'] = data_all.DP1 - data_all.DP2
# # data_all['smooth'] = my_smooth(data_all.DP1, 10)
# # data_all['ratio'] = data_all.DP1 / data_all.DP2
# # data_all.drop(columns=['P', 'Temp'], inplace=True)
# counts = len(data_all) // time_range
# for n in range(0, len(data_all) // time_range * time_range, time_range):
# data = data_all.iloc[n:n + time_range, :]
# DP1_p, DP1_f = features_compute.compute_freq_power(data.DP1)
# DP2_p, DP2_f = features_compute.compute_freq_power(data.DP2)
# Den_p, Den_f = features_compute.compute_freq_power(data.Den_G)
# Diff_p, Diff_f = features_compute.compute_freq_power(data.Difference)
# # print(DP1_p)
# if 'features_split' not in locals():
# features_split = pd.DataFrame(
# {'p_mean': [data.P.mean()],
# 't_mean': [data.Temp.mean()],
# 'DP1_mean': [data.DP1.mean()],
# 'DP1_std': [data.DP1.std()],
# 'DP1_var': [data.DP1.var()],
# # 'DP1_zrcs': [features_compute.compute_zrcs(data.DP1)],
# # 'DP1_avgcs': [features_compute.compute_avgcs(data.DP1)],
# 'DP1_skew': [features_compute.compute_skew(data.DP1)],
# 'DP1_kurt': [features_compute.compute_kurt(data.DP1)],
# 'DP1_f1': [features_compute.compute_f1(DP1_p, DP1_f)],
# 'DP1_f2': [features_compute.compute_f2(DP1_p, DP1_f)],
# # 'DP1_E': [features_compute.compute_Entropy(DP1_p)],
# 'DP1_SF': [features_compute.compute_SF(DP1_p)],
# 'DP2_mean': [data.DP2.mean()],
# 'DP2_std': [data.DP2.std()],
# 'DP2_var': [data.DP2.var()],
# # 'DP2_zrcs': [features_compute.compute_zrcs(data.DP2)],
# # 'DP2_avgcs': [features_compute.compute_avgcs(data.DP2)],
# 'DP2_skew': [features_compute.compute_skew(data.DP2)],
# 'DP2_kurt': [features_compute.compute_kurt(data.DP2)],
# 'DP2_f1': [features_compute.compute_f1(DP2_p, DP2_f)],
# 'DP2_f2': [features_compute.compute_f2(DP2_p, DP2_f)],
# # 'DP2_E': [features_compute.compute_Entropy(DP2_p)],
# 'DP2_SF': [features_compute.compute_SF(DP2_p)],
# 'Den_mean': [data.Den_G.mean()],
# 'Den_std': [data.Den_G.std()],
# 'Den_var': [data.Den_G.var()],
# # 'Den_zrcs': [features_compute.compute_zrcs(data.Den_G)],
# 'Den_skew': [features_compute.compute_skew(data.Den_G)],
# 'Den_kurt': [features_compute.compute_kurt(data.Den_G)],
# 'Den_f1': [features_compute.compute_f1(Den_p, Den_f)],
# 'Den_f2': [features_compute.compute_f2(Den_p, Den_f)],
# # 'Den_E': [features_compute.compute_Entropy(Den_p)],
# 'Den_SF': [features_compute.compute_SF(Den_p)],
# 'Diff_mean': [data.Difference.mean()],
# 'Diff_std': [data.Difference.std()],
# 'Diff_var': [data.Difference.var()],
# # 'Diff_zrcs': [features_compute.compute_zrcs(data.Difference)],
# 'Diff_skew': [features_compute.compute_skew(data.Difference)],
# 'Diff_kurt': [features_compute.compute_kurt(data.Difference)],
# 'Diff_f1': [features_compute.compute_f1(Diff_p, Diff_f)],
# 'Diff_f2': [features_compute.compute_f2(Diff_p, Diff_f)],
# # 'Diff_E': [features_compute.compute_Entropy(Diff_p)],
# 'Diff_SF': [features_compute.compute_SF(Diff_p)],
# 'pulse': [abs(data.DP1 - data.smooth).mean()],
# 'ratio_mean': [data.ratio.mean()]})
# else:
# newfeature = pd.DataFrame(
# {'p_mean': [data.P.mean()],
# 't_mean': [data.Temp.mean()],
# 'DP1_mean': [data.DP1.mean()],
# 'DP2_mean': [data.DP2.mean()],
# 'Den_mean': [data.Den_G.mean()],
# 'Diff_mean': [data.Difference.mean()],
# 'Diff_std': [data.Difference.std()],
# 'Diff_var': [data.Difference.var()],
# # 'Diff_zrcs': [features_compute.compute_zrcs(data.Difference)],
# 'Diff_skew': [features_compute.compute_skew(data.Difference)],
# 'Diff_kurt': [features_compute.compute_kurt(data.Difference)],
# 'Diff_f1': [features_compute.compute_f1(Diff_p, Diff_f)],
# 'Diff_f2': [features_compute.compute_f2(Diff_p, Diff_f)],
# # 'Diff_E': [features_compute.compute_Entropy(Diff_p)],
# 'Diff_SF': [features_compute.compute_SF(Diff_p)],
# 'pulse': [abs(data.DP1 - data.smooth).mean()],
# 'ratio_mean': [data.ratio.mean()]})
# features_split = features_split.append(newfeature, ignore_index=True)
# return features_split, counts
# def feature_extraction(data):
# """
# 输入标定时间段原始信号,输出分别为标定时间段特征值与标定时间段每分钟特征值
# """
# features_all = compute_features(data)
# features_per_min = compute_features_by_time(data)
# return features_all, features_per_min
# def my_feature_selection(features_all, targets, feature_num=2):
# """
# 输入标定时间段的特征矩阵、标定值以及希望提取的相关特征个数n(默认为2)
# 返回一个长度为n的列表,列表中的元素为相关特征的索引值
# """
# from sklearn.feature_selection import f_regression, SelectKBest
# from sklearn.preprocessing import StandardScaler
# if feature_num == 'all':
# index_ = [i for i in range(len(features_all.columns))]
# return index_
# else:
# index_ = []
# scaler = StandardScaler()
# selector = SelectKBest(f_regression, feature_num)
# features_sca = scaler.fit_transform(features_all)
# selected_feature = selector.fit_transform(features_sca, targets)
# for i in range(feature_num):
# index = list(features_sca[0]).index(selected_feature[0][i])
# index_.append(index)
# return index_
|
983,037 | cd74e00240cab11586ee836af0931b1b03952293 | import scipy.stats as sta
import matplotlib.pyplot as plt
X = sta.norm(loc=950,scale=20)
plt.hist(X.rvs(size=100),color='yellowgreen')
plt.show()
|
983,038 | ed74b4549951ee6696508bd7deb5811306f33f08 | def difference(self, other, match='line', path=None, replace=None):
"Perform a config diff against the another network config\n\n :param other: instance of NetworkConfig to diff against\n :param match: type of diff to perform. valid values are 'line',\n 'strict', 'exact'\n :param path: context in the network config to filter the diff\n :param replace: the method used to generate the replacement lines.\n valid values are 'block', 'line'\n\n :returns: a string of lines that are different\n "
if (path and (match != 'line')):
try:
other = other.get_block(path)
except ValueError:
other = list()
else:
other = other.items
meth = getattr(self, ('_diff_%s' % match))
updates = meth(other)
if (replace == 'block'):
parents = list()
for item in updates:
if (not item.has_parents):
parents.append(item)
else:
for p in item._parents:
if (p not in parents):
parents.append(p)
updates = list()
for item in parents:
updates.extend(self._expand_block(item))
visited = set()
expanded = list()
for item in updates:
for p in item._parents:
if (p.line not in visited):
visited.add(p.line)
expanded.append(p)
expanded.append(item)
visited.add(item.line)
return expanded |
983,039 | 5230aa3a2c3b2b2b53719b1b56d033bc8896f176 | def make_adder(n):
def add(x):
return x + n
return add
if __name__ == '__main__':
plus_3 = make_adder(3)
plus_5 = make_adder(5)
print(plus_3(3))
print(plus_5(7))
|
983,040 | 43a0680786318d034d1c2814e0f6a461d98e5fb6 | from common.okfpgaservers.pulser.pulse_sequences.pulse_sequence import pulse_sequence
class back_ramp_U2(pulse_sequence):
def configuration(self):
config = [
('DACcontrol','dac_pulse_length'),
('DACcontrol','num_steps'),
('DACcontrol','time_up'),
]
return config
def sequence(self):
self.end = self.start + self.p.dac_pulse_length
# N TTL pulses
index = 1.0
while index <= self.p.num_steps:
self.ttl_pulses.append(('adv', self.start+self.p.time_up * (index-1), self.p.dac_pulse_length))
index = index + 1
|
983,041 | fca5f7ee067aa854aaabff9f69b964a9f37d0c15 | # 实现PCA分析和法向量计算,并加载数据集中的文件进行验证
import open3d as o3d
import os
import numpy as np
from pyntcloud import PyntCloud
# 功能:计算PCA的函数
# 输入:
# data:点云,NX3的矩阵
# correlation:区分np的cov和corrcoef,不输入时默认为False
# sort: 特征值排序,排序是为了其他功能方便使用,不输入时默认为True
# 输出:
# eigenvalues:特征值
# eigenvectors:特征向量
def PCA(data, correlation=False, sort=True):
    """Principal component analysis of an N x 3 point array.

    Args:
        data: (N, 3) array of points (rows = observations, columns = x/y/z).
        correlation: if True use the correlation matrix (np.corrcoef),
            otherwise the covariance matrix (np.cov). Default False.
        sort: if True, return eigenvalues and eigenvectors sorted by
            eigenvalue in descending order. Default True.

    Returns:
        (eigenvalues, eigenvectors): eigenvectors are the matrix COLUMNS,
        aligned with the eigenvalues.
    """
    # NOTE: the original pre-initialized m with np.zeros(..., dtype=np.float);
    # the np.float alias was removed in NumPy 1.24 (AttributeError) and the
    # array was immediately overwritten anyway, so the dead init is dropped.
    if correlation:
        m = np.corrcoef(data, rowvar=False)
    else:
        m = np.cov(data, rowvar=False)
    eigenvalues, eigenvectors = np.linalg.eig(m)
    if sort:
        # Descending order; use a fresh name so the `sort` flag isn't shadowed.
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]
    return eigenvalues, eigenvectors
def main():
    """Load a point cloud, report its principal direction, and estimate
    per-point normals from the local PCA of each point's neighbourhood."""
    # Load the raw point cloud (machine-specific path).
    point_cloud_pynt = PyntCloud.from_file("/media/wegatron/data/data/ModelNet/data/airplane_0223.off")
    point_cloud_o3d = point_cloud_pynt.to_instance("open3d", mesh=False)
    # o3d.visualization.draw_geometries([point_cloud_o3d])  # show raw cloud

    # Work on the bare point coordinates only.
    points = point_cloud_pynt.points.values
    print('total points number is:', len(points))

    # Principal direction of the whole cloud = eigenvector of the largest
    # eigenvalue (PCA returns them in descending order).
    w, v = PCA(points)
    point_cloud_vector = v[:, 0]
    print('the main orientation of this pointcloud is: ', point_cloud_vector)
    # o3d.visualization.draw_geometries([point_cloud_o3d])

    # Per-point normal estimation: for each point, PCA over its 10 nearest
    # neighbours; the normal is the direction of LEAST variance, i.e. the
    # eigenvector of the SMALLEST eigenvalue — the last column after the
    # descending sort. (The original used evecs[:, 0], the largest-variance
    # direction, which lies tangent to the surface, not normal to it.)
    pcd_tree = o3d.geometry.KDTreeFlann(point_cloud_o3d)
    normals = []
    for i in range(0, len(points)):
        inds = pcd_tree.search_knn_vector_3d(points[i], 10)[1]
        neighbor_pts = points[inds]
        evals, evecs = PCA(neighbor_pts)
        normals.append(evecs[:, 2])
    normals = np.array(normals, dtype=np.float64)
    point_cloud_o3d.normals = o3d.utility.Vector3dVector(normals)
    o3d.visualization.draw_geometries([point_cloud_o3d])
if __name__ == '__main__':
    main()
983,042 | 2243eb5d63c9b19e16394932787025896220326f | from setuptools import setup, find_namespace_packages
setup(
name='bristolhackspace.flask_theme',
packages=find_namespace_packages(include=['bristolhackspace.*']),
include_package_data=True,
zip_safe=False,
install_requires=[
"flask>=2.0",
]
) |
983,043 | 0fc2b41aede2ab05d9176e907d983cba55a8128e | from Card_class import Card
import random
class CardDeck:
    """A standard deck of 52 unique playing cards.

    There are 4 suits and 13 cards of each suit.
    """
    def __init__(self):
        """Build and shuffle a 52-card deck.

        After init:
          self.__suit_dict: list of single-entry dicts mapping suit name to
              its strength (Diamonds strongest ... Clubs weakest).
          self.cards_list: shuffled list of the 52 distinct Card objects.
        """
        self.__suit_dict = [{"Diamonds": 1}, {"Spades": 2}, {"Harts": 3}, {"Clubs": 4}]
        self.cards_list = []
        # One Card per (suit, value) combination; values run 1..13.
        for suit in self.__suit_dict:
            for value in range(1, 14):
                self.cards_list.append(Card(suit, value))
        self.Shuffle()
    # def __str__(self):
    #     """
    #     simple str method using the Card class __repr__ method
    #     :return:
    #     """
    #     return f"the Deck: {self.cards_list}"
    def Shuffle(self):
        """Shuffle self.cards_list in place (also called from __init__)."""
        random.shuffle(self.cards_list)
    def deal_one(self):
        """Remove and return one random card from the deck.

        NOTE(review): list.remove() deletes the first card that compares
        equal to the chosen one — fine here only as long as all 52 Card
        objects compare unequal; confirm Card's __eq__ semantics.
        """
        rand_card = random.choice(self.cards_list)
        self.cards_list.remove(rand_card)
        return rand_card
    def show(self):
        """Print every card remaining in the deck, one per line."""
        for card in self.cards_list:
            print(card)
983,044 | 559ca4923665131246c9933e5ec49656b8e0ed6e | #Chetan Velonde 3019155
#Python program to make a simple calculator
def calculate(a, b, i):
    """Print the result of applying operation number *i* to a and b.

    Operations: 1=addition, 2=subtraction, 3=multiplication,
    4=division, 5=power. Any other value prints nothing.
    """
    if i == 1:
        print(f"{a} + {b} = {a + b}")
    elif i == 2:
        print(f"{a} - {b} = {a - b}")
    elif i == 3:
        print(f"{a} * {b} = {a * b}")
    elif i == 4:
        print(f"{a} / {b} = {a / b}")
    elif i == 5:
        print(f"The value of base and power is {a} and {b} respectively")
        print(f"{a}^{b} = {a ** b}")
# Interactive driver: ask the user for an operation and two integer operands,
# then delegate to calculate().
print("The operations available for calculation are: ")
print(" 1. Addition\n 2. Subtraction\n 3. Multiplication\n 4. Division\n 5. Power")
i = int(input("Enter the value corresponding to the operation: "))
a = int(input("Enter the first value: "))
b = int(input("Enter the second value: "))
calculate(a,b,i)
|
983,045 | 1f92757f4f6bc0e0b57884673b63f1b2f6ada530 | Python 3.7.5 (tags/v3.7.5:5c02a39a0b, Oct 15 2019, 00:11:34) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> 2 + 3
5
>>> 9 - 8
1
>>> 4 * 6
24
>>> 8 / 4
2.0
>>> 5/2
2.5
>>> 5 //2
2
>>> 8+9-10
7
>>> 8+9-
SyntaxError: invalid syntax
>>> 8+2*3
14
>>> (8+2)*3
30
>>> 2*2*2
8
>>> 2**3
8
>>> 10//3
3
>>> 10 %3
1
>>> 'navin'
'navin'
>>> print("navin")
navin
>>> print('sandhya's laptop')
SyntaxError: invalid syntax
>>> print("sandhya's laptop")
sandhya's laptop
>>> print('sandhya "laptop"')
sandhya "laptop"
>>>
>>>
>>>
>>>
>>>
>>>
>>> print('navin's "laptop"')
SyntaxError: invalid syntax
>>> print('navin\'s "laptop"')
navin's "laptop"
>>> 'navin' * 3
'navinnavinnavin'
>>> 'navin\n' * 3
'navin\nnavin\nnavin\n'
>>> 'navin'\n * 3
SyntaxError: unexpected character after line continuation character
>>> 'navin' + 'navin'
'navinnavin'
>>> 10* 'navin'
'navinnavinnavinnavinnavinnavinnavinnavinnavinnavin'
>>> print('c:\docos\navin')
c:\docos
avin
>>> 'navin \n' * 3
'navin \nnavin \nnavin \n'
>>> print('c:\docos\navin')
c:\docos
avin
>>> print(r'c:\docos\navin')
c:\docos\navin
>>> x=2
>>> x+3
5
>>> y=3
>>> x+y
5
>>> x=9
>>> x+y
12
>>> x
9
>>> abc
Traceback (most recent call last):
File "<pyshell#43>", line 1, in <module>
abc
NameError: name 'abc' is not defined
>>> x+10
19
>>> 19+y
22
>>> _+y
25
>>> __+x
Traceback (most recent call last):
File "<pyshell#47>", line 1, in <module>
__+x
NameError: name '__' is not defined
>>> name="youtube"
>>> name
'youtube'
>>> name + 'rocks'
'youtuberocks'
>>> name + ' rocks'
'youtube rocks'
>>> name 'rocks'
SyntaxError: invalid syntax
>>> name[0]
'y'
>>> name[6]
'e'
>>> name[8]
Traceback (most recent call last):
File "<pyshell#55>", line 1, in <module>
name[8]
IndexError: string index out of range
>>> name[-1]
'e'
>>> name[-2]
'b'
>>> name[-7]
'y'
>>> name[0:2]
'yo'
>>> name[1:4]
'out'
>>> name[1:]
'outube'
>>> name[:4]
'yout'
>>> name[3:10]
'tube'
>>> name[0:3]='my'
Traceback (most recent call last):
File "<pyshell#64>", line 1, in <module>
name[0:3]='my'
TypeError: 'str' object does not support item assignment
>>> name[0]
'y'
>>> name[0]='r'
Traceback (most recent call last):
File "<pyshell#66>", line 1, in <module>
name[0]='r'
TypeError: 'str' object does not support item assignment
>>> 'my' =name[3]
SyntaxError: can't assign to literal
>>> 'my' +name[3:]
'mytube'
>>> my='Sandhya Rani'
>>> len(my)
12
>>> print(r,'Telusko \n Rocks')
Traceback (most recent call last):
File "<pyshell#71>", line 1, in <module>
print(r,'Telusko \n Rocks')
NameError: name 'r' is not defined
>>> print(r'Telusko\n Rocks')
Telusko\n Rocks
>>> print(r'Telusko \n Rocks')
Telusko \n Rocks
>>> nums=[25,12,95,14,36]
>>> nums
[25, 12, 95, 14, 36]
>>> num[0]
Traceback (most recent call last):
File "<pyshell#76>", line 1, in <module>
num[0]
NameError: name 'num' is not defined
>>> nums[0]
25
>>> nums[4]
36
>>> nums[2:]
[95, 14, 36]
>>> nums[-1]
36
>>> nums[5]
Traceback (most recent call last):
File "<pyshell#81>", line 1, in <module>
nums[5]
IndexError: list index out of range
>>> nums[-5]
25
>>> names=['navin','kiran','john']
>>> names
['navin', 'kiran', 'john']
>>> values=[9.5,'navin',25]
>>> values
[9.5, 'navin', 25]
>>> mil=[nums,names]
|
983,046 | 8f40fc690decdd7d94dda269aef34364def1e335 | import json
fich=open("/home/franhidalgo/Documentos/LM/Json/asociaciones.txt","r")
asociaciones=json.load(fich)
fich.close()
for a in asociaciones ["directorios"]["directorio"]:
print a["nombre"]["content"]
asociacion=raw_input("Mete una Asociacion: ")
for pre in asociaciones:
if pre["nombre"]["content"]==asociacion:
print pre["descripcion"]["content"]
|
983,047 | 683ef409e843b6f62c851d26004f8a3a1fee1d97 | from Tkinter import *
from tkFont import Font
from cStringIO import StringIO
from khronos.utils import Namespace
class StatusViewer(LabelFrame):
    """Scrollable, read-only Tkinter text panel showing a simulation's status tree."""

    def __init__(self, master, title="Status", width=80, height=10, fontsize=12):
        LabelFrame.__init__(self, master, text=title)
        self.sim = None  # current simulation object, set via set_sim()
        self.build(width, height, fontsize)
        self.layout()

    def build(self, width, height, fontsize):
        """Create the text widget and its horizontal/vertical scrollbars."""
        w = self.widgets = Namespace()
        w.text = Text(self, width=width, height=height, state=DISABLED, wrap=NONE,
                      undo=False, font=Font(family="Courier New", size=fontsize))
        w.xscroll = Scrollbar(self, orient=HORIZONTAL, command=w.text.xview)
        w.yscroll = Scrollbar(self, orient=VERTICAL, command=w.text.yview)
        w.text.configure(xscrollcommand=w.xscroll.set, yscrollcommand=w.yscroll.set)

    def layout(self):
        """Grid the widgets; the text area expands with the frame."""
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        w = self.widgets
        # Bug fix: the grid option is 'sticky', not 'stick' — the misspelled
        # keyword raised TclError ("unknown option") at runtime.
        w.text.grid(row=0, column=0, sticky=N+S+E+W)
        w.yscroll.grid(row=0, column=1, sticky=N+S)
        w.xscroll.grid(row=1, column=0, sticky=E+W)

    def clear(self):
        """Delete all text (temporarily re-enabling the read-only widget)."""
        text = self.widgets.text
        text.configure(state=NORMAL)
        text.delete("0.0", END)
        text.configure(state=DISABLED)

    def setup_listeners(self, sigmanager):
        """Subscribe to the simulation lifecycle signals we render."""
        sigmanager.add_listener("set_sim", self.set_sim)
        sigmanager.add_listener("del_sim", self.del_sim)
        sigmanager.add_listener("sim_start", self.sim_update)
        sigmanager.add_listener("sim_stop", self.sim_update)
        sigmanager.add_listener("sim_update", self.sim_update)

    def set_sim(self, sim):
        """Attach the simulation whose status will be displayed."""
        self.sim = sim

    def del_sim(self):
        """Detach the simulation and blank the display."""
        self.clear()
        self.sim = None

    def sim_update(self):
        """Re-render the simulation's status tree into the text widget."""
        string = StringIO()
        self.sim.tree_status(out=string)
        text = self.widgets.text
        text.configure(state=NORMAL)
        text.delete("0.0", END)
        text.insert(END, string.getvalue())
        text.configure(state=DISABLED)
983,048 | 8ed9ace87fe061ff596a042290539ff05ffccf55 | #quick sort iterative approach
from collections import deque
def partition(arr, start, end):
    """Lomuto partition of arr[start:end+1] around pivot arr[end].

    Rearranges the range in place so elements <= pivot precede it, and
    returns the pivot's final index.
    """
    pivot = arr[end]
    boundary = start  # next slot for an element <= pivot
    for j in range(start, end):
        if arr[j] <= pivot:
            arr[j], arr[boundary] = arr[boundary], arr[j]
            boundary += 1
    # Drop the pivot into its sorted position.
    arr[boundary], arr[end] = arr[end], arr[boundary]
    return boundary
def quick_sort_iterative(arr, start, end):
    """Sort arr[start:end+1] in place with quicksort, using an explicit
    stack of pending (lo, hi) ranges instead of recursion."""
    pending = deque([(start, end)])
    while pending:
        lo, hi = pending.pop()
        split = partition(arr, lo, hi)
        # Defer only sub-ranges that still hold at least two elements.
        if split - 1 > lo:
            pending.append((lo, split - 1))
        if split + 1 < hi:
            pending.append((split + 1, hi))
# Quick demonstration: sort a small list in place and show the result.
data = [2, 3, 1, 4, -8]
quick_sort_iterative(data, 0, len(data) - 1)
print(data)
983,049 | 47b7f9a9d8fac2cfb0cbf566b47c421b0dc5b4ed | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    """A registered user, identified by driving-licence number."""
    # Bug fix: IntegerField was assigned without parentheses (the class
    # itself, not a field instance), so Django never created the column.
    license_id = models.IntegerField()
    name = models.CharField(max_length=200)
class Spot(models.Model):
    """A parking spot and its price."""
    # Bug fix: IntegerField was assigned without parentheses (the class
    # itself, not a field instance), so Django never created the column.
    spot_id = models.IntegerField()
    price = models.DecimalField(max_digits=6,decimal_places=2)
class Transaction(models.Model):
    """A payment by a user for a parking spot."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    # Bug fix: IntegerField was assigned without parentheses (the class
    # itself, not a field instance), so Django never created the column.
    trans_id = models.IntegerField()
    trans_date = models.DateTimeField('date of transaction')
    pm = models.CharField(max_length=200)  # payment method
    amount = models.DecimalField(max_digits=6,decimal_places=2)
983,050 | 4f12ed6c1d49853ccabf263c8514662e8b6ea0d1 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import auto_triage
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import bug_data
from dashboard.models import graph_data
from dashboard.models import sheriff
@mock.patch.object(utils, 'TickMonitoringCustomMetric', mock.MagicMock())
class AutoTriageTest(testing_common.TestCase):
  """Tests for the /auto_triage handler.

  The handler marks anomalies whose data has returned to pre-alert levels as
  "recovered" and comments on / closes bugs once all of their alerts recover.
  """

  def setUp(self):
    # Route /auto_triage to the handler under test inside a WSGI test app.
    super(AutoTriageTest, self).setUp()
    app = webapp2.WSGIApplication(
        [('/auto_triage', auto_triage.AutoTriageHandler)])
    self.testapp = webtest.TestApp(app)

  def _AddTestData(self, test_name, rows, sheriff_key,
                   improvement_direction=anomaly.UNKNOWN):
    """Adds a sample Test and associated data and returns the Test."""
    testing_common.AddTests(
        ['ChromiumGPU'],
        ['linux-release'], {
            'scrolling_benchmark': {
                test_name: {},
            },
        })
    test = utils.TestKey(
        'ChromiumGPU/linux-release/scrolling_benchmark/' + test_name).get()
    test.improvement_direction = improvement_direction
    test_container_key = utils.GetTestContainerKey(test.key)
    # Register the test with the sheriff so auto-triage picks up its alerts.
    sheriff_key = sheriff_key.get()
    if sheriff_key.patterns:
      sheriff_key.patterns.append(test.test_path)
    else:
      sheriff_key.patterns = [test.test_path]
    sheriff_key.put()
    for i, val in enumerate(rows):
      graph_data.Row(id=(i+1), value=val, parent=test_container_key).put()
    # Add test config.
    overridden_config = {
        'min_relative_change': 0.1,
        'min_absolute_change': 10.0
    }
    anomaly_config.AnomalyConfig(
        id='config_' + test_name, config=overridden_config,
        patterns=[test.test_path]).put()
    test.put()
    return test

  def _AddAnomalyForTest(
      self, median_before_anomaly, std_dev_before_anomaly, sheriff_key,
      bug_id, test_key):
    """Adds an Anomaly to the given Test with the given properties.

    Args:
      median_before_anomaly: Median value of segment before alert.
      std_dev_before_anomaly: Std. dev. for segment before alert.
      sheriff_key: Sheriff associated with the Anomaly.
      bug_id: Bug ID associated with the Anomaly.
      test_key: Test to associate the Anomaly with.

    Returns:
      The ndb.Key for the Anomaly that was put.
    """
    if bug_id > 0:
      # Make sure a Bug entity exists for real (positive) bug IDs.
      bug = ndb.Key('Bug', int(bug_id)).get()
      if not bug:
        bug_data.Bug(id=bug_id).put()
    return anomaly.Anomaly(
        start_revision=4,
        end_revision=4,
        test=test_key,
        median_before_anomaly=median_before_anomaly,
        segment_size_after=3,
        window_end_revision=6,
        std_dev_before_anomaly=std_dev_before_anomaly,
        bug_id=bug_id,
        sheriff=sheriff_key).put()

  def testAnomalyRecovery_AbsoluteCheck(self):
    # Recovery by the absolute-change rule: the post-alert segment must come
    # back within min_absolute_change (10.0) of the pre-alert median.
    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
    abs_not_recovered = [990, 1000, 1010, 1010, 1010, 1010, 1000, 1010, 1020]
    t1 = self._AddTestData('t1', abs_not_recovered, sheriff_key)
    self._AddAnomalyForTest(1000, 10, sheriff_key, None, t1.key)
    abs_recovered = [990, 1000, 1010, 1010, 1010, 1010, 995, 1005, 1015]
    t2 = self._AddTestData('t2', abs_recovered, sheriff_key)
    self._AddAnomalyForTest(1000, 10, sheriff_key, None, t2.key)
    self.testapp.post('/auto_triage')
    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(2, len(anomalies))
    self.assertEqual(t1.key, anomalies[0].test)
    self.assertEqual(t2.key, anomalies[1].test)
    self.assertFalse(anomalies[0].recovered)
    self.assertTrue(anomalies[1].recovered)

  def testAnomalyRecovery_RelativeCheck(self):
    # Recovery by the relative-change rule (min_relative_change = 0.1).
    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
    rel_not_recovered = [49, 50, 51, 55, 55, 55, 44, 55, 56]
    t1 = self._AddTestData('t1', rel_not_recovered, sheriff_key)
    self._AddAnomalyForTest(50, 10, sheriff_key, None, t1.key)
    rel_recovered = [40, 50, 60, 60, 60, 60, 44, 54, 64]
    t2 = self._AddTestData('t2', rel_recovered, sheriff_key)
    self._AddAnomalyForTest(50, 10, sheriff_key, None, t2.key)
    self.testapp.post('/auto_triage')
    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(2, len(anomalies))
    self.assertEqual(t1.key, anomalies[0].test)
    self.assertEqual(t2.key, anomalies[1].test)
    self.assertFalse(anomalies[0].recovered)
    self.assertTrue(anomalies[1].recovered)

  def testAnomalyRecovery_StdDevCheck(self):
    # A shift larger than the pre-alert standard deviation is not recovered.
    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
    std_not_recovered = [990, 1000, 1010, 1010, 1010, 1010, 1010, 1020, 1030]
    test = self._AddTestData('t1', std_not_recovered, sheriff_key)
    self._AddAnomalyForTest(1000, 10, sheriff_key, None, test.key)
    self.testapp.post('/auto_triage')
    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(1, len(anomalies))
    self.assertFalse(anomalies[0].recovered)

  def testAnomalyRecovery_ImprovementCheck(self):
    # A change in the improvement direction (DOWN here) counts as recovered.
    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
    improvements = [990, 1000, 1010, 1010, 1010, 1010, 890, 900, 910]
    test = self._AddTestData('t1', improvements, sheriff_key, anomaly.DOWN)
    self._AddAnomalyForTest(1000, 10, sheriff_key, None, test.key)
    self.testapp.post('/auto_triage')
    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(1, len(anomalies))
    self.assertTrue(anomalies[0].recovered)

  def testAnomalyRecover_IgnoredCheck(self):
    # Alerts marked invalid (bug_id == -1) are skipped by auto-triage even
    # when the data looks recovered.
    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
    recovered = [990, 1000, 1010, 1010, 1010, 1010, 990, 1000, 1010]
    test = self._AddTestData('t1', recovered, sheriff_key)
    self._AddAnomalyForTest(1000, 10, sheriff_key, -1, test.key)
    self.testapp.post('/auto_triage')
    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(1, len(anomalies))
    self.assertFalse(anomalies[0].recovered)

  @mock.patch.object(
      auto_triage.rietveld_service, 'Credentials', mock.MagicMock())
  @mock.patch.object(
      auto_triage.issue_tracker_service.IssueTrackerService, 'AddBugComment')
  def testPost_AllAnomaliesRecovered_AddsComment(self, add_bug_comment_mock):
    # When every alert on a bug recovers, exactly one comment is posted.
    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
    recovered = [990, 1000, 1010, 1010, 1010, 1010, 990, 1000, 1010]
    t1 = self._AddTestData('t1', recovered, sheriff_key)
    self._AddAnomalyForTest(1000, 10, sheriff_key, 1234, t1.key)
    abs_recovered = [990, 1000, 1010, 1010, 1010, 1010, 995, 1005, 1015]
    t2 = self._AddTestData('t2', abs_recovered, sheriff_key)
    self._AddAnomalyForTest(1000, 10, sheriff_key, 1234, t2.key)
    self.testapp.post('/auto_triage')
    self.ExecuteTaskQueueTasks('/auto_triage', auto_triage._TASK_QUEUE_NAME)
    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(2, len(anomalies))
    self.assertTrue(anomalies[0].recovered)
    self.assertTrue(anomalies[1].recovered)
    add_bug_comment_mock.assert_called_once_with(mock.ANY, mock.ANY)

  @mock.patch.object(auto_triage.TriageBugs, '_CommentOnRecoveredBug')
  def testPost_BugHasNoAlerts_NotMarkRecovered(self, close_recovered_bug_mock):
    # A bug with no alerts is closed without being commented on as recovered.
    bug_id = 1234
    bug_data.Bug(id=bug_id).put()
    self.testapp.post('/auto_triage')
    self.ExecuteTaskQueueTasks('/auto_triage', auto_triage._TASK_QUEUE_NAME)
    bug = ndb.Key('Bug', bug_id).get()
    self.assertEqual(bug_data.BUG_STATUS_CLOSED, bug.status)
    self.assertFalse(close_recovered_bug_mock.called)
if __name__ == '__main__':
unittest.main()
|
def pyramid(level, sign, is_reversed=False):
    """Print a `level`-row pyramid built from the single character `sign`.

    Row r (1-based) is `2*r - 1` copies of `sign`, left-padded so the pyramid
    is centered.  With `is_reversed` the rows are printed widest-first.
    Prints "Invalid parameters" (as before) when `level <= 1` or `sign` is
    not exactly one character.

    Fixes: removes the unused `full` variable and collapses the two nearly
    identical while-loops into one for-loop over the row order.
    """
    if level <= 1 or len(sign) != 1:
        print("Invalid parameters")
        return
    order = range(1, level + 1)
    if is_reversed:
        order = reversed(order)
    for row in order:
        print(" " * (level - row) + sign * (2 * row - 1))
983,052 | 62469310d81d31d05e6f298139bd3aef73d11487 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-21 01:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a non-null `slug` CharField to the HomePageFeature model.

    `preserve_default=False` means the empty-string default is used only to
    backfill existing rows during this migration and is not kept on the field.
    """

    dependencies = [
        ('posts', '0010_auto_20170320_1752'),
    ]

    operations = [
        migrations.AddField(
            model_name='homepagefeature',
            name='slug',
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        ),
    ]
|
983,053 | 4c8c12a1e98737f8dd67ef5ec28cf68fcb23e3f5 | # -*- coding: utf-8 -*-
import os
import re
import socket
import subprocess
from libqtile.config import KeyChord, Key, Screen, Group, Drag, Click, Match
from libqtile.command import lazy
from libqtile import layout, bar, widget, hook
from libqtile import qtile
from libqtile.lazy import lazy
from libqtile.log_utils import logger
from typing import List # noqa: F401
# from custom_popups import Confirm, ShowGroupName
def focus_master(qtile):
    """Focus the master window; if it already has focus, step to the next one."""
    group = qtile.current_group
    stack = group.layout.clients
    if stack.current_index > 0:
        # Not on master yet: jump straight to it.
        group.focus(stack.focus_first(), True)
    elif stack.current_index == 0 and len(stack.clients) > 0:
        # Already on master: move focus down to the next window instead.
        group.layout.cmd_down()
def swap_master(qtile):
    """Swap the focused window into the master slot.

    When master already has focus, swap it with the next window and put focus
    back on the new master.
    """
    group = qtile.current_group
    stack = group.layout.clients
    if stack.current_index > 0:
        group.layout.cmd_swap_main()
    elif stack.current_index == 0 and len(stack.clients) > 0:
        group.layout.cmd_shuffle_down()
        group.focus(stack.focus_first(), True)
def float_to_front(qtile):
    """Raise every floating window of the current group above the tiled ones."""
    floats = (w for w in qtile.current_group.windows if w.floating)
    for win in floats:
        win.cmd_bring_to_front()
def sink_floats(qtile):
    """Drop all floating windows of the group back into the tiled layer.

    Fix: the original docstring was copy-pasted from float_to_front and
    wrongly claimed this brings floating windows to the front; it actually
    un-floats them.
    """
    for window in qtile.current_group.windows:
        if window.floating:
            window.toggle_floating()
def load_randr_layout(name):
    """Return a callback that applies the named xrandr screen-layout script
    and then restarts qtile shortly after so screens and bars are rebuilt.

    Bug fix: `lazy.restart` is a deferred-call factory that only executes
    when bound to a key; scheduling it with call_later was a silent no-op.
    Schedule the real `qtile.cmd_restart` instead (as the commented-out
    original line did).
    """
    cmd = "sh /home/geoff/.screenlayout/%s.sh" % name

    def load(qtile):
        qtile.cmd_spawn(cmd)
        # Small delay so the layout script finishes before the restart.
        qtile.call_later(0.075, qtile.cmd_restart)

    return load
def grab_cursor(qtile):
    """Warp the mouse pointer to the center of the focused window."""
    win = qtile.current_group.layout.clients.current_client
    left, top = win.cmd_get_position()
    width, height = win.cmd_get_size()
    center = (left + width / 2, top + height / 2)
    qtile.cmd_spawn("xdotool mousemove %i %i" % center)
# globals (flags, and placeholder Nones)
class Flags:
    """Process-wide mutable state shared between hooks.

    `restarting` starts True and is cleared once startup_complete fires, so
    hooks can distinguish a (re)start from a normal group switch.
    """

    def __init__(self):
        self.restarting = True

    def get_restarting(self):
        """Return whether qtile is still (re)starting."""
        return self.restarting
# TODO: rename Flags to Globals and put group_shower and confirm_exit in?
# Would do away with using the global keyword...
flags = Flags()
group_shower = None
confirm_exit = None
# HACK: This seems to be working as a fix for the failed `qtile is not None`
# assertion in the Popup class that I was getting when passing in the global qtile
# object at the top-level. It seems that the Popup objects were being instantiated
# before qtile was given a value.
# @hook.subscribe.startup_complete
# def instantiate_popups():
# global group_shower, confirm_exit
#
# group_shower = ShowGroupName(
# qtile,
# flags.get_restarting,
# font="FiraCode",
# font_size=80,
# x_incr=50,
# fmt="[{}]",
# height=125,
# horizontal_padding=25,
# vertical_padding=15,
# background="#292d3e",
# foreground="#d0d0d0",
# )
# confirm_exit = Confirm(
# qtile,
# "exit",
# qtile.cmd_shutdown,
# font="FiraCode",
# font_size=40,
# x_incr=25,
# height=125,
# horizontal_padding=30,
# vertical_padding=15,
# background="#292d3e",
# foreground="#d0d0d0",
# )
# # dynamically add keybindings using popups
# qtile.grab_key(
# Key(
# [mod, "shift"],
# "e",
# lazy.function(confirm_exit.show),
# desc="Shutdown Qtile",
# )
# )
# Special configs
auto_fullscreen = True
focus_on_window_activation = "smart"
mod = "mod4" # SUPER
alt = "mod1"
my_term = "kitty"
term_exec = my_term + " -e "
layout_theme = {
"border_width": 3,
"margin": 12,
"border_focus": "6623df",
"border_normal": "422773",
"new_at_current": True,
}
default_tall = layout.MonadTall(**layout_theme)
default_max = layout.Max(**layout_theme)
www_tall = layout.MonadTall(**layout_theme, ratio=0.6, align=layout.MonadTall._right)
### Special name, this is used as the default layouts list
layouts = [
# layout.MonadWide(**layout_theme),
# layout.Bsp(**layout_theme),
# layout.Stack(stacks=2, **layout_theme),
# layout.Columns(**layout_theme),
# layout.RatioTile(**layout_theme),
# layout.VerticalTile(**layout_theme),
# layout.Matrix(**layout_theme),
# layout.Zoomy(**layout_theme),
default_tall,
default_max,
# layout.Tile(shift_windows=True, **layout_theme),
# layout.Stack(num_stacks=2),
# layout.Floating(**layout_theme)
]
keys = [
### The essentials
Key([mod], "Return", lazy.spawn(my_term), desc="Launches Terminal"),
Key([mod], "space", lazy.spawn("rofi -show drun"), desc="Run Launcher"),
Key([mod], "w", lazy.spawn("rofi -show window"), desc="Run Window Picker"),
Key([mod], "Tab", lazy.next_layout(), desc="Toggle through layouts"),
Key([mod, "shift"], "q", lazy.window.kill(), desc="Kill active window"),
Key([mod, "shift"], "r", lazy.restart(), desc="Restart Qtile"),
Key([mod], "e", lazy.spawn("emacs"), desc="Doom Emacs"),
### Switch focus to specific monitor (out of three)
Key([mod], "z", lazy.to_screen(0), desc="Keyboard focus to monitor 1"),
Key([mod], "x", lazy.to_screen(1), desc="Keyboard focus to monitor 2"),
Key([mod], "c", lazy.to_screen(2), desc="Keyboard focus to monitor 3"),
### Window controls
Key([mod], "j", lazy.layout.down(), desc="Move focus down in current stack pane"),
Key([mod], "k", lazy.layout.up(), desc="Move focus up in current stack pane"),
Key(
[mod, "shift"],
"j",
lazy.layout.shuffle_down(),
desc="Move windows down in current stack",
),
Key(
[mod, "shift"],
"k",
lazy.layout.shuffle_up(),
desc="Move windows up in current stack",
),
Key(
[mod],
"l",
lazy.layout.grow(),
lazy.layout.increase_nmaster(),
desc="Expand window (MonadTall), increase number in master pane (Tile)",
),
Key(
[mod],
"h",
lazy.layout.shrink(),
lazy.layout.decrease_nmaster(),
desc="Shrink window (MonadTall), decrease number in master pane (Tile)",
),
Key([mod], "n", lazy.layout.normalize(), desc="normalize window size ratios"),
Key(
[mod, "control"],
"m",
lazy.layout.maximize(),
desc="toggle window between minimum and maximum sizes",
),
Key([mod], "m", lazy.function(focus_master), desc="Focus on master."),
Key(
[mod, "shift"],
"m",
lazy.function(swap_master),
desc="Swap current window with master.",
),
Key([mod, "shift"], "f", lazy.window.toggle_floating(), desc="toggle floating"),
Key(
[mod, alt],
"f",
lazy.function(float_to_front),
desc="Uncover all floating windows.",
),
Key(
[mod],
"t",
lazy.function(sink_floats),
desc="Drop all floating windows into tiled layer.",
),
Key([mod], "f", lazy.window.toggle_fullscreen(), desc="toggle fullscreen"),
Key([mod], "c", lazy.function(grab_cursor), desc="bring cursor to current window"),
### Stack controls
Key(
[mod, "shift"],
"space",
lazy.layout.rotate(),
lazy.layout.flip(),
desc="Switch which side main pane occupies (XmonadTall)",
),
### Misc Applications
Key([mod, "shift"], "Return", lazy.spawn("firefox"), desc="Internet Browser"),
Key([mod], "p", lazy.spawn("pcmanfm"), desc="Graphical File Manager"),
Key([mod, "shift"], "s", lazy.spawn("flameshot gui"), desc="Screenshot Tool"),
Key([mod, alt], "d", lazy.spawn("discord"), desc="Discord"),
Key([mod], "v", lazy.spawn(term_exec + "nvim"), desc="Neovim"),
Key([mod, "shift"], "o", lazy.spawn(term_exec + "htop"), desc="Htop"),
Key(
[mod, alt],
"p",
lazy.spawn("/home/geoff/.config/qtile/picom_toggle.sh"),
desc="Toggle Picom",
),
### RANDR Layouts
Key([mod, alt], "h", lazy.function(load_randr_layout("right_hdmi"))),
Key([mod, alt], "w", lazy.function(load_randr_layout("work_right_hdmi"))),
]
group_names = [
("WWW", {"layout": "monadtall", "layouts": [www_tall, default_max]}),
(
"DEV",
{
"layout": "monadtall",
},
),
("SCI", {"layout": "monadtall"}),
(
"DIR",
{
"layout": "monadtall",
},
),
(
"SYS",
{
"layout": "monadtall",
},
),
(
"GAME",
{
"layout": "monadtall",
"matches": [Match(wm_class=["Steam"])],
},
),
(
"PRV",
{
"layout": "monadtall",
},
),
("8", {"layout": "monadtall"}),
("9", {"layout": "monadtall"}),
]
groups = [Group(name, **kwargs) for name, kwargs in group_names]
for i, (name, kwargs) in enumerate(group_names, 1):
# Switch to another group
keys.append(Key([mod], str(i), lazy.group[name].toscreen()))
# Send current window to another group
keys.append(Key([mod, "shift"], str(i), lazy.window.togroup(name)))
colors = [
["#282c34", "#282c34"], # panel background
["#434758", "#434758"], # background for current screen tab
["#ffffff", "#ffffff"], # font color for group names
["#6623df", "#6623df"], # border line color for current tab
["#730c7d", "#730c7d"], # border line color for other tab and odd widgets
["#422773", "#422773"], # color for the even widgets
["#6df1d8", "#6df1d8"], # window name
]
##### DEFAULT WIDGET SETTINGS #####
widget_defaults = dict(font="FiraCode", fontsize=12, padding=2, background=colors[2])
extension_defaults = widget_defaults.copy()
def init_widgets_list(tray=True):
    """Build the bar's widget list, left to right.

    Parameters
    ----------
    tray : bool, optional
        Append the Systray (only one screen may own the tray), by default True

    Returns
    -------
    list
        Widget instances for a bar.Bar.

    Refactor: the five identical powerline-arrow separators are now built by
    one nested helper instead of being copy-pasted; the widgets produced are
    unchanged.
    """

    def powerline_sep(bg, fg):
        # Right-pointing powerline arrow glyph between colored segments.
        return widget.TextBox(
            text="\ue0b2",
            background=bg,
            foreground=fg,
            padding=-5,
            fontsize=37,
        )

    widgets_list = [
        widget.Sep(linewidth=0, padding=6, foreground=colors[2], background=colors[0]),
        widget.Image(
            filename="~/.config/qtile/icons/python.png",
            mouse_callbacks={"Button1": lambda: qtile.cmd_spawn("rofi -show drun")},
        ),
        widget.GroupBox(
            font="FiraCode",
            fontsize=14,
            margin_y=3,
            margin_x=0,
            padding_y=5,
            padding_x=3,
            borderwidth=3,
            active=colors[2],
            inactive=colors[2],
            rounded=False,
            highlight_color=colors[1],
            highlight_method="line",
            this_current_screen_border=colors[3],
            this_screen_border=colors[4],
            other_current_screen_border=colors[0],
            other_screen_border=colors[0],
            foreground=colors[2],
            background=colors[0],
        ),
        widget.Sep(linewidth=1, padding=15, foreground=colors[2], background=colors[0]),
        widget.WindowName(
            foreground=colors[6], background=colors[0], padding=0, fontsize=13
        ),
        powerline_sep(colors[0], colors[5]),
        widget.CurrentLayoutIcon(
            custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
            foreground=colors[0],
            background=colors[5],
            padding=0,
            scale=0.7,
        ),
        widget.CurrentLayout(foreground=colors[2], background=colors[5], padding=5),
        powerline_sep(colors[5], colors[4]),
        widget.TextBox(
            text=" 🌡",
            padding=2,
            foreground=colors[2],
            background=colors[4],
            fontsize=11,
        ),
        widget.ThermalSensor(
            foreground=colors[2], background=colors[4], threshold=90, padding=5
        ),
        powerline_sep(colors[4], colors[5]),
        widget.TextBox(
            text=" 🖬",
            foreground=colors[2],
            background=colors[5],
            padding=0,
            fontsize=14,
        ),
        widget.Memory(
            foreground=colors[2],
            background=colors[5],
            mouse_callbacks={"Button1": lambda: qtile.cmd_spawn(term_exec + "htop")},
            padding=5,
        ),
        powerline_sep(colors[5], colors[4]),
        widget.CPU(foreground=colors[2], background=colors[4], padding=5),
        powerline_sep(colors[4], colors[5]),
        widget.TextBox(
            text=" ⟳",
            padding=2,
            foreground=colors[2],
            background=colors[5],
            fontsize=14,
        ),
        widget.CheckUpdates(
            distro="Arch_checkupdates",
            no_update_string="Fresh ",
            display_format="Updates: {updates}",
            update_interval=1800,
            foreground=colors[2],
            mouse_callbacks={
                "Button1": lambda: qtile.cmd_spawn(term_exec + "yay -Syyu")
            },
            background=colors[5],
        ),
        powerline_sep(colors[5], colors[4]),
        widget.Clock(
            foreground=colors[2], background=colors[4], format="%A, %B %d [ %H:%M ]"
        ),
    ]
    if tray:
        # The tray segment closes with a different glyph than the arrows above.
        widgets_list.append(
            widget.TextBox(
                text="",
                background=colors[4],
                foreground=colors[0],
                padding=-5,
                fontsize=37,
            )
        )
        widgets_list.append(widget.Systray(background=colors[0], padding=5))
    return widgets_list
def init_widgets_screen1():
    # Primary monitor: full widget list including the system tray.
    return init_widgets_list()
def init_widgets_screen2():
    # Secondary monitor: no Systray — only one widget may own the tray.
    return init_widgets_list(tray=False)
def init_screens():
    # One 20px-tall opaque top bar per physical monitor.
    return [
        Screen(top=bar.Bar(widgets=init_widgets_screen1(), opacity=1.0, size=20)),
        Screen(top=bar.Bar(widgets=init_widgets_screen2(), opacity=1.0, size=20)),
    ]
if __name__ in ["config", "__main__"]:
screens = init_screens()
widgets_list = init_widgets_list()
widgets_screen1 = init_widgets_screen1()
widgets_screen2 = init_widgets_screen2()
mouse = [
Drag(
[mod],
"Button1",
lazy.window.set_position_floating(),
start=lazy.window.get_position(),
),
Drag(
[mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()
),
Click([mod], "Button2", lazy.window.bring_to_front()),
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None
follow_mouse_focus = False
bring_front_click = False
cursor_warp = False
# windows caught with these rules will spawn as floating
floating_layout = layout.Floating(
float_rules=[
*layout.Floating.default_float_rules,
Match(title="Confirmation"), # tastyworks exit box
Match(title="Qalculate!"), # qalculate-gtk
Match(wm_class="kdenlive"), # kdenlive
Match(wm_class="pinentry-gtk-2"), # GPG key password entry
Match(wm_class="Gimp"),
Match(wm_class="Nitrogen"),
Match(wm_class="Lightdm-settings"),
Match(wm_class="Pavucontrol"),
Match(wm_class="NEURON"),
Match(wm_class="matplotlib"),
Match(wm_class="Viewnior"),
Match(wm_class="Gnome-calculator"),
Match(wm_class="StimGen 5.0"), # BMB stimulus generator
]
)
@hook.subscribe.startup_once
def start_once():
    # Run the user's autostart script once per session (not on restarts).
    home = os.path.expanduser("~")
    subprocess.call([home + "/.config/qtile/autostart.sh"])
@hook.subscribe.screen_change
def restart_on_randr(qtile):
    """Restart qtile when the monitor configuration changes.

    Bug fix: `lazy.restart()` only builds a deferred call object and never
    executes outside a key binding, so the original hook was a silent no-op;
    invoke the real restart command on the qtile object instead.
    """
    qtile.cmd_restart()
# def restart_on_randr(qtile, event):
# qtile.cmd_restart()
@hook.subscribe.startup_complete
def refresh_wallpaper():
    # Restore the last nitrogen wallpaper after every (re)start.
    qtile.cmd_spawn("nitrogen --restore")
# Per-group autostart table: when a listed group is visited while empty,
# every command in its "spawn" list is launched (see group_spawn /
# auto_spawner below).  Groups absent from the table never auto-spawn.
auto_spawns = {
    "WWW": {
        "spawn": ["firefox", "element-desktop"],
    },
    "DEV": {
        "spawn": ["emacs", "firefox", "kitty -d ~/git"],
    },
    "DIR": {
        "spawn": ["pcmanfm", term_exec + "joshuto", my_term],
    },
    "SYS": {
        "spawn": [term_exec + "htop", term_exec + "btm", my_term],
    },
    "GAME": {
        "spawn": ["steam"],
    },
    "PRV": {
        "spawn": ["firefox -private-window"],
    },
}
def group_spawn(grp):
    """Launch the group's configured autostart programs if the group is empty."""
    spec = auto_spawns.get(grp.name)
    if spec is None or grp.windows:
        return
    for cmd in spec["spawn"]:
        qtile.cmd_spawn(cmd)
@hook.subscribe.startup_complete
def finished_restarting():
    """hack to prevent auto-spawner from firing off during restart.
    TODO: Perhaps make a class that offers a more clean solution."""
    # Startup/restart is over: re-enable the setgroup auto-spawner.
    flags.restarting = False
    # Seed the currently visible group and restore the wallpaper once.
    group_spawn(qtile.current_group)
    qtile.cmd_spawn("nitrogen --restore")
@hook.subscribe.setgroup
def auto_spawner():
    """On group switch, launch that group's autostart programs if it is empty.

    Consistency fix: delegates to group_spawn() instead of duplicating its
    membership/emptiness logic inline.  Suppressed while restarting so a
    qtile restart does not re-spawn applications.
    """
    if not flags.restarting:
        group_spawn(qtile.current_group)
@hook.subscribe.client_managed
def dev_term_shrinker(c):
    # When a third window joins the DEV group, rotate the existing terminal
    # into the last stack slot and shrink it, keeping the editor panes large.
    grp = qtile.current_group
    if qtile.current_group.name == "DEV":
        clients = grp.layout.clients.clients
        n = len(clients)
        # check that new window is client of the group (ignore transient popups)
        if n == 3 and c in clients:
            # NOTE(review): the comprehension variable shadows the hook
            # argument `c`; it works, but is easy to misread.
            is_term = [my_term in c.window.get_wm_class() for c in clients]
            if True in is_term:
                term_idx = is_term.index(True)
                grp.focus(clients[term_idx], True)
                # Shuffle the terminal down to the bottom of the stack.
                for _ in range(n - term_idx):
                    grp.layout.cmd_shuffle_down()
                grp.layout._shrink_secondary(grp.layout.change_size * 15)
wmname = "LG3D"
|
983,054 | b538a882d331cc3ef28217e713df4e4545ecf849 | import datetime
from socket import socket
import threading
import logging
FORMAT = '%(asctime)-15s \t [%(threadName)s ,%(thread)8d] %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
class ChatClient:
    """Minimal line-oriented TCP chat client.

    Sends newline-terminated messages and logs whatever the server pushes
    back on a background receiver thread.
    """

    def __init__(self, rip='127.0.0.1', rport=9999):
        self.raddr = (rip, rport)
        self.socket = socket()
        self.event = threading.Event()  # set => shut the receiver loop down

    def start(self):
        """Connect to the server and start the receiver thread."""
        self.socket.connect(self.raddr)
        threading.Thread(target=self.recv, name='recv').start()

    def recv(self):
        """Receiver loop: log incoming data until stopped or disconnected.

        Fix: the original crashed with an unhandled exception when the
        socket was closed under it and looped forever on a peer close.
        """
        while not self.event.is_set():
            try:
                data = self.socket.recv(1024)
            except OSError:
                break  # socket closed underneath us (see stop())
            if not data:
                break  # peer closed the connection
            logging.info(data)

    @staticmethod
    def _frame(msg: str) -> bytes:
        # The server is line-oriented, so every message needs a trailing newline.
        return "{}\n".format(msg.strip()).encode()

    def send(self, msg: str):
        """Send one message, newline-terminated."""
        self.socket.send(self._frame(msg))

    def stop(self):
        """Stop the receiver thread and close the connection.

        Bug fix: the original closed the socket first and only set the stop
        event after an unconditional 3-second wait, which both delayed every
        shutdown by 3s and made the receiver thread crash on the closed
        socket.  Set the event first, then close.
        """
        self.event.set()
        self.socket.close()
        logging.info("client stops ")
def main():
    """Interactive driver: forward typed lines to the server; 'quit' exits."""
    cc = ChatClient()
    cc.start()
    while True:
        cmd = input('>>>>')
        if cmd.strip() == 'quit':
            break
        cc.send(cmd)
    # NOTE(review): cc.stop() is never called, so the non-daemon recv thread
    # keeps the process alive after quitting — confirm intent.
    print(threading.enumerate())
if __name__ == '__main__':
main()
|
983,055 | 6b98c3120a2218f47c2c2422dd828215e2fa65f5 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is used to store information for a DNA strand.
A DNA strand is a continuous chain of nucleotides. It can be either a scaffold of a staple.
"""
import random
import sys
import os
from sets import Set
import numpy as np
class DnaStrand(object):
    """ The DnaStrand class stores data for a DNA strand.

        Attributes:
            base_id_list (Dict): The location of each strand base within the list of base IDs
                making up the strand. The dictionary maps base IDs to an index into tour[].
            color (List[float]): The strand color in RGB.
            dna_structure (DnaStructure): The DNA structure this strand belongs to.
            domain_list (List[Domain]): The list of domains for this strand.
            helix_list (Dict): The helices the strand passes through. The dictionary maps
                helix IDs to DnaStructureHelix objects.
            icolor (int): The strand color as an integer. The integer color can be used as
                an ID to group staple strands.
            id (int): The strand ID.
            insert_seq (List[string]): The list of sequence letters inserted into this strand.
            is_circular (bool): If True then the strand is circular, returning to its starting position.
            is_scaffold (bool): If True then the strand is a scaffold strand.
            tour (List[DnaBase]): The list of base objects making up the strand.
    """

    def __init__(self, id, dna_structure, is_scaffold, is_circular, tour):
        """ Initialize a DnaStrand object.

            Arguments:
                id (int): The strand ID.
                dna_structure (DnaStructure): The DNA structure this strand belongs to.
                is_scaffold (bool): If True then the strand is a scaffold strand.
                is_circular (bool): If True then the strand is circular.
                tour (List[DnaBase]): The list of base objects making up the strand.
        """
        self.id = id
        self.is_scaffold = is_scaffold
        self.is_circular = is_circular
        self.tour = tour
        self.color = self.create_random_color()
        self.icolor = None
        self.helix_list = dict()
        self.base_id_list = dict()
        self.dna_structure = dna_structure
        self.domain_list = []
        self.insert_seq = []

    def create_random_color(self):
        """ Create a random color for the strand.

            Colors are generated from the limited set of intensity values
            in color_list[] to make them more distinguishable.  Scaffold
            strands are always white.
        """
        # Create a list of n evenly spaced intensities in [0, 1].
        n = 4
        dc = 1.0 / (n-1)
        color_list = [i*dc for i in range(n)]
        if self.is_scaffold:
            rgb = [1.0, 1.0, 1.0]
        else:
            rgb = [random.choice(color_list) for i in range(3)]
            # Don't generate blue (that's for a scaffold in cadnano) or black.
            if (rgb[0] == 0.0) and (rgb[1] == 0.0):
                rgb[0] = random.choice(color_list[1:])
                if rgb[2] == 0.0:
                    rgb[2] = random.choice(color_list[1:])
        return rgb

    def add_helix(self, helix):
        """ Add a helix reference to the strand (idempotent).

            Arguments:
                helix (DnaStructureHelix): The helix to add.
        """
        id = helix.lattice_num
        if id not in self.helix_list:
            self.helix_list[id] = helix

    def get_base_coords(self):
        """ Get the coordinates of bases along the dna helix axis.

            This is only used when writing a visualization file.

            Returns a NumPy (num_bases x 3) array of base coordinates.
        """
        num_bases = len(self.tour)
        base_coords = np.zeros((num_bases, 3), dtype=float)
        # Fix: the original also looked up self.helix_list[base.h] for every
        # base and discarded the result; that dead lookup is removed.
        for i, base in enumerate(self.tour):
            base_coords[i] = base.coordinates
        return base_coords

    def get_base_index(self, base):
        """ Get the index into the strand for the given base.

            Arguments:
                base (DnaBase): The base to get the index for.

            Returns the 0-based position of the base in self.tour, or None
            (with a warning on stderr) if the base is not in this strand.
        """
        if not self.base_id_list:
            # Build the id -> tour-index cache lazily on first use.
            for i, sbase in enumerate(self.tour):
                self.base_id_list[sbase.id] = i
        if base.id not in self.base_id_list:
            sys.stderr.write("[strand::get_base_index] **** WARNING: base %d not found in strand %d.\n" % (base.id, self.id))
            return None
        return self.base_id_list[base.id]
|
983,056 | ad98a438287c59feab9af95dffecbdca31e70856 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 30 15:20:45 2011
@author: josef
"""
from statsmodels.compat.python import lrange
import numpy as np
from scipy import stats
from statsmodels.sandbox.tools.mctools import StatTestMC
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.tsa.stattools import adfuller
def normalnoisesim(nobs=500, loc=0.0):
    """Simulate iid Gaussian noise of length `nobs` with mean `loc`."""
    return np.random.randn(nobs) + loc
def lb(x):
    """Ljung-Box Q statistics and p-values for lags 1..4, concatenated."""
    stat, pval = acorr_ljungbox(x, lags=4)
    return np.r_[stat, pval]
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=lrange(4))
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print('\n\n')
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1])
#----------------------
def randwalksim(nobs=500, drift=0.0):
    """Simulate a Gaussian random walk of length `nobs` with the given drift."""
    steps = drift + np.random.randn(nobs)
    return steps.cumsum()
def adf20(x):
    # Augmented Dickey-Fuller test with a fixed lag of 2, no constant or
    # trend (regression="n"); returns the full adfuller result tuple.
    return adfuller(x, 2, regression="n", autolag=None)
print(adf20(np.random.randn(100)))
mc2 = StatTestMC(randwalksim, adf20)
mc2.run(10000, statindices=[0,1])
frac = [0.01, 0.05, 0.1]
#bug
crit = np.array([-3.4996365338407074, -2.8918307730370025, -2.5829283377617176])[:,None]
print(mc2.summary_cdf([0], frac, crit,
varnames=['adf'],
title='adf'))
#bug
#crit2 = np.column_stack((crit, frac))
#print mc2.summary_cdf([0, 1], frac, crit,
# varnames=['adf'],
# title='adf')
print(mc2.quantiles([0]))
print(mc2.cdf(crit, [0]))
doplot=1
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist([3],stats.chi2([4]).pdf)
plt.title('acorr_ljungbox - MC versus chi2')
plt.show()
|
983,057 | cc1f6bdf4e1443e944d956e94904a71365248b03 | import torch
import numpy as np
import matplotlib.pyplot as plt
from skimage.filters import sobel
from torch import nn
from loss.dice import dice_loss_sq, dice_coefficient
from loss.focal import focal_loss_with_logits
def show_enc_orig(enc, orig,save_path=None):
    #show encoded vector and original image
    """Display each channel of an encoded tensor beside the original image.

    Parameters
    ----------
    enc : `torch.tensor`
        encoded vector; one subplot is drawn per leading channel
    orig : `torch.tensor`
        image shown in the first subplot
    save_path : str, optional
        save location; when given the figure is written there and all
        figures are closed, by default None
    """
    chann = enc.shape[0]
    fig, ax = plt.subplots(1, chann+1)
    # Hide every axis frame/ticks; only the images matter.
    [axi.set_axis_off() for axi in ax.ravel()]
    ax[0].imshow(orig.clone().detach().cpu())
    for ind, axis in enumerate(ax[1:]):
        axis.imshow(enc.clone().detach().cpu()[ind])
    if save_path:
        plt.savefig(save_path)
        plt.close("all")
def get_hot_enc(input, channels=3):
    """One-hot encode an integer label map.

    Parameters
    ----------
    input : `torch.tensor`
        Integer class labels shaped (H, W), (B, H, W) or (B, 1, H, W).
    channels : int, optional
        Number of classes / output channels, by default 3

    Returns
    -------
    `torch.tensor`
        One-hot tensor of shape (B, channels, H, W) on input's device.
    """
    if input.dim() == 2:
        # (H, W) -> (1, 1, H, W)
        input = input.unsqueeze(dim=0).unsqueeze(dim=0)
    elif input.dim() == 3:
        # (B, H, W) -> (B, 1, H, W).  Bug fix: the original reshaped with
        # input.shape[2] for BOTH spatial dims, which broke (or silently
        # scrambled) any non-square input.
        input = input.unsqueeze(dim=1)
    input_zer = torch.zeros(input.shape[0], channels, *input.shape[2:])
    if input.is_cuda:
        input_zer = input_zer.to(input.get_device())
    input_hot = input_zer.scatter(1, input.long(), 1)
    return input_hot
def get_edge_img(act1):
    """Compute a binary edge mask of a label image via the Sobel filter.

    Parameters
    ----------
    act1 : `torch.tensor`
        2-D target label map

    Returns
    -------
    `torch.tensor`
        Tensor of the same shape/dtype with 1 at edge pixels, 0 elsewhere.
    """
    gradients = sobel(act1.cpu().numpy().astype(np.int16))
    edge_rows, edge_cols = np.nonzero(gradients)
    mask = torch.zeros_like(act1)
    mask[edge_rows, edge_cols] = 1
    return mask
def ppce_edgeloss(prob1, act1):
    """computes edges pixels from segm and calculate dice loss on this output and probs
    expecting outputs as output from EdgeNet, a tuple of 3 tensors

    Parameters
    ----------
    prob1 : `torch.tensor` or tuple
        predictions; when a tuple, only the last element is used
    act1 : `torch.tensor`
        target label maps (batch)
    """
    if isinstance(prob1,tuple):
        # EdgeNet returns intermediate outputs; score only the final one.
        prob1 = prob1[-1]
    act1_enc = torch.cat(list(map(get_hot_enc, act1))) # BX3XhXW shaped img
    act_origs = act1_enc.shape#act original shape
    # Run the Sobel edge extractor on every (batch, channel) plane, then
    # restore the original one-hot layout.
    act_flat = torch.flatten(act1_enc, 0,1)
    edge_img = torch.stack(list(map(get_edge_img, act_flat)))
    edge_img = edge_img.view(act_origs)
    # ce=torch.nn.CrossEntropyLoss()
    # loss = ce(prob1, edge_img)
    # edge_img = torch.argmax(edge_img, dim=1)
    # loss_focal = focal_loss_with_logits(prob1, edge_img)
    dice_l = dice_loss_sq(prob1, edge_img[:,:,...], is_all_chann=False, no_sft=False)# 2 channeled output for edge and edge_net is one hotencoded vector
    return dice_l
def pp_edgeacc(prob1, act1, is_list_bat=False, nosft=False):
    """computes edges pixels from segm and calculate CE on this output and probs

    Parameters
    ----------
    prob1 : `torch.tensor` or tuple
        predictions; when a tuple, only the last element is used
    act1 : `torch.tensor`
        target label maps (batch)
    """
    if isinstance(prob1,tuple):
        prob1 = prob1[-1]
    # edge_img = torch.stack(list(map(get_edge_img, act1)))
    act1_enc = torch.cat(list(map(get_hot_enc, act1))) # BX3XhXW shaped img
    act_origs = act1_enc.shape#act original shape
    act_flat = torch.flatten(act1_enc, 0,1)
    edge_img = torch.stack(list(map(get_edge_img, act_flat)))
    edge_img = edge_img.view(act_origs)
    # edge_img = torch.argmax(edge_img, dim=1)
    edge_img = edge_img[:, :, ...]
    # NOTE(review): the `nosft` parameter is ignored — dice_coefficient is
    # always called with nosft=False; confirm whether that is intentional.
    dice_scr = dice_coefficient(prob1, edge_img, is_list_bat, nosft=False, channelcnt=3, is_all_chann=False)
    return dice_scr #+ loss_focal
def get_act_bnd_lbs(act):
    """Return per-pixel boundary class labels (not a binary mask).

    Edge pixels keep their class index; everything else falls back to the
    background class 0.
    """
    one_hot = torch.cat([get_hot_enc(lbl) for lbl in act])  # (B, 3, H, W)
    orig_shape = one_hot.shape
    planes = torch.flatten(one_hot, 0, 1)
    edges = torch.stack([get_edge_img(p) for p in planes]).view(orig_shape)
    # Invert the background channel so non-edge pixels argmax to class 0.
    edges[:, 0, ...] = (edges[:, 0, ...] + 1) % 2
    return torch.argmax(edges, dim=1)
def bp_edgeloss(prob, act):
    """Combined dice loss for a boundary head and a mask head.

    ``prob[-9]`` is taken as the boundary prediction and ``prob[-1]`` as the
    mask prediction; each gets a squared-dice loss against, respectively, the
    edge image derived from the target and the one-hot target mask.

    NOTE(review): the ``-9`` index looks suspicious given the docstring of the
    sibling losses mentions a tuple of 3 tensors -- confirm against the
    network definition.

    Parameters
    ----------
    prob : sequence of `torch.tensor`
        network outputs (boundary at [-9], mask at [-1])
    act : `torch.tensor`
        integer target mask with 3 classes
    """
    # predicted boundary
    predb = prob[-9]
    # predb = sft(predb)
    # predicted mask
    predm = prob[-1]
    # predm = sft(predm)
    # one-hot encode the target mask (3 classes) for the mask-head dice loss
    act_hot=(torch.zeros(act.shape[0],3,*act.shape[1:]))#for one hot encoding, 3 channels and then reduce to 2 channels for loss comp
    act_hot = act_hot.to(act.device)
    act_m = act_hot.scatter(1, act.unsqueeze(dim=1), 1)
    # derive a one-hot boundary image from the target mask
    act_enc = torch.cat(list(map(get_hot_enc, act))) # BX3XhXW shaped img
    act_origs = act_enc.shape#act original shape
    act_flat = torch.flatten(act_enc, 0,1)
    edge_img = torch.stack(list(map(get_edge_img, act_flat)))
    edge_img = edge_img.view(act_origs)
    # edge_img = torch.argmax(edge_img, dim=1)
    # edge_img = torch.stack(list(map(get_edge_img, act)))
    # edge_hot=(torch.zeros(edge_img.shape[0],edge_img.max()+1,*edge_img.shape[1:]))
    # edge_hot = edge_hot.to(edge_img.device)
    # act_b = edge_hot.scatter(1, edge_img.unsqueeze(dim=1), 1)
    # dl = dice_loss_sq#torch.nn.MSELoss()
    #negating bg channel
    # edge_img[:,0,...] = ((edge_img[:,0,...]+1)%2)
    edge_img = edge_img[:,:,...]  # no-op slice, kept from the original
    lossb = dice_loss_sq(predb, edge_img, no_sft=False, is_all_chann=False)# + focal_loss_with_logits(predb, torch.argmax(edge_img, dim=1))
    # lossm = dice_loss_sq(predm, act_m[:,1:,...], no_sft=True) #+ focal_loss_with_logits(predm, act)
    # removed unused locals from the original: sft = nn.Softmax2d() and
    # bce = nn.BCELoss(reduction='sum') were created but never called
    lossm = dice_loss_sq(predm, act_m[:,:,...])
    return lossb + lossm
def bp_edgeacc(prob, act, is_list_bat=False):
    """Dice score of the mask head against the one-hot encoded target mask.

    Only ``prob[-1]`` (the mask prediction) is evaluated; the boundary head is
    not scored here (see the commented-out TODO below).

    Parameters
    ----------
    prob : `torch.tensor` or tuple of tensors
        predictions (last tuple element used when a tuple)
    act : `torch.tensor`
        integer target mask with 3 classes
    is_list_bat : bool
        forwarded to ``dice_coefficient``
    """
    if isinstance(prob,tuple):
        prob = prob[-1]
    # one-hot encode the target mask (3 channels)
    act_hot=(torch.zeros(act.shape[0],3,*act.shape[1:]))#for one hot encoding, 3 channels and then reduce to 2 channels for loss comp
    act_hot = act_hot.to(act.device)
    act_m = act_hot.scatter(1, act.unsqueeze(dim=1), 1)
    dice_scr = dice_coefficient(prob, act_m[:,:,...], is_list_bat, channelcnt=3, nosft=False)
    #TODO try toinclude boundary acc and display in save_img
    # predb = prob[-9]
    # # predb = sft(predb)
    # act_enc = torch.cat(list(map(get_hot_enc, act))) # BX3XhXW shaped img
    # act_origs = act_enc.shape#act original shape
    # act_flat = torch.flatten(act_enc, 0,1)
    # edge_img = torch.stack(list(map(get_edge_img, act_flat)))
    # edge_img = edge_img.view(act_origs)
    # edge_img = torch.stack(list(map(get_edge_img, act)))
    return dice_scr #+ loss_focal
983,058 | a160e7e0af3aa97d737e194b86cf27d77137644c | import pickle
from multiprocessing.pool import Pool
import numpy as np
import torch
import tqdm as tqdm
from matplotlib import pyplot as plt
from scipy.io import loadmat
import os
import cv2
from easydict import EasyDict as edict
import numpy as np
import sys
DEBUG = False
# settings 'borrowed' from https://github.com/spurra/vae-hands-3d/blob/master/data/stb/create_db.m
# intrinsic camera values for the BB (Bumblebee) stereo camera
I_BB = edict()
I_BB.fx = 822.79041
I_BB.fy = 822.79041
I_BB.tx = 318.47345
I_BB.ty = 250.31296
I_BB.base = 120.054  # stereo baseline -- NOTE(review): units presumed mm, confirm
I_BB.R_l = np.array([[0.0, 0.0, 0.0]])
I_BB.R_r = I_BB.R_l
I_BB.T_l = np.array([0.0, 0.0, 0.0])
I_BB.T_r = np.array([-I_BB.base, 0, 0])  # right view shifted by the baseline
# 3x3 intrinsic matrix [[fx, 0, tx], [0, fy, ty], [0, 0, 1]]
I_BB.K = np.diag([I_BB.fx, I_BB.fy, 1.0])
I_BB.K[0, 2] = I_BB.tx
I_BB.K[1, 2] = I_BB.ty
# intrinsic camera values for the SK (Intel RealSense) camera
I_SK = edict()
I_SK.fx_color = 607.92271
I_SK.fy_color = 607.88192
I_SK.tx_color = 314.78337
I_SK.ty_color = 236.42484
I_SK.K_color = np.diag([I_SK.fx_color, I_SK.fy_color, 1])
I_SK.K_color[0, 2] = I_SK.tx_color
I_SK.K_color[1, 2] = I_SK.ty_color
I_SK.fx_depth = 475.62768
I_SK.fy_depth = 474.77709
I_SK.tx_depth = 336.41179
I_SK.ty_depth = 238.77962
I_SK.K_depth = np.diag([I_SK.fx_depth, I_SK.fy_depth, 1])
I_SK.K_depth[0, 2] = I_SK.tx_depth
I_SK.K_depth[1, 2] = I_SK.ty_depth
I_SK.R_depth = I_BB.R_l.copy()
I_SK.T_depth = I_BB.T_l.copy()
# https://github.com/zhjwustc/icip17_stereo_hand_pose_dataset claims that R and T is for color -> depth trans. It is not.
# it is in fact depth -> color.
I_SK.R_color = -1 * np.array([[0.00531, -0.01196, 0.00301]])
I_SK.T_color = -1 * np.array([-24.0381, -0.4563, -1.2326])
# grayscale label values used to paint each hand part in create_jointsmap
PALM_COLOR = [10] * 3
THUMB_COLOR1 = [20] * 3
THUMB_COLOR2 = [30] * 3
THUMB_COLOR3 = [40] * 3
INDEX_COLOR1 = [50] * 3
INDEX_COLOR2 = [60] * 3
INDEX_COLOR3 = [70] * 3
MIDDLE_COLOR1 = [80] * 3
MIDDLE_COLOR2 = [90] * 3
MIDDLE_COLOR3 = [100] * 3
RING_COLOR1 = [110] * 3
RING_COLOR2 = [120] * 3
RING_COLOR3 = [130] * 3
PINKY_COLOR1 = [140] * 3
PINKY_COLOR2 = [150] * 3
PINKY_COLOR3 = [160] * 3
#
# ordering: palm center(not wrist or hand center), little_mcp, little_pip, little_dip, little_tip, ring_mcp, ring_pip,
# ring_dip, ring_tip, middle_mcp, middle_pip, middle_dip, middle_tip, index_mcp, index_pip, index_dip, index_tip,
# thumb_mcp, thumb_pip, thumb_dip, thumb_tip.
# remapping labels to fit with standard labeling.
STB_TO_STD = [0, 17, 18, 19, 20, 13, 14, 15, 16, 9, 10, 11, 12, 5, 6, 7, 8, 1, 2, 3, 4]
def create_jointsmap(uv_coord, size):
    """Render a ``size x size`` label image of the hand skeleton.

    The palm polygon is filled first, then each finger bone is drawn as a
    filled ellipse between its two joints, using the per-part grayscale
    label colours defined at module level.

    :param uv_coord: (21, 2) array of 2-D joint positions in pixels
    :param size: side length of the square output canvas
    :return: (size, size, 3) numpy array with per-part label values
    """
    # define connections and colors of the bones
    # print(coords_hw[-1]) # this is center ( the 22nd point)
    canvas = np.zeros((size, size, 3))
    # (joint index pair, label colour) for each finger segment
    bones = [
        ((1, 2), THUMB_COLOR1),
        ((2, 3), THUMB_COLOR2),
        ((3, 4), THUMB_COLOR3),
        ((5, 6), INDEX_COLOR1),
        ((6, 7), INDEX_COLOR2),
        ((7, 8), INDEX_COLOR3),
        ((9, 10), MIDDLE_COLOR1),
        ((10, 11), MIDDLE_COLOR2),
        ((11, 12), MIDDLE_COLOR3),
        ((13, 14), RING_COLOR1),
        ((14, 15), RING_COLOR2),
        ((15, 16), RING_COLOR3),
        ((17, 18), PINKY_COLOR1),
        ((18, 19), PINKY_COLOR2),
        ((19, 20), PINKY_COLOR3)]
    palm = []
    # collect the first joint of each palm-ring connection as polygon vertices
    for connection, _ in [((0, 1), []),
                          ((1, 5), []),
                          ((5, 9), []),
                          ((9, 13), []),
                          ((13, 17), []),
                          ((17, 0), []), ]:
        coord1 = uv_coord[connection[0]]
        palm.append([int(coord1[0]), int(coord1[1])])
        # palm.append([int((coord1[0]-.5)* W_scale+ W_offset ), int(-(coord1[1]- .5)* H_scale+ H_offset)])
    # print(palm)
    cv2.fillConvexPoly(canvas, np.array([palm], dtype=np.int32), PALM_COLOR)
    for connection, color in bones:
        coord1 = uv_coord[connection[0]]
        coord2 = uv_coord[connection[1]]
        coords = np.stack([coord1, coord2])
        # 0.5, 0.5 is the center
        x = coords[:, 0]
        y = coords[:, 1]
        mX = x.mean()
        mY = y.mean()
        # draw the bone as an ellipse centred on the segment midpoint,
        # rotated to the segment's angle
        length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
        angle = np.math.degrees(np.math.atan2(y[0] - y[1], x[0] - x[1]))
        polygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 2), 16), int(angle), 0, 360, 1)
        cv2.fillConvexPoly(canvas, polygon, color)
    return canvas
def reorder(xyz_coord):
    """Re-index a (21, 3) joint array from STB ordering to the standard ordering."""
    std_order = STB_TO_STD
    return xyz_coord[std_order]
def get_xyz_coord(path):
    """
    Load the 3-D hand joints from an STB ``.mat`` label file.

    ``handPara`` is stored as a (3, 21, N) array (coordinate, joint, sample;
    N is typically 1500). It is transposed to (N, 21, 3) so each row is one
    frame of 21 xyz joints.

    :param path: path to the .mat label file
    :return: (N, 21, 3) numpy array of joint positions
    """
    labels = loadmat(path)
    # Transpose (3, 21, N) -> (N, 21, 3). Equivalent to the original
    # element-by-element triple loop but vectorized, and it no longer
    # hard-codes N=1500, so shorter/longer sequences also work.
    return np.transpose(labels['handPara'], (2, 1, 0))
def get_uv_coord(mode, camera, anno_xyz):
    """
    Project 3-D joints to 2-D pixel coordinates for the STB dataset.

    :param mode: 'color'/'depth' for the SK camera, 'left'/'right' for BB
    :param camera: either "BB" or "SK"
    :param anno_xyz: (21, 3) array of 3-D joint positions
    :return: (21, 2) array of uv pixel coordinates
    :raises ValueError: on an unknown camera/mode combination
    """
    # select the rotation, translation and intrinsics for the requested view
    if camera == 'SK':
        # SK only has the left hand; 'depth' key points cannot be verified
        # against the depth image (see original note)
        if mode == 'color':
            rot, trans, intr = I_SK.R_color, I_SK.T_color, I_SK.K_color
        elif mode == 'depth':
            rot, trans, intr = I_SK.R_depth, I_SK.T_depth, I_SK.K_depth
        else:
            raise ValueError
    elif camera == 'BB':
        if mode == 'left':
            rot, trans, intr = I_BB.R_l, I_BB.T_l, I_BB.K
        elif mode == 'right':
            rot, trans, intr = I_BB.R_r, I_BB.T_r, I_BB.K
        else:
            raise ValueError
    else:
        raise ValueError
    uv_coord, _ = cv2.projectPoints(anno_xyz, rot, trans, intr, None)
    return np.reshape(uv_coord, (21, 2))
def get_bounding_box(uv_coor, shape):
    """
    Compute a bounding box around a set of 2-D joints, padded by 20 px and
    clamped to the image frame.

    :param uv_coor: iterable of (x, y) joint positions
    :param shape: image shape as (height, width, ...)
    :return: (xmin, xmax, ymin, ymax)
    """
    x_lo = y_lo = 99999
    x_hi = y_hi = 0
    for u, v in uv_coor:
        u, v = int(u), int(v)
        x_lo, x_hi = min(x_lo, u), max(x_hi, u)
        y_lo, y_hi = min(y_lo, v), max(y_hi, v)
    # pad by 20 px on each side, clamped to the frame
    x_lo = max(0, x_lo - 20)
    y_lo = max(0, y_lo - 20)
    x_hi = min(shape[1], x_hi + 20)
    y_hi = min(shape[0], y_hi + 20)
    return x_lo, x_hi, y_lo, y_hi
def scale(uv_coord, K, bbox, new_size):
    """
    Rescale 2-D key points and the intrinsic matrix from a crop to a new size.

    ``uv_coord`` is modified in place: points are shifted so the bounding box
    origin lands at (0, 0), then scaled to ``new_size``.

    :param uv_coord: (N, 2) array of key points (mutated in place)
    :param K: 3x3 intrinsic matrix
    :param bbox: (xmin, xmax, ymin, ymax) crop of the hand
    :param new_size: target (height, width)
    :return: (uv_coord, K) after rescaling
    """
    xmin, xmax, ymin, ymax = bbox
    sx = new_size[1] / (xmax - xmin + 1.)
    sy = new_size[0] / (ymax - ymin + 1.)
    uv_coord[:, 0] = (uv_coord[:, 0] - xmin) * sx
    uv_coord[:, 1] = (uv_coord[:, 1] - ymin) * sy
    # fold the shift-then-scale into a single affine matrix and apply it to K
    transform = np.array([[sx, 0, -sx * xmin],
                          [0, sy, -sy * ymin],
                          [0, 0, 1]])
    K = np.matmul(transform, K)
    return uv_coord, K
def draw(image, uv_coord, bbox=None):
    """
    Overlay numbered joint markers (and optionally a bounding box) on an image.

    :param image: image to draw on (modified in place)
    :param uv_coord: iterable of (x, y) joint positions
    :param bbox: optional (xmin, xmax, ymin, ymax) rectangle to outline
    :return: the annotated image
    """
    for idx, (x, y) in enumerate(uv_coord):
        center = (int(x), int(y))
        cv2.circle(image, center, 10, 255, 2)
        cv2.putText(image, str(idx), center, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 255)
    if bbox is not None:
        # corners are (xmin, ymax) and (xmax, ymin)
        cv2.rectangle(image, (bbox[0], bbox[3]), (bbox[1], bbox[2]), 255, 2)
    return image
def to_tensor(image):
    """
    Convert a numpy image to a torch tensor.

    Colour images (trailing dim of 3) are converted BGR -> RGB and reordered
    HWC -> CHW; anything else is treated as grayscale and gains a leading
    channel axis.
    """
    if image.shape[-1] == 3:
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return torch.from_numpy(rgb.transpose(2, 0, 1))
    # grayscale: (H, W) -> (1, H, W)
    return torch.from_numpy(image).unsqueeze(0)
def get_heatmaps(uv_coords, shape):
    """Build one Gaussian heat map per key point, stacked into an (N, H, W) tensor."""
    maps = [to_tensor(gen_heatmap(x, y, shape).astype(np.float32))
            for x, y in uv_coords]
    return torch.stack(maps).squeeze(1)
def gen_heatmap(x, y, shape):
    """
    Build a 2-D heat map for one key point, per the DGGAN description:
    a Gaussian (sigma=3) centred on (x, y), clipped to [0, 1], with the
    far tail zeroed out so the map stays mostly sparse.

    :param x: key point x in pixels
    :param y: key point y in pixels
    :param shape: (height, width) of the target map
    :return: (height, width) float array
    """
    # BUG FIX: gaussian_kernel takes (width, height); the original passed
    # shape[0] (the height) as the width, transposing the map for any
    # non-square frame. Square crops (size, size) were unaffected.
    # (The original also allocated an unused (H, W, 1) buffer; removed.)
    center_map = gaussian_kernel(shape[1], shape[0], x, y, 3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    return center_map
def gaussian_kernel(width, height, x, y, sigma):
    """Dense (height, width) Gaussian bump exp(-d^2 / (2*sigma^2)) around (x, y)."""
    gridy, gridx = np.mgrid[0:height, 0:width]
    D2 = (gridx - x) ** 2 + (gridy - y) ** 2
    return np.exp(-D2 / 2.0 / sigma / sigma)
def image_process(arg):
    """
    Worker for one STB image: compute 2-D joints, the hand bounding box, and
    the intrinsic matrix rescaled to the crop.

    :param arg: (img_path, destination, anno_xyz, size) where anno_xyz is the
        (21, 3) joint array for this frame
    :return: [destination, uv_coor, depth, anno_xyz, k]
    """
    img_path, destination, anno_xyz, size = arg
    image = cv2.imread(img_path)
    # file names look like "<camera>_<mode>_<index>.png"
    camera, mode, index = os.path.basename(img_path).split("_")
    depth = anno_xyz[:, -1].copy()
    uv_coor = get_uv_coord(mode, camera, anno_xyz)
    bbox = get_bounding_box(uv_coor, image.shape)
    xmin, xmax, ymin, ymax = bbox
    # image = image[ymin:ymax + 1, xmin:xmax + 1] # crop the image
    # image = cv2.resize(image, (size, size))
    if camera == "BB":
        K = I_BB.K.copy()
    else:
        # BUG FIX: the SK color/depth intrinsics were swapped -- 'color'
        # frames were rescaled with the depth-camera matrix and vice versa
        # (inconsistent with get_uv_coord's selection).
        if mode == "color":
            K = I_SK.K_color.copy()
        else:
            K = I_SK.K_depth.copy()
    uv_coor, k = scale(uv_coor, K, bbox, (size, size))
    # joints_map = create_jointsmap(uv_coor, size)
    joints_map_name = os.path.basename(destination).split('_')
    joints_map_name = joints_map_name[0] + '_' + joints_map_name[1] + '_' + "joints" + "_" + joints_map_name[2]
    joints_map_path = os.path.join(os.path.dirname(destination), joints_map_name)
    # saving 21x1x256x256 heatmaps as .pt
    #heatmaps = get_heatmaps(uv_coor, (size, size))
    #torch.save(heatmaps, os.path.join(os.path.dirname(destination), os.path.basename(destination)[0:-3]+"pt"))
    # cv2.imwrite(destination, image)
    # cv2.imwrite(joints_map_path, joints_map)
    return [destination, uv_coor, depth, anno_xyz, k]
def main(src, dst, size):
    """
    Run STB preprocessing: builds the train/test crops and replaces the
    multiple .mat label files with a single pickle per split.

    The pickle file is under the format:
        [folder name]/[image_name]/
            k
            uv_coord
            depth
            xyz
    B1/B2/B3/B5/B6 sequences go to train, B4 to test.

    :param src: source dataset folder (with a "labels" sub-folder)
    :param dst: destination folder for the new cropped dataset
    :param size: new image size (size x size)
    :return: None
    """
    train_dst = os.path.join(dst, 'train')
    test_dst = os.path.join(dst, 'test')
    label_paths = [os.path.join(src, 'labels', i) for i in os.listdir(os.path.join(src, 'labels'))]
    image_folders = [os.path.join(src, i) for i in os.listdir(src) if i != "labels"]
    # map sequence-folder name -> list of image paths it contains
    image_paths = {}
    for folder in image_folders:
        images = os.listdir(folder)
        image_paths[os.path.basename(folder)] = [os.path.join(folder, i) for i in images]
    if DEBUG:
        print("image folders are : {}".format(image_paths.keys()))
    # for each image assign its xyz coordinate
    args = []
    train_labels = ["B1", "B2", "B3", "B5", "B6"]
    test_labels = ["B4"]
    for l_p in label_paths:
        # label files are named "<sequence>_<camera>.mat"
        folder = os.path.basename(l_p).split('_')[0]
        camera = os.path.basename(l_p).split('_')[-1][0:-4]
        images = image_paths[folder]
        labels = get_xyz_coord(l_p)
        # keep only the images captured by this label file's camera
        images = list(filter(lambda x: os.path.basename(x).split("_")[0] == camera, images))
        if DEBUG:
            print(l_p, camera)
        for i in images:
            index = int(os.path.basename(i).split('_')[-1][0:-4])
            if os.path.basename(l_p)[0:2] in train_labels:
                destination = os.path.join(train_dst, folder, os.path.basename(i))
            elif os.path.basename(l_p)[0:2] in test_labels:
                destination = os.path.join(test_dst, folder, os.path.basename(i))
            else:
                raise ValueError
            args.append([i, destination, reorder(labels[index]), size])
    # process every image in parallel with a progress bar
    p = Pool()
    results = list(tqdm.tqdm(p.imap(image_process, args), ascii=True, total=len(args)))
    p.close()
    p.join()
    # fold the per-image results into nested annotation dicts per split
    annotations_train = edict()
    annotations_test = edict()
    for r in results:
        destination, uv_coord, depth, xyz, k = r
        folder = os.path.basename(os.path.dirname(destination))
        image = os.path.basename(destination)
        if folder[0:2] in train_labels:
            annotations = annotations_train
        elif folder[0:2] in test_labels:
            annotations = annotations_test
        else:
            raise ValueError
        if folder not in annotations:
            annotations[folder] = edict()
            annotations[folder][image] = edict()
        else:
            annotations[folder][image] = edict()
        annotations[folder][image].uv_coord = uv_coord
        annotations[folder][image].k = k
        annotations[folder][image].depth = depth
        annotations[folder][image].xyz = xyz
    with open(os.path.join(train_dst, "annotation.pickle"), "wb") as handle:
        pickle.dump(annotations_train, handle)
    with open(os.path.join(test_dst, "annotation.pickle"), "wb") as handle:
        pickle.dump(annotations_test, handle)
if __name__ == "__main__":
    # STB stores its labels under the following format:
    #   *_SK -> Intel RealSense camera
    #   *_BB -> Bumblebee camera
    # labels are stored in "handPara" as a 3 x 21 x N array:
    #   3  -> x, y, z
    #   21 -> the joints
    #   N  -> total samples, typically 1500
    # note that only SK (Intel RealSense) contains RGB, D and xyz data.
    destination = sys.argv[2]
    # Build <dst>/{train,test}/B[1-6]{Counting,Random} up front. This replaces
    # 24 repetitive os.mkdir calls; exist_ok also fixes the crash the original
    # hit when the tree already existed (the sub-dir mkdirs ran unconditionally).
    for split in ('train', 'test'):
        for seq in ('B1', 'B2', 'B3', 'B4', 'B5', 'B6'):
            for kind in ('Counting', 'Random'):
                os.makedirs(os.path.join(destination, split, seq + kind), exist_ok=True)
    main(sys.argv[1], sys.argv[2], int(sys.argv[3]))
|
983,059 | ba67a696d9de7d167c64b1b58c01a7740291481b | #!usr/bin/python3
# -*- coding: utf-8 -*-
#----------------------------------------
# name: predict
# purpose: ランダムフォレストを用いて、雲海出現を予測する。学習成果の検証用スクリプト。
# author: Katsuhiro MORISHITA, 森下功啓
# created: 2015-08-08
# lisence: MIT
#----------------------------------------
import pandas
import pickle
from sklearn.ensemble import RandomForestRegressor
import datetime
import feature
def predict(clf, date_list, feature_generation_func, raw_data, save=False):
    """Generate the feature vector for each date, run the trained estimator on
    it, and return the predictions.

    Dates whose feature vector is None, or contains None, are recorded with a
    None result so the saved CSV still lines up with the ground truth.

    :param clf: trained estimator exposing .predict()
    :param date_list: iterable of dates to score
    :param feature_generation_func: callable(date, raw_data) -> feature vector or None
    :param raw_data: raw weather data passed through to the feature generator
    :param save: when True, also dump the results to result_temp.csv
    :return: dict mapping date -> prediction (or None)
    """
    results = {}
    for _date in date_list:
        _feature = feature_generation_func(_date, raw_data)
        if _feature is not None:  # identity test instead of the original "!= None"
            if None not in _feature:  # clf.predict would fail on None entries
                test = clf.predict(_feature)
                results[_date] = test[0]
                print(_date, test)
            else:  # record the miss so it can still be compared with the truth
                print("--feature has None!--")
                print(_feature)
                results[_date] = None
        else:
            print("--feature is None!--")  # rare, but handled just in case
            results[_date] = None
    # dump predictions, one "date,prediction" line per date, sorted by date
    # (the original's pointless "for _ in range(1)" loop is removed)
    if save:
        with open("result_temp.csv", "w") as fw:
            for date in sorted(results.keys()):
                fw.write("{},{}\n".format(date, results[date]))
    return results
def predict2(clf, date_list, features_dict, save=False):
    """Score pre-computed feature vectors with the trained estimator.

    Same contract as ``predict`` but the features come from ``features_dict``,
    which maps date -> (date, feature_vector, label). Dates whose feature
    vector is None, or contains None, are recorded with a None result.

    :param clf: trained estimator exposing .predict()
    :param date_list: iterable of dates to score
    :param features_dict: mapping date -> (date, feature vector or None, label)
    :param save: when True, also dump the results to result_temp.csv
    :return: dict mapping date -> prediction (or None)
    """
    results = {}
    for _date in date_list:
        date, _feature, label = features_dict[_date]
        if _feature is not None:  # identity test instead of the original "!= None"
            if None not in _feature:  # clf.predict would fail on None entries
                test = clf.predict(_feature)
                results[_date] = test[0]
                print(_date, test)
            else:  # record the miss so it can still be compared with the truth
                print("--feature has None!--")
                print(_feature)
                results[_date] = None
        else:
            print("--feature is None!--")  # rare, but handled just in case
            results[_date] = None
    # dump predictions, one "date,prediction" line per date, sorted by date
    if save:
        with open("result_temp.csv", "w") as fw:
            for date in sorted(results.keys()):
                fw.write("{},{}\n".format(date, results[date]))
    return results
def date_range(date_start, date_end):
    """Return the inclusive list of daily dates from date_start through date_end.

    An empty list is returned when date_end precedes date_start.
    """
    n_days = (date_end - date_start).days
    return [date_start + datetime.timedelta(days=i) for i in range(n_days + 1)]
def main():
    """Load the trained model and score summer/autumn 2015 for verification."""
    # create the estimator object (immediately replaced by the pickled one below)
    clf = RandomForestRegressor()
    with open('entry_temp.pickle', 'rb') as f:  # read the training result
        clf = pickle.load(f)  # restore the trained estimator object
    # load the raw weather observations
    raw_data = feature.read_raw_data()
    predict(\
        clf, \
        date_range(datetime.datetime(2015, 6, 23), datetime.datetime(2015, 10, 24)), \
        feature.create_feature23, \
        raw_data, \
        True)
if __name__ == '__main__':
    main()
983,060 | cce367bd4381536e81492e80c701157f09ece366 | #!/usr/bin/env python
import cgi
import MySQLdb
import cgitb
cgitb.enable()
form = cgi.FieldStorage()
lister = ['a', 'b', 'c']
# pre-render one <option> element per entry, concatenated into a single string
html_list = ''.join('<option value={0}>{0}</option>'.format(item) for item in lister)
# Page template. The {OPTIONS} placeholder is filled with html_list at print
# time (see the bottom of the file); str.format cannot be used on the whole
# template because the CSS/JS braces would be interpreted as fields.
html = """Content-type: text/html\n
<html>
<style>
div.blueTable {
overflow: scroll;
text-align: left;
}
div.blueTable td, table.blueTable th {
padding: 3px 2px;
}
div.blueTable tbody td {
font-size: 9px;
}
div.blueTable tr:nth-child(even) {
background: #D0E4F5;
}
div.blueTable thead {
background: #1C6EA4;
background: -moz-linear-gradient(top, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
background: -webkit-linear-gradient(top, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
background: linear-gradient(to bottom, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
}
div.blueTable thead th {
font-size: 11px;
font-weight: bold;
color: #FFFFFF;
}
div.blueTable thead th:first-child {
border-left: none;
}
div.blueTable tfoot {
font-size: 14px;
font-weight: bold;
color: #FFFFFF;
background: #D0E4F5;
background: -moz-linear-gradient(top, #dcebf7 0%, #d4e6f6 66%, #D0E4F5 100%);
background: -webkit-linear-gradient(top, #dcebf7 0%, #d4e6f6 66%, #D0E4F5 100%);
background: linear-gradient(to bottom, #dcebf7 0%, #d4e6f6 66%, #D0E4F5 100%);
}
div.blueTable tfoot td {
font-size: 14px;
}
div.blueTable tfoot .links {
text-align: right;
}
div.blueTable tfoot .links a{
display: inline-block;
background: #1C6EA4;
color: #FFFFFF;
padding: 2px 8px;
}
/* NOTE: The styles were added inline because Prefixfree needs access to your styles and they must be inlined if they are on local disk! */
.btn { display: inline-block; *display: inline; *zoom: 1; padding: 4px 10px 4px; margin-bottom: 0; font-size: 13px; line-height: 18px; color: #333333; text-align: center;text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75); vertical-align: middle; background-color: #f5f5f5; background-image: -moz-linear-gradient(top, #ffffff, #e6e6e6); background-image: -ms-linear-gradient(top, #ffffff, #e6e6e6); background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), to(#e6e6e6)); background-image: -webkit-linear-gradient(top, #ffffff, #e6e6e6); background-image: -o-linear-gradient(top, #ffffff, #e6e6e6); background-image: linear-gradient(top, #ffffff, #e6e6e6); background-repeat: repeat-x; filter: progid:dximagetransform.microsoft.gradient(startColorstr=#ffffff, endColorstr=#e6e6e6, GradientType=0); border-color: #e6e6e6 #e6e6e6 #e6e6e6; border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); border: 1px solid #e6e6e6; -webkit-border-radius: 4px; -moz-border-radius: 4px; border-radius: 4px; -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05); -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05); box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05); cursor: pointer; *margin-left: .3em; }
.btn:hover, .btn:active, .btn.active, .btn.disabled, .btn[disabled] { background-color: #e6e6e6; }
.btn-large { padding: 9px 14px; font-size: 15px; line-height: normal; -webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px; }
.btn:hover { color: #333333; text-decoration: none; background-color: #e6e6e6; background-position: 0 -15px; -webkit-transition: background-position 0.1s linear; -moz-transition: background-position 0.1s linear; -ms-transition: background-position 0.1s linear; -o-transition: background-position 0.1s linear; transition: background-position 0.1s linear; }
.btn-primary, .btn-primary:hover { text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); color: #ffffff; }
.btn-primary.active { color: rgba(255, 255, 255, 0.75); }
.btn-primary { background-color: #4a77d4; background-image: -moz-linear-gradient(top, #6eb6de, #4a77d4); background-image: -ms-linear-gradient(top, #6eb6de, #4a77d4); background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#6eb6de), to(#4a77d4)); background-image: -webkit-linear-gradient(top, #6eb6de, #4a77d4); background-image: -o-linear-gradient(top, #6eb6de, #4a77d4); background-image: linear-gradient(top, #6eb6de, #4a77d4); background-repeat: repeat-x; filter: progid:dximagetransform.microsoft.gradient(startColorstr=#6eb6de, endColorstr=#4a77d4, GradientType=0); border: 1px solid #3762bc; text-shadow: 1px 1px 1px rgba(0,0,0,0.4); box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.5); }
.btn-primary:hover, .btn-primary:active, .btn-primary.active, .btn-primary.disabled, .btn-primary[disabled] { filter: none; background-color: #6a77d4; }
.btn-block { width: 10%; display:block; }
* { -webkit-box-sizing:border-box; -moz-box-sizing:border-box; -ms-box-sizing:border-box; -o-box-sizing:border-box; box-sizing:border-box; }
html { width: 100%; height:100%; }
body {
font-family: 'Open Sans', sans-serif;
background: -webkit-radial-gradient(0% 100%, ellipse cover, rgba(104,128,138,.4) 10%,rgba(138,114,76,0) 40%), linear-gradient(to bottom, rgba(57,173,219,.25) 0%,rgba(42,60,87,.4) 100%), linear-gradient(135deg, #670d10 0%,#092756 100%);
filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#3E1D6D', endColorstr='#092756',GradientType=1 );
}
.title {
top: 20%;
}
.title { color: #fff; text-shadow: 0 0 10px rgba(0,0,0,0.3); letter-spacing:1px; text-align:center; }
input {
margin-bottom: 10px;
background: rgba(0,0,0,0.3);
border: none;
outline: none;
padding: 10px;
font-size: 13px;
color: #fff;
text-shadow: 1px 1px 1px rgba(0,0,0,0.3);
border: 1px solid rgba(0,0,0,0.3);
border-radius: 4px;
box-shadow: inset 0 -5px 45px rgba(100,100,100,0.2), 0 1px 1px rgba(255,255,255,0.2);
-webkit-transition: box-shadow .5s ease;
-moz-transition: box-shadow .5s ease;
-o-transition: box-shadow .5s ease;
-ms-transition: box-shadow .5s ease;
transition: box-shadow .5s ease;
}
input:focus { box-shadow: inset 0 -5px 45px rgba(100,100,100,0.4), 0 1px 1px rgba(255,255,255,0.2); }
</style>
<script>
function edit_row(no)
{
document.getElementById("edit_button"+no).style.display="none";
document.getElementById("save_button"+no).style.display="block";
var name=document.getElementById("name_row"+no);
var country=document.getElementById("country_row"+no);
var age=document.getElementById("age_row"+no);
var name_data=name.innerHTML;
var country_data=country.innerHTML;
var age_data=age.innerHTML;
name.innerHTML="<input type='text' id='name_text"+no+"' value='"+name_data+"'>";
country.innerHTML="<input type='text' id='country_text"+no+"' value='"+country_data+"'>";
age.innerHTML="<input type='text' id='age_text"+no+"' value='"+age_data+"'>";
}
function save_row(no)
{
window.alert(no);
var name_val=document.getElementById("name_text"+no).value;
var country_val=document.getElementById("country_text"+no).value;
var age_val=document.getElementById("age_text"+no).value;
document.getElementById("name_row"+no).innerHTML=name_val;
document.getElementById("country_row"+no).innerHTML=country_val;
document.getElementById("age_row"+no).innerHTML=age_val;
document.getElementById("edit_button"+no).style.display="block";
document.getElementById("save_button"+no).style.display="none";
}
function delete_row(no)
{
document.getElementById("row"+no+"").outerHTML="";
}
function add_row()
{
var new_name=document.getElementById("new_name").value;
var new_country=document.getElementById("new_country").value;
var new_age=document.getElementById("new_age").value;
var table=document.getElementById("data_table");
var table_len=(table.rows.length)-1;
var row = table.insertRow(table_len).outerHTML="<tr id='row"+table_len+"'><td id='name_row"+table_len+"'>"+new_name+"</td><td id='country_row"+table_len+"'>"+new_country+"</td><td id='age_row"+table_len+"'>"+new_age+"</td><td><input type='button' id='edit_button"+table_len+"' value='Edit' class='edit' onclick='edit_row("+table_len+")'> <input type='button' id='save_button"+table_len+"' value='Save' class='save' onclick='save_row("+table_len+")'> <input type='button' value='Delete' class='delete' onclick='delete_row("+table_len+")'></td></tr>";
document.getElementById("new_name").value="";
document.getElementById("new_country").value="";
document.getElementById("new_age").value="";
}
</script>
<head>
<script type="text/javascript" src="table_script.js"></script>
</head>
<body>
<div id="wrapper" class ="blueTable">
<table align='center' cellspacing=2 cellpadding=5 id="data_table" border=1>
<tr>
<th>Field</th>
<th>Type</th>
<th>Value</th>
</tr>
<tr>
<td><select>
{OPTIONS}
</select></td>
<td><input type="text" id="new_country"></td>
<td><input type="text" id="new_age"></td>
<td><input type="button" class="add" onclick="add_row();" value="Add Row"></td>
</tr>
</table>
</div>
<iframe src="http://localhost/reshma/g4/ff.html" seamless>
iframe[seamless] {
border: none;}
</iframe>
</body>
</html>
"""
# BUG FIX: html_list was built but never injected into the page -- the
# {OPTIONS} placeholder was emitted literally. Substitute it here.
print(html.replace("{OPTIONS}", html_list))
|
983,061 | 985e75a01e3e32b221787f24c8a064d732d03b6e | """
Test Gradient of SVGVideoMaker
"""
# region Imports
from SVGVideoMaker import Circle, Rectangle, Point2D, SVG, save
# endregion Imports
def main():
    """Render shapes filled with SVG gradients and save the result as a PNG."""
    # canvas size / view box
    width, height = 500, 500
    svg = SVG(width=width, height=height)
    svg.set_view_box(Point2D(0, 0), Point2D(width, height))
    # top-left rectangle: 4-stop gradient, fully opaque
    rect1 = Rectangle(Point2D(5, 5), 225, 225)
    id_g1 = "Gradient1_ID"
    svg.add_gradient(id_g1, offsets=[0, 25, 50, 100], colors=["red", "blue", "green", "purple"], opacities=[1, 1, 1, 1])
    rect1.set_style(fill_color=f"url(#{id_g1})", stroke_width=0)
    # bottom-right rectangle: plain red fill with a black outline
    # (the original wrapped "red" in a pointless f-string; removed)
    rect2 = Rectangle(Point2D(262, 262), 225, 225)
    rect2.set_style(fill_color="red", stroke_color="black", stroke_width=10)
    # top-right circle: vertical gradient fading in and out via opacities
    circle1 = Circle(Point2D(375, 137), 112)
    id_g3 = "Gradient3_ID"
    svg.add_gradient(id_g3, [0, 50, 100], ["red", "green", "blue"], [0, 1, 0],
                     orientation_start=(0, 0), orientation_end=(0, 1))
    circle1.set_style(fill_color=f"url(#{id_g3})", stroke_width=0)
    # bottom-left circle: diagonal gradient mixing named/hex/rgb colours
    circle2 = Circle(Point2D(137, 375), 112)
    id_g4 = "Gradient4_ID"
    svg.add_gradient(id_g4, [0, 75, 100], ["red", "#1A2B3C", "rgb(0, 255, 200)"], [0.5, 1, 1], (0, 0), (1, 1))
    circle2.set_style(fill_color=f"url(#{id_g4})", stroke_width=0)
    svg.append(rect1, rect2, circle1, circle2)
    save(svg.get_svg(), path="./color", ext="png")
if __name__ == '__main__':
    main()
|
983,062 | c67d58a267d206127dbd15b37a5bbd93f6c0af38 | import driveDataset
from keras.models import Sequential
from keras.layers import Conv2D, Reshape, AveragePooling2D, UpSampling2D
from keras.callbacks import EarlyStopping, TensorBoard
from keras.optimizers import Adam
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs
# load images and vessel masks -- presumably the DRIVE retinal dataset,
# given the module name and the fixed 584x565 frames; TODO confirm
X_train, Y_train = driveDataset.loadImages(mode='training')
X_test, Y_test = driveDataset.loadImages(mode='test')
print(X_train.shape, '->', Y_train.shape)
print(X_test.shape, '->', Y_test.shape)
# small fully-convolutional net: two tanh conv layers, then a dilated
# single-filter sigmoid conv producing a per-pixel vessel probability
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=7, padding='same', activation='tanh', input_shape=(584, 565, 3)))
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='tanh'))
model.add(Conv2D(filters=1, kernel_size=3, dilation_rate=2, padding='same', activation='sigmoid'))
model.add(Reshape((584, 565)))  # drop the trailing channel axis to match Y
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001))
print(model.summary())
# stop when the training loss plateaus; log curves for TensorBoard
stop = EarlyStopping(monitor='loss', patience=3, min_delta=0.0005)
log = TensorBoard()
model.fit(x=X_train, y=Y_train, batch_size=10, shuffle=True, epochs=300, callbacks=[stop, log])
model.save('eye_vessel.h5')
print('Predicting Test...')
Y_pred = model.predict(X_test)
print('Saving Images...')
driveDataset.saveImages(Y_test, Y_pred)
print('Calculating Loss...')
score = model.evaluate(x=X_test, y=Y_test)
print("Loss: %.2f" % score)
|
983,063 | 67d23d8ee567449cf97f08137349a78b88d7b2c5 | # Adapted from Brett Terpstra script : http://brettterpstra.com/2013/04/28/instantly-grab-a-high-res-icon-for-any-ios-app/
# Fetches the 1024px version of an OS X app icon. The result is displayed in Pythonista's console, you can tap and hold to save or copy it.
# If you find any bug, you can find me @silouane20 on Twitter.
from PIL import Image
from StringIO import StringIO
import re
import requests
def find_icon(terms):
    """Search the iTunes store for a macOS app and return its 512px artwork URL.

    Returns None when nothing matches.
    """
    # Let requests url-encode the query: the original interpolated the raw
    # app name into the URL, which broke on spaces and non-ASCII characters.
    res = requests.get('http://itunes.apple.com/search',
                       params={'term': terms, 'entity': 'macSoftware'})
    m = re.search('artworkUrl512":"(.+?)", ', res.text)
    if m:
        return m.group(1)
    return None  # explicit, instead of falling off the end
def main():
    # NOTE(review): this file is Python 2 (print statements, raw_input, StringIO)
    # prompt for the app name, look up its artwork URL on iTunes
    terms = raw_input("Input app name: ")
    icon_url = find_icon(terms)
    if icon_url:
        # download the artwork and preview it; from the preview it can be
        # saved or copied (per the header note)
        file = requests.get(icon_url)
        image = Image.open(StringIO(file.content))
        image.show()
    else:
        print "Failed to get iTunes url"
if __name__ == "__main__":
    main()
|
983,064 | b2781c546b0965a25dd7051721490f664009c739 | from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from accounts.slugify import unique_slugify
def upload_image(instance, image):
    """
    Build the storage path for an attachment, grouped per model class and date.
    :param instance, image
    :returns ex: gallery/Image/2016/03/30/filename
    """
    now = datetime.today()
    path_template = 'gallery/{model}/{year}/{month}/{day}/{image}'
    return path_template.format(
        model=instance._meta.model_name,
        year=now.year,
        month=now.month,
        day=now.day,
        image=image,
    )
class Article(TimeStampedModel):
    """
    A blog/news article with title, body, publication date and author.
    is_published: True when the Article is live. When False it is visible only
    to the author, who can preview it and approve it for publishing.
    The slug is generated once from the title on first save (see save()).
    """
    title = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100)
    body = models.TextField()
    posted_on = models.DateTimeField()
    is_published = models.BooleanField(default=False)
    image = models.ImageField(
        _("Upload Article Picture"), upload_to=upload_image,
        null=True, blank=True)
    # NOTE(review): "optinal_image" looks like a typo for "optional_image";
    # renaming the field requires a schema migration, so it is only flagged here.
    optinal_image = models.ImageField(
        _("Upload Article Picture"), upload_to=upload_image,
        null=True, blank=True)
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    def save(self, *args, **kwargs):
        # Slugify only on creation (id is None) so existing URLs stay stable.
        if self.id is None:
            unique_slugify(self, self.title)
        super(Article, self).save(*args, **kwargs)
    def get_absolute_url(self):
        # Canonical URL of this article, resolved by slug.
        return reverse('article-detail', args=[self.slug])
class Category(models.Model):
    """
    Category: classification of an Article in an area of expertise.
    """
    name = models.CharField(max_length=50)
    # NOTE(review): a ForeignKey here means each Category row belongs to at
    # most one Article (the docstring above describes the opposite cardinality);
    # if articles and categories should be many-to-many, a ManyToManyField
    # would be expected — confirm intent before changing.
    article = models.ForeignKey('Article', blank=True, null=True)
    class Meta:
        verbose_name_plural = 'categories'
    def __unicode__(self):
        # Python 2 string representation (file imports unicode_literals).
        return self.name
|
983,065 | 4d03b21e2f2406aef7b389d4481d200f01750d4e | #实例二:亚马逊
import requests
# Demo: fetch an Amazon product page with a spoofed browser User-Agent.
url = input("请输入一个URL:")
"""e.g.
https://www.amazon.cn/gp/product/B073LBRNV2?ref_=plp_web_a_A2XQOEEUXFBHEM_pc_2&me=A1AJ19PSB66TGU
"""
print()
try:
    # Amazon rejects the default requests UA, so pretend to be a browser.
    kv = {'user-agent': 'Mozilla/5.0'}
    r = requests.get(url, headers=kv)
    print("headers: ", r.request.headers)
    r.raise_for_status()
    print("以下为详细信息:")
    print("原r.encoding: ",r.encoding)
    print("原r.apparent_encoding: ",r.apparent_encoding)
    # Switch to the encoding sniffed from the body so non-ASCII text decodes.
    r.encoding = r.apparent_encoding
    # print(r.text[1000: 2000])
except:
    print("爬取失败")
|
983,066 | 48f97a3faacfa6f37bc2d5bb618f139d71390118 | # coding=utf-8
from selenium import webdriver
import time
# Launch Chrome and open the local ActionChains demo page.
driver = webdriver.Chrome(r"d:\tools\webdrivers\chromedriver.exe")
driver.get('file:///C:/Users/Administrator/Dropbox/python_autotest/autoUI_selenium/lesson07/ac.html')
# ---------------------------------------
from selenium.webdriver.common.action_chains import ActionChains
ac = ActionChains(driver)
t1 = driver.find_element_by_id('t1')
t2 = driver.find_element_by_id('t2')
t3 = driver.find_element_by_id('t3')
# ActionChains methods return the chain itself, so calls can be chained;
# nothing is executed until perform() is called at the end.
ac1=ac.click(t1).send_keys('松勤教育')
ac2=ac1.send_keys('1').click(t2).send_keys('2').click(t3).send_keys('3').perform()
# ---------------------------------------
# Keep the browser open until the user presses Enter, then shut down.
input('..')
driver.quit()
|
983,067 | f135ad1980d58d37669bc86297199e259fada057 | import os
from os import listdir
from os.path import isfile, join
import random
from shutil import copyfile, rmtree
class DataFolder:
    """Builds a train/val/test directory tree and randomly distributes the
    per-style spectrogram files into it."""
    def __init__(self, destination, remove_datafolder = False):
        """
        :param destination: root directory of the generated dataset
        :param remove_datafolder: when True, wipe any existing tree first
        """
        self.directory = destination
        self.folders = ['train','val','test']
        self.styles = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
        if remove_datafolder and os.path.exists(self.directory):
            rmtree(self.directory)
        # Create <destination>/<split>/<style> for every split/style pair.
        # os.makedirs creates the intermediate directories too, so the root
        # and split levels do not need to be created separately.
        for folder in self.folders:
            for style in self.styles:
                target = join(self.directory, folder, style)
                if not os.path.exists(target):
                    os.makedirs(target)
    def generate(self, spectrogram_directory, train_pourcent=0.6, val_pourcent=0.1):
        """
        Shuffle the files of every style and copy them into train/val/test.
        :param spectrogram_directory: source directory with one folder per style
        :param train_pourcent: fraction of files used for training
        :param val_pourcent: fraction used for validation (remainder is test)
        """
        for style in self.styles:
            src = join(spectrogram_directory, style)
            files = [f for f in listdir(src) if isfile(join(src, f))]
            random.shuffle(files)
            size = len(files)
            n_train = int(size * train_pourcent)
            n_val = int(size * (train_pourcent + val_pourcent))
            splits = {
                'train': files[:n_train],
                'val': files[n_train:n_val],
                'test': files[n_val:],
            }
            for split, names in splits.items():
                dest = join(self.directory, split, style)
                for name in names:
                    copyfile(join(src, name), join(dest, name))
983,068 | 05633a36437fbe2a4a82e7ccc926e12bce6c7dc2 | # %%
import numpy as np
from math import pi, sin, cos
import matplotlib.pyplot as plt
from matplotlib import cm
from sympy import Matrix
import json
import os
from Scenarios import Scenario
from Scenarios import Indices
from plotOnline import transformedRectangle
mypath = 'C:\\Users\\Lisnol\\National University of Singapore\\Ma Jun - Research-XX\\SCP\\'
# mypath = 'D:\\SoftWare\\DropBox\\Dropbox\\[5]SCP\\paper\\'
# %%
# load data to plot figure 1-4
num_vehicles = 8
scenario_choice = 'Circle'
controllerName = 'SCP' # 'MIQP', 'SCP'
# Evenly spaced start angles for the circle scenario.
angles = [2*pi/num_vehicles*(i+1) for i in range(num_vehicles)]
idx = Indices()
scenario = Scenario()
if scenario_choice == 'Circle':
    scenario.get_circle_scenario(angles)
elif scenario_choice == 'Frog':
    scenario.get_frog_scenario()
elif scenario_choice == 'Parallel':
    num_vehicles = 11
    scenario.get_parallel_scenario(num_vehicles)
    scenario.dsafeExtra = 0.9
scenario.complete_scenario()
# Shorthands for the scenario dimensions used in the reshapes below.
Hp = scenario.Hp
Nsim = scenario.Nsim
dt = scenario.dt
nVeh = scenario.nVeh
nObst = scenario.nObst
nx = scenario.model.nx
nu = scenario.model.nu
steps = 10
with open('Data\\'+scenario_choice+'_num_'+str(scenario.nVeh)+'_control_'+controllerName+'.json', 'r') as f:
    result = json.load(f)
# Flattened (Fortran-order) arrays in the JSON are restored to their original shapes.
vehiclePathFullRes = np.reshape(result['vehiclePathFullRes'],(nx, nVeh, scenario.ticks_total+1),order='F') # (nx, nVeh, ticks_total+1)
obstaclePathFullRes = np.reshape(result['obstaclePathFullRes'], (nObst, 2, scenario.ticks_total+1) , order='F') # (nObst, 2, ticks_total+1)
controlPathFullRes = np.reshape(result['controlPathFullRes'], (nVeh, scenario.ticks_total+1), order='F') # (nVeh, ticks_total+1)
controlPrediction = np.reshape(result['controlPredictions'], (Hp, nVeh, Nsim), order='F') # (Hp, nVeh, Nsim)
trajectoryPredictions = np.reshape(result['trajectoryPredictions'], (Hp, scenario.model.ny, nVeh, Nsim), order='F') # (Hp, ny, nVeh, Nsim)
initial_pos = np.reshape(result['initial_pos'], (1, 2, nVeh, Nsim), order='F') # (1, 2, nVeh, Nsim)
MPC_delay_compensation_trajectory = np.reshape(result['MPC_delay_compensation_trajectory'], (steps, nx, nVeh, Nsim), order='F') # (steps, nx, nVeh, Nsim)
evaluations_obj_value = np.reshape(result['evaluations_obj_value'], (Nsim,1), order='F') # Nsim
controllerRuntime = np.reshape(result['controllerRuntime'], (Nsim,1), order='F') # (Nsim, 1)
stepTime = np.reshape(result['stepTime'], (Nsim,1), order='F') # (Nsim, 1)
ReferenceTrajectory = np.reshape(result['ReferenceTrajectory'], (Hp, 2, nVeh, Nsim), order='F') # (Hp, 2, nVeh, Nsim)
# Prepend each step's initial position to its predicted trajectory so the
# plotted prediction line starts at the vehicle's current pose.
trajectoryPrediction_with_x0 = np.zeros((Hp+1, scenario.model.ny, nVeh, Nsim))
for step_idx in range(Nsim):
    trajectoryPrediction_with_x0[:,:,:,step_idx] = np.vstack([initial_pos[:,:,:,step_idx],trajectoryPredictions[:,:,:,step_idx] ])
## Colors
# One rainbow color per vehicle.
colorVehmap = cm.get_cmap('rainbow', nVeh)
colorVeh = colorVehmap(range(nVeh))
# %%
"""
############################################################################################################
############################ Plot One scenario One controller name (No Compare)############################
############################################################################################################
"""
# %%
"""
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Plot trajectories
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""
import matplotlib
matplotlib.rcParams["font.family"] = "Times New Roman"
matplotlib.rcParams['font.size'] = 18
nrows, ncols = 1, 1
figsize = (8,8)
# One figure per simulation step, saved as figs\<step>.png.
for step_idx in range(Nsim):
    fig1, ax1 = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
    ax1.set_aspect('equal', adjustable='box')
    tick_now = step_idx*scenario.ticks_per_sim
    vehiclePositions = vehiclePathFullRes[:,:,tick_now]
    obstaclePositions = obstaclePathFullRes[:,:,tick_now]
    for v in range(nVeh):
        # Sampled trajectory points
        ax1.scatter( ReferenceTrajectory[:,idx.x,v,step_idx], ReferenceTrajectory[:,idx.y,v,step_idx], marker='o', s=9, color=colorVeh[v,:])
        # predicted trajectory
        ax1.plot( trajectoryPrediction_with_x0[:,idx.x,v,step_idx],trajectoryPrediction_with_x0[:,idx.y,v,step_idx], color=colorVeh[v,:] )
        # vehicle trajectory delay prediction
        ax1.plot( MPC_delay_compensation_trajectory[:,idx.x,v,step_idx], MPC_delay_compensation_trajectory[:,idx.y,v,step_idx], color=colorVeh[v,:], linewidth=2 )
        # Vehicle rectangles
        x = vehiclePositions[:,v]
        vehiclePolygon = transformedRectangle(x[idx.x],x[idx.y],x[idx.heading], scenario.Length[v],scenario.Width[v])
        ax1.fill(vehiclePolygon[0,:], vehiclePolygon[1,:], fc=colorVeh[v,:], ec='k')
    # Obstacle rectangles
    if nObst:
        for i in range(nObst):
            obstaclePolygon = transformedRectangle( obstaclePositions[i,idx.x], obstaclePositions[i,idx.y],
                scenario.obstacles[i,idx.heading], scenario.obstacles[i,idx.length], scenario.obstacles[i,idx.width])
            ax1.fill(obstaclePolygon[0,:],obstaclePolygon[1,:], color='gray')
    ax1.set_ylabel(r'$y$ [m]')
    ax1.set_xlabel(r'$x$ [m]')
    if scenario_choice == 'Parallel':
        ax1.set_xlim(-40,40)
        ax1.set_ylim(-25,25)
    plt.tight_layout()
    plt.savefig('figs\\'+str(step_idx)+'.png')
|
983,069 | 2403b7b14ee98e0d3f8024185f4bc81a45dfed78 | # Generated by Django 3.0.1 on 2020-03-17 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes ProductCard.cost to CharField(20).
    # Migrations are schema history — do not edit their operations by hand.
    dependencies = [
        ('productCard', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='productcard',
            name='cost',
            field=models.CharField(max_length=20, verbose_name='Цена'),
        ),
    ]
|
983,070 | 299b116808252fcb7bf2d1411859cb8d16fed58d | #!/usr/bin/python
#-*-coding:utf-8-*-
import urllib
import urllib2
import sys
# Python 2 scraper: walk the anchor tags of a listing page, follow each
# gallery page and download every .jpg it references.
url1 = raw_input("url: ")
up1 = urllib2.urlopen(url1)
s1=up1.read()
#print s
h1 = "<a href="
c1 = ".html"
# NOTE(review): posh1/posc1 are initialized here but never advanced inside the
# outer loop (the loop computes posh/posc instead and never uses them), so t1
# is re-extracted from the same fixed slice of s1 on every pass — this looks
# like a bug; confirm intent before relying on this script.
posh1 = -len(h1)
posc1 = -len(c1)
j = 0
while j < s1.count(h1):
    posh = s1.find(h1,posc1 +len(h1))
    posc = s1.find(c1,posh1 +len(h1))
    t1 = s1[posh1 : posc1 + len(c1)]
    print t1
    http1 = t1.find("/tupianqu")
    print http1
    #if len(t[http:]) == len(temp):
    url = "http://1122ap" + t1[http1:]
    print url
    up = urllib2.urlopen(url)
    s=up.read()
    #print s
    # Inner scan: extract every <img src=...jpg span from the gallery page.
    h = "<img src="
    c = ".jpg"
    temp = ""
    posh = -len(h)
    posc = -len(c)
    i = 0
    while i < s.count(h):
        posh = s.find(h,posc +len(h))
        posc = s.find(c,posh +len(h))
        t = s[posh : posc + len(c)]
        http = t.find("http")
        #print http
        #if len(t[http:]) == len(temp):
        url = t[http:]
        print url
        try:
            # NOTE(review): filename is str(i) + 'jpg' (no dot) — presumably
            # '.jpg' was intended; confirm.
            urllib.urlretrieve(url,str(i) + 'jpg')
            print "picture from %s ;download sucessful" % url
        except:
            print "Unexpected error:", sys.exc_info()[0]
        i += 1
    j += 1
|
983,071 | 451a979902f82c0a8589be44e7f25942e0397285 | #Codeforces Problema 1097 A
#implementacion *600
#https://codeforces.com/problemset/problem/1097/A
#A. Gennady and a Card Game
# Read the table card, then the five cards in hand (rank char + suit char each).
cMesa = input()
cartas = input().split()
# A hand card is playable if it shares the rank (index 0) or the suit
# (index 1) with the table card. The loop replaces the original five
# copy-pasted elif branches and works for any hand size.
if any(carta[0] == cMesa[0] or carta[1] == cMesa[1] for carta in cartas):
    resul = "YES"
else:
    resul = "NO"
print(resul)
|
983,072 | e427dfc60d9980b7a5046cd505c29dfb9c02f052 | import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_table as dt
from libs.pubsub import get_ps_2
import dash_core_components as dcc
# Redis-backed store used for all config reads/writes below.
rconfig = get_ps_2()
app = dash.Dash(__name__, prevent_initial_callbacks=True,
                assets_folder="assets", title='systrade config')
def getconfig():
    # Return every entry of the 'configs' hash as DataTable rows:
    # [{'configname': key, 'value': value}, ...]
    configs = rconfig.hgetall('configs')
    return [dict(configname=k, value=configs[k]) for k in configs]
def addConfig(key, value):
    # Persist one key/value pair into the 'configs' hash.
    # NOTE(review): hset here takes a mapping positionally, while delConfig
    # goes through rconfig.r — confirm the get_ps_2() wrapper's API is
    # consistent between the two.
    rconfig.hset('configs', {key: value})
def delConfig(key):
    # Remove one key from the 'configs' hash (via the raw client .r).
    rconfig.r.hdel('configs', key)
# Page layout: a hidden change-signal div, the add-config form, and the
# editable/deletable config table.
app.layout = html.Div([
    # Hidden div used as a change signal between callbacks (carries n_clicks).
    html.Div(1, id='xconfigchanged', hidden=True),
    html.Div(id='dummy2', hidden=True),
    html.Div([
        html.H1('Systrade configurations', className='max-w-sm'),
        # html.Button('update', id='btn_updateConfig',
        # className='p-4 shadow-md h-full'),
    ], className='flex justify-between items-center mt-8'),
    html.Hr(),
    html.Div(
        [dcc.Input(id='new_configkey', placeholder='config key', className='border'),
         dcc.Input(id='new_configvalue', placeholder='config value',
                   className='border'),
         html.Button('Add', id='btn_add_config', className='border px-4 mx-2')]
    ),
    html.Div(
        dt.DataTable(
            id='tbl_config',
            columns=[dict(id='configname', name='configname', editable=False),
                     dict(id='value', name='value', editable=True)],
            row_deletable=True
        ),
        className='w-1/2 overflow-auto h-1/2 m-auto'
    ),
])
@app.callback(Output('new_configkey', 'value'),
              Input('xconfigchanged', 'children'),
              )
def clearkey(xstate):
    # Blank the key input whenever a config change is signalled.
    return ''
@app.callback(Output('new_configvalue', 'value'),
              Input('xconfigchanged', 'children')
              )
def clearvalue(xstate):
    # Blank the value input whenever a config change is signalled.
    return ''
@app.callback(Output('xconfigchanged', 'children'),
              Input('btn_add_config', 'n_clicks'),
              State('new_configkey', 'value'),
              State('new_configvalue', 'value'),
              State('xconfigchanged', 'children')
              )
def addnewConfig(add_clicks, newkey, newvalue, xstate):
    # Persist the new pair (only when both fields are non-empty) and emit the
    # click count so the clear-input and table-refresh callbacks fire.
    if(add_clicks):
        if(newkey and newvalue):
            addConfig(newkey, newvalue)
    return add_clicks
@app.callback(
    Output('tbl_config', 'data'),
    Input('tbl_config', 'data_previous'),
    Input('xconfigchanged', 'children'),
    State('tbl_config', 'data'),
)
def updateConfig(previous, changed, data):
    # Sync the table with Redis: delete rows removed in the UI, write back the
    # remaining (possibly edited) rows, then re-read the authoritative state.
    print('updating...')
    if(previous):
        # Rows present before the edit but absent now were deleted in the UI.
        deleted = [k['configname'] for k in previous if k['configname']
                   not in [x['configname'] for x in data]]
        for k in deleted:
            delConfig(k)
        configs = {r['configname']: r['value'] for r in data}
        rconfig.hset('configs', configs)
    return getconfig()
if(__name__ == '__main__'):
    app.run_server(debug=True, port=8050)
|
983,073 | 07911f82d1c894e0f377a50ea64b0d01fa00b9ac | # WARRIORS BATTLE GAME
''' Game Sample Output
Sam attacks Paul and deals 9 damage
Paul is down to 10 health
Paul attacks Sam and deals 7 damage
Sam is down to 7 health
Sam attacks Paul and deals 19 damage
Paul is down to -9 health
Paul has Died and Sam is Victorious
Game Over
'''
import random
import math
# Warrior & Battle Class
class Warrior:
    """A combatant with a name, hit points and randomized attack/block rolls."""
    def __init__(self, name='Warrior', health=0, maxAtck=0, maxBlck=0):
        # Base stats; the roll methods below scale off the max values.
        self.name = name
        self.health = health
        self.maxAtck = maxAtck
        self.maxBlck = maxBlck
    def attack(self):
        """Roll an attack worth between 0.5x and 1.5x of maxAtck."""
        return self.maxAtck * (random.random() + .5)
    def block(self):
        """Roll a block worth between 0.5x and 1.5x of maxBlck."""
        return self.maxBlck * (random.random() + .5)
class Battle:
    def fight(self, warrior1, warrior2):
        # Alternate single attacks (warrior1 first) until one warrior dies.
        # loop until 1 warrier is dead
        # it is unknown how long warriors will fight
        while True:
            # w1 attacks w2
            if self.getAtckRes(warrior1, warrior2) == 'Game over':
                print('Game over')
                break
            # w2 attacks w1
            if self.getAtckRes(warrior2, warrior1) == 'Game over':
                print('Game over')
                break
    # this function does not require self, it is just fine with the warriors
    @staticmethod
    # battle.fight() loop is switching warriors then A, and B will be different
    def getAtckRes(warriorA, warriorB):
        # Resolve one attack of A against B and return 'Game over' if B dies,
        # 'Fight again' otherwise. Damage is ceil(attack roll - block roll);
        # it can be negative, in which case B actually gains health.
        # get warrior A attack
        warriorA_atckAmount = warriorA.attack()
        # get warrior B block
        warriorB_blckAmount = warriorB.block()
        # calculate damage dealt to warrior B
        dmg2warriorB = math.ceil(warriorA_atckAmount - warriorB_blckAmount)
        # update warrior B health
        warriorB.health = warriorB.health - dmg2warriorB
        # print action
        print('{} attacks {} and deals {} damage'
              .format(warriorA.name, warriorB.name, dmg2warriorB))
        # print result
        print('{} is down to {} health'
              .format(warriorB.name, warriorB.health))
        # check attacked warrior is dead or alive
        if warriorB.health <= 0:
            print('{} has Died and {} is Victorious'
                  .format(warriorB.name, warriorA.name))
            return 'Game over'
        else:
            return 'Fight again'
def main():
    # Demo battle: two identically-statted warriors fight until one dies.
    # define warrior1 object
    maximus = Warrior('Maximus', 50, 20, 10)
    # define warrior2 object
    galaxon = Warrior('Galaxon', 50, 20, 10)
    # create the battle object
    battle = Battle()
    # start the battle by calling the fight() method
    battle.fight(maximus, galaxon)
# Note: runs on import as well as direct execution (no __main__ guard).
main()
|
983,074 | 7bec091aad1d7f70be0059a02541baecc982e324 | from .models import *
from django.contrib import admin
class DealersAdmin(admin.ModelAdmin):
    # Columns shown on the dealers changelist page.
    list_display = ('dealer_number', 'dealer_name', 'dealer_phone_number', 'dealer_email', 'dealer_address', 'total_transaction', )
class PurchaseAdmin(admin.ModelAdmin):
    # Columns shown on the purchases changelist page.
    list_display = ('invoice_by', 'invoice_date_and_time', )
# Register all models; Sales and Item use the default ModelAdmin.
admin.site.register(DealersDetails, DealersAdmin)
admin.site.register(PurchaseDetails, PurchaseAdmin)
admin.site.register(SalesDetails)
admin.site.register(ItemDetails)
|
983,075 | 3a42d2f02a2a33a2fb1bdb4195459faa59c57ad7 | # coding=utf-8
from sklearn.neural_network import MLPClassifier
from sklearn import datasets
import matplotlib.pyplot as plt
class Hyun_MLPerceptron_Charcter:
    # MLP digit classifier demo over sklearn's digits dataset.
    # NOTE(review): every "method" below is invoked through the CLASS with a
    # dummy positional 0 standing in for `self` (e.g. Cls.Load_Data(0)), and
    # all state lives in class attributes — highly unidiomatic; confirm intent
    # before refactoring to instance methods.
    digits = []
    X_train, y_train = [], []
    def Load_Data(self):
        # Load the digits dataset and keep all but the last 10 samples for training.
        digits=Hyun_MLPerceptron_Charcter.digits = datasets.load_digits()
        Hyun_MLPerceptron_Charcter.X_train, Hyun_MLPerceptron_Charcter.y_train = digits.data[:-10], digits.target[:-10]
        # NOTE(review): this line duplicates the assignment directly above.
        Hyun_MLPerceptron_Charcter.X_train, Hyun_MLPerceptron_Charcter.y_train = digits.data[:-10], digits.target[:-10]
    def Get_TrainData(self):
        return Hyun_MLPerceptron_Charcter.X_train,Hyun_MLPerceptron_Charcter.y_train
    def Train_Data(self):
        # Fit an SGD-trained MLP with five 10-unit hidden layers.
        mlp = MLPClassifier(hidden_layer_sizes=(10, 10, 10, 10, 10), max_iter=1000, alpha=1e-4,
        # mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=1000, alpha=1e-4,
                            solver='sgd', verbose=10, tol=1e-4, random_state=1,
                            learning_rate_init=.001)
        X_train , y_train = Hyun_MLPerceptron_Charcter.Get_TrainData(0)
        mlp.fit(X_train, y_train)
        return mlp
    def Get_digits(self):
        return Hyun_MLPerceptron_Charcter.digits
    def Get_digits_index(self):
        # Index of the held-out sample to predict/show.
        return 9
    def Get_TestData(self):
        digits=Hyun_MLPerceptron_Charcter.Get_digits(0)
        digits_index = Hyun_MLPerceptron_Charcter.Get_digits_index(0)
        x_test = digits.data[digits_index].reshape(1, -1)
        return x_test
    def Draw_Grap(self):
        # Full demo: load data, train, predict one sample and show its image.
        Hyun_MLPerceptron_Charcter.Load_Data(0)
        x_test=Hyun_MLPerceptron_Charcter.Get_TestData(0)
        digits=Hyun_MLPerceptron_Charcter.Get_digits(0)
        digits_index=Hyun_MLPerceptron_Charcter.Get_digits_index(0)
        mlp = Hyun_MLPerceptron_Charcter.Train_Data(0)
        print(mlp.predict(x_test))
        # print('Test accuracy:', mlp.score(X_test, y_test))
        plt.imshow(digits.images[digits_index], cmap=plt.cm.gray_r, interpolation='nearest')
        plt.show()
hyun =Hyun_MLPerceptron_Charcter
hyun.Draw_Grap(0)
|
983,076 | ecd52bd8eaa6f6a87353537a4d455225812f70a3 | # Generated by Django 3.1.7 on 2021-07-29 09:47
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: ordering tweaks for Notification/WageSheet and
    # a Deduction.amount -> payment rename. Migrations are schema history —
    # do not edit their operations by hand.
    dependencies = [
        ('projects', '0044_notification'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='notification',
            options={'ordering': ('-sent_time',)},
        ),
        migrations.AlterModelOptions(
            name='wagesheet',
            options={'ordering': ('supervisor_user__first_name', 'date')},
        ),
        migrations.RenameField(
            model_name='deduction',
            old_name='amount',
            new_name='payment',
        ),
    ]
|
983,077 | 8da14a6c11b7f0e37865dab52559aab638d9f6a3 | import time
import pandas as pd
import numpy as np
# PLEASE USE THE GIVEN FUNCTION NAME, DO NOT CHANGE IT
def read_csv(filepath):
    '''
    Load the events.csv and mortality_events.csv files found under `filepath`.
    Variables returned from this function are passed as input to the metric
    functions.
    '''
    events_df = pd.read_csv(filepath + 'events.csv')
    deaths_df = pd.read_csv(filepath + 'mortality_events.csv')
    return events_df, deaths_df
def event_count_metrics(events, mortality):
    '''
    Return the event count metrics.
    Event count is defined as the number of events recorded for a given patient.
    :param events: DataFrame with at least 'patient_id' and 'event_id' columns
    :param mortality: DataFrame with 'patient_id' and 'label' (1 = deceased)
    :return: (min_dead, max_dead, avg_dead, min_alive, max_alive, avg_alive)
    '''
    # Number of recorded events per patient.
    event_count = events.groupby(['patient_id'])['event_id'].count()
    # Left-join the mortality label; patients absent from `mortality` are
    # alive and get label 0.
    merged = pd.merge(event_count, mortality, how='left', on='patient_id')
    # Aggregate directly on the event_id column (the original built a
    # multi-index frame and unwrapped it; the dead debug lines are removed).
    stats = merged.fillna({'label': 0}).groupby('label')['event_id'].agg(['mean', 'max', 'min'])
    avg_dead_event_count = stats.loc[1.0, 'mean']
    max_dead_event_count = stats.loc[1.0, 'max']
    min_dead_event_count = stats.loc[1.0, 'min']
    avg_alive_event_count = stats.loc[0.0, 'mean']
    max_alive_event_count = stats.loc[0.0, 'max']
    min_alive_event_count = stats.loc[0.0, 'min']
    return min_dead_event_count, max_dead_event_count, avg_dead_event_count, min_alive_event_count, max_alive_event_count, avg_alive_event_count
def encounter_count_metrics(events, mortality):
    '''
    Return the encounter count metrics.
    Encounter count is defined as the count of unique dates on which a given
    patient visited the ICU.
    '''
    # Unique visit dates per patient.
    unique_dates = events.groupby(['patient_id'])['timestamp'].nunique()
    # Attach mortality labels; missing patients are alive (label 0). The
    # merge suffixes the clashing 'timestamp' columns, hence 'timestamp_x'.
    labelled = pd.merge(unique_dates, mortality, how='left', on='patient_id').fillna({'label': 0})
    summary = labelled.groupby(by=['label']).agg({'timestamp_x': ['mean', 'max', 'min']})['timestamp_x']
    min_dead_encounter_count = summary.loc[1.0, 'min']
    max_dead_encounter_count = summary.loc[1.0, 'max']
    avg_dead_encounter_count = summary.loc[1.0, 'mean']
    min_alive_encounter_count = summary.loc[0.0, 'min']
    max_alive_encounter_count = summary.loc[0.0, 'max']
    avg_alive_encounter_count = summary.loc[0.0, 'mean']
    return min_dead_encounter_count, max_dead_encounter_count, avg_dead_encounter_count, min_alive_encounter_count, max_alive_encounter_count, avg_alive_encounter_count
def record_length_metrics(events, mortality):
    '''
    Return the record length metrics.
    Record length is the duration (in days) between the first and the last
    event for a given patient.
    :param events: DataFrame with 'patient_id' and 'timestamp' columns
    :param mortality: DataFrame with 'patient_id', 'timestamp' and 'label'
    :return: (min_dead, max_dead, avg_dead, min_alive, max_alive, avg_alive)
    '''
    # Parse timestamps into a local Series: the original implementation
    # overwrote events['timestamp'] and mutated the caller's DataFrame.
    timestamps = pd.to_datetime(events['timestamp'])
    # Span in whole days between each patient's first and last event.
    rec_len = timestamps.groupby(events['patient_id']).agg(lambda x: (x.max() - x.min()).days)
    # Attach mortality labels; missing patients are alive (label 0). The merge
    # suffixes the clashing 'timestamp' columns, hence 'timestamp_x'.
    merged = pd.merge(rec_len, mortality, how='left', on='patient_id').fillna({'label': 0})
    stats = merged.groupby('label').agg({'timestamp_x': ['mean', 'max', 'min']})['timestamp_x']
    avg_dead_rec_len = stats.loc[1.0, 'mean']
    max_dead_rec_len = stats.loc[1.0, 'max']
    min_dead_rec_len = stats.loc[1.0, 'min']
    avg_alive_rec_len = stats.loc[0.0, 'mean']
    max_alive_rec_len = stats.loc[0.0, 'max']
    min_alive_rec_len = stats.loc[0.0, 'min']
    return min_dead_rec_len, max_dead_rec_len, avg_dead_rec_len, min_alive_rec_len, max_alive_rec_len, avg_alive_rec_len
def main():
    '''
    DO NOT MODIFY THIS FUNCTION.
    '''
    # Timing harness: loads the training CSVs, then computes and times each
    # of the three metric functions. (Comments only — code left untouched.)
    # You may change the following path variable in coding but switch it back when submission.
    train_path = '../data/train/'
    # train_path = 'C:/Users/Xiaojun/Desktop/omscs/CSE6250/hw1/data/train/'
    # DO NOT CHANGE ANYTHING BELOW THIS ----------------------------
    events, mortality = read_csv(train_path)
    #Compute the event count metrics
    start_time = time.time()
    event_count = event_count_metrics(events, mortality)
    end_time = time.time()
    print(("Time to compute event count metrics: " + str(end_time - start_time) + "s"))
    print(event_count)
    #Compute the encounter count metrics
    start_time = time.time()
    encounter_count = encounter_count_metrics(events, mortality)
    end_time = time.time()
    print(("Time to compute encounter count metrics: " + str(end_time - start_time) + "s"))
    print(encounter_count)
    #Compute record length metrics
    start_time = time.time()
    record_length = record_length_metrics(events, mortality)
    end_time = time.time()
    print(("Time to compute record length metrics: " + str(end_time - start_time) + "s"))
    print(record_length)
if __name__ == "__main__":
    main()
|
983,078 | 9f2d10ba2c330b8bd904fe54abb64ddb101508c5 | from random import randint
#DO NOT CHANGE THIS!!!
# =============================================================================
# attacker element -> set of defender elements the attack is SUPER effective against (2x).
is_effective_dictionary = {'bug': {'dark', 'grass', 'psychic'},
                           'dark': {'ghost', 'psychic'},
                           'dragon': {'dragon'},
                           'electric': {'water', 'flying'},
                           'fairy': {'dark', 'dragon', 'fighting'},
                           'fighting': {'dark', 'ice', 'normal', 'rock', 'steel'},
                           'fire': {'bug', 'grass', 'ice', 'steel'},
                           'flying': {'bug', 'fighting', 'grass'},
                           'ghost': {'ghost', 'psychic'},
                           'grass': {'water', 'ground', 'rock'},
                           'ground': {'electric', 'fire', 'poison', 'rock', 'steel'},
                           'ice': {'dragon', 'flying', 'grass', 'ground'},
                           'normal': set(),
                           'poison': {'fairy', 'grass'},
                           'psychic': {'fighting', 'poison'},
                           'rock': {'bug', 'fire', 'flying', 'ice'},
                           'steel': {'fairy', 'ice', 'rock'},
                           'water': {'fire', 'ground', 'rock'}
                           }
# attacker element -> set of defender elements the attack is NOT very effective against (0.5x).
not_effective_dictionary = {'bug': {'fairy', 'flying', 'fighting', 'fire', 'ghost','poison','steel'},
                            'dragon': {'steel'},
                            'dark': {'dark', 'fairy', 'fighting'},
                            'electric': {'dragon', 'electric', 'grass'},
                            'fairy': {'fire', 'poison', 'steel'},
                            'fighting': {'bug', 'fairy', 'flying', 'poison', 'psychic'},
                            'fire': {'dragon', 'fire', 'rock', 'water'},
                            'flying': {'electric', 'rock', 'steel'},
                            'ghost': {'dark'},
                            'grass': {'bug', 'dragon', 'grass', 'fire', 'flying', 'poison', 'steel'},
                            'ground': {'bug','grass'},
                            'ice': {'fire', 'ice', 'steel', 'water'},
                            'normal': {'rock', 'steel'},
                            'poison': {'ghost', 'ground', 'poison', 'rock'},
                            'psychic': {'psychic', 'steel'},
                            'rock': {'fighting', 'ground', 'steel'},
                            'steel': {'electric', 'fire', 'steel', 'water'},
                            'water': {'dragon','grass', 'ice'}
                            }
# attacker element -> set of defender elements the attack has NO effect on (damage is skipped).
no_effect_dictionary = {'electric': {'ground'},
                        'dragon': {'fairy'},
                        'fighting': {'ghost'},
                        'ghost': {'normal', 'psychic'},
                        'ground': {'flying'},
                        'normal': {'ghost'},
                        'poison': {'steel'},
                        'psychic': {'dark'},
                        'bug': set(), 'dark': set(), 'fairy': set(),'fire': set(),
                        'flying': set(), 'grass': set(), 'ice': set(),
                        'rock': set(), 'steel': set(), 'water': set()
                        }
#Dictionaries that determine element advantages and disadvantages
# =============================================================================
class Move(object):
    """A single move: name, element, power, accuracy and attack type.
    attack_type is 1, 2 or 3 (1 - status, 2 - physical, 3 - special)."""
    def __init__(self, name = "", element = "Normal", power = 20, accuracy = 80,
                 attack_type = 2):
        self.name = name
        self.element = element
        self.power = power
        self.accuracy = accuracy
        self.attack_type = attack_type
    def __str__(self):
        return str(self.name)
    def __repr__(self):
        return self.__str__()
    def get_name(self):
        return self.name
    def get_element(self):
        return self.element
    def get_power(self):
        return self.power
    def get_accuracy(self):
        return self.accuracy
    def get_attack_type(self):
        return self.attack_type
class Pokemon(object):
    """A battling pokemon: name, up to two elements, stats, and up to four moves.

    Stats: hp (hit points), patt/pdef (physical attack/defense), satt/sdef
    (special attack/defense).
    """
    def __init__(self, name = "", element1 = "Normal", element2 = "", moves = None,
                 hp = 100, patt = 10, pdef = 10, satt = 10, sdef = 10):
        self.name = name
        self.element1 = element1
        self.element2 = element2
        self.hp = hp
        self.patt = patt
        self.pdef = pdef
        self.satt = satt
        self.sdef = sdef
        # Keep at most four moves; None means "no moves yet".
        # FIX: the original `if len(moves > 4)` compared a list to an int,
        # which raises TypeError for every list, so the caller's moves were
        # always silently discarded.
        if moves is None:
            self.moves = list()
        else:
            self.moves = list(moves)[:4]
    def _format_row(self, values):
        # Render up to four per-move values: four values go into aligned
        # 15-wide columns; fewer are joined with 15-space gaps, matching the
        # original spacing. FIX: values are str()-ed first — the original
        # show_move_power/accuracy concatenated ints onto a str (TypeError)
        # when a pokemon had fewer than four moves.
        values = [str(v) for v in values]
        if len(values) == 4:
            return "{:<15} {:<15} {:<15} {:<15}".format(*values)
        return (15 * ' ').join(values)
    def __str__(self):
        ret_str = "{:<15s} {:<15d} {:<15d} {:<15d} {:<15d} {:<15d}\n".format(
            self.name, self.hp, self.patt, self.pdef, self.satt, self.sdef)
        ret_str += "{:<15s} {:<15s}\n".format(self.element1, self.element2)
        ret_str += self._format_row([str(m) for m in self.moves])
        return ret_str
    def __repr__(self):
        return self.__str__()
    def get_name(self):
        return self.name
    def get_element1(self):
        return self.element1
    def get_element2(self):
        return self.element2
    def get_hp(self):
        return self.hp
    def get_patt(self):
        return self.patt
    def get_pdef(self):
        return self.pdef
    def get_satt(self):
        return self.satt
    def get_sdef(self):
        return self.sdef
    def get_moves(self):
        return self.moves
    def get_number_moves(self):
        return len(self.moves)
    def choose(self, index):
        """Return the move at `index`, or None when the index is out of range."""
        try:
            return self.moves[index]
        except IndexError:
            return None
    def show_move_elements(self):
        """Display the element of each of this pokemon's moves."""
        print(self._format_row([m.get_element() for m in self.moves]))
    def show_move_power(self):
        """Display the power of each of this pokemon's moves."""
        print(self._format_row([m.get_power() for m in self.moves]))
    def show_move_accuracy(self):
        """Display the accuracy of each of this pokemon's moves."""
        print(self._format_row([m.get_accuracy() for m in self.moves]))
    def add_move(self, move):
        """Append `move` if it is a Move and the pokemon has fewer than four."""
        if len(self.moves) >= 4:
            print("This pokemon already has 4 moves.")
        elif type(move) == Move:
            self.moves.append(move)
        else:
            print("Invalid type!")
    def attack(self, move, opponent):
        """
        Resolve `move` used by this pokemon against `opponent`: pick the
        attack/defense pair for the move kind, roll accuracy, apply type
        effectiveness and the same-type attack bonus (STAB), then subtract
        the resulting damage from the opponent's hp.
        """
        # FIX: attack types are normalized through str() — Move stores ints
        # (default 2) while the original compared against '2'/'3' strings,
        # so every default move was rejected as "Invalid attack_type".
        kind = str(move.get_attack_type())
        if kind == '2':
            # Physical attack: attacker's patt vs defender's pdef.
            A = self.patt
            D = opponent.pdef
        elif kind == '3':
            # Special attack: attacker's satt vs defender's sdef.
            A = self.satt
            D = opponent.sdef
        else:
            print("Invalid attack_type, turn skipped")
            return
        # Base damage formula from the original implementation.
        damage = move.get_power() * (A / D) * 20
        damage = (damage / 50.0) + 2
        # Accuracy roll: the move misses when the roll exceeds its accuracy.
        if randint(1, 100) > move.get_accuracy():
            print("Move missed!")
            return
        modifier = 1.0
        se = 0
        ne = 0
        # Check both defender elements; an empty second element ('') matches
        # none of the sets and contributes nothing.
        for element in (opponent.get_element1(), opponent.get_element2()):
            if element in is_effective_dictionary[move.get_element()]:
                modifier *= 2
                se += 1
            elif element in not_effective_dictionary[move.get_element()]:
                modifier *= 0.5
                ne += 1
            elif element in no_effect_dictionary[move.get_element()]:
                print("No effect!")
                return
        # Print a single net-effectiveness message.
        if se > ne:
            print("It's super effective!!!!")
        elif se < ne:
            print("Not very effective...")
        # Same-type attack bonus (STAB).
        if move.get_element() in (self.element1, self.element2):
            modifier *= 1.5
        opponent.subtract_hp(int(damage * modifier))
    def subtract_hp(self, damage):
        """Reduce hp by `damage`, clamping at zero (hp never goes negative)."""
        self.hp = max(self.hp - damage, 0)
983,079 | 0029ed02ec3c7a807358fb1f75d089b371732483 | #!/usr/bin/env python
# USAGE
# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
#from imutils.video import VideoStream
#from imutils.video.pivideostream import PiVideoStream
#import argparse
#import imutils
import time
#import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
#import numpy as np
import roslib
import sys
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import rospkg
import os
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--picamera", type=int, default=-1,
# help="whether or not the Raspberry Pi camera should be used")
#args = vars(ap.parse_args())
#vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
#time.sleep(5.0)
#vs = PiVideoStream().start()
# Configure the Raspberry Pi camera once at import time.
camera = PiCamera()
resolution=(320,240)
camera.resolution = resolution
camera.framerate = 20
# Reusable capture buffer sized to match the camera resolution.
rawCapture = PiRGBArray(camera, size=resolution)
#stream = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)
#rawCapture = np.empty((240*320*3,), dtype=np.uint8)
# Give the camera sensor time to warm up before the first capture.
time.sleep(2.0)
def CVControl():
    """Publish rear-camera frames on the 'rear_cv' ROS topic until shutdown."""
    rospy.init_node("rear_view", anonymous = True)
    image_pub = rospy.Publisher("rear_cv",Image, queue_size = 10)
    rate = rospy.Rate(20)
    # Converts OpenCV/numpy images to ROS sensor_msgs/Image.
    bridge = CvBridge()
    # loop over the frames from the video stream
    while not rospy.is_shutdown():
        # NOTE(review): PiCamera.capture fills rawCapture and returns None,
        # so `frame` is unused — verify before relying on it.
        frame = camera.capture(rawCapture, 'bgr', use_video_port=True)
        image = rawCapture.array
        # Publish the raw BGR frame; subscribers do any further processing.
        image_pub.publish(bridge.cv2_to_imgmsg(image, "bgr8")) # update the FPS counter
        # Reset the buffer so the next capture starts from an empty array.
        # rawCapture.seek(0)
        rawCapture.truncate(0)
    # rospy.spin()
if __name__ == '__main__':
    CVControl()
# stop the timer and display FPS information
# fps.stop()
# print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
# print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
#cv2.destroyAllWindows()
#vs.stop()
|
983,080 | fef7aefaef092567c6e4144acc0a9284e2be4a00 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 19:26:46 2020
@author: cheerag.verma
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 16:30:31 2020
@author: cheerag.verma
"""
""" time complexity - O(n2)"""
class Node:
    """A singly linked list node holding a payload and a next pointer."""

    def __init__(self, data):
        # Value stored in this node.
        self.data = data
        # Reference to the following node; None marks the end of the list.
        self.next = None
def inputLinkedList():
    """Build a linked list from space-separated integers read from stdin.

    All tokens are converted to int up front; list construction stops at
    the first -1.  Returns the head node, or None for an empty list.
    """
    values = [int(token) for token in input().split()]
    head = tail = None
    for value in values:
        if value == -1:
            break
        node = Node(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
def printLinkedList(head):
    """Print the list on one line in the form '1-> 2-> ... None'."""
    parts = []
    node = head
    while node is not None:
        parts.append(str(node.data) + "->")
        node = node.next
    parts.append("None")
    print(" ".join(parts))
if __name__ == "__main__":
head = inputLinkedList()
printLinkedList(head) |
983,081 | 993dc5d13244790e04fd1c32f18994463b9eba9b | from django.urls import path
from django.conf.urls import url
from . import views
# Route each prediction endpoint to its class-based API view.
urlpatterns = [
    # Old-style regex routes (django.conf.urls.url).
    url(r'bikeprediction/$', views.BikeAnalysisModel.as_view()),
    url(r'pollutionprediction/$', views.PollutionAnalysisModel.as_view())
]
|
983,082 | bcbdd56f55c9da5eccd38b47d77d3e134f71348a | # criando funções a ser utilizada pelo programa
def menu():
    """Print the banner and the numbered options for the calculator loop."""
    opcoes = ['[1] somar',
              '[2] multiplicar',
              '[3] maior número',
              '[4] inserir novos números',
              '[5] encerrar o programa']
    print('*=' * 20, '\n' + '\n'.join(opcoes))
def soma(n1, n2):
    """Return a banner plus a sentence describing the sum of n1 and n2."""
    banner = "*=" * 20
    total = n1 + n2
    return banner + f"\nA soma entre {n1} e {n2} é igual a {total}."
def multiplicacao(n1, n2):
    """Return a banner plus a sentence describing the product of n1 and n2."""
    banner = "*=" * 20
    produto = n1 * n2
    return banner + f"\nO produto entre {n1} e {n2} é igual a {produto}."
def maior_numero(n1, n2):
    """Return a sentence naming the larger argument (or noting a tie)."""
    if n1 == n2:
        return 'Os número são iguais!'
    maior = n1 if n1 > n2 else n2
    return f'{maior} é o maior número!'
# Interactive calculator loop: read two numbers once, then repeatedly let
# the user pick an operation until option 5 ends the program.
continua = True
primeiro_numero = float(input('Insira o primeiro valor: '))
segundo_numero = float(input('Insira o segundo valor: '))
while(continua):
    menu()
    # Re-prompt until the user picks a valid option 1-5.
    valida_opcao = True
    while(valida_opcao):
        opcao = int(input('Digite uma das opções acima: '))
        if(opcao < 1 or opcao > 5):
            print('Opção inválida! Tente novamente.', end=' ')
        else:
            valida_opcao = False
    if(opcao == 1):
        print(soma(primeiro_numero, segundo_numero))
    elif(opcao == 2):
        print((multiplicacao(primeiro_numero, segundo_numero)))
    elif(opcao == 3):
        print(maior_numero(primeiro_numero, segundo_numero))
    elif(opcao == 4):
        # Replace the working pair of numbers.
        primeiro_numero = float(input('Insira o primeiro valor: '))
        segundo_numero = float(input('Insira o segundo valor: '))
    else:
        # Option 5: terminate the process.
        print('Programa encerrado!')
        exit()
983,083 | 28521539f941a2fe7e1293739c225d09dc9efdd0 | import random
import sys
def general():
    """Print two randomly chosen quotes from quotes.txt.

    The two picks are independent, so the same quote may appear twice.
    Does nothing if the file is empty.
    """
    # Use a context manager so the file handle is always closed.
    with open("quotes.txt") as f:
        quotes = f.readlines()
    if not quotes:
        return
    # Index the actual number of lines instead of the old hard-coded
    # last = 17, which raised IndexError for files with fewer lines and
    # silently ignored quotes past line 18.
    last = len(quotes) - 1
    rnd1 = random.randint(0, last)
    rnd2 = random.randint(0, last)
    sys.stdout.write(quotes[rnd1])
    sys.stdout.write(quotes[rnd2])
if __name__== "__main__":
general()
|
983,084 | 0da9cb64081c445382e22bb6f5205dff58fc3da7 | import numpy as np
import ticktack
from ticktack import fitting
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.lines import Line2D
from chainconsumer import ChainConsumer
# Load the Guttler15 carbon-box model and fit the Miyake 774/775 CE event.
cbm = ticktack.load_presaved_model("Guttler15", production_rate_units = "atoms/cm^2/s")
sf = fitting.SingleFitter(cbm, cbm_model="Guttler15")
sf.load_data("../notebooks/miyake12.csv")
sf.compile_production_model(model="simple_sinusoid")
# Durations and areas are sampled in log10 space.
default_params = np.array([775., np.log10(1./12), np.pi/2., np.log10(81./12)]) # start date, duration, phase, area
sampler = sf.MarkovChainSampler(default_params,
                                likelihood = sf.log_joint_likelihood,
                                burnin = 500,
                                production = 2000,
                                args = (np.array([770., np.log10(1/52.), 0, -2]), # lower bound
                                        np.array([780., np.log10(5.), 11, 1.5])) # upper bound
                                )
# Work on a copy so the raw chain stays in log space for later re-use.
samples = sampler.copy()
samples[:,1] = 10**samples[:,1] # duration not log duration
samples[:,-1] = 10**samples[:,-1] # area not log area
def chain_summary(sf, chain, walkers, figsize=(10, 10), labels=None, plot_dist=False,
                  label_font_size=8, tick_font_size=8, mle=False,usetex=False):
    """
    Runs convergence tests and plots posteriors from a MCMC chain.

    Parameters
    ----------
    chain : ndarray
        A MCMC chain
    walkers : int
        The total number of walkers of the chain
    figsize : tuple, optional
        Output figure size
    labels : list[str], optional
        A list of parameter names
    plot_dist : bool, optional
        If True, plot the marginal distributions of parameters. Else, plot both the marginal distribution
        and the posterior surface

    Notes
    -----
    NOTE(review): the `sf`, `plot_dist` and `mle` parameters are currently
    unused by the body — confirm whether they were meant to change the plot.
    """
    c = ChainConsumer().add_chain(chain, walkers=walkers, parameters=labels)
    c.configure(spacing=0.0, usetex=usetex, label_font_size=label_font_size, tick_font_size=tick_font_size,
                diagonal_tick_labels=False,summary=False)
    fig = c.plotter.plot(figsize=figsize)
    return fig
labels = ["Start Date (yr)", "Duration (yr)", "φ (yr)", "Production ($q_0$ yr)"]
fig = chain_summary(sf, samples, 8, labels=labels, figsize=(20.0,8.0), label_font_size=10, tick_font_size=10,)
# fig = sf.chain_summary(samples, 8, labels=labels, label_font_size=10, tick_font_size=10,usetex=False)
fig.subplots_adjust(right=0.5)
gs = mpl.gridspec.GridSpec(1,2, width_ratios=[1, 1])
subfig = fig.add_subfigure(gs[0, 1])
(ax1, ax2) = subfig.subplots(2,1, sharex=True,gridspec_kw={'height_ratios': [2, 1]})
# fig.subplots_adjust(hspace=0.05)
plt.rcParams.update({"text.usetex": False})
idx = np.random.randint(len(sampler), size=100)
for param in tqdm(sampler[idx]):
ax1.plot(sf.time_data_fine, sf.dc14_fine(params=param), alpha=0.05, color="g")
for param in tqdm(sampler[idx][:30]):
ax2.plot(sf.time_data_fine, sf.production(sf.time_data_fine, *param), alpha=0.2, color="g")
ax1.errorbar(sf.time_data + sf.time_offset, sf.d14c_data, yerr=sf.d14c_data_error,
fmt="ok", capsize=3, markersize=6, elinewidth=3, label="$\Delta^{14}$C data")
ax1.legend(frameon=False);
ax2.set_ylim(1, 10);
ax1.set_ylabel("$\Delta^{14}$C (‰)")
ax2.set_xlabel("Calendar Year");
ax2.set_ylabel("Production rate (atoms cm$^2$s$^{-1}$)")
plt.savefig('joss_figure.png',bbox_inches='tight',dpi=300) |
983,085 | efe10f6956676feba7a314841d2a5f8e831316f9 | # Generated by Django 3.2.5 on 2021-07-09 16:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the unmanaged EMPLOYEES table."""

    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='Employees',
            fields=[
                ('employee', models.OneToOneField(db_column='Employee_ID', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='auth.user')),
                ('employee_name', models.CharField(db_column='Employee_Name', max_length=45)),
                ('birth_date', models.DateField(db_column='Birth_Date')),
                ('phone_number', models.CharField(db_column='Phone_Number', max_length=11)),
                ('national_number', models.CharField(db_column='National_Number', max_length=14)),
                ('address', models.CharField(db_column='Address', max_length=45)),
                ('emp_type', models.IntegerField(db_column='Emp_Type')),
            ],
            options={
                # managed=False: Django will not create/alter this table itself.
                'db_table': 'EMPLOYEES',
                'managed': False,
            },
        ),
    ]
|
983,086 | 6d9a07fdb815d0169e99bc7d1bee390b686fe485 | # --------------------------------------------------------------------------- #
# D. Rodriguez, 2020-05-13
# --------------------------------------------------------------------------- #
|
983,087 | 5ebd50a40b10e5bf721cacf4f453b57dc4862682 | def make_posh(func):
def wrapper():
print("+---------+")
print("| |")
result = func()
print(result)
print("| |")
print("+=========+")
return result
return wrapper
@make_posh
def pfib():
    """Return the ' Fibonacci ' label for make_posh to frame and print."""
    return ' Fibonacci '


pfib()
983,088 | 3b7e6023c43d42036af71900975f64d2c7f358d2 | import os
import glob
import copy
#samples={}
#/xrootd_user/jhchoi/xrootd/Latino/HWWNano/Summer16_102X_nAODv4_Full2016v4/MCl1loose2016
#nanoLatino_GluGluHToWWToLNuQQ_M700__part13
#----Make Sample List of MC ----#
DIR_NANO_LATINO="/xrootd_user/jhchoi/xrootd/Latino/HWWNano/"
CAMPAIGN='Summer16_102X_nAODv4_Full2016v4'
STEP="MCl1loose2016"
PROC_LIST=['GluGluHToWWToLNuQQ_M700', 'TT_TuneCUETP8M2T4', 'WJetsToLNu','DYJetsToLL_M-50_ext2', 'DYJetsToLL_M-50-LO','WW-LO','WZ_AMCNLO','_ZZ_']
for PROC in PROC_LIST:
#PROC="GluGluHToWWToLNuQQ_M700"
FILES=glob.glob(DIR_NANO_LATINO+"/"+CAMPAIGN+"/"+STEP+"/"+"nanoLatino_"+PROC+"*.root") ##File List
#ggH_M700_LIST=list( b for a in FILES b = a.split(DIR_NANO_LATINO)[1])
LIST=[]
for a in FILES: LIST.append(a.split(DIR_NANO_LATINO)[1].strip('/'))
#for a in ggH_M700_LIST : print a
samples[PROC] = {'name' : copy.deepcopy(LIST) , 'weight' : '1' }
#print samples['GluGluHToWWToLNuQQ_M700']
#print samples['DYJetsToLL_M-50-LO']
#print samples['_ZZ_']
CAMPAIGN='Run2016_102X_nAODv4_Full2016v4'
STEP="DATAl1loose2016"
PROC_LIST=['SingleMuon','SingleElectron']
for PROC in PROC_LIST:
FILES=glob.glob(DIR_NANO_LATINO+"/"+CAMPAIGN+"/"+STEP+"/"+"nanoLatino_"+PROC+"*.root") ##File List
LIST=[]
for a in FILES: LIST.append(a.split(DIR_NANO_LATINO)[1].strip('/'))
samples[PROC] = {'name' : copy.deepcopy(LIST) , 'weight' : '1' }
#samples['TT_semilep'] = { 'name' : ['Fall2017_102X_nAODv4_Full2017v4/MCl1loose2017v2/nanoLatino_TTToSemiLeptonic__part11.root',
#],
#'weight' : '1'
#}
#samples['ggHToLNuQQ'] = {'name' : ['Fall2017_102X_nAODv4_Full2017v4/MCl1loose2017v2/nanoLatino_GluGluHToWWToLNuQQ_M250__part0.root'],
#'weight' : '1'
#}
####DATA####
DataRun = [['B','Run2017B-Nano14Dec2018-v1'] ]
DataSet= ['SingleMuon']
DataTrig={
'SingleMuon' : 'trig_SnglMu'
}
#samples['DATA'] = { 'name': ['Run2017_102X_nAODv4_Full2017v4/DATAl1loose2017v2/nanoLatino_SingleMuon_Run2017B-Nano14Dec2018-v1__part0.root'] ,
# 'weight' : '1',
# 'weights' : ['trig_SnglMu' ],
# 'isData': ['all'],
#'FilesPerJob' : 20,
# }
#for Run in DataRun :
# directory = treeBaseDir+''
# for DataSet in DataSets :
# FileTarget = getSampleFiles(directory,DataSet+'_'+Run[1],True,'nanoLatino_')
# for iFile in FileTarget:
# print(iFile)
# samples['DATA']['name'].append(iFile)
# samples['DATA']['weights'].append(DataTrig[DataSet])
|
983,089 | 00c8c625c1fcbeb8a04d0d1d90f1a96977d04ab0 | from sys import argv
from swampy.TurtleWorld import *
import math
#prg, n, length = argv
def polyline(tur, n, length):
    """Draw n segments of the given length, turning 360/n degrees after each.

    With these arguments the path closes into a regular n-sided polygon.
    """
    turn = 360.0 / n
    for _ in range(n):
        fd(tur, length)
        lt(tur, turn)
def polygon(tur, n, length):
    # NOTE(review): this draws a complete n-gon (polyline already closes the
    # shape) n times, rotating between repeats, and `lt(tur)` relies on lt's
    # default turn angle.  It looks like `fd(tur, length)` was intended
    # instead of `polyline(...)` -- TODO confirm before changing behavior.
    angle = 360.0 / n
    for i in range(n):
        polyline(tur, n, length)
        lt(tur)
def circle(tur, r):
    """Approximate a circle of radius r using many short polyline segments."""
    circumference = 2 * math.pi * r
    # Segments roughly 3 units long; +1 guarantees at least one segment.
    segments = int(circumference / 3) + 1
    polyline(tur, segments, circumference / segments)
#n, lenght, angle = map(int,raw_input("enter n length and angle :").split())
# Python 2 script body: ask for a radius and draw the circle with swampy.
world = TurtleWorld()
tur1 = Turtle()
print tur1
tur1.delay = 0.1
r = int(raw_input("r = "))
#length = int(raw_input("length = "))
#angle = int(raw_input("angle = "))
#angle = 360.0 / n
circle(tur1, r)
#polyline(tur1, n, length)
#polygon(tur1, n , length)
# Keep the window open until the user presses Enter.
raw_input()
|
983,090 | a69642b89204df4ac992434ff0b4d23e203a0c60 | from flask.ext.wtf import Form
from wtforms import PasswordField, SubmitField
from wtforms.validators import DataRequired
class GettingStartedForm(Form):
    """Demo form accepting only the hard-coded password "abc"."""

    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Submit')
    def validate(self):
        # NOTE(review): overriding validate() without calling the parent
        # bypasses the DataRequired field validator -- confirm intended.
        if self.password.data == "abc":
            return True
        else:
            return False
983,091 | d13f3804a1d256d47b1b529a7f9c5838fa463dfa | import random
import turtle
import sys
# Secret number the player must guess.
x = random.randint(1, 100)
# Line-segment coordinates for the gallows drawing, one "x1,y1,x2,y2" per line.
coord = open('gb_gibbet.txt')
coord_list = []
# Number of wrong guesses so far (10 wrong guesses loses the game).
try_count = 0
def gotoxy(x, y):
    """Jump the pen to (x, y) without drawing, leaving it ready to draw."""
    turtle.penup()
    turtle.setposition(x, y)
    turtle.pendown()
def draw_line(from_x, from_y, to_x, to_y):
    """Draw a straight segment from (from_x, from_y) to (to_x, to_y)."""
    gotoxy(from_x, from_y)
    turtle.setposition(to_x, to_y)
# Parse the gallows coordinates: each file line holds four comma-separated ints.
for line in coord:
    line = line.strip().split(',')
    nums = []
    for n in line:
        nums.append(int(n))
    coord_list.append(nums)
# Draw the gallows segment by segment.
for item in range(len((coord_list))):
    draw_line(*coord_list[item])
# Head of the hanged figure.
gotoxy(-100, 0)
turtle.circle(20)
answer = turtle.textinput('Грати далі?', 'y/n')
if answer == 'n':
    sys.exit()
# Main guessing loop: the player wins by matching x, or loses after 10 misses.
while True:
    number = turtle.numinput('Вгадайте', 'Число', 0, 0, 100)
    if number == x:
        gotoxy(-150, 100)
        turtle.write('Ви перемогли!', font=('Arial', 28, 'normal'))
        # The game is over once the number is guessed; without this break
        # the loop kept prompting forever even after a win.
        break
    else:
        gotoxy(-150, 100)
        turtle.color('red')
        turtle.write('Хиба!', font=('Arial', 28, 'normal'))
        try_count += 1
        if try_count == 10:
            gotoxy(-20, 100)
            turtle.color('red')
            turtle.write('Ви програли!', font=('Arial', 28, 'normal'))
            break
983,092 | 4622adcddaece46441e140ba545f52696f321670 | import torch
import torch.nn.functional as F
from .utils import spatial_argmax
class Planner(torch.nn.Module):
    """U-Net-style CNN that predicts an aim point (x, y) from a kart image.

    Residual downsampling blocks feed transposed-convolution upsampling
    blocks with skip connections; a 1x1 classifier head produces a heatmap
    whose soft argmax is scaled back to pixel coordinates.
    """

    def __init__(self):
        super().__init__()
        """
        Your code here
        """
        # H x W x 3
        self.start = torch.nn.Sequential(
            torch.nn.Conv2d(3, 32, kernel_size=7, padding=3, stride=2),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
        # H/2 x W/2 x 32
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=2),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU())
        # H/4 x W/4 x 64
        # *_ds layers are 1x1 stride-2 projections for the residual shortcuts.
        self.layer1_ds = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=1, stride=2),
            torch.nn.BatchNorm2d(64))
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=2),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU(),
            torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU())
        # H/8 x W/8 x 128
        self.layer2_ds = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=1, stride=2),
            torch.nn.BatchNorm2d(128))
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(),
            torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU())
        # H/16 x W/16 x 256
        self.layer3_ds = torch.nn.Sequential(
            torch.nn.Conv2d(128, 256, kernel_size=1, stride=2),
            torch.nn.BatchNorm2d(256))
        self.drop_out_layer = torch.nn.Dropout()
        # up-convolutions (decoder); inputs are concatenated skip features.
        self.layer4 = torch.nn.Sequential(
            torch.nn.ConvTranspose2d(256, 256, kernel_size=3, stride=2, padding=1, output_padding=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU())
        self.layer5 = torch.nn.Sequential(
            torch.nn.ConvTranspose2d(512, 128, kernel_size=3, stride=2, padding=1, output_padding=1),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU())
        self.layer6 = torch.nn.Sequential(
            torch.nn.ConvTranspose2d(256, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU())
        self.final = torch.nn.Sequential(
            torch.nn.ConvTranspose2d(128, 32, kernel_size=7, stride=2, padding=3, output_padding=1),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU())
        # training 3 classifiers
        self.classifer = torch.nn.Conv2d(32, 1, kernel_size=1)
        # sigmoid to restrict output between 0-1
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, img):
        """
        Your code here
        Predict the aim point in image coordinate, given the supertuxkart image
        @img: (B,3,96,128)
        return (B,2)
        """
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        H, W = img.size()[2], img.size()[3]
        #print('x',x)
        #print('x.shape',x.shape) ## 32 x 3 x 96 x 128
        # Encoder with residual shortcuts.
        z32 = self.start(img)
        z64 = self.layer1(z32) + self.layer1_ds(z32)
        #print('z1',z64.shape)
        z128 = self.layer2(z64) + self.layer2_ds(z64)
        #print('z2',z128.shape)
        z256 = self.layer3(z128) + self.layer3_ds(z128)
        #print('z3',z256.shape)
        z256d = self.drop_out_layer(z256)
        #print('z_drop',z256d.shape)
        # Decoder: each stage concatenates an interpolated skip connection.
        z256u = self.layer4(z256d)
        #print('z4',z256u.shape)
        z128u = self.layer5(torch.cat((z256u, F.interpolate(z256d,size=z256u.size()[2:] )), 1))
        #print('z5',z128u.shape)
        z64u = self.layer6(torch.cat((z128u, F.interpolate(z128,size=z128u.size()[2:] )), 1))
        #print('z6',z64u.shape)
        z32u = self.final(torch.cat((z64u, F.interpolate(z64,size=z64u.size()[2:] )), 1))
        #print('z6_plus',z32u.shape)
        #print('z7_result',self.classifer(z32u)[:, :, :H, :W].shape)
        # Crop the heatmap back to the input spatial size.
        result_class = self.classifer(z32u)[:, :, :H, :W]
        #print('model result shape',result_class.shape)
        ## 16 x 1 x 300 x 400
        # using soft argmax; spatial_argmax is a project helper returning
        # coordinates in [-1, 1] (assumed -- defined in .utils; TODO confirm).
        spa_argmax = spatial_argmax(torch.squeeze(result_class,1))
        #one hot with spatial argmax
        #xy_val = torch.zeros(spa_argmax.shape).float()
        #for idx, pt in enumerate(spa_argmax):
        #    x_val = (pt[0]+1.0)*63.5
        #    y_val = (pt[1]+1.0)*47.5
        #    # for each batch. [0...127][0...95]
        #    xy_val[idx][0] = x_val
        #    xy_val[idx][1] = y_val
        # Map [-1, 1] coordinates to [0, W-1] x [0, H-1] pixel coordinates.
        xy_val = (spa_argmax+1.0).to(device)
        #print('spa_argmax',spa_argmax)
        scaling_factor = torch.FloatTensor([[(W-1)/2,0.],[0.,(H-1)/2]]).to(device)
        #scaling_factor = torch.FloatTensor([[63.5,0.],[0.,44.5]]).to(device)
        xy_val = xy_val.mm(scaling_factor)
        return xy_val
def save_model(model):
    """Persist a Planner's weights as planner.th next to this file.

    Raises ValueError for any model that is not a Planner.
    """
    from torch import save
    from os import path
    if not isinstance(model, Planner):
        raise ValueError("model type '%s' not supported!" % str(type(model)))
    target = path.join(path.dirname(path.abspath(__file__)), 'planner.th')
    return save(model.state_dict(), target)
def load_model():
    """Instantiate a Planner and load planner.th weights (CPU-mapped)."""
    from torch import load
    from os import path
    planner = Planner()
    weights = load(path.join(path.dirname(path.abspath(__file__)), 'planner.th'), map_location='cpu')
    planner.load_state_dict(weights)
    return planner
if __name__ == '__main__':
    # Local imports so the module can be imported without these dependencies.
    from .controller import control
    from .utils import PyTux
    from argparse import ArgumentParser
    def test_planner(args):
        # Load model in eval mode and roll it out on each requested track.
        planner = load_model().eval()
        pytux = PyTux()
        for t in args.track:
            steps = pytux.rollout(t, control, planner=planner, max_frames=1000, verbose=args.verbose)
            print(steps)
        pytux.close()
    parser = ArgumentParser("Test the planner")
    parser.add_argument('track', nargs='+')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    test_planner(args)
|
983,093 | fa02c17059da76c74dada0667b26fda79f3ce64d | # Generated by Django 2.0.4 on 2018-04-26 02:25
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration adding Comment/Counts/Tag models and blog fields."""

    dependencies = [
        ('blog', '0009_category'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='佚名', max_length=20, verbose_name='姓名')),
                ('content', models.CharField(max_length=300, verbose_name='内容')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
            ],
            options={
                'verbose_name': '博客评论',
                'verbose_name_plural': '博客评论',
            },
        ),
        migrations.CreateModel(
            name='Counts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('blog_nums', models.IntegerField(default=0, verbose_name='博客数目')),
                ('category_nums', models.IntegerField(default=0, verbose_name='分类数目')),
                ('tag_nums', models.IntegerField(default=0, verbose_name='标签数目')),
                ('visit_nums', models.IntegerField(default=0, verbose_name='网站访问量')),
            ],
            options={
                'verbose_name': '数目统计',
                'verbose_name_plural': '数目统计',
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='博客标签')),
                ('number', models.IntegerField(default=1, verbose_name='标签数目')),
            ],
            options={
                'verbose_name': '博客标签',
                'verbose_name_plural': '博客标签',
            },
        ),
        migrations.AlterModelOptions(
            name='blog',
            options={'verbose_name': '我的博客', 'verbose_name_plural': '我的博客'},
        ),
        migrations.AddField(
            model_name='blog',
            name='category',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='博客类别'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='blog',
            name='click_nums',
            field=models.IntegerField(default=0, verbose_name='点击量'),
        ),
        migrations.AddField(
            model_name='blog',
            name='create_time',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),
        ),
        migrations.AddField(
            model_name='blog',
            name='modify_time',
            field=models.DateTimeField(auto_now=True, verbose_name='修改时间'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='content',
            field=models.TextField(default='', verbose_name='正文'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='title',
            field=models.CharField(max_length=100, verbose_name='标题'),
        ),
        migrations.AddField(
            model_name='comment',
            name='blog',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Blog', verbose_name='博客'),
        ),
        migrations.AddField(
            model_name='blog',
            name='tag',
            field=models.ManyToManyField(to='blog.Tag', verbose_name='博客标签'),
        ),
    ]
|
983,094 | 51e75ec33aeace7ecacb3baaf1d0390f6e0e97f2 | class Product(object):
product_count = 0
    @staticmethod
    def show_products():
        """Print a summary of the existing products (currently hard-coded)."""
        product_count = Product.product_count
        if product_count == 0:
            print('No products exist at this time.\n')
        else:
            # NOTE(review): the listing below is hard-coded and does not
            # reflect the products actually created; a class-level registry
            # of instances would fix this — TODO.
            print('The following products exist:\n')
            print("Bluth Banana", "(Fruit),", str("$10"))
            print("Oathbringer", "(Book),", str("$16"))
            print("Doors of Stone", "(Book),", str("$30"))
            print("Cheap EV", "(Car),", str("$36200"))
    ## initializer
    def __init__(self, name, cat, price):
        """Create a product that is not yet approved for sale."""
        self.__name = name
        self.__cat = cat
        self.__price = price
        # Display suffix appended by __str__; replaced by approve().
        self.__sale = "[NOT YET FOR SALE]"
        Product.product_count += 1
        print(self.__name + " is now a Product. ")
##str function
def __str__(self):
return self.__name + "("+ self.__cat +")"+ ',' + "$" + str(self.__price) + self.__sale
    ## Accessor for the name-mangled private price attribute.
    def get_price(self):
        """Return the current price."""
        return self.__price
##approve method
def approve(self):
self.__sale = True
if self.__sale == True:
print(self.__name, " is now for sale!\n")
##I don't know if these functions were suppose to have try and exceptions in them, but after a while I went with conditionals which seemed to have worked
def set_name(self, new_name):
if new_name == self.__name:
print("Warning: The product already has that name!")
elif new_name == "":
print("Warning: The product must have a name!")
else:
self.__name = new_name
def set_price(self, new_price):
if new_price <= 0:
print("Warning: Product must have positive price!")
elif new_price == self.__price:
print("Warning: Product already has that price!")
else:
self.__price = new_price
## Main: create a few sample products (each prints a creation message).
print("Let's create some products:")
car = Product("Cheap EV", "Car", 36200)
book = Product("Doors of Stone", "Book", 30)
banana = Product("Bluth Banana", "Fruit", 10)
book2 = Product("Oathbringer", "Book", 16)
##car.approve()
##banana.approve()
##
##print('\nSort and show all products:')
###Product.product_count.sort()
##Product.show_products()
##print("\nHere we test warning cases. We should get 4 warnings:")
##car.set_name("")
##car.set_name("Cheap EV")
##car.set_name("Tesla Model 3")
##car.set_price(-1)
##car.set_price(36200)
##This practical was a lot tougher than the other ones for me, I felt as if I studied the wrong things which I don't comepletely understand
##As always if you could give me feedback on what I should study from this unit to get better, that would be very helfpul for the future
|
983,095 | c7c1ecd6f234c36f052d7c9857af20d54aec6a75 | from typing import List, Tuple
class Solution:
    def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
        """
        https://leetcode.com/problems/accounts-merge/
        Merge accounts that share an email; the email address is the unique key.
        """
        # Union-find parent map over every email (account[0] is the owner name).
        map_union = {x: x for account in accounts for index, x in enumerate(account) if index != 0}
        # Each email remembers its owner's display name (last occurrence wins;
        # the problem guarantees shared emails belong to the same person).
        email_map = {x: account[0] for account in accounts for index, x in enumerate(account) if index != 0}
        def find(a: str) -> Tuple[str, int]:
            # Path-halving find; `dep` counts the steps taken and is used
            # below as a cheap union-by-depth heuristic.
            dep = 0
            while map_union[a] != a:
                map_union[a] = map_union[map_union[a]]
                dep += 1
                a = map_union[a]
            return a, dep
        def merge(a: str, b: str) -> None:
            # Attach the shallower root under the deeper one.
            a_res, b_res = find(a), find(b)
            if a_res[0] != b_res[0]:
                if a_res[1] > b_res[1]:
                    map_union[b_res[0]] = a_res[0]
                else:
                    map_union[a_res[0]] = b_res[0]
        # Union every email in an account with the account's first email.
        for account in accounts:
            for pos in range(2, len(account)):
                merge(account[1], account[pos])
        # Group all emails by their final root.
        email_group_map = {}
        for key in map_union.keys():
            root, _ = find(key)
            if root not in email_group_map:
                email_group_map[root] = []
            email_group_map[root].append(key)
        # Each group becomes [name, sorted emails...].
        return [[email_map[k]] + sorted(v) for k, v in email_group_map.items()]
|
983,096 | e98473820a5b7bde8cff04159be5049901fdeca9 | # https://app.codesignal.com/challenge/rbwtuZjSG8zJQszCz
def twoArraysNthElement(array1, array2, n):
a1_ptr = 0
a2_ptr = 0
while a1_ptr < len(array1) and a2_ptr < len(array2) and n > 0:
if array1[a1_ptr] <= array2[a2_ptr]:
a1_ptr += 1
else:
a2_ptr += 1
n -= 1
if a1_ptr == len(array1):
return array2[a2_ptr + n]
if a2_ptr == len(array2):
return array1[a1_ptr + n]
return min(array1[a1_ptr], array2[a2_ptr])
|
983,097 | 4c92ceb23966b23850180d9c55bcfa806e514ea1 | # Generated by Django 3.0.7 on 2021-02-20 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding Player.cannot_be_protected."""

    dependencies = [
        ("ulmg", "0021_prospectrating_cbs"),
    ]
    operations = [
        migrations.AddField(
            model_name="player",
            name="cannot_be_protected",
            field=models.BooleanField(default=False),
        ),
    ]
|
983,098 | c60fc50776a6793361a5fb75e2f808a69e0c8ff1 | from message_dispatcher import MessageDispatcher
|
983,099 | c1b281e84c60154ab9965742215408bec70446cc | #Composition
class Keyboard:
    """A keyboard component described by its color text."""

    def __init__(self, color):
        self.color = color


class Screen:
    """A screen component described by its size text."""

    def __init__(self, size):
        self.size = size


class Laptop:
    """Composes a keyboard and a screen into one device (composition demo)."""

    def __init__(self):
        self.elements = [
            Keyboard('This is keyboard'),
            Screen('This is screen'),
        ]


prod = Laptop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.