text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
import torch
import os
import numpy as np
import re
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from imbalanced_sampler import ImbalancedDatasetSampler
from PIL import ImageFilter
class MyDataset(Dataset):
    """Image-classification dataset laid out as ``<path>/<class_name>/<image_file>``.

    Optionally applies synthetic degradations (Gaussian blur / JPEG
    re-compression) so robustness to reduced image quality can be studied.
    """

    def __init__(self, path, img_size, is_train=True, degradations=None, resize=None):
        """
        Args:
            path: root directory; each subdirectory is one class.
            img_size: side length in pixels; images are resized to (img_size, img_size).
            is_train: when True the (currently empty) augmentation pipeline is applied.
            degradations: iterable of specs like 'gaussblur_3' or 'jpeg_40';
                the number is the blur radius / JPEG quality ('..._0' disables).
            resize: optional extra torchvision Resize target applied after loading.
        """
        # Build paths and labels in a single directory scan so the two lists
        # are guaranteed to stay aligned (the previous version scanned the
        # directory tree twice, which could disagree if it changed in between).
        self.paths = []
        self.classes = []
        for cls in os.listdir(path):
            for f in os.listdir(os.path.join(path, cls)):
                self.paths.append(os.path.join(path, cls, f))
                self.classes.append(cls)
        self.img_size = (img_size, img_size)
        # sorted() makes the class->index mapping deterministic across runs
        # (set iteration order is not stable between interpreter invocations).
        self.class_mapper = {k: i for i, k in enumerate(sorted(set(self.classes)))}
        # Augmentations intentionally disabled; re-enable as needed.
        self.augmentations = transforms.Compose([
            #transforms.RandomHorizontalFlip(),
            #transforms.RandomVerticalFlip(),
            #transforms.RandomRotation(degrees=180, fill=255)
        ])
        self.degradations = degradations
        self.resize = resize
        self.is_train = is_train
        self.to_tensor = transforms.ToTensor()
        # ToTensor -> ToPILImage round-trip; normalization variants were left
        # commented out here for experimentation.
        self.finalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.ToPILImage()
        ])
        print('Number of classes:', len(self.class_mapper))
        print('Image size:', img_size)
        print('Degradations:', self.degradations)

    def __len__(self):
        """Number of images found under the root directory."""
        return len(self.paths)

    def __getitem__(self, idx):
        """Load, degrade and tensorize one image; returns (tensor, class_index)."""
        if torch.is_tensor(idx):
            # BUGFIX: torch.Tensor has .tolist(), not .to_list() — the old
            # call raised AttributeError whenever a tensor index was passed.
            idx = idx.tolist()
        img = Image.open(self.paths[idx]).convert('RGB').resize(size=self.img_size, resample=Image.BILINEAR)
        img = self.finalize(img)
        y = self.class_mapper[self.classes[idx]]
        if self.resize:
            img = transforms.Resize(self.resize)(img)
        if self.is_train:
            img = self.augmentations(img)
        if self.degradations:
            for spec in self.degradations:
                kind, amount = re.split('_', spec)[:2]
                if kind == 'gaussblur' and amount != '0':
                    img = img.filter(ImageFilter.GaussianBlur(radius=int(amount)))
                elif kind == 'jpeg' and amount != '0':
                    # Round-trip through an in-memory JPEG at the given quality.
                    out = BytesIO()
                    img.save(out, format='JPEG', quality=int(amount))
                    out.seek(0)
                    img = Image.open(out)
        img = self.to_tensor(img)
        return img, y  # comment out y to create the figures
def get_datasets(root_dir, img_size, degradations=None, batch_size=16):
    """Build train/test DataLoaders from ``<root_dir>/train`` and ``<root_dir>/test``.

    Args:
        root_dir: dataset root containing 'train' and 'test' subdirectories.
        img_size: square side length images are resized to.
        degradations: optional list of degradation specs (see MyDataset);
            defaults to no degradation.  (Fixed: was a mutable default ``[]``.)
        batch_size: batch size for both loaders.

    Returns:
        (train_loader, test_loader)
    """
    if degradations is None:
        degradations = []
    trn_path = os.path.join(root_dir, 'train')
    tst_path = os.path.join(root_dir, 'test')
    trn_dataset = MyDataset(trn_path, img_size, is_train=True, degradations=degradations)
    # Imbalanced sampling deliberately disabled; re-enable via
    # ImbalancedDatasetSampler(trn_dataset, callback_get_label=...) if needed.
    trn_loader = DataLoader(trn_dataset, batch_size=batch_size, shuffle=False,
                            pin_memory=True)
    tst_dataset = MyDataset(tst_path, img_size, is_train=False)
    tst_loader = DataLoader(tst_dataset, batch_size=batch_size, shuffle=False,
                            pin_memory=True, sampler=None)
    return trn_loader, tst_loader
if __name__ == '__main__':
    # Figure-generation script: render one batch from each degraded loader
    # side by side and save three comparison figures.
    import matplotlib.pyplot as plt
    dataset, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=[])
    dataset_1, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['gaussblur_2'])
    dataset_2, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['gaussblur_4'])
    dataset_3, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['gaussblur_6'])
    dataset_4, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['gaussblur_8'])
    dataset_5, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['gaussblur_10'])
    dataset_j0, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['jpeg_5'])
    dataset_j1, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['jpeg_20'])
    dataset_j2, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['jpeg_40'])
    dataset_j3, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['jpeg_60'])
    dataset_j4, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['jpeg_80'])
    dataset_j5, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=['jpeg_95'])
    dataset_l0, _ = get_datasets('./../../dataset_rem_lr', 400)
    dataset_l1, _ = get_datasets('./../../dataset_rem_lr', 200)
    dataset_l2, _ = get_datasets('./../../dataset_rem_lr', 100)
    dataset_l3, _ = get_datasets('./../../dataset_rem_lr', 50)
    dataset_l4, _ = get_datasets('./../../dataset_rem_lr', 25)
    # --- Gaussian-blur comparison (radii 0, 2, 4, 6, 8, 10) ---
    fig, axes = plt.subplots(1, 6, figsize=(10, 15), dpi=180)
    for (x, a, b, c, d, e) in zip(dataset, dataset_1, dataset_2, dataset_3, dataset_4, dataset_5):
        data = []
        # NOTE(review): `.numpy()` below assumes each batch is the image
        # tensor alone (see the "commet y" remark in __getitem__); with
        # (img, y) pairs this would fail — confirm before running.
        for d in [x, a, b, c, d, e]:
            img = d.numpy()[0]
            # CHW -> HWC for imshow
            img = np.swapaxes(img, 0, 2)
            img = np.swapaxes(img, 0, 1)
            data.append((255 * img).astype(np.uint8)[:])
        for i, (d, ax) in enumerate(zip(data, np.ravel(axes))):
            ax.imshow(d)
            ax.set_title(f'{i * 2 if i != 0 else None}', color='white', fontweight='bold')
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        fig.show()
        fig.savefig('blurred.png', transparent=True)
        break  # only the first batch is needed for the figure
    # --- JPEG-quality comparison (qualities 5..95 plus the original) ---
    fig, axes = plt.subplots(1, 7, figsize=(10, 15), dpi=180)
    dataset, _ = get_datasets('./../../dataset_rem_lr', 400, degradations=[])
    for (x, o, a, b, c, d, e) in zip(dataset_j0, dataset_j1, dataset_j2, dataset_j3, dataset_j4, dataset_j5, dataset):
        data = []
        for d in [x, o, a, b, c, d, e]:
            img = d.numpy()[0]
            img = np.swapaxes(img, 0, 2)
            img = np.swapaxes(img, 0, 1)
            data.append((255 * img).astype(np.uint8)[:])
        for orig, d, ax in zip([5, 20, 40, 60, 80, 95, 'original'], data, np.ravel(axes)):
            ax.imshow(d)
            ax.set_title(f'{orig}', color='white', fontweight='bold')
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        fig.show()
        fig.savefig('jpeg_quality.png', transparent=True)
        break
    # --- Resolution comparison (400 px down to 25 px) ---
    fig, axes = plt.subplots(1, 5, figsize=(10, 15), dpi=180)
    for (a, b, c, d, e) in zip(dataset_l0, dataset_l1, dataset_l2, dataset_l3, dataset_l4):
        data = []
        for d in [a, b, c, d, e]:
            img = d.numpy()[0]
            img = np.swapaxes(img, 0, 2)
            img = np.swapaxes(img, 0, 1)
            data.append((255 * img).astype(np.uint8)[:])
        for orig, d, ax in zip([400, 200, 100, 50, 25], data, np.ravel(axes)):
            ax.imshow(d)
            ax.set_title(f'{orig}px', color='white', fontweight='bold')
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        fig.show()
        fig.savefig('size.png', transparent=True)
        break
|
# Создать класс TrafficLight (светофор) и определить у него один атрибут color (цвет) и метод running (запуск).
# Атрибут реализовать как приватный.
# В рамках метода реализовать переключение светофора в режимы: красный, желтый, зеленый.
# Продолжительность первого состояния (красный) составляет 7 секунд, второго (желтый) — 2 секунды,
# третьего (зеленый) — на ваше усмотрение.
# Переключение между режимами должно осуществляться только в указанном порядке (красный, желтый, зеленый).
# Проверить работу примера, создав экземпляр и вызвав описанный метод.
from time import sleep
class TrafficLight:
    """Traffic light cycling red -> yellow -> green forever.

    The current color is kept in the private (name-mangled) attribute
    ``__color``; each switch is announced on stdout.
    """

    __color = None  # current light color; private per the exercise statement

    def __setcolor(self, color):
        """Store the new color and print it."""
        self.__color = color
        print(self.__color)

    def running(self):
        """Cycle red (7 s) -> yellow (2 s) -> green (5 s) until Ctrl+C."""
        phases = (("red", 7), ("yellow", 2), ("green", 5))
        try:
            while True:
                for color, duration in phases:
                    self.__setcolor(color)
                    sleep(duration)
        except KeyboardInterrupt:
            exit()
# Demo: blocks forever cycling red/yellow/green until interrupted (Ctrl+C).
trafficlight = TrafficLight()
trafficlight.running()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InsEmployee import InsEmployee
class AlipayInsSceneEmploymentGroupendorseAppendModel(object):
    """Request model for the employment group-endorse append API.

    Follows the generated Alipay SDK pattern: private attributes exposed
    through properties, plus dict (de)serialization helpers.
    """

    def __init__(self):
        self._employee_list = None
        self._endorse_order_type = None
        self._out_biz_no = None
        self._partner_org_id = None
        self._scene_code = None
        self._summary_order_no = None

    @property
    def employee_list(self):
        return self._employee_list

    @employee_list.setter
    def employee_list(self, value):
        # Accepts a list of InsEmployee objects or plain dicts; dicts are
        # converted via InsEmployee.from_alipay_dict. Non-list values are ignored.
        if isinstance(value, list):
            self._employee_list = [
                item if isinstance(item, InsEmployee) else InsEmployee.from_alipay_dict(item)
                for item in value
            ]

    @property
    def endorse_order_type(self):
        return self._endorse_order_type

    @endorse_order_type.setter
    def endorse_order_type(self, value):
        self._endorse_order_type = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def partner_org_id(self):
        return self._partner_org_id

    @partner_org_id.setter
    def partner_org_id(self, value):
        self._partner_org_id = value

    @property
    def scene_code(self):
        return self._scene_code

    @scene_code.setter
    def scene_code(self, value):
        self._scene_code = value

    @property
    def summary_order_no(self):
        return self._summary_order_no

    @summary_order_no.setter
    def summary_order_no(self, value):
        self._summary_order_no = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict for the API payload."""
        params = dict()
        if self.employee_list:
            if isinstance(self.employee_list, list):
                # Convert list elements in place, matching generated-SDK behavior.
                for pos, element in enumerate(self.employee_list):
                    if hasattr(element, 'to_alipay_dict'):
                        self.employee_list[pos] = element.to_alipay_dict()
            if hasattr(self.employee_list, 'to_alipay_dict'):
                params['employee_list'] = self.employee_list.to_alipay_dict()
            else:
                params['employee_list'] = self.employee_list
        # The scalar fields all serialize the same way.
        for attr in ('endorse_order_type', 'out_biz_no', 'partner_org_id',
                     'scene_code', 'summary_order_no'):
            value = getattr(self, attr)
            if value:
                params[attr] = value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayInsSceneEmploymentGroupendorseAppendModel()
        for key in ('employee_list', 'endorse_order_type', 'out_biz_no',
                    'partner_org_id', 'scene_code', 'summary_order_no'):
            if key in d:
                setattr(o, key, d[key])
        return o
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 21 20:32:18 2018
@author: wdr78
"""
import numpy as np
import pandas as pd
# Timestamp columns: scheduled dep/arr (计飞/计到) and actual dep/arr (实飞/实到).
date_col = ['计飞','计到','实飞','实到']
flights = pd.read_csv('C:/Users/wdr78/Desktop/ceair_simulation/simulation/2018-01-17.csv',low_memory=False,parse_dates=date_col,keep_default_na=False,encoding='gbk')
quick_passtime=pd.read_csv('C:/Users/wdr78/Desktop/ceair_simulation/simulation/quick_acrosstime.csv',encoding='gbk')
f=flights['上座率'].str.strip("%").astype(float)/100
flights['上座率']=f# convert the load-factor percentage string to a float fraction
# Sort by tail number then scheduled departure so each aircraft's rotation
# occupies consecutive row indices (the rule functions below rely on this).
flights = flights.sort_values(by=['机号','计飞'],ascending = [True,True])
N_flights = len(flights)
flights.index = np.arange(N_flights)
#nowTime=pd.Timestamp(2100,3,4)
#x=1
#nowTime+pd.Timedelta(hours=x)
# Initialize the per-aircraft state table.
机号=[]
for name, group in flights.groupby('机号'):
    机号.append(name)
flight = dict(list(flights.groupby('机号')))
t1=[]
for tailnum in 机号:
    # First scheduled event per aircraft; column 7 is assumed to be 实飞 —
    # TODO confirm against the CSV layout.
    t1.append(flight[tailnum].values[0][7])
airplane_state=pd.DataFrame({'第几次任务':np.zeros(len(机号)),'是否正在飞行':np.zeros(len(机号)),'下次动作时间':t1},index=机号)
del flight
a1=0.5;a2=1;a3=1.5# coefficients of the value-ranking formula
flights['价值排序']=a1*flights.上座率+a2*flights.航班收益+a3*flights.旅客价值
# Airports affected: Hongqiao, Hangzhou, Pudong, Beijing, Dongying.
base_airport=['虹桥','西安','南京','兰州','成都','浦东','合肥','昆明','青岛','武汉','南昌','广州','太原','石家庄','北京']
two_airport=['虹桥','浦东']# the two special Shanghai terminals
nowtime=pd.Timestamp(2018,1,17,4,00,00)# assumed moment recovery work begins
dealtime=pd.Timestamp(2018,1,17,6,00,00)# tasks are handleable from two hours before departure
flights=flights[flights.计飞>dealtime]
# Disruption windows per airport: start/end time, throughput rate, cancel/delay split.
airport_state=pd.DataFrame(index=['开始时间','结束时间','进出港速率','取消延误比'])
airport_state['虹桥']=[pd.Timestamp('2018-01-17 8:00:00'),pd.Timestamp('2018-01-17 15:30:00'),0.3,0.5]
airport_state['杭州']=[pd.Timestamp('2018-01-17 8:20:00'),pd.Timestamp('2018-01-17 14:30:00'),0.5,0.5]
airport_state['浦东']=[pd.Timestamp('2018-01-17 8:10:00'),pd.Timestamp('2018-01-17 16:00:00'),0.3,0.5]
airport_state['北京']=[pd.Timestamp('2018-01-17 7:00:00'),pd.Timestamp('2018-01-17 13:30:00'),0.8,0.5]
airport_state['重庆']=[pd.Timestamp('2018-01-17 7:30:00'),pd.Timestamp('2018-01-17 14:30:00'),0.4,0.5]
airport_state=airport_state.T
# Normal throughput per airport = scheduled departures + arrivals inside its window.
normal_num=[]
for i in airport_state.index.tolist():
    normal_num.append(len(flights[(flights.起飞场==i)&(flights.计飞>airport_state.loc[i]['开始时间'])&(flights.计飞<airport_state.loc[i]['结束时间'])])+len(flights[(flights.降落场==i)&(flights.计到>airport_state.loc[i]['开始时间'])&(flights.计到<airport_state.loc[i]['结束时间'])]))
airport_state['正常通行量']=normal_num
airport_state=airport_state.sort_values(by='开始时间')
def f_effectairport(t):
    """Return the airports whose disruption window contains time ``t``.

    An airport counts as affected when 开始时间 <= t < 结束时间 in the
    module-level ``airport_state`` table.
    """
    active = (airport_state.开始时间 <= t) & (airport_state.结束时间 > t)
    return airport_state[active].index.tolist()
######################################################################################################
#取消规则
##A
def f_AB(effect_flights,flights):
    """Cancellation rules A/B: pair each affected flight with its PREVIOUS leg.

    Rule A: the previous leg departed from the same base airport the affected
    leg arrives at (an out-and-back pair) -> cancellable.
    Rule B: Hongqiao<->Pudong shuttle pairs, tracked separately per direction.
    Returns (IndexA, IndexB_SHA, IndexB_PVG): DataFrames of (dep, arr) flight
    index pairs keyed by the earlier leg's index.
    Relies on flights being sorted by tail number + departure with a fresh
    RangeIndex, so index-1 is the same aircraft's previous flight.
    """
    IndexA=pd.DataFrame(columns=['dep','arr']);IndexB_SHA=pd.DataFrame(columns=['dep','arr']);IndexB_PVG=pd.DataFrame(columns=['dep','arr'])# flight-index pairs cancelled under rules A and B
    for i in range(len(effect_flights)):
        tailnum=effect_flights.values[i][1]  # column 1 assumed to be 机号 (tail number) — TODO confirm
        deptime=effect_flights.values[i][7]  # column 7 assumed to be 实飞 (actual departure) — TODO confirm
        theairplane=flights[flights.机号==tailnum]# all flights of the affected aircraft
        theflight=theairplane[theairplane.实飞==deptime]# the affected flight within that rotation
        arrairport=theflight.values[0][3]  # column 3 assumed to be 降落场 (arrival airport)
        theindex=theflight.index.tolist()[0]# index of the affected flight
        try:
            depairport=theairplane.loc[theindex-1,'起飞场']
            if theairplane.loc[theindex-1,'VIP航班']==1:# VIP flights are never cancelled
                continue
        # NOTE(review): bare except is meant to catch a missing previous row
        # (KeyError) but also hides any real error.
        except:
            continue
        else:
            if arrairport in base_airport and arrairport==depairport:
                IndexA.loc[theindex-1]=[theindex-1,theindex]
            if arrairport=='浦东' and depairport=='虹桥':
                IndexB_SHA.loc[theindex-1]=[theindex-1,theindex]
            if arrairport=='虹桥' and depairport=='浦东':
                IndexB_PVG.loc[theindex-1]=[theindex-1,theindex]
    return IndexA,IndexB_SHA,IndexB_PVG
####################################################################################################
##CD
def f_CD(effect_flights,flights):
    """Cancellation rules C/D: pair each affected flight with its NEXT leg.

    Rule C: the next leg returns to the affected flight's departure airport and
    the intermediate stop is not a base airport -> cancellable.
    Rule D: Hongqiao/Pudong variants, tracked per direction.
    Returns (IndexC, IndexD_SHA, IndexD_PVG): DataFrames of (dep, arr) flight
    index pairs keyed by the affected flight's index.
    Relies on the rotation-ordered RangeIndex, so index+1 is the same
    aircraft's next flight.
    """
    IndexC=pd.DataFrame(columns=['dep','arr']);IndexD_SHA=pd.DataFrame(columns=['dep','arr']);IndexD_PVG=pd.DataFrame(columns=['dep','arr'])
    for i in range(len(effect_flights)):
        tailnum=effect_flights.values[i][1]  # column 1 assumed 机号 — TODO confirm
        deptime=effect_flights.values[i][7]  # column 7 assumed 实飞 — TODO confirm
        theairplane=flights[flights.机号==tailnum]# all flights of the affected aircraft
        theflight=theairplane[theairplane.实飞==deptime]# the affected flight within that rotation
        depairport=theflight.values[0][2]  # column 2 assumed 起飞场
        arrairport=theflight.values[0][3]  # column 3 assumed 降落场
        theindex=theflight.index.tolist()[0]# index of the affected flight
        try:
            arrairport2=theairplane.loc[theindex+1,'降落场']
            if theairplane.loc[theindex+1,'VIP航班']==1:# VIP flights are never cancelled
                continue
        # NOTE(review): bare except intended to catch a missing next row.
        except:
            continue
        else:
            if arrairport2==depairport and arrairport not in base_airport:
                IndexC.loc[theindex]=[theindex,theindex+1]
            if depairport=='虹桥' and arrairport2=='浦东' and arrairport not in base_airport:
                IndexD_SHA.loc[theindex]=[theindex,theindex+1]
            if depairport=='浦东' and arrairport2=='虹桥' and arrairport not in base_airport:
                IndexD_PVG.loc[theindex]=[theindex,theindex+1]
    return IndexC,IndexD_SHA,IndexD_PVG
####################################################################################################
##EF
def f_EF(effect_flights,flights):
    """Cancellation rules E/F: three-leg chains starting at the affected flight.

    Rule E: the third leg returns to the affected flight's departure airport
    and neither intermediate stop is a base airport -> cancellable triple.
    Rule F: Hongqiao/Pudong variants, tracked per direction.
    Returns (IndexE, IndexF_SHA, IndexF_PVG): DataFrames of (dep, mid, arr)
    flight index triples keyed by the affected flight's index.
    """
    IndexE=pd.DataFrame(columns=['dep','mid','arr']);IndexF_SHA=pd.DataFrame(columns=['dep','mid','arr']);IndexF_PVG=pd.DataFrame(columns=['dep','mid','arr'])
    for i in range(len(effect_flights)):
        tailnum=effect_flights.values[i][1]  # column 1 assumed 机号 — TODO confirm
        deptime=effect_flights.values[i][7]  # column 7 assumed 实飞 — TODO confirm
        theairplane=flights[flights.机号==tailnum]# all flights of the affected aircraft
        theflight=theairplane[theairplane.实飞==deptime]# the affected flight within that rotation
        depairport=theflight.values[0][2]  # column 2 assumed 起飞场
        arrairport=theflight.values[0][3]  # column 3 assumed 降落场
        theindex=theflight.index.tolist()[0]# index of the affected flight
        try:
            arrairport2=theairplane.loc[theindex+1,'降落场']
            arrairport3=theairplane.loc[theindex+2,'降落场']
            if theairplane.loc[theindex+1,'VIP航班']==1 or theairplane.loc[theindex+2,'VIP航班']==1:# VIP flights are never cancelled
                continue
        # NOTE(review): bare except intended to catch missing follow-on rows.
        except:
            continue
        else:
            if arrairport3==depairport and arrairport not in base_airport and arrairport2 not in base_airport:
                IndexE.loc[theindex]=[theindex,theindex+1,theindex+2]
            if depairport=='虹桥' and arrairport3=='浦东' and arrairport not in base_airport and arrairport2 not in base_airport:
                IndexF_SHA.loc[theindex]=[theindex,theindex+1,theindex+2]
            if depairport=='浦东' and arrairport3=='虹桥' and arrairport not in base_airport and arrairport2 not in base_airport:
                IndexF_PVG.loc[theindex]=[theindex,theindex+1,theindex+2]
    return IndexE,IndexF_SHA,IndexF_PVG
####################################################################################################
##G
def f_G(effect_flights,flights):
    """Cancellation rule G: out-and-back pairs whose turn point is NOT a base airport.

    Like rule A but for non-base departure airports: the previous leg departed
    from the airport the affected leg returns to.
    Returns IndexG: DataFrame of (dep, arr) flight index pairs keyed by the
    earlier leg's index.
    """
    IndexG=pd.DataFrame(columns=['dep','arr']);
    for i in range(len(effect_flights)):
        tailnum=effect_flights.values[i][1]  # column 1 assumed 机号 — TODO confirm
        deptime=effect_flights.values[i][7]  # column 7 assumed 实飞 — TODO confirm
        theairplane=flights[flights.机号==tailnum]# all flights of the affected aircraft
        # theairplane=theairplane[theairplane.计飞>=t]
        theflight=theairplane[theairplane.实飞==deptime]# the affected flight within that rotation
        arrairport=theflight.values[0][3]  # column 3 assumed 降落场
        theindex=theflight.index.tolist()[0]# index of the affected flight
        try:
            depairport=theairplane.loc[theindex-1,'起飞场']
            if theairplane.loc[theindex-1,'VIP航班']==1:# VIP flights are never cancelled
                continue
        # NOTE(review): bare except intended to catch a missing previous row.
        except:
            continue
        else:
            if arrairport==depairport and depairport not in base_airport :
                IndexG.loc[theindex-1]=[theindex-1,theindex]
    return IndexG
####################################################################################################
#用于后面航班价值的排序
def f_index_value(index_matrix, flights_df=None):
    """Total 价值排序 (ranking value) of the flights referenced by each row.

    Used to decide which cancellable flight groups to sacrifice first
    (lowest total value first).

    Args:
        index_matrix: DataFrame whose cells are flight indices; each row is
            one cancellable group (pair or triple) from rules A-G.
        flights_df: flight table holding the '价值排序' column; defaults to
            the module-level ``flights`` (parameter added for testability,
            backward compatible).

    Returns:
        pd.Series mapping each row label of ``index_matrix`` to the SUM of
        '价值排序' over that row's flights.

    BUGFIX: the accumulator used to live outside the row loop, so every row
    received a running cumulative total instead of its own sum — which made
    the subsequent sort_values() degenerate to iteration order.
    """
    if flights_df is None:
        flights_df = flights
    index_value = pd.Series(dtype=float)  # explicit dtype: pd.Series([]) warns/deprecates
    for i in index_matrix.index.tolist():
        row_total = 0.0  # reset per group (was the bug)
        for j in list(index_matrix.loc[i]):
            row_total += flights_df.loc[j, '价值排序']
        index_value[i] = row_total
    return index_value
#####################################################################################################
#汇总模型
#while sum(airplane_state.第几次任务)<len(flights[flights.状态!='C']) or min(airplane_state.下次动作时间)<nowTime:
# flights_nocancel=flights[flights.状态!='C']
# t=min(airplane_state.下次动作时间)
# effect_airportstate=f_effectairport(airport_state,t)#时间t时所有受影响的航站状态
# effect_airportstate_fix=f_effectairport(airport_state_fix,t)#不随事件处理而更新的航站状态
# effect_airport_fix=effect_airportstate_fix.index#不随事件处理而更新的航站
# if len(effect_airportstate)==0:#如果该时间没有受影响的航站,按照事先的排班任务执行
# tailnum=airplane_state[airplane_state.下次动作时间==t].index[0]
# airplane_state.loc[tailnum,'是否正在飞行']=(airplane_state.loc[tailnum,'是否正在飞行']+1)%2
# if airplane_state.loc[tailnum,'是否正在飞行']==1:#正在飞行
# xxx=flights_nocancel[flights_nocancel.机号==tailnum].index.values[int(airplane_state.loc[tailnum,'第几次任务'])-1]
# #xxx正在飞行那个航班的索引
# flights.loc[xxx,'实飞']=t
# airplane_state.loc[tailnum,'第几次任务']+=1
# airplane_state.loc[tailnum,'下次动作时间']=flights_nocancel[flights_nocancel.机号==tailnum].values[int(airplane_state.loc[tailnum,'第几次任务'])-1][8]
# else:#降落了
# if airplane_state.loc[tailnum,'第几次任务']>=len(flights_nocancel[flights_nocancel.机号==tailnum]):
# xxx=flights_nocancel[flights_nocancel.机号==tailnum].index.values[int(airplane_state.loc[tailnum,'第几次任务'])-1]
# flights.loc[xxx,'实到']=t
# airplane_state.loc[tailnum,'下次动作时间']=nowTime
# else:
# airplane_state.loc[tailnum,'下次动作时间']=flights_nocancel[flights_nocancel.机号==tailnum].values[int(airplane_state.loc[tailnum,'第几次任务'])][7]
# xxx=flights_nocancel[flights_nocancel.机号==tailnum].index.values[int(airplane_state.loc[tailnum,'第几次任务']-1)]
# flights.loc[xxx,'实到']=t
# Main recovery loop: for each disrupted airport decide which affected
# flights to cancel (rules A-G) and which to delay, then propagate delays
# along each aircraft's subsequent rotation.
for airport in airport_state.index.tolist():
    # Flights to cancel = normal throughput * (1 - allowed rate) * cancel/delay split.
    cancel_num=int(airport_state.loc[airport]['正常通行量']*(1-airport_state.loc[airport]['进出港速率'])*airport_state.loc[airport]['取消延误比'])
    # number of flights that must be cancelled
    effect_flights=flights[(flights.起飞场==airport)&(flights.VIP航班!=1)&(flights.状态!='C')]
    effect_flights=effect_flights[(effect_flights.实飞>airport_state.loc[airport]['开始时间']) & (effect_flights.实飞<airport_state.loc[airport]['结束时间'])]
    # departures affected at this airport
    effect_flights2=flights[(flights.降落场==airport)&(flights.VIP航班!=1)&(flights.状态!='C')]
    effect_flights2=effect_flights2[(effect_flights2.实到>airport_state.loc[airport]['开始时间']) & (effect_flights2.实到<airport_state.loc[airport]['结束时间'])]
    # arrivals affected at this airport
    effect_flights_index=effect_flights.index.tolist()+effect_flights2.index.tolist()
    # indices of all affected flights (departures and arrivals)
    IndexA,IndexB_SHA,IndexB_PVG=f_AB(effect_flights,flights)
    IndexC,IndexD_SHA,IndexD_PVG=f_CD(effect_flights,flights)
    IndexE,IndexF_SHA,IndexF_PVG=f_EF(effect_flights,flights)
    IndexG=f_G(effect_flights,flights)
    cancelable_num=2*len(IndexA)+2*len(IndexB_SHA)+2*len(IndexB_PVG)+2*len(IndexC)+len(IndexD_SHA)+len(IndexD_PVG)+2*len(IndexE)+len(IndexF_SHA)+len(IndexF_PVG)+2*len(IndexG)
    # number of flights eligible for cancellation under the rules
    if cancel_num>=cancelable_num:# more must be cancelled than is eligible: cancel everything eligible
        Index=list(IndexA.dep)+list(IndexA.arr)+list(IndexB_SHA.dep)+list(IndexB_SHA.arr)+list(IndexB_PVG.dep)+list(IndexB_PVG.arr)+list(IndexC.dep)+list(IndexC.arr)+list(IndexD_SHA.dep)+list(IndexD_SHA.arr)+list(IndexD_PVG.dep)+list(IndexD_PVG.arr)+list(IndexE.dep)+list(IndexE.mid)+list(IndexE.arr)+list(IndexF_SHA.dep)+list(IndexF_SHA.mid)+list(IndexF_SHA.arr)+list(IndexF_PVG.dep)+list(IndexF_PVG.mid)+list(IndexF_PVG.arr)+list(IndexG.dep)+list(IndexG.arr)
        flights.loc[Index,'状态']='C'
        # NOTE(review): delay_num is computed but never used in this branch —
        # all remaining affected flights are delayed below.
        delay_num=airport_state.loc[airport]['正常通行量']*(1-airport_state.loc[airport]['进出港速率'])-cancelable_num
        I=[]
        for i in effect_flights_index:
            if i not in Index:
                I.append(i)
        delay_index=flights.loc[I,:].sort_values(by=['价值排序','VIP航班'],ascending = [True,True]).index.tolist()
        flights.loc[delay_index,'状态']='D'
        # Delay propagation: the aircraft's later flights slip by the minimum
        # turnaround time until the original timetable is feasible again.
        delayed_index=[]
        for index in delay_index:
            if index not in delayed_index:
                tailnum=flights.loc[index]['机号']
                release_time=airport_state.loc[airport]['结束时间']
                time_gap=flights.loc[index,'实到']-flights.loc[index,'实飞']# flight duration of this leg
                if flights.loc[index,'起飞场']==airport:# departs from the disrupted airport: can only leave once flow control ends
                    flights.loc[index,'实到']=release_time+time_gap
                    flights.loc[index,'实飞']=release_time
                else:# lands at the disrupted airport: may depart so it lands once flow control ends
                    flights.loc[index,'实到']=release_time
                    flights.loc[index,'实飞']=release_time-time_gap
                theflight1=flights[(flights.机号==tailnum)&(flights.index>=index)&(flights.状态!='C')]# follow-on flights including the affected one
                theflight2=flights[(flights.机号==tailnum)&(flights.index>index)&(flights.状态!='C')]
                theflight_index=theflight2.index.tolist()# follow-on flight indices (excluding the affected flight itself)
                airportlist=quick_passtime[quick_passtime.机号==tailnum]['机场名称']
                j=0
                for i in theflight_index:
                    if flights.loc[i]['起飞场'] in airportlist:# look up the quick-turnaround time
                        passtime=quick_passtime[(quick_passtime.机号==tailnum)&(quick_passtime.机场名称==flights.loc[i]['起飞场'])].values[0,2]
                    else:
                        passtime=quick_passtime[(quick_passtime.机号==tailnum)&(quick_passtime.机场名称=='其他')].values[0,2]
                    # column 8 of theflight1 assumed to be 实到 (previous leg's
                    # actual arrival) — TODO confirm against the CSV layout
                    if theflight1.values[j,8]+pd.Timedelta(minutes=passtime)>flights.loc[i]['实飞']:
                        time_gap=flights.loc[i,'实到']-flights.loc[i,'实飞']
                        flights.loc[i,'实到']=theflight1.values[j,8]+pd.Timedelta(minutes=passtime)+time_gap
                        flights.loc[i,'实飞']=theflight1.values[j,8]+pd.Timedelta(minutes=passtime)
                        j+=1
                        flights.loc[i,'状态']='D'
                        delayed_index.append(i)
                    else:
                        continue
        # Alternative propagation (kept for reference): shift every later
        # flight of the delayed aircraft by one fixed gap.
#        delayed_index=[]
#        for i in delay_index:
#            if i not in delayed_index:
#                tailnum=flights.loc[i,'机号']
#                time_gap=airport_state.loc[airport]['结束时间']-flights.loc[i,'实飞']
#                yyy=flights[(flights.机号==tailnum)&(flights.index>=i)&(flights.状态!='C')].index.tolist()
#                delayed_index+=yyy
#                # indices of the aircraft's later flights
#                for zzz in yyy:
#                    flights.loc[zzz,'实飞']+=time_gap
#                    flights.loc[zzz,'实到']+=time_gap
#                    flights.loc[zzz,'状态']='D'
    else:# fewer cancellations needed than eligible: cancel lowest-value groups first
        # NOTE(review): Series.append was removed in pandas 2.0; this chain
        # would need pd.concat on newer pandas.
        index_value=f_index_value(IndexA).append(f_index_value(IndexB_SHA)).append(f_index_value(IndexB_PVG)).append(f_index_value(IndexC)).append(f_index_value(IndexD_SHA)).append(f_index_value(IndexD_PVG)).append(f_index_value(IndexE)).append(f_index_value(IndexF_SHA)).append(f_index_value(IndexF_PVG)).append(f_index_value(IndexG))
        index_value=index_value.sort_values()
        Index=[]
        lenth=0
        for i in index_value.index.tolist():
            # Each branch adds the group's flight indices; the increment is the
            # number of NEW cancellations the group contributes at this airport.
            if i in IndexA.index.tolist():
                Index+=list(IndexA.loc[i,:])
                lenth+=2
            elif i in IndexB_SHA.index.tolist():
                Index+=list(IndexB_SHA.loc[i,:])
                lenth+=2
            elif i in IndexB_PVG.index.tolist():
                Index+=list(IndexB_PVG.loc[i,:])
                lenth+=2
            elif i in IndexC.index.tolist():
                Index+=list(IndexC.loc[i,:])
                lenth+=2
            elif i in IndexD_SHA.index.tolist():
                Index+=list(IndexD_SHA.loc[i,:])
                lenth+=1
            elif i in IndexD_PVG.index.tolist():
                Index+=list(IndexD_PVG.loc[i,:])
                lenth+=1
            elif i in IndexE.index.tolist():
                Index+=list(IndexE.loc[i,:])
                lenth+=2
            elif i in IndexF_SHA.index.tolist():
                Index+=list(IndexF_SHA.loc[i,:])
                lenth+=1
            elif i in IndexF_PVG.index.tolist():
                Index+=list(IndexF_PVG.loc[i,:])
                lenth+=1
            else:
                Index+=list(IndexG.loc[i,:])
                lenth+=2
            # NOTE(review): 'continue' at the end of the loop body is a no-op;
            # 'break' was probably intended once the cancel quota is reached.
            if lenth>=cancel_num:
                continue
        flights.loc[Index,'状态']='C'
        delay_num=int(airport_state.loc[airport]['正常通行量']*(1-airport_state.loc[airport]['进出港速率']))-cancel_num# number of flights to delay
        I=[]
        for i in effect_flights_index:
            if i not in Index:
                I.append(i)
        delay_index=flights.loc[I,:].sort_values(by=['价值排序','VIP航班'],ascending = [True,True]).index.tolist()[:delay_num]
        flights.loc[delay_index,'状态']='D'
        # Delay propagation — same scheme as in the branch above.
        delayed_index=[]
        for index in delay_index:
            if index not in delayed_index:
                tailnum=flights.loc[index]['机号']
                release_time=airport_state.loc[airport]['结束时间']
                time_gap=flights.loc[index,'实到']-flights.loc[index,'实飞']# flight duration of this leg
                if flights.loc[index,'起飞场']==airport:# departs from the disrupted airport: can only leave once flow control ends
                    flights.loc[index,'实到']=release_time+time_gap
                    flights.loc[index,'实飞']=release_time
                else:# lands at the disrupted airport: may depart so it lands once flow control ends
                    flights.loc[index,'实到']=release_time
                    flights.loc[index,'实飞']=release_time-time_gap
                theflight1=flights[(flights.机号==tailnum)&(flights.index>=index)&(flights.状态!='C')]# follow-on flights including the affected one
                theflight2=flights[(flights.机号==tailnum)&(flights.index>index)&(flights.状态!='C')]# follow-on flights excluding the affected one
                theflight_index=theflight2.index.tolist()# follow-on flight indices
                airportlist=quick_passtime[quick_passtime.机号==tailnum]['机场名称']
                j=0
                for i in theflight_index:
                    if flights.loc[i]['起飞场'] in airportlist:# look up the quick-turnaround time
                        passtime=quick_passtime[(quick_passtime.机号==tailnum)&(quick_passtime.机场名称==flights.loc[i]['起飞场'])].values[0,2]
                    else:
                        passtime=quick_passtime[(quick_passtime.机号==tailnum)&(quick_passtime.机场名称=='其他')].values[0,2]
                    if theflight1.values[j,8]+pd.Timedelta(minutes=passtime)>flights.loc[i]['实飞']:
                        time_gap=flights.loc[i,'实到']-flights.loc[i,'实飞']
                        flights.loc[i,'实到']=theflight1.values[j,8]+pd.Timedelta(minutes=passtime)+time_gap
                        flights.loc[i,'实飞']=theflight1.values[j,8]+pd.Timedelta(minutes=passtime)
                        j+=1
                        flights.loc[i,'状态']='D'
                        delayed_index.append(i)
                    else:
                        continue
        # Alternative propagation (kept for reference): shift later flights by a fixed gap.
#        delayed_index=[]
#        for i in delay_index:
#            if i not in delayed_index:
#                tailnum=flights.loc[i,'机号']
#                time_gap=airport_state.loc[airport]['结束时间']-flights.loc[i,'实飞']
#                yyy=flights[(flights.机号==tailnum)&(flights.index>=i)&(flights.状态!='C')].index.tolist()
#                # indices of the aircraft's later flights
#                delayed_index+=yyy
#                for zzz in yyy:
#                    flights.loc[zzz,'实飞']+=time_gap
#                    flights.loc[zzz,'实到']+=time_gap
#                    flights.loc[zzz,'状态']='D'
# Cancelled flights get no actual departure/arrival times.
flights.loc[flights.状态=='C','实飞']=None
flights.loc[flights.状态=='C','实到']=None
#####################################################################################################
Cn=len(flights[flights.状态=='C'])# number of cancelled flights
Dn=len(flights[flights.状态=='D'])# number of delayed flights
flights.to_csv('recovery_result.csv',encoding='gbk')
###################################################################################################
#switch case
|
from twilio.rest import Client
import os
import pymongo
from twilio.twiml.messaging_response import MessagingResponse
from flask import Flask, request, redirect
from twilio.twiml.voice_response import Play, VoiceResponse
from flask_cors import CORS, cross_origin
import random
# Flask app with open CORS; Twilio credentials come from the environment
# (raises KeyError at startup when either variable is unset).
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
@app.route('/', methods=['GET'])
def main():
    """Health-check endpoint; confirms the API is up."""
    return {"api_stuff": "success"}
@app.route('/image', methods=['GET', 'POST'])
@cross_origin()
def image():
    """Text one of two meme images to the phone number in the JSON body.

    Expects {"phone": "<10-digit US number>"}; returns the Twilio message SID.
    """
    images = ["https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSZCdzyXGUg2Gbcdc3fujKiLchsBxFVGpvNNA&usqp=CAU", "https://static.wikia.nocookie.net/arresteddevelopment/images/d/d1/2017_Lego_Batman_Premiere_-_Michael_Cera_01.jpg/revision/latest/top-crop/width/360/height/450?cb=20170624164358"]
    payload = request.json
    chosen = images[random.randint(0, 1)]
    sms = client.messages.create(
        body='',
        from_='+15122014739',
        media_url=[chosen],
        to='+1' + payload["phone"]
    )
    print(sms.sid)
    return str(sms.sid)
@app.route('/custom_message', methods=['GET', 'POST'])
@cross_origin()
def custom_message():
    """Send a caller-supplied SMS.

    Expects JSON {"message": ..., "phone": "<10-digit US number>"};
    returns the Twilio message SID.
    """
    payload = request.json
    sms = client.messages.create(
        body=payload["message"],
        from_='+15122014739',
        to='+1' + payload["phone"]
    )
    print(sms.sid)
    return str(sms.sid)
@app.route('/cats', methods=['GET', 'POST'])
@cross_origin()
def cats():
    """Text a random cat fact to the phone number in the JSON body.

    Expects {"phone": "<10-digit US number>"}; returns the Twilio message SID.
    """
    content = request.json
    facts = [
        "Cats have 4 legs",
        "The oldest known pet cat existed 9,500 years ago",
        "Cats spend 70% of their lives sleeping",
        "A cat was the Mayor of an Alaskan town for 20 years",
        "The record for the longest cat ever is 48.5 inches",
        "The richest cat in the world had £7 million",
        "Cats walk like camels and giraffes",
        "Isaac Newton invented the cat door",
        "In 1963 a cat went to space",
        "Ancient Egyptians would shave off their eyebrows when their cats died",
        "House cats share 95.6% of their genetic makeup with tigers",
        "A house cat can reach speeds of up to 30mph",
    ]
    # BUGFIX: was random.randint(0, 1), which only ever sent the first two of
    # the 12 facts; draw over the whole list like /random_fact does.
    random_number = random.randint(0, len(facts) - 1)
    phone = content["phone"]
    message = client.messages.create(
        body=facts[random_number],
        from_='+15122014739',
        to='+1' + phone
    )
    print(message.sid)
    return str(message.sid)
@app.route('/random_fact', methods=['GET', 'POST'])
@cross_origin()
def random_fact():
    """Text a random trivia fact to the phone number in the JSON body.

    Expects {"phone": "<10-digit US number>"}; returns the Twilio message SID.
    """
    payload = request.json
    facts = [
        "Banging your head against a wall for one hour burns 150 calories.",
        "In Switzerland it is illegal to own just one guinea pig.",
        "Pteronophobia is the fear of being tickled by feathers.",
        "Snakes can help predict earthquakes.",
        "A flock of crows is known as a murder.",
        "The oldest “your mom” joke was discovered on a 3,500 year old Babylonian tablet.",
        "So far, two diseases have successfully been eradicated: smallpox and rinderpest.",
        "29th May is officially “Put a Pillow on Your Fridge Day”.",
    ]
    chosen = facts[random.randint(0, len(facts) - 1)]
    sms = client.messages.create(
        body=chosen,
        from_='+15122014739',
        to='+1' + payload["phone"]
    )
    print(sms.sid)
    return str(sms.sid)
@app.route("/call", methods=['GET', 'POST'])
@cross_origin()
def outgoing_call():
"""Send a dynamic phone call"""
content = request.json
type_of_message = content["type"]
# phone_number = request.data.phone
# mp3_url="http://prank-meme.herokuapp.com/answer"
if type_of_message == "johncena":
call = client.calls.create (
to="+1" + content["phone"],
from_="+15122014739",
url="http://prank-meme.herokuapp.com/johncena"
)
elif type_of_message == "howur":
call = client.calls.create(
to="+1" + content["phone"],
from_="+15122014739",
url="http://prank-meme.herokuapp.com/askyouhowyouare"
)
elif type_of_message == "road":
call = client.calls.create(
to="+1" + content["phone"],
from_="+15122014739",
url="http://prank-meme.herokuapp.com/road"
)
elif type_of_message == "car":
call = client.calls.create(
to="+1" + content["phone"],
from_="+15122014739",
url="http://prank-meme.herokuapp.com/car"
)
print(call.sid)
return str(call.sid)
@app.route("/askyouhowyouare", methods=['GET', 'POST'])
def askyouhowyouare():
"""Respond to incoming phone calls with a brief message."""
# Start our TwiML response
response = VoiceResponse()
response.play('https://youcustomizeit.s3.us-east-2.amazonaws.com/how-are-you-meme.mp3')
# Read a message aloud to the caller
# response.say("Thank you for calling! Have a great day.")
return str(response)
@app.route("/johncena", methods=['GET', 'POST'])
def johncena():
    """Answer an incoming call by playing the John Cena meme audio."""
    vr = VoiceResponse()
    vr.play('https://youcustomizeit.s3.us-east-2.amazonaws.com/John+Cena+Meme+Original+Remastered+HD.mp3')
    return str(vr)
@app.route("/road", methods=['GET', 'POST'])
def road():
    """Answer an incoming call by playing the 'road' prank audio."""
    vr = VoiceResponse()
    vr.play('https://youcustomizeit.s3.us-east-2.amazonaws.com/road.mp3')
    return str(vr)
@app.route("/car", methods=['GET', 'POST'])
def car():
    """Answer an incoming call by playing the 'car' prank audio."""
    vr = VoiceResponse()
    vr.play('https://youcustomizeit.s3.us-east-2.amazonaws.com/car.mp3')
    return str(vr)
@app.route("/sms", methods=['GET', 'POST'])
def incoming_sms():
    """Send a dynamic reply to an incoming text message"""
    # Shared help text for every malformed registration message.
    invalid_help = "Invalid: Enter your name, class, and session# separated by spaces as shown (one student at a time). Examples:\nAvi Patel grade1 session1\nRavi Rao PreK session1\nMira Singh KG session2"
    number = request.values.get('From', None)
    body = request.values.get('Body', None)
    print(body)
    resp = MessagingResponse()
    # Guard: no message body at all.
    if body is None:
        resp.message(invalid_help)
        return str(resp)
    tokens = body.lower().strip().split()
    # Valid registrations are either 4 tokens (first last class session)
    # or 6 tokens (class/session names split across two words each).
    if len(tokens) not in (4, 6):
        resp.message(invalid_help)
        return str(resp)
    name = tokens[0] + " " + tokens[1]
    class_name = "".join(tokens[2:])
    # forward_message(class_name, number, name)
    return str(resp)
# def forward_message(class_name, number, name):
# message = client.messages.create(body=message_body, from_='+15122014739', to=i[1])
# print(message.sid)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'saveProject.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import os
from PyQt5.QtWidgets import *
class NewProject(QtWidgets.QDialog):
    """Dialog for naming a project and creating its on-disk folder tree.

    Clicking "Create Project" builds "Project Data/<name>/" with the CE,
    Builder, Runner and Packager sub-trees; a duplicate name pops an alert.
    """

    # Sub-folders created under "Project Data/<project name>/".
    # Parents (CE/, Builder/, ...) are created implicitly by os.makedirs.
    _SUBDIRS = (
        os.path.join("CE", "CE_logs"),
        os.path.join("CE", "Relationships"),
        os.path.join("Builder", "Builder_logs"),
        os.path.join("Builder", "Dependencies"),
        os.path.join("Runner", "Scripts"),
        os.path.join("Runner", "Runner_logs"),
        os.path.join("Packager", "Packager_logs"),
    )

    def __init__(self, Form):
        """Build the dialog widgets on *Form* and wire the button actions."""
        super(NewProject, self).__init__()
        Form.setObjectName("Form")
        Form.resize(459, 204)
        Form.setMinimumSize(QtCore.QSize(450, 200))
        Form.setStyleSheet("background-color: white")
        # "Project Name:" label
        self.Projectlabel = QtWidgets.QLabel(Form)
        self.Projectlabel.setGeometry(QtCore.QRect(40, 20, 191, 31))
        font = QtGui.QFont()
        font.setFamily("MS Sans Serif")
        font.setPointSize(18)
        self.Projectlabel.setFont(font)
        self.Projectlabel.setStyleSheet("color: black;")
        self.Projectlabel.setObjectName("Projectlabel")
        # Project-name input field
        self.ProjectName = QtWidgets.QLineEdit(Form)
        self.ProjectName.setGeometry(QtCore.QRect(40, 60, 351, 41))
        self.ProjectName.setStyleSheet("color: black;")
        self.ProjectName.setObjectName("ProjectName")
        # Create / Cancel buttons
        self.CreateButton = QtWidgets.QPushButton(Form)
        self.CreateButton.setGeometry(QtCore.QRect(40, 120, 161, 41))
        font.setPointSize(11)
        self.CreateButton.setFont(font)
        self.CreateButton.setStyleSheet("background-color: #13333F; color: #FFFFFF; border-radius: 5px; padding: 8px 0px;")
        self.CreateButton.setObjectName("CreateButton")
        self.CancelButton = QtWidgets.QPushButton(Form)
        self.CancelButton.setGeometry(QtCore.QRect(230, 120, 161, 41))
        font.setPointSize(11)
        self.CancelButton.setFont(font)
        self.CancelButton.setStyleSheet("background-color: #13333F; color: #FFFFFF; border-radius: 5px; padding: 8px 0px;")
        self.CancelButton.setObjectName("CancelButton")
        # Error label (hidden until a duplicate name is entered)
        self.error_label = QtWidgets.QLabel(Form)
        self.error_label.setStyleSheet("color: red;")
        self.error_label.setGeometry(QtCore.QRect(40, 165, 190, 18))
        self.error_label.setFont(font)
        self.error_label.setHidden(True)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        # Button actions
        self.CreateButton.clicked.connect(self.create_folders)
        self.CancelButton.clicked.connect(Form.close)
        self.project_sP = ""  # last project name the user tried to create

    def retranslateUi(self, Form):
        """Apply the translatable widget texts."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Save Project"))
        self.Projectlabel.setText(_translate("Form", "Project Name:"))
        self.CreateButton.setText(_translate("Form", "Create Project"))
        self.CancelButton.setText(_translate("Form", "Cancel"))
        self.error_label.setText(_translate("Form", "This project already exists"))

    def create_folders(self):
        """Create the project folder tree, or warn if the name is taken."""
        self.error_label.setHidden(True)
        self.project_sP = self.ProjectName.text()
        project_root = os.path.join("Project Data", self.project_sP)
        # Create the Projects root if it does not exist yet.
        os.makedirs("Project Data", exist_ok=True)
        if os.path.exists(project_root):
            errorMsg = "Project Name "+ "'"+self.ProjectName.text()+"'" +" Already exists. Please input a different name"
            self.alert_msg("Name Error", errorMsg)
            print("Project Name Already Exists")
            return
        print("Creating folders....")
        for sub in self._SUBDIRS:
            os.makedirs(os.path.join(project_root, sub))
        # Placeholder file that later stores the eceld project path.
        eceld_path_file = 'eceld_project_path.txt'
        with open(os.path.join(project_root, "CE", "CE_logs", eceld_path_file), 'w') as fp:
            pass
        self.CancelButton.setText("Continue")
        self.CreateButton.hide()
        print("Project " + self.ProjectName.text() + " was created")

    ###################### Alert Pop-up Window #############################
    def alert_msg(self, title, msg):
        """Show a modal message box with *title* and *msg* (text in red)."""
        print("Error occured: \n\t-Title: %s\n\t-Message: %s\n " %(str(title), str(msg)))
        msgbox = QtWidgets.QMessageBox()
        msgbox.setWindowTitle(str(title))
        msgbox.setText(str(msg))
        msgbox.setStyleSheet("QLabel{ color: red}")
        msgbox.exec_()
'''
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = NewProject(Form)
Form.show()
sys.exit(app.exec_())
''' |
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
from django.views.generic.base import RedirectView
from django.conf.urls.static import static
from esabha import settings
from django.contrib.auth import views as auth_views
# URL routes: admin, django-registration, the social app (also the site
# root), and the stock auth password reset/change flows with app templates.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('registration.backends.default.urls')),
    path('social/', include('social.urls')),
    # Site root redirects to the social app.
    path('', RedirectView.as_view(url="social/")),
    path('password-reset/', auth_views.PasswordResetView.as_view(
        template_name='social/password_reset.html',
        subject_template_name='social/password_reset_subject.txt',
        html_email_template_name='social/password_reset_email.html'),
        name='password_reset'),
    path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(
        template_name='social/password_reset_done.html'), name='password_reset_done'),
    # NOTE(review): the next two routes lack a trailing slash, unlike the
    # rest — confirm this is intentional (APPEND_SLASH may compensate).
    path('password-reset-confirm/<uidb64>/<token>', auth_views.PasswordResetConfirmView.as_view(
        template_name='social/password_reset_confirm.html'), name='password_reset_confirm'),
    path('password-reset-complete', auth_views.PasswordResetCompleteView.as_view(
        template_name='social/password_reset_complete.html'), name='password_reset_complete'),
    path('password-change/', auth_views.PasswordChangeView.as_view(template_name='Profile/password_change.html'),
         name='password_change'),
    path('password-change-done/',
         auth_views.PasswordChangeDoneView.as_view(template_name='Profile/password_change_done.html'),
         name='password_change_done'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# Standard library imports
from time import time
import sys
import argparse
import os
# Third party imports
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
# Local application imports
from mymodules.preprocessing import SignalGenerator
def make_mlp(units):
    """Build a 3-hidden-layer (256-unit ReLU) MLP with `units` output logits."""
    hidden = [keras.layers.Dense(units=256, activation='relu') for _ in range(3)]
    return keras.Sequential(
        [keras.layers.Flatten()] + hidden + [keras.layers.Dense(units=units)]
    )
def make_cnn(units, strides):
    """Build a 3-block Conv/BatchNorm/ReLU classifier ending in GAP + Dense."""
    layers = []
    for _ in range(3):
        layers.append(keras.layers.Conv2D(filters=128, kernel_size=[3, 3], strides=strides, use_bias=False))
        layers.append(keras.layers.BatchNormalization(momentum=0.1))
        layers.append(keras.layers.ReLU())
    layers.append(keras.layers.GlobalAveragePooling2D())
    layers.append(keras.layers.Dense(units=units))
    return keras.Sequential(layers)
def make_ds_cnn(units, strides):
    """Build a depthwise-separable CNN: one standard conv block, two
    depthwise-separable blocks, then GAP and a Dense head."""
    layers = [
        keras.layers.Conv2D(filters=256, kernel_size=[3, 3], strides=strides, use_bias=False),
        keras.layers.BatchNormalization(momentum=0.1),
        keras.layers.ReLU(),
    ]
    for _ in range(2):
        layers += [
            keras.layers.DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], use_bias=False),
            keras.layers.Conv2D(filters=256, kernel_size=[1, 1], strides=[1, 1], use_bias=False),
            keras.layers.BatchNormalization(momentum=0.1),
            keras.layers.ReLU(),
        ]
    layers += [keras.layers.GlobalAveragePooling2D(), keras.layers.Dense(units=units)]
    return keras.Sequential(layers)
def main():
    """Train an MLP/CNN/DS-CNN keyword-spotting model on mini speech commands.

    Command-line flags select the architecture (-m), feature type (--mfcc vs
    STFT), whether a silence class is added (-s), and the epoch count (-e).
    Checkpoints go to ./models[/silence]/<MODEL>_<FEATURE>/.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", '--model', type=str, default="mlp", help='model name')
    parser.add_argument("-e", "--epochs", type=int, default=20, help="Training epochs")
    parser.add_argument('--mfcc', action='store_true', help='use MFCCs')
    parser.add_argument("-s", '--silence', action='store_true', help='add silence')
    parser.add_argument("-v", '--verbose', action='store_true')
    args = parser.parse_args()

    # Reproducibility.
    seed = 42
    tf.random.set_seed(seed)
    np.random.seed(seed)

    # Dataset location and output width depend on the optional silence class.
    if args.silence:
        num_samples = 9000
        units = 9
        data_dir = os.path.join('.', 'data', 'mini_speech_commands_silence')
    else:
        num_samples = 8000
        units = 8
        data_dir = os.path.join('.', 'data', 'mini_speech_commands')

    # 80/10/10 train/val/test split over the shuffled file list.
    filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
    filenames = tf.random.shuffle(filenames)
    train_files = filenames[:int(num_samples*0.8)]
    val_files = filenames[int(num_samples*0.8): int(num_samples*0.9)]
    test_files = filenames[int(num_samples*0.9):]

    labels = np.array(tf.io.gfile.listdir(str(data_dir)))
    labels = labels[labels != 'README.md']

    STFT_OPTIONS = {'stft_frame_length': 256, 'stft_frame_step': 128, 'mfcc': False}
    MFCC_OPTIONS = {'stft_frame_length': 640, 'stft_frame_step': 320, 'mfcc': True,
                    'lower_freq_mel': 20, 'upper_freq_mel': 4000, 'num_mel_bins': 40, 'num_coefficients': 10}

    # Feature front-end: MFCC uses longer frames and asymmetric strides.
    if args.mfcc:
        options = MFCC_OPTIONS
        strides = [2, 1]
        feature_tag = 'MFCC'
    else:
        options = STFT_OPTIONS
        strides = [2, 2]
        feature_tag = 'STFT'

    builders = {'mlp': lambda: make_mlp(units),
                'cnn': lambda: make_cnn(units, strides),
                'dscnn': lambda: make_ds_cnn(units, strides)}
    if args.model not in builders:
        # Originally an unknown model name only printed an error and then
        # crashed later with NameError on `model`/`filepath`; exit cleanly.
        sys.exit("Error: -m, --model [mlp, cnn, dscnn]")
    model = builders[args.model]()

    # Checkpoint directory: ./models[/silence]/<MODEL>_<FEATURE>/.
    # makedirs(exist_ok=True) also fixes the old bug where ./models/silence
    # was only created when ./models itself was missing.
    model_dir = os.path.join('.', 'models')
    if args.silence:
        model_dir = os.path.join(model_dir, 'silence')
    model_dir = os.path.join(model_dir, '{}_{}'.format(args.model.upper(), feature_tag))
    os.makedirs(model_dir, exist_ok=True)
    filepath = os.path.join(model_dir, 'model_{epoch:02d}_{val_sparse_categorical_accuracy:.4f}')

    generator = SignalGenerator(labels, 16000, **options)
    train_dataset = generator.make_dataset(train_files, True)
    val_dataset = generator.make_dataset(val_files, False)
    test_dataset = generator.make_dataset(test_files, False)

    model.compile(optimizer=tf.optimizers.Adam(),
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=keras.metrics.SparseCategoricalAccuracy()
                  )

    # Keep only the best checkpoint by validation accuracy.
    save_model = keras.callbacks.ModelCheckpoint(filepath=filepath,
                                                 monitor="val_sparse_categorical_accuracy",
                                                 save_best_only=True,
                                                 save_weights_only=False,
                                                 save_freq='epoch'
                                                 )
    model.fit(train_dataset, validation_data=val_dataset, epochs=args.epochs, callbacks=save_model)
    test_loss, test_acc = model.evaluate(test_dataset)
    print(f"Test loss: {test_loss:.4f} - Test accuracy: {test_acc:.4f}")
if __name__ == "__main__":
    # Entry point when run as a script.
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 10:37:33 2021
@author: HP
"""
import cv2 as cv
import scipy.fft as spfft
import numpy as np
def dct2D(a):
    """2-D type-II DCT with orthonormal scaling (separable: per-axis DCT)."""
    row_pass = spfft.dct(a, axis=1, norm='ortho')
    return spfft.dct(row_pass, axis=0, norm='ortho')
def idct2D(a):
    """2-D inverse (type-III) DCT with orthonormal scaling, per axis."""
    row_pass = spfft.idct(a, axis=1, norm='ortho')
    return spfft.idct(row_pass, axis=0, norm='ortho')
# Load the image in grayscale and show it.
img11 = cv.imread('KungFuPanda.jpg', 0)
cv.imshow('Kung Fu Panda', img11)
cv.waitKey(0)
# Getting the dimensions of the image
height = img11.shape[0]
width = img11.shape[1]
print('Height: {}'.format(height))
print('Width: {}'.format(width))
# Zero-pad so both dimensions become multiples of 8 (the DCT block size).
# The original computed (dim + 8) % 8, which equals dim % 8 and did NOT
# make the padded size a multiple of 8.
rows_to_add = (-height) % 8
columns_to_add = (-width) % 8
new_height = height + rows_to_add
new_width = width + columns_to_add
padded_image = np.zeros((new_height, new_width), dtype=np.uint8)
padded_image[:height, :width] = img11
dct_of_padded_image = np.zeros(np.shape(padded_image))
cv.imshow('Kung Fu Panda Padded', padded_image)
cv.waitKey(0)
# Forward DCT on 8x8 blocks. The original looped over `height` for BOTH
# axes and ignored the padding, skipping right/bottom blocks of
# non-square images; iterate the padded dimensions instead.
for i in range(0, new_height, 8):
    for j in range(0, new_width, 8):
        dct_of_padded_image[i:(i+8), j:(j+8)] = dct2D(padded_image[i:(i+8), j:(j+8)])
cv.imshow('Kung Fu Panda in DCT Domain', dct_of_padded_image)
cv.waitKey(0)
# Inverse DCT on the same 8x8 blocks reconstructs the padded image.
original_image_after_idct = np.zeros(np.shape(padded_image), dtype=np.uint8)
for k in range(0, new_height, 8):
    for l in range(0, new_width, 8):
        original_image_after_idct[k:(k+8), l:(l+8)] = idct2D(dct_of_padded_image[k:(k+8), l:(l+8)])
cv.imshow('Kung Fu Panda bank in spatial domain from DCT Domain', original_image_after_idct)
cv.waitKey(0)
|
'''1.Write a program that reads a positive integer, n, from the user and then displays the sum of all of the integers from 1 to n.
The sum of the first n positive integers can be computed using the formula: sum = (n)(n + 1) / 2 '''
print('-'*20)
n = int(input("Enter a positive integer: "))  # read n from the user
# n*(n+1) is always even, so floor division is exact and keeps the result
# an int (the original `/` produced a float like 15.0).
total = n * (n + 1) // 2
print("The sum of the first", n, "positive integers", total)  # display the result
print('-'*20)
|
"""
1. Pretraining SimCLR & Proto-typing
2. Training OOD (one-class classification)
3. Evaluation (eval.py?)
python3 train_main_ssl.py --save_dir semi --known_normal 0 --load_path ./pretrained/model_cifar10_0.pth --lr .0001 --dataset cifar10 --optimizer adam --ratio_known_normal 0.05 --ratio_known_outlier 0.05
"""
import sys, os
import utils,json
import torch.nn as nn
import transform_layers as TL
import torch.nn.functional as F
import torchvision.transforms as tr
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import model_csi as C
from dataloader_es import *
from parser import *
#for kmeans++ to cluster the prototypes..
from soyclustering import SphericalKMeans
from scipy import sparse
from randaugment_without_rotation import *
import random,numpy as np
def set_random_seed(seed):
    """Seed the python, numpy and torch (CPU + CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
### helper functions
def checkpoint(f, tag, args, device):
    """Save model weights and prototypes to args.save_dir/tag.

    The model is moved to CPU for serialization and back to `device` after.
    Assumes `f` wraps the real module (e.g. DataParallel) exposing
    `f.module.prototypes`.
    """
    f.cpu()
    state = {
        "model_state_dict": f.state_dict(),
        "prototypes": f.module.prototypes,  # prototypes saved alongside weights
    }
    torch.save(state, os.path.join(args.save_dir, tag))
    f.to(device)
def generate_prototypes(model, valid_loader, n_cluster=100):
    """Cluster normalized SimCLR embeddings of labeled validation samples
    into `n_cluster` spherical k-means centroids and return them
    (L2-normalized) as prototypes on the module-level `device`.

    Only samples with semi_target != -1 (labeled) contribute embeddings.
    NOTE(review): relies on globals `device`, `F`; retries k-means forever
    on any failure (bare except) -- consider a retry cap.
    """
    first = True
    model.eval()
    with torch.no_grad():
        # normal_distance / anomal_distance are never used below.
        normal_distance = []
        anomal_distance = []
        first = True
        for idx, (pos_1, _, _, semi_target, _, _) in enumerate(valid_loader):
            pos_1 = pos_1.cuda(non_blocking=True)
            #feature = model(pos_1) # normalized prototypes
            # SimCLR projection head output, L2-normalized.
            _, outputs_aux = model(pos_1, simclr=True, penultimate=False, shift=False)
            out = outputs_aux['simclr']
            feature = F.normalize(out, dim=-1)
            # Keep only labeled samples (semi_target != -1).
            true_feature = feature[semi_target != -1,:]
            if first:
                totalembed = true_feature
                first = False
            else:
                totalembed = torch.cat((totalembed, true_feature), dim = 0)
    # Set prototypes (k-means++)
    all_out_numpy = totalembed.cpu().numpy() # T * 4 * D
    proto_list = []
    all_out = all_out_numpy.reshape(-1, all_out_numpy.shape[1])
    # SphericalKMeans expects a sparse matrix.
    all_out_sp = sparse.csr_matrix(all_out)
    while True:
        try:
            spherical_kmeans = SphericalKMeans(
                n_clusters=n_cluster,
                max_iter=10,
                verbose=1,
                init='similar_cut'
            )
            spherical_kmeans.fit(all_out_sp)
            break
        except KeyboardInterrupt:
            # Allow the user to abort the retry loop.
            assert 0
        except:
            # Bare except: retries indefinitely on ANY failure.
            print("K-means failure... Retrying")
            continue
    protos = spherical_kmeans.cluster_centers_
    protos = F.normalize(torch.Tensor(protos), dim = -1)
    return protos.to(device)
def get_simclr_augmentation(image_size, resize_fix=True):
    """Build the SimCLR augmentation pipeline: color jitter, random
    grayscale and -- except for imagenet -- a random resized crop.

    Args:
        image_size: target size passed to the random resized crop layer.
        resize_fix: when True (default, matching the previously hard-coded
            `if True:` branch) the crop scale is fixed at 0.54; when False
            it is sampled from (0.54, 1.0).

    Returns:
        nn.Sequential of TL augmentation layers. Reads module-level `args`
        to decide whether the crop layer is included (imagenet applies
        RandomResizedCrop at the PIL level instead).
    """
    resize_scale = (0.54, 1.0)  # resize scaling factor
    if resize_fix:  # use the same scale for every crop
        resize_scale = (0.54, 0.54)
    # Align augmentation
    color_jitter = TL.ColorJitterLayer(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8)
    color_gray = TL.RandomColorGrayLayer(p=0.2)
    resize_crop = TL.RandomResizedCropLayer(scale=resize_scale, size=image_size)
    # Transform define #
    if args.dataset == 'imagenet':  # Using RandomResizedCrop at PIL transform
        transform = nn.Sequential(
            color_jitter,
            color_gray,
        )
    else:
        transform = nn.Sequential(
            color_jitter,
            color_gray,
            resize_crop,
        )
    return transform
def energy_score(img, model):
    """Return the prototype-based energy score and raw logits for `img`.

    The score is logsumexp over the cosine similarities between the
    L2-normalized SimCLR embedding and the model prototypes, scaled by
    1/args.temperature (reads module-level `args`).
    """
    _, outputs_aux = model(img, simclr=True, penultimate=False, shift=False)
    out = outputs_aux['simclr']
    z = F.normalize(out, dim=-1)
    zp = model.module.prototypes
    logits = torch.matmul(z, zp.t()) / args.temperature
    # logsumexp is the numerically stable form of log(sum(exp(logits)));
    # the original exp-then-log could overflow for large logits.
    Le = torch.logsumexp(logits, dim=1)
    return Le, logits
def cal_class_auroc_single(nd1, and1, cls_list):
    """Print the per-anomaly-class AUROC ("px" score) and their average.

    nd1: scores of normal test samples; and1: scores of anomalous test
    samples; cls_list: class id of each entry in `and1`. Reads module-level
    `args` for the normal class id and the class count.
    """
    anomaly_classes = [c for c in range(args.n_classes) if c != args.known_normal]
    cls_arr = np.array(cls_list)
    and_arr = np.array(and1)
    auc_sum = 0
    for anomaly in anomaly_classes:
        # Scores: all normals followed by this class's anomalies.
        scores = nd1 + and_arr[cls_arr == anomaly].tolist()
        labels = [1] * len(nd1) + [0] * (len(scores) - len(nd1))
        auc = roc_auc_score(labels, scores)
        print('---------------------- Evaluation class: {} --------------------------'.format(anomaly))
        print("px\t", auc)
        auc_sum += auc
    average = auc_sum / len(anomaly_classes)
    print('------------------- Evaluation class average --------------------')
    print(len(nd1), len(scores) - len(nd1))
    print("px\t", average)
    print()
    return
def cal_class_auroc(nd1, nd2, and1, and2, ndsum, andsum, ndmul, andmul, cls_list):
    """Print per-anomaly-class AUROC for the four score variants
    (sum, mul, px, pyx) and their averages over all anomaly classes.

    `nd*` hold scores for normal test samples, `and*` for anomalous ones;
    `cls_list` gives the class id of each anomalous entry. Reads
    module-level `args` for the normal class id and class count.
    """
    anomaly_classes = [c for c in range(args.n_classes) if c != args.known_normal]
    cls_arr = np.array(cls_list)
    # (tag, normal scores, anomaly scores) in the original print order.
    variants = [("sum", ndsum, np.array(andsum)),
                ("mul", ndmul, np.array(andmul)),
                ("px", nd1, np.array(and1)),
                ("pyx", nd2, np.array(and2))]
    totals = {tag: 0 for tag, _, _ in variants}
    for anomaly in anomaly_classes:
        mask = cls_arr == anomaly
        n_anom = int(mask.sum())
        print('---------------------- Evaluation class: {} --------------------------'.format(anomaly))
        print(len(ndsum), n_anom)
        for tag, normal_scores, anomaly_scores in variants:
            combined = normal_scores + anomaly_scores[mask].tolist()
            labels = [1] * len(normal_scores) + [0] * (len(combined) - len(normal_scores))
            auc = roc_auc_score(labels, combined)
            print(tag + "\t", auc)
            totals[tag] += auc
        print('----------------------------------------------------------------------')
        print()
    print('------------------- Evaluation class average --------------------')
    print(len(ndsum), n_anom)
    print("sum\t", totals["sum"] / len(anomaly_classes))
    print("mul\t", totals["mul"] / len(anomaly_classes))
    print("px\t", totals["px"] / len(anomaly_classes))
    print("pyx\t", totals["pyx"] / len(anomaly_classes))
    print('----------------------------------------------------------------------')
    print()
    return
def earlystop_score(model, validation_dataset):
    """Mean per-batch AUROC over the fixed validation batches; used as the
    early-stopping criterion.

    Each batch contributes one AUROC computed from the prototype energy of
    both augmented views, with the second view's labels shifted by +1
    (matching the original label construction).
    """
    zp = model.module.prototypes

    def _proto_energy(images):
        # Prototype energy: log-sum-exp of cosine similarities to prototypes.
        _, outputs_aux = model(images, simclr=True, penultimate=False, shift=False)
        normalized = F.normalize(outputs_aux['simclr'], dim=-1)
        sims = torch.matmul(normalized, zp.t())
        return torch.log(torch.exp(sims).sum(dim=1)).tolist()

    batch_aucs = []
    for images1, images2, semi_target in validation_dataset:
        labels = torch.cat([semi_target, semi_target + 1]).tolist()
        # Second view's energies come first, mirroring prob2 + prob.
        scores = _proto_energy(images2) + _proto_energy(images1)
        batch_aucs.append(roc_auc_score(labels, scores))
    print("earlystop_score:", np.mean(batch_aucs))
    return np.mean(batch_aucs)
def test(model, test_loader, train_loader, epoch):
    """Evaluate `model` on `test_loader` and print per-class AUROC.

    Each test image is scored by prototype energy (log-sum-exp of prototype
    similarities), averaging SimCLR embeddings over `args.sample_num`
    augmented views. `train_loader` and `epoch` are currently unused (the
    embedding-similarity scoring that used them is commented out).
    NOTE(review): relies on globals `args`, `simclr_aug`, `set_random_seed`,
    `F`; assumes CUDA is available (`.cuda(non_blocking=True)`).
    """
    model.eval()
    with torch.no_grad():
        # Score accumulators: nd* for normal samples, and* for anomalies
        # (only the *1 "px" variants are filled in; the rest are legacy).
        nd1,nd2,ndsum,ndmul = [],[],[],[]
        and1,and2,andsum,andmul = [],[],[],[]
        cls_list = []
        # first = True
        # for idx, (pos_1, _, _, semi_target, _, _) in enumerate(train_loader):
        #     pos_1 = pos_1.cuda(non_blocking=True)
        #     pos_1 = simclr_aug(pos_1)
        #     #pos_1 = normalize(pos_1)
        #     # feature = model(pos_1)
        #     _, outputs_aux = model(pos_1, simclr=True, penultimate=False, shift=False)
        #     out = outputs_aux['simclr']
        #     feature = F.normalize(out, dim=-1)
        #     true_feature = feature[semi_target != -1,:]
        #     if first:
        #         totalembed = true_feature
        #         first = False
        #     else:
        #         totalembed = torch.cat((totalembed, true_feature), dim = 0)
        for idx, (pos_1, _, target, _, cls, image) in enumerate(test_loader):
            # Samples with target == 1 are treated as anomalies here --
            # TODO(review): confirm the label convention with the loader.
            negative_target = (target == 1).nonzero().squeeze()
            positive_target = (target != 1).nonzero().squeeze()
            # pos_1 = pos_1.cuda(non_blocking=True)
            image = pos_1.cuda(non_blocking=True)
            # Average embeddings over several augmented views (ensemble).
            out_ensemble = []
            for seed in range(args.sample_num):
                set_random_seed(seed) # random seed setting
                pos_1 = simclr_aug(image)
                # pos_1 = normalize(pos_1)
                pos_1 = pos_1.cuda(non_blocking=True)
                # _ , feature = model(pos_1,need_feat = True)
                _, outputs_aux = model(pos_1, simclr=True, penultimate=False, shift=False)
                out_simclr = outputs_aux['simclr']
                out_ensemble.append(out_simclr)
            out = torch.stack(out_ensemble,dim=1).mean(dim=1)
            norm_out = F.normalize(out,dim=-1)
            # Prototype energy score: log-sum-exp of prototype similarities.
            zp = model.module.prototypes
            logits = torch.matmul(norm_out, zp.t())
            Le = torch.log(torch.exp(logits).sum(dim=1))
            # totalsim, _ = torch.matmul(norm_out, totalembed.t()).max(dim = 1)
            # Psum = Le + totalsim
            # Pmul = Le * totalsim
            cls_list.extend(cls[negative_target])
            # shape check guards against 0-dim tensors from squeeze().
            if len(positive_target.shape) != 0:
                nd1.extend(Le[positive_target].tolist())
                # nd2.extend(totalsim[positive_target].tolist())
                # ndsum.extend(Psum[positive_target].tolist())
                # ndmul.extend(Pmul[positive_target].tolist())
            if len(negative_target.shape) != 0:
                and1.extend(Le[negative_target].tolist())
                # and2.extend(totalsim[negative_target].tolist())
                # andsum.extend(Psum[negative_target].tolist())
                # andmul.extend(Pmul[negative_target].tolist())
        cal_class_auroc_single(nd1,and1,cls_list)
        # cal_class_auroc(nd1,nd2,and1,and2,ndsum,andsum,ndmul,andmul,cls_list)
## 0) setting
# Seed every RNG and enable cudnn autotuning for fixed-size batches.
seed = args.seed
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Persist the run configuration (and optionally redirect stdout to a log).
utils.makedirs(args.save_dir)
with open(f'{args.save_dir}/params.txt', 'w') as f: # training setting saving
    json.dump(args.__dict__, f)
if args.print_to_log: #
    sys.stdout = open(f'{args.save_dir}/log.txt', 'w')
args.device = device
## 1) pretraining & prototyping
"""
if args.backbone == 'wide_resnet':
    model = Wide_ResNet(28, 2, args.feature_dim)
elif args.backbone == 'resnet':
    model = ResNet(args.feature_dim)
"""
# Backbone: ResNet-18 CSI classifier wrapped with a (1-way) shift head.
model = C.get_classifier('resnet18', n_classes=10).to(device)
model = C.get_shift_classifer(model, 1).to(device)
if args.dataset == 'cifar10':
    args.image_size = (32, 32, 3)
else:
    # Only cifar10 is supported here.
    raise
if args.load_path != None: # pretrained model loading
    ckpt_dict = torch.load(args.load_path)
    model.load_state_dict(ckpt_dict,strict=True)
else:
    assert False , "Not implemented error: you should give pretrained and prototyped model"
# Wrap in DataParallel on multi-GPU boxes (code below assumes .module).
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
model.to(args.device)
#model.module.prototypes = torch.rand(100, 128) - 0.5
#model.module.prototypes = F.normalize(model.module.prototypes, dim = -1)
# model.module.prototypes = model.module.prototypes.to(args.device)
# print(model.module.prototypes)
train_transform = transforms.Compose([
    transforms.Resize((args.image_size[0], args.image_size[1])),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
test_transform = transforms.Compose([
    transforms.Resize((args.image_size[0], args.image_size[1])),
    transforms.ToTensor(),
])
strong_aug = RandAugmentMC(n=12,m=5)
# dataset loader
total_dataset = load_dataset("./data", normal_class=[args.known_normal], known_outlier_class=args.known_outlier,
                            n_known_outlier_classes=args.n_known_outlier, ratio_known_normal=args.ratio_known_normal,
                            ratio_known_outlier=args.ratio_known_outlier, ratio_pollution=args.ratio_pollution, random_state=None,
                            train_transform=train_transform, test_transform=test_transform,
                            valid_transform=strong_aug)
train_loader, false_valid_loader, valid_loader, test_loader = total_dataset.loaders(batch_size = args.batch_size)
simclr_aug = get_simclr_augmentation(image_size=(32, 32, 3)).to(device)
normalize = TL.NormalizeLayer()
print('setup fixed validation data')
# Pre-augment the validation batches once so the early-stopping score is
# computed on a fixed set (labels forced to 0 = "normal" here).
validation_dataset = []
for i, (pos,pos2,_, semi_target,_,_) in tqdm(enumerate(valid_loader)):
    #images1 = torch.cat([rotation(pos, k) for k in range(rot_num)])
    #images2 = torch.cat([rotation(pos2, k) for k in range(rot_num)])
    images1 = pos.to(device)
    images2 = pos2.to(device)
    images1 = simclr_aug(images1)
    images2 = simclr_aug(images2)
    val_semi_target = torch.zeros(len(semi_target), dtype=torch.int64)
    validation_dataset.append([images1,images2,val_semi_target])
# Prototype initialization: spherical k-means++ over labeled embeddings,
# or random unit vectors.
if args.set_initial_kmeanspp:
    print("Prototype: initialize kmeans pp")
    model.module.prototypes = generate_prototypes(model, false_valid_loader, n_cluster=args.n_cluster)
else:
    print("Prototype: initialize random")
    # model.module.prototypes = model.module.prototypes.to(args.device)
    model.module.prototypes = torch.rand(args.n_cluster, 128) - 0.5
    model.module.prototypes = F.normalize(model.module.prototypes, dim = -1)
    model.module.prototypes = model.module.prototypes.to(args.device)
params = model.parameters()
if args.optimizer == "adam":
    optim = torch.optim.Adam(params, lr=args.lr, betas=[.9, .999], weight_decay=args.weight_decay)
elif args.optimizer =="SGD":
    optim = torch.optim.SGD(params, lr=args.lr, momentum=.9, weight_decay=args.weight_decay)
import copy
# Evaluation before training
test(model, test_loader, train_loader, -1)
earlystop_trace = []
end_train = False
max_earlystop_auroc = 0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
hflip = TL.HorizontalFlipLayer().to(device)
# Energy upper bound log(n_cluster) + 1/T used in the training loss.
# NOTE(review): this rebinds `C`, shadowing the model_csi module alias
# imported above -- C.get_classifier is unusable after this point.
C = (torch.log(torch.Tensor([args.n_cluster])) + 1/args.temperature).to(device)
args.sample_num = 1
for epoch in range(args.n_epochs):
model.train()
# for m in model.modules():
# if isinstance(m, nn.BatchNorm2d):
# m.eval()
# adjust learning rate
# if epoch in args.decay_epochs:
# for param_group in optim.param_groups:
# new_lr = param_group['lr'] * args.decay_rate
# param_group['lr'] = new_lr
# print("Decaying lr to {}".format(new_lr))
# training
losses = []
for i, (pos, _, _, semi_target, _, _) in tqdm(enumerate(train_loader)):
#pos1, pos2 = pos1.to(args.device), pos2.to(args.device)
pos = pos.to(args.device)
semi_target = semi_target.to(args.device)
semi_target = semi_target.repeat(2)
#La = similarity_loss(pos1, pos2, model)
pos_1, pos_2 = hflip(pos.repeat(2, 1, 1, 1)).chunk(2)
pos = torch.cat([pos_1,pos_2],dim=0)
pos = simclr_aug(pos)
# pos = normalize(pos)
score, logits1 = energy_score(pos, model)
# _, logits2 = energy_score(pos2, model)
Le = torch.where(semi_target == -1, (C - score) ** -1, score ** -1).mean() ## Le inverse
# Le = torch.where(semi_target == -1, score, score ** -1).mean()
#x_out = F.softmax(logits1, dim=-1)
#x_tf_out = F.softmax(logits2, dim=-1)
#La = entropyloss(x_out,x_tf_out)
# _, outputs_aux = model(img, simclr=True, penultimate=False, shift=False)
# out = outputs_aux['simclr']
# z = F.normalize(out, dim=-1)
# zp = model.module.prototypes
# logits = torch.matmul(z, zp.t()) / args.temperature
# Le = torch.log(torch.exp(logits).sum(dim=1))
# Lr =
L = Le
optim.zero_grad()
L.backward()
optim.step()
losses.append(L.cpu().detach())
model.eval()
with torch.no_grad():
earlystop_auroc = earlystop_score(model,validation_dataset)
earlystop_trace.append(earlystop_auroc)
print('[{}]epoch loss:'.format(epoch), np.mean(losses))
print('[{}]earlystop loss:'.format(epoch),earlystop_auroc)
# if epoch % args.ckpt_every == 0 or epoch == args.n_epochs - 1:
# checkpoint(model, f'ckpt_ssl_{epoch}.pt', args, args.device)
if max_earlystop_auroc < earlystop_auroc:
max_earlystop_auroc = earlystop_auroc
best_epoch = epoch
best_model = copy.deepcopy(model)
# if epoch>50:
# if earlystop_trace[-4] < max_earlystop_auroc and earlystop_trace[-3] < max_earlystop_auroc and earlystop_trace[-2] < max_earlystop_auroc:
# end_train = True
# if end_train:
# checkpoint(model, f'ckpt_ssl_{epoch}.pt', args, args.device)
# print("trainin ended")
# break
test(model, test_loader, train_loader, epoch) # we do not test them
if (epoch%1) ==0:
model.eval()
with torch.no_grad():
print("redefine prototypes")
model.module.prototypes = generate_prototypes(model, false_valid_loader, n_cluster=args.n_cluster)
print("best epoch:",best_epoch,"best auroc:",max_earlystop_auroc)
test(best_model, test_loader, train_loader, epoch) # we do not test them
checkpoint(model, f'ckpt_ssl_{epoch}_best.pt', args, args.device)
|
"""Tiny Flask front-end rendering the good/bad news lists from test.myfunction()."""
import test  # NOTE(review): local module; shadows the stdlib `test` package — confirm intended
from flask import Flask, Response
from flask import render_template
from flask import url_for

app = Flask(__name__)

# myfunction() returns a pair: index 0 is the "bad" payload, index 1 the "good" one.
x = test.myfunction()
g = x[1]
b = x[0]


@app.route("/")
def hello():
    """Render the landing page."""
    return render_template('index.html')


@app.route("/good")
def good():
    """Render the good-news page with the `good` payload."""
    # BUG FIX: removed leftover debug `print(type(g))` that polluted stdout
    # on every request.
    return render_template('goodNews.html', good=g)


@app.route("/bad")
def bad():
    """Render the bad-news page with the `bad` payload."""
    return render_template('badNews.html', bad=b)


if __name__ == "__main__":
    app.run()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from urllib import quote_plus
from django.contrib import messages
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from blog.models import Post
from blog.form import PostForm
# Create your views here.
def post_create(request):
    """Render and process the post-creation form.

    Only authenticated staff superusers may create posts; everyone else gets
    a 404 so the endpoint's existence is not revealed.
    """
    # BUG FIX: `request.user.superuser` is not a Django User attribute and
    # raised AttributeError for every request; the correct flag is
    # `is_superuser`.
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    if not request.user.is_authenticated():
        raise Http404
    form = PostForm(request.POST or None, request.FILES or None)
    context = {'form': form}
    if form.is_valid():
        instance = form.save(commit=False)
        instance.user = request.user  # stamp the post with its author
        instance.save()
        messages.success(request, "Successfully created post")
        return HttpResponseRedirect(instance.get_absolute_url())
    return render(request, "post_form.html", context)
def post_detail(request, slug=None):
    """Show a single post looked up by slug (404 when absent)."""
    post = get_object_or_404(Post, slug=slug)
    # URL-encoded copy of the content for share links
    share_string = quote_plus(post.content)
    return render(request, "post_detail.html", {
        "title": "details",
        "instance": post,
        "share_string": share_string,
    })
def post_list(request):
    """List all posts, paginated three per page."""
    page_request_var = 'page'
    paginator = Paginator(Post.objects.all(), 3)
    requested = request.GET.get(page_request_var)
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Non-integer page requested: fall back to the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page (e.g. 9999): serve the last page instead.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, "index.html", {
        "object_list": page_obj,
        "title": "details",
        "page_request_var": page_request_var,
    })
def post_update(request, slug=None):
    """Render and process the edit form for an existing post."""
    instance = get_object_or_404(Post, slug=slug)
    form = PostForm(request.POST or None, request.FILES or None, instance=instance)
    # BUG FIX: removed a dead `context = {'form': form}` assignment that was
    # unconditionally overwritten before use.
    if form.is_valid():
        instance = form.save(commit=False)
        instance.save()
        return HttpResponseRedirect(instance.get_absolute_url())
    context = {
        "title": "details",
        "instance": instance,
        "form": form
    }
    return render(request, "post_form.html", context)
def post_delete(request, slug):
    """Delete the post identified by slug, then return to the list view.

    NOTE(review): no auth or POST-method check here, unlike post_create —
    confirm access control is enforced elsewhere (e.g. URLconf/middleware).
    """
    post = get_object_or_404(Post, slug=slug)
    post.delete()
    return redirect("blog:list")
from IPython.core.magic import (register_line_magic, register_cell_magic,
register_line_cell_magic)
import cogniac
from tabulate import tabulate
from datetime import datetime
import os
import time_range
from sys import argv
# Pre-register the global connection (cc) and subject namespace (S) in
# builtins so the magics below can rebind them across IPython scopes.
__builtins__['cc'] = None
__builtins__['S'] = None
def print_tenants(tenants):
tenants.sort(key=lambda x: x['name'])
data = [['tenant_id', 'name']]
for tenant in tenants:
data.append([tenant['tenant_id'], tenant['name']])
print tabulate(data, headers='firstrow')
@register_line_magic
def tenants(line):
    """List every tenant this user is authorized for (ipython %tenants)."""
    tenants = cogniac.CogniacConnection.get_all_authorized_tenants()['tenants']
    print_tenants(tenants)
print "added ipython magic %tenants"
class Subjects(object):
    """Namespace object exposing every Cogniac subject as an attribute,
    enabling S.<tab> autocompletion in ipython."""
    def __init__(self):
        count = 0
        for subject in cc.get_all_subjects():
            key = subject.subject_uid.replace('-', '_')  # workaround for some unclean legacy subject_uid
            self.__setattr__(key, subject)
            count += 1
        print 'added', count, 'subjects'
@register_line_magic
def authenticate(tenant_id):
    """
    authenticate to the specified tenant_id
    store CogniacConnection in cc object
    load all Cogniac Subjects into S object
    """
    cc = cogniac.CogniacConnection(tenant_id=tenant_id)
    # publish into builtins so the name survives this magic's local scope
    __builtins__['cc'] = cc  # workaround ipython silliness
    print cc.tenant
    print "Adding all subjects to S"
    S = Subjects()
    __builtins__['S'] = S
    print "Type S.<tab> to autocomplete subjects"
print "added ipython magic %authenticate"
def print_detections(detections):
    """Tabulate detection dicts sorted by creation time; timestamps are
    rendered as local 'YYYY-MM-DD HH:MM:SS' strings."""
    # remove None values from dict
    detections = [{k: v for k, v in d.items() if v is not None} for d in detections]
    detections.sort(key=lambda x: x['created_at'])
    for d in detections:
        # drop the activation field (presumably too bulky for tabular output)
        if 'activation' in d:
            del d['activation']
        value = datetime.fromtimestamp(d['created_at'])
        d['created_at'] = value.strftime('%Y-%m-%d %H:%M:%S')
    print tabulate(detections, headers='keys')
@register_line_magic
def media_detections(media_id):
"print media detections for the specified media_id"
try:
media = cc.get_media(media_id)
except:
print "media_id %s not found" % media_id
return
print_detections(media.detections())
print "added ipython magic %media_detections"
def print_subjects(media_subjects):
    """Tabulate the subject halves of media-subject associations, sorted by
    their update time (rendered as local time strings)."""
    subjects = [ms['subject'] for ms in media_subjects]
    # remove None values so tabulate columns stay clean
    subjects = [{k: v for k, v in s.items() if v is not None} for s in subjects]
    subjects.sort(key=lambda x: x['updated_at'])
    for s in subjects:
        if 'timestamp' in s:
            del s['timestamp']
        value = datetime.fromtimestamp(s['updated_at'])
        s['updated_at'] = value.strftime('%Y-%m-%d %H:%M:%S')
    print tabulate(subjects, headers='keys')
@register_line_magic
def media_subjects(media_id):
"print subject media associations for the specified media_id"
try:
media = cc.get_media(media_id)
except:
print "media_id %s not found" % media_id
return
print_subjects(media.subjects())
print "added ipython magic %media_subjects"
@register_line_magic
def users(line):
def user_to_list(u):
try:
last = datetime.fromtimestamp(float(u['last_auth']))
last = last.strftime('%Y-%m-%d %H:%M:%S')
except:
last = ""
return [u['given_name'] + " " + u['surname'], u['email'], u['role'], last, u['user_id']]
print "Users for tenant %s (%s)" % (cc.tenant.name, cc.tenant.tenant_id)
users = cc.tenant.users()
users.sort(key=lambda x: x['last_auth'])
data = [['name', 'email', 'tenant_role', 'last_auth', 'user_id']]
for user in users:
data.append(user_to_list(user))
print tabulate(data, headers='firstrow')
@register_line_magic
def timeranges(line):
    """
    print list of valid timeframe selector strings, their corresponding current values, and description
    """
    # delegates entirely to the time_range helper module
    time_range.help()
print "added ipython magic %timeranges"
def tenant_usage_convert_for_display(ur):
    """Mutate a usage record in place for display: humanize the start/end
    timestamps, add app_count and media_GB, and default media_count to 0."""
    value = datetime.fromtimestamp(ur['start_time'])
    ur['start_time'] = value.strftime('%Y-%m-%d %H:%M:%S')
    value = datetime.fromtimestamp(ur['end_time'])
    ur['end_time'] = value.strftime('%Y-%m-%d %H:%M:%S')
    ur['app_count'] = len(ur['active_model_apps'])
    gb = float(ur.get('media_bytes', 0)) / 1e9
    # one decimal place below 1000 GB, whole numbers above
    if gb < 1000:
        ur['media_GB'] = round(gb, 1)
    else:
        ur['media_GB'] = round(gb, 0)
    if 'media_count' not in ur:
        ur['media_count'] = 0
@register_line_magic
def usage(line):
    """Print a usage report for the current tenant: %usage [timeframe [period]]."""
    # Accept "<timeframe>", "<timeframe> <period>", or nothing (day/hour).
    if line in time_range.timeframes:
        timerange_str = line
        period = None
    elif ' ' in line:
        timerange_str, period = line.split(' ')
    else:
        timerange_str, period = "day", "hour"
    start_time, end_time = time_range.start_end_times(timerange_str)
    print 'tenant id:\t', cc.tenant.tenant_id
    print 'tenant name:\t', cc.tenant.name
    print 'report start\t', datetime.fromtimestamp(start_time).strftime('%Y-%m-%d %H:%M:%S')
    print 'report end\t', datetime.fromtimestamp(end_time).strftime('%Y-%m-%d %H:%M:%S')
    print
    # Pick a reporting period proportional to the window length when the
    # user did not specify one.
    if not period:
        if end_time - start_time >= (60*60*24*7):
            period = "day"
        elif end_time - start_time >= (60*60*6):
            period = 'hour'
        else:
            period = '15min'
    usage = list(cc.tenant.usage(start_time, end_time, period=period))
    for ur in usage:
        tenant_usage_convert_for_display(ur)
    tenant_headers = ['start_time', 'end_time', 'amu', 'model_outputs', 'user_feedback', 'other_outputs', 'app_count', 'media_count', 'media_GB']
    data = [tenant_headers] + [[d.get(h) for h in tenant_headers] for d in usage]
    print tabulate(data, headers='firstrow')
print "added ipython magic %usage"
@register_line_magic
def login(tname):
    """
    attempt to match user supplied partial tenant name or tenant_id
    authenticate with the matched tenant
    """
    all_tenants = cogniac.CogniacConnection.get_all_authorized_tenants()['tenants']
    needle = tname.lower()
    matches = [t for t in all_tenants
               if needle in t['name'].lower() or tname in t['tenant_id']]
    if len(matches) == 1:
        # unambiguous match: authenticate straight away
        authenticate(matches[0]['tenant_id'])
    elif matches:
        # several candidates: show only the matches
        print_tenants(matches)
    else:
        # nothing matched: show everything available
        print_tenants(all_tenants)
# Startup: authenticate directly when COG_TENANT is set in the environment,
# otherwise try to match the last command-line argument as a tenant.
if 'COG_TENANT' in os.environ:
    tenant_id = os.environ['COG_TENANT']
    print "found COG_TENANT %s" % tenant_id
    authenticate(tenant_id)
else:
    login(argv[-1])
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from send.models.Infomation import Information
from send.serializers.serializer import InformationSerializer
class InformationView(APIView):
    """List all Information records (GET) or create a new one (POST)."""

    def get(self, request, format=None):
        """Return every Information row, serialized."""
        queryset = Information.objects.all()
        serialized = InformationSerializer(queryset, many=True)
        return Response(serialized.data)

    def post(self, request, format=None):
        """Validate and persist a new Information record.

        Returns 201 with the saved payload, or 400 with validation errors.
        """
        serializer = InformationSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
import numpy as np
import numpy.testing as npt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.svm import SVR
import forestci as fci
def test_random_forest_error():
    """random_forest_error yields one variance estimate per test sample,
    for every inbag/calibration combination."""
    X = np.array([[5, 2], [5, 5], [3, 3], [6, 4], [6, 6]])
    y = np.array([70, 100, 60, 100, 120])
    train_idx, test_idx = [2, 3, 4], [0, 1]
    X_train, y_train = X[train_idx], y[train_idx]
    X_test, y_test = X[test_idx], y[test_idx]
    n_trees = 4
    forest = RandomForestRegressor(n_estimators=n_trees)
    forest.fit(X_train, y_train)
    inbag = fci.calc_inbag(X_train.shape[0], forest)
    for ib in (inbag, None):
        for calibrate in (True, False):
            V_IJ_unbiased = fci.random_forest_error(
                forest, X_train, X_test, inbag=ib, calibrate=calibrate
            )
            npt.assert_equal(V_IJ_unbiased.shape[0], y_test.shape[0])
    # We cannot calculate inbag from a non-bootstrapped forest, because
    # scikit-learn trees do not store their own sample weights; a
    # user-supplied inbag would still work.
    non_bootstrap_forest = RandomForestRegressor(n_estimators=n_trees, bootstrap=False)
    npt.assert_raises(
        ValueError, fci.calc_inbag, X_train.shape[0], non_bootstrap_forest
    )
def test_bagging_svr_error():
    """random_forest_error also works for a BaggingRegressor of SVRs."""
    X = np.array([[5, 2], [5, 5], [3, 3], [6, 4], [6, 6]])
    y = np.array([70, 100, 60, 100, 120])
    train_idx, test_idx = [2, 3, 4], [0, 1]
    X_train, y_train = X[train_idx], y[train_idx]
    X_test, y_test = X[test_idx], y[test_idx]
    n_trees = 4
    bagger = BaggingRegressor(base_estimator=SVR(), n_estimators=n_trees)
    bagger.fit(X_train, y_train)
    inbag = fci.calc_inbag(X_train.shape[0], bagger)
    for ib in (inbag, None):
        for calibrate in (True, False):
            V_IJ_unbiased = fci.random_forest_error(
                bagger, X_train, X_test, inbag=ib, calibrate=calibrate
            )
            # one variance estimate per test sample
            npt.assert_equal(V_IJ_unbiased.shape[0], y_test.shape[0])
def test_core_computation():
    """_core_computation matches the reference R values, on both the
    default and the memory-constrained code paths."""
    inbag_ex = np.array(
        [[1.0, 2.0, 0.0, 1.0], [1.0, 0.0, 2.0, 0.0], [1.0, 1.0, 1.0, 2.0]]
    )
    X_train_ex = np.array([[3, 3], [6, 4], [6, 6]])
    X_test_ex = np.vstack([np.array([[5, 2], [5, 5]]) for _ in range(1000)])
    pred_centered_ex = np.vstack(
        [np.array([[-20, -20, 10, 30], [-20, 30, -20, 10]]) for _ in range(1000)]
    )
    n_trees = 4
    our_vij = fci._core_computation(
        X_train_ex, X_test_ex, inbag_ex, pred_centered_ex, n_trees
    )
    r_vij = np.concatenate([np.array([112.5, 387.5]) for _ in range(1000)])
    npt.assert_almost_equal(our_vij, r_vij)
    # BUG FIX: the loop variables mc/ml were previously ignored — every
    # iteration ran with memory_constrained=True, memory_limit=0.01, so the
    # unconstrained path was never exercised. Pass them through.
    for mc, ml in zip([True, False], [0.01, None]):
        our_vij = fci._core_computation(
            X_train_ex,
            X_test_ex,
            inbag_ex,
            pred_centered_ex,
            n_trees,
            memory_constrained=mc,
            memory_limit=ml,
            test_mode=True,
        )
        npt.assert_almost_equal(our_vij, r_vij)
def test_bias_correction():
    """_bias_correction reproduces the reference R unbiased values."""
    inbag_ex = np.array(
        [[1.0, 2.0, 0.0, 1.0], [1.0, 0.0, 2.0, 0.0], [1.0, 1.0, 1.0, 2.0]]
    )
    X_train_ex = np.array([[3, 3], [6, 4], [6, 6]])
    X_test_ex = np.array([[5, 2], [5, 5]])
    pred_centered_ex = np.array([[-20, -20, 10, 30], [-20, 30, -20, 10]])
    n_trees = 4
    raw_vij = fci._core_computation(
        X_train_ex, X_test_ex, inbag_ex, pred_centered_ex, n_trees
    )
    unbiased_vij = fci._bias_correction(
        raw_vij, inbag_ex, pred_centered_ex, n_trees
    )
    # values computed independently in R
    npt.assert_almost_equal(unbiased_vij, np.array([-42.1875, 232.8125]))
def test_with_calibration():
    """random_forest_error runs through the calibration path, both with and
    without interpolation (small vs. large sample counts)."""
    # FIX: seed the generator so the test is reproducible rather than flaky.
    np.random.seed(0)
    # Test both with and without interpolation:
    for n in [25 * 5, 205 * 5]:
        X = np.random.rand(n).reshape(n // 5, 5)
        y = np.random.rand(n // 5)
        train_idx = np.arange(int(n // 5 * 0.75))
        test_idx = np.arange(int(n // 5 * 0.75), n // 5)
        y_test = y[test_idx]
        y_train = y[train_idx]
        X_test = X[test_idx]
        X_train = X[train_idx]
        n_trees = 4
        forest = RandomForestRegressor(n_estimators=n_trees)
        forest.fit(X_train, y_train)
        V_IJ_unbiased = fci.random_forest_error(forest, X_train, X_test)
        npt.assert_equal(V_IJ_unbiased.shape[0], y_test.shape[0])
|
# Ask the user for some personal details and echo them back (Dutch prompts).
voornaam = input("Wat is je voornaam?")
naam = input("Wat is je naam?")
leeftijd = input("Wat is je leeftijd?")  # typo fix: "j" -> "je"
email = input("Wat is je email adres?")
print("Je heet {0} met je voornaam en {1} met je familienaam".format(voornaam, naam))
print("Je bent {0} jaar oud.".format(leeftijd))
print("Je email adres is " + email)  # typo fix: "adress" -> "adres"
# code from: https://github.com/tensorflow/models/blob/master/research/slim/slim_walkthrough.ipynb
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Main slim library
from tensorflow.contrib import slim
def regression_model(inputs, is_training=True, scope="deep_regression"):
    """Creates the regression model.
    Args:
        inputs: A node that yields a `Tensor` of size [batch_size, dimensions].
        is_training: Whether or not we're currently training the model.
        scope: An optional variable_op scope for the model.
    Returns:
        predictions: 1-D `Tensor` of shape [batch_size] of responses.
        end_points: A dict of end points representing the hidden layers.
    """
    with tf.variable_scope(scope, 'deep_regression', [inputs]):
        end_points = {}
        # Set the default weight _regularizer and acvitation for each
        # fully_connected layer.
        with slim.arg_scope([slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(0.01)):
            # Creates a fully connected layer from the inputs with 32 hidden
            # units.
            net = slim.fully_connected(inputs, 32, scope='fc1')
            end_points['fc1'] = net
            # Adds a dropout layer to prevent over-fitting; only active when
            # is_training is True (0.8 is slim's keep probability).
            net = slim.dropout(net, 0.8, is_training=is_training)
            # Adds another fully connected layer with 16 hidden units.
            net = slim.fully_connected(net, 16, scope='fc2')
            end_points['fc2'] = net
            # Creates a fully-connected layer with a single hidden unit.
            # Note that the layer is made linear by setting activation_fn=None.
            predictions = slim.fully_connected(
                net, 1, activation_fn=None, scope='prediction')
            end_points['out'] = predictions
            return predictions, end_points
# Build the model once in a throwaway graph purely to inspect its structure.
with tf.Graph().as_default():
    # Dummy placeholders for arbitrary number of 1d inputs and outputs
    inputs = tf.placeholder(tf.float32, shape=(None, 1))
    outputs = tf.placeholder(tf.float32, shape=(None, 1))
    # Build model
    predictions, end_points = regression_model(inputs)
    # Print name and shape of each tensor.
    print("Layers")
    # NOTE: the dict key k is unused — the tensor's own .name is printed.
    for k, v in end_points.items():
        print('name = {}, shape = {}'.format(v.name, v.get_shape()))
    # Print name and shape of parameter nodes (values not yet initialized)
    print("\n")
    print("Parameters")
    for v in slim.get_model_variables():
        print('name = {}, shape = {}'.format(v.name, v.get_shape()))
def produce_batch(batch_size, noise=0.3):
    """Sample a noisy sine-wave regression batch.

    Returns [xs, ys] as float32 arrays of shape (batch_size, 1), where
    xs ~ Uniform(0, 10) and ys = sin(xs) + 5 + Normal(0, noise).
    """
    inputs = 10 * np.random.random(size=(batch_size, 1))
    targets = np.sin(inputs) + 5 + np.random.normal(size=(batch_size, 1), scale=noise)
    return [inputs.astype(np.float32), targets.astype(np.float32)]
# Fixed train/test splits, drawn once at import time.
x_train, y_train = produce_batch(200)
x_test, y_test = produce_batch(200)
def convert_data_to_tensors(x, y):
    """Wrap numpy arrays x, y as constant Tensors and declare their static
    shape as (None, 1); returns (inputs, outputs)."""
    inputs = tf.constant(x)
    inputs.set_shape([None, 1])
    outputs = tf.constant(y)
    outputs.set_shape([None, 1])
    return inputs, outputs
# The following snippet trains the regression model using a mean_squared_error
# loss.
ckpt_dir = './model_dir/'
with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)
    inputs, targets = convert_data_to_tensors(x_train, y_train)
    # Make the model.
    predictions, nodes = regression_model(inputs, is_training=True)
    # Add the loss function to the graph.
    loss = tf.losses.mean_squared_error(
        labels=targets, predictions=predictions)
    # The total loss is the user's loss plus any regularization losses.
    total_loss = tf.losses.get_total_loss()
    # Specify the optimizer and create the train op:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.005)
    train_op = slim.learning.create_train_op(total_loss, optimizer)
    # Run the training inside a session.
    final_loss = slim.learning.train(
        train_op,
        logdir=ckpt_dir,
        number_of_steps=10000,
        save_summaries_secs=5,
        log_every_n_steps=500)
print("Finished training. Last batch loss:", final_loss)
print("Checkpoint saved in %s" % ckpt_dir)
# Demonstrate that multiple loss nodes compose into one total loss.
with tf.Graph().as_default():
    inputs, targets = convert_data_to_tensors(x_train, y_train)
    # NOTE(review): is_training=True here even though this graph only
    # evaluates losses — presumably intentional for the walkthrough; confirm.
    predictions, end_points = regression_model(inputs, is_training=True)
    # Add multiple loss nodes.
    mean_squared_error_loss = tf.losses.mean_squared_error(
        labels=targets, predictions=predictions)
    absolute_difference_loss = tf.losses.absolute_difference(
        predictions, targets)
    # The following two ways to compute the total loss are equivalent
    regularization_loss = tf.add_n(tf.losses.get_regularization_losses())
    total_loss1 = mean_squared_error_loss \
        + absolute_difference_loss \
        + regularization_loss
    # Regularization Loss is included in the total loss by default.
    # This is good for training, but not for testing.
    total_loss2 = tf.losses.get_total_loss(add_regularization_losses=True)
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)
        total_loss1, total_loss2 = sess.run([total_loss1, total_loss2])
        print('Total Loss1: %f' % total_loss1)
        print('Total Loss2: %f' % total_loss2)
        print('Regularization Losses:')
        for loss in tf.losses.get_regularization_losses():
            print(loss)
        print('Loss Functions:')
        for loss in tf.losses.get_losses():
            print(loss)
# Restore the trained parameters and plot predictions on the test split.
with tf.Graph().as_default():
    inputs, targets = convert_data_to_tensors(x_test, y_test)
    # Create the model structure. (Parameters will be loaded below.)
    predictions, end_points = regression_model(inputs, is_training=False)
    global_step = tf.train.get_or_create_global_step()
    # Make a session which restores the old parameters from a checkpoint.
    with tf.train.MonitoredTrainingSession(checkpoint_dir=ckpt_dir) as sess:
        inputs, predictions, targets = sess.run([inputs, predictions, targets])
plt.scatter(inputs, targets, c='r')
plt.scatter(inputs, predictions, c='b')
plt.title('red=true, blue=predicted')
plt.show()
# Evaluate the restored model with streaming metrics.
with tf.Graph().as_default():
    inputs, targets = convert_data_to_tensors(x_test, y_test)
    predictions, end_points = regression_model(inputs, is_training=False)
    # Specify metrics to evaluate:
    names_to_value_nodes, names_to_update_nodes = \
        slim.metrics.aggregate_metric_map({
            'Mean Squared Error': slim.metrics.streaming_mean_squared_error(
                predictions, targets),
            'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(
                predictions, targets)})
    # Make a session which restores the old graph parameters, and then run
    # eval.
    with tf.train.MonitoredTrainingSession(checkpoint_dir=ckpt_dir) as sess:
        metric_values = slim.evaluation.evaluation(
            sess,
            num_evals=1,  # Single pass over data
            eval_op=names_to_update_nodes.values(),
            final_op=names_to_value_nodes.values())
names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values))
for key, value in names_to_values.items():
    print('%s: %f' % (key, value))
def my_cnn(images, num_classes, is_training):  # is_training is not used...
    """Small CNN: two conv+maxpool stages, then two fully connected layers.

    Args:
        images: batch of images (fed to slim.conv2d; NHWC per slim convention).
        num_classes: number of output logits.
        is_training: unused — kept for API symmetry with other model fns.
    Returns:
        Unscaled logits, one row per input image.
    """
    # All max-pools in this scope share a 3x3 kernel with stride 2.
    with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
        net = slim.conv2d(images, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.conv2d(net, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 192)
        # Linear output layer: no activation on the logits.
        net = slim.fully_connected(net, num_classes, activation_fn=None)
        return net
# Run the CNN on a random image batch and show its softmax output.
with tf.Graph().as_default():
    # The model can handle any input size because the first layer is convolutional.
    # The size of the model is determined when image_node is first passed into the my_cnn function.
    # Once the variables are initialized, the size of all the weight matrices is fixed.
    # Because of the fully connected layers, this means that all subsequent images must have the same
    # input size as the first image.
    batch_size, height, width, channels = 3, 28, 28, 3
    images = tf.random_uniform([batch_size, height, width, channels], maxval=1)
    # Create the model.
    num_classes = 10
    logits = my_cnn(images, num_classes, is_training=True)
    probabilities = tf.nn.softmax(logits)
    # Initialize all the variables (including parameters) randomly.
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Run the init_op, evaluate the model outputs and print the results:
        sess.run(init_op)
        probabilities = sess.run(probabilities)
        print('Probabilities Shape:')
        print(probabilities.shape)  # batch_size x num_classes
        print('\nProbabilities:')
        print(probabilities)
        print('\nSumming across all classes (Should equal 1):')
        print(np.sum(probabilities, 1))  # Each row sums to 1
|
import fitsql as fs
from astropy.io import fits
import psycopg2 as p2
# Connect to the local 'stars' database and bulk-load each FITS catalog.
con=p2.connect("dbname='stars' user='postgres' host='localhost'")
cur=con.cursor()
# NOTE(review): con/cur are opened but never used, committed, or closed
# here — presumably fs.upfile opens its own connection; confirm before reuse.
fs.upfile('/home/groberts/astro/DeaconHamblydata/DeaconHambly2004.PrevMemb.fit','stars')
fs.upfile('/home/groberts/astro/DeaconHamblydata/DeaconHambly2004.HighProb.fit','stars')
fs.upfile('/home/groberts/astro/Prosserdata/Prosser1992.Table4.fit','stars')
fs.upfile('/home/groberts/astro/Prosserdata/Prosser1992.Table6.fit','stars')
fs.upfile('/home/groberts/astro/Prosserdata/Prosser1992.Table10.fit','stars')
|
from .ControlLimit import ControlLimit
class UpperControlLimit(ControlLimit):
    """Control limit accepting samples at or below a fixed upper bound."""

    # FIX: removed a redundant __init__ that only delegated to
    # super().__init__(**kwargs) — the inherited constructor is identical.

    def setup(self, **kwargs):
        """Record the upper bound; expects kwargs["limit"] (KeyError if absent)."""
        self.limit = kwargs["limit"]

    def check_sample(self, sample):
        """Return True when sample is within the limit (inclusive)."""
        return sample <= self.limit
|
def chk(no):
    """Print whether `no` is zero, positive, or negative."""
    # FIX: removed un-Pythonic trailing semicolons.
    if no == 0:
        print("Number is Zero")
    elif no > 0:
        print("Number is Positive")
    else:
        print("Number is Negative")
# Simple CLI driver: read an integer from stdin and classify it.
# FIX: removed un-Pythonic trailing semicolons.
print("Enter a number to check")
no = int(input())
chk(no)
|
import os
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import torch
from agent import Agent
from train_eval import evaluate_agent, train_agent
from unityagents import UnityEnvironment
# Toggle: True retrains from scratch; False only evaluates a saved checkpoint.
TRAIN = False
# env_path = "/data/Tennis_Linux_NoVis/Tennis.x86_64"
local_path = os.path.dirname(os.path.abspath(__file__))
local_env_path = local_path + "/Tennis.app"
# NOTE(review): the "{}" placeholder is presumably filled per-network when
# saving/loading — confirm against train_eval/Agent.
checkpoint_pth = local_path + "/scores/checkpoint_env_solved_{}_PER.pth"
# checkpoint_pth = local_path + "/checkpoint_{}.pth"
# Load Env
env = UnityEnvironment(file_name=local_env_path)
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
print(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print("Number of agents:", num_agents)
# size of each action
action_size = brain.vector_action_space_size
print("Size of each action:", action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print(
    "There are {} agents. Each observes a state with length: {}".format(
        states.shape[0], state_size
    )
)
print("The state for the first agent looks like:", states[0])
start_time = datetime.now()
# VARS
# DDPG hyperparameters, including prioritized-experience-replay settings.
config = {
    "buffer_size": int(1e6),  # replay buffer size
    "batch_size": 1024,  # minibatch size
    "replay_initial": 1024,  # initial memory before updating the network
    "gamma": 0.99,  # discount factor
    "lr_actor": 1e-4,  # learning rate
    "lr_critic": 1e-3,  # learning rate of the critic
    "update_every": 2,  # how often to update the network
    "tau": 1e-3,  # soft update
    "weight_decay": 0,  # l2 weight decay
    "net_body": (256, 128, 64),  # hidden layers
    "prioritized": True,
    "per_alpha": 0.6,
    "per_beta": 0.4,
    "per_beta_increment": 0.0001,
    "per_epsilon": 0.0001,
}
agent = Agent(
    state_size=state_size,
    action_size=action_size,
    random_seed=0,
    num_agents=num_agents,
    **config
)
if TRAIN:
    n_episodes = 2600
    print(f"Train Agent for {n_episodes} episodes")
    # min_score=.5 is the solve threshold for the Tennis environment here
    scores = train_agent(agent, env, n_episodes=n_episodes, min_score=.5, num_agents=num_agents)
    time_elapsed = datetime.now() - start_time
    print("\nTime elapsed (hh:mm:ss.ms) {}".format(time_elapsed))
    # Plot the scores
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.plot(np.arange(len(scores)), scores)
    plt.ylabel("Score")
    plt.xlabel("Episode #")
    plt.show()
# Evaluate agent (using solved agent)
print("Evaluate (solved) Agent")
evaluate_agent(
    agent, env, num_agents=num_agents, checkpoint_pth=checkpoint_pth, num_episodes=10, min_score=.5
)
# Close env
env.close()
|
# Scrapes ESPN's 2012-2014 staff preseason rankings
# Wrties the rankings for each year to a csv file
from bs4 import BeautifulSoup
# Scrape ESPN preseason staff rankings (2012-2014) and write one CSV per year.
for year in ['2012', '2013', '2014']:
    # BUG FIX: both file handles previously leaked — the input was opened
    # inline and never closed, and the output's explicit close() would be
    # skipped by any exception. Use context managers for both.
    with open(year + 'main.html', 'r') as html_file:
        infile = html_file.read()
    soup = BeautifulSoup(infile, 'lxml')
    rows = soup.find_all('tr', {'class': 'last'})
    qbs = []
    rbs = []
    wrs = []
    tes = []
    dst = []
    ks = []
    for row in rows:
        name = row.a.contents[0]
        pos = row.find_all('td')[-2].contents[0]
        # The second character of the position string distinguishes every
        # group; kickers fall through to the else branch.
        if pos[1] == 'Q':
            qbs.append(name)
        elif pos[1] == 'R':
            rbs.append(name)
        elif pos[1] == 'W':
            wrs.append(name)
        elif pos[1] == 'T':
            tes.append(name)
        elif pos[1] == 'D':
            dst.append(name)
        else:
            ks.append(name)
    # Write groups in a fixed order: QB, RB, WR, TE, D/ST, K.
    with open(year + 'main.csv', 'w') as outfile:
        list_o_lists = [qbs, rbs, wrs, tes, dst, ks]
        for group in list_o_lists:
            for name in group:
                outfile.write(name + ',\n')
|
#!/usr/bin/env python
"""
Copyright 2014 Wordnik, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Document:
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Wire type of every attribute, keyed by attribute name.
        # Fields marked [RO] are read-only / server-populated:
        #   name              document name (default is typically its filename)
        #   id                unique id of the document, returned as a string [RO]
        #   size              document file size in bytes [RO]
        #   shared            whether this is a Pro-Services shared document [RO]
        #   mimeType          MIME type of the document [RO]
        #   destinationId     unique id of the delivery destination [RO]
        #   accountId         unique id of the document's account [RO]
        #   pageCount         number of pages in the document [RO]
        #   kind              kind of document [RO]
        #   deliveryState     delivery state [RO]
        #   relevantDate      relevant date of the document
        #   actionDate        date an action can be taken (e.g. payment due) [RO]
        #   createdDate       date the document was originally created [RO]
        #   addedDate         date the document was added to FileThis [RO]
        #   deliveredDate     date delivered to its destination [RO]
        #   originalName      original name (typically its filename) [RO]
        #   originalRelevantDate  relevant date determined by fetcher [RO]
        self.swaggerTypes = {
            'name': 'str',
            'id': 'int',
            'size': 'long',
            'shared': 'bool',
            'mimeType': 'str',
            'destinationId': 'int',
            'accountId': 'int',
            'pageCount': 'int',
            'kind': 'str',
            'deliveryState': 'str',
            'relevantDate': 'date-time',
            'actionDate': 'date-time',
            'createdDate': 'date-time',
            'addedDate': 'date-time',
            'deliveredDate': 'date-time',
            'originalName': 'str',
            'originalRelevantDate': 'date-time'
        }
        # Every declared attribute starts unset (None).
        for attribute in self.swaggerTypes:
            setattr(self, attribute, None)
|
# Decimal dominants. Given an array with n keys, design an algorithm to find all values
# that occur more than n/10 times. The expected running time of your algorithm should be linear.
# Hint: determine the (n/10)th largest key using quickselect and check if it occurs more than n/10 times.
# https://massivealgorithms.blogspot.com/2019/03/decimal-dominants.html
# https://stackoverflow.com/questions/9599559/decimal-dominant-number-in-an-array
# [20,19,18,17,16,15,14,14,14,11,10,9,9,9,6,5,4,3,2,1]
# 20/10 = 2 find elements that occur more than twice
# we can narrow our candidates to 9 elements, namely (n/10)-th, (2n/10)-th, … (9n/10)-th elements
# any elements left to (n/10)-th array cannot occur more than n/10 times because there won’t be enough room
# any elements left to (2n/10)-th array cannot occur more than 2n/10 times because there won’t be enough room ...
# repeat for sub array including and to the right side of (n/10)-th largest element
import unittest
class DecimalDominant:
    """Find all values that occur more than n/10 times in an array of n keys.

    Uses recursive three-way (Dutch-national-flag) partitioning: after one
    pass the pivot value occupies a contiguous run arr[lt..gt], so its exact
    frequency is known; sub-ranges too small to hold a dominant are pruned,
    keeping the expected running time linear.
    """

    def __init__(self):
        """Stateless; all work happens in main()."""

    def main(self, arr):
        """Return the list of decimal dominants of arr (arr is partitioned in place)."""
        dominants = []
        self.three_way_partition(arr, 0, len(arr) - 1, dominants)
        return dominants

    def three_way_partition(self, arr, start, end, dominants):
        """Partition arr[start..end] around arr[start], record the pivot if it
        is dominant, then recurse into the < and > sub-ranges.

        Bug fix: the original recursed whenever the range size was
        >= len(arr) // 10, which for len(arr) < 10 (threshold 0) admitted
        empty ranges and recursed on (start, start-1) forever.  Requiring at
        least one element terminates; behaviour for len(arr) >= 10 is
        unchanged because the threshold is already >= 1 there.
        """
        threshold = len(arr) // 10
        if (end - start + 1) >= max(1, threshold):
            lt = start          # arr[start..lt-1] < pivot when the pass ends
            gt = end            # arr[gt+1..end]   > pivot when the pass ends
            i = lt
            while True:
                if arr[i] < arr[lt]:
                    arr[lt], arr[i] = arr[i], arr[lt]
                    i += 1
                    lt += 1
                elif arr[i] > arr[lt]:
                    arr[gt], arr[i] = arr[i], arr[gt]
                    gt -= 1
                else:
                    i += 1
                if i > gt:
                    break
            # The pivot occurs exactly gt - lt + 1 times in the whole array.
            if gt - lt + 1 > threshold:
                dominants.append(arr[lt])
            self.three_way_partition(arr, start, lt - 1, dominants)
            self.three_way_partition(arr, gt + 1, end, dominants)
class Test(unittest.TestCase):
    """Unit tests for DecimalDominant."""

    def test1(self):
        """n = 20: only the value 14 occurs more than n/10 = 2 times."""
        finder = DecimalDominant()
        result = finder.main([1, 11, 7, 17, 16, 5, 14, 3, 14, 19,
                              10, 9, 8, 18, 6, 15, 4, 14, 2, 20])
        self.assertListEqual(result, [14])

    def test2(self):
        """n = 10: both 1 and 2 occur more than n/10 = 1 time."""
        finder = DecimalDominant()
        result = finder.main([1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
        self.assertListEqual(result, [1, 2])
# Run the test suite only when executed as a script, not on import.
if __name__ == '__main__':
    unittest.main(verbosity=2)
from metric import Metric
from util import Util
import numpy as np
import partition_comparison
class RIMeasure(Metric):
'''
This calculates rand index.
'''
@classmethod
def apply(cls, label_array):
'''
Apply the metric to a label_array with shape Y,X,Z.
'''
vi_sum = 0.
done = 0.
print 'Calulating for',label_array.shape[0],'slices.'
for z in range(label_array.shape[0]-1):
vi_sum += partition_comparison.rand_index(label_array[z,:,:].astype(np.uint64).ravel(),
label_array[z+1,:,:].astype(np.uint64).ravel())
done += 1
percentage = int((done / (label_array.shape[0]-1))*100)
if (percentage % 10 == 0):
print percentage, '% done'
vi_sum /= label_array.shape[0]
return vi_sum
|
#
# @lc app=leetcode id=199 lang=python3
#
# [199] Binary Tree Right Side View
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rightSideView(self, root: "TreeNode") -> "List[int]":
        """Return the values visible from the right side of a binary tree,
        top to bottom (the right-most node of every depth level).

        For each subtree the right-side view is built recursively; when the
        left subtree is deeper than the right, its extra deep levels become
        visible, so the right view is overlaid onto the left one.

        Fixes: removed a leftover debug print; annotations are quoted
        because TreeNode/List are supplied by the judge environment, not
        this module; dropped the unused accumulator parameter.
        """
        if root is None:
            return []

        def helper(node):
            # Right-side view of the subtree rooted at `node`.
            if node is None:
                return []
            right = helper(node.right)
            left = helper(node.left)
            if len(right) >= len(left):
                # The right subtree hides the left one entirely.
                view = right
            else:
                # Left subtree is deeper: the right view wins where it
                # exists; the left view fills the deeper levels.
                view = right + left[len(right):]
            return [node.val] + view

        return helper(root)
# @lc code=end
|
import numpy as np
from matplotlib import pyplot as plt
from q5_1 import *
import ipdb
import math
def calc_prob(x, lambda_, func):
    """Unnormalised Boltzmann-style weight exp(lambda_ * func(x))."""
    score = func(x)
    return np.exp(lambda_ * score)
def MH_algo(lambda_, x, func, var):
    """Metropolis-Hastings sampling of p(x) ~ exp(lambda_*func(x)) on [-5, 5].

    Runs 500 steps of a Gaussian random-walk proposal (variance `var`)
    started at `x`.  Returns (sample maximising func, mean of the samples,
    mean of func over the samples).
    """
    X = []
    x_ = x * 1.0
    for i in range(500):
        # Propose from N(x_, var); redraw until the proposal is inside [-5, 5].
        while True:
            y = np.random.normal(x_, var ** 0.5, 1)[0]
            if -5 <= y <= 5:
                break
        # Acceptance probability for a symmetric proposal.
        alpha = np.amin([calc_prob(y, lambda_, func) / calc_prob(x_, lambda_, func), 1])
        if math.isnan(alpha):
            # Replaces the original ipdb.set_trace() debug hook: a NaN ratio
            # (0/0 or overflow in the unnormalised weights) is a hard error.
            raise FloatingPointError('NaN acceptance ratio at lambda={}'.format(lambda_))
        x_new = np.random.choice([y, x_], p=[alpha, 1 - alpha])
        X.append(x_new * 1.0)
        # Bug fix: advance the chain -- the original never updated x_, so
        # every proposal was drawn around the starting point.
        x_ = x_new
    F = func(np.array(X))
    return X[np.argmax(F)], np.mean(X), np.mean(F)
def SA_algo(func, lambda_0, eta, var):
    """Simulated-annealing driver: run MH sampling while the inverse
    temperature lambda grows geometrically by (1 + eta) until it exceeds
    100.  Returns the final point and the per-stage histories of the
    sample means and mean function values."""
    x = np.random.uniform(-5, 5, 1)[0]
    lambda_ = lambda_0
    X_means = []
    F_means = []
    while lambda_ <= 100:
        lambda_ = (1 + eta) * lambda_
        x_new, x_mean, F_mean = MH_algo(lambda_, x, func, var)
        x = x_new * 1.0
        X_means.append(x_mean * 1.0)
        F_means.append(F_mean * 1.0)
    return x, X_means, F_means
def plot_prop(data, prop_name):
    """Plot `data` against iteration index and save it as q5_3_<prop_name>.png."""
    figure = plt.figure(figsize=(16, 9))
    plt.plot(data)
    plt.xlabel("t")
    plt.ylabel(prop_name)
    plt.savefig("q5_3_{}.png".format(prop_name))
    plt.close()
def main():
    """Entry point: anneal f (imported from q5_1) and plot the statistics."""
    lambda_0, eta, var = 0.01, 0.1, 0.1
    x_optim, X, F = SA_algo(f, lambda_0, eta, var)
    print(x_optim)
    plot_prop(X, "sampled_points_mean")
    plot_prop(F, "sampled_function_value_mean")
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
from django.contrib import admin
from . import models
from django.utils.safestring import mark_safe
# Register your models here.
@admin.register(models.Categorie)
class CategorieAdmin(admin.ModelAdmin):
    """Admin for Categorie with bulk activate/deactivate actions."""
    list_display = ('nom', 'date_add', 'date_upd', 'status',)
    list_filter = ('date_add', 'date_upd', 'status',)
    # Bug fix: Django's option is `search_fields` (and expects a tuple);
    # `list_search = ('nom')` was a silently-ignored plain string attribute.
    search_fields = ('nom',)
    ordering = ('nom',)
    list_per_page = 5
    actions = ('active', 'desactive',)

    def active(self, request, queryset):
        """Bulk action: mark the selected categories active."""
        queryset.update(status=True)
        self.message_user(request, 'Activer une Categorie')
    active.short_description = 'active Categorie'

    def desactive(self, request, queryset):
        """Bulk action: mark the selected categories inactive.

        Bug fix: admin actions are called as (modeladmin, request, queryset);
        the original signature swapped the last two, so update() ran on the
        request object."""
        queryset.update(status=False)
        self.message_user(request, 'Desactiver une Categorie')
    desactive.short_description = 'desactive Categorie'
@admin.register(models.Plat)
class PlatAdmin(admin.ModelAdmin):
    """Admin for Plat: changelist with image thumbnails and bulk (de)activation."""
    list_display = ('categorie', 'nom', 'description', 'prix', 'date_add', 'date_upd', 'status', 'view_image',)
    list_filter = ('date_add', 'date_upd', 'status',)
    # Bug fix: Django's option is `search_fields` (a tuple); `list_search`
    # holding a plain string was silently ignored.
    search_fields = ('nom',)
    ordering = ('nom',)
    list_per_page = 5
    readonly_fields = ['detail_image']
    actions = ('active', 'desactive',)

    def active(self, request, queryset):
        """Bulk action: mark the selected plats active."""
        queryset.update(status=True)
        self.message_user(request, 'Activer une Plat')
    active.short_description = 'active Plat'

    def desactive(self, request, queryset):
        """Bulk action: mark the selected plats inactive.

        Bug fix: the (request, queryset) parameters were swapped, so the
        bulk update ran against the request object."""
        queryset.update(status=False)
        self.message_user(request, 'Desactiver une Plat')
    desactive.short_description = 'desactive Plat'

    def view_image(self, obj):
        """Thumbnail of the plat's image for the changelist."""
        return mark_safe('<img src = "{url}" width ="100px" height ="100px" />'.format(url = obj.image.url))

    def detail_image(self, obj):
        """Thumbnail rendered read-only on the detail page."""
        return mark_safe('<img src = "{url}" width ="100px" height ="100px" />'.format(url = obj.image.url))
@admin.register(models.Testimony)
class TestimonyAdmin(admin.ModelAdmin):
    """Admin for Testimony entries (social links + image thumbnail)."""
    list_display = ('nom', 'profession', 'description', 'fb', 'tweet', 'instagram', 'date_add', 'date_upd', 'status', 'view_image')
    list_filter = ('date_add', 'date_upd', 'status',)
    # Bug fix: `search_fields` (a tuple) replaces the ignored `list_search` string.
    search_fields = ('nom',)
    ordering = ('nom',)
    list_per_page = 5
    actions = ('active', 'desactive',)

    def active(self, request, queryset):
        """Bulk action: mark the selected testimonies active."""
        queryset.update(status=True)
        self.message_user(request, 'Activer une Testimony')
    active.short_description = 'active Testimony'

    def desactive(self, request, queryset):
        """Bulk action: mark the selected testimonies inactive."""
        queryset.update(status=False)
        self.message_user(request, 'Desactiver une Testimony')
    desactive.short_description = 'desactive Testimony'

    def view_image(self, obj):
        """Thumbnail for the changelist."""
        return mark_safe('<img src = "{url}" width ="100px" height ="100px" />'.format(url = obj.image.url))

    def detail_image(self, obj):
        """Thumbnail for the detail page.
        NOTE(review): unused unless added to readonly_fields (cf. PlatAdmin)."""
        return mark_safe('<img src = "{url}" width ="100px" height ="100px" />'.format(url = obj.image.url))
@admin.register(models.Service)
class ServiceAdmin(admin.ModelAdmin):
    """Admin for Service entries with bulk (de)activation."""
    list_display = ('titre', 'icon', 'description', 'date_add', 'date_upd', 'status',)
    list_filter = ('date_add', 'date_upd', 'status',)
    # Bug fix: `search_fields` (a tuple) replaces the ignored `list_search` string.
    search_fields = ('titre',)
    ordering = ('titre',)
    list_per_page = 5
    actions = ('active', 'desactive',)

    def active(self, request, queryset):
        """Bulk action: mark the selected services active."""
        queryset.update(status=True)
        self.message_user(request, 'Activer une Service')
    active.short_description = 'active Service'

    def desactive(self, request, queryset):
        """Bulk action: mark the selected services inactive.

        Bug fix: the (request, queryset) parameters were swapped, so the
        bulk update ran against the request object."""
        queryset.update(status=False)
        self.message_user(request, 'Desactiver une Service')
    desactive.short_description = 'desactive Service'
|
import random
from time import sleep

# The computer secretly picks a number between 1 and 5.
secret = random.randint(1, 5)
print('-=-' * 20)
# Ask the user for their guess in the same range.
guess = int(input('Digite um número entre 1 e 5: '))
print('-=-' * 20)
print('Processando os dados...')
# Compare the user's guess with the computer's number.
if guess == secret:
    print('Seu número {} é igual ao {} escolhido pelo computador. \nParabéns!'
          .format(guess, secret))
else:
    print('Seu número {} não é o mesmo número {} que o computador escolheu. \nTente novamente!'
          .format(guess, secret))
from flask_app.models.game_model import GameModel
import datetime
class GameCollection():
    """A filterable collection of GameModel games with aggregate statistics.

    Statistics are computed at three granularities: per player, per team
    (an exact roster, keyed by the sorted comma-joined player names) and
    per club (the named home/away side).

    Fix: dict iteration uses .items() and loops use range() so the class
    runs under both Python 2 and Python 3 (iteritems/xrange are Py2-only).
    """

    def __init__(self):
        self.models = []

    def populate(self, data):
        """Build GameModel objects from raw rows and store them in self.models."""
        for datum in data:
            g = GameModel()
            g.populate(datum)
            # These come back as '/'-delimited strings, so split them into arrays
            if g.away_players:
                g.away_players = g.away_players.split('/')
            if g.home_players:
                g.home_players = g.home_players.split('/')
            self.models.append(g)

    def filter_by_players(self, players, exclusive=True, even_teams=False):
        """Return a new collection of the games involving `players`.

        If exclusive is True, keep games whose participants are exactly
        `players`; otherwise keep games that include them, possibly with
        others.  even_teams additionally drops games with unequal sides.
        e.g. all games 'Bob' played in: exclusive=False; only games with
        exactly 'Bob', 'Jon' and 'Tom': exclusive=True.
        """
        players_set = set(players)
        new_models = []
        for model in self.models:
            game_players_set = set(model.home_players + model.away_players)
            # If we want even teams, drop games with different team sizes.
            if even_teams and (len(model.home_players) != len(model.away_players)):
                continue
            if exclusive:
                if game_players_set == players_set:
                    new_models.append(model)
            else:
                if players_set.issubset(game_players_set):
                    new_models.append(model)
        c = GameCollection()
        c.models = new_models
        return c

    def filter_by_matchup(self, team_1, team_2, exclusive=True):
        """Return games where team_1's players faced team_2's players
        (either side may be home).  exclusive=True forbids extra teammates."""
        players_set_1 = set(team_1)
        players_set_2 = set(team_2)
        new_models = []
        for model in self.models:
            away_players_set = set(model.away_players)
            home_players_set = set(model.home_players)
            if exclusive:
                if ((players_set_1 == away_players_set) and (players_set_2 == home_players_set)
                    or (players_set_2 == away_players_set) and (players_set_1 == home_players_set)):
                    new_models.append(model)
            else:
                if ((players_set_1.issubset(away_players_set)) and (players_set_2.issubset(home_players_set))
                    or (players_set_2.issubset(away_players_set)) and (players_set_1.issubset(home_players_set))):
                    new_models.append(model)
        c = GameCollection()
        c.models = new_models
        return c

    def compute_player_stats(self):
        """Aggregate wins/losses/ties and goal totals per player, team and club.

        Returns {'player_stats': [...], 'team_stats': [...], 'club_stats': [...]},
        each entry carrying raw counts plus per-game derived rates.
        """
        player_results = {}
        team_results = {}
        club_results = {}
        # First pass: collect every player, team (sorted roster) and club name.
        player_names = set()
        team_names = set()
        club_names = set()
        for model in self.models:
            for player in model.home_players + model.away_players:
                player_names.add(player)
            team_names.add(','.join(sorted(model.away_players)))
            team_names.add(','.join(sorted(model.home_players)))
            club_names.add(model.home_team)
            club_names.add(model.away_team)
        for name in team_names:
            team_results[name] = {'name': name, 'wins': 0, 'losses': 0, 'ties': 0, 'goals': 0, 'goals_against': 0}
        for name in player_names:
            player_results[name] = {'name': name, 'wins': 0, 'losses': 0, 'ties': 0, 'goals': 0, 'goals_against': 0}
        for name in club_names:
            club_results[name] = {'name': name, 'wins': 0, 'losses': 0, 'ties': 0, 'goals': 0, 'goals_against': 0}
        # Second pass: tally results game by game.
        for model in self.models:
            winning_players = []
            losing_players = []
            tying_teams = []
            if model.home_score > model.away_score:
                winning_players = model.home_players
                losing_players = model.away_players
            elif model.home_score < model.away_score:
                winning_players = model.away_players
                losing_players = model.home_players
            elif model.home_score == model.away_score:
                tying_teams = [model.home_players, model.away_players]
            # Add in results for wins/losses/ties
            for player in winning_players:
                player_results[player]['wins'] += 1
            for player in losing_players:
                player_results[player]['losses'] += 1
            for team in tying_teams:
                for player in team:
                    player_results[player]['ties'] += 1
            # Add in score for and against
            for player in model.home_players:
                player_results[player]['goals'] += model.home_score
                player_results[player]['goals_against'] += model.away_score
            for player in model.away_players:
                player_results[player]['goals'] += model.away_score
                player_results[player]['goals_against'] += model.home_score
            # Now the same stats for teams
            winning_team_name = ','.join(sorted(winning_players))
            losing_team_name = ','.join(sorted(losing_players))
            if winning_team_name:
                team_results[winning_team_name]['wins'] += 1
            if losing_team_name:
                team_results[losing_team_name]['losses'] += 1
            for team in tying_teams:
                tying_team_name = ','.join(sorted(team))
                team_results[tying_team_name]['ties'] += 1
            away_team_name = ','.join(sorted(model.away_players))
            home_team_name = ','.join(sorted(model.home_players))
            team_results[away_team_name]['goals'] += model.away_score
            team_results[away_team_name]['goals_against'] += model.home_score
            team_results[home_team_name]['goals'] += model.home_score
            team_results[home_team_name]['goals_against'] += model.away_score
            # Now the same stats for clubs
            winning_club_name = losing_club_name = None
            tying_teams = []
            if model.home_score > model.away_score:
                winning_club_name = model.home_team
                losing_club_name = model.away_team
            elif model.away_score > model.home_score:
                winning_club_name = model.away_team
                losing_club_name = model.home_team
            else:
                tying_teams = [model.away_team, model.home_team]
            if winning_club_name:
                club_results[winning_club_name]['wins'] += 1
            if losing_club_name:
                club_results[losing_club_name]['losses'] += 1
            for team in tying_teams:
                club_results[team]['ties'] += 1
            club_results[model.away_team]['goals'] += model.away_score
            club_results[model.away_team]['goals_against'] += model.home_score
            club_results[model.home_team]['goals'] += model.home_score
            club_results[model.home_team]['goals_against'] += model.away_score
        # Derived per-game rates; every listed name appears in at least one
        # game and every game yields a win, loss or tie, so total_games >= 1.
        for group in [team_results, player_results, club_results]:
            for name, stats in group.items():  # .items(): valid on Py2 and Py3
                total_games = stats['wins'] + stats['losses'] + stats['ties']
                goal_differential = stats['goals'] - stats['goals_against']
                stats['total_games'] = total_games
                stats['winning_percentage'] = float(stats['wins']) / total_games
                stats['goals_per_game'] = float(stats['goals']) / total_games
                stats['goals_against_per_game'] = float(stats['goals_against']) / total_games
                stats['goal_differential'] = goal_differential
                stats['goal_differential_per_game'] = float(goal_differential) / total_games
        return {'player_stats': list(player_results.values()),
                'team_stats': list(team_results.values()),
                'club_stats': list(club_results.values())}

    def filter_by_date(self, start_date='0', end_date='Z'):
        """Return games with start_date <= model.date < end_date.

        Dates are compared as ISO-format strings; the defaults '0' and 'Z'
        sort below/above every ISO date, i.e. no bound.
        """
        new_models = []
        for model in self.models:
            if model.date >= start_date and model.date < end_date:
                new_models.append(model)
        gc = GameCollection()
        gc.models = new_models
        return gc

    def get_weekly_stats(self, weeks=6):
        """Return per-week stat snapshots, oldest first: the current
        (partial) week plus the `weeks` preceding whole weeks."""
        weekly_stats = []
        today = datetime.date.today()
        # Monday of the current week.
        start_date = today + datetime.timedelta(days=(0 - today.weekday()))
        start_date_string = start_date.isoformat()
        end_date_string = 'Z'
        new_collection = self.filter_by_date(start_date_string, end_date_string)
        stats = new_collection.compute_player_stats()
        weekly_stats.append({'start_date': start_date_string, 'end_date': 'Now', 'stats': stats})
        for i in range(weeks):  # range(): xrange is Py2-only
            end_date = start_date
            start_date = start_date - datetime.timedelta(days=7)
            end_date_string = end_date.isoformat()
            start_date_string = start_date.isoformat()
            new_collection = self.filter_by_date(start_date_string, end_date_string)
            stats = new_collection.compute_player_stats()
            weekly_stats.append({'start_date': start_date_string, 'end_date': end_date_string, 'stats': stats})
        # Go from past to present
        weekly_stats = weekly_stats[::-1]
        return weekly_stats

    def remove_duplicate_club_games(self):
        """Drop games where a club played itself (useful for club stats)."""
        new_models = []
        for model in self.models:
            if model.away_team != model.home_team:
                new_models.append(model)
        c = GameCollection()
        c.models = new_models
        return c
from PyObjCTools.TestSupport import TestCase, min_os_level
import SafariServices
class TestSFSafariExtensionManager(TestCase):
    """Verifies the PyObjC bridge metadata for SFSafariExtensionManager."""

    @min_os_level("10.12")
    def testMethods(self):
        # Argument 1 of the completion-handler API must be a block with
        # signature v@@ (void return, two object parameters).
        target = SafariServices.SFSafariExtensionManager.getStateOfSafariExtensionWithIdentifier_completionHandler_
        self.assertArgIsBlock(target, 1, b"v@@")
|
import numpy as np
from os import path
from proj1_helpers import load_csv_data, eval_model, predict_labels, create_csv_submission
from data_utils import feature_transform, standardise, standardise_to_fixed
from implementation_variants import logistic_regression_mean
cwd = path.dirname(__file__)
SEED = 42  # NOTE(review): not referenced below -- presumably used by the tuning notebook; confirm
DATA_PATH = '../data/'
# Training hyperparameters (obtained through procedure in Run.ipynb)
MAX_ITERS = 50000
GAMMA = 0.01
THRESHOLD = 1e-7  # NOTE(review): also not referenced below
if __name__ == "__main__":
    # ---- Training ----
    # Load train data
    y_train, x_train, _ = load_csv_data(path.join(DATA_PATH, 'train.csv'))
    # Apply feature transform
    fx_train = feature_transform(x_train)
    # Standardise to mean and s.d. (kept for re-use on the test set)
    fx_train, mu_train, sigma_train = standardise(fx_train)
    # Add offset term (bias column of ones)
    tx_train = np.c_[np.ones(len(y_train)), fx_train]
    # Initialise training
    w_initial = np.ones(tx_train.shape[1])
    # Run gradient descent
    w, loss = logistic_regression_mean(y_train, tx_train, w_initial, MAX_ITERS, GAMMA, verbose=True)
    print(f'Training loss: {loss}')
    acc = eval_model(y_train, tx_train, w, thresh=0.5)
    print(f'Training accuracy: {acc}')
    # ---- Inference ----
    # Load test data
    y_test, x_test, ids_test = load_csv_data(path.join(DATA_PATH, 'test.csv'))
    fx_test = feature_transform(x_test)
    # Standardise to mean and s.d. of training data (never the test set's own)
    fx_test = standardise_to_fixed(fx_test, mu_train, sigma_train)
    # Add offset term
    tx_test = np.c_[np.ones(fx_test.shape[0]), fx_test]
    # Get predictions on test set and write the submission file
    y_pred = predict_labels(w, tx_test, thresh=0.5)
    create_csv_submission(ids_test, y_pred, path.join(DATA_PATH, 'final_submission.csv'))
|
'''
Created on 23 okt. 2015
@author: danhe
'''
from game_tools import Sprite
class Map(Sprite):
    """Static map sprite loaded from an image file."""

    def __init__(self, map_file):
        """Create the map sprite from `map_file` and tag it with id 'map'."""
        super(Map, self).__init__(source=map_file)
        self.id = 'map'

    def update(self):
        """The map is static: nothing to do per frame."""
        pass
|
import sys
import os
import fileinput
from functools import partial
import numpy as np
from _global_stat_main import main
sys.path.insert(0, '../')
import seqmodel as sq # noqa
def load_data(opt):
    """Load train/valid/eval corpora as word sequences sharing one vocabulary.

    Returns (datasets, batch-iterator factory, (in_vocab, out_vocab),
    path of the vocab file)."""
    dpath = partial(os.path.join, opt['data_dir'])
    vocab_path = dpath('vocab.txt')
    vocab = sq.Vocabulary.from_vocab_file(vocab_path)
    read_fn = partial(sq.read_seq_data, in_vocab=vocab, out_vocab=vocab,
                      keep_sentence=False, seq_len=opt['seq_len'])
    files = (opt['train_file'], opt['valid_file'], opt['eval_file'])
    data = [read_fn(sq.read_lines(dpath(fname), token_split=' '))
            for fname in files]
    batch_iter = partial(sq.seq_batch_iter, batch_size=opt['batch_size'],
                         shuffle=False, keep_sentence=False)
    return data, batch_iter, (vocab, vocab), vocab_path
def load_only_data(opt, vocabs, text_filepath):
    """Read one extra text file and return a batch iterator over it."""
    lines = sq.read_lines(text_filepath, token_split=' ')
    data = sq.read_seq_data(lines, *vocabs, keep_sentence=False,
                            seq_len=opt['seq_len'])
    return sq.seq_batch_iter(*data, batch_size=opt['batch_size'],
                             shuffle=False, keep_sentence=False)
def decode(
        opt, gns_opt, vocabs, model, sess, _data, _state, out_filename, num_tokens,
        force=False):
    """Sample roughly num_tokens tokens from the LM and write a decode file.

    Decodes `dec_batch_size` sequences in parallel, one temporary file per
    batch lane, then concatenates the lanes into
    <exp_dir>/decode/<out_filename> and returns that path.  When
    `use_model_prob` is set and `force` is not, the existing path is
    returned without decoding.  _data and _state are unused here (kept for
    the caller's uniform interface).
    """
    vocab = vocabs[-1]
    _b = gns_opt['dec_batch_size']  # opt['batch_size']
    decode_dir = os.path.join(opt['exp_dir'], 'decode')
    sq.ensure_dir(decode_dir)
    opath = os.path.join(decode_dir, out_filename)
    if gns_opt['use_model_prob'] and not force:
        return opath
    # start with empty seed (token id 0, length 1, one column per batch lane)
    seed_in = np.array([[0] * _b], dtype=np.int32)
    seed_len = np.array([1] * _b, dtype=np.int32)
    features = sq.SeqFeatureTuple(seed_in, seed_len)
    n_tokens = 0
    # write each batch sequence to a separate file
    tmp_paths = [f'{opath}.{i}' for i in range(_b)]
    with sq.open_files(tmp_paths, mode='w') as ofps:
        for b_sample, __ in sq.uncond_lm_decode(sess, model, features):
            for i in range(_b):
                word = vocab.i2w(b_sample[0, i])
                if word == '</s>':
                    # end-of-sentence marker becomes a newline, not a word
                    ofps[i].write('\n')
                else:
                    ofps[i].write(f'{word} ')
                    # only real words count toward the token budget
                    n_tokens += 1
            if n_tokens >= num_tokens:
                break
    # merge and clean up
    with open(opath, mode='w') as ofp:
        with fileinput.input(files=tmp_paths) as fin:
            for line in fin:
                ofp.write(line)
            # a lane may stop mid-sentence; make sure the file ends in '\n'
            if not line.endswith('\n'):
                ofp.write('\n')
    for fpath in tmp_paths:
        os.remove(fpath)
    return opath
# Script entry point: delegate CLI handling and training to _global_stat_main.main.
if __name__ == '__main__':
    main('main_global_stat_lm.py', sq.SeqModel, load_data, decode, load_only_data)
|
"""
Created on Jan 14, 2016
@author: stefanopetrangeli
"""
from sqlalchemy import Column, VARCHAR, Boolean, Integer
from sqlalchemy.ext.declarative import declarative_base
import logging
from orchestrator_core.exception import DomainNotFound
from orchestrator_core.sql.sql_server import get_session
from sqlalchemy.orm.exc import NoResultFound
Base = declarative_base()
class DomainModel(Base):
    """
    Maps the database table Domain
    """
    __tablename__ = 'domain'
    # Column names exposed when serialising a row.
    attributes = ['id', 'name', 'type', 'ip', 'port']
    id = Column(Integer, primary_key=True)
    name = Column(VARCHAR(64))
    # NOTE: `type` shadows the builtin at class scope; harmless for an ORM column.
    type = Column(VARCHAR(64))
    ip = Column(VARCHAR(64))
    port = Column(Integer)
class DomainTokenModel(Base):
    """
    Maps the database table domain_token (per-user, per-domain auth tokens)
    """
    __tablename__ = 'domain_token'
    # Column names exposed when serialising a row.
    attributes = ['user_id', 'domain_id', 'token']
    # Composite primary key: one token per (user, domain) pair.
    user_id = Column(Integer, primary_key=True)
    domain_id = Column(Integer, primary_key=True)
    token = Column(VARCHAR(64))
class Domain(object):
    """Data-access layer for the domain and domain_token tables."""
    def __init__(self):
        pass
    def getDomain(self, domain_id):
        """Return the DomainModel row with the given id; raise DomainNotFound otherwise.
        NOTE(review): the broad `except Exception` also converts real DB errors
        (connection failures, duplicate rows) into DomainNotFound after logging.
        """
        session = get_session()
        try:
            return session.query(DomainModel).filter_by(id = domain_id).one()
        except Exception as ex:
            logging.exception(ex)
            raise DomainNotFound("Domain not found: "+str(domain_id)) from None
    def getDomainFromName(self, name):
        """Return the DomainModel row with the given (assumed unique) name."""
        session = get_session()
        try:
            return session.query(DomainModel).filter_by(name = name).one()
        except Exception as ex:
            logging.exception(ex)
            raise DomainNotFound("Domain not found for name: "+str(name)) from None
    def getDomainIP(self, domain_id):
        """Return only the ip column of the domain with the given id."""
        session = get_session()
        try:
            return session.query(DomainModel).filter_by(id = domain_id).one().ip
        except Exception as ex:
            logging.exception(ex)
            raise DomainNotFound("Domain not found: "+str(domain_id)) from None
    def getUserToken(self, domain_id, user_id):
        """Return the stored token for (user, domain), or None if absent."""
        session = get_session()
        try:
            return session.query(DomainTokenModel).filter_by(user_id=user_id).filter_by(domain_id=domain_id).one().token
        except NoResultFound:
            return None
    def updateUserToken(self, domain_id, user_id, token):
        """Insert or update the token for (user, domain) in one transaction."""
        session = get_session()
        with session.begin():
            domain_token = session.query(DomainTokenModel).filter_by(user_id=user_id).filter_by(domain_id=domain_id).first()
            if domain_token is not None:
                # Row exists: update the token in place.
                session.query(DomainTokenModel).filter_by(user_id=user_id).filter_by(domain_id=domain_id).update({"token":token})
            else:
                domain_token = DomainTokenModel(user_id=user_id, domain_id=domain_id, token=token)
                session.add(domain_token)
    def addDomain(self, domain_name, domain_type, domain_ip, domain_port, update=False):
        """Add a domain unless an identical row already exists; return its id.
        The new id is max(existing ids) + 1 computed in Python.
        NOTE(review): the full-table scan for the max id is racy under
        concurrent adds -- an autoincrement primary key would be safer.
        The `update` parameter is currently unused.
        """
        session = get_session()
        with session.begin():
            max_id = -1
            domain_refs = session.query(DomainModel).all()
            for domain_ref in domain_refs:
                if domain_ref.id > max_id:
                    max_id = domain_ref.id
                # Reuse the existing row when every field matches.
                if domain_ref.name == domain_name and domain_ref.type == domain_type and domain_ref.ip == domain_ip and domain_ref.port == int(domain_port):
                    return domain_ref.id
            domain = DomainModel(id=max_id+1, name=domain_name, type=domain_type, ip=domain_ip, port=domain_port)
            session.add(domain)
            return domain.id
|
#!/usr/bin/python
# http://flask.pocoo.org/docs/0.10/patterns/sqlite3/
# http://ryrobes.com/python/running-python-scripts-as-a-windows-service/
# http://stackoverflow.com/questions/23550067/deploy-flask-app-as-windows-service
# http://gouthamanbalaraman.com/blog/minimal-flask-login-example.html
from flask import Flask,g,request,render_template,redirect
import sqlite3
import ConfigParser
import sys
import os
import os.path
import json
import requests
import base64
import csv
import re
from collections import defaultdict, namedtuple
import datetime
from calendar import monthrange
import logging
from threading import Thread
app = Flask("simpleServer")
# Load server.cfg from the install directory first, then the working directory.
c = ConfigParser.ConfigParser()
configPath=None
for p in ["/opt/tinkeraccess/server.cfg", "server.cfg"]:
    if os.path.isfile(p):
        configPath=p
        break
if configPath:
    c.read(configPath)
    # Required settings: admin password, sqlite db path, Slack webhook URL.
    C_password = c.get('config', 'password')
    C_database = c.get('config', 'db')
    C_slackPostUrl = c.get('config', 'slackurl')
    # Optional webcam/imgur settings; all three are needed for image posting.
    c_webcam_username = None
    c_webcam_password = None
    c_imgur_client_id = None
    if c.has_option('config', 'webcam_username'): c_webcam_username = c.get('config', 'webcam_username')
    if c.has_option('config', 'webcam_password'): c_webcam_password = c.get('config', 'webcam_password')
    if c.has_option('config', 'imgur_client_id'): c_imgur_client_id = c.get('config', 'imgur_client_id')
    # Map of device name -> webcam URL, empty unless fully configured.
    c_webcam_urls = dict()
    if c.has_section('webcam_urls'):
        if c_webcam_username is None or c_webcam_password is None or c_imgur_client_id is None:
            print("To post webcam images, `webcam_username`, `webcam_password`,\n" +
                  "and `imgur_client_id` must all be specified in the `config`\n" +
                  "section")
        else: c_webcam_urls = dict(c.items(section='webcam_urls'))
else:
    # No config file found anywhere: refuse to start.
    print("config server.cfg not found")
    sys.exit(1)
######### Slack function #############
def post_to_slack(message):
    """POST `message` (a dict) to the configured Slack webhook.

    Failures are logged but never propagated, so Slack outages cannot
    take down request handling."""
    try:
        payload = json.dumps(message)
        requests.post(C_slackPostUrl, data=payload)
    except Exception as err:
        logging.exception(err)
######### Database functions #########
def init_db():
    """Recreate the SQLite database from schema.sql, dropping any existing file."""
    with app.app_context():
        db_file = "/opt/tinkeraccess/db.db"
        if os.path.isfile(db_file):
            os.remove(db_file)
        db = get_db()
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def exec_db(query, args=()):
    """Execute a write statement and commit.

    Generalised with an optional `args` tuple so callers can use
    parameterised ('?') statements instead of string interpolation;
    existing single-argument callers are unaffected.
    """
    db = get_db()
    db.cursor().execute(query, args)
    db.commit()
def query_db(query, args=(), one=False):
    """Run a SELECT with bound `args`; return all rows, or just the first
    row (None if there are no rows) when `one` is true."""
    cur = get_db().execute(query, args)
    rows = cur.fetchall()
    cur.close()
    if one:
        return rows[0] if rows else None
    return rows
def get_db():
    """Return the per-request SQLite connection, opening it on first use."""
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect(C_database)
    return g._database
def insert(table, fields=(), values=()):
    """Parameterised INSERT into `table`; returns the new row id.

    Only `values` are bound as SQL parameters -- `table` and `fields` are
    interpolated into the statement and must come from trusted code,
    never from request data."""
    cur = get_db().cursor()
    placeholders = ', '.join(['?'] * len(values))
    query = 'INSERT INTO %s (%s) VALUES (%s)' % (
        table,
        ', '.join(fields),
        placeholders
    )
    cur.execute(query, values)
    get_db().commit()
    new_id = cur.lastrowid
    cur.close()
    return new_id
def addNewUser(code, deviceid):
    """Record an unrecognised badge `code` in the newuser table (at most once).

    Security fix: the original interpolated `code` and `deviceid` (both taken
    from the request URL) straight into SQL; the lookups are parameterised
    now and the insert goes through the parameterised insert() helper.
    """
    if len(query_db("select code from newuser where code=?", (code,))) == 0:
        if len(query_db("select code from user where code=?", (code,))) == 0:
            insert('newuser', ('code', 'deviceID'), (code, deviceid))
######### Webserver functions #########
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-request SQLite connection, if one was opened."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
# logout
@app.route("/device/<deviceid>/logout/<uid>")
def deviceLogout(deviceid, uid):
    """Log a user out of a device, record it, and announce it on Slack.

    Security fix: `deviceid` and `uid` come from the URL; the SELECT is
    now parameterised and the log row is written through the parameterised
    insert() helper instead of string-built SQL.
    """
    output = query_db("select device.name from device where id=?", (deviceid,))
    insert('log', ('message',), ('logout:%s:%s' % (deviceid, uid),))
    # NOTE(review): an unknown deviceid still raises IndexError here, as in
    # the original -- confirm whether a 404 would be preferable.
    device_name = output[0][0]
    message_content = {'text': '{} is now available'.format(device_name)}
    if device_name in c_webcam_urls:
        # Attach a webcam snapshot of the device when one is configured.
        image_url = captureImage(c_webcam_urls[device_name])
        if len(image_url) > 0:
            message_content['attachments'] = [{
                'fallback': 'Webcam image of {}'.format(device_name),
                'image_url': image_url
            }]
    # Post asynchronously so a slow Slack call never blocks the response.
    t = Thread(target=post_to_slack, args=(message_content,))
    t.start()
    return ""
# given a device and a rfid code return if there is access to that device
# this is the login
@app.route("/device/<deviceid>/code/<code>")
def deviceCode(deviceid, code):
    """Login endpoint: decide whether badge `code` may use device `deviceid`.

    Returns JSON with devicename/username/userid/time; userid -1 means
    access denied.  Lockout semantics: 0 = unrestricted, 1 = only 24-hour
    ('S' status) users during the configured window, 2 = fully locked out.

    Security fix: `deviceid` and `code` arrive in the URL and were
    interpolated into every query -- all SQL is parameterised now
    (behaviour otherwise unchanged).
    """
    deny = json.dumps({'devicename': 'none', 'username': 'none', 'userid': -1, 'time': 0})
    # Query templates; ? placeholders are bound below.
    q_user = ("select user.name, user.id from user "
              "where user.code=? and user.status IN ('A','S')")
    q_user_24 = ("select user.name, user.id from user "
                 "where user.code=? and user.status='S'")
    q_access = ("select user.name, user.id, deviceAccess.time from deviceAccess "
                "join device on device.id=deviceAccess.device "
                "join user on user.id = deviceAccess.user "
                "where user.code=? and user.status IN ('A','S') and device.id=?")
    q_access_24 = ("select user.name, user.id, deviceAccess.time from deviceAccess "
                   "join device on device.id=deviceAccess.device "
                   "join user on user.id = deviceAccess.user "
                   "where user.code=? and user.status='S' and device.id=?")
    deviceinfo = query_db("select name,allUsers,lockout,lockout_start,lockout_end from device where id=?", (deviceid,))
    if len(deviceinfo) == 0:
        # Device ID not found, so deny access
        return deny
    devicename, allusers, lockout, lockout_start, lockout_end = deviceinfo[0]
    if lockout == 2:
        # Device is locked out, so deny access
        return deny
    if lockout == 0:
        # Device access is not time limited; allUsers devices skip the
        # per-user deviceAccess join.
        if allusers:
            output = query_db(q_user, (code,))
        else:
            output = query_db(q_access, (code, deviceid))
    elif lockout == 1:
        # Device access is time limited; parse "H:MM AM/PM" bounds into HHMM ints.
        now = datetime.datetime.now()
        now = 100 * now.hour + now.minute
        x = re.split(':| ', lockout_start)
        start_time = 100 * ((int(x[0]) % 12) + (12 if x[2] == "PM" else 0)) + int(x[1])
        x = re.split(':| ', lockout_end)
        end_time = 100 * ((int(x[0]) % 12) + (12 if x[2] == "PM" else 0)) + int(x[1])
        if start_time <= end_time:
            during_time_limit = start_time <= now < end_time
        else:
            # Window wraps past midnight.
            during_time_limit = now >= start_time or now < end_time
        if during_time_limit:
            # Inside the lockout window only 24-hour ('S') users get in.
            if allusers:
                output = query_db(q_user_24, (code,))
            else:
                output = query_db(q_access_24, (code, deviceid))
        else:
            # Outside the window, normal active/super users get in.
            if allusers:
                output = query_db(q_user, (code,))
            else:
                output = query_db(q_access, (code, deviceid))
    else:
        # Unknown lockout status, deny access
        return deny
    if len(output) == 0:
        # Badge not found / inactive / lacks 24hr access: remember it and deny.
        addNewUser(code, deviceid)
        return deny
    # User badge code found, so grant access
    username = output[0][0]
    userid = output[0][1]
    accesstime = 100 if allusers else output[0][2]
    # Announce on Slack (asynchronously), with a webcam shot if configured.
    message_content = {'text': '{} is now in use by {}'.format(devicename, username)}
    if devicename in c_webcam_urls:
        image_url = captureImage(c_webcam_urls[devicename])
        if len(image_url) > 0:
            message_content['attachments'] = [{
                'fallback': 'Webcam image of {}'.format(devicename),
                'image_url': image_url
            }]
    t = Thread(target=post_to_slack, args=(message_content,))
    t.start()
    # log it to the database (through the parameterised insert helper)
    insert('log', ('message',), ('login:%s:%s' % (deviceid, userid),))
    return json.dumps(
        {
            'devicename': devicename,
            'username': username,
            'userid': userid,
            'time': accesstime
        }
    )
@app.route("/")
def defaultRoute():
    """Site root: serve the admin login page."""
    return render_template('admin_login.html')
@app.route("/checkLogin/<user>/<password>" )
def checkLogin(user,password):
    """Return the string "true" when *password* matches the admin password, else "false"."""
    return "true" if password == C_password else "false"
# given a name and badge code, add the user to the database
# and clear out any perms that id might have had in the past
def userAdd(name, badgeCode):
    """Insert a new user row and purge any stale deviceAccess rows for that badge.

    NOTE(review): name/badgeCode are interpolated directly into SQL here;
    query_db elsewhere in this file accepts '?' placeholders — consider
    parameterizing these statements to avoid SQL injection.
    """
    exec_db("insert into user (name,code) values ('%s','%s')" % (name, badgeCode))
    # if the database is dirty, make sure that any existing records are cleared out
    userAccess = query_db("select id from user where code='%s'" % badgeCode)
    if len(userAccess) > 0:
        exec_db("delete from deviceAccess where user=%s" % userAccess[0][0] )
@app.route("/admin/addUser/<userid>/name/<name>")
def addUser(userid, name):
    """Promote a pending newuser entry (by newuser id) to a real user named *name*.

    On a name collision with an active user, redirect to an error modal instead.
    """
    if request.cookies.get('password') != C_password:
        # Fix: returning a bool is not a valid Flask response; send the
        # unauthenticated caller to the login page like the interface routes do.
        return redirect("/")
    name = name.strip()
    output = query_db("select code from newuser where id=%s" % userid)
    if len(output) == 0:
        # Robustness fix: an unknown newuser id previously raised IndexError.
        return redirect("/admin/interface/newuser")
    badgeCode = output[0][0]
    output = query_db("select code from user where name='%s' and status IN ('A','S')" % name)
    if len(output) == 0:
        # No active user with same name, go ahead and add this user id
        userAdd(name,badgeCode)
        exec_db("delete from newuser where id=%s" % userid)
        return redirect("/admin/interface/user")
    else:
        # User with same name already active, popup error window
        existingBadge = output[0][0]
        return redirect("/admin/interface/newuser/modal/useractive/%s/%s/%s" % (name, existingBadge, badgeCode))
@app.route("/admin/loadcsv", methods=['POST'])
def loadCSV():
    """Bulk-import users from POSTed CSV text ("name,badge" per line).

    Rows whose badge already exists, or whose name belongs to an active user,
    are skipped silently.
    """
    if request.cookies.get('password') != C_password:
        # Fix: returning a bool is not a valid Flask response; redirect to login.
        return redirect("/")
    data = request.form['csv']
    # strip leading , if it is there
    data = map(lambda x: re.sub('^,', '', x), data.split("\n"))
    reader = csv.reader(data, delimiter=',')
    for row in reader:
        # Robustness fix: rows with fewer than two fields previously raised
        # IndexError on row[1] (the old check only skipped fully empty rows).
        if len(row) < 2:
            continue
        name = row[0].strip()
        code = row[1].strip()
        output = query_db("select code from user where code='%s'" % code)
        if len(output) == 0:
            # Badge does not already exist in database
            output = query_db("select name from user where name='%s' and status IN ('A','S')" % name)
            if len(output) == 0:
                # Name does not exist in database as an active user
                userAdd(name, code)
    return redirect("/admin/interface/user")
"""
when a trainer logs in, he can register anyone on this device as a user
http://localhost:5000/admin/marioStar/1/150060E726B4/0/2
"""
@app.route("/admin/marioStar/<trainerid>/<trainerBadge>/<deviceid>/<userBadge>")
def marioStarMode(trainerid,trainerBadge, deviceid, userBadge):
    """If trainerid/trainerBadge identify an active trainer on this device,
    grant the already-registered active user holding userBadge access to it.

    Returns the string "true" on success, "false" otherwise. No admin cookie
    is required: authorization comes from the trainer badge + trainer flag.
    NOTE(review): route parameters are interpolated into SQL — consider
    parameterized queries.
    """
    # the requester must be an active trainer for this specific device
    trainer = query_db("select user.id from user join deviceAccess on deviceAccess.user=user.id where user.id=%s and user.code='%s' and user.status IN ('A','S') and deviceAccess.trainer=1 and deviceAccess.device=%s" % (trainerid, trainerBadge, deviceid))
    # the user must already exist in the system
    userid = query_db("select id from user where code='%s' and status IN ('A','S')" % (userBadge) )
    #print("userId", userid)
    #print("lenUserId=1", len(userid))
    #print("trainer", trainer)
    #print("lentrainer=1", len(trainer))
    if len(userid) == 1 and len(trainer) == 1:
        userid = userid[0][0]
        # replace any existing grant with a fresh full-time (time=100) grant
        exec_db("delete from deviceAccess where user=%s and device=%s" % (userid, deviceid))
        exec_db("insert into deviceAccess (user,device,time) values (%s, %s, 100)" % (userid, deviceid))
        return "true"
    else:
        return "false"
@app.route("/admin/addUserAccess/<userid>/<deviceid>")
def addUserAccess(userid, deviceid):
    """Grant *userid* access to *deviceid* (time=100), replacing any prior grant."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("delete from deviceAccess where user=%s and device=%s" % (userid, deviceid))
    exec_db("insert into deviceAccess (user,device,time) values (%s, %s, 100)" % (userid, deviceid))
    return redirect("/admin/interface/userAccess/%s" % userid)
@app.route("/admin/toggle24Hr/user/<userid>")
def toggle24HrAccess(userid):
    """Toggle a user's status between 'A' (active) and 'S' (active, 24-hr).

    Other statuses (e.g. 'I') are left unchanged.
    """
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    output = query_db("select status from user where id=%s" % userid)
    if output[0][0] == 'A':
        exec_db("update user set status='S' where id=%s" % userid)
    elif output[0][0] == 'S':
        exec_db("update user set status='A' where id=%s" % userid)
    return redirect("/admin/interface/userAccess/%s" % userid)
@app.route("/admin/removeTrainer/<userid>/<deviceid>")
def delUserTrainerAccess(userid, deviceid):
    """Clear the trainer flag for *userid* on *deviceid*."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update deviceAccess set trainer=0 where user=%s and device=%s" % (userid, deviceid))
    return redirect("/admin/interface/userAccess/%s" % userid)
@app.route("/admin/addTrainer/<userid>/<deviceid>")
def addUserTrainerAccess(userid, deviceid):
    """Set the trainer flag for *userid* on *deviceid* (requires an existing grant)."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update deviceAccess set trainer=1 where user=%s and device=%s" % (userid, deviceid))
    return redirect("/admin/interface/userAccess/%s" % userid)
@app.route("/admin/delUserAccess/<userid>/<deviceid>")
def delUserAccess(userid, deviceid):
    """Revoke *userid*'s access to *deviceid*."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("delete from deviceAccess where user=%s and device=%s" % (userid, deviceid))
    return redirect("/admin/interface/userAccess/%s" % userid)
@app.route("/admin/delNewUser/<userid>")
def delNewUser(userid):
    """Discard a pending newuser registration."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("delete from newuser where id=%s" % userid)
    return redirect("/admin/interface/newuser")
@app.route("/admin/delUser/<userid>")
def delUser(userid):
    """Permanently delete a user row (used from the inactive-users page)."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("delete from user where id=%s" % userid)
    return redirect("/admin/interface/inactiveuser")
@app.route("/admin/activateUser/<userid>")
def activateUser(userid):
    """Re-activate an inactive user, unless an active user already has the same name.

    On a name collision, redirect to an error modal showing both badge codes.
    """
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    # Get the name and badge code to be activated
    output = query_db("select name,code from user where id=%s" % userid)
    name = output[0][0]
    newbadge = output[0][1]
    output = query_db("select code from user where name='%s' and status IN ('A','S')" % name)
    if len(output) == 0:
        # No active user with same name, make this user id active
        exec_db("update user set status='A' where id=%s" % userid)
        return redirect("/admin/interface/inactiveuser")
    else:
        # User with same name already active, popup error window
        badge = output[0][0]
        return redirect("/admin/interface/inactiveuser/modal/useractive/%s/%s/%s" % (name, badge, newbadge))
@app.route("/admin/deactivateUser/<userid>")
def deactivateUser(userid):
    """Mark a user inactive (status 'I'); the row is kept for later reactivation."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update user set status='I' where id=%s" % userid)
    return redirect("/admin/interface/user")
@app.route("/admin/deviceUnlimitedHr/<deviceid>")
def deviceUnlimitedHr(deviceid):
    """Set device lockout mode 0: usable at any hour."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update device set lockout=0 where id=%s" % deviceid)
    return redirect("/admin/interface/devices")
@app.route("/admin/deviceLimitedHr/<deviceid>")
def deviceLimitedHr(deviceid):
    """Set device lockout mode 1: restricted to the configured time window."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update device set lockout=1 where id=%s" % deviceid)
    return redirect("/admin/interface/devices")
@app.route("/admin/deviceLockout/<deviceid>")
def deviceLockout(deviceid):
    """Set device lockout mode 2: fully locked out."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update device set lockout=2 where id=%s" % deviceid)
    return redirect("/admin/interface/devices")
@app.route("/admin/deviceLockoutTimes/<deviceid>/time/<start>/<end>")
def deviceLockoutTimes(deviceid, start, end):
    """Store the lockout window (start/end) for a device."""
    if request.cookies.get('password') != C_password:
        # Fix: a bool is not a valid Flask response; redirect to the login page.
        return redirect("/")
    exec_db("update device set lockout_start='%s', lockout_end='%s' where id=%s" % (start, end, deviceid))
    return redirect("/admin/interface/devices")
@app.route("/admin/interface/newuser")
def newUserInterface():
    """List pending badge registrations with the device each was swiped at."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    pending = query_db("select newuser.id,newuser.code,device.name from newuser left join device on newuser.deviceID = device.id")
    return render_template('admin_newuser.html', users=pending)
@app.route("/admin/interface/inactiveuser")
def inactiveUserInterface():
    """List inactive (status 'I') users, ordered by name."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    inactive = query_db("select name,code,id from user where status='I' order by name")
    return render_template('admin_inactiveuser.html', users=inactive)
@app.route("/admin/interface/inactiveuser/modal/deluser/<userid>/<name>/<badge>")
def delUserModal(userid, name, badge):
    """Render the inactive-users page with the delete-user confirmation modal open."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    inactive = query_db("select name,code,id from user where status='I' order by name")
    return render_template('modal_deluser.html', users=inactive, userid=userid, name=name, badge=badge)
@app.route("/admin/interface/inactiveuser/modal/useractive/<name>/<badge>/<newbadge>")
def userActiveModal(name, badge, newbadge):
    """Render the inactive-users page with the name-collision error modal open."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    inactive = query_db("select name,code,id from user where status='I' order by name")
    return render_template('modal_useractive.html', users=inactive, name=name, badge=badge, newbadge=newbadge)
@app.route("/admin/interface/newuser/modal/useractive/<name>/<badge>/<newbadge>")
def newuserActiveModal(name, badge, newbadge):
    """Render the pending-registrations page with the name-collision error modal open."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    pending = query_db("select id,code,deviceID from newuser")
    return render_template('modal_newuseractive.html', users=pending, name=name, badge=badge, newbadge=newbadge)
@app.route("/admin/interface/user")
def adminInterface():
    """List all active users ('A' and 'S'), 24-hr users first."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    active = query_db("select name,code,id,status from user where status IN ('A','S') order by status desc")
    return render_template('admin_user.html', users=active)
@app.route("/admin/interface/userAccess/<userid>")
def userAccessInterface(userid):
    """Show which devices a user can use, and which could still be granted."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    # list of all the devices the user could have access to
    allDevices = query_db("select id, name from device where allUsers=0 except select device.id, device.name from deviceAccess join device on device.id=deviceAccess.device join user on user.id = deviceAccess.user where deviceAccess.user=%s and device.allUsers=0" % userid)
    # list of devices with all user access
    allUserDevices = query_db("select name from device where allUsers=1")
    # list of devices the user currently has access to
    userAccess = query_db("select user.name, user.id, device.id, device.name, deviceAccess.time, deviceAccess.trainer from deviceAccess join device on device.id=deviceAccess.device join user on user.id = deviceAccess.user where deviceAccess.user=%s and device.allUsers=0" % userid)
    username = query_db("select user.name from user where id=%s" % userid)[0][0]
    s = query_db("select user.status from user where id=%s" % userid)[0][0]
    # translate the one-letter status code to a display label
    status_labels = {'A': "Active", 'S': "Active - 24 Hr", 'I': "Inactive"}
    userstatus = status_labels.get(s, "Unknown")
    return render_template('admin_userAccess.html', devices=allDevices, access=userAccess, alluserdevices=allUserDevices, userid=userid, username=username, userstatus=userstatus, ustatus=s)
@app.route("/admin/interface/devices")
def deviceInterface():
    """List every device with its access mode and lockout window."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    device_rows = query_db("select id,name,allUsers,lockout,lockout_start,lockout_end from device")
    return render_template('admin_devices.html', devices=device_rows)
@app.route("/admin/interface/deviceAccess/<deviceid>")
def deviceAccessInterface(deviceid):
    """List the active users granted access to one device, trainers first."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    devicename = query_db("select device.name from device where id=%s" % deviceid)[0][0]
    grants = query_db("select user.id, user.name, user.code, deviceAccess.trainer from deviceAccess join device on device.id=deviceAccess.device join user on user.id = deviceAccess.user where deviceAccess.device=%s and device.allUsers=0 and user.status IN ('A','S') order by deviceAccess.trainer desc" % deviceid)
    return render_template('admin_deviceAccess.html', access=grants, deviceid=deviceid, devicename=devicename)
@app.route("/toolSummary")
@app.route("/toolSummary/<start_date>")
@app.route("/toolSummary/<start_date>/<end_date>")
def toolSummaryInterface(start_date=None, end_date=None):
    """Render the tool-usage summary between start_date and end_date (YYYY-MM-DD).

    Defaults: start_date = first day of last month; end_date = one month after
    start_date, with the day clamped to the target month's length.
    """
    #if request.cookies.get('password') != C_password:
    #    return redirect("/")
    # calculate default start and end dates
    if start_date is None:
        # first day of the previous month (year rolls back in January)
        today = datetime.datetime.now()
        start_date = datetime.datetime(today.year - (today.month==1), ((today.month - 2)%12)+1, 1)
    else:
        start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    if end_date is None:
        end_year = start_date.year + (start_date.month==12)
        # Fix: the old expression ((month+1)%12) yields 0 for November, which
        # datetime rejects; (month % 12) + 1 maps 1-12 onto the next month.
        end_month = (start_date.month % 12) + 1
        end_day = min(start_date.day, monthrange(end_year, end_month)[1])
        end_date = datetime.datetime(end_year, end_month, end_day)
    else:
        end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    tool_summary = genToolSummary(start_date, end_date)
    return render_template('toolUse.html', tools = tool_summary,
                           start = start_date.strftime("%Y-%m-%d"),
                           end = end_date.strftime("%Y-%m-%d"))
@app.route("/admin/interface/log")
def viewLog():
    """Display the full login/logout event log."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    log_rows = query_db("select * from log")
    return render_template('admin_log.html', logs=log_rows)
@app.route("/admin/interface/csv")
def csvHTMLInterface():
    """Serve the CSV bulk-import form (submits to /admin/loadcsv)."""
    if request.cookies.get('password') != C_password:
        return redirect("/")
    return render_template('admin_csv.html')
#### Helper functions for tool summary ####
# define some classes for data records
class ToolSummary:
    """Aggregate login/logout counts and total in-use time for one tool."""
    __slots__ = ('logins', 'logouts', 'total_time')
    def __init__(self, logins=0, logouts=0, total_time=datetime.timedelta()):
        self.logins = logins        # number of login events seen
        self.logouts = logouts      # number of logout events seen
        self.total_time = total_time  # accumulated in-use timedelta
    def __repr__(self):
        return f"ToolSummary(logins={self.logins}, logouts={self.logouts}, total_time={self.total_time})"
class ToolState:
    """Transient per-tool state while replaying the log: who is logged in, and since when."""
    __slots__ = ('in_use', 'active_user', 'login_time')
    def __init__(self, in_use=False, active_user=0, login_time=0):
        self.in_use = in_use            # True while a login has no matching logout
        self.active_user = active_user  # user currently logged in (0 = nobody)
        self.login_time = login_time    # timestamp of the open login (0 = none)
class UserToolSummary:
    """Per-(user, tool) usage record: display name, login count, accumulated time."""
    __slots__ = ['name', 'logins', 'total_time']
    def __init__(self, name='', logins=0, total_time=datetime.timedelta()):
        # Fix: the constructor previously discarded the *name* argument
        # (self.name was hard-coded to ''); store the value passed in.
        self.name = name
        self.logins = logins
        self.total_time = total_time
    def __repr__(self):
        return "UserToolSummary(logins={}, total_time={})".format(
            self.logins, self.total_time)
    def __lt__(self, other):
        # ordered by accumulated time only (used to sort leaderboards)
        return self.total_time < other.total_time
class DefaultDictByKey(dict):
    """dict that synthesizes "<message><key>" for missing keys (without caching it)."""
    def __init__(self, message):
        # prefix prepended to any unknown key
        self.message = str(message)
    def __missing__(self, key):
        # NOTE: the fallback value is returned but NOT stored in the dict
        return self.message + str(key)
def genToolSummary(start_date, end_date):
    '''Function to generate tool summaries given start and end dates
    Input: start_date, end_date (datetime objects); defaults to "last month"
    Output: Dictionary keyed by tool id with name, login/logout counts,
    total in-use time and a top-10 per-user leaderboard'''
    # map device id (stringified) -> device name
    tools = query_db("SELECT id, name FROM device")
    toolnames = {}
    for tool in tools:
        toolnames[str(tool[0])] = tool[1]
    users = query_db("SELECT id, name, code FROM user")
    # unknown ids render as "Unknown user, id <id>" instead of raising KeyError
    user_id_to_name = DefaultDictByKey("Unknown user, id ")
    user_code_to_id = {}
    for user in users:
        user_id_to_name[str(user[0])] = str(user[1])
        user_code_to_id[str(user[2])] = str(user[0])
    # generate summaries by replaying log messages of the form
    # "login:<tool>:<value>" / "logout:<tool>:<value>" in timestamp order
    msgs = query_db("SELECT message, Timestamp FROM log WHERE Timestamp BETWEEN ? AND ?", (start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d')))
    summaries = defaultdict(ToolSummary)
    states = defaultdict(ToolState)
    user_summaries = defaultdict(UserToolSummary)
    other_msgs = 0;
    unmatched = 0
    for msg in msgs:
        ts = datetime.datetime.strptime(msg[1], '%Y-%m-%d %H:%M:%S')
        fields = msg[0].split(':')
        if (len(fields) != 3) or (fields[0] not in ('login', 'logout')):
            # anything that isn't a well-formed login/logout is just counted
            other_msgs += 1
            continue
        tool = fields[1]
        user = fields[2]
        if fields[0] == 'login':
            summaries[tool].logins += 1
            if states[tool].in_use:
                # login without a matching logout; keep counting anyway
                pass
                #print('Login without logout: ', ts, toolnames[tool],
                #user_id_to_name[user], states[tool].login_time)
            states[tool].in_use = True
            states[tool].active_user = user
            states[tool].login_time = ts
            user_summaries[(user, tool)].logins += 1
            user_summaries[(user, tool)].name = user_id_to_name[user]
        elif fields[0] == 'logout':
            # NOTE(review): logout messages appear to carry the badge *code*
            # while logins carry the user id — translate, falling back to
            # whoever is currently logged in on the tool.
            try:
                user_id = user_code_to_id[user]
            except KeyError:
                if states[tool].in_use:
                    user_id = states[tool].active_user
                    user_code_to_id[user] = user_id
                    print(("Unknown user, code {}. Assuming user id {} from "+
                           "prior tool login").format(user, user_id))
                else:
                    print("Unknown user, code", user)
            summaries[tool].logouts += 1
            if not states[tool].in_use:
                # logout without a matching login; nothing to accumulate
                pass
                #print('Logout without login: ', ts, toolnames[tool],
                #    user_id_to_name[user])
            else:
                states[tool].in_use = False
                summaries[tool].total_time += (ts - states[tool].login_time)
                if states[tool].active_user == user_id:
                    user_summaries[(user_id, tool)].total_time += (ts - states[tool].login_time)
                else:
                    unmatched += 1
    # group the per-(user, tool) records by tool for the leaderboards
    leaderboards = defaultdict(list)
    for ((_, tool), s) in user_summaries.items():
        leaderboards[tool].append(s)
    #print('non-login/logout messages:', other_msgs)
    #print('unmatched login/logout pairs:', unmatched)
    out_sum = dict()
    for (tool, s) in summaries.items():
        out_sum[tool] = dict()
        out_sum[tool]['name'] = toolnames[tool]
        out_sum[tool]['logins'] = s.logins
        out_sum[tool]['logouts'] = s.logouts
        out_sum[tool]['total'] = s.total_time
        out_sum[tool]['leaderboard'] = list()
        # top 10 users by accumulated time, descending (UserToolSummary.__lt__)
        leaderboards[tool].sort()
        for s in list(reversed(leaderboards[tool]))[:10]:
            out_sum[tool]['leaderboard'].append((s.name, s.total_time))
    return out_sum
def captureImage(webcam_url):
    """Capture image from webcam and upload to Imgur; returns Imgur URL, or '' on any failure."""
    # grab image from webcam (HTTP basic auth with the configured credentials)
    dl_resp = requests.get(webcam_url, auth=(c_webcam_username, c_webcam_password))
    if dl_resp.status_code != 200:
        print("Webcam download status code:", dl_resp.status_code)
        print(dl_resp.text)
        return ""
    # Imgur's API accepts base64-encoded image payloads
    img_b64 = base64.b64encode(dl_resp.content)
    # upload to Imgur (anonymous upload authorized by client id only)
    ul_resp = requests.post(
        'https://api.imgur.com/3/image',
        headers = {'Authorization': 'Client-ID ' + c_imgur_client_id},
        data = {'image': img_b64})
    if (ul_resp.status_code != 200):
        print("Imgur upload status code:", ul_resp.status_code)
        print(ul_resp.text)
        return ""
    return ul_resp.json()['data']['link']
if __name__ == "__main__":
    #app.run(host='0.0.0.0')
    # pass --noreload to disable Flask's auto-reloader (e.g. when run under a supervisor)
    use_reload = not (len(sys.argv) > 1 and sys.argv[1] == '--noreload')
    app.run(host='0.0.0.0', debug=True, use_reloader=use_reload)
|
# coding: utf-8
# In[1]:
#In this project I will try to detect the presence of heart disease based on 13 different features.
#If a high accuracy is achieved, this will show that we can predict heart disease in people with high certainty.
#This can be very valuable in practice: a lot of these features are already captured in Electronic Health Records, so they could be used to detect heart disease sooner.
#The features are: age, gender, chest pain type, resting blood pressure, serum cholesterol in mg/dl, fasting blood sugar, resting electrocardiographic results, maximum heart rate achieved, exercise induced angina, ST depression induced by exercise relative to rest, the slope of the peak exercise ST segment, number of major vessels (0-3) colored by fluoroscopy, thal
#The dataset is available on http://archive.ics.uci.edu/ml/datasets/Heart+Disease
# In[2]:
#Importing the libraries we're going to work with
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# In[3]:
#The data was collected from four different sources. Each database has the same instance format.
#Import the first data file, data collected from Cleveland
Cleveland = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data', header = None)
# In[4]:
#Import the second data file, data collected from Budapest
Budapest = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.hungarian.data', header = None)
# In[5]:
#Import the third data file, data collected from Switzerland (variable keeps the original spelling)
Switserland = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.switzerland.data', header = None)
# In[8]:
#Import the fourth and last data file, data collected from the VA (Long Beach, California)
California = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.va.data', header = None)
# In[9]:
#Now let's merge all this data into one DataFrame (stack the rows)
heart = pd.concat([Cleveland, Budapest, Switserland, California], axis = 0)
# In[10]:
#Check shape of the df
heart.shape
# In[11]:
#Peek at the first 5 instances
#Note: it looks like all values are numerical
heart.head()
# In[12]:
#Give the columns descriptive names
heart.rename(columns = {0: 'age', 1: 'sex', 2: 'cp', 3: 'trestbps', 4: 'chol', 5: 'fbs', 6: 'restecg', 7: 'thalach', 8: 'exang', 9: 'oldpeak', 10: 'slope', 11: 'ca', 12: 'thal', 13: 'disease'}, inplace= True)
# In[13]:
#Missing values are encoded as '?'; turn them into NaN so pandas can handle them
heart.replace(['?'], [np.nan], inplace=True)
# In[14]:
#Count non-missing values per column
#Note: thal, ca and slope have many missing values
heart.count(axis = 0)
# In[15]:
#Inspect the dtypes
#Note: features trestbps to thal are objects
heart.info()
# In[16]:
#Convert the object-typed features to floats
for _col in ('trestbps', 'chol', 'fbs', 'restecg', 'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal'):
    heart[_col] = heart[_col].astype('float32')
# In[17]:
#thal, ca and slope are mostly missing, so drop them entirely
for _col in ('slope', 'ca', 'thal'):
    heart.pop(_col)
# In[18]:
#Inspect feature distributions before choosing an imputation strategy
heart.hist()
# In[19]:
#Continuous features: impute missing values with the column median
for _col in ('trestbps', 'chol', 'restecg', 'thalach', 'oldpeak'):
    heart[_col].fillna(heart[_col].median(), inplace= True)
# In[20]:
#Categorical features fbs and exang: impute with the most frequent value (0.0)
for _col in ('fbs', 'exang'):
    heart[_col].fillna(0.0, inplace = True)
# In[21]:
#The label has 5 categories: 0 (no disease) and severities 1-4.
#We only care about presence/absence, so collapse 1-4 into class 1.
heart['disease'].replace([1,2,3,4], [1,1,1,1], inplace = True)
# In[22]:
#Some ML algorithms perform poorly when input features have very different scales,
#so standardize the wide-range features
from sklearn.preprocessing import StandardScaler
std_scal = StandardScaler()
heart[['chol', 'thalach', 'trestbps', 'age']]= std_scal.fit_transform(heart[['chol', 'thalach', 'trestbps', 'age']])
# In[23]:
#Split into the feature matrix X and the label vector y
X = heart.iloc[:,:10]
y = heart['disease']
# In[24]:
#First model: support vector machine, tuned with grid search.
from sklearn.svm import SVC
svc = SVC()
from sklearn.model_selection import GridSearchCV
# Fix: a *list* of single-parameter dicts makes GridSearchCV search each
# parameter separately (with the others at their defaults). A single dict
# searches the full cross-product, which is what the narrative here
# (a best C/kernel/degree *combination*) describes.
grid_svc = {'C': [0.1, 1, 5], 'kernel': ['poly', 'rbf', 'linear'], 'degree': [1, 2, 3]}
gridsearch_svc = GridSearchCV(svc, grid_svc, cv = 10, scoring = 'f1')
gridsearch_svc.fit(X, y)
gridsearch_svc.best_params_
gridsearch_svc.best_score_
# In[26]:
#Second model: Random Forest (same fix: search the parameter cross-product).
from sklearn.ensemble import RandomForestClassifier
ran_for = RandomForestClassifier()
grid_ran_for = {'max_depth': [5, 6, 7], 'min_samples_leaf': [20, 25, 30]}
gridsearch_ran_for = GridSearchCV(ran_for, grid_ran_for, cv = 10, scoring = 'f1')
gridsearch_ran_for.fit(X,y)
gridsearch_ran_for.best_params_
gridsearch_ran_for.best_score_
# In[30]:
#Third model: kNN.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
grid_knn = {'n_neighbors': [3, 5]}
gridsearch_knn = GridSearchCV(knn, grid_knn, cv = 10, scoring = 'f1')
gridsearch_knn.fit(X,y)
gridsearch_knn.best_params_
gridsearch_knn.best_score_
# In[31]:
#Ensemble the three tuned models with hard voting.
from sklearn.ensemble import VotingClassifier
voting = VotingClassifier(estimators = [('svc', SVC(C = 0.1, kernel = 'rbf', degree =3)), ('for', RandomForestClassifier(max_depth = 5, min_samples_leaf = 25)), ('knn',KNeighborsClassifier())], voting = 'hard')
# Divide into training and test to prevent overfitting to training data
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection (already used above).
from sklearn.model_selection import train_test_split
# Fix: accuracy_score was called below without ever being imported (NameError).
from sklearn.metrics import accuracy_score
X_train, X_test, y_train, y_test = train_test_split(X,y)
voting.fit(X_train,y_train)
y_pred = voting.predict(X_test)
accuracy_score(y_pred, y_test)
# In[32]:
#Final note: this method achieves an accuracy of around 0.83 on the test data,
#similar to the training accuracy. Possible next steps:
# Gather more features
# Gather more data
# Do a more extensive Grid Search
# Use a more complex model
|
from __future__ import unicode_literals
from django.db import models
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class EmailManager(models.Manager):
    """Manager exposing simple e-mail format validation."""
    def validate(self, email):
        """Return True when *email* is non-empty and matches EMAIL_REGEX."""
        return bool(email) and EMAIL_REGEX.match(email) is not None
class Email(models.Model):
    # Stored e-mail address; format checking is done via EmailManager.validate.
    email = models.CharField(max_length=155)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    objects = EmailManager()  # custom manager providing validate()
|
import robin_stocks as rs
from time import sleep
from getpass import getpass
# Buy-the-dip parameters:
#   p: buy again whenever the price drops to p * reference price
#   c: dollar amount per fractional-share purchase
ticker = 'NIO'
p = 0.94
c = 1
# NOTE(review): password=None presumably relies on a cached robin_stocks
# token; uncomment getpass() for interactive login — confirm before use.
rs.login(username='liujch1998',
         password=None,#getpass(),
         expiresIn=86400 * 365,
         by_sms=True)
try:
    # seed the reference price with the 52-week high
    fund = rs.stocks.get_fundamentals(ticker, info=None)
    last_high = float(fund[0]['high_52_weeks'])
    print('last_high = ', last_high)
except Exception as e:
    print('Error fetching fundamentals: ', e)
    exit(0)
next_buy = p * last_high
# resume from the last recorded buy threshold, if any
# NOTE(review): this crashes if last_buy.txt does not exist — create it first
with open('last_buy.txt', 'r') as f:
    lines = f.readlines()
    if lines != []:
        last_buy = float(lines[-1].strip('\n'))
        next_buy = p * last_buy
        print('last_buy = ', last_buy)
print('next_buy = ', next_buy)
# Poll once a minute: track the running high, and buy c dollars' worth each
# time the price falls to next_buy, then lower the threshold by a factor of p.
while True:
    try:
        price = rs.stocks.get_latest_price(ticker, includeExtendedHours=True)
        price = float(price[0])
        if last_high < price:
            # a new high raises the reference price, and with it the buy trigger
            last_high = price
            next_buy = p * last_high
        if price <= next_buy:
            try:
                buy = rs.orders.order_buy_fractional_by_price(ticker, c, extendedHours=True)
                print('Bought at price = ', price)
                # persist the threshold that triggered this buy so a restart resumes from it
                with open('last_buy.txt', 'a') as f:
                    f.write(str(next_buy) + '\n')
                next_buy *= p
                print('next_buy = ', next_buy)
            except Exception as e:
                print('Error placing order: ', e)
    except Exception as e:
        print('Error fetching current price: ', e)
    sleep(60)
|
def solve(n, k, x):
    """Minimum walking distance from 0 to light k of the n candles at sorted positions x.

    You visit one contiguous run of k candles, turning around at most once;
    try every split of i candles on the non-positive side and k-i on the
    positive side, paying double for the leg you walk first.
    """
    # index of the first strictly positive candle (n when all are <= 0)
    first_pos = n
    for idx, pos in enumerate(x):
        if pos > 0:
            first_pos = idx
            break
    best = float('inf')
    for i in range(k + 1):  # i candles taken from the non-positive side
        if first_pos - i < 0 or first_pos + k - i > n:
            continue  # split not realizable with the available candles
        if i == 0:
            # all candles on the positive side: walk straight right
            best = min(best, x[first_pos + k - 1])
        elif i == k:
            # all candles on the non-positive side: walk straight left
            best = min(best, -x[first_pos - k])
        else:
            left = x[first_pos - i]            # leftmost candle visited (<= 0)
            right = x[first_pos + (k - i) - 1] # rightmost candle visited (> 0)
            # go left first (double the left leg) or right first (double the right leg)
            best = min(best, right - 2 * left, right * 2 - left)
    return best

if __name__ == "__main__":
    N, K = map(int, input().split())
    xs = list(map(int, input().split()))
    print(solve(N, K, xs))
|
import tensorflow as tf
from tensorflow.contrib import layers
def semi_supervised_encoder_convolutional(input_tensor, z_dim, y_dim, batch_size, network_scale=1.0, img_res=28, img_channels=1):
    """Convolutional encoder producing class logits y (y_dim) and latent code z (z_dim).

    input_tensor is reshaped to (-1, img_res, img_res, img_channels) — assumes
    it holds flattened images; TODO confirm with callers.
    network_scale multiplies the per-layer filter counts.
    """
    f_multiplier = network_scale
    net = tf.reshape(input_tensor, [-1, img_res, img_res, img_channels])
    # four stride-2 convs (interleaved with stride-1 convs) downsample 4x total
    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(128*f_multiplier), 3, stride=2)
    # flatten per example, then a shared fully-connected trunk
    net = tf.reshape(net, [batch_size, -1])
    net = layers.fully_connected(net, 1000)
    # y: raw logits (no activation, no normalizer); z: linear latent code
    y = layers.fully_connected(net, y_dim, activation_fn=None, normalizer_fn=None)
    z = layers.fully_connected(net, z_dim, activation_fn=None)
    return y, z
def semi_supervised_encoder_fully_connected(input_tensor, z_dim, y_dim, network_scale=1.0):
    """Two-hidden-layer MLP encoder producing class logits y and latent code z.

    network_scale multiplies the hidden width (base 1000).
    """
    hidden_size = int(1000 * network_scale)
    net = layers.fully_connected(input_tensor, hidden_size)
    net = layers.fully_connected(net, hidden_size)
    # y: raw logits (no activation, no normalizer); z: linear latent code
    y = layers.fully_connected(net, y_dim, activation_fn=None, normalizer_fn=None)
    z = layers.fully_connected(net, z_dim, activation_fn=None)
    return y, z
def semi_supervised_encoder(input_tensor, z_dim, y_dim, batch_size, do_convolutional, network_scale=1.0, img_res=28, img_channels=1):
    """Dispatch to the convolutional or fully-connected encoder; returns (y, z)."""
    if not do_convolutional:
        return semi_supervised_encoder_fully_connected(input_tensor, z_dim, y_dim, network_scale)
    return semi_supervised_encoder_convolutional(input_tensor, z_dim, y_dim, batch_size, network_scale, img_res, img_channels)
def semi_supervised_decoder_convolutional(input_tensor, batch_size, n_dimensions, network_scale=1.0, img_res=28, img_channels=1):
    """Transposed-conv decoder mapping a code back to a flattened image in [0, 1].

    Projects the code to a 2x2 feature map, then upsamples with
    conv2d_transpose stacks; only 28x28 and 32x32 outputs are supported
    (the 28 branch uses VALID padding to hit the non-power-of-two size).
    """
    f_multiplier = network_scale
    net = layers.fully_connected(input_tensor, 2*2*int(128*f_multiplier))
    net = tf.reshape(net, [-1, 2, 2, int(128*f_multiplier)])
    assert(img_res in [28, 32])
    if img_res==28:
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=1)
        # VALID padding grows the map beyond a power of two toward 28
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 4, stride=1, padding='VALID')
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 4, stride=1)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=1)
    else:
        # 32x32: four stride-2 upsamplings from the 2x2 seed
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=1)
    # final layer maps to img_channels with sigmoid so pixels land in [0, 1]
    net = layers.conv2d_transpose(net, img_channels, 5, stride=1, activation_fn=tf.nn.sigmoid)
    net = layers.flatten(net)
    return net
def semi_supervised_decoder_fully_connected(input_tensor, batch_size, n_dimensions, network_scale=1.0, img_res=28, img_channels=1):
    """Two-hidden-layer MLP decoder mapping a code to a flattened image in [0, 1].

    network_scale multiplies the hidden width (base 1000).
    """
    output_size = img_res*img_res*img_channels
    n_hid = int(1000*network_scale)
    net = layers.fully_connected(input_tensor, n_hid)
    net = layers.fully_connected(net, n_hid)
    # sigmoid keeps reconstructed pixel values in [0, 1]
    net = layers.fully_connected(net, output_size, activation_fn=tf.nn.sigmoid)
    return net
def semi_supervised_decoder(input_tensor, batch_size, n_dimensions, do_convolutional, network_scale=1.0, img_res=28, img_channels=1):
    """Dispatch to the convolutional or fully-connected decoder."""
    if not do_convolutional:
        return semi_supervised_decoder_fully_connected(input_tensor, batch_size, n_dimensions, network_scale, img_res, img_channels)
    return semi_supervised_decoder_convolutional(input_tensor, batch_size, n_dimensions, network_scale, img_res, img_channels)
def aa_discriminator(input_tensor, batch_size, n_dimensions):
    """Two-hidden-layer MLP emitting 2 unnormalized class logits (real vs. fake)."""
    hidden_units = 1000
    hidden = layers.fully_connected(input_tensor, hidden_units)
    hidden = layers.fully_connected(hidden, hidden_units)
    return layers.fully_connected(hidden, 2, activation_fn=None)
def correlation_classifier(input_tensor, batch_size, n_classes=10):
    """Two-hidden-layer MLP emitting ``n_classes`` unnormalized logits."""
    hidden_units = 1000
    hidden = layers.fully_connected(input_tensor, hidden_units)
    hidden = layers.fully_connected(hidden, hidden_units)
    return layers.fully_connected(hidden, n_classes, activation_fn=None)
#-*- coding: utf-8 -*-
#@File : mockTest.py
#@Time : 2021/6/4 20:32
#@Author : xintian
#@Email : 1730588479@qq.com
#@Software: PyCharm
#Date:2021/6/4
import requests
# Base URL of the mock API server under test.
HOST = 'http://127.0.0.1:9999'
# def test():
# #1- url
# url = f'{HOST}/sq'
# payload = { "key1":"abc"}
# #发请求
# resp = requests.post(url,data=payload)
# return resp.text
#
# if __name__ == '__main__':
# res = test()
# print(res)
#1- 申请订单提交接口
def create_order(inData):
    """POST the order payload to the create-order endpoint; return parsed JSON."""
    response = requests.post(f'{HOST}/api/order/create/', json=inData)
    return response.json()
#2- 查询申请结果接口
"""
查询接口注意事项:
1- 你使用什么去查询---申请的id
2- 频率 interval
3- 如果查询不到--超时 timeout
"""
import time
def get_order_result(orderID, interval=5, timeout=30):  # units: seconds
    """Poll the order-result endpoint until data arrives or the timeout expires.

    :param orderID: id of the submitted order
    :param interval: seconds to wait between polls (default 5)
    :param timeout: overall polling budget in seconds (default 30)
    :return: body of the last response, or '' if no poll was made
    """
    url = f'{HOST}/api/order/get_result01/'
    payload = {"order_id": orderID}
    # Polling window ends at start time + timeout.
    endTime = time.time() + timeout
    cnt = 0      # number of polls issued so far
    result = ''  # BUGFIX: `resp` was unbound when timeout <= 0, so the final
                 # `return resp.text` raised UnboundLocalError.
    while time.time() < endTime:
        resp = requests.get(url, params=payload)
        cnt += 1
        result = resp.text
        if resp.text:  # a non-empty body means the result is ready
            print(f"第{cnt}次查询,已经有结果,停止查询!", resp.text)
            break
        else:
            print(f"第{cnt}次查询,没有结果,请等待!")
            time.sleep(interval)
    print("---查询结束!---")
    return result
import threading#多线程模块--自带的
if __name__ == '__main__':
    startTime = time.time()  # wall-clock start of the whole run
    # Test payload for the order-creation endpoint.
    testData = {
        "user_id": "sq123456",
        "goods_id": "20200815",
        "num": 1,
        "amount": 200.6
    }
    # Submit the order and keep the id used for result polling.
    id = create_order(testData)['order_id']
    print(id)
    # Simulate the rest of the automated API test suite running.
    for one in range(20):
        time.sleep(1)  # stand-in for another API test case
        print(f"{one}----我正在执行其他的接口的自动化执行操作----")
    endTime = time.time()  # wall-clock end of the main-thread work
    print(f"整个项目自动化测试完成,总耗时>>>{endTime-startTime}")#----80s
    """
    创建子线程:
    threading.Thread(target,args)
    target 你需要把哪一个函数作为子线程执行,就把对应的函数名赋值给这个参数
    args:这个函数,调用调用的时候,需要传递是么参数--参数的参数 ---元组类型
    """
    # Poll for the order result on a worker thread so it overlaps the tests above.
    t1 = threading.Thread(target=get_order_result,args=(id,))
    # If the main thread exits or crashes, the child should exit too.
    #t1.setDaemon(True)  # daemon thread
    # Start the worker thread.
    t1.start()
    t1.join()  # block: the main thread waits for the worker before finishing
    # res2 = get_order_result(id)
    # print(res2)
    # #后面是其他的接口的自动化执行操作
    # for one in range(20):
    #     time.sleep(1)#模拟下接口自动化测试的操作过程
    #     print(f"{one}----我正在执行其他的接口的自动化执行操作----")
    # endTime = time.time() # 所有接口运行的开始时间
    #
    # print(f"整个项目自动化测试完成,总耗时>>>{endTime-startTime}")#----80s
    """
    问题:
    你的领导看到你的自动化测试非常的欣慰,但是在欣慰过后,
    有一个小需求,能不能提高下执行效率---
    方案分析:
    1- 领导看到的是表面的一个现象
    2- 慢的根本原因:运行机制问题---time.sleep(5)
    3- 给出方案:
        1- requests.xx()和sleep()----io阻塞模式
        2- 采取多线程操作
            1- 主线程---mian 主线程
            2- 子线程---get_order_result
    """
|
# https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/
# 剑指 Offer 59 - I. 滑动窗口的最大值
from collections import deque
from typing import List
class Solution:
    """
    Sliding-window maximum via a max-heap of (negated value, index) pairs.

    Index bookkeeping handles two situations:
    1. The last popped maximum may still lie inside the window, in which
       case it is reused without popping the heap again.
    2. Entries popped from the heap may already have slid out of the
       window, so popping continues until the popped index is inside it.
    """
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        from heapq import heappop, heappush
        res = []
        left, right = 0, 0
        heap = []
        # Pre-load the first k-1 elements (negated: heapq is a min-heap).
        while right < k - 1:
            heappush(heap, (-nums[right], right))
            right += 1
        # (e, i): last popped (negated) maximum and its index.
        e, i = float('inf'), 0
        while right < len(nums):
            heappush(heap, (-nums[right], right))
            # Re-pop only when the cached max left the window or the new value beats it.
            if i < left or e > -nums[right]:
                e, i = heappop(heap)
                # Discard entries whose index slid out of the window.
                while i < left:
                    e, i = heappop(heap)
            res.append(-e)
            right += 1
            left += 1
        return res
class Solution1:
    """
    Heap-based sliding-window maximum that peeks at the heap root instead of
    popping the maximum, lazily discarding indices that left the window.
    """
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        from heapq import heappop, heappush, heapify
        if k == 0:
            return []
        total = len(nums)
        # Negate values: heapq is a min-heap, so the root is the window max.
        heap = [(-nums[pos], pos) for pos in range(k)]
        heapify(heap)
        answer = [-heap[0][0]]
        for pos in range(k, total):
            heappush(heap, (-nums[pos], pos))
            # Evict roots whose index fell out of the window [pos-k+1, pos].
            while heap[0][1] <= pos - k:
                heappop(heap)
            answer.append(-heap[0][0])
        return answer
class Solution2:
    """
    Monotonic-queue sliding-window maximum: the deque keeps window values in
    non-increasing order, so its head is always the current window maximum.
    """
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        answer = []
        window = deque()

        def push(value):
            # Drop smaller tail values; they can never become a maximum.
            while window and window[-1] < value:
                window.pop()
            window.append(value)

        # Warm up the first window.
        for idx in range(k):
            push(nums[idx])
        answer.append(window[0])
        # Slide: add the incoming value, retire the outgoing one if it was the head.
        for idx in range(k, len(nums)):
            push(nums[idx])
            if window[0] == nums[idx - k]:
                window.popleft()
            answer.append(window[0])
        return answer
|
# Generated by Django 3.0.7 on 2021-05-30 05:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: drops the ``role`` field from the
    ``customers`` model in app ``carapp``."""

    dependencies = [
        ('carapp', '0004_remove_car_rcno'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='customers',
            name='role',
        ),
    ]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Release version of the nose-perfdump plugin.
version = '1.5'
# Long description shown on the package index page.
long_desc = """
nose-perfdump is a Nose plugin that collects per-test performance metrics into an
SQLite3 database and reports the slowest tests, test files, and total time
spent in tests. It is designed to make profiling tests to improve their speed
easier.
[Github](https://github.com/etscrivner/nose-perfdump)
"""
setup(
    name='nose-perfdump',
    version=version,
    description='Dump per-test performance metrics to an SQLite database for querying.',
    long_description=long_desc,
    author='Eric Scrivner',
    keywords='nose,nose plugin,profiler,profiling,tests,unittest',
    install_requires=['nose', 'pyparsing', 'prettytable'],
    author_email='eric.t.scrivner@gmail.com',
    url='https://github.com/etscrivner/nose-perfdump',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    # Register the Nose plugin entry point and the perfdump-cli console script.
    entry_points={
        'nose.plugins.0.10': [
            'perfdump = perfdump:PerfDumpPlugin'
        ],
        'console_scripts': [
            'perfdump-cli = perfdump.console:main'
        ]
    }
)
|
'''
Given an undirected graph, return true if and only if it is bipartite.
Recall that a graph is bipartite if we can split its set of nodes into two independent subsets A and B, such that every edge in the graph has one node in A and another node in B.
The graph is given in the following form: graph[i] is a list of indexes j for which the edge between nodes i and j exists. Each node is an integer between 0 and graph.length - 1.
There are no self edges or parallel edges: graph[i] does not contain i, and it doesn't contain any element twice.
'''
class Solution:
    def isBipartite(self, graph: "List[List[int]]") -> bool:
        """Two-color the graph by DFS; it is bipartite iff no edge joins
        two same-colored nodes.

        ``graph[i]`` is the adjacency list of node ``i``. The annotation is a
        string (forward reference) because this module never imports
        ``typing.List`` -- FIX: a bare ``List`` raised NameError when the
        ``def`` statement was evaluated.
        """
        color = {}  # node -> 0/1 partition label

        def dfs(n):
            # Push the opposite color to every neighbor; fail on a conflict.
            for i in graph[n]:
                if i in color:
                    if color[i] == color[n]:
                        return False
                else:
                    color[i] = 1 - color[n]
                    if not dfs(i):
                        return False
            return True

        # The graph may be disconnected: seed every still-uncolored component.
        for i in range(len(graph)):
            if i not in color:
                color[i] = 0
                if not dfs(i):
                    return False
        return True
|
# -*- coding: utf-8 -*-
import copy
import logging
import os
import re
import sys
import traceback
from logging.handlers import RotatingFileHandler
from Queue import Queue
from threading import Event, Thread
class Carrier(object):
    ''' Hands packets between threads: a queue carries the data and an
    event wakes the consuming thread. '''
    def __init__(self, name):
        self.name = name
        self.queue = Queue()
        self.event = Event()
    def handover(self, packet):
        ''' Enqueue *packet* and pulse the consumer's wake-up event. '''
        self.queue.put_nowait(packet)
        self.event.set()
        self.event.clear()
    def pickup(self):
        ''' Dequeue and return the next delivered packet. '''
        return self.queue.get_nowait()
    def wake(self):
        ''' Wake the consumer without delivering data (event stays set). '''
        self.event.set()
    def clear(self):
        ''' Drain the queue until nothing is left. '''
        while not self.queue.empty():
            self.queue.get_nowait()
    def empty(self):
        ''' True when no packets are waiting. '''
        return self.queue.empty()
    def sleep(self):
        ''' Block the consumer until another thread wakes this carrier. '''
        self.event.wait()
class BaseThread(Thread):
    ''' Base class for the master, logger, input and output(filter) threads.
    master, logger and input receive ``output_carriers``: a mapping of
    destination module name -> that module's Carrier object.
    '''
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None,
                verbose=None, master=None, logger=None, output_carriers=None):
        super(BaseThread, self).__init__(group, target, name, args, kwargs, verbose)
        # BUGFIX: the default used to be a shared mutable dict ({}); every
        # instance constructed without an explicit mapping aliased the same
        # dict, so one instance's destinations leaked into the others.
        if output_carriers is None:
            output_carriers = {}
        # Copy plain kwargs onto the instance, skipping reserved names.
        if isinstance(kwargs, dict):
            for k, v in kwargs.items():
                if k not in ("name", "master", "logger", "output_carriers"):
                    setattr(self, k, v)
        self.daemon = True
        self.master = master
        self.logger = logger
        self.output_carriers = output_carriers
        self.carrier = Carrier(name)
        self.stopevent = Event()
        self.history = []  # recent messages, for duplicate suppression in sendable()
        self.init()
        self.output_names = set(self.output_carriers.keys())
    def init(self):
        ''' Subclass hook: one-time setup. '''
        pass
    def cleanup(self):
        ''' Subclass hook: teardown performed on join(). '''
        pass
    def join(self, timeout=None):
        self.cleanup()
        super(BaseThread, self).join(timeout)
    def call_master(self, data, type):
        ''' Send data to the master thread (errors, module stop requests, ...). '''
        packet = {"data":data, "from":self.name, "type":type}
        self.master.carrier.handover(packet)
    def log(self, text, level='INFO'):
        ''' Write a log record for this module via the logger thread. '''
        packet = {"text":text, "from":self.name, "level":level.upper()}
        self.logger.carrier.handover(packet)
    def send(self, data, target=[], exclude=[]):
        ''' Deliver data to downstream carriers, filtered by target/exclude.
        (target/exclude defaults are never mutated, only reassigned.) '''
        packet = {"data":data, "from":self.name}
        # Normalize bare-string arguments into lists.
        if isinstance(target, str):
            target = [target]
        if isinstance(exclude, str):
            exclude = [exclude]
        # Set algebra over destination names; target takes priority.
        if target:
            output_names = self.output_names & set(target)
        else:
            output_names = self.output_names - set(exclude)
        for name in output_names:
            # Deep-copy so receivers cannot mutate each other's packet.
            self.output_carriers[name].handover(copy.deepcopy(packet))
    def sendable(self, message):
        ''' True unless *message* appeared among the last 20 messages. '''
        if message in self.history:
            return False
        self.history.append(message)
        if len(self.history) > 20:
            self.history.pop(0)
        return True
class Input(BaseThread):
    ''' Thread that periodically pulls data into Boxnya from the outside
    world (network, filesystem, ...) and forwards it to filter/output modules. '''
    def fetch(self):
        ''' Subclass hook: gather data and forward it via self.send(data, target, exclude). '''
    def run(self):
        try:
            # Keep fetching until the master sets our stop event.
            while not self.stopevent.is_set():
                self.fetch()
        except Exception:
            self.call_master(sys.exc_info(), "error")
class Output(BaseThread):
    ''' Thread that pushes packets received from inputs out of Boxnya. '''
    def throw(self, packet):
        ''' Subclass hook: final handling of a received packet. '''
        pass
    def run(self):
        try:
            while not self.stopevent.is_set():
                # Sleep until a producer wakes us, then drain the queue.
                self.carrier.sleep()
                while not self.carrier.empty():
                    self.throw(self.carrier.pickup())
        except Exception:
            self.call_master(sys.exc_info(), "error")
class Filter(Output):
    ''' Thread that aggregates several inputs, filters them, and forwards
    the result to output modules. '''
    def filter(self, packet):
        ''' Subclass hook: filtering logic; forward via self.send(data, target, exclude). '''
    def throw(self, packet):
        # Reuse Output's receive loop; each packet goes through the filter.
        self.filter(packet)
class Logger(BaseThread):
    ''' Thread that writes log records to per-module rotating files. '''
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None,
                verbose=None, master=None, settings=None):
        super(Logger, self).__init__(group, target, "logger", args, kwargs, verbose)
        self.master = master
        self.logger = self  # the logger logs through itself
        self.log_dir = settings["LOG_DIR"]
        self.loggers = {}   # module name -> stdlib logger with a rotating file handler
        # Create the system log (named after the master thread).
        self.loggers[self.master.name] = self._logger_fuctory(self.master.name)
        # Create a logger for every module listed in settings.LOG_MOD.
        for name in settings["LOG_MOD"]:
            self.loggers[name] = self._logger_fuctory(name)
    def _logger_fuctory(self, name):
        # NOTE: "fuctory" is a long-standing typo for "factory"; kept as-is.
        # Builds a logging.Logger writing to <LOG_DIR>/<name>.log, rotating
        # at ~1 MB with 5 backups.
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        handler = RotatingFileHandler(os.path.join(self.log_dir, "%s.log" % name), maxBytes=1000000, backupCount=5)
        formatter = logging.Formatter(fmt="%(asctime)s %(levelname)s %(message)s", datefmt="%b %d %H:%M:%S %Y")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        return logger
    def _write(self, packet):
        # Resolve the textual level name; unknown levels fall back to INFO.
        level = getattr(logging, packet["level"], logging.INFO)
        logger = self.loggers.get(packet["from"])
        if logger:
            logger.log(level, packet["text"])
        # For ERROR and above, also forward the text to the settings.LOG_OUT outputs.
        if level >= logging.ERROR:
            self.send(packet["text"])
    def run(self):
        try:
            while not self.stopevent.is_set():
                self.carrier.sleep()
                while not self.carrier.empty():
                    packet = self.carrier.pickup()
                    self._write(packet)
        except Exception:
            self.call_master(sys.exc_info(), "error") #TODO: restart logic for the Logger
class Master(BaseThread):
    ''' Thread that manages every module; modules are started and stopped by this thread. '''
    def __init__(self, settings, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
        super(Master, self).__init__(group, target, "system", args, kwargs, verbose)
        self.modules = {} # holds the instance of every running module
        self.settings = {}
        self.output_carriers = {}
        self._set_settings(settings)
        # Start the logger first so the steps below can log.
        if self.logging:
            self.logger = Logger(master=self, settings=self.log_settings)
            self.logger.start()
            self.log("Boxnya system started.")
        self._load_modules()
        # Instantiate the output modules; their carriers become destinations.
        for name, module in self.output_modules.items():
            instance = module(name=name, kwargs=self.settings.get(name), master=self, logger=self.logger)
            self.modules[name] = instance
            self.output_carriers[name] = instance.carrier
        # Wire the logger's error output to the outputs listed in LOG_OUT.
        if self.logging:
            log_outputs = self.log_settings["LOG_OUT"]
            self.logger.output_carriers = dict([(name, carrier) for name, carrier in self.output_carriers.items()
                                        if name.split(".")[0] in log_outputs])
            self.logger.output_names = set(log_outputs)
        # Instantiate the filters, handing each its destination carriers.
        for name, module in self.filter_modules.items():
            outputs = dict([(output_name, carrier) for output_name, carrier in self.output_carriers.items()
                        if output_name.split(".")[0] in self.input_to_output.get(name.split(".")[0])])
            instance = module(name=name, kwargs=self.settings.get(name),
                            master=self, logger=self.logger, output_carriers=outputs)
            self.modules[name] = instance
            self.output_carriers[name] = instance.carrier
        # Instantiate the input modules, handing each its destination carriers.
        for name, module in self.input_modules.items():
            outputs = dict([(output_name, carrier) for output_name, carrier in self.output_carriers.items()
                        if output_name.split(".")[0] in self.input_to_output.get(name.split(".")[0])])
            instance = module(name=name, kwargs=self.settings.get(name),
                            master=self, logger=self.logger, output_carriers=outputs)
            self.modules[name] = instance
    def _make_module_dict(self, dirname):
        ''' Import every module under *dirname* and return a dict mapping each
        module name to the class of the same (case-insensitive) name. '''
        names = __import__(dirname, fromlist=(dirname)).__all__
        names.remove('__init__')
        for name in names:
            __import__('%s.%s' % (dirname, name), fromlist=(name))
        modules = [(sys.modules.get('%s.%s' % (dirname,name)), name) for name in names]
        module_dict = {}
        for module, name in modules:
            if self.enable_modules == [] or name in self.enable_modules: # an empty enable_modules means "enable all"
                try:
                    # First attribute whose name case-insensitively matches the module name.
                    class_name = [obj_name for obj_name in dir(module) if re.match(name, obj_name, re.IGNORECASE)][0]
                    module_dict[name] = getattr(module, class_name)
                except AttributeError:
                    pass
        return module_dict
    def _set_settings(self, settings):
        # Split the settings dict into the attributes the master uses.
        self.logging = settings["LOGGING"]
        self.log_settings = settings["LOG_SETTINGS"]
        self.enable_modules = settings["ENABLE_MODULES"]
        self.input_to_output = settings["INOUT"]
        self.settings.update(settings["MODULE_SETTINGS"])
        # Resolve "include" references by embedding the included module's settings.
        for name, dic in self.settings.items():
            if "include" in dic:
                for mod_name in dic["include"]:
                    self.settings[name][mod_name] = self.settings[mod_name]
    def _load_modules(self):
        self.input_modules = self._make_module_dict('lib.inputs')
        self.output_modules = self._make_module_dict('lib.outputs')
        self.filter_modules = self._make_module_dict('lib.filters')
        if self.input_modules == {} or self.output_modules == {}:
            self.log("no INPUT or OUTPUT module.", "ERROR")
            self.log("Boxnya system terminate.")
            sys.exit("Error : no INPUT or OUTPUT module.")
        for input, outputs in self.input_to_output.items():
            # An empty output list in INOUT means "send to every loaded destination".
            if outputs == [] and input in self.input_modules:
                self.input_to_output[input] = self.output_modules.keys() + self.filter_modules.keys()
            elif outputs == [] and input in self.filter_modules:
                self.input_to_output[input] = self.output_modules.keys()
        # If settings multiplexes a module (a list of configs), fork one copy per config.
        for name, confs in self.settings.items():
            if isinstance(confs, list):
                for i,conf in enumerate(confs):
                    if i==0: # the 0th copy keeps the original module name
                        self.settings[name] = conf
                    else:
                        new_name = "%s.%d" % (name, i)
                        self.settings[new_name] = conf
                        self._fork(new_name)
    def _fork(self, name):
        ''' Fork a module; the copies are registered as "hoge.1", "hoge.2", ... '''
        original_name = name.split(".")[0]
        for modules in (self.input_modules, self.filter_modules, self.output_modules):
            if original_name in modules:
                module = modules[original_name]
                module = copy.deepcopy(module)
                modules[name] = module
    def run(self):
        # Start every module, then dispatch control packets until stopped.
        for instance in self.modules.values():
            instance.start()
        self.log("Boxnya module run.")
        while not self.stopevent.is_set():
            self.carrier.sleep()
            while not self.carrier.empty():
                packet = self.carrier.pickup()
                if packet["type"] == "error":
                    self._error_handle(packet)
                elif packet["type"] == "start":
                    self._start_module(packet["data"])
                elif packet["type"] == "stop":
                    self._stop_module(packet["data"])
                elif packet["type"] == "log":
                    if isinstance(packet["data"], str):
                        self.log(packet["data"])
    def _error_handle(self, exception):
        # Log the traceback a module reported, then restart that module.
        log_text = "Exception has occured in %s : %s %s" % (
                        exception["from"],
                        str(traceback.format_tb(exception["data"][2])),
                        str(exception["data"][1])
                    )
        self.log(log_text, level="ERROR")
        self._start_module(exception["from"])
    def join(self, timeout=None, errmsg=""):
        # NOTE(review): this iterates .values() (instances) although
        # _stop_module() expects a module *name* key, so the membership test
        # inside never matches -- confirm whether .keys() was intended.
        for name in self.modules.values():
            self._stop_module(name)
        self.log("Boxnya module terminated.")
        self.log("Boxnya system terminate.")
        self.logger.carrier.wake()
        self.logger.stopevent.set()
        self.logger.join()
        self.carrier.wake()
        self.stopevent.set()
        super(Master, self).join(timeout)
    def _start_module(self, name):
        # (Re)create and start the named module; filters/outputs reuse their
        # existing carrier so producers keep a valid destination.
        outputs = dict([(output_name, carrier) for output_name, carrier in self.output_carriers.items()
                    if output_name.split(".")[0] in self.input_to_output.get(name.split(".")[0],[])])
        if name in self.input_modules:
            instance = self.input_modules[name](name=name, kwargs=self.settings.get(name),
                                master=self, logger=self.logger, output_carriers=outputs)
            instance.start()
            self.modules[name] = instance
        elif name in self.filter_modules:
            instance = self.filter_modules[name](name=name, kwargs=self.settings.get(name),
                                master=self, logger=self.logger, output_carriers=outputs)
            instance.carrier = self.output_carriers[name]
            instance.carrier.clear()
            instance.start()
            self.modules[name] = instance
        elif name in self.output_modules:
            instance = self.output_modules[name](name=name, kwargs=self.settings.get(name),
                                master=self, logger=self.logger)
            instance.carrier = self.output_carriers[name]
            instance.carrier.clear()
            instance.start()
            self.modules[name] = instance
    def _stop_module(self, name):
        # Wake the module so it notices stopevent, then give it 1s to exit.
        if name in self.modules:
            instance = self.modules[name]
            instance.carrier.wake()
            instance.stopevent.set()
            instance.join(1)
            if not instance.is_alive():
                self.modules.pop(name)
                return True
        return False
"""
Given an integer k, we define the frequency array of a string Text as an array of length 4k, where the i-th element of
the array holds the number of times that the i-th k-mer (in the lexicographic order) appears in Text (see Figure 1.
Computing a Frequency Array
Generate the frequency array of a DNA string.
Given: A DNA string Text and an integer k.
Return: The frequency array of k-mers in Text.
"""
import math
from utilities import read_lines_from_dataset
from common import pattern_to_number
def compute_frequencies(text, k):
    """Return the frequency array of k-mers in ``text``.

    Slot ``i`` counts how often the i-th k-mer (lexicographic order, as
    mapped by ``pattern_to_number``) occurs in ``text``.
    """
    # One counter per possible k-mer. Integer exponentiation replaces the
    # original math.pow-based append loop: no float round-trip, one line.
    frequency_array = [0] * (4 ** k)
    for index in range((len(text) - k) + 1):
        pattern = text[index:index + k]
        frequency_array[pattern_to_number(pattern)] += 1
    return frequency_array
if __name__ == '__main__':
    # Dataset layout: first line is the DNA string, second line is k.
    lines = read_lines_from_dataset('1k')
    text = lines[0]
    k = int(lines[1])
    result = compute_frequencies(text, k)
    # Emit the array as space-separated integers.
    str_result = map(lambda x: str(x), result)
    print(' '.join(str_result))
|
t=(int)(input())
for abc in range(t):
c=(int)(input())
n=(int)(input())
l=[]
a=raw_input().split(' ')
for x in a:
l.append((int)(x))
for i in range(n):
if c-l[i] in l:
try:
p=l.index(c-l[i],i+1)
if p>i:
print i+1,p+1
cc+=1
break
except:
p=l.index(c-l[i],i)
if p==i:
print i+1,p+1
break
|
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import csv
from datetime import datetime
################ SUPPORT FUNCTIONS #############
def getHTMLforResource(resource):
    """
    Fetch the investing.com historical-data page for *resource* and return
    the BeautifulSoup element for the price-history table.
    """
    request = Request(
        url=f"https://www.investing.com/commodities/{resource}-historical-data",
        headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3"},
    )
    page = urlopen(request).read().decode('utf-8')
    soup = BeautifulSoup(page)
    return soup.find('table', {'class': 'genTbl closedTbl historicalTbl'})
def table2Arrays(table):
    """Parse a BeautifulSoup table into (header_pair, *row_pairs).

    Only the first two columns (date and price) are kept.
    """
    header_texts = [cell.text for cell in table.find_all('th')]
    cell_texts = [cell.text for cell in table.find_all('td')]
    trimmed_rows = [chunk[:2] for chunk in data2Rows(cell_texts, len(header_texts))]
    return (header_texts[:2], *trimmed_rows)
def data2Rows(data, fieldCount):
    """Yield successive fieldCount-sized chunks of *data* (last may be short)."""
    start = 0
    while start < len(data):
        yield data[start:start + fieldCount]
        start += fieldCount
def writeCSV(resource, dataAsArray):
    """Write the rows in *dataAsArray* to '<resource>.csv', pipe-delimited."""
    with open(f'{resource}.csv', 'w', newline='') as csvfile:
        csv.writer(csvfile, delimiter='|').writerows(dataAsArray)
################ Execution #############
# Scrape the gold and silver price tables and dump each to its own CSV.
table = getHTMLforResource('gold')
rows = table2Arrays(table)
writeCSV('gold', rows)
table = getHTMLforResource('silver')
rows = table2Arrays(table)
writeCSV('silver', rows)
### stdout: date range of fetched data ###
def dateReformat(dateString):
    """Convert a 'Mon DD, YYYY' date string to ISO 'YYYY-MM-DD'."""
    parsed = datetime.strptime(dateString, '%b %d, %Y')
    return parsed.strftime('%Y-%m-%d')
# Report the date range of the most recent (silver) fetch; rows[0] is the
# header and data rows are newest-first.
lastDate = dateReformat(rows[1][0])
firstDate = dateReformat(rows[-1][0])
print(f'date fetched from {firstDate} to {lastDate}')
|
import datetime
from bin.leap import Leap
def test_leap_event():
    """Leap.event() should produce a one-week all-day event carrying a single
    alarm that triggers before the event, serialized with the expected
    iCalendar fields."""
    leap = Leap("1", datetime.datetime(2021, 2, 2), "Test leap", 1)
    e = leap.event()
    # Exactly one alarm, and it fires before the event starts (negative trigger).
    assert len(leap.event().alarms) == 1
    assert leap.event().alarms[0].trigger.days < 0
    # The event spans seven days.
    assert e.duration.days == 7
    # Lines the raw iCalendar serialization must contain.
    must_haves = [
        "BEGIN:VEVENT",
        "BEGIN:VALARM",
        "TRIGGER:-PT12H",
        "SUMMARY:Leap 1: Test leap",
        "END:VALARM",
        "DTSTART;VALUE=DATE:20210202",
        "DTEND;VALUE=DATE:20210209",
        "END:VEVENT",
    ]
    for s in must_haves:
        assert s in str(e)
|
import cv2,sys,numpy,random,os,math,select,time
from threading import Thread
import sqlite3,datetime,sys
# Capture source (0 = default local webcam) and the camera id stored in the DB.
camera_link = 0
camera_no = 0
def prompt():
    """Redraw the interactive 'WatchMen>> ' prompt on the current line."""
    sys.stdout.write('\rWatchMen>> ')
    sys.stdout.flush()
# Build label metadata from the faces/ directory: one numeric id per person
# subdirectory, a random draw colour per id, and the greyscale face images.
fn_dir='faces'
images = []
lables = []   # sic: misspelling of "labels", kept for compatibility
names = {}    # id -> person name (subdirectory name)
colours={}    # id -> random BGR colour used when drawing boxes
id=0
for subdir in os.listdir(fn_dir):
    if subdir[0]=='.':
        continue
    names[id]=subdir
    colours[id]=(random.randrange(256),random.randrange(256),random.randrange(256))
    subjectpath=os.path.join(fn_dir, subdir)
    for filename in os.listdir(subjectpath):
        if filename[0]=='.':
            continue
        path=os.path.join(subjectpath, filename)
        lable=id
        images.append(cv2.imread(path,0))  # load greyscale
        lables.append(int(lable))
    id+=1
# All faces are assumed to share the dimensions of the first image.
im_width=images[0].shape[0]
im_height=images[0].shape[1]
# LBPH recognizer with pre-trained weights (old OpenCV 2.x API).
model = cv2.createLBPHFaceRecognizer()
model.load("training_data.xml")
# Ensure the sightings table exists before any recognition starts.
conn = sqlite3.connect('Watchmen.db')
conn.execute("""CREATE TABLE IF NOT EXISTS People
       (ID INTEGER PRIMARY KEY NOT NULL,
       NAME TEXT NOT NULL,
       CAMERA_NO TEXT NOT NULL,
       LAST_SEEN_TIME TEXT NULL );""")
conn.close()
def save_person(person_id,person_name,camera):
    """Record a sighting of *person_name* in the Watchmen.db People table.

    A new row is inserted when the person was last seen more than 5 minutes
    ago or on a different camera; otherwise the latest row's timestamp is
    refreshed.

    NOTE(review): inserts store the module-level ``camera_no``, not the
    ``camera`` argument -- looks unintended but is preserved here; confirm.
    """
    # Current time truncated to whole seconds.
    pos = str(datetime.datetime.now()).find('.')
    time_now=datetime.datetime.strptime(str(datetime.datetime.now())[:pos] , '%Y-%m-%d %H:%M:%S')
    conn = sqlite3.connect('Watchmen.db')
    # SECURITY FIX: all queries now use bound parameters instead of
    # %-interpolated strings, removing SQL injection via person_name.
    cur2 = conn.execute("SELECT Max(ID) FROM PEOPLE WHERE NAME=?;", (str(person_name),))
    max_id = cur2.fetchone()[0]
    if(max_id != None):
        cur2 = conn.execute("SELECT LAST_SEEN_TIME, CAMERA_NO from PEOPLE WHERE NAME=:name and ID=:id",{"name":str(person_name),"id":int(max_id)})
        need = cur2.fetchone()
        last_known_time = need[0]
        last_known = datetime.datetime.strptime(last_known_time , '%Y-%m-%d %H:%M:%S')
        threshold = datetime.datetime.strptime("0:05:00" , '%H:%M:%S').time()
        # Elapsed time since the last sighting, ignoring any whole-day part
        # (kept from the original logic).
        diff = str(time_now-last_known)[str(time_now-last_known).find(",")+1: ]
        time_diff = datetime.datetime.strptime(diff , '%H:%M:%S').time()
        if ((time_diff)>threshold):
            # Stale sighting: start a fresh row.
            conn.execute("INSERT INTO PEOPLE (NAME,CAMERA_NO,LAST_SEEN_TIME) VALUES (?,?,?)",(str(person_name),camera_no,str(time_now)))
            conn.commit()
        else:
            if(int(need[1])==camera_no):
                # Same camera within the window: just refresh the timestamp.
                cursor = conn.execute("SELECT Max(ID) FROM PEOPLE WHERE NAME=?;", (str(person_name),))
                max_id = cursor.fetchone()[0]
                conn.execute("UPDATE PEOPLE SET LAST_SEEN_TIME=? WHERE id=?", (time_now, int(max_id)))
                conn.commit()
            else:
                # Different camera within the window: record a new row.
                conn.execute("INSERT INTO PEOPLE (NAME,CAMERA_NO,LAST_SEEN_TIME) VALUES (?,?,?)",(str(person_name),camera_no,str(time_now)))
                conn.commit()
    else:
        # First ever sighting of this person.
        conn.execute("INSERT INTO PEOPLE (NAME,CAMERA_NO,LAST_SEEN_TIME) VALUES (?,?,?)",(str(person_name),camera_no,str(time_now)))
        conn.commit()
    conn.close()
# Haar cascade used for frontal-face detection on every captured frame.
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
def main(cam):
    """Capture loop: detect faces on camera *cam*, label them with the LBPH
    model, log sightings to the DB, and display the annotated feed.

    Python 2 + old OpenCV API (xrange, cv2.cv constants). Press 'q' to quit.
    """
    prev_x=-51
    prev_y=0
    # p[i] counts consecutive frames face slot i has been labelled;
    # z[i] caches that slot's name.
    p=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
    z=["","","","","","",""]
    prev_length=-1
    prev_name="Bhai Patani"
    prev_pred=0
    video_capture = cv2.VideoCapture(cam)
    while True:
        i=0
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        frame=cv2.flip(frame,1,0)  # mirror horizontally
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        length=len(faces)
        # Reset per-slot counters whenever the number of detected faces changes.
        if(length!=prev_length):
            for x in xrange(0,len(p)):
                p[x]=0
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            face = gray[y:y+h, x:x+w]
            face_resize=cv2.resize(face,(im_width, im_height))
            prediction=model.predict(face_resize)
            cv2.rectangle(frame, (x,y), (x+w, y+h), colours[prediction[0]], 3)
            # Label and persist a slot only the first time it appears.
            if(p[i]==0):
                z[i]=names[prediction[0]]
                cv2.putText(frame, '%s %.0f'%(z[i], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, colours[prediction[0]])
                save_person(prediction[0],names[prediction[0]],cam)
            p[i]=p[i]+1
            i=i+1
        prev_length=length
        # Display the resulting frame
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
main(camera_link)
|
from collections import defaultdict
def solution(words, queries):
    """Wildcard word search: answer each query like 'fro??' or '????o' with
    the number of matching words.

    Prefix queries ('?' at the end) hit a forward trie; suffix queries hit a
    trie built from reversed words, with the query reversed to match.
    """
    forward = Trie('')
    backward = Trie('')
    for word in words:
        forward.insert(word)
        backward.insert(word[::-1])
    answers = []
    for query in queries:
        if query.endswith('?'):
            answers.append(forward.search(query))
        else:
            answers.append(backward.search(query[::-1]))
    return answers
class Trie(object):
    """Trie node. Each node records, per remaining-suffix length, how many
    inserted words pass through it (children_count); that answers a
    wildcard-tail query in O(prefix length)."""
    def __init__(self, value):
        self.value = value
        self.children = {}
        self.children_count = defaultdict(int)

    def insert(self, word):
        node = self
        for idx, ch in enumerate(word):
            # A word with len(word) - idx characters left passes through here.
            node.children_count[len(word) - idx] += 1
            nxt = node.children.get(ch)
            if nxt is None:
                nxt = Trie(ch)
                node.children[ch] = nxt
            node = nxt

    def search(self, word):
        node = self
        for idx, ch in enumerate(word):
            if ch == '?':
                # Wildcard tail: count words through this node with exactly
                # this many characters remaining.
                return node.children_count[len(word) - idx]
            node = node.children.get(ch)
            if node is None:
                return 0
if __name__ == '__main__':
    # Sample input from the Kakao 2020 problem statement.
    x = solution(["frodo", "front", "frost", "frozen", "frame", "kakao"], ["fro??", "????o", "fr???", "fro???", "pro?"])
    print(x)
|
#!/usr/bin/env python
"""
Examples:
%s
%s -i /Network/Data/250k/db/dataset/call_method_32.tsv -o /tmp/call_32.vcf
Description:
2013.03
"""
import sys, os, math
__doc__ = __doc__%(sys.argv[0], sys.argv[0])
sys.path.insert(0, os.path.expanduser('~/lib/python'))
sys.path.insert(0, os.path.join(os.path.expanduser('~/script')))
from pymodule import ProcessOptions
from pymodule import SNPData, VCFFile
from pymodule import utils
from pymodule.yhio.SNP import number2di_nt
from pymodule.pegasus.mapper.AbstractMapper import AbstractMapper
class ConvertYuSNPFormat2VCF(AbstractMapper):
    # Converts a Yu-format SNP matrix (call_method .tsv) into a VCF file.
    # NOTE: deliberately no class docstring -- the next line aliases the
    # *module* docstring into the class; a docstring here would shadow it.
    __doc__ = __doc__
    option_default_dict = AbstractMapper.option_default_dict.copy()
    option_default_dict.update({
                        ('min_MAF', 0, float): [None, 'n', 1, 'minimum minor allele frequency', ],\
                        })
    def __init__(self, **keywords):
        """Forward all keyword options to AbstractMapper."""
        AbstractMapper.__init__(self, **keywords)
    def run(self):
        """Read the SNP matrix, filter columns, and emit one VCF row per SNP."""
        if self.debug:
            import pdb
            pdb.set_trace()
        snpData = SNPData(input_fname=self.inputFname, turn_into_array=1, ignore_2nd_column=1)
        # Drop columns where every accession carries the same allele.
        snpData = SNPData.removeMonomorphicCols(snpData, NA_set=set([]))
        if self.min_MAF and self.min_MAF>0:
            snpData = SNPData.removeColsByMAF(snpData,min_MAF=self.min_MAF, NA_set=set([]))
        self.writer = VCFFile(outputFname=self.outputFname, openMode='w')
        self.writer.makeupHeaderFromSampleIDList(sampleIDList=snpData.row_id_ls)
        self.writer.writeMetaAndHeader()
        counter = 0
        for j in xrange(len(snpData.col_id_ls)):
            snp_id = snpData.col_id_ls[j]
            # SNP ids look like "<chromosome>_<position>[_...]".
            chromosome, start = snp_id.split('_')[:2]
            genotype_ls = snpData.data_matrix[:,j]
            genotype_ls = utils.dict_map(number2di_nt, genotype_ls)
            genotype_ls_vcf = []
            # Allele letters are numbered in order of first appearance (0 = REF).
            alleleNucleotide2Number = {}
            alleleNumber2Nucleotide = {}
            for genotype in genotype_ls:
                if genotype=='NA':
                    genotype_ls_vcf.append("./.")
                elif len(genotype)==2:
                    for allele in genotype:
                        if allele not in alleleNucleotide2Number:
                            alleleNumber = len(alleleNucleotide2Number)
                            alleleNucleotide2Number[allele] = alleleNumber
                            alleleNumber2Nucleotide[alleleNumber] = allele
                    genotype_ls_vcf.append("%s/%s"%(alleleNucleotide2Number[genotype[0]], alleleNucleotide2Number[genotype[1]]))
                else:
                    # Unexpected genotype length: emit a missing call.
                    genotype_ls_vcf.append("./.")
            refAllele = alleleNumber2Nucleotide[0]
            # Monomorphic after filtering: reuse REF as ALT.
            if 1 not in alleleNumber2Nucleotide:
                altAllele = refAllele
            else:
                altAllele = alleleNumber2Nucleotide[1]
            row=[chromosome, start, ".", refAllele, altAllele, 999, 'PASS', "DP=100", "GT"] + genotype_ls_vcf
            self.writer.writerow(row)
            counter += 1
        sys.stderr.write(" %s records.\n"%(counter))
        self.writer.close()
if __name__ == '__main__':
    # Parse command-line options into keyword arguments and run the converter.
    main_class = ConvertYuSNPFormat2VCF
    po = ProcessOptions(sys.argv, main_class.option_default_dict, error_doc=main_class.__doc__)
    instance = main_class(**po.long_option2value)
    instance.run()
import numpy as np
from dataloader import get_data
from models import Net
import tensorflow as tf
from tensorflow.keras.utils import Progbar
import pickle
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# Hyperparameters and checkpoint naming.
BATCH_SIZE = 16
EPOCHS = 10
MODEL_NAME = 'fcmodel'
X_train, X_val, y_train, y_val = get_data()
print(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
# Wrap the splits as batched tf.data pipelines.
train_set = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_set = tf.data.Dataset.from_tensor_slices((X_val, y_val))
train_set_batch = train_set.batch(BATCH_SIZE)
val_set_batch = val_set.batch(BATCH_SIZE)
num_batches = len(X_train) // BATCH_SIZE
# Model, optimizer and per-example loss function.
net = Net()
optim = tf.keras.optimizers.Adam(lr=1e-5)
loss = tf.keras.losses.sparse_categorical_crossentropy
@tf.function
def train_step(batch):
    """One optimization step: forward pass, mean sparse-CE loss, backprop,
    parameter update. Returns the scalar batch loss."""
    inputs, targets = batch
    with tf.GradientTape() as tape:
        predictions = net(inputs)
        # Mean must be taken inside the tape so it is recorded for gradients.
        step_loss = tf.reduce_mean(loss(targets, predictions))
    gradients = tape.gradient(step_loss, net.trainable_variables)
    optim.apply_gradients(zip(gradients, net.trainable_variables))
    return step_loss
@tf.function
def val_step(batch):
    """Forward pass only; returns the mean sparse-CE loss on the batch."""
    inputs, targets = batch
    predictions = net(inputs)
    return tf.reduce_mean(loss(targets, predictions))
train_loss_list = []
val_loss_list = []
# Initialising the loss file (truncate any previous run's log).
with open('weights/losses.txt', 'w') as file:
    pass
# Training
for epoch in range(EPOCHS):
    idx = 0
    prog = Progbar(num_batches)
    train_loss =[]
    for batch in train_set_batch:
        loss_output = train_step(batch)
        prog.update(idx, [('loss', loss_output.numpy()), ('epoch', epoch+1)])
        train_loss.append(loss_output.numpy())
        idx += 1
    # Evaluate on the validation split after each epoch.
    val_loss = []
    for batch in val_set_batch:
        val_loss.append(val_step(batch).numpy())
    prog.update(idx, [('val_loss', np.mean(val_loss))])
    train_loss_list.append(np.mean(train_loss))
    val_loss_list.append(np.mean(val_loss))
    # Append epoch losses to the log and checkpoint the weights.
    with open('weights/losses.txt', 'a') as file:
        file.write('{0:.6f}\t{1:.6f}\n'.format(np.mean(train_loss), np.mean(val_loss)))
    net.save_weights('weights/{0:s}.{1:d}.h5'.format(MODEL_NAME, epoch))
|
import datetime
import uuid
from flask import session
from models.database import Database
def retblogs():
    """Return every document in the 'blogs' collection as a list.

    Bug fix: the original indexed ``Database`` with the bare name ``blogs``,
    which is undefined in this module and raised NameError at call time;
    the collection key is the string 'blogs'.
    """
    blog_data = []
    # NOTE(review): assumes Database supports subscript lookup by collection
    # name -- confirm against models.database.Database's API.
    coll = Database['blogs']
    for document in coll:
        blog_data.append(document)
    return blog_data
|
from sklearn import linear_model
from sklearn import datasets
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
import csv
import random
import sys
import math
import pickle
from sklearn.externals import joblib
'''
generate model from synthetic data
'''
'''
csv data
training_data = sys.argv[1]
with open('training_data_' + training_data + '_y.csv', 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ')
train_data_Y = list(csvreader)
train_data_Y = map(lambda x: float(x[0]), train_data_Y)
with open('training_data_' + training_data + '_x.csv', 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ')
train_data_X = list(csvreader)
train_data_X = map(lambda x: map(lambda y: float(y), x)[0:6], train_data_X)
# train_data_X = ap(lambda x: [0, 0, math.log(x[2]), 0, math.log(x[5])], train_data_X)
# train_data_X = map(lambda x: [x[0], x[1], max(0.0000000001, x[2]), x[3],x[4], max(0.0000000001, x[5])], train_data_X)
# train_data_X = map(lambda x: [x[0], x[1], math.log(x[2]), x[3],x[4], x[5]], train_data_X)
# train_data_X = map(lambda x: [ math.log(x[2])], train_data_X)
'''
# fields: scale, domain_size, error, data_range, std_dev, uniform_distance, epsilon
#           0        1          2        3          4           5              6
algs = ["HB", "AHP", "DPCube", "DAWA"]
# NOTE(review): this line overwrites the list above, so only Privelet is
# actually processed -- confirm which algorithm set is intended before
# removing either line.
algs = ["Privelet"]
features = ["scale", "domain_size", "error", "data_range", "std_dev", "uniform_distance"]
for alg in algs:
    data = np.load("/home/famien/Code/pipe/"+alg+"_data_6_new.npy")
    # Alternate data sources kept for reference. Fix: these triple-quoted
    # comment blocks previously sat at column 0 inside this loop body, which
    # made the following indented statements an IndentationError; they are
    # indented here so the file parses.
    '''
    data = np.load("/home/famien/Code/pipe/"+alg+"_data_6.npy")
    data = np.load("/home/ubuntu/Code/dpcomp_core/"+alg+"_results_1-5.npy")
    '''
    '''
    split into train and test data
    '''
    # Random ~50/50 split of row indices into train and test.
    train = []
    test = []
    for i in range(len(data)):
        if random.random() >= .5:
            train.append(i)
        else:
            test.append(i)
    train_X = []
    train_y = []
    test_X = []
    test_y = []
    all_train_data = []
    all_test_data = []
    print("total len: ", len(data))
    # Columns 0-5 are the features, column 6 is the target (epsilon).
    for index in train:
        train_X.append(data[index][0:6])
        train_y.append(data[index][6])
        all_train_data.append(data[index])
    for index in test:
        test_X.append(data[index][0:6])
        test_y.append(data[index][6])
        all_test_data.append(data[index])
    # Persist the split so later runs can reuse exactly the same partition.
    np.save( "/home/famien/Code/pipe/"+alg+"_data_6_train.npy", all_train_data)
    np.save("/home/famien/Code/pipe/"+alg+"_data_6_test.npy", all_test_data)
    # train_X = map(lambda x: [x[0], x[1], max(0.0000000001, x[2]), x[3],x[4], max(0.0000000001, x[5])], train_X)
    # test_X = map(lambda x: [x[0], x[1], max(0.0000000001, x[2]), x[3],x[4], max(0.0000000001, x[5])], test_X)
    print("train len: ", len(train_X))
    X_ = train_X
    y = train_y
    # oob_score=True lets us report out-of-bag R^2 without a separate holdout.
    regr = RandomForestRegressor(oob_score = True, max_depth=12)
    #regr = DecisionTreeRegressor(random_state=0)
    regr.fit(X_,y)
    #models = pickle.loads("models.pickle")
    # try:
    #     models = joblib.load("models_6.pkl")
    # except IOError:
    #     models = {}
    # NOTE(review): models is recreated each iteration, so models_6.pkl ends
    # up holding only the last algorithm's model -- confirm that is intended.
    models = {}
    models[alg] = regr
    joblib.dump(models, "models_6.pkl")
    #print "accuracy: ", regr.score(test_X,test_y)
    print("Alg: ", alg)
    #print "\tvar explained: ", r2_score(test_y, epsilon_predict)
    print("out of bag: ", regr.oob_score_)
    # Feature importances rounded for display, paired with feature names.
    print(sorted(zip(map(lambda x: float("{0:.2f}".format(round(x, 4))), regr.feature_importances_), features),
                 reverse=True))
    epsilon_predict_test = regr.predict(test_X)
    epsilon_predict_train = regr.predict(train_X)
    print ("mean squared error train: ", mean_squared_error(train_y, epsilon_predict_train))
    print ("train average, median", sum(epsilon_predict_train)/len(epsilon_predict_train), sorted(epsilon_predict_train)[int(len(epsilon_predict_train)/2)])
    print ("mean squared error test: ", mean_squared_error(test_y, epsilon_predict_test))
    print ("train average, median", sum(epsilon_predict_test)/len(epsilon_predict_test), sorted(epsilon_predict_test)[int(len(epsilon_predict_test)/2)])
|
"""
Corrects unjustified track breakages from third party storm identification and tracking algorithms
Best Track: Real Time (BTRT) is a Python package designed to read in the output of a
third-party storm identification and tracking algorithm (i.e. WDSS-II segmotion; ProbSevere)
and improve upon that algorithm’s tracking by correcting unjustifiable breaks in an object’s
track. BTRT is loosely modeled after the WDSS-II besttrack algorithm, but modified to support
real-time processing of an operational model’s output.
Author : David Harrison
Date : April 2017
"""
from btengine import btengine
import json
import geojson
import sys
import os
import numpy as np
import datetime
from shapely.geometry.polygon import Polygon
from mpl_toolkits.basemap import Basemap
import scipy.stats.mstats as stats
from bs4 import BeautifulSoup
# Mapping constants
# Lat/lon bounds of the mapping domain (these values roughly cover the
# contiguous United States -- presumably the analysis domain; confirm
# against the Basemap projection set up by the caller).
MIN_LAT = 20
MAX_LAT = 51
MIN_LON = -119
MAX_LON = -62
# Function definition
# Adds support for versions < Python 2.7
def total_seconds(timedelta):
    """Return the duration of *timedelta* as a float number of seconds.

    Equivalent to timedelta.total_seconds(), which is unavailable before
    Python 2.7 -- hence this hand-rolled version.
    """
    whole_seconds = timedelta.seconds + timedelta.days * 24 * 3600
    total_micros = timedelta.microseconds + 0.0 + whole_seconds * 10 ** 6
    return total_micros / 10 ** 6
#==================================================================================================================#
# #
# Read Files #
# #
#==================================================================================================================#
# Handle the old .ascii files from ProbSevere
def readProbSevereAscii(inDir, inSuffix, historyPath, startTime, endTime):
"""
Parses probSevere .ascii files
Parameters
----------
inDir : string
The input directory
inSuffix : string
The lowest subdirectory of the input directory
historyPath: string
The full file path (including extension) of the history json file
startTime : string
The earliest time to process
endTime : string
The latest time to process
Returns
-------
Dictionary
newCells - Dictionary containing all cells added by the most current file
Dictionary
stormCells - Dictionary of storm cells from the history file or past ascii files
int
totNumCells - Number of cells in the stormCells dictionary
"""
numFiles = 0
totNumCells = 0
stormCells = {}
newCells = {}
# Try to load the history file
try:
print 'Loading storm history...'
f = open(historyPath)
stormCells = json.load(f)
f.close
# Remove cells older than start time
oldCells = []
for cell in stormCells:
stormCells[cell]['time'] = datetime.datetime.strptime(stormCells[cell]['time'], '%Y%m%d_%H%M%S')
if stormCells[cell]['time'] < startTime: oldCells.append(cell)
for cell in oldCells:
stormCells.pop(cell, None)
print 'Number of old cells removed from history: ' + str(len(oldCells))
if len(stormCells) > 0:
startTime = endTime
totNumCells = max([int(key) for key in stormCells.keys()]) + 1
if totNumCells >= 1e7:
totNumCells = 0
print 'Sucessfully loaded history file. Loading most recent data...'
else:
print 'No recent storms in history.'
print 'Loading ascii files from ' + str(startTime) + ' to ' + str(endTime) + '...'
# If no history file, make one
except IOError, ValueError:
print 'Unable to find storm history file at ' + historyPath + '.'
print 'Loading ascii files from ' + str(startTime) + ' to ' + str(endTime) + '...'
# Read in ProbSevere files
for root, dirs, files in os.walk(inDir):
if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
for asciiFile in files:
if asciiFile.endswith('.ascii'):
# Skip hidden files
if asciiFile.startswith('._'): continue
# Check if file falls in date range
try:
date = str(asciiFile).split('.')[0].split('_')[3]
time = str(asciiFile).split('.')[0].split('_')[4]
fileDate = datetime.datetime.strptime(date + '_' + time, '%Y%m%d_%H%M%S')
except ValueError:
print 'File ' + str(asciiFile) + ' has an invalid name. Expected format SSEC_AWIPS_PROBSEVERE_YYYYMMDD_hhmmss.ascii...'
continue
if not startTime <= fileDate <= endTime:
continue
# Open file
f = open(root + '/' + asciiFile)
lines = f.readlines()
f.close()
print 'Reading ' + asciiFile
numFiles += 1
for line in lines:
if line.startswith('Valid:'): continue
data = str(line).split(':')
lats = map(float, data[7].split(',')[0::2])
lons = map(float, data[7].split(',')[1::2])
track = data[8]
prob = int(data[1])
meast = data[9]
msouth = data[10]
latr = (max(lats) - min(lats)) / 2.
lonr = abs(max(lons) - min(lons)) / 2.
# Calculate centroid
points = []
for i in range(0, len(lats)):
points.append((lons[i], lats[i]))
poly = Polygon(points)
if not poly.is_valid:
coords = np.array([float(x) for x in data[7].rstrip().split(',')])
coords = coords[::-1]
coords.shape = (len(coords)/2,2)
polyCoords = [coords.tolist()]
polyCoords = [[tuple(val) for val in elem] for elem in polyCoords]
polyCoordsCorrected = []
for coord in polyCoords[0]:
if coord not in polyCoordsCorrected:
polyCoordsCorrected.append(coord)
poly = Polygon(polyCoordsCorrected)
if not poly.is_valid:
poly = poly.convex_hull
lon = poly.centroid.x
lat = poly.centroid.y
cellID = totNumCells
if fileDate == endTime:
newCells[cellID] = {'prob': prob, 'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, 'meast': meast,
'msouth': msouth, 'orientation': 'NaN', 'track': track, 'shape_x': lons, 'shape_y': lats, 'ascii': data, 'oldtrack':track}
else:
stormCells[cellID] = {'prob': prob, 'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, 'meast': meast,
'msouth': msouth, 'orientation': 'NaN', 'track': track, 'shape_x': lons, 'shape_y': lats, 'ascii': data, 'oldtrack':track}
totNumCells += 1
print '\nSuccesfully loaded ' + str(numFiles) + ' ascii files.'
print 'Number of new cells: ' + str(len(newCells))
return newCells, stormCells, totNumCells
# Handle the new json files from ProbSevere
def readProbSevereJson(inDir, inSuffix, historyPath, startTime, endTime):
"""
Parses raw probSevere .json files
Parameters
----------
inDir : string
The input directory
inSuffix : string
The lowest subdirectory of the input directory
historyPath: string
The full file path (including extension) of the history json file
startTime : string
The earliest time to process
endTime : string
The latest time to process
Returns
-------
Dictionary
newCells - Dictionary containing all cells added by the most current file
Dictionary
stormCells - Dictionary of storm cells from the history file or past ascii files
int
totNumCells - Number of cells in the stormCells dictionary
"""
numFiles = 0
totNumCells = 0
stormCells = {}
newCells = {}
# Try to load the history file
try:
print 'Loading storm history...'
f = open(historyPath)
stormCells = json.load(f)
f.close
# Remove cells older than start time
oldCells = []
for cell in stormCells:
stormCells[cell]['time'] = datetime.datetime.strptime(stormCells[cell]['time'], '%Y%m%d_%H%M%S')
if stormCells[cell]['time'] < startTime: oldCells.append(cell)
for cell in oldCells:
stormCells.pop(cell, None)
print 'Number of old cells removed from history: ' + str(len(oldCells))
if len(stormCells) > 0:
startTime = endTime
totNumCells = max([int(key) for key in stormCells.keys()]) + 1
if totNumCells >= 1e7:
totNumCells = 0
print 'Sucessfully loaded history file. Loading most recent data...'
else:
print 'No recent storms in history.'
print 'Loading JSON files from ' + str(startTime) + ' to ' + str(endTime) + '...'
# If no history file, make one
except IOError, ValueError:
print 'Unable to find storm history file at ' + historyPath + '.'
print 'Loading ascii files from ' + str(startTime) + ' to ' + str(endTime) + '...'
# Read in ProbSevere files
for root, dirs, files in os.walk(inDir):
if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
for jsonFile in files:
if jsonFile.endswith('.json'):
# Skip hidden files
if jsonFile.startswith('._'): continue
# Check if file falls in date range
try:
date = str(jsonFile).split('.')[0].split('_')[3]
time = str(jsonFile).split('.')[0].split('_')[4]
fileDate = datetime.datetime.strptime(date + '_' + time, '%Y%m%d_%H%M%S')
except ValueError:
print 'File ' + str(asciiFile) + ' has an invalid name. Expected format SSEC_AWIPS_PROBSEVERE_YYYYMMDD_hhmmss.json...'
continue
if not startTime <= fileDate <= endTime:
continue
# Open file
f = open(root + '/' + jsonFile)
jFile = json.load(f)
f.close()
print 'Reading ' + jsonFile
numFiles += 1
index = 0
for feature in jFile['features']:
lats = [point[1] for point in feature['geometry']['coordinates'][0]]
lons = [point[0] for point in feature['geometry']['coordinates'][0]]
track = int(feature['properties']['ID'])
prob = int(feature['properties']['PROB'])
meast = feature['properties']['MOTION_EAST']
msouth = feature['properties']['MOTION_SOUTH']
latr = (max(lats) - min(lats)) / 2.
lonr = abs(max(lons) - min(lons)) / 2.
# Calculate centroid
points = []
for i in range(0, len(lats)):
points.append((lons[i], lats[i]))
poly = Polygon(points)
# attempt to fix invalid topologies
if not poly.is_valid:
polyCoordsCorrected = []
for coord in feature['geometry']['coordinates'][0]:
if coord not in polyCoordsCorrected:
polyCoordsCorrected.append(coord)
poly = Polygon(polyCoordsCorrected)
# last ditch attempt *puts face in hands*
if not poly.is_valid:
poly = poly.convex_hull
lon = poly.centroid.x
lat = poly.centroid.y
cellID = totNumCells
if fileDate == endTime:
newCells[cellID] = {'prob': prob, 'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, 'meast': meast,
'msouth': msouth, 'orientation': 'NaN', 'track': track, 'shape_x': lons, 'shape_y': lats, 'index': index, 'oldtrack': track}
else:
stormCells[cellID] = {'prob': prob, 'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, 'meast':meast,
'msouth': msouth, 'orientation': 'NaN', 'track': track, 'shape_x': lons, 'shape_y': lats, 'index': index, 'oldtrack':track}
totNumCells += 1
index += 1
print '\nSuccesfully loaded ' + str(numFiles) + ' json files.'
print 'Number of new cells: ' + str(len(newCells))
return newCells, stormCells, totNumCells, jFile
# Handle the xml files from segmotion
def readSegmotionXML(inDir, inSuffix, historyPath, startTime, endTime):
"""
Parses raw segmotion .xml files
Parameters
----------
inDir : string
The input directory
inSuffix : string
The lowest subdirectory of the input directory
historyPath: string
The full file path (including extension) of the history json file
startTime : string
The earliest time to process
endTime : string
The latest time to process
Returns
-------
Dictionary
newCells - Dictionary containing all cells added by the most current file
Dictionary
stormCells - Dictionary of storm cells from the history file or past ascii files
int
totNumCells - Number of cells in the stormCells dictionary
"""
numFiles = 0
totNumCells = 0
stormCells = {}
newCells = {}
# Try to load the history file
try:
print 'Loading storm history...'
f = open(historyPath)
stormCells = json.load(f)
f.close
# Remove cells older than start time
oldCells = []
for cell in stormCells:
stormCells[cell]['time'] = datetime.datetime.strptime(stormCells[cell]['time'], '%Y%m%d_%H%M%S')
stormCells[cell]['start'] = datetime.datetime.strptime(stormCells[cell]['start'], '%Y%m%d_%H%M%S')
if stormCells[cell]['time'] < startTime: oldCells.append(cell)
for cell in oldCells:
stormCells.pop(cell, None)
print 'Number of old cells removed from history: ' + str(len(oldCells))
if len(stormCells) > 0:
startTime = endTime
totNumCells = max([int(key) for key in stormCells.keys()]) + 1
if totNumCells >= 1e7:
totNumCells = 0
print 'Sucessfully loaded history file. Loading most recent data...'
else:
print 'No recent storms in history.'
print 'Loading XML files from ' + str(startTime) + ' to ' + str(endTime) + '...'
# If no history file, make one
except IOError, ValueError:
print 'Unable to find storm history file at ' + historyPath + '.'
print 'Loading XML files from ' + str(startTime) + ' to ' + str(endTime) + '...'
# Read in ProbSevere files
currentFile = None
for root, dirs, files in os.walk(inDir):
if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
for xmlFile in files:
if xmlFile.endswith('.xml'):
# Skip hidden files
if xmlFile.startswith('._'): continue
# Check if file falls in date range
try:
fileDate = datetime.datetime.strptime(str(xmlFile).split('.')[0], '%Y%m%d-%H%M%S')
except ValueError:
print 'File ' + str(xmlFile) + ' has an invalid name. Expected format YYYYMMDD-hhmmss.xml...'
continue
if not startTime <= fileDate <= endTime:
continue
if fileDate == endTime: currentFile = xmlFile
# Open file
f = open(root + '/' + xmlFile)
xFile = xmlFile
lines = BeautifulSoup(f, 'html.parser').find_all('datacolumn')
f.close()
print 'Reading ' + xmlFile
numFiles += 1
numCells = len(lines[2].find_all('item'))
f = open(root + '/' + xmlFile)
lines = BeautifulSoup(f, 'html.parser').find_all('data')
lines = str(lines[0]).split('</datacolumn>')
f.close()
for i in range(0, numCells):
data = []
for line in lines:
try:
name = line.split('"')[1]
units = line.split('"')[3]
value = line.split('item value')[i+1].split('"')[1]
data.append((name, units, value))
except IndexError:
continue
if name == 'LatRadius': latr = float(value)
elif name == 'Latitude': lat = float(value)
elif name == 'LonRadius': lonr = float(value)
elif name == 'Longitude': lon = float(value)
elif name == 'MotionEast': meast = float(value)
elif name == 'MotionSouth': msouth = float(value)
elif name == 'Orientation': orientation = float(value)
elif name == 'RowName': track = int(value)
elif name == 'Age': age = float(value)
elif name == 'StartTime': start = datetime.datetime.strptime(value, '%Y%m%d-%H%M%S')
data.append(('OldTrack', 'dimensionless', str(track)))
cellID = totNumCells
if fileDate == endTime:
newCells[cellID] = {'prob': 0, 'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, 'meast': meast, 'age': age, 'start': start,
'msouth': msouth, 'orientation': 'NaN', 'track': track, 'shape_x': [], 'shape_y': [], 'xml': data, 'oldtrack':track}
else:
stormCells[cellID] = {'prob': 0, 'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, 'meast': meast, 'age': age, 'start': start,
'msouth': msouth, 'orientation': orientation, 'track': track, 'shape_x': [], 'shape_y': [], 'xml': data, 'oldtrack':track}
totNumCells += 1
print '\nSuccesfully loaded ' + str(numFiles) + ' xml files.'
print 'Number of new cells: ' + str(len(newCells))
return newCells, stormCells, totNumCells, root + '/' + currentFile
#==================================================================================================================#
# #
# Compare Tracks #
# #
#==================================================================================================================#
def compareTracks(newCells, stormTracks, bufferTime, bufferDist, distanceRatio, existingObjects, modifiedTracksHistory, m, bt):
    """
    Function to match new cells with existing tracks
    All new cells are compared to each track and
    added to the most appropriate one based on distance and time
    Parameters
    ----------
    newCells : Dictionary
        Dictionary of stormCells that don't match an existing track
    stormTracks : Dictionary
        Full stormTracks dictionary containing information about the current
        tracks and the cells contained within them
    bufferTime : int
        The time threshold to use when associated cells with a track
    bufferDist : int
        The distance threshold to use when associated cells with a track
    distanceRatio : float
        The ratio between x-y distances and lat-lon distances
    existingObjects : list
        List of new object IDs that are already matched to a track
    modifiedTracksHistory : Dictionary
        Dictionary of previously modified tracks where the key is the original
        object ID and the value is the modified value
    m : Basemap
        Current map projection
    bt : btengine
        The active btengine
    Returns
    -------
    Dictionary
        newCells dictionary containing the modified track values
    """
    # Track IDs already claimed by a cell in this pass (one cell per track).
    reservedTracks = []
    # Cell IDs whose track assignment was changed, returned to the caller.
    changedCells = []
    # track -> {cell: prob} candidates handled by the QLCS merge/split logic.
    qlcsObjects = {}
    counter = 0
    qlcsTest = 0
    for cell in newCells:
        # Save cell original track (even if it doesn't change) for visualization
        newCells[cell]['oldtrack'] = newCells[cell]['track']
        # Skip new cells that already have a track
        if cell in existingObjects:
            # Save cell original track (even if it doesn't change) for visualization
            reservedTracks.append(newCells[cell]['track'])
            continue
        cellTime = newCells[cell]['time']
        cellX = newCells[cell]['x']
        cellY = newCells[cell]['y']
        # Calculate distances
        squallRange = []
        minDist = 1e9
        minTrack = newCells[cell]['track']
        for track in stormTracks:
            # Make sure two storms don't have the same ID
            if track in reservedTracks: continue
            # Only compare to tracks in temporal range
            if not (stormTracks[track]['tend'] < cellTime <= stormTracks[track]['tend'] + datetime.timedelta(minutes = bufferTime)):
                continue
            # Avoid having two cells in the same track at the same time
            # The previous if statement should catch this, but explicitly state it here to be sure
            elif stormTracks[track]['tend'] == cellTime: continue
            # Extrapolate the track's last position to the cell's time using the
            # track's fitted u/v motion ('NaN' means no motion estimate yet).
            if stormTracks[track]['u'] == 'NaN':
                xPoint = stormTracks[track]['xf']
                yPoint = stormTracks[track]['yf']
            else:
                xPoint = stormTracks[track]['xf'] + (stormTracks[track]['u'] * (total_seconds(cellTime - stormTracks[track]['tend'])))
                yPoint = stormTracks[track]['yf'] + (stormTracks[track]['v'] * (total_seconds(cellTime - stormTracks[track]['tend'])))
            dist = np.sqrt((cellX - xPoint) ** 2 + (cellY - yPoint) ** 2)
            dist = dist * distanceRatio # Convert from x,y to km
            if dist < minDist:
                minDist = dist
                minTrack = track
            # Get objects in squall line range here to save a step later
            # TODO: Make this range a user setting
            if dist < 150:
                squallRange.append(track)
        # If a match is found, replace the original track with the new one and move on
        if minDist <= bufferDist:
            if minTrack != newCells[cell]['track'] and minTrack not in reservedTracks:
                changedCells.append(cell)
            reservedTracks.append(minTrack)
            newCells[cell]['track'] = minTrack
            stormTracks[minTrack]['cells'].append(newCells[cell])
            # Refit the track's Theil-Sen motion estimate with the new cell included.
            stormTracks[minTrack] = bt.theil_sen_single(stormTracks[minTrack])
            if counter % 10 == 0:
                print '......' + str(counter) + ' of ' + str(len(newCells) - len(existingObjects)) + ' assigned......'
            counter += 1
            continue
        # If no matches found with the conventional algorithm, try the QLCS algorithm
        if len(newCells[cell]['shape_y']) > 0:
            currentlats = map(float, newCells[cell]['shape_y'])
            currentlons = map(float, newCells[cell]['shape_x'])
            currentObj = Polygon([(currentlons[i], currentlats[i]) for i in range(len(currentlats))])
            currentProb = float(newCells[cell]['prob'])
            for track in squallRange:
                # Make sure two storms don't have the same ID
                if track in reservedTracks: continue
                # Get last cell in track
                times = {}
                for trackCell in stormTracks[track]['cells']:
                    times[trackCell['time'].timetuple()] = trackCell
                lastCell = times[max(times.keys())]
                # Convert stored motion (m/s east / m/s south) to map x/y units.
                u_vel = (float(lastCell['meast']) / 1000.) / distanceRatio
                v_vel = (-float(lastCell['msouth']) / 1000.) / distanceRatio
                # Apply a 5 km buffer around the cell and interpolate to the current time
                oldlats = map(float, lastCell['shape_y'])
                oldlons = map(float, lastCell['shape_x'])
                interpPoints = []
                # Calculate Centroid
                points = [m(oldlons[i], oldlats[i]) for i in range(len(oldlats))]
                poly = Polygon(points)
                if not poly.is_valid:
                    polyCoords = [points]
                    polyCoords = [[tuple(val) for val in elem] for elem in polyCoords]
                    polyCoordsCorrected = []
                    for coord in polyCoords[0]:
                        if coord not in polyCoordsCorrected:
                            polyCoordsCorrected.append(coord)
                    poly = Polygon(polyCoordsCorrected)
                    if not poly.is_valid:
                        poly = poly.convex_hull
                x0 = poly.centroid.x
                y0 = poly.centroid.y
                # Calculate Buffer Points
                for point in points:
                    x,y = point
                    dx = x - x0
                    dy = y - y0
                    # NOTE(review): raises ZeroDivisionError when dx == 0 (a
                    # vertex directly above/below the centroid) -- confirm the
                    # inputs rule this out.
                    theta = np.arctan(abs(dy)/float(abs(dx)))
                    bufferSize = 5. / distanceRatio
                    dx2 = abs(bufferSize * np.cos(theta))
                    dy2 = abs(bufferSize * np.sin(theta))
                    if dx < 0: xb = x - dx2
                    else: xb = x + dx2
                    if dy < 0: yb = y - dy2
                    else: yb = y + dy2
                    # After the buffer is applied, move the object downstream
                    xpoint = xb + u_vel * (total_seconds(cellTime - lastCell['time']))
                    ypoint = yb + v_vel * (total_seconds(cellTime - lastCell['time']))
                    interpPoints.append((m(xpoint, ypoint, inverse = True)[0], m(xpoint, ypoint, inverse = True)[1]))
                interpObj = Polygon(interpPoints)
                # Compare cell to interpolated object
                # Case for splitting QLCS objects
                if interpObj.contains(currentObj.centroid):
                    if track not in reservedTracks:
                        if track not in qlcsObjects:
                            qlcsObjects[track] = {cell: currentProb}
                        else:
                            qlcsObjects[track][cell] = currentProb
                    continue
                # Case for merging QLCS objects
                elif currentObj.contains(interpObj.centroid):
                    if track not in reservedTracks:
                        if track not in qlcsObjects:
                            qlcsObjects[track] = {cell: currentProb}
                        else:
                            qlcsObjects[track][cell] = currentProb
                    continue
        # Do a final check to see if the object ID has been modified in previous runs
        if newCells[cell]['track'] in modifiedTracksHistory.keys():
            newCells[cell]['track'] = modifiedTracksHistory[newCells[cell]['track']]
            reservedTracks.append(newCells[cell]['track'])
            print '------------History correction----------'
        if counter % 10 == 0:
            print '......' + str(counter) + ' of ' + str(len(newCells) - len(existingObjects)) + ' assigned......'
        counter += 1
    # Prefer cell with the higher probability for merges and splits
    print 'Handling merges and splits...'
    for track in qlcsObjects:
        maxProb = -1
        preferredCell = -999
        for cell in qlcsObjects[track]:
            if qlcsObjects[track][cell] > maxProb:
                maxProb = qlcsObjects[track][cell]
                preferredCell = cell
        if track != newCells[preferredCell]['track'] and track not in reservedTracks:
            changedCells.append(preferredCell)
            reservedTracks.append(track)
            newCells[preferredCell]['track'] = track
            qlcsTest += 1
    print 'QLCS algoritm applied: ' + str(qlcsTest)
    return newCells, changedCells
#==================================================================================================================#
# #
# Output #
# #
#==================================================================================================================#
# Handle old ascii input from ProbSevere
def outputAscii(currentTime, newCells, stormCells, changedCells, outDir, historyPath):
    """
    Creates a new ascii and json file with the updated track information
    Parameters
    ----------
    currentTime : datetime
        The date/time of the most current ascii file
    newCells : Dictionary
        Dictionary of the most current cells
    stormCells : Dictionary
        Dictionary containing all storm cells to be saved
    changedCells : List
        List of track IDs that were changed
    outDir : string
        Filepath where the output files will be saved
    historyPath : string
        The full file path (including extension) of the history json file
    """
    # NOTE(review): changedCells is accepted but never used in this function.
    # Save new ascii file
    #filename = 'SSEC_AWIPS_CONVECTPROB_' + currentTime.strftime('%Y%m%d_%H%M%S') + '.ascii'
    filename = 'SSEC_AWIPS_PROBSEVERE_' + currentTime.strftime('%Y%m%d_%H%M%S') + '.ascii'
    print '\nSaving the most recent ascii file: ' + filename
    f = open(outDir + filename, 'w')
    f.write('Valid: ' + currentTime.strftime('%Y%m%d_%H%M%S') + '\n')
    for cell in newCells:
        # Field 8 of the stored record is the track ID; overwrite it with the
        # corrected track and append the original ID as a trailing field.
        newCells[cell]['ascii'][8] = str(newCells[cell]['track'])
        newCells[cell]['ascii'][-1] = newCells[cell]['ascii'][-1].rstrip()
        newCells[cell]['ascii'].append(str(newCells[cell]['oldtrack']) + '\n')
        f.write(':'.join(newCells[cell]['ascii']))
    f.close()
    # Save new history file
    print 'Saving the new history file...'
    for cell in stormCells:
        # datetimes are not JSON-serializable; store them as strings.
        stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
    with open(historyPath, 'w') as outfile:
        json.dump(stormCells, outfile, sort_keys = True, indent=1)
        # Redundant: the with-block already closes the file on exit.
        outfile.close()
# Handle new json input from ProbSevere
def outputJson(currentTime, newCells, stormCells, changedCells, outDir, historyPath, jFile):
    """
    Creates a new json and history json file with the updated track information
    Parameters
    ----------
    currentTime : datetime
        The date/time of the most current ascii file
    newCells : Dictionary
        Dictionary of the most current cells
    stormCells : Dictionary
        Dictionary containing all storm cells to be saved
    changedCells : List
        List of track IDs that were changed
    outDir : string
        Filepath where the output files will be saved
    historyPath : string
        The full file path (including extension) of the history json file
    jFile : Dictionary
        The json dictionary from the original active json file
    """
    # NOTE(review): changedCells is accepted but never used in this function.
    # Save new json file
    filename = 'SSEC_AWIPS_PROBSEVERE_' + currentTime.strftime('%Y%m%d_%H%M%S') + '.json'
    print '\nSaving the most recent json file: ' + filename
    for cell in newCells:
        # Write the corrected ID back into the original feature and keep the
        # pre-correction ID under 'besttrack'.
        index = newCells[cell]['index']
        jFile['features'][index]['properties']['ID'] = newCells[cell]['track']
        jFile['features'][index]['properties']['besttrack'] = newCells[cell]['oldtrack']
        # Pre-encode coordinates so they serialize on one line; the quotes this
        # adds are stripped again below.
        jFile['features'][index]['geometry']['coordinates'] = json.JSONEncoder().encode(jFile['features'][index]['geometry']['coordinates'])
    s = json.dumps(jFile, sort_keys = False, indent = 1).encode('ascii')
    # Fix string formatting
    with open(outDir + filename, 'w') as outfile:
        for line in s.split('\n'):
            if 'coordinates' in line:
                # Remove the quotes around the pre-encoded coordinate string.
                tmp = line.split(':')
                tmp[1] = tmp[1].replace('"', '')
                line = tmp[0] + ':' + tmp[1]
            outfile.write(line + '\n')
        # Redundant: the with-block already closes the file on exit.
        outfile.close()
    # Save new history file
    print 'Saving the new history file...'
    for cell in stormCells:
        # datetimes are not JSON-serializable; store them as strings.
        stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
    with open(historyPath, 'w') as outfile:
        json.dump(stormCells, outfile, sort_keys = True)
        outfile.close()
# Handle XML input from segmotion
def outputXML(currentTime, newCells, stormCells, changedCells, outDir, historyPath, xmlFile):
    """
    Creates a new xml and json file with the updated track information
    Parameters
    ----------
    currentTime : datetime
        The date/time of the most current xml file
    newCells : Dictionary
        Dictionary of the most current cells
    stormCells : Dictionary
        Dictionary containing all storm cells to be saved
    changedCells : List
        List of track IDs that were changed
    outDir : string
        Filepath where the output files will be saved
    historyPath : string
        The full file path (including extension) of the history json file
    xmlFile : File
        The current xml file being processed to use as a template
    """
    # NOTE(review): changedCells is accepted but never used in this function.
    # Save new xml file
    filename = currentTime.strftime('%Y%m%d-%H%M%S') + '.xml'
    print '\nSaving the most recent xml file: ' + filename
    xfile = open(xmlFile)
    lines = xfile.readlines()
    xfile.close()
    f = open(outDir + filename, 'w')
    # Don't bother if there are no objects in the file
    # (just copy the template through unchanged and save the history)
    if len(newCells.keys()) < 1:
        for line in lines:
            f.write(line)
        f.close()
        print 'Saving the new history file...'
        for cell in stormCells:
            # datetimes are not JSON-serializable; store them as strings.
            stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
        with open(historyPath, 'w') as outfile:
            json.dump(stormCells, outfile, sort_keys = True, indent=1)
            outfile.close()
        return
    # Otherwise write out the header info
    # (copy template lines verbatim up to the <data> element)
    for line in lines:
        if not line.strip().startswith('<data>'):
            f.write(line)
        else:
            break
    f.write(' <data>\n')
    # Column names/units come from the first cell's parsed (name, units, value)
    # tuples; values are gathered column-wise across all new cells.
    names = [column[0] for column in newCells[newCells.keys()[0]]['xml']]
    units = [column[1] for column in newCells[newCells.keys()[0]]['xml']]
    values = [[] for i in range(0, len(names))]
    for cell in newCells:
        # Last ditch attempt to catch any datetimes that slipped through...
        if type(newCells[cell]['time']) is datetime.datetime:
            newCells[cell]['time'] = newCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
        if type(newCells[cell]['start']) is datetime.datetime:
            newCells[cell]['start'] = newCells[cell]['start'].strftime('%Y%m%d_%H%M%S')
        for i in range(0, len(newCells[cell]['xml'])):
            # RowName carries the (possibly corrected) track ID; rename the
            # column to 'Track' in the output.
            if names[i] == 'RowName':
                names[i] = 'Track'
                values[i].append(newCells[cell]['track'])
            elif names[i] == 'Age': values[i].append(newCells[cell]['age'])
            elif names[i] == 'StartTime': values[i].append(newCells[cell]['start'])
            elif names[i] == 'MotionEast': values[i].append(str(newCells[cell]['meast']))
            elif names[i] == 'MotionSouth': values[i].append(str(newCells[cell]['msouth']))
            elif names[i] == 'Speed': values[i].append(str(newCells[cell]['speed']))
            else: values[i].append(newCells[cell]['xml'][i][2])
    # Emit one <datacolumn> per field with one <item> per cell.
    for j in range(0, len(names)):
        f.write(' <datacolumn name="' + names[j] + '" units="' + units[j] + '">\n')
        for k in range(0, len(values[j])):
            f.write(' <item value="' + str(values[j][k]) + '"/>\n')
        f.write(' </datacolumn>\n')
    f.write(' </data>\n')
    f.write('</datatable>')
    f.close()
    # Save new history file
    print 'Saving the new history file...'
    for cell in stormCells:
        if type(stormCells[cell]['time']) is datetime.datetime:
            stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
        if type(stormCells[cell]['start']) is datetime.datetime:
            stormCells[cell]['start'] = stormCells[cell]['start'].strftime('%Y%m%d_%H%M%S')
    with open(historyPath, 'w') as outfile:
        json.dump(stormCells, outfile, sort_keys = True, indent=1)
        # Redundant: the with-block already closes the file on exit.
        outfile.close()
# Convert segmotion xml to json
def outputSegJson(currentTime, newCells, stormCells, changedCells, outDir, historyPath, xmlFile):
    """
    Creates a new json file with the updated track information

    Parameters
    ----------
    currentTime : datetime
        The date/time of the most current xml file
    newCells : Dictionary
        Dictionary of the most current cells
    stormCells : Dictionary
        Dictionary containing all storm cells to be saved
    changedCells : List
        List of track IDs that were changed
    outDir : string
        Filepath where the output files will be saved
    historyPath : string
        The full file path (including extension) of the history json file
    xmlFile : File
        The current xml file being processed to use as a template
    """
    # Save new json file
    dataDicts = []
    filename = 'segmotion_' + currentTime.strftime('%Y%m%d-%H%M%S') + '.json'
    print('\nSaving the most recent segmotion-json file: ' + filename)
    xfile = open(xmlFile)
    lines = xfile.readlines()
    xfile.close()
    # Don't bother if there are no objects in the file
    if len(newCells.keys()) < 1:
        # Still create an empty output file so downstream consumers see one.
        open(outDir + filename, 'w').close()
        print('Saving the new history file...')
        for cell in stormCells:
            # Guard the conversions: values may already be strings if they
            # were converted earlier (matches the other output functions;
            # the old code called strftime unconditionally here and would
            # crash on a string).
            if type(stormCells[cell]['time']) is datetime.datetime:
                stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
            if type(stormCells[cell]['start']) is datetime.datetime:
                stormCells[cell]['start'] = stormCells[cell]['start'].strftime('%Y%m%d_%H%M%S')
        with open(historyPath, 'w') as outfile:
            json.dump(stormCells, outfile, sort_keys = True, indent=1)
        return
    # Otherwise collect the header info from the template xml
    headerNames = []
    headerValues = []
    for line in lines:
        if not line.strip().startswith('<data>'):
            if line.strip().startswith('<datacolumn'):
                headerNames.append(line.strip().split(' ')[1].split('"')[1])
            elif line.strip().startswith('<item'):
                headerValues.append(line.strip().split('"')[1])
        else:
            break
    names = [column[0] for column in newCells[newCells.keys()[0]]['xml']]
    # Get data from cells
    for cell in newCells:
        # Last ditch attempt to catch any datetimes that slipped through...
        if type(newCells[cell]['time']) is datetime.datetime:
            newCells[cell]['time'] = newCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
        if type(newCells[cell]['start']) is datetime.datetime:
            newCells[cell]['start'] = newCells[cell]['start'].strftime('%Y%m%d_%H%M%S')
        dataDict = {}
        for i in range(len(newCells[cell]['xml'])):
            if names[i] == 'RowName': dataDict['Track'] = int(newCells[cell]['track'])
            elif names[i] == 'Age': dataDict[names[i]] = float(newCells[cell]['age'])
            elif names[i] == 'StartTime': dataDict[names[i]] = newCells[cell]['start']
            elif names[i] == 'OldTrack': dataDict['OldTrack'] = int(newCells[cell]['oldtrack'])
            elif names[i] == 'MotionEast': dataDict['MotionEast'] = float(newCells[cell]['meast'])
            elif names[i] == 'MotionSouth': dataDict['MotionSouth'] = float(newCells[cell]['msouth'])
            elif names[i] == 'Speed': dataDict['Speed'] = float(newCells[cell]['speed'])
            else: dataDict[names[i]] = float(newCells[cell]['xml'][i][2])
        for i in range(len(headerNames)):
            dataDict[headerNames[i]] = headerValues[i]
        dataDicts.append(dataDict)
    # Save JSON file (one json object per line).
    # NOTE: the file used to be opened once above (and leaked, never closed)
    # and then opened a second time here; it is now opened exactly once.
    with open(outDir + filename, 'w') as outfile:
        for jdict in dataDicts:
            s = json.dumps(jdict, sort_keys = False).encode('ascii')
            outfile.write(s + '\n')
    # Save new history file
    print('Saving the new history file...')
    for cell in stormCells:
        if type(stormCells[cell]['time']) is datetime.datetime:
            stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
        if type(stormCells[cell]['start']) is datetime.datetime:
            stormCells[cell]['start'] = stormCells[cell]['start'].strftime('%Y%m%d_%H%M%S')
    with open(historyPath, 'w') as outfile:
        json.dump(stormCells, outfile, sort_keys = True, indent=1)
#==================================================================================================================#
# #
# Main #
# #
#==================================================================================================================#
def besttrack_RT(currentTime, inDir, inSuffix, historyPath, bufferTime, bufferDist, historyTime, outDir, ftype, outtype = ''):
    """
    Loads current probSevere object and assigns correct track

    Parameters
    ----------
    currentTime : datetime
        The date/time of the most current ascii file
    inDir : string
        The input directory
    inSuffix : string
        The lowest subdirectory of the input directory
    historyPath : string
        The full file path (including extension) of the history json file
    bufferTime : int
        The time threshold to use when associating cells with a track (minutes)
    bufferDist : int
        The distance threshold to use when associating cells with a track (km)
    historyTime : int
        How long to keep old cells in the history file (minutes)
    outDir : string
        Filepath where the output files will be saved
    ftype : string
        Type of input files to process (ascii or json or xml)
    outtype : string
        Type of file to output (ascii, json, xml, or seg_json)
    """
    print 'Running Best Track RT'
    # Default the output format to the input format
    if outtype == '': outtype = ftype
    # Compute start time: only cells within the last historyTime minutes are kept
    dt = datetime.timedelta(minutes = historyTime)
    endTime = currentTime
    startTime = currentTime - dt
    # Load storm history and most current file
    # (readers are defined elsewhere in this module; the json/xml readers also
    # return the source file handle so it can be used as an output template)
    if ftype == 'ascii': newCells, stormCells, totNumCells = readProbSevereAscii(inDir, inSuffix, historyPath, startTime, endTime)
    elif ftype == 'json': newCells, stormCells, totNumCells, jFile = readProbSevereJson(inDir, inSuffix, historyPath, startTime, endTime)
    elif ftype == 'xml': newCells, stormCells, totNumCells, xmlFile = readSegmotionXML(inDir, inSuffix, historyPath, startTime, endTime)
    else:
        print 'Invalid file type. Expected "ascii" or "json" or "xml"'
        return
    # Set up the btengine
    bt = btengine(None)
    #==================================================================================================================#
    #                                                                                                                  #
    # Map projection                                                                                                   #
    #                                                                                                                  #
    #==================================================================================================================#
    # History of changed objects
    modifiedTracksHistory = {}
    # Projection variables (MIN/MAX_LAT/LON are module-level domain constants)
    meanLat = np.mean([MIN_LAT, MAX_LAT])
    meanLon = np.mean([MIN_LON, MAX_LON])
    xyDistMax = 0
    llDistMax = 0
    distanceRatio = 0
    # Setup equidistant map projection centered on the domain
    m = Basemap(llcrnrlon=MIN_LON, llcrnrlat=MIN_LAT, urcrnrlon=MAX_LON, urcrnrlat=MAX_LAT,
                projection='aeqd', lat_0=meanLat, lon_0=meanLon)
    # Project every cell's lat/lon into map x/y coordinates
    for cell in stormCells:
        stormCells[cell]['x'] = m(stormCells[cell]['lon'], stormCells[cell]['lat'])[0]
        stormCells[cell]['y'] = m(stormCells[cell]['lon'], stormCells[cell]['lat'])[1]
        # Since we're already iterating, get the object's modification history
        modifiedTracksHistory[stormCells[cell]['oldtrack']] = stormCells[cell]['track']
    for cell in newCells:
        newCells[cell]['x'] = m(newCells[cell]['lon'], newCells[cell]['lat'])[0]
        newCells[cell]['y'] = m(newCells[cell]['lon'], newCells[cell]['lat'])[1]
    # Find ratio between x-y distances and lat-lon distances, so map-space
    # distances can be converted to km when thresholding against bufferDist
    xMin, yMin = m(MIN_LON, MIN_LAT)
    xMax, yMax = m(MAX_LON, MAX_LAT)
    xyDistMax = np.sqrt((xMin - xMax) ** 2 + (yMin - yMax) ** 2)
    # Find distance between two lat lon coordinates (great-circle distance)
    # Source: https://en.wikipedia.org/wiki/Great-circle_distance
    # point1 = [MAX_LON, MIN_LAT]
    # point2 = [MIN_LON, MAX_LAT]
    rlat1 = np.radians(MIN_LAT)
    rlat2 = np.radians(MAX_LAT)
    r = 6371  # Mean radius of Earth (km)
    dlon = abs(MAX_LON - MIN_LON)
    dsig = np.arccos(np.sin(rlat1) * np.sin(rlat2) + np.cos(rlat1) * np.cos(rlat2) * np.cos(np.radians(dlon)))
    llDistMax = r * dsig
    distanceRatio = llDistMax / xyDistMax  # km per map unit
    #==================================================================================================================#
    #                                                                                                                  #
    # Cluster Identification                                                                                           #
    #                                                                                                                  #
    #==================================================================================================================#
    # Break down into tracks
    print '\nFinding storm history tracks...'
    stormTracks = bt.find_clusters(stormCells, stormCells.keys())
    print 'Number of tracks: ' + str(len(stormTracks))
    # Identify potentially bad objects
    print '\nAdding new cells to existing tracks...'
    existingObjects = []
    for cell in newCells:
        # If the cell belongs to an existing track, add it
        if newCells[cell]['track'] in stormTracks:
            stormCells[cell] = newCells[cell]
            existingObjects.append(cell)
    print 'Number of cells added to existing tracks: ' + str(len(existingObjects))
    print 'Number of cells with new track ID: ' + str(len(newCells) - len(existingObjects))
    #==================================================================================================================#
    #                                                                                                                  #
    # Track comparison                                                                                                 #
    #                                                                                                                  #
    #==================================================================================================================#
    # Compare new tracks with existing ones; theil_sen_batch fits a robust
    # motion line to each track before the comparison
    print '\nComparing new tracks with existing ones...'
    stormTracks = bt.find_clusters(stormCells, stormCells.keys())
    stormTracks = bt.theil_sen_batch(stormTracks)
    newCells, changedCells = compareTracks(newCells, stormTracks, bufferTime, bufferDist, distanceRatio, existingObjects, modifiedTracksHistory, m, bt)
    # Update the tracks
    for cell in newCells:
        # If the cell belongs to an existing track, add it
        if newCells[cell]['track'] in stormTracks and cell not in existingObjects:
            stormCells[cell] = newCells[cell]
            existingObjects.append(cell)
    print 'Final number of cells added to existing tracks: ' + str(len(existingObjects))
    print 'Final number of cells with new track ID: ' + str(len(newCells) - len(existingObjects))
    print 'Number of broken tracks fixed: ' + str(len(changedCells))
    # Put all cells into the stormCells Dict
    for cell in newCells:
        stormCells[cell] = newCells[cell]
    # Update cell age and start times
    # This only works on segmotion data right now
    if ftype == 'xml':
        stormTracks = bt.find_clusters(stormCells, stormCells.keys())
        for track in stormTracks:
            times = []
            ids = []
            # Sort cells by time
            for cell in stormTracks[track]['cells']:
                # Reverse lookup of the cell's ID (Python 2 only: relies on
                # keys()/values() being parallel lists)
                ID = stormCells.keys()[stormCells.values().index(cell)]
                if ID not in ids:
                    ids.append(ID)
                    times.append(cell['time'])
            ids = [ID for (time, ID) in sorted(zip(times, ids))]
            for i in range(len(ids)):
                if ids[i] in newCells:
                    if type(stormCells[ids[0]]['start']) is unicode:  # Yes I'm still fighting this stupid issue
                        stormCells[ids[0]]['start'] = datetime.datetime.strptime(stormCells[ids[0]]['start'], '%Y%m%d_%H%M%S')
                    # Track start time = start of the oldest cell in the track
                    newCells[ids[i]]['start'] = stormCells[ids[0]]['start']
                    newCells[ids[i]]['age'] = total_seconds(newCells[ids[i]]['time'] - newCells[ids[i]]['start'])
                    if i > 0:
                        # Finite-difference motion estimate from the previous cell
                        dt = total_seconds(newCells[ids[i]]['time'] - stormCells[ids[i-1]]['time'])
                        dx = newCells[ids[i]]['x'] - stormCells[ids[i-1]]['x']
                        dy = newCells[ids[i]]['y'] - stormCells[ids[i-1]]['y']
                        try:
                            newCells[ids[i]]['meast'] = (dx / dt) * distanceRatio * 1000.  # m/s
                            newCells[ids[i]]['msouth'] = -(dy / dt) * distanceRatio * 1000.  # m/s
                        except ZeroDivisionError:
                            newCells[ids[i]]['meast'] = 0  # m/s
                            newCells[ids[i]]['msouth'] = 0
                        newCells[ids[i]]['speed'] = np.sqrt(newCells[ids[i]]['meast']**2 + newCells[ids[i]]['msouth']**2)  # m/s
    #==================================================================================================================#
    #                                                                                                                  #
    # Output                                                                                                           #
    #                                                                                                                  #
    #==================================================================================================================#
    del stormTracks
    # Save output
    if outtype == 'ascii': outputAscii(currentTime, newCells, stormCells, changedCells, outDir, historyPath)
    elif outtype == 'json': outputJson(currentTime, newCells, stormCells, changedCells, outDir, historyPath, jFile)
    elif outtype == 'xml': outputXML(currentTime, newCells, stormCells, changedCells, outDir, historyPath, xmlFile)
    elif outtype == 'seg_json': outputSegJson(currentTime, newCells, stormCells, changedCells, outDir, historyPath, xmlFile)
    elif outtype == 'legacy':
        # Legacy mode: write the history json and return the data to the caller
        if not os.path.exists(outDir):
            os.makedirs(outDir)
        print 'Saving the new history file...'
        for cell in stormCells:
            stormCells[cell]['time'] = stormCells[cell]['time'].strftime('%Y%m%d_%H%M%S')
            #stormCells[cell]['start'] = stormCells[ids[i]]['start'].strftime('%Y%m%d_%H%M%S')
        with open(historyPath, 'w') as outfile:
            json.dump(stormCells, outfile, sort_keys = True)
        outfile.close()
        # Convert times back to datetimes before handing the dict back
        for cell in stormCells:
            stormCells[cell]['time'] = datetime.datetime.strptime(stormCells[cell]['time'],'%Y%m%d_%H%M%S')
            #stormCells[cell]['start'] = datetime.datetime.strptime(stormCells[cell]['start'],'%Y%m%d_%H%M%S')
        return stormCells, distanceRatio
    else: print 'Invalid file type. Expected "ascii", "json", "xml", or "seg_json"'
    print 'Best Track RT complete!\n'
# Usage example and testing
if __name__ == '__main__':
    # Smoke-test run against a fixed archive date and local data paths.
    run_time = datetime.datetime.strptime('20150506_203639', '%Y%m%d_%H%M%S')
    input_dir = '/localdata/ProbSevere/new_json/20160901'
    output_dir = '/localdata/ProbSevere/new_json/test/'
    history_file = '/localdata/ProbSevere/new_json/test/history.json'
    besttrack_RT(run_time, input_dir, '', history_file,
                 3,    # bufferTime (minutes)
                 10,   # bufferDist (km)
                 30,   # historyTime (minutes)
                 output_dir, 'json')
|
# Global comparison counter: incremented once per element compared to a pivot.
cnt = 0


def quicksort(arr):
    """Return a sorted copy of *arr* using quicksort (first element as pivot).

    Fixes over the previous version:
    - does NOT mutate the caller's list (the old code drained it with
      ``pop(0)``, which also cost O(n) per call);
    - counts comparisons into the global ``cnt`` exactly as before.
    """
    global cnt
    if len(arr) <= 1:
        return list(arr)
    pivot, rest = arr[0], arr[1:]
    smaller, larger = [], []
    for item in rest:
        cnt += 1
        if item <= pivot:
            smaller.append(item)
        else:
            larger.append(item)
    return quicksort(smaller) + [pivot] + quicksort(larger)
# Demo: show the input, the sorted result, and the comparison count.
a = [1, 0, 15, 6, 7]
print(a)
# Capture and show the result -- quicksort returns a new sorted list
# (the previous code discarded the return value).
print(quicksort(a))
print(cnt)
|
# coding=utf-8
from .test_default_mirror import TestDefaultMirror
from .test_httpbin import TestHttpbin
from .test_verification import TestVerification, TestVerificationSingleAnswer
from .test_cache_system import TestCacheSystem
from .test_cdn import TestCDN
from .test_redirection import TestRedirection
from .test_functions import TestFunctions
from .test_custom_response_text_rewrite import TestCustomResponseRewriter
from .test_developer_functions import TestDeveloperFunctions
from .test_non_standard_port import TestNonStandardPort
from .test_regex import TestRegex
from .test_connection_pool import TestConnectionPool
from .test_custom_content_injection import TestContentInjection
|
# pip install paho-mqtt
# Reads a DHT22 (temperature/humidity) and an MQ135 (CO2, via MCP3008 ADC)
# every minute and publishes the values to a ThingSpeak channel over MQTT.
import paho.mqtt.publish as publish
import Adafruit_DHT
import time
import datetime
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
# ThingSpeak channel ID of our channel
channelID="1326958"
# API key of our channel
# NOTE(review): credential is hardcoded in source control -- consider moving
# it to an environment variable or a config file.
apiKey="QAAPTUMOAJJS7YPT"
# Host name of the ThingSpeak MQTT broker
mqttHost = "mqtt.thingspeak.com"
# Connection configuration
# ssl: TLS (transport layer security) module
import ssl
# Transport type
tTransport = "websockets"
# TLS settings (CA bundle and protocol version)
tTLS = {'ca_certs':"/etc/ssl/certs/ca-certificates.crt",'tls_version':ssl.PROTOCOL_TLSv1}
# Port (443: TLS over websockets)
tPort = 443
# Build the publish topic
topic = "channels/" + channelID + "/publish/" + apiKey
# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D5)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)
# create an analog input channel on pin 0
chan = AnalogIn(mcp, MCP.P0)
# DHT22 sensor on GPIO pin 23
sensor=Adafruit_DHT.DHT22
pin=23
#archivo=open("humedad.txt","w")
#archivo.write("humedad"+" "+"temperatura"+" "+"co2")
# Main acquisition loop: read sensors, publish, sleep 60 s.
while True:
    humedad, temperatura = Adafruit_DHT.read_retry(sensor, pin)
    # CO2 concentration from the ADC channel voltage.
    # NOTE(review): linear calibration constants for the MQ135 -- confirm
    # against the sensor's calibration/datasheet.
    concentracion= (159.6-(((chan.voltage)/10)*133.33))
    if humedad is not None and temperatura is not None:
        print(f'temperatura={temperatura:.2f}*C Humedad={humedad:.2f}%')
        print('concentración', str(concentracion)+"ppm")
        # current date and time for the timestamp
        fecha=datetime.datetime.now()
        # reformat the timestamp
        fecha=fecha.strftime('%Y-%m-%d-%H:%M:%S')
        print('fecha=',fecha)
        # payload string: three ThingSpeak fields, each tagged with the timestamp
        tPayload= "field1=" + str(temperatura) + (' fecha ') + str(fecha) + (' sensor dht22') + "&field2=" + str(humedad) + (' fecha ') + str(fecha) + (' sensor dht22 ') + "&field3=" + str(concentracion) + (' fecha ') + str(fecha) + (' sensor MQ135')
        # try to publish the payload; Ctrl-C exits the loop
        try:
            publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
        except (KeyboardInterrupt):
            break
        time.sleep(60)
    else:
        # DHT read failed ("fallo lectura" = read failure)
        print('fallo lectura')
#archivo.write("\n"+str(humedad)+"%"+" "+str(temperatura)+"°C"+" "+str(concentracion)+"ppm")
#archivo.close
|
# Minimal Telegram bot: replies "beep boop" to the /start command.
# Import bot token from environment variables
from os import environ

botToken = environ.get("MR_ROBOTO_TOKEN")

# Create updater and save reference to dispatcher
from telegram.ext import Updater

updater = Updater(token=botToken)
dispatcher = updater.dispatcher  # stray semicolon removed

# Set up logging
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)


def start(bot, update):
    """Handle the /start command by sending a greeting to the chat."""
    bot.send_message(chat_id=update.message.chat_id, text="beep boop")


# Add command handler for start command
from telegram.ext import CommandHandler

start_handler = CommandHandler("start", start)
dispatcher.add_handler(start_handler)

# Start polling commands (blocks until stopped)
updater.start_polling()
|
# -*- coding=UTF-8 -*-
# pyright: strict, reportTypeCommentUsage=none
from __future__ import absolute_import, division, print_function, unicode_literals
from wulifang.vendor.wlf import path as wlf_path
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Text
def shot_from_filename(_filename):
    # type: (Text) -> Text
    """The related shot for this footage.

    >>> shot_from_filename('sc_001_v20.nk')
    u'sc_001'
    >>> shot_from_filename('hello world')
    u'hello world'
    >>> shot_from_filename('sc_001_v-1.nk')
    u'sc_001_v-1'
    >>> shot_from_filename('sc001V1.jpg')
    u'sc001'
    >>> shot_from_filename('sc001V1_no_bg.jpg')
    u'sc001'
    >>> shot_from_filename('suv2005_v2_m.jpg')
    u'suv2005'
    """
    # TODO: avoid use of library
    parsed = wlf_path.PurePath(_filename)
    return parsed.shot  # type: ignore
|
def min_manufacture(i, j, s):
    """Branch-and-bound DFS over the module-level cost matrix ``rate``.

    ``i``/``j`` are the current row/column and ``s`` the accumulated cost.
    Updates the module-level ``min_rate`` with the cheapest complete
    row-by-row assignment; ``visited`` flags columns already used.
    """
    global min_rate
    # Prune: this branch can no longer beat the best known total.
    if s >= min_rate:
        return
    # Last row reached: s is a complete total and, thanks to the prune
    # above, strictly better than the current best.
    if i == N - 1:
        min_rate = s
        return
    # Flag column j as taken (the whole column is flagged, though only the
    # visited[i+1][...] entries are read below).
    for row in range(N):
        visited[row][j] = 1
    for col in range(N):
        if visited[i + 1][col] == 0:
            min_manufacture(i + 1, col, s + rate[i + 1][col])
    # Unflag column j on backtrack.
    for row in range(N):
        visited[row][j] = 0
# Driver: one test case per block of input -- read N and the NxN rate
# matrix, then try every starting column; min_manufacture prunes and
# records the cheapest total in the global min_rate.
for tc in range(1, int(input())+1):
    N = int(input())
    rate = [list(map(int, input().split())) for _ in range(N)]
    # visited[r][c] == 1 -> column c is already used on some row
    visited = [[0]*N for _ in range(N)]
    # Sentinel upper bound -- assumes real totals stay below 1,000,000
    min_rate = 1000000
    for j in range(N):
        min_manufacture(0, j, rate[0][j])
    print('#{} {}'.format(tc,min_rate))
#coding=utf-8
import os
import shutil
import unittest
import tempfile
from DB import DB
from DBOptions import DBOptions
from WriteOptions import WriteOptions
from ReadOptions import ReadOptions
class TestDB(unittest.TestCase):
    """Smoke tests for the DB binding: basic put/get and tiered db_paths."""

    def setUp(self):
        # One scratch directory per test; removed in tearDown.
        self.db_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.db_dir, ignore_errors=True)

    def _make_tmpdir(self):
        # Create a temp dir that is guaranteed to be cleaned up after the
        # test (the old test_tiered_db leaked two mkdtemp() dirs per run).
        path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, path, True)
        return path

    def test_db_put_get(self):
        """Open a fresh DB, write one key and read it back."""
        db_options = DBOptions()
        db_options.create_if_missing = True
        db, status = DB.Open(db_options, self.db_dir.encode())
        self.assertTrue(status.OK())
        write_options = WriteOptions()
        status = db.Put(write_options, b"foo", b"bar")
        self.assertTrue(status.OK())
        read_options = ReadOptions()
        value, status = db.Get(read_options, b"foo")
        self.assertTrue(status.OK())
        self.assertEqual(b"bar", value)

    def test_tiered_db(self):
        """Open a DB with two extra db_paths; read a present and a missing key."""
        db_options = DBOptions()
        db_options.create_if_missing = True
        db_options.add_db_path(os.path.join(self._make_tmpdir(), "1"), 1024)
        db_options.add_db_path(os.path.join(self._make_tmpdir(), "2"), 1024)
        db, status = DB.Open(db_options, self.db_dir.encode())
        self.assertTrue(status.OK())
        write_options = WriteOptions()
        status = db.Put(write_options, b"foo1", b"bar")
        self.assertTrue(status.OK())
        # Large value to push data toward the second tier
        status = db.Put(write_options, b"foo2", b"bar" * 1024)
        self.assertTrue(status.OK())
        read_options = ReadOptions()
        value, status = db.Get(read_options, b"foo1")
        self.assertTrue(status.OK())
        self.assertEqual(b"bar", value)
        # Missing key: Get succeeds but returns None
        value, status = db.Get(read_options, b"foo3")
        self.assertTrue(status.OK())
        self.assertEqual(None, value)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 28 10:42:00 2017
@author: utente
Sbilanciamento 7 -- OUT OF SAMPLE ERROR CONTROL MODULE --
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import os
####################################################################################################
def convertDates(vec):
    """Parse a Series of 'dd/mm/yyyy hh...' strings into datetimes.

    Fixed character positions are used: day = [0:2], month = [3:5],
    year = [6:10], hour = [11:13].
    """
    def _parse(value):
        text = str(value)
        return datetime.datetime(
            year=int(text[6:10]),
            month=int(text[3:5]),
            day=int(text[:2]),
            hour=int(text[11:13]),
        )

    return vec.apply(_parse)
####################################################################################################
def Get_ZonalDataset(df, zona):
    """Extract the hourly 'MO [MWh]' series for one zone as a DataFrame.

    Keeps only rows whose RUC code matches the zone, restricts to 2017
    onwards and returns a single-column frame indexed by hour.
    NOTE: relies on the deprecated pandas ``.ix`` indexer (old pandas only).
    """
    # Keep only the rows belonging to this zone's RUC code
    df = df.ix[df["CODICE RUC"] == "UC_DP1608_" + zona]
    cd = convertDates(df['DATA RIFERIMENTO CORRISPETTIVO'])
    df = df.set_index(cd.values)
    # Restrict to dates from 2017-01-01 onwards
    df = df.ix[df.index.date > datetime.date(2016,12,31)]
    dr = pd.date_range('2017-01-01', df.index.date[-1], freq = 'D')
    res = []
    for i in dr.tolist():
        dfd = df.ix[df.index.date == i.to_pydatetime().date()]
        if dfd.shape[0] == 24:
            # Regular day: take the 24 hourly values as-is
            res.extend((dfd['MO [MWh]'].values).tolist())
        else:
            # Irregular day: fill hour by hour
            for hour in range(24):
                dfdh = dfd.ix[dfd.index.hour == hour]
                if dfdh.shape[0] == 0:
                    # Missing hour -> 0 (presumably a DST gap -- confirm)
                    res.append(0)
                elif dfdh.shape[0] == 2:
                    # Duplicated hour -> sum (presumably DST overlap -- confirm)
                    res.append(dfdh["MO [MWh]"].sum())
                else:
                    res.append(dfdh["MO [MWh]"].values[0])
    diz = pd.DataFrame(res)
    diz.columns = [[zona]]
    # Hourly index from Jan 1st, truncated to the data actually present
    diz = diz.set_index(pd.date_range('2017-01-01', '2017-12-31', freq = 'H')[:diz.shape[0]])
    return diz
####################################################################################################
def Error_Control(zona, month, what):
    """Print and plot forecast-error statistics for *zona* in *month*.

    ``what`` selects the evaluation set:
      'OOS'    -> out-of-sample history vs (zonal forecast - sampled forecast)
      'Sample' -> sampled forecast vs measured sample (via Get_SampleAsTS)
      other    -> zonal measurements vs the zonal forecast
    Side effects only (stdout + two matplotlib figures); always returns 1.
    NOTE: uses hard-coded local file paths and the deprecated pandas ``.ix``.
    """
    ### @PARAM: zona and month are self-explanatory, what = {OOS, Sample, ZONA}
    forecast = pd.read_excel('C:/Users/utente/Documents/Sbilanciamento/forecast_' + zona + '.xlsx')
    if what == 'OOS':
        oos = pd.read_hdf('C:/Users/utente/Documents/Sbilanciamento/storico_oos_' + zona + '.h5')
        sample = pd.read_hdf('C:/Users/utente/Documents/Sbilanciamento/forecast_campione_' + zona + '.h5', 'sample_' + zona.lower())
        # Out-of-sample forecast = zonal forecast minus the sampled part
        foos = forecast - sample
        oosm = oos.ix[oos.index.month == month]
        foosm = foos.ix[foos.index.month == month]
        error = oosm - foosm
        den = oosm
        SETPARAM = 'Out Of Sample'
    elif what == 'Sample':
        sample = pd.read_hdf('C:/Users/utente/Documents/Sbilanciamento/forecast_campione_' + zona + '.h5', 'sample_' + zona.lower())
        dt = pd.read_excel("C:/Users/utente/Documents/Sbilanciamento/Aggregatore_orari-2017.xlsx")
        dt.columns = [str(i) for i in dt.columns]
        # Keep the POD metadata plus the 24 hourly columns
        dt = dt[["POD", "Area", "Giorno", "1","2","3","4","5","6","7","8","9","10","11","12","13","14","15",
                 "16","17","18","19","20","21","22","23","24"]]
        dt = dt.drop_duplicates(subset = ['POD', 'Area', 'Giorno'], keep = 'last')
        # Get_SampleAsTS is defined elsewhere in this project
        Sample = Get_SampleAsTS(dt, zona)
        samplem = sample.ix[sample.index.month == month]
        Samplem = Sample.ix[Sample.index.month == month]
        error = Samplem - samplem
        den = Samplem
        SETPARAM = 'sample'
    else:
        # Whole zone: measured zonal data vs the zonal forecast
        df = pd.read_excel('C:/Users/utente/Documents/misure/aggregato_sbilanciamento2.xlsx')
        df = Get_ZonalDataset(df, zona)
        dfy = df[zona].ix[df.index.year == 2017]
        dfm = dfy.ix[dfy.index.month == month]
        error = dfm - forecast.ix[forecast.index.month == month]
        den = forecast.ix[forecast.index.month == month]
        SETPARAM = 'ZONA '  #+ zona
    # Raw error statistics
    print 'mean error on {}: {}'.format(SETPARAM, np.mean(error.values.ravel()))
    print 'median error on {}: {}'.format(SETPARAM, np.median(error.values.ravel()))
    print 'max error on {}: {}'.format(SETPARAM, np.max(error.values.ravel()))
    print 'standard deviation of error on {}: {}'.format(SETPARAM, np.std(error.values.ravel()))
    print 'mean absolute error on {}: {}'.format(SETPARAM, np.mean(np.abs(error.values.ravel())))
    print 'median absolute error on {}: {}'.format(SETPARAM, np.median(np.abs(error.values.ravel())))
    print 'max absolute error on {}: {}'.format(SETPARAM, np.max(np.abs(error.values.ravel())))
    print 'standard deviation of absolute error on {}: {}'.format(SETPARAM, np.std(np.abs(error.values.ravel())))
    # Relative imbalance ("sbilanciamento") = error / reference
    Sbil = error/den
    AbsSbil = error.abs()/den
    plt.figure()
    plt.plot(Sbil.index, Sbil.values.ravel())
    plt.title('Sbilanciamento {} zona {} in month {}'.format(SETPARAM, zona, month))
    plt.figure()
    plt.plot(AbsSbil.index, AbsSbil.values.ravel())
    plt.title('Sbilanciamento assoluto {} zona {} in month {}'.format(SETPARAM, zona, month))
    # Relative imbalance statistics
    print 'mean sbilanciamento on {}: {}'.format(SETPARAM, np.mean(Sbil.values.ravel()))
    print 'median sbilanciameto on {}: {}'.format(SETPARAM, np.median(Sbil.values.ravel()))
    print 'max sbilanciamento on {}: {}'.format(SETPARAM, np.max(Sbil.values.ravel()))
    print 'min sbilanciamento on {}: {}'.format(SETPARAM, np.min(Sbil.values.ravel()))
    print 'standard deviation of sbilanciamento on {}: {}'.format(SETPARAM, np.std(Sbil.values.ravel()))
    print 'mean absolute sbilanciamento on {}: {}'.format(SETPARAM, np.mean(AbsSbil.values.ravel()))
    print 'median absolute sbilanciamento on {}: {}'.format(SETPARAM, np.median(AbsSbil.values.ravel()))
    print 'max absolute sbilanciamento on {}: {}'.format(SETPARAM, np.max(AbsSbil.values.ravel()))
    print 'standard deviation of absolute sbilanciamento on {}: {}'.format(SETPARAM, np.std(AbsSbil.values.ravel()))
    return 1
####################################################################################################
def Daily_Error_Control(day, zona):
    """Print and plot sample-forecast error statistics for one day.

    Compares the stored sample forecast against the measured sample
    (Get_SampleAsTS, defined elsewhere) for date *day* in zone *zona*.
    Side effects only (stdout + two matplotlib figures); always returns 1.
    NOTE: uses hard-coded local file paths and the deprecated pandas ``.ix``.
    """
    sample = pd.read_hdf('C:/Users/utente/Documents/Sbilanciamento/forecast_campione_' + zona + '.h5', 'sample_' + zona.lower())
    dt = pd.read_excel("C:/Users/utente/Documents/Sbilanciamento/Aggregatore_orari-2017.xlsx")
    dt.columns = [str(i) for i in dt.columns]
    # Keep the POD metadata plus the 24 hourly columns
    dt = dt[["POD", "Area", "Giorno", "1","2","3","4","5","6","7","8","9","10","11","12","13","14","15",
             "16","17","18","19","20","21","22","23","24"]]
    dt = dt.drop_duplicates(subset = ['POD', 'Area', 'Giorno'], keep = 'last')
    Sample = Get_SampleAsTS(dt, zona)
    # Restrict both series to the requested day
    sampled = sample.ix[sample.index.date == day]
    Sampled = Sample.ix[Sample.index.date == day]
    SETPARAM = 'Sample'
    error = Sampled - sampled
    # Relative (and absolute relative) imbalance
    Sbil = error/Sampled
    Asbil = error.abs()/Sampled
    print 'mean error on {}: {}'.format(SETPARAM, np.mean(error.values.ravel()))
    print 'median error on {}: {}'.format(SETPARAM, np.median(error.values.ravel()))
    print 'max error on {}: {}'.format(SETPARAM, np.max(error.values.ravel()))
    print 'standard deviation of error on {}: {}'.format(SETPARAM, np.std(error.values.ravel()))
    print 'mean absolute error on {}: {}'.format(SETPARAM, np.mean(np.abs(error.values.ravel())))
    print 'median absolute error on {}: {}'.format(SETPARAM, np.median(np.abs(error.values.ravel())))
    print 'max absolute error on {}: {}'.format(SETPARAM, np.max(np.abs(error.values.ravel())))
    print 'standard deviation of absolute error on {}: {}'.format(SETPARAM, np.std(np.abs(error.values.ravel())))
    plt.figure()
    plt.plot(Sbil.index, Sbil.values.ravel())
    plt.title('Sbilanciamento {} zona {} in day {}'.format(SETPARAM, zona, day))
    plt.figure()
    plt.plot(Asbil.index, Asbil.values.ravel())
    plt.title('Sbilanciamento assoluto {} zona {} in day {}'.format(SETPARAM, zona, day))
    print 'mean sbilanciamento on {}: {}'.format(SETPARAM, np.mean(Sbil.values.ravel()))
    print 'median sbilanciameto on {}: {}'.format(SETPARAM, np.median(Sbil.values.ravel()))
    print 'max sbilanciamento on {}: {}'.format(SETPARAM, np.max(Sbil.values.ravel()))
    print 'min sbilanciamento on {}: {}'.format(SETPARAM, np.min(Sbil.values.ravel()))
    print 'standard deviation of sbilanciamento on {}: {}'.format(SETPARAM, np.std(Sbil.values.ravel()))
    print 'mean absolute sbilanciamento on {}: {}'.format(SETPARAM, np.mean(Asbil.values.ravel()))
    print 'median absolute sbilanciamento on {}: {}'.format(SETPARAM, np.median(Asbil.values.ravel()))
    print 'max absolute sbilanciamento on {}: {}'.format(SETPARAM, np.max(Asbil.values.ravel()))
    print 'standard deviation of absolute sbilanciamento on {}: {}'.format(SETPARAM, np.std(Asbil.values.ravel()))
    return 1
####################################################################################################
def PDOs_To_TS(pdo, zona):
    """Aggregate per-POD daily measurements for *zona* into one hourly series.

    Sums the 24 hourly columns (columns[4:]) over all PODs for each day and
    divides by 1000 (presumably kWh -> MWh -- confirm units upstream).
    NOTE: mutates the caller's frame (the 'Giorno' column is converted
    in place) and relies on the deprecated pandas ``.ix`` indexer.
    """
    pdo["Giorno"] = pd.to_datetime(pdo["Giorno"])
    pdo = pdo.ix[pdo["zona"] == zona]
    dr = pd.date_range(min(pdo["Giorno"].values.ravel()), max(pdo["Giorno"].values.ravel()), freq = 'D')
    res = []
    for i in dr.tolist():
        # Sum the hourly columns over all PODs measured on day i
        dbd = pdo[pdo.columns[4:]].ix[pdo["Giorno"] == i].sum()/1000
        res.extend(dbd.values.tolist())
    diz = pd.DataFrame(res)
    diz.columns = [['MO [MWh]']]
    # Hourly index starting at the first measured day, truncated to the data
    diz = diz.set_index(pd.date_range(min(pdo["Giorno"].values.ravel()), '2017-12-31', freq = 'H')[:diz.shape[0]])
    return diz
####################################################################################################
def Terna_vs_PDOs(terna, pdo, zona):
    """Hourly difference (numpy array) between Terna's series and POD totals.

    Both series are restricted to 2017 onwards before subtracting.
    NOTE(review): the two ravel()ed arrays are aligned by position, not by
    timestamp -- confirm both series cover exactly the same hours.
    """
    pdots = PDOs_To_TS(pdo, zona)
    # common_indeces = list(set(terna.index).intersection(set(pdots.index)))
    error = terna['MO [MWh]'].ix[terna.index.year > 2016].values.ravel() - pdots.ix[pdots.index.year > 2016].values.ravel()
    return error
####################################################################################################
def ModelComparison(forecast1, forecast2, terna):
    """Compare two forecasts against Terna's measured series.

    Returns a 3-tuple: (measured - forecast1, measured - forecast2,
    forecast1 - forecast2), all restricted to terna's index.
    NOTE: relies on the deprecated pandas ``.ix`` indexer.
    """
    forecast1 = forecast1.ix[terna.index]
    forecast2 = forecast2.ix[terna.index]
    pred_error1 = terna['MO [MWh]'] - forecast1
    pred_error2 = terna['MO [MWh]'] - forecast2
    between_error = forecast1 - forecast2
    return pred_error1, pred_error2, between_error
####################################################################################################
#def CompareTrendToTerna(df, dtc, zona):
### @BRIEF: function to compare the trend of the required dataset to the trend given by Terna.
### @PARAM: df is the dataset from Terna (assumed to be already a time series with the proper correct time index)
### dtc is the dates to compare (it could be OOS or S)
#df.loc[datetime.datetime()]
####################################################################################################
def PDOTaker(pdo, p, d):
    """Return the 24 hourly values for POD *p* on day *d* from frame *pdo*.

    The hourly values are the columns after the first four metadata columns.
    Returns a vector of 24 zeros when the POD is unknown OR when the POD
    exists but has no row for day *d* -- the old code fell through and
    returned None in that second case, crashing callers that sum the result.
    """
    if p in pdo['POD'].values.ravel().tolist():
        pdop = pdo.loc[pdo['POD'] == p]
        pdopd = pdop.loc[pdop['Giorno'] == d]
        if pdopd.shape[0] > 0:
            return pdopd[pdopd.columns[4:]].values.ravel()
        # POD known but no measurement for this day
        return np.repeat(0, 24)
    print('** POD not in PDO **')
    return np.repeat(0, 24)
####################################################################################################
def CompareTrueSample(db, zona):
    ### @BRIEF: this function compares the predicted sample to the correct measurments of the same PODs actually predicted
    """Rebuild the measured counterpart of the sampled forecast for *zona*.

    For every forecast date, sums the stored measurements (PDOTaker) of the
    PODs that were actually sampled (SampleAtDay, defined elsewhere) and
    returns them as a single-column DataFrame on the forecast's index.
    NOTE: uses hard-coded local file paths; Get_SampleAsTS / SampleAtDay are
    defined elsewhere in this project.
    """
    Sample = Get_SampleAsTS(db, zona)
    sample = pd.read_excel('C:/Users/utente/Documents/Sbilanciamento/forecast_campione_' + zona + '.xlsx')
    pdo = pd.read_hdf("C:/Users/utente/Documents/Sbilanciamento/DB_misure.h5")
    # Unique forecast dates
    list_of_dates = list(set(map(lambda date: date.date(), sample.index)))
    meas = []
    for d in list_of_dates:
        # 24-hour accumulator for day d
        # NOTE(review): np.repeat(0,24) is an int array; the in-place += of
        # predpdop/1000 (floats) raises a casting error on modern NumPy --
        # presumably this ran under an older version; confirm.
        AM = np.repeat(0,24)
        sad = SampleAtDay(db, d, zona)
        for p in sad:
            predpdop = PDOTaker(pdo, p, d)
            AM += predpdop/1000
        meas.extend(AM.tolist())
    Meas = pd.DataFrame({zona: meas}).set_index(sample.index)
    return Meas
####################################################################################################
def ToTS(df):
    """Flatten a day-per-row frame (last 24 columns = hours) into an hourly series.

    Returns a single-column DataFrame ('X') whose index is an hourly date
    range from the first to the last 'Giorno' value, truncated to the
    number of values actually collected.
    NOTE: relies on the deprecated pandas ``.ix`` indexer.
    """
    ts = []
    dmin = df['Giorno'].min()
    dmax = df['Giorno'].max()
    for i in df.index:
        # Append this row's 24 hourly values in order
        y = df[df.columns[-24:]].ix[i].values.ravel().tolist()
        ts.extend(y)
    DF = pd.DataFrame({'X': ts}).set_index(pd.date_range(dmin, dmax + datetime.timedelta(days = 1), freq = 'H')[:len(ts)])
    return DF
####################################################################################################
def setRicalIndex(rical):
    """Build a list of datetimes from the 'Giorno' (date) and 'Ora' (hour) columns.

    NOTE(review): assumes 'Ora' already holds 0-23 hour values -- confirm
    (Italian market data often uses 1-24, which would raise on hour 24).
    NOTE: relies on the deprecated pandas ``.ix`` indexer.
    """
    dl = []
    for i in range(rical.shape[0]):
        # First 10 chars of 'Giorno' are the ISO date
        timestring = str(rical['Giorno'].ix[i])[:10]
        dt = datetime.datetime.strptime(timestring, '%Y-%m-%d')
        dt = dt.replace(hour = int(rical['Ora'].ix[i]))
        dl.append(dt)
    return dl
####################################################################################################
def getSbilanciamentoPOD(pod, rical, zona):
    ### @BRIEF: returns the actual sbilanciamento given a POD
    """Relative imbalance series for one POD: (measured - recalculated) / measured.

    *rical* holds the recalculated hourly values per POD; measurements come
    from the local DB_misure.h5 store.
    NOTE(review): the *zona* parameter is unused -- confirm whether it is
    vestigial or should filter the measurement frame.
    NOTE: relies on the deprecated pandas ``.ix`` indexer and a hard-coded
    local file path.
    """
    dl = setRicalIndex(rical)
    val = rical[pod].values.ravel()
    # Recalculated series on an hourly index spanning the rical dates
    RP = pd.DataFrame({'X': val}).set_index(pd.date_range(dl[0].date(), dl[-1].date() + datetime.timedelta(days = 1), freq = 'H')[:len(dl)])
    pdo = pd.read_hdf("C:/Users/utente/Documents/Sbilanciamento/DB_misure.h5")
    pdop = pdo.ix[pdo['POD'] == pod]
    pdop['Giorno'] = pdop['Giorno'].apply(lambda s: datetime.datetime.strptime(s, '%Y-%m-%d').date())
    # Keep 2017 onwards, one row per day (latest wins)
    pdop = pdop.ix[pdop['Giorno'] > datetime.date(2016,12,31)]
    pdop = pdop.drop_duplicates(subset = ['Giorno'], keep = 'last')
    TS = ToTS(pdop)
    # Align the recalculated series to the measured hours
    ricalp = RP.ix[TS.index]
    SBIL = (TS.values.ravel() - ricalp.values.ravel())/TS.values.ravel()
    SBIL = pd.DataFrame({'Sbilanciamento_' + pod: SBIL}).set_index(TS.index)
    return SBIL
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import shutil
import sys
import json
from collections import namedtuple
def encode(data, encoding='utf-8'):
    """Encode *data* with *encoding* on Python 2; pass it through on Python 3."""
    running_py2 = sys.version_info.major == 2
    return data.encode(encoding) if running_py2 else data
def decode(data, encoding='utf-8'):
    """Decode *data* with *encoding* on Python 2; pass it through on Python 3."""
    running_py2 = sys.version_info.major == 2
    return data.decode(encoding) if running_py2 else data
def remove_path(path):
    """Recursively delete *path* if it exists; silently do nothing otherwise."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
# Read json file data
def read_json_file(input_file):
    """Parse *input_file* as JSON and return the resulting object.

    Raises OSError when the file does not exist and a generic Exception
    when its content is not valid JSON.
    """
    if not os.path.isfile(input_file):
        raise OSError('{} not found'.format(input_file))
    with open(input_file, 'rb') as input_f:
        try:
            return json.load(input_f)
        except json.JSONDecodeError:
            raise Exception('{} parsing error!'.format(input_file))
def dump_json_file(dump_file, json_data):
    """Write *json_data* to *dump_file* as pretty-printed, non-ASCII-escaped
    UTF-8 JSON (2-space indent)."""
    with open(dump_file, 'wt', encoding='utf-8') as json_file:
        json.dump(json_data, json_file, ensure_ascii=False, indent=2)
def get_input(msg):
    """Prompt the user with *msg* and return the entered line.

    Bug fix: the original wrapped ``user_input = input`` in try/except
    NameError, but the ``input`` builtin exists on Python 2 as well (where
    it eval()s the typed text), so the 'python2.x not supported' branch was
    dead code and Python 2 was silently accepted with unsafe semantics.
    Check the interpreter version explicitly instead.
    """
    if sys.version_info.major == 2:
        raise Exception('python2.x not supported')
    return input(msg)
def exec_command(cmd, log_path='out/build.log', **kwargs):
    """Run *cmd* as a subprocess, teeing its stdout into *log_path*.

    When ``log_filter=True`` is passed, only stdout lines matching the
    ninja progress pattern '[N/M] ...' are echoed to the console; every
    line is still written to the log.  Raises Exception on a non-zero
    exit code.

    NOTE(review): stderr is only drained after the process exits; a very
    chatty stderr could in principle fill the pipe buffer and stall the
    child — confirm the commands used here keep stderr small.
    """
    # ninja-style progress lines, e.g. "[12/345] CC foo.o"
    useful_info_pattern = re.compile(r'\[\d+/\d+\].+')
    is_log_filter = kwargs.pop('log_filter', False)
    with open(log_path, 'at', encoding='utf-8') as log_file:
        process = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   encoding='utf-8',
                                   **kwargs)
        # Stream stdout line by line until EOF.
        for line in iter(process.stdout.readline, ''):
            if is_log_filter:
                info = re.findall(useful_info_pattern, line)
                if len(info):
                    hb_info(info[0])
            else:
                hb_info(line)
            log_file.write(line)
        process.wait()
    ret_code = process.returncode
    if ret_code != 0:
        # On failure: append stderr to the log (ninja warnings are logged
        # but not echoed), surface the failed chunks, then raise.
        with open(log_path, 'at', encoding='utf-8') as log_file:
            for line in iter(process.stderr.readline, ''):
                if 'ninja: warning' in line:
                    log_file.write(line)
                    continue
                hb_error(line)
                log_file.write(line)
        if is_log_filter:
            get_failed_log(log_path)
        hb_error('you can check build log in {}'.format(log_path))
        if isinstance(cmd, list):
            cmd = ' '.join(cmd)
        raise Exception("{} failed, return code is {}".format(cmd, ret_code))
def get_failed_log(log_path):
    """Scan the build log for '[N/M] ... FAILED:' chunks and echo them, then
    echo error.log from the same directory when it exists."""
    with open(log_path, 'rt', encoding='utf-8') as log_file:
        content = log_file.read()
    # A chunk runs from one '[N/M]' marker up to the next marker or the
    # 'ninja: build stopped' sentinel.
    chunk_pattern = re.compile(r'(\[\d+/\d+\].*?)(?=\[\d+/\d+\]|'
                               'ninja: build stopped)', re.DOTALL)
    for chunk in chunk_pattern.findall(content):
        if 'FAILED:' in chunk:
            hb_error(chunk)
    error_log = os.path.join(os.path.dirname(log_path), 'error.log')
    if os.path.isfile(error_log):
        with open(error_log, 'rt', encoding='utf-8') as err_file:
            hb_error(err_file.read())
def check_output(cmd, **kwargs):
    """Run *cmd* and return its combined stdout+stderr as text.

    Raises Exception (carrying the captured output) when the command
    exits non-zero.
    """
    try:
        return subprocess.check_output(cmd,
                                       stderr=subprocess.STDOUT,
                                       universal_newlines=True,
                                       **kwargs)
    except subprocess.CalledProcessError as called_exception:
        output = called_exception.output
        failed_cmd = ' '.join(cmd) if isinstance(cmd, list) else cmd
        raise Exception("{} failed, failed log is {}".format(failed_cmd, output))
def makedirs(path, exist_ok=True):
    """Create *path* (with parents).

    An already-existing directory is tolerated when *exist_ok* is true;
    otherwise (or when creation fails for another reason) Exception is raised.
    """
    try:
        os.makedirs(path)
        return
    except OSError:
        pass
    if not os.path.isdir(path):
        raise Exception("{} makedirs failed".format(path))
    if not exist_ok:
        raise Exception("{} exists, makedirs failed".format(path))
def get_project_path(json_path):
    """Return the 'root_path' entry stored in the JSON file *json_path*
    (None when the key is absent)."""
    config = read_json_file(json_path)
    return config.get('root_path')
def args_factory(args_dict):
    """Build an immutable 'Args' namedtuple whose fields are the keys of
    *args_dict*; at least one entry is required."""
    if not args_dict:
        raise Exception('at least one k_v param is required in args_factory')
    args_cls = namedtuple('Args', list(args_dict))
    return args_cls(**args_dict)
def hb_info(msg):
    """Emit *msg* to stdout, one '[OHOS INFO]' line per input line."""
    for text_line in msg.splitlines():
        sys.stdout.write(message('info', text_line))
        sys.stdout.flush()
def hb_warning(msg):
    """Emit *msg* to stderr, one '[OHOS WARNING]' line per input line."""
    for text_line in msg.splitlines():
        sys.stderr.write(message('warning', text_line))
        sys.stderr.flush()
def hb_error(msg):
    """Emit *msg* to stderr, one '[OHOS ERROR]' line per input line."""
    for text_line in msg.splitlines():
        sys.stderr.write(message('error', text_line))
        sys.stderr.flush()
def message(level, msg):
    """Format *msg* as '[OHOS LEVEL] msg', guaranteeing one trailing newline
    for string inputs."""
    needs_newline = isinstance(msg, str) and not msg.endswith('\n')
    if needs_newline:
        msg = msg + '\n'
    return '[OHOS {}] {}'.format(level.upper(), msg)
class Singleton(type):
    """Metaclass that caches one instance per class: the first call to the
    class constructs the instance, later calls return the cached one."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
|
import unittest
from datetime import datetime
import scraper as scr
import gearman
import bson
class TestScraping(unittest.TestCase):
    """Integration tests for the scraper module.

    NOTE(review): test_get_feed_data fetches a live URL, so it requires
    network access and the remote fixture feed to remain unchanged.
    """
    def test_get_feed_data(self):
        # The url is to a static RSS feed stolen from Hacker News
        test_feed = scr.get_feed_data('http://u.m1cr0man.com/l/feed.xml')
        # First item of the fixture: title, link and parsed publication date.
        title = 'Why it is NOT WISE to discuss personal information in front of smart TVs'
        self.assertEqual(test_feed[0]['name'], title)
        link = 'http://www.hackernews.org/2016/02/14/why-it-is-not-wise-to-discuss-personal-information-in-front-of-smart-tvs/'
        self.assertEqual(test_feed[0]['link'], link)
        pub_date = datetime(2016, 2, 14, 21, 10, 2)
        self.assertEqual(test_feed[0]['pub_date'], pub_date)
        # Non-string input must be rejected with TypeError.
        with self.assertRaises(TypeError):
            scr.get_feed_data(666)

    # Disabled (kept as a string literal): end-to-end update via gearman.
    """def test_update_all(self):
        # takes too long to do every build
        gearman_client = gearman.GearmanClient(['localhost:4730'])
        raw_result = gearman_client.submit_job('update-all-feeds', '')
        result = bson.BSON.decode(bson.BSON(raw_result))
        self.assertTrue("status" in result)
        self.assertEqual(result["status"], "ok")"""
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
"""
controller.py
Python3 script to control two servos and a raspberry pi camera in a pan and tilt mechanism.
The first servo pans the second servo and tilt mechanism which holds the raspberry pi camera.
dependencies:
pip3 install gpiozero
pip3 install picamera
Things also work much more smoothly if you use the pigpio pin factory
https://gpiozero.readthedocs.io/en/stable/api_pins.html#changing-pin-factory
Rob Lloyd
Lincoln, March 2021.
"""
# Import libraries
from gpiozero import AngularServo
from gpiozero import Button
from picamera import PiCamera
import os
import time
from time import sleep
# GPIO (BCM) pin assignments for the two servos and the trigger button.
panServoPin = 13
tiltServoPin = 12
buttonPin = 25
scanning = False  # NOTE(review): set but never read in this script
# These are angles from the centre point
panMin = -60
panMax = 60
tiltMin = -60
tiltMax = 60
# Scanning Parameters
scan_shape = [5,5] # X x Y positions..
home = [0,0] # Save the home position for later
# Pan and tilt Servo servos set up
# initial_angle = min + max evaluates to 0 here because the limits are symmetric.
panServo = AngularServo(panServoPin, initial_angle=panMin+panMax, min_angle=panMin, max_angle=panMax)
tiltServo = AngularServo(tiltServoPin, initial_angle=tiltMin+tiltMax, min_angle=tiltMin, max_angle=tiltMax)
# Button setup (0.1 s debounce)
button = Button(buttonPin, bounce_time = 0.1)
# Setup the camera
camera = PiCamera(resolution=(1280, 720), framerate=30)
# Set ISO to the desired value
camera.iso = 100
# Wait for the automatic gain control to settle
sleep(1)
# Now fix the values (disabled: left on auto for now)
#camera.shutter_speed = camera.exposure_speed
#camera.exposure_mode = 'off'
#g = camera.awb_gains
#camera.awb_mode = 'off'
#camera.awb_gains = g
def set_position(newPos):
    """Move the pan/tilt head to ``newPos = [pan_angle, tilt_angle]``.

    Bug fix: button_callback passes None for an axis it does not want to
    move, but assigning None to a gpiozero AngularServo's ``angle``
    detaches that servo (it goes limp) rather than holding its position.
    Skip None entries so the untouched axis keeps holding its angle.
    """
    print(f"Moving to: {newPos}")
    if newPos[0] is not None:
        panServo.angle = newPos[0]
    if newPos[1] is not None:
        tiltServo.angle = newPos[1]
def button_callback(self):
    """Sweep a scan_shape[0] x scan_shape[1] grid of pan/tilt positions,
    capturing an image at each stop, then return to the home position.

    ``self`` is the gpiozero Button instance supplied by the
    ``button.when_released`` callback mechanism; it is not used here.
    """
    # Calculate the positions of the array
    panStep = (panMax - panMin) / scan_shape[0]
    tiltStep = (tiltMax - tiltMin) / scan_shape[1]
    print(f"panStep = {panStep}, tiltStep = {tiltStep}")
    # Start from the (panMax, tiltMax) corner.
    set_position([panMax, tiltMax])
    captureNext()
    # Sweep tilt down each column, then step pan across one column.
    # A position entry of None means "leave that axis as-is".
    for pStep in range(1, scan_shape[0] + 1):
        for tStep in range(1,scan_shape[1] + 1):
            set_position([None, tiltMax - (tStep*tiltStep)])
            captureNext()
        set_position([panMax-(pStep*panStep), None])
        captureNext()
    # Go back to the centre point
    set_position(home)
    print("Scan Done")
    sleep(0.25)
    print("ready")
def captureNext():
    """Pause for the camera to settle, build a timestamped filename inside
    the module-level output_folder, and (when enabled) capture an image."""
    # Dwell time for the camera to settle
    dwell = 0.5
    sleep(dwell)
    file_name = os.path.join(output_folder, 'image_' + time.strftime("%H_%M_%S") + '.jpg')
    print("*")
    # Actual capture currently disabled (dry-run); file_name is unused then.
    #camera.capture(file_name)
    #print("captured image: " + 'image_' + time.strftime("%H_%M_%S") + '.jpg')
    sleep(dwell)
# Handling the files
# get current working directory
path = os.getcwd()
# make the folder name (one capture session per run, timestamped)
folder_name = 'captureSession_' + time.strftime("%Y_%m_%d_%H_%M_%S")
# make the folder
os.mkdir(folder_name)
# construct the output folder path used by captureNext()
output_folder = os.path.join(path, folder_name)
# Callback for dealing with button press'
button.when_released = button_callback
# Wiggle both servos slightly so it is visible the rig is alive, then centre.
panServo.angle = 5
tiltServo.angle = 5
sleep(0.25)
panServo.angle = 0
tiltServo.angle = 0
print("ready")
try:
    while True:
        # Nothing to do here: the button callback does all the work.
        sleep(0.1)
        pass
#Clean things up at the end
except KeyboardInterrupt:
    print ("Goodbye")
"""
The short version of how servos are controlled
https://raspberrypi.stackexchange.com/questions/108111/what-is-the-relationship-between-angle-and-servo-motor-duty-cycle-how-do-i-impl
Servos are controlled by pulse width, the pulse width determines the horn angle.
A typical servo responds to pulse widths in the range 1000 to 2000 µs.
A pulse width of 1500 µs moves the servo to angle 0.
Each 10 µs increase in pulse width typically moves the servo 1 degree more clockwise.
Each 10 µs decrease in pulse width typically moves the servo 1 degree more anticlockwise.
Small 9g servos typically have an extended range and may respond to pulse widths in
the range 500 to 2500 µs.
Why do people think servos are controlled by duty cycle?
Because servos are typically given 50 pulses per second (50 Hz).
So each pulse is potentially a maximum of 20000 µs (1 million divided by 50).
A duty cycle is the percentage on time. 100% will be a 20000 µs pulse, way outside
the range accepted by a servo.
Do some calculations at 50 Hz for sample pulse widths.
500 / 20000 = 0.025 or 2.5 % dutycycle
1000 / 20000 = 0.05 or 5.0 % dutycycle
1500 / 20000 = 0.075 or 7.5 % dutycycle
2000 / 20000 = 0.1 or 10.0 % dutycycle
2500 / 20000 = 0.125 or 12.5 % dutycycle
Don't use dutycycles, if possible use pulse widths, and think in pulse widths.
If you send pulses at 60 Hz by duty cycle the servo will go to the wrong position.
"""
|
import cv2
import numpy as np

# Load the image plus a grayscale copy used for keypoint rendering.
img = cv2.imread('dorm.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Fixes: Python-2-only `print len(kp)` statement; typo `suft`; and the
# cv2.SIFT constructor, which newer OpenCV releases renamed SIFT_create.
if hasattr(cv2, 'SIFT_create'):
    sift = cv2.SIFT_create(500)
else:
    sift = cv2.SIFT(500)
kp, des = sift.detectAndCompute(img, None)
img = cv2.drawKeypoints(gray, kp, None, (255, 0, 0), 10)
print(len(kp))
cv2.imshow('test', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import logging
import os
import numpy as np
from collections import defaultdict
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.converters import load_data
from rasa_nlu.model import Interpreter
from rasa_nlu.model import Metadata
from rasa_nlu.model import Trainer, TrainingData
logger = logging.getLogger(__name__)

# Names of the duckling-based entity extractor components.
duckling_extractors = {"ner_duckling", "ner_duckling_http"}

# All dimensions duckling can extract; used as a fallback when a duckling
# component does not declare which dimensions it was configured with.
known_duckling_dimensions = {"amount-of-money", "distance", "duration", "email", "number",
                             "ordinal", "phone-number", "timezone", "temperature", "time", "url", "volume"}

# Components that only post-process entities (they do not detect boundaries).
entity_processors = {"ner_synonyms"}
def create_argparser():  # pragma: no cover
    """Build the command-line parser for evaluation / cross-validation runs."""
    import argparse
    parser = argparse.ArgumentParser(
        description='evaluate a Rasa NLU pipeline with cross validation or on external data')
    option_specs = [
        (('-d', '--data'),
         dict(required=True, help="file containing training/evaluation data")),
        (('--mode',),
         dict(required=False, default="evaluation",
              help="evaluation|crossvalidation (evaluate pretrained model or train model by crossvalidation)")),
        (('-c', '--config'),
         dict(required=True, help="config file")),
        (('-m', '--model'),
         dict(required=False, help="path to model (evaluation only)")),
        (('-f', '--folds'),
         dict(required=False, default=10,
              help="number of CV folds (crossvalidation only)")),
    ]
    for flags, params in option_specs:
        parser.add_argument(*flags, **params)
    return parser
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=None,
                          zmin=1):  # pragma: no cover
    """Print and plot the confusion matrix for the intent classification.

    Normalization can be applied by setting `normalize=True`.

    Bug fix: normalization used to happen *after* plt.imshow(), so the heat
    map showed raw counts while the per-cell text showed normalized values.
    Normalize first so the image, the cell text and the logged matrix agree.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm

    if normalize:
        # Row-normalize: each row sums to 1 over the predicted labels.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        logger.info("Normalized confusion matrix: \n{}".format(cm))
    else:
        logger.info("Confusion matrix, without normalization: \n{}".format(cm))

    zmax = cm.max()
    plt.clf()
    plt.imshow(cm, interpolation='nearest', cmap=cmap if cmap else plt.cm.Blues,
               aspect='auto', norm=LogNorm(vmin=zmin, vmax=zmax))
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)

    # Annotate each cell, switching text colour on dark backgrounds.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def log_evaluation_table(test_y, preds):  # pragma: no cover
    """Log weighted F1, precision, accuracy and the full classification
    report for the given targets and predictions."""
    from sklearn import metrics
    report = metrics.classification_report(test_y, preds)
    scores = {
        "F1-Score": metrics.f1_score(test_y, preds, average='weighted'),
        "Precision": metrics.precision_score(test_y, preds, average='weighted'),
        "Accuracy": metrics.accuracy_score(test_y, preds),
    }
    logger.info("Intent Evaluation Results")
    for score_name in ("F1-Score", "Precision", "Accuracy"):
        logger.info("{}: {}".format(score_name, scores[score_name]))
    logger.info("Classification report: \n{}".format(report))
def remove_empty_intent_examples(targets, predictions):
    """Drop every (target, prediction) pair whose target intent is the
    empty string; return the survivors as a pair of numpy arrays."""
    target_arr = np.array(targets)
    prediction_arr = np.array(predictions)
    keep = target_arr != ""
    return target_arr[keep], prediction_arr[keep]
def prepare_data(data, cutoff = 5):
    """Remove intent groups with less than cutoff instances."""
    examples = data.sorted_intent_examples()
    logger.info("Raw data intent examples: {}".format(len(examples)))
    # Count examples per intent; input is already sorted by intent, so
    # groupby yields each intent exactly once.
    group_sizes = {}
    for intent, group in itertools.groupby(examples, lambda e: e.get("intent")):
        group_sizes[intent] = len(list(group))
    # only include intents with enough training data
    good_intents = {intent for intent, count in group_sizes.items()
                    if count >= cutoff}
    return [example for example in examples
            if example.get("intent") in good_intents]
def evaluate_intents(targets, predictions):  # pragma: no cover
    """Creates a confusion matrix and summary statistics for intent predictions.

    Only considers those examples with a set intent. Others are filtered out.
    Logs an evaluation table and shows the confusion-matrix plot.
    """
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels
    import matplotlib.pyplot as plt
    # remove empty intent targets
    num_examples = len(targets)
    targets, predictions = remove_empty_intent_examples(targets, predictions)
    logger.info("Intent Evaluation: Only considering those {} examples that "
                "have a defined intent out of {} examples".format(targets.size, num_examples))
    log_evaluation_table(targets, predictions)
    # Plot over the union of true and predicted labels so no class is dropped.
    cnf_matrix = confusion_matrix(targets, predictions)
    plot_confusion_matrix(cnf_matrix, classes=unique_labels(targets, predictions),
                          title='Intent Confusion matrix')
    plt.show()
def merge_labels(aligned_predictions, extractor=None):
    """Concatenate the per-message label lists of the aligned predictions
    into a single numpy array.

    With a truthy *extractor*, that extractor's labels are used; otherwise
    the gold target labels.
    """
    if not extractor:
        per_message = (ap["target_labels"] for ap in aligned_predictions)
    else:
        per_message = (ap["extractor_labels"][extractor]
                       for ap in aligned_predictions)
    return np.array(list(itertools.chain.from_iterable(per_message)))
def evaluate_entities(targets, predictions, tokens, extractors):  # pragma: no cover
    """Creates summary statistics for each entity extractor.

    Logs precision, recall, and F1 per entity type for each extractor.
    """
    aligned_predictions = [
        align_entity_predictions(message_targets, message_preds, message_tokens,
                                 extractors)
        for message_targets, message_preds, message_tokens
        in zip(targets, predictions, tokens)
    ]
    merged_targets = merge_labels(aligned_predictions)
    for extractor in extractors:
        merged_predictions = merge_labels(aligned_predictions, extractor)
        logger.info("Evaluation for entity extractor: {}".format(extractor))
        log_evaluation_table(merged_targets, merged_predictions)
def is_token_within_entity(token, entity):
    """Checks if a token is within the boundaries of an entity."""
    return determine_intersection(token, entity) == len(token.text)


def does_token_cross_borders(token, entity):
    """Checks if a token crosses the boundaries of an entity."""
    overlap = determine_intersection(token, entity)
    return 0 < overlap < len(token.text)


def determine_intersection(token, entity):
    """Calculates how many characters a given token and entity share."""
    token_chars = set(range(token.offset, token.end))
    entity_chars = set(range(entity["start"], entity["end"]))
    return len(token_chars & entity_chars)
def do_entities_overlap(entities):
    """Checks if entities overlap, i.e. cross each others start and end boundaries.

    Overlaps between entities of the same type are tolerated.

    :param entities: list of entities
    :return: boolean
    """
    by_start = sorted(entities, key=lambda e: e["start"])
    for current, nxt in zip(by_start, by_start[1:]):
        crosses = nxt["start"] < current["end"]
        different_type = nxt["entity"] != current["entity"]
        if crosses and different_type:
            return True
    return False
def find_intersecting_entites(token, entities):
    """Finds the entities that intersect with a token.

    :param token: a single token
    :param entities: entities found by a single extractor
    :return: list of entities
    """
    candidates = []
    for entity in entities:
        contained = is_token_within_entity(token, entity)
        # Only probe for a border crossing when the token is not contained.
        crossing = not contained and does_token_cross_borders(token, entity)
        if crossing:
            logger.debug("Token boundary error for token {}({}, {}) and entity {}".format(
                token.text, token.offset, token.end, entity))
        if contained or crossing:
            candidates.append(entity)
    return candidates
def pick_best_entity_fit(token, candidates):
    """Determines the token label given intersecting entities.

    :param token: a single token
    :param candidates: entities intersecting the token
    :return: entity type ("O" when there is no candidate)
    """
    if not candidates:
        return "O"
    if len(candidates) == 1:
        return candidates[0]["entity"]
    # Several candidates: keep the one sharing the most characters.
    overlaps = [determine_intersection(token, candidate)
                for candidate in candidates]
    winner = np.argmax(overlaps)
    return candidates[winner]["entity"]
def determine_token_labels(token, entities):
    """Determines the token label given entities that do not overlap.

    :param token: a single token
    :param entities: entities found by a single extractor
    :return: entity type
    """
    if not entities:
        return "O"
    if do_entities_overlap(entities):
        raise ValueError("The possible entities should not overlap")
    intersecting = find_intersecting_entites(token, entities)
    return pick_best_entity_fit(token, intersecting)
def align_entity_predictions(targets, predictions, tokens, extractors):
    """Aligns entity predictions to the message tokens.

    Determines for every token the true label based on the prediction targets and
    the label assigned by each single extractor.

    :param targets: list of target entities
    :param predictions: list of predicted entities
    :param tokens: original message tokens
    :param extractors: the entity extractors that should be considered
    :return: dictionary containing the true token labels and token labels from the extractors
    """
    # Bucket the predicted entities by the extractor that produced them.
    entities_by_extractors = {extractor: [] for extractor in extractors}
    for prediction in predictions:
        entities_by_extractors[prediction["extractor"]].append(prediction)
    true_token_labels = []
    extractor_labels = defaultdict(list)
    for token in tokens:
        true_token_labels.append(determine_token_labels(token, targets))
        for extractor, entities in entities_by_extractors.items():
            extractor_labels[extractor].append(
                determine_token_labels(token, entities))
    return {"target_labels": true_token_labels,
            "extractor_labels": dict(extractor_labels)}
def get_targets(test_data):  # pragma: no cover
    """Extracts intent and entity targets from the test data.

    Missing intents default to "" and missing entities to [].
    """
    intent_targets = []
    entity_targets = []
    for example in test_data.training_examples:
        intent_targets.append(example.get("intent", ""))
        entity_targets.append(example.get("entities", []))
    return intent_targets, entity_targets
def extract_intent(result):  # pragma: no cover
    """Extracts the intent name from a parsing result (None when absent)."""
    if 'intent' not in result:
        return None
    return result['intent'].get('name')
def extract_entities(result):  # pragma: no cover
    """Extracts the entity list from a parsing result ([] when absent)."""
    return result.get('entities', [])
def get_predictions(interpreter, test_data):  # pragma: no cover
    """Runs the model for the test set and extracts predictions and tokens."""
    intent_predictions = []
    entity_predictions = []
    tokens = []
    for example in test_data.training_examples:
        result = interpreter.parse(example.text, only_output_properties=False)
        intent_predictions.append(extract_intent(result))
        entity_predictions.append(extract_entities(result))
        tokens.append(result["tokens"])
    return intent_predictions, entity_predictions, tokens
def get_entity_extractors(interpreter):
    """Finds the names of entity extractors used by the interpreter.

    Processors are removed since they do not detect the boundaries themselves.
    """
    extractor_names = {component.name
                       for component in interpreter.pipeline
                       if "entities" in component.provides}
    return extractor_names - entity_processors
def combine_extractor_and_dimension_name(extractor, dim):
    """Joins the duckling extractor name with a dimension's name."""
    return "%s (%s)" % (extractor, dim)
def get_duckling_dimensions(interpreter, duckling_extractor_name):
    """Gets the activated dimensions of a duckling extractor, or all known dimensions as a fallback."""
    component = find_component(interpreter, duckling_extractor_name)
    if component.dimensions:
        return component.dimensions
    return known_duckling_dimensions
def find_component(interpreter, component_name):
    """Finds a component in a pipeline by name (first match; raises
    IndexError when no component matches)."""
    matches = [component for component in interpreter.pipeline
               if component.name == component_name]
    return matches[0]
def patch_duckling_extractors(interpreter, extractors):  # pragma: no cover
    """Removes the basic duckling extractor from the set of extractors and adds dimension-suffixed ones.

    :param interpreter: a rasa nlu interpreter object
    :param extractors: a set of entity extractor names used in the interpreter
    """
    patched = extractors.copy()
    for duckling_extractor in duckling_extractors.intersection(patched):
        patched.remove(duckling_extractor)
        dims = get_duckling_dimensions(interpreter, duckling_extractor)
        patched.update(
            combine_extractor_and_dimension_name(duckling_extractor, dim)
            for dim in dims)
    return patched
def patch_duckling_entity(entity):
    """Patches a single entity by combining extractor and dimension name.

    Non-duckling entities are returned unchanged; duckling entities are
    copied before modification.
    """
    if entity["extractor"] not in duckling_extractors:
        return entity
    patched = entity.copy()
    patched["extractor"] = combine_extractor_and_dimension_name(
        patched["extractor"], patched["entity"])
    return patched
def patch_duckling_entities(entity_predictions):
    """Adds the duckling dimension as a suffix to the extractor name.

    As a result, there is only is one prediction per token per extractor name.
    """
    return [[patch_duckling_entity(entity) for entity in entities]
            for entities in entity_predictions]
def run_evaluation(config, model_path, component_builder=None):  # pragma: no cover
    """Evaluate intent classification and entity extraction.

    Loads the test set and a trained model, computes intent and entity
    predictions, then logs/plots the evaluation results.
    """
    # get the metadata config from the package data
    test_data = load_data(config['data'], config['language'])
    interpreter = Interpreter.load(model_path, config, component_builder)
    intent_targets, entity_targets = get_targets(test_data)
    intent_predictions, entity_predictions, tokens = get_predictions(interpreter, test_data)
    extractors = get_entity_extractors(interpreter)
    # Duckling extractors emit several dimensions under one name; suffix the
    # extractor names with the dimension so each extractor name is unique.
    if extractors.intersection(duckling_extractors):
        entity_predictions = patch_duckling_entities(entity_predictions)
        extractors = patch_duckling_extractors(interpreter, extractors)
    evaluate_intents(intent_targets, intent_predictions)
    evaluate_entities(entity_targets, entity_predictions, tokens, extractors)
def run_cv_evaluation(data, n_folds, nlu_config):
    # type: (List[rasa_nlu.training_data.Message], int, RasaNLUConfig) -> Dict[Text, List[float]]
    """Stratified cross validation on data

    :param data: list of rasa_nlu.training_data.Message objects
    :param n_folds: integer, number of cv folds
    :param nlu_config: nlu config file
    :return: dictionary with key, list structure, where each entry in list
            corresponds to the relevant result for one fold

    Bug fix: "Precision" was assigned with '=' instead of '.append', so the
    returned dict held only the last fold's precision (a scalar) while
    "Accuracy" and "F1-score" held per-fold lists — breaking the documented
    return shape and the mean/std reporting in __main__.
    """
    from sklearn import metrics
    from sklearn.model_selection import StratifiedKFold
    from collections import defaultdict
    trainer = Trainer(nlu_config)
    results = defaultdict(list)
    y_true = [e.get("intent") for e in data]
    skf = StratifiedKFold(n_splits=n_folds, random_state=11, shuffle=True)
    counter = 1
    logger.info("Evaluation started")
    for train_index, test_index in skf.split(data, y_true):
        train = [data[i] for i in train_index]
        test = [data[i] for i in test_index]
        logger.debug("Fold: {}".format(counter))
        logger.debug("Training ...")
        trainer.train(TrainingData(training_examples=train))
        model_directory = trainer.persist("projects/")  # Returns the directory the model is stored in
        logger.debug("Evaluation ...")
        interpreter = Interpreter.load(model_directory, nlu_config)
        test_y = [e.get("intent") for e in test]
        preds = []
        for e in test:
            res = interpreter.parse(e.text)
            if res.get('intent'):
                preds.append(res['intent'].get('name'))
            else:
                preds.append(None)
        # compute fold metrics
        results["Accuracy"].append(metrics.accuracy_score(test_y, preds))
        results["F1-score"].append(metrics.f1_score(test_y, preds, average='weighted'))
        results["Precision"].append(
            metrics.precision_score(test_y, preds, average='weighted'))
        # increase fold counter
        counter += 1
    return dict(results)
if __name__ == '__main__':  # pragma: no cover
    parser = create_argparser()
    args = parser.parse_args()
    # manual check argument dependency: CV trains its own models, so an
    # externally supplied model is contradictory.
    if args.mode == "crossvalidation":
        if args.model is not None:
            parser.error("Crossvalidation will train a new model \
- do not specify external model")
    nlu_config = RasaNLUConfig(args.config, os.environ, vars(args))
    logging.basicConfig(level=nlu_config['log_level'])
    if args.mode == "crossvalidation":
        # Train + evaluate via stratified k-fold CV, report mean and std.
        data = load_data(args.data)
        data = prepare_data(data, cutoff = 5)
        results = run_cv_evaluation(data, int(args.folds), nlu_config)
        logger.info("CV evaluation (n={})".format(args.folds))
        for k,v in results.items():
            logger.info("{}: {:.3f} ({:.3f})".format(k, np.mean(v), np.std(v)))
    elif args.mode == "evaluation":
        # Evaluate a pre-trained model on the provided data set.
        run_evaluation(nlu_config, args.model)
    logger.info("Finished evaluation")
|
from __future__ import print_function
import warnings
import os.path as op
import copy as cp
from nose.tools import assert_true, assert_raises, assert_equal
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.beamformer import dics, dics_epochs, dics_source_power, tf_dics
from mne.time_frequency import csd_epochs
from mne.externals.six import advance_iterator
from mne.utils import run_tests_if_main
# Note that this is the first test file, this will apply to all subsequent
# tests in a full nosetest:
warnings.simplefilter("always")  # ensure we can verify expected warnings

# Paths into the MNE testing dataset (not downloaded automatically here).
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc_raw-eve.fif')
# Cortical label used to restrict source estimates in the tests below.
label = 'Aud-lh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
def read_forward_solution_meg(*args, **kwargs):
    """Read a forward solution and restrict it to MEG channels only."""
    forward = mne.read_forward_solution(*args, **kwargs)
    return mne.pick_types_forward(forward, meg=True, eeg=False)
def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
    """Read in data used in tests

    Returns raw, epochs, evoked, the data/noise cross-spectral densities,
    the label and the forward-solution variants.  The forward variants and
    the CSDs are None when the corresponding flag is False (saves load time
    for tests that do not need them).
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)[:10]
    raw = mne.io.read_raw_fif(fname_raw, preload=False)
    raw.add_proj([], remove_existing=True)  # we'll subselect so remove proj
    forward = mne.read_forward_solution(fname_fwd)
    if read_all_forward:
        # Surface-oriented, fixed-orientation and volume forward variants.
        forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
        forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
                                                 surf_ori=True)
        forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None
    event_id, tmin, tmax = 1, tmin, tmax
    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels
    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, meg=True, eeg=False,
                           stim=True, eog=True, exclude='bads',
                           selection=left_temporal_channels)
    # Read epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, baseline=(None, 0), preload=True,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    epochs.resample(200, npad=0, n_jobs=2)
    evoked = epochs.average()
    # Computing the data and noise cross-spectral density matrices
    # (multitaper, 8-12 Hz; data window starts at 45 ms, noise window ends at 0)
    if compute_csds:
        data_csd = csd_epochs(epochs, mode='multitaper', tmin=0.045,
                              tmax=None, fmin=8, fmax=12,
                              mt_bandwidth=72.72)
        noise_csd = csd_epochs(epochs, mode='multitaper', tmin=None,
                               tmax=0.0, fmin=8, fmax=12,
                               mt_bandwidth=72.72)
    else:
        data_csd, noise_csd = None, None
    return raw, epochs, evoked, data_csd, noise_csd, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
@testing.requires_testing_data
def test_dics():
    """Test DICS with evoked data and single trials

    Checks the evoked-data beamformer output, normal-orientation picking,
    error handling for incompatible forward operators, and that averaging
    single-trial estimates approximates the evoked solution.
    """
    raw, epochs, evoked, data_csd, noise_csd, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()
    stc = dics(evoked, forward, noise_csd=noise_csd, data_csd=data_csd,
               label=label)
    stc.crop(0, None)
    # Locate the source with the largest summed power and its peak latency.
    stc_pow = np.sum(stc.data, axis=1)
    idx = np.argmax(stc_pow)
    max_stc = stc.data[idx]
    tmax = stc.times[np.argmax(max_stc)]
    # Incorrect due to limited number of epochs
    assert_true(0.04 < tmax < 0.05)
    assert_true(10 < np.max(max_stc) < 13)
    # Test picking normal orientation
    stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
                      pick_ori="normal", label=label)
    stc_normal.crop(0, None)
    # The amplitude of normal orientation results should always be smaller than
    # free orientation results
    assert_true((np.abs(stc_normal.data) <= stc.data).all())
    # Test if fixed forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError, dics_epochs, epochs, forward_fixed, noise_csd,
                  data_csd, pick_ori="normal")
    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    assert_raises(ValueError, dics_epochs, epochs, forward, noise_csd,
                  data_csd, pick_ori="normal")
    # Test if volume forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError, dics_epochs, epochs, forward_vol, noise_csd,
                  data_csd, pick_ori="normal")
    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    stcs = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, reg=0.01,
                       label=label)
    # Testing returning of generator
    stcs_ = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, reg=0.01,
                        return_generator=True, label=label)
    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
    # Test whether correct number of trials was returned
    epochs.drop_bad()
    assert_true(len(epochs.events) == len(stcs))
    # Average the single trial estimates
    stc_avg = np.zeros_like(stc.data)
    for this_stc in stcs:
        stc_avg += this_stc.crop(0, None).data
    stc_avg /= len(stcs)
    idx = np.argmax(np.max(stc_avg, axis=1))
    max_stc = stc_avg[idx]
    tmax = stc.times[np.argmax(max_stc)]
    assert_true(0.045 < tmax < 0.06)  # incorrect due to limited # of epochs
    assert_true(12 < np.max(max_stc) < 18.5)
@testing.requires_testing_data
def test_dics_source_power():
    """Test DICS source power computation."""
    # _get_data() provides the recording plus several forward solutions
    # (free-orientation, surface-oriented, fixed and volume) and the CSDs.
    raw, epochs, evoked, data_csd, noise_csd, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()
    stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
                                         data_csd, label=label)
    max_source_idx = np.argmax(stc_source_power.data)
    max_source_power = np.max(stc_source_power.data)
    # TODO: Maybe these could be more directly compared to dics() results?
    assert_true(max_source_idx == 0)
    assert_true(0.5 < max_source_power < 1.15)
    # Test picking normal orientation and using a list of CSD matrices
    stc_normal = dics_source_power(epochs.info, forward_surf_ori,
                                   [noise_csd] * 2, [data_csd] * 2,
                                   pick_ori="normal", label=label)
    # One output column per CSD pair passed in.
    assert_true(stc_normal.data.shape == (stc_source_power.data.shape[0], 2))
    # The normal orientation results should always be smaller than free
    # orientation results
    assert_true((np.abs(stc_normal.data[:, 0]) <=
                 stc_source_power.data[:, 0]).all())
    # Test if fixed forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError, dics_source_power, raw.info, forward_fixed,
                  noise_csd, data_csd, pick_ori="normal")
    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    assert_raises(ValueError, dics_source_power, raw.info, forward, noise_csd,
                  data_csd, pick_ori="normal")
    # Test if volume forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError, dics_source_power, epochs.info, forward_vol,
                  noise_csd, data_csd, pick_ori="normal")
    # Test detection of different number of CSD matrices provided
    assert_raises(ValueError, dics_source_power, epochs.info, forward,
                  [noise_csd] * 2, [data_csd] * 3)
    # Test detection of different frequencies in noise and data CSD objects
    noise_csd.frequencies = [1, 2]
    data_csd.frequencies = [1, 2, 3]
    assert_raises(ValueError, dics_source_power, epochs.info, forward,
                  noise_csd, data_csd)
    # Test detection of uneven frequency spacing
    # NOTE: the zip loop below intentionally rebinds `data_csd` while giving
    # each copy one of the unevenly spaced frequencies [1, 3, 4]; the same
    # list is reused as the noise CSDs.
    data_csds = [cp.deepcopy(data_csd) for i in range(3)]
    frequencies = [1, 3, 4]
    for freq, data_csd in zip(frequencies, data_csds):
        data_csd.frequencies = [freq]
    noise_csds = data_csds
    with warnings.catch_warnings(record=True) as w:
        dics_source_power(epochs.info, forward, noise_csds, data_csds)
    # Exactly one warning is expected for the uneven spacing.
    assert_equal(len(w), 1)
@testing.requires_testing_data
def test_tf_dics():
    """Test TF beamforming based on DICS."""
    tmin, tmax, tstep = -0.2, 0.2, 0.1
    raw, epochs, _, _, _, label, forward, _, _, _ =\
        _get_data(tmin, tmax, read_all_forward=False, compute_csds=False)
    freq_bins = [(4, 20), (30, 55)]
    win_lengths = [0.2, 0.2]
    reg = 0.001
    # One noise CSD per frequency bin, estimated on the pre-stimulus window
    # [tmin, tmin + win_length].
    noise_csds = []
    for freq_bin, win_length in zip(freq_bins, win_lengths):
        noise_csd = csd_epochs(epochs, mode='fourier',
                               fmin=freq_bin[0], fmax=freq_bin[1],
                               fsum=True, tmin=tmin,
                               tmax=tmin + win_length)
        noise_csds.append(noise_csd)
    stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
                   freq_bins, reg=reg, label=label)
    # One STC per frequency bin, with one column per time step.
    assert_true(len(stcs) == len(freq_bins))
    assert_true(stcs[0].shape[1] == 4)
    # Manually calculating source power in several time windows to compare
    # results and test overlapping
    source_power = []
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    for time_window in time_windows:
        data_csd = csd_epochs(epochs, mode='fourier',
                              fmin=freq_bins[0][0],
                              fmax=freq_bins[0][1], fsum=True,
                              tmin=time_window[0], tmax=time_window[1])
        noise_csd = csd_epochs(epochs, mode='fourier',
                               fmin=freq_bins[0][0],
                               fmax=freq_bins[0][1], fsum=True,
                               tmin=-0.2, tmax=0.0)
        # Scale the CSDs by 1 / n_fft -- presumably to match the scaling
        # applied inside tf_dics; verify against the tf_dics implementation.
        data_csd.data /= data_csd.n_fft
        noise_csd.data /= noise_csd.n_fft
        stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
                                             data_csd, reg=reg, label=label)
        source_power.append(stc_source_power.data)
    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)
    # Selecting the first frequency bin in tf_dics results
    stc = stcs[0]
    # Comparing tf_dics results with dics_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])
    # Test if using unsupported max-power orientation is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep, win_lengths, freq_bins=freq_bins,
                  pick_ori='max-power')
    # Test if incorrect number of noise CSDs is detected
    assert_raises(ValueError, tf_dics, epochs, forward, [noise_csds[0]], tmin,
                  tmax, tstep, win_lengths, freq_bins=freq_bins)
    # Test if freq_bins and win_lengths incompatibility is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)
    # Test if time step exceeding window lengths is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins)
    # Test if incorrect number of mt_bandwidths is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep, win_lengths, freq_bins, mode='multitaper',
                  mt_bandwidths=[20])
    # Pass only one epoch to test if subtracting evoked responses yields zeros
    stcs = tf_dics(epochs[0], forward, noise_csds, tmin, tmax, tstep,
                   win_lengths, freq_bins, subtract_evoked=True, reg=reg,
                   label=label)
    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
run_tests_if_main()
|
import copy
def getValidMove(p, x, y, state):
    """Return destination squares [row, col] for player p's piece at (x, y).

    The board is an 8x8 list of strings where '0' is an empty square and a
    piece is a letter plus stack height ('S2', 'C1', ...).  'Star' pieces
    move toward row 0 and 'Circle' pieces toward row 7, one diagonal step at
    a time; a two-square diagonal jump is possible over an opposing piece.
    `left` flags that a left-diagonal step was recorded so the right-diagonal
    branch can pop and reorder it when a jump takes precedence.

    NOTE(review): some branches index state[x-1][y+1] / state[x+1][y-1]
    without bounds-checking y first (e.g. x == 1 with y == 7, or x == 6 with
    y == 0) -- potential IndexError; confirm the callers' board invariants.
    """
    m = []
    left = 0
    if p == "Star":
        # Star moves up the board (decreasing row); row 0 is the far edge.
        if x == 0:
            return m
        if x == 1 and y != 0 and state[x-1][y-1][0] != 'C':
            m.append([x-1,y-1])
        if x == 1 and state[x-1][y+1][0] != 'C':
            m.append([x-1,y+1])
        if x == 2 and state[x-1][y-1] == '0':
            m.append([x-1,y-1])
            left = 1
        elif x == 2 and y-2 >= 0 and state[x-1][y-1][0] == 'C' and state[x-2][y-2][0] != 'C':
            # Jump over the opposing piece on the left diagonal.
            m.append([x-2,y-2])
        if x == 2 and y < 7 and state[x-1][y+1] == '0':
            m.append([x-1,y+1])
        elif x == 2 and y+2 <= 7 and state[x-1][y+1] == 'C' and state[x-2][y+2][0] != 'C' and left != 1:
            m.append([x-2,y+2])
        elif x == 2 and y+2 <= 7 and state[x-1][y+1] == 'C' and state[x-2][y+2][0] != 'C' and left == 1:
            # Replace the previously recorded left step so the jump comes first.
            m.pop()
            m.append([x-2,y+2])
            m.append([x-1,y-1])
        if x > 2 and y-1 >= 0 and state[x-1][y-1] == '0':
            m.append([x-1,y-1])
            left = 1
        elif x > 2 and y-2 >= 0 and state[x-1][y-1][0] == 'C' and state[x-2][y-2] == '0':
            m.append([x-2,y-2])
        if x > 2 and y+1 <= 7 and state[x-1][y+1] == '0':
            m.append([x-1,y+1])
        elif x > 2 and y+2 <= 7 and state[x-1][y+1][0] == 'C' and state[x-2][y+2] == '0' and left != 1:
            m.append([x-2,y+2])
        elif x > 2 and y+2 <= 7 and state[x-1][y+1][0] == 'C' and state[x-2][y+2] == '0' and left == 1:
            # Reorder so the jump precedes the plain left step.
            m.pop()
            m.append([x-2,y+2])
            m.append([x-1,y-1])
    elif p == "Circle":
        # Circle moves down the board (increasing row); row 7 is the far edge.
        if x == 7:
            return m
        if x == 6 and state[x+1][y-1][0] != 'S':
            m.append([x+1,y-1])
        if x == 6 and y != 7 and state[x+1][y+1][0] != 'S':
            m.append([x+1,y+1])
        if x == 5 and y != 0 and state[x+1][y-1] == '0':
            m.append([x+1,y-1])
        elif x == 5 and y-2 >= 0 and state[x+1][y-1][0] == 'S' and state[x+2][y-2][0] != 'S':
            # Jump over the opposing piece on the left diagonal.
            m.append([x+2,y-2])
            left = 1
        if x == 5 and state[x+1][y+1] == '0' and left != 1:
            m.append([x+1,y+1])
        elif x == 5 and state[x+1][y+1] == '0' and left == 1:
            # Reorder so the plain right step precedes the recorded jump.
            m.pop()
            m.append([x+1,y+1])
            m.append([x+2,y-2])
        elif x == 5 and y+2 <= 7 and state[x+1][y+1] == 'S' and state[x+2][y+2][0] != 'S':
            m.append([x+2,y+2])
        if x < 5 and y-1 >= 0 and state[x+1][y-1] == '0':
            m.append([x+1,y-1])
        elif x < 5 and y-2 >= 0 and state[x+1][y-1][0] == 'S' and state[x+2][y-2] == '0':
            m.append([x+2,y-2])
            left = 1
        if x < 5 and y+1 <= 7 and state[x+1][y+1] == '0' and left != 1:
            m.append([x+1,y+1])
        elif x < 5 and y+1 <= 7 and state[x+1][y+1] == '0' and left == 1:
            m.pop()
            m.append([x+1,y+1])
            m.append([x+2,y-2])
        elif x < 5 and y+2 <= 7 and state[x+1][y+1][0] == 'S' and state[x+2][y+2] == '0':
            m.append([x+2,y+2])
    return m
def getAllValidMove(state, player):
    """Collect every legal [origin, destination] pair for `player`.

    Pieces are scanned in row-major order and each piece's destinations come
    back in the order produced by getValidMove, so the overall move order is
    deterministic.
    """
    # Board coordinates of all of this player's pieces, row-major.
    origins = [[row, col]
               for row in range(8)
               for col in range(8)
               if state[row][col][0] == player[0]]
    legalMove = []
    for origin in origins:
        destinations = getValidMove(player, origin[0], origin[1], state)
        for dest in destinations:
            legalMove.append([origin, dest])
    return legalMove
def getNextState(state, move, player):
    """Return a deep-copied board after applying move ([origin, dest]).

    The origin square is emptied; the destination gains one piece of the
    player's colour, stacking onto an existing friendly stack if present.
    A jump (row distance > 1) also clears the square that was jumped over.
    The input board is left untouched.
    """
    nextState = copy.deepcopy(state)
    nextState[move[0][0]][move[0][1]] = '0'
    if nextState[move[1][0]][move[1][1]] == '0':
        nextState[move[1][0]][move[1][1]] = player[0] + '1'
    else:
        # Destination already holds a friendly stack: bump its height digit.
        nextState[move[1][0]][move[1][1]] = player[0] + str(int(nextState[move[1][0]][move[1][1]][1])+1)
    if abs(move[1][0] - move[0][0]) > 1:
        # Jump move: remove the piece on the midpoint square.  Floor division
        # is required -- plain '/' yields a float under Python 3 and would
        # raise TypeError when used as a list index.
        x = (move[0][0] + move[1][0]) // 2
        y = (move[0][1] + move[1][1]) // 2
        nextState[x][y] = '0'
    return nextState
def isTerminal(state, numPass, depth):
    """Decide whether the search should stop at this position.

    True when both sides passed consecutively (numPass == 2), the depth
    budget is exhausted, or either side has no pieces left on the 8x8 board.
    """
    star_count = sum(1 for row in range(8) for col in range(8)
                     if state[row][col][0] == 'S')
    circle_count = sum(1 for row in range(8) for col in range(8)
                       if state[row][col][0] == 'C')
    if numPass == 2:
        return True
    if depth == 0:
        return True
    if star_count == 0 or circle_count == 0:
        return True
    return False
def getUtility(state, player, weight):
    """Weighted material evaluation of the board from `player`'s view.

    Every stack contributes its height times a row weight: 'Star' pieces use
    weight[7 - row] and 'Circle' pieces use weight[row].  The opponent's
    contribution is subtracted.  Returns 0 for an unrecognised player.
    """
    if player not in ("Star", "Circle"):
        return 0
    mine = player[0]
    opp = 'C' if player == "Star" else 'S'
    utility = 0
    for row in range(8):
        for col in range(8):
            cell = state[row][col]
            if cell[0] == mine:
                own_w = weight[7 - row] if player == "Star" else weight[row]
                utility += int(cell[1]) * own_w
            elif cell[0] == opp:
                opp_w = weight[row] if player == "Star" else weight[7 - row]
                utility -= int(cell[1]) * opp_w
    return utility
def printPosition(move):
    """Convert a [row, col] board coordinate into algebraic notation.

    Rows 0..7 map to rank letters 'H'..'A' (top to bottom) and the column is
    printed 1-based, e.g. [0, 0] -> "H1" and [7, 4] -> "A5".
    """
    # A lookup string replaces the original eight-branch elif chain.
    return "HGFEDCBA"[move[0]] + str(move[1] + 1)
numNodes = []
def minPlay(state, player, opponent, depth, weight, numPass, numNodes, firstDepth):
    """Minimizing ply of plain minimax: `opponent` moves.

    Returns the minimum utility (evaluated for `player`) over all of the
    opponent's legal moves; when the opponent must pass, recurses on the
    unchanged board with the consecutive-pass counter incremented.
    """
    numNodes.append([0])  # count this node
    if isTerminal(state, numPass, depth):
        return getUtility(state, player, weight)
    minValue = float('inf')
    nextMoves = getAllValidMove(state, opponent)
    if len(nextMoves) == 0:
        # Forced pass: same position, one more consecutive pass, one ply deeper.
        numPass = numPass + 1
        depth = depth - 1
        minValue = min(minValue, maxPlay(state, player, opponent, depth, weight, numPass, numNodes, firstDepth))
    else:
        numPass = 0
        depth = depth - 1
        for s in range(0, len(nextMoves)):
            childState = getNextState(state, nextMoves[s], opponent)
            minValue = min(minValue, maxPlay(childState, player, opponent, depth, weight, numPass, numNodes, firstDepth))
    return minValue
maxNextMove = []
def maxPlay(state, player, opponent, depth, weight, numPass, numNodes, firstDepth):
    """Maximizing ply of plain minimax: `player` moves.

    At the root level (recognised because depth equals firstDepth after the
    decrement below) every move that improves the running maximum is appended
    to the global maxNextMove, so its last element is the best root move.
    """
    numNodes.append([0])  # count this node
    if isTerminal(state, numPass, depth):
        return getUtility(state, player, weight)
    maxValue = float('-inf')
    nextMoves = getAllValidMove(state, player)
    if len(nextMoves) == 0:
        # Forced pass: same board, consecutive-pass counter incremented.
        numPass = numPass + 1
        depth = depth - 1
        minNodeValue = minPlay(state, player, opponent, depth, weight, numPass, numNodes, firstDepth)
        maxValue = max(maxValue, minNodeValue)
    else:
        numPass = 0
        depth = depth - 1
        for s in range(0, len(nextMoves)):
            childState = getNextState(state, nextMoves[s], player)
            minNodeValue = minPlay(childState, player, opponent, depth, weight, numPass, numNodes, firstDepth)
            if depth == firstDepth and minNodeValue > maxValue:
                # Root level: remember every improving move (last one wins).
                maxNextMove.append(nextMoves[s])
            maxValue = max(maxValue, minNodeValue)
    return maxValue
def minimaxSearch(p, depth, state, rowVal):
    """Run plain minimax for player p.

    Returns (candidate root moves, myopic utility of the chosen next state,
    farsighted search utility, number of nodes expanded).
    """
    firstDepth = depth - 1  # depth value seen at the root's direct children
    if p == "Star":
        op = "Circle"
    elif p == "Circle":
        op = "Star"
    farsighted_Utilty = maxPlay(state, p, op, depth, rowVal, 0, numNodes, firstDepth)
    # maxPlay appended every improving root move to the global maxNextMove,
    # so the last entry is the best move found; empty means "pass".
    if len(maxNextMove) == 0:
        nextState = state
    else:
        nextState = getNextState(state, maxNextMove[len(maxNextMove)-1], p)
    myopic_Utility = getUtility(nextState, p, rowVal)
    return maxNextMove, myopic_Utility, farsighted_Utilty, len(numNodes)
def maxNode(state, player, opponent, depth, weight, numPass, numNodes, alpha, beta):
    """Alpha-beta maximizing node: `player` to move.

    Returns immediately (beta cutoff) as soon as the running maximum reaches
    beta; otherwise tightens alpha and recurses into minNode per move.
    """
    numNodes.append([0])  # count this node
    if isTerminal(state, numPass, depth):
        return getUtility(state, player, weight)
    maxValue = float('-inf')
    nextMoves = getAllValidMove(state, player)
    if len(nextMoves) == 0:
        # Forced pass: evaluate the unchanged position one ply deeper.
        numPass = numPass + 1
        depth = depth - 1
        minNodeValue = minNode(state, player, opponent, depth, weight, numPass, numNodes, alpha, beta)
        maxValue = max(maxValue, minNodeValue)
        if maxValue >= beta:
            return maxValue  # beta cutoff
        alpha = max(alpha, maxValue)
    else:
        numPass = 0
        depth = depth - 1
        for s in range(0, len(nextMoves)):
            childState = getNextState(state, nextMoves[s], player)
            minNodeValue = minNode(childState, player, opponent, depth, weight, numPass, numNodes, alpha, beta)
            maxValue = max(maxValue, minNodeValue)
            if maxValue >= beta:
                return maxValue  # beta cutoff: remaining siblings pruned
            alpha = max(alpha, maxValue)
    return maxValue
def minNode(state, player, opponent, depth, weight, numPass, numNodes, alpha, beta):
    """Alpha-beta minimizing node: `opponent` to move.

    Returns immediately (alpha cutoff) as soon as the running minimum drops
    to alpha; otherwise tightens beta and recurses into maxNode per move.
    """
    numNodes.append([0])  # count this node
    if isTerminal(state, numPass, depth):
        return getUtility(state, player, weight)
    minValue = float('inf')
    nextMoves = getAllValidMove(state, opponent)
    if len(nextMoves) == 0:
        # Forced pass: evaluate the unchanged position one ply deeper.
        numPass = numPass + 1
        depth = depth - 1
        minValue = min(minValue, maxNode(state, player, opponent, depth, weight, numPass, numNodes, alpha, beta))
        if minValue <= alpha:
            return minValue  # alpha cutoff
        beta = min(beta, minValue)
    else:
        numPass = 0
        depth = depth - 1
        for s in range(0, len(nextMoves)):
            childState = getNextState(state, nextMoves[s], opponent)
            minValue = min(minValue, maxNode(childState, player, opponent, depth, weight, numPass, numNodes, alpha, beta))
            if minValue <= alpha:
                return minValue  # alpha cutoff: remaining siblings pruned
            beta = min(beta, minValue)
    return minValue
def alphabetaSearch(p, depth, state, rowVal):
    """Alpha-beta search from the root for player p.

    Returns (improving root moves, myopic utility of the chosen next state,
    best farsighted utility, node count).  The root loop plays each move
    itself, so child searches start at depth - 1.
    """
    alpha = float('-inf')
    beta = float('inf')
    maxUtility = float('-inf')
    depth = depth - 1  # the loop below already consumes the root ply
    numPass = 0
    if p == "Star":
        op = "Circle"
    elif p == "Circle":
        op = "Star"
    nextMoves = getAllValidMove(state, p)
    if len(nextMoves) == 0:
        # No legal move: search the unchanged position as a pass, using the
        # [-1, -1] sentinel to mark the pass pseudo-move.
        numPass = numPass + 1
        nextMoves.append([-1,-1])
    nextMoveState = []
    nextMove = []
    for m in nextMoves:
        if m == [-1,-1]:
            childState = state
        else:
            childState = getNextState(state, m, p)
        utility = minNode(childState, p, op, depth, rowVal, numPass, numNodes, alpha, beta)
        if utility > maxUtility:
            # Every improving move is recorded; nextMove[0] is applied below.
            maxUtility = utility
            alpha = maxUtility
            nextMove.append(m)
    if nextMove == [[-1,-1]]:
        nextState = state
        nextMove = []
    else:
        # NOTE(review): this applies the FIRST improving move, while the
        # minimax variant applies the LAST one -- confirm which is intended.
        nextState = getNextState(state, nextMove[0], p)
    myopic_Utility = getUtility(nextState, p, rowVal)
    return nextMove, myopic_Utility, maxUtility, len(numNodes)+1
def main():
    """Read input.txt, run the requested search, write output.txt.

    input.txt layout: line 0 player ("Star"/"Circle"), line 1 algorithm
    ("MINIMAX"/"ALPHABETA"), line 2 depth limit, lines 3-10 the eight
    comma-separated board rows, line 11 the eight comma-separated row weights.
    """
    f = open("input.txt", "r")
    content = f.read().splitlines()
    f.close()
    player = content[0]
    algorithm = content[1]
    depthLimit = int(content[2])
    # Parse the 8 board rows by manually splitting each line on commas.
    initState = []
    for x in range(3,11):
        inputRow = content[x]  # NOTE(review): assigned but never used
        curRow = []
        head = 0
        for y in range(0,len(content[x])):
            if content[x][y] == ',':
                curRow.append(content[x][head:y])
                head = y+1
        curRow.append(content[x][head:])
        initState.append(curRow)
    # Parse the comma-separated per-row weights.
    rowValue = []
    h = 0
    for i in range(0,len(content[11])):
        if content[11][i] == ',':
            rowValue.append(int(content[11][h:i]))
            h = i+1
    rowValue.append(int(content[11][h:]))
    output = []
    if algorithm == "MINIMAX":
        output = minimaxSearch(player, depthLimit, initState, rowValue)
    elif algorithm == "ALPHABETA":
        output = alphabetaSearch(player, depthLimit, initState, rowValue)
    if len(output[0]) == 0:
        next_move = "pass"
    else:
        # NOTE(review): indexing with len(maxNextMove)-1 is only meaningful
        # for MINIMAX (maxNextMove is its global); on the ALPHABETA path
        # maxNextMove is empty, so the index is -1 (the list's LAST entry),
        # while alphabetaSearch itself applied nextMove[0] -- verify intent.
        next_move = printPosition(output[0][len(maxNextMove)-1][0])
        next_move = next_move + "-" + printPosition(output[0][len(maxNextMove)-1][1])
    f = open("output.txt", "w")
    f.write(next_move+"\n")
    f.write(str(output[1])+"\n")
    f.write(str(output[2])+"\n")
    f.write(str(output[3]))
    f.close()

# Script entry point (runs on import as well).
main()
|
import numpy as np
from .transforms import *
class Dataset(object):
    """Thin train/test switch over two pre-loaded sample collections.

    Depending on ``mode`` ('Train' or 'Test'), indexing and ``len`` operate
    on either the ``train`` or the ``test`` collection.
    """

    def __init__(self, train, test, mode='Train'):
        super(Dataset, self).__init__()
        self.train = train
        self.test = test
        self.mode = mode
        if self.mode == 'Train':
            self.data = self.train
        elif self.mode == 'Test':
            self.data = self.test
        else:
            # Fail fast: previously an unknown mode left self.data unset and
            # only surfaced later as a confusing AttributeError.
            raise ValueError("mode must be 'Train' or 'Test', got %r" % mode)

    def return_data(self):
        """Return the currently selected collection (train or test)."""
        return self.data

    def __getitem__(self, idx):
        # Each sample is an (image, label) pair; the image is converted to
        # the network's input array representation.
        img, label = self.data[idx]
        img = to_input_array(img)
        return img, label

    def __len__(self):
        return len(self.data)
|
# -*- coding:utf-8 -*-
import numpy as np
import transformers
import torch
from model import ResidualModel
import numpy as np
import json
import os
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def model_init(model_path, device, length_num):
    """Load a trained ResidualModel checkpoint and prepare it for inference.

    length_num is the number of concatenated 768-dim sentence vectors the
    model consumes (inp_size = length_num * 768); the model is moved to
    `device` and switched to eval mode.
    """
    # Load onto the CPU first so the checkpoint opens even without a GPU.
    ckpt = torch.load(model_path, map_location='cpu')
    residual_model = ResidualModel(device=device, inp_size=length_num*768, output_size=768,\
                                   num_residual_layers=2, dropout=0.5)
    residual_model.load_state_dict(ckpt['model'])
    residual_model = residual_model.to(device)
    residual_model.eval()
    return residual_model
def beam_search(sentence_list, device, target_num=10, beam_size=5):
    """Extend sentence_list with target_num retrieved sentences via beam search.

    Each candidate sequence is scored by the running product of the cosine
    similarities between the predicted next-sentence vector and the retrieved
    database sentence vectors.  Returns the surviving beam candidates, each
    stored as [sentence_sequence, score].
    """
    # Load the sentence/vector database from disk.
    txt_path = './data/txt'
    vec_path = './data/vec'
    files = os.listdir(txt_path)
    sentence_base = []
    vecs = np.zeros((1, 768))
    print('begin to read database!')
    for file in files:
        file_path = os.path.join(txt_path, file)
        with open(file_path, 'r', encoding='utf-8') as load_f:
            load_dict = json.load(load_f)
            t_sentences = load_dict['content']
            sentence_base += t_sentences
        v_path = os.path.join(vec_path, file)
        mat = np.loadtxt(v_path)
        mat = mat.reshape(-1, 768)
        vecs = np.concatenate((vecs, mat), axis=0)
    vecs = vecs[1:]  # drop the zero placeholder row
    print('all data base has been read.')
    print('sentence number: ', len(vecs), len(sentence_base))
    # Load the BERT encoder and tokenizer.
    print('Initialize the bert model...')
    bert_model = transformers.BertModel.from_pretrained('./bert-base-chinese')
    tokenizer = transformers.BertTokenizer.from_pretrained('./bert-base-chinese')
    # Load the next-sentence-vector prediction model.
    model_path = './model/CKPT'
    residual_model = model_init(model_path, device, length_num=4)
    print('predict model loaded.')
    # Beam entries are stored as [sentence_sequence, score].
    beam_candidates = [[sentence_list, 1.0]]
    for n in range(target_num):
        # Pool of all k * k expansions of the current beam.
        all_candidates = list()
        for i in range(len(beam_candidates)):
            sequence, score = beam_candidates[i]
            # Use the latest 4 sentences of THIS candidate to retrieve the next.
            seq = sequence[-4:]
            # Encode the 4 sentences into one concatenated [CLS] vector.
            h = torch.tensor([[1.]])
            # BUG FIX: iterate over `seq` -- the original iterated over
            # `sentence_list`, so every beam step re-encoded the seed
            # sentences and ignored the candidate's own tail.
            for sentence in seq:
                input_ids = torch.tensor([tokenizer.encode(sentence)])
                h_t = bert_model(input_ids)[0][:,0,:]
                h = torch.cat((h, h_t), dim=1)
            h = h[0, 1:].view(1, -1).to(device)
            # Predict the next sentence vector.
            res = residual_model.work(h)
            res_vec = res.cpu().detach().numpy()
            # Cosine similarity against every database vector.
            scores = np.sum(res_vec * vecs, axis=1) / np.linalg.norm(res_vec, axis=1) / np.linalg.norm(vecs, axis=1)
            idx = np.argsort(scores)[::-1]
            # Keep the k most similar continuations of this candidate.
            for j in range(beam_size):
                candidate = [sequence + [sentence_base[idx[j]]], score * scores[idx[j]]]
                all_candidates.append(candidate)
        # Rank the k*k pool by score, best first, and keep the top k.
        # BUG FIX: the original ascending sort kept the *lowest*-scoring
        # candidates, defeating the beam search.
        ordered = sorted(all_candidates, key=lambda tup: tup[1], reverse=True)
        beam_candidates = ordered[:beam_size]
    return beam_candidates
# Seed context: four consecutive sentences used as the initial sequence for
# the beam search; five more sentences are retrieved with a beam of 3.
s=["随着各大陆资源的枯竭和环境的恶化,世界把目光投向南极洲。",
   "南极突然崛起的两大强国在世界政治格局中取得了与他们在足球场上同样的地位,使得南极条约成为一纸空文。",
   "但人类的理智在另一方面取得了胜利,全球彻底销毁核武器的最后进程开始了。",
   "随着全球无核化的实现,人类对南极大陆的争夺变得安全了一些。",]
res = beam_search(s, device, target_num=5, beam_size=3)
print(res)
"""
Given an array of non-negative integers, you are
initially positioned at the first index of the array.
Each element in the array represents your maximum jump
length at that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3
steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3
no matter what. Its maximum jump length is 0, which
makes it impossible to reach the last index.
"""
# Greedy "farthest reach" approach.
def can_jump(nums):
    """Return True when the last index of nums is reachable by jumping."""
    farthest = 0
    for idx, step in enumerate(nums):
        # A gap: this index lies beyond everything reachable so far.
        if farthest < idx:
            return False
        farthest = max(farthest, idx + step)
    return True
#encoding=utf-8
# Page URLs that contain the target images (filled in by getHtml.html).
htmls=[]
# Build the list of HTML page addresses holding the pictures.
class getHtml():
    """Generate the HTML page URLs that contain the target images.

    target_url must be a printf-style template with one integer slot
    (e.g. 'http://site/page_%d.html'); html() expands it over a page range
    and appends each resulting URL to the module-level ``htmls`` list.
    """
    def __init__(self,target_url):
        self.target_url=target_url

    def html(self,start_page,page_num):
        """Append URLs for pages start_page..page_num (inclusive) to htmls."""
        # range() instead of xrange(): identical loop behaviour on Python 2
        # and works on Python 3, where xrange raises NameError.
        for i in range(start_page,page_num+1):
            h=self.target_url % i
            global htmls
            htmls.append(h)
# Collect the address of each individual picture page.
import requests,re
# Picture-page URLs gathered by url.getUrl().
pic_urls=[]
class url():
    """Scrape picture-page URLs out of every page listed in ``htmls``."""

    def getUrl(self,pattern_words):
        """Fetch each page in htmls and collect matching picture URLs.

        pattern_words is a regex whose matches are picture ids; every id is
        expanded into a full netbian picture-page URL and appended to the
        module-level ``pic_urls`` list.
        """
        global pic_urls
        for page in htmls:
            response = requests.get(page)
            for pic_id in re.findall(pattern_words, response.text):
                full_url = 'http://pic.netbian.com/tupian/' + pic_id + '.html'
                pic_urls.append(str(full_url))
|
# score_file = open("score.txt","w",encoding="utf8")
# print("수학 :0",file = score_file)
# print("영어 :50", file = score_file)
# score_file.close()
# score_file = open("score.txt","r",encoding="utf8")
# print(score_file.read())
# score_file.close()
# score_file = open("score.txt","r",encoding="utf8")
# print(score_file.readline(),end='')
# print(score_file.readline())
# import pickle
# profile_file = open("profile.pickle","wb")
# profile = {"이름" : "우승일","나이":28 ,"취미":["운동","코딩"]}
# print(profile)
# pickle.dump(profile,profile_file)
# profile_file.close()
# profile_file = open("profile.pickle","rb")
# profile = pickle.load(profile_file)
# print(profile)
# profile_file.close()
#import pickle
# with open('profile.pickle',"rb") as profile_file:
# print(pickle.load(profile_file))
# with open('study.txt','w',encoding='utf8') as study_file:
# study_file.write("파이썬을 열심히 공부하고 있습니다.")
# with open('study.txt','r',encoding='utf8') as study_file:
# print(study_file.read())
# import inspect
# import random
# print(inspect.getfile(random))
# from bs4 import BeautifulSoup
# soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
# print(soup.prettify())
# dir : 어떤 객체를 넘겨줬을 때 그 객체가 어떤 변수와 함수를 가지고 있는지 표시
# print(dir())
# import random
# print(dir(random))
# glob : 경로 내의 폴더 / 파일 목록 조회(윈도우 dir)
# import glob
# print(glob.glob("*.py")) # 확장자가 py 인 모든 파일
# os : 운영체제 에서 제공하는 기본 기능
# import os
# print(os.getcwd()) # 현재 디렉토리 표시
# time : 시간 관련 함수
# import time
# print(time.localtime())
# print(time.strftime("%Y-%m-%d %H:%M:%S"))
# import datetime
# # print("오늘 날짜는 ",datetime.date.today())
# # timedelta : 두 날짜 사이의 간격
# today = datetime.date.today()
# print("오늘 날짜는", today)
# td = datetime.timedelta(days=100)
# print("우리가 만난지 100일은 ", today + td)
# Module usage example: import the local `byme` module and call its sign().
import byme
byme.sign()
#!/usr/bin/env python
# coding=utf-8
try:
import cupy as np
except ImportError:
import numpy as np
from nda.optimizers import Optimizer
from nda.optimizers import compressor
class CHOCO_SGD(Optimizer):
    """Decentralized Stochastic Optimization and Gossip Algorithms with Compressed Communication"""

    def __init__(self, p, eta=0.1, gamma=0.1, batch_size=1, compressor_type=None, compressor_param=None, **kwargs):
        super().__init__(p, **kwargs)
        # Step size, gossip (consensus) rate and per-agent minibatch size.
        self.eta = eta
        self.gamma = gamma
        self.batch_size = batch_size
        self.compressor_param = compressor_param
        # Resolve the communication compressor; any unrecognised type
        # (including None) falls back to the identity (no compression).
        self.Q = compressor.identity
        if compressor_type == 'top':
            self.Q = compressor.top
        elif compressor_type == 'random':
            self.Q = compressor.random
        elif compressor_type == 'gsgd':
            self.Q = compressor.gsgd
        # Local copy of the compressed iterate, updated incrementally.
        self.x_hat = np.zeros_like(self.x)
        # W - I, so that x += gamma * x_hat (W - I) performs the gossip step.
        self.W_shifted = self.W - np.eye(self.p.n_agent)

    def update(self):
        """One CHOCO-SGD iteration: local SGD step plus compressed gossip."""
        self.comm_rounds += 1
        # Draw an independent minibatch of sample indices for every agent.
        samples = np.random.randint(0, self.p.m, (self.p.n_agent, self.batch_size))
        stochastic_grad = self.grad(self.x, j=samples)
        self.x -= self.eta * stochastic_grad
        # Communicate only the compressed difference to keep x_hat in sync.
        self.x_hat += self.Q(self.x - self.x_hat, self.compressor_param)
        self.x += self.gamma * self.x_hat.dot(self.W_shifted)
|
import os
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Collections import Collections
from robot.api import logger
import psutil
import shutil
import csv
from os import listdir, rmdir, remove
import json
import sys
import re
from sys import platform as _platform
import tempfile
# Windows locations for downloads and the GAPPS automation libraries.
DOWNLOADS_PATH = r'C:\Users\Administrator\Downloads'
GAPPS_BUILD_PATH = r'C:\WFDevGCAL\automation-gapps\builds'
GAPPS_PATH = r'C:\WFDevGCAL\automation-gapps\lib'
# Per-OS roots for the ATF robot configuration and log files.
WIN_ATF_CONFIG_PATH = "C:\\ATF_ROBOT\\run\\GSuite\\configs\\"
WIN_ATF_LOG_PATH = "C:\\ATF_ROBOT\\run\\GSuite\\logs\\"
MAC_ATF_CONFIG_PATH = "populate_me"  # placeholder -- not yet filled in for macOS
LINUX_ATF_CONFIG_PATH = "populate_me"  # placeholder -- not yet filled in for Linux
OS_CONFIG_PATH = ""  # resolved at runtime in AutomationConfig.__init__
# Names of the automation projects this loader recognises.
auto_project_list = ['gsuite','boss','teamweb','teamios','mnm']
# mvilleda - Automation Test
class AutomationConfig(object):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
# ROBOT_LIBRARY_VERSION = VERSION
    def __init__(self):
        """Resolve the per-OS config path and load all robot configs.

        ${ROBOT_RUNMODULE_DIR} names the sub-directory (under the per-OS
        config root) that holds this run's cfg/csv files; raises when it is
        not set.
        """
        global OS_CONFIG_PATH
        # try:
        self._robot_runmodule_dir = BuiltIn().get_variable_value('${ROBOT_RUNMODULE_DIR}')
        if self._robot_runmodule_dir == "None":
            raise Exception("var: _robot_runmodule_dir is %s " % self._robot_runmodule_dir)
        logger.warn("Loading %s " % self._robot_runmodule_dir)
        # global ROBOT_RUNMODULE_DIR
        print "DEBUG: In AutomationConfig init ***"
        # Pick the config root matching the host operating system.
        if _platform == "linux" or _platform == "linux2":
            # linux
            OS_CONFIG_PATH = LINUX_ATF_CONFIG_PATH
        elif _platform == "darwin":
            OS_CONFIG_PATH = MAC_ATF_CONFIG_PATH
            # OS X
        elif _platform == "win32":
            # Windows...
            OS_CONFIG_PATH = WIN_ATF_CONFIG_PATH
        logger.warn("Loading %s robot parameters from %s\\%s: STARTED" % (_platform,OS_CONFIG_PATH,self._robot_runmodule_dir))
        self.load_robot_automation_configs()
        logger.warn("Loading %s robot parameters from %s\\%s: COMPLETE" % (_platform,OS_CONFIG_PATH,self._robot_runmodule_dir))
    def load_phone_users(self):
        """
        Load Phone user attributes
        Make all users visible to robot
        """
        # self.OS_CONFIG_PATH
        self.available_phones = 0
        row_index = 0
        user_index = 0
        users_list = []
        user_dict = {}
        # TODO remove hard code path
        # NOTE(review): self.user_cfg_file and self.create_suite_variable are
        # not defined anywhere in this chunk -- presumably set elsewhere;
        # verify before relying on this method.
        logger.info("Loading phone users cfg file %s " % self.user_cfg_file)
        reader = csv.DictReader(open(self.user_cfg_file))
        # Pivot the CSV rows into a dict of column -> list of values.
        for row in reader:
            for column, value in iter(row.items()):
                user_dict.setdefault(column, []).append(value)
        # Remember the row index of every phone marked is_available=true.
        for is_avail in user_dict['is_available']:
            if is_avail.lower() == "true":
                self.available_phones += 1
                users_list.append(row_index)
            row_index += 1
        # Publish each available phone as a ${user0N} suite variable.
        for i in users_list:
            tmp_user_dict = {}
            user_index += 1
            varname = '${user0%s}' % user_index
            for key in user_dict.keys():
                tmp_user_dict[key] = user_dict[key][i]
            self.create_suite_variable(varname, tmp_user_dict)
            logger.warn("Created user \"%s\" as dict %s" % (varname, tmp_user_dict))
    def load_robot_automation_configs(self):
        """Dispatch config loading based on ${AUTOMATION_PROJECT}."""
        global OS_CONFIG_PATH
        auto_project = BuiltIn().get_variable_value("${AUTOMATION_PROJECT}").lower()
        logger.warn("project: %s" % auto_project)
        # Abort the whole run on any unknown project name.
        for project in auto_project.split(','):
            if project not in auto_project_list:
                logger.error("project \"%s\" not in automation project list %s" % (project,auto_project_list))
                sys.exit()
        if "gsuite" in auto_project:
            logger.warn("Loading GSUITE configs...")
            self.load_testbed_config()
            self.load_shoretel_users()
            # NOTE(review): load_google_contacts is not defined in this chunk.
            self.load_google_contacts()
            # HQ2 users are only needed for non-multi-tenant runs.
            is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
            if is_mt == "true":
                logger.warn("Skipping HQ2 user creation for MT run")
            else:
                self.load_shoretel_hq2_users()
    def load_testbed_config(self):
        """Load testbed attributes"""
        # Make all users visible to robot
        global OS_CONFIG_PATH
        filename = ""
        # Multi-tenant runs read a different testbed file.
        is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
        if is_mt == "true":
            filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\mt_testbed.cfg'
        else:
            filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\testbed.cfg'
        logger.info("Loading testbed configuration from config file %s " % filename)
        testbedDict = {}
        numItems = 0
        # Parse simple key=value lines, skipping blanks and '#' comments.
        with open(filename, 'r') as f:
            for line in f:
                line = line.rstrip() #removes trailing whitespace and '\n' chars
                if "=" not in line: continue #skips blanks and comments w/o =
                if line.startswith("#"): continue #skips comments which contain =
                numItems = numItems+1
                k, v = line.split("=", 1)
                # logger.warn("num %s numItems:: col %s val %s" % (numItems,k,v))
                testbedDict[k.strip()] = v.strip()
        logger.info('testbed keys %s' % testbedDict.keys())
        numTestbeds = 1
        users_list = []
        for i in range(0, int(numTestbeds)):
            tbNum = i
            # NOTE(review): the TEMPLATE markers below are rewritten by a code
            # generator that defines `testbed_factory`; without that rewrite
            # step this method raises NameError on testbed_factory.
            #TEMPLATE_START
            #TEMPLATE_REPLACE1
            #TEMPLATE_REPLACE2
            #TEMPLATE_END
            # logger.warn("TESTBED %s" % testbed_factory)
            testbedNum = tbNum + 1
            varname = '${testbed0%s}' % testbedNum
            BuiltIn().set_suite_variable(varname, testbed_factory)
    def load_shoretel_users(self):
        """Load shoretel user attributes"""
        # Make all users visible to robot
        global OS_CONFIG_PATH
        filename = ""
        # MT (multi-tenant) runs use a different user CSV.
        is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
        if is_mt == "true":
            filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\pphone_mt_userInfo.csv'
        else:
            filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\pphone_st_hq1_userInfo.csv'
        logger.info("Loading hq users from config file %s " % filename)
        # Count non-blank lines; starting at -1 discounts the CSV header row.
        with open(filename) as f_in:
            lines = (line.rstrip() for line in f_in)
            lines = list(line for line in lines if line) # Non-blank lines in a list
        numPhones = -1
        for line in lines:
            numPhones += 1
        print numPhones
        # Pivot the CSV into column -> list of values (Python 2 iteritems).
        reader = csv.DictReader(open(filename))
        userDict = {}
        for row in reader:
            for column, value in row.iteritems():
                userDict.setdefault(column, []).append(value)
        users_list = []
        for i in range(0, int(numPhones)):
            userNum = i
            # Build the 'key=value' strings consumed by create_dictionary.
            # NOTE(review): user_name is formatted with the 'first_name=' key
            # and is never passed to create_dictionary below -- looks like
            # dead code; verify.
            user_name = 'first_name=%s' % userDict["user_name"][userNum]
            user_type = 'user_type=%s' % userDict["user_type"][userNum]
            first_name = 'first_name=%s' % userDict["first_name"][userNum]
            middle_name = 'middle_name=%s' % userDict["middle_name"][userNum]
            last_name = 'last_name=%s' % userDict["last_name"][userNum]
            extension = 'extension=%s' % userDict["extension"][userNum]
            client_id = 'client_id=%s' % userDict["client_id"][userNum]
            client_email = 'client_email=%s' % userDict["client_email"][userNum]
            client_password = 'client_password=%s' % userDict["client_password"][userNum]
            ip = 'ip=%s' % userDict["ip"][userNum]
            mac = 'mac=%s' % userDict["mac"][userNum]
            phone_type = 'phone_type=%s' % userDict["phone_model"][userNum]
            server = 'server=%s' % userDict["server"][userNum]
            home = 'home=%s' % userDict["home"][userNum]
            work = 'work=%s' % userDict["work"][userNum]
            fax = 'fax=%s' % userDict["fax"][userNum]
            mobile = 'mobile=%s' % userDict["mobile"][userNum]
            pager = 'pager=%s' % userDict["pager"][userNum]
            sip_did = 'sip_did=%s' % userDict["sip_trunk_did"][userNum]
            pri_dnis = 'pri_dnis=%s' % userDict["pri_trunk_dnis"][userNum]
            vm_password = 'vm_password=%s' % userDict["vm_password"][userNum]
            sip_password = 'sip_password=%s' % userDict["sip_password"][userNum]
            tenant_id = 'tenant_id=%s' % userDict["tenant_id"][userNum]
            robot_address = 'robot_address=%s' % userDict["robot_address"][userNum]
            google_password = 'google_password=%s' % userDict["google_password"][userNum]
            company = 'company=%s' % userDict["company"][userNum]
            cas_session_id = 'cas_session_id=%s' % userDict["cas_session_id"][userNum]
            hq_username = 'hq_username=%s' % userDict["hq_username"][userNum]
            hq_password = 'hq_password=%s' % userDict["hq_password"][userNum]
            user_factory = BuiltIn().create_dictionary(ip, extension, server,
                phone_type, user_type, mac, first_name, middle_name,
                last_name, home, work,
                fax, mobile, pager,
                sip_did, pri_dnis, client_password,
                vm_password, sip_password, client_id,
                client_email, tenant_id, robot_address,
                google_password, company,cas_session_id,hq_username,hq_password)
            print "USER %s" % user_factory
            # Publish as ${user0N} for N < 10, otherwise ${userNN}.
            phoneNum = userNum + 1
            if phoneNum < 10:
                varname = '${user0%s}' % phoneNum
            else:
                varname = '${user%s}' % phoneNum
            BuiltIn().set_suite_variable(varname, user_factory)
    def load_shoretel_hq2_users(self):
        """Load shoretel hq2 user attributes"""
        # Make all users visible to robot
        global OS_CONFIG_PATH
        filename = ""
        # NOTE(review): is_mt is read but unused here -- unlike the HQ1
        # loader, HQ2 always reads the same CSV; verify intent.
        is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
        filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\pphone_st_hq2_userInfo.csv'
        logger.info("Loading hq2 users from config file %s " % filename)
        # Count non-blank lines; starting at -1 discounts the CSV header row.
        with open(filename) as f_in:
            lines = (line.rstrip() for line in f_in)
            lines = list(line for line in lines if line) # Non-blank lines in a list
        numPhones = -1
        for line in lines:
            numPhones += 1
        print numPhones
        # Pivot the CSV into column -> list of values (Python 2 iteritems).
        reader = csv.DictReader(open(filename))
        userDict = {}
        for row in reader:
            for column, value in row.iteritems():
                userDict.setdefault(column, []).append(value)
        users_list = []
        for i in range(0, int(numPhones)):
            userNum = i
            # Build the 'key=value' strings consumed by create_dictionary.
            # NOTE(review): user_name is formatted with the 'first_name=' key
            # and never passed to create_dictionary -- looks like dead code.
            user_name = 'first_name=%s' % userDict["user_name"][userNum]
            user_type = 'user_type=%s' % userDict["user_type"][userNum]
            first_name = 'first_name=%s' % userDict["first_name"][userNum]
            middle_name = 'middle_name=%s' % userDict["middle_name"][userNum]
            last_name = 'last_name=%s' % userDict["last_name"][userNum]
            extension = 'extension=%s' % userDict["extension"][userNum]
            client_id = 'client_id=%s' % userDict["client_id"][userNum]
            client_email = 'client_email=%s' % userDict["client_email"][userNum]
            client_password = 'client_password=%s' % userDict["client_password"][userNum]
            ip = 'ip=%s' % userDict["ip"][userNum]
            mac = 'mac=%s' % userDict["mac"][userNum]
            phone_type = 'phone_type=%s' % userDict["phone_model"][userNum]
            server = 'server=%s' % userDict["server"][userNum]
            home = 'home=%s' % userDict["home"][userNum]
            work = 'work=%s' % userDict["work"][userNum]
            fax = 'fax=%s' % userDict["fax"][userNum]
            mobile = 'mobile=%s' % userDict["mobile"][userNum]
            pager = 'pager=%s' % userDict["pager"][userNum]
            sip_did = 'sip_did=%s' % userDict["sip_trunk_did"][userNum]
            pri_dnis = 'pri_dnis=%s' % userDict["pri_trunk_dnis"][userNum]
            vm_password = 'vm_password=%s' % userDict["vm_password"][userNum]
            sip_password = 'sip_password=%s' % userDict["sip_password"][userNum]
            tenant_id = 'tenant_id=%s' % userDict["tenant_id"][userNum]
            robot_address = 'robot_address=%s' % userDict["robot_address"][userNum]
            telnet_id = 'telnet_id=%s' % userDict["telnet_id"][userNum]
            company = 'company=%s' % userDict["company"][userNum]
            user_factory = BuiltIn().create_dictionary(ip, extension, server,
                phone_type, user_type, mac, first_name, middle_name,
                last_name, home, work,
                fax, mobile, pager,
                sip_did, pri_dnis, client_password,
                vm_password, sip_password, client_id,
                client_email, tenant_id, robot_address,
                telnet_id, company)
            print "USER %s" % user_factory
            # Publish as ${hq2_user0N} for N < 10, otherwise ${hq2_userNN}.
            phoneNum = userNum + 1
            if phoneNum < 10:
                varname = '${hq2_user0%s}' % phoneNum
            else:
                varname = '${hq2_user%s}' % phoneNum
            BuiltIn().set_suite_variable(varname, user_factory)
def load_boss_users(self):
"""Load boss user attributes"""
# Make all users visible to robot
global OS_CONFIG_PATH
is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\boss_userInfo.csv'
logger.info("Loading boss users from config file %s " % filename)
with open(filename) as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
numPhones = -1
for line in lines:
numPhones += 1
print numPhones
reader = csv.DictReader(open(filename))
userDict = {}
for row in reader:
for column, value in row.iteritems():
userDict.setdefault(column, []).append(value)
users_list = []
for i in range(0, int(numPhones)):
userNum = i
user_name = 'first_name=%s' % userDict["user_name"][userNum]
user_type = 'user_type=%s' % userDict["user_type"][userNum]
first_name = 'first_name=%s' % userDict["first_name"][userNum]
middle_name = 'middle_name=%s' % userDict["middle_name"][userNum]
last_name = 'last_name=%s' % userDict["last_name"][userNum]
extension = 'extension=%s' % userDict["extension"][userNum]
client_id = 'client_id=%s' % userDict["client_id"][userNum]
client_email = 'client_email=%s' % userDict["client_email"][userNum]
client_password = 'client_password=%s' % userDict["client_password"][userNum]
ip = 'ip=%s' % userDict["ip"][userNum]
mac = 'mac=%s' % userDict["mac"][userNum]
phone_type = 'phone_type=%s' % userDict["phone_model"][userNum]
server = 'server=%s' % userDict["server"][userNum]
home = 'home=%s' % userDict["home"][userNum]
work = 'work=%s' % userDict["work"][userNum]
fax = 'fax=%s' % userDict["fax"][userNum]
mobile = 'mobile=%s' % userDict["mobile"][userNum]
pager = 'pager=%s' % userDict["pager"][userNum]
sip_did = 'sip_did=%s' % userDict["sip_trunk_did"][userNum]
pri_dnis = 'pri_dnis=%s' % userDict["pri_trunk_dnis"][userNum]
vm_password = 'vm_password=%s' % userDict["vm_password"][userNum]
sip_password = 'sip_password=%s' % userDict["sip_password"][userNum]
tenant_id = 'tenant_id=%s' % userDict["tenant_id"][userNum]
robot_address = 'robot_address=%s' % userDict["robot_address"][userNum]
telnet_id = 'telnet_id=%s' % userDict["telnet_id"][userNum]
company = 'company=%s' % userDict["company"][userNum]
cas_session_id = 'cas_session_id=%s' % userDict["cas_session_id"][userNum]
hq_username = 'hq_username=%s' % userDict["hq_username"][userNum]
hq_password = 'hq_password=%s' % userDict["hq_password"][userNum]
user_factory = BuiltIn().create_dictionary(ip, extension, server,
phone_type, user_type, mac, first_name, middle_name,
last_name, home, work,
fax, mobile, pager,
sip_did, pri_dnis, client_password,
vm_password, sip_password, client_id,
client_email, tenant_id, robot_address,
telnet_id, company,cas_session_id,hq_username,hq_password)
print "boss USER %s" % user_factory
# if userNum > 1:
# break
# TODO varname only allows ten users. Increase user num
phoneNum = userNum + 1
varname = '${boss0%s}' % phoneNum
logger.warn("Creating boss user dict \"%s\"" % varname)
BuiltIn().set_suite_variable(varname, user_factory)
logger.info("boss config loaded!")
def load_teamweb_users(self):
"""Load teamweb user attributes"""
pass
# Make all users visible to robot
global OS_CONFIG_PATH
filename = ""
is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
if is_mt == "true":
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\teamweb_mt_userInfo.csv'
else:
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\teamweb_st_userInfo.csv'
logger.info("Loading hq users from config file %s " % filename)
with open(filename) as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
numPhones = -1
for line in lines:
numPhones += 1
print numPhones
reader = csv.DictReader(open(filename))
userDict = {}
for row in reader:
for column, value in row.iteritems():
userDict.setdefault(column, []).append(value)
users_list = []
for i in range(0, int(numPhones)):
userNum = i
user_name = 'first_name=%s' % userDict["user_name"][userNum]
user_type = 'user_type=%s' % userDict["user_type"][userNum]
first_name = 'first_name=%s' % userDict["first_name"][userNum]
middle_name = 'middle_name=%s' % userDict["middle_name"][userNum]
last_name = 'last_name=%s' % userDict["last_name"][userNum]
extension = 'extension=%s' % userDict["extension"][userNum]
client_id = 'client_id=%s' % userDict["client_id"][userNum]
client_email = 'client_email=%s' % userDict["client_email"][userNum]
client_password = 'client_password=%s' % userDict["client_password"][userNum]
ip = 'ip=%s' % userDict["ip"][userNum]
mac = 'mac=%s' % userDict["mac"][userNum]
phone_type = 'phone_type=%s' % userDict["phone_model"][userNum]
server = 'server=%s' % userDict["server"][userNum]
home = 'home=%s' % userDict["home"][userNum]
work = 'work=%s' % userDict["work"][userNum]
fax = 'fax=%s' % userDict["fax"][userNum]
mobile = 'mobile=%s' % userDict["mobile"][userNum]
pager = 'pager=%s' % userDict["pager"][userNum]
sip_did = 'sip_did=%s' % userDict["sip_trunk_did"][userNum]
pri_dnis = 'pri_dnis=%s' % userDict["pri_trunk_dnis"][userNum]
vm_password = 'vm_password=%s' % userDict["vm_password"][userNum]
sip_password = 'sip_password=%s' % userDict["sip_password"][userNum]
tenant_id = 'tenant_id=%s' % userDict["tenant_id"][userNum]
robot_address = 'robot_address=%s' % userDict["robot_address"][userNum]
telnet_id = 'telnet_id=%s' % userDict["telnet_id"][userNum]
company = 'company=%s' % userDict["company"][userNum]
cas_session_id = 'cas_session_id=%s' % userDict["cas_session_id"][userNum]
hq_username = 'hq_username=%s' % userDict["hq_username"][userNum]
hq_password = 'hq_password=%s' % userDict["hq_password"][userNum]
user_factory = BuiltIn().create_dictionary(ip, extension, server,
phone_type, user_type, mac, first_name, middle_name,
last_name, home, work,
fax, mobile, pager,
sip_did, pri_dnis, client_password,
vm_password, sip_password, client_id,
client_email, tenant_id, robot_address,
telnet_id, company,cas_session_id,hq_username,hq_password)
print "teamweb USER %s" % user_factory
# if userNum > 1:
# break
# TODO varname only allows ten users. Increase user num
phoneNum = userNum + 1
varname = '${teamweb0%s}' % phoneNum
logger.info("Creating teamweb user dict \"%s\"" % varname)
BuiltIn().set_suite_variable(varname, user_factory)
logger.info("teamweb config loaded!")
def load_teamios_users(self):
"""Load teamios user attributes"""
pass
# Make all users visible to robot
global OS_CONFIG_PATH
filename = ""
is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
if is_mt == "true":
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\teamios_mt_userInfo.csv'
else:
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\teamios_st_userInfo.csv'
logger.info("Loading hq users from config file %s " % filename)
with open(filename) as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
numPhones = -1
for line in lines:
numPhones += 1
print numPhones
reader = csv.DictReader(open(filename))
userDict = {}
for row in reader:
for column, value in row.iteritems():
userDict.setdefault(column, []).append(value)
users_list = []
for i in range(0, int(numPhones)):
userNum = i
user_name = 'first_name=%s' % userDict["user_name"][userNum]
user_type = 'user_type=%s' % userDict["user_type"][userNum]
first_name = 'first_name=%s' % userDict["first_name"][userNum]
middle_name = 'middle_name=%s' % userDict["middle_name"][userNum]
last_name = 'last_name=%s' % userDict["last_name"][userNum]
extension = 'extension=%s' % userDict["extension"][userNum]
client_id = 'client_id=%s' % userDict["client_id"][userNum]
client_email = 'client_email=%s' % userDict["client_email"][userNum]
client_password = 'client_password=%s' % userDict["client_password"][userNum]
ip = 'ip=%s' % userDict["ip"][userNum]
mac = 'mac=%s' % userDict["mac"][userNum]
phone_type = 'phone_type=%s' % userDict["phone_model"][userNum]
server = 'server=%s' % userDict["server"][userNum]
home = 'home=%s' % userDict["home"][userNum]
work = 'work=%s' % userDict["work"][userNum]
fax = 'fax=%s' % userDict["fax"][userNum]
mobile = 'mobile=%s' % userDict["mobile"][userNum]
pager = 'pager=%s' % userDict["pager"][userNum]
sip_did = 'sip_did=%s' % userDict["sip_trunk_did"][userNum]
pri_dnis = 'pri_dnis=%s' % userDict["pri_trunk_dnis"][userNum]
vm_password = 'vm_password=%s' % userDict["vm_password"][userNum]
sip_password = 'sip_password=%s' % userDict["sip_password"][userNum]
tenant_id = 'tenant_id=%s' % userDict["tenant_id"][userNum]
robot_address = 'robot_address=%s' % userDict["robot_address"][userNum]
telnet_id = 'telnet_id=%s' % userDict["telnet_id"][userNum]
company = 'company=%s' % userDict["company"][userNum]
cas_session_id = 'cas_session_id=%s' % userDict["cas_session_id"][userNum]
hq_username = 'hq_username=%s' % userDict["hq_username"][userNum]
hq_password = 'hq_password=%s' % userDict["hq_password"][userNum]
user_factory = BuiltIn().create_dictionary(ip, extension, server,
phone_type, user_type, mac, first_name, middle_name,
last_name, home, work,
fax, mobile, pager,
sip_did, pri_dnis, client_password,
vm_password, sip_password, client_id,
client_email, tenant_id, robot_address,
telnet_id, company,cas_session_id,hq_username,hq_password)
print "teamios USER %s" % user_factory
# if userNum > 1:
# break
# TODO varname only allows ten users. Increase user num
phoneNum = userNum + 1
varname = '${teamios0%s}' % phoneNum
logger.info("Creating teamios user dict \"%s\"" % varname)
BuiltIn().set_suite_variable(varname, user_factory)
logger.info("teamios user config loaded!")
def load_google_contacts(self):
"""Load Google user attributes"""
# Make all users visible to robot
global OS_CONFIG_PATH
filename = ""
is_mt = BuiltIn().get_variable_value('${is_runtype_mt}')
if is_mt == "true":
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\google_userInfo.csv'
else:
filename = OS_CONFIG_PATH + self._robot_runmodule_dir + '\\google_userInfo.csv'
logger.info("Loading google users from config file %s " % filename)
with open(filename) as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
numPhones = -1
for line in lines:
numPhones += 1
print numPhones
reader = csv.DictReader(open(filename))
userDict = {}
for row in reader:
for column, value in row.iteritems():
userDict.setdefault(column, []).append(value)
for i in range(0, int(numPhones)):
userNum = i
user_name = 'first_name=%s' % userDict["user_name"][userNum]
user_type = 'user_type=%s' % userDict["user_type"][userNum]
first_name = 'first_name=%s' % userDict["first_name"][userNum]
middle_name = 'middle_name=%s' % userDict["middle_name"][userNum]
last_name = 'last_name=%s' % userDict["last_name"][userNum]
client_email = 'client_email=%s' % userDict["client_email"][userNum]
client_password = 'client_password=%s' % userDict["client_password"][userNum]
# server = 'server=%s' % userDict["server"][userNum]
home = 'home=%s' % userDict["home"][userNum]
work = 'work=%s' % userDict["work"][userNum]
work_fax = 'work_fax=%s' % userDict["work_fax"][userNum]
mobile = 'mobile=%s' % userDict["mobile"][userNum]
pager = 'pager=%s' % userDict["pager"][userNum]
tenant_id = 'tenant_id=%s' % userDict["tenant_id"][userNum]
robot_address = 'robot_address=%s' % userDict["robot_address"][userNum]
telnet_id = 'telnet_id=%s' % userDict["telnet_id"][userNum]
company = 'company=%s' % userDict["company"][userNum]
g_user_factory = BuiltIn().create_dictionary(user_type, first_name, middle_name,
last_name, home, work,
work_fax, mobile, pager,
client_password,
client_email, tenant_id, robot_address,
telnet_id, company)
print "gUSER %s" % g_user_factory
phoneNum = userNum + 1
if phoneNum < 10:
varname = '${g_user0%s}' % phoneNum
else:
varname = '${g_user%s}' % phoneNum
BuiltIn().set_suite_variable(varname, g_user_factory)
    def create_duts(self, numPhones):
        """Create DUT (device-under-test) entries; currently a stub.

        :param numPhones: number of phones/devices that would be created.
        """
        pass
def dump_vars(self):
"""Populate testbed specific variables"""
variables = BuiltIn().get_variables()
print "DEBUG: variables %s" % variables
def process_latest_build(self):
"""Kills chrome.exe and chromedriver.exe"""
# Copy downloaded file to builds
path = 'C:/Users/Administrator/Downloads/'
file = path + "Google_Apps_LatestBuild_Main_GoogleApps_6_artifacts.zip"
dstdir = "../../builds"
if not os.path.exists(path):
raise Exception("path %s not found!!!!!!!" % path)
return
shutil.copy(file, dstdir)
def chrome_cleanup(self):
"""Kills chrome.exe and chromedriver.exe"""
logger.warn("DEBUG: Killing chrome.exe and chromedriver.exe")
#TODO add switch
for proc in psutil.process_iter():
if proc.name() == "chrome.exe":
proc.kill()
if proc.name() == "chromedriver.exe":
proc.kill()
if proc.name() == "ShoreTel.exe":
proc.kill()
    def archive_output(self):
        """Archive all Robot outputs (log/report/output.xml) to _results.

        NOTE(review): the copy logic below is commented out, so this
        currently only fetches the test id and has no side effects --
        confirm whether archiving should be re-enabled or the method removed.
        """
        # tcid feeds the disabled archive file names below.
        tcid = BuiltIn().get_variable_value('${kTest_id}')
        # if not os.path.exists('_results\\'):
        #     os.makedirs('_results\\')
        # dst = '_results\\' + str(tcid) + '_log.html'
        # dst2 = '_results\\' + str(tcid) + '_report.html'
        # dst3 = '_results\\' + str(tcid) + '_output.xml'
        # print "DEBUG: archiving outputs to %s, %s, %s" % (dst, dst2, dst3)
        # shutil.copy('log.html', dst)
        # shutil.copy('report.html', dst2)
        # shutil.copy('output.xml', dst3)
def verify_build_is_new(self, branch):
# Check version in manifest and compare with abco_version
with open(GAPPS_BUILD_PATH + '\\latestbuild\\manifest.json') as df:
data = json.load(df)
new_ver = data["version"].encode('utf-8')
if (branch == 'dev'):
fh_path = WIN_ATF_CONFIG_PATH + self._robot_runmodule_dir + '\\logs\\dev_version.txt'
else:
fh_path = WIN_ATF_CONFIG_PATH + self._robot_runmodule_dir + '\\logs\\main_version.txt'
logger.warn("Cross referencing new build with file \"%s\"" % fh_path)
fh = open(fh_path)
for line in fh:
pass
prev_ver = line.encode('utf-8')
prev = tuple(map(str, prev_ver.split(".")))
new = tuple(map(str, new_ver.split(".")))
if new <= prev:
logger.error("Skipping ABCO run. The latest build version %s is older/same as previous run version %s" % (
new_ver, prev_ver))
logger.error("Manual removal of collided build in st/mt abco_version.txt will avoid skipping run...")
logger.error("Sending email notification...TODO")
raise Exception("ERROR")
return 0
return new_ver
    def build_and_update_google_suite(self, branch):
        """Copies the downloaded zip extension to the builds folder,
        unzips it, packs it into a .crx, and distributes the .crx.

        :param branch: 'dev' or 'main'; selects checkout paths and logs.
        :return: the new version string, or 0 when the build is not newer.
        """
        global GAPPS_PATH
        global GAPPS_BUILD_PATH
        # Select the checkout tree matching the branch under test.
        if (branch == 'dev'):
            GAPPS_PATH = r'C:\WFDevGCAL\automation-gapps\lib'
            GAPPS_BUILD_PATH = r'C:\WFDevGCAL\automation-gapps\builds'
        else:
            GAPPS_PATH = r'C:\WebFramework\automation-gapps\lib'
            GAPPS_BUILD_PATH = r'C:\WebFramework\automation-gapps\builds'
        # Copy each downloaded build zip over builds\build.zip
        # (if several are present, the last one wins).
        filelist = [f for f in os.listdir(DOWNLOADS_PATH) if f.endswith(".zip")]
        for f in filelist:
            file = DOWNLOADS_PATH + "\\" + f
            print file
            shutil.copy2(file, GAPPS_BUILD_PATH + '\\build.zip')
        # sleep 5 seconds
        if not os.path.exists('latestbuild\\'):
            os.makedirs('latestbuild\\')
        # Extract the zip with 7-Zip, then pack the extension into a .crx
        # using the branch's signing key.
        if (branch == 'dev'):
            os.system(
                "\"c:\\Program Files\\7-Zip\\7z.exe\" x C:\\WFDevGCAL\\automation-gapps\\builds\*.zip -oC:\\WFDevGCAL\\automation-gapps\\builds\\latestbuild")
            os.system(
                "chrome.exe --pack-extension=\"c:\\WFDevGCAL\\automation-gapps\\builds\\latestbuild\" --pack-extension-key=\"c:\\WFDevGCAL\\automation-gapps\\builds\\build_key.pem""")
        else:
            os.system(
                "\"c:\\Program Files\\7-Zip\\7z.exe\" x C:\\WebFramework\\automation-gapps\\builds\*.zip -oC:\\WebFramework\\automation-gapps\\builds\\latestbuild")
            os.system(
                "chrome.exe --pack-extension=\"c:\\WebFramework\\automation-gapps\\builds\\latestbuild\" --pack-extension-key=\"c:\\WebFramework\\automation-gapps\\builds\\build_key.pem""")
        # NOTE(review): verify_build_is_new raises (rather than returning 0)
        # on an old build, so this guard appears dead -- confirm intent.
        version = self.verify_build_is_new(branch)
        if version == 0:
            logger.warn("Aborting ABCO %s build update" % branch)
            return 0
        logger.warn("Pulling build version %s" % version)
        if (branch == 'dev'):
            # append new build version into abco_version.txt
            write_path = WIN_ATF_CONFIG_PATH + self._robot_runmodule_dir + '\\logs\\dev_version.txt'
            with open(write_path, "a") as myfile:
                myfile.write(version + '\n')
        else:
            # append new build version into abco_version.txt
            write_path = WIN_ATF_CONFIG_PATH + self._robot_runmodule_dir + '\\logs\\main_version.txt'
            with open(write_path, "a") as myfile:
                myfile.write(version + '\n')
        # overwrite input config with new build
        with open('C:\NextGenArc\etc\configs\input.config', 'r') as ifh, open('input.config', 'w') as ofh:
            replace_build = "<client>%s</client>" % version
            newline = re.sub("<client>.*</client>", replace_build, ifh.read())
            ifh.close()
            ofh.write(newline)
            ofh.close()
        shutil.copy('input.config', 'C:\NextGenArc\etc\configs\input.config')
        # replace crx with c:\crx file
        fpath = 'c:\gsuite.crx'
        os.chmod(fpath, 0777)
        os.remove(fpath)
        shutil.copy2(GAPPS_BUILD_PATH + '\\latestbuild.crx', fpath)
        # Archive the packed crx for later reference.
        archive_path = 'Z:\\ROBOT\\builds_archive\\' + branch + '_gapps_' + str(version) + '.crx'
        shutil.copy2(GAPPS_BUILD_PATH + '\\latestbuild.crx', archive_path)
        # Also drop a branch-named copy into the ATF config directory.
        if (branch == 'dev'):
            dev_crx_path = WIN_ATF_CONFIG_PATH + self._robot_runmodule_dir + '\\dev_gsuite.crx'
            shutil.copy2(GAPPS_BUILD_PATH + '\\latestbuild.crx', dev_crx_path)
        else:
            main_crx_path = WIN_ATF_CONFIG_PATH + self._robot_runmodule_dir + '\\main_gsuite.crx'
            shutil.copy2(GAPPS_BUILD_PATH + '\\latestbuild.crx', main_crx_path)
        return version
def remove_old_downloaded_builds(self, branch):
filelist = [f for f in os.listdir("C:\Users\Administrator\Downloads") if f.endswith(".zip")]
for f in filelist:
file = "C:\\Users\\Administrator\\Downloads\\" + f
logger.warn("Removing file %s" % file)
os.remove(file)
if branch == "main":
if os.path.exists('C:\\WebFramework\\automation-gapps\\builds\\latestbuild\\'):
shutil.rmtree("C:\\WebFramework\\automation-gapps\\builds\\latestbuild\\")
else:
if os.path.exists('C:\\WFDevGCAL\\automation-gapps\\builds\\latestbuild\\'):
shutil.rmtree("C:\\WFDevGCAL\\automation-gapps\\builds\\latestbuild\\")
def create_gapp_id_url(self):
tempdir = tempfile.gettempdir()
for subdirs, dirs, files in os.walk(tempdir):
for dir in dirs:
if "scoped_dir" in dir:
# logger.warn(dir)
for files in os.listdir(os.path.join(tempdir,dir)):
if "extension_" in files:
# logger.warn(files)
scrap, id = files.split('_')
break
logger.warn(id)
# import rpdb2; rpdb2.start_embedded_debugger('admin1')
write_robot_dir = os.path.join(os.path.dirname(os.getcwd()),"variables")
write_robot_filename = "AppUrl_DoNotPush.robot"
robot_var = "${GSUITE_CLIENT_LOGIN_PAGE} chrome-extension://REPLACE/foreground.html"
robot_var = robot_var.replace("REPLACE", id)
with open(os.path.join(write_robot_dir,write_robot_filename), 'w') as fh:
fh.write("*** Variables ***\n")
fh.write(robot_var)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Created on 2020/4/15 21:16
@author: phil
"""
import numpy as np
class BagOfWord:
    """Bag-of-words count vectorizer over whitespace-tokenized sentences."""

    def __init__(self, do_lower_case=False):
        # word -> column index in the produced feature matrix
        self.vocab = {}
        self.do_lower_case = do_lower_case

    def _tokenize(self, sent):
        """Lower-case (if configured), strip, and split on single spaces."""
        if self.do_lower_case:
            sent = sent.lower()
        return sent.strip().split(" ")

    def fit(self, sent_list):
        """Build the vocabulary from a list of sentence strings."""
        for sent in sent_list:
            for word in self._tokenize(sent):
                if word not in self.vocab:
                    self.vocab[word] = len(self.vocab)

    def transform(self, sent_list):
        """Return an (n_sentences, vocab_size) word-count matrix.

        Words absent from the fitted vocabulary are ignored instead of
        raising KeyError (consistent with NGram.transform's membership
        check).
        """
        bag_of_word_feature = np.zeros((len(sent_list), len(self.vocab)))
        for idx, sent in enumerate(sent_list):
            for word in self._tokenize(sent):
                col = self.vocab.get(word)
                if col is not None:
                    bag_of_word_feature[idx][col] += 1
        return bag_of_word_feature

    def fit_transform(self, sent_list):
        """Fit the vocabulary on sent_list, then transform it."""
        self.fit(sent_list)
        return self.transform(sent_list)
class NGram:
    """Binary n-gram presence features over whitespace-tokenized sentences."""

    def __init__(self, ngram, do_lower_case=False):
        # Tuple of n-gram sizes to extract, e.g. (1, 2) for uni- and bi-grams.
        self.ngram = ngram
        # n-gram string ("w1_w2") -> column index
        self.feature_map = {}
        self.do_lower_case = do_lower_case

    def fit(self, sentList):
        """Assign a column index to every n-gram observed in sentList."""
        for size in self.ngram:
            for raw in sentList:
                tokens = (raw.lower() if self.do_lower_case else raw).split(" ")
                for start in range(len(tokens) - size + 1):
                    key = "_".join(tokens[start:start + size])
                    # len() is evaluated before insertion, so a new key gets
                    # the next free index; existing keys are left untouched.
                    self.feature_map.setdefault(key, len(self.feature_map))

    def transform(self, sentList):
        """Return an (n_sentences, n_features) 0/1 presence matrix."""
        ngram_feature = np.zeros((len(sentList), len(self.feature_map)))
        for row, raw in enumerate(sentList):
            tokens = (raw.lower() if self.do_lower_case else raw).split(" ")
            for size in self.ngram:
                for start in range(len(tokens) - size + 1):
                    key = "_".join(tokens[start:start + size])
                    col = self.feature_map.get(key)
                    if col is not None:
                        ngram_feature[row][col] = 1
        return ngram_feature

    def fit_transform(self, sentList):
        """Fit the feature map on sentList, then transform it."""
        self.fit(sentList)
        return self.transform(sentList)
if __name__ == "__main__":
    # Small demo: uni+bi-gram features for two sentences.
    demo_sentences = ["I love you", "do you love yourself"]
    extractor = NGram((1, 2))
    demo_features = extractor.fit_transform(demo_sentences)
    print(extractor.feature_map)
    print(demo_features)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Add event-availability fields to Investigador and re-declare its
    Frascati foreign keys with chained-select metadata."""

    dependencies = [
        ('registro', '0003_auto_20151120_1520'),
    ]

    operations = [
        # Free-text reason when the researcher cannot attend.
        migrations.AddField(
            model_name='investigador',
            name='comment_no',
            field=models.TextField(default=b'', help_text=b'Comentar por que no puede asistir al evento', verbose_name=b'Comentarios'),
        ),
        # Yes/No availability flag rendered via the Si/No choices.
        migrations.AddField(
            model_name='investigador',
            name='disponibilidad',
            field=models.BooleanField(default=False, choices=[(True, b'Si'), (False, b'No')]),
        ),
        # Category select chained to the chosen Frascati level.
        migrations.AlterField(
            model_name='investigador',
            name='frascati_categoria',
            field=smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'frascati_nivel', to='registro.Frascati_categoria', chained_field=b'frascati_nivel', verbose_name=b'Area de investigacion (Categoria)'),
        ),
        migrations.AlterField(
            model_name='investigador',
            name='frascati_nivel',
            field=models.ForeignKey(verbose_name=b'Area de investigacion (Nivel)', to='registro.Frascati_nivel'),
        ),
    ]
|
# Generated by Django 3.0.7 on 2020-07-25 13:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create per-user image gallery models (Walling, Roofing, Renovation,
    Plumbing, Paving, Painting, Flooring, Extention, Electricity).

    Each model is one-to-one with a user and holds up to five images with
    a shared 'default.jpg' placeholder.
    NOTE(review): Flooring declares 'flooring1' and 'flooring' (not
    flooring1..5 like its siblings) and Paving has a single field --
    presumably intentional at the time; confirm before "fixing" in a
    follow-up migration.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sliderpic', '0012_projects_project2'),
    ]

    operations = [
        migrations.CreateModel(
            name='Walling',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('walling1', models.ImageField(default='default.jpg', upload_to='walling_images')),
                ('walling2', models.ImageField(default='default.jpg', upload_to='walling_images')),
                ('walling3', models.ImageField(default='default.jpg', upload_to='walling_images')),
                ('walling4', models.ImageField(default='default.jpg', upload_to='walling_images')),
                ('walling5', models.ImageField(default='default.jpg', upload_to='walling_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Roofing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('roofing1', models.ImageField(default='default.jpg', upload_to='roofing_images')),
                ('roofing2', models.ImageField(default='default.jpg', upload_to='roofing_images')),
                ('roofing3', models.ImageField(default='default.jpg', upload_to='roofing_images')),
                ('roofing4', models.ImageField(default='default.jpg', upload_to='roofing_images')),
                ('roofing5', models.ImageField(default='default.jpg', upload_to='roofing_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Renovation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('renovation1', models.ImageField(default='default.jpg', upload_to='renovation_images')),
                ('renovation2', models.ImageField(default='default.jpg', upload_to='renovation_images')),
                ('renovation3', models.ImageField(default='default.jpg', upload_to='renovation_images')),
                ('renovation4', models.ImageField(default='default.jpg', upload_to='renovation_images')),
                ('renovation5', models.ImageField(default='default.jpg', upload_to='renovation_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Plumbing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('plumbing1', models.ImageField(default='default.jpg', upload_to='plumbing_images')),
                ('plumbing2', models.ImageField(default='default.jpg', upload_to='plumbing_images')),
                ('plumbing3', models.ImageField(default='default.jpg', upload_to='plumbing_images')),
                ('plumbing4', models.ImageField(default='default.jpg', upload_to='plumbing_images')),
                ('plumbing5', models.ImageField(default='default.jpg', upload_to='plumbing_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Paving',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('paving', models.ImageField(default='default.jpg', upload_to='paving_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Painting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('painting1', models.ImageField(default='default.jpg', upload_to='painting_images')),
                ('painting2', models.ImageField(default='default.jpg', upload_to='painting_images')),
                ('painting3', models.ImageField(default='default.jpg', upload_to='painting_images')),
                ('painting4', models.ImageField(default='default.jpg', upload_to='painting_images')),
                ('painting5', models.ImageField(default='default.jpg', upload_to='painting_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Flooring',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('flooring1', models.ImageField(default='default.jpg', upload_to='flooring_images')),
                ('flooring', models.ImageField(default='default.jpg', upload_to='flooring_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Extention',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('extention1', models.ImageField(default='default.jpg', upload_to='extention_images')),
                ('extention2', models.ImageField(default='default.jpg', upload_to='extention_images')),
                ('extention3', models.ImageField(default='default.jpg', upload_to='extention_images')),
                ('extention4', models.ImageField(default='default.jpg', upload_to='extention_images')),
                ('extention5', models.ImageField(default='default.jpg', upload_to='extention_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Electricity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('electricity1', models.ImageField(default='default.jpg', upload_to='electricity_images')),
                ('electricity2', models.ImageField(default='default.jpg', upload_to='electricity_images')),
                ('electricity3', models.ImageField(default='default.jpg', upload_to='electricity_images')),
                ('electricity4', models.ImageField(default='default.jpg', upload_to='electricity_images')),
                ('electricity5', models.ImageField(default='default.jpg', upload_to='electricity_images')),
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# Generated by Django 3.0.5 on 2020-04-17 22:06
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Switch comment and news-post text fields to CKEditor's
    RichTextUploadingField (rich text with file uploads)."""

    dependencies = [
        ('news', '0003_auto_20200417_2152'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='text',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='newspost',
            name='text',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
    ]
|
import email.message
import enum
import logging
import random
from collections import defaultdict
from contextlib import contextmanager
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Callable,
ContextManager,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Type,
TypeVar,
cast,
)
from unittest.mock import MagicMock, Mock
from urllib.parse import ParseResult, parse_qs, urlparse
if TYPE_CHECKING:
import typing_extensions as te
def get_random_ip(ip_prefix: Optional[List[str]] = None) -> str:
    """Return a dotted-quad IPv4 address with random trailing octets.

    :param ip_prefix: optional list of leading octet strings; defaults to
        ``["127"]``. Missing octets (up to four) are filled with random
        values in 0..255.

    The previous version ignored ``ip_prefix`` entirely and raised
    UnboundLocalError whenever a prefix was supplied.
    """
    parts = ["127"] if ip_prefix is None else list(ip_prefix)
    for _ in range(4 - len(parts)):
        parts.append(f"{random.randint(0, 255)}")
    return ".".join(parts)
@contextmanager
def ctx_http_server(
    handler: Type[BaseHTTPRequestHandler], host: Optional[str] = "127.0.0.1"
) -> Iterator[HTTPServer]:
    """Serve `handler` on a background daemon thread for the context's
    duration.

    Binds to an ephemeral port on `host` (a random IP when host is None).
    The server is shut down, its socket closed, and the thread joined even
    when the with-body raises — the previous version skipped cleanup on
    exceptions.
    """
    host = get_random_ip() if host is None else host
    server = HTTPServer((host, 0), handler)
    server_thread = Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    try:
        yield server
    finally:
        server.shutdown()
        server.socket.close()
        server_thread.join()
# Loosely-bound TypeVar so the wrapper advertises the wrapped callable's type.
GenericT = TypeVar("GenericT", bound=Any)


def make_spypair(method: GenericT) -> Tuple[GenericT, Mock]:
    """Wrap an unbound method so each call is also recorded on a Mock.

    Returns the wrapping callable and the recording Mock; `self` is not
    forwarded to the mock, only positional/keyword arguments are.
    """
    spy = MagicMock()

    def proxy(self: Any, *args: Any, **kwargs: Any) -> Any:
        spy(*args, **kwargs)
        return method(self, *args, **kwargs)

    setattr(proxy, "mock", spy)  # noqa
    return cast(GenericT, proxy), spy
# Header name -> list of values (a header may appear multiple times).
HeadersT = Dict[str, List[str]]
# Query parameter name -> list of values, as produced by urllib parse_qs.
PathQueryT = Dict[str, List[str]]
class MethodName(str, enum.Enum):
    """HTTP request method names.

    `str` is mixed in so members compare equal to plain strings. Each
    member's value is its own name (``MethodName.GET.value == "GET"``);
    with plain ``enum.auto()`` the values were the stringified integers
    "1".."9", which defeated the purpose of the str mixin.
    """

    def _generate_next_value_(name, start, count, last_values):  # noqa: N805
        # Make auto() yield the member name itself.
        return name

    CONNECT = enum.auto()
    DELETE = enum.auto()
    GET = enum.auto()
    HEAD = enum.auto()
    OPTIONS = enum.auto()
    PATCH = enum.auto()
    POST = enum.auto()
    PUT = enum.auto()
    TRACE = enum.auto()
class MockHTTPRequest(NamedTuple):
    """One HTTP request captured by the mock server."""

    # HTTP method of the request.
    method: MethodName
    # Raw request path, including any query string.
    path: str
    # urlparse() of `path`.
    parsed_path: ParseResult
    # parse_qs() of the query portion of `path`.
    path_query: PathQueryT
    # Request headers as received by the handler.
    headers: email.message.Message
    # Request body, or None when no Content-Length header was sent.
    body: Optional[bytes]
class MockHTTPResponse(NamedTuple):
    """A canned HTTP response for the mock server to replay."""

    # HTTP status code to send (e.g. 200).
    status_code: int
    # Reason phrase accompanying the status (e.g. "OK").
    reason_phrase: str
    # Raw response body bytes.
    body: bytes
    # Response headers; each name maps to a list of values.
    headers: HeadersT
# Captured requests, grouped by HTTP method.
RequestDict = Dict[MethodName, List[MockHTTPRequest]]
# Canned responses to serve, grouped by HTTP method (consumed front-to-back).
ResponseDict = Dict[MethodName, List[MockHTTPResponse]]
class BaseHTTPServerMock:
    """Builds a BaseHTTPRequestHandler subclass that records every request
    and replays canned responses.

    Usage: queue MockHTTPResponse objects in ``responses[method]``, serve
    ``self.Handler``, then inspect ``requests[method]`` / ``mocks[method]``.
    """

    def __init__(self) -> None:
        # Requests received so far, grouped by method.
        # (defaultdict(list) replaces the redundant defaultdict(lambda: list()).)
        self.requests: Dict[MethodName, List[MockHTTPRequest]] = defaultdict(list)
        # Responses to replay, grouped by method; consumed front-to-back.
        self.responses: Dict[MethodName, List[MockHTTPResponse]] = defaultdict(list)
        # Call-recording Mock for each method's handler.
        self.mocks: Dict[MethodName, Mock] = {}

        class Handler(BaseHTTPRequestHandler):
            pass

        self.Handler = Handler

        # Install a spy-wrapped do_<METHOD> handler for every HTTP method.
        for name in MethodName:
            do_handler, mock = make_spypair(
                self.make_do_handler(name, self.requests, self.responses)
            )
            setattr(self.Handler, f"do_{name.name}", do_handler)
            self.mocks[name] = mock

    @classmethod
    def make_do_handler(
        cls, method_name: MethodName, requests: RequestDict, responses: ResponseDict
    ) -> Callable[[BaseHTTPRequestHandler], None]:
        """Return a do_<METHOD> function that records the incoming request
        into `requests` and writes the next canned response from `responses`.
        """

        def do_handler(handler: BaseHTTPRequestHandler) -> None:
            parsed_path = urlparse(handler.path)
            path_query = parse_qs(parsed_path.query)
            body = None
            content_length = handler.headers.get("Content-Length")
            if content_length is not None:
                body = handler.rfile.read(int(content_length))
            request = MockHTTPRequest(
                method_name,
                handler.path,
                parsed_path,
                path_query,
                handler.headers,
                body,
            )
            logging.debug("handling %s request: %s", method_name, request)
            logging.debug("headers %s", request.headers)
            requests[method_name].append(request)
            # Pop FIFO; an IndexError here means the test queued too few
            # responses for this method.
            response = responses[method_name].pop(0)
            handler.send_response(response.status_code, response.reason_phrase)
            for header, values in response.headers.items():
                for value in values:
                    handler.send_header(header, value)
            handler.end_headers()
            handler.wfile.write(response.body)
            handler.wfile.flush()

        return do_handler

    def reset(self) -> None:
        """Clear recorded requests, queued responses, and mock call counts."""
        self.requests.clear()
        self.responses.clear()
        for name in MethodName:
            self.mocks[name].reset_mock()

    @property
    def call_count(self) -> int:
        """Total number of handled requests across all HTTP methods."""
        return sum(self.mocks[name].call_count for name in MethodName)
class ServedBaseHTTPServerMock(
    BaseHTTPServerMock, ContextManager["ServedBaseHTTPServerMock"]
):
    """A BaseHTTPServerMock bound to a live HTTPServer running on a daemon
    background thread.  Usable as a context manager; leaving the ``with``
    block stops the server.
    """

    def __init__(self, host: Optional[str] = "127.0.0.1") -> None:
        super().__init__()
        if host is None:
            host = get_random_ip()
        # Port 0 asks the OS for a free ephemeral port.
        self.server = HTTPServer((host, 0), self.Handler)
        self.server_thread = Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self) -> None:
        """Shut the server down, close its socket, and join its thread."""
        self.server.shutdown()
        self.server.socket.close()
        self.server_thread.join()

    @property
    def address_string(self) -> str:
        """The bound address formatted as ``host:port``."""
        host, port = self.server.server_address
        return "{}:{}".format(host, port)

    @property
    def url(self) -> str:
        """Base URL for issuing requests against the mock server."""
        return "http://" + self.address_string

    def __enter__(self) -> "ServedBaseHTTPServerMock":
        return self

    def __exit__(
        self,
        __exc_type: Optional[Type[BaseException]],
        __exc_value: Optional[BaseException],
        __traceback: Optional[TracebackType],
    ) -> "te.Literal[False]":
        self.stop()
        # Never suppress exceptions raised inside the with-block.
        return False
|
# A non-empty zero-indexed array A consisting of N integers is given. The consecutive elements of array A represent consecutive cars on a road.
# Array A contains only 0s and/or 1s:
# 0 represents a car traveling east,
# 1 represents a car traveling west.
# The goal is to count passing cars. We say that a pair of cars (P, Q), where 0 ≤ P < Q < N, is passing when P is traveling to the east and Q is traveling to the west.
# For example, consider array A such that:
# A[0] = 0
# A[1] = 1
# A[2] = 0
# A[3] = 1
# A[4] = 1
# We have five pairs of passing cars: (0, 1), (0, 3), (0, 4), (2, 3), (2, 4).
# Write a function:
# def solution(A)
# that, given a non-empty zero-indexed array A of N integers, returns the number of pairs of passing cars.
# The function should return −1 if the number of pairs of passing cars exceeds 1,000,000,000.
# For example, given:
# A[0] = 0
# A[1] = 1
# A[2] = 0
# A[3] = 1
# A[4] = 1
# the function should return 5, as explained above.
# Assume that:
# N is an integer within the range [1..100,000];
# each element of array A is an integer that can have one of the following values: 0, 1.
# Complexity:
# expected worst-case time complexity is O(N);
# expected worst-case space complexity is O(1), beyond input storage (not counting the storage required for input arguments).
#
# RESULT = 100% , O(N). Solved ONE Submit by Vu
# NOTE
# in Reversed Order
# Sum = Sum + Last_update + Counter
# 0 = 0
# 1, 1, 0 = 2 = Last_update
# 0, 1 , 0 , 1, 1, 0 = 2 + 1 = Last_update
# 0, 0, 1 , 0 , 1, 1, 0 = 2 + 1 + 0 = Last_update
import math
def solution(A):
    """Count passing car pairs (Codility PassingCars).

    A contains only 0 (car travelling east) and 1 (car travelling west).
    A pair (P, Q) with P < Q passes when A[P] == 0 and A[Q] == 1, so each
    west-bound car pairs with every east-bound car before it.  A single
    forward scan carrying the running count of east-bound cars gives
    O(N) time and O(1) extra space.

    Returns:
        The number of passing pairs, or -1 once it exceeds 1,000,000,000.
        (Integer comparison replaces the original float math.pow(10, 9).)
    """
    LIMIT = 1_000_000_000
    east = 0   # east-bound cars seen so far
    pairs = 0
    for car in A:
        if car == 0:
            east += 1
        else:
            # A west-bound car passes every east-bound car to its left.
            pairs += east
            if pairs > LIMIT:
                return -1
    return pairs
import collections
import config
import cv2
import numpy as np
# Dataset locations and camera count come from the project config module.
path = config.path
frame_num = 100  # NOTE(review): hard-coded override of config.frame_num — confirm intended
camera_num = config.camera_num
background_path = "/run/media/benjamin/HDD-3/Dataset/medialab_20210924/background"
# Full (all-ones) square structuring elements of increasing size for
# morphological operations; only the 3x3 kernel is used below.
FULL_KERNEL_3 = np.ones((3, 3), np.uint8)
FULL_KERNEL_5 = np.ones((5, 5), np.uint8)
FULL_KERNEL_7 = np.ones((7, 7), np.uint8)
FULL_KERNEL_9 = np.ones((9, 9), np.uint8)
FULL_KERNEL_31 = np.ones((31, 31), np.uint8)
def morphological_filter(depth_map):
    """Densify a depth map: one 3x3 dilation pass followed by a morphological
    close with the same kernel, filling small holes in the background depth."""
    dilated = cv2.dilate(depth_map, FULL_KERNEL_3, iterations=1)
    return cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, FULL_KERNEL_3)
if __name__ == "__main__":
    # Post-process each camera's stored background depth image and save the
    # filtered result next to the original.  (Fix: removed a stray trailing
    # '|' artifact that made the final line a syntax error.)
    for cam_idx in range(camera_num):
        depth_img = cv2.imread(f"{background_path}/depths/{cam_idx}.png")
        depth_img = np.float32(depth_img)
        # imread yields 3 identical BGR channels; collapse to one channel.
        depth_img = np.mean(depth_img, axis=2)
        depth_img = morphological_filter(depth_img)
        cv2.imwrite(f"{background_path}/depths/post_{cam_idx}.png", depth_img)
import torch
from torch import nn
from torch_geometric.nn import dense_diff_pool
from torch_geometric.nn import global_sort_pool
from torch_geometric.nn.inits import glorot
from utils import adj_to_edge_index
from boxx import timeit
class SortPool(torch.nn.Module):
    """Global sort pooling over a single graph.

    Wraps torch_geometric's ``global_sort_pool``, keeping the ``k``
    highest-ranked nodes.  All nodes are assigned to one batch (id 0).
    """

    def __init__(self, k):
        super(SortPool, self).__init__()
        self.k = k  # number of nodes to keep

    def forward(self, x):
        # torch.zeros builds the all-zero batch vector directly instead of
        # materialising a Python list first (original built [0]*num_nodes).
        batch = torch.zeros(x.size(0), dtype=torch.long, device=x.device)
        return global_sort_pool(x=x, batch=batch, k=self.k)

    def __repr__(self):
        return '{}(k_nodes_to_keep={})'.format(self.__class__.__name__,
                                               self.k,
                                               )
class DIFFPool(torch.nn.Module):
    """
    Differentiable pooling operator from the `"Hierarchical Graph
    Representation Learning with Differentiable Pooling"
    <https://arxiv.org/abs/1806.08804>`_ paper.
    """
    def __init__(self):
        super(DIFFPool, self).__init__()
    def forward(self, x, adj, s, short_cut=False):
        """
        Returns pooled node feature matrix, coarsened adjacency matrix and the
        auxiliary link prediction objective
        Args:
            x: Node feature matrix.
            adj: Adjacency matrix with shape [num_nodes, num_nodes]
            s: Dense cluster-assignment matrix passed to dense_diff_pool.
            short_cut: When True, skip the dense-to-sparse conversion and
                return None for edge_index / edge_attr.
        """
        # NOTE(review): recent torch_geometric versions return a 4-tuple from
        # dense_diff_pool (link loss AND entropy loss) — confirm the pinned
        # version matches this 3-value unpacking.
        out_x, out_adj, reg = dense_diff_pool(x, adj, s)
        # dense_diff_pool returns batched (1, ...) tensors; drop the batch dim.
        out_adj = out_adj.squeeze(0) if out_adj.dim() == 3 else out_adj
        out_x = out_x.squeeze(0) if out_x.dim() == 3 else out_x
        if not short_cut:
            # detach(): the sparse conversion is bookkeeping, not part of the graph.
            out_edge_index, out_edge_attr = adj_to_edge_index(out_adj.detach())
        else:
            out_edge_index, out_edge_attr = None, None
        return out_x, out_edge_index, out_edge_attr, out_adj, reg
    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
|
"""
问题:
跟559类似,求树的深度。。。
"""
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # Children start empty and are attached after construction.
        self.left = self.right = None
class Solution(object):
    """LeetCode 104: maximum depth of a binary tree (same idea as problem 559)."""

    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Depth of the empty tree is 0; otherwise 1 plus the deeper subtree.
        Fixes the non-idiomatic ``== None`` (use identity ``is None``) and
        drops the redundant leaf special-case — a leaf already yields
        1 + max(0, 0) == 1.
        """
        if root is None:
            return 0
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
"""
#
# Tree 1
# 10
# / \
# 5 15
# / \ / \
# 2 8 12 20
# / \
# 6 9
"""
# Build the sample tree from the diagram above and print its depth.
# (Fix: removed a stray trailing '|' artifact that made the print line a
# syntax error.)  Longest path is 10 -> 5 -> 8 -> 6, so this prints 4.
t1 = TreeNode(10)
t1.left = TreeNode(5)
t1.right = TreeNode(15)
t1.left.left = TreeNode(2)
t1.left.right = TreeNode(8)
t1.left.right.left = TreeNode(6)
t1.left.right.right = TreeNode(9)
t1.right.left = TreeNode(12)
t1.right.right = TreeNode(20)

solution = Solution()
print(solution.maxDepth(t1))
# Generated by Django 2.0.2 on 2018-02-23 09:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the ``talks`` app.

    NOTE(review): ``operations`` is empty, so beyond recording a dependency
    on migration 0033 this is a no-op — confirm that is intentional.
    """

    dependencies = [
        ('talks', '0033_auto_20180217_1239'),
    ]
    operations = [
    ]
|
# nums = [-2,1,-3,4,-1,2,1,-5,4]
# def maxSubArray(nums):
# if max(nums)<0:
# return max(nums)
# local_max, global_max = 0 , 0
# for num in nums:
# local_max = max(0,local_max+num)
# global_max= max(global_max,local_max)
# return global_max
# print(maxSubArray(nums))
# print('HelloWorld123')
# 118 pascal's Triangle
# numsRows = 3
# def generate(numRows):
# result=[] #making a empty list
# for i in range(numRows): #loop through rows
# print("this is i index number",i)
# result.append([]) #and add empe
# # print("this is result",result)
# for j in range(i+1):# there are "i" elements in each row and loop start from 0, so should be end at i+1
# print("2nd loop Print: i = ",i)
# print("this is j",j)
# if j in (0,i):
# result[i].append(1)
# else:
# result[i].append(result[i-1][j-1] + result[i-1][j])
# print("result: ",result)
# print("")
# return result
# print(generate(numsRows))
# 119 pascal's triangle
numsRows = 3
def generate(rows):
    """Return row ``rows`` (0-indexed) of Pascal's triangle in O(rows) space.

    Starts from [1, 0, ..., 0] and repeatedly adds the row to a shifted copy
    of itself, updating right-to-left so each entry is computed in place from
    the previous row's values before they are overwritten.
    """
    row = [1] + [0] * rows
    for size in range(rows):
        for idx in range(size + 1, 0, -1):
            row[idx] += row[idx - 1]
    return row
print(generate(numsRows))
# # result[i][j] = result[i][j-1]+ result[i][j+1]
# [1],
# [1,1],
# [1,2,1],result[i][1] = result[i][j-1]+ result[i][j+1]
# [1,2,2,1],result[i][2] = result[i][j-1]+ result[i][j+1]
# [1,2,2,1,1],result[i][3] = result[i][j-1]+ result[i][j+1]
# class Solution:
# s = 'IV'
# def romanToInt(self, s: str) -> int:
# numeral_map={
# 'I':1,
# 'V':5,
# 'X':10,
# 'L':50,
# 'C':100,
# 'D':500,
# 'M':1000
# }
# result = 0
# for i in range(len(s)):
# if i >0 and numeral_map[s[i]]>numeral_map[s[i-1]]:
# result+=numeral_map[s[i]]-2*numeral_map[s[i-1]]
# else:
# result += numeral_map[s[i]]
# return result
# print(romanToInt(s))
# Longest Common Prefix
# def longestCommonPrefix(self, strs: List[str]) -> str:
# result = ''
# i=0
# while True:
# try:
# sets=set(string[i] for string in strs)
# if len(sets) == 1 :
# result += sets.pop()
# i+=1
# else:
# break
# except Exception as e:
# break
# LeetCode in Python 122. Best Time to Buy and Sell Stock II
# prices = [1,2,3,4,5]
# # output 4
# def maxProfit(nums):
# if len(nums)<=1:
# return 0
# total=0
# for i in range(1,len(nums)):#从因为要用当前和之前(i-1)位作比较
# if nums[i]>nums[i-1]:
# total +=nums[i]-nums[i-1]
# return total
# print(maxProfit(prices))
#162 Find Pick Element
# nums=[1,2,1,3,5,6,4]
# #output index 1 or 5
# def findPickElement(nms):
# left, right = 0,len(nms)-1
# while left < right:
# mid = (left+right)//2
# if nms[mid] < nms[mid+1]:
# left=mid+1
# else:
# right=mid
# return left
# print(findPickElement(nums))
#leetcode 167 Two sum II
# Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
# The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
# Note:
# Your returned answers (both index1 and index2) are not zero-based.
# You may assume that each input would have exactly one solution and you may not use the same element twice.
# numbers = [2,7,11,15]
# target = 9
# def twoSumsII(list, targetnum):
# start,end = 0,len(list)-1
# sum = 0
# while start != end :
# sum = list[start]+list[end]
# if sum>targetnum:
# end-=1
# elif sum<targetnum:
# start+=1
# else:
# return start+1, end+1
# print(twoSumsII(numbers,target))
# from typing import List
# 1480. Running Sum of 1d Array
# Given an array nums. We define a running sum of an array as runningSum[i] = sum(nums[0]…nums[i]).
# Return the running sum of nums.
# nums = [1,2,3,4]
# def revers(any):
# new_list = any[::-1]
# return new_list
# print(revers(nums))
# def rungsum(listNums):
# for i in range(1,len(listNums)):
# listNums[i]=listNums[i]+listNums[i-1]
# # 3+1=4
# # 4+2 = 6
# # 6+4 = 10
# return listNums
# print(rungsum(nums))
# 1431. Kids With the Greatest Number of Candies
# candies = [2,3,5,1,3]
# max(candies)
# print
# 189. Rotate Array
# Given an array, rotate the array to the right by k steps, where k is non-negative.
# Follow up:
# Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
# Could you do it in-place with O(1) extra space?
# nums = [1,2,3, 4,5,6,7] nums[:k] nums[:k]
# 5,6,7, 1,2,3,4 nums[len(nums-k:)]
# k = 3
# k就是indexnumber
# # Output: [5,6,7,1,2,3,4]
# # Explanation:
# # rotate 1 steps to the right: [7,1,2,3,4,5,6]
# # rotate 2 steps to the right: [6,7,1,2,3,4,5]
# # rotate 3 steps to the right: [5,6,7,1,2,3,4]
# # We can switch a group of nums end at 3rd number with the nums start from index 4 and end at the last number
# # 把数组的后三位数移动到前三位,或者说把这个数组向右推进三次
# # 1,2,3 4,5,6,7
# # 5,6,7 1,2,3,4
# # 第一步先求余数, 因为如果给出的数是11,那么就是说要推进11次。也就是说只要知道它的余数旧能知道有效推进的结果。
# k = k % len(nums)
# # 第二部, 从头到第K个数,和K个数之后的数进行调换。
# nums[:k] , nums[k:] = nums[len(nums)-k:] , nums[: len(nums)-k:]
# 1,2,3 4,5,6,7 = 5,6,7 1,2,3,4
# print(nums[len(nums)-k:])
# print(nums[:k])
# print(nums[k:])
|
import unittest
# Note: Python's sort algo is "Timsort" - a hybrid of merge sort and insertion sort
def solution(A):
    """Return the maximal product of any three elements (Codility MaxProductOfThree)."""
    # In-place sort keeps O(1) extra space (deliberately mutates the input).
    A.sort()
    # Winner is either the three largest values, or the two most-negative
    # values (whose product is positive) times the overall maximum.
    largest_three = A[-1] * A[-2] * A[-3]
    negatives_with_max = A[0] * A[1] * A[-1]
    return max(largest_three, negatives_with_max)
class TestMaxProductOfThree(unittest.TestCase):
    """Unit tests for solution() above (Codility MaxProductOfThree)."""
    def test_sample(self):
        # Codility's published sample: 2 * 5 * 6 == 60.
        self.assertEqual(60, solution([-3, 1, 2, -2, 5, 6]))
    def test_one_big_negative(self):
        # A single large negative cannot pair up, so the three largest win.
        self.assertEqual(60, solution([-300, 1, 2, 2, 5, 6]))
    def test_two_big_negatives(self):
        # Two negatives multiply to a positive and combine with the maximum.
        self.assertEqual(3600, solution([-300, -1, 2, -2, 5, 6]))
|
# Fix: the handler used `except ValueError`, which never catches a failed
# import (those raise ImportError/ModuleNotFoundError), so the message was
# dead code and import failures propagated uncaught.  `sys` is imported up
# front so `sys.argv` is available inside the handler even when another
# module is the one that fails.
import sys

try:
    import cv2
    import numpy as np
    from os import listdir
    from random import shuffle
    from cod.extension import path_to_images, path_to_dataset_from_images
except ImportError:
    print("Modules loading failed in " + sys.argv[0])
def save_data_to_file():
    """Convert the per-class image folders into labelled .npy datasets.

    Each folder listed in ``existing_folders`` becomes one ``<folder>.npy``
    file of ``[image_array, one_hot_label]`` pairs; images are loaded as
    grayscale and resized to 128x128.

    Fixes vs. the original:
      * shuffle/np.save ran for EVERY folder, writing empty .npy files for
        folders not in ``existing_folders`` — now those are skipped entirely.
      * ``if item is not None`` was always true for os.listdir output, and its
        attached ``else: print('Folder Is Empty!')`` branch was unreachable.
    """
    existing_folders = ['APC', 'LBB', 'NOR', 'PAB', 'PVC', 'RBB', 'VEB']
    counter = 0
    for folder in sorted(listdir(path_to_images)):
        if str(folder) not in existing_folders:
            continue
        training_dataset = []
        # One-hot label; position is the order in which known folders appear.
        label = np.zeros(7)
        label[counter] = 1
        counter += 1
        print('Creating Data From ' + folder)
        for item in listdir(path_to_images + '/' + folder):
            image = cv2.imread(path_to_images + '/' + folder + '/' + item, 0)
            if image is not None:
                image = cv2.resize(image, (128, 128))
                training_dataset.append([np.array(image), label])
            else:
                print('Image Not Found!')
        shuffle(training_dataset)
        np.save(path_to_dataset_from_images + '/' + folder + '.npy', training_dataset)
        print('Data From ' + folder + ' Are Ready!')
def create_dataset_from_files():
    """Merge every per-class .npy file into one shuffled ../dataset_128.npy."""
    dataset = []
    for image_file in sorted(listdir(path_to_dataset_from_images)):
        print('Collecting Data From ' + image_file)
        # allow_pickle is required because each record is an [image, label] pair.
        records = np.load(path_to_dataset_from_images + '/' + image_file, allow_pickle=True)
        dataset.extend(records)
    np.random.shuffle(dataset)
    np.save('../dataset_128.npy', dataset)
    print('Dataset Created!')
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = '_X.xx_'
__date__ = '2018/7/18 18:11'
from django.urls import path
from . import views
# Namespaced URL configuration for the payinfo app.
# (Fix: removed a stray trailing '|' artifact after the closing bracket.)
app_name = 'payinfo'

urlpatterns = [
    path('', views.payinfo, name='payinfo'),
    # NOTE(review): no name= given here, so this route cannot be reverse()d —
    # confirm that is intended.
    path('auth/', views.auth_test),
]
# Restore the '?' wildcards in S so that T appears as a substring and the
# result is lexicographically smallest; print UNRESTORABLE when T cannot be
# placed.  Minimality comes from placing T at the RIGHTMOST feasible position
# and turning every remaining '?' into 'a'.
S = input()
T = input()
ls = len(S)
lt = len(T)
# Try every start position for T, rightmost first.
for i in range(ls-lt,-1,-1):
    for j in range(lt):
        # A mismatch on a non-wildcard character rules out position i.
        if S[i+j] != T[j] and S[i+j] != '?':
            break
    else:
        # Inner loop finished without break: T fits at position i.
        print((S[:i] + T + S[i+lt:]).replace('?','a'))
        break
else:
    # Outer loop exhausted without break: no feasible position exists.
    print('UNRESTORABLE')
|
from flask import Flask
from flask_restplus import Api, Resource, fields
app = Flask(__name__)
api = Api(app)  # flask_restplus adds Swagger-documented routing on top of Flask
# The first argument is to specify the model name.
a_language = api.model('Language_Model', {'language' : fields.String('Please type in a language you want.')})
# In-memory store seeded with one entry; no persistence across restarts.
languages = []
python = {'language' : 'Python'}
languages.append(python)
@api.route('/language')
class Language(Resource):
    """REST resource exposing the module-level in-memory ``languages`` list."""
    def get(self):
        # return {"Hello", "restplus"} # no need to use jsonify like regular flask API in here.
        return languages
    @api.expect(a_language)
    def post(self):
        # api.payload is the parsed JSON request body (shape of a_language).
        languages.append(api.payload)
        return {'result' : 'Language added'}, 201
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0", port=5000) |
import argparse
import math
import time
import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.utils.data
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model import DAN
import dataloader
import Constants
from build_vocab import Vocabulary
from sklearn.metrics import roc_auc_score, average_precision_score
import pandas as pd
def test(model, test_data, loss_fn, device, opt):
    ''' Epoch operation in evaluation phase.

    Runs the model over test_data without gradients and returns the
    positive-class probabilities for every example as a CPU tensor.

    Args:
        model: DAN model, called as model(note, length[, feature]).
        test_data: iterable of batches of device-movable tensors.
        loss_fn: criterion taking (logits, targets).
        device: torch.device the batches are moved to.
        opt: parsed options; only opt.feature is consulted here.
    '''
    model.eval()
    count = 0
    total_loss = 0
    true_all = []
    pred_all = []
    with torch.no_grad():
        for batch in tqdm(
                test_data, mininterval=2,
                desc=' - (Validation) ', leave=False):
            # prepare data: optionally includes the structured-feature tensor
            if opt.feature:
                note, length, mortality, feature = map(lambda x: x.to(device), batch)
                pred = model(note, length, feature)
            else:
                note, length, mortality = map(lambda x: x.to(device), batch)
                pred = model(note, length)
            loss = loss_fn(pred, mortality.view(-1))
            # note keeping
            total_loss += loss.item()
            count += 1
            true_all.append(mortality.view(-1))
            # Fix: pass dim=1 explicitly — implicit-dim F.softmax is deprecated.
            pred_all.append(F.softmax(pred, dim=1)[:, 1].view(-1))
    true_all = torch.cat(true_all, dim=0)
    pred_all = torch.cat(pred_all, dim=0)
    # NOTE(review): the AUC metrics and total_loss/count are computed but not
    # returned or logged — confirm whether they should be surfaced.
    roc_auc = roc_auc_score(true_all.cpu(), pred_all.cpu())
    pr_auc = average_precision_score(true_all.cpu(), pred_all.cpu())
    return pred_all.cpu()
def main():
    ''' Main function.

    Loads a trained DAN checkpoint, then re-runs the test split once per
    note type (selected via opt.compare_note) and writes the per-note
    prediction probabilities to a CSV, one column per note type, keyed by
    the test set's 'stay' identifier.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-epoch', type=int, default=20)
    parser.add_argument('-batch_size', type=int, default=32)
    parser.add_argument('-dropout', type=float, default=0.5)
    # NOTE(review): declared float but used as an embedding dimension below —
    # confirm int was intended.
    parser.add_argument('-embedding_size', type=float, default=300)
    parser.add_argument('-learning_rate', type=float, default=0.0003)
    parser.add_argument('-name', type=str, default=None, choices=['all', 'all_but_discharge', 'physician', 'discharge', 'physician_nursing'])
    parser.add_argument('-task', type=str, default=None, choices=['mortality', 'readmission'])
    parser.add_argument('-data_name', type=str, default=None)
    parser.add_argument('-period', type=str, choices=['24', '48', 'retro'])
    parser.add_argument('-segment', type=str, default=None)
    parser.add_argument('-text_length', type=int, help='text length', default=None)
    parser.add_argument('-feature', action='store_true', default=False)
    parser.add_argument('-text', action='store_true', default=False)
    parser.add_argument('-log', type=str, default="/data/joe/physician_notes/Deep-Average-Network/log/")
    parser.add_argument('-save_model', default=True)
    parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
    parser.add_argument('-test_mode', action='store_true', default=False)
    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-device', type=str, default='0')
    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    opt.compare_note = None  # reassigned per note type in the loop below
    #========= Loading Dataset =========#
    torch.manual_seed(1234)  # reproducible data order / initialisation
    training_data, validation_data, test_data, vocab, feature_len = dataloader.get_loaders(opt, is_test=opt.test_mode, is_feature = opt.feature)
    #========= Preparing Model =========#
    print(opt)
    device = torch.device(f'cuda:{opt.device}' if opt.cuda else 'cpu')
    dan = DAN(len(vocab), opt.embedding_size, feature_len, opt.dropout, opt.feature, opt.text).to(device)
    loss_fn = nn.CrossEntropyLoss()
    # Checkpoint name mirrors the naming scheme used at training time.
    model_name = opt.task+'_'+opt.name +'_'+ opt.period + '.chkpt'
    if opt.text:
        model_name = "text_" + model_name
    if opt.feature:
        model_name = "feature_" + model_name
    checkpoint = torch.load(f"/data/joe/physician_notes/Deep-Average-Network/models/{model_name}", map_location=device)
    dan.load_state_dict(checkpoint['model'])
    # Evaluate once per note type; loaders are rebuilt each iteration because
    # opt.compare_note changes which notes are included.
    results = {}
    for note_id in Constants.note_type[opt.name]:
        opt.compare_note = note_id
        training_data, validation_data, test_data, vocab, feature_len = dataloader.get_loaders(opt, is_test=opt.test_mode, is_feature = opt.feature)
        res = test(dan, test_data, loss_fn, device, opt)
        results[note_id] = res
        #predict_prob(dan, test_data, loss_fn, device, opt)
    # Join predictions with the test-set stay identifiers and persist as CSV.
    TEST_NOTE_PATH = f"/data/joe/physician_notes/mimic-data/{opt.task}/{opt.name}_note_test_{opt.period}.csv"
    test_file = pd.read_csv(TEST_NOTE_PATH)
    df = pd.DataFrame(results)
    df.insert(0,'stay', test_file['stay'])
    if not os.path.exists('/home/joe/physician_notes/models/DeepAverageNetwork/compare_notes/'):
        os.mkdir('/home/joe/physician_notes/models/DeepAverageNetwork/compare_notes/')
    model_name = opt.task+'_'+opt.name +'_'+ opt.period + '.csv'
    if opt.text:
        model_name = "text_" + model_name
    if opt.feature:
        model_name = "feature_" + model_name
    if opt.segment:
        model_name = opt.segment+ "_" + model_name
    df.to_csv(f'/home/joe/physician_notes/models/DeepAverageNetwork/compare_notes/{model_name}', index=False)
|
import copy
import unittest
import networkx
from qubo_nn.problems import MaxCut
from qubo_nn.problems.max_cut import MaxCutMemoryEfficient
class TestMaxCut(unittest.TestCase):
    """Tests for the dense MaxCut QUBO problem generator."""
    def test_gen_qubo_matrix(self):
        """Test whether a correct QUBO is generated.
        Test case from: https://arxiv.org/pdf/1811.11538.pdf

        In the expected matrix each diagonal entry is the negated node degree
        and each off-diagonal entry is 1 per edge.
        """
        graph = networkx.Graph(
            [(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (2, 4)]
        )
        want = [
            [-2, 1, 1, 0, 0],
            [1, -2, 0, 1, 0],
            [1, 0, -3, 1, 1],
            [0, 1, 1, -3, 1],
            [0, 0, 1, 1, -2]
        ]
        # deepcopy keeps the local `graph` unmodified by the problem class.
        problem = MaxCut({}, copy.deepcopy(graph))
        matrix = problem.gen_qubo_matrix()
        self.assertCountEqual(matrix.tolist(), want)
    def test_gen_problems(self):
        # Seeded generation must be reproducible: one 20-node / 25-edge graph.
        data = MaxCut.gen_problems({}, 1, size=(20, 25), seed=1)
        self.assertCountEqual(data[0]["graph"].edges, [
            (0, 12), (0, 14), (0, 10), (0, 17), (0, 13), (0, 9), (2, 8),
            (3, 15), (3, 18), (3, 5), (3, 9), (4, 18), (6, 12), (6, 9), (7, 8),
            (7, 16), (7, 17), (7, 11), (7, 14), (9, 18), (10, 16), (13, 19),
            (13, 17), (13, 16), (14, 15)
        ])
class TestMaxCutMemoryEfficient(unittest.TestCase):
    """Tests for the edge-list-based MaxCut variant; must match TestMaxCut."""
    def test_gen_qubo_matrix(self):
        """Test whether a correct QUBO is generated.
        Test case from: https://arxiv.org/pdf/1811.11538.pdf

        Same expected matrix as the dense variant, but the problem is built
        from an edge list plus an explicit node count.
        """
        graph = networkx.Graph(
            [(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (2, 4)]
        )
        want = [
            [-2, 1, 1, 0, 0],
            [1, -2, 0, 1, 0],
            [1, 0, -3, 1, 1],
            [0, 1, 1, -3, 1],
            [0, 0, 1, 1, -2]
        ]
        # 5 is the node count; only the edge list is handed to the problem.
        problem = MaxCutMemoryEfficient({}, graph.edges, 5)
        matrix = problem.gen_qubo_matrix()
        self.assertCountEqual(matrix.tolist(), want)
    def test_gen_problems(self):
        # Same seed as the dense variant must yield the same edge list.
        data = MaxCutMemoryEfficient.gen_problems({}, 1, size=(20, 25), seed=1)
        self.assertCountEqual(data[0]["edge_list"], [
            (0, 12), (0, 14), (0, 10), (0, 17), (0, 13), (0, 9), (2, 8),
            (3, 15), (3, 18), (3, 5), (3, 9), (4, 18), (6, 12), (6, 9), (7, 8),
            (7, 16), (7, 17), (7, 11), (7, 14), (9, 18), (10, 16), (13, 19),
            (13, 17), (13, 16), (14, 15)
        ])
|
import resources.lib.nflcs
class Team(resources.lib.nflcs.NFLCS):
    """Dallas Cowboys video-content plugin built on the shared NFLCS base.

    Only declares team-specific data; all scraping/dispatch behaviour lives
    in the NFLCS base class (invoked via go()).
    """
    # Team slug — presumably used by the base class for URL building; confirm in NFLCS.
    _short = "cowboys"
    _cdaweb_url = "http://www.dallascowboys.com/cda-web/"
    # Video category identifiers exposed by the team site.
    _categories = [
        "Video - AskTheBoys",
        "Videos - Cheerleaders",
        "Video - Coaches-Executives",
        "Videos - Draft",
        "Videos - Exclusives",
        "FirstTake",
        "Video - Game-Highlights",
        "Videos - History",
        "Videos - Know The Enemy",
        "Video - Live Reports",
        "Video - NFL",
        "Video - Players",
        "Video - Quick Snap",
        "Video - Reports",
        "Videos - Scouting Report",
        "Video - Shows - Best Of The Break",
        "Video - Shows - Best Of The Draft Show",
        "Video - Shows - Cowboys Break",
        "Video - Shows - Cowboys Hour",
        "Video - Shows - Draft Show",
        "Video - Shows - On Air",
        "Video - Shows - Talkin Cowboys",
        "Video - Shows - The Legends Show",
        "Video - The Blitz",
        "Videos - Upon Further Review",
    ]
    def __init__(self, parameters):
        # Store the addon parameters, then hand control to the base dispatcher.
        self._parameters = parameters
        self.go()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.