text string | size int64 | token_count int64 |
|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-16 15:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``ClimateDataType`` model to ``DataType``.

    A pure rename: Django rewrites the table/model references; no column
    changes are involved.
    """

    dependencies = [
        ('climatemodels', '0043_auto_20170116_1623'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='ClimateDataType',
            new_name='DataType',
        ),
    ]
| 412 | 160 |
import numpy
import os
import json
import cv2
import csv
import os.path as osp
import mmcv
import numpy as np
def isgood(w, h):
    """Return True when a w x h box is at least 2px on each side and its
    aspect ratio does not exceed 10:1 in either orientation."""
    if min(w, h) < 2:
        # Degenerate/tiny boxes are rejected before any division.
        return False
    return max(w / h, h / w) <= 10.0
def bbox_iou(box1, box2):
    """Intersection-over-union of two boxes given as (x1, y1, x2, y2).

    Coordinates are treated as inclusive pixel indices, hence the +1 when
    measuring widths/heights.  Returns 0 for non-overlapping boxes.
    """
    ax1, ay1, ax2, ay2 = box1
    bx1, by1, bx2, by2 = box2
    # Overlap extents under the inclusive-coordinate convention.
    ov_w = min(ax2, bx2) - max(ax1, bx1) + 1
    ov_h = min(ay2, by2) - max(ay1, by1) + 1
    if ov_w <= 0 or ov_h <= 0:
        return 0
    overlap = ov_w * ov_h
    area_a = (ax2 - ax1 + 1) * (ay2 - ay1 + 1)
    area_b = (bx2 - bx1 + 1) * (by2 - by1 + 1)
    return overlap / (area_a + area_b - overlap)
def save_newanno(message, path):
    """Append a single annotation line to the file at `path`.

    The file is opened in append mode so successive calls accumulate lines.
    """
    with open(path, 'a') as out_file:
        out_file.write(message + '\n')
def crop4patches(img_prefix,img_writen,istrain=True):
    """Crop each VisDrone image into patches and remap its annotations.

    Every image under ``img_prefix + 'images/'`` is split into four
    quadrant patches (ids 1-4: top-left, bottom-left, top-right,
    bottom-right) plus, when ``istrain`` is True, a fifth central patch.
    Annotation rows (``left,top,width,height,score,category,truncation,
    occlusion``) from ``img_prefix + 'annotations/'`` are translated into
    each patch's coordinate frame.  A box that crosses a patch border is
    kept -- clipped, with truncation forced to 1 -- only when the clipped
    box retains IoU > 0.5 with the original box.  Patch images and
    annotation files are written under ``img_writen``; patches that end
    up with no annotations are deleted.  Finally a sample of patches is
    re-read and rendered with their boxes to hard-coded output dirs.

    NOTE(review): source indentation was lost on extraction; the
    visualization block at the end is reconstructed at function level
    (after the per-image loop) -- confirm against the original file.
    """
    # Ensure the output directory layout exists.
    if not os.path.exists(img_writen+'annotations/'):
        os.makedirs(img_writen+'annotations/')
    if not os.path.exists(img_writen+'images/'):
        os.makedirs(img_writen+'images/')
    img_infos = []
    img_file = img_prefix+'images/'
    all_imgs_files = os.listdir(img_file)
    for img_file in all_imgs_files:
        img_id = img_file.split('.')[0]
        anno_name ='annotations/{}.txt'.format(img_id)
        img_name = 'images/{}.jpg'.format(img_id)
        print("dealing with {}".format(img_name))
        img_path = osp.join(img_prefix, img_name)
        anno_path = osp.join(img_prefix,anno_name)
        img = cv2.imread(img_path)
        h,w,c = img.shape
        print("h {}".format(h))
        print("w {}".format(w))
        patch_width = int(w) // 2
        patch_height = int(h) // 2
        # Patch windows as (x1, y1, x2, y2): the four quadrants.
        bboxes = []
        bboxes.append(np.array([0,0,patch_width,patch_height]))
        bboxes.append(np.array([0,patch_height,patch_width,h]))
        bboxes.append(np.array([patch_width,0,w,patch_height]))
        bboxes.append(np.array([patch_width,patch_height,w,h]))
        padw = (w-patch_width)//2
        padh = (h-patch_height)//2
        if istrain:
            # Extra central patch (id 5), used only for training.
            bboxes.append(np.array([padw,padh,w-padw,h-padh]))
        bboxes = np.array(bboxes)
        img_patches = mmcv.imcrop(img,bboxes,scale=1.0)
        for jj in range(len(img_patches)):
            if istrain:
                assert (len(img_patches)) == 5
            else:
                assert (len(img_patches)) == 4
            cv2.imwrite(img_writen+'images/{}_{}.jpg'.format(img_id,jj+1),img_patches[jj])
        with open(anno_path,'r') as ann:
            note = ann.readlines()
        # Compute annotations for the central patch (training only).
        if istrain:
            for item in note:
                values_str = item.split(',')
                bbox_left,bbox_top,bbox_width,bbox_height,score,object_category,\
                truncation,occulusion = int(values_str[0]),int(values_str[1]),\
                int(values_str[2]),int(values_str[3]),int(values_str[4]),int(values_str[5]),\
                int(values_str[6]),int(values_str[7])
                # Box origin lies inside the central patch.
                if bbox_left>padw and bbox_top>padh and bbox_left<w-padw and bbox_top < h-padh:
                    if bbox_left+bbox_width>w-padw or bbox_top+bbox_height>h-padh:
                        # Crosses the patch border: keep the clipped box only if IoU > 0.5.
                        if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
                            (bbox_left,bbox_top,min(w-padw,bbox_left+bbox_width),min(h-padh,bbox_top+bbox_height))) > 0.5:
                            message = str(bbox_left-padw)+','+str(bbox_top-padh)+','+str(min(w-padw,bbox_left+bbox_width)-bbox_left)+','+str(min(h-padh,bbox_top+bbox_height)-bbox_top)\
                                +','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)
                            path = img_writen+'annotations/{}_{}.txt'.format(img_id,5)
                            save_newanno(message,path)
                            continue
                        else:
                            continue
                    else:
                        # Fully inside the central patch: save directly.
                        message = str(bbox_left-padw)+','+str(bbox_top-padh)+','+str(min(w-padw,bbox_left+bbox_width)-bbox_left)+','+str(min(h-padh,bbox_top+bbox_height)-bbox_top)\
                            +','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
                        path = img_writen+'annotations/{}_{}.txt'.format(img_id,5)
                        save_newanno(message,path)
                        continue
        for item in note:
            values_str = item.split(',')
            bbox_left,bbox_top,bbox_width,bbox_height,score,object_category,\
            truncation,occulusion = int(values_str[0]),int(values_str[1]),\
            int(values_str[2]),int(values_str[3]),int(values_str[4]),int(values_str[5]),\
            int(values_str[6]),int(values_str[7])
            # Top-left patch (id 1).
            if bbox_left < patch_width and bbox_top < patch_height:
                if bbox_left+bbox_width> patch_width or bbox_top+bbox_height > patch_height:# crosses border
                    if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
                        (bbox_left,bbox_top,min(patch_width,bbox_left+bbox_width),min(patch_height,bbox_top+bbox_height))) > 0.5:
                        # Keep the clipped box, mark as truncated.
                        message = str(bbox_left-0)+','+str(bbox_top-0)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
                            +','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)
                        path = img_writen+'annotations/{}_{}.txt'.format(img_id,1)
                        save_newanno(message,path)
                        continue
                    else:# clipped box lost too much area: drop it
                        continue
                else: # box fully inside: save directly
                    message = str(bbox_left-0)+','+str(bbox_top-0)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
                        +','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
                    path = img_writen+'annotations/{}_{}.txt'.format(img_id,1)
                    save_newanno(message,path)
                    continue
            # Bottom-left patch (id 2).
            if bbox_left< patch_width and bbox_top >= patch_height:
                if bbox_top+bbox_height > h:# original annotation is out of the image: invalid
                    raise IOError
                if bbox_left+bbox_width > patch_width:# crosses border
                    if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
                        (bbox_left,bbox_top,min(patch_width,bbox_left+bbox_width),min(patch_height,bbox_top+bbox_height))) > 0.5:
                        # Keep the clipped box, mark as truncated.
                        message = str(bbox_left-0)+','+str(bbox_top-patch_height)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(h,bbox_top+bbox_height)-bbox_top)\
                            +','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)
                        path = img_writen+'annotations/{}_{}.txt'.format(img_id,2)
                        save_newanno(message,path)
                        continue
                    else:# drop
                        continue
                else:
                    # Fully inside: save directly.
                    message = str(bbox_left-0)+','+str(bbox_top-patch_height)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(h,bbox_top+bbox_height)-bbox_top)\
                        +','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
                    path = img_writen+'annotations/{}_{}.txt'.format(img_id,2)
                    save_newanno(message,path)
                    continue
            # Top-right patch (id 3).
            if bbox_left >= patch_width and bbox_top < patch_height:
                if bbox_left + bbox_width > w:# invalid original annotation
                    raise IOError
                if bbox_top + bbox_height > patch_height:# crosses border
                    if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
                        (bbox_left,bbox_top,min(patch_width,bbox_left+bbox_width),min(patch_height,bbox_top+bbox_height))) > 0.5:
                        # Keep the clipped box; truncation forced to 1.
                        message = str(bbox_left-patch_width)+','+str(bbox_top-0)+','+str(min(w,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
                            +','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)# must be truncated
                        path = img_writen+'annotations/{}_{}.txt'.format(img_id,3)
                        save_newanno(message,path)
                        continue
                    else:# drop
                        continue
                else:
                    # Fully inside: save directly.
                    message = str(bbox_left-patch_width)+','+str(bbox_top-0)+','+str(min(w,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
                        +','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
                    path = img_writen+'annotations/{}_{}.txt'.format(img_id,3)
                    save_newanno(message,path)
                    continue
            # Bottom-right patch (id 4).
            if bbox_left >= patch_width and bbox_top >= patch_height:
                if bbox_left+bbox_width>w or bbox_height+bbox_top>h:
                    raise IOError
                # The fourth region cannot cross a patch border (checked above).
                message = str(bbox_left-patch_width)+','+str(bbox_top-patch_height)+','+str(bbox_width)+','+str(bbox_height)\
                    +','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
                path = img_writen+'annotations/{}_{}.txt'.format(img_id,4)
                save_newanno(message,path)
                continue
        # Delete patch images that ended up with no annotation file.
        for jj in range(len(img_patches)):
            if istrain:
                assert (len(img_patches)) == 5
            else:
                assert (len(img_patches)) == 4
            if not os.path.exists(img_writen+'annotations/{}_{}.txt'.format(img_id,jj+1)):
                os.remove(img_writen+'images/{}_{}.jpg'.format(img_id,jj+1))
    # Visual sanity check: render a sample of patches with their boxes.
    new_list = os.listdir(img_writen+'images/')
    new_list_show = []
    new_list_show.extend(new_list[:100])
    new_list_show.extend(new_list[500:600])
    for ii,item in enumerate(new_list_show):
        showimg = cv2.imread(img_writen+'images/'+item)
        id = item.split('.')[0]
        annotation = img_writen+'annotations/'+id+'.txt'
        with open(annotation,'r') as ann:
            note = ann.readlines()
        bboxes = []
        for jj in note:
            values_str = jj.split(',')
            bbox_left,bbox_top,bbox_width,bbox_height,score,object_category,\
            truncation,occulusion = int(values_str[0]),int(values_str[1]),\
            int(values_str[2]),int(values_str[3]),int(values_str[4]),int(values_str[5]),\
            int(values_str[6]),int(values_str[7])
            bboxes.append(np.array([bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height]))
        bboxes = np.array(bboxes)
        print('/home/share2/VisDrone2019/vispatch/'+item)
        if istrain:
            mmcv.imshow_bboxes(showimg,bboxes,show=False,out_file='/home/share2/VisDrone2019/TASK1/trainpatch/'+item)
        else:
            mmcv.imshow_bboxes(showimg,bboxes,show=False,out_file='/home/share2/VisDrone2019/TASK1/valpatch/'+item)
if __name__ == '__main__':
    # Expose the module's functions on the command line via python-fire,
    # e.g.: python thisfile.py crop4patches --img_prefix=... --img_writen=... --istrain=False
    import fire
    fire.Fire()
    #img_prefix = '/home/share2/VisDrone2019/TASK1/VisDrone2019-DET-val/'
    #img_writen= '/home/share2/VisDrone2019/TASK1/VisDrone2019-DET-val-patches/'
    #crop4patches(img_prefix=img_prefix,img_writen=img_writen,istrain=False)
| 13,528 | 4,748 |
#!/usr/bin/env python3
# Copyleft 2021 Sidon Duarte
#
import http
import sys
from typing import Any
import colorama
import requests
from rabbitgetapi import cli
from rabbitgetapi import exceptions
from rabbitgetapi import build_parser
def main() -> Any:
    """Dispatch the CLI and convert known failures into printable text.

    HTTP errors are summarised with their status code/phrase and URL;
    package-level errors with their class name and message.  Any string
    result is passed through the error formatter before being returned.
    """
    try:
        outcome = cli.dispatch(sys.argv[1:])
    except requests.HTTPError as exc:
        code = exc.response.status_code
        phrase = http.HTTPStatus(code).phrase
        outcome = (
            f"{exc.__class__.__name__}: {code} {phrase} "
            f"from {exc.response.url}\n"
            f"{exc.response.reason}"
        )
    except exceptions.GetRmqApiException as exc:
        outcome = f"{exc.__class__.__name__}: {exc.args[0]}"
    if isinstance(outcome, str):
        return _format_error(outcome)
    return outcome
def _format_error(message: str) -> str:
    """Wrap `message` in red ANSI colour unless --no-color was requested."""
    if cli.args.no_color:
        return message
    # Initialise colorama only when colour output is actually wanted.
    colorama.init()
    return f"{colorama.Fore.RED}{message}{colorama.Style.RESET_ALL}"
if __name__ == "__main__":
sys.exit(main())
| 1,121 | 372 |
from typing import Union, Iterable
import hashlib
def hash_id(seeds: Union[str, Iterable], n: int = 32) -> str:
    """Return a deterministic hex identifier derived from `seeds`.

    `seeds` is a string or an iterable of strings; the pieces are
    concatenated, SHA-256 hashed, and the first `n` hex digits returned.

    Fix: ``hashlib.sha256`` requires bytes, so the joined string is
    UTF-8 encoded first (the previous code raised TypeError on every
    call).
    """
    material = ''.join(seeds).encode('utf-8')
    return hashlib.sha256(material).hexdigest()[:n]
import os
import numpy as np
import torch
from torch.utils.data import Dataset
import math
import torch
import torch.nn.functional as F
import random
import torchvision.datasets
from torchvision.transforms import *
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from PIL import Image, ImageEnhance, ImageOps
from torch.utils.data import Dataset
from torchdistill.datasets.wrapper import register_dataset_wrapper,BaseDatasetWrapper
def rotate_with_fill(img, magnitude):
    """Rotate `img` by `magnitude` degrees, filling the exposed corners
    with mid-grey (128) and converting back to the original mode."""
    rotated = img.convert('RGBA').rotate(magnitude)
    grey_bg = Image.new('RGBA', rotated.size, (128,) * 4)
    return Image.composite(rotated, grey_bg, rotated).convert(img.mode)
# PIL-based augmentation primitives.  All share the (img, magnitude,
# fillcolor) signature so SubPolicy can dispatch to them uniformly;
# several intentionally ignore magnitude and/or fillcolor.
def shearX(img,magnitude,fillcolor):
    # Horizontal shear by +/- magnitude (sign chosen at random).
    return img.transform(img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),Image.BICUBIC, fillcolor=fillcolor)
def shearY(img,magnitude,fillcolor):
    # Vertical shear by +/- magnitude.
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),Image.BICUBIC, fillcolor=fillcolor)
def translateX(img,magnitude,fillcolor):
    # Horizontal shift by +/- magnitude * width pixels.
    return img.transform( img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),fillcolor=fillcolor)
def translateY(img,magnitude,fillcolor):
    # Vertical shift by +/- magnitude * height pixels.
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),fillcolor=fillcolor)
def rotate(img,magnitude,fillcolor):
    # Rotation with grey fill; fillcolor argument is unused here.
    return rotate_with_fill(img, magnitude)
def color(img,magnitude,fillcolor):
    # Colour-balance adjustment by a random-signed magnitude.
    return ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1]))
def posterize(img,magnitude,fillcolor):
    # Reduce to `magnitude` bits per channel.
    return ImageOps.posterize(img, magnitude)
def solarize(img,magnitude,fillcolor):
    # Invert all pixel values above the `magnitude` threshold.
    return ImageOps.solarize(img, magnitude)
def contrast(img,magnitude,fillcolor):
    return ImageEnhance.Contrast(img).enhance(1 + magnitude * random.choice([-1, 1]))
def sharpness(img,magnitude,fillcolor):
    return ImageEnhance.Sharpness(img).enhance(1 + magnitude * random.choice([-1, 1]))
def brightness(img,magnitude,fillcolor):
    return ImageEnhance.Brightness(img).enhance(1 + magnitude * random.choice([-1, 1]))
def autocontrast(img,magnitude,fillcolor):
    # Magnitude-free op: stretches the histogram to full range.
    return ImageOps.autocontrast(img)
def equalize(img,magnitude,fillcolor):
    # Magnitude-free op: histogram equalisation.
    return ImageOps.equalize(img)
def invert(img,magnitude,fillcolor):
    # Magnitude-free op: negate all pixels.
    return ImageOps.invert(img)
def rand_bbox(size, lam):
    """Sample a CutMix box whose area is roughly (1 - lam) of the image.

    `size` is a tensor shape whose dims 1 and 2 are the spatial extents
    (e.g. (C, H, W)).  Returns (x1, y1, x2, y2) clipped to the image; the
    box may be smaller than the target area when it hits a border.

    Fix: the ``np.int`` alias was removed in NumPy 1.24 -- use the
    builtin ``int`` instead.
    """
    W = size[1]
    H = size[2]
    cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # Uniformly random box centre.
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
class SubPolicy:
    """A single augmentation op applied with probability `p1`.

    `operation1` names one of the module-level PIL ops (shearX, rotate,
    ...); `magnitude_idx1` (0-9) indexes that op's 10-step magnitude
    schedule.  Calling the policy returns ``(image, label)`` where label
    is 1 iff the op fired.

    Fix: ``.astype(np.int)`` used the ``np.int`` alias removed in
    NumPy 1.24; replaced with the builtin ``int``.
    """

    def __init__(self, p1, operation1, magnitude_idx1, fillcolor=(128, 128, 128)):
        self.fillcolor = fillcolor
        # 10-step magnitude schedule per op; posterize/solarize count
        # downwards (stronger effect at higher index).
        ranges = {
            'shearX': np.linspace(0, 0.3, 10),
            'shearY': np.linspace(0, 0.3, 10),
            'translateX': np.linspace(0, 150 / 331, 10),
            'translateY': np.linspace(0, 150 / 331, 10),
            'rotate': np.linspace(0, 30, 10),
            'color': np.linspace(0.0, 0.9, 10),
            'posterize': np.round(np.linspace(8, 4, 10), 0).astype(int),
            'solarize': np.linspace(256, 0, 10),
            'contrast': np.linspace(0.0, 0.9, 10),
            'sharpness': np.linspace(0.0, 0.9, 10),
            'brightness': np.linspace(0.0, 0.9, 10),
            'autocontrast': [0] * 10,
            'equalize': [0] * 10,
            'invert': [0] * 10
        }
        # Dispatch table to the module-level PIL augmentation functions.
        func = {
            'shearX': shearX,
            'shearY': shearY,
            'translateX': translateX,
            'translateY': translateY,
            'rotate': rotate,
            'color': color,
            'posterize': posterize,
            'solarize': solarize,
            'contrast': contrast,
            'sharpness': sharpness,
            'brightness': brightness,
            'autocontrast': autocontrast,
            'equalize': equalize,
            'invert': invert
        }
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]

    def __call__(self, img):
        """Apply the op with probability p1; return (img, fired_flag)."""
        label = 0
        if random.random() < self.p1:
            img = self.operation1(img, self.magnitude1, self.fillcolor)
            label = 1
        return img, label
@register_dataset_wrapper
class PolicyDataset(BaseDatasetWrapper):
    """Wrapper yielding (original, policy-augmented) image pairs.

    ``__getitem__`` returns ``sample`` of shape (2, C, H, W) -- the base
    image and a copy passed through 14 probabilistic sub-policies -- and
    ``target`` of shape (2, num_classes + 14): the (possibly
    CutMix-blended) one-hot class target concatenated with a 0/1 row
    marking which sub-policies fired (all-zero row for the clean image).
    The class count is hard-coded to 10 (CIFAR-10-style labels).
    """
    def __init__(self,org_dataset,mixcut=False,mixcut_prob=0.1,beta=0.3):
        super(PolicyDataset, self).__init__(org_dataset)
        # Take over the transform so the wrapped dataset yields raw PIL images.
        self.transform=org_dataset.transform
        org_dataset.transform=None
        # Fixed AutoAugment-style sub-policies, each fired with p=0.5.
        self.policies = [
            SubPolicy(0.5, 'invert', 7),
            SubPolicy(0.5, 'rotate', 2),
            SubPolicy(0.5, 'sharpness', 1),
            SubPolicy(0.5, 'shearY', 8),
            SubPolicy(0.5, 'autocontrast', 8),
            SubPolicy(0.5, 'color', 3),
            SubPolicy(0.5, 'sharpness', 9),
            SubPolicy(0.5, 'equalize', 5),
            SubPolicy(0.5, 'contrast', 7),
            SubPolicy(0.5, 'translateY', 3),
            SubPolicy(0.5, 'brightness',6),
            SubPolicy(0.5, 'solarize', 2),
            SubPolicy(0.5, 'translateX',3),
            SubPolicy(0.5, 'shearX', 8),
        ]
        self.policies_len=len(self.policies)
        self.beta=beta                 # Beta(beta, beta) for the CutMix lambda
        self.mixcut_prob=mixcut_prob   # probability of applying CutMix
        self.mixcut=mixcut             # master switch for CutMix
    def __getitem__(self, index):
        sample, target_a, supp_dict = super(PolicyDataset, self).__getitem__(index)
        sample=self.transform(sample).detach()
        r = np.random.rand(1)
        if self.mixcut and self.beta > 0 and r < self.mixcut_prob:
            # CutMix: paste a random box from another randomly chosen sample.
            lam = np.random.beta(self.beta, self.beta)
            rand_index=random.randint(0,len(self)-1)
            rsample,target_b, supp_dict = super(PolicyDataset, self).__getitem__(rand_index)
            rsample = self.transform(rsample)
            bbx1, bby1, bbx2, bby2 = rand_bbox(sample.size(), lam)
            sample[ :, bbx1:bbx2, bby1:bby2] = rsample[ :, bbx1:bbx2, bby1:bby2]
            # adjust lambda to exactly match the pasted pixel ratio
            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (sample.size()[-1] * sample.size()[-2]))
            target=F.one_hot(torch.LongTensor([target_a]),10)*lam+ F.one_hot(torch.LongTensor([target_b]),10)*(1.-lam)
        else:
            target=target_a
        # Build the augmented copy from a PIL rendering of the (possibly mixed) tensor.
        new_sample=transforms.ToPILImage()(sample)
        policy_index = torch.zeros(self.policies_len).float()
        for i in range(self.policies_len):
            new_sample,label=self.policies[i](new_sample)
            policy_index[i]=label
        new_sample=self.transform(new_sample).detach()
        if not isinstance(target,torch.Tensor):
            # Non-CutMix path: one-hot encode the integer label.
            target=F.one_hot(torch.LongTensor([target]),10)
        target=target.expand(2,-1) # 2, num_classes
        policy_target=torch.stack([torch.zeros(self.policies_len).float(),policy_index],0) # 2, policy_len
        target=torch.cat([target,policy_target],1) # 2, num_classes+policy_len
        sample=torch.stack([
            sample,
            new_sample,
        ])
        return sample,target,supp_dict
@register_dataset_wrapper
class PolicyDatasetC100(BaseDatasetWrapper):
    """CIFAR-100 variant of PolicyDataset (100-way one-hot targets).

    Identical pipeline: returns (clean, augmented) image pairs plus a
    target of shape (2, 100 + 14) combining the class target with the
    fired-sub-policy indicator row; optional CutMix on the base image.
    Uses a different fixed set of 14 sub-policies than PolicyDataset.
    """
    def __init__(self,org_dataset,mixcut=False,mixcut_prob=0.1,beta=0.3):
        super(PolicyDatasetC100, self).__init__(org_dataset)
        # Take over the transform so the wrapped dataset yields raw PIL images.
        self.transform=org_dataset.transform
        org_dataset.transform=None
        self.policies = [
            SubPolicy(0.5,'autocontrast', 2),
            SubPolicy(0.5, 'contrast', 3),
            SubPolicy(0.5, 'posterize', 0),
            SubPolicy(0.5, 'solarize', 4),
            SubPolicy(0.5, 'translateY', 8),
            SubPolicy(0.5, 'shearX', 5),
            SubPolicy(0.5, 'color', 3),
            SubPolicy(0.5, 'shearY', 0),
            SubPolicy(0.5, 'translateX', 1),
            SubPolicy(0.5, 'sharpness', 5),
            SubPolicy(0.5, 'invert', 4),
            SubPolicy(0.5, 'color', 4),
            SubPolicy(0.5, 'equalize', 8),
            SubPolicy(0.5, 'rotate', 3),
        ]
        self.beta=beta                 # Beta(beta, beta) for the CutMix lambda
        self.mixcut_prob=mixcut_prob   # probability of applying CutMix
        self.mixcut=mixcut
        self.policies_len=len(self.policies)
    def __getitem__(self, index):
        sample, target_a, supp_dict = super(PolicyDatasetC100, self).__getitem__(index)
        sample=self.transform(sample).detach()
        r = np.random.rand(1)
        if self.mixcut and self.beta > 0 and r < self.mixcut_prob:
            # CutMix: paste a random box from another randomly chosen sample.
            lam = np.random.beta(self.beta, self.beta)
            rand_index=random.randint(0,len(self)-1)
            rsample,target_b, supp_dict = super(PolicyDatasetC100, self).__getitem__(rand_index)
            rsample = self.transform(rsample)
            bbx1, bby1, bbx2, bby2 = rand_bbox(sample.size(), lam)
            sample[ :, bbx1:bbx2, bby1:bby2] = rsample[ :, bbx1:bbx2, bby1:bby2]
            # adjust lambda to exactly match the pasted pixel ratio
            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (sample.size()[-1] * sample.size()[-2]))
            target=F.one_hot(torch.LongTensor([target_a]),100)*lam+ F.one_hot(torch.LongTensor([target_b]),100)*(1.-lam)
        else:
            target=target_a
        # Build the augmented copy from a PIL rendering of the (possibly mixed) tensor.
        new_sample=transforms.ToPILImage()(sample)
        policy_index = torch.zeros(self.policies_len).float()
        for i in range(self.policies_len):
            new_sample,label=self.policies[i](new_sample)
            policy_index[i]=label
        new_sample=self.transform(new_sample).detach()
        if not isinstance(target,torch.Tensor):
            # Non-CutMix path: one-hot encode the integer label.
            target=F.one_hot(torch.LongTensor([target]),100)
        target=target.expand(2,-1) # 2, num_classes
        policy_target=torch.stack([torch.zeros(self.policies_len).float(),policy_index],0) # 2, policy_len
        target=torch.cat([target,policy_target],1) # 2, num_classes+policy_len
        sample=torch.stack([
            sample,
            new_sample,
        ])
        return sample,target,supp_dict
def policy_classes_compute(hot):
    """Encode a binary indicator vector as an integer class id.

    Bit i of `hot` contributes 2**i (little-endian bit order); returns a
    scalar long tensor.
    """
    num_bits = hot.shape[0]
    place_values = torch.pow(2, torch.arange(num_bits))
    return torch.dot(hot, place_values.to(hot.dtype)).long()
@register_dataset_wrapper
class ICPDataset(BaseDatasetWrapper):
    """Wrapper pairing each sample with a policy-augmented copy and an
    identity-aware target.

    ``__getitem__`` returns a (2, C, H, W) stack of the clean and the
    augmented image, and a (2, 1 + 1 + 14) target whose columns are
    [sample index, class label, 14 fired-sub-policy flags]; row 0 is the
    clean image (all-zero flags), row 1 the augmented one.
    """
    def __init__(self,org_dataset):
        super(ICPDataset, self).__init__(org_dataset)
        # Take over the transform so the wrapped dataset yields raw PIL images.
        self.transform=org_dataset.transform
        org_dataset.transform=None
        # Fixed AutoAugment-style sub-policies, each fired with p=0.5.
        self.policies = [
            SubPolicy(0.5, 'invert', 7),
            SubPolicy(0.5, 'rotate', 2),
            SubPolicy(0.5, 'sharpness', 1),
            SubPolicy(0.5, 'shearY', 8),
            SubPolicy(0.5, 'autocontrast', 8),
            SubPolicy(0.5, 'color', 3),
            SubPolicy(0.5, 'sharpness', 9),
            SubPolicy(0.5, 'equalize', 5),
            SubPolicy(0.5, 'contrast', 7),
            SubPolicy(0.5, 'translateY', 3),
            SubPolicy(0.5, 'brightness',6),
            SubPolicy(0.5, 'solarize', 2),
            SubPolicy(0.5, 'translateX',3),
            SubPolicy(0.5, 'shearX', 8),
        ]
        self.policies_len=len(self.policies)
    def __getitem__(self, index):
        sample,target,supp_dict=super(ICPDataset, self).__getitem__(index)
        policy_index=torch.zeros(self.policies_len).float()
        # Run the PIL sample through every sub-policy, recording which fired.
        new_sample=sample
        for i in range(self.policies_len):
            new_sample,label=self.policies[i](new_sample)
            policy_index[i]=label
        new_sample=self.transform(new_sample).detach()
        sample=self.transform(sample).detach()
        if isinstance(target,torch.Tensor) and target.ndim==2 and target.shape[-1]!=1:
            # Collapse a one-hot / soft target matrix back to class indices.
            target=target.argmax(1)
        elif not isinstance(target,torch.Tensor):
            target=torch.LongTensor([target])
        identity_target=torch.LongTensor([index]).unsqueeze(0).expand(2,-1)
        classes_target=target.unsqueeze(0).expand(2,-1) # 2,1
        policy_target = torch.stack([torch.zeros(self.policies_len).int(), policy_index.int()], 0) # 2, policy_len
        target=torch.cat([identity_target,classes_target,policy_target],1) # 2, 2+policy_len
        sample=torch.stack([
            sample,
            new_sample,
        ])
        return sample,target,supp_dict
| 12,182 | 4,433 |
# Fix: without parentheses around the lambda, `x*x (10)` parses as
# `x * x(10)`, so the old code printed the lambda object's repr instead
# of a value.  Wrapping the lambda makes it an immediately-invoked call.
print((lambda x: x * x)(10))  # prints 100
from app import crud
from app.db.database import get_default_bucket
from app.models.config import ITEM_DOC_TYPE
from app.models.item import ItemCreate, ItemUpdate
from app.tests.utils.user import create_random_user
from app.tests.utils.utils import random_lower_string
def test_create_item():
    """Upserting a new item stores every supplied field."""
    item_title = random_lower_string()
    item_description = random_lower_string()
    new_id = crud.utils.generate_new_id()
    payload = ItemCreate(title=item_title, description=item_description)
    bucket = get_default_bucket()
    owner = create_random_user()
    created = crud.item.upsert(
        bucket=bucket, id=new_id, doc_in=payload, owner_username=owner.username, persist_to=1
    )
    assert created.id == new_id
    assert created.type == ITEM_DOC_TYPE
    assert created.title == item_title
    assert created.description == item_description
    assert created.owner_username == owner.username
def test_get_item():
    """An upserted item can be fetched back unchanged."""
    item_title = random_lower_string()
    item_description = random_lower_string()
    new_id = crud.utils.generate_new_id()
    payload = ItemCreate(title=item_title, description=item_description)
    bucket = get_default_bucket()
    owner = create_random_user()
    created = crud.item.upsert(
        bucket=bucket, id=new_id, doc_in=payload, owner_username=owner.username, persist_to=1
    )
    fetched = crud.item.get(bucket=bucket, id=new_id)
    assert created.id == fetched.id
    assert created.title == fetched.title
    assert created.description == fetched.description
    assert created.owner_username == fetched.owner_username
def test_update_item():
    """Updating only the description leaves the other fields intact."""
    item_title = random_lower_string()
    original_description = random_lower_string()
    new_id = crud.utils.generate_new_id()
    payload = ItemCreate(title=item_title, description=original_description)
    bucket = get_default_bucket()
    owner = create_random_user()
    created = crud.item.upsert(
        bucket=bucket, id=new_id, doc_in=payload, owner_username=owner.username, persist_to=1
    )
    changed_description = random_lower_string()
    update_payload = ItemUpdate(description=changed_description)
    updated = crud.item.update(
        bucket=bucket,
        id=new_id,
        doc_in=update_payload,
        owner_username=created.owner_username,
        persist_to=1,
    )
    assert created.id == updated.id
    assert created.title == updated.title
    # The pre-update object keeps the old description; the result has the new one.
    assert created.description == original_description
    assert updated.description == changed_description
    assert created.owner_username == updated.owner_username
def test_delete_item():
    """Removing an item returns its last state and makes it unfetchable."""
    item_title = random_lower_string()
    item_description = random_lower_string()
    new_id = crud.utils.generate_new_id()
    payload = ItemCreate(title=item_title, description=item_description)
    bucket = get_default_bucket()
    owner = create_random_user()
    crud.item.upsert(
        bucket=bucket, id=new_id, doc_in=payload, owner_username=owner.username, persist_to=1
    )
    removed = crud.item.remove(bucket=bucket, id=new_id, persist_to=1)
    refetched = crud.item.get(bucket=bucket, id=new_id)
    assert refetched is None
    # remove() echoes the deleted document's fields.
    assert removed.id == new_id
    assert removed.title == item_title
    assert removed.description == item_description
    assert removed.owner_username == owner.username
| 3,029 | 943 |
MODEL_PATH = "./model/model.pt"
| 32 | 16 |
##**************************************************************************************##
## Step1. Load Packages and Input Data ##
##**************************************************************************************##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm,metrics
from sklearn.svm import SVC,LinearSVC
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.metrics import matthews_corrcoef,auc, roc_curve,plot_roc_curve, plot_precision_recall_curve,classification_report, confusion_matrix,average_precision_score, precision_recall_curve
from pandas.core.frame import DataFrame
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import imblearn
from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
############################# Step2: input data processing #####################
## Giessen cohort: FCGR (200x200) genome representations + binary CIP
## resistance phenotype.  NOTE(review): sample counts (900 / 1496) and the
## 200x200 FCGR size are hard-coded throughout -- confirm against the data.
gi_data = np.load("/gi_CIP_FCGR200/alt_cnn_input.npy")
gi_pheno = pd.read_csv("CIP_gi_pheno.csv",index_col=0)
gi_data.shape,gi_pheno.shape
# Flatten each 200x200 FCGR matrix into a 40000-dim feature vector.
gi_data2 = gi_data.reshape(900,40000)
gi_pheno2 = gi_pheno.values
gi_pheno3 = gi_pheno2.reshape(900,)
gi_data2.shape,gi_pheno3.shape
X = gi_data2
y = gi_pheno3
X.shape,y.shape
## Public cohort: used as the external evaluation set.
pub_data = np.load("/pub_CIP_FCGR200/alt_cnn_input.npy")
pub_pheno = pd.read_csv("CIP_pub_pheno.csv",index_col=0)
pub_data.shape
pub_data2 = pub_data.reshape(1496,40000)
pub_pheno2 = pub_pheno.values
pub_pheno3 = pub_pheno2.reshape(1496,)
pub_data2.shape,pub_pheno3.shape
x_test = pub_data2
y_test = pub_pheno3
# Class-balanced version of the public set via random under-sampling.
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
##**************************************************************************************##
##                  Step2. Training and evaluation of RF, LR, SVM                       ##
##**************************************************************************************##
## Stratified 5-fold cross validation over the Giessen cohort.
cv = StratifiedKFold(n_splits=5)
rf = RandomForestClassifier(n_estimators=200, random_state=0)
lr = LogisticRegression(solver = 'lbfgs',max_iter=1000)
# NOTE(review): this rebinds the name of the imported `sklearn.svm` module.
svm = SVC(kernel='linear', probability=True)
## Per-fold accumulators for ROC/PR curves and evaluation metrics.
rf_tprs = []
rf_prs = []
rf_roc_aucs = []
rf_pr_aucs = []
rf_f1_matrix_out = []
rf_f1_report_out = []
rf_MCC_out = []
rf_pred_cls_out = []
rf_pred_prob_out = []
rf_y_test_out = []
rf_mean_fpr = np.linspace(0, 1, 100)
rf_mean_recall = np.linspace(0, 1, 100)
## Logistic-regression accumulators.
lr_tprs = []
lr_prs = []
lr_roc_aucs = []
lr_pr_aucs = []
lr_f1_matrix_out = []
lr_f1_report_out = []
lr_MCC_out = []
lr_pred_cls_out = []
lr_pred_prob_out = []
lr_y_test_out = []
lr_mean_fpr = np.linspace(0, 1, 100)
lr_mean_recall = np.linspace(0, 1, 100)
## SVM accumulators.
svm_tprs = []
svm_prs = []
svm_roc_aucs = []
svm_pr_aucs = []
svm_f1_matrix_out = []
svm_f1_report_out = []
svm_MCC_out = []
svm_pred_cls_out = []
svm_pred_prob_out = []
svm_y_test_out = []
svm_mean_fpr = np.linspace(0, 1, 100)
svm_mean_recall = np.linspace(0, 1, 100)
# One ROC axis per model: RF, LR, SVM.
fig,[ax1,ax2,ax3] = plt.subplots(nrows=1,ncols=3,figsize=(15, 4))
# Cross-validation loop: each fold re-fits all three models and records
# ROC data plus per-fold metrics.
# NOTE(review): plot_roc_curve was deprecated in scikit-learn 1.0 and
# removed in 1.2; on newer versions use RocCurveDisplay.from_estimator.
for i, (train, test) in enumerate(cv.split(X, y)):
    ## Random forest: fit on the fold's training split.
    rf.fit(X[train], y[train])
    ## ROC curve for this fold, drawn onto the shared RF axis.
    rf_viz = plot_roc_curve(rf, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax1)
    rf_interp_tpr = np.interp(rf_mean_fpr, rf_viz.fpr, rf_viz.tpr)
    rf_interp_tpr[0] = 0.0
    rf_tprs.append(rf_interp_tpr)
    rf_roc_aucs.append(rf_viz.roc_auc)
    ## Fold-level evaluation metrics.
    rf_pred_cls = rf.predict(X[test])
    rf_pred_prob = rf.predict_proba(X[test])[:,1]
    rf_f1_matrix = confusion_matrix(y[test],rf_pred_cls)
    rf_f1_report = classification_report(y[test],rf_pred_cls)
    rf_MCC = matthews_corrcoef(y[test],rf_pred_cls)
    ### Accumulate per-fold results.
    rf_pred_cls_out.append(rf_pred_cls)
    rf_pred_prob_out.append(rf_pred_prob)
    rf_f1_matrix_out.append(rf_f1_matrix)
    rf_f1_report_out.append(rf_f1_report)
    rf_MCC_out.append(rf_MCC)
    rf_y_test_out.append(y[test])
    ## Logistic regression: same protocol.
    lr.fit(X[train], y[train])
    lr_viz = plot_roc_curve(lr, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax2)
    lr_interp_tpr = np.interp(lr_mean_fpr, lr_viz.fpr, lr_viz.tpr)
    lr_interp_tpr[0] = 0.0
    lr_tprs.append(lr_interp_tpr)
    lr_roc_aucs.append(lr_viz.roc_auc)
    lr_pred_cls = lr.predict(X[test])
    lr_pred_prob = lr.predict_proba(X[test])[:,1]
    lr_f1_matrix = confusion_matrix(y[test],lr_pred_cls)
    lr_f1_report = classification_report(y[test],lr_pred_cls)
    lr_MCC = matthews_corrcoef(y[test],lr_pred_cls)
    lr_pred_cls_out.append(lr_pred_cls)
    lr_pred_prob_out.append(lr_pred_prob)
    lr_f1_matrix_out.append(lr_f1_matrix)
    lr_f1_report_out.append(lr_f1_report)
    lr_MCC_out.append(lr_MCC)
    lr_y_test_out.append(y[test])
    ## Linear SVM: same protocol.
    svm.fit(X[train], y[train])
    svm_viz = plot_roc_curve(svm, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax3)
    svm_interp_tpr = np.interp(svm_mean_fpr, svm_viz.fpr, svm_viz.tpr)
    svm_interp_tpr[0] = 0.0
    svm_tprs.append(svm_interp_tpr)
    svm_roc_aucs.append(svm_viz.roc_auc)
    svm_pred_cls = svm.predict(X[test])
    svm_pred_prob = svm.predict_proba(X[test])[:,1]
    svm_f1_matrix = confusion_matrix(y[test],svm_pred_cls)
    svm_f1_report = classification_report(y[test],svm_pred_cls)
    svm_MCC = matthews_corrcoef(y[test],svm_pred_cls)
    svm_pred_cls_out.append(svm_pred_cls)
    svm_pred_prob_out.append(svm_pred_prob)
    svm_f1_matrix_out.append(svm_f1_matrix)
    svm_f1_report_out.append(svm_f1_report)
    svm_MCC_out.append(svm_MCC)
    svm_y_test_out.append(y[test])
#### Persist per-fold predicted probabilities and labels for later plotting.
np.save("CIP_gi_FCGR_RF_y_pred_prob_out.npy",rf_pred_prob_out)
np.save("CIP_gi_FCGR_RF_y_test_out.npy",rf_y_test_out)
np.save("CIP_gi_FCGR_LR_y_pred_prob_out.npy",lr_pred_prob_out)
np.save("CIP_gi_FCGR_LR_y_test_out.npy",lr_y_test_out)
np.save("CIP_gi_FCGR_SVM_y_pred_prob_out.npy",svm_pred_prob_out)
np.save("CIP_gi_FCGR_SVM_y_test_out.npy",svm_y_test_out)
#### External evaluation on the full public set.
#### NOTE(review): the models keep only the fit from the LAST CV fold here;
#### confirm that evaluating the final-fold model is intended.
rf_eva_pred_prob = rf.predict_proba(pub_data2)[:,1]
lr_eva_pred_prob = lr.predict_proba(pub_data2)[:,1]
svm_eva_pred_prob = svm.predict_proba(pub_data2)[:,1]
np.save("CIP_FCGR_RF_test_y_pred_prob.npy",rf_eva_pred_prob)
np.save("CIP_FCGR_LR_test_y_pred_prob.npy",lr_eva_pred_prob)
np.save("CIP_FCGR_SVM_test_y_pred_prob.npy",svm_eva_pred_prob)
np.save("CIP_FCGR_test_y_out.npy",pub_pheno3)
#### External evaluation on the class-balanced (under-sampled) public set.
#pub_x_under,pub_y_under
rf_eva_under_pred_prob = rf.predict_proba(pub_x_under)[:,1]
lr_eva_under_pred_prob = lr.predict_proba(pub_x_under)[:,1]
svm_eva_under_pred_prob = svm.predict_proba(pub_x_under)[:,1]
##**************************************************************************************##
## Step3. Training and evaluation of CNN ##
##**************************************************************************************##
############################# Step1: load pacakge #####################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from keras.utils import to_categorical
from keras.models import Sequential
from tensorflow.keras import activations
from sklearn.model_selection import KFold,StratifiedKFold
from keras.layers import Dense,Dropout, Flatten, Conv1D, Conv2D, MaxPooling1D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers import BatchNormalization
############################# Step2: load metrics function #####################
### F1 score, precision, recall and accuracy metrics
def recall_m(y_true, y_pred):
    """Batch-level recall computed with Keras backend ops: TP / (TP + FN)."""
    # Round/clip turn soft probabilities into {0, 1} indicators.
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon() guards against division by zero when a batch has no positives.
    return tp / (actual_positives + K.epsilon())
def precision_m(y_true, y_pred):
    """Batch-level precision computed with Keras backend ops: TP / (TP + FP)."""
    # Round/clip turn soft probabilities into {0, 1} indicators.
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon() guards against division by zero when nothing is predicted positive.
    return tp / (predicted_positives + K.epsilon())
def f1_m(y_true, y_pred):
    """Batch-level F1 score: harmonic mean of precision_m and recall_m."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * (p * r) / (p + r + K.epsilon())
############################# Step3: input data processing #####################
# REPL-style shape echoes (no effect when run as a script; useful in a notebook).
X.shape,y.shape,pub_data2.shape,pub_pheno3.shape
#((900, 40000),(900,), (1496, 40000), (1496,))
# 80/20 train/test split, fixed seed for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=123)
x_train.shape,x_test.shape,y_train.shape,y_test.shape
#((720, 40000), (180, 40000), (720,), (180,))
# Reshape flat 40000-feature rows into 200x200 single-channel images for the CNN.
# NOTE(review): sample counts (720/180/1496) are hard-coded to the dataset above.
inputs = x_train.reshape(720,200,200,1)
inputs = inputs.astype('float32')
targets = to_categorical(y_train)
inputs.shape,targets.shape
x_test2 = x_test.reshape(180,200,200,1)
x_test2 = x_test2.astype('float32')
y_test2 = to_categorical(y_test)
pub_x_test = pub_data2.reshape(1496,200,200,1)
pub_x_test = pub_x_test.astype('float32')
pub_y_test = pub_pheno3
############################# Step4: model training #####################
# Training hyperparameters and cross-validation setup.
batch_size = 8
no_classes = 2
no_epochs = 50
verbosity = 1
num_folds = 5
# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True)
# K-fold Cross Validation model evaluation
fold_no = 1
model_history=[]
# A fresh CNN is built and trained per fold; only per-fold history is kept
# (the `model` variable after the loop holds the last fold's model).
for train, test in kfold.split(inputs, targets):
  model = Sequential()
  model.add(Conv2D(filters=8, kernel_size=3,activation='relu', input_shape=(200,200,1)))
  model.add(BatchNormalization())
  model.add(Conv2D(filters=8, kernel_size=3, padding='same', activation='relu'))
  #model.add(BatchNormalization())
  model.add(MaxPooling2D(pool_size=(2)))
  model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
  model.add(BatchNormalization())
  model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
  #model.add(BatchNormalization())
  model.add(MaxPooling2D(pool_size=(2)))
  model.add(Flatten())
  model.add(Dense(128, activation='relu'))
  model.add(Dropout(0.2))
  # Two-class softmax head matching the one-hot targets from to_categorical.
  model.add(Dense(2,activation='softmax'))
  # Compile the model
  model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['acc',f1_m,precision_m, recall_m])
  # Generate a print
  print('--------------------------------')
  print(f'Training for fold {fold_no} ...')
  ## checkpoint for saving model
  # NOTE(review): the metric is registered as 'acc', so the validation metric is
  # logged as 'val_acc' — confirm monitor='val_accuracy' actually matches a logged
  # key in this Keras version, otherwise the checkpoint silently never saves.
  # Also the filepath is shared by all folds, so later folds overwrite earlier ones.
  filepath="CIP_gi_FCGR_CNN_weights.best.hdf5"
  checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True,mode='max')
  callbacks_list = [checkpoint]
  # Fit data to model
  train_model = model.fit(inputs[train], targets[train],batch_size=batch_size,epochs=no_epochs,callbacks=callbacks_list,verbose=verbosity,validation_data=(inputs[test], targets[test]))
  model_history.append(train_model.history)
  # Increase fold number
  fold_no = fold_no + 1
########## (2) save model weights of the last-trained fold
model.save_weights('CIP_gi_FCGR_CNN.model.h5')
# save model history (one row per fold) to CSV
from pandas.core.frame import DataFrame
model_out = DataFrame(model_history)
model_out.to_csv("CIP_gi_FCGR_CNN_model_history_out.csv",index=False)
############# Evaluation on pub data
### ROC
# NOTE(review): Sequential.predict_proba was removed in TF2-era Keras — confirm
# the installed version still provides it, or switch to model.predict.
y_pred_keras = model.predict_proba(pub_x_test)
### evaluation for under-sample
# NOTE(review): RandomUnderSampler (imblearn) and Counter are imported elsewhere
# in this script — verify those imports precede this point.
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
# 534 = balanced sample count after under-sampling; hard-coded to this dataset.
pub_x_under = pub_x_under.reshape(534,200,200,1)
pub_x_under = pub_x_under.astype('float32')
# NOTE(review): this overwrites the ROC predictions computed above on pub_x_test.
y_pred_keras = model.predict_proba(pub_x_under)
| 12,271 | 5,181 |
name = "testspeed"
from time import time
from sys import argv
from os import system
from shlex import quote

# Time how long `python <script>` takes end-to-end.
if len(argv) < 2:
    # Previously a missing argument crashed with an IndexError traceback.
    print('usage: %s <script.py>' % argv[0])
else:
    tic = time()
    # quote() prevents shell interpretation of spaces/metacharacters in the path.
    system('python %s' % quote(argv[1]))
    toc = time()
    print('used %s seconds' % (toc - tic))
| 189 | 70 |
from osgeo import ogr
from typing import List, Union
import math
import os
import warnings
import numpy as np
from gdalhelpers.checks import values_checks, datasource_checks, layer_checks
from gdalhelpers.helpers import layer_helpers, datasource_helpers, geometry_helpers
def create_points_at_angles_distance_in_direction(start_points: ogr.DataSource,
                                                  main_direction_point: ogr.DataSource,
                                                  distance: Union[int, float] = 10,
                                                  angle_offset: Union[int, float] = 10,
                                                  angle_density: Union[int, float] = 1,
                                                  angles_specification_degrees: bool = True,
                                                  input_points_id_field: str = None) -> ogr.DataSource:
    """
    Function that generates for every `Feature` in `start_points` set of points at specified `distance` in direction of
    `main_direction_point`.
    Parameters
    ----------
    start_points : ogr.DataSource
        Points to generate new points around. Can be of geometrical types: `ogr.wkbPoint, ogr.wkbPoint25D,
        ogr.wkbPointM, ogr.wkbPointZM`.
    main_direction_point : ogr.DataSource
        Layer with single feature that specifies the direction in which the new points are generated.
    distance : float or int
        Distance at which the new points are generated. Default value is `10` and it is specified in units of layer
        `start_points`.
    angle_offset : float or int
        Specification of angle offset on each side from `main_direction_point`. The points are generated in interval
        `[main_angle - angle_offset, main_angle + angle_offset]`, where `main_angle` is angle between specific feature
        of `start_points` and `main_direction_point`. Default value is `10`, which gives overall angle width of `20`.
    angle_density : float or int
        How often points are generated in interval given by `angle_offset`. Default value is `1`.
    angles_specification_degrees : bool
        Are the angles specified in degrees? Default value is `True`, if `False` the values are in radians.
    input_points_id_field : str
        Name of ID (or other) field from `start_points` that should be carried over to the resulting DataSource.
    Returns
    -------
    ogr.DataSource
        Virtual `ogr.DataSource` in memory with one layer (named `points`) containing the points.
    Raises
    ------
    Various Errors can be raised while checking for validity of inputs.
    Warns
    -------
    UserWarning
        If the field of given name (`input_points_id_field`) is not present or if it is not of type `ogr.OFTInteger`.
    """
    output_points_ds = datasource_helpers.create_temp_gpkg_datasource()

    # Validate inputs before doing any work.
    datasource_checks.check_is_ogr_datasource(start_points, "start_points")
    datasource_checks.check_is_ogr_datasource(main_direction_point, "main_direction_point")
    values_checks.check_value_is_zero_or_positive(distance, "distance")
    values_checks.check_number(angle_offset, "angle_offset")
    values_checks.check_number(angle_density, "angle_density")

    if angles_specification_degrees:
        # Convert degrees to radians; the geometry helpers below work in radians.
        angle_offset = ((2*math.pi)/360)*angle_offset
        angle_density = ((2*math.pi)/360)*angle_density

    input_points_layer = start_points.GetLayer()
    layer_checks.check_is_layer_geometry_type(input_points_layer, "input_points_layer", [ogr.wkbPoint, ogr.wkbPoint25D,
                                                                                         ogr.wkbPointM, ogr.wkbPointZM])
    input_points_srs = input_points_layer.GetSpatialRef()

    main_point_layer = main_direction_point.GetLayer()
    layer_checks.check_is_layer_geometry_type(main_point_layer, "main_point_layer", [ogr.wkbPoint, ogr.wkbPoint25D,
                                                                                     ogr.wkbPointM, ogr.wkbPointZM])
    layer_checks.check_number_of_features(main_point_layer, "main_point_layer", 1)

    if input_points_id_field is not None:
        if not layer_checks.does_field_exist(input_points_layer, input_points_id_field):
            # BUGFIX: warn *before* clearing the field name; previously the name
            # was set to None first, so the message always read "Field None ...".
            warnings.warn(
                "Field {0} does not exist in {1}. Defaulting to FID.".format(input_points_id_field,
                                                                             os.path.basename(start_points.GetDescription()))
            )
            input_points_id_field = None
        elif not layer_checks.is_field_of_type(input_points_layer, input_points_id_field, ogr.OFTInteger):
            # Same ordering fix for the wrong-type warning.
            warnings.warn(
                "Field {0} in {1} is not `Integer`. Defaulting to FID.".format(input_points_id_field,
                                                                               os.path.basename(start_points.GetDescription()))
            )
            input_points_id_field = None

    # Output field name distinguishes whether the carried-over value is an FID or a user ID.
    if input_points_id_field is None:
        field_name_id = "input_point_FID"
    else:
        field_name_id = "input_point_ID"
    field_name_angle = "angle"

    layer_helpers.create_layer_points(output_points_ds, input_points_srs, "points")
    output_points_layer = output_points_ds.GetLayer()
    fields = {field_name_id: ogr.OFTInteger,
              field_name_angle: ogr.OFTReal}
    layer_helpers.add_fields_from_dict(output_points_layer, fields)
    output_points_def = output_points_layer.GetLayerDefn()

    # main_point_layer is guaranteed above to hold exactly one feature.
    for main_feature in main_point_layer:
        main_geom = main_feature.GetGeometryRef()
        for feature in input_points_layer:
            geom = feature.GetGeometryRef()
            if input_points_id_field is None:
                f_id = feature.GetFID()
            else:
                f_id = feature.GetField(input_points_id_field)
            main_angle = geometry_helpers.angle_points(geom, main_geom)
            # nextafter makes the upper bound inclusive despite float rounding.
            # (np.inf replaces the np.Inf alias, which was removed in NumPy 2.0.)
            angles = np.arange(main_angle - angle_offset,
                               np.nextafter(main_angle + angle_offset, np.inf),
                               step=angle_density)
            for angle in angles:
                p = geometry_helpers.point_at_angle_distance(geom, distance, angle)
                output_point_feature = ogr.Feature(output_points_def)
                output_point_feature.SetGeometry(p)
                values = {field_name_id: f_id,
                          field_name_angle: angle}
                layer_helpers.add_values_from_dict(output_point_feature, values)
                output_points_layer.CreateFeature(output_point_feature)
    return output_points_ds
| 6,660 | 1,910 |
import unittest
# Star-import the project's test modules so unittest.main() discovers their
# TestCase classes in this module's namespace.
from rotor_tests import *
from rotor_settings_tests import *
from reflector_tests import *
from enigma_tests import *
# Run every collected test when this file is executed.
unittest.main()
| 151 | 46 |
from werkzeug import find_modules, import_string
from forums import routes
from forums.modifications import modify_core
def init_app(app):
    """Wire the forums package into the Flask app.

    Imports every module under `forums` (so their import-time registrations
    run), registers the routes blueprint and applies the core modifications.

    NOTE(review): `find_modules`/`import_string` at the `werkzeug` top level
    only exist in older werkzeug releases — confirm the pinned version.
    """
    with app.app_context():
        for name in find_modules('forums', recursive=True):
            import_string(name)
        app.register_blueprint(routes.bp)
        modify_core()
| 320 | 95 |
"""Routes related to ingesting data from the robot"""
import os
import logging
from pathlib import Path
from flask import Blueprint, request, current_app
from pydantic import ValidationError
from werkzeug.utils import secure_filename
from polybot.models import UVVisExperiment
# Module-level logger and the blueprint mounted at /ingest by the app factory.
logger = logging.getLogger(__name__)
bp = Blueprint('ingest', __name__, url_prefix='/ingest')
@bp.route('/', methods=('POST',))
def upload_data():
    """Intake a file from the robot and save it to disk"""

    # The robot must attach the measurement as a multipart "file" field.
    uploaded = request.files.get('file')
    if uploaded is None:
        logger.info('Bad request, missing the file')
        return {
            'success': False,
            'error': 'File not included in the message'
        }

    # The remaining form fields must validate as a UV-Vis experiment record.
    try:
        metadata = UVVisExperiment.parse_obj(request.form)
    except ValidationError as exc:
        logger.info('Bad request, failed validation')
        return {
            'success': False,
            'error': str(exc)
        }

    # Persist under a sanitized name derived from the experiment metadata.
    upload_dir = Path(current_app.config['UPLOAD_FOLDER'])
    os.makedirs(upload_dir, exist_ok=True)
    output_path = upload_dir / secure_filename(f'{metadata.name}.csv')
    logger.info(f'Saving file to: {output_path}')
    uploaded.save(output_path)
    return {'success': True, 'filename': output_path.name}
| 1,380 | 407 |
# Dependency-injection module: binds the Api singleton into BusinessLogic.
class AppModule:
    @singleton
    @provider
    def provide_business_logic(self, api: Api) -> BusinessLogic:
        # The injector resolves `api` via provide_api below.
        return BusinessLogic(api=api)

    @singleton
    @provider
    def provide_api(self) -> Api:
        # BUGFIX: removed a stray bare `configuration` expression that was left
        # here; evaluating that undefined name raised NameError at call time.
        return Api()
if __name__ == '__main__':
    # Build the injector from the module above and run the wired-up logic.
    injector = Injector(AppModule())
    logic = injector.get(BusinessLogic)
    logic.do_stuff()
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.web import UIModule
class BaseUIModule(UIModule):
    """Base class for all UiModule so we can centralize functionality"""
    def _is_local(self):
        # A request counts as local when its Host header targets loopback.
        host = self.request.headers['host']
        return 'localhost' in host or '127.0.0.1' in host
| 639 | 167 |
# This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import Mock
from test._common import unittest
from test.helper import TestHelper
from beets.library import Item
from beetsplug.mpdstats import MPDStats
class MPDStatsTest(unittest.TestCase, TestHelper):
    # Standard beets test fixture: temporary library plus the mpdstats plugin.
    def setUp(self):
        self.setup_beets()
        self.load_plugins('mpdstats')
    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()
    def test_update_rating(self):
        # A minimal library item; the empty path exercises the no-file case.
        item = Item(title='title', path='', id=1)
        item.add(self.lib)
        log = Mock()
        mpdstats = MPDStats(self.lib, log)
        # update_rating returns a falsy value both for a real item and for None.
        self.assertFalse(mpdstats.update_rating(item, True))
        self.assertFalse(mpdstats.update_rating(None, True))
def suite():
    """Collect this module's tests (entry point used by the test runner)."""
    return unittest.TestLoader().loadTestsFromName(__name__)

if __name__ == '__main__':
    # BUGFIX: the guard previously compared against b'__main__'; on Python 3
    # __name__ is a str, so the bytes literal never matched and running the
    # file directly silently did nothing.
    unittest.main(defaultTest='suite')
| 1,572 | 457 |
import tools.libtcod.libtcodpy as libtcod
class Console(object):
    """Wrapper around an off-screen libtcod console.

    `x` and `y` are two-element ranges (order-insensitive); the console's
    width/height are the absolute spans of those ranges, and its position is
    the minimum of each range.
    """
    def __init__(self, x=(0, 0), y=(0, 0), parent_console=None):
        # BUGFIX: defaults were mutable lists ([0, 0]); shared tuple defaults
        # are safe and behave identically for the min/max/indexing used here.
        self._settings = {
            "x": x,
            "y": y,
            "Parent_Console": parent_console
        }
        # Span of each range, insensitive to the order of the two endpoints.
        self._settings["Width"] = int(max(self._settings["x"][1], self._settings["x"][0]) - min(self._settings["x"][1], self._settings["x"][0]))
        self._settings["Height"] = int(max(self._settings["y"][1], self._settings["y"][0]) - min(self._settings["y"][1], self._settings["y"][0]))
        self._settings["Console"] = libtcod.console_new(self._settings["Width"], self._settings["Height"])
    @property
    def x(self):
        # Left edge: the smaller endpoint of the x range.
        return min(self._settings["x"][1], self._settings["x"][0])
    @property
    def y(self):
        # Top edge: the smaller endpoint of the y range.
        return min(self._settings["y"][1], self._settings["y"][0])
    @property
    def height(self):
        return self._settings["Height"]
    @property
    def width(self):
        return self._settings["Width"]
    @property
    def console(self):
        return self._settings["Console"]
    @property
    def parent_console(self):
        return self._settings["Parent_Console"]
    # destination_console | The destination to be blitted to (defaults to the parent's console).
    # foregroundAlpha, backgroundAlpha | Normalized alpha transparency of the blitted console.
    def blit(self, destination_console = None, foregroundAlpha = 1.0, backgroundAlpha = 1.0):
        # NOTE(review): with no destination and no parent_console this raises
        # AttributeError on None — confirm callers always supply one of the two.
        destination_console = destination_console or self.parent_console.console
        libtcod.console_blit(self._settings["Console"], 0, 0, self.width, self.height, destination_console, self.x, self.y, foregroundAlpha, backgroundAlpha)
# MINLP written by GAMS Convert at 04/21/18 13:54:11
#
# Equation counts
# Total E G L N X C B
# 497 61 388 48 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 292 217 75 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1283 1148 135 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x46 = Var(within=Reals,bounds=(1.92,2.02),initialize=1.92)
m.x47 = Var(within=Reals,bounds=(3.82,4.01333333333333),initialize=3.82)
m.x48 = Var(within=Reals,bounds=(4.53333333333333,4.76),initialize=4.53333333333333)
m.x49 = Var(within=Reals,bounds=(5.39333333333333,5.96),initialize=5.39333333333333)
m.x50 = Var(within=Reals,bounds=(36.3533333333333,42.0933333333333),initialize=36.3533333333333)
m.x51 = Var(within=Reals,bounds=(85.7466666666667,99.28),initialize=85.7466666666667)
m.x52 = Var(within=Reals,bounds=(6.28,6.59333333333333),initialize=6.28)
m.x53 = Var(within=Reals,bounds=(53.4333333333333,61.8666666666667),initialize=53.4333333333333)
m.x54 = Var(within=Reals,bounds=(48.6133333333333,56.2866666666667),initialize=48.6133333333333)
m.x55 = Var(within=Reals,bounds=(33.9533333333333,41.5),initialize=33.9533333333333)
m.x56 = Var(within=Reals,bounds=(53.9666666666667,62.4933333333333),initialize=53.9666666666667)
m.x57 = Var(within=Reals,bounds=(77.0533333333333,80.9066666666667),initialize=77.0533333333333)
m.x58 = Var(within=Reals,bounds=(24.9066666666667,26.1466666666667),initialize=24.9066666666667)
m.x59 = Var(within=Reals,bounds=(36.1866666666667,38),initialize=36.1866666666667)
m.x60 = Var(within=Reals,bounds=(56.3133333333333,62.24),initialize=56.3133333333333)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.x121 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x122 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x123 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x124 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x125 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x126 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x127 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
# ---------------------------------------------------------------------------
# Auto-generated model section (solver-export style): every variable and
# constraint is enumerated explicitly rather than built from indexed sets.
# Do not hand-edit individual coefficients; regenerate from the source model.
# ---------------------------------------------------------------------------

# Continuous auxiliary variables x236-x261; upper bounds repeat in groups of
# three (one bound per underlying item, three parallel copies each).
m.x236 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)

# Binary selection variables b262-b291 (used by the level-linking rows
# c406-c435 and the clause constraints c278-c405 below).
m.b262 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b263 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b264 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b265 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b266 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b267 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b268 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b269 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b270 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b271 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b272 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b273 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b274 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b275 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b276 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b277 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b278 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b279 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b280 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b281 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b282 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b283 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b284 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b285 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b286 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b287 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b288 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b289 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b290 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b291 = Var(within=Binary,bounds=(0,1),initialize=0)

# Objective: minimize the sum of the envelope variables x106-x120
# (each is forced above its x217..x261 group by c436-c480).
m.obj = Objective(expr= m.x106 + m.x107 + m.x108 + m.x109 + m.x110 + m.x111 + m.x112 + m.x113 + m.x114 + m.x115
                      + m.x116 + m.x117 + m.x118 + m.x119 + m.x120, sense=minimize)

# c2-c46: trilinear linking rows.  For i = 1..45:
#   x(216+i) >= x(46 + (i-1)//3) * x(i) * b(60+i)
# i.e. each auxiliary x217..x261 must dominate the product of a capacity
# variable, a continuous variable, and the matching selector binary.
m.c2 = Constraint(expr=-m.x46*m.x1*m.b61 + m.x217 >= 0)
m.c3 = Constraint(expr=-m.x46*m.x2*m.b62 + m.x218 >= 0)
m.c4 = Constraint(expr=-m.x46*m.x3*m.b63 + m.x219 >= 0)
m.c5 = Constraint(expr=-m.x47*m.x4*m.b64 + m.x220 >= 0)
m.c6 = Constraint(expr=-m.x47*m.x5*m.b65 + m.x221 >= 0)
m.c7 = Constraint(expr=-m.x47*m.x6*m.b66 + m.x222 >= 0)
m.c8 = Constraint(expr=-m.x48*m.x7*m.b67 + m.x223 >= 0)
m.c9 = Constraint(expr=-m.x48*m.x8*m.b68 + m.x224 >= 0)
m.c10 = Constraint(expr=-m.x48*m.x9*m.b69 + m.x225 >= 0)
m.c11 = Constraint(expr=-m.x49*m.x10*m.b70 + m.x226 >= 0)
m.c12 = Constraint(expr=-m.x49*m.x11*m.b71 + m.x227 >= 0)
m.c13 = Constraint(expr=-m.x49*m.x12*m.b72 + m.x228 >= 0)
m.c14 = Constraint(expr=-m.x50*m.x13*m.b73 + m.x229 >= 0)
m.c15 = Constraint(expr=-m.x50*m.x14*m.b74 + m.x230 >= 0)
m.c16 = Constraint(expr=-m.x50*m.x15*m.b75 + m.x231 >= 0)
m.c17 = Constraint(expr=-m.x51*m.x16*m.b76 + m.x232 >= 0)
m.c18 = Constraint(expr=-m.x51*m.x17*m.b77 + m.x233 >= 0)
m.c19 = Constraint(expr=-m.x51*m.x18*m.b78 + m.x234 >= 0)
m.c20 = Constraint(expr=-m.x52*m.x19*m.b79 + m.x235 >= 0)
m.c21 = Constraint(expr=-m.x52*m.x20*m.b80 + m.x236 >= 0)
m.c22 = Constraint(expr=-m.x52*m.x21*m.b81 + m.x237 >= 0)
m.c23 = Constraint(expr=-m.x53*m.x22*m.b82 + m.x238 >= 0)
m.c24 = Constraint(expr=-m.x53*m.x23*m.b83 + m.x239 >= 0)
m.c25 = Constraint(expr=-m.x53*m.x24*m.b84 + m.x240 >= 0)
m.c26 = Constraint(expr=-m.x54*m.x25*m.b85 + m.x241 >= 0)
m.c27 = Constraint(expr=-m.x54*m.x26*m.b86 + m.x242 >= 0)
m.c28 = Constraint(expr=-m.x54*m.x27*m.b87 + m.x243 >= 0)
m.c29 = Constraint(expr=-m.x55*m.x28*m.b88 + m.x244 >= 0)
m.c30 = Constraint(expr=-m.x55*m.x29*m.b89 + m.x245 >= 0)
m.c31 = Constraint(expr=-m.x55*m.x30*m.b90 + m.x246 >= 0)
m.c32 = Constraint(expr=-m.x56*m.x31*m.b91 + m.x247 >= 0)
m.c33 = Constraint(expr=-m.x56*m.x32*m.b92 + m.x248 >= 0)
m.c34 = Constraint(expr=-m.x56*m.x33*m.b93 + m.x249 >= 0)
m.c35 = Constraint(expr=-m.x57*m.x34*m.b94 + m.x250 >= 0)
m.c36 = Constraint(expr=-m.x57*m.x35*m.b95 + m.x251 >= 0)
m.c37 = Constraint(expr=-m.x57*m.x36*m.b96 + m.x252 >= 0)
m.c38 = Constraint(expr=-m.x58*m.x37*m.b97 + m.x253 >= 0)
m.c39 = Constraint(expr=-m.x58*m.x38*m.b98 + m.x254 >= 0)
m.c40 = Constraint(expr=-m.x58*m.x39*m.b99 + m.x255 >= 0)
m.c41 = Constraint(expr=-m.x59*m.x40*m.b100 + m.x256 >= 0)
m.c42 = Constraint(expr=-m.x59*m.x41*m.b101 + m.x257 >= 0)
m.c43 = Constraint(expr=-m.x59*m.x42*m.b102 + m.x258 >= 0)
m.c44 = Constraint(expr=-m.x60*m.x43*m.b103 + m.x259 >= 0)
m.c45 = Constraint(expr=-m.x60*m.x44*m.b104 + m.x260 >= 0)
m.c46 = Constraint(expr=-m.x60*m.x45*m.b105 + m.x261 >= 0)

# c47-c61: exactly one binary selected from each consecutive group of three
# (b61..b105, fifteen groups).
m.c47 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1)
m.c48 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1)
m.c49 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1)
m.c50 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1)
m.c51 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1)
m.c52 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1)
m.c53 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1)
m.c54 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1)
m.c55 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1)
m.c56 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1)
m.c57 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1)
m.c58 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1)
m.c59 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1)
m.c60 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1)
m.c61 = Constraint(expr= m.b103 + m.b104 + m.b105 == 1)

# c62-c64: knapsack-style capacity limits — the same per-item weights apply
# to each of the three parallel "slots" (first, second, third member of every
# b-group), all capped at the same capacity.
m.c62 = Constraint(expr= 2.02*m.b61 + 4.01333333333333*m.b64 + 4.76*m.b67 + 5.96*m.b70 + 42.0933333333333*m.b73
                       + 99.28*m.b76 + 6.59333333333333*m.b79 + 61.8666666666667*m.b82 + 56.2866666666667*m.b85
                       + 41.5*m.b88 + 62.4933333333333*m.b91 + 80.9066666666667*m.b94 + 26.1466666666667*m.b97
                       + 38*m.b100 + 62.24*m.b103 <= 213.053333333333)
m.c63 = Constraint(expr= 2.02*m.b62 + 4.01333333333333*m.b65 + 4.76*m.b68 + 5.96*m.b71 + 42.0933333333333*m.b74
                       + 99.28*m.b77 + 6.59333333333333*m.b80 + 61.8666666666667*m.b83 + 56.2866666666667*m.b86
                       + 41.5*m.b89 + 62.4933333333333*m.b92 + 80.9066666666667*m.b95 + 26.1466666666667*m.b98
                       + 38*m.b101 + 62.24*m.b104 <= 213.053333333333)
m.c64 = Constraint(expr= 2.02*m.b63 + 4.01333333333333*m.b66 + 4.76*m.b69 + 5.96*m.b72 + 42.0933333333333*m.b75
                       + 99.28*m.b78 + 6.59333333333333*m.b81 + 61.8666666666667*m.b84 + 56.2866666666667*m.b87
                       + 41.5*m.b90 + 62.4933333333333*m.b93 + 80.9066666666667*m.b96 + 26.1466666666667*m.b99
                       + 38*m.b102 + 62.24*m.b105 <= 213.053333333333)

# c65-c106: positive-side rows x12k + x1nn >= c.  Together with the matching
# negative-side rows c107-c148 these look like an absolute-value / band
# linearization around the constants — presumed; verify against the model
# generator if this matters.
m.c65 = Constraint(expr= m.x121 + m.x127 >= 0.29424122)
m.c66 = Constraint(expr= m.x122 + m.x128 >= 0.29424122)
m.c67 = Constraint(expr= m.x123 + m.x129 >= 0.29424122)
m.c68 = Constraint(expr= m.x121 + m.x130 >= 0.29760193)
m.c69 = Constraint(expr= m.x122 + m.x131 >= 0.29760193)
m.c70 = Constraint(expr= m.x123 + m.x132 >= 0.29760193)
m.c71 = Constraint(expr= m.x121 + m.x133 >= 0.35149534)
m.c72 = Constraint(expr= m.x122 + m.x134 >= 0.35149534)
m.c73 = Constraint(expr= m.x123 + m.x135 >= 0.35149534)
m.c74 = Constraint(expr= m.x121 + m.x136 >= 0.30458283)
m.c75 = Constraint(expr= m.x122 + m.x137 >= 0.30458283)
m.c76 = Constraint(expr= m.x123 + m.x138 >= 0.30458283)
m.c77 = Constraint(expr= m.x121 + m.x139 >= 0.29951066)
m.c78 = Constraint(expr= m.x122 + m.x140 >= 0.29951066)
m.c79 = Constraint(expr= m.x123 + m.x141 >= 0.29951066)
m.c80 = Constraint(expr= m.x121 + m.x142 >= 0.30694357)
m.c81 = Constraint(expr= m.x122 + m.x143 >= 0.30694357)
m.c82 = Constraint(expr= m.x123 + m.x144 >= 0.30694357)
m.c83 = Constraint(expr= m.x121 + m.x145 >= 0.33520661)
m.c84 = Constraint(expr= m.x122 + m.x146 >= 0.33520661)
m.c85 = Constraint(expr= m.x123 + m.x147 >= 0.33520661)
m.c86 = Constraint(expr= m.x121 + m.x148 >= 0.3400071)
m.c87 = Constraint(expr= m.x122 + m.x149 >= 0.3400071)
m.c88 = Constraint(expr= m.x123 + m.x150 >= 0.3400071)
m.c89 = Constraint(expr= m.x121 + m.x151 >= 0.35227087)
m.c90 = Constraint(expr= m.x122 + m.x152 >= 0.35227087)
m.c91 = Constraint(expr= m.x123 + m.x153 >= 0.35227087)
m.c92 = Constraint(expr= m.x121 + m.x154 >= 0.34225726)
m.c93 = Constraint(expr= m.x122 + m.x155 >= 0.34225726)
m.c94 = Constraint(expr= m.x123 + m.x156 >= 0.34225726)
m.c95 = Constraint(expr= m.x121 + m.x157 >= 0.32776566)
m.c96 = Constraint(expr= m.x122 + m.x158 >= 0.32776566)
m.c97 = Constraint(expr= m.x123 + m.x159 >= 0.32776566)
m.c98 = Constraint(expr= m.x121 + m.x160 >= 0.30438256)
m.c99 = Constraint(expr= m.x122 + m.x161 >= 0.30438256)
m.c100 = Constraint(expr= m.x123 + m.x162 >= 0.30438256)
m.c101 = Constraint(expr= m.x121 + m.x163 >= 0.28538336)
m.c102 = Constraint(expr= m.x122 + m.x164 >= 0.28538336)
m.c103 = Constraint(expr= m.x123 + m.x165 >= 0.28538336)
m.c104 = Constraint(expr= m.x121 + m.x166 >= 0.27950575)
m.c105 = Constraint(expr= m.x122 + m.x167 >= 0.27950575)
m.c106 = Constraint(expr= m.x123 + m.x168 >= 0.27950575)

# c107-c148: negative-side counterparts (-x12k + x1nn >= -c).  Note: the
# x151..x153 triple has no negative-side row here, while x169..x171 appears
# only on this side — mirrors the generator's output, not necessarily a bug.
m.c107 = Constraint(expr= - m.x121 + m.x127 >= -0.29424122)
m.c108 = Constraint(expr= - m.x122 + m.x128 >= -0.29424122)
m.c109 = Constraint(expr= - m.x123 + m.x129 >= -0.29424122)
m.c110 = Constraint(expr= - m.x121 + m.x130 >= -0.29760193)
m.c111 = Constraint(expr= - m.x122 + m.x131 >= -0.29760193)
m.c112 = Constraint(expr= - m.x123 + m.x132 >= -0.29760193)
m.c113 = Constraint(expr= - m.x121 + m.x133 >= -0.35149534)
m.c114 = Constraint(expr= - m.x122 + m.x134 >= -0.35149534)
m.c115 = Constraint(expr= - m.x123 + m.x135 >= -0.35149534)
m.c116 = Constraint(expr= - m.x121 + m.x136 >= -0.30458283)
m.c117 = Constraint(expr= - m.x122 + m.x137 >= -0.30458283)
m.c118 = Constraint(expr= - m.x123 + m.x138 >= -0.30458283)
m.c119 = Constraint(expr= - m.x121 + m.x139 >= -0.29951066)
m.c120 = Constraint(expr= - m.x122 + m.x140 >= -0.29951066)
m.c121 = Constraint(expr= - m.x123 + m.x141 >= -0.29951066)
m.c122 = Constraint(expr= - m.x121 + m.x142 >= -0.30694357)
m.c123 = Constraint(expr= - m.x122 + m.x143 >= -0.30694357)
m.c124 = Constraint(expr= - m.x123 + m.x144 >= -0.30694357)
m.c125 = Constraint(expr= - m.x121 + m.x145 >= -0.33520661)
m.c126 = Constraint(expr= - m.x122 + m.x146 >= -0.33520661)
m.c127 = Constraint(expr= - m.x123 + m.x147 >= -0.33520661)
m.c128 = Constraint(expr= - m.x121 + m.x148 >= -0.3400071)
m.c129 = Constraint(expr= - m.x122 + m.x149 >= -0.3400071)
m.c130 = Constraint(expr= - m.x123 + m.x150 >= -0.3400071)
m.c131 = Constraint(expr= - m.x121 + m.x154 >= -0.34225726)
m.c132 = Constraint(expr= - m.x122 + m.x155 >= -0.34225726)
m.c133 = Constraint(expr= - m.x123 + m.x156 >= -0.34225726)
m.c134 = Constraint(expr= - m.x121 + m.x157 >= -0.32776566)
m.c135 = Constraint(expr= - m.x122 + m.x158 >= -0.32776566)
m.c136 = Constraint(expr= - m.x123 + m.x159 >= -0.32776566)
m.c137 = Constraint(expr= - m.x121 + m.x160 >= -0.30438256)
m.c138 = Constraint(expr= - m.x122 + m.x161 >= -0.30438256)
m.c139 = Constraint(expr= - m.x123 + m.x162 >= -0.30438256)
m.c140 = Constraint(expr= - m.x121 + m.x163 >= -0.28538336)
m.c141 = Constraint(expr= - m.x122 + m.x164 >= -0.28538336)
m.c142 = Constraint(expr= - m.x123 + m.x165 >= -0.28538336)
m.c143 = Constraint(expr= - m.x121 + m.x166 >= -0.27950575)
m.c144 = Constraint(expr= - m.x122 + m.x167 >= -0.27950575)
m.c145 = Constraint(expr= - m.x123 + m.x168 >= -0.27950575)
m.c146 = Constraint(expr= - m.x121 + m.x169 >= -0.25788969)
m.c147 = Constraint(expr= - m.x122 + m.x170 >= -0.25788969)
m.c148 = Constraint(expr= - m.x123 + m.x171 >= -0.25788969)

# c149-c190: analogous positive-side rows for the x124..x126 / x172..x216
# families (right-hand sides are negative here).
m.c149 = Constraint(expr= m.x124 + m.x175 >= -0.9536939)
m.c150 = Constraint(expr= m.x125 + m.x176 >= -0.9536939)
m.c151 = Constraint(expr= m.x126 + m.x177 >= -0.9536939)
m.c152 = Constraint(expr= m.x124 + m.x178 >= -0.9004898)
m.c153 = Constraint(expr= m.x125 + m.x179 >= -0.9004898)
m.c154 = Constraint(expr= m.x126 + m.x180 >= -0.9004898)
m.c155 = Constraint(expr= m.x124 + m.x181 >= -0.9114032)
m.c156 = Constraint(expr= m.x125 + m.x182 >= -0.9114032)
m.c157 = Constraint(expr= m.x126 + m.x183 >= -0.9114032)
m.c158 = Constraint(expr= m.x124 + m.x184 >= -0.90071532)
m.c159 = Constraint(expr= m.x125 + m.x185 >= -0.90071532)
m.c160 = Constraint(expr= m.x126 + m.x186 >= -0.90071532)
m.c161 = Constraint(expr= m.x124 + m.x187 >= -0.88043054)
m.c162 = Constraint(expr= m.x125 + m.x188 >= -0.88043054)
m.c163 = Constraint(expr= m.x126 + m.x189 >= -0.88043054)
m.c164 = Constraint(expr= m.x124 + m.x190 >= -0.8680249)
m.c165 = Constraint(expr= m.x125 + m.x191 >= -0.8680249)
m.c166 = Constraint(expr= m.x126 + m.x192 >= -0.8680249)
m.c167 = Constraint(expr= m.x124 + m.x193 >= -0.81034814)
m.c168 = Constraint(expr= m.x125 + m.x194 >= -0.81034814)
m.c169 = Constraint(expr= m.x126 + m.x195 >= -0.81034814)
m.c170 = Constraint(expr= m.x124 + m.x196 >= -0.80843127)
m.c171 = Constraint(expr= m.x125 + m.x197 >= -0.80843127)
m.c172 = Constraint(expr= m.x126 + m.x198 >= -0.80843127)
m.c173 = Constraint(expr= m.x124 + m.x199 >= -0.7794471)
m.c174 = Constraint(expr= m.x125 + m.x200 >= -0.7794471)
m.c175 = Constraint(expr= m.x126 + m.x201 >= -0.7794471)
m.c176 = Constraint(expr= m.x124 + m.x202 >= -0.79930922)
m.c177 = Constraint(expr= m.x125 + m.x203 >= -0.79930922)
m.c178 = Constraint(expr= m.x126 + m.x204 >= -0.79930922)
m.c179 = Constraint(expr= m.x124 + m.x205 >= -0.84280733)
m.c180 = Constraint(expr= m.x125 + m.x206 >= -0.84280733)
m.c181 = Constraint(expr= m.x126 + m.x207 >= -0.84280733)
m.c182 = Constraint(expr= m.x124 + m.x208 >= -0.81379236)
m.c183 = Constraint(expr= m.x125 + m.x209 >= -0.81379236)
m.c184 = Constraint(expr= m.x126 + m.x210 >= -0.81379236)
m.c185 = Constraint(expr= m.x124 + m.x211 >= -0.82457178)
m.c186 = Constraint(expr= m.x125 + m.x212 >= -0.82457178)
m.c187 = Constraint(expr= m.x126 + m.x213 >= -0.82457178)
m.c188 = Constraint(expr= m.x124 + m.x214 >= -0.80226439)
m.c189 = Constraint(expr= m.x125 + m.x215 >= -0.80226439)
m.c190 = Constraint(expr= m.x126 + m.x216 >= -0.80226439)

# c191-c232: negative-side counterparts for the x124..x126 family.  Note:
# x172..x174 appears only on this side and x199..x201 only on the other —
# again as emitted by the generator.
m.c191 = Constraint(expr= - m.x124 + m.x172 >= 0.98493628)
m.c192 = Constraint(expr= - m.x125 + m.x173 >= 0.98493628)
m.c193 = Constraint(expr= - m.x126 + m.x174 >= 0.98493628)
m.c194 = Constraint(expr= - m.x124 + m.x175 >= 0.9536939)
m.c195 = Constraint(expr= - m.x125 + m.x176 >= 0.9536939)
m.c196 = Constraint(expr= - m.x126 + m.x177 >= 0.9536939)
m.c197 = Constraint(expr= - m.x124 + m.x178 >= 0.9004898)
m.c198 = Constraint(expr= - m.x125 + m.x179 >= 0.9004898)
m.c199 = Constraint(expr= - m.x126 + m.x180 >= 0.9004898)
m.c200 = Constraint(expr= - m.x124 + m.x181 >= 0.9114032)
m.c201 = Constraint(expr= - m.x125 + m.x182 >= 0.9114032)
m.c202 = Constraint(expr= - m.x126 + m.x183 >= 0.9114032)
m.c203 = Constraint(expr= - m.x124 + m.x184 >= 0.90071532)
m.c204 = Constraint(expr= - m.x125 + m.x185 >= 0.90071532)
m.c205 = Constraint(expr= - m.x126 + m.x186 >= 0.90071532)
m.c206 = Constraint(expr= - m.x124 + m.x187 >= 0.88043054)
m.c207 = Constraint(expr= - m.x125 + m.x188 >= 0.88043054)
m.c208 = Constraint(expr= - m.x126 + m.x189 >= 0.88043054)
m.c209 = Constraint(expr= - m.x124 + m.x190 >= 0.8680249)
m.c210 = Constraint(expr= - m.x125 + m.x191 >= 0.8680249)
m.c211 = Constraint(expr= - m.x126 + m.x192 >= 0.8680249)
m.c212 = Constraint(expr= - m.x124 + m.x193 >= 0.81034814)
m.c213 = Constraint(expr= - m.x125 + m.x194 >= 0.81034814)
m.c214 = Constraint(expr= - m.x126 + m.x195 >= 0.81034814)
m.c215 = Constraint(expr= - m.x124 + m.x196 >= 0.80843127)
m.c216 = Constraint(expr= - m.x125 + m.x197 >= 0.80843127)
m.c217 = Constraint(expr= - m.x126 + m.x198 >= 0.80843127)
m.c218 = Constraint(expr= - m.x124 + m.x202 >= 0.79930922)
m.c219 = Constraint(expr= - m.x125 + m.x203 >= 0.79930922)
m.c220 = Constraint(expr= - m.x126 + m.x204 >= 0.79930922)
m.c221 = Constraint(expr= - m.x124 + m.x205 >= 0.84280733)
m.c222 = Constraint(expr= - m.x125 + m.x206 >= 0.84280733)
m.c223 = Constraint(expr= - m.x126 + m.x207 >= 0.84280733)
m.c224 = Constraint(expr= - m.x124 + m.x208 >= 0.81379236)
m.c225 = Constraint(expr= - m.x125 + m.x209 >= 0.81379236)
m.c226 = Constraint(expr= - m.x126 + m.x210 >= 0.81379236)
m.c227 = Constraint(expr= - m.x124 + m.x211 >= 0.82457178)
m.c228 = Constraint(expr= - m.x125 + m.x212 >= 0.82457178)
m.c229 = Constraint(expr= - m.x126 + m.x213 >= 0.82457178)
m.c230 = Constraint(expr= - m.x124 + m.x214 >= 0.80226439)
m.c231 = Constraint(expr= - m.x125 + m.x215 >= 0.80226439)
m.c232 = Constraint(expr= - m.x126 + m.x216 >= 0.80226439)

# c233-c277: splitting equalities, for i = 1..45:
#   x(i) = x(126+i) + x(171+i)
m.c233 = Constraint(expr= m.x1 - m.x127 - m.x172 == 0)
m.c234 = Constraint(expr= m.x2 - m.x128 - m.x173 == 0)
m.c235 = Constraint(expr= m.x3 - m.x129 - m.x174 == 0)
m.c236 = Constraint(expr= m.x4 - m.x130 - m.x175 == 0)
m.c237 = Constraint(expr= m.x5 - m.x131 - m.x176 == 0)
m.c238 = Constraint(expr= m.x6 - m.x132 - m.x177 == 0)
m.c239 = Constraint(expr= m.x7 - m.x133 - m.x178 == 0)
m.c240 = Constraint(expr= m.x8 - m.x134 - m.x179 == 0)
m.c241 = Constraint(expr= m.x9 - m.x135 - m.x180 == 0)
m.c242 = Constraint(expr= m.x10 - m.x136 - m.x181 == 0)
m.c243 = Constraint(expr= m.x11 - m.x137 - m.x182 == 0)
m.c244 = Constraint(expr= m.x12 - m.x138 - m.x183 == 0)
m.c245 = Constraint(expr= m.x13 - m.x139 - m.x184 == 0)
m.c246 = Constraint(expr= m.x14 - m.x140 - m.x185 == 0)
m.c247 = Constraint(expr= m.x15 - m.x141 - m.x186 == 0)
m.c248 = Constraint(expr= m.x16 - m.x142 - m.x187 == 0)
m.c249 = Constraint(expr= m.x17 - m.x143 - m.x188 == 0)
m.c250 = Constraint(expr= m.x18 - m.x144 - m.x189 == 0)
m.c251 = Constraint(expr= m.x19 - m.x145 - m.x190 == 0)
m.c252 = Constraint(expr= m.x20 - m.x146 - m.x191 == 0)
m.c253 = Constraint(expr= m.x21 - m.x147 - m.x192 == 0)
m.c254 = Constraint(expr= m.x22 - m.x148 - m.x193 == 0)
m.c255 = Constraint(expr= m.x23 - m.x149 - m.x194 == 0)
m.c256 = Constraint(expr= m.x24 - m.x150 - m.x195 == 0)
m.c257 = Constraint(expr= m.x25 - m.x151 - m.x196 == 0)
m.c258 = Constraint(expr= m.x26 - m.x152 - m.x197 == 0)
m.c259 = Constraint(expr= m.x27 - m.x153 - m.x198 == 0)
m.c260 = Constraint(expr= m.x28 - m.x154 - m.x199 == 0)
m.c261 = Constraint(expr= m.x29 - m.x155 - m.x200 == 0)
m.c262 = Constraint(expr= m.x30 - m.x156 - m.x201 == 0)
m.c263 = Constraint(expr= m.x31 - m.x157 - m.x202 == 0)
m.c264 = Constraint(expr= m.x32 - m.x158 - m.x203 == 0)
m.c265 = Constraint(expr= m.x33 - m.x159 - m.x204 == 0)
m.c266 = Constraint(expr= m.x34 - m.x160 - m.x205 == 0)
m.c267 = Constraint(expr= m.x35 - m.x161 - m.x206 == 0)
m.c268 = Constraint(expr= m.x36 - m.x162 - m.x207 == 0)
m.c269 = Constraint(expr= m.x37 - m.x163 - m.x208 == 0)
m.c270 = Constraint(expr= m.x38 - m.x164 - m.x209 == 0)
m.c271 = Constraint(expr= m.x39 - m.x165 - m.x210 == 0)
m.c272 = Constraint(expr= m.x40 - m.x166 - m.x211 == 0)
m.c273 = Constraint(expr= m.x41 - m.x167 - m.x212 == 0)
m.c274 = Constraint(expr= m.x42 - m.x168 - m.x213 == 0)
m.c275 = Constraint(expr= m.x43 - m.x169 - m.x214 == 0)
m.c276 = Constraint(expr= m.x44 - m.x170 - m.x215 == 0)
m.c277 = Constraint(expr= m.x45 - m.x171 - m.x216 == 0)

# c278-c405: set-covering clauses over b262..b291 — each row forces at least
# one binary in the listed subset to be 1 (enumerated CNF-style clauses).
m.c278 = Constraint(expr= m.b269 + m.b270 >= 1)
m.c279 = Constraint(expr= m.b267 + m.b272 >= 1)
m.c280 = Constraint(expr= m.b266 + m.b270 >= 1)
m.c281 = Constraint(expr= m.b266 + m.b269 + m.b271 >= 1)
m.c282 = Constraint(expr= m.b266 + m.b268 + m.b272 >= 1)
m.c283 = Constraint(expr= m.b266 + m.b267 >= 1)
m.c284 = Constraint(expr= m.b265 + m.b272 >= 1)
m.c285 = Constraint(expr= m.b265 + m.b269 >= 1)
m.c286 = Constraint(expr= m.b264 + m.b271 >= 1)
m.c287 = Constraint(expr= m.b264 + m.b269 + m.b272 >= 1)
m.c288 = Constraint(expr= m.b264 + m.b268 >= 1)
m.c289 = Constraint(expr= m.b264 + m.b266 + m.b272 >= 1)
m.c290 = Constraint(expr= m.b264 + m.b266 + m.b269 >= 1)
m.c291 = Constraint(expr= m.b264 + m.b265 >= 1)
m.c292 = Constraint(expr= m.b263 + m.b271 >= 1)
m.c293 = Constraint(expr= m.b263 + m.b269 + m.b272 >= 1)
m.c294 = Constraint(expr= m.b263 + m.b268 >= 1)
m.c295 = Constraint(expr= m.b263 + m.b266 >= 1)
m.c296 = Constraint(expr= m.b263 + m.b264 >= 1)
m.c297 = Constraint(expr= m.b262 + m.b271 >= 1)
m.c298 = Constraint(expr= m.b262 + m.b269 + m.b272 >= 1)
m.c299 = Constraint(expr= m.b262 + m.b268 >= 1)
m.c300 = Constraint(expr= m.b262 + m.b266 + m.b272 >= 1)
m.c301 = Constraint(expr= m.b262 + m.b266 + m.b269 >= 1)
m.c302 = Constraint(expr= m.b262 + m.b265 >= 1)
m.c303 = Constraint(expr= m.b262 + m.b264 >= 1)
m.c304 = Constraint(expr= m.b262 + m.b263 >= 1)
m.c305 = Constraint(expr= m.b272 + m.b277 >= 1)
m.c306 = Constraint(expr= m.b272 + m.b276 + m.b278 >= 1)
m.c307 = Constraint(expr= m.b272 + m.b275 + m.b279 >= 1)
m.c308 = Constraint(expr= m.b272 + m.b274 >= 1)
m.c309 = Constraint(expr= m.b272 + m.b273 + m.b279 >= 1)
m.c310 = Constraint(expr= m.b272 + m.b273 + m.b276 >= 1)
m.c311 = Constraint(expr= m.b271 + m.b278 >= 1)
m.c312 = Constraint(expr= m.b271 + m.b276 + m.b279 >= 1)
m.c313 = Constraint(expr= m.b271 + m.b275 >= 1)
m.c314 = Constraint(expr= m.b271 + m.b273 >= 1)
m.c315 = Constraint(expr= m.b270 + m.b279 >= 1)
m.c316 = Constraint(expr= m.b270 + m.b276 >= 1)
m.c317 = Constraint(expr= m.b270 + m.b273 >= 1)
m.c318 = Constraint(expr= m.b269 + m.b277 >= 1)
m.c319 = Constraint(expr= m.b269 + m.b276 + m.b278 >= 1)
m.c320 = Constraint(expr= m.b269 + m.b275 + m.b279 >= 1)
m.c321 = Constraint(expr= m.b269 + m.b274 >= 1)
m.c322 = Constraint(expr= m.b269 + m.b273 + m.b279 >= 1)
m.c323 = Constraint(expr= m.b269 + m.b273 + m.b276 >= 1)
m.c324 = Constraint(expr= m.b269 + m.b272 + m.b278 >= 1)
m.c325 = Constraint(expr= m.b269 + m.b272 + m.b276 + m.b279 >= 1)
m.c326 = Constraint(expr= m.b269 + m.b272 + m.b275 >= 1)
m.c327 = Constraint(expr= m.b269 + m.b272 + m.b273 >= 1)
m.c328 = Constraint(expr= m.b269 + m.b271 + m.b279 >= 1)
m.c329 = Constraint(expr= m.b269 + m.b271 + m.b276 >= 1)
m.c330 = Constraint(expr= m.b269 + m.b271 + m.b273 >= 1)
m.c331 = Constraint(expr= m.b268 + m.b278 >= 1)
m.c332 = Constraint(expr= m.b268 + m.b276 + m.b279 >= 1)
m.c333 = Constraint(expr= m.b268 + m.b275 >= 1)
m.c334 = Constraint(expr= m.b268 + m.b273 >= 1)
m.c335 = Constraint(expr= m.b268 + m.b272 + m.b279 >= 1)
m.c336 = Constraint(expr= m.b268 + m.b272 + m.b276 >= 1)
m.c337 = Constraint(expr= m.b268 + m.b272 + m.b273 >= 1)
m.c338 = Constraint(expr= m.b268 + m.b271 + m.b279 >= 1)
m.c339 = Constraint(expr= m.b268 + m.b271 + m.b276 >= 1)
m.c340 = Constraint(expr= m.b268 + m.b271 + m.b273 >= 1)
m.c341 = Constraint(expr= m.b267 + m.b279 >= 1)
m.c342 = Constraint(expr= m.b267 + m.b276 >= 1)
m.c343 = Constraint(expr= m.b267 + m.b273 >= 1)
m.c344 = Constraint(expr= m.b266 + m.b277 >= 1)
m.c345 = Constraint(expr= m.b266 + m.b276 + m.b278 >= 1)
m.c346 = Constraint(expr= m.b266 + m.b275 + m.b279 >= 1)
m.c347 = Constraint(expr= m.b266 + m.b274 >= 1)
m.c348 = Constraint(expr= m.b266 + m.b273 + m.b279 >= 1)
m.c349 = Constraint(expr= m.b266 + m.b273 + m.b276 >= 1)
m.c350 = Constraint(expr= m.b266 + m.b272 + m.b278 >= 1)
m.c351 = Constraint(expr= m.b266 + m.b272 + m.b276 + m.b279 >= 1)
m.c352 = Constraint(expr= m.b266 + m.b272 + m.b275 >= 1)
m.c353 = Constraint(expr= m.b266 + m.b272 + m.b273 >= 1)
m.c354 = Constraint(expr= m.b266 + m.b271 + m.b279 >= 1)
m.c355 = Constraint(expr= m.b266 + m.b271 + m.b276 >= 1)
m.c356 = Constraint(expr= m.b266 + m.b271 + m.b273 >= 1)
m.c357 = Constraint(expr= m.b266 + m.b269 + m.b278 >= 1)
m.c358 = Constraint(expr= m.b266 + m.b269 + m.b276 + m.b279 >= 1)
m.c359 = Constraint(expr= m.b266 + m.b269 + m.b275 >= 1)
m.c360 = Constraint(expr= m.b266 + m.b269 + m.b273 >= 1)
m.c361 = Constraint(expr= m.b266 + m.b269 + m.b272 + m.b279 >= 1)
m.c362 = Constraint(expr= m.b266 + m.b269 + m.b272 + m.b276 >= 1)
m.c363 = Constraint(expr= m.b266 + m.b269 + m.b272 + m.b273 >= 1)
m.c364 = Constraint(expr= m.b266 + m.b268 + m.b279 >= 1)
m.c365 = Constraint(expr= m.b266 + m.b268 + m.b276 >= 1)
m.c366 = Constraint(expr= m.b266 + m.b268 + m.b273 >= 1)
m.c367 = Constraint(expr= m.b265 + m.b279 >= 1)
m.c368 = Constraint(expr= m.b265 + m.b276 >= 1)
m.c369 = Constraint(expr= m.b265 + m.b273 >= 1)
m.c370 = Constraint(expr= m.b264 + m.b278 >= 1)
m.c371 = Constraint(expr= m.b264 + m.b276 + m.b279 >= 1)
m.c372 = Constraint(expr= m.b264 + m.b275 >= 1)
m.c373 = Constraint(expr= m.b264 + m.b273 >= 1)
m.c374 = Constraint(expr= m.b264 + m.b272 + m.b279 >= 1)
m.c375 = Constraint(expr= m.b264 + m.b272 + m.b276 >= 1)
m.c376 = Constraint(expr= m.b264 + m.b272 + m.b273 >= 1)
m.c377 = Constraint(expr= m.b264 + m.b269 + m.b279 >= 1)
m.c378 = Constraint(expr= m.b264 + m.b269 + m.b276 >= 1)
m.c379 = Constraint(expr= m.b264 + m.b269 + m.b273 >= 1)
m.c380 = Constraint(expr= m.b264 + m.b266 + m.b279 >= 1)
m.c381 = Constraint(expr= m.b264 + m.b266 + m.b276 >= 1)
m.c382 = Constraint(expr= m.b264 + m.b266 + m.b273 >= 1)
m.c383 = Constraint(expr= m.b263 + m.b278 >= 1)
m.c384 = Constraint(expr= m.b263 + m.b276 + m.b279 >= 1)
m.c385 = Constraint(expr= m.b263 + m.b275 >= 1)
m.c386 = Constraint(expr= m.b263 + m.b273 >= 1)
m.c387 = Constraint(expr= m.b263 + m.b272 + m.b279 >= 1)
m.c388 = Constraint(expr= m.b263 + m.b272 + m.b276 >= 1)
m.c389 = Constraint(expr= m.b263 + m.b272 + m.b273 >= 1)
m.c390 = Constraint(expr= m.b263 + m.b269 + m.b279 >= 1)
m.c391 = Constraint(expr= m.b263 + m.b269 + m.b276 >= 1)
m.c392 = Constraint(expr= m.b263 + m.b269 + m.b273 >= 1)
m.c393 = Constraint(expr= m.b262 + m.b278 >= 1)
m.c394 = Constraint(expr= m.b262 + m.b276 + m.b279 >= 1)
m.c395 = Constraint(expr= m.b262 + m.b275 >= 1)
m.c396 = Constraint(expr= m.b262 + m.b273 >= 1)
m.c397 = Constraint(expr= m.b262 + m.b272 + m.b279 >= 1)
m.c398 = Constraint(expr= m.b262 + m.b272 + m.b276 >= 1)
m.c399 = Constraint(expr= m.b262 + m.b272 + m.b273 >= 1)
m.c400 = Constraint(expr= m.b262 + m.b269 + m.b279 >= 1)
m.c401 = Constraint(expr= m.b262 + m.b269 + m.b276 >= 1)
m.c402 = Constraint(expr= m.b262 + m.b269 + m.b273 >= 1)
m.c403 = Constraint(expr= m.b262 + m.b266 + m.b279 >= 1)
m.c404 = Constraint(expr= m.b262 + m.b266 + m.b276 >= 1)
m.c405 = Constraint(expr= m.b262 + m.b266 + m.b273 >= 1)

# c406-c435: lower-bound each capacity x46..x60 by a level-dependent
# threshold when the corresponding level binary b262..b291 is set
# (several thresholds per x where multiple levels exist).
m.c406 = Constraint(expr= m.x46 - 2.02*m.b262 >= 0)
m.c407 = Constraint(expr= m.x47 - 4.01333333333333*m.b263 >= 0)
m.c408 = Constraint(expr= m.x48 - 4.76*m.b264 >= 0)
m.c409 = Constraint(expr= m.x49 - 5.68*m.b265 >= 0)
m.c410 = Constraint(expr= m.x49 - 5.96*m.b266 >= 0)
m.c411 = Constraint(expr= m.x50 - 38.2666666666667*m.b267 >= 0)
m.c412 = Constraint(expr= m.x50 - 40.18*m.b268 >= 0)
m.c413 = Constraint(expr= m.x50 - 42.0933333333333*m.b269 >= 0)
m.c414 = Constraint(expr= m.x51 - 90.2533333333333*m.b270 >= 0)
m.c415 = Constraint(expr= m.x51 - 94.7666666666667*m.b271 >= 0)
m.c416 = Constraint(expr= m.x51 - 99.28*m.b272 >= 0)
m.c417 = Constraint(expr= m.x52 - 6.59333333333333*m.b273 >= 0)
m.c418 = Constraint(expr= m.x53 - 56.24*m.b274 >= 0)
m.c419 = Constraint(expr= m.x53 - 59.0533333333333*m.b275 >= 0)
m.c420 = Constraint(expr= m.x53 - 61.8666666666667*m.b276 >= 0)
m.c421 = Constraint(expr= m.x54 - 51.1733333333333*m.b277 >= 0)
m.c422 = Constraint(expr= m.x54 - 53.7333333333333*m.b278 >= 0)
m.c423 = Constraint(expr= m.x54 - 56.2866666666667*m.b279 >= 0)
m.c424 = Constraint(expr= m.x55 - 35.84*m.b280 >= 0)
m.c425 = Constraint(expr= m.x55 - 37.7266666666667*m.b281 >= 0)
m.c426 = Constraint(expr= m.x55 - 39.6133333333333*m.b282 >= 0)
m.c427 = Constraint(expr= m.x55 - 41.5*m.b283 >= 0)
m.c428 = Constraint(expr= m.x56 - 56.8066666666667*m.b284 >= 0)
m.c429 = Constraint(expr= m.x56 - 59.6466666666667*m.b285 >= 0)
m.c430 = Constraint(expr= m.x56 - 62.4933333333333*m.b286 >= 0)
m.c431 = Constraint(expr= m.x57 - 80.9066666666667*m.b287 >= 0)
m.c432 = Constraint(expr= m.x58 - 26.1466666666667*m.b288 >= 0)
m.c433 = Constraint(expr= m.x59 - 38*m.b289 >= 0)
m.c434 = Constraint(expr= m.x60 - 59.2733333333333*m.b290 >= 0)
m.c435 = Constraint(expr= m.x60 - 62.24*m.b291 >= 0)

# c436-c480: make each objective term x106..x120 an upper envelope (max)
# of its three auxiliaries from x217..x261.
m.c436 = Constraint(expr= - m.x106 + m.x217 <= 0)
m.c437 = Constraint(expr= - m.x106 + m.x218 <= 0)
m.c438 = Constraint(expr= - m.x106 + m.x219 <= 0)
m.c439 = Constraint(expr= - m.x107 + m.x220 <= 0)
m.c440 = Constraint(expr= - m.x107 + m.x221 <= 0)
m.c441 = Constraint(expr= - m.x107 + m.x222 <= 0)
m.c442 = Constraint(expr= - m.x108 + m.x223 <= 0)
m.c443 = Constraint(expr= - m.x108 + m.x224 <= 0)
m.c444 = Constraint(expr= - m.x108 + m.x225 <= 0)
m.c445 = Constraint(expr= - m.x109 + m.x226 <= 0)
m.c446 = Constraint(expr= - m.x109 + m.x227 <= 0)
m.c447 = Constraint(expr= - m.x109 + m.x228 <= 0)
m.c448 = Constraint(expr= - m.x110 + m.x229 <= 0)
m.c449 = Constraint(expr= - m.x110 + m.x230 <= 0)
m.c450 = Constraint(expr= - m.x110 + m.x231 <= 0)
m.c451 = Constraint(expr= - m.x111 + m.x232 <= 0)
m.c452 = Constraint(expr= - m.x111 + m.x233 <= 0)
m.c453 = Constraint(expr= - m.x111 + m.x234 <= 0)
m.c454 = Constraint(expr= - m.x112 + m.x235 <= 0)
m.c455 = Constraint(expr= - m.x112 + m.x236 <= 0)
m.c456 = Constraint(expr= - m.x112 + m.x237 <= 0)
m.c457 = Constraint(expr= - m.x113 + m.x238 <= 0)
m.c458 = Constraint(expr= - m.x113 + m.x239 <= 0)
m.c459 = Constraint(expr= - m.x113 + m.x240 <= 0)
m.c460 = Constraint(expr= - m.x114 + m.x241 <= 0)
m.c461 = Constraint(expr= - m.x114 + m.x242 <= 0)
m.c462 = Constraint(expr= - m.x114 + m.x243 <= 0)
m.c463 = Constraint(expr= - m.x115 + m.x244 <= 0)
m.c464 = Constraint(expr= - m.x115 + m.x245 <= 0)
m.c465 = Constraint(expr= - m.x115 + m.x246 <= 0)
m.c466 = Constraint(expr= - m.x116 + m.x247 <= 0)
m.c467 = Constraint(expr= - m.x116 + m.x248 <= 0)
m.c468 = Constraint(expr= - m.x116 + m.x249 <= 0)
m.c469 = Constraint(expr= - m.x117 + m.x250 <= 0)
m.c470 = Constraint(expr= - m.x117 + m.x251 <= 0)
m.c471 = Constraint(expr= - m.x117 + m.x252 <= 0)
m.c472 = Constraint(expr= - m.x118 + m.x253 <= 0)
m.c473 = Constraint(expr= - m.x118 + m.x254 <= 0)
m.c474 = Constraint(expr= - m.x118 + m.x255 <= 0)
m.c475 = Constraint(expr= - m.x119 + m.x256 <= 0)
m.c476 = Constraint(expr= - m.x119 + m.x257 <= 0)
m.c477 = Constraint(expr= - m.x119 + m.x258 <= 0)
m.c478 = Constraint(expr= - m.x120 + m.x259 <= 0)
m.c479 = Constraint(expr= - m.x120 + m.x260 <= 0)
m.c480 = Constraint(expr= - m.x120 + m.x261 <= 0)

# c481-c497: ordering constraints within level groups (b_k >= b_{k+1},
# x124 >= x125 >= x126) — symmetry breaking / precedence rows.
m.c481 = Constraint(expr= m.b265 - m.b266 >= 0)
m.c482 = Constraint(expr= m.b267 - m.b268 >= 0)
m.c483 = Constraint(expr= m.b268 - m.b269 >= 0)
m.c484 = Constraint(expr= m.b270 - m.b271 >= 0)
m.c485 = Constraint(expr= m.b271 - m.b272 >= 0)
m.c486 = Constraint(expr= m.b274 - m.b275 >= 0)
m.c487 = Constraint(expr= m.b275 - m.b276 >= 0)
m.c488 = Constraint(expr= m.b277 - m.b278 >= 0)
m.c489 = Constraint(expr= m.b278 - m.b279 >= 0)
m.c490 = Constraint(expr= m.b280 - m.b281 >= 0)
m.c491 = Constraint(expr= m.b281 - m.b282 >= 0)
m.c492 = Constraint(expr= m.b282 - m.b283 >= 0)
m.c493 = Constraint(expr= m.b284 - m.b285 >= 0)
m.c494 = Constraint(expr= m.b285 - m.b286 >= 0)
m.c495 = Constraint(expr= m.b290 - m.b291 >= 0)
m.c496 = Constraint(expr= m.x124 - m.x125 >= 0)
m.c497 = Constraint(expr= m.x125 - m.x126 >= 0)
| 48,942 | 30,870 |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class StampInfo(object):
    """Plain data holder describing one electronic stamp (seal) record."""

    def __init__(self, stampId=None, stampName=None, stampContent=None, stampDigest=None, createTime=None):
        """
        :param stampId: (Optional) stamp ID
        :param stampName: (Optional) stamp name
        :param stampContent: (Optional) stamp image (base64-encoded)
        :param stampDigest: (Optional) stamp digest
        :param createTime: (Optional) time the stamp was uploaded
        """
        # Mirror every constructor argument onto an attribute of the same name.
        (self.stampId, self.stampName, self.stampContent,
         self.stampDigest, self.createTime) = (
            stampId, stampName, stampContent, stampDigest, createTime)
| 1,236 | 396 |
from rest_framework import status
from django.urls import reverse
from authors.apps.articles.models import Article
from authors.base_test_config import TestConfiguration
# Retained for backward compatibility with any external reader of this module;
# the tests below no longer share state through it (each test resolves its own
# slug, so tests pass regardless of execution order or DB isolation).
slug = None


class TestLikeDislike(TestConfiguration):
    """
    Class to test for liking and disliking of articles.
    """

    def create_article(self):
        """
        Register and verify a user, log them in, create one article,
        and return the authentication token.
        """
        article = {
            "article": {
                "title": "How To Train Your Dragon",
                "description": "Ever wonder how?",
                "body": "It takes a Jacobian"
            }
        }
        # register the user and verify email
        self.email_verification(self.reg_user)
        # login the registered user
        response = self.login(self.log_user)
        # grab the token from the response data
        token = response.data["token"]
        # Create an article using the authentication token
        self.client.post(
            reverse("articles"),
            article,
            content_type='application/json',
            HTTP_AUTHORIZATION='Token ' + token
        )
        return token

    def _article_slug(self):
        """
        Return the slug of the article created by create_article().
        Fetched per-test so no test depends on state left by another.
        """
        return Article.objects.all().first().slug

    def test_article_like(self):
        """
        Test if an article can be liked.
        """
        # create an article and get user token
        token = self.create_article()
        # set the url using this test's own article slug
        url = '/api/articles/{}/like/'.format(self._article_slug())
        like_response = self.client.post(
            url,
            content_type='application/json',
            HTTP_AUTHORIZATION='Token ' + token
        )
        # Test for correct response
        self.assertEqual(like_response.status_code, status.HTTP_201_CREATED)
        # Test response data to see if the article has 1 like
        self.assertEqual(like_response.data["total_likes"], 1)

    def test_dislike_article(self):
        """
        Test if an article can be disliked
        """
        token = self.create_article()
        url = '/api/articles/{}/dislike/'.format(self._article_slug())
        like_response = self.client.post(
            url,
            content_type='application/json',
            HTTP_AUTHORIZATION='Token ' + token
        )
        # Test for correct response
        self.assertEqual(like_response.status_code, status.HTTP_201_CREATED)
        # Test response data to see if the article has 1 dislike
        self.assertEqual(like_response.data["total_dislikes"], 1)

    def test_for_already_liked_article(self):
        """
        Test deletion of vote.
        If a user likes an article they have already liked,
        the vote is removed
        """
        token = self.create_article()
        url = '/api/articles/{}/like/'.format(self._article_slug())
        # Post a like to the article
        self.client.post(
            url,
            content_type='application/json',
            HTTP_AUTHORIZATION='Token ' + token
        )
        # Like the article twice
        like_response = self.client.post(
            url,
            content_type='application/json',
            HTTP_AUTHORIZATION='Token ' + token
        )
        # Test for correct response
        self.assertEqual(like_response.status_code, status.HTTP_201_CREATED)
        # Test response data to see if the article has 0 likes
        self.assertEqual(like_response.data["total_likes"], 0)
# ------------------------------------------------------------
# Training script example for Keras implementation
#
# Kaisar Kushibar (2019)
# kaisar.kushibar@udg.edu
# ------------------------------------------------------------
import os
import sys
import numpy as np
from functools import partial
from tkinter import filedialog
from tkinter import *
from tkinter.ttk import *
import Queue
import ConfigParser
import nibabel as nib
from cnn_cort.load_options import *
from keras.utils import np_utils
# Load the user configuration from 'configuration.cfg' in the current
# working directory and turn it into the global options dict.
CURRENT_PATH = os.getcwd()
user_config = ConfigParser.RawConfigParser()
user_config.read(os.path.join(CURRENT_PATH, 'configuration.cfg'))
options = load_options(user_config)
from cnn_cort.base import load_data, generate_training_set, testing
from cnn_cort.keras_net import get_callbacks, get_model
from train_test_task import TestTask, TrainTask
class StdoutRedirector(object):
    """File-like object that mirrors every write into a Tk text widget,
    scrolling the widget so the newest output stays visible."""

    def __init__(self, tLog):
        self.log = tLog

    def write(self, msg):
        """Append *msg* to the widget and scroll to the end."""
        widget = self.log
        widget.insert(END, msg)
        widget.see(END)

    def flush(self):
        """No buffering is done; just keep the view pinned to the end."""
        self.log.see(END)
class Application(Frame):
    """Tk GUI for training/testing the segmentation CNN.

    Two tabs: 'Train and test' (train/test/log panes) and 'Configurations'
    (file-name entries).  Long-running work is handed to TrainTask/TestTask
    threads that coordinate through ``self.queue``.  NOTE: this module is
    Python 2 (``Queue``, ``print`` statements).
    """
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.pack()
        self.show_main_window()
        self.isProcessing = False
        # Used as a busy flag: a task is running while the queue is non-empty.
        self.queue = Queue.Queue()
    def get_label_entry_button(self, parent, row, labelTitle, buttonTitle):
        """Place a label/entry/button row in *parent*; return (entry, button)."""
        label = Label(parent, text=labelTitle)
        entry = Entry(parent)
        button = Button(parent, text=buttonTitle)
        label.grid(row=row, column=0, sticky=W+E+N+S, padx=5, pady=5)
        entry.grid(row=row, column=1, sticky=W+E+N+S, padx=5, pady=5)
        button.grid(row=row, column=2, sticky=W+E+N+S, padx=5, pady=5)
        parent.grid_columnconfigure(1, weight=2)
        return entry, button
    def get_label_entry(self, parent, row, labelTitle, entryText):
        """Place a label/entry row pre-filled with *entryText*; return the entry."""
        label = Label(parent, text=labelTitle)
        entry = Entry(parent)
        entry.insert(0, entryText)
        label.grid(row=row, column=0, sticky=W+E+N+S, ipadx=5, ipady=5, padx=5, pady=5)
        entry.grid(row=row, column=1, sticky=W+E+N+S, ipadx=5, ipady=5, padx=5, pady=5)
        parent.grid_columnconfigure(1, weight=2)
        return entry
    def tab_train_test(self, parent):
        """Build the 'Train and test' tab (Train, Test and Log panes)."""
        # create widgets
        pWindow = PanedWindow(parent, orient='vertical')
        # Train frame
        fTrain = LabelFrame(pWindow, text='Train')
        eTrainFolder, bTrainFolder = self.get_label_entry_button(fTrain, 0, 'Train path:', 'Browse')
        bTrainFolder.config(command=partial(self.select_path, eTrainFolder, 'DIR'))
        eModelFolder, bModelFolder = self.get_label_entry_button(fTrain, 1, 'Pre-trained model:', 'Browse')
        bModelFolder.config(command=partial(self.select_path, eModelFolder, 'FILE'))
        eSaveFolder, bSaveFolder = self.get_label_entry_button(fTrain, 2, 'Save path:', 'Browse')
        bSaveFolder.config(command=partial(self.select_path, eSaveFolder, 'DIR'))
        eModelName = self.get_label_entry(fTrain, 3, 'Save model name:', '')
        bStartTrain = Button(fTrain, text='Start training')
        bStartTrain.grid(row=4, columnspan=3, sticky=N+S, padx=5, pady=5)
        # Log frame
        fLog = LabelFrame(pWindow, text='Log')
        sLog = Scrollbar(fLog)
        tLog = Text(fLog, wrap=WORD, yscrollcommand=sLog.set, bg="#000000", fg='#42f450', borderwidth=0, highlightthickness=0)
        sLog.config(command=tLog.yview)
        sLog.pack(side=RIGHT, fill=Y)
        tLog.pack(side=LEFT, fill=BOTH, expand=True)
        # Redirect all standard outputs to the tLog
        sys.stdout = StdoutRedirector(tLog)
        # Test frame, appears before Log frame, but need the log screen declared first
        fTest = LabelFrame(pWindow, text='Test')
        eTestFolder, bTestFolder = self.get_label_entry_button(fTest, 0, 'Test path:', 'Browse')
        bTestFolder.config(command=partial(self.select_path, eTestFolder, 'DIR'))
        eTestModel, bTestModel = self.get_label_entry_button(fTest, 1, 'Model:', 'Browse')
        bTestModel.config(command=partial(self.select_path, eTestModel, 'FILE'))
        bStartTest = Button(fTest, text='Start testing', command=partial(self.start_testing, eTestModel, eTestFolder))
        bStartTest.grid(row=2, columnspan=3, sticky=N+S, padx=5, pady=5)
        # Wired late because it needs entries created above.
        bStartTrain.config(command=partial(self.start_training, eTrainFolder, eSaveFolder, eModelName, eModelFolder))
        # set window title
        self.winfo_toplevel().title('Brain sub-cortical structure segmentation tool')
        # set geometry
        pWindow.add(fTrain, weight=33)
        pWindow.add(fTest, weight=33)
        pWindow.add(fLog, weight=33)
        pWindow.pack(fill=BOTH, expand=True)
        return pWindow
    def start_training(self, trainPath, savePath, modelName, savedModel):
        """Launch a TrainTask thread unless another task is still running."""
        if self.queue.empty():
            TrainTask(self.queue, trainPath, savePath, modelName, savedModel, [self.eT1Name, self.eGTName, self.eOutputName]).start()
        else:
            print 'WAITING FOR A PROCESS TO FINISH...'
    def start_testing(self, modelPath, testPath):
        """Launch a TestTask thread unless another task is still running."""
        if self.queue.empty():
            TestTask(self.queue, modelPath, testPath, [self.eT1Name, self.eGTName, self.eOutputName]).start()
        else:
            print 'WAITING FOR A PROCESS TO FINISH...'
    def tab_config(self, parent):
        """Build the 'Configurations' tab holding the file-name entries
        read by the train/test tasks."""
        pWindow = PanedWindow(parent, orient='vertical')
        frame = Frame(pWindow)
        self.eT1Name = self.get_label_entry(frame, 0, 'T1 name', 'T1.nii.gz')
        self.eGTName = self.get_label_entry(frame, 1, 'Ground truth name', 'ground_truth.nii.gz')
        self.eOutputName = self.get_label_entry(frame, 2, 'Output file name', 'seg_out.nii.gz')
        pWindow.add(frame)
        pWindow.pack(side=TOP, fill=BOTH, expand=True)
        return pWindow
    def show_main_window(self):
        """Create the notebook with both tabs and attach it to the master."""
        # tabs
        nMainWindow = Notebook(self.master, width=700, height=650)
        nMainWindow.add(self.tab_train_test(nMainWindow), text='Train and test')
        nMainWindow.add(self.tab_config(nMainWindow), text='Configurations')
        nMainWindow.pack(fill=BOTH, expand=True, ipadx=10, ipady=10)
    def select_path(self, entry, mode):
        """Open a file dialog ('DIR', 'FILE' or 'SAVE') and copy the chosen
        path into *entry*."""
        name = ''
        if mode == 'DIR':
            name = filedialog.askdirectory(title='Select path')
        if mode == 'FILE':
            name = filedialog.askopenfilename(title='Select pre-trained model', filetypes=(('HDF5 files', '*.h5'), ("all files","*.*")))
        if mode == 'SAVE':
            name = filedialog.asksaveasfilename(title='Save trained model', filetypes=(('HDF5 files', '*.h5'), ("all files","*.*")))
        entry.delete(0, END)
        entry.insert(0, name)
# Launch the GUI only when executed as a script; previously the window was
# created (and mainloop entered) as a side effect of importing this module.
if __name__ == '__main__':
    root = Tk()
    app = Application(master=root)
    app.mainloop()
| 6,843 | 2,314 |
"""
=======================
Yahoo Finance source
=======================
"""
import re
import requests
import time
from json import loads
from bs4 import BeautifulSoup
from yahoofinancials import YahooFinancials
# Yahoo Finance data source
# Yahoo Finance data source
class YahooFinanceSource(YahooFinancials):
    """Dividend-history data source built on top of ``yahoofinancials``.

    Scrapes the Yahoo Finance history page, keeps only DIVIDEND events and
    normalizes their dates.
    """

    def __init__(self, ticker):
        super(YahooFinanceSource, self).__init__(ticker)

    # private static method to scrap data from yahoo finance
    @staticmethod
    def _scrape_dividend_data(url, tech_type, statement_type):
        """Fetch *url* and return the raw ``HistoricalPriceStore`` payload
        embedded in the page's ``root.App.main`` JSON blob."""
        response = requests.get(url)
        time.sleep(7)  # crude rate limiting between requests
        soup = BeautifulSoup(response.content, "html.parser")
        # Raw strings: the previous non-raw patterns relied on the invalid
        # escape '\s' surviving, which is deprecated in Python 3.
        script = soup.find("script", text=re.compile(r"root\.App\.main")).text
        data = loads(re.search(r"root\.App\.main\s+=\s+(\{.*\})", script).group(1))
        stores = data["context"]["dispatcher"]["stores"]["HistoricalPriceStore"]
        return stores

    # Private Method to clean the dates of the newly returned historical stock data into readable format
    def _clean_historical_div_data(self, hist_data):
        """Return *hist_data* with date fields reformatted and event lists
        filtered down to DIVIDEND entries (each given a formatted_date)."""
        data = {}
        for k, v in hist_data.items():
            if 'date' in k.lower():
                cleaned_date = self.format_date(v, 'standard')
                data[k] = {'formatted_date': cleaned_date, 'date': v}
            elif isinstance(v, list):
                sub_dict_list = []
                for sub_dict in v:
                    # 'event_type' used to be named 'type', shadowing the builtin.
                    event_type = sub_dict.get('type', '')
                    if event_type.upper() == 'DIVIDEND':
                        sub_dict['formatted_date'] = self.format_date(sub_dict['date'], 'standard')
                        sub_dict_list.append(sub_dict)
                data[k] = sub_dict_list
            else:
                data[k] = v
        return data

    # Private method to build the history URL for the requested interval
    def _build_historical_dividend_url(self, ticker, hist_oj, filter='div'):
        """Build the Yahoo history URL for *ticker* over *hist_oj*'s
        start/end/interval.  (NOTE: 'filter' shadows the builtin but is kept
        for interface compatibility.)"""
        url = self._BASE_YAHOO_URL + ticker + '/history?period1=' + str(hist_oj['start']) + '&period2=' + \
              str(hist_oj['end']) + '&interval=' + hist_oj['interval'] + '&filter=' + filter + '&frequency=' + \
              hist_oj['interval']
        return url

    # Private Method to take scraped data and build a data dictionary entry
    def _create_dict_ent_div(self, ticker, statement_type, tech_type, report_name, hist_obj):
        """Scrape and clean dividend data for one ticker; return {TICKER: data}."""
        up_ticker = ticker.upper()
        YAHOO_URL = self._build_historical_dividend_url(up_ticker, hist_obj)
        re_data = self._scrape_dividend_data(YAHOO_URL, tech_type, statement_type)
        cleaned_re_data = self._clean_historical_div_data(re_data)
        dict_ent = {up_ticker: cleaned_re_data}
        return dict_ent

    # Public Method for user to get historical stock dividend data
    def get_historical_stock_dividend_data(self, start_date, end_date, time_interval):
        """Return dividend history between *start_date* and *end_date* at
        *time_interval* granularity."""
        interval_code = self.get_time_code(time_interval)
        start = self.format_date(start_date, 'unixstamp')
        end = self.format_date(end_date, 'unixstamp')
        hist_obj = {'start': start, 'end': end, 'interval': interval_code}
        data = self.get_stock_dividend_data('history', hist_obj=hist_obj)
        return data

    # Public Method to get stock data
    def get_stock_dividend_data(self, statement_type='history', tech_type='', report_name='', hist_obj=None):
        """Return {TICKER: cleaned_data} for one ticker or a list of tickers.

        ``hist_obj`` previously defaulted to a shared mutable ``{}``; it now
        defaults to None and a fresh dict is created per call.
        """
        if hist_obj is None:
            hist_obj = {}
        data = {}
        if isinstance(self.ticker, str):
            dict_ent = self._create_dict_ent_div(self.ticker, statement_type, tech_type, report_name, hist_obj)
            data.update(dict_ent)
        else:
            for tick in self.ticker:
                dict_ent = self._create_dict_ent_div(tick, statement_type, tech_type, report_name, hist_obj)
                data.update(dict_ent)
        return data
| 3,875 | 1,204 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class EnterpriseItem(scrapy.Item):
    """Item holding one scraped Enterprise rental search result."""
    # define the fields for your item here like:
    # When the search was performed and with what criteria.
    searchTime = scrapy.Field()
    searchCriteria = scrapy.Field()
    # Pickup (start) date as entered on the form (month / raw input / time).
    startDateMonth = scrapy.Field()
    startDateInput = scrapy.Field()
    startDateTime = scrapy.Field()
    # Return (end) date as entered on the form (month / raw input / time).
    endDateMonth = scrapy.Field()
    endDateInput = scrapy.Field()
    endDateTime = scrapy.Field()
    # presumably a promo/coupon code field — TODO confirm against the spider.
    optionalCode = scrapy.Field()
    location = scrapy.Field()
    # Vehicle class and pricing for the offer.
    car_class = scrapy.Field()
    car_price = scrapy.Field()
    car_total_price = scrapy.Field()
| 695 | 228 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 10:02:58 2020
@author: Xavier Jimenez
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
import numpy as np
import os
import shutil
import glob
import pandas as pd
import importlib
from joblib import Parallel, delayed
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings('ignore')
from functions import *
#------------------------------------------------------------------#
# # # # # Create catalog # # # # #
#------------------------------------------------------------------#
if __name__ == "__main__":
    # Command-line interface: each stage of the pipeline (clean / make /
    # join / plots / learning / optimization) is switched on by a flag.
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--nodes", required=False, type=int, nargs="?", const=1)
    parser.add_argument("-s", "--survey", required=False, type=str, nargs="?", const='test')
    # NOTE(review): with type=bool, any non-empty string argument (even
    # "False") parses as True, and the bare flag yields const=False — these
    # boolean options behave counter-intuitively; consider a str2bool
    # converter or action="store_true".
    parser.add_argument("-c", "--clean", required=False, type=bool, nargs="?", const=False)
    parser.add_argument("-m", "--make", required=False, type=bool, nargs="?", const=False)
    parser.add_argument("-j", "--join", required=False, type=bool, nargs="?", const=False)
    parser.add_argument("-g", "--generate_plots", required=False, type=bool, nargs="?", const=False)
    parser.add_argument("-p", "--preprocess", required=False, type=str, nargs="?", const=None)
    parser.add_argument("-l", "--learning", required=False, type=bool, nargs="?", const=False)
    parser.add_argument("-o", "--optimize", required=False, type=str, nargs="?", const=None)
    parser.add_argument("-a", "--algorithm", required=False, type=str, nargs="?", const='RF')
    parser.add_argument("-i", "--input", required=False, type=str)
    args = parser.parse_args()
    #------------------------------------------------------------------#
    # # # # # PS3PI # # # # #
    #------------------------------------------------------------------#
    path = os.getcwd() + '/'
    # Parameters come either from the default params module or from the
    # module named by --input.
    if args.input is None:
        import params
    else:
        params = importlib.import_module(args.input)
    # Fallback defaults when flags are omitted entirely.
    if args.nodes is None:
        args.nodes = 1
    if args.algorithm is None:
        args.algorithm = 'RF'
    if args.survey is None:
        args.survey = 'test'
    if args.survey == 'test':
        print('Modules loaded properly')
        if args.preprocess is None:
            args.preprocess = 'drop'
    elif args.survey == 'ps3pi_cfis' or args.survey == 'unions':
        bands = params.bands
        output_path = params.output_path
        output_name = params.output_name
        temp_path = params.temp_path
        #------------------------------------------------------------------#
        # # # # # CLEAN # # # # #
        #------------------------------------------------------------------#
        # Reset temp directories and recreate the working tree.
        if args.clean == True:
            GenFiles = GenerateFiles(args.survey, bands, temp_path, output_name, output_path)
            GenFiles.clean_temp_directories()
            GenFiles.make_directories()
        #------------------------------------------------------------------#
        # # # # # MAKE INDIVIDUAL TILE CATALOGS # # # # #
        #------------------------------------------------------------------#
        # Build one catalog per tile for every spectroscopic survey,
        # parallelized over tiles with joblib.
        if args.make == True:
            spectral_path = params.spectral_path
            spectral_names = params.spectral_names
            path_to_tile_run = params.path_to_tile_run
            spectral_surveys = params.spectral_surveys
            vignet = params.vignet
            cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
            for i in range(len(spectral_names)):
                cat.make_survey_catalog(spectral_path, spectral_names[i])
                # Default to the latest tile-run output directory when no
                # explicit input path is configured.
                if params.input_path == None:
                    out_dir = os.listdir(path_to_tile_run + args.survey + '/%s/output/'%(spectral_surveys[i]))[-1]
                    input_path = path_to_tile_run + args.survey + '/%s/output/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
                else:
                    input_path = params.input_path
                paste_dir = os.listdir(input_path)
                Parallel(n_jobs=args.nodes)(delayed(cat.make_catalog)(p, paste_dir, input_path, spectral_names[i], vignet=vignet) for p in tqdm(range(len(paste_dir))))
        #------------------------------------------------------------------#
        # # # # # JOIN INDIVIDUAL TILE CATALOGS # # # # #
        #------------------------------------------------------------------#
        if args.join == True:
            vignet = params.vignet
            cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
            cat.merge_catalogs(vignet=vignet)
        #------------------------------------------------------------------#
        # # # # # SAVE FIGURES # # # # #
        #------------------------------------------------------------------#
        if args.generate_plots == True:
            spectral_names = params.spectral_names
            GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, spectral_names=spectral_names, output_path=output_path)
            GenPlot.plot_matched_z_spec_hist()
            GenPlot.plot_unmatched_z_spec_hist()
        #------------------------------------------------------------------#
        # # # # # MACHINE LEARNING ALGORITHMS # # # # #
        #------------------------------------------------------------------#
        # Load (or build) the training DataFrame and optional feature-
        # engineered variants before running the regressors below.
        if args.learning == True:
            GenFiles = GenerateFiles(args.survey, bands, path, output_name, output_path=output_path)
            GenFiles.make_directories(output=True)
            path_to_csv = params.path_to_csv
            spectral_names = params.spectral_names
            weights = params.weights
            cv = params.cv
            max_evals = params.max_evals
            feature_engineering = params.feature_engineering
            feature_importance = params.feature_importance
            plot = params.plot
            if path_to_csv is None:
                if args.survey == 'ps3pi_cfis':
                    path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
                    ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
                    df, df_unmatched = ML.merge_cfis_r_cfht_u_medium_deep_i_g_z()
                    if feature_engineering == True:
                        # df_list = ML.feature_engineering(df, bands=['r', 'u', 'i', 'z', 'g'])
                        df_list = ML.feature_engineering(df, bands=['r', 'u', 'i', 'z', 'g'], color_order=['i', 'g' , 'r', 'z', 'u'])
                    else:
                        df_list = [df]
                    # print(df.head(10))
                    if plot == True:
                        ML.plot_corrmat(df)
                        GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, output_path=output_path, spectral_names=spectral_names)
                        # GenPlot.plot_mags(df, df_unmatched)
                elif args.survey == 'unions':
                    path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
                    ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
                    df = ML.dataframe()
                    df_unmatched = ML.unmatched_dataframe()
                    df = ML.gal_g()
                    if plot == True:
                        ML.plot_corrmat(df)
                        GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, output_path=output_path, spectral_names=spectral_names)
                        GenPlot.plot_mags(df, df_unmatched)
                else:
                    raise TypeError("--survey needs to be set to 'unions' or 'ps3pi_cfis', please specify the full path to your DataFrame")
            elif path_to_csv is not None:
                ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
                df = ML.dataframe()
                # ML.plot_corrmat(df)
            # Registry of all available regressors, keyed by CLI name.
            algs = {'RF': RandomForest, 'ANN': ArtificialNeuralNetwork, 'LASSO': LassoRegression, 'ENET': ElasticNetRegression,
                'XGB':XGBoost, 'KRR':KernelRidgeRegression, 'SVR': SupportVectorRegression, 'LGB': LightGBM, 'GBR': GradientBoostingRegression}
if args.algorithm == 'BEST':
algs = {'RF': RandomForest, 'ANN': ArtificialNeuralNetwork, 'SVR': SupportVectorRegression, 'GBR': GradientBoostingRegression}
best_score = 1
best_alg = 'none'
# alg_names = np.array(list(algs.items()))[:,1]
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
global_score = 1
best_dict = pd.DataFrame(data={}, index=['score', 'score std'])
y_pred_dict = {}
y_test_dict = {}
for alg_name in algs:
best_score= 1
alg = algs[alg_name]
print('[Feature engineering]')
print('---------------------------------------------------------------')
for df in df_list:
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
score = method.score()
print(list(df.columns))
print('[preprocess] %s'%score[4])
print('[%s '%alg_name +'score] {:.3f} ± {:.3f}'.format(score[5], score[6]))
if score[5] < best_score:
print('[NEW BEST]')
print("%s: "%alg_name + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(score[0], score[1], score[2]*100, score[3]*100), end='\r')
best_score = score[5]
best_score_std = score[6]
bscore = score
df_best = df
best_columns = df.columns
best_preprocess = score[4]
best_dict[alg_name] = [best_score, best_score_std]
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=best_preprocess, n_jobs=args.nodes)
_, y_pred, y_test = method.model()
y_pred_dict[alg] = y_pred
y_test_dict[alg] = y_test
break
best_dict.to_cs(path + 'output/%s/%s/files/'%(args.survey, output_name) + 'Best_scores_' + output_name + '.csv', index=False)
# score = method.score()
print('---------------------------------------------------------------')
print("%s: "%alg_name + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(bscore[0], bscore[1], bscore[2]*100, bscore[3]*100))
if best_score < global_score:
global_score = best_score
global_score_std = best_score_std
gscore = bscore
best_alg = alg_name
df_global = df_best
global_columns = best_columns
global_preprocess = best_preprocess
print('[NEW BEST] %s'%best_alg + ' score: {:.3f} ± {:.3f}'.format(global_score, global_score_std))
print('---------------------------------------------------------------')
best_dict.sort_values(by = 'score', axis = 1, inplace=True)
print(best_dict.head())
df_best = df_global
alg = algs[best_alg]
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
if feature_importance == True:
if best_alg != 'ANN':
method.permutation()
if plot == True:
method.plot(lim=1.8)
print('---------------------------------------------------------------')
print('[BEST] preprocess: %s'%global_preprocess)
print('[BEST] score: {:.3f} ± {:.3f}'.format(global_score, global_score_std))
print(list(global_columns))
print("[%s] "%args.algorithm + "%s: "%best_alg + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(gscore[0], gscore[1], gscore[2]*100, bscore[3]*100))
print('---------------------------------------------------------------')
            # Single-algorithm mode: run only the regressor named by -a.
            else:
                try:
                    alg = algs[args.algorithm]
                # NOTE(review): bare except hides KeyboardInterrupt etc.;
                # `except KeyError` would be the precise guard here.
                except:
                    raise TypeError('MLM is not defined')
                if weights == True:
                    cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
                    weights = cat.compute_weights(df, column = 'r')
                elif type(weights) == str:
                    weights = np.load(weights)
                else:
                    weights = None
                best_score = 1
                print('[Feature engineering]')
                print('---------------------------------------------------------------')
                # Score the algorithm on every feature-engineered DataFrame
                # and remember the best-performing configuration.
                for df in df_list:
                    method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
                    # method.plot(lim=1.8)
                    # method.permutation()
                    # df = method.filter()
                    # df.drop(columns=['r-z'], inplace=True)
                    score = method.score(df)
                    print(list(df.columns))
                    print('[preprocess] %s'%score[4])
                    print('[%s '%args.algorithm + 'score] {:.3f} ± {:.3f}'.format(score[5], score[6]))
                    if score[5] < best_score:
                        print('[NEW BEST]')
                        print("%s: "%args.algorithm + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(score[0], score[1], score[2]*100, score[3]*100))
                        best_score = score[5]
                        best_score_std = score[6]
                        bscore = score
                        df_best = df
                        best_columns = df.columns
                        best_preprocess = score[4]
                    # break
                # Re-fit on the winning DataFrame for diagnostics/plots.
                method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
                if feature_importance == True:
                    if args.algorithm != 'ANN':
                        method.permutation()
                if plot == True:
                    method.plot(lim=1.5)
                if params.morph_importance == True and params.weights == False and args.algorithm == 'RF':
                    method.morph_importance(df_best)
                print('---------------------------------------------------------------')
                print('[BEST] preprocess: %s'%best_preprocess)
                print('[BEST] score: {:.3f} ± {:.3f}'.format(best_score, best_score_std))
                print(list(best_columns))
                print("%s: "%args.algorithm + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(bscore[0], bscore[1], bscore[2]*100, bscore[3]*100))
                print('---------------------------------------------------------------')
        #------------------------------------------------------------------#
        # # # # # OPTIMIZE LEARNING ALGORITHMS # # # # #
        #------------------------------------------------------------------#
        # Hyper-parameter search for the chosen algorithm.
        # NOTE(review): this branch uses df_best / best_preprocess /
        # max_evals, which are only defined when the learning stage ran in
        # the same invocation — running -o alone would raise NameError.
        if args.optimize == 'HyperOpt' or args.optimize == 'RandomSearch' or args.optimize == 'GridSearch':
            # GenFiles = GenerateFiles(args.survey, bands, path, output_name, output_path=output_path)
            # GenFiles.make_directories(output=True)
            # path_to_csv = params.path_to_csv
            # max_evals = params.max_evals
            weights = params.weights
            # cv = params.cv
            algs = {'RF': RandomForestOptimizer, 'SVR': SVROptimizer, 'XGB': XGBoostOptimizer, 'KRR': KRROptimizer, 'ANN': ANNOptimizer}
            try:
                alg = algs[args.algorithm]
            except:
                raise ValueError('Method does not have an optimization algorithm')
            if weights == True:
                cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
                weights = cat.compute_weights(df_best, column = 'r')
            elif type(weights) == str:
                weights = np.load(weights)
            else:
                weights = None
            print('[%s] optimization'%args.optimize)
            # if args.algorithm == 'ANN':
            # ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, validation_set=True)
            # X_train, X_val, X_test, Y_train, Y_val, Y_test = ML.data()
            # X_train, Y_train, X_val, Y_val = data()
            # trials = Trials()
            # _, best_model = optim.minimize(model=model,data=data,algo=tpe.suggest, max_evals=max_evals, trials=trials)
            # Y_pred = best_model.predict(X_test, verbose = 0)
            # print(type(Y_pred), type(Y_test))
            # sigma, eta = sigma_eta(Y_test.to_numpy().flatten(), Y_pred.flatten())
            # print("%s Opt : "%args.algorithm + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
            # ML.plot_zphot_zspec(Y_pred.flatten(), method='ANN_Opt', lim=1.8)
            # ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
            # df = ML.dataframe()
            # ML.plot_corrmat(df)
            # ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=None, validation_set=False)
            # _, sigma, eta = ModelOptimizer.best_params(max_evals=10)
            # print("%s Opt : "%args.algorithm + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
            # if path_to_csv is None:
            # path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
            # ML = LearningAlgorithms(survey = args.survey, bands = bands, dataframe=df_best, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
            # df, df_unmatched = ML.merge_cfis_r_cfht_u_medium_deep_i_g_z()
            # ML.plot_corrmat(df_best, figure_name=args.algorithm+'_best_corrmat')
            ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=best_preprocess, n_jobs=args.nodes)
            # ModelOptimizer.debug()
            _, sigma, eta, score = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
            print('---------------------------------------------------------------')
            print('[BEST OPT] score: {:.3f}'.format(score))
            print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
            print('---------------------------------------------------------------')
        # elif path_to_csv is not None:
            # ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
            # df = ML.dataframe()
            # ML.plot_corrmat(df)
            # ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
            # _, sigma, eta = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
            # print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
        # else:
            # ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name)
            # df = ML.dataframe()
            # df = ML.preprocess(df, method = args.preprocess)
            # ML.plot_corrmat(df)
            # ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=False, validation_set=False)
            # _, sigma, eta = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
            # print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
    #------------------------------------------------------------------#
    # # # # # UNIONS # # # # #
    #------------------------------------------------------------------#
    # Legacy branch with hard-coded cluster paths; kept for reference.
    elif args.survey == 'unions_deprecated':
        spectral_path = '/home/mkilbing/astro/data/CFIS/spectro_surveys/'
        spectral_names = ['data_DR14_LRG_N', 'data_DR14_LRG_S', 'galaxy_DR12v5_CMASSLOWZTOT_North', 'galaxy_DR12v5_CMASSLOWZTOT_South','sdss_main_gal']
        # spectral_names = ['sdss_main_gal']
        spectral_surveys = ['SDSS', 'SDSS', 'eBOSS', 'eBOSS', 'SDSS_2']
        # spectral_surveys = ['SDSS_2']
        output_name = 'CFIS_matched_eBOSS_SDSS_catalog_RUIZ'
        # output_name = 'CFIS_matched_SDSS_2_catalog_RUIZ'
        output_path = path
        temp_path = '/n17data/jimenez/temp/'
        bands = ['R', 'U', 'I', 'Z']
        # out_dir = os.listdir("/n17data/jimenez/shaperun_unions/output_%s/"%(spectral_surveys[i]))[-1]
        # path_to_tile_run = '/n17data/jimenez/shaperun/'
        # input_path = path_to_tile_run + args.survey + '/%s/output/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
        # paste_dir = os.listdir(input_path)
        # NOTE(review): unlike the main branch, these stages are mutually
        # exclusive (elif), so only one stage runs per invocation here.
        if args.clean == True:
            GenFiles = GenerateFiles(args.survey, bands, temp_path)
            GenFiles.clean_temp_directories()
            GenFiles.make_directories()
        elif args.make == True:
            cat = MakeCatalogs(args.survey, bands, temp_path)
            # vignet = [False, False, False, False, False]
            for i in range(len(spectral_names)):
                cat.make_survey_catalog(spectral_path, spectral_names[i])
                out_dir = os.listdir("/n17data/jimenez/shaperun_unions/output_%s/"%(spectral_surveys[i]))[-1]
                paste_dir = os.listdir('/n17data/jimenez/shaperun_unions/output_%s/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir))
                input_path = '/n17data/jimenez/shaperun_unions/output_%s/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
                Parallel(n_jobs=args.nodes)(delayed(cat.make_catalog)(p, paste_dir, input_path, spectral_names[i], vignet=False) for p in tqdm(range(len(paste_dir))))
        elif args.join == True:
            cat = MakeCatalogs(args.survey, bands, temp_path)
            cat.merge_catalogs(output_name, vignet=False)
        elif args.generate_plots == True:
            GenPlot = GeneratePlots(args.survey, bands, temp_path, csv_name=output_name, spectral_names=spectral_names)
            # GenPlot.plot_d2d()
            GenPlot.plot_matched_r_i_i_z()
            GenPlot.plot_matched_u_r_r_i()
            GenPlot.plot_matched_z_spec_hist()
            # GenPlot.plot_unmatched_r_i_i_z()
            # GenPlot.plot_unmatched_u_r_r_i()
            GenPlot.plot_unmatched_z_spec_hist()
    # if args.survey != 'unions' or args.survey != 'ps3pi_cfis':
    # print("Survey must either be 'unions' or 'ps3pi_cfis'")
    # raise SyntaxError("Survey must either be 'unions' or 'ps3pi_cfis'")
| 26,510 | 8,005 |
#!/usr/bin/env python
import betterwin
# Global betterwin configuration, loaded once at import time.
G=betterwin.confcfg.load_global_config()
def read_skell(skel):
    """Split a skeleton template file around its cfg markers.

    Returns ``[[header], [footer]]`` where *header* is every line before
    the first line containing ``cfg_start`` and *footer* is every line
    from the first line containing ``cfg_stop`` onward.  Either part is
    ``None`` when its marker is absent.
    """
    with open(skel) as fh:
        lines = fh.readlines()

    def _before_start(all_lines):
        # Lines preceding the first 'cfg_start' marker.
        for pos, text in enumerate(all_lines):
            if 'cfg_start' in text:
                return all_lines[0:pos]

    def _from_stop(all_lines):
        # Lines starting at the first 'cfg_stop' marker.
        for pos, text in enumerate(all_lines):
            if 'cfg_stop' in text:
                return all_lines[pos:]

    return [[_before_start(lines)], [_from_stop(lines)]]
def create_binpy():
    # Unimplemented stub — presumably meant to generate the per-binary
    # wrapper scripts; TODO confirm intent before filling in.
    pass
def winebin():
    """Yield the names of the standard Wine helper binaries, in order.

    Yields:
        str: one binary name per iteration, starting with 'msiexec'.
    """
    bins = ['msiexec', 'notepad', 'regedit', 'regsvr32', 'wine', 'wine64',
            'wineboot', 'winecfg', 'wineconsole', 'winepath', 'wineserver']
    for name in bins:
        yield name
    # BUG FIX: the original ended with `print((next(bins))` — an unbalanced
    # parenthesis (SyntaxError) that, even if repaired, would have raised
    # TypeError because `bins` is a list, not an iterator.  Removed.
def main(pfx):
    # Entry point: stores the prefix/executable and probes the binary list.
    # NOTE(review): each winebin() call builds a FRESH generator, so both
    # prints emit the first name ('msiexec') — confirm this is intended.
    PFX=pfx
    EXEC=''
    print(next(winebin()))
    print(next(winebin()))
if __name__ == '__main__':
| 776 | 349 |
import torch.nn as nn
import torch
import torch.nn.functional as F
import absl.flags as flags
from absl import app
FLAGS = flags.FLAGS
# Point_center encode the segmented point cloud
# one more conv layer compared to original paper
class Pose_Ts(nn.Module):
    """Head that regresses translation and size from per-point features.

    A shared MLP (1x1 convs) lifts the (B, FLAGS.feat_c_ts, N) input to
    1024 channels, a max-pool collapses the point dimension, and two more
    1x1 convs emit FLAGS.Ts_c channels, split into translation (first 3)
    and size (next 3).
    """

    def __init__(self):
        super(Pose_Ts, self).__init__()
        self.f = FLAGS.feat_c_ts  # input feature channels
        self.k = FLAGS.Ts_c       # output channels (translation + size)
        # Submodules are declared in the original order so the state_dict
        # layout is unchanged.
        self.conv1 = torch.nn.Conv1d(self.f, 1024, 1)
        self.conv2 = torch.nn.Conv1d(1024, 256, 1)
        self.conv3 = torch.nn.Conv1d(256, 256, 1)
        self.conv4 = torch.nn.Conv1d(256, self.k, 1)
        self.drop1 = nn.Dropout(0.2)
        self.bn1 = nn.BatchNorm1d(1024)
        self.bn2 = nn.BatchNorm1d(256)
        self.bn3 = nn.BatchNorm1d(256)

    def forward(self, x):
        """Return (translation, size), each of shape (B, 3)."""
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        # Global max-pool over the point dimension.
        h = torch.max(h, 2, keepdim=True)[0]
        h = F.relu(self.bn3(self.conv3(h)))
        h = self.drop1(h)
        h = self.conv4(h)
        h = h.squeeze(2).contiguous()
        return h[:, 0:3], h[:, 3:6]
def main(argv):
    """Smoke-test Pose_Ts on a random batch of point features."""
    feature = torch.rand(3, 3, 1000)
    # Kept for reference only — Pose_Ts.forward does not take the object id.
    obj_id = torch.randint(low=0, high=15, size=[3, 1])
    net = Pose_Ts()
    # BUG FIX: forward(self, x) accepts a single tensor; the original call
    # net(feature, obj_id) passed a second positional argument and raised
    # TypeError.  (Also dropped the leftover debug statement `t = 1`.)
    out = net(feature)
# absl entry point: app.run parses the FLAGS from argv before calling main.
if __name__ == "__main__":
    app.run(main)
| 1,348 | 611 |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class XGBoostExportNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
    """Greenflow node that serialises an XGBoost model to a file on disk."""

    def init(self):
        """Declare the node's ports and (empty) column metadata."""
        TemplateNodeMixin.init(self)
        self.INPUT_PORT_NAME = 'model_in'
        self.OUTPUT_PORT_NAME = 'filename'
        port_type = PortsSpecSchema.port_type
        # Input accepts a raw Booster or a dict wrapping one; the output
        # is the path the model was written to.
        self.template_ports_setup(
            in_ports={
                self.INPUT_PORT_NAME: {
                    port_type: ["xgboost.Booster", "builtins.dict"]
                }
            },
            out_ports={
                self.OUTPUT_PORT_NAME: {
                    port_type: ["builtins.str"]
                }
            }
        )
        # No columns are required on the input and none are added on the
        # output (both dicts are empty).
        self.template_meta_setup(
            in_ports={self.INPUT_PORT_NAME: {}},
            out_ports={
                self.OUTPUT_PORT_NAME: {
                    MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
                    MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
                    MetaDataSchema.META_DATA: {}
                }
            }
        )

    def conf_schema(self):
        """Return the configuration schema (a single required 'path')."""
        json = {
            "title": "XGBoost Export Configure",
            "type": "object",
            "description": """Export the xgboost model to a file
            """,
            "properties": {
                "path": {
                    "type": "string",
                    "description":
                        """The output filepath for the xgboost
                        model"""
                }
            },
            "required": ["path"],
        }
        return ConfSchema(json=json, ui={})

    def process(self, inputs):
        """
        Save the incoming model to the configured file path.

        Arguments
        -------
        inputs: dict
            maps port name to input object; 'model_in' carries either an
            xgboost Booster or a dict with the Booster under 'booster'.

        Returns
        -------
        dict
            maps the output port name to the written file path.
        """
        booster = inputs[self.INPUT_PORT_NAME]
        # A dict input wraps the trained Booster under the 'booster' key.
        if isinstance(booster, dict):
            booster = booster['booster']
        out_path = get_file_path(self.conf['path'])
        booster.save_model(out_path)
        return {self.OUTPUT_PORT_NAME: out_path}
| 2,653 | 750 |
import mandelbrot as mand
from PIL import Image
# Output image size in pixels; `scale` is the zoom factor used by
# pixelToCoord when mapping pixels to the complex plane.
width = 1280
height = 720
scale = 2
def pixelToCoord(pos, img_height=None, view_scale=None):
    """Map a pixel position to complex-plane coordinates.

    Both axes are normalised by the image *height* (not width), so one
    pixel step is the same distance horizontally and vertically; the y
    term is negated so the rendered image is not upside-down.

    Args:
        pos: (x, y) pixel position.
        img_height: optional override for the module-level ``height``
            (generalisation; defaults to the old behaviour).
        view_scale: optional override for the module-level ``scale``.

    Returns:
        (re, im) coordinates as a tuple of floats.
    """
    (x, y) = pos
    h = height if img_height is None else img_height
    s = scale if view_scale is None else view_scale
    return (4 * (x / h - 0.5) / s, -4 * (y / h - 0.5) / s)
def main():
    """Render the Mandelbrot set pixel-by-pixel and save it as output2.png."""
    fractal = mand.mandelbrot(2)
    canvas = Image.new('RGB', (width, height), color='white')
    for row in range(0, height):
        for col in range(0, width):
            re_part, im_part = pixelToCoord((col, row))
            # Points that stay bounded after 1024 iterations are painted black.
            if fractal.isInSet(complex(re_part, im_part), 1024):
                canvas.putpixel((col, row), (0, 0, 0))
        # Coarse progress indicator, one line every 25 rows.
        if row % 25 == 0:
            print("Row " + str(row))
    canvas.save("output2.png")
# Render the image when executed as a script.
if __name__ == "__main__":
    main()
| 623 | 259 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.utils.weight_init import trunc_normal_
#adapted from open-mmlab implementation of swin transformer
class RelativePositionBias(nn.Module):
    """Learnable relative position bias for windowed attention.

    Holds a ``(2*Wh - 1) * (2*Ww - 1) x num_heads`` parameter table and a
    precomputed flat index that maps every (query, key) position pair in
    a ``Wh x Ww`` window to a row of that table.
    """

    def __init__(self,
                 window_size=(7, 7),
                 num_heads=8
                 ):
        super().__init__()
        self.window_size = window_size
        self.num_heads = num_heads
        # define parameter table and idx of relative position bias
        Wh, Ww = self.window_size
        # One table row per possible (dy, dx) offset inside the window.
        num_rows = (2 * Wh - 1) * (2 * Ww - 1)
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(num_rows, num_heads)
        )
        # NOTE(review): this index construction is "adapted" from the
        # open-mmlab Swin implementation (see header comment); verify it
        # reproduces the upstream relative_position_index before reuse.
        rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
        rel_position_index = rel_index_coords + rel_index_coords.T
        rel_position_index = rel_position_index.flip(1).contiguous().view(-1)
        # Buffer, not a parameter: saved in state_dict but never trained.
        self.register_buffer('relative_position_index', rel_position_index)
        self.init_weights()

    def init_weights(self): #important!
        # Truncated-normal init of the bias table, std=0.02.
        trunc_normal_(self.relative_position_bias_table, std=0.02)

    def forward(self, *args, **kwargs):
        # Gather the per-pair biases and reshape to (1, nH, Wh*Ww, Wh*Ww);
        # args/kwargs are accepted but ignored.
        Wh, Ww = self.window_size
        bias = self.relative_position_bias_table[self.relative_position_index]
        bias = bias.view(Wh * Ww, Wh * Ww, -1)
        bias = bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        bias = bias.unsqueeze(0)  # 1 nH Wh*Ww Wh*Ww
        return bias

    @staticmethod
    def double_step_seq(step1, len1, step2, len2):
        # Outer sum of two arithmetic sequences, flattened to (1, len1*len2).
        seq1 = torch.arange(0, step1 * len1, step1)
        seq2 = torch.arange(0, step2 * len2, step2)
        return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
| 1,699 | 609 |
#Import some stuff
import os
import zipfile
import ConfigParser
import string
import argparse
from xml.etree.ElementTree import ElementTree
from constants import SHORT_NAME, VERSION_NUMBER, FULL_NAME, GEOGEBRA_XML_LOCATION
from diagram import AsyDiagram, doCompileDiagramObjects, drawDiagram
# Argument parser {{{
parser = argparse.ArgumentParser(
    description = "%s %s, by v_Enhance: %s" %(SHORT_NAME, VERSION_NUMBER, FULL_NAME),
    epilog = "Note: This is, and probably always will be, an unfinished work. It may not always produce large, in-scale, clearly labelled diagram made with drawing instruments (compass, ruler, protractor, graph paper, carbon paper)."
)
parser.add_argument("FILENAME",
    action = "store",
    metavar = "FILE",
    help = "The .ggb file to be converted. Obviously,this argument is required."
)
#Non-bool arguments
parser.add_argument('--size', '-s',
    action = "store",
    dest = "IMG_SIZE",
    metavar = "SIZE",
    default = "11cm",
    help = "The size of the image to be produce. Defaults to 11cm."
)
parser.add_argument('--linescale',
    action = "store",
    dest = "LINE_SCALE_FACTOR",
    metavar = "FACTOR",
    default = 2011,
    help = "Defines the constant by which lines are extended. The image may break if this is too small, since interesecting lines may return errors. Default is 2011."
)
# BUG FIX: the help text previously claimed "0.4 by default" while the
# actual default below is 0.8; the text now matches the code.
parser.add_argument('--labelscale',
    action = "store",
    dest = "LABEL_SCALE_FACTOR",
    metavar = "FACTOR",
    default = 0.8,
    help = "Defines the constant LSF which is used when labelling points. This is 0.8 by default."
)
parser.add_argument('--fontsize',
    action = "store",
    dest = "FONT_SIZE",
    metavar = "SIZE",
    default = "10pt",
    help = "Default font size, in arbitrary units. Defaults to \'10pt\'."
)
parser.add_argument('--config',
    action = "store",
    dest = "CONFIG_FILENAME",
    metavar = "FILENAME",
    default = "",
    help = "If specified, uses the specified .cfg files for this diagram only. Defaults to FILENAME.cfg"
)
#Bool arguments
parser.add_argument("--xml",
    action = "store_const",
    dest = "DO_XML_ONLY",
    const = 1,
    default = 0,
    help = "Prints the XML of the input file and exits. Mainly for debugging"
)
parser.add_argument('--clip',
    action = "store_const",
    dest = "CLIP_IMG",
    const = 1,
    default = 0,
    help = "If true, clips the image according to the viewport specified in Geogebra. Defaults to false."
)
parser.add_argument('--concise',
    action = "store_const",
    dest = "CONCISE_MODE",
    const = 1,
    default = 0,
    help = "Turns on concise mode, which shortens the code. By default, this is turned off."
)
parser.add_argument('--cse', '--cse5',
    action = "store_const",
    dest = "CSE_MODE",
    const = 1,
    default = 0,
    help = "Allows the usage of CSE5 whenever possible."
)
parser.add_argument('--verbose',
    action = "store_const",
    dest = "CONCISE_MODE",
    const = 0,
    default = 0,
    help = "Turns off concise mode. This is the default."
)
# BUG FIX: --nocse previously used const = 1, which *enabled* CSE_MODE —
# the opposite of what its help text says.  It must clear the flag, the
# same way --verbose clears CONCISE_MODE above.
parser.add_argument('--nocse',
    action = "store_const",
    dest = "CSE_MODE",
    const = 0,
    default = 0,
    help = "Forbids the usage of CSE5 except when necessary. This is the default."
)
parser.add_argument('--csecolors',
    action = "store_const",
    dest = "CSE_COLORS",
    const = 1,
    default = 0,
    help = "When using CSE5, use the default pathpen and pointpen (blue/red). This is off by default."
)
parser.add_argument('--version',
    action = "version",
    version = "DRAGON %s, by v_Enhance" %VERSION_NUMBER
)
# }}}
# Parse argv at import time and coerce the numeric options to float.
opts = vars(parser.parse_args())
opts['LINE_SCALE_FACTOR'] = float(opts['LINE_SCALE_FACTOR'])
opts['LABEL_SCALE_FACTOR'] = float(opts['LABEL_SCALE_FACTOR'])
# NOTE: Python 2 script (print statements, ConfigParser, string.upper).
if __name__ == "__main__":
    #Get the desired file and parse it
    FILENAME = opts['FILENAME']
    if not "." in FILENAME:
        #Extension isn't given, let's assume it was omitted
        FILENAME += ".ggb"
    elif FILENAME[-1] == ".":
        #Last character is ".", add in "ggb"
        FILENAME += "ggb"
    # A .ggb file is a zip archive containing the construction XML.
    ggb = zipfile.ZipFile(FILENAME)
    xmlFile = ggb.open(GEOGEBRA_XML_LOCATION)
    #Read configuration file
    config_filename = opts['CONFIG_FILENAME']
    if config_filename.strip() == "":
        config_filename = FILENAME[:FILENAME.find('.')] + '.cfg'
    label_dict = {}
    if os.path.isfile(config_filename):
        config = ConfigParser.RawConfigParser()
        config.optionxform = str # makes names case-sensitive
        config.read(config_filename)
        # [var] section entries override the CLI options (upper-cased keys).
        # NOTE(review): eval() on config values executes arbitrary code from
        # the .cfg file — acceptable only for trusted, local config.
        var_cfg = config.items("var") if config.has_section("var") else {}
        for key, val in var_cfg:
            try:
                opts[string.upper(key)] = eval(val)
            except (NameError, SyntaxError):
                opts[string.upper(key)] = val
        # [label] section entries become per-point label scale expressions.
        label_cfg = config.items("label") if config.has_section("label") else {}
        for key, val in label_cfg:
            label_dict[key] = "lsf * " + val
    # Print XML file only, then exit
    if opts['DO_XML_ONLY']:
        print ''.join(xmlFile.readlines())
        exit()
    #Convert to tree
    ggb_tree = ElementTree()
    ggb_tree.parse(xmlFile)
    #Retrieve the provided values of the viewport {{{
    window_width = float(ggb_tree.find("euclidianView").find("size").attrib["width"])
    window_height = float(ggb_tree.find("euclidianView").find("size").attrib["height"])
    xzero = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["xZero"])
    yzero = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["yZero"])
    # NOTE(review): x uses attrib "scale" but y uses "yscale" — presumably
    # matching the Geogebra XML schema; confirm against a sample .ggb file.
    xscale = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["scale"])
    yscale = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["yscale"])
    #Compute the viewport coordinates from this information
    xmin = -xzero/float(xscale)
    xmax = (window_width - xzero)/float(xscale)
    ymin = -(window_height -yzero)/float(yscale)
    ymax = yzero/float(yscale)
    view = (xmin, xmax, ymin, ymax)
    # }}}
    #Do the construction
    construct_tree = ggb_tree.find("construction")
    theMainDiagram = AsyDiagram()
    doCompileDiagramObjects(construct_tree, theMainDiagram)
    # Emit Asymptote source; the alpha glyph is transliterated for safety.
    if opts['CLIP_IMG'] == 0:
        print drawDiagram(theMainDiagram, label_dict, opts=opts).replace(u"\u03B1", "alpha")
    else:
        print drawDiagram(theMainDiagram, label_dict, view=view, opts=opts).replace(u"\u03B1", "alpha")
| 6,065 | 2,320 |
import torch
from utils import get_teacher1, get_teacher2, get_student
def collect_model(args, data_info_s, data_info_t1, data_info_t2):
    """Build the model dictionary holding the GNNs and their optimizers.

    Args:
        args (parse_args): parser arguments
        data_info_s (dict): data information for the student
        data_info_t1 (dict): data information for teacher #1
        data_info_t2 (dict): data information for teacher #2

    Returns:
        dict: ``[model_name][model/optimizer]``; only the student gets an
        optimizer — the teachers' optimizer slots are ``None``.
    """
    # Negative gpu index means CPU-only.
    if args.gpu < 0:
        device = torch.device("cpu")
    else:
        device = torch.device("cuda:" + str(args.gpu))

    # Instantiate the student and the two teacher GNNs on the target device.
    student = get_student(args, data_info_s)
    student.to(device)
    teacher1 = get_teacher1(args, data_info_t1)
    teacher1.to(device)
    teacher2 = get_teacher2(args, data_info_t2)
    teacher2.to(device)

    # Only the student is trained here, so only it gets an optimizer.
    student_optimizer = torch.optim.Adam(
        student.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    return {
        's_model': {'model': student, 'optimizer': student_optimizer},
        't1_model': {'model': teacher1, 'optimizer': None},
        't2_model': {'model': teacher2, 'optimizer': None},
    }
| 1,714 | 542 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the core Item/Board/Pin/Follow tables."""

    def forwards(self, orm):
        """Apply the migration: create the four core tables."""
        # Adding model 'Item'
        db.create_table(u'core_item', (
            (u'id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['auth.User'])),
            ('image', self.gf('django.db.models.fields.files.ImageField')
             (max_length=100)),
            ('source_url', self.gf('django.db.models.fields.TextField')()),
            ('message', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Item'])

        # Adding model 'Board'
        db.create_table(u'core_board', (
            (u'id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')
             (max_length=255)),
            ('description', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')
             (max_length=50)),
        ))
        db.send_create_signal(u'core', ['Board'])

        # Adding model 'Pin'
        db.create_table(u'core_pin', (
            (u'id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['auth.User'])),
            ('item', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['core.Item'])),
            ('board', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['core.Board'])),
            ('influencer', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='influenced_pins', to=orm['auth.User'])),
            ('message', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Pin'])

        # Adding model 'Follow'
        db.create_table(u'core_follow', (
            (u'id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='following_set', to=orm['auth.User'])),
            ('target', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='follower_set', to=orm['auth.User'])),
            ('deleted_at', self.gf('django.db.models.fields.DateTimeField')
             (null=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Follow'])

    def backwards(self, orm):
        """Reverse the migration: drop the four core tables."""
        # Deleting model 'Item'
        db.delete_table(u'core_item')

        # Deleting model 'Board'
        db.delete_table(u'core_board')

        # Deleting model 'Pin'
        db.delete_table(u'core_pin')

        # Deleting model 'Follow'
        db.delete_table(u'core_follow')

    # Frozen ORM state used by South to build the `orm` object above.
    # Auto-generated — do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.board': {
            'Meta': {'object_name': 'Board'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'core.follow': {
            'Meta': {'object_name': 'Follow'},
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'follower_set'", 'to': u"orm['auth.User']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following_set'", 'to': u"orm['auth.User']"})
        },
        u'core.item': {
            'Meta': {'object_name': 'Item'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'core.pin': {
            'Meta': {'object_name': 'Pin'},
            'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Board']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'influencer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'influenced_pins'", 'to': u"orm['auth.User']"}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Item']"}),
            'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }

    complete_apps = ['core']
| 8,759 | 2,789 |
r"""
===================================================
ConeCyl (:mod:`desicos.abaqus.conecyl`)
===================================================
.. currentmodule:: desicos.abaqus.conecyl
Cone/Cylinder Model
=====================
Figure 1 provides a schematic view of the typical model created using this
module. Two coordinate systems are defined: one rectangular with axes `X_1`,
`X_2`, `X_3` and a cylindrical with axes `R`, `Th`, `Z`.
.. _figure_conecyl:
.. figure:: ../../../figures/modules/abaqus/conecyl/conecyl_model.png
:width: 400
Figure 1: Cone/Cylinder Model
The complexity of the actual model created in Abaqus goes beyond the
simplification above.
Boundary Conditions
===================
Based on the coordinate systems shown in Figure 1 the following boundary
condition parameters can be controlled:
- constraint for radial and circumferential displacement (`u_R` and `v`) at
the bottom and top edges
- simply supported or clamped bottom and top edges, consisting of the
rotational constraint along the meridional coordinate, called `\phi_x`.
- use of resin rings as described in :ref:`the next section <resin_rings>`
- the use of distributed or concentrated load at the top edge will be
automatically determined depending on the attributes of the current
:class:`.ConeCyl` object
- application of shims at the top edge as detailed in
:meth:`.ImpConf.add_shim_top_edge`, following this example::
from desicos.abaqus.conecyl import ConeCyl
cc = ConeCyl()
cc.from_DB('castro_2014_c02')
cc.impconf.add_shim(thetadeg, thick, width)
- application of uneven top edges as detailed in
:meth:`.UnevenTopEdge.add_measured_u3s`, following this example::
thetadegs = [0.0, 22.5, 45.0, 67.5, 90.0, 112.5, 135.0, 157.5, 180.0,
202.5, 225.0, 247.5, 270.0, 292.5, 315.0, 337.5, 360.0]
u3s = [0.0762, 0.0508, 0.1270, 0.0000, 0.0000, 0.0762, 0.2794, 0.1778,
0.0000, 0.0000, 0.0762, 0.0000, 0.1016, 0.2032, 0.0381, 0.0000,
0.0762]
cc.impconf.add_measured_u3s_top_edge(thetadegs, u3s)
.. _resin_rings:
Resin Rings
===========
When resin rings are used the actual boundary condition will be determined by
the parameters defining the resin rings (cf. Figure 2), and therefore no clamped conditions
will be applied in the shell edges.
.. figure:: ../../../figures/modules/abaqus/conecyl/resin_rings.png
:width: 400
Figure 2: Resin Rings
Defining resin rings can be done following the example below, where each
attribute is detailed in the :class:`.ConeCyl` class description::
from desicos.abaqus.conecyl import ConeCyl
    cc = ConeCyl()
cc.from_DB('castro_2014_c02')
cc.resin_add_BIR = False
cc.resin_add_BOR = True
cc.resin_add_TIR = False
cc.resin_add_TOR = True
cc.resin_E = 2454.5336
cc.resin_nu = 0.3
cc.resin_numel = 3
cc.resin_bot_h = 25.4
cc.resin_top_h = 25.4
cc.resin_bir_w1 = 25.4
cc.resin_bir_w2 = 25.4
cc.resin_bor_w1 = 25.4
cc.resin_bor_w2 = 25.4
cc.resin_tir_w1 = 25.4
cc.resin_tir_w2 = 25.4
cc.resin_tor_w1 = 25.4
cc.resin_tor_w2 = 25.4
The ConeCyl Class
=================
.. automodule:: desicos.abaqus.conecyl.conecyl
:members:
"""
from __future__ import absolute_import
from .conecyl import *
| 3,309 | 1,327 |
def generate_structure(thread_len, max_posts):
    """Build time-delay ids, structure ids and an attention mask for a thread.

    Real posts fill the first ``thread_len`` slots; the remaining
    ``max_posts - thread_len`` slots are padding (time-delay 1, structure
    token 5, mask 0).  Each structure row marks one post as the anchor
    (token 4), earlier posts as 3 and later real posts as 2.  Every
    returned value is wrapped in a single-element batch list.
    """
    pad = max_posts - thread_len
    time_delay_ids = [0] * thread_len + [1] * pad
    structure_ids = []
    for anchor in range(thread_len):
        row = [3] * anchor + [4] + [2] * (thread_len - 1 - anchor) + [5] * pad
        structure_ids.append(row)
    # Padding rows are all-5 (replicated, matching the original's aliasing).
    structure_ids += [[5] * max_posts] * pad
    post_attention_mask = [1] * thread_len + [0] * pad
    return [time_delay_ids], [structure_ids], [post_attention_mask]
| 465 | 180 |
class _R(int):
def __repr__(self):
return "r%s"%(super(_R, self).__repr__(),)
__str__ = __repr__
class _F(int):
def __repr__(self):
return "fr%s"%(super(_F, self).__repr__(),)
__str__ = __repr__
class _V(int):
def __repr__(self):
return "vr%s"%(super(_V, self).__repr__(),)
__str__ = __repr__
# Pre-instantiated register singletons, indexed by number: 32 integer
# registers (r0..r31), 32 floating-point registers (fr0..fr31), 64 vector
# registers (vr0..vr63), and the eight condition-register fields
# (crf0..crf7) as plain ints.
r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, \
r13, r14, r15, r16, r17, r18, r19, r20, r21, r22, \
r23, r24, r25, r26, r27, r28, r29, r30, r31 = map(_R, range(32))
fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9, fr10, fr11, fr12, \
fr13, fr14, fr15, fr16, fr17, fr18, fr19, fr20, fr21, fr22, \
fr23, fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31 = map(_F, range(32))
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr8, vr9, vr10, vr11, vr12, vr13, \
vr14, vr15, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, vr24, vr25, \
vr26, vr27, vr28, vr29, vr30, vr31, vr32, vr33, vr34, vr35, vr36, vr37, \
vr38, vr39, vr40, vr41, vr42, vr43, vr44, vr45, vr46, vr47, vr48, \
vr49, vr50, vr51, vr52, vr53, vr54, vr55, vr56, vr57, vr58, vr59, vr60, \
vr61, vr62, vr63 = map(_V, range(64))
crf0, crf1, crf2, crf3, crf4, crf5, crf6, crf7 = range(8)
| 1,230 | 725 |
"""Problem 9: Special Pythagorean triplet.
Brute force."""
import unittest
def find_triple(s):
    """Return a*b*c for the Pythagorean triple a^2 + b^2 = c^2 with a + b + c == s.

    Brute force: for each c (starting at 1), sweep b upward and a downward
    over all splits of a + b == s - c; when a bottoms out at 1, advance c
    and restart the sweep.

    BUG FIX: the original hard-coded the perimeter 1000 (via 998/999 and
    1000 - b - c) and ignored its ``s`` parameter entirely; it now honours
    ``s``, with identical behaviour for s == 1000.

    Returns:
        int | None: the product a*b*c, or None if no such triple exists.
    """
    a, b, c = s - 2, 1, 1
    while b < s - 1:
        if a**2 + b**2 == c**2:
            return a*b*c
        if a == 1:
            # Exhausted all splits of a + b for this c; advance c.
            c += 1
            b = 1
            a = s - b - c
        else:
            b += 1
            a -= 1
    return None
# Solve the s = 1000 instance (Project Euler problem 9) when run as a script.
if __name__ == "__main__":
    print(find_triple(1000))
| 445 | 191 |
import uuid
import datetime
import utilities.file_utilities as file_utilities
# strftime pattern for ebXML timestamps, e.g. "2024-01-31T09:30:00Z".
# The trailing 'Z' is a literal, so the formatted time must be UTC.
EBXML_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def get_uuid():
    """Generate a UUID suitable for sending in messages to Spine.

    :return: A string representation of the UUID (upper-cased).
    """
    new_id = uuid.uuid4()
    return str(new_id).upper()
def get_timestamp():
    """Generate a timestamp in a format suitable for sending in ebXML messages.

    :return: A string representation of the current UTC time.
    """
    # FIX: datetime.utcnow() is deprecated (it returns a *naive* datetime);
    # use an aware UTC datetime instead.  The rendered string is unchanged
    # because EBXML_TIMESTAMP_FORMAT contains no offset directive.
    current_utc_time = datetime.datetime.now(datetime.timezone.utc)
    return current_utc_time.strftime(EBXML_TIMESTAMP_FORMAT)
def load_test_data(message_dir, filename_without_extension):
    """Load a test message and its ebXML part from *message_dir*.

    Reads ``<name>.msg`` and ``<name>.ebxml``, substitutes the ebXML text
    into the message's ``{{ebxml}}`` placeholder, and returns the pair
    ``(message, ebxml)``.
    """
    msg_path = message_dir / (filename_without_extension + ".msg")
    ebxml_path = message_dir / (filename_without_extension + ".ebxml")
    ebxml = file_utilities.get_file_string(ebxml_path)
    message = file_utilities.get_file_string(msg_path).replace("{{ebxml}}", ebxml)
    return message, ebxml
| 920 | 291 |
from test_couch import *
from test_toggle import *
from test_quickcache import *
| 81 | 24 |
import sqlite3
from helpers import get_db_path, get_timeframes
from traceback import print_tb
timeframes = get_timeframes()
print(timeframes)

for timeframe in timeframes:
    # FIX: sqlite3's `with connection:` only manages the transaction, not
    # the connection itself, so the original leaked one open connection per
    # timeframe.  Open explicitly and close in `finally` (which also rolls
    # back any transaction left pending by a failure).
    connection = sqlite3.connect(get_db_path(timeframe))
    try:
        c = connection.cursor()
        print("Cleaning up!")  # FIX: typo was "Cleanin up!"
        c.execute('BEGIN TRANSACTION')
        # Remove rows whose parent is missing or unusable.
        sql = "DELETE FROM parent_reply WHERE parent IS NULL OR parent == 'False' OR parent == '0'"
        c.execute(sql)
        connection.commit()
        # c.execute("VACUUM")
        # connection.commit()
        sql = "SELECT COUNT(comment_id) FROM parent_reply"
        c.execute(sql)
        result = c.fetchone()
        if result is not None:
            res = result[0]
            print(f'Cleanup done, paired rows: {res}')
    except Exception as e:
        # Best-effort per-timeframe cleanup: report and continue.
        print('Something broke')
        print(e)
    finally:
        connection.close()

print('Done')
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
import pandas as pd
import glob
import re
from itertools import combinations
import matplotlib
# Render all text with LaTeX — the panel titles below use math-mode strings.
matplotlib.rcParams['text.usetex'] = True
def plot_probabilities(X, probabilities, titles, suptitle):
    """Scatter X coloured by each probability vector, one titled panel each.

    Panels are laid out two per row; with an odd count > 1 the last axis
    is hidden.  Returns the matplotlib figure.
    """
    norm = plt.Normalize(0, 1)
    n = len(titles)
    nrows = int(np.ceil(n / 2))
    sns.set_context('paper')
    cmap = sns.cubehelix_palette(rot=-.5, light=1.5, dark=-.5, as_cmap=True)
    fig, axes = plt.subplots(nrows, min(n, 2))
    if n < 2:
        # Single panel: `axes` is a bare Axes, not an array.
        axes.scatter(X[:, 0], X[:, 1], c=probabilities[0],
                     cmap=cmap, norm=norm, edgecolor='k', s=60)
        axes.set_title(titles[0])
    else:
        for idx, panel_title in enumerate(titles):
            row, col = divmod(idx, 2)
            axes[row, col].scatter(X[:, 0], X[:, 1], c=probabilities[idx],
                                   cmap=cmap, norm=norm, edgecolor='k')
            axes[row, col].set_title(panel_title)
        if n % 2 != 0:
            axes[-1, -1].axis('off')
        fig.set_size_inches(10, 30)
    fig.suptitle(suptitle)
    fig.subplots_adjust(hspace=0.7)
    return fig
def plot_parameters(X, delta, a):
    """Plot the IRT item parameters: scatter maps of difficulty (delta) and
    discrimination (a) over X on the top row, their histograms below.

    Returns the matplotlib figure.
    """
    sns.set_context('paper')
    cmap1 = sns.cubehelix_palette(rot=-.5,light=1.5,dark=-.5,as_cmap=True)
    # 2x2 grid: tall scatter row (ratio 4) over a shorter histogram row (2).
    gs = gridspec.GridSpec(2, 2, height_ratios=[4, 2])
    f = plt.figure(figsize=(12,6))
    axarr = np.array([[None]*2]*2)
    for i in range(2):
        for j in range(2):
            axarr[i,j] = plt.subplot(gs[i*2+j])
    axarr[0, 0].scatter(X[:, 0], X[:, 1], c=delta, cmap=cmap1,
                        edgecolor='k',s=40)
    axarr[0, 0].set_title('$\mathbf{\delta}$ (Difficulty)',fontsize=16)
    axarr[0, 1].scatter(X[:, 0], X[:, 1], c=a, cmap=cmap1,
                        edgecolor='k',s=40)
    axarr[0, 1].set_title('$\mathbf{a}$ (Discrimination)',fontsize=16)
    #axarr[1, 0].hist(delta,bins=100)
    sns.distplot(delta,bins=100,ax=axarr[1,0])
    axarr[1, 0].set_title('Histogram of $\mathbf{\delta}$',fontsize=16)
    #axarr[1, 1].hist(a,bins=100)
    sns.distplot(a,bins=100,ax=axarr[1,1])
    axarr[1, 1].set_title('Histogram of $\mathbf{a}$',fontsize=16)
    f.suptitle('IRT item parameters')
    #f.set_size_inches(20, 20)
    f.subplots_adjust(hspace=0.3)
    return f
def plot_noisy_points(xtest, disc=None):
    """Scatter-plot true noise items against non-noise items.

    Args:
        xtest: object with array-like attributes ``x``, ``y`` and ``noise``
            (noise > 0 marks a noise item) — assumed pandas-like; confirm.
        disc: optional per-item discrimination; items with disc < 0 are
            additionally marked as *detected* noise.

    Returns:
        The matplotlib figure.
    """
    sns.set_context('paper')
    cls = sns.color_palette("BuGn_r")
    lgd = []
    f = plt.figure()
    plt.scatter(xtest.x[xtest.noise==0],xtest.y[xtest.noise==0],facecolors='none',edgecolors='k',s=60)
    lgd.append('non-noise item')
    plt.scatter(xtest.x[xtest.noise>0],xtest.y[xtest.noise>0],c=cls[3],s=60)
    lgd.append('noise item')
    # Idiom fix: was `if not disc is None`.
    if disc is not None:
        plt.scatter(xtest.x[disc<0],xtest.y[disc<0],c=cls[0],marker='+',facecolors='none')
        lgd.append('detected noise item')
    plt.title('True and detected noise items')
    l = plt.legend(lgd,frameon=True,fontsize=12)
    l.get_frame().set_edgecolor('g')
    return f
def plot_item_parameters_corr(irt_prob_avg,difficulty,noise,disc=None):
    """Scatter average response against difficulty, highlighting noise items.

    Args:
        irt_prob_avg: per-item average response, plotted on x in [0, 1].
        difficulty: per-item difficulty, plotted on y in [0, 1].
        noise: per-item noise flags (noise > 0 marks a noise item).
        disc: optional per-item discrimination; items with disc < 0 are
            additionally marked as *detected* noise.

    Returns:
        The matplotlib figure.
    """
    sns.set_context('paper')
    cls = sns.color_palette("BuGn_r")
    lgd = []
    f = plt.figure()
    plt.xlim([0.,1.])
    plt.ylim([0.,1.])
    plt.scatter(irt_prob_avg[noise>0],difficulty[noise>0],c=cls[3],s=60)
    lgd.append('noise item')
    # Idiom fix: was `if not disc is None`.
    if disc is not None:
        plt.scatter(irt_prob_avg[disc<0],difficulty[disc<0],c=cls[0],marker='+',facecolors='none')
        lgd.append('detected noise item')
    plt.scatter(irt_prob_avg[noise==0],difficulty[noise==0],facecolors='none',edgecolors='k',s=60)
    lgd.append('non-noise item')
    plt.title('Correlation between difficulty and response')
    plt.xlabel('Average response',fontsize=14)
    plt.ylabel('Difficulty',fontsize=14)
    l=plt.legend(lgd,frameon=True,fontsize=12)
    l.get_frame().set_edgecolor('g')
    return f
def vis_performance(gather_prec,gather_recal,path,asd='as1@5',vtype='nfrac'):
    """Plot mean precision/recall (with std error bars) and save it as a PDF.

    `vtype` selects the x-axis: 'nfrac' for noise fraction, 'astd' for the
    prior standard deviation; `asd` only affects the 'nfrac' output filename.
    Any other vtype produces a figure that is closed without being saved.
    """
    fig = plt.figure()
    plt.plot(gather_recal.index, gather_recal.mean(axis=1),marker='o')
    plt.plot(gather_prec.index, gather_prec.mean(axis=1),marker='^')
    # Std across seeds as error bars, drawn without connecting lines.
    plt.errorbar(gather_recal.index, gather_recal.mean(axis=1), gather_recal.std(axis=1), linestyle='None')
    plt.errorbar(gather_prec.index, gather_prec.mean(axis=1), gather_prec.std(axis=1), linestyle='None')
    if vtype=='nfrac':
        plt.title('Precision and recall under different noise fractions')
        plt.xlabel('Noise fraction (percentile)')
        plt.ylim(-0.05,1.1)
        plt.yticks(np.arange(0,1.2,0.2))
        plt.legend(['Recall','Precision'],loc=0)
        plt.savefig(path+'gathered_dnoise_performance_nfrac_'+asd+'.pdf')
    elif vtype=='astd':
        plt.title('Precision and recall under different prior SD')
        plt.xlabel('Prior standard deviation of discrimination')
        plt.xlim(0.5,3.25)
        plt.ylim(-0.05,1.1)
        plt.yticks(np.arange(0,1.2,0.2))
        plt.legend(['Recall','Precision'],loc=0)
        plt.savefig(path+'gathered_dnoise_performance_asd_nfrac20.pdf')
    plt.close(fig)
def gather_vary_nfrac(path,dataset,a_prior_std=1.5,clcomb='79',mcomb='m10',idx = [2,5,10,20,30,40,50,55]):
    """Collect precision/recall results for one class pair across noise fractions.

    Scans `path` for dnoise_performance result files matching the model
    combination, prior std and class pair, and fills two DataFrames
    (rows = noise fraction from `idx`, columns = seed).

    Args:
        path: directory holding the result .txt files.
        dataset: dataset name embedded in the filenames.
        a_prior_std: prior std of discrimination, encoded as e.g. 'as1@5'.
        clcomb: two-digit class-pair code, e.g. '79'.
        mcomb: model combination tag, e.g. 'm10'.
        idx: noise fractions to keep (mutable default, but never mutated).

    Returns:
        (gather_prec, gather_recal, err_files): the two DataFrames plus the
        list of source CSVs whose results parsed to NaN.
    """
    prefix = path+'dnoise_performance_'+dataset+'_s400_'
    files = glob.glob(prefix+'*.txt')
    asd = 'as'+str(a_prior_std).replace('.','@')
    files = filter(lambda f: '_'+mcomb+'_' in f and asd in f and 'cl'+clcomb in f , files)
    gather_prec = pd.DataFrame(index=idx)
    gather_recal = pd.DataFrame(index=idx)
    pfix1 = 'precision = '
    pfix2 = 'recall = '
    err_files = []
    for f in files:
        # FIX: raw string so the regex '\.' is not an invalid string escape
        # (same pattern, but no DeprecationWarning/SyntaxWarning).
        parse = re.split(r'_|\.', f[len(prefix)+1:])
        frac = int(parse[0])
        if frac not in idx:
            continue
        seed = parse[1]
        with open(f,'r') as fr:
            l = fr.readlines()
        # First line holds precision, second recall.
        gather_prec.loc[frac,seed] = float(l[0][len(pfix1):])
        gather_recal.loc[frac,seed] = float(l[1][len(pfix2):])
        if np.isnan(gather_prec.loc[frac,seed]) or \
           np.isnan(gather_recal.loc[frac,seed]):
            print('find nan:',parse)
            err_files.append('./test_data/noise_test/'+dataset+'/bc4/'+mcomb+'/'+parse[2]+'/irt_data_'+dataset+'_s400_f'+parse[0]+'_'+parse[1]+'_'+parse[2]+'_'+mcomb+'.csv')
    return gather_prec,gather_recal,err_files
def vis_avg_all_clscombs_perform(dataset='mnist',a_prior_std=1.5,mcomb='m10',rpath='./results/bc4/mnist/m10/'):
errs = []
gather_precs=None
gather_recals=None
gather_prec_allcl = pd.DataFrame()
gather_recal_allcl = pd.DataFrame()
asd = 'as'+str(a_prior_std).replace('.','@')
for i,cls in enumerate(combinations(np.arange(10),2)):
#print(i)
cl1, cl2 = cls[0],cls[1]
comb = str(cl1)+str(cl2)
path = rpath+'cl'+comb+'/'
gather_prec,gather_recal, err = gather_vary_nfrac(path,dataset,a_prior_std,clcomb=comb,mcomb=mcomb)
if len(err)==0:
vis_performance(gather_prec,gather_recal,path,asd=asd)
errs+=err
if gather_precs is None:
gather_precs = gather_prec
gather_recals = gather_recal
gather_prec_allcl = pd.DataFrame(index=gather_prec.index)
gather_recal_allcl = pd.DataFrame(index=gather_recal.index)
else:
gather_precs+=gather_prec
gather_recals+=gather_recal
gather_prec_allcl[comb] = gather_prec.values.mean(axis=1)
gather_recal_allcl[comb] = gather_recal.values.mean(axis=1)
gather_precs /= i
gather_recals /= i
#vis_performance(gather_precs,gather_recals,rpath)
vis_performance(gather_prec_allcl,gather_recal_allcl,rpath,asd=asd)
if len(errs) > 0:
with open('./retest.sh','w') as wf:
for ef in errs:
wf.writelines('python betairt_test.py '+ef+' a_prior_std:'+str(a_prior_std)+'\n') | 8,059 | 3,282 |
from os.path import join, exists, isdir, relpath, abspath, dirname
import datetime as dt
import posixpath
import logging
import tempfile
from os import stat, makedirs, remove
import random
import uuid
from cStringIO import StringIO
import time
from six import string_types
try:
import gevent
except:
gevent = None
from ..clients.http import Client
from .. import settings
from ..serialization import deserializer, serializer
from ..errors import KitchenSinkError
from ..utils.pathutils import urlsplit, dirsplit, urljoin
from .funcs import get_info_bulk, hosts
logger = logging.getLogger(__name__)
def _write(finput, f):
    """Copy the contents of ``finput`` into ``f`` in fixed-size chunks.

    Either argument may be a string: ``finput`` is then wrapped in a
    StringIO buffer, and ``f`` is treated as a path to open for writing.
    Both streams are always closed when the copy finishes, whether or
    not an error occurred mid-copy.
    """
    if isinstance(finput, string_types):
        finput = StringIO(finput)
    if isinstance(f, string_types):
        f = open(f, 'wb+')
    try:
        chunk = finput.read(settings.chunk_size)
        while chunk:
            f.write(chunk)
            if gevent:
                # yield to other greenlets between chunks so a large
                # copy does not starve the event loop
                gevent.sleep(0)
            chunk = finput.read(settings.chunk_size)
    finally:
        finput.close()
        f.close()
def _read(path):
with open(path, "rb") as f:
return f.read()
class Catalog(object):
    """Redis-backed catalog of data urls replicated across hosts.

    Redis layout per data url:

    location_key - set of host names that hold a local copy of this data url
    info_key - hashset, metadata about the file
    one of these keys, 'state', is either 'starting', or 'ready'
    presence of the location_key indicates that the dataset exists in the system
    where as info_key will have state set to 'starting' if it's just pending
    """
    def __init__(self, connection, datadir, hostname, prefix=None):
        """connection - redis connection
        datadir - root directory under which local copies are stored
        hostname - name of this host
        prefix - redis key namespace (defaults to settings.prefix)
        """
        if prefix is None:
            prefix = settings.prefix
        # NOTE(review): the computed prefix is discarded and the empty
        # string is always used, so keys are never namespaced.  Looks
        # unintentional (probably meant ``self.prefix = prefix``), but
        # changing it would rename every existing redis key -- confirm
        # before fixing.
        self.prefix = ""
        self.conn = connection
        self.datadir = datadir
        self.hostname = hostname
    def host_url(self, hostname=None):
        """Return the service url for ``hostname`` (default: this host)."""
        if hostname is None:
            hostname = self.hostname
        return settings.server_manager.host_url(hostname)
    def location_key(self, key):
        """redis key for storing local paths for remote data sources
        """
        if self.prefix:
            return self.prefix + ":data:path:" + key
        else:
            return "data:path:" + key
    def data_key(self, key):
        """redis key for metadata about the remote data source
        (filesizes, datatype, format)
        """
        if self.prefix:
            return self.prefix + ":data:info:" + key
        else:
            return "data:info:" + key
    def search(self, pattern):
        """search the catalog for remote data urls
        currently implemented with redis.keys which
        isn't recommended for production use
        """
        prefix = self.location_key('')
        pattern = self.location_key(pattern)
        ### implement this with scan later
        keys = self.conn.keys(pattern)
        # strip the key namespace so callers get bare data urls back
        return [x[len(prefix):] for x in keys]
    def setup_file_path_from_url(self, url):
        """given a url, setup the file path in the data directory
        ensuring that necessary subdirs are created, and that the
        file path is valid (cannot escape the data directory)
        """
        splits = urlsplit(url, "")
        file_path = abspath(join(self.datadir, *splits))
        if not file_path.startswith(self.datadir):
            # py2-only ``raise E, msg`` syntax replaced with the
            # call form, which is valid in both py2 and py3
            raise KitchenSinkError("Must be inside datadir")
        if not exists(dirname(file_path)):
            makedirs(dirname(file_path))
        return file_path
    def bootstrap(self, url, data_type='object', fmt='cloudpickle'):
        """Register an already-present local file under ``url``.

        If the url is not yet in the catalog, record its metadata
        (size, data_type, fmt); in all cases mark this host as a
        location for the url.
        """
        file_path = self.setup_file_path_from_url(url)
        is_new = not self.url_exists(url)
        if is_new:
            size = stat(file_path).st_size
            self.set_metadata(url, size, data_type=data_type, fmt=fmt)
        self.add(file_path, url)
    def write(self, finput, url, is_new=True, data_type="object", fmt="cloudpickle"):
        """writes a file, to a data url.
        is_new - is this a new object in a catalog, or a copy of an existing one.
        data_type - data_type of the object (object or file)
        fmt - one of our serialization formats
        if the targetfile path exists - do nothing (this should't happen, maybe we should throw
        an error)
        if this file is new, then write the metadata into the catalog
        otherwise just write the data to local disk
        """
        file_path = self.setup_file_path_from_url(url)
        if is_new:
            if self.url_exists(url):
                raise KitchenSinkError("path already being used %s" % url)
            else:
                self.begin_addition(file_path, url)
        if not exists(file_path):
            _write(finput, file_path)
        if is_new:
            size = stat(file_path).st_size
            self.set_metadata(url, size, data_type=data_type, fmt=fmt)
        self.add(file_path, url)
        return file_path
    def delete(self, url):
        """delete the data url from this node only. to truly remove it
        you need to delete it from all nodes
        (the metadata hash is dropped once no host holds the url)
        """
        location_key = self.location_key(url)
        data_key = self.data_key(url)
        self.conn.srem(location_key, self.hostname)
        if not self.conn.exists(location_key):
            self.conn.delete(data_key)
        file_path = self.setup_file_path_from_url(url)
        remove(file_path)
    def write_chunked(self, iterator, url, is_new=True, data_type="object", fmt="cloudpickle"):
        """same as write, but consume data from an iterator of chunks
        (we use chunked reads during pipelining data, so we can
        stream data in as it's being processed)
        data_type / fmt - metadata recorded when the url is new
        """
        file_path = self.setup_file_path_from_url(url)
        if is_new:
            if self.url_exists(url):
                raise KitchenSinkError("path already being used")
            else:
                self.begin_addition(file_path, url)
        with open(file_path, "wb+") as f:
            for chunk in iterator:
                f.write(chunk)
        if is_new:
            size = stat(file_path).st_size
            # bug fix: ``data_type`` was previously an undefined name in
            # this method (guaranteed NameError on the is_new path);
            # it is now a parameter, and fmt is recorded to match write()
            self.set_metadata(url, size, data_type=data_type, fmt=fmt)
        self.add(file_path, url)
        return file_path
    def url_exists(self, url):
        """True if any host has registered this data url."""
        location_key = self.location_key(url)
        return self.conn.exists(location_key)
    def add(self, file_path, url):
        """add a url to the catalog (mark this host as a location)
        """
        location_key = self.location_key(url)
        self.conn.sadd(location_key, self.hostname)
        return file_path
    def begin_addition(self, file_path, url):
        """declare our intention that we are beginnning to add
        a data url to the catalog
        """
        self.conn.hset(self.data_key(url), 'state', 'starting')
    def set_metadata(self, url, size, data_type="object", fmt="cloudpickle"):
        """Mark the url ready and record its size, data_type and fmt."""
        data_key = self.data_key(url)
        self.conn.hmset(data_key,
                        {'state' : 'ready',
                         'size' : str(size),
                         'data_type' : data_type,
                         'fmt' : fmt}
        )
    def get_chunked_iterator(self, url, length, hostname=None, host_url=None):
        """return a chunked iterator for the contents of a file
        at another host. This is used for pipelining, so that
        we can stream data in while it's being written

        length - total number of bytes to read; the iterator stops once
        that many bytes have been yielded, sleeping briefly whenever the
        remote file has no new data yet
        """
        if hostname is None:
            hostname = self.hostname
        if host_url is None:
            host_url = self.host_url(hostname=hostname)
        data_read = 0
        c = Client(host_url, rpc_name='data', queue_name='data')
        while True:
            if data_read == length:
                break
            data = c._get_data(url, data_read, settings.chunk_size)
            if data.status_code != 200:
                raise KitchenSinkError("http error")
            data = data.raw.read()
            data_read += len(data)
            logger.debug("read %s of %s from %s to %s" % (data_read, length,
                                                          host_url, settings.host_url))
            if gevent:
                gevent.sleep(0)
            if len(data) == 0:
                # remote writer hasn't produced more data yet; back off
                time.sleep(1.0)
            yield data
    def get(self, url, hostname=None, host_url=None):
        """returns a stream for the given data url
        it is up to the caller of this to close the stream!

        if this host holds a local copy, the local file is opened;
        otherwise a random remote host that holds the url is contacted
        """
        if hostname is None:
            hostname = self.hostname
        if host_url is None:
            host_url = self.host_url(hostname=hostname)
        hosts, location_info, data_info = self.get_info(url)
        if self.hostname in location_info:
            file_path = self.setup_file_path_from_url(url)
            return open(file_path, 'rb')
        else:
            hostname = random.choice(list(location_info))
            host_url = hosts[hostname]
            logger.info("retrieving %s from %s", url, host_url)
            c = Client(host_url, rpc_name='data', queue_name='data')
            return c._get_data(url).raw
    def _get_info(self, url):
        """retrieves all information about a data url
        does not take into account whether hosts are active,
        etc..
        """
        location_info = self.conn.smembers(self.location_key(url))
        data_info = self.conn.hgetall(self.data_key(url))
        if 'size' in data_info:
            data_info['size'] = int(data_info['size'])
        return (location_info, data_info)
    def get_info(self, url):
        """Bulk-lookup wrapper: returns (hosts, location_info, data_info)."""
        hosts, results = get_info_bulk([url])
        location_info, data_info = results[url]
        return hosts, location_info, data_info
    def get_file_path(self, url, unfinished=False):
        """retrieve file path for the url.
        unfinished means the data is not in the catalog yet
        (we're pipelining it)
        if the url does not exist on this host, return None
        """
        path = self.setup_file_path_from_url(url)
        if unfinished and exists(path):
            return path
        hosts, location_info, data_info = self.get_info(url)
        if self.hostname not in location_info:
            return None
        return path
| 10,125 | 2,914 |
# Generated by Django 2.0.3 on 2018-04-06 18:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('nadine', '0033_payment_method'),
]
operations = [
migrations.CreateModel(
name='StripeBillingProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_email', models.EmailField(help_text='Customer email address used with Stripe customer record', max_length=254)),
('customer_id', models.CharField(help_text='Stripe customer ID used for billing via Stripe', max_length=128)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 982 | 303 |
from generate import generate
from datetime import datetime
from time import sleep
# sorting algorithms
def linear_sort(unsorted_list):
list_len = len(unsorted_list)
smallest_int = 0
sorted_list = list()
for index in range(list_len):
smallest_int = 1000000
for element in unsorted_list:
if element < smallest_int:
smallest_int = element
unsorted_list.remove(smallest_int)
sorted_list.append(smallest_int)
return sorted_list
def bubble_sort(unsorted_list):
is_sorted = False
while not is_sorted:
changes_made = 0
for i in range(len(unsorted_list) - 1):
if unsorted_list[i] > unsorted_list[i + 1]:
unsorted_list[i], unsorted_list[i + 1] = unsorted_list[i + 1], unsorted_list[i]
changes_made += 1
if changes_made == 0:
is_sorted = True
sorted_list = unsorted_list
return sorted_list
def merge_sort(unsorted_list):
# if list is large than 1
if len(unsorted_list) > 1:
# split list in halves
left_half, right_half = split(unsorted_list)
# sort left half
sorted_left_half = merge_sort(left_half)
# sort right half
sorted_right_half = merge_sort(right_half)
# merge halves
sorted_list = merge(sorted_left_half, sorted_right_half)
# return sorted list
return sorted_list
# if list is only one number
else:
# this is just for clarification
sorted_list = unsorted_list
return sorted_list
# merge_sort helper functions
def split(full_list):
'''
get length of list
initialize both halves
'''
list_len = len(full_list)
left_half, right_half = list(), list()
'''
iterate over each item in full_list
and append to left half until i is greater than length / 2
'''
for i in range(list_len):
if i < list_len / 2:
left_half.append(full_list[i])
else:
right_half.append(full_list[i])
return left_half, right_half
def merge(left_half, right_half):
merged_list = list()
both_halves_len = len(left_half) + len(right_half)
for _ in range(both_halves_len):
# if both lists have elements left
if len(left_half) > 0 and len(right_half) > 0:
# if right list has smallest number
if left_half[0] > right_half[0]:
merged_list.append(right_half[0])
right_half.pop(0)
# if left list has smallest number
elif left_half[0] < right_half[0]:
merged_list.append(left_half[0])
left_half.pop(0)
# if both lists have the same smallest number
# use left list
else:
merged_list.append(left_half[0])
left_half.pop(0)
# else if only one list has element left
else:
if len(left_half) > 0:
merged_list.append(left_half[0])
left_half.pop(0)
else:
merged_list.append(right_half[0])
right_half.pop(0)
return merged_list
# print function
def sorting_animation(algorithm):
for i in range(3):
print('Sorting' + '.' * (i + 1), end='')
print(' ' * (3 - i) + ' ' + algorithm)
sleep(1)
# main function
def main():
# user input for length
n = int(input('\nLength of unsorted list: '))
linear = generate(n)
bubble = linear.copy()
merge = linear.copy()
print('\n-------- unsorted list --------')
if len(linear) > 9:
for i in range(9):
print(linear[i], end=', ')
print('...')
else:
for i in range(len(linear) - 1):
print(linear[i], end=', ')
print(linear[-1])
print('-' * 31, end='\n\n')
input('Press enter to start sorting...')
print()
sorting_animation('linear')
timestamp_start = datetime.now()
sorted_list = linear_sort(linear)
timestamp_finish = datetime.now()
print('Done', end='\n\n')
sleep(1)
duration_linear = timestamp_finish - timestamp_start
sorting_animation('bubble')
timestamp_start = datetime.now()
sorted_list = bubble_sort(bubble)
timestamp_finish = datetime.now()
print('Done', end='\n\n')
sleep(1)
duration_bubble = timestamp_finish - timestamp_start
sorting_animation('merge')
timestamp_start = datetime.now()
sorted_list = merge_sort(merge)
timestamp_finish = datetime.now()
print('Done', end='\n\n')
sleep(1)
duration_merge = timestamp_finish - timestamp_start
print('\n------ sorting durations ------')
duration = duration_linear
print(f'Linear sort: {duration.seconds}.{duration.microseconds}s')
duration = duration_bubble
print(f'Bubble sort: {duration.seconds}.{duration.microseconds}s')
duration = duration_merge
print(f'Merge sort: {duration.seconds}.{duration.microseconds}s')
print('-' * 31)
main()
| 5,212 | 1,612 |
#!/usr/bin/env python
# coding=utf-8
import web
import lib.user as user
"""首页[done]"""
class index:
"""首页"""
def GET(self):
html = web.template.frender('templates/index.html')
name = user.getName()
return html(name)
| 251 | 89 |
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME, BertConfig
from pytorch_transformers.modeling_bert import *
from pytorch_transformers.tokenization_bert import BertTokenizer
import pytorch_transformers.optimization
class BertEmbeddingsDNA(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsDNA, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
# label should not need to have ordering ?
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.config = config
if self.config.aa_type_emb:
print ('\n\nturn on the token-type style embed.\n\n')
## okay to say 4 groups + 1 extra , we need special token to map to all 0, so CLS SEP PAD --> group 0
## 20 major amino acids --> 4 major groups
## or... we have mutation/not --> 2 major groups. set not mutation = 0 as base case
## we did not see experiment with AA type greatly improve outcome
## !! notice that padding_idx=0 will not be 0 because of initialization MUST MANUAL RESET 0
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# if token_type_ids is None:
# token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.config.aa_type_emb:
# @token_type_ids is batch x aa_len x domain_type --> output batch x aa_len x domain_type x dim
token_type_embeddings = self.token_type_embeddings(token_type_ids)
## must sum over domain (additive effect)
token_type_embeddings = torch.sum(token_type_embeddings,dim=2) # get batch x aa_len x dim
embeddings = words_embeddings + position_embeddings + token_type_embeddings
else:
embeddings = words_embeddings + position_embeddings # + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertEmbeddingsLabel(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsLabel, self).__init__()
print('TODO BertEmbeddingsLabel config, should log')
print(config)
self.config = config
self.word_embeddings = nn.Embedding(config.label_size, config.hidden_size) ## , padding_idx=0
# label should not need to have ordering ?
# self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
if self.config.scale_label_vec:
## if we freeze, then we will not use any layer norm. let's try using the vectors as they are.
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
## should always drop to avoid overfit
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
# seq_length = input_ids.size(1)
# if position_ids is None:
# position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
# position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# if token_type_ids is None:
# token_type_ids = torch.zeros_like(input_ids)
# embeddings = self.word_embeddings(input_ids)
# position_embeddings = self.position_embeddings(position_ids)
# token_type_embeddings = self.token_type_embeddings(token_type_ids)
# embeddings = words_embeddings # + position_embeddings + token_type_embeddings
# if self.config.scale_label_vec:
# embeddings = self.LayerNorm(embeddings)
## should always drop to avoid overfit
# embeddings = self.dropout(embeddings)
##!! COMMENT we always use all the labels, so that we do not need to specify label-indexing.
## need only call @self.word_embeddings.weight
embeddings = self.LayerNorm(self.word_embeddings.weight)
embeddings = embeddings.expand(input_ids.shape[0],-1,-1) ## batch x num_label x dim
embeddings = self.dropout( embeddings )
return embeddings
class BertModel2Emb(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape
``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel2Emb, self).__init__(config)
self.embeddings = BertEmbeddingsDNA(config)
self.embeddings_label = BertEmbeddingsLabel(config) ## label takes its own emb layer
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _resize_label_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings_label.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings_label.word_embeddings = new_embeddings
return self.embeddings_label.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def resize_label_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens
``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_label_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.label_size = new_num_tokens
base_model.label_size = new_num_tokens
# Tie weights again if needed
if hasattr(self, 'tie_weights'):
self.tie_weights()
return model_embeds
def forward(self, input_ids, input_DNA, label_index_id, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None):
##!! to avoid a lot of re-structuring, let's define @input_ids=>protein_vector from interaction network
## assume @input_ids is batch x 1 x dim, each batch is a protein so it has 1 vector
# if attention_mask is None:
# attention_mask = torch.ones_like(input_ids) ## probably don't need this very much.
# if we pass in mask and token_type, which we always do for batch mode
# # if token_type_ids is None:
# # token_type_ids = torch.zeros_like(input_ids)
# # We create a 3D attention mask from a 2D tensor mask.
# # Sizes are [batch_size, 1, 1, to_seq_length]
# # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# # this attention mask is more simple than the triangular masking of causal attention
# # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
# extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# # masked positions, this operation will create a tensor which is 0.0 for
# # positions we want to attend and -10000.0 for masked positions.
# # Since we are adding it to the raw scores before the softmax, this is
# # effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
# extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
#print('Bert forward')
#print('input_DNA')
#print(input_DNA)
#print('____')
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
## need to split the @input_ids into AA side and label side, @input_DNA @label_index_id
## COMMENT
embedding_output = self.embeddings(input_DNA, position_ids=position_ids, token_type_ids=token_type_ids)
embedding_output_label = self.embeddings_label(label_index_id, position_ids=None, token_type_ids=None)
# concat into the original embedding
if self.config.ppi_front:
## masking may vary, because some proteins don't have vec emb
embedding_output = torch.cat([input_ids,embedding_output,embedding_output_label], dim=1)
## we add protein_vector as variable @input_ids
else:
## COMMENT
embedding_output = torch.cat([embedding_output,embedding_output_label], dim=1)
## @embedding_output is batch x num_aa x dim so append @embedding_output_label to dim=1
## (basically adding more words to @embedding_output)
# @embedding_output is just some type of embedding, the @encoder will apply attention weights
encoder_outputs = self.encoder(embedding_output,
attention_mask=None,
head_mask=head_mask)
## @extended_attention_mask must mask using the entire set of sequence + label input
sequence_output = encoder_outputs[0]
# pooled_output = self.pooler(sequence_output)
outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here pooled_output
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
class ElementWiseMultiplyLayer(nn.Module):
def __init__(self, hidden_size, n_genomic_features):
super(ElementWiseMultiplyLayer, self).__init__()
self.embedding_dim = hidden_size
self.num_labels = n_genomic_features ## about 919 for histone marks
self.weightMat = nn.Parameter(torch.Tensor(self.num_labels, self.embedding_dim)) # define the trainable parameter
self.bias = nn.Parameter(torch.Tensor(n_genomic_features))
def forward(self, x):
# x is [batch, nlabel, hidden]. element-wise mult and sum over hidden
return torch.sum(x * self.weightMat, dim = 2) + self.bias
class TokenClassificationBase (BertPreTrainedModel):
## !! we change this to do 1-hot prediction
## take in K labels so we have vector of 1-hot length K
## for each label, we get a vector output from BERT, then we predict 0/1
def __init__(self, config_name, sequence_length, n_genomic_features):
## create config object base on path name. bert needs config object
self.config = BertConfig.from_pretrained(config_name)
super(TokenClassificationBase, self).__init__(self.config)
self.sequence_length = sequence_length
self.num_labels = n_genomic_features ## about 919 for histone marks
self.bert = BertModel2Emb(self.config)
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
# this classifier uses same weights for all labels
self.classifier = nn.Sequential(nn.Linear(self.config.hidden_size, 1),
nn.Sigmoid())
# this classifier using CLS embedding with different weights for each label
#self.classifier2 = nn.Sequential(nn.Linear(self.config.hidden_size, self.num_labels),
# nn.Sigmoid())
# uses label embeddings and learns different weights for each
print('TokenClassificationBase: Using classifier1')
self.classifier3 = nn.Sequential(
ElementWiseMultiplyLayer(self.config.hidden_size, self.num_labels),
nn.Sigmoid()
)
self.init_weights() # https://github.com/lonePatient/Bert-Multi-Label-Text-Classification/issues/19
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def _init_weights(self, module):
# https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_bert.py#L535
""" Initialize the weights, including for our custom layer """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, ElementWiseMultiplyLayer):
module.weightMat.data.normal_(mean=0.0, std=self.config.initializer_range)
module.bias.data.zero_()
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
    def forward(self, x):
        """Score each label token given the DNA token sequence.

        @x: batch x seq_len tensor of token indices. Per the comments below,
        the first self.sequence_length positions are DNA tokens and the
        remainder are label tokens -- TODO confirm with the caller.
        Returns logits of shape batch x num_label (no sigmoid applied here;
        see criterion() which uses BCELoss).
        """
        ##!! @x in Transformer is batch x word_indexing
        ## COMMENT: original model must take only x=batch x 4 x 1000 because @selene pipeline requires only this input
        ## default @x is DNA + label --> so it is already an embedding
        ## COMMENT convert @x into word-indexing style. so we want @x = [[1,1,2,2,...], [3,3,4,4,...]] --> batch x seq_len
        ##!! @label_index_id can be determined ahead of time
        # label_index_id = self.label_range.expand(real_batch_size,-1) ## batch x num_label ... 1 row for 1 ob in batch
        ## COMMENT use @x as indexing-style
        ##!! observe that we pass in @x twice. this is a trick to get batch_size.
        outputs = self.bert(None, x, x, position_ids=None, token_type_ids=None)
        # keep only the label positions (everything after the DNA sequence)
        sequence_output = outputs[0][:,self.sequence_length::,:] ## last layer.
        ## last layer outputs is batch_num x num_label x hidden_dim
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output).squeeze(2) ## want batch x len x 1 --> batch x num_label
        #logits = self.classifier3(sequence_output) ## want batch x len x 1 --> batch x num_label
        #cls_output = outputs[0][:,0,:]
        #cls_output = self.dropout(cls_output)
        #print('TODO cls', cls_output.shape)
        #logits = self.classifier2(cls_output)
        #print('TODO logits', logits.shape)
        #print(logits.shape)
        return logits # batch x num_label
def criterion():
    """Return the loss for multi-label training: element-wise binary
    cross-entropy (expects sigmoid-activated predictions)."""
    loss_fn = nn.BCELoss()
    return loss_fn
def get_optimizer(lr):
    """Return an (optimizer class, kwargs) pair for the training harness.

    Uses AdamW (Adam with decoupled weight decay) from pytorch_transformers;
    the harness instantiates it with the model parameters itself.
    Alternatives kept for reference:
      - plain Adam: (torch.optim.Adam, {"lr": lr, "weight_decay": 1e-6})
      - DeepSEA-style SGD: (torch.optim.SGD,
            {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9})
      - grouped no-decay params as in
        https://github.com/datduong/BertGOAnnotation/blob/master/finetune/RunTokenClassifyProtData.py#L313
    """
    optimizer_kwargs = {"lr": lr, "weight_decay": 1e-6}
    return (pytorch_transformers.optimization.AdamW, optimizer_kwargs)
| 19,448 | 6,327 |
from setuptools import setup

__version__ = '0.0.3'

REQUIRES = ['psycopg2-binary']
EXTRAS_REQUIRE = {
    'sqlalchemy': ['sqlalchemy'],
    'jinjasql': ['jinjasql'],
    'pandas': ['jinjasql', 'pandas'],
}
# Flatten EXTRAS_REQUIRE values into a deduplicated 'all' extra.
# sorted() makes the list deterministic (list(set(...)) was not).
all_extras_require = sorted({pkg for pkgs in EXTRAS_REQUIRE.values() for pkg in pkgs})
EXTRAS_REQUIRE['all'] = all_extras_require
TESTS_REQUIRE = REQUIRES + all_extras_require + ['pytest', 'testing.postgresql']

# Read the long description with an explicit encoding and without leaking
# the file handle (the original bare open(...).read() never closed it).
with open('README.md', encoding='utf-8') as readme:
    long_description = readme.read()

setup_dict = dict(name='dbactor',
                  version=__version__,
                  description='DBActor: ORM helper and alternative',
                  long_description=long_description,
                  # README is markdown; PyPI renders it wrong without this
                  long_description_content_type='text/markdown',
                  url='http://github.com/jackschultz/dbactor',
                  author='Jack Schultz',
                  author_email='jackschultz23@gmail.com',
                  license='MIT',
                  install_requires=REQUIRES,
                  extras_require=EXTRAS_REQUIRE,
                  tests_require=TESTS_REQUIRE,
                  packages=['dbactor'])
setup(**setup_dict)
| 1,166 | 392 |
"""
Figurenerkennung for German literary texts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`figur` is very easy to use:
```
>>> import figur
>>> text = "Der Gärtner entfernte sich eilig, und Eduard folgte bald."
>>> figur.tag(text)
SentenceId Token Tag
0 0 Der _
1 0 Gärtner AppTdfW
2 0 entfernte _
3 0 sich Pron
4 0 eilig, _
5 0 und _
6 0 Eduard Core
7 0 folgte _
8 0 bald. _
```
"""
from .api import tag
| 603 | 183 |
import cv2
import numpy as np
import os
def frames_to_video(inputpath, outputpath, fps):
    """Assemble numbered frame images from *inputpath* into a video file.

    Frame filenames must carry an integer in characters [5:-4] (e.g.
    'frame001.jpg'); frames are sorted by that number. Every frame is
    resized to the FIRST frame's dimensions so the VideoWriter receives a
    consistent size.

    Raises ValueError if the directory contains no files.
    """
    # BUG FIX: the original called bare isfile()/join(), which are never
    # imported (only `os` is) and raised NameError at runtime.
    files = [f for f in os.listdir(inputpath)
             if os.path.isfile(os.path.join(inputpath, f))]
    files.sort(key=lambda x: int(x[5:-4]))
    if not files:
        # the original crashed later with an undefined `size` instead
        raise ValueError('no frame files found in {}'.format(inputpath))
    image_array = []
    size = None
    for name in files:
        img = cv2.imread(os.path.join(inputpath, name))
        if size is None:
            # lock the output size to the first frame; the original resized
            # each frame to its own size (a no-op) and then handed the LAST
            # frame's size to the writer, corrupting mixed-size inputs
            size = (img.shape[1], img.shape[0])
        image_array.append(cv2.resize(img, size))
    fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
    out = cv2.VideoWriter(outputpath, fourcc, fps, size)
    for frame in image_array:
        out.write(frame)
    out.release()
if __name__ == '__main__':
    # Guard the example invocation so importing this module has no side
    # effects; the placeholder paths must be replaced before running.
    inputpath = 'folder path'
    outpath = 'video file path/video.mp4'
    fps = 29
    frames_to_video(inputpath, outpath, fps)
| 725 | 279 |
import tensorflow as tf
import numpy as np
import traceback
import os.path
from .worker import Worker
from .param import *
from .params import *
import logging
logger = logging.getLogger(__name__)
class HeartbeatHook(tf.train.SessionRunHook):
    """Session hook that pings a liveness callback after every run step.

    `should_continue` is a zero-arg callable that raises StopIteration when
    training should stop; the hook translates that into request_stop().
    (Note: the callback attribute is spelled 'heatbeat' throughout this
    file -- kept for compatibility.)
    """
    def __init__(self, heatbeat, should_continue):
        self.heatbeat = heatbeat            # zero-arg liveness callback
        self.should_continue = should_continue
    def after_run(self, run_context, run_values):
        self.heatbeat()
        try:
            self.should_continue()
        except StopIteration:
            # cooperative cancellation requested by the supervisor
            run_context.request_stop()
    def end(self, session):
        # final ping so the supervisor sees the session finished cleanly
        self.heatbeat()
class HeatbeatSaverListener(tf.train.CheckpointSaverListener):
    """Checkpoint listener that pings the heartbeat after each save, so a
    long checkpoint write does not look like a stalled worker."""
    def __init__(self, heartbeat):
        self.heartbeat = heartbeat
    def after_save(self, session, global_step_value):
        self.heartbeat()
def resize_and_load(var, val, sess):
    """Load numpy value *val* into TF variable *var*, truncating or padding
    along axis 1 first when the shapes disagree.

    Only 2-D values with a mismatch on axis 1 are handled (see resize_dim
    note below). BUG FIXES from review: `deta` was an undefined name (typo
    for `delta`), and the final load referenced undefined `v`/`self.sess`
    instead of `var`/`sess`, so this function always raised NameError.
    """
    o_shape = var.get_shape().as_list()
    i_shape = list(val.shape)
    if o_shape != i_shape:
        resize_dim = 1  # may not always hold true, assumption for now
        delta = o_shape[resize_dim] - i_shape[resize_dim]
        if delta != 0:
            tf.logging.info("reshape var {} by {}".format(var.name, delta))
        if delta < 0:
            # variable is narrower: truncate the incoming value
            val = val[:, :o_shape[1]]
        elif delta > 0:
            # variable is wider: pad the incoming value by reflection
            val = np.pad(val, ((0, 0), (0, delta)), 'reflect')
    var.load(val, sess)
def gen_scaffold(params):
    """Build a tf.train.Scaffold whose init_fn transplants variable values.

    params["vars"] maps variable names to numpy values (captured by
    EstimatorWorker.extract_vars); matching trainable variables are loaded
    via resize_and_load, which pads/truncates on shape mismatch.
    """
    def init_fn(scaffold, session):
        # NOTE(review): tf.logging.info is %s-style; passing `params` as a
        # positional arg without a placeholder looks unintended -- confirm.
        tf.logging.info("Running Scaffold init_fn", params)
        vs = params["vars"]
        if vs is not None:
            for var in tf.trainable_variables():
                if var.name in vs:
                    val = vs[var.name]
                    resize_and_load(var, val, session)
    # return tf.train.Scaffold(init_fn=lambda scaffold, session: True)
    return tf.train.Scaffold(init_fn=init_fn)
class MetricHook(tf.train.SessionRunHook):
    """Session hook that records a metric every step and reports the mean.

    `metrics` is a fetchable structure of tf.metrics-style (value, update_op)
    pairs; `key` selects which entry to read. `cb` receives the average of
    all readings when the session ends.
    """
    def __init__(self, metrics, cb, key=0):
        self.metrics = metrics
        # BUG FIX: `cb` was accepted but never stored, so end() crashed
        # with AttributeError the first time it called self.cb.
        self.cb = cb
        self.key = key
        self.readings = []
    def before_run(self, run_context):
        # ask the session to fetch the metric tensors alongside the step
        return tf.train.SessionRunArgs(self.metrics)
    def after_run(self, run_context, run_values):
        if run_values.results is not None:
            # index [1] selects the updated metric value of the selected pair
            self.readings.append(run_values.results[self.key][1])
    def end(self, session):
        if len(self.readings) > 0:
            self.cb(np.average(self.readings))
            self.readings.clear()
class EstimatorWorker(Worker):
    """Worker that wraps a tf.estimator.Estimator for population-based training.

    The estimator is (re)built lazily from self.init_params plus the current
    hyperparams. Weights move between workers either through TF warm-start
    (use_warm_start=True) or by copying raw variable values through the
    "vars" hyperparam (see extract_vars / gen_scaffold).
    """
    def __init__(self, init_params, hyperparam_spec):
        self.estimator = None
        self.trained = False
        if init_params["use_warm_start"]:
            assert "model_id" in hyperparam_spec, "Warm start requires model_id hyperparam"
        super().__init__(init_params, hyperparam_spec)
    def init_params_get(self):
        pass
    def setup_estimator(self):
        """Rebuild the estimator; fresh unique model_dir unless warm-starting."""
        if self.init_params["use_warm_start"] and self.warm_start_dir is not None:
            model_dir = self.model_dir
            warm_start = self.warm_start_dir
        else:
            # NOTE(review): `uuid` is not imported in this module; presumably
            # it comes in via the star imports from .param/.params -- confirm.
            model_dir = os.path.join(self.init_params["model_dir"], self.init_params["run"], str(uuid.uuid4()))
            warm_start = None
        self.estimator = tf.estimator.Estimator(
            model_fn=self.init_params["model_fn"],
            model_dir=model_dir,
            config=self.init_params.get("run_config", None),
            params=vars(self.friendly_params),
            warm_start_from=warm_start
        )
        self.trained = False
    def ensure_warm(self):
        """Guarantee the estimator exists and has variables (>=1 train step)."""
        if self.estimator is None:
            self.setup_estimator()
        # We need to warm up the estimator
        if not self.init_params["use_warm_start"] and not self.trained:
            self.do_step(1, lambda:None, lambda:None)
    def extract_vars(self):
        """Snapshot all variable values into the "vars" hyperparam so another
        worker can be seeded from them (consumed by gen_scaffold)."""
        if "vars" in self._params:
            self.ensure_warm()
            var_names = self.estimator.get_variable_names()
            vals = {k:self.estimator.get_variable_value(k) for k in var_names}
            self._params["vars"] = VariableParam(vals)
    # --------------------------------------------------------------------------
    # Worker class stub impl
    # --------------------------------------------------------------------------
    def pre_params_get(self):
        if not self.init_params["use_warm_start"]:
            self.extract_vars()
    def post_params_set(self):
        self.setup_estimator()
    def do_step(self, steps, heartbeat, should_continue):
        # We lazily initialise the estimator as during unpickling we may not have all the params
        if self.estimator is None:
            self.setup_estimator()
        self.estimator.train(
            self.init_params["train_input_fn"](self.friendly_params),
            steps=steps,
            hooks=[HeartbeatHook(heartbeat, should_continue)],
            saving_listeners=[HeatbeatSaverListener(heartbeat)],
        )
        # TODO: put heartbeat and should_continue into a hook
        heartbeat()
        self.trained = True
    def do_eval(self):
        self.ensure_warm()
        return self.estimator.evaluate(self.init_params["eval_input_fn"](self.friendly_params))
    # --------------------------------------------------------------------------
    # Pickling
    # --------------------------------------------------------------------------
    def __getstate__(self):
        # The estimator itself is not picklable; it is rebuilt on demand.
        # NOTE(review): reads self.params but restores into self._params --
        # presumably `params` is a property on Worker; confirm.
        return {
            "_params": self.params,
            "results": self.results,
            "id": self.id,
            "total_steps": self.total_steps,
            "recent_steps": self.recent_steps,
            "time_started": self.time_started,
        }
    def __setstate__(self, state):
        self.id = state.get("id", uuid.uuid4())
        self.time_started = 0
        self.performance = (0,0)
        self.total_steps = state.get("total_steps", 0)
        self.recent_steps = state.get("recent_steps", 0)
        self.results = state.get("results", {})
        self._params = state.get("_params", {})
        self.estimator = None
        self.trained = False
| 5,303 | 2,076 |
import os
import glob
import yaml
import torch
import argparse
from addict import Dict
from dataset import *
from init import *
from utilities import *
from train import *
def parse_args():
    """Build and parse the command-line arguments for inference."""
    cli = argparse.ArgumentParser(description='infer')
    cli.add_argument('--config', type=str, default='./tracer/train_config.yaml',
                     help='path to config file')
    return cli.parse_args()
def main(args):
    """Train the DQN tracing agent over all training volumes.

    Loads the YAML config, builds the dataset and networks, then per epoch
    trains one agent per (start point, region) pair, tracks per-volume match
    statistics, periodically syncs the target network and checkpoints.
    """
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # load config file
    # NOTE(review): the file handle from open() is never closed -- harmless
    # for a script but worth a `with` block.
    cfgs = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    cfgs = Dict(cfgs)
    # get train, val dataset (one directory per volume)
    train_data_path = sorted(glob.glob(cfgs.train_path + '/**/'))
    train_set = GetDateset(train_data_path, cfgs)
    train_loader = DataLoader(train_set, batch_size=1, shuffle=True, num_workers=0)
    # initialize networks, optimizer, replay memory (possibly from checkpoint)
    policy_net, target_net, optimizer, scheduler, memory, steps_done, start_epoch = \
        inilization(cfgs, args)
    state_dict = {}          # per-volume progress statistics, keyed by name
    all_trace_length = 0
    all_tree_length = 0
    # train epoch
    for epoch in range(start_epoch, cfgs.epoch):
        print('epoch: {}'.format(epoch))
        for _, sample in enumerate(train_loader):
            env, tree, start_pts, name = tensor_to_numpy(sample)
            # prepare training regions and data mode
            training_list = prepare_training_area(start_pts)
            # save some statistic values
            state_dict = prepare_stat_dict(state_dict, name)
            all_trace_length = 0
            all_tree_length = 0
            for item in training_list:
                print('training information', item)
                start_num, region = item
                traing_agent = Training_Agent(args, cfgs, target_net, policy_net,
                    env, tree, start_num, steps_done, optimizer, scheduler, memory)
                target_net, policy_net, trace_trajectory, STEPS_DONE = traing_agent.train()
                region_tree, _ = get_region_tree(start_num, tree)
                # fraction of ground-truth tree nodes matched by the traced path
                match_rate = get_match_rate(region_tree, trace_trajectory, cfgs.match_dist)
                print('match rate', np.round(match_rate * 100, 2))
                # 'l'/'r' regions feed the LCA/RCA buckets -- presumably
                # left/right coronary artery; confirm with dataset docs.
                if region == 'l':
                    state_dict[name]['LCA progress'].append(np.round(match_rate*100, 2))
                elif region == 'r':
                    state_dict[name]['RCA progress'].append(np.round(match_rate*100, 2))
                all_tree_length += len(region_tree)
                all_trace_length += len(region_tree) * match_rate
            # length-weighted completion rate across all regions of this volume
            all_finish_rate = np.round(all_trace_length / all_tree_length, 2) * 100
            state_dict[name]['ALL progress'].append(all_finish_rate)
            if len(state_dict[name]['LCA progress']) > 0:
                state_dict[name]['LCA average finish rate'] = sum(state_dict[name]['LCA progress'])/len(state_dict[name]['LCA progress'])
            if len(state_dict[name]['RCA progress']) > 0:
                state_dict[name]['RCA average finish rate'] = sum(state_dict[name]['RCA progress'])/len(state_dict[name]['RCA progress'])
            state_dict[name]['ALL average finish rate'] = sum(state_dict[name]['ALL progress'])/len(state_dict[name]['ALL progress'])
        # print stat dict
        for key in sorted(state_dict.keys()):
            print(key, state_dict[key])
        # Update the target network
        if epoch % cfgs.update_epoch == 0:
            target_net.load_state_dict(policy_net.state_dict())
        # save model
        if (epoch+1)%cfgs.save_freq==0:
            if not os.path.exists(cfgs.save_path):
                os.makedirs(cfgs.save_path)
            torch.save({
                'model_state_dict': target_net.module.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'frames': memory.frame,
                'steps': steps_done,
                'epochs': epoch
            }, cfgs.save_path + '/agent_' + str(epoch+1) + '.pth')
if __name__ == '__main__':
    # script entry point: parse CLI args and run training
    args = parse_args()
    main(args)
"""acceptance tests"""
import unittest
from nose.plugins.attrib import attr
@attr('acc')
class AcceptanceTestCase(unittest.TestCase):
    """Base class for acceptance tests.

    Tagged with the nose attrib 'acc' so the acceptance suite can be
    selected or excluded via ``nosetests -a acc``.
    """
    pass
| 181 | 58 |
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin SV developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time, math
from test_framework.blocktools import create_block, create_coinbase
class BsvProtoconfTest(BitcoinTestFramework):
    """Verify the node respects OUR advertised maximum protocol payload.

    We advertise the legacy 1 MiB limit via protoconf; the node advertises
    2 MiB. Inv messages that fit the node's limit must be accepted and its
    GETDATA replies must be chunked to OUR limit; one inv element over the
    node's limit must get us disconnected and banned.
    """
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def setup_network(self):
        self.setup_nodes()
    def run_test(self):
        # Testing scope: our maximal protocol message length is smaller than remote node's message length, remote node has to respect this.
        ELEMENTS_PER_1MiB = 29126
        ELEMENTS_PER_2MiB = 58254
        expected_inv_len = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH) #29126 elements
        assert_equal(expected_inv_len, ELEMENTS_PER_1MiB)
        logger.info("Our max message size: {} B, which represents {} elements. ".format(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH, expected_inv_len))
        test_node = NodeConnCB()
        wanted_inv_lengths = []
        # record how many inv elements each GETDATA from the node carries
        def on_getdata(conn, message):
            wanted_inv_lengths.append(len(message.inv))
        test_node.on_getdata = on_getdata
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        test_node.add_connection(connections[0])
        NetworkThread().start() # Start up network handling in another thread
        # advertise the LEGACY (1 MiB) limit as our max receive payload
        def send_protoconf_default_msg_length(conn):
            conn.send_message(msg_protoconf(CProtoconf(1, LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)))
        test_node.send_protoconf = send_protoconf_default_msg_length
        # 0. Prepare initial block. Needed so that GETDATA can be send back.
        self.nodes[0].generate(1)
        # 1. Receive bitcoind's protoconf and save max_recv_payload_length.
        test_node.wait_for_protoconf()
        max_recv_payload_length = test_node.last_message["protoconf"].protoconf.max_recv_payload_length
        maxInvElements = CInv.estimateMaxInvElements(max_recv_payload_length) #58254
        assert_equal(maxInvElements, ELEMENTS_PER_2MiB)
        logger.info("Received bitcoind max message size: {} B, which represents {} elements. ".format(max_recv_payload_length, maxInvElements))
        # 2. Send bitcoind Inv message.
        test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0) #not banned
        # 2.1. Receive GetData.
        test_node.wait_for_getdata()
        # 2.2. We should receive 2 GetData messages with 1MB size (29126 elements) and 1 GetData message with 2 elements.
        assert_equal(wanted_inv_lengths[0], expected_inv_len)
        assert_equal(wanted_inv_lengths[1], expected_inv_len)
        assert_equal(wanted_inv_lengths[2], 2)
        assert_equal(len(wanted_inv_lengths), 3)
        ### TEST WITH maxInvElements - 1, maxInvElements and maxInvElements + 1
        # 1. Send bitcoind Inv message that is smaller than max_recv_payload_length.
        test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements - 1)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0) #not banned
        # 2. Send bitcoind Inv message that is equal to max_recv_payload_length.
        test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0) #not banned
        # 3. Send bitcoind Inv message that is larger than max_recv_payload_length.
        test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements + 1)]))
        test_node.wait_for_disconnect()
        assert(self.nodes[0].closed)# disconnected
        assert_equal(len(self.nodes[0].listbanned()), 1) #banned
        logger.info("Banned nodes : {}".format(self.nodes[0].listbanned()))
| 4,222 | 1,424 |
# Generated by Django 2.1.8 on 2020-01-08 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Repurpose ReplGoodsType.price as an integer 'credit' field and add
    fresh decimal price columns.

    Operation order is load-bearing: the old decimal `price` is first
    renamed to `credit`, then retyped to PositiveIntegerField, and only
    afterwards are the new decimal `market_price`/`price` fields added
    (reusing the now-free `price` name).
    """
    dependencies = [
        ('goods', '0062_auto_20191205_1656'),
    ]
    operations = [
        migrations.RenameField(
            model_name='replgoodstype',
            old_name='price',
            new_name='credit',
        ),
        migrations.AlterField(
            model_name='replgoodstype',
            name='credit',
            field=models.PositiveIntegerField(default=0, verbose_name='积分'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='replgoodstype',
            name='market_price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=15, verbose_name='市场价格'),
        ),
        migrations.AddField(
            model_name='replgoodstype',
            name='price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=15, verbose_name='价格'),
        ),
    ]
| 1,022 | 343 |
import gym
import numpy as np
import tensorflow as tf
import time
from actor_critic.policy import A2CBuilder
from actor_critic.util import discount_with_dones, cat_entropy, fix_tf_name
from common.model import NetworkBase
from common.multiprocessing_env import SubprocVecEnv
from tqdm import tqdm
class ActorCritic(NetworkBase):
    """A2C network: shared policy/value model plus its RMSProp training op.

    `step_model` (reuse=False) creates the variables and serves action
    selection; `train_model` (reuse=True) shares those weights and is fed
    flattened rollouts for the gradient update. Loss is
    pg_coeff*policy - ent_coeff*entropy + vf_coeff*value.
    """
    def __init__(self, sess, a2c_arch, ob_space, ac_space,
                 pg_coeff=1.0, vf_coeff=0.5, ent_coeff=0.01, max_grad_norm=0.5,
                 lr=7e-4, alpha=0.99, epsilon=1e-5, summarize=False):
        self.sess = sess
        self.nact = ac_space.n
        self.ob_space = ob_space
        # Actions, Advantages, and Reward (flattened over envs*steps)
        self.actions = tf.placeholder(tf.int32, [None], name='actions')
        self.advantages = tf.placeholder(tf.float32, [None], name='advantages')
        self.rewards = tf.placeholder(tf.float32, [None], name='rewards')
        self.depth = tf.placeholder(tf.float32, [None], name='scramble_depth')
        # setup the models
        self.step_model = A2CBuilder(self.sess, a2c_arch, ob_space, ac_space, reuse=False)
        self.train_model = A2CBuilder(self.sess, a2c_arch, ob_space, ac_space, reuse=True)
        # Negative log probs of actions
        neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.train_model.pi, labels=self.actions)
        # Policy Gradients Loss, Value Function Loss, Entropy, and Full Loss
        self.pg_loss = tf.reduce_mean(self.advantages * neglogpac)
        self.vf_loss = tf.reduce_mean(tf.square(tf.squeeze(self.train_model.vf) - self.rewards) / 2.0)
        self.entropy = tf.reduce_mean(cat_entropy(self.train_model.pi))
        self.loss = pg_coeff*self.pg_loss - ent_coeff*self.entropy + vf_coeff*self.vf_loss
        self.mean_rew = tf.reduce_mean(self.rewards)
        self.mean_depth = tf.reduce_mean(self.depth)
        # Find the model parameters and their gradients
        with tf.variable_scope('a2c_model'):
            self.params = tf.trainable_variables()
        grads = tf.gradients(self.loss, self.params)
        if max_grad_norm is not None:
            grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads = list(zip(grads, self.params))
        # Setup the optimizer
        trainer = tf.train.RMSPropOptimizer(learning_rate=lr, decay=alpha, epsilon=epsilon)
        self.opt = trainer.apply_gradients(grads)
        # For some awesome tensorboard stuff
        if summarize:
            tf.summary.scalar('Loss', self.loss)
            tf.summary.scalar('Entropy', self.entropy)
            tf.summary.scalar('Policy Gradient Loss', self.pg_loss)
            tf.summary.scalar('Value Function Loss', self.vf_loss)
            tf.summary.scalar('Rewards', self.mean_rew)
            tf.summary.scalar('Depth', self.mean_depth)
        # fix tf scopes if we are loading a scope that is different from the saved instance
        #name_scope = tf.contrib.framework.get_name_scope()
        #if len(name_scope) != 0:
        #    self.params = { fix_tf_name(v.name, name_scope): v for v in self.params }
        #else:
        #    self.params = { fix_tf_name(v.name): v for v in self.params }
        # Initialize the tensorflow saver
        self.saver = tf.train.Saver(self.params, max_to_keep=5)
    # Single training step
    def train(self, obs, rewards, masks, actions, values, depth, step, summary_op=None):
        """One gradient update over a flattened rollout; returns the fetched
        losses/means (+ summary when summary_op is given)."""
        advantages = rewards - values
        feed_dict = {
            self.actions: actions,
            self.advantages: advantages,
            self.rewards: rewards,
            self.depth: depth,
        }
        # map the (possibly multi-part) observation onto the model's inputs
        inputs = self.train_model.get_inputs()
        mapped_input = self.train_model.transform_input(obs)
        for transformed_input, inp in zip(mapped_input, inputs):
            feed_dict[inp] = transformed_input
        ret_vals = [
            self.loss,
            self.pg_loss,
            self.vf_loss,
            self.entropy,
            self.mean_rew,
            self.mean_depth,
            self.opt,
        ]
        if summary_op is not None:
            ret_vals.append(summary_op)
        return self.sess.run(ret_vals, feed_dict=feed_dict)
    # Given an observation, perform an action
    def act(self, obs, stochastic=True):
        return self.step_model.step(obs, stochastic=stochastic)
    # Return the value of the value function
    def critique(self, obs):
        return self.step_model.value(obs)
# The function that trains the a2c model
# The function that trains the a2c model
def train(env_fn = None,
          spectrum = False,
          a2c_arch = None,
          nenvs = 16,
          nsteps = 100,
          max_iters = 1e6,
          gamma = 0.99,
          pg_coeff = 1.0,
          vf_coeff = 0.5,
          ent_coeff = 0.01,
          max_grad_norm = 0.5,
          lr = 7e-4,
          alpha = 0.99,
          epsilon = 1e-5,
          log_interval = 100,
          summarize = True,
          load_path = None,
          log_path = None,
          cpu_cores = 1):
    """Run the A2C training loop.

    Collects `nsteps` of experience from `nenvs` parallel environments per
    iteration, computes discounted returns bootstrapped from the critic,
    and performs one gradient update per iteration. Checkpoints every
    `log_interval` iterations plus a final 'final' checkpoint.
    """
    # Construct the vectorized parallel environments
    envs = [ env_fn for _ in range(nenvs) ]
    envs = SubprocVecEnv(envs)
    # Set some random seeds for the environment
    envs.seed(0)
    if spectrum:
        envs.spectrum()
    ob_space = envs.observation_space.shape
    nw, nh, nc = ob_space
    ac_space = envs.action_space
    obs = envs.reset()
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=cpu_cores,
        intra_op_parallelism_threads=cpu_cores )
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        actor_critic = ActorCritic(sess, a2c_arch, ob_space, ac_space,
            pg_coeff, vf_coeff, ent_coeff, max_grad_norm,
            lr, alpha, epsilon, summarize)
        load_count = 0
        if load_path is not None:
            actor_critic.load(load_path)
            print('Loaded a2c')
        summary_op = tf.summary.merge_all()
        writer = tf.summary.FileWriter(log_path, graph=sess.graph)
        # NOTE(review): global_variables_initializer runs AFTER load() --
        # confirm load() restores again later or this overwrites the weights.
        sess.run(tf.global_variables_initializer())
        batch_ob_shape = (-1, nw, nh, nc)
        dones = [False for _ in range(nenvs)]
        episode_rewards = np.zeros((nenvs, ))
        final_rewards = np.zeros((nenvs, ))
        print('a2c Training Start!')
        print('Model will be saved on intervals of %i' % (log_interval))
        for i in tqdm(range(load_count + 1, int(max_iters) + 1), ascii=True, desc='ActorCritic'):
            # Create the minibatch lists
            mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_depth = [], [], [], [], [], []
            total_reward = 0
            for n in range(nsteps):
                # Get the actions and values from the actor critic, we don't need neglogp
                actions, values, neglogp = actor_critic.act(obs)
                mb_obs.append(np.copy(obs))
                mb_actions.append(actions)
                mb_values.append(values)
                mb_dones.append(dones)
                obs, rewards, dones, info = envs.step(actions)
                total_reward += np.sum(rewards)
                episode_rewards += rewards
                # track per-episode returns: zero them out when an env resets
                masks = 1 - np.array(dones)
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
                mb_rewards.append(rewards)
                mb_depth.append(np.array([ info_item['scramble_depth'] for info_item in info ]))
            mb_dones.append(dones)
            # Convert batch steps to batch rollouts (env-major ordering)
            mb_obs = np.asarray(mb_obs, dtype=np.float32).swapaxes(1,0).reshape(batch_ob_shape)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1,0)
            mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1,0)
            mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1,0)
            mb_dones = np.asarray(mb_dones, dtype=np.float32).swapaxes(1,0)
            mb_depth = np.asarray(mb_depth, dtype=np.int32).swapaxes(1,0)
            mb_masks = mb_dones[:, :-1]
            mb_dones = mb_dones[:, 1:]
            last_values = actor_critic.critique(obs).tolist()
            # discounting: bootstrap unfinished rollouts with the critic value
            for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                d = d.tolist()
                if d[-1] == 0:
                    rewards = discount_with_dones(rewards+[value], d+[0], gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, d, gamma)
                mb_rewards[n] = rewards
            # Flatten the whole minibatch
            mb_rewards = mb_rewards.flatten()
            mb_actions = mb_actions.flatten()
            mb_values = mb_values.flatten()
            mb_masks = mb_masks.flatten()
            mb_depth = mb_depth.flatten()
            # Save the information to tensorboard
            if summarize:
                loss, policy_loss, value_loss, policy_ent, mrew, mdp, _, summary = actor_critic.train(
                    mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i, summary_op)
                writer.add_summary(summary, i)
            else:
                loss, policy_loss, value_loss, policy_ent, mrew, mdp, _ = actor_critic.train(
                    mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i)
            if i % log_interval == 0:
                actor_critic.save(log_path, i)
        actor_critic.save(log_path, 'final')
        print('a2c model is finished training')
| 9,838 | 3,287 |
from enum import Enum
import hashlib
import math
import os
import random
import re
from chainmap import ChainMap
from torch.autograd import Variable
import librosa
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from .manage_audio import AudioPreprocessor
from torchdiffeq import odeint_adjoint as odeint
import pickle
class SimpleCache(dict):
    """A dict that holds at most `limit` distinct keys.

    Existing keys may always be overwritten; inserts beyond the limit are
    silently dropped. FIX: the capacity check now uses len(self) instead of
    a manually incremented counter — the old counter never decremented on
    deletion, so deleted entries permanently consumed capacity. `n_keys` is
    kept in sync for any callers that read it.
    """
    def __init__(self, limit):
        super().__init__()
        self.limit = limit
        self.n_keys = 0  # mirror of len(self), kept for compatibility
    def __setitem__(self, key, value):
        if key in self:
            # overwrite never changes the key count
            super().__setitem__(key, value)
        elif len(self) < self.limit:
            super().__setitem__(key, value)
            self.n_keys = len(self)
        # returning the value preserves the original (unusual) contract for
        # callers that invoke __setitem__ directly
        return value
class ConfigType(Enum):
    """Identifiers of the supported model configurations (see find_model)."""
    ODE_TCNN = "ode-tcnn"
    ODE_TDNN = "ode-tdnn"
def find_model(conf):
    """Map a config name (string or ConfigType) to its model class.

    Prefix matching is used so variant names like "ode-tcnn-small" still
    resolve. Prints a notice and returns None for unknown names.
    """
    if isinstance(conf, ConfigType):
        conf = conf.value
    if conf.startswith("ode-tcnn"):
        print("ODE-TCNN")
        return SpeechOdeTCNNModel
    if conf.startswith("ode-tdnn"):
        print("ODE-TDNN")
        return SpeechOdeTDNNModel
    print("model is not specified.")
    return None
def find_config(conf):
    """Return the configuration dict registered under *conf* in _configs."""
    key = conf.value if isinstance(conf, ConfigType) else conf
    return _configs[key]
def truncated_normal(tensor, std_dev=0.01):
    """Fill *tensor* in place with N(0, std_dev) samples truncated to
    +/- 2*std_dev: out-of-range entries are redrawn until none remain."""
    tensor.zero_()
    tensor.normal_(std=std_dev)
    while torch.sum(torch.abs(tensor) > 2 * std_dev) > 0:
        # redraw only the offending entries (mean 0 via the zeroed copy)
        outliers = tensor[torch.abs(tensor) > 2 * std_dev]
        outliers.zero_()
        tensor[torch.abs(tensor) > 2 * std_dev] = torch.normal(outliers, std=std_dev)
class BNStatistics(object):
    """Per-timestep accumulator for BatchNorm running mean/variance.

    mean_t[t] / var_t[t] hold summed statistics for discretised timestep t,
    count[t] the number of contributions; average() converts sums to means
    in place. poly_coeff_mean/var cache polynomial fits over time (filled
    lazily by complement_polyfit2).
    """
    def __init__(self, max_t):
        self.max_t = max_t
        self._fresh_state()
    def _fresh_state(self):
        # shared by __init__ and reset(): empty accumulators and caches
        self.mean_t = [None] * self.max_t
        self.var_t = [None] * self.max_t
        self.count = [0] * self.max_t
        self.poly_coeff_mean = None  # for polyfit
        self.poly_coeff_var = None  # for polyfit
    def reset(self):
        """Drop all accumulated statistics and cached fits."""
        self._fresh_state()
    def average(self):
        """Divide accumulated sums by their counts (empty slots untouched)."""
        for t in range(self.max_t):
            if self.count[t] > 0:
                self.mean_t[t] = self.mean_t[t] / self.count[t]
                self.var_t[t] = self.var_t[t] / self.count[t]
class SerializableModule(nn.Module):
    """nn.Module base with checkpoint helpers plus plumbing for recording and
    restoring the per-timestep BatchNorm statistics of an attached ODE
    function (see init_bn_statistics / run_norm)."""
    def __init__(self):
        super().__init__()
        self.item_list = []   # names of the norms tracked on self.odefunc
        self.odefunc = None   # set by init_bn_statistics()
    def save(self, filename):
        torch.save(self.state_dict(), filename)
    def load(self, filename):
        # map_location keeps CPU-only loading of GPU checkpoints working
        self.load_state_dict(torch.load(filename, map_location=lambda storage, loc: storage))
    def switch_forward(self):
        # bForward=True: record BN statistics during the forward ODE solve
        self.odefunc.bForward = True
    def switch_backward(self):
        # bForward=False: restore recorded statistics (backward/adjoint pass)
        self.odefunc.bForward = False
    def init_bn_statistics(self, odefunc, item_list, max_t):
        """Attach one BNStatistics accumulator per norm name to *odefunc*."""
        self.odefunc = odefunc
        self.item_list = item_list
        for item in self.item_list:
            self.odefunc.bn_statistics[item] = BNStatistics(max_t)
    def save_bn_statistics(self, filename):
        # pickle rather than torch.save: BNStatistics is a plain object
        f_pickle = open(filename, "wb")
        pickle.dump(self.odefunc.bn_statistics, f_pickle)
        f_pickle.close()
    def load_bn_statistics(self, filename):
        f_pickle = open(filename, "rb")
        self.odefunc.bn_statistics = pickle.load(f_pickle)
        f_pickle.close()
    def reset_bn_statistics(self):
        for item in self.item_list:
            self.odefunc.bn_statistics[item].reset()
    def average_bn_statistics(self):
        for item in self.item_list:
            self.odefunc.bn_statistics[item].average()
class ODEBlock(nn.Module):
    """Layer that integrates *odefunc* from t=0 to t=it with the adjoint
    solver (torchdiffeq odeint_adjoint) and returns the final state."""
    def __init__(self, odefunc, it=1, tol=1e-3):
        super(ODEBlock, self).__init__()
        self.odefunc = odefunc
        self.integration_time = torch.tensor([0, it]).float()
        self.tol = tol  # shared rtol/atol for the solver
    def forward(self, x):
        # keep the time grid on x's device/dtype
        self.integration_time = self.integration_time.type_as(x)
        out = odeint(self.odefunc, x, self.integration_time, rtol=self.tol, atol=self.tol)
        return out[1]  # out[0] is the initial state at t=0
    def set_integration_time(self, it):
        self.integration_time = torch.tensor([0, it]).float()
    @property
    def nfe(self):
        # number of function evaluations performed (proxy for solver cost)
        return self.odefunc.nfe
    @nfe.setter
    def nfe(self, value):
        self.odefunc.nfe = value
def complement_run_bn(data, max_t, t):
    """Estimate a missing BN statistic at timestep *t* by linearly
    interpolating between the nearest populated neighbours in *data*.

    Falls back to the single available neighbour (with a notice) when only
    one side is populated, and aborts the process when neither exists.
    """
    # nearest populated slot below t
    low, tl = None, t - 1
    while tl >= 0:
        if type(data[tl]) == torch.Tensor:
            low = data[tl]
            break
        tl -= 1
    # nearest populated slot above t
    high, th = None, t + 1
    while th < max_t:
        if type(data[th]) == torch.Tensor:
            high = data[th]
            break
        th += 1
    if type(low) != torch.Tensor:
        if type(high) != torch.Tensor:
            print("Complement failed ({} {}) ...".format(tl, th))
            exit()
        print("low is not found, and thus high ({}) is used in stead.".format(th))
        return high
    if type(high) != torch.Tensor:
        print("high is not found, and thus low ({}) is used in stead.".format(tl))
        return low
    # both neighbours found: linear interpolation between tl and th
    return low + (high-low)*(float(t-tl)/float(th-tl))
def complement_simple(norm, bn_statistics, tm):
    """Point *norm*'s running statistics at the stats recorded for time *tm*.

    *tm* is a scalar tensor; timesteps are discretised at resolution 0.01
    and clamped to the last recorded slot. Missing slots are filled by
    interpolation via complement_run_bn before use.
    """
    step = round(tm.item() * 100)
    means = bn_statistics.mean_t
    variances = bn_statistics.var_t
    if step >= len(means):
        print("t is too large ({} >= {})".format(step, len(means)))
        step = len(means) - 1
    if type(means[step]) != torch.Tensor:
        print("complement at t = {}".format(step))
        means[step] = complement_run_bn(means, len(means), step)
        variances[step] = complement_run_bn(variances, len(variances), step)
    norm.running_mean = means[step]
    norm.running_var = variances[step]
def calc_poly_coeff(data):
dtype = None
device = None
x = []
y = None
for i in range(len(data)):
if type(data[i]) == torch.Tensor:
dtype = data[i].dtype
device = data[i].device
x.append(i/100.0)
if type(y) != np.ndarray:
y = data[i].cpu().numpy()
else:
y = np.vstack((y, data[i].cpu().numpy()))
x = np.array(x)
coef = np.polyfit(x,y,2)
y_pred = coef[0].reshape(1,-1)*(x**2).reshape(-1,1) + coef[1].reshape(1,-1)*x.reshape(-1,1) + coef[2].reshape(1,-1)*np.ones((len(x),1))
y_bar = np.mean(y, axis=0) * np.ones((len(x),1))
r2 = np.ones(y.shape[1]) - np.sum((y-y_pred)**2, axis=0) / np.sum((y-y_bar)**2, axis=0)
t_coef = torch.from_numpy(coef)
if type(device) == torch.device:
t_coef = t_coef.to(device)
if type(dtype) == torch.dtype:
t_coef = t_coef.to(dtype)
return t_coef
def complement_polyfit2(norm, bn_statistics, t):
    """Set *norm*'s running stats from a degree-2 polynomial fit over time.

    Coefficients are fitted lazily on first use and cached on
    bn_statistics. NOTE(review): the trailing complement_simple() call
    overwrites running_mean/running_var with the per-step lookup, so the
    polyfit values only survive when... actually complement_simple always
    assigns -- confirm whether the fit is meant only as a cache warm-up or
    the final call is a leftover.
    """
    if type(bn_statistics.poly_coeff_mean) != torch.Tensor:
        print("Calculating polynomial coefficients...")
        bn_statistics.poly_coeff_mean = calc_poly_coeff(bn_statistics.mean_t)
        bn_statistics.poly_coeff_var = calc_poly_coeff(bn_statistics.var_t)
    # evaluate a*t^2 + b*t + c per feature
    norm.running_mean = bn_statistics.poly_coeff_mean[0]*(t**2) + bn_statistics.poly_coeff_mean[1]*t + bn_statistics.poly_coeff_mean[2]
    norm.running_var = bn_statistics.poly_coeff_var[0]*(t**2) + bn_statistics.poly_coeff_var[1]*t + bn_statistics.poly_coeff_var[2]
    complement_simple(norm, bn_statistics, t)
def collect_statistics(norm, mean_t, var_t, count, tm):
    """Accumulate *norm*'s current running mean/var into the per-timestep
    sums (slot = round(tm*100)); out-of-range timesteps are dropped with a
    notice."""
    slot = round(tm.item() * 100)
    if slot >= len(mean_t):
        print("list index out of range: {} > {}".format(slot, len(mean_t)))
        return
    if type(mean_t[slot]) != torch.Tensor:
        # first contribution at this timestep: start accumulators at zero
        mean_t[slot] = torch.zeros(norm.num_features)
        var_t[slot] = torch.zeros(norm.num_features)
    mean_t[slot] += norm.running_mean
    var_t[slot] += norm.running_var
    count[slot] += 1
def run_norm(x, t, norm, bn_statistics, training, bForward, complement_statistics_func=complement_simple):
    """Apply BatchNorm *norm* at ODE time *t* with time-dependent statistics.

    Training forward solve: reset running stats so (with momentum=None, as
    the norms in this file are built) they reflect exactly this batch, then
    record them for timestep t. Training backward/adjoint solve: restore
    the recorded stats for t via *complement_statistics_func* first. In
    eval mode the running stats are used as-is -- presumably the caller has
    restored them beforehand; confirm.
    """
    if training:
        if bForward:
            norm.running_mean.zero_()
            norm.running_var.fill_(1)
            norm.num_batches_tracked.zero_()
        else:
            complement_statistics_func(norm, bn_statistics, t)
            norm.num_batches_tracked.zero_()
    out = norm(x)
    if training and bForward:
        # record this batch's stats for timestep t
        collect_statistics(norm, bn_statistics.mean_t, bn_statistics.var_t, bn_statistics.count, t)
    return out
# Registry mapping strategy names to BN-statistics complement functions
# (used to pick how missing per-timestep stats are reconstructed).
bn_complement_func = { "complement": complement_simple, "polyfit2": complement_polyfit2 }
class TCNN_ODEfunc(nn.Module):
    """ODE dynamics function: a residual temporal-CNN cell evaluated at
    continuous time t, with time-dependent BatchNorm handled by run_norm.

    The norms use affine=False, momentum=None so their running stats mirror
    exactly the most recent batch, which run_norm records per timestep.
    """
    def __init__(self, n_maps):
        super(TCNN_ODEfunc, self).__init__()
        self.norm1 = nn.BatchNorm2d(n_maps, affine=False, momentum=None)
        self.conv1 = nn.Conv2d(n_maps, n_maps, (9, 1), padding=(4,0), dilation=1, bias=False)
        self.norm2 = nn.BatchNorm2d(n_maps, affine=False, momentum=None)
        self.conv2 = nn.Conv2d(n_maps, n_maps, (9, 1), padding=(4,0), dilation=1, bias=False)
        self.norm3 = nn.BatchNorm2d(n_maps, affine=False, momentum=None)
        self.conv3 = nn.Conv2d(n_maps, n_maps, (1, 1), dilation=1, bias=False)
        self.bn_statistics = {}  # norm name -> BNStatistics, filled by init_bn_statistics
        self.bForward = True     # True while solving forward in time
        self.nfe = 0             # function-evaluation counter
    def forward(self, t, x):
        self.nfe += 1
        out = self.conv1(x)
        out = run_norm(out, t, self.norm1, self.bn_statistics["norm1"], self.training, self.bForward)
        out = F.relu(out)
        out = self.conv2(out)
        out = run_norm(out, t, self.norm2, self.bn_statistics["norm2"], self.training, self.bForward)
        # branch: 1x1 projection of the input, combined residually below
        out3 = self.conv3(x)
        out3 = run_norm(out3, t, self.norm3, self.bn_statistics["norm3"], self.training, self.bForward)
        out3 = F.relu(out3)
        out = F.relu(out + out3)
        return out
class SpeechOdeTCNNModel(SerializableModule):
    """Keyword-spotting model: conv front-end followed by an ODE block over
    TCNN dynamics and a linear classifier over time-averaged features.

    config keys used: n_labels, n_mels, n_feature_maps, integration_time,
    tol, and optionally res_pool.
    """
    def __init__(self, config):
        super().__init__()
        n_labels = config["n_labels"]
        n_mels = config["n_mels"]
        n_maps = config["n_feature_maps"]
        # read once (the original read "integration_time" twice)
        it = config["integration_time"]
        tol = config["tol"]
        print("n_mels = {} --> n_maps = {}".format(n_mels, n_maps))
        self.conv0 = nn.Conv2d(n_mels, n_maps, (3, 1), padding=(1,0), dilation=1, bias=False)
        self.norm_in = nn.BatchNorm2d(n_maps, affine=False)
        if "res_pool" in config:
            self.pool = nn.AvgPool2d(config["res_pool"])
        self.odeblock = ODEBlock(TCNN_ODEfunc(n_maps), it, tol)
        self.output = nn.Linear(n_maps, n_labels)
        # time axis is discretised at 0.01; pad the stat buffers with slack
        self.init_bn_statistics(self.odeblock.odefunc, ["norm1", "norm2", "norm3"], int(it*100)+100)
    def forward(self, x):
        """x: 3-D input whose dim 1 matches conv0's n_mels in-channels;
        returns logits of shape (batch, n_labels)."""
        x = x.unsqueeze(3)  # add a trailing width dim for Conv2d
        x = self.conv0(x)
        x = F.relu(self.norm_in(x))
        if hasattr(self, "pool"):
            x = self.pool(x)
        x = self.odeblock(x)
        x = x.view(x.size(0), x.size(1), -1)  # shape: (batch, feats, o3)
        x = torch.mean(x, 2)  # global average over the remaining axes
        return self.output(x)
# TDNN is based on the following implementation:
# https://github.com/cvqluu/TDNN
class TDNN(nn.Module):
    """Time-Delay Neural Network layer.

    Applies one shared affine transformation to sliding windows of frames
    rather than to the whole sequence at once.
    """
    def __init__(
        self,
        input_dim=23,
        output_dim=512,
        context_size=5,
        stride=1,
        dilation=1,
        padding=0
    ):
        '''
        TDNN as defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf

        Context size and dilation determine the frames selected for each
        window (context size is not defined in the traditional sense), e.g.:
            context size 5, dilation 1 -> [-2, -1, 0, 1, 2]
            context size 3, dilation 2 -> [-2,  0, 2]
            context size 1, dilation 1 -> [0]
        '''
        super(TDNN, self).__init__()
        self.context_size = context_size
        self.stride = stride
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dilation = dilation
        self.padding = padding
        # One affine map shared across all (flattened) context windows.
        self.kernel = nn.Linear(input_dim*context_size, output_dim)
        # Xavier initialization
        nn.init.xavier_normal_(self.kernel.weight)
    def forward(self, x):
        '''
        input:  size (batch, seq_len, input_features)
        output: size (batch, new_seq_len, output_features)
        '''
        _, _, feat = x.shape
        assert (feat == self.input_dim), 'Input dimension was wrong. Expected ({}), got ({})'.format(self.input_dim, feat)
        # Unfold (batch, 1, seq_len, input_dim) into per-window columns.
        # The width-axis kernel spans the whole feature dimension, so only
        # the time axis is effectively strided/dilated/padded.
        windows = F.unfold(
            x.unsqueeze(1),
            (self.context_size, self.input_dim),
            stride=(self.stride, self.input_dim),
            dilation=(self.dilation, 1),
            padding=(self.padding, 0)
        )
        # (batch, n_windows, context*input_dim) -> affine map per window
        return self.kernel(windows.transpose(1, 2))
class TDNN_ODEfunc(nn.Module):
    """ODE dynamics f(t, x) built from one TDNN layer plus a time-indexed batch norm."""
    def __init__(self, n_maps, window):
        super(TDNN_ODEfunc, self).__init__()
        self.norm1 = nn.BatchNorm1d(n_maps, affine=False, momentum=None)
        # 'same'-style padding keeps the sequence length unchanged for odd windows.
        self.tdnn1 = TDNN(input_dim=n_maps, output_dim=n_maps, context_size=window, stride=1, dilation=1, padding=int((window-1)/2))
        self.bn_statistics = {}  # time-indexed statistics, filled externally
        self.bForward = True     # False while integrating backward in time
        self.nfe = 0             # number of function evaluations
    def forward(self, t, x):
        """Evaluate the dynamics at time t for state x of shape (batch, seq, feats)."""
        self.nfe += 1
        activated = F.relu(self.tdnn1(x))
        # BatchNorm1d wants (batch, feats, seq); transpose around run_norm.
        normed = run_norm(activated.transpose(1, 2), t, self.norm1, self.bn_statistics["norm1"], self.training, self.bForward)
        return normed.transpose(1, 2)
class SpeechOdeTDNNModel(SerializableModule):
    """Keyword-spotting model: TDNN sub-sampling front-end + ODE-integrated TDNN dynamics.

    Config keys used: n_labels, n_mels, n_feature_maps, integration_time,
    tol, sub_sample_window, sub_sample_stride, tdnn_window.
    """
    def __init__(self, config):
        super().__init__()
        n_labels = config["n_labels"]
        n_mels = config["n_mels"]
        n_maps = config["n_feature_maps"]
        it = config["integration_time"]
        tol = config["tol"]
        print("n_mels = {} --> n_maps = {}".format(n_mels, n_maps))
        # Typo fix in log message: "sub_sampe" -> "sub_sample".
        print("sub_sample: window = {}, stride = {}".format(config["sub_sample_window"], config["sub_sample_stride"]))
        print("tdnn: window = {}".format(config["tdnn_window"]))
        self.tdnn0 = TDNN(input_dim=n_mels, output_dim=n_maps, context_size=config["sub_sample_window"], stride=config["sub_sample_stride"], dilation=1, padding=int((config["sub_sample_window"]-1)/2))
        self.norm_in = nn.BatchNorm1d(n_maps, affine=False)
        self.odeblock = ODEBlock(TDNN_ODEfunc(n_maps, config["tdnn_window"]), it, tol)
        self.output = nn.Linear(n_maps, n_labels)
        # Table length matches collect_statistics' t = round(t*100) indexing,
        # plus 100 slots of headroom for solver overshoot.
        self.init_bn_statistics(self.odeblock.odefunc, ["norm1"], int(it*100)+100)
    def forward(self, x):
        """x: (batch, seq, n_mels) -> class logits (batch, n_labels)."""
        x = F.relu(self.tdnn0(x))
        # BatchNorm1d expects (batch, channels, seq).
        x = x.transpose(1,2)
        x = self.norm_in(x)
        x = x.transpose(1,2)
        x = self.odeblock(x)
        x = torch.mean(x, 1)
        return self.output(x)
class DatasetType(Enum):
    """Dataset split tags; the value doubles as an index into the `sets` list in SpeechDataset.splits."""
    TRAIN = 0
    DEV = 1
    TEST = 2
class SpeechDataset(data.Dataset):
    """Keyword-spotting audio dataset with synthetic silence/unknown examples
    and background-noise / time-shift augmentation.

    NOTE(review): relies on project helpers SimpleCache and AudioPreprocessor
    defined elsewhere; their exact semantics are assumed here.
    """
    # Sentinel labels for the two synthetic classes (mapped to labels 0 and 1).
    LABEL_SILENCE = "__silence__"
    LABEL_UNKNOWN = "__unknown__"
    def __init__(self, data, set_type, config):
        """Build the dataset.

        data: mapping of audio file path -> integer label.
        set_type: DatasetType value (TRAIN enables time-shift augmentation).
        config: dict of preprocessing/augmentation settings (see default_config).
        """
        super().__init__()
        self.audio_files = list(data.keys())
        self.set_type = set_type
        self.audio_labels = list(data.values())
        # Keep only .wav background-noise files; note this mutates the caller's config.
        config["bg_noise_files"] = list(filter(lambda x: x.endswith("wav"), config.get("bg_noise_files", [])))
        self.bg_noise_audio = [librosa.core.load(file, sr=16000)[0] for file in config["bg_noise_files"]]
        self.unknown_prob = config["unknown_prob"]
        self.silence_prob = config["silence_prob"]
        self.noise_prob = config["noise_prob"]
        self.input_length = config["input_length"]
        self.timeshift_ms = config["timeshift_ms"]
        self._audio_cache = SimpleCache(config["cache_size"])
        self._file_cache = SimpleCache(config["cache_size"])
        # Label 1 is the "unknown" class; the silence count is a fraction of the rest.
        n_unk = len(list(filter(lambda x: x == 1, self.audio_labels)))
        self.n_silence = int(self.silence_prob * (len(self.audio_labels) - n_unk))
        self.n_mels = config["n_mels"]
        self.hop_ms = config["hop_ms"]
        self.n_fft = config["n_fft"]
        self.audio_processor = AudioPreprocessor(n_mels=self.n_mels, n_dct_filters=config["n_dct_filters"], hop_ms=self.hop_ms, n_fft=self.n_fft)
        self.audio_preprocess_type = config["audio_preprocess_type"]
    @staticmethod
    def default_config():
        """Return a baseline configuration dict for this dataset."""
        config = {}
        config["group_speakers_by_id"] = True
        config["silence_prob"] = 0.1
        config["noise_prob"] = 0.8
        config["n_dct_filters"] = 40
        config["input_length"] = 16000
        config["n_mels"] = 40
        config["timeshift_ms"] = 100
        config["unknown_prob"] = 0.1
        config["train_pct"] = 80
        config["dev_pct"] = 10
        config["test_pct"] = 10
        config["wanted_words"] = ["command", "random"]
        config["data_folder"] = "/data/speech_dataset"
        config["audio_preprocess_type"] = "MFCCs"
        return config
    def collate_fn(self, data):
        """Batch raw audio clips into a feature tensor and a label tensor.

        Feature layout depends on audio_preprocess_type
        ("MFCCs", "MFCC_TCNN" or "PCEN").
        """
        x = None
        y = []
        for audio_data, label in data:
            if self.audio_preprocess_type == "MFCCs":
                #audio_tensor = torch.from_numpy(self.audio_processor.compute_mfccs(audio_data).reshape(1, 101, 40))
                # (1000 // hop_ms) + 1 frames for a one-second clip.
                audio_tensor = torch.from_numpy(self.audio_processor.compute_mfccs(audio_data).reshape(1, (1000//self.hop_ms)+1, self.n_mels))
                x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)
            elif self.audio_preprocess_type == "MFCC_TCNN":
                audio_tensor = torch.from_numpy(self.audio_processor.compute_mfccs(audio_data).T)
                x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)
            elif self.audio_preprocess_type == "PCEN":
                audio_tensor = torch.from_numpy(np.expand_dims(audio_data, axis=0))
                audio_tensor = self.audio_processor.compute_pcen(audio_tensor)
                x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)
            y.append(label)
        return x, torch.tensor(y)
    def _timeshift_audio(self, data):
        """Randomly shift audio in time by up to +/- timeshift_ms, zero-padded back to length."""
        shift = (16000 * self.timeshift_ms) // 1000
        shift = random.randint(-shift, shift)
        a = -min(0, shift)
        b = max(0, shift)
        data = np.pad(data, (a, b), "constant")
        return data[:len(data) - a] if a else data[b:]
    def load_audio(self, example, silence=False):
        """Load (and cache) one clip, applying time-shift and noise augmentation.

        Returns a numpy array of length input_length.
        """
        if silence:
            example = "__silence__"
        # 70% of the time serve the previously-augmented clip from the cache.
        if random.random() < 0.7:
            try:
                return self._audio_cache[example]
            except KeyError:
                pass
        in_len = self.input_length
        if self.bg_noise_audio:
            # Random slice of a random background-noise recording.
            bg_noise = random.choice(self.bg_noise_audio)
            a = random.randint(0, len(bg_noise) - in_len - 1)
            bg_noise = bg_noise[a:a + in_len]
        else:
            bg_noise = np.zeros(in_len)
        if silence:
            data = np.zeros(in_len, dtype=np.float32)
        else:
            file_data = self._file_cache.get(example)
            data = librosa.core.load(example, sr=16000)[0] if file_data is None else file_data
            self._file_cache[example] = data
        data = np.pad(data, (0, max(0, in_len - len(data))), "constant")
        if self.set_type == DatasetType.TRAIN:
            data = self._timeshift_audio(data)
        # Mix in background noise (always for silence examples).
        if random.random() < self.noise_prob or silence:
            a = random.random() * 0.1
            data = np.clip(a * bg_noise + data, -1, 1)
        self._audio_cache[example] = data
        return data
    @classmethod
    def splits(cls, config):
        """Scan config["data_folder"] and build (train, dev, test) datasets.

        Files are assigned to splits by a speaker-stable hash; "unknown" words
        are sampled per split; background-noise files are shared by all splits
        (dev/test get noise_prob=0).
        """
        folder = config["data_folder"]
        wanted_words = config["wanted_words"]
        unknown_prob = config["unknown_prob"]
        train_pct = config["train_pct"]
        dev_pct = config["dev_pct"]
        test_pct = config["test_pct"]
        # Labels 0/1 are reserved for silence/unknown; wanted words start at 2.
        words = {word: i + 2 for i, word in enumerate(wanted_words)}
        words.update({cls.LABEL_SILENCE:0, cls.LABEL_UNKNOWN:1})
        sets = [{}, {}, {}]
        unknowns = [0] * 3
        bg_noise_files = []
        unknown_files = []
        for folder_name in os.listdir(folder):
            path_name = os.path.join(folder, folder_name)
            is_bg_noise = False
            if os.path.isfile(path_name):
                continue
            if folder_name in words:
                label = words[folder_name]
            elif folder_name == "_background_noise_":
                # NOTE(review): `label` stays unbound on this branch; a
                # non-file entry inside _background_noise_ would hit the
                # `label ==` check below and raise NameError — verify.
                is_bg_noise = True
            else:
                label = words[cls.LABEL_UNKNOWN]
            for filename in os.listdir(path_name):
                wav_name = os.path.join(path_name, filename)
                if is_bg_noise and os.path.isfile(wav_name):
                    bg_noise_files.append(wav_name)
                    continue
                elif label == words[cls.LABEL_UNKNOWN]:
                    unknown_files.append(wav_name)
                    continue
                if config["group_speakers_by_id"]:
                    # Strip the "_nohash_" suffix so all clips from one speaker hash together.
                    # NOTE(review): `hashname` is unbound when group_speakers_by_id is False.
                    hashname = re.sub(r"_nohash_.*$", "", filename)
                max_no_wavs = 2**27 - 1
                bucket = int(hashlib.sha1(hashname.encode()).hexdigest(), 16)
                bucket = (bucket % (max_no_wavs + 1)) * (100. / max_no_wavs)
                if bucket < dev_pct:
                    tag = DatasetType.DEV
                elif bucket < test_pct + dev_pct:
                    tag = DatasetType.TEST
                else:
                    tag = DatasetType.TRAIN
                sets[tag.value][wav_name] = label
        # Size each split's "unknown" portion relative to the split itself.
        for tag in range(len(sets)):
            unknowns[tag] = int(unknown_prob * len(sets[tag]))
        random.shuffle(unknown_files)
        a = 0
        for i, dataset in enumerate(sets):
            b = a + unknowns[i]
            unk_dict = {u: words[cls.LABEL_UNKNOWN] for u in unknown_files[a:b]}
            dataset.update(unk_dict)
            a = b
        train_cfg = ChainMap(dict(bg_noise_files=bg_noise_files), config)
        test_cfg = ChainMap(dict(bg_noise_files=bg_noise_files, noise_prob=0), config)
        datasets = (cls(sets[0], DatasetType.TRAIN, train_cfg), cls(sets[1], DatasetType.DEV, test_cfg),
                    cls(sets[2], DatasetType.TEST, test_cfg))
        return datasets
    def __getitem__(self, index):
        """Indices past the real files yield synthetic silence examples (label 0)."""
        if index >= len(self.audio_labels):
            return self.load_audio(None, silence=True), 0
        return self.load_audio(self.audio_files[index]), self.audio_labels[index]
    def __len__(self):
        """Real examples plus the synthetic silence examples."""
        return len(self.audio_labels) + self.n_silence
# Per-architecture default hyperparameters, keyed by ConfigType value
# (ConfigType is defined elsewhere in this module).
_configs = {
    ConfigType.ODE_TCNN.value: dict(n_labels=12, n_feature_maps=20, res_pool=(4, 1), use_dilation=False),
    ConfigType.ODE_TDNN.value: dict(n_labels=12, n_feature_maps=32, sub_sample_window=3, sub_sample_stride=3, tdnn_window=3),
}
| 23,504 | 8,286 |
import json
import os
import arrow
import libvirt
import pytest
from virt_backup.backups import DomBackup
from virt_backup.domains import get_xml_block_of_disk
from virt_backup.backups.snapshot import DomExtSnapshot, DomExtSnapshotCallbackRegistrer
from virt_backup.exceptions import DiskNotFoundError, SnapshotNotStarted
from helper.virt_backup import MockSnapshot
class TestDomExtSnapshot:
    """Tests for DomExtSnapshot: start() metadata, snapshot XML generation,
    manual disk pivoting and cleanup."""
    snapshot_helper = None
    @pytest.fixture(autouse=True)
    def gen_snapshot_helper(self, build_mock_domain):
        """Recreate the helper around a mock domain with two qcow2 disks before each test."""
        dom = build_mock_domain
        callbacks_registrer = DomExtSnapshotCallbackRegistrer(dom._conn)
        self.snapshot_helper = DomExtSnapshot(
            dom=dom,
            callbacks_registrer=callbacks_registrer,
            disks={
                "vda": {"src": "/vda.qcow2", "type": "qcow2"},
                "vdb": {"src": "/vdb.qcow2", "type": "qcow2"},
            },
        )
    def test_snapshot_logic_date(self, monkeypatch):
        """
        Check that the snapshot metadata date lies between timestamps taken
        just before and just after the snapshot was started.
        """
        pre_snap_date = arrow.now()
        metadatas = self.start_snapshot(monkeypatch)
        post_snap_date = arrow.now()
        snapshot_date = metadatas["date"]
        assert snapshot_date >= pre_snap_date
        assert snapshot_date <= post_snap_date
    def test_snapshot_disks_infos(self, monkeypatch):
        """
        Check if metadatas contains the necessary infos for every disk
        """
        metadatas = self.start_snapshot(monkeypatch)
        assert len(self.snapshot_helper.disks) == len(metadatas["disks"])
        for disk in self.snapshot_helper.disks:
            assert sorted(("snapshot", "src", "type")) == sorted(
                metadatas["disks"][disk].keys()
            )
    def test_snapshot_correct_snapshot_path(self, monkeypatch):
        """
        Check if the snapshot is done in the same path as its source disk
        """
        metadatas = self.start_snapshot(monkeypatch)
        for disk in metadatas["disks"].values():
            assert os.path.dirname(disk["src"]) == os.path.dirname(disk["snapshot"])
    def start_snapshot(self, monkeypatch):
        """Start the helper with the real libvirt snapshot call stubbed out."""
        monkeypatch.setattr(
            self.snapshot_helper, "external_snapshot", lambda: MockSnapshot("123")
        )
        return self.snapshot_helper.start()
    def test_external_snapshot(self):
        """The mock domain produces a MockSnapshot from external_snapshot()."""
        snap = self.snapshot_helper.external_snapshot()
        assert isinstance(snap, MockSnapshot)
    def test_external_snapshot_quiesce_fallback(self):
        """When the quiesced snapshot attempt fails, the helper retries without quiesce."""
        tried = {"quiesce": False}
        def mock_quiesce_failure(_, flags):
            # Fail only the quiesced attempt so the fallback path is exercised.
            if (flags & libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) != 0:
                tried["quiesce"] = True
                raise libvirt.libvirtError("quiesce error")
            return MockSnapshot("123")
        self.snapshot_helper.dom.set_mock_snapshot_create(mock_quiesce_failure)
        self.snapshot_helper.quiesce = True
        snap = self.snapshot_helper.external_snapshot()
        assert tried["quiesce"]
        assert isinstance(snap, MockSnapshot)
    def test_get_snapshot_flags(self):
        """Default flags: disk-only, atomic, no libvirt metadata."""
        flags = self.snapshot_helper._get_snapshot_flags()
        assert flags == (
            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
            + libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC
            + libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
        )
    def test_get_snapshot_flags_quiesce(self):
        """quiesce=True must add the QUIESCE bit."""
        flags = self.snapshot_helper._get_snapshot_flags(quiesce=True)
        assert (flags & libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) != 0
    def test_gen_libvirt_snapshot_xml(self):
        """Tracked disks are external; other domain disks are excluded."""
        expected_xml = (
            "<domainsnapshot>\n"
            "  <description>Pre-backup external snapshot</description>\n"
            "  <disks>\n"
            '    <disk name="vda" snapshot="external"/>\n'
            '    <disk name="vdb" snapshot="external"/>\n'
            '    <disk name="vdz" snapshot="no"/>\n'
            "  </disks>\n"
            "</domainsnapshot>\n"
        )
        assert self.snapshot_helper.gen_libvirt_snapshot_xml() == expected_xml
    def test_gen_libvirt_snapshot_xml_ignored_disk(self):
        """A disk removed from the helper must be marked snapshot="no"."""
        self.snapshot_helper.disks.pop("vdb")
        expected_xml = (
            "<domainsnapshot>\n"
            "  <description>Pre-backup external snapshot</description>\n"
            "  <disks>\n"
            '    <disk name="vda" snapshot="external"/>\n'
            '    <disk name="vdb" snapshot="no"/>\n'
            '    <disk name="vdz" snapshot="no"/>\n'
            "  </disks>\n"
            "</domainsnapshot>\n"
        )
        assert self.snapshot_helper.gen_libvirt_snapshot_xml() == expected_xml
    def test_manually_pivot_disk(self, build_mock_libvirtconn):
        """_manually_pivot_disk must rewrite the disk source in the domain XML."""
        self.snapshot_helper.conn = build_mock_libvirtconn
        self.snapshot_helper._manually_pivot_disk("vda", "/testvda", "qcow2")
        dom_xml = self.snapshot_helper.dom.XMLDesc()
        assert self.get_src_for_disk(dom_xml, "vda") == "/testvda"
    def get_src_for_disk(self, dom_xml, disk):
        """Return the source file path of `disk` in the domain XML."""
        elem = get_xml_block_of_disk(dom_xml, disk)
        return elem.xpath("source")[0].get("file")
    def test_manually_pivot_disk_libvirt_2(self, build_mock_libvirtconn):
        """
        Test manual pivot with libvirt < 3.0
        """
        conn = build_mock_libvirtconn
        conn._libvirt_version = 2000000
        conn._domains.append(self.snapshot_helper.dom)
        return self.test_manually_pivot_disk(conn)
    def test_manually_pivot_unexistant_disk(self):
        """Pivoting a disk the domain does not have must raise DiskNotFoundError."""
        with pytest.raises(DiskNotFoundError):
            self.snapshot_helper._manually_pivot_disk("sda", "/testvda", "qcow2")
    def test_clean_no_metadata(self):
        """clean() before start() must raise SnapshotNotStarted."""
        with pytest.raises(SnapshotNotStarted):
            self.snapshot_helper.clean()
    def test_clean(self, monkeypatch, tmpdir):
        """clean() must remove every temporary snapshot file."""
        snapdir = self.prepare_test_clean(monkeypatch, tmpdir)
        self.snapshot_helper.clean()
        assert len(snapdir.listdir()) == 0
    def prepare_test_clean(self, monkeypatch, tmpdir):
        """Create fake snapshot files plus metadata; return the snapshot directory."""
        snapshots = self.create_temp_snapshot_files(tmpdir)
        self.mock_pivot_mechanism(monkeypatch)
        # set the domain inactive to avoid the blockcommit
        self.snapshot_helper.dom.set_state(0, 0)
        self.snapshot_helper.metadatas = {
            "date": arrow.now(),
            "disks": {
                disk: {"src": prop["src"], "snapshot": snapshots[disk], "type": "qcow2"}
                for disk, prop in self.snapshot_helper.disks.items()
            },
        }
        return tmpdir.join("snaps")
    def create_temp_snapshot_files(self, tmpdir):
        """Materialize empty snapshot files under tmpdir/snaps for each tracked disk."""
        tmpdir = tmpdir.mkdir("snaps")
        self.snapshot_helper.dom.set_storage_basedir(os.path.abspath(str(tmpdir)))
        snapshots = {}
        # swap disk and snapshots, to just change the domain basedir
        for disk, prop in self.snapshot_helper.disks.items():
            dom_disk_path = (
                (get_xml_block_of_disk(self.snapshot_helper.dom.XMLDesc(), disk))
                .xpath("source")[0]
                .get("file")
            )
            tmpdir.join(os.path.basename(dom_disk_path)).write("")
            prop["snapshot"] = dom_disk_path
            disk_path = tmpdir.join("{}.qcow2.{}".format(disk, "123"))
            prop["src"] = str(disk_path)
            snapshots[disk] = prop["snapshot"]
        return snapshots
    def mock_pivot_mechanism(self, monkeypatch):
        """Disable qemu-img commit and disk pivoting so clean() only removes files."""
        monkeypatch.setattr(
            self.snapshot_helper, "_qemu_img_commit", lambda *args: None
        )
        monkeypatch.setattr(
            self.snapshot_helper, "_manually_pivot_disk", lambda *args: None
        )
| 7,642 | 2,476 |
import re
from . import base
class regex_search_ternary(base.Function):
    """
    Ternary regex operator taking arguments in the form
    STR1, REGEX, STR2, STR3.

    If REGEX matches STR1 (via re.search), STR2 is returned; otherwise
    STR3 is returned.
    """
    def __init__(self):
        # Exactly four arguments (min == max == 4).
        super(regex_search_ternary, self).__init__("regex_search_ternary", 4, 4)
    def execute(self, args):
        """Return args[2] when args[1] matches args[0], else args[3]; None if base validation fails."""
        if not super(regex_search_ternary, self).execute(args):
            return None
        return args[2] if re.search(args[1], args[0]) else args[3]
| 554 | 211 |
# Package version string, kept in sync with the distribution metadata.
__version__ = "2.4.3"
from . import errors
from .client import Client
from .interpreter import Interpreter
from .time import Time
from .formatters import format_property, format_decision_rules
from .reducer import reduce_decision_rules
from .tree_utils import (
    extract_decision_paths_from_tree,
    extract_decision_path_neighbors,
    extract_output_tree,
)
import nest_asyncio
# Patch asyncio to allow a nested asyncio event loop; a nested loop lets the
# client make websocket calls inside Jupyter and other browser-based IDEs.
nest_asyncio.apply()
# Defining what will be imported when doing `from craft_ai import *`
__all__ = [
    "Client",
    "errors",
    "Interpreter",
    "Time",
    "format_property",
    "format_decision_rules",
    "reduce_decision_rules",
    "extract_output_tree",
    "extract_decision_paths_from_tree",
    "extract_decision_path_neighbors",
]
| 905 | 291 |
# -*- coding: utf-8 -*-
from .instance import BlockchainInstance
from ..block import Block as SyncBlock, BlockHeader as SyncBlockHeader
from graphenecommon.aio.block import (
Block as GrapheneBlock,
BlockHeader as GrapheneBlockHeader,
)
@BlockchainInstance.inject
class Block(GrapheneBlock, SyncBlock):
    """
    Read a single block from the chain.

    :param int block: block number
    :param bitshares.aio.bitshares.BitShares blockchain_instance: BitShares
        instance
    :param bool lazy: Use lazy loading
    :param loop: async event loop

    Instances of this class are dictionaries that come with additional
    methods (see below) that allow dealing with a block and it's
    corresponding functions.

    The class body is empty: behavior comes from the async Graphene
    implementation combined (via MRO) with the synchronous BitShares Block.

    .. code-block:: python

        from bitshares.aio.block import Block
        block = await Block(1)
        print(block)
    """
    pass
@BlockchainInstance.inject
class BlockHeader(GrapheneBlockHeader, SyncBlockHeader):
    """Async block-header reader; combines the async Graphene implementation
    with the synchronous BitShares BlockHeader via MRO (no extra behavior)."""
    pass
| 969 | 273 |
# N x N culture dish; each second every virus multiplies into the empty
# up/down/left/right cells, lower-numbered viruses spreading first.
# BUG FIX: the previous version appended new cells to virus[idx] while
# iterating over it, so a virus could spread several cells in one second.
# Each second now only expands the cells present at the start of the second.
n, k = map(int, input().split())
matrix = []
for _ in range(n):
    matrix.append(list(map(int, input().split())))
s, target_x, target_y = map(int, input().split())
# Neighbor offsets: up, down (rows) / left, right (columns).
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# frontier[v] holds the cells virus v gained in the previous second; once a
# cell has been expanded, all its empty neighbors are filled, so it never
# needs to be revisited.
frontier = {v: [] for v in range(1, k + 1)}
for i in range(n):
    for j in range(n):
        if matrix[i][j] != 0:
            frontier[matrix[i][j]].append((i, j))

def spread(cord, new_cells):
    """Infect every empty, in-bounds neighbor of cord; record gains in new_cells."""
    x, y = cord
    v_num = matrix[x][y]
    for i in range(4):
        n_x = x + dx[i]
        n_y = y + dy[i]
        if n_x < 0 or n_x >= n or n_y < 0 or n_y >= n:
            continue
        if matrix[n_x][n_y] == 0:
            matrix[n_x][n_y] = v_num
            new_cells.append((n_x, n_y))

for _ in range(s):
    # Lower virus numbers expand first and therefore win contested cells.
    for idx in sorted(frontier.keys()):
        new_cells = []
        for cord in frontier[idx]:
            spread(cord, new_cells)
        frontier[idx] = new_cells

# Answer: virus id at the 1-indexed target cell after s seconds.
print(matrix[target_x - 1][target_y - 1])
"""
Internationalization tasks
"""
import re
import subprocess
import sys
from path import Path as path
from paver.easy import cmdopts, needs, sh, task
from .utils.cmd import django_cmd
from .utils.envs import Env
from .utils.timer import timed
# Colorized output is optional: fall back to a plain-text no-op when pygments
# is not installed.
try:
    from pygments.console import colorize
except ImportError:
    colorize = lambda color, text: text
# NOTE(review): DEFAULT_SETTINGS is not referenced by the tasks visible in
# this module (i18n_compilejs hard-codes 'devstack_docker') — confirm intent.
DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
@task
@needs(
    "pavelib.prereqs.install_prereqs",
    "pavelib.i18n.i18n_validate_gettext",
)
@cmdopts([
    ("verbose", "v", "Sets 'verbose' to True"),
])
@timed
def i18n_extract(options):
    """
    Extract localizable strings from sources
    """
    verbose = getattr(options, "verbose", None)
    cmd = "i18n_tool extract"
    # Forward our --verbose flag to i18n_tool as -v.
    if verbose:
        cmd += " -v"
    sh(cmd)
@task
@timed
def i18n_fastgenerate():
    """
    Compile localizable strings from sources without re-extracting strings first.
    """
    # Assumes the .po sources are already current; see i18n_generate for the full path.
    sh("i18n_tool generate")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_generate():
    """
    Compile localizable strings from sources, extracting strings first.
    """
    # Extraction happens through the i18n_extract dependency declared above.
    sh("i18n_tool generate")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_generate_strict():
    """
    Compile localizable strings from sources, extracting strings first.
    Complains if files are missing.
    """
    sh("i18n_tool generate --strict")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_dummy():
    """
    Simulate international translation by generating dummy strings
    corresponding to source strings.
    """
    sh("i18n_tool dummy")
    # Need to then compile the new dummy strings
    sh("i18n_tool generate")
@task
@needs(
    "pavelib.prereqs.install_prereqs",
)
@timed
def i18n_compilejs(options):  # lint-amnesty, pylint: disable=unused-argument
    """
    Generating djangojs.js files using django-statici18n
    """
    # NOTE(review): settings is hard-coded rather than using DEFAULT_SETTINGS
    # defined above — confirm this is intentional.
    settings = 'devstack_docker'
    # Generate static i18n JS files.
    for system in ['lms', 'cms']:
        sh(django_cmd(system, settings, 'compilejsi18n'))
@task
@timed
def i18n_validate_gettext():
    """
    Make sure GNU gettext utilities are available.

    Exits the process with status 1 (after printing a red error message)
    when xgettext is not on PATH.
    """
    returncode = subprocess.call(['which', 'xgettext'])
    if returncode != 0:
        msg = colorize(
            'red',
            "Cannot locate GNU gettext utilities, which are "
            "required by django for internationalization.\n (see "
            "https://docs.djangoproject.com/en/dev/topics/i18n/"
            "translation/#message-files)\nTry downloading them from "
            "http://www.gnu.org/software/gettext/ \n"
        )
        sys.stderr.write(msg)
        sys.exit(1)
@task
@timed
def i18n_validate_transifex_config():
    """
    Make sure config file with username/password exists.

    Exits the process with status 1 (after printing a red error message)
    when ~/.transifexrc is missing or empty.
    """
    home = path('~').expanduser()
    config = home / '.transifexrc'
    # BUG FIX: `isfile` and `getsize` are methods on path.Path; without the
    # call parentheses the old check compared bound methods (always truthy /
    # never equal to 0), so this validation could never fail.
    if not config.isfile() or config.getsize() == 0:
        msg = colorize(
            'red',
            "Cannot connect to Transifex, config file is missing"
            " or empty: {config} \nSee "
            "http://help.transifex.com/features/client/#transifexrc \n".format(
                config=config,
            )
        )
        sys.stderr.write(msg)
        sys.exit(1)
@task
@needs("pavelib.i18n.i18n_validate_transifex_config")
@timed
def i18n_transifex_push():
    """
    Push source strings to Transifex for translation
    """
    sh("i18n_tool transifex push")
@task
@needs("pavelib.i18n.i18n_validate_transifex_config")
@timed
def i18n_transifex_pull():
    """
    Pull translated strings from Transifex
    """
    sh("i18n_tool transifex pull")
@task
@timed
def i18n_rtl():
    """
    Pull all RTL translations (reviewed AND unreviewed) from Transifex,
    regenerate the language files, and amend them onto the current commit.
    """
    sh("i18n_tool transifex rtl")
    # Typo fix in progress message: "langugage" -> "language".
    print("Now generating language files...")
    sh("i18n_tool generate --rtl")
    print("Committing translations...")
    sh('git clean -fdX conf/locale')
    sh('git add conf/locale')
    sh('git commit --amend')
@task
@timed
def i18n_ltr():
    """
    Pull all LTR translations (reviewed AND unreviewed) from Transifex,
    regenerate the language files, and amend them onto the current commit.
    """
    sh("i18n_tool transifex ltr")
    # Typo fix in progress message: "langugage" -> "language".
    print("Now generating language files...")
    sh("i18n_tool generate --ltr")
    print("Committing translations...")
    sh('git clean -fdX conf/locale')
    sh('git add conf/locale')
    sh('git commit --amend')
@task
@needs(
    "pavelib.i18n.i18n_clean",
    "pavelib.i18n.i18n_transifex_pull",
    "pavelib.i18n.i18n_extract",
    "pavelib.i18n.i18n_dummy",
    "pavelib.i18n.i18n_generate_strict",
)
@timed
def i18n_robot_pull():
    """
    Pull source strings, generate po and mo files, and validate
    """
    # sh('paver test_i18n')
    # Tests were removed from repo, but there should still be tests covering the translations
    # TODO: Validate the recently pulled translations, and give a bail option
    # Drop 'rtl' and 'eo' — presumably the dummy locales created by
    # i18n_dummy — before validating; confirm against i18n_tool's config.
    sh('git clean -fdX conf/locale/rtl')
    sh('git clean -fdX conf/locale/eo')
    print("\n\nValidating translations with `i18n_tool validate`...")
    sh("i18n_tool validate")
    # Interactive confirmation before committing the pulled translations.
    con = input("Continue with committing these translations (y/n)? ")
    if con.lower() == 'y':
        sh('git add conf/locale')
        sh('git add cms/static/js/i18n')
        sh('git add lms/static/js/i18n')
        sh(
            'git commit --message='
            '"Update translations (autogenerated message)" --edit'
        )
@task
@timed
def i18n_clean():
    """
    Clean the i18n directory of artifacts
    """
    # -X removes only git-ignored files, so tracked translations survive.
    sh('git clean -fdX conf/locale')
@task
@needs(
    "pavelib.i18n.i18n_clean",
    "pavelib.i18n.i18n_extract",
    "pavelib.i18n.i18n_transifex_push",
)
@timed
def i18n_robot_push():
    """
    Extract new strings, and push to transifex
    """
    # All work happens in the task dependencies above.
    pass  # lint-amnesty, pylint: disable=unnecessary-pass
@task
@needs(
    "pavelib.i18n.i18n_validate_transifex_config",
    "pavelib.i18n.i18n_generate",
)
@timed
def i18n_release_push():
    """
    Push release-specific resources to Transifex.
    """
    # Raises ValueError unless .tx/config defines exactly two release-* resources.
    resources = find_release_resources()
    sh("i18n_tool transifex push " + " ".join(resources))
@task
@needs(
    "pavelib.i18n.i18n_validate_transifex_config",
)
@timed
def i18n_release_pull():
    """
    Pull release-specific translations from Transifex.
    """
    # Raises ValueError unless .tx/config defines exactly two release-* resources.
    resources = find_release_resources()
    sh("i18n_tool transifex pull " + " ".join(resources))
def find_release_resources():
    """
    Validate the .tx/config file for release files, returning the resource names.

    For working with release files, .tx/config should contain exactly two
    resources whose names match "<project>.release-<name>", e.g.:

        [edx-platform.release-dogwood]
        file_filter = conf/locale/<lang>/LC_MESSAGES/django.po
        ...
        [edx-platform.release-dogwood-js]
        file_filter = conf/locale/<lang>/LC_MESSAGES/djangojs.po
        ...

    Returns the list of resource names, or raises ValueError (with a
    diagnostic message) when .tx/config doesn't define exactly two.
    """
    release_entry = r"^\[([\w-]+\.release-[\w-]+)\]$"
    with open(".tx/config") as config_file:
        found = re.findall(release_entry, config_file.read(), re.MULTILINE)
    if len(found) == 2:
        return found
    if found:
        raise ValueError("Strange Transifex config! Found these release-* resources:\n" + "\n".join(found))
    raise ValueError("You need two release-* resources defined to use this command.")
| 7,843 | 2,815 |
# Auto-generated SymPy/PyDy mechanics script (Autolev-style translation);
# exercises reference frames, points, particles, rigid bodies and matrices.
# Many intermediate results are deliberately overwritten — generated-code artifact.
import sympy.physics.mechanics as _me
import sympy as _sm
import math as m
import numpy as _np
frame_n = _me.ReferenceFrame('n')
frame_a = _me.ReferenceFrame('a')
a = 0
d = _me.inertia(frame_a, 1, 1, 1)
point_po1 = _me.Point('po1')
point_po2 = _me.Point('po2')
particle_p1 = _me.Particle('p1', _me.Point('p1_pt'), _sm.Symbol('m'))
particle_p2 = _me.Particle('p2', _me.Point('p2_pt'), _sm.Symbol('m'))
# Time-dependent coordinates and their first time derivatives.
c1, c2, c3 = _me.dynamicsymbols('c1 c2 c3')
c1_d, c2_d, c3_d = _me.dynamicsymbols('c1_ c2_ c3_', 1)
body_r_cm = _me.Point('r_cm')
body_r_cm.set_vel(frame_n, 0)
body_r_f = _me.ReferenceFrame('r_f')
body_r = _me.RigidBody('r', body_r_cm, body_r_f, _sm.symbols('m'), (_me.outer(body_r_f.x,body_r_f.x),body_r_cm))
point_po2.set_pos(particle_p1.point, c1*frame_a.x)
v = 2*point_po2.pos_from(particle_p1.point)+c2*frame_a.y
frame_a.set_ang_vel(frame_n, c3*frame_a.z)
v = 2*frame_a.ang_vel_in(frame_n)+c2*frame_a.y
body_r_f.set_ang_vel(frame_n, c3*frame_a.z)
v = 2*body_r_f.ang_vel_in(frame_n)+c2*frame_a.y
frame_a.set_ang_acc(frame_n, (frame_a.ang_vel_in(frame_n)).dt(frame_a))
v = 2*frame_a.ang_acc_in(frame_n)+c2*frame_a.y
particle_p1.point.set_vel(frame_a, c1*frame_a.x+c3*frame_a.y)
body_r_cm.set_acc(frame_n, c2*frame_a.y)
v_a = _me.cross(body_r_cm.acc(frame_n), particle_p1.point.vel(frame_a))
x_b_c = v_a
x_b_d = 2*x_b_c
a_b_c_d_e = x_b_d*2
a_b_c = 2*c1*c2*c3
a_b_c += 2*c1
a_b_c = 3*c1
q1, q2, u1, u2 = _me.dynamicsymbols('q1 q2 u1 u2')
q1_d, q2_d, u1_d, u2_d = _me.dynamicsymbols('q1_ q2_ u1_ u2_', 1)
x, y = _me.dynamicsymbols('x y')
x_d, y_d = _me.dynamicsymbols('x_ y_', 1)
x_dd, y_dd = _me.dynamicsymbols('x_ y_', 2)
yy = _me.dynamicsymbols('yy')
yy = x*x_d**2+1
# NOTE(review): this rebinds `m` (imported above as the math-module alias)
# to a Matrix — generated-code artifact; the math alias is unused afterwards.
m = _sm.Matrix([[0]])
m[0] = 2*x
m = m.row_insert(m.shape[0], _sm.Matrix([[0]]))
m[m.shape[0]-1] = 2*y
a = 2*m[0]
m = _sm.Matrix([1,2,3,4,5,6,7,8,9]).reshape(3, 3)
m[0,1] = 5
a = m[0, 1]*2
force_ro = q1*frame_n.x
torque_a = q2*frame_n.z
# The re-assignment below overwrites the force defined two lines up.
force_ro = q1*frame_n.x + q2*frame_n.y
f = force_ro*2
import filecmp, os, tempfile, unittest
from click.testing import CliRunner
from tenx.asm_stats import asm_stats_cmd, get_contig_lengths, get_scaffold_and_contig_lengths, get_stats, length_buckets
class AsmStatsTest(unittest.TestCase):
    """Tests for tenx.asm_stats: contig/scaffold length extraction, summary
    statistics, and the asm_stats CLI command."""
    def setUp(self):
        # Fixture FASTAs and expected CLI output live under tests/data/asm-stats.
        self.data_dn = os.path.join(os.path.dirname(__file__), "data", "asm-stats")
        self.fasta1_fn = os.path.join(self.data_dn, "asm.fasta")
        self.fasta2_fn = os.path.join(self.data_dn, "asm.scaffolded.fasta")
        self.fasta2_stats_fn = os.path.join(self.data_dn, "asm.scaffolded.fasta.stats")
        self.expected_scaffolds = [ 17004, 350002, 1000001]
        self.expected_contigs = [1, 2001, 5001, 10001, 100001, 250001, 1000001]
        self.temp_d = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_d.cleanup()
    def test1_get_contig_lengths(self):
        """Contig lengths from the plain FASTA match the known fixture values."""
        contigs = get_contig_lengths(self.fasta1_fn)
        self.assertEqual(contigs, self.expected_contigs)
    def test1_get_scaffold_and_contig_lengths(self):
        """Scaffolded FASTA splits into the expected scaffold AND contig lengths."""
        scaffolds, contigs = get_scaffold_and_contig_lengths(self.fasta2_fn, 2)
        self.assertEqual(scaffolds, self.expected_scaffolds)
        self.assertEqual(contigs, self.expected_contigs)
    def test2_get_stats(self):
        """get_stats produces totals, N50 and per-length-bucket counts/lengths."""
        total = sum(self.expected_scaffolds)
        count = len(self.expected_scaffolds)
        expected_stats = {
            "total": total,
            "count": count,
            "mean": int(total/count),
            "max": self.expected_scaffolds[-1],
            "genome_n50": int(total/2),
            "n50_length": total,
        }
        # Start every bucket at zero, then fill the buckets the fixture hits.
        for b in length_buckets():
            expected_stats["_".join([str(b), "count"])] = 0
            expected_stats["_".join([str(b), "length"])] = 0
        expected_stats["1000000_length"] = self.expected_scaffolds[-1]
        expected_stats["1000000_count"] = 1
        expected_stats["250000_length"] = self.expected_scaffolds[1]
        expected_stats["250000_count"] = 1
        expected_stats["10000_length"] = self.expected_scaffolds[0]
        expected_stats["10000_count"] = 1
        stats = get_stats(self.expected_scaffolds)
        self.assertEqual(stats, expected_stats)
        # Contig fixture: one sequence of length b + 1 in every bucket.
        total = sum(self.expected_contigs)
        count = len(self.expected_contigs)
        expected_stats = {
            "total": total,
            "count": count,
            "mean": int(total/count),
            "max": self.expected_contigs[-1],
            "genome_n50": int(total/2),
            "n50_length": total,
        }
        for b in length_buckets():
            expected_stats["_".join([str(b), "count"])] = 1
            expected_stats["_".join([str(b), "length"])] = b + 1
        stats = get_stats(self.expected_contigs)
        self.assertEqual(stats, expected_stats)
    def test4_asm_stats_cmd(self):
        """CLI: --help works, no args exits 2, and each output format succeeds."""
        runner = CliRunner()
        result = runner.invoke(asm_stats_cmd, ["--help"])
        self.assertEqual(result.exit_code, 0)
        result = runner.invoke(asm_stats_cmd, [])
        self.assertEqual(result.exit_code, 2)
        stats_fn = os.path.join(self.temp_d.name, "stats.txt")
        result = runner.invoke(asm_stats_cmd, ["-i", self.fasta2_fn, "-n", 2, "-o", stats_fn, "-f", "quick"])
        try:
            self.assertEqual(result.exit_code, 0)
        # Bare except: dump the CLI output for debugging, then re-raise.
        except:
            print(result.output)
            raise
        self.assertEqual(filecmp.cmp(stats_fn, self.fasta2_stats_fn), True)
        result = runner.invoke(asm_stats_cmd, ["-i", self.fasta2_fn, "-n", 2, "-o", stats_fn, "-f", "json"])
        self.assertEqual(result.exit_code, 0)
        result = runner.invoke(asm_stats_cmd, ["-i", self.fasta2_fn, "-n", 2, "-o", stats_fn, "-f", "yaml"])
        self.assertEqual(result.exit_code, 0)
# -- AsmStatsTest
if __name__ == '__main__':
    # Allow running this test module directly, with verbose per-test output.
    unittest.main(verbosity=2)
#-- __main__
| 3,830 | 1,414 |
import socket
import pickle
import random
import string
import time
import hashlib
import os
# Maximum number of bytes read per socket recv() call.
BUFFER_SIZE = 65536
def send_message(msg, port):
    """Pickle *msg* and send it to a peer listening on localhost:*port*.

    Blocks until the peer answers with a (pickled) acknowledgement, then
    closes the connection. Returns None.
    """
    # Setup socket for the user to be send
    s_temp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s_temp.connect((socket.gethostname(), port))
        # encode and send message.  sendall() retries until every byte is
        # written; plain send() may transmit only part of the payload.
        s_temp.sendall(pickle.dumps(msg))
        # Receive ack.
        ack = pickle.loads(s_temp.recv(BUFFER_SIZE))
        # message_logger.info(f'Port {port} sends {ack}\n')
    finally:
        # Close the socket even if connect/send/recv raised.
        s_temp.close()
def receive_message(connection):
    """Read one pickled (header, sender, receiver, message) tuple from an
    accepted socket *connection*, reply with a pickled 'ACK', and return the
    four fields.

    NOTE(review): a single recv() is assumed to deliver the whole pickle;
    payloads larger than BUFFER_SIZE or fragmented by TCP would fail to
    unpickle -- confirm messages always fit in one segment.
    """
    # Receive message and send acknowledgement.
    header, sender, receiver, message = pickle.loads(connection.recv(BUFFER_SIZE))
    connection.send(pickle.dumps('ACK'))
    return header, sender, receiver, message
def generate_random_string_with_ending(length, ending):
    """Rejection-sample a random string of *length* lowercase letters and
    digits until its final character is a member of *ending*, then return it.
    """
    alphabet = string.ascii_lowercase + string.digits
    while True:
        candidate = ''.join(random.choices(alphabet, k=length))
        if candidate[-1] in ending:
            return candidate
def get_hash(transactions, nonce):
    """Return the SHA3-256 hex digest of the repr of (tuple(transactions), nonce)."""
    payload = str((tuple(transactions), nonce)).encode('utf-8')
    return hashlib.sha3_256(payload).hexdigest()
def read_first_blockchain():
    """Build a blockchain from 'first_blockchain.txt' (one whitespace-separated
    'sender receiver amount' triple per line, 3 transactions per block) and
    cache it as 'first_blockchain_processed.pkl'. No-op if the cache exists.
    """
    def prepare_block(blockchain, transactions, term):
        # Mine a nonce whose block hash ends in '0', '1' or '2' (toy PoW).
        found = False
        nonce = None
        while not found:
            nonce = generate_random_string_with_ending(length=6, ending={'0', '1', '2'})
            cur_pow = get_hash(transactions, nonce)
            if '2' >= cur_pow[-1] >= '0':
                found = True
        # Link to the previous block via the hash of its contents (None for genesis).
        phash = None
        if len(blockchain) > 0:
            previous_nonce = blockchain[-1]['nonce']
            previous_transactions = blockchain[-1]['transactions']
            phash = get_hash(previous_transactions, previous_nonce)
        return {'term': term, 'phash': phash, 'nonce': nonce, 'transactions': transactions}
    if not os.path.exists('first_blockchain_processed.pkl'):
        blockchain = []
        file_path = 'first_blockchain.txt'
        with open(file_path, 'r') as _file:
            # NOTE(review): term is initialised to -1 and never updated, so
            # every block carries term == -1 -- confirm this is intentional.
            term = -1
            transactions = []
            for line in _file.readlines():
                sender, receiver, amount = map(int, tuple(line.split()))
                # Wall-clock timestamp doubles as the transaction id.
                transaction_id = time.time()
                transaction = (transaction_id, (sender, receiver, amount))
                transactions.append(transaction)
                if len(transactions) == 3:
                    # block is finished, find nonce...
                    block = prepare_block(blockchain, transactions, term)
                    blockchain.append(block)
                    transactions = []
            if len(transactions) > 0:
                # Pad the trailing partial block to 3 slots with None.
                transactions += [None for _ in range(3 - len(transactions))]
                block = prepare_block(blockchain, transactions, term)
                blockchain.append(block)
            with open('first_blockchain_processed.pkl', 'wb') as _fb:
                pickle.dump(blockchain, _fb)
def blockchain_print_format(blockchain):
    """Render *blockchain* as '[(term) [payload, ...]] -> [(term) ...]'.

    None placeholders (padding in the final block) are omitted from each
    printed transaction list.
    """
    rendered = []
    for block in blockchain:
        payloads = [txn[1] for txn in block['transactions'] if txn is not None]
        rendered.append(f"[({block['term']}) {payloads}]")
    return ' -> '.join(rendered)
| 3,435 | 1,024 |
import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from data_refinery_api.test.test_api_general import API_VERSION
from data_refinery_common.models import (
Experiment,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
Sample,
)
class APITestCases(APITestCase):
    def setUp(self):
        """Create the Homo sapiens organism every test in this case relies on."""
        self.homo_sapiens = Organism(name="HOMO_SAPIENS", taxonomy_id=9606, is_scientific_name=True)
        self.homo_sapiens.save()
    def test_dataset_stats(self):
        """ Test the dataset stats endpoint """
        # Two extra organisms besides the HOMO_SAPIENS created in setUp().
        gallus_gallus = Organism(name="GALLUS_GALLUS", taxonomy_id=9031, is_scientific_name=True)
        gallus_gallus.save()
        equus_ferus = Organism(name="EQUUS_FERUS", taxonomy_id=1114792, is_scientific_name=True)
        equus_ferus.save()
        # One microarray experiment with a single human sample...
        ex = Experiment()
        ex.accession_code = "XYZ123"
        ex.title = "XYZ123"
        ex.description = "XYZ123"
        ex.technology = "MICROARRAY"
        ex.submitter_institution = "XYZ123"
        ex.save()
        # ...and one RNA-seq experiment with two chicken samples.
        ex2 = Experiment()
        ex2.accession_code = "ABC789"
        ex2.title = "ABC789"
        ex2.description = "ABC789"
        ex2.technology = "RNA-SEQ"
        ex2.submitter_institution = "Funkytown"
        ex2.save()
        sample1 = Sample()
        sample1.title = "1"
        sample1.accession_code = "1"
        sample1.platform_name = "AFFY"
        sample1.is_processed = True
        sample1.organism = self.homo_sapiens
        sample1.save()
        sample2 = Sample()
        sample2.title = "2"
        sample2.accession_code = "2"
        sample2.platform_name = "ILLUMINA"
        sample2.is_processed = True
        sample2.organism = gallus_gallus
        sample2.save()
        sample3 = Sample()
        sample3.title = "3"
        sample3.accession_code = "3"
        sample3.platform_name = "ILLUMINA"
        sample3.is_processed = True
        sample3.organism = gallus_gallus
        sample3.save()
        # Associate organisms with their experiments (ex2 gets two organisms).
        xoa = ExperimentOrganismAssociation()
        xoa.experiment = ex
        xoa.organism = self.homo_sapiens
        xoa.save()
        xoa = ExperimentOrganismAssociation()
        xoa.experiment = ex2
        xoa.organism = gallus_gallus
        xoa.save()
        xoa = ExperimentOrganismAssociation()
        xoa.experiment = ex2
        xoa.organism = equus_ferus
        xoa.save()
        # Attach sample1 to ex, samples 2 and 3 to ex2.
        experiment_sample_association = ExperimentSampleAssociation()
        experiment_sample_association.sample = sample1
        experiment_sample_association.experiment = ex
        experiment_sample_association.save()
        experiment_sample_association = ExperimentSampleAssociation()
        experiment_sample_association.sample = sample2
        experiment_sample_association.experiment = ex2
        experiment_sample_association.save()
        experiment_sample_association = ExperimentSampleAssociation()
        experiment_sample_association.sample = sample3
        experiment_sample_association.experiment = ex2
        experiment_sample_association.save()
        # Create a dataset picking one sample from each experiment.
        jdata = json.dumps(
            {"email_address": "baz@gmail.com", "data": {"XYZ123": ["1"], "ABC789": ["2"]}}
        )
        response = self.client.post(
            reverse("create_dataset", kwargs={"version": API_VERSION}),
            jdata,
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.json()["data"], json.loads(jdata)["data"])
        good_id = response.json()["id"]
        # Check that we can fetch these sample details via samples API
        response = self.client.get(
            reverse("samples", kwargs={"version": API_VERSION}), {"dataset_id": good_id}
        )
        self.assertEqual(response.json()["count"], 2)
| 3,850 | 1,237 |
import pdftotext
import sys
import numpy as np
import pandas as pd
import regex as re
def eprint(*args, **kwargs):
    """print() drop-in that writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
def get_server_type(server_type_str):
    """Extract the Hetzner product code from a billing-line description.

    A lone 'Backup' maps to 'backup'; any other single-word description is
    'unknown'; otherwise the second word's prefix before '-' is returned
    (e.g. 'Server CX21-xyz' -> 'CX21').
    """
    parts = server_type_str.split(' ')
    if len(parts) >= 2:
        return parts[1].split('-')[0]
    return 'backup' if server_type_str == 'Backup' else 'unknown'
def regex_match(server_type_str, regex, ret_id=1):
    """Match *regex* anchored at the start of *server_type_str*.

    Returns capture group *ret_id* on a match, NaN otherwise (so the result
    slots directly into pandas columns).
    """
    m = re.match(regex, server_type_str)
    # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
    return np.nan if m is None else m.group(ret_id)
def regex_search(server_type_str, regex, ret_id=1):
    """Search *server_type_str* for *regex* anywhere in the string.

    Returns capture group *ret_id* on a match, NaN otherwise (so the result
    slots directly into pandas columns).
    """
    m = re.search(regex, server_type_str)
    # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
    return np.nan if m is None else m.group(ret_id)
def hetzner_fix_report(csv_path, pdf_path):
    """Normalise a raw Hetzner Cloud invoice CSV into a tidy DataFrame.

    Parses the header-less CSV export into typed columns (server type/id,
    prices, usage hours) and cross-references the invoice PDF to recover the
    VAT rate and each server's project name.

    :param csv_path: path to the raw invoice CSV
    :param pdf_path: path to the matching invoice PDF
    :return: pandas.DataFrame with columns ordered as df_keys_reorder
    """
    # Column names for the raw, header-less Hetzner CSV export
    df_keys = [
        'server_type_str',
        'comment',
        'date_from',
        'date_to',
        'quantity',
        'price',
        'price_net',
        'empty',
    ]
    # Keys' new order
    df_keys_reorder = ['server_id', 'name', 'project', 'type', 'quantity', 'usage_hours', 'price', 'price_max',
                       'price_net', 'price_gross', 'vat', 'date_from', 'date_to', 'is_backup', 'is_server', 'is_ceph']
    # Load the raw CSV
    df = pd.read_csv(csv_path, sep=',', names=df_keys)
    # Whether entry is backup
    df['is_backup'] = df.server_type_str.apply(lambda x: 'Backup' in x)
    # Whether entry is server instance
    df['is_server'] = df.server_type_str.apply(lambda x: 'Server' in x)
    # Whether entry uses Ceph
    df['is_ceph'] = df.server_type_str.apply(lambda x: 'ceph' in x)
    # Server types according to https://www.hetzner.de/cloud
    df['type'] = df.server_type_str.apply(get_server_type)
    # Hetzner's instance id
    df['server_id'] = df.comment.apply(lambda x: regex_match(x, r'.*#([0-9]+) ".*'))
    # Maximum price for hourly rated servers (English or German wording,
    # euro sign before or after the number)
    df['price_max'] = df.comment.apply(lambda x: regex_search(x,
                                                              r'(?:period|Zeitraum).*?((?:€\s*[\d.]+)|(?:[\d,]+\s*€))'))
    df_price_max_mask = ~df.price_max.isna()
    # Strip the euro sign and convert German decimal commas to dots
    df.loc[df_price_max_mask, 'price_max'] = \
        df.price_max.loc[df_price_max_mask].apply(lambda x: float(x.replace('€', '').replace(',', '.')))
    # Set server name
    df['name'] = df.comment.apply(lambda x: regex_match(x, r'.+"([^"]+)"'))
    df.loc[df['name'] == 'instance', 'name'] = np.nan
    # Usage in hours
    df['usage_hours'] = df.comment.apply(lambda x: regex_search(x, r'(?:Usage|Nutzung):.*?(\d+)\s*h'))
    # Drop unnecessary columns
    df.drop(['comment', 'server_type_str', 'empty'], axis=1, inplace=True)
    # Combine with pdf to get project names
    with open(pdf_path, 'rb') as f:
        pdf = pdftotext.PDF(f)
    # Collect VAT value (German 'USt.' or English 'VAT'/'Tax rate' layouts)
    vat = None
    for page in pdf:
        m = re.search(r'(USt\.|VAT) \(([0-9.,]+) ?%\)', page)
        if m is not None:
            vat = float(m[2])
            break
        else:
            m = re.search(r'Tax rate.*\n.*?([\d.,]+) ?%', page)
            if m is not None:
                vat = float(m[1])
    if vat is None:
        eprint('VAT information could not be found!')
        sys.exit(1)
    df['vat'] = vat / 100
    df['price_net'] = df.quantity * df.price
    df['price_gross'] = df.price_net * (1 + df.vat)
    # Collect individual projects' names
    projects = []
    for page in pdf:
        projects += re.findall(r'Proje[ck]t "([^"]+)"', page)
    projects = np.array(projects)
    # Collect individual projects' string locations; the page index scaled by
    # page_factor makes offsets comparable across pages
    page_factor = 1e6
    projects_loc = []
    for project in projects:
        for i, page in enumerate(pdf):
            loc = page.find(project)
            if loc != -1:
                # Add page offset to make locations comparable
                projects_loc.append(loc + i * page_factor)
    projects_loc = np.array(projects_loc)
    # Collect individual server ids' string locations and map them to nearest previous project name
    df['project'] = np.nan
    sid_loc = []
    for idx, sid in df.server_id[df.server_id.notnull()].items():
        for i, page in enumerate(pdf):
            loc = page.find(sid)
            if loc == -1:
                continue
            # Add page offset to make locations comparable
            loc = np.array(loc + i * page_factor)
            sid_loc.append(loc)
            # Nearest project heading that appears BEFORE this server id
            diff_loc = projects_loc - loc
            project_name = projects[np.where(diff_loc < 0, diff_loc, -np.inf).argmax()]
            df.loc[idx, 'project'] = project_name
    # Reorder columns
    df = df[df_keys_reorder]
    return df
| 4,804 | 1,651 |
# random_forest.py
# Does the random forest calculations.
import decision_tree
import partition
import heapq
import table_utils
import classifier_util
from homework_util import strat_folds
def run_a_table(table, indexes, class_index, N, M, F):
    """ Takes a table, splits it into a stratified training set (2/3) and
        test set (1/3). Creates a random forest for the training set, then
        tests the forest off of the test set.
    :param table: a table of values
    :param indexes: The indexes to partition on
    :param class_index: The index of the label to predict
    :param N: Number of trees to produce
    :param M: Number of the best trees to choose
    :param F: Subset size of random attributes
    :return: Returns a list of tuples. Of the actual, predicted label and
             training and test
             [(actual1,predicted1), (actual2,predicted2), ...], training, test
    """
    domains = table_utils.get_domains(table, indexes)
    folds = strat_folds(table, class_index, 3)
    training = folds[0]
    training.extend(folds[1])
    test = folds[2]
    # Bug fix: grow the forest from the TRAINING partition. It was previously
    # built from the test set, so the forest was evaluated on the very rows
    # it was trained on (data leak).
    forest = _random_forest(training, indexes, class_index, domains, N, M, F)
    return [(row[class_index], predict_label(forest, row)) for row in test], \
        training, test
def _random_forest(table, indexes, class_index, att_domains, N, M, F):
    """Grow N trees via bagging and keep the M most accurate.

    Each tree is trained on one bootstrap sample and scored against the
    matching remainder (out-of-bag) set.

    :param table: a table
    :param indexes: a list of indexes to partition on
    :param class_index: the index of the class label to predict
    :param att_domains: domains of the attributes in *indexes*
    :param N: number of trees to produce
    :param M: number of the best trees to keep
    :param F: subset size of random attributes
    :return: list of (accuracy, tree) pairs for the M best trees, stored as
             a min-heap (lowest accuracy at index 0)
    """
    # Min-heap keyed on accuracy: the least accurate tree sits at the root,
    # which makes discarding the worst trees a series of heappop() calls.
    heap = []
    for bag_set in partition.bagging(table, N):
        # bag_set[0] is the bootstrap training sample, bag_set[1] the remainder.
        candidate = decision_tree.tdidt_RF(bag_set[0], indexes, att_domains, class_index, F)
        score = _accuracy_for_tree(candidate, class_index, bag_set[1])
        heapq.heappush(heap, (score, candidate))
    # Pop the N - M least accurate trees; what remains are the M best.
    for _ in range(N - M):
        heapq.heappop(heap)
    return heap
def _accuracy_for_tree(tree, class_index, test_set):
    """Classify *test_set* with *tree* and return its accuracy score."""
    predictions = decision_tree.classify_with_tree(tree, class_index, test_set)
    return classifier_util.accuracy(predictions)
def predict_label(forest, instance):
    """ predicts the label of an instance given a forest using weighted
        voting with accuracies: each tree votes for a label with weight
        equal to its accuracy, and the label with the greatest accumulated
        weight wins.
    :param forest: a list of (accuracy, tree) pairs as returned by _random_forest
    :param instance: a row to have a class label predicted
    :return: a class label (0 when the forest casts no votes)
    """
    votes = {}
    for accuracy, tree in forest:
        prediction = decision_tree.get_label(tree, instance)
        # totals the accuracy predicted for each label
        votes[prediction] = votes.get(prediction, 0) + accuracy
    if not votes:
        return 0
    # Bug fix: the original loop never updated its running maximum, so it
    # returned the LAST label with a positive total instead of the best one.
    return max(votes.items(), key=lambda kv: kv[1])[0]
| 3,705 | 1,101 |
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.contrib.slim.python.slim.nets import alexnet
from tensorflow.python.ops import random_ops
from tensorflow.python.tools import optimize_for_inference_lib
from tensorflow.python.framework import dtypes
from tensorflow.core.framework import graph_pb2
import tensorflow.contrib.slim as slim
import network
import os
batch_size = 128
height = width = 224
num_classes = 1000
def save_model(model_path):
    """Build an AlexNet v2 graph with named input/output tensors and save a
    freshly-initialised (untrained) checkpoint to *model_path*."""
    input = tf.placeholder(tf.float32,(None,height,width,3),'input_tensor')
    logits, _ = alexnet.alexnet_v2(input, num_classes)
    # Named identity op so the output tensor can be located after freezing.
    output_tensor = tf.identity(logits,name='output_tensor')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        chkp = saver.save(sess, model_path)
        print( 'Save to ' + chkp )
def freeze_model_single(model_path):
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_path)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
# absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
# output_graph = absolute_model_folder + "/frozen_model.pb"
output_graph = 'model/' + 'frozen_model.pb'
# 'output_tensor1,output_tensor2,...'
output_node_names = "anr".split(",")
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
input_graph_def = tf.get_default_graph().as_graph_def()
with tf.Session() as sess:
saver.restore(sess, input_checkpoint)
train_vars = slim.get_model_variables()
for each in train_vars:
print each.op.name, each.eval()
output_graph_def = graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
input_graph_def, # The graph_def is used to retrieve the nodes
output_node_names # The output node names are used to select the usefull nodes
)
with tf.gfile.FastGFile(output_graph, "w") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
def freeze_model(model_folder):
    """Restore the color/gray/gradient student networks from their separate
    checkpoints under *model_folder* and freeze them into one
    model/frozen_model.pb.
    """
    # We precise the file fullname of our freezed graph
    output_graph = 'model/'+ 'frozen_model.pb'
    # 'output_tensor1,output_tensor2,...'
    output_node_names = network.evaluate_network_slim()
    input_graph_def = tf.get_default_graph().as_graph_def()
    def name_in_checkpoint(var, type):
        # Map a graph variable name (e.g. 'color/...') to its checkpoint name
        # ('student/...'). NOTE(review): implicitly returns None when *type*
        # is absent; callers filter on the same substring so this never fires.
        if type in var.op.name:
            return var.op.name.replace(type, "student")
    vars_to_restore = slim.get_model_variables()
    # One Saver per sub-network, each remapping 'student' checkpoint names.
    vars_color_to_restore = {name_in_checkpoint(var,'color'): var for var in vars_to_restore if 'color' in var.op.name}
    color_restorer = tf.train.Saver(vars_color_to_restore)
    vars_gray_to_restore = {name_in_checkpoint(var, 'gray'): var for var in vars_to_restore if 'gray' in var.op.name}
    gray_restorer = tf.train.Saver(vars_gray_to_restore)
    vars_gradient_to_restore = {name_in_checkpoint(var, 'gradient'): var for var in vars_to_restore if 'gradient' in var.op.name}
    gradient_restorer = tf.train.Saver(vars_gradient_to_restore)
    with tf.Session() as sess:
        # We retrieve our checkpoint fullpath
        color_ckpt = tf.train.get_checkpoint_state( os.path.join(model_folder,'color_0') )
        gray_ckpt = tf.train.get_checkpoint_state(os.path.join(model_folder, 'gray_0'))
        gradient_ckpt = tf.train.get_checkpoint_state(os.path.join(model_folder, 'gradient_0'))
        #sess.run(tf.global_variables_initializer())
        #train_vars = slim.get_model_variables()
        #for each in train_vars:
        #    print each.op.name, each.eval()
        color_restorer.restore(sess, color_ckpt.model_checkpoint_path)
        gray_restorer.restore(sess, gray_ckpt.model_checkpoint_path)
        gradient_restorer.restore(sess, gradient_ckpt.model_checkpoint_path)
        train_vars = slim.get_model_variables()
        for each in train_vars:
            #if 'conv1/weights' in each.op.name:
            print each.op.name, each.eval()
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            input_graph_def,  # The graph_def is used to retrieve the nodes
            output_node_names  # The output node names are used to select the usefull nodes
        )
        # NOTE(review): the GraphDef is binary; this should likely be "wb"
        # like a serialized protobuf requires -- confirm before changing.
        with tf.gfile.FastGFile(output_graph, "w") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))
def optimize_model(model_path):
    """Strip training-only nodes from the frozen graph at *model_path* and
    write an inference-ready copy to model/optimzed_model.pb.

    Returns -1 when *model_path* does not exist; None on success.
    """
    if not tf.gfile.Exists(model_path):
        return -1
    input_graph_def = graph_pb2.GraphDef()
    with tf.gfile.Open(model_path, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        'color_ph,gray_ph,gradient_ph'.split(','),
        'color_fea,gray_fea,gradient_fea'.split(','), dtypes.float32.as_datatype_enum)
    # Bug fix: write the binary protobuf in 'wb' mode (text mode can corrupt
    # it). The output keeps the historical 'optimzed' spelling so existing
    # consumers of that path keep working.
    with tf.gfile.FastGFile('model/optimzed_model.pb','wb') as f:
        f.write(output_graph_def.SerializeToString())
def main(_):
    """Entry point: pin GPU 0 and run the model-optimisation stage.
    The commented calls below are the earlier pipeline stages
    (export checkpoint, freeze); re-enable them as needed."""
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    #save_model('model/MDP.chkp')
    #freeze_model_single('/home/xiao/projects/tensoflow_pillrec/model_alexnet/color_0')
    #freeze_model( '/home/xiao/projects/tensoflow_pillrec/model_alexnet')
    optimize_model('model/frozen_model.pb')
if __name__ == '__main__':
    tf.app.run()
# Legacy Django (<3.2) hook pointing at this app's AppConfig class.
default_app_config = "comic.container_exec.apps.CoreConfig"
| 60 | 21 |
import requests
import json
import os
from bs4 import BeautifulSoup as bs
from secret import *
from smtplib import SMTP
from datetime import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def maths(num1, num2, num3=None):
    """Subtract comma-formatted number strings and return the difference,
    again comma-formatted: num1 - num2, minus num3 when it is given."""
    first = int(num1.replace(',', ''))
    second = int(num2.replace(',', ''))
    if num3:
        third = int(num3.replace(',', ''))
        return '{:,}'.format(first - second - third)
    return '{:,}'.format(first - second)
def scraper():
    """Scrape COVID-19 figures from the CDC, SF Chronicle and Worldometer,
    diff them against yesterday's numbers cached in jsonFilePath, rotate the
    cache, and email a plain-text + HTML report to every recipient.

    Relies on names star-imported from `secret`: jsonFilePath, senderEmail,
    senderPassword, recieverEmails.
    """
    # --- CDC: US totals and new cases/deaths ---
    r = requests.get(
        'https://www.cdc.gov/coronavirus/2019-ncov/cases-updates/cases-in-us.html')
    page = r.text
    soup = bs(page, 'lxml')
    nowFormatted = datetime.now().strftime('%-m/%-d/%y %-I:%M %p')
    totals = soup.findAll(attrs={'class': 'count'})
    newCasesData = soup.findAll(attrs={'class': 'new-cases'})
    newCasesText = newCasesData[0].text
    # Strip the trailing label text ("New Cases" / "New Deaths") by length.
    newCases = newCasesText[:len(newCasesText) - 11]
    newDeathsText = newCasesData[1].text
    newDeaths = newDeathsText[:len(newDeathsText) - 12]
    # --- SF Chronicle: California and Bay Area figures ---
    r2 = requests.get(
        'https://www.sfchronicle.com/bayarea/article/Coronavirus-live-updates-news-bay-area-15237940.php')
    page2 = r2.text
    soup2 = bs(page2, 'lxml')
    pTags = soup2.findAll('p')
    californiaParts = pTags[3].text[2:].split()
    californiaCases = californiaParts[0]
    californiaDeaths = californiaParts[len(californiaParts) - 2]
    bayAreaParts = pTags[4].text[2:].split()
    bayAreaCases = bayAreaParts[0]
    bayAreaDeaths = bayAreaParts[len(bayAreaParts) - 2]
    # NOTE(review): this read happens BEFORE the os.path.isfile() check
    # further down, so a first run with no cache file raises
    # FileNotFoundError -- confirm the file is seeded externally.
    with open(jsonFilePath, 'r') as jsonFile:
        jsonDataRead = json.load(jsonFile)
    # Fall back to cached values when the scraped text fails to parse.
    # NOTE(review): the fallbacks index jsonDataRead with FLAT keys, but the
    # cache written below nests them under 'past'/'past2' -- these excepts
    # would themselves raise KeyError. BaseException is also too broad.
    try:
        calCasesToday = int(''.join(californiaCases.split(',')))
    except BaseException:
        calCasesToday = jsonDataRead['calCasesToday']
    try:
        calDeathsToday = int(''.join(californiaDeaths.split(',')))
    except BaseException:
        calDeathsToday = jsonDataRead['calDeathsToday']
    try:
        baCasesToday = int(''.join(bayAreaCases.split(',')))
    except BaseException:
        baCasesToday = jsonDataRead['baCasesToday']
        bayAreaCases = jsonDataRead['baCasesToday']
    try:
        baDeathsToday = int(''.join(bayAreaDeaths.split(',')))
    except BaseException:
        baDeathsToday = jsonDataRead['baDeathsToday']
        bayAreaDeaths = jsonDataRead['baDeathsToday']
    # --- Worldometer: global figures ---
    r3 = requests.get('https://www.worldometers.info/coronavirus/')
    page3 = r3.text
    soup3 = bs(page3, 'lxml')
    spanTags = soup3.findAll('span')
    totalsWorld = soup3.findAll('div', attrs={'class': 'number-table-main'})
    worldCases = spanTags[4].text
    worldDeaths = spanTags[5].text
    worldRecoveries = spanTags[6].text
    mildCasesWorld = spanTags[8].text
    criticalCasesWorld = spanTags[9].text
    recoveredWorld = spanTags[11].text
    currentWorldCases = totalsWorld[0].text
    currentWorldClosed = totalsWorld[1].text
    worldCasesToday = int(''.join(worldCases.split(',')))
    worldDeathsToday = int(''.join(worldDeaths.split(',')))
    worldRecoveriesToday = int(''.join(worldRecoveries.split(',')))
    # First run: seed the cache with today's numbers in both the 'past'
    # (yesterday) and 'past2' (day before) slots; diffs stay 'Unknown'.
    if os.path.isfile(jsonFilePath) == False:
        jsonData = {
            'other': {
                'currentWorldCases': currentWorldCases,
                'uscases': totals[0].text,
                'usnewcases': newCases,
                'usenewdeaths': newDeaths,
                'usdeaths': totals[1].text},
            'past': {
                'calCasesToday': calCasesToday,
                'calDeathsToday': calDeathsToday,
                'baCasesToday': baCasesToday,
                'baDeathsToday': baDeathsToday,
                'worldCases': worldCasesToday,
                'worldDeaths': worldDeathsToday,
                'worldRecoveries': worldRecoveriesToday},
            'past2': {
                'calCasesToday': calCasesToday,
                'calDeathsToday': calDeathsToday,
                'baCasesToday': baCasesToday,
                'baDeathsToday': baDeathsToday,
                'worldCases': worldCasesToday,
                'worldDeaths': worldDeathsToday,
                'worldRecoveries': worldRecoveriesToday}}
        with open(jsonFilePath, 'w') as jsonFile:
            json.dump(jsonData, jsonFile)
        calDifferenceCases = 'Unknown'
        calDifferenceDeaths = 'Unknown'
        baDifferenceCases = 'Unknown'
        baDifferencesDeaths = 'Unknown'
        wDifferenceCases = 'Unknown'
        wDifferenceDeath = 'Unknown'
        wDifferenceRecoveries = 'Unknown'
        calDifferenceCases1 = 'Unknown'
        calDifferenceDeaths1 = 'Unknown'
        baDifferenceCases1 = 'Unknown'
        baDifferencesDeaths1 = 'Unknown'
        wDifferenceCases1 = 'Unknown'
        wDifferenceDeath1 = 'Unknown'
        wDifferenceRecoveries1 = 'Unknown'
    else:
        # Normal run: compute today-vs-yesterday and yesterday-vs-day-before
        # deltas, then rotate past -> past2 and today -> past in the cache.
        with open(jsonFilePath, 'r') as jsonFile:
            jsonDataFile = json.load(jsonFile)
        calDifferenceCases = '{:,}'.format(
            calCasesToday - jsonDataFile['past']['calCasesToday'])
        calDifferenceDeaths = '{:,}'.format(
            calDeathsToday - jsonDataFile['past']['calDeathsToday'])
        baDifferenceCases = '{:,}'.format(
            baCasesToday - jsonDataFile['past']['baCasesToday'])
        baDifferencesDeaths = '{:,}'.format(
            baDeathsToday - jsonDataFile['past']['baDeathsToday'])
        wDifferenceCases = '{:,}'.format(
            worldCasesToday - int(jsonDataFile['past']['worldCases']))
        wDifferenceDeath = '{:,}'.format(
            worldDeathsToday - int(jsonDataFile['past']['worldDeaths']))
        wDifferenceRecoveries = '{:,}'.format(
            worldRecoveriesToday - int(jsonDataFile['past']['worldRecoveries']))
        calDifferenceCases1 = '{:,}'.format(
            jsonDataFile['past']['calCasesToday'] -
            jsonDataFile['past2']['calCasesToday'])
        calDifferenceDeaths1 = '{:,}'.format(
            jsonDataFile['past']['calDeathsToday'] -
            jsonDataFile['past2']['calDeathsToday'])
        baDifferenceCases1 = '{:,}'.format(
            jsonDataFile['past']['baCasesToday'] -
            jsonDataFile['past2']['baCasesToday'])
        baDifferencesDeaths1 = '{:,}'.format(
            jsonDataFile['past']['baDeathsToday'] -
            jsonDataFile['past2']['baDeathsToday'])
        wDifferenceCases1 = '{:,}'.format(
            jsonDataFile['past']['worldCases'] - int(jsonDataFile['past2']['worldCases']))
        wDifferenceDeath1 = '{:,}'.format(
            jsonDataFile['past']['worldDeaths'] - int(jsonDataFile['past2']['worldDeaths']))
        wDifferenceRecoveries1 = '{:,}'.format(
            jsonDataFile['past']['worldRecoveries'] - int(jsonDataFile['past2']['worldRecoveries']))
        pastWorldCases = jsonDataFile['other']['currentWorldCases']
        pastUsCases = jsonDataFile['other']['uscases']
        pastUsNewCases = jsonDataFile['other']['usnewcases']
        pastUsDeaths = jsonDataFile['other']['usdeaths']
        pastUsNewDeaths = jsonDataFile['other']['usenewdeaths']
        # Rotate the cache: yesterday becomes the day before...
        jsonDataFile['past2']['calCasesToday'] = jsonDataFile['past']['calCasesToday']
        jsonDataFile['past2']['calDeathsToday'] = jsonDataFile['past']['calDeathsToday']
        jsonDataFile['past2']['baCasesToday'] = jsonDataFile['past']['baCasesToday']
        jsonDataFile['past2']['baDeathsToday'] = jsonDataFile['past']['baDeathsToday']
        jsonDataFile['past2']['worldCases'] = jsonDataFile['past']['worldCases']
        jsonDataFile['past2']['worldDeaths'] = jsonDataFile['past']['worldDeaths']
        jsonDataFile['past2']['worldRecoveries'] = jsonDataFile['past']['worldRecoveries']
        # ...and today becomes yesterday.
        jsonDataFile['past']['calCasesToday'] = calCasesToday
        jsonDataFile['past']['calDeathsToday'] = calDeathsToday
        jsonDataFile['past']['baCasesToday'] = baCasesToday
        jsonDataFile['past']['baDeathsToday'] = baDeathsToday
        jsonDataFile['past']['worldCases'] = worldCasesToday
        jsonDataFile['past']['worldDeaths'] = worldDeathsToday
        jsonDataFile['past']['worldRecoveries'] = worldRecoveriesToday
        jsonDataFile['other'] = {
            'currentWorldCases': currentWorldCases,
            'uscases': totals[0].text,
            'usnewcases': newCases,
            'usenewdeaths': newDeaths,
            'usdeaths': totals[1].text}
        with open(jsonFilePath, 'w') as jsonFile:
            json.dump(jsonDataFile, jsonFile)
    # NOTE(review): the pastUs*/pastWorldCases names below are only bound in
    # the else-branch; on a first run the f-strings would raise NameError.
    emailMessage = (f'''
    Hello,
    Update: {nowFormatted}
    World Data from WorldOMeter:
    Total cases since outbreak: {worldCases}, Yesterday: {maths(worldCases,wDifferenceCases)}
    Total current cases: {currentWorldCases}, Yesterday: {pastWorldCases}
    New cases: {wDifferenceCases}, Yesterday: {wDifferenceCases1}
    Total closed cases: {currentWorldClosed}, Yesterday: {maths(currentWorldClosed,wDifferenceDeath,wDifferenceRecoveries)}
    Total deaths: {worldDeaths}, Yesterday: {maths(worldDeaths,wDifferenceDeath)}
    New deaths: {wDifferenceDeath}, Yesterday: {wDifferenceDeath1}
    Total Recoveries: {worldRecoveries}, Yesterday: {maths(worldRecoveries,wDifferenceRecoveries)}
    New Recoveries: {wDifferenceRecoveries}, Yesterday: {wDifferenceRecoveries1}
    United States Data from CDC:
    Total cases: {totals[0].text}, Yesterday: {pastUsCases}
    New cases: {newCases}, Yesterday: {pastUsNewCases}
    Total deaths: {totals[1].text}, Yesterday: {pastUsDeaths}
    New deaths: {newDeaths}, Yesterday: {pastUsNewDeaths}
    California Data from SF Chronicle:
    Total cases: {californiaCases}, Yesterday: {maths(californiaCases,calDifferenceCases)}
    New cases: {calDifferenceCases}, Yesterday: {calDifferenceCases1}
    Total deaths: {californiaDeaths}, Yesterday: {maths(californiaDeaths,calDifferenceDeaths)}
    New deaths: {calDifferenceDeaths}, Yesterday: {calDifferenceDeaths1}
    Bay Area from SF Chronicle:
    Total cases: {bayAreaCases}, Yesterday: {maths(bayAreaCases,baDifferenceCases)}
    New cases: {baDifferenceCases}, Yesterday: {baDifferenceCases1}
    Total deaths: {bayAreaDeaths}, Yesterday: {maths(bayAreaDeaths,baDifferencesDeaths)}
    New deaths: {baDifferencesDeaths}, Yesterday: {baDifferencesDeaths1}
    - COVID-19 Reporter
    (Created by Rafael Cenzano)''')
    emailMessageHtml = (f'''
    <html lang="en">
    <head></head>
    <body>
        <p>Hello,</p>
        <p>Update: {nowFormatted}</p>
        <br>
        <h2>World Data from <a href="https://www.worldometers.info/coronavirus/" target="_blank">WorldOMeter</a>:</h2>
        <table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
            <tr>
                <td align="center" valign="top">Info</td>
                <td align="center" valign="top">Today's Data</td>
                <td align="center" valign="top">Yesterday's Data</td>
            </tr>
            <tr>
                <td align="left" valign="top">Cases since outbreak</td>
                <td align="left" valign="top">{worldCases}</td>
                <td align="left" valign="top">{maths(worldCases,wDifferenceCases)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Current Cases</td>
                <td align="left" valign="top">{currentWorldCases}</td>
                <td align="left" valign="top">{pastWorldCases}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Cases</td>
                <td align="left" valign="top">{wDifferenceCases}</td>
                <td align="left" valign="top">{wDifferenceCases1}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Closed Cases</td>
                <td align="left" valign="top">{currentWorldClosed}</td>
                <td align="left" valign="top">{maths(currentWorldClosed,wDifferenceDeath,wDifferenceRecoveries)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Deaths</td>
                <td align="left" valign="top">{worldDeaths}</td>
                <td align="left" valign="top">{maths(worldDeaths,wDifferenceDeath)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Deaths</td>
                <td align="left" valign="top">{wDifferenceDeath}</td>
                <td align="left" valign="top">{wDifferenceDeath1}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Recoveries</td>
                <td align="left" valign="top">{worldRecoveries}</td>
                <td align="left" valign="top">{maths(worldRecoveries,wDifferenceRecoveries)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Recoveries</td>
                <td align="left" valign="top">{wDifferenceRecoveries}</td>
                <td align="left" valign="top">{wDifferenceRecoveries1}</td>
            </tr>
        </table>
        <br>
        <h2>United States Data from <a href="https://www.cdc.gov/coronavirus/2019-ncov/cases-updates/cases-in-us.html" target="_blank">CDC</a>:</h2>
        <table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
            <tr>
                <td align="center" valign="top">Info</td>
                <td align="center" valign="top">Today's Data</td>
                <td align="center" valign="top">Yesterday's Data</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Cases</td>
                <td align="left" valign="top">{totals[0].text}</td>
                <td align="left" valign="top">{pastUsCases}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Cases</td>
                <td align="left" valign="top">{newCases}</td>
                <td align="left" valign="top">{pastUsNewCases}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Deaths</td>
                <td align="left" valign="top">{totals[1].text}</td>
                <td align="left" valign="top">{pastUsDeaths}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Deaths</td>
                <td align="left" valign="top">{newDeaths}</td>
                <td align="left" valign="top">{pastUsNewDeaths}</td>
            </tr>
        </table>
        <br>
        <h2>California Data from <a href="https://www.sfchronicle.com/bayarea/article/Coronavirus-live-updates-news-bay-area-15237940.php" target="_blank">SF Chronicle</a>:</h2>
        <table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
            <tr>
                <td align="center" valign="top">Info</td>
                <td align="center" valign="top">Today's Data</td>
                <td align="center" valign="top">Yesterday's Data</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Cases</td>
                <td align="left" valign="top">{californiaCases}</td>
                <td align="left" valign="top">{maths(californiaCases,calDifferenceCases)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Cases</td>
                <td align="left" valign="top">{calDifferenceCases}</td>
                <td align="left" valign="top">{calDifferenceCases1}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Deaths</td>
                <td align="left" valign="top">{californiaDeaths}</td>
                <td align="left" valign="top">{maths(californiaDeaths,calDifferenceDeaths)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Deaths</td>
                <td align="left" valign="top">{calDifferenceDeaths}</td>
                <td align="left" valign="top">{calDifferenceDeaths1}</td>
            </tr>
        </table>
        <br>
        <h2>Bay Area from <a href="https://www.sfchronicle.com/bayarea/article/Coronavirus-live-updates-news-bay-area-15237940.php" target="_blank">SF Chronicle</a>:</h2>
        <table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
            <tr>
                <td align="center" valign="top">Info</td>
                <td align="center" valign="top">Today's Data</td>
                <td align="center" valign="top">Yesterday's Data</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Cases</td>
                <td align="left" valign="top">{bayAreaCases}</td>
                <td align="left" valign="top">{maths(bayAreaCases,baDifferenceCases)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Cases</td>
                <td align="left" valign="top">{baDifferenceCases}</td>
                <td align="left" valign="top">{baDifferenceCases1}</td>
            </tr>
            <tr>
                <td align="left" valign="top">Total Deaths</td>
                <td align="left" valign="top">{bayAreaDeaths}</td>
                <td align="left" valign="top">{maths(bayAreaDeaths,baDifferencesDeaths)}</td>
            </tr>
            <tr>
                <td align="left" valign="top">New Deaths</td>
                <td align="left" valign="top">{baDifferencesDeaths}</td>
                <td align="left" valign="top">{baDifferencesDeaths1}</td>
            </tr>
        </table>
        <br>
        <h4>- COVID-19 Reporter</h4>
        <p>(Created by <a href="https://rafaelcenzano.com" target="_blank">Rafael Cenzano</a>)</p>
    </body>
    </html>''')
    # Send a multipart (plain + HTML) email to each recipient over STARTTLS.
    # NOTE(review): one SMTP connection is opened per recipient; a single
    # connection would suffice -- confirm before consolidating.
    for recieverEmail in recieverEmails:
        msg = MIMEMultipart('alternative')
        msg['From'] = f'COVID-19 Reporter <{senderEmail}>'
        msg['To'] = recieverEmail
        msg['Subject'] = f'CoronaVirus update: {nowFormatted}'
        part1 = MIMEText(emailMessage, 'plain')
        part2 = MIMEText(emailMessageHtml, 'html')
        msg.attach(part1)
        msg.attach(part2)
        message = msg.as_string()
        smtp_server = SMTP('smtp.gmail.com', 587)
        smtp_server.ehlo_or_helo_if_needed()
        smtp_server.starttls()
        smtp_server.ehlo_or_helo_if_needed()
        smtp_server.login(senderEmail, senderPassword)
        smtp_server.sendmail(senderEmail, recieverEmail, message)
        smtp_server.quit()
        print(f'Email sent to {recieverEmail} @ {nowFormatted}')
if __name__ == '__main__':
    # Run one scrape-and-email cycle when executed as a script.
    scraper()
| 18,320 | 6,265 |
'''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Author: Mark Harvey, Xilinx Inc
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import xir.graph
import pathlib
import xir.subgraph
import threading
import time
import sys
import argparse
divider = '-----------------------------------------------'
def preprocess_fn(image_path):
    '''
    Image pre-processing.
    Rearranges from BGR to RGB then normalizes to range 0:1
    input arg: path of image file
    return: numpy array
    '''
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb / 255.0
def get_subgraph(g):
    '''
    Interrogate a deserialized model graph and return its DPU subgraphs.

    input arg: g - deserialized xir graph object
    return: list of child subgraph objects whose "device" attribute is "DPU"
    (non-DPU children, e.g. CPU subgraphs, are filtered out)
    '''
    # The dead `sub = []` pre-assignment from the original was removed; the
    # comprehension result is returned directly.
    root = g.get_root_subgraph()
    return [s for s in root.children
            if s.metadata.get_attr_str("device") == "DPU"]
def runDPU(id, start, dpu, img):
    '''
    DPU execution - called in thread from app function.
    Arguments:
      id: integer to identify thread - not currently used
      start: Start index for writes to out_q.
      dpu: runner
      img: list of pre-processed images to pass into DPU
    '''
    # get_input_tensors()/get_output_tensors() return one tensor object per
    # network input/output; each shape is (batch,height,width,channels) and
    # for the Edge DPU the batch size is always 1.
    inputTensors = dpu.get_input_tensors()
    outputTensors = dpu.get_output_tensors()
    in_shape = inputTensors[0].shape
    out_shape = outputTensors[0].shape
    out_dims = outputTensors[0].dims
    # flattened length of one output frame: height*width*channels
    flat_len = out_dims[1] * out_dims[2] * out_dims[3]
    for idx, frame in enumerate(img):
        # one np array per input/output tensor
        inputData = [frame.reshape(in_shape)]
        outputData = [np.empty(out_shape, dtype=np.float32, order='C')]
        # start the DPU and block until the job completes
        job_id = dpu.execute_async(inputData, outputData)
        dpu.wait(job_id)
        # flatten (batch,h,w,c) -> (batch,h*w*c) and store the single frame
        # into the shared global results list
        out_q[start + idx] = outputData[0].reshape(1, flat_len)[0]
    return
def app(image_dir, threads, model):
    '''
    Main application function.
    Pre-processes up to 2500 images, partitions them across worker threads
    (one DPU runner each), then reports classification accuracy and FPS.
    Arguments:
        image_dir: folder of input images; the label is the filename prefix
                   before the first '.' (e.g. "dog.123.jpg")
        threads: number of worker threads / DPU runners
        model: path to the compiled .elf/.xmodel file
    '''
    listimage = os.listdir(image_dir)
    # cap the workload at 2500 images; min() avoids the list copy that the
    # original len(listimage[:2500]) made just to count
    runTotal = min(len(listimage), 2500)
    print('Found', len(listimage), 'images - processing', runTotal, 'of them')
    # global list that all threads can write results to
    global out_q
    out_q = [None] * runTotal
    # get a list of DPU subgraphs from the compiled model file
    g = xir.graph.Graph.deserialize(pathlib.Path(model))
    subgraphs = get_subgraph(g)
    print('Found', len(subgraphs), 'subgraphs in', model)
    # preprocess images
    print('Pre-processing', runTotal, 'images...')
    img = [preprocess_fn(os.path.join(image_dir, listimage[i]))
           for i in range(runTotal)]
    # create dpu runners - each thread gets one; each runner executes a subgraph
    all_dpu_runners = [runner.Runner(subgraphs[0], "run") for _ in range(threads)]
    # create threads: each receives a slice of the preprocessed image list and
    # writes results into the matching slice of the global out_q list
    threadAll = []
    start = 0
    for i in range(threads):
        # the last thread picks up the remainder of the integer division
        end = len(img) if i == threads - 1 else start + (len(img) // threads)
        in_q = img[start:end]
        t1 = threading.Thread(target=runDPU, args=(i, start, all_dpu_runners[i], in_q))
        threadAll.append(t1)
        start = end
    # run threads
    print('Starting', threads, 'threads...')
    time1 = time.time()
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    threads_time = time2 - time1
    # post-processing: compare predicted class against the filename prefix
    classes = ['dog', 'cat']
    correct = 0
    wrong = 0
    for i in range(len(out_q)):
        prediction = classes[np.argmax(out_q[i])]
        ground_truth, _ = listimage[i].split('.', 1)
        if ground_truth == prediction:
            correct += 1
        else:
            wrong += 1
    # guard against an empty image folder (was a ZeroDivisionError)
    accuracy = correct / len(out_q) if out_q else 0.0
    print(divider)
    print('Correct:', correct, 'Wrong:', wrong, 'Accuracy:', accuracy)
    print(divider)
    fps = runTotal / threads_time
    print('FPS: %.2f, total frames: %.0f, total time: %.3f seconds' % (fps, runTotal, threads_time))
    print(divider)
    return
# only used if script is run as 'main' from command line
def main():
    """Parse command-line options, echo them, then launch the application."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--image_dir', type=str, default='images', help='Path to folder of images. Default is images')
    parser.add_argument('-t', '--threads', type=int, default=1, help='Number of threads. Default is 1')
    parser.add_argument('-m', '--model', type=str, default='model_dir/dpu_alexnet_np.elf', help='Path of folder with .elf or .xmodel. Default is model_dir/dpu_alexnet_np.elf')
    opts = parser.parse_args()
    # echo the effective configuration before starting
    print(divider)
    print('Command line options:')
    print(' --image_dir : ', opts.image_dir)
    print(' --threads : ', opts.threads)
    print(' --model : ', opts.model)
    print(divider)
    app(opts.image_dir, opts.threads, opts.model)
if __name__ == '__main__':
    main()
| 6,494 | 2,108 |
# Generated by Django 3.0 on 2020-01-07 07:56
import datetime
from django.db import migrations
from django.utils.timezone import utc
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a non-null ``post`` HTML field to ``Image``."""
    dependencies = [
        ('photos', '0007_auto_20200107_1022'),
    ]
    operations = [
        migrations.AddField(
            model_name='image',
            name='post',
            # NOTE(review): the one-off default is a *datetime* although the
            # field is an HTMLField — Django recorded whatever was typed at
            # makemigrations time. It only applies to rows existing at
            # migration time (preserve_default=False drops it afterwards);
            # confirm this was intentional.
            field=tinymce.models.HTMLField(default=datetime.datetime(2020, 1, 7, 7, 56, 37, 798866, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| 545 | 208 |
from vcache_utils import VCacheStats
from bfs_common import BFSParameters
import sys
import pandas as pd
class BFSVCacheStats(VCacheStats):
    """VCacheStats specialisation that groups results by BFS run parameters."""
    def _subclass_init_add_group_by_fields(self):
        # Parse the BFS parameters out of the stats filename, merge them into
        # the data frame, and register them as additional group-by keys.
        # NOTE(review): relies on VCacheStats (out of view) having set
        # self.filename, self._data and self._group_by_fields beforehand.
        self._parameters = BFSParameters(self.filename)
        self._parameters.updateDataFrame(self._data)
        self._group_by_fields += self._parameters.parameters
        return
# Merge the diffed stats of every input file into one summary CSV.
# DataFrame.append was deprecated and removed in pandas 2.0 (and was O(n^2)
# when called in a loop); collect the frames first and concatenate once.
frames = [BFSVCacheStats(filename).diffed_data for filename in sys.argv[1:]]
data = pd.concat(frames) if frames else pd.DataFrame()
data.to_csv("vcache.summary.csv")
| 525 | 160 |
import json
import logging
import re
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from . import DOCSURL, DS_URL_PREFIX, readers
# configure logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# NOTE(review): the handler is attached at import time; re-importing the
# module through a different path would duplicate log output.
logger.addHandler(ch)
# Maps a lowercase file extension to the reader that loads it into AnnData.
DEFAULT_READERS = {
    "loom": readers.read_loom_to_anndata,
    "rds": readers.read_seurat_to_anndata,
    "h5ad": readers.read_anndata_to_anndata,
    "hdf5": readers.read_10xhdf5_to_anndata,
    "h5": readers.read_10xhdf5_to_anndata,
    "tsv": readers.read_densetsv_to_anndata,
    "csv": readers.read_densecsv_to_anndata,
}
# Default mount point of attached datasets inside the analysis container.
DATA_DIR = Path("/fastgenomics/data")
# Preferred column order for dataset overview DataFrames; any column not
# listed here is appended after these, in its original order.
DF_SORT_ORDER = [
    "title",
    "id",
    "organism",
    "tissue",
    "numberOfCells",
    "numberOfGenes",
    "path",
    "numberOfExpressionDataFiles",
    "expressionDataFileNames",
    "numberOfMetaDataFiles",
    "metaDataFileNames",
    "expressionDataFileInfos",
    "metaDataFileInfos",
]
def get_datasets_df(data_dir: Path = DATA_DIR) -> pd.DataFrame:
    """Constructs a :py:func:`pandas.DataFrame` from all available datasets.
    Parameters
    ----------
    data_dir : Path, optional
        Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
    Returns
    -------
    pd.DataFrame
        A pandas DataFrame containing all available datasets
    """
    # Collect one record per dataset directory. Building a list and creating
    # the DataFrame once replaces the per-row DataFrame.append of the
    # original, which was O(n^2) and removed in pandas 2.0.
    records = []
    for ds_path in get_ds_paths(data_dir=data_dir):
        with open(ds_path / "dataset_info.json") as f:
            info = json.load(f)
        info["path"] = str(ds_path)
        info["numberOfExpressionDataFiles"] = len(info["expressionDataFileInfos"])
        info["numberOfMetaDataFiles"] = len(info["metaDataFileInfos"])
        info.pop("schemaVersion", None)  # internal field, not user-facing
        records.append(info)
    ds_df = pd.DataFrame.from_records(records)
    # Order known columns first (per DF_SORT_ORDER), then keep any remaining
    # columns in their original order.
    col_names = ds_df.columns.values.tolist()
    col_names_sorted = [name for name in DF_SORT_ORDER if name in col_names]
    col_names_sorted += [name for name in col_names if name not in DF_SORT_ORDER]
    ds_df = ds_df[col_names_sorted]
    # Count columns arrive as objects/floats from JSON; pin them to int32.
    ds_df = ds_df.astype(
        {
            "numberOfCells": "int32",
            "numberOfGenes": "int32",
            "numberOfExpressionDataFiles": "int32",
            "numberOfMetaDataFiles": "int32",
        }
    )
    return ds_df
def ds_info(
    ds: Optional[str] = None,
    pretty: Optional[bool] = None,
    output: Optional[bool] = None,
    data_dir: Path = DATA_DIR,
) -> pd.DataFrame:
    """Get information on all available datasets in this analysis.
    Parameters
    ----------
    ds : Optional[str], optional
        A single dataset ID or dataset title. If set, only this dataset will be displayed. Recommended to use with ``pretty``, by default None
    pretty : bool, optional
        Whether to display some nicely formatted output, by default True
    output : bool, optional
        Whether to return a DataFrame or not, by default True
    data_dir : Path, optional
        Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
    Returns
    -------
    pd.DataFrame
        A pandas DataFrame containing all, or a single dataset (depends on ``ds``)
    """
    # Interdependent defaults: pretty-print when a single dataset was
    # requested; return the DataFrame when listing everything.
    if pretty is None:
        pretty = ds is not None
    if output is None:
        output = ds is None
    if not pretty and not output:
        logger.warning(
            'You have set "pretty" and "output" to false. Hence, this function will do/return nothing.'
        )
        return
    try:
        ds_df = get_datasets_df(data_dir=data_dir)
    except NoDatasetsError as err:
        # No datasets attached: warn and return an empty frame instead of raising.
        logger.warning(err)
        return pd.DataFrame()
    def add_url(title, id):
        # Render the dataset title as an HTML link to its detail page.
        return f'<a href="{DS_URL_PREFIX}{id}" target="_blank">{title}</a>'
    def disp_pretty_df(df, index=True, header=True):
        # Render a DataFrame as HTML; only possible inside Jupyter/IPython.
        try:
            from IPython.display import display, Markdown
            df_html = df.to_html(
                render_links=True,
                escape=False,
                header=header,
                index=index,
                justify="center",
            )
            display(Markdown(df_html))
        except:
            logger.warning(
                "IPython not available. Pretty printing only works in Jupyter Notebooks."
            )
    if ds:
        # --- single-dataset view ---
        single_ds_df = select_ds_id(ds, df=ds_df)
        single_ds_df["expressionDataFileNames"] = ", ".join(
            [
                expr["name"]
                for expr in single_ds_df.loc[0, "expressionDataFileInfos"]
            ]
        )
        single_ds_df["metaDataFileNames"] = ", ".join(
            [expr["name"] for expr in single_ds_df.loc[0, "metaDataFileInfos"]]
        )
        # Sort columns
        single_col_names = single_ds_df.columns.values.tolist()
        single_col_names_sorted = [
            name for name in DF_SORT_ORDER if name in single_col_names
        ]
        [
            single_col_names.remove(name)
            for name in DF_SORT_ORDER
            if name in single_col_names
        ]
        single_col_names_sorted.extend(single_col_names)
        single_ds_df = single_ds_df[single_col_names_sorted]
        if pretty:
            # NOTE(review): pretty_df aliases single_ds_df (no copy), so the
            # edits below also affect the returned frame.
            pretty_df = single_ds_df
            # stack file names with <br> so they render one-per-line in HTML
            pretty_df["expressionDataFileNames"] = "<br>".join(
                [
                    expr["name"]
                    for expr in pretty_df.loc[0, "expressionDataFileInfos"]
                ]
            )
            pretty_df["metaDataFileNames"] = ", ".join(
                [expr["name"] for expr in pretty_df.loc[0, "metaDataFileInfos"]]
            )
            # hide columns that hold no information for this dataset
            empty_cols = [
                col for col in pretty_df.columns if pretty_df.loc[0, col] == ""
            ]
            pretty_df = pretty_df.drop(
                labels=["expressionDataFileInfos", "metaDataFileInfos"]
                + empty_cols,
                axis=1,
                errors="ignore",
            )
            pretty_df.loc[0, "title"] = pretty_df.apply(
                lambda x: add_url(x.title, x.id), axis=1
            ).squeeze()
            # transpose so the single dataset reads as a property list
            disp_pretty_df(pretty_df.T, header=False)
        if output:
            return single_ds_df
    else:
        # --- overview of all datasets ---
        if pretty:
            # drop verbose columns that would clutter the overview table
            pretty_df = ds_df.drop(
                labels=[
                    "description",
                    "license",
                    "preprocessing",
                    "citation",
                    "webLink",
                    "file",
                    "expressionDataFileInfos",
                    "metaDataFileInfos",
                ],
                axis=1,
                errors="ignore",
            )
            pretty_df["title"] = pretty_df.apply(
                lambda x: add_url(x.title, x.id), axis=1
            )
            disp_pretty_df(pretty_df)
        if output:
            return ds_df
def load_data(
    ds: Optional[str] = None,
    data_dir: Path = DATA_DIR,
    additional_readers: Optional[dict] = None,
    expression_file: Optional[str] = None,
    as_format: Optional[str] = None,
):
    """This function loads a single dataset into an AnnData object.
    If there are multiple datasets available you need to specify one by setting
    ``ds`` to a dataset `id` or dataset `title`.
    To get an overview of available datasets use :py:func:`ds_info`
    Parameters
    ----------
    ds : str, optional
        A single dataset ID or dataset title to select a dataset to be loaded.
        If only one dataset is available you do not need to set this parameter, by default None
    data_dir : Path, optional
        Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
    additional_readers : dict, optional
        Used to specify your own readers for the specific data set format.
        Dict key needs to be file extension (e.g., h5ad), dict value a function.
        Still experimental, by default None
    expression_file: str, Optional
        The name of the expression file to load.
        Only needed when there are multiple expression files in a dataset.
    as_format: str, optional
        Specifies which reader should be used for this dataset. Overwrites the auto-detection
        of the format. Possible parameters are the file extensions of our supported data
        formats: ``h5ad``, ``h5``, ``hdf5``, ``loom``, ``rds``, ``csv``, ``tsv``.
    Returns
    -------
    AnnData Object
        A single AnnData object with dataset id in `obs` and all dataset metadata in `uns`
    Examples
    --------
    To use a custom reader for files with the extension ".fg", you have to define a function first:
    >>> def my_loader(file):
    ...     anndata = magic_file_loading(file)
    ...     return anndata
    You can then use this reader like this:
    >>> fgread.load_data("my_dataset", additional_readers={"fg": my_loader})
    """
    # Merge user readers over the defaults. The default is None (not {}) to
    # avoid a shared mutable default argument.
    readers = {**DEFAULT_READERS, **(additional_readers or {})}
    if ds:
        single_df = select_ds_id(ds, df=get_datasets_df(data_dir=data_dir))
    else:
        single_df = get_datasets_df(data_dir=data_dir)
        if len(single_df) > 1:
            raise RuntimeError(
                "There is more than one dataset available in this analysis. "
                "Please select one by its ID or title. "
                'You can list available datasets by using "fgread.ds_info()".'
            )
    exp_count = single_df.loc[0, "numberOfExpressionDataFiles"]
    meta_count = single_df.loc[0, "numberOfMetaDataFiles"]
    if exp_count == 0:
        raise TypeError(
            f"There is no expression data available in this data set.\n"
            f"Metadata files: {meta_count}."
        )
    exp_files = [
        exp["name"] for exp in single_df.loc[0, "expressionDataFileInfos"]
    ]
    # Resolve which expression file to load.
    if expression_file:
        if expression_file in exp_files:
            file = expression_file
        else:
            raise KeyError(
                f'Expression file "{expression_file}" not found in dataset. '
                f"Available expression files are: {exp_files}."
            )
    else:
        if exp_count == 1:
            file = single_df.loc[0, "expressionDataFileInfos"][0]["name"]
        else:
            raise TypeError(
                f"There are {exp_count} expression data files in this dataset. "
                'Please specify which one you want to load using the parameter "expression_file". '
                f"Available expression files are: {exp_files}."
            )
    title = single_df.loc[0, "title"]
    ds_id = single_df.loc[0, "id"]
    path = single_df.loc[0, "path"]
    metadata_dict = single_df.loc[0].to_dict()
    # Determine the reader format: forced via as_format, else from the suffix.
    # (`fmt` instead of `format` — the latter shadows the builtin.)
    if as_format:
        fmt = as_format.lower()
    else:
        try:
            fmt = file.rsplit(".", 1)[1].lower()
            logger.info(f'Expression file "{file}" with format "{fmt}".')
        except IndexError as e:
            # BUGFIX: a file without a "." makes the [1] lookup raise
            # IndexError (str.rsplit never raises ValueError), so the
            # original `except ValueError` could never fire.
            raise ValueError(
                f'The expression file "{file}" has no valid file suffix.'
            ).with_traceback(e.__traceback__)
    if fmt in readers:
        if meta_count != 0:
            logger.info(
                f"There are {meta_count} metadata files in this dataset. "
                "This data will not be integrated into the anndata object."
            )
        logger.info(
            f'Loading file "{file}" from dataset "{title}" in format "{fmt}" from directory "{path}"...\n'
        )
        adata = readers[fmt](Path(path) / file)
        # Attach dataset provenance to the AnnData object.
        adata.uns["ds_metadata"] = {ds_id: {"title": title}}
        adata.uns["ds_metadata_raw"] = {ds_id: str(metadata_dict)}
        adata.obs["fg_id"] = ds_id
        n_genes = adata.shape[1]
        n_cells = adata.shape[0]
        logger.info(
            f'Loaded dataset "{title}" with {n_cells} cells and {n_genes} genes.\n'
            f"==================================================================\n"
        )
        return adata
    else:
        raise KeyError(
            f'Unsupported file format "{fmt}", use one of {list(readers)}. '
            f'You can force the usage of a specific reader by setting "as_format" to a supported format. '
            f"In addition, you can also implement your own reading function. See {DOCSURL} for more information."
        )
def select_ds_id(ds: str, df: pd.DataFrame = None) -> pd.DataFrame:
    """Select a single dataset from a pandas DataFrame by its ID or title
    Parameters
    ----------
    ds : str
        A single dataset ID or dataset title for selection
    df : pd.DataFrame, optional
        A pandas DataFrame from which a single entry is selected, by default None
    Returns
    -------
    pd.DataFrame
        A pandas DataFrame with only the selected dataset.
    Raises
    ------
    KeyError
        If the selection matches zero or more than one dataset.
    """
    # Match on either the immutable ID or the (user-editable) title.
    single_df = df.loc[(df["id"] == ds) | (df["title"] == ds)].reset_index(
        drop=True
    )
    len_df = len(single_df)
    if len_df == 1:
        return single_df.copy()
    elif len_df == 0:
        add_err = ""
        if not ds.startswith("dataset-"):
            add_err = " Please note that dataset titles can be changed by the owner. To be safe, you might want to consider dataset IDs instead."
        raise KeyError("Your selection matches no datasets." + add_err)
    else:
        # BUGFIX: `display` was used without being imported at module level,
        # so an ambiguous selection raised NameError instead of the intended
        # KeyError. Show the matches via IPython when available, else log them.
        try:
            from IPython.display import display
            display(single_df)
        except ImportError:
            logger.warning("Ambiguous selection:\n%s", single_df)
        raise KeyError(
            f"Your selection matches {len_df} datasets. Please make sure to select exactly one."
        )
def get_ds_paths(data_dir: Union[str, Path] = DATA_DIR) -> list:
    """Gets available datasets for this analysis from path.
    Parameters
    ----------
    data_dir : Union[str,Path], optional
        Directory containing the datasets, e.g. "fastgenomics/data", by default DATA_DIR
    Returns
    -------
    list
        A list of dataset paths
    Raises
    ------
    NoDatasetsError
        If the directory does not exist or contains no dataset folders.
    """
    base = Path(data_dir)
    if not base.exists():
        raise NoDatasetsError(
            f'There are no datasets attached to this analysis. Path "{base}" does not exist.'
        )
    # dataset folders follow the fixed naming scheme "dataset_NNNN"
    name_pattern = re.compile(r"^dataset_\d{4}$")
    paths = []
    for entry in sorted(base.iterdir()):
        if entry.is_dir() and name_pattern.match(entry.name):
            paths.append(Path(entry))
    if not paths:
        raise NoDatasetsError(
            f'There are no datasets attached to this analysis. Path "{base}" is empty.'
        )
    return paths
class NoDatasetsError(Exception):
    """Raised when no datasets are attached to the analysis."""
    pass
| 14,511 | 4,287 |
# This program rolls two dice and prints what you got.
import random

# Lowest and highest face value of a die (named to avoid shadowing the
# builtins min/max as the original did).
MIN_FACE = 1
MAX_FACE = 6

roll_again = "yes"
# Keep rolling while the user answers "yes" or "y".
# Ported from Python 2 (print statement / raw_input) to Python 3.
while roll_again == "yes" or roll_again == "y":
    print("Rolling the dices:")
    print("The values are:")
    print(random.randint(MIN_FACE, MAX_FACE))
    print(random.randint(MIN_FACE, MAX_FACE))
    roll_again = input("Roll the dices again?")
| 487 | 163 |
# TOPIC: GENERATORS
######################################################################
# Function that returns a given count of even numbers
def generaParesFuncion(limite):
    """Return a list with the even numbers 2*1 .. 2*(limite-1)."""
    return [2 * n for n in range(1, limite)]
######################################################################
######################################################################
# Example 1: eager version — the whole list is built before printing.
print("Ejemplo #1")
print(generaParesFuncion(10))
print()
print()
print()
print()
######################################################################
######################################################################
# Generator that does the same as the previous example
def generaPares(limite):
    """Lazily yield the even numbers 2*1 .. 2*(limite-1), one at a time."""
    for n in range(1, limite):
        yield 2 * n
######################################################################
######################################################################
# Example 2: lazy version — values are produced on demand while iterating.
print("Ejemplo #2")
miGenerador = generaPares(10)  # create the generator
for i in miGenerador:
    print(i)
print()
print()
print()
print()
print()
######################################################################
######################################################################
# Another use of the generator: pulling values manually with next()
print("Ejemplo #3")
miGenerador02 = generaPares(12)
print("Primera llamada")
print(next(miGenerador02))
print()
print("Segunda llamada")
print(next(miGenerador02))
print()
print("Tercera llamada")
print(next(miGenerador02))
print()
print()
print()
print()
print()
######################################################################
# -*- coding: utf-8 -*-
"""
lantz.qt.widgets
~~~~~~~~~~~~~~~~
PyQt widgets wrapped to work with lantz.
:copyright: 2018 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from . import feat, nonnumeric, numeric
from .common import WidgetMixin, ChildrenWidgets
from .initialize import InitializeWindow, InitializeDialog
from .testgui import DriverTestWidget, SetupTestWidget | 443 | 146 |
# -*- coding: utf-8 -*-
import struct
import iota
from iotapy.storage import converter as conv
TRANSACTION_METADATA_TRITS_LENGTH = 1604
HASH_BYTES_LENGTH = 49
HASH_TRITS_LENGTH = 243
def get_key(bytes_: bytes):
    """Convert raw key bytes into an iota.TransactionHash."""
    if not isinstance(bytes_, bytes):
        raise TypeError
    trits = conv.from_binary_to_trits(bytes_, HASH_TRITS_LENGTH)
    return iota.TransactionHash.from_trits(trits)
def get(bytes_: bytes, key=None):
    """Deserialize transaction metadata from its binary representation.

    The layout is: five hashes (address, bundle, trunk, branch, legacy tag),
    four int64 fields, the tag hash, three int64 timestamps, two int32
    fields, two int64 fields, one solid byte, an int32 snapshot index and a
    trailing variable-length sender blob. Inverse of :func:`save`.

    :param bytes_: serialized metadata, or None
    :param key: unused, kept for the storage-provider interface
    :return: dict of decoded fields, or None when ``bytes_`` is None
    :raises TypeError: if ``bytes_`` is not a bytes object
    """
    if bytes_ is None:
        return None
    if not isinstance(bytes_, bytes):
        raise TypeError
    offset = 0

    def read_hash(hash_cls):
        # Consume HASH_BYTES_LENGTH bytes and decode them as a hash type.
        nonlocal offset
        chunk = bytes_[offset:offset + HASH_BYTES_LENGTH]
        offset += HASH_BYTES_LENGTH
        return hash_cls.from_trits(conv.from_binary_to_trits(chunk, HASH_TRITS_LENGTH))

    def read_int(fmt, size):
        # Consume `size` bytes and decode them as a big-endian integer.
        nonlocal offset
        value = struct.unpack(fmt, bytes_[offset:offset + size])[0]
        offset += size
        return value

    address = read_hash(iota.Address)
    bundle = read_hash(iota.BundleHash)
    trunk = read_hash(iota.TransactionHash)
    branch = read_hash(iota.TransactionHash)
    legacy_tag = read_hash(iota.Hash)
    value = read_int('>q', 8)
    current_index = read_int('>q', 8)
    last_index = read_int('>q', 8)
    timestamp = read_int('>q', 8)
    tag = read_hash(iota.Hash)
    attachment_timestamp = read_int('>q', 8)
    attachment_timestamp_lower_bound = read_int('>q', 8)
    attachment_timestamp_upper_bound = read_int('>q', 8)
    validity = read_int('>l', 4)
    type_ = read_int('>l', 4)
    arrival_time = read_int('>q', 8)
    height = read_int('>q', 8)
    # single flag byte: whether the transaction is solid
    solid = bytes_[offset] == 1
    offset += 1
    snapshot = read_int('>l', 4)
    # everything remaining is the sender blob
    sender = bytes_[offset:]
    return {
        'address': address,
        'bundle_hash': bundle,
        'trunk_transaction_hash': trunk,
        'branch_transaction_hash': branch,
        'legacy_tag': legacy_tag,
        'value': value,
        'current_index': current_index,
        'last_index': last_index,
        'timestamp': timestamp,
        'tag': tag,
        'attachment_timestamp': attachment_timestamp,
        'attachment_timestamp_lower_bound': attachment_timestamp_lower_bound,
        'attachment_timestamp_upper_bound': attachment_timestamp_upper_bound,
        'validity': validity,
        'type': type_,
        'arrival_time': arrival_time,
        'height': height,
        'solid': solid,
        'snapshot': snapshot,
        'sender': sender
    }
def save(value: iota.Transaction):
    """Serialize transaction metadata into bytes (inverse of :func:`get`)."""
    # Collect all parts first, then join once at the end.
    parts = [
        conv.from_trits_to_binary(value.address.as_trits()),
        conv.from_trits_to_binary(value.bundle_hash.as_trits()),
        conv.from_trits_to_binary(value.trunk_transaction_hash.as_trits()),
        conv.from_trits_to_binary(value.branch_transaction_hash.as_trits()),
        conv.from_trits_to_binary(iota.Hash.from_trits(value.legacy_tag.as_trits()).as_trits()),
        struct.pack('>q', value.value),
        struct.pack('>q', value.current_index),
        struct.pack('>q', value.last_index),
        struct.pack('>q', value.timestamp),
        conv.from_trits_to_binary(iota.Hash.from_trits(value.tag.as_trits()).as_trits()),
        struct.pack('>q', value.attachment_timestamp),
        struct.pack('>q', value.attachment_timestamp_lower_bound),
        struct.pack('>q', value.attachment_timestamp_upper_bound),
        struct.pack('>l', value.validity),
        struct.pack('>l', value.type),
        struct.pack('>q', value.arrival_time),
        struct.pack('>q', value.height),
        struct.pack('>?', value.solid),
        struct.pack('>l', value.snapshot),
        value.sender,
    ]
    return b''.join(parts)
| 4,368 | 1,674 |
#! /usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.slim as slim
seed = 0
def fc2d(inputs,
         num_outputs,
         activation_fn,
         scope, ):
    """Fully-connected layer applied along the last axis of a rank-3 tensor.

    Variables are created (or reused) inside ``scope``; ``activation_fn``
    may be None for a purely linear layer.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as vs:
        # last dimension of the (batch, seq, features) input
        _, _, in_dim = inputs.get_shape().as_list()
        weights = tf.get_variable(name='weights',
                                  shape=[in_dim, num_outputs],
                                  initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                  trainable=True)
        biases = tf.get_variable(name='biases',
                                 shape=[num_outputs],
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)
        # batched matmul over the last axis, then bias
        pre_activation = tf.einsum('ijk,kl->ijl', inputs, weights) + biases
        if activation_fn is None:
            return pre_activation
        return activation_fn(pre_activation, name=vs.name)
def conv3d(scope_name,
           input,
           filter_size):
    """3-D VALID convolution + bias + ReLU under a reusable variable scope.

    NOTE(review): the parameter name ``input`` shadows the builtin; it is
    kept unchanged for backward compatibility with keyword callers.
    """
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        kernel = tf.get_variable(name='weights',
                                 shape=filter_size,
                                 initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                 trainable=True)
        feature_map = tf.nn.conv3d(input=input,
                                   filter=kernel,
                                   strides=[1, 1, 1, 1, 1],
                                   padding='VALID')
        # one bias per output channel
        biases = tf.get_variable(name='biases',
                                 shape=[filter_size[-1]],
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)
        return tf.nn.relu(tf.nn.bias_add(feature_map, biases), name=scope.name)
class OC_Network():
    """Option-Critic network.

    Builds, under ``scope``, the option Q-values, the per-option action
    policies and the termination probabilities. For worker scopes (any scope
    not starting with 'global') it additionally builds the losses and update
    ops that apply clipped gradients to the 'global/main' parameters.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 num_options,
                 action_size,
                 history_steps,
                 scope
                 ):
        with tf.variable_scope(scope):
            # Inputs: stacked local vision/depth observations plus the
            # one-hot target label vector.
            self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                          dtype=tf.float32)
            self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
            self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
            # collapse the label axis of the vision input to one channel
            related_visions = fc2d(inputs=self.visions,
                                   num_outputs=1,
                                   activation_fn=None,
                                   scope='vision_preprocess')
            related_visions = slim.flatten(related_visions)
            depths = slim.flatten(self.depths)
            # three parallel 256-unit encoders for vision, depth and target
            hidden_visions = slim.fully_connected(inputs=related_visions,
                                                  num_outputs=256,
                                                  activation_fn=tf.nn.relu,
                                                  weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                  biases_initializer=tf.zeros_initializer(),
                                                  scope='vision_hidden')
            hidden_depths = slim.fully_connected(inputs=depths,
                                                 num_outputs=256,
                                                 activation_fn=tf.nn.relu,
                                                 weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                 biases_initializer=tf.zeros_initializer(),
                                                 scope='depth_hidden')
            hidden_targets = slim.fully_connected(inputs=self.targets,
                                                  num_outputs=256,
                                                  activation_fn=tf.nn.relu,
                                                  weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                  biases_initializer=tf.zeros_initializer(),
                                                  scope='target_hidden')
            # shared embedding over the concatenated encoder outputs
            vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
            embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                 num_outputs=256,
                                                 activation_fn=tf.nn.relu,
                                                 weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                 biases_initializer=tf.zeros_initializer(),
                                                 scope='embed')
            # heads: option Q-values, per-option action policy, terminations
            option_qvalues = slim.fully_connected(inputs=embed_feature,
                                                  num_outputs=num_options,
                                                  activation_fn=None,
                                                  weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                  biases_initializer=tf.zeros_initializer(),
                                                  scope='option_qvalue')
            self.option_qvalues = option_qvalues
            action_policy = slim.fully_connected(inputs=embed_feature,
                                                 num_outputs=num_options*action_size,
                                                 activation_fn=None,
                                                 weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                 biases_initializer=tf.zeros_initializer(),
                                                 scope='action_policy')
            # reshape to (batch, option, action) and softmax over actions
            self.action_policy = tf.nn.softmax(tf.reshape(action_policy, [-1, num_options, action_size]), axis=-1)
            terminations = slim.fully_connected(inputs=embed_feature,
                                                num_outputs=num_options,
                                                activation_fn=None,
                                                weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                biases_initializer=tf.zeros_initializer(),
                                                scope='termination')
            self.terminations = tf.sigmoid(terminations)
            # highlevel training
            # Worker scopes get losses and update ops; the global network
            # holds only the shared parameters.
            if not scope.startswith('global'):
                self.chosen_options = tf.placeholder(shape=[None], dtype=tf.int32)
                self.target_option_qvalues = tf.placeholder(shape=[None], dtype=tf.float32)
                self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
                self.lr = tf.placeholder(dtype=tf.float32)
                self.termination_reg = tf.placeholder(dtype=tf.float32)
                # TD loss on the Q-value of the option that was executed
                options_onehot = tf.one_hot(self.chosen_options, num_options, dtype=tf.float32)
                qvalues_for_chosen_options = tf.reduce_sum(self.option_qvalues*options_onehot, axis=1)
                option_td_error = tf.square(self.target_option_qvalues - qvalues_for_chosen_options)
                self.option_qvalue_loss = 0.5*tf.reduce_mean(option_td_error)
                # policy-gradient loss for the intra-option policy
                option_onehot_expanded = tf.tile(tf.expand_dims(options_onehot, 2), [1, 1, action_size])
                pi_for_chosen_options = tf.reduce_sum(self.action_policy * option_onehot_expanded, axis=1)
                # clip before log to avoid log(0)
                logpi_for_chosen_options = tf.log(tf.clip_by_value(pi_for_chosen_options, 0.000001, 0.999999))
                action_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
                logpi_for_chosen_actions = tf.reduce_sum(logpi_for_chosen_options * action_onehot, axis=-1)
                advantage = self.target_option_qvalues - qvalues_for_chosen_options
                self.action_policy_loss = -tf.reduce_mean(logpi_for_chosen_actions * tf.stop_gradient(advantage))
                # negative policy entropy (to be weighted and minimized)
                self.entropy_loss = -tf.reduce_mean(
                    tf.reduce_sum(pi_for_chosen_options * (-logpi_for_chosen_options), axis=-1))
                # termination loss: encourage ending options whose Q-value
                # falls below the best option by more than termination_reg
                chosen_terminations = tf.reduce_sum(self.terminations * options_onehot, axis=1)
                self.termination_loss = tf.reduce_mean(chosen_terminations *
                                                       tf.stop_gradient(
                                                           qvalues_for_chosen_options - tf.reduce_max(self.option_qvalues, axis=-1) + self.termination_reg))
                # factor = tf.stop_gradient(qvalues_for_chosen_options - tf.reduce_max(self.option_qvalues, axis=-1) + self.termination_reg)
                # sign = tf.stop_gradient(tf.where(tf.greater_equal(factor, 0.0), tf.ones_like(factor), tf.zeros_like(factor)))
                # self.termination_loss = tf.reduce_mean(sign*chosen_terminations*factor +
                #                                        (1-sign)*(1-chosen_terminations)*(-factor))
                # self.loss = self.option_qvalue_loss + self.action_policy_loss + 0 * self.entropy_loss + self.termination_loss
                # Gradients are computed w.r.t. local params but applied to
                # the shared 'global/main' params (A3C-style updates).
                trainer = tf.train.RMSPropOptimizer(learning_rate=self.lr)
                params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                global_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global/main')
                gradients = tf.gradients(self.option_qvalue_loss, params)
                norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                self.option_update = trainer.apply_gradients(zip(norm_gradients, global_params))
                gradients = tf.gradients(self.action_policy_loss + 0.01*self.entropy_loss, params)
                norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                self.action_update = trainer.apply_gradients(zip(norm_gradients, global_params))
                gradients = tf.gradients(self.termination_loss, params)
                norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                self.term_update = trainer.apply_gradients(zip(norm_gradients, global_params))
class Lowlevel_Network():
def __init__(self,
window_size,
num_labels,
action_size,
history_steps,
scope
):
with tf.variable_scope('lowlevel'):
with tf.variable_scope(scope):
self.visions = tf.placeholder(
shape=[None, history_steps * window_size * window_size, num_labels],
dtype=tf.float32)
self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1],
dtype=tf.float32)
self.subtargets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
subtargets_expanded = tf.tile(tf.expand_dims(self.subtargets, 1),
[1, history_steps * window_size * window_size, 1])
masked_visions = tf.reduce_sum(self.visions * subtargets_expanded, axis=-1)
masked_visions = slim.flatten(masked_visions)
depths = slim.flatten(self.depths)
hidden_visions = slim.fully_connected(inputs=masked_visions,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='vision_hidden')
hidden_depths = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='depth_hidden')
vision_depth_feature = tf.concat([hidden_visions, hidden_depths], 1)
embed_feature = slim.fully_connected(inputs=vision_depth_feature,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='embed')
# policy estimation
hidden_policy = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='policy_hidden')
self.policy = slim.fully_connected(inputs=hidden_policy,
num_outputs=action_size,
activation_fn=tf.nn.softmax,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='policy')
# value estimation
hidden_value = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='value_hidden')
self.value = slim.fully_connected(inputs=hidden_value,
num_outputs=1,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='value')
# Lowlevel training
if not scope.startswith('global'):
self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.target_values = tf.placeholder(shape=[None], dtype=tf.float32)
self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
self.er = tf.placeholder(dtype=tf.float32)
actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
log_policy = tf.log(tf.clip_by_value(self.policy, 0.000001, 0.999999))
log_pi_for_action = tf.reduce_sum(tf.multiply(log_policy, actions_onehot), axis=1)
self.value_loss = 0.5 * tf.reduce_mean(tf.square(self.target_values - self.value))
self.policy_loss = -tf.reduce_mean(log_pi_for_action * self.advantages)
self.entropy_loss = -tf.reduce_mean(tf.reduce_sum(self.policy * (-log_policy), axis=1))
self.lowlevel_loss = self.value_loss + self.policy_loss + self.er * self.entropy_loss
local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/%s'%scope)
gradients = tf.gradients(self.lowlevel_loss, local_lowlevel_params)
norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/global')
self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
class Lowlevel_Network_ex():
def __init__(self,
window_size,
num_labels,
action_size,
history_steps,
scope
):
with tf.variable_scope('lowlevel'):
with tf.variable_scope(scope):
self.visions = tf.placeholder(
shape=[None, history_steps * window_size * window_size, num_labels],
dtype=tf.float32)
self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1],
dtype=tf.float32)
self.subtargets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
subtargets_expanded = tf.tile(tf.expand_dims(self.subtargets, 1),
[1, history_steps * window_size * window_size, 1])
masked_visions = tf.reduce_sum(self.visions * subtargets_expanded, axis=-1)
masked_visions = slim.flatten(masked_visions)
depths = slim.flatten(self.depths)
hidden_visions = slim.fully_connected(inputs=masked_visions,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='vision_hidden')
hidden_depths = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='depth_hidden')
hidden_targets = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='target_hidden')
vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], 1)
embed_feature = slim.fully_connected(inputs=vision_depth_feature,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='embed')
self.qvalues = slim.fully_connected(inputs=embed_feature,
num_outputs=action_size,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='qvalue')
# Lowlevel training
if not scope.startswith('global'):
self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.target_q_values = tf.placeholder(shape=[None], dtype=tf.float32)
self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
q_values_for_chosen_actions = tf.reduce_sum(self.qvalues*actions_onehot, axis=1)
td_error = tf.square(self.target_q_values - q_values_for_chosen_actions)
self.qvalue_loss = 0.5*tf.reduce_mean(td_error)
lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/%s' % scope)
gradients = tf.gradients(self.qvalue_loss, lowlevel_params)
norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/global/ex/main')
self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
| 22,271 | 5,798 |
from tkinter import*
import website
import tkinter.font as font
from PIL import ImageTk,Image
import os
import sqlite3
import webbrowser
def main():
cgnc=Tk()
cgnc.title('Show')
cgnc.iconbitmap("logo/spectrumlogo.ico")
f=font.Font(family='Bookman Old Style',size=10,weight='bold')
f1=font.Font(family='Bookman Old Style',size=10)
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
l=len(c.fetchall())
ch=records[l-1][4]
ma=records[l-1][5]
co=records[l-1][6]
us=records[l-1][0]
#commit_changes
db.commit()
#close connection
db.close()
def cgpa():
cg1=((ch+ma+co)/3)/9.5
cg="{:.2f}".format(cg1)
db=sqlite3.connect("mark_list.db")
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
l=len(c.fetchall())
n6=records[l-1][1]
c.execute("""UPDATE mark_list SET cgpa=? WHERE name=?""",(cg,n6))
#commit_changes
db.commit()
#close connection
db.close()
entry.delete(0,END)
entry.insert(0,cg)
def grad():
av=((ch+ma+co)/3)
if av<=100 and av>=90:
gr='O'
elif av<90 and av>=80:
gr='E'
elif av<80 and av>=70:
gr='A'
elif av<70 and av>=60:
gr='B'
elif av<60 and av>=50:
gr='C'
elif av<50 and av>=40:
gr='D'
elif av<40:
gr='F'
db=sqlite3.connect("mark_list.db")
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
l=len(c.fetchall())
n6=records[l-1][1]
c.execute("""UPDATE mark_list SET grade=? WHERE name=?""",(gr,n6))
#commit_changes
db.commit()
#close connection
db.close()
entry.delete(0,END)
entry.insert(0,gr)
#buttons
cgpa=Button(cgnc,text='CGPA',bg='yellow',fg='black',borderwidth=3,padx=25,pady=20,command=cgpa,font=f)
cgpa.grid(row=0,column=0)
grade=Button(cgnc,text='GRADE',bg='yellow',fg='black',borderwidth=3,padx=20,pady=20,command=grad,font=f)
grade.grid(row=0,column=1)
Label(cgnc,text="\n").grid(row=1)
def new():
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#insert into tabels
c.execute("INSERT INTO mark_list VALUES(:user_name,:name,:registration_no,:branch,:chemistry,:math,:computer,:cgpa,:grade)",
{
'user_name':us,
'name':' ',
'registration_no':' ',
'branch':' ',
'chemistry':0,
'math':0,
'computer':0,
'cgpa':0,
'grade':' '
})
#commit_changes
db.commit()
#close connection
db.close()
cgnc.destroy()
import input_details
input_details.main()
def close():
os._exit(1)
new_input=Button(cgnc,text='New Input',bg='yellow',fg='black',borderwidth=3,padx=10,pady=20,command=new,font=f)
new_input.grid(row=2,column=0)
close=Button(cgnc,text='Close',bg='yellow',fg='black',borderwidth=3,command=close,padx=20,pady=20,font=f)
close.grid(row=2,column=1)
Label(cgnc,text="\n").grid(row=3)
entry=Entry(cgnc,borderwidth=3,width=44)
entry.grid(row=4,column=0,columnspan=2,padx=20)
def show_en():
show_ent=Toplevel()
show_ent.geometry("600x450")
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
f=font.Font(family='Bookman Old Style',size=10,weight='bold')
l=len(c.fetchall())
Label(show_ent,text="Username",font=f,fg='red').grid(row=0,column=0)
Label(show_ent,text="Name",font=f,fg='red').grid(row=0,column=1)
Label(show_ent,text="Registration ID",font=f,fg='red').grid(row=0,column=2)
Label(show_ent,text="Branch",font=f,fg='red').grid(row=0,column=3)
Label(show_ent,text="Chemistry",font=f,fg='red').grid(row=0,column=4)
Label(show_ent,text="Math",font=f,fg='red').grid(row=0,column=5)
Label(show_ent,text="Computer",font=f,fg='red').grid(row=0,column=6)
Label(show_ent,text="Cgpa",font=f,fg='red').grid(row=0,column=7)
Label(show_ent,text="Grade",font=f,fg='red').grid(row=0,column=8)
r=1
r1=0
for record in records:
if(records[l-1][0]==record[0]):
l1=list(record)
for c in range(0,9):
Label(show_ent,text=l1[c],fg='blue',font=f1).grid(row=r1+1,column=c)
r+=1
r=r+1
r1=r1+1
#commit_changes
db.commit()
#close connection
db.close()
show=Button(cgnc,text='Show Entries',bg='yellow',fg='black',borderwidth=3,command=show_en,padx=84,pady=5,font=f)
show.grid(row=5,column=0,columnspan=2,padx=40)
fo=font.Font(family='36 DAYS',size=10)
def call(url):
webbrowser.open_new(url)
Label(cgnc,text="\nVisit our club website:",fg='blue',font=fo).grid(row=6,column=0,columnspan=2)
l=Label(cgnc,text="https://spectrumcet.com/",fg='blue',font=fo)
l.bind("<Button-1>",lambda x:call('https://spectrumcet.com/'))
l.grid(row=7,column=0,columnspan=2)
mainloop()
if __name__=='__main__':
main()
| 5,980 | 2,296 |
import requests
from typing import List
from fastapi import FastAPI, Path
from pydantic import BaseModel, HttpUrl
from fastapi.middleware.cors import CORSMiddleware
cors_origins = [
'https://www.govdirectory.org',
'https://www.wikidata.org',
]
user_agent_external = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:90.0) Gecko/20100101 Firefox/90.0 Govdirectory.org account existence checker'
user_agent_wikimedia = 'Wikidata:WikiProject Govdirectory (health check service)'
url_properties = [
{
'name': 'official website',
'prop': 'P856',
},
{
'name': 'URL for citizen\'s initiatives',
'prop': 'P9732',
},
]
platform_properties = [
{
'name': 'Twitter username',
'prop': 'P2002',
'formatter_url': 'https://twitter.com/$1',
},
{
'name': 'YouTube channel ID',
'prop': 'P2397',
'formatter_url': 'https://www.youtube.com/channel/$1',
},
{
'name': 'Facebook ID',
'prop': 'P2013',
'formatter_url': 'https://www.facebook.com/$1',
},
{
'name': 'Instagram username',
'prop': 'P2003',
'formatter_url': 'https://www.instagram.com/$1/',
},
{
'name': 'GitHub username',
'prop': 'P2037',
'formatter_url': 'https://github.com/$1',
},
{
'name': 'Vimeo identifier',
'prop': 'P4015',
'formatter_url': 'https://vimeo.com/$1',
},
{
'name': 'Flickr user ID',
'prop': 'P3267',
'formatter_url': 'https://www.flickr.com/people/$1',
},
{
'name': 'Pinterest username',
'prop': 'P3836',
'formatter_url': 'https://www.pinterest.com/$1/',
},
{
'name': 'Dailymotion channel ID',
'prop': 'P2942',
'formatter_url': 'https://www.dailymotion.com/$1',
},
{
'name': 'TikTok username',
'prop': 'P7085',
'formatter_url': 'https://www.tiktok.com/@$1',
},
{
'name': 'SlideShare username',
'prop': 'P4016',
'formatter_url': 'https://www.slideshare.net/$1',
},
]
def check_url(url: HttpUrl):
r = requests.head(url, headers={ 'User-Agent': user_agent_external, 'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br' })
if r.status_code >= 400:
return r.status_code
return None
app = FastAPI(
title='Govdirectory Health Check Service',
description='Microservice that validates various external identifiers and URLs associated with a given Wikidata identifier.',
version='0.1.0',
docs_url='/',
)
app.add_middleware(
CORSMiddleware,
allow_origins=cors_origins,
allow_credentials=True,
allow_methods=['GET'],
allow_headers=['*'],
)
class Error(BaseModel):
prop: str
prop_name: str
url: HttpUrl
status_code: int
@app.get('/{qid}', response_model=List[Error])
async def read_item(qid: str = Path(..., title='Wikidata identfier', min_length=2, regex='^Q\d+$')):
r = requests.get('https://www.wikidata.org/w/api.php?action=wbgetentities&props=claims&utf8=1&format=json&ids=' + qid, headers={ 'User-Agent': user_agent_wikimedia })
item_statements = list(r.json()['entities'][qid]['claims'].items())
errors = []
for p in url_properties:
for claim in item_statements:
if not claim[0] == p['prop']:
continue
for statement in claim[1]: # needed in case a prop has several values
url = statement['mainsnak']['datavalue']['value']
negative_status = check_url(url)
if negative_status:
error = {
'prop': p['prop'],
'prop_name': p['name'],
'url': url,
'status_code': negative_status,
}
errors.append(error)
for p in platform_properties:
for claim in item_statements:
if not claim[0] == p['prop']:
continue
for statement in claim[1]: # needed in case a prop has several values
identifier = statement['mainsnak']['datavalue']['value']
url = p['formatter_url'].replace('$1', identifier, 1)
negative_status = check_url(url)
if negative_status:
error = {
'prop': p['prop'],
'prop_name': p['name'],
'url': url,
'status_code': negative_status,
}
errors.append(error)
return errors
| 4,650 | 1,525 |
from ..field import Field
class Password(Field):
min = None
max = None
messages = {
'min': 'Must be at least {min} characters long.',
'max': 'Must have no more than {max} characters.',
}
def is_empty(self, value):
return value is None or value is ''
def validate(self, value):
value = str(value)
if self.min is not None and len(value) < self.min:
raise self.error('min')
if self.max is not None and len(value) > self.max:
raise self.error('max')
return value
| 569 | 171 |
"""
This script adds a specific column to the `bug_type_entropy_projectname_old` tables. The added column contains the nesting depth (>=0) of each line.
"""
import os, sys, psycopg2, ntpath, traceback, subprocess
from pprint import pprint
#--------------------------------------------------------------------------------------------------------------------------
def get_BTE_data(project_name):
BTE_old_table_name = "err_corr_c.bug_type_entropy_" + project_name + "_old"
BTE_old_table_name = BT_old_table_name.replace('-', '_')
BTE_data = []
try:
con = psycopg2.connect(database='saheel', user='saheel')
cur = con.cursor()
cur.execute("SELECT file_name, sha, line_num, parents_all FROM " + BTE_old_table_name)
BTE_data = list(cur.fetchall())
except Exception as e:
print(traceback.print_exc())
print(str(e))
raise e
if con:
con.close()
# Make it a list of lists instead of list of tuples
for index, BTE_tuple in enumerate(BTE_data):
BTE_data[index] = list(BTE_tuple)
return BTE_data
#--------------------------------------------------------------------------------------------------------------------------
def dump_BTE_prime_table(BTE_data, project_name):
BTE_prime_table_name = "err_corr_c.BTE_prime_" + project_name
BTE_prime_table_name = BTE_prime_table_name.replace('-', '_')
try:
con = psycopg2.connect(database='saheel', user='saheel')
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS " + BTE_prime_table_name + " ")
query = """
CREATE TABLE """ + BTE_prime_table_name + """ (file_name varchar(100),
sha varchar(42),
line_num integer,
parents_all varchar(144),
depth integer)
"""
cur.execute(query)
query = "INSERT INTO " + BTE_prime_table_name + " (file_name, sha, line_num, parents_all, depth) VALUES (%s, %s, %s, %s, %s)"
cur.executemany(query, BTE_data)
con.commit()
except Exception as e:
print(traceback.print_exc())
print(str(e))
raise e
if con:
con.close()
#--------------------------------------------------------------------------------------------------------------------------
def join_BTE_old_and_BTE_prime(project_name):
BTE_old_table_name = "err_corr_c.bug_type_entropy_" + project_name + "_old"
BTE_old_table_name = BTE_old_table_name.replace('-', '_')
BTE_prime_table_name = "err_corr_c.BTE_prime_" + project_name
BTE_prime_table_name = BTE_prime_table_name.replace('-', '_')
BTE_merged_table_name = "err_corr_c.bug_type_entropy_" + project_name + "_old_wd"
BTE_merged_table_name = BTE_merged_table_name.replace('-', '_')
try:
con = psycopg2.connect(database='saheel', user='saheel')
cur = con.cursor()
cur.execute("ALTER TABLE " + BTE_old_table_name + " DROP COLUMN IF EXISTS depth")
query = """
SELECT old.*, prime.depth
INTO """ + BTE_merged_table_name + """
FROM """ + BTE_old_table_name + """ as old
JOIN """ + BTE_prime_table_name + """ as prime
ON (old.file_name = prime.file_name AND
old.sha = prime.sha AND
old.line_num = prime.line_num)
"""
cur.execute(query)
con.commit()
cur.execute("DROP TABLE " + BTE_prime_table_name)
cur.execute("DROP TABLE " + BTE_old_table_name)
cur.execute("ALTER TABLE " + BTE_merged_table_name + " RENAME TO " + BTE_old_table_name.split('.')[1])
con.commit()
except Exception as e:
print(traceback.print_exc())
print(str(e))
raise e
if con:
con.close()
#--------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) != 2:
print("\nUsage: python add_depth_to_BTE_table.py <project_name>")
print("\nSample usage: python add_depth_to_BTE_table.py libgit2")
raise ValueError("Incorrect input arguments. Aborting...")
project_name = sys.argv[1]
# depth_dict = get_depth_data(project_name)
# if not depth_dict:
# raise ValueError("`get_depth_data` returned an empty `depth_dict` dictionary. Aborting...")
print("\nNow fetching BTE_old_data...")
# BTE_data is a list of lists; each element list = [file_name, sha, line_num, parents_all]
BTE_data = get_BTE_data(project_name)
if not BTE_data:
raise ValueError("`get_BTE_data` returned an empty `BTE_data` list. Aborting...")
print("\nNow creating BTE_prime_data, i.e., table with `depth` appended to BTE_old_data...")
# We will add `depth` attribute to each row in BTE_data
error_count = 0
for index, BTE_tuple in enumerate(BTE_data):
# `depth` = number of parents as given in `parents_all` column of BTE table
depth = BTE_tuple[3].count('-') + 1
if BTE_tuple[3] == '':
BTE_data[index].append(0)
else:
BTE_data[index].append(depth)
print("\nNow dumping the temporary table BTE_prime. This may take approx. 3-4 min per million LOC...")
dump_BTE_prime_table(BTE_data, project_name)
print("\nNow joining BTE_old and BTE_prime to get desired table. This takes about 2 min per million LOC...")
join_BTE_old_and_BTE_prime(project_name)
#--------------------------------------------------------------------------------------------------------------------------
| 5,933 | 1,807 |
import matplotlib.pyplot as plt
import numpy as np
import cv2
import scipy.spatial
from sklearn.linear_model import RANSACRegressor
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import lib.settings
def dense_map(Pts, n, m, grid):
'''
interpolate lidar depth
:param Pts: num observations of (W, H, D) lidar coordinates (D - depth corrsponding to (W,H) image positions), Pts.shape==(3, num)
:param n: image width
:param m: image height
:param grid: (grid*2+1) is neighborhood size
:return:
'''
ng = 2 * grid + 1
mX = np.zeros((m, n)) + np.float("inf")
mY = np.zeros((m, n)) + np.float("inf")
mD = np.zeros((m, n))
mX[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[0] - np.round(Pts[0])
mY[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[1] - np.round(Pts[1])
mD[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[2]
KmX = np.zeros((ng, ng, m - ng, n - ng))
KmY = np.zeros((ng, ng, m - ng, n - ng))
KmD = np.zeros((ng, ng, m - ng, n - ng))
for i in range(ng):
for j in range(ng):
KmX[i, j] = mX[i: (m - ng + i), j: (n - ng + j)] - grid - 1 + i
KmY[i, j] = mY[i: (m - ng + i), j: (n - ng + j)] - grid - 1 + i
KmD[i, j] = mD[i: (m - ng + i), j: (n - ng + j)]
S = np.zeros_like(KmD[0, 0])
Y = np.zeros_like(KmD[0, 0])
for i in range(ng):
for j in range(ng):
s = 1 / np.sqrt(KmX[i, j] * KmX[i, j] + KmY[i, j] * KmY[i, j])
Y = Y + s * KmD[i, j]
S = S + s
S[S == 0] = 1
out = np.zeros((m, n))
out[grid + 1: -grid, grid + 1: -grid] = Y / S
return out
def project_pointcloud(lidar, vtc, velodyne_to_camera, image_shape, init=None, draw_big_circle=False):
def py_func_project_3D_to_2D(points_3D, P):
# Project on image
points_2D = np.matmul(P, np.vstack((points_3D, np.ones([1, np.shape(points_3D)[1]]))))
# scale projected points
points_2D[0][:] = points_2D[0][:] / points_2D[2][:]
points_2D[1][:] = points_2D[1][:] / points_2D[2][:]
points_2D = points_2D[0:2]
return points_2D.transpose()
def py_func_create_lidar_img(lidar_points_2D, lidar_points, img_width=1248, img_height=375, init=None):
# lidar_points_2d shape (19988, 2), each line is 2d image coordinates
# lidar_points shape (3, 19988)
within_image_boarder_width = np.logical_and(img_width > lidar_points_2D[:, 0], lidar_points_2D[:, 0] >= 0)
within_image_boarder_height = np.logical_and(img_height > lidar_points_2D[:, 1], lidar_points_2D[:, 1] >= 0)
valid_points = np.logical_and(within_image_boarder_width, within_image_boarder_height) #(19988,) boolean array
coordinates = np.where(valid_points)[0] #(4222,) - enteries of valid points in lidar_points
values = lidar_points[:, coordinates] #(3, 4222)
if init is None:
image = -120.0 * np.ones((img_width, img_height, 3)) #image.shape==(1920, 1024, 3)
else:
image = init.transpose((1, 0, 2)) #image.shape==(1920, 1024, 3), zeroes
img_coordinates = lidar_points_2D[coordinates, :].astype(dtype=np.int32) #(4222, 2)
final_coordinates = np.concatenate((img_coordinates, values.transpose()[:, 1][:, np.newaxis]), 1).transpose()
inter_image = dense_map(final_coordinates, img_width, img_height, grid=lib.settings.grid_size)
import matplotlib as mpl
import matplotlib.cm as cm
norm = mpl.colors.Normalize(vmin=0, vmax=80)
cmap = cm.jet
m = cm.ScalarMappable(norm, cmap)
depth_map_color = np.copy(inter_image).reshape(-1)
depth_map_color = m.to_rgba(depth_map_color)
depth_map_color = (255 * depth_map_color).astype(dtype=np.uint8)
depth_map_color = np.array(depth_map_color)[:, :3]
depth_map_color = depth_map_color.reshape((inter_image.shape[0], inter_image.shape[1], 3))
inter_image_colormap = depth_map_color
if not draw_big_circle:
image[img_coordinates[:, 0], img_coordinates[:, 1], :] = values.transpose() # image is (1920, 1024, 3), values.transpose() is (4222, 3)
else:
# Slow elementwise circle drawing through opencv
len = img_coordinates.shape[0]
image = image.transpose([1, 0, 2]).squeeze().copy()
depth_map_color = values.transpose()[:, 1] #values.transpose is (4222, 3), [:, 1] is depth (z)
depth_map_color = m.to_rgba(depth_map_color)
depth_map_color = (255 * depth_map_color).astype(dtype=np.uint8)
for idx in range(len):
x, y = img_coordinates[idx, :]
value = depth_map_color[idx]
# print value
tupel_value = (int(value[0]), int(value[1]), int(value[2]))
# print tupel_value
cv2.circle(image, (x, y), 1, tupel_value, -1) # TODO was 3
return image, inter_image, inter_image_colormap #(1024, 1920, 3)
return image.transpose([1, 0, 2]).squeeze() #(1024, 1920, 3)
def py_func_lidar_projection(lidar_points_3D, vtc, velodyne_to_camera, shape, init=None): # input):
img_width = shape[1]
img_height = shape[0]
# print img_height, img_width
lidar_points_3D = lidar_points_3D[:, 0:4] #(54837, 4)
# Filer away all points behind image plane
min_x = 2.5
valid = lidar_points_3D[:, 0] > min_x
# extend projection matrix to 5d to efficiently parse intensity
lidar_points_3D = lidar_points_3D[np.where(valid)]
lidar_points_3D2 = np.ones((lidar_points_3D.shape[0], lidar_points_3D.shape[1] + 1))
lidar_points_3D2[:, 0:3] = lidar_points_3D[:, 0:3]
lidar_points_3D2[:, 4] = lidar_points_3D[:, 3]
# Extend projection matric to pass trough intensities
velodyne_to_camera2 = np.zeros((5, 5))
velodyne_to_camera2[0:4, 0:4] = velodyne_to_camera
velodyne_to_camera2[4, 4] = 1
lidar_points_2D = py_func_project_3D_to_2D(lidar_points_3D.transpose()[:][0:3], vtc) #lidar_points_2d.shape=(19988, 2)
pts_3D = np.matmul(velodyne_to_camera2, lidar_points_3D2.transpose())
# detelete placeholder 1 axis
pts_3D = np.delete(pts_3D, 3, axis=0) #pts_3d.shape==(4, 19988)
pts_3D_yzi = pts_3D[1:, :] #(3, 19988)
return py_func_create_lidar_img(lidar_points_2D, pts_3D_yzi, img_width=img_width,
img_height=img_height, init=init)
# lidar.shape==(54837, 5)
return py_func_lidar_projection(lidar, vtc, velodyne_to_camera, image_shape, init=init)
def find_missing_points(last, strongest):
last_set = set([tuple(x) for x in last])
strong_set = set([tuple(x) for x in strongest])
remaining_last = np.array([x for x in last_set - strong_set])
remaining_strong = np.array([x for x in strong_set - last_set])
return remaining_last, remaining_strong
def transform_coordinates(xyz):
"""
Takes as input a Pointcloud with xyz coordinates and appends spherical coordinates as columns
:param xyz:
:return: Pointcloud with following columns, r, phi, theta, ring, intensity, x, y, z, intensity, ring
"""
ptsnew = np.hstack((np.zeros_like(xyz), xyz))
r_phi = xyz[:, 0] ** 2 + xyz[:, 1] ** 2
ptsnew[:, 0] = np.sqrt(r_phi + xyz[:, 2] ** 2)
ptsnew[:, 2] = np.pi / 2 - np.arctan2(np.sqrt(r_phi), xyz[:, 2]) # for elevation angle defined from Z-axis down
ptsnew[:, 1] = np.arctan2(xyz[:, 1], xyz[:, 0])
ptsnew[:, 3] = xyz[:, 4]
ptsnew[:, 4] = xyz[:, 3]
return ptsnew
def find_closest_neighbors(x, reference):
"""
This function allows you to match strongest and last echos and reason about scattering distributions.
:param x: Pointcloud which should be matched
:param reference: Reference Pointcloud
:return: returns valid matching indexes
"""
tree = scipy.spatial.KDTree(reference[:, 1:4])
distances, indexes = tree.query(x[:, 1:4], p=2)
print('indexes', indexes)
print('found matches', len(indexes), len(set(indexes)))
# return 0
valid = []
# not matching contains all not explainable scattered mismatching particles
not_matching = []
for idx, i in enumerate(indexes):
delta = reference[i, :] - x[idx, :]
# Laser Ring has to match
if delta[-1] == 0:
# Follows assumption that strongest echo has higher intensity than last and that the range is more distant
# for the last return. The sensor can report 2 strongest echo if strongest and last echo are matching.
# Here those points are not being matched.
if delta[-2] < 0 and delta[0] > 0:
valid.append((i, idx))
else:
not_matching.append((i, idx))
else:
not_matching.append((i, idx))
return valid
def filter(lidar_data, distance):
"""
Takes lidar Pointcloud as ibnput and filters point below distance threshold
:param lidar_data: Input Pointcloud
:param distance: Minimum distance for filtering
:return: Filtered Pointcloud
"""
r = np.sqrt(lidar_data[:, 0] ** 2 + lidar_data[:, 1] ** 2 + lidar_data[:, 2] ** 2)
true_idx = np.where(r > distance)
lidar_data = lidar_data[true_idx, :]
return lidar_data[0]
def read_split(split):
with open(split, 'r') as f:
entry_ids = f.readlines()
entry_ids = [i.replace('\n', '') for i in entry_ids]
return entry_ids
def filter_below_groundplane(pointcloud, tolerance=1):
valid_loc = (pointcloud[:, 2] < -1.4) & \
(pointcloud[:, 2] > -1.86) & \
(pointcloud[:, 0] > 0) & \
(pointcloud[:, 0] < 40) & \
(pointcloud[:, 1] > -15) & \
(pointcloud[:, 1] < 15)
pc_rect = pointcloud[valid_loc]
print(pc_rect.shape)
if pc_rect.shape[0] <= pc_rect.shape[1]:
w = [0, 0, 1]
h = -1.55
else:
reg = RANSACRegressor().fit(pc_rect[:, [0, 1]], pc_rect[:, 2])
w = np.zeros(3)
w[0] = reg.estimator_.coef_[0]
w[1] = reg.estimator_.coef_[1]
w[2] = 1.0
h = reg.estimator_.intercept_
w = w / np.linalg.norm(w)
print(reg.estimator_.coef_)
print(reg.get_params())
print(w, h)
height_over_ground = np.matmul(pointcloud[:, :3], np.asarray(w))
height_over_ground = height_over_ground.reshape((len(height_over_ground), 1))
above_ground = np.matmul(pointcloud[:, :3], np.asarray(w)) - h > -tolerance
print(above_ground.shape)
return np.hstack((pointcloud[above_ground, :], height_over_ground[above_ground]))
| 10,862 | 4,240 |
from setup import *
import pygame, sys, os
from pygame.locals import *
class Menu( object ):
def __init__( self ):
self.init = pygame.init()
self.font = pygame.font.Font(None, 80)
self.screen = pygame.display.set_mode( [ LARGURA, ALTURA ] )
self.background = pygame.image.load( "image/menu1.png" ).convert()
self.fps = pygame.time.Clock().tick( 60 )
pygame.font.init()
self.collors = { "white": (255, 255, 255), "green": (0, 255, 0) }
self.options = []
self.menuOptionsFonts()
def renderFont( self, nameFont, collor ):
return self.font.render(nameFont, True, collor)
def menuOptionsFonts( self ):
fonts = ["Play", "Settings", "Credits", "Exit"]
posX = 320
posY = 150
selected = 0
collor = "green"
for nameFont in fonts:
self.options.append( { "font" : ( nameFont, self.collors[ collor ] ),
"pos" : ( posX, posY ), "selected": selected } )
posY += 100
# mudar a cor das opcoes
def changeOption( self, index, selected):
if index < len(self.options) and index >= 0:
print self.options[index]["font"], self.options[index]["selected"]
self.options[index]["selected"] = selected
print self.options[index]["font"], self.options[index]["selected"]
def updateScreenOptionsFonts( self ):
for font in self.options:
if font["selected"] == 1:
self.screen.blit( self.renderFont( font[ "font" ][ 0 ], self.collors["white"] ), font[ "pos" ] )
else:
self.screen.blit( self.renderFont( font[ "font" ][ 0 ], font[ "font" ] [ 1 ] ), font[ "pos" ] )
def executeOptions(self, index):
nameFont = self.options[index]["font"][0]
if nameFont == "Play":
return 1
if nameFont == "Settings":
pass
if nameFont == "Credits":
return 2
if nameFont == "Exit":
exit()
def drawAndUpdateMenu( self ):
self.screen.blit( self.background, ( 0, 0 ) )
self.updateScreenOptionsFonts()
pygame.display.update()
| 2,221 | 714 |
import json
from investing_algorithm_framework.app.stateless.action_handlers \
.action_handler_strategy import ActionHandlerStrategy
class CheckOnlineHandler(ActionHandlerStrategy):
MESSAGE = {"message": "online"}
def handle_event(self, payload, algorithm_context):
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps(CheckOnlineHandler.MESSAGE)
}
| 459 | 129 |
# ---------------------------------------------------------------ALL REQUIRED FILES------------------------------------------------------------
from tkinter import *
import tkinter.ttk as ttk
import tkinter.messagebox as msg
import tkinter.filedialog as tf
from ttkthemes import ThemedStyle
from PIL import Image, ImageTk
import random,pickle,os,playsound,datetime
root = Tk()
style = ThemedStyle(root)
root.wm_iconbitmap("data/img/ico/icon.ico")
root.title('Hand Cricket')
# Load the saved theme choice (1 = light, 2 = dark); default to dark when no
# settings file exists yet.  A context manager closes the file handle, which
# the original open() call leaked.
if os.path.isfile('data/files/app_data.p'):
    with open('data/files/app_data.p', 'rb') as f1:
        theme = pickle.load(f1)
else:
    theme = 2
if theme ==2:
    # dark palette
    bg_color='gray10'
    fg_color='dodgerblue'
    root.config(bg='gray10')
    label_bg_color = 'gray20'
    label_fg_color = 'dodgerblue'
elif theme ==1:
    # light palette
    bg_color='white'
    fg_color='dodgerblue'
    root.config(bg='white')
    label_bg_color = 'dodgerblue'
    label_fg_color = 'white'
style.set_theme("vista")
root.geometry('300x520')
root.maxsize(300,518)
# --------------------------------------------------------------------VARIBILES-----------------------------------------------------------------
# n=0
# Per-ball picks for this delivery.
player_run=0
comp_run=0
# Innings totals and wickets for each side.
Total_runs=0
comp_Total_runs=0
player_wicket=0
comp_wicket=0
# Balls remaining in each side's innings (overs * 6).
players_balls=0
comp_balls=0
# Score the second innings has to reach (first innings total + 1).
target=0
Total_overs = 0
Total_wicket =0
# 'p' = player won, 'c' = computer won, 't' = tie, '' = no finished game yet.
who_win = ''
# Ball-by-ball history, keyed by balls remaining: [player_run, comp_run].
player_bat_choice={}
comp_bat_choice={}
# -------------------------------------------------------------------FUNCTIONS------------------------------------------------------------------
def raise_frame(frame):
    """Raise *frame* to the top of the stacked frames (screen switch)."""
    frame.tkraise()
def effect(file):
    """Play the sound effect at path *file*.

    NOTE(review): playsound blocks by default until playback finishes --
    confirm whether that is intended for in-game effects.
    """
    playsound.playsound(file)
def comp_score_board():
    """Refresh the computer's score label and the balls-remaining label."""
    comp_score['text']=f'{comp_Total_runs}/{comp_wicket}'
    balls_remain['text']=f'Balls : {comp_balls}'
def player_score_board():
    """Refresh the player's score label and the balls-remaining label."""
    score['text']=f'{Total_runs}/{player_wicket}'
    balls_remain['text']=f'Balls : {players_balls}'
def overs(o,w):
    """Initialise total overs and wickets from the spinbox strings *o*, *w*.

    A 0 in either field is silently ignored (nothing gets configured).
    Each over is 6 balls for both sides.
    """
    global players_balls, comp_balls
    if int(w) == 0 or int(o) == 0:
        pass
    else:
        global Total_overs, Total_wicket
        Total_overs = int(o)
        Total_wicket = int(w)
        players_balls=Total_overs*6
        comp_balls=Total_overs*6
        balls_remain['text']=f'Balls : {players_balls}'
        over_count['text']=f'Total Overs : {Total_overs}'
def comp_bat():
    """Record both picks for this ball of the computer's innings, keyed by balls remaining."""
    comp_bat_choice[f"{comp_balls}"] = [player_run,comp_run]
def player_bat():
    """Record both picks for this ball of the player's innings, keyed by balls remaining."""
    player_bat_choice[f"{players_balls}"] = [player_run,comp_run]
def player_bat_match_result():
    """Decide the outcome of a match in which the player batted first.

    Called after every ball.  When both innings are over (or the chasing
    computer is all out), or the player's balls ran out while already behind,
    the number buttons are disabled, the result label updated and the
    win/loss sound played.  Sets the module-level ``who_win`` flag used by
    save_game().
    """
    global who_win
    if players_balls==0 and comp_balls==0 or comp_wicket==Total_wicket:
        # Match over -- lock the six number buttons (there is no but5).
        for i in range(0,7):
            if i==5:
                continue
            globals()['but%s'%i].config(state='disabled')
        if Total_runs > comp_Total_runs:
            conc_style.configure('conc.TLabel',background=bg_color,foreground='green')
            concustion_label['text']= 'YOU WIN'
            who_win = 'p'
            # Raw strings: '\s' / '\w' are invalid escape sequences in
            # ordinary literals (DeprecationWarning, future SyntaxError).
            effect(r'data\sound\win.mp3')
        elif Total_runs==comp_Total_runs:
            conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
            concustion_label['text']= 'TIE'
            who_win = 't'
            effect(r"data\sound\loss.mp3")
        else:
            conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
            concustion_label['text']= 'YOU LOSS'
            who_win= 'c'
            effect(r"data\sound\loss.mp3")
    elif players_balls == 0 and Total_runs < comp_Total_runs:
        # Early loss: the computer already passed the target.
        conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
        concustion_label['text']= 'YOU LOSS'
        who_win = 'c'
        for i in range(0,7):
            if i==5:
                continue
            globals()['but%s'%i].config(state='disabled')
        effect(r"data\sound\loss.mp3")
def comp_bat_match_result():
    """Decide the outcome of a match in which the computer batted first.

    Mirror image of player_bat_match_result(): ends the match when both
    innings are done (or the chasing player is all out), or early when the
    player already passed the target.  Sets ``who_win`` for save_game().
    """
    global who_win
    if players_balls==0 and comp_balls==0 or player_wicket==Total_wicket:
        # Match over -- lock the six number buttons (there is no but5).
        for i in range(0,7):
            if i==5:
                continue
            globals()['but%s'%i].config(state='disabled')
        if Total_runs > comp_Total_runs:
            conc_style.configure('conc.TLabel',background=bg_color,foreground='green')
            concustion_label['text']= 'YOU WIN'
            # Raw strings: '\s' / '\w' are invalid escape sequences in
            # ordinary literals (DeprecationWarning, future SyntaxError).
            effect(r'data\sound\win.mp3')
            who_win = 'p'
        elif Total_runs==comp_Total_runs:
            conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
            concustion_label['text']= 'TIE'
            who_win='t'
            effect(r"data\sound\loss.mp3")
        else:
            conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
            concustion_label['text']= 'YOU LOSS'
            who_win='c'
            effect(r"data\sound\loss.mp3")
    elif comp_balls == 0 and Total_runs > comp_Total_runs:
        # Early win: the player already passed the target.
        conc_style.configure('conc.TLabel',background=bg_color,foreground='green')
        concustion_label['text']= 'YOU WIN'
        who_win='p'
        for i in range(0,7):
            if i==5:
                continue
            globals()['but%s'%i].config(state='disabled')
        effect(r'data\sound\win.mp3')
def player_bat_match():
    """Play one ball of a match where the player bats first.

    While the player still has balls/wickets the player bats (equal picks =
    out, otherwise the pick is added to the player's total); once the first
    innings ends the roles flip, the target is set and the computer bats.
    Finishes by delegating the end-of-match check to
    player_bat_match_result().
    """
    global Total_runs,target,player_wicket,players_balls,comp_balls,comp_wicket,comp_Total_runs
    player_select_no['text']=f"{player_run}"
    comp_select_no['text']=f"{comp_run}"
    if players_balls==0 or player_wicket==Total_wicket:
        # Second innings: computer chases the player's total.
        # (Fixed the 'Bowlling' typo so the text matches the 'Bowling : -'
        # label it overwrites.)
        who_ball['text']='Bowling : You'
        who_bat['text']='Batting : Comp'
        players_balls=0
        target = Total_runs+1
        target_label['text']=f'Target : {target}'
        if comp_run == player_run:
            # Matching picks dismiss the batter.
            comp_wicket +=1
            conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
            concustion_label['text']= 'Out'
            comp_balls -= 1
            comp_score_board()
        else:
            comp_Total_runs+=comp_run
            comp_balls -= 1
            comp_score_board()
            conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
            concustion_label['text']= 'Continue'
        comp_bat()
    else:
        # First innings: player bats, computer bowls.
        who_ball['text']='Bowling : Comp'
        who_bat['text']='Batting : You'
        if comp_run == player_run:
            player_wicket +=1
            conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
            concustion_label['text']= 'Out'
            players_balls -= 1
            player_score_board()
        else:
            Total_runs+=player_run
            conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
            concustion_label['text']= 'Continue'
            players_balls -= 1
            player_score_board()
        player_bat()
    player_bat_match_result()
def comp_bat_match():
    """Play one ball of a match where the computer bats first.

    Mirror of player_bat_match(): the computer bats until its innings ends,
    then the target is set and the player chases.  Delegates the
    end-of-match check to comp_bat_match_result().
    """
    global Total_runs,target,player_wicket,players_balls,comp_balls,comp_wicket,comp_Total_runs
    player_select_no['text']=f"{player_run}"
    comp_select_no['text']=f"{comp_run}"
    if comp_balls==0 or comp_wicket==Total_wicket:
        # Second innings: player chases the computer's total.
        # (Fixed the 'Bowlling' typo so the text matches the 'Bowling : -'
        # label it overwrites.)
        who_ball['text']='Bowling : Comp'
        who_bat['text']='Batting : You'
        comp_balls=0
        target = comp_Total_runs+1
        target_label['text']=f'Target : {target}'
        if comp_run == player_run:
            # Matching picks dismiss the batter.
            player_wicket +=1
            conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
            concustion_label['text']= 'Out'
            players_balls -= 1
            player_score_board()
        else:
            Total_runs+=player_run
            conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
            concustion_label['text']= 'Continue'
            players_balls -= 1
            player_score_board()
        player_bat()
    else:
        # First innings: computer bats, player bowls.
        who_bat['text']='Batting : Comp'
        who_ball['text']='Bowling : You'
        if comp_run == player_run:
            comp_wicket +=1
            conc_style.configure('conc.TLabel',background=bg_color,foreground='red')
            concustion_label['text']= 'Out'
            comp_balls -= 1
            comp_score_board()
            # effect(out)
        else:
            comp_Total_runs+=comp_run
            conc_style.configure('conc.TLabel',background=bg_color,foreground='gray40')
            concustion_label['text']= 'Continue'
            comp_balls -= 1
            comp_score_board()
        comp_bat()
    comp_bat_match_result()
def comp_select():
    """Pick the computer's number for this ball.

    The tuple repeats 3, 4 and 6, so the computer is biased toward the
    higher scores.
    """
    global comp_run
    weighted_choices = (0, 1, 2, 3, 4, 4, 6, 3, 6)
    comp_run = random.choice(weighted_choices)
def add_runs(run):
    """Handle a number-button press: *run* is the player's pick for this ball.

    Draws the computer's pick and routes the ball to the right innings
    driver.  First_to 'ba' means the player chose to bowl (computer bats
    first); 'b' means the player bats first.  match_is_of records which
    driver ran, for the save-game report.
    """
    global player_run
    global player_wicket,match_is_of
    player_run = run
    comp_select()
    if First_to.get()=='ba':
        comp_bat_match()
        match_is_of=1
    elif First_to.get()=='b':
        player_bat_match()
        match_is_of=2
def coin_toss(select):
    """Flip the coin; *select* is the player's call ('h' or 't').

    Also locks in the over/wicket spinbox values.  Winning the toss opens
    the bat/bowl choice screen (frame 3); losing jumps straight into the
    match with the computer batting first.
    """
    # Raw string: '\c' is an invalid escape sequence in a plain literal.
    effect(r'data\sound\coinflip.mp3')
    overs(over.get(),wicket.get())
    coin_face = random.choice(('h','t'))
    if select== coin_face:
        raise_frame(root_frame3)
    else:
        raise_frame(root_frame2)
        First_to.set('ba')
def quitapp():
    """Close the main window, ending the application."""
    root.destroy()
def newgame():
    """Reset all match state and widgets, then return to the setup screen.

    BUG FIX: ``comp_run`` and ``who_win`` were assigned below but missing
    from the ``global`` declarations, so the module-level values -- used by
    save_game() and the score labels -- were never actually reset.
    """
    global Total_runs,target,player_wicket,players_balls,comp_balls,comp_wicket,comp_Total_runs,Total_overs,Total_wicket
    global player_run, comp_run, who_win
    player_run=0
    comp_run=0
    player_wicket=0
    comp_wicket=0
    Total_runs=0
    comp_Total_runs=0
    players_balls=0
    comp_balls=0
    target=0
    Total_overs = 0
    Total_wicket =0
    who_win=''
    # Re-enable the six number buttons (there is no but5).
    for i in range(0,7):
        if i==5:
            continue
        globals()['but%s'%i].config(state='normal')
    raise_frame(root_frame1)
    balls_remain['text']=f'Balls : {comp_balls}'
    comp_score['text']=f'{comp_Total_runs}/{comp_wicket}'
    conc_style.configure('conc.TLabel',background=bg_color,foreground='white')
    concustion_label['text']= f'-'
    player_select_no['text']=f"{player_run}"
    comp_select_no['text']=f"{comp_run}"
    score['text']=f'{Total_runs}/{player_wicket}'
    target_label['text']=f'Target : {target}'
    comp_bat_choice.clear()
    player_bat_choice.clear()
def save_game():
    """Write a ball-by-ball report of the last finished match to a text file.

    Refuses (with a warning dialog) when no match has been completed yet
    (who_win is still '').  match_is_of decides which side's innings is
    printed first.
    """
    if who_win == '':
        msg.showwarning("Warning", 'You have not Played a Game or\nYou have not Completed Your Game\nPlease do it First then only\nYou can save a Game File ')
    else:
        name = tf.asksaveasfilename(defaultextension=".txt",
                            filetypes=[("Text files",".txt"),
                                        ("Word files",".doc")],
                            initialdir="dir",
                            initialfile='game.txt',
                            title="Save as")
        if name != '':
            with open(name,'w') as file:
                # Header: date, time, result, match parameters.
                file.write(f'{datetime.datetime.now().strftime("%B %d, %Y")}\n')
                file.write(f'{datetime.datetime.now().strftime("%H:%M:%S")}\n\n')
                if who_win == 'p':
                    file.write("PLAYER WINS")
                elif who_win == 'c':
                    file.write("COMP WINS")
                elif who_win == 't':
                    file.write("MATCH TIE")
                file.write(f'\nTotal Over : {Total_overs}\t\tTotal Balls : {Total_overs*6}\nTotal Wicket : {Total_wicket}\tTarget : {target}\n\n\n')
                # match_is_of == 1: computer batted first; == 2: player did.
                if match_is_of == 1:
                    file.write('First Inning\nBAT : comp , BALL : player\n')
                    file.write(f'Score : {comp_Total_runs}/{comp_wicket}\n\n')
                    for k,v in comp_bat_choice.items():
                        file.write(f'player choics : {v[0]} , comp choics : {v[1]} balls remain : {k}\n')
                    file.write('\n\nSecond Inning\nBAT : player , BALL : comp\n')
                    file.write(f'Score : {Total_runs}/{player_wicket}\n\n')
                    for k,v in player_bat_choice.items():
                        file.write(f'player choics : {v[0]} , comp choics : {v[1]} balls remain : {k}\n')
                elif match_is_of == 2:
                    file.write('First Inning\nBAT : player , BALL : comp\n')
                    file.write(f'Score : {Total_runs}/{player_wicket}\n\n')
                    for k,v in player_bat_choice.items():
                        file.write(f'player choics : {v[0]} , comp choics : {v[1]} balls remain : {k}\n')
                    file.write('\n\nSecond Inning\nBAT : comp , BALL : player\n')
                    file.write(f'Score : {comp_Total_runs}/{comp_wicket}\n\n')
                    for k,v in comp_bat_choice.items():
                        file.write(f'player choics : {v[0]} , comp choics : {v[1]} balls remain : {k}\n')
        else:
            msg.showwarning("Warn",'You have Not Select or Set the Game File\nSo Game file is Not Save')
# ------------------------------------------------------------FRAMES AND MAIN PROGRAM-----------------------------------------------------------
# ----------------------------------------------------------------------FRAME1------------------------------------------------------------------
# Three stacked frames share grid cell (0,0); raise_frame() switches screens:
# frame1 = match setup + toss, frame3 = bat/bowl choice, frame2 = the match.
root_frame1=Frame(root,bg=bg_color)
root_frame2=Frame(root,bg=bg_color)
root_frame3=Frame(root,bg=bg_color)
for frame in (root_frame1,root_frame2,root_frame3):
    frame.grid(row=0,column=0,sticky='news')
raise_frame(root_frame1)
root_frame1_label_style=ttk.Style()
root_frame1_label_style.configure('TLabel',background=bg_color,foreground=fg_color)
over_select_label=ttk.Label(root_frame1,text='Select No. of Overs',font="Helvetica 15 bold",style='TLabel')
over_select_label.config(anchor=CENTER)
over_select_label.pack(padx=(23,0),pady=(20,0))
over=StringVar()
over.set('0')
over_select= ttk.Spinbox(root_frame1,from_=1,to=50,font='Helvetica 15 bold',textvariable=over)
over_select.pack(pady=8,padx=(23,0))
wicket=StringVar()
wicket.set('0')
player_select_label=ttk.Label(root_frame1,text='Select No. of Players',font="Helvetica 15 bold",style='TLabel')
player_select_label.config(anchor=CENTER)
player_select_label.pack(padx=(23,0))
no_of_players=ttk.Spinbox(root_frame1,from_=1,to=50,font='Helvetica 15 bold',textvariable=wicket)
no_of_players.pack(pady=8,padx=(23,0))
style_checkbutton=ttk.Style()
# NOTE(review): 'hight' below looks like a typo for 'height'; ttk.Style
# accepts unknown options silently, so it currently has no effect -- confirm.
style_checkbutton.configure('TCheckbutton',width=10,hight=100,background=bg_color,foreground=fg_color,font='Helvetica 15 bold')
toss_label=ttk.Label(root_frame1,text='Select the Face',font='Helvetica 15 bold',style='TLabel')
toss_label.pack(pady=(10,5))
toss=StringVar()
head=ttk.Checkbutton(root_frame1,text='HEADS',variable=toss,onvalue='h',style='TCheckbutton')
tails=ttk.Checkbutton(root_frame1,text='TAILS',variable=toss,onvalue='t',style='TCheckbutton')
head.pack()
tails.pack()
over_selected=ttk.Button(root_frame1,text='Toss',command=lambda : coin_toss(toss.get()))
over_selected.pack(pady=15,padx=(23,0))
# ----------------------------------------------------------------------FRAME3------------------------------------------------------------------
# Shown only when the player wins the toss: choose to bat ('b') or bowl ('ba').
First_to=StringVar()
label1=Label(root_frame3,text='YOU WIN THE TOSS',background=bg_color,foreground=fg_color,font='Helvetica 15 bold')
label1.pack(padx=(20,0))
bat=ttk.Checkbutton(root_frame3,text='BAT',variable=First_to,onvalue='b',style='TCheckbutton')
ball=ttk.Checkbutton(root_frame3,text='BALL',variable=First_to,onvalue='ba',style='TCheckbutton')
bat.pack(pady=5,padx=(52,0))
ball.pack(pady=5,padx=(52,0))
buttton_of_match=ttk.Button(root_frame3,text="Start",command=lambda : raise_frame(root_frame2))
buttton_of_match.pack(pady=10)
# ----------------------------------------------------------------------FRAME2------------------------------------------------------------------
# Match screen: current picks, result label, number buttons, scoreboard.
selected_no_frame=Frame(root_frame2,bg=bg_color)
selected_no_frame.pack()
player_select_no_label=ttk.Label(selected_no_frame,text=' You Select ',font="none 10 bold",style='TLabel')
player_select_no_label.grid(row=0,column=0,padx=(15,5),pady=5)
comp_select_no_label=ttk.Label(selected_no_frame,text='Comp Select',font="none 10 bold",style='TLabel')
comp_select_no_label.grid(row=0,column=1,padx=(40,0),pady=5)
player_select_no=ttk.Label(selected_no_frame,text='-',font='Helvetica 30 bold',style='TLabel')
comp_select_no=ttk.Label(selected_no_frame,text='-',font='Helvetica 30 bold',style='TLabel')
player_select_no.grid(row=1,column=0,padx=(15,5),pady=(5,2))
comp_select_no.grid(row=1,column=1,padx=(40,0),pady=(5,2))
conc_frame=Frame(root_frame2,bg=bg_color, relief=SUNKEN)
conc_frame.pack()
conc_style=ttk.Style()
conc_style.configure('conc.TLabel',background=bg_color,foreground='white')
concustion_label=ttk.Label(conc_frame,text='-',font='Helvetica 15 bold',style='conc.TLabel')
concustion_label.pack(padx=(31,10),pady=(0,15))
button_frame=Frame(root_frame2,bg=bg_color)
button_frame.pack(pady=20)
# Load the hand images; 5 is skipped (no img5/but5 in this game).
for i in range(0,7):
    if i==5:
        continue
    else:
        globals()['img%s'%i]= ImageTk.PhotoImage(Image.open(f"data/img/hand_numbers/img{i}.png"))
but0=Button(button_frame,text=i,image=img0,borderwidth=2,command= lambda : add_runs(0) )
but1=Button(button_frame,text=i,image=img1,borderwidth=2,command= lambda : add_runs(1) )
but2=Button(button_frame,text=i,image=img2,borderwidth=2,command= lambda : add_runs(2) )
but3=Button(button_frame,text=i,image=img3,borderwidth=2,command= lambda : add_runs(3) )
but4=Button(button_frame,text=i,image=img4,borderwidth=2,command= lambda : add_runs(4) )
but6=Button(button_frame,text=i,image=img6,borderwidth=2,command= lambda : add_runs(6) )
but0.grid(row=0,column=0,padx=(25,6),pady=5)
but1.grid(row=0,column=1,padx=(4,0),pady=5)
but2.grid(row=0,column=2,padx=(10,0),pady=5)
but3.grid(row=1,column=0,padx=(25,6),pady=5)
but4.grid(row=1,column=1,padx=(4,0),pady=5)
but6.grid(row=1,column=2,padx=(10,0),pady=5)
scrore_frame=Frame(root_frame2,bg=bg_color)
scrore_frame.pack(pady=10)
score_name_label=ttk.Label(scrore_frame,text='Your Score : ',font='Helvetica 20 bold')
score_name_label.grid(row=2,column=0,sticky=W,pady=(3,0),padx=(8,0))
score=ttk.Label(scrore_frame,text=f'{Total_runs}/{player_wicket}',font='Helvetica 20 bold')
score.grid(row=2,column=1,sticky=W,pady=(3,0))
comp_score_name_label=ttk.Label(scrore_frame,text='Comp Score : ',font='Helvetica 20 bold')
comp_score_name_label.grid(row=3,column=0,sticky=W,pady=(3,0),padx=(8,0))
comp_score=ttk.Label(scrore_frame,text=f'{comp_Total_runs}/{comp_wicket}',font='Helvetica 20 bold')
comp_score.grid(row=3,column=1,sticky=W,pady=(3,0))
over_count=ttk.Label(scrore_frame,text='Over : 3',font='Helvetica 13 bold')
over_count.grid(row=4,column=0,sticky=W,padx=9)
balls_remain=ttk.Label(scrore_frame,text='Balls : 0',font='Helvetica 13 bold')
balls_remain.grid(row=4,column=1,sticky=W,padx=0)
target_label=ttk.Label(scrore_frame,text=f'Target : {target}',font='Helvetica 13 bold')
target_label.grid(row=5,column=0,sticky=W,padx=8)
who_bat=ttk.Label(scrore_frame,text='Batting : -',font='Helvetica 10 ')
who_ball=ttk.Label(scrore_frame,text='Bowling : -' ,font='Helvetica 10 ')
who_bat.grid(row=6,column=0,sticky=W,padx=(10,0))
who_ball.grid(row=7,column=0,sticky=W,padx=(10,0))
# --------------------------------------------------------------------MENU----------------------------------------------------------------------
mainmenu = Menu(root, activebackground=label_bg_color)
root.config(menu=mainmenu)
m1 = Menu(mainmenu, tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m1.add_command(label='New Game',command=newgame)
m1.add_command(label='Save Game',command=save_game)
m1.add_separator()
m1.add_command(label='Exit',command=quitapp)
mainmenu.add_cascade(label='Menu', menu=m1)
def temp_light():
    """Remember the light theme; applied on the next start."""
    global theme
    theme=1
    msg.showinfo("RESTART", 'Please Restart the application for apply the Theme')
def temp_dark():
    """Remember the dark theme; applied on the next start."""
    global theme
    theme=2
    msg.showinfo("RESTART", 'Please Restart the application for apply the Theme')
# Settings menu: theme chooser plus help/about dialogs.
m2 = Menu(mainmenu, tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m2_sub = Menu(m2,tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m2_sub.add_command(label='Dark', command=temp_dark)
m2_sub.add_command(label='Light', command=temp_light)
m2.add_cascade(label='Theme',menu=m2_sub)
m2.add_command(label='Help', command=lambda: msg.showinfo('Help', 'We will help you soon'))
m2.add_command(label='More About', command=lambda: msg.showinfo('About', 'This GUI is created by AKG007\n Made in India'))
mainmenu.add_cascade(label='Settings', menu=m2)
root.mainloop()
# Persist the theme choice after the window closes; the context manager
# closes the file even if pickling fails (replacing open()/close()).
with open('data/files/app_data.p','wb') as f1:
    pickle.dump(theme,f1)
#!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-6-19 下午8:44
""" | 108 | 63 |
import sys #Fornece funções e variáveis para manipular partes do ambiente de tempo de execução do Python
from time import sleep
import pygame
from settings import Settings
from game_stats import GameStats
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to key presses (arrow keys or WASD move, space fires,
    ESC quits, 'p' starts a game from the start/game-over screens)."""
    if event.key == pygame.K_RIGHT or event.key == pygame.K_d:
        # Move the ship to the right
        ship.moving_right = True
    elif event.key == pygame.K_LEFT or event.key == pygame.K_a:
        # Move the ship to the left
        ship.moving_left = True
    elif event.key == pygame.K_UP or event.key == pygame.K_w:
        # Move the ship up
        ship.moving_top = True
    elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
        # Move the ship down
        ship.moving_bottom = True
    elif event.key == pygame.K_SPACE:
        # Create a new bullet and add it to the bullets group
        fire_bullet(ai_settings, screen, ship, bullets)
    elif event.key == pygame.K_ESCAPE:
        # The game ends when the player presses Esc
        sys.exit()
    elif event.key == pygame.K_p and (stats.game_start or stats.game_over):
        start_game(ai_settings, screen, stats, sb, ship, aliens, bullets)
        ai_settings.initialize_dynamic_settings()
def check_keyup_events(event, ship):
    """Respond to key releases by clearing the matching movement flag."""
    key = event.key
    if key in (pygame.K_RIGHT, pygame.K_d):
        ship.moving_right = False
    elif key in (pygame.K_LEFT, pygame.K_a):
        ship.moving_left = False
    elif key in (pygame.K_UP, pygame.K_w):
        ship.moving_top = False
    elif key in (pygame.K_DOWN, pygame.K_s):
        ship.moving_bottom = False
def fire_bullet(ai_settings, screen, ship, bullets):
    """Fire a bullet if the per-screen bullet limit has not been reached."""
    # Create a new bullet and add it to the bullets group
    if len(bullets) < ai_settings.bullets_allowed:
        new_bullet = Bullet(ai_settings, screen, ship)
        bullets.add(new_bullet)
        # Load the laser sound once and cache it on the function: the
        # original re-read Sounds/shoot.wav from disk on every single shot.
        if not hasattr(fire_bullet, "_shoot_sound"):
            fire_bullet._shoot_sound = pygame.mixer.Sound('Sounds/shoot.wav')
            fire_bullet._shoot_sound.set_volume(0.1)
        fire_bullet._shoot_sound.play()
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
    """Respond to keyboard and mouse events (main event-loop dispatcher)."""
    for event in pygame.event.get():
        # Player clicked the window close button
        if event.type == pygame.QUIT:
            sys.exit()
        # Key pressed / released
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship)
        # elif event.type == pygame.MOUSEBUTTONDOWN:
        #     mouse_x, mouse_y = pygame.mouse.get_pos()
        #     check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y)
def start_game(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Start a new game when the player clicks Play or presses 'p'."""
    # Hide the mouse cursor
    # pygame.mouse.set_visible(False)
    # Reset the game statistics and show the game screen
    stats.reset_stats()
    stats.game_active = True
    stats.game_start = False
    # Reset the scoreboard images
    sb.prep_score()
    sb.prep_high_score()
    sb.prep_level()
    sb.prep_ships()
    # Empty the alien and bullet groups
    aliens.empty()
    bullets.empty()
    # Create a new fleet and center the ship
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
# def check_play_button(ai_settings, screen, stats, play_button, ship, aliens, bullets, mouse_x, mouse_y):
# """Inicia um novo jogo quando o jogador clicar em play"""
# button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
# if button_clicked and not stats.game_active:
# start_game(ai_settings, screen, stats, ship, aliens, bullets)
def update_start(screen, background_start):
    """Show the friendly start screen."""
    screen.blit(background_start, (0, 0))
    # Update the display
    pygame.display.flip()
def update_menu(screen, background_menu, play_button):
    """Show the menu screen with the Play button."""
    # Screen-transition delay.
    # NOTE(review): sleep(3) blocks the whole event loop for 3 seconds --
    # confirm this freeze is intended.
    sleep(3)
    screen.blit(background_menu, (0, 0))
    play_button.draw_button()
    # Update the display
    pygame.display.flip()
def update_game_over(screen, background_game_over, play_button):
    """Show the game-over screen with the Play button."""
    # Screen-transition delay (blocks the event loop; see update_menu).
    sleep(3)
    screen.blit(background_game_over, (0, 0))
    play_button.draw_button()
    # Update the display
    pygame.display.flip()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button, background):
    """Redraw every game element and flip to the new frame."""
    screen.fill(ai_settings.bg_color)
    screen.blit(background, (0, 0))
    # Draw the score information
    sb.show_score()
    # Redraw all bullets, then the ship, then the fleet
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    # Update the display
    pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Advance every bullet, drop the ones that left the top of the screen,
    then handle bullet/alien collisions."""
    bullets.update()
    # Iterate over a copy: removing from the group while iterating it
    # directly would skip elements.
    for spent_bullet in bullets.copy():
        if spent_bullet.rect.bottom <= 0:
            bullets.remove(spent_bullet)
    check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to bullet/alien collisions: score the hits, and when the
    fleet is wiped out speed the game up, rebuild the fleet and level up."""
    # Remove every bullet and alien that collided
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        # BUG FIX: the original wrote `for aliens in collisions.values()`,
        # rebinding the `aliens` group parameter to a plain list of hit
        # sprites -- the `len(aliens) == 0` check and create_fleet() below
        # then operated on the wrong object.
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_points * len(hit_aliens)
            sb.prep_score()
        check_high_score(stats, sb)
    if len(aliens) == 0:
        # Destroy remaining bullets, speed up the game, build a new fleet
        # and advance the level.
        bullets.empty()
        ai_settings.increase_speed()
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
        # Play the level-up sound
        next_level = pygame.mixer.Sound('Sounds/ufo_lowpitch.wav')
        pygame.mixer.Sound.set_volume(next_level, 0.3)
        next_level.play()
        # Advance the level
        stats.level += 1
        sb.prep_level()
def get_number_aliens_x(ai_settings, alien_width):
    """Return how many aliens fit on one row, keeping one alien-width margin
    on each side and one alien-width gap between neighbours."""
    usable_width = ai_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def get_number_rows(ai_settings, ship_height, alien_height):
    """Return how many alien rows fit on screen, reserving three alien
    heights at the top/bottom margin plus room for the ship."""
    usable_height = ai_settings.screen_height - (3 * alien_height) - ship_height
    return int(usable_height / (2 * alien_height))
def create_aliens(ai_settings, screen, aliens, alien_number, row_number):
    """Create one alien and place it at grid position (alien_number, row_number)."""
    alien = Alien(ai_settings, screen)
    position_aliens(alien, aliens, alien_number, row_number)
def position_aliens(alien, aliens, alien_number, row_number):
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
    """Create a complete fleet of aliens."""
    # Create a sample alien to measure, then compute how many fit per row
    # and how many rows fit; spacing equals one alien width/height.
    alien = Alien(ai_settings, screen)
    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
    number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
    # Build the fleet row by row
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            create_aliens(ai_settings, screen, aliens, alien_number, row_number)
def check_fleet_edges(ai_settings, aliens):
    """Reverse fleet direction as soon as any alien touches a screen edge.

    any() short-circuits exactly like the original loop-and-break, so
    check_edges() stops being called after the first hit.
    """
    if any(alien.check_edges() for alien in aliens.sprites()):
        change_fleet_direction(ai_settings, aliens)
def change_fleet_direction(ai_settings, aliens):
    """Drop the whole fleet by fleet_drop_speed and flip its horizontal
    direction."""
    drop = ai_settings.fleet_drop_speed
    for alien in aliens.sprites():
        alien.rect.y += drop
    ai_settings.fleet_direction = -ai_settings.fleet_direction
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check whether the fleet is at an edge,
    then update the positions of all aliens in the fleet."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    # Check for alien-ship collisions
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
    # Check for aliens that reached the bottom of the screen
    check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to the ship being hit by an alien: lose a ship, apply a
    score penalty, rebuild the fleet -- or end the game on the last ship."""
    if stats.ships_left > 1:
        # Decrement ships_left
        stats.ships_left -= 1
        # Update the scoreboard
        sb.prep_ships()
        # NOTE(review): 36 looks like the full fleet size, making this a
        # penalty proportional to aliens already destroyed -- confirm it
        # matches get_number_aliens_x * get_number_rows for the settings.
        stats.score -= ai_settings.alien_points * (36 - len(aliens))
        sb.prep_score()
        # Empty the alien and bullet groups
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
        # Play the ship-explosion sound
        explotion_sound = pygame.mixer.Sound('Sounds/explosion.wav')
        pygame.mixer.Sound.set_volume(explotion_sound, 0.1)
        explotion_sound.play()
        # Pause briefly
        sleep(0.5)
    else:
        stats.game_active = False
        stats.game_over = True
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check whether any alien reached the bottom of the screen."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat this the same as the ship being hit
            ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
            break
def check_high_score(stats, sb):
    """Promote the current score to high score when it beats the record."""
    new_record = stats.score > stats.high_score
    if new_record:
        stats.high_score = stats.score
        sb.prep_high_score()
| 11,451 | 3,905 |
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
from sonarsensor_service import *
| 98 | 42 |
import numpy as np
from CelestialMechanics.kepler.constants import K
def mu_sun(m2_over_m1: float) -> float:
    """
    mu = k^2 * (1 + m2/m1)

    (The local variable below holds k * sqrt(1 + m2/m1); its square is
    what is returned -- the previous docstring described only the
    un-squared intermediate.)

    :param m2_over_m1: mass ratio m2/m1 of the body to the Sun
    :type m2_over_m1: float
    :return: mu
    :rtype: float
    """
    mu = K * np.sqrt(1. + m2_over_m1)
    return mu * mu
def mu_na(n: float, a: float) -> float:
    """
    mu = n^2 * a^3  (Kepler's third law)

    (The previous docstring read ``mu = n^2 / a^3``, contradicting the
    code; the product form is the correct relation.)

    :param n: mean motion (angular rate; units must be consistent with *a*
        -- the original said "in degrees", verify against callers)
    :type n: float
    :param a: semi-major axis
    :type a: float
    :return: mu
    :rtype: float
    """
    return n * n * a * a * a
def mu_gm1m2(m1: float, m2: float) -> float:
    """
    mu = G (m1 + m2)

    NOTE(review): astropy's G is a Constant, so the result is presumably an
    astropy Quantity rather than a bare float as annotated -- confirm.

    :param m1: mass 1
    :type m1: float
    :param m2: mass 2
    :type m2: float
    :return: mu
    :rtype: float
    """
    from astropy.constants import G
    return G * (m1 + m2)
| 806 | 345 |
#!/usr/bin/python3
import hl_utils
from hl_constants import *
import string
import re
from datetime import datetime
def guthaben():
    """Build the printing-credit ("Druckerguthaben") panel.

    Returns '' on non-CIP hosts; otherwise the credit read from PRINT_LOG,
    colour-coded between 0 and COLOR_BORDER via hl_utils.get_color.
    """
    guthaben = ''
    if hl_utils.is_cip():
        raw = ""
        with open(hl_utils.hlpath(PRINT_LOG)) as f:
            raw = f.read();
        guthaben = "Druckerguthaben: " + raw + " Euro"
        col = hl_utils.get_color(float(raw),0,COLOR_BORDER)
        guthaben = hl_utils.color_panel(guthaben,col)
    return guthaben;
def quota():
    """Return the raw contents of the CIP quota file, or '' on non-CIP hosts.

    (Removed the unused local ``q`` the original initialised and never read.)
    """
    if not hl_utils.is_cip():
        return ''
    with open(hl_utils.hlpath("quota.cip")) as f:
        return f.read()
def vpn():
    """Return the VPN status panel (prefixed with a space) on non-CIP hosts.

    CIP machines have no VPN panel, so '' is returned there; a missing log
    file yields a yellow warning panel instead of crashing.
    (Removed the unused local ``vpn`` the original initialised and never read.)
    """
    if hl_utils.is_cip():
        return ''
    try:
        with open(hl_utils.hlpath(VPN_LOG)) as f:
            return ' ' + f.read()
    except FileNotFoundError:
        return hl_utils.color_panel("NO VPN INFORMATION",YELLOW)
def ip():
    """Return the public-IP panel.

    A '[' in the log marks a bracketed IPv6 address, shown as a green
    'IP6' marker; any read/parse failure yields a yellow 'No Data' panel.
    """
    try:
        with open(hl_utils.hlpath(IP_LOG)) as f:
            tmp = f.read()
            if "[" in tmp:
                tmp = hl_utils.color_panel("Public IP: IP6 ",GREEN)
            tmp = ' '+tmp
            return tmp;
    except Exception:
        return hl_utils.color_panel("Public IP: No Data",YELLOW)
def battery():
    """Return the battery panel on laptops ('' on other machines).

    A missing battery log is rendered as a red error panel rather than
    crashing the status line.
    """
    if hl_utils.is_laptop():
        try:
            with open(hl_utils.hlpath(BATTERY_LOG)) as f:
                tmp = f.read()
                tmp = ' '+tmp
                return tmp;
        except FileNotFoundError as e:
            return hl_utils.color_panel(str(e),RED)
    else:
        return ""
def date():
    """Return the clock panel by shelling out to date(1) with dzen color codes."""
    return hl_utils.shexec("date +' ^fg(#efefef)%H:%M^fg(#909090), %Y-%m-^fg(#efefef)%d'")
def logins():
    """Return the contents of the logins log, or '' if it cannot be read."""
    try:
        with open(hl_utils.hlpath(LOGINS_LOG),'r') as f:
            return f.read()
    except OSError:
        # Narrowed from a bare `except:` -- only I/O failures are expected
        # here, and a bare except would also hide programming errors.
        return ""
def bcw():
    """Return a colour-coded word-count panel from the BC word log.

    Green-to-red between 0 and 7000 words; '' when the log is missing or
    does not contain an integer.
    """
    try:
        with open(hl_utils.hlpath(BC_WORD_LOG),'r') as f:
            tmp = int(f.read())
        string = "{} words".format(tmp)
        return hl_utils.color_panel(string,hl_utils.get_color(tmp,0,7000,reverse=False))
    except (OSError, ValueError):
        # Narrowed from a bare `except:`: OSError for file problems,
        # ValueError for non-numeric log contents.
        return ""
def bwp():
    """Return a colour-coded page-count panel from the BC page log.

    NOTE(review): the colour is always computed from the hard-coded
    fallback ``cur = 29`` even when the log was read successfully -- the
    value from the file never updates ``cur``.  Looks unintended; confirm.
    """
    tmp = ""
    cur = 29
    try:
        with open(hl_utils.hlpath(BC_PAGE_LOG),'r') as f:
            tmp = "{} pages".format(f.read().strip())
    except:
        tmp = "{} pages".format(cur)
    tmp = hl_utils.color_panel(tmp,hl_utils.get_color(cur,0,44,reverse=False))
    return tmp
def countdown():
    """Return a panel counting down to 2018-07-23 12:00 (local time).

    Colour fades over the last 180 days; after the deadline a fixed red
    farewell message is shown.
    """
    delta = datetime(year=2018,month=7,day=23,hour=12) - datetime.now()
    if delta.total_seconds() < 0:
        return hl_utils.color_panel('Have a nice life without me fuckers.',RED)
    tmp = "{} days {} hours remaining".format(delta.days,int(delta.seconds/60/60))
    tmp = hl_utils.color_panel(tmp,hl_utils.get_color(delta.days,0,180))
    return tmp
if __name__ == "__main__":
    # Emit all panels as one concatenated status line (no separators/newline).
    print(logins(),ip(),vpn(),guthaben(),battery(),date(),sep='',end='')
| 3,276 | 1,066 |
import datetime
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from vigil_ctf_app.EmailBackEnd import EmailBackEnd
#Authentication views ONLY ONLY
def register(request):
    """Render the sign-up page."""
    return render(request,'authentication/signup.html')
def show_login(request):
    """Render the sign-in page."""
    return render(request,'authentication/signin.html')
def doLogin(request):
    """Authenticate with the e-mail/password from the POST form.

    Admin users (user_type == "1") are redirected to the admin dashboard;
    any failure -- including a successfully authenticated non-admin
    account -- falls back to the login page with an error message.
    """
    if request.method!="POST":
        # Login is only accepted via POST.
        return HttpResponse("<h2>Method Not Allowed</h2>")
    user=EmailBackEnd.authenticate(request,username=request.POST.get("email"),password=request.POST.get("password"))
    if user!=None:
        login(request,user)
        if user.user_type=="1":
            return HttpResponseRedirect('admin')
        # BUG FIX: the original fell through here and returned None, which
        # Django turns into a 500 for any authenticated non-admin user.
        messages.error(request,"Invalid User Details")
        return HttpResponseRedirect("/")
    messages.error(request,"Invalid User Details")
    return HttpResponseRedirect("/")
def GetUserDetails(request):
    """Debug endpoint: show the authenticated user's email and user type."""
    # Bug fix: in Django request.user is never None -- unauthenticated
    # requests carry an AnonymousUser (which has no .email/.user_type), so
    # the old `request.user != None` check always passed and anonymous
    # requests crashed with AttributeError. Test is_authenticated instead.
    if request.user.is_authenticated:
        return HttpResponse("User : "+request.user.email+" usertype : "+str(request.user.user_type))
    else:
        return HttpResponse("Please Login First")
def logout_user(request):
    """End the current session and send the user back to the landing page."""
    logout(request)
    return HttpResponseRedirect("/")
def policy(request):
    """Render the competition rules/policy page."""
    template = 'rules/policy.html'
    return render(request, template)
| 1,368 | 368 |
import os
import pickle
from functools import partial
from itertools import permutations, combinations
import networkx as nx
import numpy as np
from bitstring import BitArray
from collections import Counter
try:
from graph_measures.features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
except ModuleNotFoundError as e:
from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
# Location of this module; BASE_PATH is two directories up, where the
# "motif_variations" pickle directory is expected (see _load_variations_file).
CUR_PATH = os.path.realpath(__file__)
BASE_PATH = os.path.dirname(os.path.dirname(CUR_PATH))
VERBOSE = False  # per-node / per-1000-groups progress logging
DEBUG =False  # enables the scattered debugging branches in this module
SAVE_COUNTED_MOTIFS = False  # track and report double-counted groups (debug aid)
# Specific node groups to trace when DEBUG is on.
interesting_groups = [
    sorted([0, 1, 8, 27])
]
class MotifsNodeCalculator(NodeFeatureCalculator):
    """Per-node motif counts.

    For each node, counts the connected sub-graphs ("motifs") of size
    ``level`` (3 or 4) that the node takes part in.  Candidate sub-groups
    are enumerated with the "Kavosh" algorithm; each group is classified by
    encoding its edge set as a bitmask and looking the mask up in a
    precomputed bitmask -> motif-number table (the "variations" pickle).

    Only change vs. the previous revision: identity comparisons against int
    literals in the DEBUG branches (``n is 1``, ``root is 41``, etc.) were
    replaced with ``==`` -- ``is`` on small ints relies on CPython's int
    caching and raises SyntaxWarning on CPython >= 3.8.
    """

    def __init__(self, *args, level=3, **kwargs):
        super(MotifsNodeCalculator, self).__init__(*args, **kwargs)
        assert level in [3, 4], "Unsupported motif level %d" % (level,)
        self._level = level
        self._node_variations = {}
        self._all_motifs = None
        self._print_name += "_%d" % (self._level,)
        # Work on a copy: _calculate_motif() removes each node once processed.
        self._gnx = self._gnx.copy()
        self._load_variations()
        self._counted_motifs = set()  # Only used if SAVE_COUNTED_MOTIFS is set
        self._double_counter = Counter()

    def is_relevant(self):
        """Motif counting is applicable to any graph."""
        return True

    @classmethod
    def print_name(cls, level=None):
        """Feature display name, optionally suffixed with the motif level."""
        print_name = super(MotifsNodeCalculator, cls).print_name()
        if level is None:
            return print_name
        return "%s_%d" % (print_name, level)
        # name = super(MotifsNodeCalculator, cls).print_name()
        # name.split("_")[0]

    def _load_variations_file(self):
        """Load the pickled bitmask -> motif-number map for this level and
        graph directedness (e.g. "3_directed.pkl", "4_undirected.pkl")."""
        fname = "%d_%sdirected.pkl" % (self._level, "" if self._gnx.is_directed() else "un")
        fpath = os.path.join(BASE_PATH, "motif_variations", fname)
        return pickle.load(open(fpath, "rb"))

    def _load_variations(self):
        self._node_variations = self._load_variations_file()
        self._all_motifs = set(self._node_variations.values())

    # here we pass on the edges of the sub-graph containing only the bunch nodes
    # and calculate the expected index of each edge (with respect to whether the graph is directed on not)
    # the formulas were calculated by common reason
    # combinations index: sum_0_to_n1-1((n - i) - 1) + n2 - n1 - 1
    # permutations index: each set has (n - 1) items, so determining the set is by n1, and inside the set by n2
    def _get_group_number_opt1(self, nbunch):
        """Bitmask of nbunch's induced sub-graph, via an explicit subgraph."""
        subgnx = self._gnx.subgraph(nbunch)
        nodes = {node: i for i, node in enumerate(subgnx)}
        n = len(nodes)
        if subgnx.is_directed():
            def edge_index(n1, n2):
                return n1 * (n - 1) + n2 - (1 * (n2 > n1))
        else:
            def edge_index(n1, n2):
                n1, n2 = min(n1, n2), max(n1, n2)
                return (n1 / 2) * (2 * n - 3 - n1) + n2 - 1
        return sum(2 ** edge_index(nodes[edge[0]], nodes[edge[1]]) for edge in subgnx.edges())

    # passing on all:
    # * undirected graph: combinations [(n*(n-1)/2) combs - handshake lemma]
    # * directed graph: permutations [(n*(n-1) perms - handshake lemma with respect to order]
    # checking whether the edge exist in the graph - and construct a bitmask of the existing edges
    def _get_group_number(self, nbunch):
        """Bitmask (as unsigned int) of which node pairs of nbunch are edges;
        this is the key into _node_variations."""
        func = permutations if self._gnx.is_directed() else combinations
        if DEBUG:
            pass
        return BitArray(self._gnx.has_edge(n1, n2) for n1, n2 in func(nbunch, 2)).uint

    # def _get_motif_sub_tree(self, root, length):
    # implementing the "Kavosh" algorithm for subgroups of length 3
    def _get_motif3_sub_tree(self, root):
        """Yield every size-3 connected group containing ``root`` exactly once."""
        visited_vertices = {root: 0}
        visited_index = 1
        # variation == (1, 1)
        first_neighbors = set(nx.all_neighbors(self._gnx, root))
        # neighbors, visited_neighbors = tee(first_neighbors)
        for n1 in first_neighbors:
            visited_vertices[n1] = visited_index
            visited_index += 1
        for n1 in first_neighbors:
            last_neighbors = set(nx.all_neighbors(self._gnx, n1))
            for n2 in last_neighbors:
                if n2 in visited_vertices:
                    # already-seen neighbor: keep the group only once, from
                    # the lower-indexed side
                    if visited_vertices[n1] < visited_vertices[n2]:
                        yield [root, n1, n2]
                else:
                    visited_vertices[n2] = visited_index
                    visited_index += 1
                    yield [root, n1, n2]
        # variation == (2, 0)
        for n1, n2 in combinations(first_neighbors, 2):
            if (visited_vertices[n1] < visited_vertices[n2]) and \
                    not (self._gnx.has_edge(n1, n2) or self._gnx.has_edge(n2, n1)):
                yield [root, n1, n2]

    # implementing the "Kavosh" algorithm for subgroups of length 4
    def _get_motif4_sub_tree(self, root):
        """Yield every size-4 connected group containing ``root`` exactly once."""
        visited_vertices = {root: 0}
        # visited_index = 1
        # variation == (1, 1, 1)
        neighbors_first_deg = set(nx.all_neighbors(self._gnx, root))
        # neighbors_first_deg, visited_neighbors, len_a = tee(neighbors_first_deg, 3)
        neighbors_first_deg = visited_neighbors = list(neighbors_first_deg)
        for n1 in visited_neighbors:
            visited_vertices[n1] = 1
        for n1, n2, n3 in combinations(neighbors_first_deg, 3):
            group = [root, n1, n2, n3]
            if DEBUG:
                if sorted(group) in interesting_groups:
                    print('An interesting group:', group)
            yield group
        for n1 in neighbors_first_deg:
            if DEBUG:
                pass
            neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
            # neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
            neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
            for n in visited_neighbors:
                if n not in visited_vertices:
                    if DEBUG:
                        if n == 1:
                            hi = 0.5
                    visited_vertices[n] = 2
            for n2 in neighbors_sec_deg:
                for n11 in neighbors_first_deg:
                    if visited_vertices[n2] == 2 and n1 != n11:
                        edge_exists = (self._gnx.has_edge(n2, n11) or self._gnx.has_edge(n11, n2))
                        if (not edge_exists) or (edge_exists and n1 < n11):
                            group = [root, n1, n11, n2]
                            if DEBUG:
                                if sorted(group) in interesting_groups:
                                    print('An interesting group:', group)
                            yield group
            # for n1 in neighbors_first_deg:
            #     if DEBUG:
            #         if root is 41:
            #             print('n1', n1)
            #     neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
            #     # neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
            #     neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
            for comb in combinations(neighbors_sec_deg, 2):
                if DEBUG:
                    if root == 41:
                        hi = 1
                if 2 == visited_vertices[comb[0]] and visited_vertices[comb[1]] == 2:
                    group = [root, n1, comb[0], comb[1]]
                    if DEBUG:
                        if root == 41:
                            print('A 41 group:', group)
                        if sorted(group) in interesting_groups:
                            print('An interesting group:', group)
                    yield group
        for n1 in neighbors_first_deg:
            if DEBUG:
                pass
            neighbors_sec_deg = set(nx.all_neighbors(self._gnx, n1))
            # neighbors_sec_deg, visited_neighbors, len_b = tee(neighbors_sec_deg, 3)
            neighbors_sec_deg = visited_neighbors = list(neighbors_sec_deg)
            for n2 in neighbors_sec_deg:
                if visited_vertices[n2] == 1:
                    continue
                for n3 in set(nx.all_neighbors(self._gnx, n2)):
                    if DEBUG:
                        if root == 0 and n1 == 27 and n2 == 8 and n3 == 1:
                            hi = 1.5
                    if n3 not in visited_vertices:
                        if DEBUG:
                            pass
                        visited_vertices[n3] = 3
                        if visited_vertices[n2] == 2:
                            group = [root, n1, n2, n3]
                            if DEBUG:
                                if sorted(group) in interesting_groups:
                                    print('An interesting group:', group)
                            yield group
                    else:
                        if visited_vertices[n3] == 1:
                            continue
                        if visited_vertices[n3] == 2 and not (self._gnx.has_edge(n1, n3) or self._gnx.has_edge(n3, n1)):
                            group = [root, n1, n2, n3]
                            if DEBUG:
                                if sorted(group) in interesting_groups:
                                    print('An interesting group:', group)
                            yield group
                        elif visited_vertices[n3] == 3 and visited_vertices[n2] == 2:
                            group = [root, n1, n2, n3]
                            if DEBUG:
                                if sorted(group) in interesting_groups:
                                    print('An interesting group:', group)
                            yield group

    def _order_by_degree(self, gnx=None):
        """Nodes sorted by descending total degree (Kavosh processing order)."""
        if gnx is None:
            gnx = self._gnx
        return sorted(gnx, key=lambda n: len(list(nx.all_neighbors(gnx, n))), reverse=True)

    def _calculate_motif(self):
        """Yield (group, group bitmask, motif number) for every sub-group.

        Nodes are processed highest-degree first and removed from the working
        graph afterwards, so each group is enumerated from one root only.
        """
        # consider first calculating the nth neighborhood of a node
        # and then iterate only over the corresponding graph
        motif_func = self._get_motif3_sub_tree if self._level == 3 else self._get_motif4_sub_tree
        sorted_nodes = self._order_by_degree()
        for node in sorted_nodes:
            for group in motif_func(node):
                group_num = self._get_group_number(group)
                motif_num = self._node_variations[group_num]
                yield group, group_num, motif_num
            if VERBOSE:
                self._logger.debug("Finished node: %s" % node)
            self._gnx.remove_node(node)

    def _update_nodes_group(self, group, motif_num):
        """Credit one occurrence of motif_num to every node of the group."""
        for node in group:
            self._features[node][motif_num] += 1

    def _calculate(self, include=None):
        # Keep the original graph aside: _calculate_motif consumes self._gnx.
        m_gnx = self._gnx.copy()
        motif_counter = {motif_number: 0 for motif_number in self._all_motifs}
        self._features = {node: motif_counter.copy() for node in self._gnx}
        for i, (group, group_num, motif_num) in enumerate(self._calculate_motif()):
            if DEBUG:
                if 21 in group and motif_num == 47:
                    print('A 21/47 group:', group, motif_num)
                    pass
                if sorted(group) in interesting_groups:
                    print('An interesting group:', group, motif_num)
            if SAVE_COUNTED_MOTIFS:
                h = hash(frozenset(group))
                # h = frozenset(group)
                if h in self._counted_motifs:
                    print("\033[91m Group {} already counted \033[00m".format(group))
                    self._double_counter[frozenset(group)] += 1
                else:
                    self._counted_motifs.add(h)
            self._update_nodes_group(group, motif_num)
            if (i + 1) % 1000 == 0 and VERBOSE:
                self._logger.debug("Groups: %d" % i)
        # print('Max num of duplicates:', max(self._double_counter.values()))
        # print('Number of motifs counted twice:', len(self._double_counter))
        self._gnx = m_gnx

    def _get_feature(self, element):
        """Per-node feature vector: counts for each motif number, sorted."""
        all_motifs = self._all_motifs.difference(set([None]))
        cur_feature = self._features[element]
        return np.array([cur_feature[motif_num] for motif_num in sorted(all_motifs)])
# consider ignoring node's data
class MotifsEdgeCalculator(MotifsNodeCalculator):
    """Per-edge motif counts for directed graphs.
    Extends the node calculator by mapping each group bitmask to the set of
    edge index-pairs it contains, and accumulating counts per edge.
    """
    def __init__(self, *args, include_nodes=False, **kwargs):
        # include_nodes: also accumulate node-level counts while counting edges.
        self._edge_variations = {}
        self._should_include_nodes = include_nodes
        super(MotifsEdgeCalculator, self).__init__(*args, **kwargs)
    def is_relevant(self):
        # if graph is not directed, there is no use of edge variations
        return self._gnx.is_directed()
    def _calculate_motif_dictionaries(self):
        """Populate _edge_variations: group bitmask -> set of (i, j) position
        pairs (indices into the group) whose edge bit is set."""
        # calculating the node variations
        # NOTE(review): the return value of _load_variations_file() is discarded
        # here, and no code visible in this file calls this method -- confirm
        # that _edge_variations is actually populated before _calculate runs.
        super(MotifsEdgeCalculator, self)._load_variations_file()
        if not self._gnx.is_directed():
            # if graph is not directed, there is no use of edge variations
            return
        motif_edges = list(permutations(range(self._level), 2))
        # level * (level - 1) is number of permutations of size 2
        num_edges = self._level * (self._level - 1)
        for group_num, motif_num in self._node_variations.items():
            bin_repr = BitArray(length=num_edges, int=group_num)
            self._edge_variations[group_num] = set([edge_type for bit, edge_type in zip(bin_repr, motif_edges) if bit])
    # noinspection PyMethodOverriding
    def _calculate(self, include=None):
        """Count, per directed edge, the motifs that edge participates in."""
        for group, group_num, motif_num in self._calculate_motif():
            if self._should_include_nodes:
                self._update_nodes_group(group, motif_num)
            for edge_type in self._edge_variations[group_num]:
                # translate position indices within the group to actual nodes
                edge = tuple(map(lambda idx: group[idx], edge_type))
                if edge not in self._features:
                    self._features[edge] = {motif_number: 0 for motif_number in self._all_motifs}
                self._features[edge][motif_num] += 1
def nth_nodes_motif(motif_level):
    # Factory: a node-motif calculator pre-bound to the given motif level.
    return partial(MotifsNodeCalculator, level=motif_level)
def nth_edges_motif(motif_level):
    """Factory: an edge-motif calculator pre-bound to the given motif level.

    Bug fix: this previously returned ``partial(MotifsNodeCalculator, ...)``
    (copy-paste from nth_nodes_motif), so the edge feature entries never
    actually used MotifsEdgeCalculator.
    """
    return partial(MotifsEdgeCalculator, level=motif_level)
# Node-level motif features exposed by this module, keyed by feature name.
feature_node_entry = {
    "motif3": FeatureMeta(nth_nodes_motif(3), {"m3"}),
    "motif4": FeatureMeta(nth_nodes_motif(4), {"m4"}),
}
# Edge-level motif features, keyed by feature name.
feature_edge_entry = {
    "motif3_edge": FeatureMeta(nth_edges_motif(3), {"me3"}),
    "motif4_edge": FeatureMeta(nth_edges_motif(4), {"me4"}),
}
if __name__ == "__main__":
    # Ad-hoc self-test against the project's test harness.
    from measure_tests.specific_feature_test import test_specific_feature
    # Previous version contained a bug while counting twice sub-groups with double edges
    # test_specific_feature(nth_edges_motif(3), is_max_connected=True)
    test_specific_feature(nth_edges_motif(4), is_max_connected=True)
# def _calculate_motif_dictionaries(self):
# motifs_edges_dict = {}
# motifs_vertices_dict = {}
# motif_edges = list(permutations(range(self._level), 2))
#
# motif_file = pandas.read_csv(self._motif_path(), delimiter="\t")
# if not self._gnx.is_directed():
# motifs_vertices_dict = {BitArray(length=3, int=int(y)).bin: int(x) for i, (x, y) in motif_file.iterrows()}
# else:
# num_edges = len(motif_edges)
# for _, (x, y) in motif_file.iterrows():
# bin_repr = BitArray(length=num_edges, int=int(y))
# motifs_vertices_dict[bin_repr.bin] = int(x)
# motifs_edges_dict[bin_repr.bin] = [edge_type for bit, edge_type in zip(bin_repr, motif_edges) if bit]
#
# return {'v': motifs_vertices_dict, 'e': motifs_edges_dict}
###########################################################################################
# def _calculate(self, include=None):
# all_motifs = set(self._node_variations.values())
# undirected_gnx = self._gnx.to_undirected()
# for node in self._order_by_degree():
# history = set()
# self._features[node] = {motif_number: 0 for motif_number in all_motifs}
# neighbors_gnx = self._gnx.subgraph(self._get_neighborhood(node, self._level, gnx=undirected_gnx))
# for group in self._get_subgroups(node, self._level, gnx=neighbors_gnx):
# group = sorted(group)
# if group in history:
# continue
# history.add(group)
# motif_number = self._get_motif_number(group)
# self._features[node][motif_number] += 1
# self._gnx.remove_node(node)
#
# def _subgroups(self, node, level, gnx=None):
# if gnx is None:
# gnx = self._gnx
# if level == 1:
# return node
#
# def _calculate1(self):
# for node in self._order_by_degree():
# history = {}
# for sub_group in self._subgroups(node, self._level):
# if sub_group in history:
# continue
#
# # this might be more efficient than dijkstra (with cutoff) - a simple BFS
# def _get_neighborhood(self, node, dist, gnx=None):
# dist -= 1
# if gnx is None:
# gnx = self._gnx
# neighborhood = set()
# queue = [(node, 0)]
# while queue:
# cur_node, node_dist = queue.pop(0)
# neighborhood.add(cur_node)
# neighbors = set(nx.all_neighbors(gnx, cur_node)).difference(neighborhood)
# if node_dist >= dist - 1:
# neighborhood.update(neighbors)
# else: # node_dist is lower than (dist - 1)
# queue.extend((n, node_dist + 1) for n in neighbors)
# return neighborhood
#
# # seems more simple - but it's more costly
# def _get_neighborhood_dijkstra(self, node, dist, gnx=None):
# if gnx is None:
# gnx = self._gnx
# return set(nx.single_source_dijkstra_path_length(gnx, node, cutoff=dist))
#
# def _calculate2(self):
# self._undirected_gnx = self._gnx.to_undirected()
# for node in self._order_by_degree(self._undirected_gnx):
# # calculating the nth neighborhood of the node - is working on the neighborhood graph more efficient?
# neighbors_gnx = self._gnx.subgraph(self._get_neighborhood(node, self._level))
# history = {}
# for sub_group in self._subgroups(node, self._level, gnx=neighbors_gnx):
# if sub_group in history:
# continue
# self._gnx.remove_node(node)
# TODO: consider removing
# def _initialize_motif_hist(self):
# length = max(self._node_variations.values()) + 1
# return {n: [0] * length for n in self._gnx}
#
# def _initialize_motif_hist(self):
# node_hist = super(MotifsEdgeCalculator, self)._initialize_motif_hist()
#
# length = max(self._edge_variations.values()) + 1
# edge_hist = {e: [0] * length for e in self._gnx.edges()}
# return {'v': node_hist, 'e': edge_hist}
| 19,223 | 6,424 |
from src import XMLAnalyzerException
import lxml.etree as ET
from src.Structures import *
from src import XMLFilter
from src import XMLUtil
import constants
import re
from src.xml_decoder import html_entitize
def apply_all(xml, post_processes):
    """Run every post-processing step over *xml* in order, feeding each
    step the result of the previous one, and return the final tree."""
    result = xml
    for step in post_processes:
        result = apply(result, step)
    return result
def apply(xml, post_process):
    """Dispatch one post-processing step on *xml*.

    *post_process* is an XML element: its tag selects the operation
    ('filter', 'text_formatting' or 'html_entitize') and its text carries
    the operation's argument.
    """
    if post_process.tag == 'filter':
        return apply_filter(xml, post_processing_string_splitter(post_process.text))
    elif post_process.tag == 'text_formatting':
        if post_process.text == 'compress':
            return XMLUtil.compress_xml(xml)
        elif post_process.text == 'indent':
            return XMLUtil.indent_xml(xml)
        # NOTE(review): any other text_formatting value falls through and
        # returns None -- confirm that is intended.
    elif post_process.tag == 'html_entitize':
        # locate the (single expected) tag named by post_process.text,
        # in any namespace
        tag_to_entitize = xml.find('.//{*}'+post_process.text)
        # NOTE(review): find() returns None when no match exists, which would
        # make len() raise TypeError here -- confirm callers guarantee a match.
        if len(tag_to_entitize) > 1:
            raise XMLAnalyzerException.TooManyChildrenException(tag_to_entitize.tag, [x.tag for x in tag_to_entitize], 1)
        content=ET.tostring(tag_to_entitize[0]).decode()
        # replace the single child element with its entitized serialization
        tag_to_entitize.text = html_entitize(content)
        tag_to_entitize.remove(tag_to_entitize[0]) # remove child
        return xml
    else:
        raise XMLAnalyzerException.InvalidPostProcessTagException(post_process.tag)
def apply_filter(extracted_xml, args, args_stack=None):
    """Apply one <filter> post-process to *extracted_xml* in place.

    Args:
        extracted_xml: the XML tree to filter (returned for chaining).
        args: exactly 4 filter tokens; ``$param('prompt')`` tokens are
            resolved interactively (or from *args_stack*).
        args_stack: optional list of pre-supplied parameter values, consumed
            from the front instead of prompting.

    Raises:
        XMLAnalyzerException.IncorrectArgumentNumberException: if *args*
            does not hold exactly 4 tokens.
    """
    # Bug fix: the previous mutable default (args_stack=[]) was shared
    # between calls, so popped values leaked across invocations.
    if args_stack is None:
        args_stack = []
    if len(args) == 4:
        # readying up arguments
        for i in range(len(args)):
            if re.match(r'^\$param\(.*\)$', args[i]):
                if not args_stack:
                    args[i] = input(args[i][8:-2])
                else:
                    args[i] = args_stack.pop(0)
        ct = makeConditionalTuple(args[0], args[1], args[2], args[3])
    else:
        raise XMLAnalyzerException.IncorrectArgumentNumberException(4, args)
    XMLFilter.filter_xml_tree(ct, extracted_xml)
    return extracted_xml
# split at every space, except if it is inside "$param('<here>') statement
def post_processing_string_splitter(string):
    """Split *string* on spaces, treating each ``$param('...')`` group
    (whose prompt may itself contain spaces) as a single token.

    Bug fixes over the previous revision:
    - the final token was silently dropped when the string did not end
      with a ``$param(...)`` group;
    - a marker was appended right after a closing ``')``, producing
      duplicate split points and empty tokens when a group was followed
      by a space.
    """
    in_param = False
    markers = []  # indices of the space characters to split on
    i = 0
    while i < len(string):
        if not in_param:
            if string[i] == ' ':
                markers.append(i)
            if string[i:i+8] == "$param('":
                i += 8
                in_param = True
            else:
                i += 1
        else:
            if string[i:i+2] == "')":
                # Skip past the closing "')"; a following space (if any) is
                # handled by the normal space branch above.
                i += 2
                in_param = False
            else:
                i += 1
    out = []
    base = 0
    for mark in markers:
        out.append(string[base:mark])
        base = mark+1
    # Emit the trailing token after the last split point.
    if base < len(string):
        out.append(string[base:])
    return out
# todo: move to tests file
# string_splitter("Employee EmployeeIDExternal == $param('please provide the EmployeeIDExternal you want to keep: ')")
# string_splitter("Employee $param('test param with spaces') == $param('please want to keep: ')")
# both passed
| 2,885 | 900 |
"""RF Pulse Simulation Functions.
"""
from sigpy import backend
__all__ = ['abrm', 'abrm_nd', 'abrm_hp']
def abrm(rf, x, balanced=False):
    r"""1D RF pulse simulation, with simultaneous RF + gradient rotations.
    Args:
        rf (array): rf waveform input.
        x (array): spatial locations.
        balanced (bool): toggles application of rewinder.
    Returns:
        2-element tuple containing
        - **a** (*array*): SLR alpha parameter.
        - **b** (*array*): SLR beta parameter.
    References:
        Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
        'Parameter Relations for the Shinnar-LeRoux Selective Excitation
        Pulse Design Algorithm'.
        IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
    """
    device = backend.get_device(rf)
    xp = device.xp
    with device:
        eps = 1e-16  # guards against division by zero in the axis normalization
        # constant gradient whose samples sum to 2*pi over the pulse
        g = xp.ones(xp.size(rf)) * 2 * xp.pi / xp.size(rf)
        a = xp.ones(xp.size(x), dtype=complex)
        b = xp.zeros(xp.size(x), dtype=complex)
        for mm in range(xp.size(rf)):
            om = x * g[mm]
            # rotation angle and (columns of n) unit rotation axis combining
            # the RF sample with the gradient-induced precession
            phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2) + eps
            n = xp.column_stack((xp.real(rf[mm]) / phi,
                                 xp.imag(rf[mm]) / phi,
                                 om / phi))
            # Cayley-Klein parameters of this sample's rotation
            av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
            bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
            # compose with the accumulated rotation
            at = av * a - xp.conj(bv) * b
            bt = bv * a + xp.conj(av) * b
            a = at
            b = bt
        if balanced:  # apply a rewinder
            # z-rotation with minus half of the total gradient area (2*pi)
            g = -2 * xp.pi / 2
            om = x * g
            phi = xp.abs(om) + eps
            nz = om / phi
            av = xp.cos(phi / 2) - 1j * nz * xp.sin(phi / 2)
            a = av * a
            b = xp.conj(av) * b
    return a, b
def abrm_nd(rf, x, g):
    r"""N-dim RF pulse simulation
    Assumes that x has inverse spatial units of g, and g has gamma*dt applied.
    Assumes dimensions x = [...,Ndim], g = [Nt,Ndim].
    Args:
        rf (array): rf waveform input.
        x (array): spatial locations.
        g (array): gradient array.
    Returns:
        2-element tuple containing
        - **a** (*array*): SLR alpha parameter.
        - **b** (*array*): SLR beta parameter.
    References:
        Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
        'Parameter Relations for the Shinnar-LeRoux Selective Excitation
        Pulse Design Algorithm'.
        IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
    """
    device = backend.get_device(rf)
    xp = device.xp
    with device:
        eps = 1e-16  # guards against division by zero in the axis normalization
        a = xp.ones(xp.shape(x)[0], dtype=complex)
        b = xp.zeros(xp.shape(x)[0], dtype=complex)
        for mm in range(xp.size(rf)):
            # gradient row for time sample mm, i.e. g is indexed [time, dim]
            om = x @ g[mm, :]
            phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2)
            # unit rotation axis combining the RF sample with the
            # gradient-induced precession
            n = xp.column_stack((xp.real(rf[mm]) / (phi + eps),
                                 xp.imag(rf[mm]) / (phi + eps),
                                 om / (phi + eps)))
            # Cayley-Klein parameters of this sample's rotation, composed
            # with the accumulated rotation
            av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
            bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
            at = av * a - xp.conj(bv) * b
            bt = bv * a + xp.conj(av) * b
            a = at
            b = bt
    return a, b
def abrm_hp(rf, gamgdt, xx, dom0dt=0):
    r"""1D RF pulse simulation, with non-simultaneous RF + gradient rotations.
    Args:
        rf (array): rf pulse samples in radians.
        gamgdt (array): gradient samples in radians/(units of xx).
        xx (array): spatial locations.
        dom0dt (float): off-resonance phase in radians.
    Returns:
        2-element tuple containing
        - **a** (*array*): SLR alpha parameter.
        - **b** (*array*): SLR beta parameter.
    References:
        Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
        'Parameter Relations for the Shinnar-LeRoux Selective Excitation
        Pulse Design Algorithm'.
        IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
    """
    device = backend.get_device(rf)
    xp = device.xp
    with device:
        Ns = xp.shape(xx)
        Ns = Ns[0]  # Ns: # of spatial locs
        Nt = xp.shape(gamgdt)
        Nt = Nt[0]  # Nt: # time points
        a = xp.ones((Ns,))
        b = xp.zeros((Ns,))
        for ii in xp.arange(Nt):
            # apply phase accrual from the gradient and off-resonance
            z = xp.exp(-1j * (xx * gamgdt[ii, ] + dom0dt))
            b = b * z
            # apply rf (hard-pulse rotation for this sample)
            C = xp.cos(xp.abs(rf[ii]) / 2)
            S = 1j * xp.exp(1j * xp.angle(rf[ii])) * xp.sin(xp.abs(rf[ii]) / 2)
            at = a * C - b * xp.conj(S)
            bt = a * S + b * C
            a = at
            b = bt
        # multiply a and b by half of the total accrued phase
        # NOTE(review): presumably this moves the phase reference to the
        # pulse midpoint -- confirm against the SLR convention.
        z = xp.exp(1j / 2 * (xx * xp.sum(gamgdt, axis=0) + Nt * dom0dt))
        a = a * z
        b = b * z
    return a, b
| 4,954 | 1,887 |
#!/usr/bin/env python
# apachelint - simple tool to cleanup apache conf files
# USAGE: apachelint [conffile]
# -*-Python-*-
import sys
import re
# Read the conf file named on the command line, normalize whitespace,
# compress runs of blank lines, and re-indent by <Section> nesting depth.
filename = sys.argv[1]

indentlevel = 0
indentstep = 4
prevlineblank = False

with open(filename) as f:
    for line in f:
        # strip leading & trailing space / line ending
        line = re.sub(r'\s+$', '', line)
        line = re.sub(r'^\s+', '', line)

        # compress blank lines
        if line == "":
            if prevlineblank:
                # Bug fix: the original used a bare `next`, which is a
                # no-op expression statement -- repeated blank lines were
                # printed anyway. `continue` actually skips them.
                continue
            prevlineblank = True
        else:
            prevlineblank = False

        # a closing tag (</...>) dedents before it is printed
        if re.search('</', line):
            indentlevel -= 1

        indent = ' ' * indentlevel * indentstep
        # print() call works on both Python 2 and 3 (was a py2 print statement)
        print(indent + line)

        # an opening tag (<...> that is not </...>) indents following lines
        if re.search('<(?!/)', line):
            indentlevel += 1
"""Vectorized math formulae"""
from numba import vectorize, int64, float64
from math import lgamma, exp, isnan, log
__all__ = ["binom", "xlogy"]
@vectorize([float64(int64, int64)], fastmath=True)
def binom(n, k):
    """Obtain the binomial coefficient, using a definition that is mathematically
    equivalent but numerically stable to avoid arithmetic overflow.
    The result of this method is "n choose k", the number of ways choose an
    (unordered) subset of k elements from a fixed set of n elements.
    Source: https://en.wikipedia.org/wiki/Binomial_coefficient
    """
    # lgamma keeps intermediates small; note the result is a float
    # approximation, not an exact integer, for large n.
    return exp(lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1))
@vectorize([float64(float64, float64)])
def xlogy(x, y):
    """Compute ``x*log(y)`` so that the result is 0 if ``x = 0``,
    even if y is negative
    Source: ``scipy.special.xlogy``
    """
    # Equivalent branch, inverted: fall through to 0 only when x == 0
    # and y is a real number.
    if x != 0 or isnan(y):
        return x * log(y)
    return 0
| 931 | 325 |
# @Copyright [2021] [Yash Bajaj]
import fileinput as fi
# This module replaces the word <|SPACE|> with a new line (code line 18)
def writer(path="c:/PycharmProjects/copy_data_from_1_file_to_another/input.txt", data=None):
    """Write *data* to *path*; prompt the user for the text when *data* is None.

    The defaults preserve the original hard-coded path and interactive
    prompt, so existing zero-argument calls behave exactly as before.
    """
    if data is None:
        data = input("Whatever you will write will be present in input.txt - ")
    # handle renamed: it previously shadowed the function's own name
    with open(path, "w") as out:
        out.write(data)
# This is a input function whatever you will write that will come in input.txt
def copy(src="c:/PycharmProjects/copy_data_from_1_file_to_another/input.txt",
         dst="c:/PycharmProjects/copy_data_from_1_file_to_another/copyied.txt"):
    """Copy the entire text content of *src* into *dst* (overwriting *dst*).

    The defaults preserve the original hard-coded paths, so existing
    zero-argument calls keep working.
    """
    with open(src, "r") as fin, open(dst, "w") as fout:
        fout.write(fin.read())
# This is a function to copy data from input.txt and paste it in copyied.txt
def editer(path="c:/PycharmProjects/copy_data_from_1_file_to_another/copyied.txt"):
    """Replace every space in the file at *path* with a newline, in place.

    fileinput's inplace mode redirects print() into the file and leaves a
    backup of the original next to it with a ``.bak`` extension. The default
    path preserves the original hard-coded target. (Note: the code replaces
    the literal space character, not the token "<|SPACE|>".)
    """
    with fi.FileInput(path, inplace=True, backup=".bak") as r:
        for line in r:
            print(line.replace(' ', '\n'), end='')
# This function replaces <|SPACE|> with new line this will also create one backup file with extention .bak
if __name__ == '__main__':
    # Prompt for text, copy it into copyied.txt, then split it onto lines.
    writer()
    copy()
    editer()
# This will run the code
| 1,171 | 391 |