| id | content |
|---|---|
11467418
|
import sys
import re
import yaml
import random
from glob import glob
from collections import defaultdict
from fractions import Fraction
import argparse
# Bresenham's line algorithm from Rosetta Code
# https://rosettacode.org/wiki/Bitmap/Bresenham%27s_line_algorithm#Not_relying_on_floats
def line(xy0, xy1):
y0, x0 = xy0
y1, x1 = xy1
if x0==x1 and y0==y1:
return [] # [ (x1,y1),]
res = []
rev = reversed
if abs(y1 - y0) <= abs(x1 - x0):
x0, y0, x1, y1 = y0, x0, y1, x1
rev = lambda x: x
if x1 < x0:
x0, y0, x1, y1 = x1, y1, x0, y0
leny = abs(y1 - y0)
return [tuple(rev((round(Fraction(i, leny) * (x1 - x0)) + x0, (1 if y1 > y0 else -1) * i + y0))) for i in range(leny + 1)]
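# Illustrative usage (a hand-checked sketch, not from the original source): points are
# passed and returned as (y, x) tuples, matching the unpacking above, e.g.
#   line((0, 0), (3, 0)) -> [(0, 0), (1, 0), (2, 0), (3, 0)]
#   line((0, 0), (0, 0)) -> []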
def flood_shape(cells, start):
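# Flood fill: starting from `start`, collect the 4-connected component of cells that
# are present in `cells` and return it as a set.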
seen = set()
active = [start,]
while active:
cur = active.pop()
seen.add(cur)
for dx,dy in ((-1,0),(1,0),(0,-1),(0,1)):
new_cell = (cur[0]+dx,cur[1]+dy)
if new_cell in cells and new_cell not in seen:
active.append( (cur[0]+dx,cur[1]+dy) )
return seen
def try_shift(left, right, shift=1):
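# Returns True if moving every cell of `left` right by `shift` columns would not
# overlap any cell of `right`.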
shifted = set([(x+shift,y) for x,y in left])
return len(shifted&right)==0
def best_shift(left, right, padding, max_shift):
working_shifts = [0, ]
for p in range(1,max_shift+1):
if try_shift(left, right, shift=p):
working_shifts.append( p )
else:
break
if len(working_shifts)>=padding:
return working_shifts[-1-padding]
return 0
def squeeze_space(space, padding, max_shift):
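# Best-effort horizontal squeeze (a reading of the code, not original documentation):
# group occupied columns into contiguous ranges, flood-fill the connected shapes in
# each range, and shift the already-processed shapes right towards the remaining ones
# when the gap allows it given `padding`. Returns (space, changed_flag) so the caller
# can repeat until no further squeeze is possible.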
collected_shifts = defaultdict(int)
xses = list(sorted(set(map(lambda x:x[0], space.keys()))))
ranges = []
cur_range = None
for x in xses:
if not cur_range or x>max(cur_range)+1:
if cur_range: ranges.append( cur_range )
cur_range = []
cur_range.append( x )
if cur_range: ranges.append( cur_range )
done = set()
for r in ranges:
cells_in_cur_range = set()
for x,y in space.keys():
if x in r:
cells_in_cur_range.add( (x,y) )
while cells_in_cur_range:
start = list(sorted(cells_in_cur_range))[0]
flooded = flood_shape(cells_in_cur_range, start)
cells_in_cur_range -= flooded
done |= flooded
if cells_in_cur_range - done:
shift = best_shift(done, cells_in_cur_range - done, padding, max_shift)
if shift>0:
new_space = defaultdict(str)
for pos, mark in space.items():
if pos in done:
new_pos = (pos[0]+shift, pos[1])
else:
new_pos = pos
new_space[new_pos] = mark
return new_space, True
return space, False
def draw_space(space, config=None):
if config and config.get('fsq',-1)>=0:
repeat = True
while repeat:
space, repeat = squeeze_space(space, config.get('fsq',-1), 2*config.get('w') )
if space:
for y in range(max(map(lambda x:x[1],space.keys()))+1,min(map(lambda x:x[1],space.keys()))-2,-1):
row = []
for x in range(min(map(lambda x:x[0],space.keys()))-1,max(map(lambda x:x[0],space.keys()))+2):
row.append( space.get((x,y)," ") )
print("".join(row))
def primitives2pixels(space, anchors2points, edges, config):
for edge in edges:
for dot in line(anchors2points[edge[0]],anchors2points[edge[1]]):
for dx in range(config.get('dx',1)):
for dy in range(config.get('dy',1)):
space[(dot[0]+dx,dot[1]+dy)] = config.get('e','X')
for anchor in anchors2points.values():
for dx in range(config.get('dx',1)):
for dy in range(config.get('dy',1)):
space[(anchor[0]+dx,anchor[1]+dy)] = config.get('a','#')
return space
def put_text_plain(space,text,config,geometry):
anchors2points = dict()
edges = []
shift = 0
for i,c in enumerate(text.upper()):
shape = geometry.get(c,geometry[' '])
if shape['anchors']:
for anchor, anchor_pos in shape['anchors'].items():
x = shift+anchor_pos[0]*config['w']
y = config[anchor_pos[1]] # config['h']-
anchors2points["%d_%s"%(i,anchor)] = (x,y)
if shape['edges']:
for edge in shape['edges']:
edges.append( ("%d_%s"%(i,edge[0]), "%d_%s"%(i,edge[1]), None, edge[2]) )
if shape['anchors']:
shift += max([x[0] for x in shape['anchors'].values()])*config['w']
shift += config.get('pad',0)
return primitives2pixels(space, anchors2points, edges, config)
def put_text_greedy(space,text, config, geometry):
anchors2points = dict()
edges = []
shift = 0
last_taken = [i for i in range(config['h']+1)]
for i,c in enumerate(text.upper()):
la2ga = dict()
if c == '~':
last_taken = [i for i in range(config['h']+1)]
continue
shape = geometry.get(c,geometry[' '])
if not shape['anchors']:
if c == ' ':
shift += config.get('pad',0)
continue
left_anchors = [ (anchor[0],anchor[1][0],anchor[1][1]) for anchor in shape['anchors'].items() if anchor[1][0] == 0]
left_anchors_pos = dict([(anchor[0],anchor[1][1]) for anchor in shape['anchors'].items() if anchor[1][0] == 0])
left_edges = [edge for edge in shape['edges'] if edge[0] in left_anchors_pos and edge[1] in left_anchors_pos]
found = False
for py in range(config['h']-config['f']):
for my in range(config['h'],py+config['f'],-1):
a2p = dict([(a,(0,py+(config[y]*(my-py))//config['h'])) for a,y in left_anchors_pos.items()])
subspace = primitives2pixels(defaultdict(str), a2p, left_edges, config)
taken = [key[1] for key in subspace.keys()]
if not set(taken)&set(last_taken):
found = True
break
if found:
break
if not found:
py = 0
my = config['h']
right_column = max([x[0] for x in shape['anchors'].values()])
right_anchors = set()
for anchor, anchor_pos in shape['anchors'].items():
x = shift+anchor_pos[0]*config['w']
if not found: x += config.get('pad',0)
if not anchor_pos[0]:
y = py+(config[anchor_pos[1]]*(my-py))//config['h']
else:
y = config[anchor_pos[1]]
broken = False
for edge in shape['edges']:
if edge[0] == anchor and edge[1] in la2ga:
ly = config[anchor_pos[1]]
ry = anchors2points[la2ga[edge[1]]][1]
elif edge[1] == anchor and edge[0] in la2ga:
ry = config[anchor_pos[1]]
ly = anchors2points[la2ga[edge[0]]][1]
else:
continue
if edge[2] == '=' and ly != ry:
broken = True
elif edge[2] == '<' and ly >= ry:
broken = True
elif edge[2] == '<=' and ly > ry:
broken = True
elif edge[2] == '>' and ly <= ry:
broken = True
elif edge[2] == '>=' and ly < ry:
broken = True
if broken:
break
if broken:
y = py+(config[anchor_pos[1]]*(my-py))//config['h'] # config['h']-
anchors2points["%d_%s"%(i,anchor)] = (x,y)
la2ga[anchor] = "%d_%s"%(i,anchor)
if anchor_pos[0] == right_column:
right_anchors.add("%d_%s"%(i,anchor))
right_edges = []
for edge in shape['edges']:
edges.append( ("%d_%s"%(i,edge[0]), "%d_%s"%(i,edge[1]), None, edge[2]) )
if edges[-1][0] in right_anchors and edges[-1][1] in right_anchors:
right_edges.append( edges[-1] )
subspace = primitives2pixels(
defaultdict(str),
dict([ item for item in anchors2points.items() if item[0] in right_anchors]),
right_edges,
config
)
taken = [key[1] for key in subspace.keys()]
last_taken = taken[:]
for i in taken:
for j in range(-config['vc'],config['vc']+1):
last_taken.append(i+j)
shift += right_column*config['w']
if not found:
shift += config.get('pad',0)
return primitives2pixels(space, anchors2points, edges, config)
def pre_render_vert(anchors, edges, config, low_y, high_y):
# print(anchors)
anchors = dict([(a,(0,low_y+y*(high_y-low_y)//config['h'])) for a,y in anchors.items()])
bolder_config = dict(config)
bolder_config['dx'] += config['vc']
bolder_config['dy'] += config['vc']
subspace = primitives2pixels(defaultdict(str), anchors, edges, bolder_config)
taken = list(sorted(set([key[1] for key in sorted(subspace.keys())])))
return taken
def pre_render_field(anchors, edges, config, shift_x = 0, shift_y = 0):
anchors = dict([(a,(pos[0]+shift_x,pos[1]+shift_y)) for a,pos in anchors.items()])
bolder_config = dict(config)
bolder_config['dx'] += config['vc']
bolder_config['dy'] += config['vc']
subspace = primitives2pixels(defaultdict(str), anchors, edges, bolder_config)
taken = set( subspace.keys() )
return taken
def rename_anchor(a,iteration,text,right=False):
q = a
if right: text = "r_"+text
if "_" in q: q = q.split('_',1)[1]
q = f'{iteration}_{text}_{q}'
return q
def check_equations(matched, left_item, right_item, left_item_right_anchors, right_item_left_anchors, config):
left_item_edge_anchors_y = {}
left_broken = False
if matched and matched[0] != (0, config['h']):
# check if we can distort an edge column without resizing full left item
low_y, high_y = matched[0]
for a,y in left_item_right_anchors.items():
left_item_edge_anchors_y[a] = low_y + y*(high_y - low_y)/config['h']
broken = False
for edge in left_item['shape']['edges']:
if edge[1] in left_item_edge_anchors_y:
ly = left_item['shape']['anchors'][edge[0]][1]
ry = left_item_edge_anchors_y[edge[1]]
elif edge[0] in left_item_edge_anchors_y:
ry = left_item_edge_anchors_y[edge[0]]
ly = left_item['shape']['anchors'][edge[1]][1]
else:
continue
if edge[2] == '=' and ly != ry:
broken = True
elif edge[2] == '<' and ly >= ry:
broken = True
elif edge[2] == '<=' and ly > ry:
broken = True
elif edge[2] == '>' and ly <= ry:
broken = True
elif edge[2] == '>=' and ly < ry:
broken = True
if broken:
break
left_broken = broken
right_item_edge_anchors_y = {}
right_broken = False
if matched and matched[1] != (0, config['h']):
# check if we can distort an edge column without resizing full right item
low_y, high_y = matched[1]
for a,y in right_item_left_anchors.items():
right_item_edge_anchors_y[a] = low_y + y*(high_y - low_y)/config['h']
broken = False
for edge in right_item['shape']['edges']:
if edge[1][-2] == edge[0][-2]: continue
if edge[1] in right_item_edge_anchors_y:
ry = right_item['shape']['anchors'][edge[0]][1]
ly = right_item_edge_anchors_y[edge[1]]
elif edge[0] in right_item_edge_anchors_y:
ly = right_item_edge_anchors_y[edge[0]]
ry = right_item['shape']['anchors'][edge[1]][1]
else:
continue
if edge[2] == '=' and ly != ry:
broken = True
elif edge[2] == '<' and ly >= ry:
broken = True
elif edge[2] == '<=' and ly > ry:
broken = True
elif edge[2] == '>' and ly <= ry:
broken = True
elif edge[2] == '>=' and ly < ry:
broken = True
if broken:
break
right_broken = broken
return left_broken, right_broken, left_item_edge_anchors_y, right_item_edge_anchors_y
def merge_items(left_item, right_item, iteration, config):
if left_item['text'] in " ~":
result = right_item
result['text'] = left_item['text']+result['text']
return result
if right_item['text'] in " ~":
result = left_item
result['text'] = result['text']+right_item['text']
return result
matched = False
right_item_left_column = min([x[0] for x in right_item['shape']['anchors'].values()])
left_item_right_column = max([x[0] for x in left_item['shape']['anchors'].values()])
right_item_left_anchors = dict([
(anchor[0],anchor[1][1])
for anchor in right_item['shape']['anchors'].items()
if anchor[1][0] == right_item_left_column
])
right_item_left_edges = [
edge for edge in right_item['shape']['edges']
if edge[0] in right_item_left_anchors and edge[1] in right_item_left_anchors
]
left_item_right_anchors = dict([
(anchor[0],anchor[1][1])
for anchor in left_item['shape']['anchors'].items()
if anchor[1][0] == left_item_right_column
])
left_item_right_edges = [
edge for edge in left_item['shape']['edges']
if edge[0] in left_item_right_anchors and edge[1] in left_item_right_anchors
]
if left_item['text'][-1] not in " ~" and right_item['text'][0] not in " ~":
left_mappers = dict()
right_mappers = dict()
for low_y in range(config['f']):
for high_y in range(config['h'],config['h']-config['f'],-1):
_left_broken, _right_broken, _, _ = check_equations(
((low_y, high_y),(low_y, high_y)),
left_item, right_item,
left_item_right_anchors,
right_item_left_anchors,
config
)
_left_distortion = 1.
_resize_coef = (high_y - low_y)/config['h']
for d in left_item['distortion_vector']:
if _left_broken:
_left_distortion *= d*_resize_coef
else:
_left_distortion *= d
if not _left_broken:
_left_distortion *= _resize_coef
_right_distortion = 1.
for d in right_item['distortion_vector']:
if _right_broken:
_right_distortion *= d*_resize_coef
else:
_right_distortion *= d
if not _right_broken:
_right_distortion *= _resize_coef
left_mappers[(low_y, high_y)] = (
pre_render_vert(left_item_right_anchors, left_item_right_edges, config, low_y, high_y),
_left_distortion, (high_y - low_y)/config['h']
)
right_mappers[(low_y, high_y)] = (
pre_render_vert(right_item_left_anchors, right_item_left_edges, config, low_y, high_y),
_right_distortion, (high_y - low_y)/config['h']
)
matches = defaultdict(list)
for lo, lv in left_mappers.items():
for ro, rv in right_mappers.items():
if not(set(lv[0])&set(rv[0])):
matches[lv[1]*rv[1]].append( (lo, ro, lv[2], rv[2]) )
if matches:
best_distortion = max(matches)
matches = matches[best_distortion]
matched = random.choice(matches)
right_item_shift = left_item_right_column
if not matched:
right_item_shift += config['pad']
best_distortion = 1.
matched = ((0, config['h']), (0, config['h']), 1., 1.)
if left_item['text'][-1] == " " or right_item['text'][0] == " ":
right_item_shift += config['w']
left_broken, right_broken, left_item_edge_anchors_y, right_item_edge_anchors_y = check_equations(
matched, left_item, right_item, left_item_right_anchors, right_item_left_anchors, config
)
matched = list(matched)
if not left_broken: matched[2] = 1.
left_distortion = list(map(lambda x:x*matched[2], left_item['distortion_vector']))
if not right_broken: matched[3] = 1.
right_distortion = list(map(lambda x:x*matched[3], right_item['distortion_vector']))
result_anchors = dict()
result_edges = list()
low_y, high_y = matched[0]
for a,v in left_item['shape']['anchors'].items():
if not left_broken and a not in left_item_edge_anchors_y:
result_anchors[rename_anchor(a,iteration,left_item['text'])] = v
else:
result_anchors[rename_anchor(a,iteration,left_item['text'])] = (
v[0], low_y+v[1]*(high_y-low_y)//config['h']
)
for e in left_item['shape']['edges']:
result_edges.append(
(
rename_anchor(e[0],iteration,left_item['text']),
rename_anchor(e[1],iteration,left_item['text']),
e[2]
)
)
left_part = pre_render_field(result_anchors,result_edges,config)
result_anchors_right = dict()
result_edges_right = list()
low_y, high_y = matched[1]
for a,v in right_item['shape']['anchors'].items():
if not right_broken and a not in right_item_edge_anchors_y:
result_anchors_right[rename_anchor(a,iteration,right_item['text'],right=True)] = (right_item_shift+v[0], v[1])
else:
result_anchors_right[rename_anchor(a,iteration,right_item['text'],right=True)] = (
right_item_shift+v[0], low_y+v[1]*(high_y-low_y)//config['h']
)
for e in right_item['shape']['edges']:
result_edges_right.append(
(
rename_anchor(e[0],iteration,right_item['text'],right=True),
rename_anchor(e[1],iteration,right_item['text'],right=True),
e[2]
)
)
for pad in range(-config['w']*config['sq'],1):
if not left_part&pre_render_field(result_anchors_right,result_edges_right,config,shift_x=pad):
# print(pad)
break
for a,v in result_anchors_right.items():
result_anchors[a] = (v[0]+pad,v[1])
result_edges.extend( result_edges_right )
return {
'text':left_item['text']+right_item['text'],
'shape':{'anchors':result_anchors, 'edges':result_edges},
'distortion_vector': left_distortion+right_distortion
}
def unfold_shape(shape, config, prefix = ""):
if shape['anchors']:
shape['anchors'] = dict(
[
(prefix+anchor[0],(config['w']*anchor[1][0],config[anchor[1][1]]))
for anchor in shape['anchors'].items()
]
)
if shape['edges']:
shape['edges'] = [(prefix+e[0],prefix+e[1],e[2]) for e in shape['edges']]
return shape
def put_text_random(space,text, config, geometry, return_raw = False):
items = []
new_text = ""
for i,c in enumerate(text.upper()):
if c in geometry:
new_text += c
else:
new_text += " "
text = re.sub(r' +', ' ', new_text.strip())
for i,c in enumerate(text.upper()):
items.append( {
'text':c,
'shape':unfold_shape(
dict( geometry.get(c,geometry[' ']) ),
config,
prefix = f'{i}_'
),
'distortion_vector':[1.,]
} )
iteration = 0
while len(items)>1:
idx = random.randint(1,len(items)-1)
left_item = items[idx-1]
right_item = items[idx]
items = items[:idx-1] + [merge_items(left_item, right_item, iteration, config),] + items[idx+1:]
iteration += 1
if return_raw:
return items[0]
return primitives2pixels(space, items[0]['shape']['anchors'], items[0]['shape']['edges'], config)
def put_text_random_best(space,text, config, geometry, attempts=None):
best_score = None
best_result = None
if attempts is None:
attempts = config.get('rtr',16)
for _ in range(attempts):
res = put_text_random(space,text, config, geometry, return_raw = True)
width = max(map(lambda x:x[0],res['shape']['anchors'].values()))/len(text)
distortion = 1.
for d in res['distortion_vector']:
distortion *= d
if best_score is None or best_score<width*pow(distortion,config.get('dp',1.0)):
best_score = width*pow(distortion,config.get('dp',1.0))
best_result = res.copy()
return primitives2pixels(
defaultdict(str),
best_result['shape']['anchors'],
best_result['shape']['edges'],
config
)
methods = [
('plain', put_text_plain),
('greedy', put_text_greedy),
('random', put_text_random),
('random_best', put_text_random_best)
]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-c', type=str, default='cfg.yaml',
help='config file')
parser.add_argument('--geometry', '-g', type=str, default='geometry.yaml',
help='font geometry file')
parser.add_argument('--text', '-t', type=str, default='text',
help='text to draw')
parser.add_argument('--seed', '-s', type=int, default=-1,
help='random seed')
parser.add_argument('--method', '-m', type=str, default='random_best',
help='method to use')
args = parser.parse_args()
if args.seed != -1: random.seed(args.seed)
if args.config == 'shuf':
args.config = random.choice( glob('cfg*.yaml') )
if args.method == 'shuf':
args.method = random.choice( ['greedy', 'random', 'random_best'] )
config = yaml.load(open(args.config, encoding='utf-8').read(), Loader=yaml.FullLoader)
geometry = yaml.load(open(args.geometry, encoding='utf-8').read(), Loader=yaml.FullLoader)
if args.method in dict(methods):
space = dict(methods)[args.method](defaultdict(str), args.text, config, geometry)
draw_space(space, config)
else:
print('Select one of the existing algorithms:')
for p,fn in methods:
print(f'Method: {p}')
space = fn(defaultdict(str), args.text, config, geometry)
draw_space(space, config)
if __name__ == '__main__':
main()
|
11467419
|
import numpy as np
mapping = {0:0,1:1,2:2,3:3,4:4,5:5,6:10,7:11,8:12,9:13,10:14,11:15,12:8,14:9}
def stabilize(crazy):
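# Descriptive note (inferred from the code, not original documentation): remap the
# variable-length joint annotations in `crazy` into a fixed 16x2 array via `mapping`
# (missing joints stay at -1), skipping annotations with id 13. `cluttered` counts how
# many of the 13 expected joints were not found. Joint 6 is filled in as the midpoint
# of joints 2 and 3, and joint 7 as the midpoint of joints 6 and 8, when available.
# Returns (cluttered, joints).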
numVisible = crazy.shape[0]
joints = -1*np.ones((16,2))
cluttered = 13
for i in range(numVisible):
ithJoint = crazy[i]
id = ithJoint[0][0][0]
x = ithJoint[1][0][0]
y = ithJoint[2][0][0]
if (id == 13):
continue
cluttered -= 1
joints[mapping[id]][0] = x
joints[mapping[id]][1] = y
if ((joints[2,0] == -1) or (joints[3,0] == -1)):
pass
else :
joints[6,:] = (joints[2,:] + joints[3,:])/2
if ((joints[6,0] == -1) or (joints[8,0] == -1)):
pass
else :
joints[7,:] = (joints[8,:] + joints[6,:])/2
return (cluttered, joints)
|
11467436
|
import io
from http import HTTPStatus
from pathlib import Path
from typing import Optional, Tuple
import pytest
import requests
from requests import Response
@pytest.mark.parametrize(["extension"], [["zip"], ["jar"]])
def test_upload_get_delete(
resources_folder: Path, server_url: str, extension: str
) -> None:
plugins_xml = requests.get(f"{server_url}/?build=").text
assert plugins_xml == "<plugins/>"
upload_response, plugin_data = upload_resource_plugin(
server_url, resources_folder, f"plugin.{extension}"
)
assert upload_response.status_code == HTTPStatus.CREATED, upload_response.text
# noinspection SpellCheckingInspection
expected_plugin_xml = (
f'<plugin id="rez.nik" url="/get_plugin/1-plugin.{extension}" version="1">'
f'<idea-version since-build="192"/>'
f"<name>Reznik</name>"
f"</plugin>"
)
plugins_xml = requests.get(f"{server_url}/?build=").text
assert expected_plugin_xml in plugins_xml
plugin_from_server = requests.get(
f"{server_url}/get_plugin/1-plugin.{extension}"
).content
assert plugin_from_server == plugin_data
# noinspection SpellCheckingInspection
delete_response = requests.delete(f"{server_url}/?plugin=Reznik&version=1")
assert delete_response.status_code == HTTPStatus.NO_CONTENT, delete_response.text
plugins_xml = requests.get("http://localhost:80/?build=").text
assert plugins_xml == "<plugins/>"
def test_delete_plugin_that_does_not_exist(server_url: str) -> None:
# noinspection SpellCheckingInspection
delete_response = requests.delete(f"{server_url}/?plugin=Kuku&version=1")
assert delete_response.status_code == HTTPStatus.NOT_FOUND, delete_response.text
@pytest.mark.parametrize(["extension"], [["zip"], ["jar"]])
def test_upload_file_with_different_extension(
server_url: str, resources_folder: Path, extension: str
):
upload_response, _ = upload_resource_plugin(
server_url,
resources_folder,
f"plugin.{extension}",
"plugin.exe",
)
assert upload_response.status_code == HTTPStatus.BAD_REQUEST, upload_response.text
@pytest.mark.parametrize(["extension"], [["zip"], ["jar"]])
def test_upload_wrong_file_type(
server_url: str, resources_folder: Path, extension: str
):
upload_response, _ = upload_resource_plugin(
server_url,
resources_folder,
f"fake_{extension}.{extension}",
)
assert upload_response.status_code == HTTPStatus.BAD_REQUEST, upload_response.text
def test_upload_jar_that_is_not_a_plugin(server_url: str, resources_folder: Path):
upload_response, _ = upload_resource_plugin(
server_url, resources_folder, "not_plugin.jar"
)
assert upload_response.status_code == HTTPStatus.BAD_REQUEST, upload_response.text
def test_upload_two_plugins_once(server_url: str, resources_folder: Path):
with open(resources_folder / "plugin.jar", "rb") as j, open(
resources_folder / "plugin.zip", "rb"
) as z:
jar_data = j.read()
zip_data = z.read()
files = [
("plugin_files", ("plugin.jar", io.BytesIO(jar_data))),
("plugin_files", ("plugin.zip", io.BytesIO(zip_data))),
]
upload_response = requests.post(f"{server_url}/upload", files=files)
assert upload_response.status_code == HTTPStatus.CREATED, upload_response.text
plugins_xml = requests.get(f"{server_url}/?build=").text
expected_plugin_xml = (
'<plugin id="rez.nik" url="/get_plugin/1-plugin.jar" version="1">'
'<idea-version since-build="192"/>'
"<name>Reznik</name>"
"</plugin>"
'<plugin id="rez.nik" url="/get_plugin/1-plugin.zip" version="1">'
'<idea-version since-build="192"/>'
"<name>Reznik</name>"
"</plugin>"
)
assert expected_plugin_xml in plugins_xml
@pytest.mark.parametrize(
("plugin_name"),
(
("Docker-213.4250.391.zip"),
("go-213.4631.20.zip"),
("IdeaVim-1.7.2.zip"),
("intellij-rainbow-brackets-6.21.zip"),
("js-karma-213.4631.9.zip"),
("Key-Promoter-X-2021.2.zip"),
("makefile-213.4250.391.zip"),
("poetry-pycharm-plugin.zip"),
("python-213.4631.20.zip"),
("StringManipulation.zip"),
),
)
def test_upload_real_plugins(
server_url: str, real_plugins_folder: Path, plugin_name: str
):
upload_response, plugin_data = upload_resource_plugin(
server_url, real_plugins_folder, plugin_name
)
assert upload_response.status_code == HTTPStatus.CREATED, upload_response.text
def upload_resource_plugin(
server_url: str,
resources_folder: Path,
file_name: str,
server_file_name: Optional[str] = None,
) -> Tuple[Response, bytes]:
server_file_name = server_file_name or file_name
plugin_path = resources_folder / file_name
with open(plugin_path, "rb") as f:
plugin_data = f.read()
upload_response = requests.post(
f"{server_url}/upload",
files=[
("plugin_files", (server_file_name, io.BytesIO(plugin_data))),
],
)
return upload_response, plugin_data
|
11467452
|
print ((1+3)/2)
print ((4/2)+3)
print ((1L+3L)/2L)
print ((1L+3L)/3L)
print ((1L/2L)+1L)
print ((4L/2L)+3L)
print 4L**2L-5L+3L/7L%2L
print (4L**2L)-5L+((3L/7L)%2L)
|
11467454
|
import numpy as np
import ipywidgets as widgets
import shutil
import matplotlib.pyplot as plt
import struct
global base
global all_volts
global times
global stim
dt = 0.1
from tkinter import *
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askdirectory
from ipywidgets import *
def test():
root = Tk()
root.withdraw()
root.call('wm', 'attributes', '.', '-topmost', True)
infiles = askdirectory()
return infiles
style = {'description_width': 'initial'}
def init_working_dir():
global base
text_neurogpu_dir = widgets.Text(description="location:", style=style, layout=Layout(width='600px'))
text_neurogpu_dir.value = 'choose where simulation output is located'
text_neurogpu_dir.value = 'C:/BBP_new/Data/'
base = text_neurogpu_dir.value
base = text_neurogpu_dir.value.replace('/', '\\')
text_neurogpu_dir.width = '50%'
display(text_neurogpu_dir)
button = widgets.Button(description="Select Directory:", layout=Layout(width='300px'))
display(button)
def on_button_clicked0_1(b):
global base
text_neurogpu_dir.value = test()
base = text_neurogpu_dir.value.replace('/', '\\')
button.on_click(on_button_clicked0_1)
def nrnMread(fileName):
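# Read a NEURON-style binary output file: two leading int32 header fields (a parameter
# count and a type flag) followed by the payload as float64 values.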
f = open(fileName, "rb")
nparam = struct.unpack('i', f.read(4))[0]
print(nparam)
typeFlg = struct.unpack('i', f.read(4))[0]
return np.fromfile(f, np.double)
def plotModel(model_ind, stim_ind):
volts = all_volts[int(model_ind), :]
plt.xlabel('timestep')
plt.ylabel('Volts [mV]')
plt.title('Stimulation')
plt.plot(times, volts)
plt.show()
def saveModel(model_ind, stim_ind,folder):
volts = all_volts[int(model_ind), :]
fn = folder + 'traces_' + str(model_ind) + '.csv'
fndvdt = folder + 'dvdt_' + str(model_ind) + '.csv'
dvdt = np.diff(volts)
dvdt = dvdt/dt
print(fn)
volts = np.array(volts)
np.savetxt(fn,volts,delimiter='\n')
np.savetxt(fndvdt,dvdt,delimiter='\n')
button = widgets.Button(description="Read Output:", layout=Layout(width='300px'))
display(button)
def on_button_clicked0_1(b):
button.on_click(on_button_clicked0_1)
def readOutput(folder,vhot_fn):
global base
global all_volts
global times
global stim
timesFN = folder + 'times.csv'
time_steps = np.genfromtxt(timesFN, delimiter=',')
times = np.cumsum(time_steps)
Nt = time_steps.size
stimFN = folder + 'Stim_raw.csv'
stim = np.genfromtxt(stimFN, delimiter=',')
all_volts = nrnMread(folder + vhot_fn)
all_volts = np.array(all_volts)
if stim.ndim == 2:
Nstim = stim.shape[0]
else:
Nstim = 1
psize = int(len(all_volts) / Nt)
all_volts = np.reshape(all_volts, [psize, Nt])
stim = stim[:Nt]
text_nmodels = widgets.Text(description="#Models:", style=style, layout=Layout(width='600px'), disabled=True)
text_nmodels.value = str(psize)
display(text_nmodels)
text_nstims = widgets.Text(description="#Stims:", style=style, layout=Layout(width='600px'), disabled=True)
text_nstims.value = str(Nstim)
display(text_nstims)
text_chooseModel = widgets.Text(description="Choose Model:", style=style, layout=Layout(width='600px'))
text_chooseModel.value = str(1)
display(text_chooseModel)
text_chooseStim = widgets.Text(description="Choose Stim:", style=style, layout=Layout(width='600px'))
text_chooseStim.value = str(1)
display(text_chooseStim)
plotbutton = widgets.Button(description="plot model:", layout=Layout(width='300px'))
display(plotbutton)
def on_button_clicked0_1(b):
plotModel(text_chooseModel.value, text_chooseStim.value)
savebutton = widgets.Button(description="save volts:", layout=Layout(width='300px'))
display(savebutton)
def on_button_clicked0_2(b):
saveModel(text_chooseModel.value, text_chooseStim.value,folder)
plotbutton.on_click(on_button_clicked0_1)
savebutton.on_click(on_button_clicked0_2)
|
11467468
|
import nltk
from nltk.corpus import treebank_chunk
print(treebank_chunk.chunked_sents()[1])
treebank_chunk.chunked_sents()[1].draw()
|
11467497
|
import pkg_resources
from chef.api import ChefAPI
from chef.exceptions import ChefObjectTypeError
from chef.permissions import Permissions
class Acl(object):
"""
Acl class provides access to the Acl in the Chef 12
Acl(object_type, name, api, skip_load=False)
- object_type - type of the Chef object. Can be one of the following values: "clients", "containers", "cookbooks",
"data", "environments", "groups", "nodes", "roles"
- name - name of the Chef object (e.g. node name)
- api - object of the ChefAPI class, configured to work with Chef server
- skip_load - if skip_load is False, the new object will be initialized with the current Acl settings of the specified
object
Example::
from chef import ChefAPI
from chef.acl import Acl
api = ChefAPI('http://chef.com:4000', 'chef-developer.pem', 'chef-developer', '12.0.0')
acl = Acl('nodes', 'i-022fcb0d', api)
Each object of the Acl class contains the following properties:
create, read, update, delete, grant
each property represents corresponding access rights to the Chef object.
each property contains the following fields (https://github.com/astryia/pychef/blob/acls/chef/permissions.py):
- actors - list of the users that have the corresponding permissions
- groups - list of the groups that have the corresponding permissions
Example::
print acl.update.groups
>>> ['admins', 'clients']
Each object of the class Acl contains the following methods:
- reload() - reload current Acls from the Chef server
- save() - save updated Acl object to the Chef server
- is_supported() - return true if current Api version supports work with Acls
Example::
from chef import ChefAPI
from chef.acl import Acl
api = ChefAPI('http://chef.com:4000', 'chef-developer.pem', 'chef-developer', '12.0.0')
acl = Acl('nodes', 'i-022fcb0d', api)
print acl.update.groups
>>> ['admins']
acl.update.groups.append('clients')
acl.save()
acl.reload()
print acl.update.groups
>>> ['admins', 'clients']
Each class which represents Chef object contains method get_acl() method
Example::
from chef import ChefAPI
from chef.node import Node
api = ChefAPI('http://chef.com:4000', 'chef-developer.pem', 'chef-developer', '12.0.0')
node = Node('i-022fcb0d', api)
acl = node.get_acl()
print acl.read.groups
>>> ['admins']
acl.save()
Note about versions
Chef servers with version < 12 don't have the Acl endpoint, so I've introduced the is_supported() method for the Acl class.
This method checks whether the API version is at least 12.
So you should pass valid Chef server version to the ChefAPI constructor
Example::
api = ChefAPI('http://chef.com:4000', 'chef-developer.pem', 'chef-developer', '12.0.0')
acl = Acl('nodes', 'i-022fcb0d', api)
print acl.is_supported()
>>> True
api = ChefAPI('http://chef.com:4000', 'chef-developer.pem', 'chef-developer', '11.2.0')
acl = Acl('nodes', 'i-022fcb0d', api)
print acl.is_supported()
>>> False
But if you pass string '12.0.0' when actual Chef server version is 11.2, you will receive an error when you try
to build Acl object.
"""
ace_types = ["create", "read", "update", "delete", "grant"]
object_types = ["clients", "containers", "cookbooks", "data", "environments", "groups", "nodes", "roles"]
""" ALC API available only in Chef server from version 12.0"""
version = pkg_resources.parse_version("12.0.0")
def __init__(self, object_type, name, api, skip_load=False):
self._check_object_type(object_type)
self.object_type = object_type
self.name = name
self.url = "/%s/%s/_acl/" % (object_type, name)
self.api = api or ChefAPI.get_global()
self.attributes_map = {}
for t in self.ace_types:
self.attributes_map[t] = Permissions()
if (not skip_load) and self.is_supported():
self.reload()
@property
def create(self):
""" Gets Create permissions """
return self.attributes_map["create"]
@property
def read(self):
""" Gets Read permissions """
return self.attributes_map["read"]
@property
def update(self):
""" Gets Update permissions """
return self.attributes_map["update"]
@property
def delete(self):
""" Gets Delete permissions """
return self.attributes_map["delete"]
@property
def grant(self):
""" Gets Grant permissions """
return self.attributes_map["grant"]
def save(self):
""" Save updated permissions objects to the Chef server """
for t in self.ace_types:
self.api.api_request('PUT', self.url + t, data={t: self[t]})
def __getitem__(self, key):
if key in self.attributes_map.keys():
return self.attributes_map[key]
else:
return {}
def reload(self):
""" Load current permissions for the object """
data = self.api.api_request('GET', self.url)
for t in self.ace_types:
self[t].actors = data[t]['actors']
self[t].groups = data[t]['groups']
def to_dict(self):
d = {}
for t in self.ace_types:
d[t] = self[t].to_dict()
return d
def _check_object_type(self, object_type):
if object_type not in self.object_types:
raise ChefObjectTypeError('Object type %s is not allowed' % object_type)
def is_supported(self):
return self.api.version_parsed >= self.version
|
11467508
|
import datetime
import json
import os
import random
import string
import time
class PPasteException(Exception):
'''Custom exception'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PasteManager:
NAME_LEN = 6
ALPHABET = list(string.ascii_uppercase) + list(string.digits)
PASTE_LOCATION = os.path.join(os.getcwd(), 'pastes')
@classmethod
def check_pastes_directory(cls):
'''
Check function that raises an exception if the pastes directory
doesn't exist
'''
if not os.path.isdir(cls.PASTE_LOCATION):
raise PPasteException(
'Pastes directory ({}) does not exist'.format(
cls.PASTE_LOCATION))
@classmethod
def get_rand_paste_name(cls):
return ''.join(
random.choice(cls.ALPHABET)
for _ in range(cls.NAME_LEN)
)
@classmethod
def craft_paste_path(cls, paste_name):
return os.path.join(cls.PASTE_LOCATION, paste_name)
@classmethod
def save_paste(cls, paste):
cls.check_pastes_directory()
path = cls.craft_paste_path(paste.name)
if os.path.exists(path):
raise PPasteException('Paste file {} already exists'.format(path))
try:
with open(path, 'w') as f:
json.dump(paste.get_dict(), f)
except OSError as e:
raise PPasteException('Cannot write file {} - {}'.format(
path,
e
))
@classmethod
def fetch_paste(cls, name):
cls.check_pastes_directory()
path = cls.craft_paste_path(name)
if not os.path.exists(path):
raise PPasteException(
'Paste file {} does not exist'.format(path))
try:
with open(path, 'r') as f:
d = json.load(f)
return Paste(
title=d['title'],
content=d['content'],
hl_alias=d['hl_alias'],
is_private=d['is_private'],
date=d['date'],
name=name,
)
except OSError as e:
raise PPasteException('Cannot load file {} - {}'.format(
path,
e
))
@classmethod
def fetch_public_pastes(cls):
cls.check_pastes_directory()
return sorted(
filter(
lambda p: not p.is_private,
(cls.fetch_paste(name)
for name
in os.listdir(cls.PASTE_LOCATION))
),
key=lambda p: -p.date
)
class Paste:
def __init__(self, title='', content='', hl_alias='',
is_private=False, name=None, date=None):
self.title = title or ''
self.content = content or ''
# When no highlighting is specified, we default it to text so that
# fragments can work properly
self.hl_alias = hl_alias or 'text'
self.is_private = is_private
self.name = PasteManager.get_rand_paste_name() \
if name is None else name
self.date = int(time.time()) \
if date is None else date
def get_dict(self):
return {
'title': self.title,
'content': self.content,
'hl_alias': self.hl_alias,
'is_private': self.is_private,
'name': self.name,
'date': self.date
}
def save(self):
PasteManager.check_pastes_directory()
PasteManager.save_paste(self)
def pprint_date(self):
return datetime.datetime.fromtimestamp(
int(self.date)
).strftime('%Y-%m-%d %H:%M')
|
11467520
|
from art_web import app
from flask import render_template, request, flash, redirect, url_for, session
from models import db, User, Feedback, Paintings, Artist, AdminUser
from forms import SignupForm, SigninForm , FeedbackForm, SubmitArt
from flask_admin import Admin
from flask_admin.base import MenuLink
from flask_admin.contrib.sqla import ModelView
# from flask_basicauth import BasicAuth
class MyModelView(ModelView):
column_display_pk = True
column_hide_backrefs = True
# Admin views
admin = Admin(app, name='Art Gallery', template_mode='bootstrap3')
# admin = Admin(app, name='art_gallery', base_template='base.html')
admin.add_view(MyModelView(AdminUser, db.session))
admin.add_view(MyModelView(User, db.session))
admin.add_view(MyModelView(Feedback, db.session))
admin.add_view(MyModelView(Paintings, db.session))
admin.add_view(MyModelView(Artist, db.session))
# Add home link by url
admin.add_link(MenuLink(name='Back', url='/'))
@app.route('/testdb')
def testdb():
user = Paintings.query.with_entities(Paintings.name, Paintings.painting_photo).filter_by(artist_id=1)
print(dict(user.all()))
for u in user.all():
print(u)
# return render_template("login.html",title="login")
return "done!"
@app.route('/', methods=['GET', 'POST'])
def index():
title = 'Home'
form = FeedbackForm()
pnt = Paintings.query.with_entities(Paintings.name, Paintings.painting_photo).filter_by(artist_id=1)
scp = Paintings.query.with_entities(Paintings.name, Paintings.painting_photo).filter_by(artist_id=2)
paintings = dict(pnt.all())
sculptures = dict(scp.all())
if request.method == 'POST':
if form.validate() == False:
return render_template('index.html',title = title, paintings = paintings, sculptures = sculptures, form = form)
else:
feedback = Feedback(form.name.data, form.email.data, form.subject.data, form.comment.data)
db.session.add(feedback)
db.session.commit()
flash("Form submitted successfully!", "success")
return redirect(url_for('index'))
return render_template('index.html',title = title, paintings = paintings, sculptures = sculptures, form = form)
@app.route('/profile')
def profile():
title = 'Profile'
if 'email' not in session:
return redirect(url_for('signin'))
# Admin user
if session['email'] == 'admin':
return render_template('profile.html',title=title)
# Normal user
user = User.query.filter_by(email = session['email']).first()
if user is None:
return redirect(url_for('signin'))
else:
return render_template('profile.html',title=title)
@app.route('/signout')
def signout():
if 'email' not in session:
return redirect(url_for('signin'))
session.pop('email', None)
return redirect(url_for('index'))
@app.route('/signup', methods=['GET', 'POST'])
def signup():
title = 'Sign Up'
form = SignupForm()
# If user is signed in
if 'email' in session:
return redirect(url_for('profile'))
if request.method == 'POST':
if form.validate() == False:
return render_template('signup.html',title = title, form=form)
else:
newuser = User(form.firstname.data, form.lastname.data, form.email.data\
, form.password.data, form.phone_number.data, form.gender.data, form.address.data\
, form.city.data, form.country.data)
db.session.add(newuser)
db.session.commit()
session['email'] = newuser.email
return redirect(url_for('profile'))
elif request.method == 'GET':
return render_template('signup.html',title = title, form=form)
@app.route('/signin', methods=['GET', 'POST'])
def signin():
title = 'Login'
form = SigninForm()
# If user is signed in
if 'email' in session:
return redirect(url_for('profile'))
if request.method == 'POST':
auser = AdminUser.query.filter_by(email = form.email.data.lower()).first()
# Admin login
if auser and auser.check_password(form.password.data):
session['email'] = 'admin'
return redirect('/admin')
elif form.validate() == False:
return render_template('signin.html',title = title, form=form)
else:
# return "hello!"
session['email'] = form.email.data
return redirect(url_for('profile'))
elif request.method == 'GET':
return render_template('signin.html',title = title, form=form)
@app.route('/art', methods=['GET','POST'])
def art():
title = "Submit art"
form = SubmitArt()
if 'email' not in session:
flash("Signin first!","error")
return redirect(url_for(signin))
elif request.method == 'POST':
if form.validate() == False:
flash("Enter correct values!","error")
return render_template('submit_art.html',title = title, form=form)
else:
newart = Paintings(form.name.data, form.location.data, form.artist_id.data)
db.session.add(newart)
db.session.commit()
flash("Form submitted successfully!", "success")
return redirect(url_for('art'))
return render_template("submit_art.html",title=title, form=form)
|
11467527
|
import pytest
import theano
import theano.tensor as T
import numpy as np
def test_cosine_similarity():
from ntm.similarities import cosine_similarity
key_var, memory_var = T.tensor3s('key', 'memory')
cosine_similarity_fn = theano.function([key_var, memory_var], \
cosine_similarity(key_var, memory_var, eps=1e-6))
test_key = np.random.rand(16, 4, 20)
test_memory = np.random.rand(16, 128, 20)
test_output = cosine_similarity_fn(test_key, test_memory)
test_output_manual = np.zeros_like(test_output)
for i in range(16):
for j in range(4):
for k in range(128):
test_output_manual[i, j, k] = np.dot(test_key[i, j], test_memory[i, k]) / \
np.sqrt(np.sum(test_key[i, j] * test_key[i, j]) * np.sum(test_memory[i, k] * \
test_memory[i, k]) + 1e-6)
assert np.allclose(test_output, test_output_manual)
|
11467585
|
from .base import * # noqa
# We want the tasks to run synchronously, as this makes testing easier.
IEVV_BATCHFRAMEWORK_ALWAYS_SYNCRONOUS = True
testfilesdir = 'devilry_testfiles'
if not exists(testfilesdir):
os.mkdir(testfilesdir)
logdir = join(testfilesdir, 'log')
if not exists(logdir):
os.mkdir(logdir)
MEDIA_ROOT = join(testfilesdir, "filestore")
DEVILRY_FSHIERDELIVERYSTORE_ROOT = join(testfilesdir, 'deliverystorehier')
#: Where to store compressed archives for download.
DEVILRY_COMPRESSED_ARCHIVES_DIRECTORY = os.path.join(testfilesdir, 'devilry_compressed_archives', '')
#: Remove traceback logging middleware when running tests.
if 'devilry.utils.logexceptionsmiddleware.TracebackLoggingMiddleware' in MIDDLEWARE:
MIDDLEWARE.remove('devilry.utils.logexceptionsmiddleware.TracebackLoggingMiddleware')
#: Remove django toolbar middleware while running tests.
if 'debug_toolbar.middleware.DebugToolbarMiddleware' in MIDDLEWARE:
MIDDLEWARE.remove('debug_toolbar.middleware.DebugToolbarMiddleware')
#: Remove django toolbar from installed apps.
INSTALLED_APPS += [
'devilry.devilry_dbcache.devilry_dbcache_testapp',
]
if 'debug_toolbar' in INSTALLED_APPS:
INSTALLED_APPS.remove('debug_toolbar')
# We need to use this because lots of tests use username and password to log in
CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND = False
AUTHENTICATION_BACKENDS = (
'devilry.devilry_account.authbackend.default.UsernameAuthBackend',
'devilry.devilry_account.authbackend.default.EmailAuthBackend',
)
# Ensures we are testing against the default translation strings.
DEVILRY_JAVASCRIPT_LOCALE_OVERRIDE_APPS = []
# Default to skipping selenium tests
SKIP_SELENIUMTESTS = True
# SELENIUM_BROWSER = 'phantomjs'
# SELENIUM_DEFAULT_TIMEOUT = 20
# Disable migrations when running tests
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
# def __getitem__(self, key):
# return 'notmigrations'
# Add this to run a database for anonymization
# DATABASES['anonymize_db'] = DATABASES['default'].copy()
# DATABASES['anonymize_db']['NAME'] = 'dbdev_anonymize'
MIGRATION_MODULES = DisableMigrations()
# DEVILRY_V2_DATABASE_MAX_BULK_CREATE_OVERRIDE = 100
DEVILRY_V2_DATABASE_PRINT_PROGRESS_DOTS = False
###################################################################################
# RQ
###################################################################################
# RQ runs synchronously by default for tests.
RQ_QUEUES['default']['ASYNC'] = False
RQ_QUEUES['email']['ASYNC'] = False
RQ_QUEUES['highpriority']['ASYNC'] = False
|
11467600
|
CLASSES = 14
WIDTH = 224
HEIGHT = 224
CHANNELS = 3
LR = 0.0001
EPOCHS = 5
BATCHSIZE = 64
IMAGENET_RGB_MEAN = [0.485, 0.456, 0.406]
IMAGENET_RGB_SD = [0.229, 0.224, 0.225]
TOT_PATIENT_NUMBER = 30805 # From data
|
11467630
|
from glob import glob
from pathlib import Path
from .constants import DEFAULT_FRAMERATE, TEMP_AUDIO_FILENAME
from .command import Command
import ffmpeg
import os
import logging
logger = logging.getLogger(__name__)
FRAME_FILENAME_LENGTH = 4
def _getwh(path):
data = probe(path)
width = data["streams"][0]["width"]
height = data["streams"][0]["height"]
wh = f"{width}x{height}"
return wh
def _run(cmd):
command = " ".join(cmd.compile())
logging.debug(command)
cmd.run()
"""
Does something like this:
ffmpeg -i a.mp4 -i a.wav -c copy -map 0:v:0 -map 1:a:0 -shortest -c:a aac -b:a 192k b.mp4
"""
def combineaudio(inp, audio, out):
logging.debug(f"Combining audio '{audio}' to '{inp}' as '{out}'")
cmd_str = f"ffmpeg -i {inp} -i {audio} -c copy -map 0:v:0 -map 1:a:0 -shortest -c:a aac -b:a 192k {out}"
# FIXME and use ffmpeg instead of command
cmd = Command()
cmd.call(cmd_str)
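# A possible ffmpeg-python replacement for the Command-based call above (untested
# sketch for the FIXME; assumes ffmpeg-python's .video/.audio stream selectors and the
# vcodec/acodec/audio_bitrate keyword arguments):
#   video = ffmpeg.input(inp).video
#   sound = ffmpeg.input(audio).audio
#   _run(ffmpeg.output(video, sound, out, vcodec="copy", acodec="aac", audio_bitrate="192k"))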
"""
Does something like this:
ffmpeg -r 24.89 -f image2 -s 480x360 -i "video-in/%04d.jpg" -vcodec libx264 -crf 25 -pix_fmt yuv420p movie.mp4
"""
def combineframes(inp, out, framerate = DEFAULT_FRAMERATE):
if os.path.isdir(inp):
path = f"{inp}/%04d.jpg"
# Use the first file to get wh
first_file = list(glob(f"{inp}/*"))[0]
wh = _getwh(first_file)
cmd = ffmpeg.input(path,
r = framerate,
f = "image2",
s = wh
).output(out,
vcodec = "libx264",
crf = "25",
pix_fmt = "yuv420p"
)
_run(cmd)
def extractaudio(inp, out):
# Extract audio as a WAV, because re-adding it as MP3 somehow
# doesn't work
cmd = ffmpeg.input(inp).output(f"{out}/{TEMP_AUDIO_FILENAME}")
_run(cmd)
def extractframes(inp, out):
data = probe(inp)
output = f"{out}/%{FRAME_FILENAME_LENGTH}d.jpg"
cmd = ffmpeg.input(inp).output(output, **{"q:v" : 2})
_run(cmd)
def is_image(inp):
if not os.path.isfile(inp):
return False
data = probe(inp)
return data["format"]["format_name"] == "image2"
# FIXME: this is obviously pretty ugly
def is_video(inp):
return not is_image(inp)
def probe(inp = None):
return ffmpeg.probe(inp)
|
11467710
|
import math
import random
import torch
from torch.autograd import Variable
def calculate_gain(nonlinearity, param=None):
"""Return the recommended gain value for the given nonlinearity function. The values are as follows:
============ ==========================================
nonlinearity gain
============ ==========================================
linear :math:`1`
conv{1,2,3}d :math:`1`
sigmoid :math:`1`
tanh :math:`5 / 3`
relu :math:`\sqrt{2}`
leaky_relu :math:`\sqrt{2 / (1 + negative\_slope^2)}`
============ ==========================================
Args:
nonlinearity: the nonlinear function (`nn.functional` name)
param: optional parameter for the nonlinear function
Examples:
>>> gain = nn.init.gain('leaky_relu')
"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def uniform(tensor, a=0, b=1):
"""Fills the input Tensor or Variable with values drawn from the uniform distribution :math:`U(a, b)`.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.uniform(w)
"""
if isinstance(tensor, Variable):
uniform(tensor.data, a=a, b=b)
return tensor
return tensor.uniform_(a, b)
def normal(tensor, mean=0, std=1):
"""Fills the input Tensor or Variable with values drawn from the normal distribution :math:`N(mean, std)`.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.normal(w)
"""
if isinstance(tensor, Variable):
normal(tensor.data, mean=mean, std=std)
return tensor
return tensor.normal_(mean, std)
def constant(tensor, val):
"""Fills the input Tensor or Variable with the value `val`.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
val: the value to fill the tensor with
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.constant(w)
"""
if isinstance(tensor, Variable):
constant(tensor.data, val)
return tensor
return tensor.fill_(val)
def eye(tensor):
"""Fills the 2-dimensional input Tensor or Variable with the identity matrix. Preserves the identity of the inputs
in Linear layers, where as many inputs are preserved as possible.
Args:
tensor: a 2-dimensional torch.Tensor or autograd.Variable
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.eye(w)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
if isinstance(tensor, Variable):
eye(tensor.data)
return tensor
return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))
def dirac(tensor):
"""Fills the {3, 4, 5}-dimensional input Tensor or Variable with the Dirac delta function. Preserves the identity of
the inputs in Convolutional layers, where as many input channels are preserved as possible.
Args:
tensor: a {3, 4, 5}-dimensional torch.Tensor or autograd.Variable
Examples:
>>> w = torch.Tensor(3, 16, 5, 5)
>>> nn.init.dirac(w)
"""
dimensions = tensor.ndimension()
if dimensions not in [3, 4, 5]:
raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")
if isinstance(tensor, Variable):
dirac(tensor.data)
return tensor
sizes = tensor.size()
min_dim = min(sizes[0], sizes[1])
tensor.zero_()
for d in range(min_dim):
if dimensions == 3: # Temporal convolution
tensor[d, d, tensor.size(2) // 2] = 1
elif dimensions == 4: # Spatial convolution
tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2] = 1
else: # Volumetric convolution
tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2, tensor.size(4) // 2] = 1
return tensor
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def xavier_uniform(tensor, gain=1):
"""Fills the input Tensor or Variable with values according to the method described in "Understanding the
difficulty of training deep feedforward neural networks" - <NAME>. & <NAME>. (2010), using a uniform
distribution. The resulting tensor will have values sampled from :math:`U(-a, a)` where
:math:`a = gain \\times \sqrt{2 / (fan\_in + fan\_out)} \\times \sqrt{3}`. Also known as Glorot initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
gain: an optional scaling factor
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.xavier_uniform(w, gain=nn.init.calculate_gain('relu'))
"""
if isinstance(tensor, Variable):
xavier_uniform(tensor.data, gain=gain)
return tensor
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return tensor.uniform_(-a, a)
def xavier_normal(tensor, gain=1):
"""Fills the input Tensor or Variable with values according to the method described in "Understanding the
difficulty of training deep feedforward neural networks" - <NAME>. & <NAME>. (2010), using a normal
distribution. The resulting tensor will have values sampled from :math:`N(0, std)` where
:math:`std = gain \\times \sqrt{2 / (fan\_in + fan\_out)}`. Also known as Glorot initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
gain: an optional scaling factor
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.xavier_normal(w)
"""
if isinstance(tensor, Variable):
xavier_normal(tensor.data, gain=gain)
return tensor
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
return tensor.normal_(0, std)
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def kaiming_uniform(tensor, a=0, mode='fan_in'):
"""Fills the input Tensor or Variable with values according to the method described in "Delving deep into
rectifiers: Surpassing human-level performance on ImageNet classification" - <NAME>. et al. (2015), using a uniform
distribution. The resulting tensor will have values sampled from :math:`U(-bound, bound)` where
:math:`bound = \sqrt{2 / ((1 + a^2) \\times fan\_in)} \\times \sqrt{3}`. Also known as He initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
a: the negative slope of the rectifier used after this layer (0 for ReLU by default)
mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the
weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass.
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.kaiming_uniform(w, mode='fan_in')
"""
if isinstance(tensor, Variable):
kaiming_uniform(tensor.data, a=a, mode=mode)
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain('leaky_relu', a)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return tensor.uniform_(-bound, bound)
def kaiming_normal(tensor, a=0, mode='fan_in'):
"""Fills the input Tensor or Variable with values according to the method described in "Delving deep into
rectifiers: Surpassing human-level performance on ImageNet classification" - <NAME>. et al. (2015), using a normal
distribution. The resulting tensor will have values sampled from :math:`N(0, std)` where
:math:`std = \sqrt{2 / ((1 + a^2) \\times fan\_in)}`. Also known as He initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
a: the negative slope of the rectifier used after this layer (0 for ReLU by default)
mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the
weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass.
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.kaiming_normal(w, mode='fan_out')
"""
if isinstance(tensor, Variable):
kaiming_normal(tensor.data, a=a, mode=mode)
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain('leaky_relu', a)
std = gain / math.sqrt(fan)
return tensor.normal_(0, std)
def orthogonal(tensor, gain=1):
"""Fills the input Tensor or Variable with a (semi) orthogonal matrix, as described in "Exact solutions to the
nonlinear dynamics of learning in deep linear neural networks" - <NAME>. et al. (2013). The input tensor must have
at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable, where n >= 2
gain: optional scaling factor
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.orthogonal(w)
"""
if isinstance(tensor, Variable):
orthogonal(tensor.data, gain=gain)
return tensor
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor[0].numel()
flattened = torch.Tensor(rows, cols).normal_(0, 1)
# Compute the qr factorization
q, r = torch.qr(flattened)
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
d = torch.diag(r, 0)
ph = d.sign()
q *= ph.expand_as(q)
# Pad zeros to Q (if rows smaller than cols)
if rows < cols:
padding = torch.zeros(rows, cols - rows)
if q.is_cuda:
q = torch.cat([q, padding.cuda()], 1)
else:
q = torch.cat([q, padding], 1)
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
def sparse(tensor, sparsity, std=0.01):
"""Fills the 2D input Tensor or Variable as a sparse matrix, where the non-zero elements will be drawn from
the normal distribution :math:`N(0, 0.01)`, as described in "Deep learning via
Hessian-free optimization" - <NAME>. (2010).
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
sparsity: The fraction of elements in each column to be set to zero
std: the standard deviation of the normal distribution used to generate the non-zero values
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.sparse(w, sparsity=0.1)
"""
if isinstance(tensor, Variable):
sparse(tensor.data, sparsity, std=std)
return tensor
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
tensor.normal_(0, std)
rows, cols = tensor.size(0), tensor.size(1)
num_zeros = int(math.ceil(cols * sparsity))
for col_idx in range(tensor.size(1)):
row_indices = list(range(rows))
random.shuffle(row_indices)
zero_indices = row_indices[:num_zeros]
for row_idx in zero_indices:
tensor[row_idx, col_idx] = 0
return tensor
|
11467711
|
import hyperchamber as hc
from hypergan.losses.base_loss import BaseLoss
class SoftmaxLoss(BaseLoss):
"""https://arxiv.org/abs/1704.06191"""
def _forward(self, d_real, d_fake):
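# Softmax GAN objective (per the paper linked above, as read from this code): ln_zb is
# the log partition over the real and fake score batches, d_target is the per-sample
# target probability for real samples in the discriminator objective, and g_target
# spreads the target mass over both batches for the generator.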
ln_zb = (((-d_real).exp().sum()+(-d_fake).exp().sum())+1e-12).log()
d_target = 1.0 / d_real.shape[0]
g_target = d_target / 2.0
g_loss = g_target * (d_fake.sum() + d_real.sum()) + ln_zb
d_loss = d_target * d_real.sum() + ln_zb
return [d_loss, g_loss]
|
11467742
|
from models.preliminary_contest import PreliminaryProblem
from flask import Blueprint
from main import db, config
from common.utils import unpack_argument
from utils import make_response
from models import PreliminaryContest, PreliminaryProblemType
from models.user import User
import math
router = Blueprint("preliminary", __name__)
@router.route("/contest/list", methods=["POST"])
@unpack_argument
def preliminary_contest_list(page: int):
query = db.session.query(
PreliminaryContest.title,
PreliminaryContest.id,
PreliminaryContest.duration
)
page_count = int(
math.ceil(query.count()/config.PRELIMINARY_CONTESTS_PER_PAGE))
result = query.slice((page-1)*config.PRELIMINARY_CONTESTS_PER_PAGE,
page*config.PRELIMINARY_CONTESTS_PER_PAGE).all()
return make_response(0, data=[
{
"title": item.title,
"id": item.id,
"duration": item.duration
} for item in result
], pageCount=page_count)
@router.route("/contest/detail", methods=["POST"])
@unpack_argument
def preliminary_contest_detail(id: int):
"""
{
"title":"比赛标题",
"description":"比赛描述",
"uploader":{
"uid":"",
"username":""
},
"duration":"",
"upload_time":"",
"problems":[]
}
"""
contest: PreliminaryContest = db.session.query(
PreliminaryContest.id,
PreliminaryContest.title,
PreliminaryContest.description,
PreliminaryContest.uploader,
PreliminaryContest.duration,
PreliminaryContest.upload_time,
User.username
).join(User, User.id == PreliminaryContest.uploader).filter(PreliminaryContest.id == id).one_or_none()
if not contest:
        return make_response(-1, message="Contest does not exist")
result = {
"title": contest.title,
"description": contest.description,
"uploader": {
"uid": contest.uploader,
"username": contest.username
},
"duration": contest.duration,
"upload_time": str(contest.upload_time),
"problems": []
}
problems = db.session.query(PreliminaryProblem).filter_by(
contest=contest.id).order_by(PreliminaryProblem.problem_id.asc()).all()
result["problems"] = [
{
"problemType": str(item.problem_type.value),
"problemID": item.problem_id,
"content": item.content,
"questions": item.questions,
"score": item.score
} for item in problems
]
return make_response(0, data=result)
|
11467753
|
def show(o):
"""
This tests that the proper method is called.
"""
o.msg()
def show2(o):
"""
This tests oo inheritance.
"""
o.print_msg()
class A(object):
def __init__(self):
self._a = 5
def msg(self):
print("A.msg()")
def print_msg(self):
self.msg()
print(self._a)
class B(A):
def msg(self):
print("B.msg()")
a = A()
show(a)
show2(a)
b = B()
show(b)
show2(b)
|
11467756
|
import einops
import tensorflow as tf
from einops import rearrange
from einops.layers.tensorflow import Rearrange
class Attention(tf.keras.layers.Layer):
def __init__(
self, dim, heads=8, dim_head=64, dropout=0.0, max_pos_emb=512, **kwargs
):
super(Attention, self).__init__(**kwargs)
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_q = tf.keras.layers.Dense(inner_dim, use_bias=False)
self.to_kv = tf.keras.layers.Dense(inner_dim * 2, use_bias=False)
self.to_out = tf.keras.layers.Dense(dim)
self.max_pos_emb = max_pos_emb
self.rel_pos_emb = tf.keras.layers.Embedding(2 * max_pos_emb + 1, dim_head)
self.dropout = tf.keras.layers.Dropout(dropout)
def call(self, inputs, context=None, mask=None, context_mask=None):
n = inputs.shape[-2]
heads = self.heads
max_pos_emb = self.max_pos_emb
if context is None:
has_context = False
context = inputs
else:
has_context = True
kv = tf.split(self.to_kv(context), num_or_size_splits=2, axis=-1)
q, k, v = (self.to_q(inputs), *kv)
q, k, v = map(
lambda t: rearrange(t, "b n (h d) -> b h n d", h=heads), (q, k, v)
)
dots = tf.einsum("b h i d, b h j d -> b h i j", q, k) * self.scale
seq = tf.range(n)
dist = rearrange(seq, "i -> i ()") - rearrange(seq, "j -> () j")
dist = (
tf.clip_by_value(
dist, clip_value_min=-max_pos_emb, clip_value_max=max_pos_emb
)
+ max_pos_emb
)
rel_pos_emb = self.rel_pos_emb(dist)
pos_attn = tf.einsum("b h n d, n r d -> b h n r", q, rel_pos_emb) * self.scale
dots = dots + pos_attn
        if mask is not None or context_mask is not None:
            if mask is None:
                mask = tf.ones(tf.shape(inputs)[:2])
            if not has_context:
                if context_mask is None:
                    context_mask = mask
            else:
                if context_mask is None:
                    context_mask = tf.ones(tf.shape(context)[:2])
            mask_value = -tf.experimental.numpy.finfo(dots.dtype).max
            mask = rearrange(mask, "b i -> b () i ()") * rearrange(
                context_mask, "b j -> b () () j"
            )
            # keep attended positions and push masked positions to a large negative value
            dots = tf.where(tf.cast(mask, tf.bool), dots, mask_value)
attn = tf.nn.softmax(dots, axis=-1)
out = tf.einsum("b h i j, b h j d -> b h i d", attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
out = self.to_out(out)
return self.dropout(out)
|
11467761
|
import docassemble.base.config
import docassemble.webapp.user_database
import sys
if __name__ == "__main__":
docassemble.base.config.load()
if len(sys.argv) > 1:
db_config = sys.argv[1]
else:
db_config = 'db'
print(docassemble.webapp.user_database.alchemy_url(db_config))
|
11467801
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_mapr_user(host):
u = host.user('mapr')
assert u.exists
assert u.name == "mapr"
assert u.group == "mapr"
for g in ['mapr']:
assert g in u.groups
assert u.gid == 5000
home = host.file("/home/mapr")
assert home.is_directory
assert home.user == "mapr"
assert home.group == "mapr"
|
11467819
|
import os
import requests
import zipfile
########################################
def extract(url, base, path, name):
if not os.path.exists(base + "/zips/" + name + ".zip"):
print(f"Downloading from {url}")
r = requests.get(url)
with open(base + "/zips/" + name + ".zip",'wb') as f:
f.write(r.content)
if not os.path.exists(base + path):
os.makedirs(base + path)
with zipfile.ZipFile(base + "/zips/" + name + ".zip", 'r') as zip_ref:
zip_ref.extractall(base + path)
print("Saved extracted files at " + base + path)
#########################################
def get_data(path):
base = path + "/data"
if not os.path.exists(base + "/zips"):
os.makedirs(base + "/zips")
urls = ["https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Val_mscoco.zip",
"https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Train_mscoco.zip",
"http://nlp.stanford.edu/data/glove.6B.zip",
"https://filebox.ece.vt.edu/~jiasenlu/codeRelease/vqaRelease/train_only/data_train_val.zip",]
paths = ["/val_annotations", "/train_ques", "/glove", "/image_data"]
for i in range(4):
extract(urls[i], base, paths[i], str(i))
if not os.path.exists(base + '/ckpts'):
os.makedirs(base + '/ckpts')
|
11467827
|
import unittest
import data
class TestCora(unittest.TestCase):
def setUp(self):
self.A, self.X, self.Y = data.parse_cora()
def test_A_symmetry(self):
self.assertTrue( (self.A == self.A.T).all() )
def test_A_min(self):
self.assertTrue( self.A.min() >= 0.0 )
def test_A_max(self):
self.assertTrue( self.A.max() <= 1.0 )
def test_max_degree(self):
self.assertTrue(self.A.sum(0).max() == 168.0)
self.assertTrue(self.A.sum(1).max() == 168.0)
def test_no_self_loops(self):
self_loops = False
for i in range(self.A.shape[0]):
self_loops = self_loops or self.A[i,i] != 0.0
self.assertFalse(self_loops)
class TestSparseCora(TestCora):
def setUp(self):
self.A, self.X, self.Y = data.parse_cora_sparse()
    def test_A_symmetry(self):
        # A symmetry check for the sparse representation is not implemented yet
        self.skipTest("symmetry test not implemented for sparse A")
|
11467832
|
from MDRSREID.DataLoaders.Datasets import Dataset
from MDRSREID.utils.get_files_by_pattern import get_files_by_pattern
import os.path as osp
class DukeMTMCreID(Dataset):
def __init__(self,
cfg=None,
mode=None,
domain=None,
name=None,
authority=None,
train_type=None,
items=None):
super(DukeMTMCreID, self).__init__(cfg)
self.cfg = cfg
self.num_cam = 8
self.train_type = train_type # Supervised or Unsupervised
self.mode = mode # for transform
self.domain = domain
self.dataset_root = osp.join(cfg.dataset.root, name)
self.authority = authority
if cfg.dataset.use_occlude_duke:
self.im_root = 'Occluded_Duke'
else:
self.im_root = 'DukeMTMC-reID'
self.train_dir = osp.join(self.im_root, 'bounding_box_train')
self.query_dir = osp.join(self.im_root, 'query')
self.gallery_dir = osp.join(self.im_root, 'bounding_box_test')
self.im_authority = {
'train': {
'dir': self.train_dir,
'pattern': '{}/bounding_box_train/*.jpg'.format(self.im_root),
'map_label': True},
'query': {
'dir': self.query_dir,
'pattern': '{}/query/*.jpg'.format(self.im_root),
'map_label': False},
'gallery': {
'dir': self.gallery_dir,
'pattern': '{}/bounding_box_test/*.jpg'.format(self.im_root),
'map_label': False},
}
# all im path in a list
self.im_path = sorted(get_files_by_pattern(root=self.dataset_root,
pattern=self.im_authority[self.authority]['pattern'],
strip_root=True)
)
self._id2label = {_id: idx for idx, _id in enumerate(self.unique_ids)}
        if items is None:
            self.items = self.get_items()
        else:
            self.items = items
@staticmethod
def id(file_path):
"""
:param file_path: unix style file path
:return: person id
"""
return int(file_path.replace('\\', '/').split('/')[-1].split('_')[0])
@staticmethod
def camera(file_path):
"""
:param file_path: unix style file path
:return: camera id
"""
return int(file_path.replace('\\', '/').split('/')[-1].split('_')[1][1])
@property
def ids(self):
"""
:return: person id list corresponding to dataset image paths
"""
return [self.id(path) for path in self.im_path]
@property
def unique_ids(self):
"""
:return: unique person ids in ascending order
"""
return sorted(set(self.ids))
@property
def num_ids(self):
"""
:return: unique person ids number
"""
return len(self.unique_ids)
@property
def cameras(self):
"""
:return: camera id list corresponding to dataset image paths
"""
return [self.camera(path) for path in self.im_path]
def get_items(self):
"""
:return: get the items:
'im_path':
'label':
'cam':
The label may be index or not by 'map_label'
"""
if self.im_authority[self.authority]['map_label'] is False:
items = {
i:
{
'im_path': self.im_path[i],
'label': self.id(self.im_path[i]),
'cam': self.camera(self.im_path[i])
}
for i in range(len(self.im_path))}
else:
items = {
i:
{
'im_path': self.im_path[i],
'label': self._id2label[self.id(self.im_path[i])],
'cam': self.camera(self.im_path[i])
}
for i in range(len(self.im_path))}
return items
def _get_ps_label_path(self, im_path):
"""
:param im_path: the ps_label path
:return:
"""
cfg = self.cfg
if self.authority == 'train_market1501_style':
path = '{}_ps_label/bounding_box_train/{}'.format(self.im_root, osp.basename(im_path))
else:
path = im_path.replace(self.im_root, self.im_root + '_ps_label')
path = path.replace('.jpg', '.png')
path = osp.join(self.dataset_root, path)
return path
def _get_pose_landmark_mask(self, im_path):
path = im_path.replace(self.im_root, self.im_root + '/heatmaps')
path = path.replace('.jpg', '.npy')
path = osp.join(self.dataset_root, path)
return path
def _get_test_pose_path(self, im_path):
path = im_path.replace(self.im_root + '/' + self.authority,
self.im_root + '/test_pose_storage/' + self.authority + '/sep-json')
path = path.replace('.jpg', '.json')
path = osp.join(self.dataset_root, path)
return path
def _check_before_get_im_path(self):
"""
:return:
"""
self._check_dir = osp.join(self.dataset_root, self.im_authority[self.authority]['dir']).replace('\\', '/')
if not osp.exists(self.dataset_root):
raise RuntimeError("'{}' is not available".format(self.dataset_root))
if not osp.exists(self._check_dir):
raise RuntimeError("'{}' is not available".format(self._check_dir))
print("check {} dataset success!!".format(self.authority))
|
11467860
|
import numpy as np
import torch
from torchvision.transforms import RandomVerticalFlip, RandomHorizontalFlip, RandomCrop
from torchvision.transforms import ColorJitter, ToTensor
class GlobalContrastNormalize(object):
"""
Code adapted from https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/expr/preprocessing.py#L16
"""
def __init__(self, scale=1., subtract_mean=True,
use_std=False, sqrt_bias=0., min_divisor=1e-8):
self.scale = scale
self.subtract_mean = subtract_mean
self.use_std = use_std
self.sqrt_bias = sqrt_bias
self.min_divisor = min_divisor
def __call__(self, img_tensor):
if self.subtract_mean:
mean = torch.mean(img_tensor)
img_tensor -= mean
if self.use_std:
img_var = torch.var(img_tensor)
else:
img_var = torch.sum(torch.pow(img_tensor, 2.0))
normalizer = np.sqrt(self.sqrt_bias + img_var) / self.scale
normalizer = 1.0 if normalizer < self.min_divisor else normalizer
return img_tensor / normalizer
train_data_transform = [
RandomCrop(size=32, padding=4),
RandomHorizontalFlip(p=0.5),
RandomVerticalFlip(p=0.5),
ColorJitter(hue=0.1, brightness=0.1),
ToTensor(),
# https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/datasets/make_cifar10_gcn_whitened.py#L19
GlobalContrastNormalize(scale=55.0)
]
val_data_transform = [
RandomHorizontalFlip(p=0.5),
RandomVerticalFlip(p=0.5),
ToTensor(),
# https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/datasets/make_cifar10_gcn_whitened.py#L58
GlobalContrastNormalize(scale=55.0)
]
test_data_transform = val_data_transform
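# Usage sketch (an assumption, not from the original training code): wrap the lists above
# with torchvision.transforms.Compose before handing them to a dataset, e.g.
#   from torchvision.transforms import Compose
#   transform = Compose(train_data_transform)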
|
11467875
|
from .fixtures import get_product, paddle_client # NOQA: F401
def test_list_products(paddle_client, get_product): # NOQA: F811
response = paddle_client.list_products()
assert 'count' in response
assert 'total' in response
for product in response['products']:
assert 'id' in product
assert 'name' in product
assert 'description' in product
assert 'base_price' in product
assert 'sale_price' in product
assert 'screenshots' in product
assert 'icon' in product
|
11467915
|
from . import *
from pya import *
class Contra_DC_SWG_segmented(pya.PCellDeclarationHelper):
"""
Author: <NAME>
<EMAIL>
"""
def __init__(self):
# Important: initialize the super class
super(Contra_DC_SWG_segmented, self).__init__()
TECHNOLOGY = get_technology_by_name('EBeam')
# declare the parameters
self.param("number_of_periods", self.TypeInt, "Number of grating periods", default = 300)
self.param("grating_period", self.TypeDouble, "Sub-wavelength period (microns)", default = 0.24)
self.param("cdc_period", self.TypeDouble, "Pertrubation period (microns)", default = 0.464)
self.param("gap", self.TypeDouble, "Minimum gap (microns)", default = 0.1)
self.param("corrugation_width", self.TypeDouble, "Waveguide Corrugration width (microns)", default = 0.12)
self.param("wg1_width", self.TypeDouble, "Waveguide 1 width", default = 0.5)
self.param("wg2_width", self.TypeDouble, "Waveguide 2 width", default = 0.38)
self.param("duty", self.TypeDouble, "Duty cycle (0 to 1)", default = 0.5)
self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide'])
self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
# self.param("textl", self.TypeLayer, "Text Layer", default = LayerInfo(10, 0))
def display_text_impl(self):
# Provide a descriptive text for the cell
return "Contra_DC_SWG%s-%.3f" % \
(self.number_of_periods, self.grating_period)
def coerce_parameters_impl(self):
pass
def can_create_from_shape(self, layout, shape, layer):
return False
def produce_impl(self):
# fetch the parameters
dbu = self.layout.dbu
ly = self.layout
shapes = self.cell.shapes
LayerSi = self.layer
LayerSiN = ly.layer(LayerSi)
LayerPinRecN = ly.layer(self.pinrec)
LayerDevRecN = ly.layer(self.devrec)
from SiEPIC.extend import to_itype
N = self.number_of_periods
grating_period = int(round(self.grating_period/dbu))
cdc_period = int(round(self.cdc_period/dbu))
misalignment = 0
# Determine the period such that the waveguide length is as desired. Slight adjustment to period
N_boxes = N
# Draw the Bragg grating:
box_width = int(round(grating_period*self.duty))
w1 = self.wg1_width / dbu
half_w1 = w1/2
w2 = self.wg2_width / dbu
half_w2 = w2/2
w = self.corrugation_width / dbu
half_w = w/2
gap = int(round(self.gap/dbu))
vertical_offset = int(round(self.wg2_width/2/dbu))+2*gap+int(round(self.wg1_width/2/dbu))+int(round(w))
t = Trans(Trans.R0, to_itype(0,dbu),vertical_offset)
for i in range(0,N_boxes+1):
if i%2 == True:
x = int(round((i * grating_period - box_width/2)))
box1_a = Box(x, -half_w1, x + box_width, half_w1)
shapes(LayerSiN).insert(box1_a)
box2_a = Box(x+grating_period, -half_w2, x + grating_period+box_width, half_w2).transformed(t)
shapes(LayerSiN).insert(box2_a)
else:
x = int(round((i * grating_period - box_width/2)))
box1_b = Box(x, -half_w1, x + box_width, half_w1)
shapes(LayerSiN).insert(box1_b)
box2_b = Box(x+grating_period, -half_w2, x +grating_period+ box_width, half_w2).transformed(t)
shapes(LayerSiN).insert(box2_b)
# compensate length of SWG boxes vs cdc boxes
x_cdc = int(round(N_boxes * cdc_period)/2)
xk = int(round(N_boxes * grating_period))
N_cdc_boxes = 2*int(round((xk - x_cdc)/cdc_period))
print(N_cdc_boxes)
for i in range(0,N_boxes+1+N_cdc_boxes):
if i%2 == True:
x_cdc = int(round((i * cdc_period/2 - box_width/2)))
boxw_a = Box(x_cdc, -half_w1-gap, x_cdc + cdc_period/2, -w-half_w1-gap,)
shapes(LayerSiN).insert(boxw_a)
boxw_a = Box(x_cdc, half_w2+gap, x_cdc + cdc_period/2, w+half_w2+gap,).transformed(t)
shapes(LayerSiN).insert(boxw_a)
else:
x_cdc = int(round((i * cdc_period/2 - box_width/2)))
boxw_a = Box(x_cdc, half_w1+gap, x_cdc +cdc_period/2, w+half_w1+gap,)
shapes(LayerSiN).insert(boxw_a)
# missing periods due to misalignments
box_final = Box(x+grating_period, -half_w1, x +grating_period+ box_width, half_w1)
shapes(LayerSiN).insert(box_final)
box_final = Box(-box_width/2, -half_w2, box_width/2, half_w2).transformed(t)
shapes(LayerSiN).insert(box_final)
# Create the pins on the waveguides, as short paths:
from SiEPIC._globals import PIN_LENGTH as pin_length
w = to_itype(self.wg1_width,dbu)
t = Trans(Trans.R0, to_itype(-box_width/2,dbu*1000),0)
pin = Path([Point(pin_length/2, 0), Point(-pin_length/2, 0)], w)
pin_t = pin.transformed(t)
shapes(LayerPinRecN).insert(pin_t)
text = Text ("pin1", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
w = to_itype(self.wg2_width,dbu)
t = Trans(Trans.R0, to_itype(-box_width/2,dbu*1000),vertical_offset)
pin = Path([Point(pin_length/2, 0), Point(-pin_length/2, 0)], w)
pin_t = pin.transformed(t)
shapes(LayerPinRecN).insert(pin_t)
text = Text ("pin2", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
w = to_itype(self.wg2_width,dbu)
t = Trans(Trans.R0, to_itype(x+grating_period+box_width,dbu*1000),vertical_offset)
pin = Path([Point(-pin_length/2, 0), Point(pin_length/2, 0)], w)
pin_t = pin.transformed(t)
shapes(LayerPinRecN).insert(pin_t)
text = Text ("pin3", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
w = to_itype(self.wg1_width,dbu)
t = Trans(Trans.R0, to_itype(x+grating_period+box_width,dbu*1000),0)
pin = Path([Point(-pin_length/2, 0), Point(pin_length/2, 0)], w)
pin_t = pin.transformed(t)
shapes(LayerPinRecN).insert(pin_t)
text = Text ("pin4", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
|
11467922
|
import tensorflow as tf
import numpy as np
import math
import time
from pixel_cnn_pp.nn import *
def lrelu(x, rate=0.1):
# return tf.nn.relu(x)
return tf.maximum(tf.minimum(x * rate, 0), x)
def fc_lrelu(inputs, num_outputs):
fc = tf.contrib.layers.fully_connected(inputs, num_outputs,
weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity)
fc = lrelu(fc)
return fc
def mlp_discriminator(x, reuse=False):
with tf.variable_scope('d_net') as vs:
if reuse:
vs.reuse_variables()
x = tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])])
fc1 = fc_lrelu(x, 512)
fc2 = fc_lrelu(fc1, 512)
fc3 = tf.contrib.layers.fully_connected(fc2, 1, activation_fn=tf.identity)
return fc3
class ConvolutionalEncoder(object):
def __init__(self, X, reg_type, latent_dim, z=None):
'''
This is the 6-layer architecture for Convolutional Autoencoder
mentioned in the original paper:
Stacked Convolutional Auto-Encoders for Hierarchical Feature Extraction
Note that only the encoder part is implemented as PixelCNN is taken
as the decoder.
'''
self.x = X
conv1 = conv2d(X, 64, [4, 4], [2, 2], name='encoder_conv1')
conv1 = lrelu(conv1)
conv2 = conv2d(conv1, 128, [4, 4], [2, 2], name='encoder_conv2')
conv2 = lrelu(conv2)
conv3 = conv2d(conv2, 256, [4, 4], [2, 2], name='encoder_conv3')
conv3 = lrelu(conv3)
conv3 = tf.reshape(conv3, [-1, np.prod(conv3.get_shape().as_list()[1:])])
fc1 = dense(conv3, 512, name='encoder_fc1')
fc1 = lrelu(fc1)
self.mean = dense(fc1, latent_dim, name='encoder_mean')
self.stddev = tf.nn.sigmoid(dense(fc1, latent_dim, name='encoder_stddev'))
self.stddev = tf.maximum(self.stddev, 0.01)
self.pred = self.mean + tf.multiply(self.stddev,
tf.random_normal(tf.stack([tf.shape(X)[0], latent_dim])))
if "elbo" in reg_type:
self.reg_loss = tf.reduce_sum(-tf.log(self.stddev) + 0.5 * tf.square(self.stddev) +
0.5 * tf.square(self.mean) - 0.5)
elif "2norm" in reg_type:
self.reg_loss = tf.reduce_sum(0.5 * tf.square(self.pred))
elif "center" in reg_type:
self.reg_loss = tf.reduce_sum(-tf.log(self.stddev) + 0.5 * tf.square(self.mean))
elif "elbo0_1" in reg_type:
self.reg_loss = 0.1 * tf.reduce_sum(-tf.log(self.stddev) + 0.5 * tf.square(self.stddev) +
0.5 * tf.square(self.mean) - 0.5)
elif "no_reg" in reg_type:
self.reg_loss = 0.0 # Add something for stability
elif "stein" in reg_type:
stein_grad = tf.stop_gradient(self.tf_stein_gradient(self.pred, 1.0))
self.reg_loss = -10000.0 * tf.reduce_sum(tf.multiply(self.pred, stein_grad))
elif "adv" in reg_type:
true_samples = tf.random_normal(tf.stack([tf.shape(X)[0], latent_dim]))
self.d = mlp_discriminator(true_samples)
self.d_ = mlp_discriminator(self.pred, reuse=True)
epsilon = tf.random_uniform([], 0.0, 1.0)
x_hat = epsilon * true_samples + (1 - epsilon) * self.pred
d_hat = mlp_discriminator(x_hat, reuse=True)
ddx = tf.gradients(d_hat, x_hat)[0]
ddx = tf.sqrt(tf.reduce_sum(tf.square(ddx), axis=1))
self.d_grad_loss = tf.reduce_mean(tf.square(ddx - 1.0) * 10.0)
self.d_loss_x = -tf.reduce_mean(self.d)
self.d_loss_e = tf.reduce_mean(self.d_)
self.d_loss = self.d_loss_x + self.d_loss_e + self.d_grad_loss
self.d_vars = [var for var in tf.global_variables() if 'd_net' in var.name]
self.d_train = tf.train.AdamOptimizer(learning_rate=0.00002, beta1=0.5, beta2=0.9).minimize(self.d_loss,
var_list=self.d_vars)
tf.summary.scalar('d_loss_x', self.d_loss_x)
tf.summary.scalar('d_loss_e', self.d_loss_e)
self.reg_loss = -tf.reduce_mean(self.d_)
self.reg_loss *= 100
elif "moment" in reg_type:
mean = tf.reduce_mean(self.pred, axis=0, keep_dims=True)
var = tf.reduce_mean(tf.square(self.pred - mean), axis=0)
mean_loss = tf.reduce_mean(tf.abs(mean))
var_loss = tf.reduce_mean(tf.abs(var - 1.0))
tf.summary.scalar('mean', mean_loss)
tf.summary.scalar('variance', var_loss)
self.reg_loss = mean_loss + var_loss
elif "kernel" in reg_type:
true_samples = tf.random_normal(tf.stack([200, latent_dim]))
pred_kernel = self.compute_kernel(self.pred, self.pred)
sample_kernel = self.compute_kernel(true_samples, true_samples)
mix_kernel = self.compute_kernel(self.pred, true_samples)
self.reg_loss = tf.reduce_mean(pred_kernel) + tf.reduce_mean(sample_kernel) - 2 * tf.reduce_mean(mix_kernel)
self.reg_loss *= 100000.0
else:
print("Unknown regularization %s" % str(reg_type))
exit(0)
self.elbo_loss = tf.reduce_mean(-tf.log(self.stddev) + 0.5 * tf.square(self.stddev) +
0.5 * tf.square(self.mean) - 0.5)
def compute_kernel(self, x, y, sigma_sqr=1.0):
x_size = tf.shape(x)[0]
y_size = tf.shape(y)[0]
dim = tf.shape(x)[1]
tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
kernel = tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / 2.0 / sigma_sqr)
return kernel
# x_sample is input of size (batch_size, dim)
    def tf_stein_gradient(self, x_sample, sigma_sqr):
x_size = x_sample.get_shape()[0].value
x_dim = x_sample.get_shape()[1].value
x_sample = tf.reshape(x_sample, [x_size, 1, x_dim])
sample_mat_y = tf.tile(x_sample, (1, x_size, 1))
sample_mat_x = tf.transpose(sample_mat_y, perm=(1, 0, 2))
kernel_matrix = tf.exp(-tf.reduce_sum(tf.square(sample_mat_x - sample_mat_y), axis=2) / (2 * sigma_sqr * x_dim))
# np.multiply(-self.kernel(x, y), np.divide(x - y, self.sigma_sqr))./
tiled_kernel = tf.tile(tf.reshape(kernel_matrix, [x_size, x_size, 1]), [1, 1, x_dim])
kernel_grad_matrix = tf.multiply(tiled_kernel, tf.div(sample_mat_y - sample_mat_x, sigma_sqr * x_dim))
gradient = tf.reshape(-x_sample, [x_size, 1, x_dim]) # Gradient of standard Gaussian
tiled_gradient = tf.tile(gradient, [1, x_size, 1])
weighted_gradient = tf.multiply(tiled_kernel, tiled_gradient)
return tf.div(tf.reduce_sum(weighted_gradient, axis=0) +
tf.reduce_sum(kernel_grad_matrix, axis=1), x_size)
"""
class ComputeLL:
def __init__(self, latent_dim):
self.mean = tf.placeholder(tf.float32, shape=(None, latent_dim))
self.stddev = tf.placeholder(tf.float32, shape=(None, latent_dim))
self.sample = tf.placeholder(tf.float32, shape=(None, latent_dim))
mu = tf.reshape(self.mean, shape=tf.pack([tf.shape(self.mean)[0], 1, latent_dim]))
mu = tf.tile(mu, tf.pack([1, tf.shape(self.sample)[0], 1]))
sig = tf.reshape(self.stddev, shape=tf.pack([tf.shape(self.stddev)[0], 1, latent_dim]))
sig = tf.tile(sig, tf.pack([1, tf.shape(self.sample)[0], 1]))
z = tf.reshape(self.sample, shape=tf.pack([1, tf.shape(self.sample)[0], latent_dim]))
z = tf.tile(z, tf.pack([tf.shape(self.mean)[0], 1, 1]))
coeff = tf.div(1.0 / math.sqrt(2 * math.pi), sig)
ll = coeff * tf.exp(-tf.div(tf.square(z - mu), 2 * tf.square(sig)))
ll = tf.reduce_prod(ll, axis=2)
self.prob = ll
def compute_mutual_information(data, args, sess, encoder_list, ll_compute):
print("Evaluating Mutual Information")
start_time = time.time()
num_batch = 1000
z_batch_cnt = 10 # This must divide num_batch
dist_batch_cnt = 10
assert num_batch % z_batch_cnt == 0
assert num_batch % dist_batch_cnt == 0
batch_size = args.batch_size * args.nr_gpu
sample_batches = np.zeros((num_batch*batch_size, args.latent_dim))
mean_batches = np.zeros((num_batch*batch_size, args.latent_dim))
stddev_batches = np.zeros((num_batch*batch_size, args.latent_dim))
for batch in range(num_batch):
x = data.next(args.batch_size * args.nr_gpu) # manually retrieve exactly init_batch_size examples
x = np.split(x, args.nr_gpu)
feed_dict = {encoder_list[i].x: x[i] for i in range(args.nr_gpu)}
result = sess.run([encoder.pred for encoder in encoder_list] +
[encoder.mean for encoder in encoder_list] +
[encoder.stddev for encoder in encoder_list], feed_dict=feed_dict)
sample = np.concatenate(result[0:args.nr_gpu], 0)
z_mean = np.concatenate(result[args.nr_gpu:args.nr_gpu*2], 0)
z_stddev = np.concatenate(result[args.nr_gpu*2:], 0)
sample_batches[batch*batch_size:(batch+1)*batch_size, :] = sample
mean_batches[batch*batch_size:(batch+1)*batch_size, :] = z_mean
stddev_batches[batch*batch_size:(batch+1)*batch_size, :] = z_stddev
z_batch_size = batch_size * z_batch_cnt
dist_batch_size = batch_size * dist_batch_cnt
prob_array = np.zeros((num_batch*batch_size, num_batch*batch_size), dtype=np.float)
for z_ind in range(num_batch // z_batch_cnt):
for dist_ind in range(num_batch // dist_batch_cnt):
mean = mean_batches[dist_ind*dist_batch_size:(dist_ind+1)*dist_batch_size, :]
stddev = stddev_batches[dist_ind*dist_batch_size:(dist_ind+1)*dist_batch_size, :]
sample = sample_batches[z_ind*z_batch_size:(z_ind+1)*z_batch_size, :]
probs = sess.run(ll_compute.prob, feed_dict={ll_compute.mean: mean,
ll_compute.stddev: stddev,
ll_compute.sample: sample})
prob_array[dist_ind*dist_batch_size:(dist_ind+1)*dist_batch_size, z_ind*z_batch_size:(z_ind+1)*z_batch_size] = probs
# print()
# print(np.sum(prob_array))
marginal = np.sum(prob_array, axis=0)
ratio = np.log(np.divide(np.diagonal(prob_array), marginal)) + np.log(num_batch*batch_size)
mutual_info = np.mean(ratio)
print("Mutual Information %f, time elapsed %fs" % (mutual_info, time.time() - start_time))
return mutual_info
"""
|
11467937
|
from FreeTAKServer.model.SpecificCoT.SendHealthCheck import SendHealthCheck
class SendHealthCheckController:
# TODO: deprecate this function
def __init__(self, RawCoT):
self.RawCoT = RawCoT
self.HealthCheck = SendHealthCheck()
self.HealthCheck.xmlString = RawCoT.xmlString
self.HealthCheck.clientInformation = RawCoT.clientInformation
def getObject(self):
return self.HealthCheck
|
11467960
|
import itertools
a = [1, 2, 3]
b = ["One", "Two"]
result1 = list(zip(a, b))
result2 = list(itertools.zip_longest(a, b))
print(result1)
print(result2)
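# Expected output:
# [(1, 'One'), (2, 'Two')]
# [(1, 'One'), (2, 'Two'), (3, None)]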
|
11467966
|
from __future__ import print_function
import csv
import distutils
import shutil
import pandas as pd
import os
import re
from collections import defaultdict
import six
import sys
from Bio import SeqIO
from tracerlib.tracer_func import process_chunk, find_possible_alignments
from tracerlib.core import Invar_cell
import glob
import pdb
import json
def makeOutputDir(output_dir_path):
if not os.path.exists(output_dir_path):
os.makedirs(output_dir_path)
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def clean_file_list(file_list):
return_list = []
trinity_pattern = re.compile(r"(.+)\.Trinity\.fasta")
for file_name in file_list:
clean_name = os.path.split(file_name)[1]
trinity_match = trinity_pattern.search(clean_name)
if trinity_match:
clean_name = trinity_match.group(1)
return_list.append(clean_name)
return (sorted(return_list))
def get_filename_and_locus(name):
name = name.split("_")
cell = name[0]
locus = "_".join(name[1:3])
return ([cell, locus])
def sort_locus_names(dictionary_to_sort):
for key, value in six.iteritems(dictionary_to_sort):
sorted_value = sorted(value)
dictionary_to_sort[key] = sorted_value
return (dictionary_to_sort)
def load_IMGT_seqs(file):
seqs = {}
with open(file, 'rU') as fh:
for record in SeqIO.parse(fh, 'fasta'):
seqs[record.id] = str(record.seq)
return (seqs)
def parse_IgBLAST(receptor, loci, output_dir, cell_name, raw_seq_dir, species,
seq_method, max_junc_len=50, invariant_seqs=None):
IMGT_seqs = dict()
# expecting_D = dict()
loci_for_segments = defaultdict(list)
# for locus in loci:
# expecting_D[locus] = False
for locus in loci:
seq_files = glob.glob(os.path.join(raw_seq_dir,
"{receptor}_{locus}_*.fa".format(
receptor=receptor,
locus=locus)))
for f in seq_files:
# if not f.endswith("_C.fa"):
segment_name = os.path.splitext(os.path.split(f)[1])[0]
IMGT_seqs[segment_name] = load_IMGT_seqs(f)
# if segment_name.split("_")[2] == 'D':
# expecting_D[locus] = True
loci_for_segments[segment_name.split("_")[2]].append(locus)
# segment_names = ['TRAJ', 'TRAV', 'TRBD', 'TRBJ', 'TRBV']
# IMGT_seqs = dict()
# for segment in segment_names:
# IMGT_seqs[segment] = load_IMGT_seqs("{}/{}.fa".format(imgt_seq_location, segment))
locus_names = ["_".join([receptor, x]) for x in loci]
all_locus_data = defaultdict(dict)
for locus in locus_names:
file = "{output_dir}/IgBLAST_output/{cell_name}_{locus}.IgBLASTOut".format(
output_dir=output_dir,
cell_name=cell_name, locus=locus)
if os.path.isfile(file):
igblast_result_chunks = split_igblast_file(file)
for chunk in igblast_result_chunks:
(query_name, chunk_details) = process_chunk(chunk)
if query_name is not None:
all_locus_data[locus][query_name] = chunk_details
else:
all_locus_data[locus] = None
cell = find_possible_alignments(all_locus_data, locus_names, cell_name,
IMGT_seqs, output_dir, species, seq_method,
invariant_seqs, loci_for_segments, receptor,
loci, max_junc_len)
return (cell)
def split_igblast_file(filename):
# code adapted from http://stackoverflow.com/questions/19575702/pythonhow-to-split-file-into-chunks-by-the-occurrence-of-the-header-word
token = '# IGBLASTN'
chunks = []
current_chunk = []
with open(filename) as fh:
for line in fh:
line = line.rstrip()
if line.startswith(token) and current_chunk and not line.startswith(
"Total "):
# if line starts with token and the current chunk is not empty
chunks.append(current_chunk[:]) # add not empty chunk to chunks
current_chunk = [] # make current chunk blank
# just append a line to the current chunk on each iteration
if not line.startswith("Total "):
current_chunk.append(line)
chunks.append(current_chunk) # append the last chunk outside the loop
return (chunks)
def check_binary(name, user_path=None):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
if user_path:
if is_exe(user_path):
return user_path
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, name)
if is_exe(exe_file):
return exe_file
raise OSError(
"Required binary not found: {name}. Please add to PATH or specify location in config file."
.format(name=name))
def parse_invariant_cells(filename):
invariant_cells = []
with open(filename) as fh:
json_dict = json.load(fh)
for c in json_dict:
invariant_cells.append(Invar_cell(c))
return invariant_cells
def read_colour_file(filename, return_used_list=False, receptor_name=None):
colour_map = dict()
used_colours = set()
with open(filename) as f:
for line in f:
line = line.rstrip()
receptor, locus, prod_colour, nonprod_colour = line.split(",")
d = {locus: (prod_colour, nonprod_colour)}
if receptor in colour_map:
colour_map[receptor].update(d)
else:
colour_map[receptor] = d
if receptor_name is not None and receptor == receptor_name:
used_colours.add(prod_colour)
elif receptor_name is None:
used_colours.add(prod_colour)
if return_used_list:
t = (colour_map, used_colours)
return t
else:
return colour_map
def write_colour_file(filename, colour_map):
sorted_receptors = sorted(colour_map.keys())
with open(filename, 'w') as f:
for receptor in sorted_receptors:
sorted_loci = sorted(colour_map[receptor].keys())
for l in sorted_loci:
colours = colour_map[receptor][l]
f.write(
"{},{},{},{}\n".format(receptor, l, colours[0], colours[1]))
|
11467973
|
import numpy as np
import copy
from scipy import sparse
from argoverse.map_representation.map_api import ArgoverseMap
class GraphExtractor(object):
def __init__(self, config, mode='train'):
self.am = ArgoverseMap()
self.config = config
self.mode = mode
def __del__(self):
del self.am
def dilated_nbrs(self, nbr, num_nodes, num_scales):
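        # Sketch of what follows: repeatedly squaring the sparse adjacency matrix links
        # nodes reachable along longer paths, producing one "dilated" neighborhood per scale.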
data = np.ones(len(nbr['u']), np.bool)
csr = sparse.csr_matrix((data, (nbr['u'], nbr['v'])), shape=(num_nodes, num_nodes))
mat = csr
nbrs = []
for i in range(1, num_scales):
mat = mat * mat
nbr = dict()
coo = mat.tocoo()
nbr['u'] = coo.row.astype(np.int16)
nbr['v'] = coo.col.astype(np.int16)
nbrs.append(nbr)
return nbrs
def extract(self, data):
"""Get a rectangle area defined by pred_range."""
x_min, x_max, y_min, y_max = self.config['pred_range']
radius = max(abs(x_min), abs(x_max)) + max(abs(y_min), abs(y_max))
lane_ids = self.am.get_lane_ids_in_xy_bbox(data['orig'][0], data['orig'][1], data['city'], radius)
lane_ids = copy.deepcopy(lane_ids)
"""Get all lane within self.config['pred_range'], convert centerline and polygon to rotated and biased"""
# what's polygon
lanes = dict()
for lane_id in lane_ids:
lane = self.am.city_lane_centerlines_dict[data['city']][lane_id]
lane = copy.deepcopy(lane)
centerline = np.matmul(data['rot'], (lane.centerline - data['orig'].reshape(-1, 2)).T).T
x, y = centerline[:, 0], centerline[:, 1]
if x.max() < x_min or x.min() > x_max or y.max() < y_min or y.min() > y_max:
continue
else:
"""Getting polygons requires original centerline"""
polygon = self.am.get_lane_segment_polygon(lane_id, data['city'])
polygon = copy.deepcopy(polygon)
lane.centerline = centerline
lane.polygon = np.matmul(data['rot'], (polygon[:, :2] - data['orig'].reshape(-1, 2)).T).T
lanes[lane_id] = lane
"""Lane feature: ctrs(position), feats(shape), turn, control, intersect"""
lane_ids = list(lanes.keys())
ctrs, feats, turn, control, intersect = [], [], [], [], []
for lane_id in lane_ids:
lane = lanes[lane_id]
ctrln = lane.centerline
num_segs = len(ctrln) - 1
ctrs.append(np.asarray((ctrln[:-1] + ctrln[1:]) / 2.0, np.float32))
feats.append(np.asarray(ctrln[1:] - ctrln[:-1], np.float32))
x = np.zeros((num_segs, 2), np.float32)
if lane.turn_direction == 'LEFT':
x[:, 0] = 1
elif lane.turn_direction == 'RIGHT':
x[:, 1] = 1
else:
pass
turn.append(x)
control.append(lane.has_traffic_control * np.ones(num_segs, np.float32))
intersect.append(lane.is_intersection * np.ones(num_segs, np.float32))
# -------------------------node_idcs---------------------
node_idcs = []
count = 0
for ctr in ctrs: # node_idcs: list, i-th element: i-th lane nodes ids
node_idcs.append(range(count, count + len(ctr)))
count += len(ctr)
num_nodes = count
# -------------------------lane_idcs---------------------
# lane[idc] = a means idc-th node belongs to the a-th lane
lane_idcs = []
for i, idcs in enumerate(node_idcs):
lane_idcs.append(i * np.ones(len(idcs), np.int64)) # TODO: what does lane_idcs do?
lane_idcs = np.concatenate(lane_idcs, 0)
# **********************************Map Related work***************************
# =========================================
# ==============Hdmap Graph Build==========
# =========================================
        # ------- in short: the *_pairs arrays below are lane-level relations; pre/suc/left/right are node-level --------
# ---------------------------pre and suc for lanes--------------------
pre_pairs, suc_pairs, left_pairs, right_pairs = [], [], [], []
for i, lane_id in enumerate(lane_ids):
lane = lanes[lane_id]
nbr_ids = lane.predecessors
if nbr_ids is not None:
for nbr_id in nbr_ids:
if nbr_id in lane_ids:
j = lane_ids.index(nbr_id)
pre_pairs.append([i, j])
nbr_ids = lane.successors
if nbr_ids is not None:
for nbr_id in nbr_ids:
if nbr_id in lane_ids:
j = lane_ids.index(nbr_id)
suc_pairs.append([i, j])
nbr_id = lane.l_neighbor_id
if nbr_id is not None:
if nbr_id in lane_ids:
j = lane_ids.index(nbr_id)
left_pairs.append([i, j])
nbr_id = lane.r_neighbor_id
if nbr_id is not None:
if nbr_id in lane_ids:
j = lane_ids.index(nbr_id)
right_pairs.append([i, j])
pre_pairs = np.asarray(pre_pairs, np.int16)
suc_pairs = np.asarray(suc_pairs, np.int16)
left_pairs = np.asarray(left_pairs, np.int16)
right_pairs = np.asarray(right_pairs, np.int16)
# ---------------------------pre and suc for nodes--------------------
pre, suc = dict(), dict()
for key in ['u', 'v']:
pre[key], suc[key] = [], []
for i, lane_id in enumerate(lane_ids):
lane = lanes[lane_id]
idcs = node_idcs[i]
pre['u'] += idcs[1:]
pre['v'] += idcs[:-1]
if lane.predecessors is not None:
for nbr_id in lane.predecessors:
if nbr_id in lane_ids:
j = lane_ids.index(nbr_id)
pre['u'].append(idcs[0])
pre['v'].append(node_idcs[j][-1]) # v is the pre of u, v is src, u is dest
suc['u'] += idcs[:-1]
suc['v'] += idcs[1:]
if lane.successors is not None:
for nbr_id in lane.successors:
if nbr_id in lane_ids:
j = lane_ids.index(nbr_id)
suc['u'].append(idcs[-1])
suc['v'].append(node_idcs[j][0])
pre['u'] = np.asarray(pre['u'], dtype=np.int16)
pre['v'] = np.asarray(pre['v'], dtype=np.int16)
suc['u'] = np.asarray(suc['u'], dtype=np.int16)
suc['v'] = np.asarray(suc['v'], dtype=np.int16)
# -------------------dilate pre and suc: opition 1--------------------
dilated_pre = [pre]
dilated_pre += self.dilated_nbrs(pre, num_nodes, self.config['num_scales'])
dilated_suc = [suc]
dilated_suc += self.dilated_nbrs(suc, num_nodes, self.config['num_scales'])
# --------------------build nodes left and right graph-----------------
        num_lanes = lane_idcs[-1].item() + 1
        # ctrs and feats are still per-lane lists here; stack them into (num_nodes, 2)
        # arrays before the node-to-node distance and angle computations below
        ctrs = np.concatenate(ctrs, 0)
        feats = np.concatenate(feats, 0)
        left, right = dict(), dict()
        dist = np.expand_dims(ctrs, axis=1) - np.expand_dims(ctrs, axis=0)
        dist = np.sqrt((dist ** 2).sum(2))
hi = np.arange(num_nodes).reshape(-1, 1).repeat(num_nodes, axis=1).reshape(-1)
wi = np.arange(num_nodes).reshape(1, -1).repeat(num_nodes, axis=0).reshape(-1)
row_idcs = np.arange(num_nodes)
pre_mat = np.zeros((num_lanes, num_lanes))
pre_mat[pre_pairs[:, 0], pre_pairs[:, 1]] = 1
suc_mat = np.zeros((num_lanes, num_lanes))
suc_mat[suc_pairs[:, 0], suc_pairs[:, 1]] = 1
pairs = left_pairs
if len(pairs) > 0:
# construct lane left graph
mat = np.zeros((num_lanes, num_lanes))
mat[pairs[:, 0], pairs[:, 1]] = 1
mat = (np.matmul(mat, pre_mat) + np.matmul(mat, suc_mat) + mat) > 0.5 # left lane's suc or pre lane is also self's left lane
# filter with distance
left_dist = dist.copy()
# if lane j is the lane i's left, then all nodes in lane j is the left of any node in lane i
mask = np.logical_not(mat[lane_idcs[hi], lane_idcs[wi]])
# set the distance between nodes that has no left relation are very vert large
left_dist[hi[mask], wi[mask]] = 1e6
# find the each node's nearest node
min_dist, min_idcs = left_dist.min(1), left_dist.argmin(1)
# if nearest node's distance > self.config['cross_dist'], then this node does not have left node
mask = min_dist < self.config['cross_dist']
            # if the angle to the nearest node is too big, then this node has no left node
ui = row_idcs[mask]
vi = min_idcs[mask]
f1 = feats[ui]
f2 = feats[vi]
t1 = np.arctan2(f1[:, 1], f1[:, 0])
t2 = np.arctan2(f2[:, 1], f2[:, 0])
dt = np.abs(t1 - t2)
m = dt > np.pi
dt[m] = np.abs(dt[m] - 2 * np.pi)
m = dt < 0.25 * np.pi
ui = ui[m]
vi = vi[m]
left['u'] = ui.astype(np.int16) # u is the idx of node that has left neighbor
left['v'] = vi.astype(np.int16) # v[i] is the idx of left neighbor of node u[i]
else:
left['u'] = np.zeros(0, np.int16)
left['v'] = np.zeros(0, np.int16)
pairs = right_pairs
if len(pairs) > 0:
mat = np.zeros((num_lanes, num_lanes))
mat[pairs[:, 0], pairs[:, 1]] = 1
mat = (np.matmul(mat, pre_mat) + np.matmul(mat, suc_mat) + mat) > 0.5
right_dist = dist.copy()
mask = np.logical_not(mat[lane_idcs[hi], lane_idcs[wi]])
right_dist[hi[mask], wi[mask]] = 1e6
min_dist, min_idcs = right_dist.min(1), right_dist.argmin(1)
mask = min_dist < self.config['cross_dist']
ui = row_idcs[mask]
vi = min_idcs[mask]
f1 = feats[ui]
f2 = feats[vi]
t1 = np.arctan2(f1[:, 1], f1[:, 0])
t2 = np.arctan2(f2[:, 1], f2[:, 0])
dt = np.abs(t1 - t2)
m = dt > np.pi
dt[m] = np.abs(dt[m] - 2 * np.pi)
m = dt < 0.25 * np.pi
ui = ui[m]
vi = vi[m]
right['u'] = ui.astype(np.int16)
right['v'] = vi.astype(np.int16)
else:
right['u'] = np.zeros(0, np.int16)
right['v'] = np.zeros(0, np.int16)
graph = dict()
graph['num_nodes'] = num_nodes
# map node feats
graph['ctrs'] = ctrs
graph['feats'] = feats
graph['turn'] = np.concatenate(turn, 0)
graph['control'] = np.concatenate(control, 0)
graph['intersect'] = np.concatenate(intersect, 0)
# map node graph
graph['pre'] = dilated_pre
graph['suc'] = dilated_suc
graph['left'] = left
graph['right'] = right
# lane pairs
graph['lane_idcs'] = lane_idcs
graph['pre_pairs'] = pre_pairs
graph['suc_pairs'] = suc_pairs
graph['left_pairs'] = left_pairs
graph['right_pairs'] = right_pairs
return graph
'''
name; type; shape; meaning
suppose num of lanes is N, num of nodes is M
---------------map nodes level-------------
num_nodes: int; 1; num of nodes
=================feature===============
ctrs: ndarray; (M, 2); position
feats: ndarray; (M, 2); shape
turn: ndarray; (M, 2); turn type, [i, 0] = 1: left turn, [i, 1] = 1, right turn
control: ndarray; (M,); has traffic control or not, [i] = 1: has control, [i] = 0: no control
intersect: ndarray; (M,); in intersect or not, [i] = 1, in, [i] = 0, not in
==================graph================
***************************************
pre: [dict]; (dilated neighbors adjacency matrix)
pre[i]: dict of neighbors within 2^i steps
pre[i]['u']: ; array of nodes idx
pre[i]['v']: ; array of nodes idx
pre[i]['v'][j] is the pre within 2^i step neighbor node of pre[i]['u'][j]
***************************************
suc: [dict];
suc[i]: dict of neighbors within 2^i steps
suc[i]['u']: ; array of nodes idx
suc[i]['v']: ; array of nodes idx
suc[i]['v'][j] is the suc within 2^i step neighbor node of suc[i]['u'][j]
***************************************
left: dict;
left['u']; ndarray; (None,); array of nodes idx
left['v']; ndarray; (None,); array of nodes idx
left['v'][i] is the left node of left['u'][i]
***************************************
right: dict;
right['u']; ndarray; (None,); array of nodes idx
right['v']; ndarray; (None,); array of nodes idx
right['v'][i] is the right node of right['u'][i]
---------------middle level-------------
lane_idcs: ndarray; (M,); [i] = n means node with id i belongs to lane with id n
---------------lane level---------------
pre_pairs; ndarray; (N, 2); [i, 1] is the pre lane of [i, 0]
suc_pairs; ndarray; (N, 2); [i, 1] is the suc lane of [i, 0]
left_pairs; ndarray; (N, 2); [i, 1] is the left lane of [i, 0]
right_pairs; ndarray; (N, 2); [i, 1] is the right lane of [i, 0]
'''
|
11467991
|
from problems.tsp.problem_tsp import TSP
from problems.vrp.problem_vrp import CVRP, SDVRP
from problems.op.problem_op import OP
from problems.pctsp.problem_pctsp import PCTSPDet, PCTSPStoch
from problems.pdp.problem_pdp import PDP
|
11467998
|
from math import exp
from tempfile import NamedTemporaryFile
import cv2
import torch
from torch.nn.functional import conv2d
def gaussian(window_size, sigma):
"""Gaussian window.
https://en.wikipedia.org/wiki/Window_function#Gaussian_window
"""
_exp = [exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]
gauss = torch.Tensor(_exp)
return gauss / gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
def _ssim(img1, img2, window, window_size, channel, size_average=True):
padding_size = window_size // 2
mu1 = conv2d(img1, window, padding=padding_size, groups=channel)
mu2 = conv2d(img2, window, padding=padding_size, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = conv2d(img1 * img1, window, padding=padding_size, groups=channel) - mu1_sq
sigma2_sq = conv2d(img2 * img2, window, padding=padding_size, groups=channel) - mu2_sq
sigma12 = conv2d(img1 * img2, window, padding=padding_size, groups=channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
_ssim_quotient = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2))
_ssim_divident = ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
ssim_map = _ssim_quotient / _ssim_divident
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
def ssim(img1, img2, window_size=11, size_average=True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
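# Minimal usage sketch (shapes are illustrative assumptions, not from the original code):
#   img_a = torch.rand(1, 3, 64, 64)
#   img_b = torch.rand(1, 3, 64, 64)
#   score = ssim(img_a, img_b)  # scalar tensor when size_average=True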
def psnr(img1, img2):
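    # The peak value of 2.0 below assumes inputs scaled to [-1, 1]; use 1.0 for [0, 1] images.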
mse = torch.mean((img1 - img2) ** 2)
return 20 * torch.log10(2.0 / torch.sqrt(mse))
def mjpeg(x):
"""
Write each video to disk and re-read it from disk.
Input: (N, 3, L, H, W)
Output: (N, 3, L, H, W)
"""
y = torch.zeros(x.size())
_, _, _, height, width = x.size()
for n in range(x.size(0)):
tempfile = NamedTemporaryFile(suffix=".avi")
vout = cv2.VideoWriter(tempfile.name, cv2.VideoWriter_fourcc(*'MJPG'),
20.0, (width, height))
for l in range(x.size(2)):
image = x[n, :, l, :, :] # (3, H, W)
image = torch.clamp(image.permute(1, 2, 0), min=-1.0, max=1.0)
vout.write(((image + 1.0) * 127.5).detach().cpu().numpy().astype("uint8"))
vout.release()
vin = cv2.VideoCapture(tempfile.name)
for l in range(x.size(2)):
_, frame = vin.read() # (H, W, 3)
frame = torch.tensor(frame / 127.5 - 1.0)
y[n, :, l, :, :] = frame.permute(2, 0, 1)
tempfile.close()
return y.to(x.device)
|
11468051
|
from .account import *
from .instrument import *
from .order import *
from .position import *
from .pricing import *
from .trade import *
from .transaction import *
from .user import *
__all__ = (account.__all__ +
           instrument.__all__ +
           order.__all__ +
           position.__all__ +
           pricing.__all__ +
           trade.__all__ +
           transaction.__all__ +
           user.__all__)
|
11468054
|
import logging
import re
from django.conf import settings
from oldp.apps.courts.models import Court
from oldp.apps.courts.processing import CourtProcessingStep
logger = logging.getLogger(__name__)
class ProcessingStep(CourtProcessingStep):
description = 'Assign jurisdiction'
def process(self, court: Court) -> Court:
"""
Assign jurisdiction and level_of_appeal with regex on court name
"""
# Test all types with regex
for name in settings.COURT_JURISDICTIONS:
if re.compile(settings.COURT_JURISDICTIONS[name], re.IGNORECASE).search(court.name):
court.jurisdiction = name
break
# Test all types with regex
for name in settings.COURT_LEVELS_OF_APPEAL:
if re.compile(settings.COURT_LEVELS_OF_APPEAL[name], re.IGNORECASE).search(court.name):
court.level_of_appeal = name
break
return court
|
11468089
|
import numpy as np
from matplotlib import pyplot as plt
import os
##### Functions that calculate things #####
def local_maxima_detector(y):
'''
Finds local maxima in ordered data y.
:param y: 1d numpy float array
:return maxima: 1d numpy bool array
*maxima* is *True* at a local maximum, *False* otherwise.
This function makes no attempt to reject spurious maxima of various sorts.
That task is left to other functions.
'''
length = y.size
greater_than_follower = np.zeros(length, dtype=bool)
greater_than_leader = np.zeros(length, dtype=bool)
greater_than_follower[:-1] = np.greater(y[:-1], y[1:])
greater_than_leader[1:] = np.greater(y[1:], y[:-1])
maxima = np.logical_and(greater_than_follower, greater_than_leader)
# End points
maxima[0] = greater_than_follower[0]
maxima[-1] = greater_than_leader[-1]
return maxima
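# Example (sketch): local_maxima_detector(np.array([0., 2., 1., 3., 0.]))
# returns array([False, True, False, True, False]).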
def local_minima_detector(y):
'''
Finds local minima in ordered data *y*.
:param y: 1d numpy float array
:return minima: 1d numpy bool array
*minima* is *True* at a local minimum, *False* otherwise.
This function makes no attempt to reject spurious minima of various sorts.
That task is left to other functions.
'''
minima = local_maxima_detector(-y)
return minima
def noiseless_curvature(x, y):
'''
Finds the curvature of y locally. Does not account for noise.
:param x: numpy float array, independent variable
:param y: numpy float array, dependent variable
:return curvature: numpy float array
Compares subsequent pixels to find a local slope.
Compares subsequent slopes to find a local curvature.
The curvature is defined at a location 0.5*(x3 + x1) = 0.5*(x[2:] + x[:-2]).
For evenly spaced data, the curvature is defined at x2 = x[1:-1].
The curvature is not defined (np.nan) for the endpoints.
'''
curvature = np.zeros(x.size, dtype=float)
y1 = y[:-2]
y2 = y[1:-1]
y3 = y[2:]
x1 = x[:-2]
x2 = x[1:-1]
x3 = x[2:]
# First derivatives
yprime_one = (y2 - y1) / (x2 - x1) # Defined at location 0.5*(x1 + x2)
yprime_two = (y3 - y2) / (x3 - x2) # Defined at location 0.5*(x2 + x3)
# Second derivative
# Defined at location 0.5*(x3 + x1). For evenly spaced data, defined at x2.
curvature[1:-1] = (yprime_two - yprime_one) / (0.5 * (x3 - x1))
# Undefined at endpoints
curvature[0] = np.nan
curvature[-1] = np.nan
return curvature
def real_max(y):
return y[~np.isnan(y)].max()
def real_min(y):
return y[~np.isnan(y)].min()
def isolate_outliers(y, n):
'''
Isolates high and low outliers from normally distributed data.
:param y: 1d numpy float array
:param n: int
:return normals, high_outliers, low_outliers: 1d numpy bool array x3
*y* is assumed to be primarily normally distributed data. Order of *y* is irrelevant.
*n* is the number of standard deviations that is the cutoff for 'normal';
in general this should be determined based on the size of *y*,
but a value of 4 or 5 is good for most cases (<10,000 data points).
Determination of an appropriate value of *n* is left to the user.
'''
normals = np.ones(y.size, bool)
old_high_outliers = np.zeros(y.size, bool)
old_low_outliers = np.zeros(y.size, bool)
new_outliers = 1
while new_outliers > 0:
median = np.median(y[normals])
std = np.std(y[normals])
high_outliers = np.greater(y, median + n * std)
low_outliers = np.less(y, median - n * std)
new_high_outliers = high_outliers & (~old_high_outliers)
new_low_outliers = low_outliers & (~old_low_outliers)
new_outliers = new_high_outliers.sum() + new_low_outliers.sum()
# Prep for next round, which may or may not trigger
old_high_outliers = high_outliers[:]
old_low_outliers = low_outliers[:]
normals = (~high_outliers) & (~low_outliers)
return normals, high_outliers, low_outliers
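# Usage sketch (illustrative only): for roughly normal data with a few injected spikes,
#   normals, high, low = isolate_outliers(y, 5)
# re-estimates the median and standard deviation on the non-outlier subset each pass,
# stopping once no new outliers are found.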
def find_zeros(y):
'''
Identifies the pixels just before zero-crossings.
:param y: 1d numpy float array
:return zeros: 1d numpy bool array
*y* is ordered data.
The discrete nature of arrays means that zero crossings generally happen between pixels,
rather than on a specific unambiguous pixel.
I arbitrarily chose to identify the pixel just before a zero crossing,
i.e. with lower index number, rather than just after.
*zeros* is a boolean array with value *True* for pixels
just before *y* crosses from positive to negative
and just before *y* crosses from negative to positive,
*False* otherwise.
'''
positive = np.greater(y, 0)
negative = np.less(y, 0)
next_positive = np.zeros(y.size, dtype=bool)
next_negative = np.zeros(y.size, dtype=bool)
next_positive[:-1] = positive[1:]
next_negative[:-1] = negative[1:]
rising = positive & next_negative
falling = negative & next_positive
zeros = rising | falling
return zeros
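# Example (sketch): find_zeros(np.array([1., -1., -2., 3.])) returns
# array([ True, False, True, False]); indices 0 and 2 sit just before sign changes.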
def linear_backgrounds(x, y, low_anchor_indices, high_anchor_indices):
'''
Finds parameters of a line connecting two points.
:param x: 1d numpy float array
:param y: 1d numpy float array
:param low_anchor_indices: 1d numpy int array
:param high_anchor_indices: 1d numpy int array
:return slope, offset: 1d numpy float arrays
*x* and *y* are paired, ordered data,
where *x* is the independent variable (coordinate)
and *y* is the dependent variable (value).
*low_anchor_indices* indicate the start-points of a line segment
and *high_anchor_indices* indicate the end-points of the same.
The solution given is not fitted and does not account for data values between the end-points.
It is simply a description of the line connecting those end-points.
'''
x1 = x[low_anchor_indices]
x2 = x[high_anchor_indices]
y1 = y[low_anchor_indices]
y2 = y[high_anchor_indices]
slope = (y2 - y1) / (x2 - x1)
offset = y1 - slope * x1
return slope, offset
def nested_boolean_indexing(boolean_slice_1, boolean_slice_2):
'''
Finds one array that slices like two input boolean arrays.
:param boolean_slice_1: 1d numpy bool array
:param boolean_slice_2: 1d numpy bool array
:return ultimate_truth: 1d numpy bool array
    'Advanced indexing' in numpy returns a copy, not a view, of the array being sliced.
This is my workaround for a case where a copy is definitely returned:
when you slice like *(some_array[boolean_slice_1])[boolean_slice_2]*.
Problematic when you are attempting to change specific elements of some_array.
Finds a boolean array ultimate_truth such that
*(some_array[boolean_slice_1])[boolean_slice_2] = some_array[ultimate_truth]*
and corresponding integer array ultimate_indices such that
*(some_array[boolean_slice_1])[boolean_slice_2] = some_array[ultimate_indices]*
'''
indices = np.arange(boolean_slice_1.size, dtype=int)
indices_1 = indices[boolean_slice_1]
ultimate_indices = indices_1[boolean_slice_2]
ultimate_truth = np.zeros(boolean_slice_1.size, dtype=bool)
ultimate_truth[ultimate_indices] = True
return ultimate_truth, ultimate_indices
def integer_index_to_boolean(int_index, size):
'''
Converts an integer advanced indexing to a boolean advanced indexing.
:param int_index: 1d numpy integer array
:param size: integer
:return bool_index: 1d numpy boolean array
Finds an array *bool_index* such that
*some_array[bool_index] = some_array[int_index]*
for any some_array possessing *size* elements.
'''
bool_index = np.zeros(size, dtype=bool)
bool_index[int_index] = True
return bool_index
def boolean_index_to_integer(bool_index):
'''
Converts a boolean advanced indexing to an integer advanced indexing.
:param bool_index: 1d numpy boolean array
:return int_index: 1d numpy integer array
Finds an array *int_index* such that
*some_array[int_index] = some_array[bool_index]*.
'''
int_index = np.arange(bool_index.size, dtype=int)
int_index = int_index[bool_index]
return int_index
def shift_stack(y, n1, n2):
'''
Creates a stack of index-shifted versions of y.
:param y: 1d numpy float array
:param n1: int
:param n2: int
:return local_neighborhood: 2d numpy float array
:return element_exists: 2d numpy bool array
Creates shifted versions of the input *y*,
with shifts up to and including *n1* spaces downward in index
and up to and including *n2* spaces upwards.
The shifted versions are stacked together as *local_neighborhood*, like this
(shown for a *y* of length 16 and *n1 = 4*, *n2 = 2*)
[4 5 6 7 ... 15 __ __ __ __]
[3 4 5 6 ... 14 15 __ __ __]
[2 3 4 5 ... 13 14 15 __ __]
[1 2 3 4 ... 12 13 14 15 __]
[0 1 2 3 ... 11 12 13 14 15]
[_ 0 1 2 ... 10 11 12 13 14]
[_ _ 0 1 ... 9 10 11 12 13]
with a corresponding mask array, *element_exists*,
indicating whether an element holds information or not, like this
[1 1 1 1 ... 1 0 0 0 0]
[1 1 1 1 ... 1 1 0 0 0]
[1 1 1 1 ... 1 1 1 0 0]
[1 1 1 1 ... 1 1 1 1 0]
[1 1 1 1 ... 1 1 1 1 1]
[0 1 1 1 ... 1 1 1 1 1]
[0 0 1 1 ... 1 1 1 1 1]
'''
local_neighborhood = np.zeros(((n1 + n2 + 1), y.size), dtype=float)
element_exists = np.zeros(((n1 + n2 + 1), y.size), dtype=bool)
for ii in range(n1 + n2 + 1):
# ii ranges from 0 to n1 + n2; jj ranges from -n1 to n2
jj = ii - n1
if jj < 0:
local_neighborhood[ii, :jj] = y[-jj:]
element_exists[ii, :jj] = True
elif jj == 0:
local_neighborhood[ii, :] = y[:]
element_exists[ii, :] = True
else:
local_neighborhood[ii, jj:] = y[:-jj]
element_exists[ii, jj:] = True
return local_neighborhood, element_exists
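# Small concrete sketch of shift_stack (hypothetical input): with n1 = 1, n2 = 1
# each column of local_neighborhood holds a pixel together with its two neighbors.
def _example_shift_stack():
    y = np.array([0., 1., 2., 3.])
    local_neighborhood, element_exists = shift_stack(y, 1, 1)
    # local_neighborhood row 0 (shift by -1): [1, 2, 3, 0(pad)]
    # local_neighborhood row 1 (shift by  0): [0, 1, 2, 3]
    # local_neighborhood row 2 (shift by +1): [0(pad), 0, 1, 2]
    # element_exists is False exactly at the two padded corners
    return local_neighborhood, element_exists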
def calc_running_local_variance(y, n):
'''
    Calculates the running variance of a local pixel neighborhood, n pixels to each side.
:param y: 1d numpy float array
:param n: int
:return running_local_variance: 1d numpy float array
*y* is ordered data.
*n* is the number of pixels to each side included in the running variance.
*n* should not be smaller than 1, and *n* usually should be much smaller than the size of *y*.
*shift_stack()* creates shifted versions of the input y and stacks them together;
*masked_variance_2d_axis_0()* finds the variance along axis 0, i.e. along each column.
See *shift_stack()* and *masked_variance_2d_axis_0()* for further documentation.
'''
# local_neighborhood is shifted, concatenated data; element_exists is its mask
local_neighborhood, element_exists = shift_stack(y, n, n)
running_local_variance = masked_variance_2d_axis_0(local_neighborhood, element_exists)
return running_local_variance
def find_low_variance(local_variance, noise_factor):
'''
Finds areas with low variance; smaller noise_factor is more strict.
:param local_variance: 1d numpy float array
:param noise_factor: float
:return low_variance: 1d numpy bool array
Permitted values of noise_factor are between 0 and 1, inclusive.
For *noise_factor = 0*, the *variance_cutoff* is *median_variance*.
For *noise_factor = 1*, the *variance_cutoff* is *mean_variance*.
    For values between 0 and 1, the *variance_cutoff* interpolates geometrically
    (linearly in log space) between those two values.
Returns a boolean array with *True* for low-variance pixels, *False* otherwise.
'''
median_variance = np.median(local_variance)
mean_variance = local_variance.mean()
variance_cutoff = median_variance * (mean_variance / median_variance) ** noise_factor
low_variance = np.less(local_variance, variance_cutoff)
return low_variance
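# Worked sketch of the variance cutoff (hypothetical numbers): with a median of 1.0
# and a mean of 100.0, noise_factor = 0.5 gives a cutoff of 10.0, their geometric mean.
def _example_find_low_variance():
    local_variance = np.array([0.5, 1.0, 1.0, 2.0, 495.5])
    # median = 1.0, mean = 100.0 -> cutoff = 1.0 * (100.0 / 1.0) ** 0.5 = 10.0
    low_variance = find_low_variance(local_variance, 0.5)
    # low_variance == [True, True, True, True, False]
    return low_variance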
def sufficiently_separated_curv_zeros(feature_index, curv_zeros):
'''
Finds candidate items before and after, but not adjacent to, feature_index.
:param feature_index: int
:param curv_zeros: 1d numpy bool array
:return curv_zeros_two_before, curv_zeros_two_after: 1d numpy bool arrays
Returns candidate *curv_zeros* preceding *feature_index*,
except the one immediately before, as *curv_zeros_two_before*.
Returns candidate *curv_zeros* following *feature_index*,
except the one immediately after, as *curv_zeros_two_after*.
'''
num_datapoints = curv_zeros.size
indices = np.arange(num_datapoints, dtype=int)
curv_zeros_indices = indices[curv_zeros]
curv_zeros_two_before_indices = curv_zeros_indices[np.less_equal(curv_zeros_indices, feature_index)][:-1]
curv_zeros_two_after_indices = curv_zeros_indices[np.greater_equal(curv_zeros_indices, feature_index)][1:]
curv_zeros_two_before = integer_index_to_boolean(curv_zeros_two_before_indices, num_datapoints)
curv_zeros_two_after = integer_index_to_boolean(curv_zeros_two_after_indices, num_datapoints)
return curv_zeros_two_before, curv_zeros_two_after
def find_alternate_high_bound(curv_zeros_before_allowed, num_datapoints):
'''
Finds another point to serve as a false "high bound".
:param curv_zeros_before_allowed: 1d numpy bool array
:param num_datapoints: int
:return high_bound: int
This function is called when there are no permissible *high_bound* values
that are actually higher than the feature you are trying to model.
Instead of having one bound higher than the feature and one bound lower,
and interpolating between them, two "bounds" both lower than the feature are found,
and a linear relationship is extrapolated from their positions.
Other than their *x*-value relative to the feature,
they satisfy the same requirements as ordinary bounding points.
'''
indices = np.arange(num_datapoints, dtype=int)
try:
high_bound = (indices[curv_zeros_before_allowed])[-3]
except IndexError:
high_bound = (indices[curv_zeros_before_allowed])[-2]
return high_bound
def find_alternate_low_bound(curv_zeros_after_allowed, num_datapoints):
'''
Finds another point to serve as a false "low bound".
:param curv_zeros_after_allowed: 1d numpy bool array
:param num_datapoints: int
    :return low_bound: int
This function is called when there are no permissible *low_bound* values
that are actually lower than the feature you are trying to model.
Instead of having one bound higher than the feature and one bound lower,
and interpolating between them, two "bounds" both higher than the feature are found,
and a linear relationship is extrapolated from their positions.
Other than their *x*-value relative to the feature,
they satisfy the same requirements as ordinary bounding points.
'''
indices = np.arange(num_datapoints, dtype=int)
try:
low_bound = (indices[curv_zeros_after_allowed])[2]
except IndexError:
low_bound = (indices[curv_zeros_after_allowed])[1]
return low_bound
def pick_slope_anchors(local_variance, gaussian_feature_indices, curv_zeros, noise_factor=0):
'''
Chooses anchor points for a linear background about features.
    :param local_variance: 1d numpy float array
    :param gaussian_feature_indices: 1d numpy int array
    :param curv_zeros: 1d numpy bool array
    :param noise_factor: float
    :return suggested_low_bound_indices, suggested_high_bound_indices: 1d numpy int arrays
    :return no_good_background, extrapolated_background: 1d numpy bool arrays
    *local_variance* is a running local variance of some ordered data.
    *gaussian_feature_indices* are indices of centroids of additive gaussian features.
    *curv_zeros* marks the zero-crossings of the curvature, which are the candidate anchor points.
    *noise_factor* is a float between zero and one, inclusive.
Lower values of *noise_factor* are more strict. See *find_low_variance()* for further documentation.
On my very nice (low-noise) test data a value of zero works well.
Some data may need a larger value, or different provisions altogether.
*suggested_low_bound_indices* and *suggested_high_bound_indices*
are the start and end points for a linear background fit for each feature.
'''
num_datapoints = local_variance.size
num_features = gaussian_feature_indices.size
low_variance = find_low_variance(local_variance, noise_factor)
indices = np.arange(num_datapoints, dtype=int)
suggested_low_bound_indices = np.zeros(gaussian_feature_indices.size, dtype=int)
suggested_high_bound_indices = np.zeros(gaussian_feature_indices.size, dtype=int)
no_good_background = np.zeros(num_features, dtype=bool)
extrapolated_background = np.zeros(num_features, dtype=bool)
for ii in range(num_features):
jj = gaussian_feature_indices[ii]
curv_zeros_two_before, curv_zeros_two_after = sufficiently_separated_curv_zeros(jj, curv_zeros)
curv_zeros_before_allowed = curv_zeros_two_before & low_variance
curv_zeros_after_allowed = curv_zeros_two_after & low_variance
near_low_end = False
near_high_end = False
try:
suggested_low_bound_indices[ii] = (indices[curv_zeros_before_allowed])[-1]
except IndexError:
near_low_end = True
extrapolated_background[ii] = True
try:
suggested_high_bound_indices[ii] = (indices[curv_zeros_after_allowed])[0]
except IndexError:
near_high_end = True
extrapolated_background[ii] = True
        if near_low_end and near_high_end:
no_good_background[ii] = True
elif near_high_end:
try:
suggested_high_bound_indices[ii] = find_alternate_high_bound(curv_zeros_before_allowed, num_datapoints)
except IndexError:
no_good_background[ii] = True
elif near_low_end:
try:
suggested_low_bound_indices[ii] = find_alternate_low_bound(curv_zeros_after_allowed, num_datapoints)
except IndexError:
no_good_background[ii] = True
return suggested_low_bound_indices, suggested_high_bound_indices, no_good_background, extrapolated_background
def gauss_guess(x, y, curvature, low_anchor_indices, high_anchor_indices, feature_indices):
'''
Guesses a gaussian + linear model for data segments.
:param x: 1d numpy float array
:param y: 1d numpy float array
:param curvature: 1d numpy float array
:param low_anchor_indices: 1d numpy int array
:param high_anchor_indices: 1d numpy int array
:param feature_indices: 1d numpy int array
:return slope, offset, intensity, sigma: 1d numpy float arrays
*x* and *y* are paired, ordered data,
where *x* is the independent variable
and *y* is the dependent variable.
*curvature* is the second derivative of *y* with respect to *x*.
*low_anchor_indices* indicate the start-points of a data segment about a feature
and *high_anchor_indices* indicate the end-points of the same.
At the anchor indices, *y* should be close to its background behavior,
i.e. not dominated by the gaussian feature.
*feature_indices* indicate the locations of the features themselves.
The solution given is not fitted; it is a first estimate to be used in fitting.
'''
number_features = low_anchor_indices.size
slope, offset = linear_backgrounds(x, y, low_anchor_indices, high_anchor_indices)
intensity = np.zeros(number_features, dtype=float)
sigma = np.zeros(number_features, dtype=float)
for ii in range(number_features):
background_ii = slope[ii] * x + offset[ii]
signal_ii = y - background_ii
magnitude_ii = signal_ii[feature_indices[ii]]
curvature_ii = curvature[feature_indices[ii]]
sigma[ii] = (-magnitude_ii / curvature_ii) ** 0.5
intensity[ii] = magnitude_ii * sigma[ii] * (2 * np.pi) ** 0.5
return slope, offset, intensity, sigma
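# Worked sketch of the relations behind gauss_guess (hypothetical numbers): for a pure
# gaussian of peak height m and width sigma, the curvature at the centroid is
# -m / sigma**2, so the two formulas below recover the generating parameters exactly.
def _example_gauss_guess_relations():
    sigma_true = 0.2
    intensity_true = 3.0
    height = intensity_true / (sigma_true * (2 * np.pi) ** 0.5)
    curvature_at_peak = -height / sigma_true ** 2
    sigma_guess = (-height / curvature_at_peak) ** 0.5           # == sigma_true
    intensity_guess = height * sigma_guess * (2 * np.pi) ** 0.5  # == intensity_true
    return sigma_guess, intensity_guess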
def masked_mean_2d_axis_0(y2d, mask2d):
'''
Takes the mean of masked data along axis 0.
:param y2d: 2d numpy float array
:param mask2d: 2d numpy bool array
:return mean: 1d numpy float array
*y2d* is data; *mask2d* is its corresponding mask
with values *True* for legitimate data, *False* otherwise.
Assumes that each column of *y2d* has at least one valid element;
otherwise the mean along axis 0 is not defined.
Returns *mean*, the mean of *y2d* along axis 0.
'''
sum = (y2d * mask2d).sum(axis=0)
num_elements = mask2d.sum(axis=0)
mean = sum / num_elements
return mean
def masked_variance_2d_axis_0(y2d, mask2d):
'''
Takes the variance of masked data along axis 0.
:param y2d: 2d numpy float array
:param mask2d: 2d numpy bool array
:return variance: 1d numpy float array
*y2d* is data; *mask2d* is its corresponding mask
with values *True* for legitimate data, *False* otherwise.
Assumes that each column of *y2d* has at least two valid elements;
otherwise the variance along axis 0 is not defined.
Returns *variance*, the variance of *y2d* along axis 0.
'''
mean = masked_mean_2d_axis_0(y2d, mask2d)
difference = (y2d - mean) * mask2d
num_elements = mask2d.sum(axis=0)
variance = (difference ** 2).sum(axis=0) / (num_elements - 1)
return variance
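# Consistency sketch (hypothetical data): with an all-True mask the masked variance
# reduces to numpy's sample variance (ddof=1) along axis 0.
def _example_masked_variance():
    y2d = np.array([[1., 2., 3.],
                    [3., 2., 1.],
                    [5., 2., 7.]])
    mask2d = np.ones(y2d.shape, dtype=bool)
    variance = masked_variance_2d_axis_0(y2d, mask2d)
    # variance == y2d.var(axis=0, ddof=1) == [4., 0., 9.333...]
    return variance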
def read_mega_spreadsheet(filename, data_folder):
'''
Strictly for reading some data Liheng gave me.
    :param filename: name of the .csv file to read
    :param data_folder: path to the folder containing *filename*
:return trimmed_data_list: A list of lists of 1d numpy float arrays
'''
data = np.genfromtxt(data_folder + filename, delimiter=',')
# (length, width) = data.shape # (429, 27)
# Split data into components
data1 = data[:, 0:3]
data2 = data[:, 4:7]
data3 = data[:, 8:11]
data4 = data[:, 12:15]
data5 = data[:, 16:19]
data6 = data[:, 20:23]
data7 = data[:, 24:27]
data_list = [data1, data2, data3, data4, data5, data6, data7]
# print 'data 1 end', data1[-20:, :]
# print 'data 7 end', data7[-20:, :]
trimmed_data_list = []
for i in data_list:
i_mask = ~np.isnan(i[:, 0])
x = (i[:, 0])[i_mask]
y = (i[:, 1])[i_mask]
dy = (i[:, 2])[i_mask]
trimmed_data_list.append([x, y, dy])
return trimmed_data_list
##### Figure-making functions #####
def figure_initial_maxima(x, y, maxima):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hold(True)
ax.plot(x, y, ls='-', color='black', marker='None')
ax.plot(x[maxima], y[maxima], ls='None', marker='+', color='red')
ax.set_title('Naively detected local maxima')
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
# plt.savefig('initial_maxima.pdf')
return fig, ax
def figure_maxima_curvature(x, y, maxima, normed_curv, curvature_legit):
# Two subplots, the axes array is 1-d
fig, axarray = plt.subplots(2, sharex=True)
axarray[0].set_title('Maxima of intensity, local curvature')
axarray[0].plot(x, y, ls='-', color='black', marker='None')
axarray[0].plot(x[maxima], y[maxima], ls='None', marker='+', color='red')
axarray[0].set_ylabel('Intensity')
axarray[1].plot(x[curvature_legit], normed_curv[curvature_legit], ls='-', marker='None', color='black')
axarray[1].plot(x[maxima], normed_curv[maxima], ls='None', marker='+', color='red')
axarray[1].set_ylabel('Curvature (scaled)')
# fig.savefig('maxima_curvature.pdf')
return fig, axarray
def figure_curv_vs_max(x, y, exclusive_maxima, exclusive_curv_minima, max_and_curvmin, normed_curv, curvature_legit):
fig, axarray = plt.subplots(2, sharex=True)
axarray[0].set_title('Local maxima of intensity vs curvature minima')
axarray[0].plot(x, y, ls='-', color='black', marker='None', lw=1)
axarray[0].plot(x[exclusive_maxima], y[exclusive_maxima], ls='None', marker='+', color='red', ms=10)
axarray[0].plot(x[exclusive_curv_minima], y[exclusive_curv_minima], ls='None', marker='+', color='blue', ms=10)
axarray[0].plot(x[max_and_curvmin], y[max_and_curvmin], ls='None', marker='+', color='green', ms=10)
axarray[0].set_ylabel('Intensity')
axarray[1].plot(x[curvature_legit], normed_curv[curvature_legit], ls='-', marker='None', color='black', lw=1)
axarray[1].plot(x[exclusive_maxima], normed_curv[exclusive_maxima], ls='None', marker='+', color='red', ms=10)
axarray[1].plot(x[exclusive_curv_minima], normed_curv[exclusive_curv_minima], ls='None', marker='+', color='blue',
ms=10)
axarray[1].plot(x[max_and_curvmin], normed_curv[max_and_curvmin], ls='None', marker='+', color='green', ms=10)
axarray[1].set_ylabel('Curvature (scaled)')
# fig.savefig('curv_vs_max.pdf')
return fig, axarray
def figure_curv_minima(x, y, curv_minima):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hold(True)
ax.plot(x, y, ls='-', color='black', marker=',')
ax.plot(x[curv_minima], y[curv_minima], ls='None', marker='+', color='red')
ax.set_title('Curvature minima')
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
# plt.savefig('curv_minima.pdf')
return fig, ax
def figure_curv_minima_curvature(x, y, curv_minima, normed_curv, curvature_legit):
fig, axarray = plt.subplots(2, sharex=True)
axarray[0].set_title('Local minima of curvature')
axarray[0].plot(x, y, ls='-', color='black', marker='None')
axarray[0].plot(x[curv_minima], y[curv_minima], ls='None', marker='+', color='red')
axarray[0].set_ylabel('Intensity')
axarray[1].plot(x[curvature_legit], normed_curv[curvature_legit], ls='-', marker='None', color='black')
axarray[1].plot(x[curv_minima], normed_curv[curv_minima], ls='None', marker='+', color='red')
axarray[1].set_ylabel('Curvature (scaled)')
# fig.savefig('curv_minima_curvature.pdf')
return fig, axarray
def figure_curv_minima_classified(x, y, curv_minima, high_outliers, normals, low_outliers, normed_curv):
fig, axarray = plt.subplots(2, sharex=True)
axarray[0].set_title('Curvature minima, classified as features, noise, and weirdness')
axarray[0].plot(x, y, ls='-', color='black', marker='None', lw=1)
axarray[0].plot(x[curv_minima][high_outliers], y[curv_minima][high_outliers], ls='None', marker='+', color='red',
ms=10)
axarray[0].plot(x[curv_minima][normals], y[curv_minima][normals], ls='None', marker='+', color='blue', ms=10)
axarray[0].plot(x[curv_minima][low_outliers], y[curv_minima][low_outliers], ls='None', marker='+', color='green',
ms=10)
axarray[0].set_ylabel('Intensity')
axarray[1].plot(x, normed_curv, ls='-', color='black', marker='None', lw=1)
axarray[1].plot(x[curv_minima][high_outliers], normed_curv[curv_minima][high_outliers], ls='None', marker='+',
color='red', ms=10)
axarray[1].plot(x[curv_minima][normals], normed_curv[curv_minima][normals], ls='None', marker='+', color='blue',
ms=10)
axarray[1].plot(x[curv_minima][low_outliers], normed_curv[curv_minima][low_outliers], ls='None', marker='+',
color='green', ms=10)
axarray[1].set_ylabel('Curvature (scaled)')
# fig.savefig('curv_minima_classified.pdf')
return fig, axarray
def figure_curv_zeros(x, y, curv_zeros, normed_curv):
fig, axarray = plt.subplots(2, sharex=True)
axarray[0].set_title('Curvature zeros')
axarray[0].plot(x, y, ls='-', color='black', marker='None')
axarray[0].plot(x[curv_zeros], y[curv_zeros], ls='None', marker='+', color='red', ms=10)
axarray[0].set_ylabel('Intensity')
axarray[1].plot(x, normed_curv, ls='-', color='black', marker='None', lw=1)
axarray[1].plot(x[curv_zeros], normed_curv[curv_zeros], ls='None', marker='+', color='red', ms=10)
axarray[1].set_ylabel('Curvature (scaled)')
# fig.savefig('curv_zeros.pdf')
return fig, axarray
def figure_running_variance(x, y, curv_zeros, running_local_variance):
mean_variance = running_local_variance.mean()
median_variance = np.median(running_local_variance)
fig, axarray = plt.subplots(2, sharex=True)
axarray[0].set_title('Running local variance & curvature zeros')
axarray[0].plot(x, y, ls='-', color='black', marker='None')
axarray[0].plot(x[curv_zeros], y[curv_zeros], ls='None', marker='+', color='red', ms=10)
axarray[0].set_ylabel('Intensity')
axarray[1].plot(x, np.log10(running_local_variance), ls='-', color='black', marker='None', lw=1)
axarray[1].plot(x, np.log10(mean_variance) * np.ones(x.size), ls='-', color='blue', marker='None', lw=1)
axarray[1].plot(x, np.log10(median_variance) * np.ones(x.size), ls='-', color='green', marker='None', lw=1)
axarray[1].plot(x[curv_zeros], np.log10(running_local_variance)[curv_zeros], ls='None', marker='+', color='red',
ms=10)
axarray[1].set_ylabel('Logarithm of running variance')
# fig.savefig('running_variance.pdf')
return fig, axarray
def figure_slope_anchors_clipped(x, y, low_bound_indices, high_bound_indices, feature_indices_clipped):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hold(True)
ax.plot(x, y, ls='-', color='black', marker='None')
for ii in range(feature_indices_clipped.size):
x1 = x[low_bound_indices[ii]]
x2 = x[high_bound_indices[ii]]
y1 = y[low_bound_indices[ii]]
y2 = y[high_bound_indices[ii]]
ax.plot([x1, x2], [y1, y2], ls='-', marker='.', color='blue')
ax.set_title('Anchor points for feature fitting, excluding ends')
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
# plt.savefig('slope_anchors_clipped.pdf')
return fig, ax
def figure_naive_gauss_guess(x, y, low_bound_indices, high_bound_indices, feature_indices, slope,
offset, intensity, sigma):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hold(True)
ax.plot(x, y, ls='-', color='black', marker='None')
slope, offset = linear_backgrounds(x, y, low_bound_indices, high_bound_indices)
number_features = feature_indices.size
for ii in range(number_features):
centroid = x[feature_indices[ii]]
linear_model = slope[ii] * x + offset[ii]
gauss_model = (intensity[ii] / (sigma[ii] * (2 * np.pi) ** 0.5)) \
* np.exp(-((x - centroid) ** 2) / (2 * sigma[ii] ** 2))
        # Fix plotting boundaries if necessary
if low_bound_indices[ii] < high_bound_indices[ii]:
model_segment = (linear_model + gauss_model)[low_bound_indices[ii]: high_bound_indices[ii]]
x_segment = x[low_bound_indices[ii]: high_bound_indices[ii]]
else:
high_x = centroid + 5 * sigma[ii]
low_x = centroid - 5 * sigma[ii]
segment = (np.greater(high_x, x) & np.less(low_x, x))
x_segment = x[segment]
model_segment = (linear_model + gauss_model)[segment]
# Mark as bad (red) if intensity < 0, good (cyan) otherwise
if intensity[ii] > 0:
ax.plot(x_segment, model_segment, ls=':', color='cyan')
else:
ax.plot(x_segment, model_segment, ls=':', color='red')
ax.set_title('Naive gaussian parameters guess, %i features' % number_features)
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
# plt.savefig('naive_gauss_guess.pdf')
return fig, ax
def figure_smoothed_comparison(x_list, y_list, labels):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hold(True)
data, = ax.plot(x_list[0], y_list[0], ls='-', color='black', marker=',', lw=2)
smoothed_1, = ax.plot(x_list[1], y_list[1], ls='-', color='blue', marker=',', lw=1)
smoothed_2, = ax.plot(x_list[2], y_list[2], ls='-', color='green', marker=',', lw=1)
smoothed_3, = ax.plot(x_list[3], y_list[3], ls='-', color='red', marker=',', lw=1)
smoothed_4, = ax.plot(x_list[4], y_list[4], ls='-', color='magenta', marker=',', lw=1)
handles = [data, smoothed_1, smoothed_2, smoothed_3, smoothed_4]
# Shrink x axis by 20%; place legend to the right plot
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(handles, labels, prop={'size': 10}, loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
return fig, ax
def plot_peaks_flat_all(x, y_lists, masks, title, location, labels):
# Feature region plot
x_peak = x[masks[0]]
x_peaks = []
y_peaks = []
for i in range(len(labels)):
y_peak = (y_lists[i])[masks[0]]
y_peaks.append(y_peak)
x_peaks.append(x_peak)
fig, ax = figure_smoothed_comparison(x_peaks, y_peaks, labels)
ax.set_title(title)
plt.savefig(location + 'peaks.pdf')
# Continuum region plot
x_flat = x[masks[1]]
x_flats = []
y_flats = []
for i in range(len(labels)):
y_flat = (y_lists[i])[masks[1]]
y_flats.append(y_flat)
x_flats.append(x_flat)
fig, ax = figure_smoothed_comparison(x_flats, y_flats, labels)
ax.set_title(title)
plt.savefig(location + 'continuum.pdf')
# Full range plot
x_all = x[masks[2]]
x_alls = []
y_alls = []
for i in range(len(labels)):
y_all = (y_lists[i])[masks[2]]
y_alls.append(y_all)
x_alls.append(x_all)
fig, ax = figure_smoothed_comparison(x_alls, y_alls, labels)
ax.set_title(title)
plt.savefig(location + 'all.pdf')
def figure_many_intensity_errors(data):
num_data = len(data)
fig = plt.figure()
ax = fig.add_subplot(111)
colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple', 'indigo']
for ii in range(num_data):
x = data[ii][0]
y = data[ii][1]
dy = data[ii][2]
ax.fill_between(x, y + dy, y - dy, facecolor=colors[ii], alpha=0.3)
ax.plot(x, y, lw=3, ls='-', marker=',', color=colors[ii])
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Intensity and error estimates, SAXS data')
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
return fig, ax
def figure_intensity_errors(data, out_folder):
num_data = len(data)
for ii in range(num_data):
fig = plt.figure()
ax = fig.add_subplot(111)
x = data[ii][0]
y = data[ii][1]
dy = data[ii][2]
ax.fill_between(x, y + dy, y - dy, facecolor='k', alpha=0.3)
ax.plot(x, y, lw=3, ls='-', marker=',', color='k')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Intensity and error estimates, SAXS data')
ax.set_xlabel('q')
ax.set_ylabel('Intensity')
plt.savefig(out_folder + 'intensity_errors_' + str(ii) + '.pdf')
##### Complex script functions #####
def process_demo_1():
out_folder = 'process_demo_1_figures/'
# Data intake
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Fang/spreadsheets1d/'
file1 = 'Sample2_30x30_t60_0069_1D.csv'
data1 = np.genfromtxt(data_folder + file1, delimiter=',')
(length, width) = data1.shape # (1096, 2)
# The data is for some reason doubled. Quick 2-line fix.
length = length / 2
data1 = data1[0:length, :]
x = data1[:, 0]
y = data1[:, 1]
# Local maxima
maxima = local_maxima_detector(y)
print 'Initially detected %i local maxima.' % maxima.sum()
fig, ax = figure_initial_maxima(x, y, maxima)
plt.savefig(out_folder + 'initial_maxima.pdf')
# plt.savefig(out_folder + '.pdf')
# Curvature
curvature = noiseless_curvature(x, y)
normed_curv = curvature / (real_max(curvature) - real_min(curvature))
curvature_legit = ~np.isnan(curvature)
curv_minima = local_minima_detector(curvature)
fig, ax = figure_maxima_curvature(x, y, maxima, normed_curv, curvature_legit)
plt.savefig(out_folder + 'maxima_curvature.pdf')
# Maxima vs curvature minima
exclusive_curv_minima = curv_minima & (~maxima)
exclusive_maxima = maxima & (~curv_minima)
max_and_curvmin = maxima & curv_minima
fig, ax = figure_curv_vs_max(x, y, exclusive_maxima, exclusive_curv_minima, max_and_curvmin, normed_curv,
curvature_legit)
plt.savefig(out_folder + 'curv_vs_max.pdf')
fig, ax = figure_curv_minima(x, y, curv_minima)
plt.savefig(out_folder + 'curv_minima.pdf')
fig, ax = figure_curv_minima_curvature(x, y, curv_minima, normed_curv, curvature_legit)
plt.savefig(out_folder + 'curv_minima_curvature.pdf')
# Classifying curvature minima
normals, high_outliers, low_outliers = isolate_outliers(curvature[curv_minima & curvature_legit], 4)
print 'Found %i low outliers (features?), %i normals (noise), and %i high outliers (problems?).' % (
low_outliers.sum(), normals.sum(), high_outliers.sum())
fig, ax = figure_curv_minima_classified(x, y, curv_minima, high_outliers, normals, low_outliers, normed_curv)
plt.savefig(out_folder + 'curv_minima_classified.pdf')
# Curvature zeros
curv_zeros = find_zeros(curvature)
fig, ax = figure_curv_zeros(x, y, curv_zeros, normed_curv)
plt.savefig(out_folder + 'curv_zeros.pdf')
# Classifying curvature zeros
running_local_variance = calc_running_local_variance(y, 2)
mean_variance = running_local_variance.mean()
median_variance = np.median(running_local_variance)
print 'The median of the calculated running variance is %f, and the mean is %f.' % (median_variance, mean_variance)
fig, ax = figure_running_variance(x, y, curv_zeros, running_local_variance)
plt.savefig(out_folder + 'running_variance.pdf')
indices = np.arange(y.size, dtype=int)
curv_minima_indices = indices[curv_minima]
likely_gaussian_feature_indices = curv_minima_indices[low_outliers]
likely_gaussian_features = np.zeros(y.size, dtype=bool)
likely_gaussian_features[likely_gaussian_feature_indices] = True
likely_gaussian_feature_indices_clipped = likely_gaussian_feature_indices[1:-1]
likely_gaussian_feature_clipped = np.zeros(y.size, dtype=bool)
likely_gaussian_feature_clipped[likely_gaussian_feature_indices_clipped] = True
suggested_low_bound_indices, suggested_high_bound_indices, no_good_background, extrapolated_background \
= pick_slope_anchors(running_local_variance, likely_gaussian_feature_indices_clipped, curv_zeros, 0)
fig, ax = figure_slope_anchors_clipped(x, y, suggested_low_bound_indices,
suggested_high_bound_indices, likely_gaussian_feature_indices_clipped)
plt.savefig(out_folder + 'slope_anchors_clipped.pdf')
slope, offset, intensity, sigma = gauss_guess(x, y, curvature, suggested_low_bound_indices,
suggested_high_bound_indices, likely_gaussian_feature_indices_clipped)
fig, ax = figure_naive_gauss_guess(x, y, suggested_low_bound_indices, suggested_high_bound_indices,
likely_gaussian_feature_indices_clipped, slope, offset, intensity, sigma)
plt.savefig(out_folder + 'naive_gauss_guess.pdf')
def batch_demo():
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Fang/spreadsheets1d/'
file_list = os.listdir(data_folder)
out_dir = 'batch_demo_figures/'
# Quick 'n' dirty scrub of file_list by file name
for ii in file_list:
ii = str(ii)
        if (not ii.startswith('Sample')) or (not ii.endswith('_1D.csv')):
            print 'Script wants to remove item %s from list of files to import...' % ii
            if not ii.startswith('Sample'):
                print "...because it doesn't start with 'Sample' but instead starts with '%s'..." % ii[:6]
            if not ii.endswith('_1D.csv'):
                print "...because it doesn't end with '_1D.csv' but instead ends with '%s'..." % ii[-7:]
            print '...but there is clearly something wrong with this picture, so it will not be removed.'
            # file_list.remove(ii)
plt.ioff() # Don't show me batch plots!
for ii in file_list:
print "Reading file %s." % ii
name_string = ii[6:-7]
# Data intake
data = np.genfromtxt(data_folder + ii, delimiter=',')
(length, width) = data.shape # (1096, 2)
# The data is for some reason doubled. Quick 2-line fix.
length = length / 2
data = data[0:length, :]
x = data[:, 0]
y = data[:, 1]
name_location_string = out_dir + ii[:-7] + '_'
# Find and classify curvature minima
curvature = noiseless_curvature(x, y)
normed_curv = curvature / (real_max(curvature) - real_min(curvature))
curvature_legit = ~np.isnan(curvature)
curv_minima = local_minima_detector(curvature)
normals, high_outliers, low_outliers = isolate_outliers(curvature[curv_minima & curvature_legit], 4)
fig, ax = figure_curv_minima_classified(x, y, curv_minima, high_outliers, normals, low_outliers, normed_curv)
plt.savefig(name_location_string + 'curv_minima_classified.pdf')
# Cleaning up some indexing mess, pointing to features
features, feature_indices = nested_boolean_indexing(curv_minima, low_outliers)
# Choose local-fit segments
curv_zeros = find_zeros(curvature)
running_variance = calc_running_local_variance(y, 2)
fig, ax = figure_running_variance(x, y, curv_zeros, running_variance)
plt.savefig(name_location_string + 'running_variance.pdf')
low_bound_indices, high_bound_indices, no_good_background, extrapolated_background = \
pick_slope_anchors(running_variance, feature_indices, curv_zeros, 0)
slope, offset, intensity, sigma = gauss_guess(x, y, curvature, low_bound_indices, high_bound_indices,
feature_indices)
fig, ax = figure_naive_gauss_guess(x, y, low_bound_indices, high_bound_indices, feature_indices, slope, offset,
intensity, sigma)
plt.savefig(name_location_string + 'naive_gauss_guess.pdf')
plt.close('all')
plt.ion() # Interactive plotting back on
##### WIP functions #####
def mean_smoother(y, n):
'''
Takes local running mean *n* pixels to each side.
:param y: 1d numpy float array
:param n: int
:return mean_smoothed: 1d numpy float array
'''
y_stacked, mask = shift_stack(y, n, n)
mean_smoothed = masked_mean_2d_axis_0(y_stacked, mask)
return mean_smoothed
def masked_median_2d_axis_0(y2d, mask2d):
'''
Takes the median of masked data along axis 0.
:param y2d: 2d numpy float array
:param mask2d: 2d numpy bool array
    :return median: 1d numpy float array
    :return mask: 1d numpy bool array
*y2d* is data; *mask2d* is its corresponding mask
with values *True* for legitimate data, *False* otherwise.
Unlike mean and variance, median cannot negate elements by setting their weight to zero.
This leads to unequally-sized regions, esp. near boundaries.
In order to avoid unnecessary iteration over unevenly-sized boundary regions,
those regions are split up by number of valid elements,
e.g., all pixels with 3 valid elements are handled together.
Returns *median*, the median of *y2d* along axis 0.
'''
mask = mask2d.any(axis=0)
num_entries = mask2d.sum(axis=0)
num_max_entries = num_entries.max()
num_min_entries = num_entries.min()
if num_min_entries == 0:
num_min_entries = 1 # skip past empty columns; they're already masked zeros
median = np.zeros(num_entries.size, dtype=float)
for i in range(num_min_entries, num_max_entries + 1):
i_entries = np.equal(num_entries, i)
num_i = i_entries.sum()
if num_i != 0:
mask_i = i_entries & mask2d # i_entries is broadcast to dimensions of mask2d
# numpy aggregates items along rows first, then columns.
# We want columns first for this application, so we get clever with transpose.
medianees = y2d.T[mask_i.T]
medianees = (medianees.reshape((num_i, i))).T
median[i_entries] = np.median(medianees, axis=0)
return median, mask
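# Usage sketch (hypothetical data): columns with different numbers of valid elements
# are grouped and handled together, so boundary-like columns still get a median.
def _example_masked_median():
    y2d = np.array([[1., 4., 9.],
                    [2., 5., 0.],
                    [3., 6., 0.]])
    mask2d = np.array([[True, True, True],
                       [True, True, False],
                       [True, True, False]])
    median, mask = masked_median_2d_axis_0(y2d, mask2d)
    # median == [2., 5., 9.]; mask == [True, True, True]
    return median, mask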
def median_smoother(y, n):
'''
Takes local running median *n* pixels to each side.
:param y: 1d numpy float array
:param n: int
:return median_smoothed: 1d numpy float array
:return median_mask: 1d numpy bool array
    If for whatever reason there are no valid elements at a pixel,
the median_mask at that pixel is set to zero.
This may need to be handled separately from overall image mask; not sure.
Presently included for completeness, I guess.
'''
y_stacked, mask = shift_stack(y, n, n)
median_smoothed, median_mask = masked_median_2d_axis_0(y_stacked, mask)
return median_smoothed, median_mask
def curvature_from_quadratic_approximation(x, y, n):
abc = local_quadratic_approximation(x, y, n)
a = abc[0, :]
curvature = 2 * a
return curvature
def local_quadratic_approximation_no_errors(x, y, n):
x_stacked, stack_mask = shift_stack(x, n, n)
y_stacked, stack_mask = shift_stack(y, n, n)
a, b, c = quadratic_approximation_no_errors(x_stacked, y_stacked, stack_mask)
y_smoothed = a * x ** 2 + b * x + c
curv_smoothed = 2 * a
return y_smoothed, curv_smoothed, a, b, c
def local_quadratic_approximation_with_errors(x, y, dy, n):
x_stacked, stack_mask = shift_stack(x, n, n)
# y_stacked, stack_mask = shift_stack(y, n, n)
# dy_stacked, stack_mask = shift_stack(dy, n, n)
# a, b, c = quadratic_approximation(x_stacked, y_stacked, dy_stacked, stack_mask)
a, b, c = quadratic_approximation(x_stacked, y, dy, stack_mask)
y_smoothed = a * x ** 2 + b * x + c
curv_smoothed = 2 * a
return y_smoothed, curv_smoothed, a, b, c
def quadratic_approximation(x, y, dy, mask):
length = x.shape[1]
# Notation in the form of
# xe4_dyen2 = x exponent 4 (x**4) dy exponent negative 2 (dy**-2)
xe4_dyen2 = (x ** 4 * dy ** -2 * mask).sum(axis=0)
xe3_dyen2 = (x ** 3 * dy ** -2 * mask).sum(axis=0)
xe2_dyen2 = (x ** 2 * dy ** -2 * mask).sum(axis=0)
x_dyen2 = (x * dy ** -2 * mask).sum(axis=0)
dyen2 = (dy ** -2 * mask).sum(axis=0)
y_xe2_dyen2 = (y * x ** 2 * dy ** -2 * mask).sum(axis=0)
y_x_dyen2 = (y * x * dy ** -2 * mask).sum(axis=0)
y_dyen2 = (y * dy ** -2 * mask).sum(axis=0)
abc = np.zeros((3, length), dtype=float)
for i in range(length):
fit_vector = np.array([[y_xe2_dyen2[i]],
[y_x_dyen2[i]],
[y_dyen2[i]]])
fit_matrix = np.array([[xe4_dyen2[i], xe3_dyen2[i], xe2_dyen2[i]],
[xe3_dyen2[i], xe2_dyen2[i], x_dyen2[i]],
[xe2_dyen2[i], x_dyen2[i], dyen2[i]]])
abc[:, i] = (np.dot(np.linalg.pinv(fit_matrix), fit_vector)).ravel()
a = abc[0, :]
b = abc[1, :]
c = abc[2, :]
return a, b, c
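# Recovery sketch (hypothetical data): fitting stacked windows of an exact quadratic
# with unit errors returns its coefficients at every pixel, up to floating-point error.
def _example_quadratic_approximation():
    x = np.linspace(0., 1., 20)
    y = 2.0 * x ** 2 - 1.0 * x + 0.5
    x_stacked, stack_mask = shift_stack(x, 2, 2)
    y_stacked, _ = shift_stack(y, 2, 2)
    dy_stacked = np.ones(x_stacked.shape, dtype=float)
    a, b, c = quadratic_approximation(x_stacked, y_stacked, dy_stacked, stack_mask)
    # a ~= 2.0, b ~= -1.0, c ~= 0.5 everywhere
    return a, b, c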
def quadratic_approximation_no_errors(x, y, mask):
dy = np.ones(x.shape, dtype=float)
return quadratic_approximation(x, y, dy, mask)
def quadratic_approximation_no_mask(x, y, dy):
mask = np.ones(x.shape, dtype=bool)
return quadratic_approximation(x, y, dy, mask)
def quadratic_approximation_no_mask_no_errors(x, y):
dy = np.ones(x.shape, dtype=float)
mask = np.ones(x.shape, dtype=bool)
return quadratic_approximation(x, y, dy, mask)
# Finish later
def overplot_quadratic_approx(x, y, n, a, b, c):
    # Overplots the data with its local quadratic approximation,
    # evaluated from the per-pixel coefficients a, b, c.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    y_smooth = a * x ** 2 + b * x + c
    ax.plot(x, y, lw=2, ls='-', marker=',', color='black')
    ax.plot(x, y_smooth, lw=1, ls='-', marker=',', color='red')
    ax.set_title('Local quadratic approximation, %i-pixel window' % (2 * n + 1))
    ax.set_xlabel('q')
    ax.set_ylabel('Intensity')
    return fig, ax
# Ugh, finish later
def plot_peaks_flat_all_quadsanity(x, y_lists, a_list, b_list, c_list, masks, title, location, labels):
# Feature region plot
x_peak = x[masks[0]]
x_peaks = []
y_peaks = []
for i in range(len(labels)):
y_peak = (y_lists[i])[masks[0]]
y_peaks.append(y_peak)
x_peaks.append(x_peak)
fig, ax = figure_smoothed_comparison(x_peaks, y_peaks, labels)
ax.set_title(title)
plt.savefig(location + 'peaks.pdf')
# Continuum region plot
x_flat = x[masks[1]]
x_flats = []
y_flats = []
for i in range(len(labels)):
y_flat = (y_lists[i])[masks[1]]
y_flats.append(y_flat)
x_flats.append(x_flat)
fig, ax = figure_smoothed_comparison(x_flats, y_flats, labels)
ax.set_title(title)
plt.savefig(location + 'continuum.pdf')
# Full range plot
x_all = x[masks[2]]
x_alls = []
y_alls = []
for i in range(len(labels)):
y_all = (y_lists[i])[masks[2]]
y_alls.append(y_all)
x_alls.append(x_all)
fig, ax = figure_smoothed_comparison(x_alls, y_alls, labels)
ax.set_title(title)
plt.savefig(location + 'all.pdf')
def smoothing_demo_1():
out_folder = '/Users/Amanda/PyCharmProjects/peak_detection/smoothing_demo_1_figures/'
# Data intake
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Fang/spreadsheets1d/'
file1 = 'Sample2_30x30_t60_0069_1D.csv'
data = np.genfromtxt(data_folder + file1, delimiter=',')
(length, width) = data.shape # (1096, 2)
# The data is for some reason doubled. Quick 2-line fix.
length = length / 2
data = data[0:length, :]
x = data[:, 0]
y = data[:, 1]
# masks specify interesting regions of the spectrum
peaks_mask = (np.less(x, 5.5) & np.greater(x, 4.5))
continuum_mask = (np.less(x, 2.0) & np.greater(x, 1.0))
all_mask = np.ones(x.shape, dtype=bool)
masks = [peaks_mask, continuum_mask, all_mask]
plt.ioff() # Don't show me batch plots!
# mean smoothing
mean_smoothed_3 = mean_smoother(y, 1)
mean_smoothed_5 = mean_smoother(y, 2)
mean_smoothed_7 = mean_smoother(y, 3)
mean_smoothed_9 = mean_smoother(y, 4)
y_list = [y, mean_smoothed_3, mean_smoothed_5, mean_smoothed_7, mean_smoothed_9]
labels = ['Data', '3-pixel smoothed', '5-pixel smoothed', '7-pixel smoothed', '9-pixel smoothed']
title = 'Mean smoothing comparison'
location = out_folder + 'mean_smoothed_'
plot_peaks_flat_all(x, y_list, masks, title, location, labels)
# median smoothing
median_smoothed_3, _ = median_smoother(y, 1)
median_smoothed_5, _ = median_smoother(y, 2)
median_smoothed_7, _ = median_smoother(y, 3)
median_smoothed_9, _ = median_smoother(y, 4)
y_list = [y, median_smoothed_3, median_smoothed_5, median_smoothed_7, median_smoothed_9]
labels = ['Data', '3-pixel smoothed', '5-pixel smoothed', '7-pixel smoothed', '9-pixel smoothed']
title = 'Median smoothing comparison'
location = out_folder + 'median_smoothed_'
plot_peaks_flat_all(x, y_list, masks, title, location, labels)
# quadratic approximation smoothing
quad_smoothed_5, _, a_5, b_5, c_5 = local_quadratic_approximation_no_errors(x, y, 2)
quad_smoothed_7, _, a_7, b_7, c_7 = local_quadratic_approximation_no_errors(x, y, 3)
quad_smoothed_9, _, a_9, b_9, c_9 = local_quadratic_approximation_no_errors(x, y, 4)
quad_smoothed_11, _, a_11, b_11, c_11 = local_quadratic_approximation_no_errors(x, y, 5)
a_list = [a_5, a_7, a_9, a_11]
b_list = [b_5, b_7, b_9, b_11]
c_list = [c_5, c_7, c_9, c_11]
y_list = [y, quad_smoothed_5, quad_smoothed_7, quad_smoothed_9, quad_smoothed_11]
labels = ['Data', '5-pixel smoothed', '7-pixel smoothed', '9-pixel smoothed', '11-pixel smoothed']
title = 'Quadratic smoothing comparison'
location = out_folder + 'quadratic_smoothed_'
plot_peaks_flat_all_quadsanity(x, y_list, a_list, b_list, c_list, masks, title, location, labels)
plt.close('all')
plt.ion() # Interactive plotting back on
def saxs_demo_do_one(x, y, suffix, out_folder):
# Find and classify curvature maxima
log_curv = noiseless_curvature(np.log(x), np.log(y))
normed_curv = log_curv / (real_max(log_curv) - real_min(log_curv))
curvature_legit = ~np.isnan(log_curv)
log_curv_maxima = local_maxima_detector(log_curv)
normals, high_outliers, low_outliers = isolate_outliers(log_curv[log_curv_maxima & curvature_legit], 4)
fig, axarray = figure_curv_minima_classified(x, y, log_curv_maxima, low_outliers, normals, high_outliers,
normed_curv)
axarray[0].set_title('Curvature maxima in log-log space, classified as features, noise, and weirdness')
axarray[0].set_xscale('log')
axarray[0].set_yscale('log')
axarray[1].set_xscale('log')
# axarray[1].set_yscale('log')
plt.savefig(out_folder + 'curv_maxima_classified_' + suffix + '.pdf')
plt.close()
# Cleaning up some indexing mess, pointing to features
features, feature_indices = nested_boolean_indexing(log_curv_maxima, high_outliers)
# Choose local-fit segments
curv_zeros = find_zeros(log_curv)
running_variance = calc_running_local_variance(np.log(y), 2)
fig, axarray = figure_running_variance(x, y, curv_zeros, running_variance)
axarray[0].set_xscale('log')
axarray[0].set_yscale('log')
axarray[1].set_xscale('log')
plt.savefig(out_folder + 'running_variance_' + suffix + '.pdf')
plt.close()
low_bound_indices, high_bound_indices, no_good_background, extrapolated_background = \
pick_slope_anchors(running_variance, feature_indices, curv_zeros, 0)
slope, offset, intensity, sigma = gauss_guess(np.log(x), np.log(y), log_curv, low_bound_indices, high_bound_indices,
feature_indices)
fig, ax = figure_naive_gauss_guess(np.log(x), np.log(y), low_bound_indices, high_bound_indices, feature_indices,
slope, offset,
intensity, sigma)
ax.set_xscale('linear')
ax.set_yscale('linear')
plt.savefig(out_folder + 'naive_gauss_guess_' + suffix + '.pdf')
plt.close()
def saxs_demo_1(): # Pretty much just showing it reads in so far. More to come.
out_folder = '/Users/Amanda/PyCharmProjects/peak_detection/saxs_demo_1_figures/'
# Data intake
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Liheng/megaSAXSspreadsheet/'
file1 = 'megaSAXSspreadsheet.csv'
data = read_mega_spreadsheet(file1, data_folder)
[data1, data2, data3, data4, data5, data6, data7] = data
[[x1, y1, dy1], [x2, y2, dy2], [x3, y3, dy3], [x4, y4, dy4], [x5, y5, dy5], [x6, y6, dy6], [x7, y7, dy7]] = data
plt.ioff() # Don't show me batch plots!
fig, ax = figure_many_intensity_errors(data)
plt.savefig(out_folder + 'many_intensity_errors.pdf')
figure_intensity_errors(data, out_folder)
for i in range(len(data)):
[x, y, dy] = data[i]
saxs_demo_do_one(x, y, str(i), out_folder)
# Smooth
mean_smoothed_19 = mean_smoother(y, 9)
saxs_demo_do_one(x, mean_smoothed_19, 'SMOOTHED_' + str(i), out_folder)
plt.close('all')
plt.ion() # Interactive plotting back on
def read_one_beam_csv(filename):
    # filename is the full path to the .csv file
data = np.genfromtxt(filename, delimiter=',', skip_header=2, autostrip=True, usecols=(0, 4))
x = data[:, 0]
y = data[:, 1]
return x, y
def remove_non_csv(filenames):
csv_only = []
csv_count = 0
for i in filenames:
if i[-4:] == '.csv':
csv_only.append(i)
csv_count += 1
# print '%i .csv files found.' % csv_count
return csv_only
def saxs_demo_do_a_thing(x, y, suffix, out_folder):
# Find and classify curvature maxima
log_curv = noiseless_curvature(np.log(x), np.log(y))
normed_curv = log_curv / (real_max(log_curv) - real_min(log_curv))
curvature_legit = ~np.isnan(log_curv)
log_curv_maxima = local_maxima_detector(log_curv)
normals, high_outliers, low_outliers = isolate_outliers(log_curv[log_curv_maxima & curvature_legit], 4)
fig, axarray = figure_curv_minima_classified(x, y, log_curv_maxima, low_outliers, normals, high_outliers,
normed_curv)
a_title = '''Curvature maxima in log-log space, \n classified as features, noise, and weirdness'''
axarray[0].set_xlim([2., 5.])
axarray[1].set_xlim([2., 5.])
axarray[0].set_title(a_title)
axarray[0].set_xscale('log')
axarray[0].set_yscale('log')
axarray[1].set_xscale('log')
# axarray[1].set_yscale('log')
plt.savefig(out_folder + 'curv_maxima_classified_' + suffix + '.pdf')
plt.close()
# Cleaning up some indexing mess, pointing to features
features, feature_indices = nested_boolean_indexing(log_curv_maxima, high_outliers)
# Choose local-fit segments
curv_zeros = find_zeros(log_curv)
running_variance = calc_running_local_variance(np.log(y), 2)
fig, axarray = figure_running_variance(x, y, curv_zeros, running_variance)
axarray[0].set_xlim([2., 5.])
axarray[1].set_xlim([2., 5.])
axarray[0].set_xscale('log')
axarray[0].set_yscale('log')
axarray[1].set_xscale('log')
plt.savefig(out_folder + 'running_variance_' + suffix + '.pdf')
plt.close()
low_bound_indices, high_bound_indices, no_good_background, extrapolated_background = \
pick_slope_anchors(running_variance, feature_indices, curv_zeros, 0)
slope, offset, intensity, sigma = gauss_guess(np.log(x), np.log(y), log_curv, low_bound_indices, high_bound_indices,
feature_indices)
fig, ax = figure_naive_gauss_guess(np.log(x), np.log(y), low_bound_indices, high_bound_indices, feature_indices,
slope, offset,
intensity, sigma)
ax.set_xlim([2., 5.])
ax.set_xscale('linear')
ax.set_yscale('linear')
plt.savefig(out_folder + 'naive_gauss_guess_' + suffix + '.pdf')
plt.close()
def saxs_demo_2(): # Pretty much just showing it reads in so far. More to come.
out_folder = '/Users/Amanda/PyCharmProjects/peak_detection/saxs_demo_2_figures/'
# Data intake
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Liheng/beamtime/imagesR1/'
files1 = os.listdir(data_folder)
files1 = remove_non_csv(files1)
###
files1 = files1[:5]
###
plt.ioff() # Don't show me batch plots!
for i in range(len(files1)):
x, y = read_one_beam_csv(data_folder + files1[i])
tag = (files1[i])[10:-8] + str(i)
saxs_demo_do_one(x, y, tag, out_folder)
# Smooth
mean_smoothed_19 = np.exp(mean_smoother(np.log(y), 9))
tag = tag + '_logSMOOTHED'
saxs_demo_do_one(x, mean_smoothed_19, tag, out_folder)
plt.close('all')
plt.ion() # Interactive plotting back on
print 'run done'
# Life is change
def monodisperse_model(q, R):
pass
def smoother(y):
pass
def binner(y, nbins, npix):
pass
def convolution_smoother(y, kernel):
pass
def gauss_smoother(x, y, x0, sigma):
pass
def edge_preserving_smoothing(y, mysteries):
pass
def pseudo_voigt(x, x0, gamma, sigma):
fwhm_gauss = 2 * (2 * np.log(2)) ** 0.5 * sigma
fwhm_lorentz = 2 * gamma
# Approximation to the FWHM of the Voigt distribution, accurate to 0.02%, taken from Wikipedia
fwhm_voigt = 0.5346 * fwhm_lorentz + (0.2166 * fwhm_lorentz ** 2 + fwhm_gauss ** 2) ** 0.5
# Formula for a good pseudo-Voigt approximation, accurate to 1%, taken from Wikipedia
# *f* and *eta* are constants used in that approximation
f = (fwhm_gauss ** 5 + 2.69269 * fwhm_gauss ** 4 * fwhm_lorentz + 2.42843 * fwhm_gauss ** 3 * fwhm_lorentz ** 2
+ 4.47163 * fwhm_gauss ** 2 * fwhm_lorentz ** 3 + 0.07842 * fwhm_gauss * fwhm_lorentz ** 4 + fwhm_lorentz ** 5) ** 0.2
eta = 1.36603 * (fwhm_lorentz / f) - 0.47719 * (fwhm_lorentz / f) ** 2 + 0.11116 * (fwhm_lorentz / f) ** 3
gauss_profile = gaussian(x, x0, sigma)
lorentz_profile = lorentzian(x, x0, gamma)
pseudo_voigt_profile = eta * lorentz_profile + (1 - eta) * gauss_profile
return pseudo_voigt_profile, fwhm_voigt
def lorentzian(x, x0, gamma):
pass
def gaussian(x, x0, sigma):
pass
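# Sketch of the profile shapes pseudo_voigt expects (assumed forms, since gaussian and
# lorentzian above are still stubs): area-normalized gaussian and lorentzian curves.
def _gaussian_profile_sketch(x, x0, sigma):
    return np.exp(-((x - x0) ** 2) / (2 * sigma ** 2)) / (sigma * (2 * np.pi) ** 0.5)
def _lorentzian_profile_sketch(x, x0, gamma):
    return (gamma / np.pi) / ((x - x0) ** 2 + gamma ** 2)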
def departure_from_linear(y, n):
pass
def departure_from_model(y, model):
pass
def collect_metrics(x, y):
pass
def discriminator():
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Fang/spreadsheets1d/'
file_list = os.listdir(data_folder)
# metrics = np.zeros(stuff, stuffy stuff)
for ii in file_list:
print "Reading file %s." % ii
# name_string = ii[6:-7]
# Data intake
data = np.genfromtxt(data_folder + ii, delimiter=',')
(length, width) = data.shape # (1096, 2)
# The data is for some reason doubled. Quick 2-line fix.
length = length / 2
data = data[0:length, :]
x = data[:, 0]
y = data[:, 1]
# name_location_string = out_dir + ii[:-7] + '_'
metrics_ii = collect_metrics(x, y)
# amorphous_labels = np.array([])
# broad_base_labels = np.array([])
def process_demo_2():
out_folder = 'process_demo_2_figures/'
# Data intake
data_folder = '/Users/Amanda/Desktop/Travails/Programming/ImageProcessing/SampleData/Fang/spreadsheets1d/'
file1 = 'Sample2_30x30_t60_0069_1D.csv'
data1 = np.genfromtxt(data_folder + file1, delimiter=',')
(length, width) = data1.shape # (1096, 2)
# The data is for some reason doubled. Quick 2-line fix.
length = length / 2
data1 = data1[0:length, :]
x = data1[:, 0]
y = data1[:, 1]
# Local maxima
maxima = local_maxima_detector(y)
print 'Initially detected %i local maxima.' % maxima.sum()
fig, ax = figure_initial_maxima(x, y, maxima)
plt.savefig(out_folder + 'initial_maxima.pdf')
# plt.savefig(out_folder + '.pdf')
# Curvature
curvature = noiseless_curvature(x, y)
normed_curv = curvature / (real_max(curvature) - real_min(curvature))
curvature_legit = ~np.isnan(curvature)
curv_minima = local_minima_detector(curvature)
fig, ax = figure_maxima_curvature(x, y, maxima, normed_curv, curvature_legit)
plt.savefig(out_folder + 'maxima_curvature.pdf')
# Maxima vs curvature minima
exclusive_curv_minima = curv_minima & (~maxima)
exclusive_maxima = maxima & (~curv_minima)
max_and_curvmin = maxima & curv_minima
fig, ax = figure_curv_vs_max(x, y, exclusive_maxima, exclusive_curv_minima, max_and_curvmin, normed_curv,
curvature_legit)
plt.savefig(out_folder + 'curv_vs_max.pdf')
fig, ax = figure_curv_minima(x, y, curv_minima)
plt.savefig(out_folder + 'curv_minima.pdf')
fig, ax = figure_curv_minima_curvature(x, y, curv_minima, normed_curv, curvature_legit)
plt.savefig(out_folder + 'curv_minima_curvature.pdf')
# Classifying curvature minima
normals, high_outliers, low_outliers = isolate_outliers(curvature[curv_minima & curvature_legit], 4)
print 'Found %i low outliers (features?), %i normals (noise), and %i high outliers (problems?).' % (
low_outliers.sum(), normals.sum(), high_outliers.sum())
fig, ax = figure_curv_minima_classified(x, y, curv_minima, high_outliers, normals, low_outliers, normed_curv)
plt.savefig(out_folder + 'curv_minima_classified.pdf')
# Curvature zeros
curv_zeros = find_zeros(curvature)
fig, ax = figure_curv_zeros(x, y, curv_zeros, normed_curv)
plt.savefig(out_folder + 'curv_zeros.pdf')
# Classifying curvature zeros
running_local_variance = calc_running_local_variance(y, 2)
mean_variance = running_local_variance.mean()
median_variance = np.median(running_local_variance)
print 'The median of the calculated running variance is %f, and the mean is %f.' % (median_variance, mean_variance)
fig, ax = figure_running_variance(x, y, curv_zeros, running_local_variance)
plt.savefig(out_folder + 'running_variance.pdf')
indices = np.arange(y.size, dtype=int)
curv_minima_indices = indices[curv_minima]
likely_gaussian_feature_indices = curv_minima_indices[low_outliers]
likely_gaussian_features = np.zeros(y.size, dtype=bool)
likely_gaussian_features[likely_gaussian_feature_indices] = True
likely_gaussian_feature_indices_clipped = likely_gaussian_feature_indices[1:-1]
likely_gaussian_feature_clipped = np.zeros(y.size, dtype=bool)
likely_gaussian_feature_clipped[likely_gaussian_feature_indices_clipped] = True
suggested_low_bound_indices, suggested_high_bound_indices, no_good_background, extrapolated_background \
= pick_slope_anchors(running_local_variance, likely_gaussian_feature_indices_clipped, curv_zeros, 0)
fig, ax = figure_slope_anchors_clipped(x, y, suggested_low_bound_indices,
suggested_high_bound_indices, likely_gaussian_feature_indices_clipped)
plt.savefig(out_folder + 'slope_anchors_clipped.pdf')
slope, offset, intensity, sigma = gauss_guess(x, y, curvature, suggested_low_bound_indices,
suggested_high_bound_indices, likely_gaussian_feature_indices_clipped)
fig, ax = figure_naive_gauss_guess(x, y, suggested_low_bound_indices, suggested_high_bound_indices,
likely_gaussian_feature_indices_clipped, slope, offset, intensity, sigma)
plt.savefig(out_folder + 'naive_gauss_guess.pdf')
##### Run scripts, optional #####
# process_demo_1()
# batch_demo()
# process_demo_2()
# smoothing_demo_1()
# saxs_demo_1()
#saxs_demo_2()
##### Not-yet-started and/or not-yet-used functions #####
def endpoint_curvature_cheat(curvature):
'''
Guesses the value of the curvature at the endpoints.
:param curvature: numpy float array
:return curvature: numpy float array
'''
curvature[0] = curvature[1]
curvature[-1] = curvature[-2]
return curvature
def noisy_curvature(x, y, width):
pass
def no_ends(x, clip):
'''
Returns a boolean array with pixels too close to the ends marked.
:param x: numpy array
:param clip: int
:return clipped: numpy bool array
x is a 1d array of the correct size.
clip is an integer number of pixels that will be masked at each end.
clipped is a boolean array with True for pixels with respectable locations
and False for pixels with dodgy locations.
'''
clipped = np.ones(x.size, dtype=bool)
    clipped[:clip] = False
    clipped[-clip:] = False
return clipped
def distance_from_nearest_neighbor(x, maxima):
pass
def reject_pure_noise_maxima(x, y, maxima):
pass
def cluster_maxima(x, y, maxima):
pass
|
11468106
|
import random
from exterminate.Utilities import builtins
_shuffle = random.shuffle
_sorted = builtins.sorted
def alt_shuffle(a: list, *args, **kwargs):
a.sort() # Shuffle returns None and shuffles in place
def alt_sorted(a, *args, **kwargs):
shuffled = list(a)
_shuffle(shuffled)
return shuffled
builtins.sorted = alt_sorted
builtins.shuffle = alt_shuffle
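# Effect sketch (hypothetical values): once this module is imported, the patched
# builtins swap behaviors -- sorted() returns a shuffled copy and shuffle() sorts in place.
def _example_swapped_builtins():
    nums = [3, 1, 2]
    maybe_shuffled = builtins.sorted(nums)  # a shuffled copy of nums
    builtins.shuffle(nums)                  # sorts nums in place -> [1, 2, 3]
    return maybe_shuffled, nums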
|
11468122
|
import time
class UserInterface(object):
def __init__(self):
self.open_player_menu = lambda: None
self.stop = lambda: None
def run(self):
while True:
time.sleep(1)
userInterface = UserInterface()
|
11468139
|
from functools import partial
from ... import documentation_helpers
from ...component_index import component_index
from . import cfc_utils
SIDE_COLOR = "color(var(--bluish) blend(var(--background) 60%))"
def get_inline_documentation(cfml_view, doc_type):
if not cfml_view.project_name:
return None
cfc_path, file_path, dot_path, function_name, regions = cfc_utils.find_cfc(
cfml_view, cfml_view.position
)
if file_path:
if dot_path:
if function_name:
metadata = component_index.get_extended_metadata_by_file_path(
cfml_view.project_name, file_path
)
if function_name in metadata["functions"]:
doc, callback = component_index.get_method_documentation(
cfml_view.view,
cfml_view.project_name,
file_path,
function_name,
dot_path.split(".").pop(),
metadata["functions"][function_name]["name"],
)
return cfml_view.Documentation(regions, doc, callback, 2)
doc, callback = component_index.get_documentation(
cfml_view.view, cfml_view.project_name, file_path, dot_path
)
return cfml_view.Documentation(regions, doc, callback, 2)
doc, callback = get_documentation(
cfml_view.view,
file_path,
documentation_helpers.span_wrap(cfc_path, "entity.name.class"),
)
return cfml_view.Documentation(regions, doc, callback, 2)
return None
def get_method_preview(cfml_view):
if not cfml_view.project_name:
return None
cfc_path, file_path, dot_path, function_name, regions = cfc_utils.find_cfc(
cfml_view, cfml_view.position
)
if file_path and dot_path and function_name:
doc, callback = component_index.get_method_preview(
cfml_view.view, cfml_view.project_name, file_path, function_name
)
return cfml_view.MethodPreview(regions, doc, callback, 2)
return None
def get_goto_cfml_file(cfml_view):
if not cfml_view.project_name:
return None
cfc_path, file_path, dot_path, function_name, region = cfc_utils.find_cfc(
cfml_view, cfml_view.position
)
if file_path:
if function_name:
metadata = component_index.get_extended_metadata_by_file_path(
cfml_view.project_name, file_path
)
if function_name in metadata["functions"]:
return cfml_view.GotoCfmlFile(
metadata["function_file_map"][function_name],
metadata["functions"][function_name]["name"],
)
else:
return cfml_view.GotoCfmlFile(file_path, None)
return None
def get_completions_doc(cfml_view):
if (
not cfml_view.project_name
or not cfml_view.function_call_params
or not cfml_view.function_call_params.method
):
return None
if len(cfml_view.function_call_params.dot_context) != 1:
return None
start_pt = cfml_view.function_call_params.dot_context[0].name_region.begin()
cfc_path, file_path, dot_path, temp_function_name, region = cfc_utils.find_cfc(
cfml_view, start_pt
)
if file_path:
function_name = cfml_view.function_call_params.function_name
metadata = component_index.get_extended_metadata_by_file_path(
cfml_view.project_name, file_path
)
if (
metadata
and cfml_view.function_call_params.function_name in metadata["functions"]
):
doc, callback = component_index.get_function_call_params_doc(
cfml_view.project_name,
file_path,
cfml_view.function_call_params,
dot_path.split(".").pop(),
metadata["functions"][function_name]["name"],
)
return cfml_view.CompletionDoc(None, doc, callback)
return None
def on_navigate(view, file_path, href):
view.window().open_file(file_path)
def get_documentation(view, file_path, header):
cfc_doc = {"side_color": SIDE_COLOR, "html": {}}
cfc_doc["html"]["links"] = []
cfc_doc["html"]["header"] = header
cfc_doc["html"][
"body"
] = """
<div class="path">
<strong>path</strong>: <a href="__go_to_component">{}</a>
</div>
""".strip().format(
file_path
)
callback = partial(on_navigate, view, file_path)
return cfc_doc, callback
|
11468177
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
'''IMPORTS'''
import re
import json
from datetime import datetime, date
import urllib3.util
# Disable insecure warnings
urllib3.disable_warnings()
def parse_tag_field(tags_str):
tags = []
regex = re.compile(r"key=([\w\d_:.-]+),value=([ /\w\d@_,.*-]+)", flags=re.I)
for f in tags_str.split(';'):
match = regex.match(f)
if match is None:
demisto.log('could not parse field: %s' % (f,))
continue
tags.append({
'Key': match.group(1),
'Value': match.group(2)
})
return tags
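# Usage sketch (hypothetical argument value): the semicolon-separated
# "key=...,value=..." string becomes a list of AWS tag dicts.
def _example_parse_tag_field():
    tags = parse_tag_field('key=Name,value=demo-cert;key=env,value=prod')
    # tags == [{'Key': 'Name', 'Value': 'demo-cert'}, {'Key': 'env', 'Value': 'prod'}]
    return tags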
def parse_subnet_mappings(subnets_str):
subnets = []
regex = re.compile(r"subnetid=([\w\d_:.-]+),allocationid=([ /\w\d@_,.*-]+)", flags=re.I)
for f in subnets_str.split(';'):
match = regex.match(f)
if match is None:
demisto.log('could not parse field: %s' % (f,))
continue
subnets.append({
'SubnetId': match.group(1),
'AllocationId': match.group(2)
})
return subnets
class DatetimeEncoder(json.JSONEncoder):
# pylint: disable=method-hidden
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
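# Usage sketch (hypothetical values): DatetimeEncoder lets json.dumps serialize the
# datetime/date objects that boto3 returns inside certificate descriptions.
def _example_datetime_encoder():
    payload = {'created': datetime(2020, 1, 1, 12, 0, 0), 'expires': date(2021, 1, 1)}
    return json.dumps(payload, cls=DatetimeEncoder)
    # -> '{"created": "2020-01-01T12:00:00", "expires": "2021-01-01"}'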
def parse_resource_ids(resource_id):
id_list = resource_id.replace(" ", "")
resource_ids = id_list.split(",")
return resource_ids
'''MAIN FUNCTIONS'''
def describe_certificate(args, aws_client):
client = aws_client.aws_session(
service='acm',
region=args.get('region'),
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
obj = vars(client._client_config)
response = client.describe_certificate(CertificateArn=args.get('certificateArn'))
cert = response['Certificate']
data = ({
'CertificateArn': cert.get('CertificateArn'),
'DomainName': cert.get('DomainName'),
'Subject': cert.get('Subject'),
'Issuer': cert.get('Issuer'),
'Status': cert.get('Status'),
'KeyAlgorithm': cert.get('KeyAlgorithm'),
'SignatureAlgorithm': cert.get('SignatureAlgorithm'),
'Type': cert.get('Type'),
'Region': obj['_user_provided_options']['region_name'],
})
if 'Serial' in cert:
data.update({'Serial': cert['Serial']})
try:
raw = json.loads(json.dumps(response['Certificate'], cls=DatetimeEncoder))
except ValueError as e:
return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
if raw:
raw.update({'Region': obj['_user_provided_options']['region_name']})
ec = {'AWS.ACM.Certificates(val.CertificateArn === obj.CertificateArn)': raw}
human_readable = tableToMarkdown('AWS ACM Certificates', data)
return_outputs(human_readable, ec)
def list_certificates(args, aws_client):
client = aws_client.aws_session(
service='acm',
region=args.get('region'),
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
obj = vars(client._client_config)
kwargs = {}
data = []
includes = {}
if args.get('certificateStatuses') is not None:
kwargs.update({'CertificateStatuses': args.get('certificateStatuses')})
if args.get('extendedKeyUsage') is not None:
includes.update({'extendedKeyUsage': [args.get('extendedKeyUsage')]})
if args.get('keyUsage') is not None:
includes.update({'keyUsage': [args.get('keyUsage')]})
if args.get('keyTypes') is not None:
includes.update({'keyTypes': [args.get('keyTypes')]})
if includes:
kwargs.update({'Includes': includes})
response = client.list_certificates(**kwargs)
for cert in response['CertificateSummaryList']:
data.append({
'CertificateArn': cert['CertificateArn'],
'DomainName': cert['DomainName'],
'Region': obj['_user_provided_options']['region_name'],
})
ec = {'AWS.ACM.Certificates(val.CertificateArn === obj.CertificateArn)': data}
human_readable = tableToMarkdown('AWS ACM Certificates', data)
return_outputs(human_readable, ec)
def add_tags_to_certificate(args, aws_client):
client = aws_client.aws_session(
service='acm',
region=args.get('region'),
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
kwargs = {
'CertificateArn': args.get('certificateArn'),
'Tags': parse_tag_field(args.get('tags'))
}
response = client.add_tags_to_certificate(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Certificate was Tagged successfully")
def remove_tags_from_certificate(args, aws_client):
client = aws_client.aws_session(
service='acm',
region=args.get('region'),
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
kwargs = {
'CertificateArn': args.get('certificateArn'),
'Tags': parse_tag_field(args.get('tags'))
}
response = client.remove_tags_from_certificate(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results("The Certificate Tags were removed successfully")
def list_tags_for_certificate(args, aws_client):
client = aws_client.aws_session(
service='acm',
region=args.get('region'),
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
kwargs = {'CertificateArn': args.get('certificateArn')}
response = client.list_tags_for_certificate(**kwargs)
data = ({'CertificateArn': args.get('certificateArn')})
for tag in response['Tags']:
data.update({
tag['Key']: tag['Value']
})
ec = {'AWS.ACM.Certificates(val.CertificateArn === obj.CertificateArn).Tags': data}
human_readable = tableToMarkdown('AWS ACM Certificate Tags', data)
return_outputs(human_readable, ec)
def get_certificate(args, aws_client):
client = aws_client.aws_session(
service='acm',
region=args.get('region'),
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
kwargs = {'CertificateArn': args.get('certificateArn')}
response = client.get_certificate(**kwargs)
if 'Certificate' in response:
fileResult('Certificate.pem', response['Certificate'])
if 'CertificateChain' in response:
fileResult('CertificateChain.pem', response['CertificateChain'])
demisto.results('### Certificate files for ARN: {arn}'.format(arn=args.get('certificateArn')))
def test_function(aws_client):
client = aws_client.aws_session(service='acm')
response = client.list_certificates()
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results('ok')
def main():
try:
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = demisto.params().get('timeout')
retries = demisto.params().get('retries') or 5
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout,
retries)
args = demisto.args()
command = demisto.command()
if command == 'test-module':
test_function(aws_client)
if command == 'aws-acm-describe-certificate':
describe_certificate(args, aws_client)
if command == 'aws-acm-list-certificates':
list_certificates(args, aws_client)
if command == 'aws-acm-add-tags-to-certificate':
add_tags_to_certificate(args, aws_client)
if command == 'aws-acm-remove-tags-from-certificate':
remove_tags_from_certificate(args, aws_client)
if command == 'aws-acm-list-tags-for-certificate':
list_tags_for_certificate(args, aws_client)
if command == 'aws-acm-get-certificate':
get_certificate(args, aws_client)
except Exception as e:
LOG(str(e))
return_error('Error has occurred in the AWS ACM Integration: {code}\n {message}'.format(
code=type(e), message=str(e)))
from AWSApiModule import * # noqa: E402
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
|
11468210
|
import z5py
from heimdall import view
# this fails with out-of-range due to
# https://github.com/napari/napari/issues/699
def example():
path = '/home/pape/Work/data/cremi/example/sampleA.n5'
with z5py.File(path) as f:
raw = f['volumes/raw/s0']
raw.n_threads = 8
seg = f['volumes/segmentation/groundtruth']
seg.n_threads = 8
view(raw, seg)
if __name__ == '__main__':
example()
|
11468226
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Permission
from tenant_users.permissions.models import UserTenantPermissions
class UserBackend(ModelBackend):
"""
Authenticates against UserProfile
Authorizes against the UserTenantPermissions.
The Facade classes handle the magic of passing
requests to the right spot.
"""
# We override this so that it looks for the 'groups' attribute on the
# UserTenantPermissions rather than from get_user_model()
def _get_group_permissions(self, user_obj):
user_groups_field = UserTenantPermissions._meta.get_field('groups')
user_groups_query = 'group__{0}'.format(
user_groups_field.related_query_name(),
)
return Permission.objects.filter(**{user_groups_query: user_obj})
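# Illustrative configuration sketch (not part of the original file; the dotted path is
# hypothetical and depends on where this module lives in the project):
#     AUTHENTICATION_BACKENDS = (
#         'myproject.auth.backends.UserBackend',
#     )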
|
11468250
|
import sys
import torch.nn as nn
from typing import Union
from torchvision import models
from dffml.base import config, field
from dffml.util.entrypoint import entrypoint
from .utils import create_layer
from .pytorch_base import PyTorchModelConfig, PyTorchModelContext, PyTorchModel
class LayersNotFound(Exception):
"""
Raised when add_layers is set to True but no layers are provided.
"""
@config
class PyTorchPreTrainedModelConfig(PyTorchModelConfig):
pretrained: bool = field(
"Load Pre-trained model weights", default=True,
)
trainable: bool = field(
"Tweak pretrained model by training again", default=False
)
add_layers: bool = field(
"Replace the last layer of the pretrained model", default=False,
)
layers: Union[
dict, nn.ModuleDict, nn.Sequential, nn.ModuleList, nn.Module
] = field(
"Extra layers to replace the last layer of the pretrained model",
default=None,
)
class PyTorchPretrainedContext(PyTorchModelContext):
def __init__(self, parent):
super().__init__(parent)
if self.parent.config.add_layers and self.parent.config.layers is None:
raise LayersNotFound(
"add_layers is set to True but no layers are provided."
)
def set_model_parameters(self):
if self.parent.LAST_LAYER_TYPE == "classifier_sequential":
self.model_parameters = (
self.parent.model.parameters()
if self.parent.config.trainable
else self.parent.model.classifier[-1].parameters()
)
elif self.parent.LAST_LAYER_TYPE == "classifier_linear":
self.model_parameters = (
self.parent.model.parameters()
if self.parent.config.trainable
else self.parent.model.classifier.parameters()
)
else:
self.model_parameters = (
self.parent.model.parameters()
if self.parent.config.trainable
else self.parent.model.fc.parameters()
)
class PyTorchPreTrainedModel(PyTorchModel):
def __init__(self, config) -> None:
super().__init__(config)
def createModel(self):
"""
Generates a model
"""
if self.model is not None:
return self.model
self.logger.debug(
"Loading model with classifications(%d): %r",
len(self.classifications),
self.classifications,
)
model = getattr(models, self.PYTORCH_MODEL)(
pretrained=self.config.pretrained
)
for param in model.parameters():
            # freeze or fine-tune the backbone weights according to the trainable flag
            param.requires_grad = self.config.trainable
if self.config.add_layers:
if self.config.layers.__class__.__base__.__name__ in [
"ModuleDict",
"Sequential",
"ModuleList",
"Module",
]:
layers = nn.Sequential()
for name, module in self.config.layers.named_children():
layers.add_module(name, module)
else:
layers = [
create_layer(value)
for key, value in self.config.layers.items()
]
if self.LAST_LAYER_TYPE == "classifier_sequential":
if len(layers) > 1:
layers = [nn.Sequential(*layers)]
model.classifier = nn.Sequential(
*list(model.classifier.children())[:-1] + layers
)
elif self.LAST_LAYER_TYPE == "classifier_linear":
if len(layers) == 1:
model.classifier = layers[0]
elif len(layers) > 1:
model.classifier = nn.Sequential(*layers)
else:
if len(layers) == 1:
model.fc = layers[0]
elif len(layers) > 1:
model.fc = nn.Sequential(*layers)
self.model = model.to(self.device)
return self.model
for model_name, name, last_layer_type in [
("alexnet", "AlexNet", "classifier_sequential"),
("densenet121", "DenseNet121", "classifier_linear"),
("densenet161", "DenseNet161", "classifier_linear"),
("densenet169", "DenseNet169", "classifier_linear"),
("densenet201", "DenseNet201", "classifier_linear"),
("mnasnet0_5", "MnasNet0_5", "classifier_sequential"),
("mnasnet1_0", "MnasNet1_0", "classifier_sequential"),
("mobilenet_v2", "MobileNetV2", "classifier_sequential"),
("vgg11", "VGG11", "classifier_sequential"),
("vgg11_bn", "VGG11BN", "classifier_sequential"),
("vgg13", "VGG13", "classifier_sequential"),
("vgg13_bn", "VGG13BN", "classifier_sequential"),
("vgg16", "VGG16", "classifier_sequential"),
("vgg16_bn", "VGG16BN", "classifier_sequential"),
("vgg19", "VGG19", "classifier_sequential"),
("vgg19_bn", "VGG19BN", "classifier_sequential"),
("googlenet", "GoogleNet", "fully_connected"),
("inception_v3", "InceptionV3", "fully_connected"),
("resnet101", "ResNet101", "fully_connected"),
("resnet152", "ResNet152", "fully_connected"),
("resnet18", "ResNet18", "fully_connected"),
("resnet34", "ResNet34", "fully_connected"),
("resnet50", "ResNet50", "fully_connected"),
("resnext101_32x8d", "ResNext101_32x8D", "fully_connected"),
("resnext50_32x4d", "ResNext50_32x4D", "fully_connected"),
("shufflenet_v2_x0_5", "ShuffleNetV2x0_5", "fully_connected"),
("shufflenet_v2_x1_0", "ShuffleNetV2x1_0", "fully_connected"),
("wide_resnet101_2", "WideResNet101_2", "fully_connected"),
("wide_resnet50_2", "WideResNet50_2", "fully_connected"),
]:
cls_config = type(
name + "ModelConfig", (PyTorchPreTrainedModelConfig,), {},
)
cls_context = type(name + "ModelContext", (PyTorchPretrainedContext,), {},)
dffml_cls = type(
name + "Model",
(PyTorchPreTrainedModel,),
{
"CONFIG": cls_config,
"CONTEXT": cls_context,
"PYTORCH_MODEL": model_name,
"LAST_LAYER_TYPE": last_layer_type,
},
)
dffml_cls = entrypoint(model_name)(dffml_cls)
setattr(sys.modules[__name__], cls_config.__qualname__, cls_config)
setattr(sys.modules[__name__], cls_context.__qualname__, cls_context)
setattr(sys.modules[__name__], dffml_cls.__qualname__, dffml_cls)
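# Illustrative note (not part of the original module): the loop above synthesises, for
# each tuple, a config / context / model triple (e.g. ResNet18ModelConfig,
# ResNet18ModelContext, ResNet18Model), attaches them to this module, and registers the
# model under the dffml entrypoint name given as the first tuple element (e.g.
# "resnet18"), so each pretrained variant can be selected by that short name.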
|
11468252
|
import objc as _objc
__bundle__ = _objc.initFrameworkWrapper(
"EventKit",
frameworkIdentifier="com.apple.EventKit",
frameworkPath=_objc.pathForFramework(
"/System/Library/Frameworks/EventKit.framework"
),
globals=globals()
)
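# Illustrative note (not part of the original file): initFrameworkWrapper loads the
# EventKit framework and injects its Objective-C classes into this module's globals(),
# so after import something like EKEventStore.alloc().init() becomes available
# (assuming the host system grants access to EventKit).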
|
11468294
|
from neural_layout.network_graph import Network
def vgg16_network():
net = Network()
net.add_layer('input', [1, 224, 224])
net.add_layer('l1', [1, 224, 224])
net.add_layer('l2', [1, 224, 224])
net.add_layer('l3', [2, 112, 112])
net.add_layer('l4', [2, 112, 112])
net.add_layer('l5', [4, 56, 56])
net.add_layer('l6', [4, 56, 56])
net.add_layer('l7', [4, 56, 56])
net.add_layer('l8', [8, 28, 28])
net.add_layer('l9', [8, 28, 28])
net.add_layer('l10', [8, 28, 28])
net.add_layer('l11', [8, 14, 14])
net.add_layer('l12', [8, 14, 14])
net.add_layer('l13', [8, 14, 14])
net.add_layer('l14', [64, 1, 1])
net.add_layer('l15', [64, 1, 1])
net.add_layer('output', [16, 1, 1])
net.add_conv2d_connections('input', 'l1', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l1', 'l2', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l2', 'l3', stride=(2, 2),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l3', 'l4', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l4', 'l5', stride=(2, 2),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l5', 'l6', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l6', 'l7', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l7', 'l8', stride=(2, 2),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l8', 'l9', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l9', 'l10', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l10', 'l11', stride=(2, 2),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l11', 'l12', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_conv2d_connections('l12', 'l13', stride=(1, 1),
kernel_size=(3, 3), padding=(1, 1, 1, 1))
net.add_full_connections('l13', 'l14')
net.add_full_connections('l14', 'l15')
net.add_full_connections('l15', 'output')
return net
def vgg16_1d_network():
net = Network()
net.add_layer('input', [1, 224])
net.add_layer('l1', [8, 224])
net.add_layer('l2', [8, 224])
net.add_layer('l3', [16, 112])
net.add_layer('l4', [16, 112])
net.add_layer('l5', [32, 56])
net.add_layer('l6', [32, 56])
net.add_layer('l7', [32, 56])
net.add_layer('l8', [64, 28])
net.add_layer('l9', [64, 28])
net.add_layer('l10', [64, 28])
net.add_layer('l11', [64, 14])
net.add_layer('l12', [64, 14])
net.add_layer('l13', [64, 14])
net.add_layer('l14', [512, 1])
net.add_layer('l15', [512, 1])
net.add_layer('output', [128, 1])
net.add_conv1d_connections('input', 'l1', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l1', 'l2', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l2', 'l3', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l3', 'l4', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l4', 'l5', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l5', 'l6', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l6', 'l7', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l7', 'l8', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l8', 'l9', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l9', 'l10', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l10', 'l11', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l11', 'l12', stride=1,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('l12', 'l13', stride=1,
kernel_size=3, padding=(1, 1))
net.add_full_connections('l13', 'l14')
net.add_full_connections('l14', 'l15')
net.add_full_connections('l15', 'output')
return net
def resnet18_1d_network():
net = Network()
net.add_layer('input', [1, 224])
net.add_layer('conv1', [64, 112])
net.add_layer('conv2_1_a', [64, 56])
net.add_layer('conv2_1_b', [64, 56])
net.add_layer('conv2_2_a', [64, 56])
net.add_layer('conv2_2_b', [64, 56])
net.add_layer('conv3_1_a', [128, 28])
net.add_layer('conv3_1_b', [128, 28])
net.add_layer('conv3_2_a', [128, 28])
net.add_layer('conv3_2_b', [128, 28])
net.add_layer('conv4_1_a', [256, 14])
net.add_layer('conv4_1_b', [256, 14])
net.add_layer('conv4_2_a', [256, 14])
net.add_layer('conv4_2_b', [256, 14])
net.add_layer('conv5_1_a', [512, 7])
net.add_layer('conv5_1_b', [512, 7])
net.add_layer('conv5_2_a', [512, 7])
net.add_layer('conv5_2_b', [512, 7])
net.add_layer('average_pool', [512, 1])
net.add_layer('fully_connected', [1000, 1])
net.add_conv1d_connections('input', 'conv1', stride=2,
kernel_size=7, padding=(3, 3))
# conv2
net.add_conv1d_connections('conv1', 'conv2_1_a', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv2_1_a', 'conv2_1_b',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv1', 'conv2_1_b', stride=2,
kernel_size=1)
net.add_conv1d_connections('conv2_1_b', 'conv2_2_a',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv2_2_a', 'conv2_2_b',
kernel_size=3, padding=(1, 1))
net.add_one_to_one_connections('conv2_1_b', 'conv2_2_b')
# conv3
net.add_conv1d_connections('conv2_2_b', 'conv3_1_a', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv3_1_a', 'conv3_1_b',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv2_2_b', 'conv3_1_b', stride=2,
kernel_size=1)
net.add_conv1d_connections('conv3_1_b', 'conv3_2_a',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv3_2_a', 'conv3_2_b',
kernel_size=3, padding=(1, 1))
net.add_one_to_one_connections('conv3_1_b', 'conv3_2_b')
# conv4
net.add_conv1d_connections('conv3_2_b', 'conv4_1_a', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv4_1_a', 'conv4_1_b',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv3_2_b', 'conv4_1_b', stride=2,
kernel_size=1)
net.add_conv1d_connections('conv4_1_b', 'conv4_2_a',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv4_2_a', 'conv4_2_b',
kernel_size=3, padding=(1, 1))
net.add_one_to_one_connections('conv4_1_b', 'conv4_2_b')
# conv5
net.add_conv1d_connections('conv4_2_b', 'conv5_1_a', stride=2,
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv5_1_a', 'conv5_1_b',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv4_2_b', 'conv5_1_b', stride=2,
kernel_size=1)
net.add_conv1d_connections('conv5_1_b', 'conv5_2_a',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('conv5_2_a', 'conv5_2_b',
kernel_size=3, padding=(1, 1))
net.add_one_to_one_connections('conv5_1_b', 'conv5_2_b')
net.add_conv1d_connections('conv5_2_b', 'average_pool', kernel_size=7)
net.add_full_connections('average_pool', 'fully_connected')
return net
def scalogram_resnet_network():
net = Network()
net.add_layer('scalogram', [2, 292])
net.add_layer('scalogram_block_0_main_conv_1', [32, 228])
net.add_layer('scalogram_block_0_main_conv_2', [32, 114])
net.add_layer('scalogram_block_1_main_conv_1', [32, 114])
net.add_layer('scalogram_block_1_main_conv_2', [32, 114])
net.add_layer('scalogram_block_2_main_conv_1', [64, 82])
net.add_layer('scalogram_block_2_main_conv_2', [64, 41])
net.add_layer('scalogram_block_3_main_conv_1', [64, 41])
net.add_layer('scalogram_block_3_main_conv_2', [64, 41])
net.add_layer('scalogram_block_4_main_conv_1', [128, 26])
net.add_layer('scalogram_block_4_main_conv_2', [128, 13])
net.add_layer('scalogram_block_5_main_conv_1', [128, 13])
net.add_layer('scalogram_block_5_main_conv_2', [128, 13])
net.add_layer('scalogram_block_6_main_conv_1', [256, 5])
net.add_layer('scalogram_block_6_main_conv_2', [256, 5])
net.add_layer('scalogram_block_7_main_conv_1', [512, 3])
net.add_layer('scalogram_block_7_main_conv_2', [512, 1])
net.add_layer('ar_block_0', [512, 1])
net.add_layer('ar_block_1', [512, 1])
net.add_layer('ar_block_2', [512, 1])
net.add_layer('ar_block_3', [512, 1])
net.add_layer('ar_block_4', [256, 1])
net.add_layer('ar_block_5', [256, 1])
net.add_layer('ar_block_6', [256, 1])
net.add_layer('ar_block_7', [256, 1])
net.add_layer('ar_block_8', [256, 1])
# Encoder
# BLOCK 0
net.add_conv1d_connections('scalogram', 'scalogram_block_0_main_conv_1',
kernel_size=65)
net.add_conv1d_connections('scalogram_block_0_main_conv_1', 'scalogram_block_0_main_conv_2',
kernel_size=3, stride=2, padding=(1, 1))
net.add_conv1d_connections('scalogram', 'scalogram_block_0_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 1
net.add_conv1d_connections('scalogram_block_0_main_conv_2', 'scalogram_block_1_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_1_main_conv_1', 'scalogram_block_1_main_conv_2',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_0_main_conv_2', 'scalogram_block_1_main_conv_2',
kernel_size=1)
# BLOCK 2
net.add_conv1d_connections('scalogram_block_1_main_conv_2', 'scalogram_block_2_main_conv_1',
kernel_size=33)
net.add_conv1d_connections('scalogram_block_2_main_conv_1', 'scalogram_block_2_main_conv_2',
kernel_size=3, stride=2, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_1_main_conv_2', 'scalogram_block_2_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 3
net.add_conv1d_connections('scalogram_block_2_main_conv_2', 'scalogram_block_3_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_3_main_conv_1', 'scalogram_block_3_main_conv_2',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_2_main_conv_2', 'scalogram_block_3_main_conv_2',
kernel_size=1)
# BLOCK 4
net.add_conv1d_connections('scalogram_block_3_main_conv_2', 'scalogram_block_4_main_conv_1',
kernel_size=16)
net.add_conv1d_connections('scalogram_block_4_main_conv_1', 'scalogram_block_4_main_conv_2',
kernel_size=3, stride=2, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_3_main_conv_2', 'scalogram_block_4_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 5
net.add_conv1d_connections('scalogram_block_4_main_conv_2', 'scalogram_block_5_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_5_main_conv_1', 'scalogram_block_5_main_conv_2',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_4_main_conv_2', 'scalogram_block_5_main_conv_2',
kernel_size=1)
# BLOCK 6
net.add_conv1d_connections('scalogram_block_5_main_conv_2', 'scalogram_block_6_main_conv_1',
kernel_size=9)
net.add_conv1d_connections('scalogram_block_6_main_conv_1', 'scalogram_block_6_main_conv_2',
kernel_size=3, stride=1, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_5_main_conv_2', 'scalogram_block_6_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 7
net.add_conv1d_connections('scalogram_block_6_main_conv_2', 'scalogram_block_7_main_conv_1',
kernel_size=3)
net.add_conv1d_connections('scalogram_block_7_main_conv_1', 'scalogram_block_7_main_conv_2',
kernel_size=3)
net.add_conv1d_connections('scalogram_block_6_main_conv_2', 'scalogram_block_7_main_conv_2',
kernel_size=1)
# Autoregressive model
# BLOCK 0
net.add_conv1d_connections('scalogram_block_7_main_conv_2', 'ar_block_0',
kernel_size=1)
# BLOCK 1
net.add_conv1d_connections('ar_block_0', 'ar_block_1',
kernel_size=1)
# BLOCK 2
net.add_conv1d_connections('ar_block_1', 'ar_block_2',
kernel_size=1)
# BLOCK 3
net.add_conv1d_connections('ar_block_2', 'ar_block_3',
kernel_size=1)
# BLOCK 4
net.add_conv1d_connections('ar_block_3', 'ar_block_4',
kernel_size=1)
# BLOCK 5
net.add_conv1d_connections('ar_block_4', 'ar_block_5',
kernel_size=1)
    # BLOCK 6
net.add_conv1d_connections('ar_block_5', 'ar_block_6',
kernel_size=1)
    # BLOCK 7
net.add_conv1d_connections('ar_block_6', 'ar_block_7',
kernel_size=1)
    # BLOCK 8
net.add_conv1d_connections('ar_block_7', 'ar_block_8',
kernel_size=1)
# scoring
net.add_conv1d_connections('ar_block_8', 'scalogram_block_7_main_conv_2',
kernel_size=1)
return net
def scalogram_resnet_network_smaller():
net = Network()
net.add_layer('scalogram', [2, 216])
net.add_layer('scalogram_block_0_main_conv_1', [8, 108])
net.add_layer('scalogram_block_0_main_conv_2', [8, 84])
net.add_layer('scalogram_block_1_main_conv_1', [16, 84])
net.add_layer('scalogram_block_1_main_conv_2', [16, 84])
net.add_layer('scalogram_block_2_main_conv_1', [32, 84])
net.add_layer('scalogram_block_2_main_conv_2', [32, 60])
net.add_layer('scalogram_block_3_main_conv_1', [64, 60])
net.add_layer('scalogram_block_3_main_conv_2', [64, 60])
net.add_layer('scalogram_block_4_main_conv_1', [128, 30])
net.add_layer('scalogram_block_4_main_conv_2', [128, 6])
net.add_layer('scalogram_block_5_main_conv_1', [256, 6])
net.add_layer('scalogram_block_5_main_conv_2', [256, 6])
net.add_layer('scalogram_block_6_main_conv_1', [512, 6])
net.add_layer('scalogram_block_6_main_conv_2', [512, 3])
net.add_layer('scalogram_block_7_main_conv_1', [512, 3])
net.add_layer('scalogram_block_7_main_conv_2', [512, 1])
net.add_layer('ar_block_0', [512, 1])
net.add_layer('ar_block_1', [512, 1])
net.add_layer('ar_block_2', [512, 1])
net.add_layer('ar_block_3', [512, 1])
net.add_layer('ar_block_4', [256, 1])
net.add_layer('ar_block_5', [256, 1])
net.add_layer('ar_block_6', [256, 1])
net.add_layer('ar_block_7', [256, 1])
net.add_layer('ar_block_8', [256, 1])
# Encoder
# BLOCK 0
net.add_conv1d_connections('scalogram', 'scalogram_block_0_main_conv_1',
kernel_size=3, stride=2, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_0_main_conv_1', 'scalogram_block_0_main_conv_2',
kernel_size=25)
net.add_conv1d_connections('scalogram', 'scalogram_block_0_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 1
net.add_conv1d_connections('scalogram_block_0_main_conv_2', 'scalogram_block_1_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_1_main_conv_1', 'scalogram_block_1_main_conv_2',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_0_main_conv_2', 'scalogram_block_1_main_conv_2',
kernel_size=1)
# BLOCK 2
net.add_conv1d_connections('scalogram_block_1_main_conv_2', 'scalogram_block_2_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_2_main_conv_1', 'scalogram_block_2_main_conv_2',
kernel_size=25)
net.add_conv1d_connections('scalogram_block_1_main_conv_2', 'scalogram_block_2_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 3
net.add_conv1d_connections('scalogram_block_2_main_conv_2', 'scalogram_block_3_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_3_main_conv_1', 'scalogram_block_3_main_conv_2',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_2_main_conv_2', 'scalogram_block_3_main_conv_2',
kernel_size=1)
# BLOCK 4
net.add_conv1d_connections('scalogram_block_3_main_conv_2', 'scalogram_block_4_main_conv_1',
kernel_size=3, stride=2, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_4_main_conv_1', 'scalogram_block_4_main_conv_2',
kernel_size=25)
net.add_conv1d_connections('scalogram_block_3_main_conv_2', 'scalogram_block_4_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 5
net.add_conv1d_connections('scalogram_block_4_main_conv_2', 'scalogram_block_5_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_5_main_conv_1', 'scalogram_block_5_main_conv_2',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_4_main_conv_2', 'scalogram_block_5_main_conv_2',
kernel_size=1)
# BLOCK 6
net.add_conv1d_connections('scalogram_block_5_main_conv_2', 'scalogram_block_6_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_6_main_conv_1', 'scalogram_block_6_main_conv_2',
kernel_size=4)
net.add_conv1d_connections('scalogram_block_5_main_conv_2', 'scalogram_block_6_main_conv_2',
kernel_size=1, stride=2)
# BLOCK 7
net.add_conv1d_connections('scalogram_block_6_main_conv_2', 'scalogram_block_7_main_conv_1',
kernel_size=3, padding=(1, 1))
net.add_conv1d_connections('scalogram_block_7_main_conv_1', 'scalogram_block_7_main_conv_2',
kernel_size=3)
net.add_conv1d_connections('scalogram_block_6_main_conv_2', 'scalogram_block_7_main_conv_2',
kernel_size=1)
# Autoregressive model
# BLOCK 0
net.add_conv1d_connections('scalogram_block_7_main_conv_2', 'ar_block_0',
kernel_size=1)
# BLOCK 1
net.add_conv1d_connections('ar_block_0', 'ar_block_1',
kernel_size=1)
# BLOCK 2
net.add_conv1d_connections('ar_block_1', 'ar_block_2',
kernel_size=1)
# BLOCK 3
net.add_conv1d_connections('ar_block_2', 'ar_block_3',
kernel_size=1)
# BLOCK 4
net.add_conv1d_connections('ar_block_3', 'ar_block_4',
kernel_size=1)
# BLOCK 5
net.add_conv1d_connections('ar_block_4', 'ar_block_5',
kernel_size=1)
    # BLOCK 6
net.add_conv1d_connections('ar_block_5', 'ar_block_6',
kernel_size=1)
    # BLOCK 7
net.add_conv1d_connections('ar_block_6', 'ar_block_7',
kernel_size=1)
    # BLOCK 8
net.add_conv1d_connections('ar_block_7', 'ar_block_8',
kernel_size=1)
# scoring
net.add_conv1d_connections('ar_block_8', 'scalogram_block_7_main_conv_2',
kernel_size=1)
return net
|
11468325
|
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class GlobalPause(Base):
__slots__ = ()
_SDM_NAME = 'globalPause'
_SDM_ATT_MAP = {
'HeaderDstAddress': 'globalPause.header.header.dstAddress-1',
'HeaderSrcAddress': 'globalPause.header.header.srcAddress-2',
'HeaderEthertype': 'globalPause.header.header.ethertype-3',
'MacControlControlOpcode': 'globalPause.header.macControl.controlOpcode-4',
'MacControlPfcQueue0': 'globalPause.header.macControl.pfcQueue0-5',
}
def __init__(self, parent, list_op=False):
super(GlobalPause, self).__init__(parent, list_op)
@property
def HeaderDstAddress(self):
"""
Display Name: Destination address
Default Value: 01:80:C2:00:00:01
Value Format: mAC
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderDstAddress']))
@property
def HeaderSrcAddress(self):
"""
Display Name: Source address
Default Value: 00:00:AA:00:00:01
Value Format: mAC
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderSrcAddress']))
@property
def HeaderEthertype(self):
"""
Display Name: Ethertype
Default Value: 0x8808
Value Format: hex
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderEthertype']))
@property
def MacControlControlOpcode(self):
"""
Display Name: Control opcode
Default Value: 0x0001
Value Format: hex
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MacControlControlOpcode']))
@property
def MacControlPfcQueue0(self):
"""
Display Name: PAUSE Quanta
Default Value: 0xFFFF
Value Format: hex
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MacControlPfcQueue0']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
11468335
|
class DataTree(object):
def __init__(self, scr, data):
self.scr = scr
self.save = True
self.valid = True
self._data = data
if isinstance(data, dict):
self._data = {}
for k, v in data.items():
self._data[k] = DataTree(self.scr, v)
elif isinstance(data, list):
self._data = []
for v in data:
self._data.append(DataTree(self.scr, v))
def _getdata(self):
self.prune()
if not isinstance(self._data, (dict, list)): return self._data
raise TypeError("Unable to provide internal subtree object")
def _setdata(self, data):
if not isinstance(data, (dict, list)):
self._data = data
return
data = DataTree(self.scr, data)
data.prune()
self._data = data._data
data = property(_getdata, _setdata)
def __eq__(self, other):
if not isinstance(other, DataTree): return False
this = self.todict()
that = other.todict()
self.scr.format.trim(this)
self.scr.format.trim(that)
return this == that
def __ne__(self, other):
return not (self == other)
def __getitem__(self, keys):
if not hasattr(type(keys), '__iter__'): keys = [keys]
if len(keys) == 0: return self
if not isinstance(self._data, dict):
clsname = self._data.__class__.__name__
msg = "Referenced '" + clsname + "' object cannot be indexed at '"
msg += str(keys[0]) + "'"
raise TypeError(msg)
if keys[0] not in self._data:
self._data[keys[0]] = DataTree(self.scr, {})
return self._data[keys[0]][keys[1:]]
def init(self, val):
if self._data == {} or self._data == [] or self._data == None:
self.data = val
def values(self):
if self._data == {}: self._data = []
if not isinstance(self._data, list):
raise TypeError("Unable to provide values")
return self._data
def items(self):
if not isinstance(self._data, dict):
raise TypeError("Unable to provide items")
return self._data.items()
def isleaf(self):
return not isinstance(self._data, (dict, list))
def islist(self):
return isinstance(self._data, list)
def prune(self):
if isinstance(self._data, list):
for v in self._data: v.prune()
elif isinstance(self._data, dict):
            # iterate over a snapshot of the items: deleting keys from the dict while
            # iterating it directly raises a RuntimeError in Python 3
            for k, v in list(self._data.items()):
                v.prune()
                if v._data == {}: del self._data[k]
def clone(self):
data = self._data
if isinstance(self._data, list):
data = []
for v in self._data: data.append(v.clone())
elif isinstance(self._data, dict):
data = {}
for k, v in self._data.items(): data[k] = v.clone()
clone = DataTree(self.scr, None)
clone._data = data
clone.save = self.save
clone.valid = self.valid
return clone
def todict(self):
if isinstance(self._data, list):
data = []
for v in self._data:
if v.save: data.append(v.todict())
return sorted(data)
if isinstance(self._data, dict):
data = {}
for k, v in self._data.items():
if v.save: data[k] = v.todict()
return data
return self._data
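# Illustrative usage sketch (not part of the original module). The scr argument is only
# dereferenced in __eq__ (scr.format.trim), so None is enough for this example:
#     tree = DataTree(None, {'a': {'b': 1}})
#     tree[['a', 'b']].data    # -> 1
#     tree[['a', 'c']].init(2) # creates the missing node and sets it to 2
#     tree.todict()            # -> {'a': {'b': 1, 'c': 2}}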
|
11468341
|
import torch
import pdb
from utils.learning import pick_valid_points
from utils.io import safe_printout
def get_cam_mat(width, height, focal_length):
"""
Get intrinsic camera matrix (tensor)
"""
cam_mat = torch.eye(3)
cam_mat[0, 0] = focal_length
cam_mat[1, 1] = focal_length
cam_mat[0, 2] = width / 2
cam_mat[1, 2] = height / 2
cam_mat = cam_mat.cuda()
return cam_mat
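# Descriptive note (not part of the original file): the resulting pinhole intrinsics are
#     [[focal_length, 0,            width / 2 ],
#      [0,            focal_length, height / 2],
#      [0,            0,            1         ]]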
def coords_world_to_cam(scene_coords, gt_coords, gt_poses):
"""
Transform the scene coordinates to camera coordinates.
@param scene_coords [B, 3, N] Predicted scene coords tensor.
@param gt_coords [B, 3, N] Ground-truth scene coords tensor.
@param gt_poses [B, 4, 4] cam-to-world matrix.
@return camera_coords [B, 3, N] camera coords tensor corresponding to scene_coords.
@return target_camera_coords [B, 3, N] camera coords tensor corresponding to gt_coords.
"""
gt_pose_inv = gt_poses.inverse()[:, 0:3, :] # [B, 3, 4], world to camera matrix
ones = torch.ones((scene_coords.size(0), 1, scene_coords.size(2))).cuda()
scene_coords_ = torch.cat([scene_coords, ones], dim=1) # [B, 4, N]
gt_coords_ = torch.cat([gt_coords, ones], dim=1) # [B, 4, N]
camera_coords = torch.bmm(gt_pose_inv, scene_coords_) # [B, 3, N] = [B, 3, 4] * [B, 4, N]
target_camera_coords = torch.bmm(gt_pose_inv, gt_coords_) # [B, 3, N] = [B, 3, 4] * [B, 4, N]
return camera_coords, target_camera_coords
def get_repro_err(camera_coords, cam_mat, pixel_grid_crop, min_depth):
"""
Get reprojection error for each pixel.
@param camera_coords [B, 3, N] tensor for camera coordinates.
@param cam_mat [3, 3] tensor for intrinsic camera matrix.
@param pixel_grid_crop [2, N] tensor for pixel grid.
@param min_depth Scalar for minimum reprojected depth.
@return reprojection_error [B, N] tensor for reprojection error in pixel.
"""
batch_size = camera_coords.size(0)
reprojection_error = torch.bmm(cam_mat.expand(batch_size, -1, -1), camera_coords) # [B, 3, H_ds*W_ds]
reprojection_error[:, 2].clamp_(min=min_depth) # avoid division by zero
reprojection_error = reprojection_error[:, 0:2] / reprojection_error[:, 2:] # [B, 2, H_ds*W_ds]
reprojection_error = reprojection_error - pixel_grid_crop[None, :, :]
    reprojection_error = reprojection_error.norm(p=2, dim=1).clamp(min=1.e-7)  # [B, H_ds*W_ds]
return reprojection_error
def check_constraints(camera_coords, reproj_error, cam_coords_reg_error, mask_gt_coords_nodata,
min_depth, max_reproj_error, max_coords_reg_error):
"""
Check constraints on network prediction.
@param camera_coords [B, 3, N] tensor for camera coordinates.
@param reproj_error [B, N] tensor for reprojection errors.
@param cam_coords_reg_error [B, N] tensor for scene coordinate regression raw errors including invalid points.
@param mask_gt_coords_nodata [B, N] tensor indicating points w/o valid scene coords labels.
    @param min_depth Scalar, threshold of minimum depth before the camera plane in meters.
@param max_reproj_error Scalar, threshold of maximum reprojection error in pixel.
@param max_coords_reg_error Scalar, threshold of maximum scene coords regression error in meter.
@return valid_sc [B, N] Pixels w/ valid scene coords prediction, goes for reprojection error.
"""
# check predicted scene coordinate for various constraints
invalid_min_depth = camera_coords[:, 2] < min_depth # [B, N], behind or too close to camera plane
invalid_repro = reproj_error > max_reproj_error # [B, N], very large reprojection errors
# check for additional constraints regarding ground truth scene coordinates
invalid_gt_distance = cam_coords_reg_error > max_coords_reg_error # [B, N] too far from ground truth
invalid_gt_distance[mask_gt_coords_nodata] = 0 # [B, N], filter out unknown ground truth
# combine all constraints
valid_sc = (invalid_min_depth + invalid_repro + invalid_gt_distance) == 0 # [B, N]
return valid_sc
def scene_coords_regression_loss(min_depth, soft_clamp, hard_clamp, init_tolerance, uncertainty,
pixel_grid, nodata_value, cam_mat,
scene_coords, uncertainty_map, gt_poses, gt_coords, reduction='mean'):
"""
Calculate scene coordinate regression loss, based on DSAC* and KF-Net implementation.
Code: https://github.com/vislearn/dsacstar
Paper: https://arxiv.org/abs/2002.12324
@param min_depth Scalar hyper-parameter for minimum reprojected depth.
    @param soft_clamp Scalar threshold above which the reprojection loss is soft-clamped (square-root regime).
    @param hard_clamp Scalar upper bound on the reprojection error (in pixels) for a prediction to count as valid.
    @param init_tolerance Scalar upper bound on the scene coordinate regression error (in meters) for a prediction to count as valid.
@param uncertainty Flag for uncertainty loss.
@param pixel_grid [2, M, N] Pixel positions tensor.
@param nodata_value Scalar to indicate NODATA element of ground truth scene coordinates.
@param cam_mat [3, 3] tensor for intrinsic camera matrix.
@param scene_coords [B, 3, H_ds, W_ds] Predicted scene coords tensor.
@param uncertainty_map [B, 1, H_ds, W_ds] Uncertainty map tensor.
@param gt_poses [B, 4, 4] Camera to world matrix
@param gt_coords [B, 3, H_ds, W_ds] ---> [B, 3, 60, 80] by default w/o augmentation
@param reduction Method to post-process the mini-batch loss, 'mean' for mean and None for not aggregating
@return loss Regression loss value.
@return num_valid_sc_rate Rate of valid scene coordinates.
"""
"""RGB mode, optimize a variant of the reprojection error"""
# crop ground truth pixel positions to prediction size
pixel_grid_crop = pixel_grid[:, 0:scene_coords.size(2), 0:scene_coords.size(3)].clone().view(2, -1)
scene_coords = scene_coords.view(scene_coords.size(0), 3, -1) # [B, 3, H_ds*W_ds]
gt_coords = gt_coords.view(gt_coords.size(0), 3, -1) # [B, 3, H_ds*W_ds]
camera_coords, target_camera_coords = coords_world_to_cam(scene_coords, gt_coords, gt_poses) # [B, 3, H_ds*W_ds]*2
camera_coords_reg_error = torch.norm(camera_coords - target_camera_coords, dim=1, p=2) # [B, H_ds*W_ds]
reprojection_error = get_repro_err(camera_coords, cam_mat, pixel_grid_crop, min_depth) # [B, H_ds*W_ds]
# check for invalid/unknown ground truth scene coordinates
mask_gt_coords_valdata = pick_valid_points(gt_coords[:, :3, :], nodata_value, boolean=True) # [B, H_ds*W_ds]
mask_gt_coords_nodata = torch.logical_not(mask_gt_coords_valdata) # [B, H_ds*W_ds]
valid_scene_coordinates = check_constraints(
camera_coords, reprojection_error, camera_coords_reg_error, mask_gt_coords_nodata,
min_depth=min_depth, max_reproj_error=hard_clamp,
max_coords_reg_error=init_tolerance) # [B, H_ds*W_ds], warning: it is not coupled with mask_gt_coords_valdata!
num_valid_sc = valid_scene_coordinates.sum(dim=1).cpu().numpy() # [B]
num_pixels_batch = valid_scene_coordinates.numel() # number of all pixels in the batch
num_pixels_instance = valid_scene_coordinates[0].numel() # number of pixels in one data point
# assemble loss
loss = 0
"""Reprojection error for all valid scene coordinates"""
loss_reproj = 0
if num_valid_sc.sum() > 0:
# calculate soft clamped l1 loss of reprojection error
# it's only applied to the **valid** scene coordinates
reprojection_error = reprojection_error * valid_scene_coordinates # [B, H_ds*W_ds], masked
loss_l1 = (reprojection_error * (reprojection_error <= soft_clamp)).clamp(min=1.e-7) # [B, H_ds*W_ds]
loss_sqrt = (reprojection_error * (reprojection_error > soft_clamp)).clamp(min=1.e-7) # [B, H_ds*W_ds]
loss_sqrt = torch.sqrt(soft_clamp * loss_sqrt + 1.e-7).clamp(min=1.e-7) # [B, H_ds*W_ds]
loss_reproj = loss_l1 + loss_sqrt # [B, H_ds*W_ds]
"""3D distance loss for pixels whose ground truth is known"""
# assemble loss
if uncertainty is None:
# original DSAC* implementation, not used
# invalid_scene_coordinates = torch.logical_not(valid_scene_coordinates) # [B, H_ds*W_ds]
# invalid_scene_coordinates[mask_gt_coords_nodata] = 0 # filter out pixels w/o valid labels
# loss += torch.sum(camera_coords_reg_error * invalid_scene_coordinates,
# dim=1) # [B], applied to invalid pixels w/ valid labels
# L2 distance loss on both coordinate regression and reprojection error
# it's applied to all pixels w/ valid labels
loss += torch.sum(camera_coords_reg_error * mask_gt_coords_valdata + loss_reproj, dim=1) # [B]
elif uncertainty == 'MLE':
uncertainty_map = uncertainty_map.view(uncertainty_map.size(0), -1).clamp(min=1.e-7) # [B, H_ds*W_ds]
coord_error_square = camera_coords_reg_error.square().clamp(min=1.e-7) # [B, H_ds*W_ds]
loss_unc = 3.0 * torch.log(uncertainty_map) + coord_error_square / (
2.0 * uncertainty_map.square().clamp(min=1.e-7)) # [B, H_ds*W_ds]
loss += torch.sum(loss_unc * mask_gt_coords_valdata + loss_reproj, dim=1) # [B]
# diagnosis
safe_printout(
'Regression error: coord: %.2f, reprojection: %.2f' % (
torch.sum(camera_coords_reg_error * mask_gt_coords_valdata).item()
/ max(1, mask_gt_coords_valdata.sum().item()),
torch.sum(reprojection_error * valid_scene_coordinates).item()
/ max(1, valid_scene_coordinates.sum().item())))
else:
raise NotImplementedError
valid_pred_rate = num_valid_sc.sum() / num_pixels_batch # scalar
if reduction is None:
loss /= num_pixels_instance # [B], each item is the mean over all pixels within one instance
elif reduction == 'mean':
loss = loss.sum() # scalar, mean over each pixels within the batch
loss /= num_pixels_batch
else:
raise NotImplementedError
return loss, valid_pred_rate
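# Illustrative call sketch (not part of the original code; hyper-parameter values are
# made up for the example, a CUDA device is required, and pixel_grid is assumed to be
# the [2, M, N] grid prepared elsewhere in this repository):
#     cam_mat = get_cam_mat(width=640, height=480, focal_length=525.0)
#     loss, valid_rate = scene_coords_regression_loss(
#         min_depth=0.1, soft_clamp=10, hard_clamp=1000, init_tolerance=0.1,
#         uncertainty=None, pixel_grid=pixel_grid, nodata_value=-1.0, cam_mat=cam_mat,
#         scene_coords=scene_coords,        # [B, 3, 60, 80] network prediction
#         uncertainty_map=None,             # only used when uncertainty == 'MLE'
#         gt_poses=gt_poses,                # [B, 4, 4] cam-to-world matrices
#         gt_coords=gt_coords,              # [B, 3, 60, 80] ground-truth coordinates
#         reduction='mean')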
|
11468347
|
from binascii import hexlify, unhexlify
from eth_hash.auto import keccak
from os import urandom
from secp256k1 import Secp256k1, SECRET_KEY_SIZE, PUBLIC_KEY_SIZE, PUBLIC_KEY_SIZE_COMPRESSED, \
EC_COMPRESSED, EC_UNCOMPRESSED
from ._libsecp256k1 import ffi, lib
class SecretKey:
def __init__(self):
# Byte array containing the key, use bytes() before passing to secp256k1
self.key = bytearray([0] * SECRET_KEY_SIZE)
def __eq__(self, other):
return isinstance(other, SecretKey) and self.key == other.key
def __str__(self):
return "SecretKey<{}>".format(self.to_hex().decode())
def __repr__(self):
return self.__str__()
def to_bytearray(self) -> bytearray:
return self.key[:]
def to_hex(self) -> bytes:
return hexlify(self.key)
def to_public_key(self, secp: Secp256k1):
return PublicKey.from_secret_key(secp, self)
def clone(self):
obj = SecretKey()
obj.key = self.key[:]
return obj
# b: a -> a+b
def add_assign(self, secp: Secp256k1, other):
assert isinstance(other, SecretKey)
key = ffi.new("char [32]", bytes(self.key))
res = lib.secp256k1_ec_privkey_tweak_add(secp.ctx, key, bytes(other.key))
assert res, "Unable to add in place"
self.key = bytearray(ffi.buffer(key, 32))
# b: a+b
def add(self, secp: Secp256k1, other):
obj = self.clone()
obj.add_assign(secp, other)
return obj
# b: a -> a*b
def mul_assign(self, secp: Secp256k1, other):
assert isinstance(other, SecretKey)
key = ffi.new("char [32]", bytes(self.key))
res = lib.secp256k1_ec_privkey_tweak_mul(secp.ctx, key, bytes(other.key))
assert res, "Unable to multiply in place"
self.key = bytearray(ffi.buffer(key, 32))
# b: a*b
def mul(self, secp: Secp256k1, other):
obj = self.clone()
obj.mul_assign(secp, other)
return obj
# a -> -a
def negate_assign(self, secp: Secp256k1):
key = ffi.new("char [32]", bytes(self.key))
res = lib.secp256k1_ec_privkey_negate(secp.ctx, key)
assert res, "Unable to negate in place"
self.key = bytearray(ffi.buffer(key, 32))
# -a
def negate(self, secp: Secp256k1):
obj = self.clone()
obj.negate_assign(secp)
return obj
@staticmethod
def from_bytearray(secp: Secp256k1, data: bytearray):
assert len(data) == SECRET_KEY_SIZE, "Invalid private key size"
res = lib.secp256k1_ec_seckey_verify(secp.ctx, bytes(data))
assert res, "Invalid private key"
obj = SecretKey()
obj.key = data[:]
return obj
@staticmethod
def from_hex(secp: Secp256k1, data: bytes):
return SecretKey.from_bytearray(secp, bytearray(unhexlify(data)))
@staticmethod
def random(secp: Secp256k1):
try:
return SecretKey.from_bytearray(secp, bytearray(urandom(32)))
except AssertionError:
# There is a very small chance of producing a number larger than the curve order
return SecretKey.random(secp)
class PublicKey:
def __init__(self, secp: Secp256k1):
self.key = ffi.new("secp256k1_pubkey *")
self.secp = secp
def __eq__(self, other):
        return isinstance(other, PublicKey) and self.to_bytearray(self.secp) == other.to_bytearray(other.secp)
def __str__(self):
return "PublicKey<{}>".format(self.to_hex(self.secp, compressed=True).decode())
def __repr__(self):
return self.__str__()
def to_bytearray(self, secp: Secp256k1, compressed=True) -> bytearray:
size = PUBLIC_KEY_SIZE_COMPRESSED if compressed else PUBLIC_KEY_SIZE
flag = EC_COMPRESSED if compressed else EC_UNCOMPRESSED
out = ffi.new("char [%d]" % size)
out_size = ffi.new("size_t *", size)
res = lib.secp256k1_ec_pubkey_serialize(secp.ctx, out, out_size, self.key, flag)
assert res, "Unable to serialize"
return bytearray(ffi.buffer(out, size))
def to_hex(self, secp: Secp256k1, compressed=True) -> bytes:
return hexlify(self.to_bytearray(secp, compressed))
def clone(self, secp: Secp256k1):
return PublicKey.from_bytearray(secp, self.to_bytearray(secp))
# b: A -> A+b*G - Add a scalar in place
def add_scalar_assign(self, secp: Secp256k1, other: SecretKey):
res = lib.secp256k1_ec_pubkey_tweak_add(secp.ctx, self.key, bytes(other.key))
assert res, "Unable to add scalar in place"
# b: A+b*G - Add a scalar
def add_scalar(self, secp: Secp256k1, other: SecretKey):
obj = self.clone(secp)
obj.add_scalar_assign(secp, other)
return obj
# B: A -> A+B - Add a public key in place
def add_assign(self, secp: Secp256k1, other):
obj = self.add(secp, other)
self.key = obj.key
# B: A+B - Add a public key
def add(self, secp: Secp256k1, other):
assert isinstance(other, PublicKey)
obj = PublicKey.from_combination(secp, [self, other])
return obj
# b: A -> b*A - Multiple the public key by a scalar in place
def mul_assign(self, secp: Secp256k1, other: SecretKey):
res = lib.secp256k1_ec_pubkey_tweak_mul(secp.ctx, self.key, bytes(other.key))
assert res, "Unable to multiply in place"
# b: b*A - Multiple the public key by a scalar
def mul(self, secp: Secp256k1, other: SecretKey):
obj = self.clone(secp)
obj.mul_assign(secp, other)
return obj
# A -> -A
def negate_assign(self, secp: Secp256k1):
res = lib.secp256k1_ec_pubkey_negate(secp.ctx, self.key)
assert res, "Unable to negate in place"
# -A
def negate(self, secp: Secp256k1):
obj = self.clone(secp)
obj.negate_assign(secp)
return obj
@staticmethod
def from_bytearray(secp: Secp256k1, data: bytearray):
size = len(data)
assert size in (PUBLIC_KEY_SIZE_COMPRESSED, PUBLIC_KEY_SIZE), "Invalid public key size"
obj = PublicKey(secp)
res = lib.secp256k1_ec_pubkey_parse(secp.ctx, obj.key, bytes(data), size)
assert res, "Invalid public key"
return obj
@staticmethod
def from_hex(secp: Secp256k1, data: bytes):
return PublicKey.from_bytearray(secp, bytearray(unhexlify(data)))
@staticmethod
def from_secret_key(secp: Secp256k1, secret: SecretKey):
obj = PublicKey(secp)
res = lib.secp256k1_ec_pubkey_create(secp.ctx, obj.key, bytes(secret.key))
assert res, "Invalid secret key"
return obj
@staticmethod
def from_combination(secp: Secp256k1, pos_keys, neg_keys=None):
if neg_keys is None:
assert len(pos_keys) > 0
else:
assert len(pos_keys) > 0 or len(neg_keys) > 0
obj = PublicKey(secp)
items = []
for key in pos_keys:
if isinstance(key, SecretKey):
items.append(key.to_public_key(secp).key)
else:
assert isinstance(key, PublicKey), "Input not all instance of SecretKey or PublicKey"
items.append(key.key)
if isinstance(neg_keys, list) and len(neg_keys) > 0:
neg_sum = PublicKey.from_combination(secp, neg_keys)
neg_sum.negate_assign(secp)
items.append(neg_sum.key)
res = lib.secp256k1_ec_pubkey_combine(secp.ctx, obj.key, items, len(items))
assert res, "Unable to combine keys"
return obj
class Signature:
def __init__(self, signature: bytearray):
self.signature = signature
def __eq__(self, other):
return isinstance(other, Signature) and self.signature == other.signature
def __str__(self):
return "Signature<{}>".format(self.to_hex().decode())
def __repr__(self):
return self.__str__()
def scalar(self, secp: Secp256k1) -> SecretKey:
return SecretKey.from_bytearray(secp, self.signature[32:64])
def to_bytearray(self, secp: Secp256k1, compact=False) -> bytearray:
if not compact:
return self.signature[:]
signature = ffi.new("secp256k1_ecdsa_signature *", [bytes(self.signature)])
output = ffi.new("char [64]")
res = lib.secp256k1_ecdsa_signature_serialize_compact(secp.ctx, output, signature)
assert res, "Unable to serialize signature"
return bytearray(ffi.buffer(output, 64))
def to_hex(self) -> bytes:
return hexlify(self.signature)
def normalize_s(self, secp: Secp256k1):
signature_in = ffi.new("secp256k1_ecdsa_signature *", [bytes(self.signature)])
signature_out = ffi.new("secp256k1_ecdsa_signature *")
lib.secp256k1_ecdsa_signature_normalize(secp.ctx, signature_out, signature_in)
self.signature = bytearray(ffi.buffer(signature_out, 64))
@staticmethod
def from_bytearray(secp: Secp256k1, signature: bytearray, compact=False):
if not compact:
return Signature(signature[:])
signature_out = ffi.new("secp256k1_ecdsa_signature *")
res = lib.secp256k1_ecdsa_signature_parse_compact(secp.ctx, signature_out, bytes(signature))
assert res, "Unable to parse signature"
        return Signature(bytearray(ffi.buffer(signature_out, 64)))
@staticmethod
def from_hex(data: bytes):
return Signature(unhexlify(data))
def ethereum_address(secp: Secp256k1, public_key: PublicKey) -> bytes:
return b"0x"+hexlify(keccak(bytes(public_key.to_bytearray(secp, False)[1:]))[12:])
|
11468368
|
import torch
import random
import torch.nn as nn
from torch.nn import functional as F
from torch.distributions.categorical import Categorical
import torchvision
from . import resnet, resnext, mobilenet, hrnet, u_net, attention_u_net, attention_u_net_deep, attention_u_net_deep_ds4x, hrnetv2_nonsyn
from lib.nn import SynchronizedBatchNorm2d
from dataset import imresize, b_imresize, patch_loader
BatchNorm2d = SynchronizedBatchNorm2d
BN_MOMENTUM = 0.1
class FovResModule(nn.Module):
def __init__(self, in_channels, out_channels, cfg):
# in_channels: num of channels corresponds to input image channels, e.g. 3
        # out_channels: num of channels corresponds to num of scales tested
super(FovResModule, self).__init__()
self.compress = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, padding=0, bias=False)
self.pool = nn.AdaptiveAvgPool2d(cfg.MODEL.patch_bank[0] // cfg.DATASET.segm_downsampling_rate)
def forward(self, x):
return self.pool(self.compress(x))
class FoveationModule(nn.Module):
def __init__(self, in_channels, out_channels, len_gpus, cfg):
# in_channels: num of channels corresponds to input image channels, e.g. 3
        # out_channels: num of channels corresponds to num of scales tested
super(FoveationModule, self).__init__()
self.cfg = cfg
self.fov_expand_1 = nn.Conv2d(in_channels=in_channels, out_channels=8*out_channels, kernel_size=3, padding=1, bias=False)
self.fov_expand_2 = nn.Conv2d(in_channels=8*out_channels, out_channels=8*out_channels, kernel_size=3, padding=1, bias=False)
self.fov_squeeze_1 = nn.Conv2d(in_channels=8*out_channels, out_channels=out_channels, kernel_size=3, padding=1, bias=False)
if cfg.MODEL.fov_normalise == 'instance':
self.norm1 = nn.InstanceNorm2d(8*out_channels, momentum=BN_MOMENTUM, affine=True)
self.norm2 = nn.InstanceNorm2d(8*out_channels, momentum=BN_MOMENTUM, affine=True)
self.norm3 = nn.InstanceNorm2d(out_channels, momentum=BN_MOMENTUM, affine=True)
else:
# bn
self.norm1 = BatchNorm2d(8*out_channels, momentum=BN_MOMENTUM)
self.norm2 = BatchNorm2d(8*out_channels, momentum=BN_MOMENTUM)
self.norm3 = BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
if cfg.MODEL.fov_activation == 'relu':
self.act = nn.ReLU6(inplace=True)
elif cfg.MODEL.fov_activation == 'leaky_relu':
self.act = nn.LeakyReLU(inplace=True)
self.acc_grad = [[] for _ in range(len_gpus)]
# for tensorboard gradient visualization
self.save_print_grad = [{'layer1_grad': None, 'layer2_grad': None, 'layer3_grad': None} for _ in range(len_gpus)]
if self.cfg.MODEL.deep_fov == 'deep_fov1':
self.fov_expand_3 = nn.Conv2d(in_channels=8*out_channels, out_channels=8*out_channels, kernel_size=3, padding=1, bias=False)
self.fov_expand_4 = nn.Conv2d(in_channels=8*out_channels, out_channels=8*out_channels, kernel_size=3, padding=1, bias=False)
# self.requires_grad = True
if self.cfg.MODEL.deep_fov == 'deep_fov2':
self.fov_expand_2 = nn.Conv2d(in_channels=8*out_channels, out_channels=8*out_channels, kernel_size=5, padding=2, bias=False)
self.fov_squeeze_1 = nn.Conv2d(in_channels=8*out_channels, out_channels=out_channels, kernel_size=7, padding=3, bias=False)
def forward(self, x, reset_grad=True, train_mode=True):
# TODO: F.softplus(Ci) / Sum(C[:])
if self.cfg.MODEL.deep_fov == 'deep_fov1':
            out = self.act(self.norm1(self.fov_expand_1(x)))
            out = self.act(self.norm2(self.fov_expand_2(out)))
            out = self.act(self.fov_expand_3(out))
            out = self.act(self.fov_expand_4(out))
            output = F.softmax(self.norm3(self.fov_squeeze_1(out)), dim=1)
else:
if self.cfg.MODEL.fov_normalise == 'no_normalise':
layer3 = self.fov_squeeze_1(self.act(self.fov_expand_2(self.act(self.fov_expand_1(x)))))
elif self.cfg.MODEL.fov_normalise == 'bn1':
layer3 = self.fov_squeeze_1(self.act(self.fov_expand_2(self.act(self.norm1(self.fov_expand_1(x))))))
else:
if train_mode:
layer1 = self.act(self.norm1(self.fov_expand_1(x)))
layer1.register_hook(self.print_grad_layer1)
layer2 = self.act(self.norm2(self.fov_expand_2(layer1)))
layer2.register_hook(self.print_grad_layer2)
layer3 = self.norm3(self.fov_squeeze_1(layer2))
layer3.register_hook(self.print_grad_layer3)
else:
layer1 = self.act(self.norm1(self.fov_expand_1(x)))
layer2 = self.act(self.norm2(self.fov_expand_2(layer1)))
layer3 = self.norm3(self.fov_squeeze_1(layer2))
if self.cfg.MODEL.gumbel_softmax:
output = F.log_softmax(layer3, dim=1)
else:
output = F.softmax(layer3, dim=1)
if train_mode:
if reset_grad:
output.register_hook(self.save_grad)
else:
output.register_hook(self.manipulate_grad)
if train_mode and not reset_grad:
return output, self.save_print_grad[0]
else:
return output
def print_grad_layer1(self, grad):
self.save_print_grad[grad.device.index]['layer1_grad'] = grad.clone()
def print_grad_layer2(self, grad):
self.save_print_grad[grad.device.index]['layer2_grad'] = grad.clone()
def print_grad_layer3(self, grad):
self.save_print_grad[grad.device.index]['layer3_grad'] = grad.clone()
def save_grad(self, grad):
self.acc_grad[grad.device.index].append(grad.clone())
if self.cfg.MODEL.Zero_Step_Grad:
grad *= 0
def manipulate_grad(self, grad):
self.acc_grad[grad.device.index].append(grad.clone())
total_grad = torch.cat(self.acc_grad[grad.device.index])
total_grad = torch.sum(total_grad, dim=0)
grad.data += total_grad.data
self.acc_grad[grad.device.index] = []
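# Illustrative, standalone sketch (not part of the model) of the Tensor.register_hook
# pattern used by FoveationModule above: a backward hook receives the gradient of the
# hooked tensor during backward() and can keep a copy of it or rewrite it in place,
# which is how save_grad/manipulate_grad accumulate and re-inject gradients here.
def _sketch_register_hook():
    import torch
    captured = {}
    def hook(g):
        captured['dL_dy'] = g.clone()  # keep a copy, as save_print_grad does
    x = torch.randn(4, requires_grad=True)
    y = x * 3
    y.register_hook(hook)
    y.sum().backward()
    # d(sum)/dy is a vector of ones, so x.grad is 3 * ones.
    return captured['dL_dy'], x.grad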
class SegmentationModuleBase(nn.Module):
def __init__(self):
super(SegmentationModuleBase, self).__init__()
def pixel_acc(self, pred, label):
_, preds = torch.max(pred, dim=1)
valid = (label >= 0).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
return acc
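# Illustrative, standalone check of pixel_acc above: pixels labelled < 0 are treated as
# invalid and ignored; accuracy is computed over the remaining pixels (here 2 of the
# 3 valid pixels match the argmax prediction, giving ~0.667).
def _sketch_pixel_acc():
    import torch
    pred = torch.zeros(1, 3, 2, 2)
    pred[:, 2] = 1.0                           # argmax selects class 2 everywhere
    label = torch.tensor([[[2, 2], [0, -1]]])  # one wrong pixel, one ignored pixel
    return SegmentationModuleBase().pixel_acc(pred, label)  # ~= 0.6667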
class FovSegmentationModule(SegmentationModuleBase):
def __init__(self, net_foveater, cfg, len_gpus=2):
super(FovSegmentationModule, self).__init__()
self.foveater = net_foveater
self.cfg = cfg
self.F_Xlr_i_soft = [None for _ in range(len_gpus)]
self.F_Xlr_i_grad = [None for _ in range(len_gpus)]
def forward(self, batch_data, *, train_mode=True):
# print('in device', batch_data['img_data'].device)
if train_mode:
xi, yi, rand_location, fov_location_batch_step = batch_data['cor_info']
else:
xi, yi = batch_data['cor_info']
X, Y = batch_data['img_data'], batch_data['seg_label']
if not train_mode:
X_unnorm = batch_data['img_data_unnorm']
fov_map_scale = self.cfg.MODEL.fov_map_scale
X_lr = b_imresize(X, (round(X.shape[2]/fov_map_scale), round(X.shape[3]/(fov_map_scale*self.cfg.MODEL.patch_ap))), interp='bilinear')
if self.cfg.MODEL.one_hot_patch != []:
if max(self.cfg.MODEL.one_hot_patch) == 0:
one_hot_patch_temp = self.cfg.MODEL.one_hot_patch
one_hot_patch_temp[random.randint(0,len(one_hot_patch_temp)-1)] = 1
one_hot_tensor = torch.FloatTensor(one_hot_patch_temp)
else:
one_hot_tensor = torch.FloatTensor(self.cfg.MODEL.one_hot_patch)
one_hot_tensor = one_hot_tensor.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
F_Xlr = one_hot_tensor.expand((X_lr.shape[0],len(self.cfg.MODEL.patch_bank),X_lr.shape[2],X_lr.shape[3])).to(X_lr.device)
print_grad = torch.zeros(F_Xlr.shape).to(X_lr.device)
else:
if train_mode:
if fov_location_batch_step == rand_location:
F_Xlr, print_grad = self.foveater(X_lr, reset_grad=False, train_mode=True) # b,d,w,h
else:
F_Xlr = self.foveater(X_lr, reset_grad=True, train_mode=True) # b,d,w,h
else:
F_Xlr = self.foveater(X_lr, reset_grad=False, train_mode=False)
if self.cfg.VAL.F_Xlr_low_scale != 0 and 'F_Xlr_low_res' in batch_data:
F_Xlr = batch_data['F_Xlr_low_res']
F_Xlr_i = F_Xlr[:,:,xi,yi] # b,d,m where m = mini_batch size = len(xi)
hard_selected_scale = None
patch_data = dict()  # created here (before the hard-fov branch) so hard_max_idx below can be stored in it
if self.cfg.MODEL.hard_fov:
if train_mode:
self.F_Xlr_i_soft[F_Xlr_i.device.index] = F_Xlr_i
if train_mode and self.cfg.MODEL.one_hot_patch == []:
F_Xlr_i.register_hook(self.modify_argmax_grad)
_, max_idx = torch.max(F_Xlr_i, dim=1)
if self.cfg.MODEL.hard_fov_pred:
patch_data['hard_max_idx'] = max_idx
mask = (torch.ones(F_Xlr_i.shape).long()*torch.arange(F_Xlr_i.size(1)).reshape(F_Xlr_i.size(1),-1)).to(F_Xlr_i.device) == max_idx.unsqueeze(1).to(F_Xlr_i.device)
# mask = torch.arange(F_Xlr_i.size(1)).reshape(1,-1).to(F_Xlr_i.device) == max_idx.unsqueeze(1).to(F_Xlr_i.device)
# important trick: a normal in-place operation like a[mask] = 1 returns a broken gradient, so use tensor.masked_fill_
# https://github.com/pytorch/pytorch/issues/1011
F_Xlr_i_hard = F_Xlr_i.clone().masked_fill_(mask.eq(0), 0).masked_fill_(mask.eq(1), 1)
if train_mode and self.cfg.MODEL.one_hot_patch == []:
F_Xlr_i_hard.register_hook(self.hook_F_Xlr_i_grad)
# print(max_idx.size())
if self.cfg.MODEL.hard_select:
hard_selected_scale = max_idx
elif self.cfg.MODEL.categorical:
# patch_probs = foveation_net(x_lr) # get the probabilities over patches from FoveationModule
# mbs > 1 not currently supported
m = Categorical(F_Xlr_i.permute(0,2,1)) # Categorical treats the last dimension as the probability vector
patch_selected = m.sample() # select a patch by sampling from the distribution, e.g. tensor(3)
# mask = torch.arange(F_Xlr_i.size(1)).reshape(1,-1).to(F_Xlr_i.device) == patch_selected.unsqueeze(1).to(F_Xlr_i.device)
# F_Xlr_i_hard = F_Xlr_i.clone().masked_fill_(mask.eq(0), 0).masked_fill_(mask.eq(1), 1)
hard_selected_scale = patch_selected
elif self.cfg.MODEL.gumbel_softmax:
# print('applied gumbel_tau: ', self.cfg.MODEL.gumbel_tau)
if self.cfg.MODEL.gumbel_softmax_st:
# straight-through
F_Xlr_i_hard = F.gumbel_softmax(F_Xlr_i, tau=self.cfg.MODEL.gumbel_tau, hard=True, dim=1)
else:
F_Xlr_i_hard = F.gumbel_softmax(F_Xlr_i, tau=self.cfg.MODEL.gumbel_tau, hard=False, dim=1)
# if self.cfg.VAL.F_Xlr_only:
# hard_selected_scale = torch.tensor([[[0]]])
# iterate over the mini-batch (patch_data was created above, before the hard-fov branch)
for i_mb in range(len(xi)):
xi_i = xi[i_mb]
yi_i = yi[i_mb]
if train_mode:
if self.cfg.MODEL.categorical or (self.cfg.MODEL.hard_fov and self.cfg.MODEL.hard_select):
X_patches, seg_label = patch_loader(X, Y, xi_i, yi_i, self.cfg, train_mode=train_mode, select_scale=hard_selected_scale[:,i_mb])
else:
X_patches, seg_label = patch_loader(X, Y, xi_i, yi_i, self.cfg, train_mode=train_mode, select_scale=hard_selected_scale)
else:
if self.cfg.MODEL.categorical or (self.cfg.MODEL.hard_fov and self.cfg.MODEL.hard_select):
X_patches, Y_patch_cord, X_patches_cords, seg_label = patch_loader(X, Y, xi_i, yi_i, self.cfg, train_mode=False, select_scale=hard_selected_scale[:,i_mb])
else:
X_patches, Y_patch_cord, X_patches_cords, seg_label = patch_loader(X, Y, xi_i, yi_i, self.cfg, train_mode=False, select_scale=hard_selected_scale)
if self.cfg.VAL.visualize:
if self.cfg.MODEL.categorical or (self.cfg.MODEL.hard_fov and self.cfg.MODEL.hard_select):
X_patches_unnorm, Y_patch_cord_unnorm, X_patches_cords_unnorm, seg_label_unnorm = patch_loader(X_unnorm, Y, xi_i, yi_i, self.cfg, train_mode=False, select_scale=hard_selected_scale[:,i_mb])
else:
X_patches_unnorm, Y_patch_cord_unnorm, X_patches_cords_unnorm, seg_label_unnorm = patch_loader(X_unnorm, Y, xi_i, yi_i, self.cfg, train_mode=False, select_scale=hard_selected_scale)
X_patches_unnorm = torch.cat([item.unsqueeze(0) for item in X_patches_unnorm]) # list -> tensor of shape d,b,c,w,h
X_patches_unnorm = X_patches_unnorm.permute(1,0,2,3,4) # b,d,c,w,h
# convert list to tensor
X_patches = torch.cat([item.unsqueeze(0) for item in X_patches]) # list -> tensor of shape d,b,c,w,h
X_patches = X_patches.permute(1,0,2,3,4) # b,d,c,w,h
if self.cfg.MODEL.hard_fov or self.cfg.MODEL.gumbel_softmax:
if self.cfg.MODEL.hard_select:
weighted_average = X_patches[:,0,:,:,:]*(patch_selected/patch_selected).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).float() # d = 1 as selected single scale
else:
weighted_patches = F_Xlr_i_hard[:,:,i_mb].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(*X_patches.size()).to(X_patches.device)*X_patches
weighted_average = torch.sum(weighted_patches, dim=1) # b,c,w,h
elif self.cfg.MODEL.categorical:
weighted_average = X_patches[:,0,:,:,:] # b,c,w,h
else:
weighted_patches = F_Xlr[:,:,xi_i,yi_i].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(*X_patches.size()).to(X_patches.device)*X_patches
weighted_average = torch.sum(weighted_patches, dim=1) # b,c,w,h
if i_mb == 0:
seg_label_mb = seg_label
weighted_average_mb = weighted_average
else:
if train_mode:
seg_label_mb = torch.cat([seg_label_mb, seg_label])
weighted_average_mb = torch.cat([weighted_average_mb, weighted_average])
patch_data['seg_label'] = seg_label_mb
patch_data['img_data'] = weighted_average_mb
if self.cfg.MODEL.categorical:
patch_data['log_prob_act'] = m.log_prob(patch_selected)
# training
if train_mode:
if fov_location_batch_step == rand_location:
return patch_data, F_Xlr, print_grad
else:
return patch_data, F_Xlr
# inference
elif self.cfg.VAL.visualize:
return patch_data, F_Xlr, Y_patch_cord, X_patches_cords, X_patches_unnorm
else:
return patch_data, F_Xlr, Y_patch_cord
def hook_F_Xlr_i_grad(self, grad):
# print('F_Xlr_i_grad', grad)
self.F_Xlr_i_grad[grad.device.index] = grad.clone()
def modify_argmax_grad(self, grad):
# print('argmax_grad', grad)
if self.cfg.MODEL.hard_grad == "st_inv":
self.F_Xlr_i_grad[grad.device.index] /= self.F_Xlr_i_soft[grad.device.index]
# normal straight-through
# print('ori_argmax_grad:', grad.data)
grad.data = self.F_Xlr_i_grad[grad.device.index].data
# print('modifyed_argmax_grad', grad)
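# Illustrative, standalone sketch (toy tensors, not the module above) of the
# straight-through pattern used in FovSegmentationModule: the hard one-hot built with
# masked_fill_ consists entirely of constants, so no gradient reaches the soft
# probabilities through it; the hooks above (hook_F_Xlr_i_grad / modify_argmax_grad)
# work around this by copying the hard tensor's incoming gradient onto the soft one.
def _sketch_straight_through():
    import torch
    logits = torch.randn(2, 5, requires_grad=True)
    soft = torch.softmax(logits, dim=1)
    grads = {}
    soft.register_hook(lambda g: grads.setdefault('soft', g.clone()))
    _, idx = soft.max(dim=1)
    mask = torch.arange(soft.size(1)) == idx.unsqueeze(1)
    hard = soft.clone().masked_fill_(mask.eq(0), 0).masked_fill_(mask.eq(1), 1)
    hard.register_hook(lambda g: grads.setdefault('hard', g.clone()))
    (hard * torch.randn(2, 5)).sum().backward()
    # grads['soft'] is all zeros, while grads['hard'] holds the upstream gradient that
    # the module above re-injects via grad.data in modify_argmax_grad.
    return grads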
class SegmentationModule(SegmentationModuleBase):
def __init__(self, net_enc, net_dec, crit, cfg, deep_sup_scale=None, net_fov_res=None):
super(SegmentationModule, self).__init__()
self.encoder = net_enc
self.decoder = net_dec
self.crit = crit
self.cfg = cfg
self.deep_sup_scale = deep_sup_scale
self.net_fov_res = net_fov_res
# @torchsnooper.snoop()
def forward(self, feed_dict, *, segSize=None, F_Xlr_acc_map=False):
# training
if segSize is None:
if self.deep_sup_scale is not None: # use deep supervision technique
(pred, pred_deepsup) = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True))
elif self.net_fov_res is not None:
pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True), res=self.net_fov_res(feed_dict['img_data']))
else:
pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True))
if self.cfg.MODEL.hard_fov_pred:
hard_pred = []
patch_bank = self.cfg.MODEL.patch_bank
segm_downsampling_rate = self.cfg.DATASET.segm_downsampling_rate
hard_max_idx = feed_dict['hard_max_idx']
for b in range(feed_dict['seg_label'].shape[0]):
hard_max_patch_size = patch_bank[hard_max_idx[b]]
# print('hard_max_idx:', hard_max_idx[b])
central_patch_size = patch_bank[0]
cx = (hard_max_patch_size//2 - central_patch_size//2) // (hard_max_patch_size//central_patch_size)
cy = (hard_max_patch_size//2 - central_patch_size//2) // (hard_max_patch_size//central_patch_size)
central_patch_size = central_patch_size // (hard_max_patch_size//central_patch_size)
if segm_downsampling_rate != 1:
central_patch_size = central_patch_size // segm_downsampling_rate
cx = cx // segm_downsampling_rate
cy = cy // segm_downsampling_rate
central_crop = pred[b][:, cx:cx+central_patch_size, cy:cy+central_patch_size].clone()
central_crop = central_crop.unsqueeze(0)
# print('central_crop_shape:', central_crop.shape)
# print('pred:', pred[b])
hard_pred.append(F.interpolate(central_crop, (pred[b].shape[1], pred[b].shape[2]), mode='bilinear').clone())
# print('hard_pred:', hard_pred[b])
hard_pred = torch.cat(hard_pred, dim=0)
loss = self.crit(hard_pred, feed_dict['seg_label'])
else:
loss = self.crit(pred, feed_dict['seg_label'])
if self.deep_sup_scale is not None:
loss_deepsup = self.crit(pred_deepsup, feed_dict['seg_label'])
loss = loss + loss_deepsup * self.deep_sup_scale
acc = self.pixel_acc(pred, feed_dict['seg_label'])
return loss, acc
# inference
else:
if self.net_fov_res is not None:
pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True), segSize=segSize, res=self.net_fov_res(feed_dict['img_data']))
else:
pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True), segSize=segSize)
if self.cfg.MODEL.hard_fov_pred:
patch_bank = list((float(self.cfg.VAL.expand_prediection_rate_patch)*np.array(self.cfg.MODEL.patch_bank)).astype(int))
hard_max_idx = feed_dict['hard_max_idx']
for b in range(feed_dict['img_data'].shape[0]):
hard_max_patch_size = patch_bank[hard_max_idx[b]]
# print('hard_max_idx:', hard_max_idx[b])
central_patch_size = patch_bank[0]
cx = (hard_max_patch_size//2 - central_patch_size//2) // (hard_max_patch_size//central_patch_size)
cy = (hard_max_patch_size//2 - central_patch_size//2) // (hard_max_patch_size//central_patch_size)
central_patch_size = central_patch_size // (hard_max_patch_size//central_patch_size)
central_crop = pred[b][:, cx:cx+central_patch_size, cy:cy+central_patch_size].clone()
central_crop = central_crop.unsqueeze(0)
# print('central_crop_shape:', central_crop.shape)
# print('pred:', pred[b])
pred[b] = F.interpolate(central_crop, (pred[b].shape[1], pred[b].shape[2]), mode='bilinear')[0].clone()
if F_Xlr_acc_map:
loss = self.crit(pred, feed_dict['seg_label'])
return pred, loss
else:
return pred
class ModelBuilder:
# custom weights initialization
@staticmethod
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
#elif classname.find('Linear') != -1:
# m.weight.data.normal_(0.0, 0.0001)
@staticmethod
def build_encoder(arch='resnet50', fc_dim=2048, weights='', dilate_rate=4):
pretrained = True if len(weights) == 0 else False
arch = arch.lower()
if arch == 'mobilenetv2dilated':
orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=pretrained)
net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=dilate_rate)
elif arch == 'resnet18':
orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet18dilated':
orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=dilate_rate)
elif arch == 'resnet34':
raise NotImplementedError
orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet34dilated':
raise NotImplementedError
orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=dilate_rate)
elif arch == 'resnet50':
orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet50dilated':
orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=dilate_rate)
elif arch == 'resnet101':
orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet101dilated':
orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet, dilate_scale=dilate_rate)
elif arch == 'resnext101':
orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnext) # we can still use class Resnet
elif arch == 'hrnetv2':
net_encoder = hrnet.__dict__['hrnetv2'](pretrained=pretrained)
elif arch == 'hrnetv2_nonsyn':
net_encoder = hrnetv2_nonsyn.__dict__['hrnetv2_nonsyn'](pretrained=False)
elif arch == 'hrnetv2_nopretrain':
net_encoder = hrnet.__dict__['hrnetv2'](pretrained=False)
elif arch == 'u_net':
net_encoder = u_net.__dict__['u_net'](pretrained=pretrained)
elif arch == 'attention_u_net':
net_encoder = attention_u_net.__dict__['attention_u_net'](pretrained=pretrained, width=fc_dim)
elif arch == 'attention_u_net_deep':
net_encoder = attention_u_net_deep.__dict__['attention_u_net_deep'](pretrained=pretrained)
elif arch == 'attention_u_net_deep_ds4x':
net_encoder = attention_u_net_deep_ds4x.__dict__['attention_u_net_deep_ds4x'](pretrained=pretrained)
else:
raise Exception('Architecture undefined!')
# encoders are usually pretrained
# net_encoder.apply(ModelBuilder.weights_init)
if len(weights) > 0:
print('Loading weights for net_encoder')
net_encoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_encoder
@staticmethod
def build_decoder(arch='upernet',
fc_dim=2048, num_class=150,
weights='', use_softmax=False):
arch = arch.lower()
if arch == 'c1_deepsup':
net_decoder = C1DeepSup(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'c1_u':
net_decoder = C1_U(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'c1':
net_decoder = C1(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'c1_hrnet_cityscape':
net_decoder = C1_hrnet_cityscape(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'c1_8090':
net_decoder = C1_8090(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'c1_upsample':
net_decoder = C1_upsample(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'ppm':
net_decoder = PPM(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'ppm_deepsup':
net_decoder = PPMDeepsup(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax)
elif arch == 'upernet_lite':
net_decoder = UPerNet(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=256)
elif arch == 'upernet':
net_decoder = UPerNet(
num_class=num_class,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=512)
else:
raise Exception('Architecture undefined!')
net_decoder.apply(ModelBuilder.weights_init)
if len(weights) > 0:
print('Loading weights for net_decoder')
net_decoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_decoder
@staticmethod
def build_foveater(in_channel=3,
out_channel=3,
len_gpus=2,
weights='',
cfg=None):
net_foveater = FoveationModule(in_channel, out_channel, len_gpus, cfg)
if len(weights) == 0:
net_foveater.apply(ModelBuilder.weights_init)
else:
print('Loading weights for net_foveater')
net_foveater.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_foveater
@staticmethod
def build_fov_res(in_channel=3,
out_channel=1,
weights='',
cfg=None):
net_fov_res = FovResModule(in_channel, out_channel, cfg)
if len(weights) == 0:
net_fov_res.apply(ModelBuilder.weights_init)
else:
print('Loading weights for net_fov_res')
net_fov_res.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_fov_res
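# Illustrative usage (not called anywhere): builds an encoder/decoder pair with argument
# values taken from the branches handled above; with weights='' the encoder constructor
# will try to download ImageNet-pretrained weights.
def _sketch_build_models():
    net_encoder = ModelBuilder.build_encoder(arch='resnet50dilated', fc_dim=2048, dilate_rate=8)
    net_decoder = ModelBuilder.build_decoder(arch='c1', fc_dim=2048, num_class=150)
    return net_encoder, net_decoder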
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
"3x3 convolution + BN + relu"
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=1, bias=False),
BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
class Resnet(nn.Module):
def __init__(self, orig_resnet):
super(Resnet, self).__init__()
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def forward(self, x, return_feature_maps=False):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x); conv_out.append(x);
x = self.layer2(x); conv_out.append(x);
x = self.layer3(x); conv_out.append(x);
x = self.layer4(x); conv_out.append(x);
if return_feature_maps:
return conv_out
return [x]
class ResnetDilated(nn.Module):
def __init__(self, orig_resnet, dilate_scale=8):
super(ResnetDilated, self).__init__()
from functools import partial
if dilate_scale == 2:
orig_resnet.conv1.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer2.apply(
partial(self._nostride_dilate, dilate=4))
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=8))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=16))
elif dilate_scale == 4:
orig_resnet.layer2.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=4))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=8))
elif dilate_scale == 8:
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=4))
elif dilate_scale == 16:
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=2))
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate//2, dilate//2)
m.padding = (dilate//2, dilate//2)
# other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x, return_feature_maps=False):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x); conv_out.append(x);
x = self.layer2(x); conv_out.append(x);
x = self.layer3(x); conv_out.append(x);
x = self.layer4(x); conv_out.append(x);
if return_feature_maps:
return conv_out
return [x]
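# Note (illustrative): with dilate_scale=8 the stride-2 convolutions in layer3 and
# layer4 are converted to stride 1 with dilation, so the final feature map keeps an
# output stride of 8 (e.g. 28x28 for a 224x224 input) instead of the stride-32 (7x7)
# output of the plain Resnet wrapper above.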
class MobileNetV2Dilated(nn.Module):
def __init__(self, orig_net, dilate_scale=8):
super(MobileNetV2Dilated, self).__init__()
from functools import partial
# take pretrained mobilenet features
self.features = orig_net.features[:-1]
self.total_idx = len(self.features)
self.down_idx = [2, 4, 7, 14]
if dilate_scale == 8:
for i in range(self.down_idx[-2], self.down_idx[-1]):
self.features[i].apply(
partial(self._nostride_dilate, dilate=2)
)
for i in range(self.down_idx[-1], self.total_idx):
self.features[i].apply(
partial(self._nostride_dilate, dilate=4)
)
elif dilate_scale == 16:
for i in range(self.down_idx[-1], self.total_idx):
self.features[i].apply(
partial(self._nostride_dilate, dilate=2)
)
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate//2, dilate//2)
m.padding = (dilate//2, dilate//2)
# other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x, return_feature_maps=False):
if return_feature_maps:
conv_out = []
for i in range(self.total_idx):
x = self.features[i](x)
if i in self.down_idx:
conv_out.append(x)
conv_out.append(x)
return conv_out
else:
return [self.features(x)]
# last conv, deep supervision
class C1DeepSup(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1DeepSup, self).__init__()
self.use_softmax = use_softmax
self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
# last conv
self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
x = self.cbr(conv5)
x = self.conv_last(x)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
# deep sup
conv4 = conv_out[-2]
_ = self.cbr_deepsup(conv4)
_ = self.conv_last_deepsup(_)
x = nn.functional.log_softmax(x, dim=1)
_ = nn.functional.log_softmax(_, dim=1)
return (x, _)
# last conv
class C1_U(nn.Module):
def __init__(self, num_class=7, fc_dim=32, use_softmax=False):
super(C1_U, self).__init__()
self.use_softmax = use_softmax
# last conv
self.conv_last = nn.Conv2d(fc_dim, num_class, 1)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
x = self.conv_last(conv5)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.log_softmax(x, dim=1)
return x
class C1(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1, self).__init__()
self.use_softmax = use_softmax
self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
# last conv
self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
def forward(self, conv_out, segSize=None, res=None):
conv5 = conv_out[-1]
x = self.cbr(conv5)
x = self.conv_last(x)
if res is not None:
x += res
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.log_softmax(x, dim=1)
return x
class C1_hrnet_cityscape(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1_hrnet_cityscape, self).__init__()
self.use_softmax = use_softmax
self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
# last conv
self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
def forward(self, conv_out, segSize=None, res=None):
conv5 = conv_out[-1]
x = self.cbr(conv5)
x = self.conv_last(x)
if res is not None:
x += res
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.interpolate(
x, size=(x.shape[-2]*4, x.shape[-1]*4), mode='bilinear', align_corners=False)
x = nn.functional.log_softmax(x, dim=1)
return x
class C1_8090(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1_8090, self).__init__()
self.use_softmax = use_softmax
def forward(self, x, segSize=None, res=None):
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=(x.shape[-2]*4, x.shape[-1]*4), mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.interpolate(
x, size=(x.shape[-2]*4, x.shape[-1]*4), mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
class C1_upsample(nn.Module):
def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
super(C1_upsample, self).__init__()
self.use_softmax = use_softmax
self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
# last conv
self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
def forward(self, conv_out, segSize=None, res=None):
conv5 = conv_out[-1]
x = self.cbr(conv5)
x = self.conv_last(x)
if res is not None:
x += res
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.log_softmax(x, dim=1)
else:
x = nn.functional.interpolate(
x, size=(512, 512), mode='bilinear', align_corners=False)
x = nn.functional.log_softmax(x, dim=1)
return x
# pyramid pooling
class PPM(nn.Module):
def __init__(self, num_class=150, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6)):
super(PPM, self).__init__()
self.use_softmax = use_softmax
self.ppm = []
for scale in pool_scales:
self.ppm.append(nn.Sequential(
nn.AdaptiveAvgPool2d(scale),
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm = nn.ModuleList(self.ppm)
self.conv_last = nn.Sequential(
nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
kernel_size=3, padding=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(512, num_class, kernel_size=1)
)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale in self.ppm:
ppm_out.append(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False))
ppm_out = torch.cat(ppm_out, 1)
x = self.conv_last(ppm_out)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
else:
x = nn.functional.log_softmax(x, dim=1)
return x
# pyramid pooling, deep supervision
class PPMDeepsup(nn.Module):
def __init__(self, num_class=150, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6)):
super(PPMDeepsup, self).__init__()
self.use_softmax = use_softmax
self.ppm = []
for scale in pool_scales:
self.ppm.append(nn.Sequential(
nn.AdaptiveAvgPool2d(scale),
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm = nn.ModuleList(self.ppm)
self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
self.conv_last = nn.Sequential(
nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
kernel_size=3, padding=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Dropout2d(0.1),
nn.Conv2d(512, num_class, kernel_size=1)
)
self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
self.dropout_deepsup = nn.Dropout2d(0.1)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale in self.ppm:
ppm_out.append(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False))
ppm_out = torch.cat(ppm_out, 1)
x = self.conv_last(ppm_out)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
# deep sup
conv4 = conv_out[-2]
_ = self.cbr_deepsup(conv4)
_ = self.dropout_deepsup(_)
_ = self.conv_last_deepsup(_)
x = nn.functional.log_softmax(x, dim=1)
_ = nn.functional.log_softmax(_, dim=1)
return (x, _)
# upernet
class UPerNet(nn.Module):
def __init__(self, num_class=150, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6),
fpn_inplanes=(256, 512, 1024, 2048), fpn_dim=256):
super(UPerNet, self).__init__()
self.use_softmax = use_softmax
# PPM Module
self.ppm_pooling = []
self.ppm_conv = []
for scale in pool_scales:
self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
self.ppm_conv.append(nn.Sequential(
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
self.ppm_conv = nn.ModuleList(self.ppm_conv)
self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
# FPN Module
self.fpn_in = []
for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
self.fpn_in.append(nn.Sequential(
nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
BatchNorm2d(fpn_dim),
nn.ReLU(inplace=True)
))
self.fpn_in = nn.ModuleList(self.fpn_in)
self.fpn_out = []
for i in range(len(fpn_inplanes) - 1): # skip the top layer
self.fpn_out.append(nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
))
self.fpn_out = nn.ModuleList(self.fpn_out)
self.conv_last = nn.Sequential(
conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, num_class, kernel_size=1)
)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
ppm_out.append(pool_conv(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False)))
ppm_out = torch.cat(ppm_out, 1)
f = self.ppm_last_conv(ppm_out)
fpn_feature_list = [f]
for i in reversed(range(len(conv_out) - 1)):
conv_x = conv_out[i]
conv_x = self.fpn_in[i](conv_x) # lateral branch
f = nn.functional.interpolate(
f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
f = conv_x + f
fpn_feature_list.append(self.fpn_out[i](f))
fpn_feature_list.reverse() # [P2 - P5]
output_size = fpn_feature_list[0].size()[2:]
fusion_list = [fpn_feature_list[0]]
for i in range(1, len(fpn_feature_list)):
fusion_list.append(nn.functional.interpolate(
fpn_feature_list[i],
output_size,
mode='bilinear', align_corners=False))
fusion_out = torch.cat(fusion_list, 1)
x = self.conv_last(fusion_out)
if self.use_softmax: # is True during inference
x = nn.functional.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
x = nn.functional.softmax(x, dim=1)
return x
x = nn.functional.log_softmax(x, dim=1)
return x
|
11468389
|
import functools
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated."""
@functools.wraps(func)
def new_func(*args, **kwargs):
return func(*args, **kwargs)
return new_func
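# Illustrative usage (hypothetical function name). As written, the decorator is a pure
# pass-through: it preserves the wrapped function's metadata via functools.wraps but
# does not emit any warning. A sketch of a variant that does warn is shown below;
# warning on every call is an assumption, not part of the original.
@deprecated
def old_api():
    return 42
old_api()  # behaves exactly like the undecorated function
import warnings
def deprecated_with_warning(func):
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.warn("{} is deprecated.".format(func.__name__),
                      DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return new_func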
|
11468395
|
import typing as T
import numpy as np
import py
import pytest
pd = pytest.importorskip("pandas") # noqa
pytest.importorskip("xarray") # noqa
import xarray as xr
from cfgrib import xarray_to_grib
@pytest.fixture()
def canonic_da() -> xr.DataArray:
coords: T.List[T.Any] = [
pd.date_range("2018-01-01T00:00", "2018-01-02T12:00", periods=4),
pd.timedelta_range(0, "12h", periods=2),
[1000.0, 850.0, 500.0],
np.linspace(90.0, -90.0, 5),
np.linspace(0.0, 360.0, 6, endpoint=False),
]
da = xr.DataArray(
np.zeros((4, 2, 3, 5, 6)),
coords=coords,
dims=["time", "step", "isobaricInhPa", "latitude", "longitude"],
)
return da
def test_canonical_dataarray_to_grib_with_grib_keys(
canonic_da: xr.DataArray, tmpdir: py.path.local
) -> None:
out_path = tmpdir.join("res.grib")
grib_keys = {"gridType": "regular_ll"}
with open(str(out_path), "wb") as file:
xarray_to_grib.canonical_dataarray_to_grib(canonic_da, file, grib_keys=grib_keys)
def test_canonical_dataarray_to_grib_detect_grib_keys(
canonic_da: xr.DataArray, tmpdir: py.path.local
) -> None:
out_path = tmpdir.join("res.grib")
with open(str(out_path), "wb") as file:
xarray_to_grib.canonical_dataarray_to_grib(canonic_da, file)
def test_canonical_dataarray_to_grib_conflicting_detect_grib_keys(
canonic_da: xr.DataArray, tmpdir: py.path.local
) -> None:
out_path = tmpdir.join("res.grib")
grib_keys = {"gridType": "reduced_ll"}
with open(str(out_path), "wb") as file:
with pytest.raises(ValueError):
xarray_to_grib.canonical_dataarray_to_grib(canonic_da, file, grib_keys=grib_keys)
def test_canonical_dataset_to_grib(canonic_da: xr.DataArray, tmpdir: py.path.local) -> None:
out_path = tmpdir.join("res.grib")
canonic_ds = canonic_da.to_dataset(name="t")
with pytest.warns(FutureWarning):
xarray_to_grib.canonical_dataset_to_grib(canonic_ds, str(out_path))
xarray_to_grib.canonical_dataset_to_grib(canonic_ds, str(out_path), no_warn=True)
def test_to_grib(canonic_da: xr.DataArray, tmpdir: py.path.local) -> None:
out_path = tmpdir.join("res.grib")
canonic_ds = canonic_da.to_dataset(name="t")
with pytest.warns(FutureWarning):
xarray_to_grib.to_grib(canonic_ds, str(out_path))
|
11468400
|
import theano, theano.tensor as TT
from cgt.utils import Message
import time
import numpy as np
def normc(x):
assert x.ndim == 2
return x/norms(x,0)[None,:]
def randnf(*shp):
return np.random.randn(*shp).astype(theano.config.floatX)
def norms(x,ax):
return np.sqrt(np.square(x).sum(axis=ax))
class GRUCell(object):
"""
Gated Recurrent Unit. E.g., see
Chung, Junyoung, et al. "Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling." arXiv preprint arXiv:1412.3555 (2014).
"""
def __init__(self,input_sizes,mem_size,name_prefix=""):
Wiz_vals = [normc(randnf(input_size,mem_size)) for input_size in input_sizes]
self.Wizs = [theano.shared(Wiz_val,name=name_prefix+"Wiz") for Wiz_val in Wiz_vals]
Wmz_val = normc(randnf(mem_size,mem_size))
self.Wmz = theano.shared(Wmz_val,name=name_prefix+"Wmz")
bz = np.zeros((1,mem_size),theano.config.floatX)
self.bz = theano.shared(bz,name=name_prefix+"bz")
self.bz.type.broadcastable = (True,False)
Wir_vals = [normc(randnf(input_size,mem_size)) for input_size in input_sizes]
self.Wirs = [theano.shared(Wir_val,name=name_prefix+"Wir") for Wir_val in Wir_vals]
Wmr_val = normc(randnf(mem_size,mem_size))
self.Wmr = theano.shared(Wmr_val,name=name_prefix+"Wmr")
br = np.zeros((1,mem_size),theano.config.floatX)
self.br = theano.shared(br,name=name_prefix+"br")
self.br.type.broadcastable = (True,False)
Wim_vals = [normc(randnf(input_size,mem_size)) for input_size in input_sizes]
self.Wims = [theano.shared(Wim_val,name=name_prefix+"Wim") for Wim_val in Wim_vals]
Wmm_val = normc(np.eye(mem_size,dtype=theano.config.floatX))
self.Wmm = theano.shared(Wmm_val,name=name_prefix+"Wmm")
bm = np.zeros((1,mem_size),theano.config.floatX)
self.bm = theano.shared(bm,name=name_prefix+"bm")
self.bm.type.broadcastable = (True,False)
def __call__(self,M,*inputs):
assert len(inputs) == len(self.Wizs)
summands = [Xi.dot(Wiz) for (Xi,Wiz) in zip(inputs,self.Wizs)] + [M.dot(self.Wmz),self.bz]
z = TT.nnet.sigmoid(TT.add(*summands))
summands = [Xi.dot(Wir) for (Xi,Wir) in zip(inputs,self.Wirs)] + [M.dot(self.Wmr),self.br]
r = TT.nnet.sigmoid(TT.add(*summands))
summands = [Xi.dot(Wim) for (Xi,Wim) in zip(inputs,self.Wims)] + [(r*M).dot(self.Wmm),self.bm]
Mtarg = TT.tanh(TT.add(*summands)) #pylint: disable=E1111
Mnew = (1-z)*M + z*Mtarg
return Mnew
def params(self):
out = []
out.extend(self.Wizs)
out.append(self.Wmz)
out.append(self.bz)
out.extend(self.Wirs)
out.append(self.Wmr)
out.append(self.br)
out.extend(self.Wims)
out.append(self.Wmm)
out.append(self.bm)
return out
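# For reference, __call__ above implements the standard GRU update
# (sigma = logistic sigmoid, * = elementwise product, one Wi* matrix per input stream):
#   z     = sigma(sum_i Xi.Wiz_i + M.Wmz + bz)        # update gate
#   r     = sigma(sum_i Xi.Wir_i + M.Wmr + br)        # reset gate
#   Mtarg = tanh(sum_i Xi.Wim_i + (r*M).Wmm + bm)     # candidate memory
#   Mnew  = (1 - z)*M + z*Mtarg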
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--horizon",type=int)
args = parser.parse_args()
horizon = args.horizon
assert horizon is not None
size=128
batchsize=64
cell = GRUCell([size],size)
X = TT.tensor3()
init = TT.zeros((batchsize, size),theano.config.floatX)
prev_h = init
for i in xrange(horizon):
prev_h = cell(X[i], prev_h)
with Message("compiling"):
f = theano.function([X],theano.grad(prev_h.sum(), cell.params()))
with Message("running"):
x = np.zeros((horizon,batchsize,size),theano.config.floatX)
for i in xrange(100):
f(x)
|
11468432
|
import argparse
import os
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
import tqdm
import models
from datasets.validation_datasets import FolderDataset
from config import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--src-dir', type=str, required=True, help='Source folder with images')
parser.add_argument('--out-dir', type=str, required=True, help='Folder to put output predictions')
parser.add_argument('--vis-dir', type=str, help='Folder to put visualisations')
parser.add_argument('--no-vis', action='store_true', help='Disable visualisations (only predictions)')
parser.add_argument('--model', type=str, choices=MODEL_ZOO, help='Model architecture to use')
parser.add_argument('--checkpoint-dir', type=str, default='weights/', help='Folder with model checkpoints')
parser.add_argument('--domain', type=str, choices=['depth', 'log-depth', 'disparity', 'log-disparity'],
default='log-disparity', help='Which domain to use to store predictions')
parser.add_argument('--device', default=('cuda:0' if torch.cuda.is_available() else 'cpu'))
args = parser.parse_args()
return args
def convert_predictions(pred, domain):
if domain == 'depth':
return (-pred).exp()
elif domain == 'log-depth':
return -pred
elif domain == 'disparity':
return pred.exp()
else:
return pred
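# Illustrative mapping (assumes the raw network output is log-disparity, i.e.
# -log(depth), which is the default --domain): for a prediction p,
#   'depth'         -> exp(-p)
#   'log-depth'     -> -p
#   'disparity'     -> exp(p)
#   'log-disparity' -> p (returned unchanged)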
def get_path(path, old_dir, new_dir, postfix):
pred_path = os.path.relpath(path, old_dir)
pred_path = os.path.join(new_dir, os.path.dirname(pred_path),
os.path.splitext(os.path.basename(pred_path))[0] + postfix)
return pred_path
def save_vis(pred, out_path):
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
vmin, vmax = np.quantile(pred, [0.01, 0.99])
vis = ((torch.clamp(pred, vmin, vmax) - vmin) * 255 / (vmax - vmin)).numpy().astype(np.uint8)
cv2.imwrite(out_path, cv2.applyColorMap(vis, cv2.COLORMAP_INFERNO))
def save_pred(pred, out_path):
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
torch.save(pred, out_path)
@torch.no_grad()
def main(args):
ds = FolderDataset(args.src_dir)
ds_loader = DataLoader(ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
print('Found {} images in source directory: {}'.format(len(ds), args.src_dir))
assert args.model in MODEL_ZOO
model = getattr(models, args.model)()
model.load_state_dict(torch.load(os.path.join(args.checkpoint_dir, args.model + '.pth'), map_location=args.device))
model = model.to(device=args.device)
model.eval()
print('Using "{}" model (predicting in {} domain)'.format(args.model, args.domain))
if (not args.no_vis) and (args.vis_dir is None):
args.vis_dir = args.out_dir
for batch in tqdm.tqdm(ds_loader, total=len(ds_loader)):
pred = model(batch['image'].to(device=args.device))
pred = convert_predictions(pred, args.domain).cpu()
for i in range(len(pred)):
save_pred(pred[i, 0], get_path(batch['path'][i], args.src_dir, args.out_dir, '.pth'))
if not args.no_vis:
save_vis(pred[i, 0], get_path(batch['path'][i], args.src_dir, args.vis_dir, '_vis.png'))
if __name__ == '__main__':
main(parse_args())
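# Illustrative invocation (the script and model names are placeholders; --model must be
# one of the architectures in MODEL_ZOO with a matching <model>.pth file under
# --checkpoint-dir):
#   python predict.py --src-dir ./images --out-dir ./preds --model <arch_name> --domain depth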
|
11468438
|
p4 = bfrt.int.pipe.Ingress.Int_sink_config.tb_int_sink
def setUp():
global p4
# To configure when Tofino switch is used as a sink node
p4.add_with_configure_sink(ucast_egress_port=132, sink_reporting_port=132)
p4.dump()
setUp()
|
11468503
|
def func(a, b=4, c=88):
print(a, b, c)
func(1) # prints: 1 4 88
func(b=5, a=7, c=9) # prints: 7 5 9
func(42, c=9) # prints: 42 4 9
func(42, 43, 44) # prints: 42 43 44
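# Keyword and positional arguments can be mixed, but every parameter without a default
# must still be supplied:
func(1, b=5) # prints: 1 5 88
# func(c=3) # would raise TypeError: func() missing 1 required positional argument: 'a'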
|
11468517
|
from pathlib import Path
from palm.plugins.base import BasePlugin
# This plugin is intended for use in testing palm-cli.
# It is not intended to be used in production.
TestInternalPlugin = BasePlugin(
name='test_internal',
command_dir=Path(__file__).parent / 'commands',
)
|
11468575
|
from keras.models import load_model
import h5py
import numpy as np
import matplotlib.pyplot as plt
model = load_model('my_model_12.h5')
def read_dataset(print_shape=True):
x = []
y = []
hdf5_file = h5py.File('all_data.hdf5', "r")
data = hdf5_file["sim_data"][:, ...]
hdf5_file.close()
print(data.shape)
for i in range(10):
for j in range(5):
x.append(data[i, j, ...])
y.append(data[i, j + 1, ...])
x = np.asarray(x)
y = np.asarray(y)
x_test = x
y_test = y
if print_shape:
print("x_test.shape: {}\ny_test.shape: {}\n".format(
x_test.shape,
y_test.shape))
return x_test, y_test
x_test, y_test = read_dataset()
predicted_flow = model.predict(x_test, batch_size=4)
predicted_flow = predicted_flow.reshape(predicted_flow.shape[:3])
y_test = y_test.reshape(y_test.shape[:3])
for i in range(15):
extent = 0, 3, 0, 1
plt.suptitle('Comparison of OpenFOAM vs Deep Learning', fontsize=13)
plt.subplot(211)
plt.ylabel('OpenFOAM', fontsize=15)
plt.imshow(y_test[i], cmap=plt.cm.viridis, alpha=.9, interpolation='bilinear', extent=extent)
plt.subplot(212)
plt.ylabel('Deep Learning', fontsize=15)
plt.imshow(predicted_flow[i], cmap=plt.cm.viridis, alpha=.9, interpolation='bilinear', extent=extent)
plt.subplots_adjust(left=0.2, wspace=0.8, top=0.85)
plt.savefig('plots/' + str(i) + '.png')
plt.close()
|
11468581
|
load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
def baikaldb_proto_library(name, srcs, deps=[], include=None, visibility=None, testonly=0):
native.filegroup(name=name + "_proto_srcs",
srcs=srcs,
visibility=visibility,)
cc_proto_library(name=name,
srcs=srcs,
deps=deps,
cc_libs=["@com_google_protobuf//:protobuf"],
include=include,
protoc="@com_google_protobuf//:protoc",
default_runtime="@com_google_protobuf//:protobuf",
testonly=testonly,
visibility=visibility,)
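# Illustrative usage from a BUILD file (target and file names are hypothetical):
#   baikaldb_proto_library(
#       name = "meta_proto",
#       srcs = ["meta.proto"],
#       visibility = ["//visibility:public"],
#   )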
|
11468606
|
import uvicorn
from fastapi import FastAPI
from data.config import *
from routers import *
from model.todo import *
from model.user import *
Base.metadata.create_all(engine)
app = FastAPI(
title="Pexon-Rest-API",
description="A full Rest-API for JSON response included Docker Contains.",
version="1.0.0",
)
app.include_router(signup_router)
app.include_router(login_router)
app.include_router(user_router)
app.include_router(todo_router)
@app.get(path="/")
def index():
return {"detail": "Hello World"}
if __name__ == "__main__":
uvicorn.run(app=app, host="127.0.0.1", port=8000)
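# Illustrative check once the server is running (host/port taken from the uvicorn.run
# call above); FastAPI also serves interactive docs at /docs:
#   curl http://127.0.0.1:8000/
#   {"detail":"Hello World"}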
|
11468637
|
import uuid
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models.indexes import Index
from django_zombodb.indexes import ZomboDBIndex
from django_zombodb.querysets import SearchQuerySet
class Restaurant(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
url = models.URLField()
name = models.TextField()
street = models.TextField()
zip_code = models.TextField()
city = models.TextField()
state = models.TextField()
phone = models.TextField()
email = models.EmailField()
website = models.URLField(blank=True)
categories = ArrayField(models.TextField())
objects = models.Manager.from_queryset(SearchQuerySet)()
class Meta:
indexes = [
Index(name='other-index', fields=['url']),
ZomboDBIndex(
fields=[
'name',
'street',
'zip_code',
'city',
'state',
'phone',
'email',
'website',
'categories',
]
),
]
def __str__(self):
return self.name
class RestaurantNoIndex(models.Model):
name = models.TextField()
objects = models.Manager.from_queryset(SearchQuerySet)()
def __str__(self):
return self.name
|
11468641
|
import os
API_VERSION = '1.19.0'
# Elasticsearch settings
ES_HOST = os.getenv("ES_HOST", "127.0.0.1")
ES_PORT = os.getenv("ES_PORT", 9200)
ES_USER = os.getenv("ES_USER")
ES_PWD = os.getenv("ES_PWD")
ES_INDEX = os.getenv("ES_INDEX", "platsannons-read")
ES_STREAM_INDEX = os.getenv("ES_STREAM_INDEX", "platsannons-stream")
ES_TAX_INDEX_ALIAS = os.getenv("ES_TAX_INDEX_ALIAS", "taxonomy")
ES_RETRIES = int(os.getenv("ES_RETRIES", 2))
ES_RETRIES_SLEEP = float(os.getenv("ES_RETRIES_SLEEP", 2))
ES_HISTORICAL_TIMEOUT = int(os.getenv("ES_HISTORICAL_TIMEOUT", 60))
ES_DEFAULT_TIMEOUT = 10
DELAY_ONTOLOGY_STARTUP = os.getenv('DELAY_ONTOLOGY_STARTUP', 'false').lower() == 'true'
# APM and Debug settings
APM_SERVICE_NAME = os.getenv("APM_SERVICE_NAME")
APM_SERVICE_URL = os.getenv("APM_SERVICE_URL")
APM_SECRET = os.getenv("APM_SECRET")
APM_LOG_LEVEL = os.getenv("APM_LOG_LEVEL", "WARNING")
# Flask-Restplus settings
RESTPLUS_SWAGGER_UI_DOC_EXPANSION = 'list'
RESTPLUS_VALIDATE = False
RESTPLUS_MASK_SWAGGER = False
RESTPLUS_ERROR_404_HELP = False
# Base API URL
BASE_PB_URL = os.getenv('BASE_PB_URL', 'https://arbetsformedlingen.se/platsbanken/annonser/')
BASE_TAXONOMY_URL = os.getenv('BASE_TAXONOMY_URL', 'https://taxonomy.api.jobtechdev.se/v1/taxonomy/')
TAXONOMY_APIKEY = os.getenv('TAXONOMY_APIKEY')
COMPANY_LOGO_BASE_URL = os.getenv('COMPANY_LOGO_BASE_URL',
'https://www.arbetsformedlingen.se/rest/arbetsgivare/rest/af/v3/')
COMPANY_LOGO_FETCH_DISABLED = os.getenv('COMPANY_LOGO_FETCH_DISABLED', 'false').lower() == 'true'
# Header parameters
APIKEY = 'api-key'
# Feature toggles
X_FEATURE_FREETEXT_BOOL_METHOD = 'x-feature-freetext-bool-method'
X_FEATURE_DISABLE_SMART_FREETEXT = 'x-feature-disable-smart-freetext'
X_FEATURE_ENABLE_FALSE_NEGATIVE = 'x-feature-enable-false-negative'
# Query parameters
OFFSET = 'offset'
LIMIT = 'limit'
FREETEXT_QUERY = 'q'
TYPEAHEAD_QUERY = 'typehead'
CONTEXTUAL_TYPEAHEAD = 'contextual'
UNSPECIFIED_SWEDEN_WORKPLACE = 'unspecified-sweden-workplace'
ABROAD = 'abroad'
REMOTE = 'remote'
FREETEXT_FIELDS = 'qfields'
SORT = 'sort'
PUBLISHED_BEFORE = 'published-before'
PUBLISHED_AFTER = 'published-after'
EXPERIENCE_REQUIRED = 'experience'
STATISTICS = 'stats'
STAT_LMT = 'stats.limit'
PARTTIME_MIN = 'parttime.min'
PARTTIME_MAX = 'parttime.max'
POSITION = 'position'
POSITION_RADIUS = 'position.radius'
DEFAULT_POSITION_RADIUS = 5
EMPLOYER = 'employer'
HISTORICAL_FROM = 'historical-from'
HISTORICAL_TO = 'historical-to'
REQUEST_TIMEOUT = 'request-timeout'
OCCUPATION = 'occupation-name'
OCCUPATION_GROUP = 'occupation-group'
OCCUPATION_FIELD = 'occupation-field'
MUNICIPALITY = 'municipality'
DEFAULT_FREETEXT_BOOL_METHOD = 'or'
MAX_OFFSET = 2000
MAX_LIMIT = 100
MAX_TAXONOMY_LIMIT = 8000
MAX_COMPLETE_LIMIT = 50
RESULT_MODEL = 'resultmodel'
DETAILS = 'resdet'
MIN_RELEVANCE = 'relevance-threshold'
# For taxonomy
SHOW_COUNT = 'show-count'
TAX_DESCRIPTION = 'DEPRECATED, use https://taxonomy.api.jobtechdev.se/v1/taxonomy/swagger-ui/index.html instead'
# For Batch
DATE = 'date'
UPDATED_BEFORE_DATE = 'updated-before-date'
MAX_DATE = '3000-01-01T00:00:00'
OCCUPATION_CONCEPT_ID = 'occupation-concept-id'
LOCATION_CONCEPT_ID = 'location-concept-id'
OCCUPATION_LIST = ['occupation', 'occupation_field', 'occupation_group']
LOCATION_LIST = ['region', 'country', 'municipality']
# For all ads
SHOW_EXPIRED = 'show-expired'
API_KEY_RATE_LIMIT = None if os.getenv("API_KEY_RATE_LIMIT") == 'UNLIMITED' else os.getenv("API_KEY_RATE_LIMIT", 60)
result_models = [
'pbapi', 'simple'
]
# sweden country concept id: /v1/taxonomy/main/concepts?id=i46j_HmG_v64'
SWEDEN_CONCEPT_ID = 'i46j_HmG_v64'
# original index: narvalontology, new: jae-synonym-dictionary
ONTOLOGY_INDEX = os.getenv('ONTOLOGY_INDEX', 'jae-synonym-dictionary')
UNWANTED_SUGGESTED_WORDS = ['sverige', 'svenska']
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
TAXONOMY_TYPE = 'taxonomy-type'
STATS_BY = 'stats-by'
LEGANCY_ID = 'legacy_ams_taxonomy_id'
CONCEPT_ID = 'concept_id'
LABEL = 'label'
LIMIT = 'limit'
TAXONOMY_EXTRAL_TYPE_LIST = ['worktime-extent', 'wage-type', 'sun-education-field-1', 'sun-education-field-2',
'sun-education-field-3', 'sun-education-level-1', 'sun-education-level-2',
'sun-education-level-3', 'employment-duration']
TAXONOMY_TYPE_LIST = ['occupation-name', 'occupation-group', 'occupation-field', 'employment-type', 'country',
'region', 'municipality', 'language', 'skill']
TAXONOMY_GENERAL_TYPE_LIST = ['occupation-name', 'occupation-group', 'occupation-field', 'employment-type']
TAXONOMY_LOCATION_TYPE_LIST = ['country', 'region', 'municipality']
TAXONOMY_TYPE_MUST_HAVE_LIST = ['language', 'skill']
TAXONOMY_PROCESSES = int(os.getenv("TAXONOMY_PROCESSES", 8))
|
11468659
|
import csv
import datetime
from dateutil.relativedelta import relativedelta
import io
from itertools import chain
from sqlalchemy import and_, or_, func, desc
from sqlalchemy.sql import text
from PIL import Image
from cStringIO import StringIO
from wand.image import Image as WandImage
from xhtml2pdf import pisa
from flask import (
abort,
Blueprint,
render_template,
request,
redirect,
url_for,
g,
session,
current_app,
jsonify,
send_file
)
from flask.ext.login import login_user, current_user
from flask.ext.security import login_required, roles_accepted
from flask.ext.security.forms import LoginForm
from app import db, login_manager
from app.forms import (
PatientForm,
PrescreenForm,
ScreeningResultForm,
SearchPatientForm,
ReferralCommentForm
)
from app.models import (
AppUser,
Patient,
PhoneNumber,
Address,
Employer,
IncomeSource,
HouseholdMember,
DocumentImage,
EmergencyContact,
Service,
ActionLog,
PatientReferral,
PatientReferralComment,
SlidingScale,
PatientScreeningResult,
UnsavedForm
)
from app.prescreening import calculate_fpl, calculate_pre_screen_results
from app.utils import (
translate_object,
get_unsaved_form,
check_patient_permission,
send_new_referral_email,
send_referral_comment_email,
send_referral_closed_email,
remove_blank_rows,
remove_blank_rows_helper
)
screener = Blueprint('screener', __name__, url_prefix='')
@screener.before_request
def before_request():
g.user = current_user
session.modified = True
@screener.route("/")
@login_required
def home_page_redirect():
"""Redirect the user to the home page appropriate for their role."""
if not current_user.is_patient_user():
return redirect(url_for('screener.index'))
elif current_user.patient_id is not None:
return redirect(url_for(
'screener.patient_details',
id=current_user.patient_id
))
else:
return redirect(url_for('screener.new_patient'))
@screener.route("/relogin", methods=['POST', 'GET'])
def relogin():
"""Prompt the user to reauthenticate after 15 minutes of inactivity.
After reauthentication, return to previous page and include any unsaved form data.
"""
form = LoginForm()
if form.validate_on_submit():
login_user(form.user, remember=form.remember.data)
user = AppUser.query.filter(AppUser.email == form.user.email).first()
user.authenticated = True
db.session.add(user)
db.session.commit()
return redirect(request.form['previous_page'])
else:
if hasattr(current_user, 'email'):
email = current_user.email
return render_template(
"relogin.html",
email=email,
previous_page=request.referrer,
login_user_form=form
)
return redirect(url_for('security.login'))
@screener.route("/unsaved_form", methods=["POST"])
@login_required
def unsaved_form():
"""When a user is automatically logged out, store any form data on the page as JSON
so it can be restored when they log back in.
"""
unsaved_form = UnsavedForm(
app_user_id=current_user.id,
patient_id=request.form['patient_id'],
page_name=request.referrer,
form_json=request.form['form_json']
)
db.session.add(unsaved_form)
db.session.commit()
return jsonify()
@screener.route("/ping", methods=["POST"])
@login_required
def ping():
"""User is active on front-end, so don't let session expire."""
session.modified = True
return jsonify()
@screener.route('/new_patient', methods=['POST', 'GET'])
@login_required
def new_patient():
"""Display the form for a new patient, and create a new patient after submission."""
form = PatientForm()
if form.validate_on_submit():
patient = Patient()
update_patient(patient, form, request.files)
# If the patient was created by a patient user, link the new patient to the
# user account
if current_user.is_patient_user():
current_user.patient = patient
db.session.add(patient)
db.session.commit()
return redirect(url_for('screener.patient_details', id=patient.id))
else:
index_search = {}
if 'first_name' in session and session['first_name']:
index_search['first_name'] = session['first_name']
if 'last_name' in session and session['last_name']:
index_search['last_name'] = session['last_name']
if 'dob' in session and session['dob']:
index_search['dob'] = session['dob']
if 'ssn' in session and session['ssn']:
index_search['ssn'] = session['ssn']
# Delete empty rows at end of many-to-one tables
remove_blank_rows(form)
return render_template(
'patient_details.html',
patient={},
form=form,
index_search=index_search
)
@screener.route('/patient_overview/<id>', methods=['POST', 'GET'])
@login_required
@roles_accepted('Staff', 'Admin', 'Superuser')
def patient_overview(id):
check_patient_permission(id)
patient = Patient.query.get(id)
patient.update_stats()
prescreen_results = calculate_pre_screen_results(
fpl=patient.fpl_percentage,
has_health_insurance=patient.insurance_status,
is_eligible_for_medicaid="",
service_ids=[current_user.service_id]
)
form = ScreeningResultForm()
sliding_scale_options = SlidingScale.query.filter(
SlidingScale.service_id == current_user.service_id
)
form.sliding_scale_id.choices = [("", "N/A")] + [
(str(option.id), option.scale_name) for option in sliding_scale_options
]
if form.validate_on_submit():
screening_result = PatientScreeningResult()
screening_result.service_id = current_user.service_id
screening_result.eligible_yn = form.eligible_yn.data
screening_result.sliding_scale_id = form.sliding_scale_id.data or None
screening_result.notes = form.notes.data
patient.screening_results.append(screening_result)
# If the patient has an open referral to the current organization, mark
# as completed
open_referrals = [
r for r in patient.referrals
if r.to_service_id == current_user.service.id
and r.in_sent_status()
]
if open_referrals:
screening_result.patient_referral_id = open_referrals[0].id
for referral in open_referrals:
referral.mark_completed()
send_referral_closed_email(
service=referral.to_service,
patient=patient,
from_app_user=referral.from_app_user,
closed_user=current_user
)
db.session.commit()
past_results = [r for r in patient.screening_results
if r.service_id == current_user.service_id]
new_form = ScreeningResultForm(formdata=None)
new_form.sliding_scale_id.choices = [("", "N/A")] + [
(str(option.id), option.scale_name) for option in sliding_scale_options
]
# if there is no referral id, then this is a screening result being saved
# that is not associated to a referral
if request.form and 'referral_id' in request.form:
referral_form = ReferralCommentForm()
if referral_form.validate_on_submit() and referral_form.notes.data != '':
referral = PatientReferral.query.get(referral_form.referral_id.data)
referral_comment = PatientReferralComment()
referral_comment.patient_referral_id = referral_form.referral_id.data
referral_comment.notes = referral_form.notes.data
db.session.add(referral_comment)
db.session.commit()
send_referral_comment_email(
service=referral.to_service,
patient=patient,
referral=referral,
commented_user=current_user
)
else:
referral_form = ReferralCommentForm(formdata=None)
has_open_referral = bool(
[r for r in patient.referrals
if r.status == 'SENT' and r.to_service_id == current_user.service.id]
)
return render_template(
'patient_overview.html',
patient=patient,
form=new_form,
service=prescreen_results[0],
past_results=past_results,
referral_form=referral_form,
has_open_referral=has_open_referral
)
@screener.route('/patient_details/<id>', methods=['POST', 'GET'])
@login_required
def patient_details(id):
"""Display the full patient details form for an existing user."""
check_patient_permission(id)
patient = Patient.query.get(id)
form = (
get_unsaved_form(request, patient, 'patient_details', PatientForm)
or PatientForm(obj=patient)
)
if request.method == 'POST' and form.validate_on_submit():
update_patient(patient, form, request.files)
db.session.commit()
patient.update_stats()
return render_template(
'patient_details.html',
patient=patient,
form=form,
save_message=True
)
# Delete empty rows at end of many-to-one tables
remove_blank_rows(form)
return render_template(
'patient_details.html',
patient=patient,
form=form,
save_message=False
)
def update_patient(patient, form, files):
"""Update a patient record with information from submitted form."""
for field_name, class_name in [
('income_sources', IncomeSource),
('phone_numbers', PhoneNumber),
('addresses', Address),
('emergency_contacts', EmergencyContact),
('household_members', HouseholdMember),
('employers', Employer),
('document_images', DocumentImage)
]:
if form[field_name]:
# If the last row in a many-to-one section doesn't have any data, don't save it
remove_blank_rows_helper(form[field_name])
# Add a new child object for each new item in a many-to-one section
new_row_count = len(form[field_name].entries) - getattr(patient, field_name).count()
if new_row_count > 0:
for p in range(new_row_count):
getattr(patient, field_name).append(class_name())
# When a user clicks the delete icon on a many-to-one row, it clears
# all the data in that row. If any existing rows have no data, delete
# them from patient object and then from the form.
for row in form[field_name]:
if not bool([val for key, val in row.data.iteritems() if (
val != ''
and val is not None
and key != 'id'
and not (key in ['state', 'employee'])
)]):
row_index = int(row.name[-1])
# Delete from patient object
db.session.delete(getattr(patient, field_name)[row_index])
# Deletion from form FieldList requires popping all entries
# after the one to be removed, then readding them
to_re_add = []
for _ in range(len(form[field_name].entries) - row_index):
to_re_add.append(form[field_name].pop_entry())
to_re_add.pop()
for row in reversed(to_re_add):
form[field_name].append_entry(data=row.data)
# Get binary data and create resized versions of any new document images
for index, entry in enumerate(form.document_images):
if entry.file_name.data and entry.file_name.data.filename:
# This is a new file
if entry.file_name.data.content_type == 'application/pdf':
# PIL can't handle PDFs, so use Wand
pdf = WandImage(file=entry.file_name.data.stream, resolution=500)
pdf.convert('jpg')
entry.file_name.data.stream = io.BytesIO(pdf.make_blob('jpeg'))
large_image = Image.open(entry.file_name.data.stream)
small_image = large_image.copy()
large_image_output, small_image_output = io.BytesIO(), io.BytesIO()
large_image.thumbnail(
current_app.config['LARGE_DOCUMENT_IMAGE_SIZE'],
Image.ANTIALIAS
)
large_image.save(large_image_output, format='JPEG')
small_image.thumbnail(
current_app.config['SMALL_DOCUMENT_IMAGE_SIZE'],
Image.ANTIALIAS
)
small_image.save(small_image_output, format='JPEG')
entry.data_full.data = entry.file_name.data.stream.getvalue()
entry.data_large.data = large_image_output.getvalue()
entry.data_small.data = small_image_output.getvalue()
entry.file_name.data = entry.file_name.data.filename
else:
# This is an existing entry, so the file can't change, only the description
# Fill in the fields that aren't inputs from the saved data so
# that populate_obj doesn't overwrite them.
entry.file_name.data = patient.document_images[index].file_name
entry.data_full.data = patient.document_images[index].data_full
entry.data_large.data = patient.document_images[index].data_large
entry.data_small.data = patient.document_images[index].data_small
# Populate the patient object with all the updated info
form.populate_obj(patient)
return
@screener.route('/delete/<id>', methods=['POST', 'GET'])
@login_required
def delete(id):
"""Soft delete a patient."""
check_patient_permission(id)
patient = Patient.query.get(id)
patient.deleted = datetime.datetime.now()
patient.deleted_by = current_user
db.session.commit()
return redirect(url_for('screener.index'))
@screener.route('/document_image/<image_id>')
@login_required
def document_image(image_id):
"""Display an uploaded document image."""
_image = DocumentImage.query.get_or_404(image_id)
check_patient_permission(_image.patient.id)
return render_template('documentimage.html', image_id=image_id)
@screener.route('/documentimages/<image_id>/<thumbnail>')
@login_required
def get_image(image_id, thumbnail):
"""Serve a document image file."""
image = DocumentImage.query.get_or_404(image_id)
if thumbnail == 'True':
return current_app.response_class(image.data_small, mimetype='application/octet-stream')
return current_app.response_class(image.data_large, mimetype='application/octet-stream')
@screener.route('/new_prescreening', methods=['POST', 'GET'])
@login_required
def new_prescreening():
"""Display the page that allows the user to select which services
to pre-screen for.
"""
if request.method == 'POST':
session['service_ids'] = request.form.getlist('services')
return redirect(url_for('screener.prescreening_basic'))
services = Service.query.all()
return render_template(
'new_prescreening.html',
services=[s for s in services if s.has_screening_yn == 'Y']
)
@screener.route('/prescreening_basic', methods=['POST', 'GET'])
@login_required
def prescreening_basic():
"""Page for inputting basic prescreening requirements--
household size, income, insurance status.
"""
form = PrescreenForm()
if form.validate_on_submit():
session['household_size'] = form.household_size.data
session['household_income'] = form.household_income.data
session['has_health_insurance'] = form.has_health_insurance.data
session['is_eligible_for_medicaid'] = form.eligible_for_medicaid.data
return redirect(url_for('screener.prescreening_results'))
else:
return render_template('prescreening_basic.html', form=form)
@screener.route('/prescreening_results')
@login_required
def prescreening_results():
"""Page with patient's prescreening results: which services she qualifies for and why,
which sliding scale she'll fall into, and sample prices.
"""
fpl = calculate_fpl(session['household_size'], int(session['household_income']) * 12)
return render_template(
'prescreening_results.html',
services=calculate_pre_screen_results(
fpl=fpl,
has_health_insurance=session['has_health_insurance'],
is_eligible_for_medicaid=session['is_eligible_for_medicaid'],
service_ids=session['service_ids']
),
household_size=session['household_size'],
household_income=int(session['household_income']) * 12,
fpl=fpl,
has_health_insurance=session['has_health_insurance'],
is_eligible_for_medicaid=session['is_eligible_for_medicaid']
)
@screener.route('/patient_history/<patient_id>')
@login_required
def patient_history(patient_id):
"""Display all past edits to the patient's information"""
check_patient_permission(patient_id)
patient = Patient.query.get(patient_id)
patient.update_stats()
history = ActionLog.query.\
filter(or_(
and_(
ActionLog.row_id == patient_id,
ActionLog.table_name == 'patient'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.phone_numbers]),
ActionLog.table_name == 'phone_number'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.addresses]),
ActionLog.table_name == 'address'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.emergency_contacts]),
ActionLog.table_name == 'emergency_contact'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.employers]),
ActionLog.table_name == 'employer'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.document_images]),
ActionLog.table_name == 'document_image'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.income_sources]),
ActionLog.table_name == 'income_source'
),
and_(
ActionLog.row_id.in_([p.id for p in patient.household_members]),
ActionLog.table_name == 'household_member'
)
)).\
order_by(ActionLog.action_timestamp.desc())
# Filter out history entries that only contain changes to fields we don't want to
# show in edit history
history = [i for i in history if not (
i.changed_fields
and set(i.changed_fields).issubset([
'last_modified',
'last_modified_by_id',
'id',
'created',
'created_by_id',
'patient_id',
'data_full',
'data_large',
'data_small'
])
)]
services = dict((x.id, x) for x in Service.query.all())
readable_names = dict(
(column.name, column.info) for column in (
chain(
Patient.__table__.columns,
PhoneNumber.__table__.columns,
Address.__table__.columns,
EmergencyContact.__table__.columns,
HouseholdMember.__table__.columns,
IncomeSource.__table__.columns,
Employer.__table__.columns,
DocumentImage.__table__.columns
)
)
)
return render_template(
'patient_history.html',
patient=patient,
history=history,
services=services,
readable_names=readable_names
)
@screener.route('/patient_share/<patient_id>')
@login_required
def patient_share(patient_id):
"""Displays the 'Share Patient' page, which includes prescreening
results and allows users to send referrals.
"""
check_patient_permission(patient_id)
patient = Patient.query.get(patient_id)
patient.update_stats()
services = Service.query.all()
if not current_user.is_patient_user() and current_user.service:
allowed_referral_service_ids = [
service.id for service in current_user.service.can_send_referrals_to
]
else:
allowed_referral_service_ids = []
# Get ids of services where the patient already has open referrals,
# to prevent user from sending duplicates.
open_referral_service_ids = [
r.to_service_id for r in patient.referrals
if r.in_sent_status()
]
return render_template(
'patient_share.html',
patient=patient,
current_user=current_user,
servicesAll=Service.query.all(),
services=calculate_pre_screen_results(
fpl=patient.fpl_percentage,
has_health_insurance=patient.insurance_status,
is_eligible_for_medicaid="",
service_ids=[s.id for s in services if s.has_screening_yn == 'Y']
),
household_size=patient.household_members.count() + 1,
household_income=patient.total_annual_income,
fpl=patient.fpl_percentage,
has_health_insurance=patient.insurance_status,
is_eligible_for_medicaid="",
referral_buttons=True,
allowed_referral_service_ids=allowed_referral_service_ids,
open_referral_service_ids=open_referral_service_ids
)
@screener.route('/add_referral', methods=["POST"])
@login_required
def add_referral():
"""Adds new referral to the database. Called when user clicks
'Send Referral' button."""
check_patient_permission(request.form['patient_id'])
referral = PatientReferral(
patient_id=request.form['patient_id'],
from_app_user_id=request.form['app_user_id'],
to_service_id=request.form['service_id'],
notes=request.form['notes'],
status='SENT'
)
db.session.add(referral)
db.session.commit()
service = Service.query.get(request.form['service_id'])
patient = Patient.query.get(request.form['patient_id'])
send_new_referral_email(
service=service,
patient=patient,
from_app_user=current_user
)
return jsonify()
@screener.route('/patient_screening_history/<patient_id>', methods=['POST', 'GET'])
@login_required
def patient_screening_history(patient_id):
"""Display a patient's past referrals and screening results, and a form
to enter new screening results.
"""
check_patient_permission(patient_id)
patient = Patient.query.get(patient_id)
patient.update_stats()
form = ReferralCommentForm()
if form.validate_on_submit():
referral = PatientReferral.query.get(form.referral_id.data)
referral_comment = PatientReferralComment()
referral_comment.patient_referral_id = form.referral_id.data
referral_comment.notes = form.notes.data
db.session.add(referral_comment)
db.session.commit()
send_referral_comment_email(
service=referral.to_service,
patient=patient,
referral=referral,
commented_user=current_user
)
return render_template('patient_screening_history.html', patient=patient, form=form)
@screener.route('/index', methods=['POST', 'GET'])
@login_required
@roles_accepted('Staff', 'Admin', 'Superuser')
def index():
"""Display the initial landing page, which lists patients in the
network and allows users to search and filter them.
"""
form = SearchPatientForm()
if request.method == 'POST':
session['first_name'] = form.search_patient_first_name.data
session['last_name'] = form.search_patient_last_name.data
# session['dob'] = form.search_patient_dob.data
session['ssn'] = form.search_patient_ssn.data
return redirect(url_for('screener.new_patient'))
all_patients = Patient.query.all()
# ORGANIZATION-BASED QUERIES
org_users = [user.id for user in AppUser.query.filter(
AppUser.service_id == current_user.service_id
)]
# Get patients that this organization referred out who have open referrals or
# referrals closed in the last month
org_referrals_outgoing = db.session.query(
Patient.id,
Patient.first_name,
Patient.last_name,
Patient.dob,
func.max(func.coalesce(PatientReferral.last_modified, PatientReferral.created)).label(
"referral_last_modified"
)
).join(Patient.referrals).filter(
and_(
or_(
and_(
and_(
PatientReferral.from_app_user_id.in_(org_users),
PatientReferral.status == 'COMPLETED'
),
func.coalesce(
PatientReferral.last_modified, PatientReferral.created
) > datetime.date.today() - relativedelta(months=1)
),
and_(
PatientReferral.from_app_user_id.in_(org_users),
PatientReferral.status == 'SENT'
)
),
Patient.deleted == None
)
).group_by(
Patient.id, Patient.first_name, Patient.last_name
).order_by(
desc(func.max(func.coalesce(PatientReferral.last_modified, PatientReferral.created)))
)
# Get patients with open referrals or referrals closed in the last month at
# this user's organization
org_referrals_incoming = db.session.query(
Patient.id,
Patient.first_name,
Patient.last_name,
Patient.dob,
func.max(func.coalesce(PatientReferral.last_modified, PatientReferral.created)).label(
"referral_last_modified"
)
).join(Patient.referrals).filter(
and_(
or_(
and_(
PatientReferral.to_service_id == current_user.service_id,
PatientReferral.status == 'SENT'
),
and_(
and_(
PatientReferral.to_service_id == current_user.service_id,
PatientReferral.status == 'COMPLETED'
),
func.coalesce(
PatientReferral.last_modified, PatientReferral.created
) > datetime.date.today() - relativedelta(months=1)
)
),
Patient.deleted == None
)
).group_by(
Patient.id, Patient.first_name, Patient.last_name
).order_by(
desc(func.max(func.coalesce(PatientReferral.last_modified, PatientReferral.created)))
)
# Get patients who were most recently screened and found eligible for this organization
# more than 11 months ago
# No idea how to do this in SQLAlchemy
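# A hedged sketch of how this might look in SQLAlchemy (untested; the raw SQL
# below remains the authoritative version):
#   org_need_renewal = db.session.query(
#       Patient.id, Patient.first_name, Patient.middle_name, Patient.last_name, Patient.dob,
#       func.max(PatientScreeningResult.created).label('most_recent_result')
#   ).join(Patient.screening_results).filter(
#       PatientScreeningResult.eligible_yn == 'Y',
#       PatientScreeningResult.service_id == current_user.service_id,
#       Patient.deleted == None
#   ).group_by(
#       Patient.id, Patient.first_name, Patient.middle_name, Patient.last_name, Patient.dob
#   ).having(
#       func.max(PatientScreeningResult.created) < datetime.date.today() - relativedelta(months=11)
#   ).order_by(func.max(PatientScreeningResult.created))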
query = text(
"select * from ( "
"select "
"patient.id, "
"patient.first_name, "
"patient.middle_name, "
"patient.last_name, "
"patient.dob, "
"max(patient_screening_result.created) as most_recent_result "
"from "
"patient, "
"patient_screening_result "
"where "
"patient.id = patient_screening_result.patient_id "
"and patient_screening_result.eligible_yn = 'Y' "
"and patient_screening_result.service_id = :service_id "
"and patient.deleted is null "
"group by "
"patient.id, "
"patient.first_name, "
"patient.middle_name, "
"patient.last_name, "
"patient.dob "
") subquery where most_recent_result < :eleven_months_ago "
"order by subquery.most_recent_result "
)
conn = db.get_engine(current_app).connect()
org_need_renewal = conn.execute(
query,
service_id=current_user.service_id,
eleven_months_ago=datetime.date.today() - relativedelta(months=11)
).fetchall()
# USER-BASED QUERIES
# Get patients this user created or updated in the last week
your_recently_updated = Patient.query.filter(or_(
and_(
Patient.last_modified > datetime.date.today() - datetime.timedelta(days=7),
Patient.last_modified_by_id == current_user.id,
Patient.deleted == None
),
and_(
Patient.created > datetime.date.today() - datetime.timedelta(days=7),
Patient.created_by_id == current_user.id,
Patient.deleted == None
)
)).order_by(desc(func.coalesce(Patient.last_modified, Patient.created)))
# Get patients this user referred out who have open referrals or referrals closed in
# the last month
your_referrals_outgoing = db.session.query(
Patient.id,
Patient.first_name,
Patient.last_name,
Patient.dob,
func.max(func.coalesce(PatientReferral.last_modified, PatientReferral.created)).label(
"referral_last_modified"
)
).join(Patient.referrals).filter(
and_(
or_(
and_(
and_(
PatientReferral.from_app_user_id == current_user.id,
PatientReferral.status == 'COMPLETED'
),
func.coalesce(
PatientReferral.last_modified, PatientReferral.created
) > datetime.date.today() - relativedelta(months=1)
),
and_(
PatientReferral.from_app_user_id == current_user.id,
PatientReferral.status == 'SENT'
)
),
Patient.deleted == None
)
).group_by(
Patient.id, Patient.first_name, Patient.last_name
).order_by(
desc(func.max(func.coalesce(PatientReferral.last_modified, PatientReferral.created)))
)
patient_ids = []
for patient_list in [
org_referrals_outgoing,
org_referrals_incoming,
org_need_renewal,
your_recently_updated,
your_referrals_outgoing
]:
patient_ids += [patient.id for patient in patient_list]
patients = Patient.query.filter(Patient.id.in_(patient_ids))
patient_dict = {patient.id: patient for patient in patients}
return render_template(
'index.html',
user=current_user,
all_patients=all_patients,
patient_dict=patient_dict,
org_referrals_outgoing=org_referrals_outgoing,
org_referrals_incoming=org_referrals_incoming,
org_need_renewal=org_need_renewal,
your_recently_updated=your_recently_updated,
your_referrals_outgoing=your_referrals_outgoing,
one_month_ago=datetime.datetime.today() - relativedelta(months=1),
form=form
)
@screener.route('/user/<user_id>')
@login_required
def user(user_id):
"""Display the profile page for a single user."""
user = AppUser.query.get(user_id)
return render_template('user_profile.html', user=user)
@screener.route('/service/<service_id>')
@login_required
def service(service_id):
"""Display the profile page for a service organization."""
service = translate_object(
Service.query.get(service_id),
current_app.config['BABEL_DEFAULT_LOCALE']
)
return render_template('service_profile.html', service=service)
@screener.route('/export_csv/<patient_id>')
@login_required
def export_csv(patient_id):
"""Create a CSV of a patient's information."""
check_patient_permission(patient_id)
patient = Patient.query.get(patient_id)
form = PatientForm(obj=patient)
fieldnames = []
data = {}
for field in form:
if field.name not in ('document_images', 'csrf_token'):
if field.type == 'FieldList':
for entry in field.entries:
for subfield in entry:
fieldnames.append(subfield.name)
data[subfield.name] = subfield.data
else:
fieldnames.append(field.name)
data[field.name] = field.data
filename = 'zipscreen_patient_' + str(patient_id) + '.csv'
csvf = StringIO()
writer = csv.DictWriter(csvf, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(data)
csvf.seek(0)
return send_file(
csvf,
mimetype="text/csv",
attachment_filename=filename,
as_attachment=True
)
@screener.route('/export_pdf/<patient_id>')
@login_required
def export_pdf(patient_id):
"""Create a PDF of a patient's information."""
check_patient_permission(patient_id)
patient = Patient.query.get(patient_id)
form = PatientForm(obj=patient)
filename = 'zipscreen_patient_' + str(patient_id) + '.pdf'
html = render_template(
'pdf_export.html',
patient=patient,
form=form,
is_production=current_app.config['IS_PRODUCTION']
)
pdf = StringIO()
pisa.CreatePDF(StringIO(html.encode('utf-8')), pdf)
pdf.seek(0)
return send_file(
pdf,
mimetype="application/pdf",
attachment_filename=filename,
as_attachment=True
)
@screener.route('/403')
def throw_403():
abort(403)
#########################################
# Development-only routes
#########################################
@screener.route('/template_prototyping/')
def template_prototyping():
"""This is a dev-only route for prototyping fragments of other templates without touching
them. The url should not be linked anywhere, and ideally it should not be
accessible in the deployed version."""
return render_template('template_prototyping.html')
@screener.route('/mockup')
@login_required
def mockup():
return render_template('MOCKUPS.html')
@screener.route('/style-guide')
@login_required
def style_guide():
return render_template('style-guide.html')
|
11468702
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetPlacementType(item):
if hasattr(item, "FamilyPlacementType"): return item.FamilyPlacementType
else: return None
items = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetPlacementType(x) for x in items]
else: OUT = GetPlacementType(items)
|
11468715
|
from __future__ import absolute_import, division, print_function
import boost_adaptbx.boost.python as bp
ext = bp.import_ext("mmtbx_validation_ramachandran_ext")
from mmtbx_validation_ramachandran_ext import rama_eval
import mmtbx.rotamer  # needed by evaluate_sites() below (assumed available alongside the extension)
# maps programmatic name to file name
aminoAcids = {
'general' : 'general',
'glycine' : 'gly-sym',
'proline' : 'pro',
'prepro' : 'prepro',
}
aminoAcids_8000 = {
'general' : 'general-noGPIVpreP',
'glycine' : 'gly-sym',
'cis-proline' : 'cispro',
'trans-proline' : 'transpro',
'pre-proline' : 'prepro-noGP',
'isoleucine or valine' : 'ileval-nopreP',
}
#
# Why aren't the constants from ramalyze (i.e. res_types) used here at all?
# They should be defined in one place (preferably here) and used everywhere.
#
class RamachandranEval:
def __init__(self):
self.rama_eval = rama_eval()
def check_table_name(self, name):
# This function takes time to run. There is a similar check in C++ which
# raises a RuntimeError if the name is not valid.
return name in aminoAcids_8000
def evaluate(self, aaName, phiPsi):
# assert self.check_table_name(aaName)
return self.rama_eval.get_score(aaName, phiPsi[0],phiPsi[1])
def evaluate_sites(self, aaName, phi_psi_i_seqs, sites_cart):
# assert self.check_table_name(aaName)
(phi, psi) = mmtbx.rotamer.phi_psi_from_sites(
i_seqs=phi_psi_i_seqs,
sites_cart=sites_cart)
return self.evaluate(aaName, (phi,psi))
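# Hedged usage sketch (illustrative only; assumes 'general' is an accepted table
# name, per aminoAcids_8000 above, and that phi/psi are given in degrees as the
# underlying rama_eval extension presumably expects):
#   r = RamachandranEval()
#   score = r.evaluate('general', (-60.0, -45.0))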
|
11468743
|
import numpy as np
from collections import OrderedDict
from typing import Tuple
from classifiers import TextCNNClassifier
class SentimentAnalyzer(object):
""" Анализатор тональности текстового контента.
"""
def __init__(self, cls: TextCNNClassifier):
self.cls = cls
def analyze(self, web_content: OrderedDict) -> Tuple[int, int, int]:
""" Проанализировать тональность абзацев заданного веб-контента. Сам веб-контент представляет собой словарь,
ключами которого являются строковые описания ранее пропарсенных URL-ов, а значениями - списки строк, т.е.
текстовый контент каждого URL-а, разбитый на последовательность абзацев. Пример:
{
"www.abc.com": [
"<NAME>",
"<NAME>"
],
"https://hello.org": [
"Здравствуй, мир!",
"И тебе исполать, добрый молодец!",
"Доброго здоровьица, девица краса.",
"Здесь что, все здороваются?"
]
}
Данный метод анализирует тональность каждого абзаца в каждом URL-е и возвращает три числа: число позитивных
высказываний (т.е. число абзацев с положительной тональностью), число негативных высказываний (т.е. число
абзцацев с негативной тональность) и, наконец, общее число всех абзацев.
:param web_content: словарь текстового контента, разбитого на абзацы, для всех обойдённых URL-ов
:return Число позитивных высказываний, число негативных высказываний и общее число высказываний.
"""
if not isinstance(self.cls, TextCNNClassifier):
raise ValueError('`cls` is wrong! Expected a `classifiers.text_cnn.TextCNNClassifier`, '
'but got a `{0}.`'.format(type(self.cls)))
if not isinstance(web_content, OrderedDict):
raise TypeError("web_content must be an OrderedDict,"
" but it is a {type}".format(type = type(web_content)))
output = self.cls.predict(sum([web_content[key] for key in web_content], []))
positives = int(sum(output == 2))
neutrals = int(sum(output == 1))
negatives = int(sum(output == 0))
return (negatives, neutrals, positives)
def __getstate__(self):
return {'cls': self.cls}
def __setstate__(self, state):
self.cls = state['cls']
return self
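# Hedged usage sketch (illustrative only; assumes a trained TextCNNClassifier
# instance `cnn` is available and that its predict() returns class labels 0/1/2
# for negative/neutral/positive, as the analyze() code above implies):
#   from collections import OrderedDict
#   analyzer = SentimentAnalyzer(cls=cnn)
#   content = OrderedDict([("https://hello.org", ["Hello, world!", "Terrible weather today."])])
#   negatives, neutrals, positives = analyzer.analyze(content)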
|
11468818
|
from thenewboston.utils.fields import standard_field_names
from v1.banks.models.bank import Bank
from v1.self_configurations.helpers.self_configuration import get_self_configuration
from v1.validators.models.validator import Validator
def create_bank_from_config_data(*, config_data):
"""Create bank from config data"""
fields = standard_field_names(Bank)
data = {field: config_data[field] for field in fields if field != 'trust'}
Bank.objects.create(**data, trust=0)
def create_validator_from_config_data(*, config_data):
"""Create validator from config data"""
fields = standard_field_names(Validator)
data = {field: config_data[field] for field in fields if field != 'trust'}
Validator.objects.create(**data, trust=0)
def get_primary_validator():
"""Return primary validator"""
# TODO: This should be hitting the cache
self_configuration = get_self_configuration(exception_class=RuntimeError)
primary_validator = self_configuration.primary_validator
if not primary_validator:
raise RuntimeError('No primary validator')
return primary_validator
def update_bank_from_config_data(*, bank, config_data):
"""Update bank from config data"""
fields = standard_field_names(Bank)
data = {field: config_data[field] for field in fields if field != 'trust'}
Bank.objects.filter(pk=bank.pk).update(**data)
def update_validator_from_config_data(*, validator, config_data):
"""Update validator from config data"""
fields = standard_field_names(Validator)
data = {field: config_data[field] for field in fields if field != 'trust'}
Validator.objects.filter(pk=validator.pk).update(**data)
|
11468819
|
r''' Euler angle rotations and their conversions for Tait-Bryan zyx convention
See :mod:`euler` for general discussion of Euler angles and conventions.
This module has specialized implementations of the extrinsic Z axis, Y axis, X
axis rotation convention.
The conventions in this module are therefore:
* axes $i, j, k$ are the $z, y, x$ axes respectively. Thus
an Euler angle vector $[ \alpha, \beta, \gamma ]$ in our convention
implies a $\alpha$ radian rotation around the $z$ axis, followed by a $\beta$
rotation around the $y$ axis, followed by a $\gamma$ rotation around the $x$
axis.
* the rotation matrix applies on the left, to column vectors on the
right, so if ``R`` is the rotation matrix, and ``v`` is a 3 x N matrix with N
column vectors, the transformed vector set ``vdash`` is given by ``vdash =
np.dot(R, v)``.
* extrinsic rotations - the axes are fixed, and do not move with the
rotations.
* a right-handed coordinate system
The convention of rotation around ``z``, followed by rotation around
``y``, followed by rotation around ``x``, is known (confusingly) as
"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.
Terms used in function names:
* *mat* : array shape (3, 3) (3D non-homogeneous coordinates)
* *euler* : (sequence of) rotation angles about the z, y, x axes (in that
order)
* *axangle* : rotations encoded by axis vector and angle scalar
* *quat* : quaternion shape (4,)
'''
import math
from functools import reduce
import numpy as np
from .axangles import axangle2mat
_FLOAT_EPS_4 = np.finfo(float).eps * 4.0
def euler2mat(z, y, x):
''' Return matrix for rotations around z, y and x axes
Uses the convention of static-frame rotation around the z, then y, then x
axis.
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
M : array shape (3,3)
Rotation matrix giving same rotation as for given angles
Examples
--------
>>> zrot = 1.3 # radians
>>> yrot = -0.1
>>> xrot = 0.2
>>> M = euler2mat(zrot, yrot, xrot)
>>> M.shape == (3, 3)
True
The output rotation matrix is equal to the composition of the
individual rotations
>>> M1 = euler2mat(zrot, 0, 0)
>>> M2 = euler2mat(0, yrot, 0)
>>> M3 = euler2mat(0, 0, xrot)
>>> composed_M = np.dot(M3, np.dot(M2, M1))
>>> np.allclose(M, composed_M)
True
When applying M to a vector, the vector should be a column vector to the
right of M. If the right hand side is a 2D array rather than a
vector, then each column of the 2D array represents a vector.
>>> vec = np.array([1, 0, 0]).reshape((3,1))
>>> v2 = np.dot(M, vec)
>>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array
>>> vecs2 = np.dot(M, vecs)
Rotations are counter-clockwise.
>>> zred = np.dot(euler2mat(np.pi/2, 0, 0), np.eye(3))
>>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])
True
>>> yred = np.dot(euler2mat(0, np.pi/2, 0), np.eye(3))
>>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])
True
>>> xred = np.dot(euler2mat(0, 0, np.pi/2), np.eye(3))
>>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])
True
Notes
-----
The direction of rotation is given by the right-hand rule. Orient the
thumb of the right hand along the axis around which the rotation occurs,
with the end of the thumb at the positive end of the axis; curl your
fingers; the direction your fingers curl is the direction of rotation.
Therefore, the rotations are counterclockwise if looking along the axis of
rotation from positive to negative.
'''
Ms = []
if z:
cosz = math.cos(z)
sinz = math.sin(z)
Ms.append(np.array(
[[cosz, -sinz, 0],
[sinz, cosz, 0],
[0, 0, 1]]))
if y:
cosy = math.cos(y)
siny = math.sin(y)
Ms.append(np.array(
[[cosy, 0, siny],
[0, 1, 0],
[-siny, 0, cosy]]))
if x:
cosx = math.cos(x)
sinx = math.sin(x)
Ms.append(np.array(
[[1, 0, 0],
[0, cosx, -sinx],
[0, sinx, cosx]]))
if Ms:
return reduce(np.dot, Ms[::-1])
return np.eye(3)
def mat2euler(M, cy_thresh=None):
''' Discover Euler angle vector from 3x3 matrix
Uses the conventions above.
Parameters
----------
M : array-like, shape (3,3)
cy_thresh : None or scalar, optional
threshold below which to give up on straightforward arctan for
estimating x rotation. If None (default), estimate from
precision of input.
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Notes
-----
If there was no numerical error, the routine could be derived using
Sympy expression for z then y then x rotation matrix, (see
``eulerangles.py`` in ``derivations`` subdirectory)::
[ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
[cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
[sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]
This gives the following solutions for ``[z, y, x]``::
z = atan2(-r12, r11)
y = asin(r13)
x = atan2(-r23, r33)
Problems arise when ``cos(y)`` is close to zero, because both of::
z = atan2(cos(y)*sin(z), cos(y)*cos(z))
x = atan2(cos(y)*sin(x), cos(x)*cos(y))
will be close to ``atan2(0, 0)``, and highly unstable.
The ``cy`` fix for numerical instability in this code is from: *Euler Angle
Conversion* by <NAME>, p222-9 ; in: *Graphics Gems IV*, <NAME>
(editor), Academic Press, 1994, ISBN: 0123361559. Specifically it comes
from ``EulerAngles.c`` and deals with the case where cos(y) is close to
zero:
* http://www.graphicsgems.org/
* https://github.com/erich666/GraphicsGems/blob/master/gemsiv/euler_angle/EulerAngles.c#L68
The code appears to be licensed (from the website) as "can be used without
restrictions".
'''
M = np.asarray(M)
if cy_thresh is None:
try:
cy_thresh = np.finfo(M.dtype).eps * 4
except ValueError:
cy_thresh = _FLOAT_EPS_4
r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
# (-cos(y)*sin(x))**2 + (cos(x)*cos(y))**2) =
# (cos(y)**2)(sin(x)**2 + cos(x)**2) ==> (Pythagoras)
# cos(y) = sqrt((-cos(y)*sin(x))**2 + (cos(x)*cos(y))**2)
cy = math.sqrt(r23 * r23 + r33 * r33)
if cy > cy_thresh: # cos(y) not close to zero, standard form
z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
else: # cos(y) (close to) zero, so x -> 0.0 (see above)
# so r21 -> sin(z), r22 -> cos(z) and
z = math.atan2(r21, r22)
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = 0.0
return z, y, x
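# Sanity-check sketch (illustrative only, not part of the module's doctests):
# away from the degenerate cos(y) ~ 0 case, mat2euler(euler2mat(z, y, x))
# should recover the original angles, e.g.
#   z0, y0, x0 = 0.3, -0.2, 0.1
#   assert np.allclose(mat2euler(euler2mat(z0, y0, x0)), (z0, y0, x0))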
def euler2quat(z, y, x):
''' Return quaternion corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
quat : array shape (4,)
Quaternion in w, x, y z (real, then vector) format
Notes
-----
Formula from Sympy - see ``eulerangles.py`` in ``derivations``
subdirectory
'''
z = z/2.0
y = y/2.0
x = x/2.0
cz = math.cos(z)
sz = math.sin(z)
cy = math.cos(y)
sy = math.sin(y)
cx = math.cos(x)
sx = math.sin(x)
return np.array([
cx*cy*cz - sx*sy*sz,
cx*sy*sz + cy*cz*sx,
cx*cz*sy - sx*cy*sz,
cx*cy*sz + sx*cz*sy])
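# Quick sanity check (illustrative only): euler2quat(0, 0, 0) returns the
# identity quaternion [1, 0, 0, 0] in (w, x, y, z) order.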
def quat2euler(q):
''' Return Euler angles corresponding to quaternion `q`
Parameters
----------
q : 4 element sequence
w, x, y, z of quaternion
Returns
-------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
the reduction in computation is small, and the code repetition is
large.
'''
# delayed import to avoid cyclic dependencies
from . import quaternions as nq
return mat2euler(nq.quat2mat(q))
def euler2axangle(z, y, x):
''' Return angle, axis corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
vector : array shape (3,)
axis around which rotation occurs
theta : scalar
angle of rotation
Examples
--------
>>> vec, theta = euler2axangle(0, 1.5, 0)
>>> np.allclose(vec, [0, 1, 0])
True
>>> theta
1.5
'''
# delayed import to avoid cyclic dependencies
from . import quaternions as nq
return nq.quat2axangle(euler2quat(z, y, x))
def axangle2euler(vector, theta):
''' Convert axis, angle pair to Euler angles
Parameters
----------
vector : 3 element sequence
vector specifying axis for rotation.
theta : scalar
angle of rotation
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Examples
--------
>>> z, y, x = axangle2euler([1, 0, 0], 0)
>>> np.allclose((z, y, x), 0)
True
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``angle_axis2mat`` and ``mat2euler``
functions, but the reduction in computation is small, and the code
repetition is large.
'''
return mat2euler(axangle2mat(vector, theta))
|
11468838
|
import cv2
import numpy as np
import os
import sys
from tqdm import tqdm
import math
import conf
# DEPTH controls the brightness steps per channel; the arange loops below produce
# DEPTH + 1 levels per channel (e.g. DEPTH = 4 -> 5 * 5 * 5 = 125 colour combinations)
DEPTH = conf.DEPTH
# list of rotations, in degrees, to apply over the original image
ROTATIONS = conf.ROTATIONS
img_path = sys.argv[1]
img_dir = os.path.dirname(img_path)
img_name, ext = os.path.basename(img_path).rsplit('.', 1)
out_folder = img_dir + '/gen_' + img_name
if not os.path.exists(out_folder):
os.mkdir(out_folder)
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
img = img.astype('float')
height, width, channels = img.shape
center = (width/2, height/2)
for b in tqdm(np.arange(0, 1.01, 1 / DEPTH)):
for g in np.arange(0, 1.01, 1 / DEPTH):
for r in np.arange(0, 1.01, 1 / DEPTH):
mult_vector = [b, g, r]
if channels == 4:
mult_vector.append(1)
new_img = img * mult_vector
new_img = new_img.astype('uint8')
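# For each requested rotation, build a rotation matrix about the image centre,
# grow the output canvas so the rotated image is not clipped, and shift the
# matrix translation so the rotated content stays centred in the new canvas.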
for rotation in ROTATIONS:
rotation_matrix = cv2.getRotationMatrix2D(center, rotation, 1)
abs_cos = abs(rotation_matrix[0,0])
abs_sin = abs(rotation_matrix[0,1])
new_w = int(height * abs_sin + width * abs_cos)
new_h = int(height * abs_cos + width * abs_sin)
rotation_matrix[0, 2] += new_w/2 - center[0]
rotation_matrix[1, 2] += new_h/2 - center[1]
cv2.imwrite(
f'{out_folder}/{img_name}_{round(r,1)}_{round(g,1)}_{round(b,1)}_r{rotation}.{ext}',
cv2.warpAffine(new_img, rotation_matrix, (new_w, new_h)),
# compress image
[cv2.IMWRITE_PNG_COMPRESSION, 9])
|
11468842
|
import configparser
import os
import unittest
from TM1py.Objects import User
from TM1py.Services import TM1Service
from TM1py.Utils.Utils import CaseAndSpaceInsensitiveSet
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))
PREFIX = "TM1py_Tests_"
class TestSecurityMethods(unittest.TestCase):
tm1 = None
@classmethod
def setup_class(cls):
cls.tm1 = TM1Service(**config['tm1srv01'])
cls.user_name = PREFIX + "User1"
cls.user = User(name=cls.user_name, groups=[], password='<PASSWORD>')
cls.group_name1 = PREFIX + "Group1"
cls.group_name2 = PREFIX + "Group2"
cls.user.add_group(cls.group_name1)
if cls.user_name in CaseAndSpaceInsensitiveSet(*cls.tm1.security.get_all_user_names()):
cls.tm1.security.delete_user(cls.user_name)
for group in (cls.group_name1, cls.group_name2):
if group in CaseAndSpaceInsensitiveSet(*cls.tm1.security.get_all_groups()):
cls.tm1.security.delete_group(group)
def setUp(self):
self.tm1.security.create_group(self.group_name1)
self.tm1.security.create_group(self.group_name2)
self.tm1.security.create_user(self.user)
def tearDown(self):
self.tm1.security.delete_user(self.user_name)
self.tm1.security.delete_group(self.group_name1)
self.tm1.security.delete_group(self.group_name2)
def test_get_user(self):
u = self.tm1.security.get_user(self.user_name)
# Adjust it a little bit
u.password = '<PASSWORD>'
u.friendly_name = None
self.assertEqual(u.body, self.user.body)
def test_get_current_user(self):
me = self.tm1.security.get_current_user()
self.assertEqual(me.name, config['tm1srv01']['User'])
user = self.tm1.security.get_user(config['tm1srv01']['User'])
self.assertEqual(me, user)
def test_update_user(self):
# get user
u = self.tm1.security.get_user(self.user_name)
# update user. Add Group
u.add_group(self.group_name2)
self.tm1.security.update_user(u)
# test it !
groups = self.tm1.security.get_groups(u.name)
self.assertIn(self.group_name2, groups)
# update user. Remove Group
u.remove_group(self.group_name2)
self.tm1.security.update_user(u)
# test it !
groups = self.tm1.security.get_groups(u.name)
self.assertNotIn(self.group_name2, groups)
def test_get_all_users(self):
all_users = [user.name for user in self.tm1.security.get_all_users()]
all_clients = self.tm1.dimensions.hierarchies.elements.get_element_names('}Clients', '}Clients')
self.assertGreater(len(all_users), 0)
self.assertIn(self.user_name, all_users)
self.assertEqual(sorted(all_users), sorted(all_clients))
def test_get_all_user_names(self):
all_users = self.tm1.security.get_all_user_names()
all_clients = self.tm1.dimensions.hierarchies.elements.get_element_names('}Clients', '}Clients')
self.assertGreater(len(all_users), 0)
self.assertIn(self.user_name, all_users)
self.assertEqual(sorted(all_users), sorted(all_clients))
def test_add_user_to_groups(self):
self.tm1.security.add_user_to_groups(self.user_name, (self.group_name2,))
user = self.tm1.security.get_user(self.user_name)
self.assertIn(self.group_name2, user.groups)
def test_remove_user_from_group(self):
self.tm1.security.remove_user_from_group(self.group_name1, self.user_name)
user = self.tm1.security.get_user(self.user_name)
self.assertNotIn(self.group_name1, user.groups)
def test_get_users_from_group(self):
users = [user.name for user in self.tm1.security.get_users_from_group("AdMiN")]
mdx = "{ FILTER ( { [}Clients].Members } , [}ClientGroups].([}Groups].[ADMIN]) = 'ADMIN' ) }"
clients = self.tm1.dimensions.execute_mdx("}Clients", mdx)
self.assertGreater(len(users), 0)
self.assertGreater(len(clients), 0)
self.assertEqual(sorted(users), sorted(clients))
def test_get_user_names_from_group(self):
users = self.tm1.security.get_user_names_from_group(self.group_name1)
mdx = "{ FILTER ( { [}Clients].Members } , [}ClientGroups].([}Groups].[" + self.group_name1 + "]) = '" + self.group_name1 + "' ) }"
clients = self.tm1.dimensions.execute_mdx("}Clients", mdx)
self.assertGreater(len(users), 0)
self.assertGreater(len(clients), 0)
self.assertEqual(sorted(users), sorted(clients))
def test_get_groups_from_user(self):
groups = self.tm1.security.get_groups(self.user_name)
self.assertIn(self.group_name1, groups)
groups = self.tm1.security.get_groups(" ".join(self.user_name.upper()))
self.assertIn(self.group_name1, groups)
def test_get_groups(self):
groups = self.tm1.security.get_all_groups()
self.assertGreater(len(groups), 0)
self.assertEqual(
sorted(groups),
sorted(self.tm1.dimensions.hierarchies.elements.get_element_names("}Groups", "}Groups"))
)
def test_security_refresh(self):
response = self.tm1.security.security_refresh()
self.assertTrue(response.ok)
def test_create_and_delete_user(self):
u = User(name=PREFIX + "User2", groups=())
all_users = self.tm1.security.get_all_user_names()
if u.name not in CaseAndSpaceInsensitiveSet(*all_users):
self.tm1.security.create_user(u)
users_before_delete = self.tm1.security.get_all_user_names()
response = self.tm1.security.delete_user(u.name)
self.assertTrue(response.ok)
users_after_delete = self.tm1.security.get_all_user_names()
self.assertIn(u.name, CaseAndSpaceInsensitiveSet(*users_before_delete))
self.assertNotIn(u.name, CaseAndSpaceInsensitiveSet(*users_after_delete))
def test_create_and_delete_group(self):
group = PREFIX + "Group3"
groups = self.tm1.security.get_all_groups()
if group not in CaseAndSpaceInsensitiveSet(*groups):
self.tm1.security.create_group(group)
groups_before_delete = self.tm1.security.get_all_groups()
response = self.tm1.security.delete_group(group)
self.assertTrue(response.ok)
groups_after_delete = self.tm1.security.get_all_groups()
self.assertIn(group, groups_before_delete)
self.assertNotIn(group, groups_after_delete)
@classmethod
def teardown_class(cls):
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
|
11468876
|
import os
import pyshark
import argparse
INFILE_PATH = 'miner.pcapng'
OUTFILE_PATH = 'datasets/mining_4t_nicehash.dat'
SAMPLE_DELTA = 0.5
LOCAL_IP = '192.168.1.158'
LOCAL_IPV6 = 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'
REMOTE_PORTS = [3341, 3333, 3334, 3357, 80, 443]
def save_to_file(delta, last_bytes_up, last_bytes_down,
last_npkts_up, last_npkts_down):
global OUTFILE_PATH
with open(OUTFILE_PATH, "a") as f:
f.write("{} {} {} {}\n".format(last_bytes_up, last_bytes_down,
last_npkts_up, last_npkts_down))
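# Pad the output with one all-zero row for each full sampling interval that
# elapsed with no captured traffic before the packet that triggered this flush.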
diff_intervals = int(delta / SAMPLE_DELTA) - 1
for i in range(diff_intervals):
f.write("0 0 0 0\n")
def process_packets(tcp_cap):
global SAMPLE_DELTA
last_timestamp = None
last_bytes_up = 0
last_bytes_down = 0
last_npkts_up = 0
last_npkts_down = 0
for packet in tcp_cap:
packet_type = -1
if 'ipv6' in [l.layer_name for l in packet.layers]:
ip = LOCAL_IPV6
src = packet.ipv6.src
dst = packet.ipv6.dst
size = int(packet.ipv6.plen)
else:
ip = LOCAL_IP
src = packet.ip.src
dst = packet.ip.dst
size = int(packet.ip.get_field('Len'))
if src == ip and int(packet.tcp.get_field('DstPort')) in REMOTE_PORTS:
packet_type = 0
elif int(packet.tcp.get_field('SrcPort')) in REMOTE_PORTS and dst == ip:
packet_type = 1
if packet_type == 0:
if last_timestamp is None:
last_timestamp = float(packet.sniff_timestamp)
last_bytes_down += size
last_npkts_down += 1
else:
time_delta = float(packet.sniff_timestamp) - last_timestamp
if time_delta > SAMPLE_DELTA:
save_to_file(time_delta, last_bytes_up, last_bytes_down,
last_npkts_up, last_npkts_down)
last_timestamp = float(packet.sniff_timestamp)
last_bytes_down = size
last_npkts_down = 1
else:
last_bytes_down += size
last_npkts_down += 1
elif packet_type == 1:
if last_timestamp is None:
last_timestamp = float(packet.sniff_timestamp)
last_bytes_up += size
last_npkts_up += 1
else:
time_delta = float(packet.sniff_timestamp) - last_timestamp
if time_delta > SAMPLE_DELTA:
save_to_file(time_delta, last_bytes_up, last_bytes_down,
last_npkts_up, last_npkts_down)
last_timestamp = float(packet.sniff_timestamp)
last_bytes_up = size
last_npkts_up = 1
else:
last_bytes_up += size
last_npkts_up += 1
save_to_file(0, last_bytes_up, last_bytes_down, last_npkts_up,
last_npkts_down)
def main():
global INFILE_PATH
global OUTFILE_PATH
global SAMPLE_DELTA
global LOCAL_IP
global LOCAL_IPV6
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', nargs='?',
help='input capture file')
parser.add_argument('-o', '--output', nargs='?',
help='output processed file')
parser.add_argument('-w', '--sampwindow', nargs='?', type=float,
help='sampling interval (default 0.5s)')
parser.add_argument('-4', '--ipv4', nargs='?',
help='IPv4 of the host machine')
parser.add_argument('-6', '--ipv6', nargs='?',
help='IPv6 of the host machine')
args = parser.parse_args()
INFILE_PATH = args.input if args.input is not None else INFILE_PATH
OUTFILE_PATH = args.output if args.output is not None else OUTFILE_PATH
SAMPLE_DELTA = args.sampwindow if args.sampwindow is not None else SAMPLE_DELTA
LOCAL_IP = args.ipv4 if args.ipv4 is not None else LOCAL_IP
LOCAL_IPV6 = args.ipv6 if args.ipv6 is not None else LOCAL_IPV6
if os.path.exists(OUTFILE_PATH):
if input('Write over file? [y/N] ') == 'y':
os.remove(OUTFILE_PATH)
else:
exit()
tcp_cap = pyshark.FileCapture(
INFILE_PATH, display_filter='tcp', keep_packets=False)
process_packets(tcp_cap)
if __name__ == '__main__':
main()
|
11468892
|
from dataclasses import dataclass
from typing import Optional
from lightning_transformers.core.nlp import HFTransformerDataConfig
@dataclass
class HFSeq2SeqConfig:
val_target_max_length: Optional[int] = 128
num_beams: Optional[int] = 1
compute_generate_metrics: bool = True
@dataclass
class Seq2SeqDataConfig(HFTransformerDataConfig):
max_target_length: int = 128
max_source_length: int = 1024
padding: str = "longest"
|
11468900
|
import financedatabase as fd
import FundamentalAnalysis as fa
import pandas as pd
import matplotlib.pyplot as plt
all_technology_companies = fd.select_equities(sector='Technology')
silicon_valley = fd.search_products(all_technology_companies, query='San Jose', search='city')
API_KEY = "YOUR_API_KEY_HERE"
data_set = {}
for ticker in silicon_valley:
try:
data_set[ticker] = fa.key_metrics(ticker, API_KEY, period='annual')
except Exception:
continue
years = ['2016', '2017', '2018', '2019', '2020']
market_cap = pd.DataFrame(index=years)
for ticker in data_set:
try:
data_years = []
for year in years:
data_years.append(data_set[ticker].loc['marketCap'][year])
market_cap[all_technology_companies[ticker]['short_name']] = data_years
except Exception:
continue
market_cap_plot = market_cap.plot.bar(stacked=True, rot=0, colormap='Spectral')
market_cap_plot.legend(prop={'size': 5.25}, loc="upper left")
plt.show()
|
11468926
|
import io
import os
import pytest
from django.core.management import call_command
from django.core.management.base import CommandError
files = (
'conditions.xml',
'domain.xml',
'options.xml',
'questions.xml',
'tasks.xml',
'views.xml'
)
@pytest.mark.parametrize('file_name', files)
def test_import(db, settings, file_name):
xml_file = os.path.join(settings.BASE_DIR, 'xml', file_name)
stdout, stderr = io.StringIO(), io.StringIO()
call_command('import', xml_file, stdout=stdout, stderr=stderr)
assert not stdout.getvalue()
assert not stderr.getvalue()
def test_import_error(db, settings):
xml_file = os.path.join(settings.BASE_DIR, 'xml', 'error.xml')
stdout, stderr = io.StringIO(), io.StringIO()
with pytest.raises(CommandError) as e:
call_command('import', xml_file, stdout=stdout, stderr=stderr)
assert str(e.value) == 'The content of the xml file does not consist of well formed data or markup.'
def test_import_error2(db, settings):
xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
stdout, stderr = io.StringIO(), io.StringIO()
with pytest.raises(CommandError) as e:
call_command('import', xml_file, stdout=stdout, stderr=stderr)
assert str(e.value) == 'This XML does not contain RDMO content.'
|
11468950
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("fluent_contents", "0001_initial")]
operations = [
migrations.CreateModel(
name="RawHtmlItem",
fields=[
(
"contentitem_ptr",
models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to="fluent_contents.ContentItem",
on_delete=models.CASCADE,
),
),
(
"html",
models.TextField(
help_text="Enter the HTML code to display, like the embed code of an online widget.",
verbose_name="HTML code",
),
),
],
options={
"db_table": "contentitem_rawhtml_rawhtmlitem",
"verbose_name": "HTML code",
"verbose_name_plural": "HTML code",
},
bases=("fluent_contents.contentitem",),
)
]
|
11468957
|
from pyspark import SparkContext
import json
import sys
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Error usage: LoadJson [sparkmaster] [inputfile] [outputfile]"
sys.exit(-1)
master = sys.argv[1]
inputFile = sys.argv[2]
outputFile = sys.argv[3]
sc = SparkContext(master, "LoadJson")
input = sc.textFile(inputFile)
data = input.map(lambda x: json.loads(x))
data.filter(lambda x: 'lovesPandas' in x and x['lovesPandas']).map(
lambda x: json.dumps(x)).saveAsTextFile(outputFile)
sc.stop()
print "Done!"
|
11468960
|
from threading import Thread
class ant_colony:
class ant(Thread):
def __init__(self, init_location, possible_locations, pheromone_map, distance_callback, alpha, beta, first_pass=False):
"""
initializes an ant to traverse the map
init_location -> marks where in the map that the ant starts
possible_locations -> a list of possible nodes the ant can go to
when used internally, gives a list of possible locations the ant can traverse to _minus those nodes already visited_
pheromone_map -> map of pheromone values for each traversal between each node
distance_callback -> is a function to calculate the distance between two nodes
alpha -> a parameter from the ACO algorithm to control the influence of the amount of pheromone when making a choice in _pick_path()
beta -> a parameter from ACO that controls the influence of the distance to the next node in _pick_path()
first_pass -> if this is a first pass on a map, then do some steps differently, noted in methods below
route -> a list that is updated with the labels of the nodes that the ant has traversed
pheromone_trail -> a list of pheromone amounts deposited along the ants trail, maps to each traversal in route
distance_traveled -> total distance traveled along the steps in route
location -> marks where the ant currently is
tour_complete -> flag to indicate the ant has completed its traversal
used by get_route() and get_distance_traveled()
"""
Thread.__init__(self)
self.init_location = init_location
self.possible_locations = possible_locations
self.route = []
self.distance_traveled = 0.0
self.location = init_location
self.pheromone_map = pheromone_map
self.distance_callback = distance_callback
self.alpha = alpha
self.beta = beta
self.first_pass = first_pass
#append start location to route, before doing random walk
self._update_route(init_location)
self.tour_complete = False
def run(self):
"""
until self.possible_locations is empty (the ant has visited all nodes)
_pick_path() to find a next node to traverse to
_traverse() to:
_update_route() (to show latest traversal)
_update_distance_traveled() (after traversal)
return the ants route and its distance, for use in ant_colony:
do pheromone updates
check for new possible optimal solution with this ants latest tour
"""
while self.possible_locations:
next = self._pick_path()
self._traverse(self.location, next)
self.tour_complete = True
def _pick_path(self):
"""
source: https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms#Edge_selection
implements the path selection algorithm of ACO
calculate the attractiveness of each possible transition from the current location
then randomly choose a next path, based on its attractiveness
"""
#on the first pass (no pheromones yet), we can just use random.choice() to pick the next node
if self.first_pass:
import random
return random.choice(self.possible_locations)
attractiveness = dict()
sum_total = 0.0
#for each possible location, find its attractiveness (i.e. (pheromone amount) * 1/distance [tau*eta, from the algorithm])
#sum all attractiveness amounts for calculating the probability of each route in the next step
for possible_next_location in self.possible_locations:
#NOTE: do all calculations as float, otherwise we get integer division at times for really hard to track down bugs
pheromone_amount = float(self.pheromone_map[self.location][possible_next_location])
distance = float(self.distance_callback(self.location, possible_next_location))
#tau^alpha * eta^beta
attractiveness[possible_next_location] = pow(pheromone_amount, self.alpha)*pow(1/distance, self.beta)
sum_total += attractiveness[possible_next_location]
#it is possible to have small values for pheromone amount / distance, such that with rounding errors this is equal to zero
#rare, but handle when it happens
if sum_total == 0.0:
#increment all zero's, such that they are the smallest non-zero values supported by the system
#source: http://stackoverflow.com/a/10426033/5343977
def next_up(x):
import math
import struct
# NaNs and positive infinity map to themselves.
if math.isnan(x) or (math.isinf(x) and x > 0):
return x
# 0.0 and -0.0 both map to the smallest +ve float.
if x == 0.0:
x = 0.0
n = struct.unpack('<q', struct.pack('<d', x))[0]
if n >= 0:
n += 1
else:
n -= 1
return struct.unpack('<d', struct.pack('<q', n))[0]
for key in attractiveness:
attractiveness[key] = next_up(attractiveness[key])
sum_total = next_up(sum_total)
#cumulative probability behavior, inspired by: http://stackoverflow.com/a/3679747/5343977
#randomly choose the next path
import random
toss = random.random()
cummulative = 0
for possible_next_location in attractiveness:
weight = (attractiveness[possible_next_location] / sum_total)
if toss <= weight + cummulative:
return possible_next_location
cummulative += weight
def _traverse(self, start, end):
"""
_update_route() to show new traversal
_update_distance_traveled() to record new distance traveled
self.location update to new location
called from run()
"""
self._update_route(end)
self._update_distance_traveled(start, end)
self.location = end
def _update_route(self, new):
"""
add new node to self.route
remove the new node from self.possible_locations
called from _traverse() & __init__()
"""
self.route.append(new)
self.possible_locations.remove(new)
def _update_distance_traveled(self, start, end):
"""
use self.distance_callback to update self.distance_traveled
"""
self.distance_traveled += float(self.distance_callback(start, end))
def get_route(self):
if self.tour_complete:
return self.route
return None
def get_distance_traveled(self):
if self.tour_complete:
return self.distance_traveled
return None
def __init__(self, nodes, distance_callback, start=None, ant_count=50, alpha=.5, beta=1.2, pheromone_evaporation_coefficient=.40, pheromone_constant=1000.0, iterations=80):
"""
initializes an ant colony (houses a number of worker ants that will traverse a map to find an optimal route as per ACO [Ant Colony Optimization])
source: https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms
nodes -> is assumed to be a dict() mapping node ids to values
that are understandable by distance_callback
distance_callback -> is assumed to take a pair of coordinates and return the distance between them
populated into distance_matrix on each call to get_distance()
start -> if set, then is assumed to be the node where all ants start their traversal
if unset, then assumed to be the first key of nodes when sorted()
distance_matrix -> holds values of distances calculated between nodes
populated on demand by _get_distance()
pheromone_map -> holds final values of pheromones
used by ants to determine traversals
pheromone dissipation happens to these values first, before adding pheromone values from the ants during their traversal
(in ant_updated_pheromone_map)
ant_updated_pheromone_map -> a matrix to hold the pheromone values that the ants lay down
not used to dissipate, values from here are added to pheromone_map after dissipation step
(reset for each traversal)
alpha -> a parameter from the ACO algorithm to control the influence of the amount of pheromone when an ant makes a choice
beta -> a parameter from ACO that controls the influence of the distance to the next node in ant choice making
pheromone_constant -> a parameter used in depositing pheromones on the map (Q in ACO algorithm)
used by _update_pheromone_map()
pheromone_evaporation_coefficient -> a parameter used in removing pheromone values from the pheromone_map (rho in ACO algorithm)
used by _update_pheromone_map()
ants -> holds worker ants
they traverse the map as per ACO
notable properties:
total distance traveled
route
first_pass -> flags a first pass for the ants, which triggers unique behavior
iterations -> how many iterations to let the ants traverse the map
shortest_distance -> the shortest distance seen from an ant traversal
shortest_path_seen -> the shortest path seen from a traversal (shortest_distance is the distance along this path)
"""
#nodes
if type(nodes) is not dict:
raise TypeError("nodes must be dict")
if len(nodes) < 1:
raise ValueError("there must be at least one node in dict nodes")
#create internal mapping and mapping for return to caller
self.id_to_key, self.nodes = self._init_nodes(nodes)
#create matrix to hold distance calculations between nodes
self.distance_matrix = self._init_matrix(len(nodes))
#create matrix for master pheromone map, that records pheromone amounts along routes
self.pheromone_map = self._init_matrix(len(nodes))
#create a matrix for ants to add their pheromones to, before adding those to pheromone_map during the update_pheromone_map step
self.ant_updated_pheromone_map = self._init_matrix(len(nodes))
#distance_callback
if not callable(distance_callback):
raise TypeError("distance_callback is not callable, should be method")
self.distance_callback = distance_callback
#start
if start is None:
self.start = 0
else:
self.start = None
#init start to internal id of node id passed
for key, value in self.id_to_key.items():
if value == start:
self.start = key
#if we didn't find a key in the nodes passed in, then raise
if self.start is None:
raise KeyError("Key: " + str(start) + " not found in the nodes dict passed.")
#ant_count
if type(ant_count) is not int:
raise TypeError("ant_count must be int")
if ant_count < 1:
raise ValueError("ant_count must be >= 1")
self.ant_count = ant_count
#alpha
if (type(alpha) is not int) and type(alpha) is not float:
raise TypeError("alpha must be int or float")
if alpha < 0:
raise ValueError("alpha must be >= 0")
self.alpha = float(alpha)
#beta
if (type(beta) is not int) and type(beta) is not float:
raise TypeError("beta must be int or float")
if beta < 1:
raise ValueError("beta must be >= 1")
self.beta = float(beta)
#pheromone_evaporation_coefficient
if (type(pheromone_evaporation_coefficient) is not int) and type(pheromone_evaporation_coefficient) is not float:
raise TypeError("pheromone_evaporation_coefficient must be int or float")
self.pheromone_evaporation_coefficient = float(pheromone_evaporation_coefficient)
#pheromone_constant
if (type(pheromone_constant) is not int) and type(pheromone_constant) is not float:
raise TypeError("pheromone_constant must be int or float")
self.pheromone_constant = float(pheromone_constant)
#iterations
if (type(iterations) is not int):
raise TypeError("iterations must be int")
if iterations < 0:
raise ValueError("iterations must be >= 0")
self.iterations = iterations
#other internal variable init
self.first_pass = True
self.ants = self._init_ants(self.start)
self.shortest_distance = None
self.shortest_path_seen = None
def _get_distance(self, start, end):
"""
uses the distance_callback to return the distance between nodes
if a distance has not been calculated before, then it is populated in distance_matrix and returned
if a distance has been called before, then its value is returned from distance_matrix
"""
if not self.distance_matrix[start][end]:
distance = self.distance_callback(self.nodes[start], self.nodes[end])
if (type(distance) is not int) and (type(distance) is not float):
raise TypeError("distance_callback should return either int or float, saw: "+ str(type(distance)))
self.distance_matrix[start][end] = float(distance)
return distance
return self.distance_matrix[start][end]
def _init_nodes(self, nodes):
"""
create a mapping of internal id numbers (0 .. n) to the keys in the nodes passed
create a mapping of the id's to the values of nodes
we use id_to_key to return the route in the node names the caller expects in mainloop()
"""
id_to_key = dict()
id_to_values = dict()
id = 0
for key in sorted(nodes.keys()):
id_to_key[id] = key
id_to_values[id] = nodes[key]
id += 1
return id_to_key, id_to_values
def _init_matrix(self, size, value=0.0):
"""
setup a matrix NxN (where n = size)
used in both self.distance_matrix and self.pheromone_map
as they require identical matrixes besides which value to initialize to
"""
ret = []
for row in range(size):
ret.append([float(value) for x in range(size)])
return ret
def _init_ants(self, start):
"""
on first pass:
create a number of ant objects
on subsequent passes, just call __init__ on each to reset them
by default, all ants start at the first node, 0
as per problem description: https://www.codeeval.com/open_challenges/90/
"""
#allocate new ants on the first pass
if self.first_pass:
return [self.ant(start, self.nodes.keys(), self.pheromone_map, self._get_distance,
self.alpha, self.beta, first_pass=True) for x in range(self.ant_count)]
#else, just reset them to use on another pass
for ant in self.ants:
ant.__init__(start, self.nodes.keys(), self.pheromone_map, self._get_distance, self.alpha, self.beta)
def _update_pheromone_map(self):
"""
1) Update self.pheromone_map by decaying values contained therein via the ACO algorithm
2) Add pheromone_values from all ants from ant_updated_pheromone_map
called by:
mainloop()
(after all ants have traversed)
"""
#always a square matrix
for start in range(len(self.pheromone_map)):
for end in range(len(self.pheromone_map)):
#decay the pheromone value at this location
#tau_xy <- (1-rho)*tau_xy (ACO)
self.pheromone_map[start][end] = (1-self.pheromone_evaporation_coefficient)*self.pheromone_map[start][end]
#then add all contributions to this location for each ant that traversed it
#(ACO)
#tau_xy <- tau_xy + delta tau_xy_k
# delta tau_xy_k = Q / L_k
self.pheromone_map[start][end] += self.ant_updated_pheromone_map[start][end]
def _populate_ant_updated_pheromone_map(self, ant):
"""
given an ant, populate ant_updated_pheromone_map with pheromone values according to ACO
along the ant's route
called from:
mainloop()
( before _update_pheromone_map() )
"""
route = ant.get_route()
for i in range(len(route)-1):
#find the pheromone over the route the ant traversed
current_pheromone_value = float(self.ant_updated_pheromone_map[route[i]][route[i+1]])
#update the pheromone along that section of the route
#(ACO)
# delta tau_xy_k = Q / L_k
new_pheromone_value = self.pheromone_constant/ant.get_distance_traveled()
self.ant_updated_pheromone_map[route[i]][route[i+1]] = current_pheromone_value + new_pheromone_value
self.ant_updated_pheromone_map[route[i+1]][route[i]] = current_pheromone_value + new_pheromone_value
def mainloop(self):
"""
Runs the worker ants, collects their returns and updates the pheromone map with pheromone values from workers
calls:
_update_pheromone_map()
ant.run()
runs the simulation self.iterations times
"""
for _ in range(self.iterations):
#start the multi-threaded ants, calls ant.run() in a new thread
for ant in self.ants:
ant.start()
#source: http://stackoverflow.com/a/11968818/5343977
#wait until the ants are finished, before moving on to modifying shared resources
for ant in self.ants:
ant.join()
for ant in self.ants:
#update ant_updated_pheromone_map with this ant's contribution of pheromones along its route
self._populate_ant_updated_pheromone_map(ant)
#if we haven't seen any paths yet, then populate for comparisons later
if not self.shortest_distance:
self.shortest_distance = ant.get_distance_traveled()
if not self.shortest_path_seen:
self.shortest_path_seen = ant.get_route()
#if we see a shorter path, then save for return
if ant.get_distance_traveled() < self.shortest_distance:
self.shortest_distance = ant.get_distance_traveled()
self.shortest_path_seen = ant.get_route()
#decay current pheromone values and add all pheromone values we saw during traversal (from ant_updated_pheromone_map)
self._update_pheromone_map()
#flag that we finished the first pass of the ants traversal
if self.first_pass:
self.first_pass = False
#reset all ants to default for the next iteration
self._init_ants(self.start)
#reset ant_updated_pheromone_map to record pheromones for ants on next pass
self.ant_updated_pheromone_map = self._init_matrix(len(self.nodes), value=0)
#translate shortest path back into callers node id's
ret = []
for id in self.shortest_path_seen:
ret.append(self.id_to_key[id])
return ret
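# Minimal usage sketch (assumes the colony class defined above is named
# `ant_colony`; the node layout and the Euclidean distance callback below are
# illustrative, not part of the original file).
if __name__ == '__main__':
    import math
    test_nodes = {0: (0, 7), 1: (3, 9), 2: (12, 4), 3: (14, 11), 4: (8, 11)}
    def euclidean(a, b):
        return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
    colony = ant_colony(test_nodes, euclidean, ant_count=10, iterations=20)
    # mainloop() returns the shortest route found, expressed in the caller's node ids
    print(colony.mainloop())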
|
11468979
|
from homie.device_base import Device_Base
from homie.node.node_base import Node_Base
class Device_Status(Device_Base):
def __init__(
self, device_id=None, name=None, homie_settings=None, mqtt_settings=None
):
super().__init__(device_id, name, homie_settings, mqtt_settings)
node = Node_Base(self, "status", "Status", "status")
self.add_node(node)
self.register_status_properties(node)
self.start()
def register_status_properties(self, node):
raise RuntimeError("Override in subclass")
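# Minimal sketch of a concrete subclass (illustrative only; a real subclass would
# build homie property objects and attach them to `node` here, which is omitted
# to avoid assuming the exact property-class signatures).
class Device_Uptime_Status(Device_Status):
    def register_status_properties(self, node):
        # e.g. create Property_* instances from homie.node.property and register
        # them on `node`
        pass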
|
11469000
|
from django.apps import AppConfig
class CountConfig(AppConfig):
name = 'apps.count'
verbose_name = 'Statistics'
|
11469002
|
import os
import dj_email_url
from django.conf import settings
from django.db import migrations
from django.utils.module_loading import import_string
def populate_email_config_in_user_email_plugin(apps, schema):
user_email_path = "saleor.plugins.user_email.plugin.UserEmailPlugin"
if user_email_path not in settings.PLUGINS:
return
# Allow to provide different email url from env
email_url = os.environ.get("USER_EMAIL_URL", getattr(settings, "EMAIL_URL", None))
if not email_url:
return
email_config = dj_email_url.parse(email_url)
# Assume that EMAIL_URL has been split to partial env values
email_config = {
"host": email_config["EMAIL_HOST"],
"port": email_config["EMAIL_PORT"],
"username": email_config["EMAIL_HOST_USER"],
"password": email_config["EMAIL_HOST_PASSWORD"],
"sender_address": getattr(settings, "DEFAULT_FROM_EMAIL"),
"use_tls": email_config["EMAIL_USE_TLS"],
"use_ssl": email_config["EMAIL_USE_SSL"],
}
if not all(
[email_config["host"], email_config["port"], email_config["sender_address"]]
):
return
UserEmail = import_string(user_email_path)
configuration = UserEmail.DEFAULT_CONFIGURATION
for configuration_field in configuration:
config_name = configuration_field["name"]
if config_name in email_config:
configuration_field["value"] = email_config[config_name]
PluginConfiguration = apps.get_model("plugins", "PluginConfiguration")
plugin_configuration, _ = PluginConfiguration.objects.get_or_create(
identifier=UserEmail.PLUGIN_ID,
defaults={"active": True, "configuration": configuration},
)
class Migration(migrations.Migration):
dependencies = [
("plugins", "0006_auto_20200909_1253"),
]
operations = [
migrations.RunPython(populate_email_config_in_user_email_plugin),
]
|
11469058
|
import pandas as pd, numpy as np
import pickle, time
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split
from hyperopt import fmin, tpe, Trials, STATUS_OK, STATUS_FAIL
from datetime import datetime
from cat_counter import CatCounter
#from pandas.io.common import EmptyDataError
import os
class Experiment(object):
def __init__(self, learning_task='classification', bst_name=None, n_estimators=5000, hyperopt_evals=50,
compute_counters=True, counters_sort_col=None, holdout_size=0,
train_path=None, test_path=None, cd_path=None, output_folder_path='./'):
self.learning_task, self.bst_name = learning_task, bst_name
self.compute_counters = compute_counters
self.holdout_size = holdout_size
self.counters_sort_col = counters_sort_col
self.n_estimators, self.best_loss = n_estimators, np.inf
self.best_n_estimators = None
self.hyperopt_evals, self.hyperopt_eval_num = hyperopt_evals, 0
self.train_path, self.test_path, self.cd_path = train_path, test_path, cd_path
self.output_folder_path = os.path.join(output_folder_path, '')
self.default_params, self.best_params = None, None
self.title = None
if self.learning_task == 'classification':
self.metric = 'logloss'
elif self.learning_task == 'regression':
self.metric = 'rmse'
else:
raise ValueError('Task type must be "classification" or "regression"')
def read_file(self, file_name, target_col):
X = pd.read_csv(file_name, sep='\t', header=None)
if self.learning_task == 'classification':
y = np.maximum(X[target_col].values, 0)
else:
y = X[target_col].values
X.drop(target_col, axis=1, inplace=True)
return X, y
def read_data(self):
cols = pd.read_csv(self.cd_path, sep='\t', header=None)
target_col = cols[0][np.where(cols[1] == 'Target')[0][0]]
cat_cols = cols[cols[1] == "Categ"][0].values
X_train, y_train = self.read_file(self.train_path, target_col)
X_test, y_test = self.read_file(self.test_path, target_col)
data = pd.concat([X_train, X_test])
data[cat_cols] = data[cat_cols].apply(lambda x: x.astype('category').cat.codes)
data = np.array(data).astype('float')
X_train, X_test = data[:X_train.shape[0]], data[X_train.shape[0]:]
cat_cols[cat_cols > target_col] = cat_cols[cat_cols > target_col] - 1
return X_train, y_train, X_test, y_test, cat_cols
def convert_to_dataset(self, data, label, cat_cols=None):
raise NotImplementedError('Method convert_to_dataset is not implemented.')
def preprocess_cat_cols(self, X_train, y_train, cat_cols, X_test=None, cc=None):
if self.compute_counters == False:
return None
if cc is None:
sort_values = None if self.counters_sort_col is None else X_train[:, self.counters_sort_col]
cc = CatCounter(self.learning_task, sort_values)
X_train[:,cat_cols] = cc.fit(X_train[:,cat_cols], y_train)
else:
X_train[:,cat_cols] = cc.transform(X_train[:,cat_cols])
if not X_test is None:
X_test[:,cat_cols] = cc.transform(X_test[:,cat_cols])
return cc
def split_and_preprocess(self, X_train, y_train, X_test, y_test, cat_cols, n_splits=5, random_state=0):
if self.holdout_size > 0:
print('Holdout is used for counters.')
X_train, X_hout, y_train, y_hout = train_test_split(X_train, y_train,
test_size=self.holdout_size,
random_state=random_state)
cc = self.preprocess_cat_cols(X_hout, y_hout, cat_cols)
else:
cc = None
CVSplit = KFold if self.learning_task == 'regression' else StratifiedKFold
cv = CVSplit(n_splits=n_splits, shuffle=True, random_state=random_state)
cv_pairs = []
for train_index, test_index in cv.split(X_train, y_train):
train, test = X_train[train_index], X_train[test_index]
_ = self.preprocess_cat_cols(train, y_train[train_index], cat_cols, test, cc)
dtrain = self.convert_to_dataset(train.astype(float), y_train[train_index], cat_cols)
dtest = self.convert_to_dataset(test.astype(float), y_train[test_index], cat_cols)
cv_pairs.append((dtrain, dtest))
_ = self.preprocess_cat_cols(X_train, y_train, cat_cols, X_test, cc)
dtrain = self.convert_to_dataset(X_train.astype(float), y_train, cat_cols)
dtest = self.convert_to_dataset(X_test.astype(float), y_test, cat_cols)
return cv_pairs, (dtrain, dtest)
def fit(self, params, dtrain, dtest, n_estimators):
raise NotImplementedError('Method fit is not implemented.')
def predict(self, bst, dtest, X_test):
raise NotImplementedError('Method predict is not implemented.')
def preprocess_params(self, params):
raise NotImplementedError('Method preprocess_params is not implemented.')
def run_cv(self, cv_pairs, params=None, n_estimators=None, verbose=False):
params = params or self.default_params
n_estimators = n_estimators or self.n_estimators
params = self.preprocess_params(params)
evals_results, start_time = [], time.time()
for dtrain, dtest in cv_pairs:
_, evals_result = self.fit(params, dtrain, dtest, n_estimators)
evals_results.append(evals_result)
mean_evals_results = np.mean(evals_results, axis=0)
best_n_estimators = np.argmin(mean_evals_results) + 1
eval_time = time.time() - start_time
cv_result = {'loss': mean_evals_results[best_n_estimators - 1],
'best_n_estimators': best_n_estimators,
'eval_time': eval_time,
'status': STATUS_FAIL if np.isnan(mean_evals_results[best_n_estimators - 1]) else STATUS_OK,
'params': params.copy()}
self.best_loss = min(self.best_loss, cv_result['loss'])
self.hyperopt_eval_num += 1
cv_result.update({'hyperopt_eval_num': self.hyperopt_eval_num, 'best_loss': self.best_loss})
if verbose:
print '[{0}/{1}]\teval_time={2:.2f} sec\tcurrent_{3}={4:.6f}\tmin_{3}={5:.6f}'.format(
self.hyperopt_eval_num, self.hyperopt_evals, eval_time,
self.metric, cv_result['loss'], self.best_loss)
return cv_result
def run_test(self, dtrain, dtest, X_test=None, params=None, n_estimators=None, custom_metric=None, seed=0):
params = params or self.best_params or self.default_params
n_estimators = n_estimators or self.best_n_estimators or self.n_estimators
params = self.preprocess_params(params)
start_time = time.time()
bst, evals_result = self.fit(params, dtrain, dtest, n_estimators, seed=seed)
eval_time = time.time() - start_time
preds = self.predict(bst, dtest, X_test)
result = {'loss': evals_result[-1], 'bst': bst, 'n_estimators': n_estimators,
'eval_time': eval_time, 'status': STATUS_OK, 'params': params.copy(),
'preds': preds}
if custom_metric is not None:
if type(custom_metric) is not dict:
raise TypeError("custom_metric argument should be dict")
pred = self.predict(bst, dtest, X_test)
for title, func in custom_metric.iteritems():
score = func(dtest.get_label(), pred, sample_weight=None) # TODO weights
result[title] = score
return result
def optimize_params(self, cv_pairs, max_evals=None, verbose=True):
max_evals = max_evals or self.hyperopt_evals
self.trials = Trials()
self.hyperopt_eval_num, self.best_loss = 0, np.inf
_ = fmin(fn=lambda params: self.run_cv(cv_pairs, params, verbose=verbose),
space=self.space, algo=tpe.suggest, max_evals=max_evals, trials=self.trials, rseed=1)
self.best_params = self.trials.best_trial['result']['params']
self.best_n_estimators = self.trials.best_trial['result']['best_n_estimators']
return self.trials.best_trial['result']
def dump(self, preds, elementwise_losses, test_losses, file_name):
results = {'trials': self.trials, 'best_params': self.best_params,
'best_n_estimators': self.best_n_estimators,
'preds': preds, 'elementwise_losses': elementwise_losses, 'test_losses': test_losses}
with open(file_name, 'wb') as f:
pickle.dump(results, f)
def load(self, file_name):
with open(file_name, 'rb') as f:
    results = pickle.load(f)
self.trials = results['trials']
self.best_params = results['best_params']
self.best_n_estimators = results['best_n_estimators']
preds = results['preds']
elementwise_losses = results['elementwise_losses']
test_losses = results['test_losses']
return preds, elementwise_losses, test_losses
def print_result(self, result, name='', extra_keys=None):
print '%s:\n' % name
print '%s = %s' % (self.metric, result['loss'])
if 'best_n_estimators' in result.keys():
print 'best_n_estimators = %s' % result['best_n_estimators']
elif 'n_estimators' in result.keys():
print 'n_estimators = %s' % result['n_estimators']
print 'params = %s' % result['params']
if extra_keys is not None:
for k in extra_keys:
if k in result:
print "%s = %f" % (k, result[k])
def elementwise_loss(self, y, p):
if self.learning_task == 'classification':
p_ = np.clip(p, 1e-16, 1-1e-16)
return - y * np.log(p_) - (1 - y) * np.log(1 - p_)
return (y - p) ** 2
def run(self):
print 'Loading and preprocessing dataset...'
X_train, y_train, X_test, y_test, cat_cols = self.read_data()
cv_pairs, (dtrain, dtest) = self.split_and_preprocess(X_train, y_train, X_test, y_test, cat_cols)
print 'Optimizing params...'
cv_result = self.optimize_params(cv_pairs)
self.print_result(cv_result, '\nBest result on cv')
print '\nTraining algorithm with the tuned parameters for different seeds...'
preds, test_losses, elementwise_losses = [], [], []
for seed in range(5):
test_result = self.run_test(dtrain, dtest, X_test, seed=seed)
preds.append(test_result['preds'])
test_losses.append(test_result['loss'])
elementwise_losses.append(self.elementwise_loss(y_test, preds[-1]))
print 'For seed=%d Test\'s %s : %.5f' % (seed, self.metric, test_losses[-1])
print '\nTest\'s %s mean: %.5f, Test\'s %s std: %.5f' % (self.metric, np.mean(test_losses), self.metric, np.std(test_losses))
if not self.output_folder_path is None:
date = datetime.now().strftime('%Y%m%d-%H%M%S')
dataset_name = self.train_path.replace('/', ' ').strip().split()[-2]
file_name = '{}{}_results_{}_{}.pkl'.format(self.output_folder_path, self.bst_name, dataset_name, date)
self.dump(preds, elementwise_losses, test_losses, file_name)
print 'Results are saved to %s' % file_name
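# A minimal sketch of a concrete Experiment subclass (illustrative only; the real
# benchmarks plug in XGBoost/LightGBM/CatBoost here). It uses scikit-learn gradient
# boosting and reports a per-iteration RMSE history, which is the shape run_cv()
# and run_test() expect fit() to return.
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error

class SklearnGBExperiment(Experiment):
    def convert_to_dataset(self, data, label, cat_cols=None):
        # keep the data as a plain (features, target) pair
        return data, label

    def preprocess_params(self, params):
        return dict(params or {})

    def fit(self, params, dtrain, dtest, n_estimators, seed=0):
        X_train, y_train = dtrain
        X_test, y_test = dtest
        bst = GradientBoostingRegressor(n_estimators=n_estimators,
                                        random_state=seed, **params)
        bst.fit(X_train, y_train)
        # per-iteration RMSE on the evaluation fold
        evals_result = [np.sqrt(mean_squared_error(y_test, pred))
                        for pred in bst.staged_predict(X_test)]
        return bst, evals_result

    def predict(self, bst, dtest, X_test):
        return bst.predict(dtest[0] if X_test is None else X_test)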
|
11469108
|
import unittest
import numpy as np
import skfda
class TestsSklearn(unittest.TestCase):
def setUp(self) -> None:
unittest.TestCase.setUp(self)
self.x = np.linspace(-1, 1, 1000)[:, np.newaxis]
def _test_compare_sklearn(
self,
cov: skfda.misc.covariances.Covariance,
) -> None:
cov_sklearn = cov.to_sklearn()
cov_matrix = cov(self.x, self.x)
cov_sklearn_matrix = cov_sklearn(self.x)
np.testing.assert_array_almost_equal(cov_matrix, cov_sklearn_matrix)
def test_linear(self) -> None:
for variance in (1, 2):
for intercept in (0, 1, 2):
with self.subTest(variance=variance, intercept=intercept):
cov = skfda.misc.covariances.Linear(
variance=variance, intercept=intercept)
self._test_compare_sklearn(cov)
def test_polynomial(self) -> None:
for variance in (1, 2):
for intercept in (0, 1, 2):
for slope in (1, 2):
for degree in (1, 2, 3):
with self.subTest(
variance=variance,
intercept=intercept,
slope=slope,
degree=degree,
):
cov = skfda.misc.covariances.Polynomial(
variance=variance,
intercept=intercept,
slope=slope,
degree=degree,
)
self._test_compare_sklearn(cov)
def test_gaussian(self) -> None:
for variance in (1, 2):
for length_scale in (0.5, 1, 2):
with self.subTest(
variance=variance,
length_scale=length_scale,
):
cov = skfda.misc.covariances.Gaussian(
variance=variance,
length_scale=length_scale,
)
self._test_compare_sklearn(cov)
def test_exponential(self) -> None:
for variance in (1, 2):
for length_scale in (0.5, 1, 2):
with self.subTest(
variance=variance,
length_scale=length_scale,
):
cov = skfda.misc.covariances.Exponential(
variance=variance,
length_scale=length_scale,
)
self._test_compare_sklearn(cov)
def test_matern(self) -> None:
for variance in (1, 2):
for length_scale in (0.5, 1, 2):
for nu in (0.5, 1, 1.5, 2, 2.5, 3.5, 4.5, np.inf):
with self.subTest(
variance=variance,
length_scale=length_scale,
nu=nu,
):
cov = skfda.misc.covariances.Matern(
variance=variance,
length_scale=length_scale,
nu=nu,
)
self._test_compare_sklearn(cov)
def test_white_noise(self) -> None:
for variance in (1, 2):
with self.subTest(variance=variance):
cov = skfda.misc.covariances.WhiteNoise(variance=variance)
self._test_compare_sklearn(cov)
|
11469148
|
import random
import dgl
import numpy as np
import torch as th
from torch.utils.data import DataLoader
from dgl.nn.functional import edge_softmax
from openhgnn.models import build_model
import torch.nn.functional as F
from . import BaseFlow, register_flow
from ..tasks import build_task
from sklearn.metrics import f1_score, roc_auc_score
@register_flow("kgcntrainer")
class KGCNTrainer(BaseFlow):
"""Demo flows."""
def __init__(self, args):
super(KGCNTrainer, self).__init__(args)
self.in_dim = args.in_dim
self.out_dim = args.out_dim
self.l2_weight = args.weight_decay
self.task = build_task(args)
if args.dataset == 'LastFM4KGCN':
self.ratingsGraph = self.task.dataset.g_1.to(self.device)
self.neighborList = [8]
self.trainIndex, self.evalIndex, self.testIndex = self.task.get_idx()
self.model = build_model(self.model_name).build_model_from_args(self.args, self.hg).to(self.device)
self.optimizer = th.optim.Adam(self.model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
def KGCNCollate(self, index):
item, user = self.ratingsGraph.find_edges(th.stack(index).to(self.device))
label = self.ratingsGraph.edata['label'][th.stack(index).to(self.device)]
inputData = th.stack([user, item, label]).t().cpu().numpy()
deleteindex = []
item_indices = []
for i in range(len(inputData)):
if inputData[i][1] in item_indices:
deleteindex.append(i)
else:
item_indices.append(inputData[i][1])
inputData = np.delete(inputData, deleteindex, axis=0)
self.renew_weight(inputData)
sampler = dgl.dataloading.MultiLayerNeighborSampler(self.neighborList)
dataloader = dgl.dataloading.NodeDataLoader(
self.hg, list(inputData[:, 1]), sampler,
batch_size=1024,
shuffle=True,
drop_last=False,
num_workers=0)
block = next(iter(dataloader))[2]
return block, inputData
def preprocess(self, dataIndex):
self.user_emb_matrix, self.entity_emb_matrix, self.relation_emb_matrix = self.model.get_embeddings()
self.hg.ndata['embedding'] = self.entity_emb_matrix
dataloader = DataLoader(dataIndex, batch_size=self.args.batch_size, shuffle=True, collate_fn=self.KGCNCollate)
self.dataloader_it = iter(dataloader)
return
def train(self):
epoch_iter = self.args.epoch_iter
for self.epoch in range(epoch_iter):
self._mini_train_step()
print('train_data:')
self.evaluate(self.trainIndex)
print('eval_data:')
self.evaluate(self.evalIndex)
# print('test_data:')
# self.evaluate(self.testIndex)
pass
def _mini_train_step(self,):
# random.shuffle(self.trainIndex)
self.preprocess(self.trainIndex)
L = 0
import time
t0 = time.time()
for block, inputData in self.dataloader_it:
t1 =time.time()
self.labels, self.scores = self.model(block, inputData)
t2 =time.time()
loss = self.loss_calculation()
t3 = time.time()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
t4 = time.time()
L = L+loss
#print("t1_{},t2_{}, t3_{}, t4_{}".format(t1-t0, t2-t1, t3-t2, t4-t3))
with open('result.txt', 'a') as f:
    res = "step: " + str(self.epoch) + ' full_Loss: ' + str(L) + '\n'
    f.write(res)
print("step:", self.epoch, 'full_Loss:', L)
def evaluate(self, dataIndex):
self.preprocess(dataIndex)
labelsList = []
scoresList = []
for block, inputData in self.dataloader_it:
self.labels, self.scores = self.model(block, inputData)
labelsList+=(self.labels.detach().cpu().numpy().tolist())
scoresList+=(th.sigmoid(self.scores).detach().cpu().numpy().tolist())
auc = roc_auc_score(y_true = np.array(labelsList), y_score = np.array(scoresList))
for i in range(len(scoresList)):
if scoresList[i] >= 0.5:
scoresList[i] = 1
else:
scoresList[i] = 0
f1 = f1_score(y_true = np.array(labelsList), y_pred = np.array(scoresList))
with open('result.txt', 'a') as f:
    f.write('auc:' + str(auc) + ' f1:' + str(f1) + '\n')
print('auc:', auc, ' f1:', f1)
return auc, f1
def loss_calculation(self):
labels, logits = self.labels, self.scores
# output = -labels * th.log(th.sigmoid(logits)) - (1-labels) * th.log(1-th.sigmoid(logits))
output = F.binary_cross_entropy_with_logits(logits,labels.to(th.float32))
self.base_loss = th.mean(output)
self.l2_loss = th.norm(self.user_emb_matrix) ** 2/2 + th.norm(self.entity_emb_matrix) **2/2 + th.norm(self.relation_emb_matrix) ** 2/2
'''
for aggregator in self.aggregators:
self.l2_loss = self.l2_loss + torch.norm(aggregator.weights) **2/2
'''
loss = self.base_loss + self.l2_weight * self.l2_loss
return loss
def _full_train_setp(self):
pass
def _test_step(self, split=None, logits=None):
pass
def renew_weight(self,inputData):
user_indices = inputData[:, 0]
self.user_embeddings = self.user_emb_matrix[user_indices]
weight = th.mm(self.relation_emb_matrix[self.hg.edata['relation'].cpu().numpy()], self.user_embeddings.t())
weight = weight.unsqueeze(dim=-1)
self.hg.edata['weight'] = edge_softmax(self.hg, th.as_tensor(weight))
|
11469159
|
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional
import graphene
from django.core.exceptions import ValidationError
from ...core.exceptions import InsufficientStock
from ...order.error_codes import OrderErrorCode
from ...order.utils import get_valid_shipping_methods_for_order
from ...plugins.manager import PluginsManager
from ...product.models import Product, ProductChannelListing, ProductVariant
from ...shipping.interface import ShippingMethodData
from ...shipping.utils import convert_to_shipping_method_data
from ...warehouse.availability import check_stock_and_preorder_quantity
from ..core.validators import validate_variants_available_in_channel
if TYPE_CHECKING:
from ...channel.models import Channel
from ...order.models import Order
T_ERRORS = Dict[str, List[ValidationError]]
def validate_total_quantity(order: "Order", errors: T_ERRORS):
if order.get_total_quantity() == 0:
errors["lines"].append(
ValidationError(
"Could not create order without any products.",
code=OrderErrorCode.REQUIRED.value,
)
)
def get_shipping_method_availability_error(
order: "Order",
method: Optional["ShippingMethodData"],
manager: "PluginsManager",
):
"""Validate whether shipping method is still available for the order."""
is_valid = False
if method:
valid_methods_ids = {
m.id
for m in get_valid_shipping_methods_for_order(
order,
order.channel.shipping_method_listings.all(),
manager,
)
if m.active
}
is_valid = method.id in valid_methods_ids
if not is_valid:
return ValidationError(
"Shipping method cannot be used with this order.",
code=OrderErrorCode.SHIPPING_METHOD_NOT_APPLICABLE.value,
)
def validate_shipping_method(
order: "Order", errors: T_ERRORS, manager: "PluginsManager"
):
if not order.shipping_method:
error = ValidationError(
"Shipping method is required.",
code=OrderErrorCode.SHIPPING_METHOD_REQUIRED.value,
)
elif (
order.shipping_address
and order.shipping_address.country.code
not in order.shipping_method.shipping_zone.countries
):
error = ValidationError(
"Shipping method is not valid for chosen shipping address",
code=OrderErrorCode.SHIPPING_METHOD_NOT_APPLICABLE.value,
)
elif not order.shipping_method.shipping_zone.channels.filter(id=order.channel_id):
error = ValidationError(
"Shipping method not available in given channel.",
code=OrderErrorCode.SHIPPING_METHOD_NOT_APPLICABLE.value,
)
else:
error = get_shipping_method_availability_error(
order,
convert_to_shipping_method_data(
order.shipping_method,
order.channel.shipping_method_listings.filter(
shipping_method=order.shipping_method
).last(),
),
manager,
)
if error:
errors["shipping"].append(error)
def validate_billing_address(order: "Order", errors: T_ERRORS):
if not order.billing_address:
errors["order"].append(
ValidationError(
"Can't finalize draft with no billing address.",
code=OrderErrorCode.BILLING_ADDRESS_NOT_SET.value,
)
)
def validate_shipping_address(order: "Order", errors: T_ERRORS):
if not order.shipping_address:
errors["order"].append(
ValidationError(
"Can't finalize draft with no shipping address.",
code=OrderErrorCode.ORDER_NO_SHIPPING_ADDRESS.value,
)
)
def validate_order_lines(order: "Order", country: str, errors: T_ERRORS):
for line in order.lines.all():
if line.variant is None:
errors["lines"].append(
ValidationError(
"Could not create orders with non-existing products.",
code=OrderErrorCode.NOT_FOUND.value,
)
)
elif line.variant.track_inventory:
try:
check_stock_and_preorder_quantity(
line.variant, country, order.channel.slug, line.quantity
)
except InsufficientStock as exc:
errors["lines"].extend(
prepare_insufficient_stock_order_validation_errors(exc)
)
def validate_variants_is_available(order: "Order", errors: T_ERRORS):
variants_ids = {line.variant_id for line in order.lines.all()}
try:
validate_variants_available_in_channel(
variants_ids, order.channel_id, OrderErrorCode.NOT_AVAILABLE_IN_CHANNEL
)
except ValidationError as e:
errors["lines"].extend(e.error_dict["lines"])
def validate_product_is_published(order: "Order", errors: T_ERRORS):
variant_ids = [line.variant_id for line in order.lines.all()]
unpublished_product = Product.objects.filter(
variants__id__in=variant_ids
).not_published(order.channel.slug)
if unpublished_product.exists():
errors["lines"].append(
ValidationError(
"Can't finalize draft with unpublished product.",
code=OrderErrorCode.PRODUCT_NOT_PUBLISHED.value,
)
)
def validate_product_is_published_in_channel(
variants: Iterable[ProductVariant], channel: "Channel"
):
if not channel:
raise ValidationError(
{
"channel": ValidationError(
"Can't add product variant for draft order without channel",
code=OrderErrorCode.REQUIRED.value,
)
}
)
variant_ids = [variant.id for variant in variants]
unpublished_product = list(
Product.objects.filter(variants__id__in=variant_ids).not_published(channel.slug)
)
if unpublished_product:
unpublished_variants = ProductVariant.objects.filter(
product_id__in=unpublished_product, id__in=variant_ids
).values_list("pk", flat=True)
unpublished_variants_global_ids = [
graphene.Node.to_global_id("ProductVariant", unpublished_variant)
for unpublished_variant in unpublished_variants
]
raise ValidationError(
{
"lines": ValidationError(
"Can't add product variant that are not published in "
"the channel associated with this draft order.",
code=OrderErrorCode.PRODUCT_NOT_PUBLISHED.value,
params={"variants": unpublished_variants_global_ids},
)
}
)
def validate_variant_channel_listings(
variants: Iterable[ProductVariant], channel: "Channel"
):
if not channel:
raise ValidationError(
{
"channel": ValidationError(
"Can't add product variant for draft order without channel",
code=OrderErrorCode.REQUIRED.value,
)
}
)
variant_ids = {variant.id for variant in variants}
validate_variants_available_in_channel(
variant_ids, channel.id, OrderErrorCode.NOT_AVAILABLE_IN_CHANNEL
)
def validate_product_is_available_for_purchase(order: "Order", errors: T_ERRORS):
invalid_lines = []
for line in order.lines.all():
variant = line.variant
if not variant:
continue
product_channel_listing = ProductChannelListing.objects.filter(
channel_id=order.channel_id, product_id=variant.product_id
).first()
if not (
product_channel_listing
and product_channel_listing.is_available_for_purchase()
):
invalid_lines.append(graphene.Node.to_global_id("OrderLine", line.pk))
if invalid_lines:
errors["lines"].append(
ValidationError(
"Can't finalize draft with product unavailable for purchase.",
code=OrderErrorCode.PRODUCT_UNAVAILABLE_FOR_PURCHASE.value,
params={"order_lines": invalid_lines},
)
)
def validate_channel_is_active(channel: "Channel", errors: T_ERRORS):
if not channel.is_active:
errors["channel"].append(
ValidationError(
"Cannot complete draft order with inactive channel.",
code=OrderErrorCode.CHANNEL_INACTIVE.value,
)
)
def validate_draft_order(order: "Order", country: str, manager: "PluginsManager"):
"""Check if the given order contains the proper data.
- Has proper customer data,
- Shipping address and method are set up,
- Product variants for order lines still exists in database.
- Product variants are available in requested quantity.
- Product variants are published.
Returns a list of errors if any were found.
"""
errors: T_ERRORS = defaultdict(list)
validate_billing_address(order, errors)
if order.is_shipping_required():
validate_shipping_address(order, errors)
validate_shipping_method(order, errors, manager)
validate_total_quantity(order, errors)
validate_order_lines(order, country, errors)
validate_channel_is_active(order.channel, errors)
validate_product_is_published(order, errors)
validate_product_is_available_for_purchase(order, errors)
validate_variants_is_available(order, errors)
if errors:
raise ValidationError(errors)
def prepare_insufficient_stock_order_validation_errors(exc):
errors = []
for item in exc.items:
order_line_global_id = (
graphene.Node.to_global_id("OrderLine", item.order_line.pk)
if item.order_line
else None
)
warehouse_global_id = (
graphene.Node.to_global_id("Warehouse", item.warehouse_pk)
if item.warehouse_pk
else None
)
errors.append(
ValidationError(
f"Insufficient product stock: {item.order_line or item.variant}",
code=OrderErrorCode.INSUFFICIENT_STOCK,
params={
"order_lines": [order_line_global_id]
if order_line_global_id
else [],
"warehouse": warehouse_global_id,
},
)
)
return errors
|
11469163
|
class CodeCheckingParameterServiceData(object,IDisposable):
""" The data needed by code checking server to perform code checking. """
def Dispose(self):
""" Dispose(self: CodeCheckingParameterServiceData) """
pass
def GetCurrentElements(self):
"""
GetCurrentElements(self: CodeCheckingParameterServiceData) -> IList[ElementId]
Returns the list of Ids of the current elements.
Returns: Ids of the current elements. Contains the analytical model element to which the
code checking parameter belongs.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: CodeCheckingParameterServiceData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Document=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The current document.
Get: Document(self: CodeCheckingParameterServiceData) -> Document
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: CodeCheckingParameterServiceData) -> bool
"""
|
11469193
|
from django import forms
from .models import Set, Card
from random import randrange
class SetForm(forms.ModelForm):
name = forms.CharField(widget=forms.TextInput, label='')
color = forms.CharField(widget=forms.TextInput, label='')
class Meta:
model = Set
fields = ['name']
class CardForm(forms.ModelForm):
front = forms.CharField(widget=forms.TextInput, label='')
back = forms.CharField(widget=forms.TextInput, label='')
class Meta:
model = Card
fields = ['front', 'back']
|
11469214
|
import unittest
from satella.coding import static_var
class FunTestTest(unittest.TestCase):
def test_fun_static_function(self):
@static_var("counter", 2)
def static_fun(a):
static_fun.counter += 1
return a
static_fun(2)
static_fun(3)
self.assertEqual(static_fun.counter, 4)
def test_fun_static_method(self):
class MyClass(object):
@static_var("counter", 2)
def my_method(self):
MyClass.my_method.counter += 1
return self
a = MyClass()
a.my_method()
a.my_method()
self.assertEqual(MyClass.my_method.counter, 4)
|
11469239
|
import sqlite3
try:
conexion = sqlite3.connect('RyMDB.db')
cursor = conexion.cursor()
print('Connected to SQLite')
query = 'SELECT sqlite_version();'
cursor.execute(query)
rows = cursor.fetchall()
print('SQLite version: ', rows)
cursor.close()
except sqlite3.Error as error:
print('Connection error!', error)
finally:
if (conexion):
conexion.close()
print('SQLite connection closed\n')
try:
conexion = sqlite3.connect('RyMDB.db')
cursor = conexion.cursor()
print('Connected to SQLite')
# create characters table
tablaper = '''CREATE TABLE IF NOT EXISTS Personajes (
id TEXT,
name TEXT,
status TEXT,
species TEXT,
type TEXT,
gender TEXT,
OriginName TEXT,
OriginUrl TEXT,
LocationName TEXT,
LocationUrl TEXT,
image TEXT,
episodes TEXT,
url TEXT,
created TEXT
);'''
# create locations table
tablaloc = '''CREATE TABLE IF NOT EXISTS Locaciones (
id TEXT,
name TEXT,
type TEXT,
dimension TEXT,
residents TEXT,
url TEXT,
created TEXT
);'''
# create episodes table
tablaepi = '''CREATE TABLE IF NOT EXISTS Episodios (
id TEXT,
name TEXT,
air_date TEXT,
episode TEXT,
characters TEXT,
url TEXT,
created TEXT
);'''
cursor.execute(tablaper)
cursor.execute(tablaloc)
cursor.execute(tablaepi)
conexion.commit()
print('Tables created successfully')
cursor.close()
except sqlite3.Error as error:
print('Connection error!', error)
finally:
if (conexion):
conexion.close()
print('SQLite connection closed\n')
|
11469245
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
# In this test, we check and make sure that the scale parameter is applied properly. Here, we check and make sure
# the mse or logloss obtained from two models built with different scale parameters should be different.
def test_gam_scale_parameters():
print("Checking logloss for binomial with different scale parameters")
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C21"
h2o_data["C21"] = h2o_data["C21"].asfactor()
buildModelScaleParam(h2o_data, myY, ["C11", "C12", "C13"], 'binomial')
print("Checking mse for gaussian with different scale parameters")
h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C21"
buildModelScaleParam(h2o_data, myY, ["C11", "C12", "C13"], 'gaussian')
print("Checking logloss for multinomial with different scale parameters")
h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/glm_test/multinomial_10_classes_10_cols_10000_Rows_train.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C11"
h2o_data["C11"] = h2o_data["C11"].asfactor()
buildModelScaleParam(h2o_data, myY, ["C6", "C7", "C8"], 'multinomial')
print("gam scale parameter test completed successfully")
def buildModelScaleParam(train_data, y, gamX, family):
numKnots = [5,6,7]
x=["C1","C2"]
h2o_model = H2OGeneralizedAdditiveEstimator(family=family, gam_columns=gamX, scale = [0.001, 0.001, 0.001], num_knots=numKnots)
h2o_model.train(x=x, y=y, training_frame=train_data)
h2o_model2 = H2OGeneralizedAdditiveEstimator(family=family, gam_columns=gamX, scale = [10, 10, 10], num_knots=numKnots)
h2o_model2.train(x=x, y=y, training_frame=train_data)
if family == 'multinomial' or family == 'binomial':
logloss1 = h2o_model.logloss()
logloss2 = h2o_model2.logloss()
assert not(logloss1 == logloss2), "logloss from models with different scale parameters should be different but is not."
else:
mse1 = h2o_model.mse()
mse2 = h2o_model2.mse()
assert not(mse1 == mse2), "mse from models with different scale parameters should be different but is not."
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gam_scale_parameters)
else:
test_gam_scale_parameters()
|
11469302
|
import inspect
from ..exceptions import DumpException
from .Dump import Dump
class Dumper:
def __init__(self, application):
self.app = application
self.dumps = []
def clear(self):
"""Clear all dumped data"""
self.dumps = []
return self
def dd(self, *objects):
"""Dump all provided args and die, raising a DumpException."""
self._dump(*objects)
raise DumpException()
def dump(self, *objects):
"""Dump all provided args and continue code execution. This does not raise a DumpException."""
dumps = self._dump(*objects)
# output dumps in console
for dump in dumps:
print(dump)
return dumps
def get_dumps(self, ascending=False):
"""Get all dumps as Dump objects."""
if ascending:
return self.dumps
else:
new_dumps = self.dumps.copy()
new_dumps.reverse()
return new_dumps
def last(self):
"""Return last added dump."""
return self.dumps[-1]
def get_serialized_dumps(self, ascending=False):
"""Get all dumps as dict."""
return list(
map(lambda dump: dump.serialize(), self.get_dumps(ascending=ascending))
)
def _dump(self, *objects):
# get origin of dumped objects (go up 2 frames); grab the frame record once
caller = inspect.stack()[2]
function, filename, line = caller.function, caller.filename, caller.lineno
# get names of dumped objects from the caller's local variables (go up 2 frames)
caller_locals = inspect.currentframe().f_back.f_back.f_locals
names = {}
for name, var in caller_locals.items():
    names.update({str(var): name})
named_objects = {}
for obj in objects:
# for variables dumped without name, use their type
default = (
f"<class '{obj.__name__}'>" if inspect.isclass(obj) else str(type(obj))
)
name = names.get(str(obj), default)
named_objects.update({name: obj})
dump = Dump(
named_objects,
function,
filename,
line,
)
self.dumps.append(dump)
return self.dumps
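# Minimal usage sketch (illustrative only; the application container argument is
# merely stored by Dumper, so None suffices here).
def _example_usage():
    dumper = Dumper(None)
    user_count = 42
    dumper.dump(user_count)  # records a Dump for `user_count` and prints it
    return dumper.get_serialized_dumps()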
|
11469324
|
import sys, traceback
from timeit import default_timer as timer
from ytmusiclibtracker.common import log
from ytmusiclibtracker.create_library_changelog import create_library_changelog
from ytmusiclibtracker.export_playlists import export_to_csv
def changelog():
create_library_changelog()
def export():
export_to_csv()
def show_exception_and_exit(exc_type, exc_value, tb):
traceback.print_exception(exc_type, exc_value, None)
input('\nPress Enter to exit...')
sys.exit(-1)
def main():
sys.excepthook = show_exception_and_exit
log('Welcome to YTMusic-Lib-Tracker!\n')
start = timer()
export_to_csv()
create_library_changelog()
end = timer()
log('END')
log('-----------------------------------------------------------------------', True)
log('All tasks have been completed. Time: ' + str(end - start) + ' sec.', True)
input("Press Enter to continue...")
sys.exit()
if __name__ == "__main__":
main()
|
11469333
|
from brownie import *
import random
import numpy as np
from bisect import bisect_left
from helpers import *
#global variables
day = 24
month = 24 * 30
year = 24 * 365
period = year
#number of runs in simulation
#n_sim = 8640
n_sim = year
# number of liquidations for each call to `liquidateTroves`
NUM_LIQUIDATIONS = 10
LUSD_GAS_COMPENSATION = 200.0
MIN_NET_DEBT = 1800.0
MAX_FEE = Wei(1e18)
"""# Ether price (exogenous)
Ether is the collateral for LUSD. The ether price $P_t^e$ follows
> $P_t^e = P_{t-1}^e (1+\zeta_t^e)(1+\sigma_t^e)$,
where $\zeta_t^e \sim N(0, sd\_ether)$ represents the ether price shock and $\sigma_t^e$ the drift of the ether price. At the end of the year, the expected ether price is:
> $E(P_{8760}^e) = P_0^e \cdot (1 +$ drift_ether$)^{8760}$
"""
#ether price
price_ether_initial = 2000
price_ether = [price_ether_initial]
sd_ether=0.02
#drift_ether = 0.001
# 4 stages:
# growth
# crash
# growth
# decrease
period1 = 2 * month
drift_ether1 = 0.001
period2 = period1 + 7 * day
drift_ether2 = -0.02
period3 = 6 * month
drift_ether3 = 0.0013
period4 = period
drift_ether4 = -0.0002
"""# LQTY price
In the first month, the price of LQTY follows
> $P_t^q = P_{t-1}^q (1+\zeta_t^q)(1+\sigma_t^q)$.
Note that $\zeta_t^q \sim N(0, sd\_LQTY)$ represents the LQTY price shock and $\sigma_t^q$ the drift. Here, $\sigma_t^q =$ drift_LQTY, so that the expected LQTY price increases from price_LQTY_initial to the following at the end of the first month:
> $E(P_{720}^q) = $price_LQTY_initial$ \cdot (1+$ drift_LQTY$)^{720}$
The LQTY price from the second month on is endogenously determined.
"""
#LQTY price & airdrop
price_LQTY_initial = 0.4
price_LQTY = [price_LQTY_initial]
sd_LQTY=0.005
drift_LQTY = 0.0035
supply_LQTY=[0]
LQTY_total_supply=100000000
"""**LQTY Endogenous Price**
The staked LQTY pool earning consists of the issuance fee revenue and redemption fee revenue
> $R_t^q = R_t^i + R_t^r.$
From period 721 onwards, using the data in the last 720 periods (i.e. the last 30 days), we can calculate the annualized earning
> $$E_t=\frac{365}{30}\sum_{\tau=t-720}^{t-1}R_\tau^q.$$
For example, in period 721 (the first hour of the second month), we can calculate the annualized earning
> $$E_{721}=\frac{365}{30}\sum_{\tau=1}^{720}R_\tau^q.$$
In period 722 (the second hour of the second month), we can calculate the annualized earning
> $$E_{722}=\frac{365}{30}\sum_{\tau=2}^{721}R_\tau^q.$$
The annualized earning $E_t$ takes into account the last 720 periods' earning only and then annualize it to represent the whole year's revenue.
Only the latest 720 periods matter! The earlier ones become irrelevant over time.
The P/E ratio is defined as follows
> $$r_t=r^{PE}(1 + \zeta_t^{PE}),$$
where $r^{PE} =$ PE_ratio and the shock is disabled, $\zeta_t^{PE} = 0$ (it was originally drawn as $\zeta_t^{PE} \sim N(0, 0.1)$).
> $$r_t=\frac{LQTY Market Cap}{Annualized Earning}=\frac{MC_t}{E_t}$$
> $MC_t=P_t^q \cdot$ LQTY_total_supply
Therefore, the LQTY price dynamics is determined
> $$P_t^q=discount \cdot \frac{r^{PE}}{LQTY\_total\_supply}E_t$$
Interpretation: The denominator implies that with more LQTY tokens issued, LQTY price decreases. However, the depreciation effect can be counteracted by the growth of the earning.
"""
#PE ratio
PE_ratio = 50
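# Hedged sketch of the endogenous LQTY price formula described above; the
# simulation proper derives E_t (the annualized earning) from the recorded fee
# data in its main loop, further below. `annualized_earning` stands in for E_t.
def lqty_price_endogenous(annualized_earning, discount=1.0):
    return discount * PE_ratio * annualized_earning / LQTY_total_supply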
"""# Liquidity Pool
The demand of tokens from liquidity pool is defined by
> $$D_t^l = D_{t-1}^l (1+\zeta_t^l) (1+\sigma_t^l) (\frac{P_t^l}{P_{t-1}^l})^\delta, \\
D_0^l = liquidity\_initial$$
where $\zeta_t^l \sim N(0, sd\_liquidity)$ is the shock in the liquidity pool, $1+\sigma_t^l = drift\_liquidity$ and $\delta \leq -1$.
"""
# liquidity pool
liquidity_initial=0
sd_liquidity=0.001
#drift_liquidity=1.0003
drift_liquidity=1
delta = -20
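# Hedged sketch of the liquidity-pool demand update described above (the
# simulation applies this inside its main loop; the shock is drawn there).
def liquidity_demand_next(demand_prev, price_lusd, price_lusd_prev, shock):
    return demand_prev * (1 + shock) * drift_liquidity * (price_lusd / price_lusd_prev) ** delta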
"""# Stability Pool
The demand of tokens from stability pool is defined by
>$$D_t^s = D_{t-1}^s (1+\zeta_t^s) (1+R_{t-1}^s-R_{t}^n)^\theta, \\
D_0^s = stability\_initial$$
where $\zeta_t^s \sim N(0, sd\_stability)$ is the shock in the liquidity pool.
During the first month the formula above is also multiplied by a drift factor, $drift\_stability$.
$R_{t-1}^s$ is the return in the stability pool, which consists of liquidation gain and airdrop LQTY gain.
The natural rate of the stability pool follows
> $$R_{t}^n=R_{t-1}^n(1+\zeta_t^n)\geq 0,$$
where $\zeta_t^n \sim N(0, sd\_return)$ is the natural rate shock and $R_{0}^n = natural\_rate\_initial$.
The natural rate compensates the opportunity cost and risk undertaken by the stability pool providers. It resembles the risk-free government bond return in the macroeconomics model. Stability pool depositors compare the return of the stability pool with the outside investment opportunities. A positive shock $\zeta_t^n$ implies investment on other platforms, e.g. Compound, Uniswap, Aave, yield higher returns, thus making the stability pool less appealing.
"""
#stability pool
initial_return = 0.2
sd_return = 0.001
stability_initial = 1000
sd_stability = 0.001
drift_stability = 1.002
theta = 0.001
#natural rate
natural_rate_initial = 0.2
natural_rate = [natural_rate_initial]
sd_natural_rate = 0.002
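# Hedged sketch of the stability-pool demand update described above (shock,
# previous return and the natural rate come from the exogenous series below;
# the first-month drift_stability factor is omitted here).
def stability_demand_next(demand_prev, shock, return_prev, natural_rate_now):
    return demand_prev * (1 + shock) * (1 + return_prev - natural_rate_now) ** theta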
"""# Trove pool
Each trove is defined by five numbers
> (collateral in ether, debt in LUSD, collateral ratio target, rational inattention, collateral ratio)
which can be denoted by
> ($Q_t^e(i)$, $Q_t^d(i)$, $CR^*(i)$, $\tau(i)$, $CR_t(i)$).
**Open Troves**
The amount of new troves opened in period t is denoted by $N_t^o$, which follows
> $N_t^o = \begin{cases}
initial\_open &\mbox{if } t = 0\\
max(0, n\_steady \cdot (1+\zeta_t^o)) &\mbox{if } P_{t-1}^l \leq 1 + f_t^i\\
max(0, n\_steady \cdot (1+\zeta_t^o)) + \alpha (P_{t-1}^l - (1 + f_t^i)) N_t &\mbox{otherwise }
\end{cases}
$
where the shock $\zeta_t^o \sim N(0,sd\_opentroves)$.
$R_t^o$ represents the break-even natural rate of opening troves and $f_t^i$ represents the issuance fee.
$P_{t}^{l}$ is the price of LUSD.
$N_t^o$ is rounded to an integer.
---
The amount of LUSD tokens generated by a new trove is
> $$Q_t^d(i) = \frac{P_t^e Q_t^e(i)}{CR^*(i)}.$$
---
The distribution of ether $Q_t^e(i)$ follows
> $Q_t^e(i) \sim \Gamma(k, \theta)$
So that $E(Q_t^e) = collateral\_gamma\_k \cdot collateral\_gamma\_theta$ and $SD(Q_t^e) = \sqrt{collateral\_gamma\_k} \cdot collateral\_gamma\_theta$
---
$CR^*(i)$ follows a chi-squared distribution with $df=target\_cr\_chi\_square\_df$, i.e. $CR^*(i) \sim \chi_{df}^2$, so that $CR^*(i)\geq target\_cr\_a$:
> $CR^*(i) = target\_cr\_a + target\_cr\_b \cdot \chi_{df}^2$.
Then:\
$E(CR^*(i)) = target\_cr\_a + target\_cr\_b * target\_cr\_chi\_square\_df$, \\
$SD(CR^*(i))=target\_cr\_b*\sqrt{2*target\_cr\_chi\_square\_df}$
---
Each trove is associated with a rational inattention parameter $\tau(i)$.
The collateral ratio of the existing troves vary with the ether price $P_t^e$
> $$CR_t(i) = \frac{P_t^e Q_t^e(i)}{Q_t^d(i)}.$$
If the collateral ratio falls in the range
> $CR_t(i) \in [CR^*(i)-\tau(i), CR^*(i)+2\tau(i)]$,
no action taken. Otherwise, the trove owner readjusts the collateral ratio so that
> $CR_t(i)=CR^*(i)$.
The distribution of $\tau(i)$ follows gamma distribution $\Gamma(k,\theta)$ with mean of $k\theta$ and standard error of $\sqrt{k\theta^2}$.
"""
#open troves
initial_open=10
sd_opentroves=0.5
n_steady=0.5
collateral_gamma_k = 10
collateral_gamma_theta = 500
target_cr_a = 1.1
target_cr_b = 0.03
target_cr_chi_square_df = 16
rational_inattention_gamma_k = 4
rational_inattention_gamma_theta = 0.08
#sensitivity to LUSD price & issuance fee
alpha = 0.3
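# Hedged sketch: drawing one new trove's target collateral ratio from the
# chi-squared specification above (the open-trove routine itself lives in the
# simulation loop further below).
def sample_target_cr(rng=np.random):
    return target_cr_a + target_cr_b * rng.chisquare(target_cr_chi_square_df)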
"""**Close Troves**
The amount of troves closed in period t is denoted as $N_t^c$, which follows
> $$N_t^c = \begin{cases}
U(0, 1) &\mbox{if } t \in [0,240] \\
max(0, n\_steady \cdot (1+\zeta_t^c)) &\mbox{if } P_{t-1}^l \geq 1 \\
max(0, n\_steady \cdot (1+\zeta_t^c)) + \beta(1 - P_{t-1}^l)N_t &\mbox{otherwise }
\end{cases} $$
where the shock $\zeta_t^c \sim N(0, sd\_closetroves)$.
$N_t^c$ is rounded to an integer.
"""
#close troves
sd_closetroves=0.5
#sensitivity to LUSD price
beta = 0.2
"""**Trove Liquidation**
At the beginning of each period,
right after the feed of ether price,
the system checks the collateral ratio of the existing troves in the
trove pool.
If the collateral ratio falls below 110%, i.e.
> $$CR_t(i) = \frac{P_t^e Q_t^e(i)}{Q_t^d(i)}<110\%,$$
this trove is liquidated. Namely, it is eliminated from the trove pool.
Denote the amount of liquidated troves by $N_t^l$. The sum of the debt amounts to
> $$Q_t^d=\sum_i^{N_t^l} Q_t^d(i)$$
The amount of ether is
> $$Q_t^e=\sum_i^{N_t^l} Q_t^e(i)$$
The debt $Q_t^d$ is paid by the stability pool in exchange for the collateral $Q_t^e$. Therefore, the return of the previous period's stability pool is
> $$R_{t-1}^s=\frac{R_t^l+R_t^a}{P_{t-1}^lD_{t-1}^s}$$
where:
- $R_t^l=P_t^eQ_t^e-P_{t-1}^lQ_t^d$ is the liquidation gain
- $R_t^a=P_{t}^q\hat{Q}_t^q$ is the airdrop gain, $\hat{Q}_t^q=1000$ denotes the amount of LQTY token airdropped to the stability pool providers
- $D_{t}^{s}$ is the total amount of LUSD deposited in the Stability Pool (see below)
# Exogenous Factors
Ether Price
"""
#ether price
for i in range(1, period1):
random.seed(2019375+10000*i)
shock_ether = random.normalvariate(0, sd_ether)
price_ether.append(price_ether[i-1] * (1 + shock_ether) * (1 + drift_ether1))
print(" - ETH period 1 -")
print(f"Min ETH price: {min(price_ether[1:period1])}")
print(f"Max ETH price: {max(price_ether[1:period1])}")
for i in range(period1, period2):
random.seed(2019375+10000*i)
shock_ether = random.normalvariate(0, sd_ether)
price_ether.append(price_ether[i-1] * (1 + shock_ether) * (1 + drift_ether2))
print(" - ETH period 2 -")
print(f"Min ETH price: {min(price_ether[period1:period2])}")
print(f"Max ETH price: {max(price_ether[period1:period2])}")
for i in range(period2, period3):
random.seed(2019375+10000*i)
shock_ether = random.normalvariate(0, sd_ether)
price_ether.append(price_ether[i-1] * (1 + shock_ether) * (1 + drift_ether3))
print(" - ETH period 3 -")
print(f"Min ETH price: {min(price_ether[period2:period3])}")
print(f"Max ETH price: {max(price_ether[period2:period3])}")
for i in range(period3, period4):
random.seed(2019375+10000*i)
shock_ether = random.normalvariate(0, sd_ether)
price_ether.append(price_ether[i-1] * (1 + shock_ether) * (1 + drift_ether4))
print(" - ETH period 4 -")
print(f"Min ETH price: {min(price_ether[period3:period4])}")
print(f"Max ETH price: {max(price_ether[period3:period4])}")
"""Natural Rate"""
#natural rate
for i in range(1, period):
random.seed(201597+10*i)
shock_natural = random.normalvariate(0,sd_natural_rate)
natural_rate.append(natural_rate[i-1]*(1+shock_natural))
"""LQTY Price - First Month"""
#LQTY price
for i in range(1, month):
random.seed(2+13*i)
shock_LQTY = random.normalvariate(0,sd_LQTY)
price_LQTY.append(price_LQTY[i-1]*(1+shock_LQTY)*(1+drift_LQTY))
"""# Troves
Liquidate Troves
"""
def is_recovery_mode(contracts, price_ether_current):
price = Wei(price_ether_current * 1e18)
return contracts.troveManager.checkRecoveryMode(price)
def pending_liquidations(contracts, price_ether_current):
last_trove = contracts.sortedTroves.getLast()
last_ICR = contracts.troveManager.getCurrentICR(last_trove, Wei(price_ether_current * 1e18))
if last_trove == ZERO_ADDRESS:
return False
if last_ICR >= Wei(15e17):
return False
if last_ICR < Wei(11e17):
return True
if not is_recovery_mode(contracts, price_ether_current):
return False
stability_pool_balance = contracts.stabilityPool.getTotalLUSDDeposits()
trove = last_trove
for i in range(NUM_LIQUIDATIONS):
debt = contracts.troveManager.getEntireDebtAndColl(trove)[0]
if stability_pool_balance >= debt:
return True
trove = contracts.sortedTroves.getPrev(trove)
ICR = contracts.troveManager.getCurrentICR(trove, Wei(price_ether_current * 1e18))
if ICR >= Wei(15e17):
return False
return False
def remove_account(accounts, active_accounts, inactive_accounts, address):
try:
active_index = next(i for i, a in enumerate(active_accounts) if accounts[a['index']] == address)
inactive_accounts.append(active_accounts[active_index]['index'])
active_accounts.pop(active_index)
except StopIteration: # TODO
print(f"\n ***Error: {address} not found in active accounts!")
def remove_accounts_from_events(accounts, active_accounts, inactive_accounts, events, field):
for event in events:
remove_account(accounts, active_accounts, inactive_accounts, event[field])
# The issuance factor F determines the curvature of the issuance curve.
# Hours in one year: 24*365 = 8760
# For 50% of remaining tokens issued each year, with hours as time units, we have:
# F ** 8760 = 0.5
# Re-arranging:
# F = 0.5 ** (1/8760)
# F = 0.99992087674
def quantity_LQTY_airdrop(index):
F = 0.99992087674
if index <= 0:
return 0
return 32e6 * (F ** (index-1) - F ** index)
def liquidate_troves(accounts, contracts, active_accounts, inactive_accounts, price_ether_current, price_LUSD, price_LQTY_current, data, index):
if len(active_accounts) == 0:
return [0, 0]
stability_pool_previous = contracts.stabilityPool.getTotalLUSDDeposits() / 1e18
stability_pool_eth_previous = contracts.stabilityPool.getETH() / 1e18
while pending_liquidations(contracts, price_ether_current):
try:
tx = contracts.troveManager.liquidateTroves(NUM_LIQUIDATIONS, { 'from': accounts[0], 'gas_limit': 8000000, 'allow_revert': True })
#print(tx.events['TroveLiquidated'])
remove_accounts_from_events(accounts, active_accounts, inactive_accounts, tx.events['TroveLiquidated'], '_borrower')
except:
print(f"TM: {contracts.troveManager.address}")
stability_pool_balance = contracts.stabilityPool.getTotalLUSDDeposits()
print(f"stability_pool_balance: {stability_pool_balance / 1e18}")
            trove = contracts.sortedTroves.getLast()  # last_trove is local to pending_liquidations(); re-fetch the riskiest trove here
for i in range(NUM_LIQUIDATIONS):
print(f"i: {i}")
debt = contracts.troveManager.getEntireDebtAndColl(trove)[0]
print(f"debt: {debt / 1e18}")
if stability_pool_balance >= debt:
print("True!")
trove = contracts.sortedTroves.getPrev(trove)
ICR = contracts.troveManager.getCurrentICR(trove, Wei(price_ether_current * 1e18))
print(f"ICR: {ICR}")
stability_pool_current = contracts.stabilityPool.getTotalLUSDDeposits() / 1e18
stability_pool_eth_current = contracts.stabilityPool.getETH() / 1e18
debt_liquidated = stability_pool_current - stability_pool_previous
ether_liquidated = stability_pool_eth_current - stability_pool_eth_previous
liquidation_gain = ether_liquidated * price_ether_current - debt_liquidated * price_LUSD
airdrop_gain = price_LQTY_current * quantity_LQTY_airdrop(index)
data['liquidation_gain'][index] = liquidation_gain
data['airdrop_gain'][index] = airdrop_gain
return_stability = calculate_stability_return(contracts, price_LUSD, data, index)
return [ether_liquidated, return_stability]
def calculate_stability_return(contracts, price_LUSD, data, index):
stability_pool_previous = contracts.stabilityPool.getTotalLUSDDeposits() / 1e18
if index == 0:
return_stability = initial_return
elif stability_pool_previous == 0:
return_stability = initial_return * 2
elif index < month:
return_stability = (year/index) * \
(sum(data['liquidation_gain'][0:index]) +
sum(data['airdrop_gain'][0:index])
) / (price_LUSD * stability_pool_previous)
else:
return_stability = (year/month) * \
(sum(data['liquidation_gain'][index - month:index]) +
sum(data['airdrop_gain'][index - month:index])
) / (price_LUSD * stability_pool_previous)
return return_stability
def isNewTCRAboveCCR(contracts, collChange, isCollIncrease, debtChange, isDebtIncrease, price):
newTCR = contracts.borrowerOperations.getNewTCRFromTroveChange(collChange, isCollIncrease, debtChange, isDebtIncrease, price)
return newTCR >= Wei(1.5 * 1e18)
"""Close Troves"""
def close_troves(accounts, contracts, active_accounts, inactive_accounts, price_ether_current, price_LUSD, index):
if len(active_accounts) == 0:
return [0]
if is_recovery_mode(contracts, price_ether_current):
return [0]
np.random.seed(208+index)
shock_closetroves = np.random.normal(0,sd_closetroves)
n_troves = contracts.sortedTroves.getSize()
if index <= 240:
number_closetroves = np.random.uniform(0,1)
elif price_LUSD >=1:
number_closetroves = max(0, n_steady * (1+shock_closetroves))
else:
number_closetroves = max(0, n_steady * (1+shock_closetroves)) + beta*(1-price_LUSD)*n_troves
number_closetroves = min(int(round(number_closetroves)), len(active_accounts) - 1)
random.seed(293+100*index)
drops = list(random.sample(range(len(active_accounts)), number_closetroves))
for i in range(0, len(drops)):
account_index = active_accounts[drops[i]]['index']
account = accounts[account_index]
amounts = contracts.troveManager.getEntireDebtAndColl(account)
coll = amounts['coll']
debt = amounts['debt']
pending = get_lusd_to_repay(accounts, contracts, active_accounts, inactive_accounts, account, debt)
if pending == 0:
if isNewTCRAboveCCR(contracts, coll, False, debt, False, floatToWei(price_ether_current)):
contracts.borrowerOperations.closeTrove({ 'from': account })
inactive_accounts.append(account_index)
active_accounts.pop(drops[i])
if is_recovery_mode(contracts, price_ether_current):
break
return [number_closetroves]
"""Adjust Troves"""
def transfer_from_to(contracts, from_account, to_account, amount):
balance = contracts.lusdToken.balanceOf(from_account)
transfer_amount = min(balance, amount)
if transfer_amount == 0:
return amount
if from_account == to_account:
return amount
contracts.lusdToken.transfer(to_account, transfer_amount, { 'from': from_account })
pending = amount - transfer_amount
return pending
def get_lusd_to_repay(accounts, contracts, active_accounts, inactive_accounts, account, debt):
lusdBalance = contracts.lusdToken.balanceOf(account)
if debt > lusdBalance:
pending = debt - lusdBalance
# first try to withdraw from SP
initial_deposit = contracts.stabilityPool.deposits(account)[0]
if initial_deposit > 0:
contracts.stabilityPool.withdrawFromSP(pending, { 'from': account, 'gas_limit': 8000000, 'allow_revert': True })
# it can only withdraw up to the deposit, so we check the balance again
lusdBalance = contracts.lusdToken.balanceOf(account)
pending = debt - lusdBalance
# try with whale
pending = transfer_from_to(contracts, accounts[0], account, pending)
# try with active accounts, which are more likely to hold LUSD
for a in active_accounts:
if pending <= 0:
break
a_address = accounts[a['index']]
pending = transfer_from_to(contracts, a_address, account, pending)
for i in inactive_accounts:
if pending <= 0:
break
i_address = accounts[i]
pending = transfer_from_to(contracts, i_address, account, pending)
if pending > 0:
print(f"\n ***Error: not enough LUSD to repay! {debt / 1e18} LUSD for {account}")
return pending
return 0
def get_hints(contracts, coll, debt):
NICR = contracts.hintHelpers.computeNominalCR(floatToWei(coll), floatToWei(debt))
approxHint = contracts.hintHelpers.getApproxHint(NICR, 100, 0)
#print("approx hint", approxHint)
return contracts.sortedTroves.findInsertPosition(NICR, approxHint[0], approxHint[0])
def get_hints_from_amounts(accounts, contracts, active_accounts, coll, debt, price_ether_current):
ICR = coll * price_ether_current / debt
NICR = contracts.hintHelpers.computeNominalCR(floatToWei(coll), floatToWei(debt))
return get_hints_from_ICR(accounts, contracts, active_accounts, ICR, NICR)
#def get_address_from_active_index(accounts, active_accounts, index):
def index2address(accounts, active_accounts, index):
return accounts[active_accounts[index]['index']]
def get_hints_from_ICR(accounts, contracts, active_accounts, ICR, NICR):
l = len(active_accounts)
if l == 0:
return [ZERO_ADDRESS, ZERO_ADDRESS, 0]
else:
keys = [a['CR_initial'] for a in active_accounts]
i = bisect_left(keys, ICR)
#return [index2address(accounts, active_accounts, min(i, l-1)), index2address(accounts, active_accounts, max(i-1, 0)), i]
hints = contracts.sortedTroves.findInsertPosition(
NICR,
index2address(accounts, active_accounts, min(i, l-1)),
index2address(accounts, active_accounts, max(i-1, 0))
)
return [hints[0], hints[1], i]
def adjust_troves(accounts, contracts, active_accounts, inactive_accounts, price_ether_current, index):
random.seed(57984-3*index)
ratio = random.uniform(0,1)
coll_added_float = 0
issuance_LUSD_adjust = 0
for i, working_trove in enumerate(active_accounts):
account = accounts[working_trove['index']]
currentICR = contracts.troveManager.getCurrentICR(account, floatToWei(price_ether_current)) / 1e18
amounts = contracts.troveManager.getEntireDebtAndColl(account)
coll = amounts['coll'] / 1e18
debt = amounts['debt'] / 1e18
random.seed(187*index + 3*i)
p = random.uniform(0,1)
check = (currentICR - working_trove['CR_initial']) / (working_trove['CR_initial'] * working_trove['Rational_inattention'])
if check >= -1 and check <= 2:
continue
#A part of the troves are adjusted by adjusting debt
if p >= ratio:
debt_new = price_ether_current * coll / working_trove['CR_initial']
hints = get_hints_from_amounts(accounts, contracts, active_accounts, coll, debt_new, price_ether_current)
if debt_new < MIN_NET_DEBT:
continue
if check < -1:
# pay back
repay_amount = floatToWei(debt - debt_new)
pending = get_lusd_to_repay(accounts, contracts, active_accounts, inactive_accounts, account, repay_amount)
if pending == 0:
contracts.borrowerOperations.repayLUSD(repay_amount, hints[0], hints[1], { 'from': account })
elif check > 2 and not is_recovery_mode(contracts, price_ether_current):
# withdraw LUSD
withdraw_amount = debt_new - debt
withdraw_amount_wei = floatToWei(withdraw_amount)
if isNewTCRAboveCCR(contracts, 0, False, withdraw_amount_wei, True, floatToWei(price_ether_current)):
contracts.borrowerOperations.withdrawLUSD(MAX_FEE, withdraw_amount_wei, hints[0], hints[1], { 'from': account })
rate_issuance = contracts.troveManager.getBorrowingRateWithDecay() / 1e18
issuance_LUSD_adjust = issuance_LUSD_adjust + rate_issuance * withdraw_amount
#Another part of the troves are adjusted by adjusting collaterals
elif p < ratio:
coll_new = working_trove['CR_initial'] * debt / price_ether_current
hints = get_hints_from_amounts(accounts, contracts, active_accounts, coll_new, debt, price_ether_current)
if check < -1:
# add coll
coll_added_float = coll_new - coll
coll_added = floatToWei(coll_added_float)
contracts.borrowerOperations.addColl(hints[0], hints[1], { 'from': account, 'value': coll_added })
elif check > 2 and not is_recovery_mode(contracts, price_ether_current):
# withdraw ETH
coll_withdrawn = floatToWei(coll - coll_new)
if isNewTCRAboveCCR(contracts, coll_withdrawn, False, 0, False, floatToWei(price_ether_current)):
contracts.borrowerOperations.withdrawColl(coll_withdrawn, hints[0], hints[1], { 'from': account })
return [coll_added_float, issuance_LUSD_adjust]
"""Open Troves"""
def open_trove(accounts, contracts, active_accounts, inactive_accounts, supply_trove, quantity_ether, CR_ratio, rational_inattention, price_ether_current):
if len(inactive_accounts) == 0:
return
if is_recovery_mode(contracts, price_ether_current) and CR_ratio < 1.5:
return
#hints = get_hints_from_ICR(accounts, active_accounts, CR_ratio)
hints = get_hints_from_amounts(accounts, contracts, active_accounts, quantity_ether, supply_trove, price_ether_current)
coll = floatToWei(quantity_ether)
debtChange = floatToWei(supply_trove) + LUSD_GAS_COMPENSATION
lusd = get_lusd_amount_from_net_debt(contracts, floatToWei(supply_trove))
if isNewTCRAboveCCR(contracts, coll, True, debtChange, True, floatToWei(price_ether_current)):
contracts.borrowerOperations.openTrove(MAX_FEE, lusd, hints[0], hints[1],
{ 'from': accounts[inactive_accounts[0]], 'value': coll })
new_account = {"index": inactive_accounts[0], "CR_initial": CR_ratio, "Rational_inattention": rational_inattention}
active_accounts.insert(hints[2], new_account)
inactive_accounts.pop(0)
return True
return False
def open_troves(accounts, contracts, active_accounts, inactive_accounts, price_ether_current, price_LUSD, index):
random.seed(2019*index)
shock_opentroves = random.normalvariate(0,sd_opentroves)
n_troves = len(active_accounts)
rate_issuance = contracts.troveManager.getBorrowingRateWithDecay() / 1e18
coll_added = 0
issuance_LUSD_open = 0
if index <= 0:
number_opentroves = initial_open
elif price_LUSD <= 1 + rate_issuance:
number_opentroves = max(0, n_steady * (1+shock_opentroves))
else:
number_opentroves = max(0, n_steady * (1+shock_opentroves)) + \
alpha * (price_LUSD - rate_issuance - 1) * n_troves
number_opentroves = min(int(round(float(number_opentroves))), len(inactive_accounts))
for i in range(0, number_opentroves):
np.random.seed(2033 + index + i*i)
CR_ratio = target_cr_a + target_cr_b * np.random.chisquare(df=target_cr_chi_square_df)
np.random.seed(20 + 10 * i + index)
quantity_ether = np.random.gamma(collateral_gamma_k, scale=collateral_gamma_theta)
np.random.seed(209870- index + i*i)
rational_inattention = np.random.gamma(rational_inattention_gamma_k, scale=rational_inattention_gamma_theta)
supply_trove = price_ether_current * quantity_ether / CR_ratio
if supply_trove < MIN_NET_DEBT:
supply_trove = MIN_NET_DEBT
quantity_ether = CR_ratio * supply_trove / price_ether_current
issuance_LUSD_open = issuance_LUSD_open + rate_issuance * supply_trove
if open_trove(accounts, contracts, active_accounts, inactive_accounts, supply_trove, quantity_ether, CR_ratio, rational_inattention, price_ether_current):
coll_added = coll_added + quantity_ether
return [coll_added, issuance_LUSD_open]
"""# LUSD Market
Stability Pool
"""
def stability_update(accounts, contracts, active_accounts, return_stability, index):
supply = contracts.lusdToken.totalSupply() / 1e18
stability_pool_previous = contracts.stabilityPool.getTotalLUSDDeposits() / 1e18
np.random.seed(27+3*index)
shock_stability = np.random.normal(0,sd_stability)
natural_rate_current = natural_rate[index]
if stability_pool_previous == 0:
stability_pool = stability_initial
elif index <= month:
stability_pool = stability_pool_previous * drift_stability * (1+shock_stability) * (1 + return_stability - natural_rate_current)**theta
else:
stability_pool = stability_pool_previous * (1+shock_stability) * (1 + return_stability - natural_rate_current)**theta
if stability_pool > supply:
print("Warning! Stability pool supposed to be greater than supply", stability_pool, supply)
stability_pool = supply
if stability_pool > stability_pool_previous:
remaining = stability_pool - stability_pool_previous
i = 0
while remaining > 0 and i < len(active_accounts):
account = index2address(accounts, active_accounts, i)
balance = contracts.lusdToken.balanceOf(account) / 1e18
deposit = min(balance, remaining)
if deposit > 0:
contracts.stabilityPool.provideToSP(floatToWei(deposit), ZERO_ADDRESS, { 'from': account, 'gas_limit': 8000000, 'allow_revert': True })
remaining = remaining - deposit
i = i + 1
else:
current_deposit = contracts.stabilityPool.getCompoundedLUSDDeposit(accounts[0])
if current_deposit > 0:
new_withdraw = min(floatToWei(stability_pool_previous - stability_pool), current_deposit)
contracts.stabilityPool.withdrawFromSP(new_withdraw, { 'from': accounts[0] })
"""LUSD Price, liquidity pool, and redemption
**Price Determination**
---
With the supply and demand of LUSD tokens defined above, the price of LUSD at the current period is given by the following equilibrium condition:
> $$S_t = D_t^s + D_t^l = D_t^s + D_{t-1}^l (1+\zeta_t^l)(1+\sigma_t^l) (\frac{P_t^l}{P_{t-1}^l})^\delta$$
where $S$ is the total supply of LUSD.
Solving this equation gives that:
> $$P_t^l = P_{t-1}^l (\frac{S_t-D_t^s}{D_{t-1}^l(1+\zeta_t^l)(1+\sigma_t^l)})^{1/\delta}$$
"""
def calculate_price(price_LUSD, liquidity_pool, liquidity_pool_next):
# liquidity_pool = supply - stability_pool
# liquidity_pool_next = liquidity_pool_previous * drift_liquidity * (1+shock_liquidity)
price_LUSD_current= price_LUSD * (liquidity_pool / liquidity_pool_next) ** (1/delta)
return price_LUSD_current
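# Worked example (illustrative only; the numbers and the value of delta are hypothetical, not taken
# from the model's calibration): with previous price 1.00, current liquidity-pool demand of
# 1,000,000 LUSD, next-period demand of 1,010,000 LUSD and delta = 2, the formula gives
#   1.00 * (1_000_000 / 1_010_000) ** (1 / 2) ≈ 0.995,
# i.e. projected demand growth in excess of supply pushes the clearing price slightly below 1.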
""" **Stabilizers**
There are two stabilizers to attenuate LUSD price deviation from its target range.
No action if $P_t^l \in [1-f_t^r, 1.1+f_t^i]$, where $f_t^r$ represents the redemption fee, and $f_t^i$ represents the issuance fee.
For the moment, we set $f_t^r = 1\%$.
---
Stabilizer 1: ceiling arbitrageurs
If $P_t^l > 1.1+f_t^i$, open a new trove with $CR^*=110\%$ and $\tau^*=10\%$. Its debt amounts to
> $$Q_t^d(c) = \frac{P_t^e Q_t^e(c)}{110\%}.$$
The amount of $Q_t^d(c)$ is expected to bring the LUSD price back to $1.1+f_t^i$. This means that
> $$S_t' = D_t^s + (\frac{1.1+f_t^i}{P_{t-1}^l})^\delta D_{t-1}^l(1+\zeta_t^l)(1+\sigma_t^l)$$
The debt of the new trove is the difference between the supply needed to bring the price to $1.1+f_t^i$ and the original supply, which is
> $$Q_t^d(c) = S_t' - S_t$$
**Programming logic**:
market clearing condition supply = demand ==> $P_t^l$ is determined
If $P_t^l > 1.1+f_t^i$ ==> calculate what amount of extra supply leads to
$P_t^l = 1.1+f_t^i$ ==> denote this amount by $Q_t^d(c)$ ==> open a trove
with $CR^*=110\%$ and debt = $Q_t^d(c)$
---
Stabilizer 2: floor arbitrageurs
If $P_t^l < 1-f_t^r$, a fraction $\chi_t$ of LUSD in the liquidity pool is used for redemption
> $$D_t^r = \chi_t D_t^l,$$
where
> $$\chi_t = ...$$
The redemption eliminates troves with the lowest collateral ratio.
Note that unlike stabilizer 1, stabilizer 2 affects the LUSD price in
the next period. Namely, once $P_t^l$ has been determined and $P_t^l < 1-f_t^r$, the redemption no longer affects $P_t^l$, so stabilizer 2 does not need to be
programmed the way stabilizer 1 is. The redemption closes some troves and thus affects $P_{t+1}^l$ in the next period as the number of troves shrinks.
**Programming logic**
Denote the number of troves fully redeemed by $N_t^r$. Therefore,
> $$D_t^r = \sum_i^{N_t^r} Q_t^d(i) + \Delta$$
where $\Delta \geq 0$ represents the residual.
Note that the redemption starts from the riskiest troves, i.e. those with
the lowest collateral ratios.
If any residual $\Delta > 0$ is left, the changes to the trove $j$ with the lowest collateral ratio are
> $$Q_{t+1}^e(j) = Q_{t}^e(j) - \Delta/P_t^e$$
> $$Q_{t+1}^d(j) = Q_{t}^d(j) - \Delta$$
> $$CR_{t+1}(j) = \frac{P_t^e Q_{t}^e(j) - \Delta}{Q_{t}^d(j) - \Delta}$$
---
Redemption fee revenue amounts to
> $$R_t^r = D_t^r(f_t^r + \frac{D_t^r}{S_t^l})$$
"""
# redemption pool - to avoid redeeming the whole liquidity pool
sd_redemption = 0.001
redemption_start = 0.8
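# Worked example for the redemption fee formula above (illustrative; D_r and S are hypothetical
# numbers, f_r = 1% follows the docstring): redeeming D_r = 50,000 LUSD against a total supply of
# S = 5,000,000 LUSD costs
#   R_r = 50,000 * (0.01 + 50,000 / 5,000,000) = 50,000 * 0.02 = 1,000 LUSD,
# matching the redemption_fee computation in price_stabilizer() below.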
def redeem_trove(accounts, contracts, i, price_ether_current):
lusd_balance = contracts.lusdToken.balanceOf(accounts[i])
[firstRedemptionHint, partialRedemptionHintNICR, truncatedLUSDamount] = contracts.hintHelpers.getRedemptionHints(lusd_balance, price_ether_current, 70)
if truncatedLUSDamount == Wei(0):
return None
approxHint = contracts.hintHelpers.getApproxHint(partialRedemptionHintNICR, 2000, 0)
hints = contracts.sortedTroves.findInsertPosition(partialRedemptionHintNICR, approxHint[0], approxHint[0])
try:
tx = contracts.troveManager.redeemCollateral(
truncatedLUSDamount,
firstRedemptionHint,
hints[0],
hints[1],
partialRedemptionHintNICR,
70,
MAX_FEE,
{ 'from': accounts[i], 'gas_limit': 8000000, 'allow_revert': True }
)
return tx
except:
print(f"\n Redemption failed! ")
print(f"Trove Manager: {contracts.troveManager.address}")
print(f"LUSD Token: {contracts.lusdToken.address}")
print(f"i: {i}")
print(f"account: {accounts[i]}")
print(f"LUSD bal: {lusd_balance / 1e18}")
print(f"truncated: {truncatedLUSDamount / 1e18}")
print(f"Redemption rate: {contracts.troveManager.getRedemptionRateWithDecay() * 100 / 1e18} %")
print(f"approx: {approxHint[0]}")
print(f"diff: {approxHint[1]}")
print(f"diff: {approxHint[1] / 1e18}")
print(f"seed: {approxHint[2]}")
print(f"amount: {truncatedLUSDamount}")
print(f"first: {firstRedemptionHint}")
print(f"hint: {hints[0]}")
print(f"hint: {hints[1]}")
print(f"nicr: {partialRedemptionHintNICR}")
print(f"nicr: {partialRedemptionHintNICR / 1e18}")
print(f"70")
print(f"{MAX_FEE}")
#return None
exit(1)
def price_stabilizer(accounts, contracts, active_accounts, inactive_accounts, price_ether_current, price_LUSD, index):
stability_pool = contracts.stabilityPool.getTotalLUSDDeposits() / 1e18
redemption_pool = 0
redemption_fee = 0
issuance_LUSD_stabilizer = 0
supply = contracts.lusdToken.totalSupply() / 1e18
#Liquidity Pool
liquidity_pool = supply - stability_pool
# next iteration step for liquidity pool
np.random.seed(20*index)
shock_liquidity = np.random.normal(0,sd_liquidity)
liquidity_pool_next = liquidity_pool * drift_liquidity * (1+shock_liquidity)
#Calculating Price
price_LUSD_current = calculate_price(price_LUSD, liquidity_pool, liquidity_pool_next)
rate_issuance = contracts.troveManager.getBorrowingRateWithDecay() / 1e18
rate_redemption = contracts.troveManager.getRedemptionRateWithDecay() / 1e18
#Stabilizer
#Ceiling Arbitrageurs
if price_LUSD_current > 1.1 + rate_issuance:
supply_wanted = stability_pool + \
liquidity_pool_next * \
((1.1+rate_issuance) / price_LUSD)**delta
supply_trove = min(supply_wanted - supply, MIN_NET_DEBT)
CR_ratio = 1.1
rational_inattention = 0.1
quantity_ether = supply_trove * CR_ratio / price_ether_current
issuance_LUSD_stabilizer = rate_issuance * supply_trove
        if open_trove(accounts, contracts, active_accounts, inactive_accounts, supply_trove, quantity_ether, CR_ratio, rational_inattention, price_ether_current):
price_LUSD_current = 1.1 + rate_issuance
liquidity_pool = supply_wanted - stability_pool
#Floor Arbitrageurs
if price_LUSD_current < 1 - rate_redemption:
np.random.seed(30*index)
shock_redemption = np.random.normal(0, sd_redemption)
        redemption_ratio = min(1, redemption_start * (1+shock_redemption))  # at most the whole liquidity pool can be redeemed
supply_target = stability_pool + \
liquidity_pool_next * \
((1-rate_redemption) / price_LUSD)**delta
supply_diff = supply - supply_target
if supply_diff < redemption_ratio * liquidity_pool:
redemption_pool = supply_diff
#liquidity_pool = liquidity_pool - redemption_pool
price_LUSD_current = 1 - rate_redemption
else:
redemption_pool = redemption_ratio * liquidity_pool
#liquidity_pool = (1-redemption_ratio)*liquidity_pool
price_LUSD_current = calculate_price(price_LUSD, liquidity_pool, liquidity_pool_next)
remaining = redemption_pool
i = 0
while remaining > 0 and i < len(active_accounts):
account = index2address(accounts, active_accounts, i)
balance = contracts.lusdToken.balanceOf(account) / 1e18
redemption = min(balance, remaining)
if redemption > 0:
tx = redeem_trove(accounts, contracts, 0, price_ether_current)
if tx:
remove_accounts_from_events(
accounts,
active_accounts,
inactive_accounts,
filter(lambda e: e['coll'] == 0, tx.events['TroveUpdated']),
'_borrower'
)
remaining = remaining - redemption
i = i + 1
#Redemption Fee
redemption_fee = redemption_pool * (rate_redemption + redemption_pool / supply)
return [price_LUSD_current, redemption_pool, redemption_fee, issuance_LUSD_stabilizer]
"""# LQTY Market"""
def LQTY_market(index, data):
#quantity_LQTY = (LQTY_total_supply/3)*(1-0.5**(index/period))
np.random.seed(2+3*index)
if index <= month:
price_LQTY_current = price_LQTY[index-1]
annualized_earning = (index/month)**0.5 * np.random.normal(200000000,500000)
else:
revenue_issuance = sum(data['issuance_fee'][index - month:index])
revenue_redemption = sum(data['redemption_fee'][index - month:index])
annualized_earning = 365 * (revenue_issuance+revenue_redemption) / 30
        # discount factor to account for the risk in the early days
discount=index/period
price_LQTY_current = discount * PE_ratio * annualized_earning / LQTY_total_supply
#MC_LQTY_current = price_LQTY_current * quantity_LQTY
return [price_LQTY_current, annualized_earning]
|
11469354
|
import pandas as pd
import numpy as np
import yfinance as yf #Yahoo Finance API
from datetime import datetime as dt, date
import time
import matplotlib.pyplot as plt  # used by plot() below
df = pd.DataFrame()
tickers = ["^KS11", "^GSPC", "^N225", "^HSI", "^N100", "^FTSE", "^DJI"]
start_day = dt(2019, 12, 1)
today = str(date.today())
kospi = yf.download('^KS11', start=dt(2005, 1, 1), end=today)
def get_all_index_data(df, tickers, start_day, today):
for ticker in tickers:
try:
print('Stealing from Yahoo Finance ......................\n')
print('Working on a ticker: ', ticker, '......................\n')
ticker_df = yf.download(ticker, start=start_day, end=today)
time.sleep(1)
df_temp = ticker_df.reset_index()
df_temp = df_temp[['Date','Adj Close']]
df_temp = df_temp.rename(columns={'Adj Close': ticker})
df = df.join(df_temp, how='outer', rsuffix='Date')
except IndexError:
            print('index error while downloading', ticker)
df = df.loc[:,~df.columns.str.contains('DateDate', case=False)]
df = df.dropna()
    df.columns = df.columns.str.replace('^', '', regex=False)  # strip the caret from ticker symbols
print('.....................Done ......................')
return df
data = get_all_index_data(df, tickers, start_day, today)
def normalize(df):
df1 = df.iloc[:, 1:].apply(lambda x: np.log(x) - np.log(x.shift(1)))
df1['Date'] = df['Date']
df1 = df1[list(df.columns)]
return df1
def plot(data):
plt.figure(figsize=(15, 10))
plt.plot(data.Date, data.KS11, label='KOSPI', color='blue')
plt.plot(data.Date, data.GSPC, label='S&P 500', color='orange')
plt.plot(data.Date, data.N225, label='Nikkei 225', color='magenta')
plt.plot(data.Date, data.HSI, label='Hang Seng Index', color='green')
plt.plot(data.Date, data.N100, label='Euro 100', color='yellow')
plt.plot(data.Date, data.FTSE, label='FTSE', color='grey')
plt.legend(loc='upper left')
#plt.savefig('SMA-KOSPI.png')
plt.show()
# Interactive graph visualization with plotly
import plotly.express as px
import plotly.graph_objects as go
## Interactive visualization of log rate-of-change (using data_fill)
log_data_fill = log_diff(data_fill)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=log_data_fill.index,
y=log_data_fill.FTSE,
mode='lines',
name='FTSE'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.GSPC,
mode='lines',
name='GSPC(S&P 500)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.HSI,
mode='lines',
                         name='HSI(Hang Seng)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.KS11,
mode='lines',
name='KS11(KOSPI)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.N100,
mode='lines',
name='N100(EuroNext100)'))
fig.add_trace(go.Scatter(x=log_data_fill.index,
y=log_data_fill.N225,
mode='lines',
name='N225(Nikkei225)'))
# Interactive visualization of standardized index value trends
standardize_data_fill = standardize(data_fill)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=standardize_data_fill.index,
y=standardize_data_fill.FTSE,
mode='lines',
name='FTSE'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.GSPC,
mode='lines',
name='GSPC(S&P 500)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.HSI,
mode='lines',
                         name='HSI(Hang Seng)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.KS11,
mode='lines',
name='KS11(KOSPI)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.N100,
mode='lines',
name='N100(EuroNext100)'))
fig.add_trace(go.Scatter(x=standardize_data_fill.index,
y=standardize_data_fill.N225,
mode='lines',
name='N225(Nikkei225)'))
world_aggregated = 'https://raw.githubusercontent.com/datasets/covid-19/master/data/worldwide-aggregated.csv'
countries_aggregated= 'https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv'
world = pd.read_csv(world_aggregated)
countries = pd.read_csv(countries_aggregated)
#print(corona.head())
#print(countries.head())
korea = countries[countries['Country'].str.contains("Korea, South")]
#print(korea.head())
|
11469378
|
import hashlib
import struct
import hmac
import base58
try:
hashlib.new("ripemd160")
except ValueError:
# No native implementation
from . import _ripemd
def ripemd160(*args):
return _ripemd.new(*args)
else:
# Use OpenSSL
def ripemd160(*args):
return hashlib.new("ripemd160", *args)
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVES = {
"secp112r1": (
704,
0xDB7C2ABF62E35E668076BEAD208B,
0xDB7C2ABF62E35E7628DFAC6561C5,
0xDB7C2ABF62E35E668076BEAD2088,
0x659EF8BA043916EEDE8911702B22,
(
0x09487239995A5EE76B55F9C2F098,
0xA89CE5AF8724C0A23E0E0FF77500
)
),
"secp112r2": (
705,
0xDB7C2ABF62E35E668076BEAD208B,
0x36DF0AAFD8B8D7597CA10520D04B,
0x6127C24C05F38A0AAAF65C0EF02C,
0x51DEF1815DB5ED74FCC34C85D709,
(
0x4BA30AB5E892B4E1649DD0928643,
0xADCD46F5882E3747DEF36E956E97
)
),
"secp128r1": (
706,
0xFFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF,
0xFFFFFFFE0000000075A30D1B9038A115,
0xFFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC,
0xE87579C11079F43DD824993C2CEE5ED3,
(
0x161FF7528B899B2D0C28607CA52C5B86,
0xCF5AC8395BAFEB13C02DA292DDED7A83
)
),
"secp128r2": (
707,
0xFFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF,
0x3FFFFFFF7FFFFFFFBE0024720613B5A3,
0xD6031998D1B3BBFEBF59CC9BBFF9AEE1,
0x5EEEFCA380D02919DC2C6558BB6D8A5D,
(
0x7B6AA5D85E572983E6FB32A7CDEBC140,
0x27B6916A894D3AEE7106FE805FC34B44
)
),
"secp160k1": (
708,
0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73,
0x0100000000000000000001B8FA16DFAB9ACA16B6B3,
0,
7,
(
0x3B4C382CE37AA192A4019E763036F4F5DD4D7EBB,
0x938CF935318FDCED6BC28286531733C3F03C4FEE
)
),
"secp160r1": (
709,
0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF,
0x0100000000000000000001F4C8F927AED3CA752257,
0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC,
0x001C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45,
(
0x4A96B5688EF573284664698968C38BB913CBFC82,
0x23A628553168947D59DCC912042351377AC5FB32
)
),
"secp160r2": (
710,
0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73,
0x0100000000000000000000351EE786A818F3A1A16B,
0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70,
0x00B4E134D3FB59EB8BAB57274904664D5AF50388BA,
(
0x52DCB034293A117E1F4FF11B30F7199D3144CE6D,
0xFEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E
)
),
"secp192k1": (
711,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37,
0xFFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D,
0,
3,
(
0xDB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D,
0x9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D
)
),
"prime192v1": (
409,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF,
0xFFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC,
0x64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1,
(
0x188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012,
0x07192B95FFC8DA78631011ED6B24CDD573F977A11E794811
)
),
"secp224k1": (
712,
0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D,
0x010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7,
0,
5,
(
0xA1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C,
0x7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5
)
),
"secp224r1": (
713,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE,
0xB4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4,
(
0xB70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21,
0xBD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34
)
),
"secp256k1": (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,
0,
7,
(
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
)
),
"prime256v1": (
715,
0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF,
0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551,
0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC,
0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B,
(
0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,
0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5
)
),
"secp384r1": (
716,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFF,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF581A0DB248B0A77AECEC196ACCC52973,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFC,
0xB3312FA7E23EE7E4988E056BE3F82D19181D9C6EFE8141120314088F5013875AC656398D8A2ED19D2A85C8EDD3EC2AEF,
(
0xAA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB7,
0x3617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F
)
),
"secp521r1": (
717,
0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409,
0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC,
0x0051953EB9618E1C9A1F929A21A0B68540EEA2DA725B99B315F3B8B489918EF109E156193951EC7E937B1652C0BD3BB1BF073573DF883D2C34F1EF451FD46B503F00,
(
0x00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66,
0x011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650
)
),
"brainpoolP160r1": (
921,
0xE95E4A5F737059DC60DFC7AD95B3D8139515620F,
0xE95E4A5F737059DC60DF5991D45029409E60FC09,
0x340E7BE2A280EB74E2BE61BADA745D97E8F7C300,
0x1E589A8595423412134FAA2DBDEC95C8D8675E58,
(
0xBED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3,
0x1667CB477A1A8EC338F94741669C976316DA6321
)
),
"brainpoolP192r1": (
923,
0xC302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297,
0xC302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1,
0x6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF,
0x469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9,
(
0xC0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6,
0x14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F
)
),
"brainpoolP224r1": (
925,
0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF,
0xD7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F,
0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43,
0x2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B,
(
0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D,
0x58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD
)
),
"brainpoolP256r1": (
927,
0xA9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377,
0xA9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7,
0x7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9,
0x26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6,
(
0x8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262,
0x547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997
)
),
"brainpoolP320r1": (
929,
0xD35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27,
0xD35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311,
0x3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F492F375A97D860EB4,
0x520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539816F5EB4AC8FB1F1A6,
(
0x43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C710AF8D0D39E20611,
0x14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7D35245D1692E8EE1
)
),
"brainpoolP384r1": (
931,
0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53,
0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565,
0x7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826,
0x04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D57CB4390295DBC9943AB78696FA504C11,
(
0x1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E,
0x8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315
)
),
"brainpoolP512r1": (
933,
0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3,
0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069,
0x7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA,
0x3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723,
(
0x81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822,
0x7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892
)
),
}
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self, name):
if name not in self.CURVES:
raise ValueError("Unknown curve {}".format(name))
nid, p, n, a, b, g = self.CURVES[name]
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
def get_backend(self):
return self._backend.get_backend()
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
if raw:
if is_compressed:
return bytes([0x02 + (y[-1] % 2)]) + x
else:
return bytes([0x04]) + x + y
else:
return struct.pack("!HH", self.nid, len(x)) + x + struct.pack("!H", len(y)) + y
def _decode_public_key(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
if public_key[0] == 0x04:
# Uncompressed
expected_length = 1 + 2 * self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid uncompressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid uncompressed public key length")
x = public_key[1:1 + self._backend.public_key_length]
y = public_key[1 + self._backend.public_key_length:expected_length]
if partial:
return (x, y), expected_length
else:
return x, y
elif public_key[0] in (0x02, 0x03):
# Compressed
expected_length = 1 + self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid compressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid compressed public key length")
x, y = self._backend.decompress_point(public_key[:expected_length])
# Sanity check
if x != public_key[1:expected_length]:
raise ValueError("Incorrect compressed public key")
if partial:
return (x, y), expected_length
else:
return x, y
else:
raise ValueError("Invalid public key prefix")
def _decode_public_key_openssl(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
i = 0
nid, = struct.unpack("!H", public_key[i:i + 2])
i += 2
if nid != self.nid:
raise ValueError("Wrong curve")
xlen, = struct.unpack("!H", public_key[i:i + 2])
i += 2
if len(public_key) - i < xlen:
raise ValueError("Too short public key")
x = public_key[i:i + xlen]
i += xlen
ylen, = struct.unpack("!H", public_key[i:i + 2])
i += 2
if len(public_key) - i < ylen:
raise ValueError("Too short public key")
y = public_key[i:i + ylen]
i += ylen
if partial:
return (x, y), i
else:
if i < len(public_key):
raise ValueError("Too long public key")
return x, y
def decode_public_key(self, public_key):
return self._decode_public_key(public_key)
def new_private_key(self, is_compressed=False):
return self._backend.new_private_key() + (b"\x01" if is_compressed else b"")
def private_to_public(self, private_key):
if len(private_key) == self._backend.public_key_length:
is_compressed = False
elif len(private_key) == self._backend.public_key_length + 1 and private_key[-1] == 1:
is_compressed = True
private_key = private_key[:-1]
else:
raise ValueError("Private key has invalid length")
x, y = self._backend.private_to_public(private_key)
return self._encode_public_key(x, y, is_compressed=is_compressed)
def private_to_wif(self, private_key):
return base58.b58encode_check(b"\x80" + private_key)
def wif_to_private(self, wif):
dec = base58.b58decode_check(wif)
if dec[0] != 0x80:
raise ValueError("Invalid network (expected mainnet)")
return dec[1:]
def public_to_address(self, public_key):
h = hashlib.sha256(public_key).digest()
hash160 = ripemd160(h).digest()
return base58.b58encode_check(b"\x00" + hash160)
def private_to_address(self, private_key):
# Kinda useless but left for quick migration from pybitcointools
return self.public_to_address(self.private_to_public(private_key))
def derive(self, private_key, public_key):
if len(private_key) == self._backend.public_key_length + 1 and private_key[-1] == 1:
private_key = private_key[:-1]
if len(private_key) != self._backend.public_key_length:
raise ValueError("Private key has invalid length")
if not isinstance(public_key, tuple):
public_key = self._decode_public_key(public_key)
return self._backend.ecdh(private_key, public_key)
def _digest(self, data, hash):
if hash is None:
return data
elif callable(hash):
return hash(data)
elif hash == "sha1":
return hashlib.sha1(data).digest()
elif hash == "sha256":
return hashlib.sha256(data).digest()
elif hash == "sha512":
return hashlib.sha512(data).digest()
else:
raise ValueError("Unknown hash/derivation method")
# High-level functions
def encrypt(self, data, public_key, algo="aes-256-cbc", derivation="sha256", mac="hmac-sha256", return_aes_key=False):
# Generate ephemeral private key
private_key = self.new_private_key()
# Derive key
ecdh = self.derive(private_key, public_key)
key = self._digest(ecdh, derivation)
k_enc_len = self._aes.get_algo_key_length(algo)
if len(key) < k_enc_len:
raise ValueError("Too short digest")
k_enc, k_mac = key[:k_enc_len], key[k_enc_len:]
# Encrypt
ciphertext, iv = self._aes.encrypt(data, k_enc, algo=algo)
ephem_public_key = self.private_to_public(private_key)
ephem_public_key = self._decode_public_key(ephem_public_key)
ephem_public_key = self._encode_public_key(*ephem_public_key, raw=False)
ciphertext = iv + ephem_public_key + ciphertext
# Add MAC tag
if callable(mac):
tag = mac(k_mac, ciphertext)
elif mac == "hmac-sha256":
h = hmac.new(k_mac, digestmod="sha256")
h.update(ciphertext)
tag = h.digest()
elif mac == "hmac-sha512":
h = hmac.new(k_mac, digestmod="sha512")
h.update(ciphertext)
tag = h.digest()
elif mac is None:
tag = b""
else:
raise ValueError("Unsupported MAC")
if return_aes_key:
return ciphertext + tag, k_enc
else:
return ciphertext + tag
def decrypt(self, ciphertext, private_key, algo="aes-256-cbc", derivation="sha256", mac="hmac-sha256"):
# Get MAC tag
if callable(mac):
tag_length = mac.digest_size
elif mac == "hmac-sha256":
tag_length = hmac.new(b"", digestmod="sha256").digest_size
elif mac == "hmac-sha512":
tag_length = hmac.new(b"", digestmod="sha512").digest_size
elif mac is None:
tag_length = 0
else:
raise ValueError("Unsupported MAC")
if len(ciphertext) < tag_length:
raise ValueError("Ciphertext is too small to contain MAC tag")
if tag_length == 0:
tag = b""
else:
ciphertext, tag = ciphertext[:-tag_length], ciphertext[-tag_length:]
orig_ciphertext = ciphertext
if len(ciphertext) < 16:
raise ValueError("Ciphertext is too small to contain IV")
iv, ciphertext = ciphertext[:16], ciphertext[16:]
public_key, pos = self._decode_public_key_openssl(ciphertext, partial=True)
ciphertext = ciphertext[pos:]
# Derive key
ecdh = self.derive(private_key, public_key)
key = self._digest(ecdh, derivation)
k_enc_len = self._aes.get_algo_key_length(algo)
if len(key) < k_enc_len:
raise ValueError("Too short digest")
k_enc, k_mac = key[:k_enc_len], key[k_enc_len:]
# Verify MAC tag
if callable(mac):
expected_tag = mac(k_mac, orig_ciphertext)
elif mac == "hmac-sha256":
h = hmac.new(k_mac, digestmod="sha256")
h.update(orig_ciphertext)
expected_tag = h.digest()
elif mac == "hmac-sha512":
h = hmac.new(k_mac, digestmod="sha512")
h.update(orig_ciphertext)
expected_tag = h.digest()
elif mac is None:
expected_tag = b""
if not hmac.compare_digest(tag, expected_tag):
raise ValueError("Invalid MAC tag")
return self._aes.decrypt(ciphertext, iv, k_enc, algo=algo)
def sign(self, data, private_key, hash="sha256", recoverable=False, entropy=None):
if len(private_key) == self._backend.public_key_length:
is_compressed = False
elif len(private_key) == self._backend.public_key_length + 1 and private_key[-1] == 1:
is_compressed = True
private_key = private_key[:-1]
else:
raise ValueError("Private key has invalid length")
data = self._digest(data, hash)
if not entropy:
v = b"\x01" * len(data)
k = b"\x00" * len(data)
k = hmac.new(k, v + b"\x00" + private_key + data, "sha256").digest()
v = hmac.new(k, v, "sha256").digest()
k = hmac.new(k, v + b"\x01" + private_key + data, "sha256").digest()
v = hmac.new(k, v, "sha256").digest()
entropy = hmac.new(k, v, "sha256").digest()
return self._backend.sign(data, private_key, recoverable, is_compressed, entropy=entropy)
def recover(self, signature, data, hash="sha256"):
# Sanity check: is this signature recoverable?
if len(signature) != 1 + 2 * self._backend.public_key_length:
raise ValueError("Cannot recover an unrecoverable signature")
x, y = self._backend.recover(signature, self._digest(data, hash))
is_compressed = signature[0] >= 31
return self._encode_public_key(x, y, is_compressed=is_compressed)
def verify(self, signature, data, public_key, hash="sha256"):
if len(signature) == 1 + 2 * self._backend.public_key_length:
# Recoverable signature
signature = signature[1:]
if len(signature) != 2 * self._backend.public_key_length:
raise ValueError("Invalid signature format")
if not isinstance(public_key, tuple):
public_key = self._decode_public_key(public_key)
return self._backend.verify(signature, self._digest(data, hash), public_key)
def derive_child(self, seed, child):
# Based on BIP32
if not 0 <= child < 2 ** 31:
raise ValueError("Invalid child index")
return self._backend.derive_child(seed, child)
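# Minimal usage sketch (illustrative; `my_backend_factory` and `my_aes_implementation` are
# hypothetical stand-ins for the backend and AES helpers this module expects to be injected):
#
#     ecc = ECC(backend=my_backend_factory, aes=my_aes_implementation)
#     curve = ecc.get_curve("secp256k1")
#     private_key = curve.new_private_key(is_compressed=True)
#     public_key = curve.private_to_public(private_key)
#     ciphertext = curve.encrypt(b"message", public_key)
#     assert curve.decrypt(ciphertext, private_key) == b"message"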
|
11469390
|
import datetime as dt
import json
import pytest
import pytz
from stix2.base import STIXJSONEncoder
def test_encode_json_datetime():
now = dt.datetime(2017, 3, 22, 0, 0, 0, tzinfo=pytz.UTC)
test_dict = {'now': now}
expected = '{"now": "2017-03-22T00:00:00Z"}'
assert json.dumps(test_dict, cls=STIXJSONEncoder) == expected
def test_encode_json_object():
obj = object()
test_dict = {'obj': obj}
with pytest.raises(TypeError) as excinfo:
json.dumps(test_dict, cls=STIXJSONEncoder)
assert " is not JSON serializable" in str(excinfo.value)
|
11469409
|
import django
from .views import (
test,
test_model_form,
test_custom_error_message,
test_per_form_format,
test_non_required,
test_id_prefix,
test_custom_generator,
)
if django.VERSION >= (3, 1, 0):
from django.urls import re_path as url, include
else:
from django.conf.urls import url, include
urlpatterns = [
url(r"test/$", test, name="captcha-test"),
url(r"test-modelform/$", test_model_form, name="captcha-test-model-form"),
url(r"test2/$", test_custom_error_message, name="captcha-test-custom-error-message"),
url(r"test3/$", test_per_form_format, name="test_per_form_format"),
url(r"custom-generator/$", test_custom_generator, name="test_custom_generator"),
url(r"test-non-required/$", test_non_required, name="captcha-test-non-required"),
url(r"test-id-prefix/$", test_id_prefix, name="captcha-test-id-prefix"),
url(r"", include("captcha.urls")),
]
|
11469411
|
import http.client
import mock
from datetime import timedelta
from rdr_service import clock
from rdr_service.model.utils import to_client_participant_id
from rdr_service.dao.database_utils import format_datetime
from tests.helpers.unittest_base import BaseTestCase
from rdr_service.message_broker.message_broker import MessageBrokerFactory
from rdr_service.model.message_broker import MessageBrokerRecord, MessageBrokerDestAuthInfo
from rdr_service.dao.message_broker_dest_auth_info_dao import MessageBrokerDestAuthInfoDao
from rdr_service.dao.message_broker_dao import MessageBrokerDao, MessageBrokenEventDataDao
class MockedTokenResponse(object):
status_code = 200
@staticmethod
def json():
return {
'access_token': '<PASSWORD>',
'expires_in': 600
}
class MessageBrokerApiTest(BaseTestCase):
def setUp(self):
super().setUp()
self.record_dao = MessageBrokerDao()
self.event_data_dao = MessageBrokenEventDataDao()
@staticmethod
def _create_auth_info_record(dest, token, expired_at):
auth_info_record = MessageBrokerDestAuthInfo(
destination=dest,
accessToken=token,
expiredAt=expired_at
)
auth_info_dao = MessageBrokerDestAuthInfoDao()
auth_info_dao.insert(auth_info_record)
def test_exist_token_not_expired(self):
message = MessageBrokerRecord(messageDest='vibrent')
message_broker = MessageBrokerFactory.create(message)
        # create an auth info record with a token that has not expired
now = clock.CLOCK.now()
expired_at = now + timedelta(seconds=600)
self._create_auth_info_record('vibrent', 'current_token', expired_at)
token = message_broker.get_access_token()
self.assertEqual(token, 'current_token')
def test_exist_token_expired(self):
requests_api_patcher = mock.patch(
"rdr_service.message_broker.message_broker.requests",
**{"post.return_value": MockedTokenResponse()}
)
requests_api_patcher.start()
message = MessageBrokerRecord(messageDest='vibrent')
message_broker = MessageBrokerFactory.create(message)
        # create an auth info record with an expired token
expired_at = clock.CLOCK.now()
self._create_auth_info_record('vibrent', 'current_token', expired_at)
token = message_broker.get_access_token()
self.assertEqual(token, 'new_token')
requests_api_patcher.stop()
@mock.patch('rdr_service.dao.participant_dao.get_account_origin_id')
@mock.patch('rdr_service.message_broker.message_broker.PtscMessageBroker.send_request')
def test_send_valid_message(self, send_request, request_origin):
send_request.return_value = 200, {'result': 'mocked result'}, ''
request_origin.return_value = 'color'
participant = self.data_generator.create_database_participant(participantOrigin='vibrent')
request_json = {
"event": "result_viewed",
"eventAuthoredTime": "2021-05-19T21:05:41Z",
"participantId": to_client_participant_id(participant.participantId),
"messageBody": {
"test_str": "str",
"test_int": 0,
"test_datatime": "2020-01-01T21:05:41Z",
"test_bool": True,
"test_json": {'name': 'value'}
}
}
result = self.send_post("MessageBroker", request_json)
self.assertEqual(result, {'event': 'result_viewed',
'participantId': to_client_participant_id(participant.participantId),
'responseCode': 200,
'responseBody': {'result': 'mocked result'},
'errorMessage': ''})
# test cloud task API
from rdr_service.resource import main as resource_main
records = self.record_dao.get_all()
record = records[0]
payload = {
'id': record.id,
'eventType': record.eventType,
'eventAuthoredTime': record.eventAuthoredTime.strftime("%Y-%m-%dT%H:%M:%SZ"),
'participantId': record.participantId,
'requestBody': record.requestBody
}
self.send_post(
local_path='StoreMessageBrokerEventDataTaskApi',
request_data=payload,
prefix="/resource/task/",
test_client=resource_main.app.test_client(),
)
event_data = self.event_data_dao.get_all()
self.assertEqual(5, len(event_data))
count = 0
for item in event_data:
if item.fieldName == 'test_bool':
self.assertEqual(item.valueBool, True)
count = count + 1
if item.fieldName == 'test_json':
self.assertEqual(item.valueJson, {'name': 'value'})
count = count + 1
if item.fieldName == 'test_str':
self.assertEqual(item.valueString, 'str')
count = count + 1
if item.fieldName == 'test_datatime':
self.assertEqual(item.valueDatetime.strftime("%Y-%m-%dT%H:%M:%SZ"), "2020-01-01T21:05:41Z")
count = count + 1
if item.fieldName == 'test_int':
self.assertEqual(item.valueInteger, 0)
count = count + 1
self.assertEqual(count, 5)
def test_send_invalid_message(self):
# request without participant id
request_json = {
"event": "result_viewed",
"eventAuthoredTime": format_datetime(clock.CLOCK.now()),
"messageBody": {
"result_type": "hdr_v1",
"report_revision_number": 0
}
}
self.send_post("MessageBroker", request_json, expected_status=http.client.BAD_REQUEST)
# participant not exist
request_json = {
"event": "result_viewed",
"participantId": "P111",
"eventAuthoredTime": format_datetime(clock.CLOCK.now()),
"messageBody": {
"result_type": "hdr_v1",
"report_revision_number": 0
}
}
self.send_post("MessageBroker", request_json, expected_status=http.client.BAD_REQUEST)
@mock.patch('rdr_service.dao.participant_dao.get_account_origin_id')
@mock.patch('rdr_service.message_broker.message_broker.PtscMessageBroker.send_request')
def test_informing_loop(self, send_request, request_origin):
send_request.return_value = 200, {'result': 'mocked result'}, ''
request_origin.return_value = 'color'
participant_one = self.data_generator.create_database_participant(participantOrigin='vibrent')
participant_two = self.data_generator.create_database_participant(participantOrigin='vibrent')
loop_decision = 'informing_loop_decision'
loop_started = 'informing_loop_started'
from rdr_service.resource import main as resource_main
request_json_decision = {
"event": "informing_loop_decision",
"eventAuthoredTime": format_datetime(clock.CLOCK.now()),
"participantId": to_client_participant_id(participant_one.participantId),
"messageBody": {
'module_type': 'hdr',
'decision_value': 'yes'
}
}
self.send_post("MessageBroker", request_json_decision)
records = self.record_dao.get_all()
record = records[0]
event_time = format_datetime(record.eventAuthoredTime)
payload = {
'id': record.id,
'eventType': record.eventType,
'eventAuthoredTime': event_time,
'participantId': record.participantId,
'requestBody': record.requestBody
}
self.send_post(
local_path='StoreMessageBrokerEventDataTaskApi',
request_data=payload,
prefix="/resource/task/",
test_client=resource_main.app.test_client(),
)
loop_decision_records = self.event_data_dao.get_informing_loop(
record.id,
loop_decision
)
self.assertIsNotNone(loop_decision_records)
self.assertEqual(len(loop_decision_records), 2)
for loop_record in loop_decision_records:
self.assertIsNotNone(loop_record.valueString)
self.assertEqual(format_datetime(loop_record.eventAuthoredTime), event_time)
self.assertTrue(any(obj for obj in loop_decision_records if obj.valueString == 'hdr'))
self.assertTrue(any(obj for obj in loop_decision_records if obj.valueString == 'yes'))
request_json_started = {
"event": "informing_loop_started",
"eventAuthoredTime": format_datetime(clock.CLOCK.now()),
"participantId": to_client_participant_id(participant_two.participantId),
"messageBody": {
'module_type': 'hdr',
}
}
self.send_post("MessageBroker", request_json_started)
records = self.record_dao.get_all()
record = records[1]
event_time = format_datetime(record.eventAuthoredTime)
payload = {
'id': record.id,
'eventType': record.eventType,
'eventAuthoredTime': event_time,
'participantId': record.participantId,
'requestBody': record.requestBody
}
self.send_post(
local_path='StoreMessageBrokerEventDataTaskApi',
request_data=payload,
prefix="/resource/task/",
test_client=resource_main.app.test_client(),
)
loop_started_records = self.event_data_dao.get_informing_loop(
record.id,
loop_started
)
self.assertIsNotNone(loop_started_records)
self.assertEqual(len(loop_started_records), 1)
for loop_record in loop_started_records:
self.assertIsNotNone(loop_record.valueString)
self.assertEqual(format_datetime(loop_record.eventAuthoredTime), event_time)
        self.assertTrue(any(obj for obj in loop_started_records if obj.valueString == 'hdr'))
|
11469450
|
import scrapy
from bs4 import BeautifulSoup
from utils.text_cleansing import clean_ep_data
class ImdbEpisodeSummarySpider(scrapy.Spider):
"""Spider for scraping the episode summaries of a TV show on IMDb."""
name = 'imdb_episode_summary_spider'
def __init__(self, start_urls, *args, **kwargs):
super(ImdbEpisodeSummarySpider, self).__init__(*args, **kwargs)
self.allowed_domains = ['www.imdb.com']
self.start_urls = start_urls
def parse(self, response):
"""Look up the episode list urls."""
        # get the rating count. If it's > 500, this might be a real TV show, not some fan-made project
# this does not work all the time, but in my experience it might be a good indicator
rating_count = response.xpath('//*[@itemprop="ratingCount"]/text()').extract_first()
if rating_count:
rating_count = int(rating_count.replace(',', ''))
if rating_count > 500:
# look up episodes
episode_list_urls = response.xpath('//*[@class="seasons-and-year-nav"]/div/a/@href').extract()
episode_list_urls = [response.urljoin(e) for e in episode_list_urls if 'season' in e]
for url in episode_list_urls:
yield scrapy.Request(url, callback=self.parse_episode_list)
def parse_episode_list(self, response):
"""Look up the episode urls from an episode list page."""
episode_list = response.xpath('//*[@class="list detail eplist"]')
episode_page_urls = episode_list.xpath('//*[@class="info"]/strong/a/@href').extract()
episode_page_urls = ['https://www.imdb.com{}'.format(e) for e in episode_page_urls]
for url in episode_page_urls:
yield scrapy.Request(url, callback=self.parse_episode_page)
# look up the page for the previous/next season
prev = response.xpath('//*[@id="load_previous_episodes"]/@href').extract_first()
next = response.xpath('//*[@id="load_next_episodes"]/@href').extract_first()
base_url = response.url
base_url = base_url.split("episodes?")[0]
if prev:
prev_url = '{}episodes{}'.format(base_url, prev)
yield scrapy.Request(prev_url, callback=self.parse_episode_list)
if next:
next_url = '{}episodes{}'.format(base_url, next)
yield scrapy.Request(next_url, callback=self.parse_episode_list)
def parse_episode_page(self, response):
"""Look up the link to the plot summary page from episode page and process it accordingly."""
plot_summary_url = response.xpath('//*[text()="Plot Summary"]/@href').extract_first()
plot_summary_url = 'https://www.imdb.com{}'.format(plot_summary_url)
yield scrapy.Request(plot_summary_url, callback=self.parse_plot_summary_page)
def parse_plot_summary_page(self, response):
"""Create and load the episode summary items."""
show_title = response.xpath('//*[@class="subpage_title_block"]//h4/a/text()').extract_first().strip()
ep_title = response.xpath('//*[@class="subpage_title_block"]//h3/a/text()').extract_first().strip()
summaries = response.xpath('//*[@class="ipl-zebra-list__item" and contains(@id, "summary")]/p').extract()
for ep_sum in summaries:
ep_sum = BeautifulSoup(ep_sum, 'lxml').get_text().strip()
if 'be the first to contribute' not in ep_sum.lower():
ep_data = {
'source_url': response.url,
'episode_title': ep_title,
'episode_summary': ep_sum,
'tv_show_title': show_title,
}
yield clean_ep_data(ep_data)
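# Usage sketch (a minimal example, assuming Scrapy >= 2.1 for the FEEDS setting;
# the IMDb title URL below is only a hypothetical placeholder):
#
#   from scrapy.crawler import CrawlerProcess
#
#   process = CrawlerProcess(settings={"FEEDS": {"episodes.json": {"format": "json"}}})
#   process.crawl(ImdbEpisodeSummarySpider, start_urls=["https://www.imdb.com/title/tt0000000/"])
#   process.start()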
|
11469496
|
import docutils
import os
import pytest
import sphinx
from packaging.version import Version
from django.conf import settings
from django.core.cache import cache
from django.urls import reverse
from .utils import srcdir
@pytest.mark.django_db
@pytest.mark.embed_api
class TestEmbedAPIv3ExternalPages:
@pytest.fixture(autouse=True)
def setup_method(self, settings):
settings.USE_SUBDOMAIN = True
settings.PUBLIC_DOMAIN = 'readthedocs.io'
settings.RTD_EMBED_API_EXTERNAL_DOMAINS = ['docs.project.com']
self.api_url = reverse('embed_api_v3')
yield
cache.clear()
@pytest.mark.sphinx('html', srcdir=srcdir, freshenv=True)
def test_default_main_section(self, app, client, requests_mock):
app.build()
path = app.outdir / 'index.html'
assert path.exists() is True
content = open(path).read()
requests_mock.get('https://docs.project.com', text=content)
params = {
'url': 'https://docs.project.com',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
# The output is different because docutils is outputting this,
# and we're not sanitizing it, but just passing it through.
if Version(docutils.__version__) >= Version('0.17'):
content = '<div class="body" role="main">\n \n <section id="title">\n<h1>Title<a class="headerlink" href="https://docs.project.com#title" title="Permalink to this headline">¶</a></h1>\n<p>This is an example page used to test EmbedAPI parsing features.</p>\n<section id="sub-title">\n<h2>Sub-title<a class="headerlink" href="https://docs.project.com#sub-title" title="Permalink to this headline">¶</a></h2>\n<p>This is a reference to <a class="reference internal" href="https://docs.project.com#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>\n</section>\n</section>\n\n\n </div>'
else:
content = '<div class="body" role="main">\n \n <div class="section" id="title">\n<h1>Title<a class="headerlink" href="https://docs.project.com#title" title="Permalink to this headline">¶</a></h1>\n<p>This is an example page used to test EmbedAPI parsing features.</p>\n<div class="section" id="sub-title">\n<h2>Sub-title<a class="headerlink" href="https://docs.project.com#sub-title" title="Permalink to this headline">¶</a></h2>\n<p>This is a reference to <a class="reference internal" href="https://docs.project.com#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>\n</div>\n</div>\n\n\n </div>'
assert response.json() == {
'url': 'https://docs.project.com',
'fragment': None,
'content': content,
'external': True,
}
@pytest.mark.sphinx('html', srcdir=srcdir, freshenv=True)
def test_specific_identifier(self, app, client, requests_mock):
app.build()
path = app.outdir / 'index.html'
assert path.exists() is True
content = open(path).read()
requests_mock.get('https://docs.project.com', text=content)
params = {
'url': 'https://docs.project.com#sub-title',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
if Version(docutils.__version__) >= Version('0.17'):
content = '<section id="sub-title">\n<h2>Sub-title<a class="headerlink" href="https://docs.project.com#sub-title" title="Permalink to this headline">¶</a></h2>\n<p>This is a reference to <a class="reference internal" href="https://docs.project.com#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>\n</section>'
else:
content = '<div class="section" id="sub-title">\n<h2>Sub-title<a class="headerlink" href="https://docs.project.com#sub-title" title="Permalink to this headline">¶</a></h2>\n<p>This is a reference to <a class="reference internal" href="https://docs.project.com#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>\n</div>'
assert response.json() == {
'url': 'https://docs.project.com#sub-title',
'fragment': 'sub-title',
'content': content,
'external': True,
}
@pytest.mark.sphinx('html', srcdir=srcdir, freshenv=True)
def test_dl_identifier(self, app, client, requests_mock):
app.build()
path = app.outdir / 'configuration.html'
assert path.exists() is True
content = open(path).read()
requests_mock.get('https://docs.project.com/configuration.html', text=content)
params = {
'url': 'https://docs.project.com/configuration.html#confval-config1',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
if sphinx.version_info < (3, 5, 0):
content = '<dt id="confval-config1">\n<code class="sig-name descname">config1</code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>'
elif sphinx.version_info[:2] == (3, 5):
content = '<dt id="confval-config1">\n<code class="sig-name descname"><span class="pre">config1</span></code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>'
else:
content = '<dt class="sig sig-object std" id="confval-config1">\n<span class="sig-name descname"><span class="pre">config1</span></span><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>'
assert response.json() == {
'url': 'https://docs.project.com/configuration.html#confval-config1',
'fragment': 'confval-config1',
'content': content,
'external': True,
}
@pytest.mark.sphinx('html', srcdir=srcdir, freshenv=True)
def test_dl_identifier_doctool_sphinx(self, app, client, requests_mock):
app.build()
path = app.outdir / 'configuration.html'
assert path.exists() is True
content = open(path).read()
requests_mock.get('https://docs.project.com/configuration.html', text=content)
# Calling the API without doctool
params = {
'url': 'https://docs.project.com/configuration.html#confval-config1',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
if sphinx.version_info < (3, 5, 0):
content = '<dt id="confval-config1">\n<code class="sig-name descname">config1</code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>'
elif sphinx.version_info[:2] == (3, 5):
content = '<dt id="confval-config1">\n<code class="sig-name descname"><span class="pre">config1</span></code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>'
else:
content = '<dt class="sig sig-object std" id="confval-config1">\n<span class="sig-name descname"><span class="pre">config1</span></span><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>'
assert response.json() == {
'url': 'https://docs.project.com/configuration.html#confval-config1',
'fragment': 'confval-config1',
'content': content,
'external': True,
}
# Calling the API with doctool
params = {
'url': 'https://docs.project.com/configuration.html#confval-config1',
'doctool': 'sphinx',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
if sphinx.version_info < (3, 0, 0): # <3.0
content = '<dl class="confval">\n<dt id="confval-config1">\n<code class="sig-name descname">config1</code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>\n<dd><p>Description: This the description for config1</p>\n<p>Default: <code class="docutils literal notranslate"><span class="pre">\'Default</span> <span class="pre">value</span> <span class="pre">for</span> <span class="pre">config\'</span></code></p>\n<p>Type: bool</p>\n</dd></dl>'
elif sphinx.version_info[:2] == (3, 5):
content = '<dl class="std confval">\n<dt id="confval-config1">\n<code class="sig-name descname"><span class="pre">config1</span></code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>\n<dd><p>Description: This the description for config1</p>\n<p>Default: <code class="docutils literal notranslate"><span class="pre">\'Default</span> <span class="pre">value</span> <span class="pre">for</span> <span class="pre">config\'</span></code></p>\n<p>Type: bool</p>\n</dd></dl>'
elif sphinx.version_info < (4, 0, 0): # >3.0, !=3.5.x, <4.0
content = '<dl class="std confval">\n<dt id="confval-config1">\n<code class="sig-name descname">config1</code><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>\n<dd><p>Description: This the description for config1</p>\n<p>Default: <code class="docutils literal notranslate"><span class="pre">\'Default</span> <span class="pre">value</span> <span class="pre">for</span> <span class="pre">config\'</span></code></p>\n<p>Type: bool</p>\n</dd></dl>'
else: # >=4.0
content = '<dl class="std confval">\n<dt class="sig sig-object std" id="confval-config1">\n<span class="sig-name descname"><span class="pre">config1</span></span><a class="headerlink" href="https://docs.project.com/configuration.html#confval-config1" title="Permalink to this definition">¶</a></dt>\n<dd><p>Description: This the description for config1</p>\n<p>Default: <code class="docutils literal notranslate"><span class="pre">\'Default</span> <span class="pre">value</span> <span class="pre">for</span> <span class="pre">config\'</span></code></p>\n<p>Type: bool</p>\n</dd></dl>'
assert response.json() == {
'url': 'https://docs.project.com/configuration.html#confval-config1',
'fragment': 'confval-config1',
'content': content,
'external': True,
}
@pytest.mark.sphinx('html', srcdir=srcdir, freshenv=True)
def test_citation_identifier_doctool_sphinx(self, app, client, requests_mock):
app.build()
path = app.outdir / 'bibtex-cite.html'
assert path.exists() is True
content = open(path).read()
requests_mock.get('https://docs.project.com/bibtex-cite.html', text=content)
# Calling the API without doctool
params = {
'url': 'https://docs.project.com/bibtex-cite.html#id4',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
assert response.json() == {
'url': 'https://docs.project.com/bibtex-cite.html#id4',
'fragment': 'id4',
'content': '<dt class="label" id="id4"><span class="brackets">Nel87</span><span class="fn-backref">(<a href="https://docs.project.com/bibtex-cite.html#id1">1</a>,<a href="https://docs.project.com/bibtex-cite.html#id2">2</a>)</span></dt>',
'external': True,
}
# Calling the API with doctool
params = {
'url': 'https://docs.project.com/bibtex-cite.html#id4',
'doctool': 'sphinx',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
assert response.json() == {
'url': 'https://docs.project.com/bibtex-cite.html#id4',
'fragment': 'id4',
'content': '<dl class="citation">\n<dt class="label" id="id4"><span class="brackets">Nel87</span><span class="fn-backref">(<a href="https://docs.project.com/bibtex-cite.html#id1">1</a>,<a href="https://docs.project.com/bibtex-cite.html#id2">2</a>)</span></dt>\n<dd><p><NAME>son. <em>Radically Elementary Probability Theory</em>. Princeton University Press, 1987.</p>\n</dd>\n</dl>',
'external': True,
}
@pytest.mark.sphinx('html', srcdir=srcdir, freshenv=True)
def test_glossary_identifier_doctool_sphinx(self, app, client, requests_mock):
app.build()
path = app.outdir / 'glossary.html'
assert path.exists() is True
content = open(path).read()
requests_mock.get('https://docs.project.com/glossary.html', text=content)
# Note: the case of the fragment differs between Sphinx versions
if sphinx.version_info >= (3, 0, 0):
fragment = 'term-Read-the-Docs'
else:
fragment = 'term-read-the-docs'
# Calling the API without doctool
url = f'https://docs.project.com/glossary.html#{fragment}'
params = {
'url': url,
}
response = client.get(self.api_url, params)
assert response.status_code == 200
if sphinx.version_info >= (3, 5, 0):
content = f'<dt id="{fragment}">Read the Docs<a class="headerlink" href="https://docs.project.com/glossary.html#{fragment}" title="Permalink to this term">¶</a></dt>'
else:
content = f'<dt id="{fragment}">Read the Docs</dt>'
assert response.json() == {
'url': url,
'fragment': fragment,
'content': content,
'external': True,
}
# Calling the API with doctool
params = {
'url': url,
'doctool': 'sphinx',
}
response = client.get(self.api_url, params)
assert response.status_code == 200
if sphinx.version_info >= (3, 5, 0):
content = f'<dl class="glossary simple">\n<dt id="{fragment}">Read the Docs<a class="headerlink" href="https://docs.project.com/glossary.html#{fragment}" title="Permalink to this term">¶</a></dt><dd><p>Best company ever.</p>\n</dd>\n</dl>'
else:
content = f'<dl class="glossary simple">\n<dt id="{fragment}">Read the Docs</dt><dd><p>Best company ever.</p>\n</dd>\n</dl>'
assert response.json() == {
'url': url,
'content': content,
'fragment': fragment,
'external': True,
}
|
11469497
|
import configparser
import requests
import os.path
import os
import sys
import time
import json
import click
from ..utils.misc_utils import walk_up, printDebug
USER_DIR = os.path.expanduser("~/.dimensions/")
# for API credentials
USER_CONFIG_FILE_NAME = "dsl.ini"
USER_CONFIG_FILE_PATH = os.path.expanduser(USER_DIR + USER_CONFIG_FILE_NAME)
USER_HISTORY_FILE = os.path.expanduser(USER_DIR + "history.txt")
#
USER_EXPORTS_DIR = os.path.expanduser("~/dimcli-exports/")
# for other settings
USER_SETTINGS_FILE_NAME = "settings"
USER_SETTINGS_FILE_PATH = os.path.expanduser(USER_DIR + USER_SETTINGS_FILE_NAME)
###
#
# class that encapsulates the login/token logic for the API
#
#
###
class APISession():
"""Dimensions API Authentication logic
"""
def __init__(self, verbose=True):
"""Initialises a Dsl Authentication Session object.
Normally it is not needed to instantiate directly this object, instead it's
quicker to use the `dimcli.login()` utility method, which create a global
authentication session.
In some situations though, you'd want to query two separate Dimensions instances
in parallel. To that end, pass an APISession instance to the Dsl() constructor
using the `auth_session` parameter, IE:
```
from dimcli.core.auth import APISession
mysession1 = APISession()
mysession1.login(instance="key-test")
d1 = Dsl(auth_session=mysession1)
d1.query("search publications return research_orgs")
mysession2 = APISession()
mysession2.login(instance="live")
d2 = Dsl(auth_session=mysession2)
d2.query("search publications return research_orgs")
```
"""
self.instance = None
self.url = None
self.url_auth = None
self.url_query = None
self.username = None
self.password = None
self.key = None
self.token = None
# self._verbose = verbose
def login(self,
instance="",
username="",
password="",
key="",
endpoint="",
verbose=True):
"""Login into Dimensions API endpoint and get a query token.
INSTANCE => used to reference the local configuration file
ENDPOINT => explicity set the endpoint URL to use. If not provided, it will be inferred from the instance name.
"""
if False:
# FOR INTERNAL QA ONLY
click.secho(f"""instance="{instance}",
username="{username}",
password="{password}",
key="{key}",
endpoint="{endpoint}")""", fg="red")
if (username or password) and not (username and password):
raise Exception("Authentication error: you provided only one value for username and password. Both are required.")
if instance and endpoint:
if verbose: printDebug(f"Warning: you provided both instance and endpoint values. Endpoint will be ignored." , "comment")
endpoint = ""
if (key or (username and password)):
# explicit credentials, no need to look for config file
if not endpoint:
if verbose: printDebug("Using default endpoint: 'https://app.dimensions.ai'", "comment")
endpoint="https://app.dimensions.ai"
instance = ""
else:
# no key, no usr/pwd => use the config file
fpath = get_init_file()
if not instance and not endpoint:
if verbose: printDebug("Searching config file credentials for default 'live' instance..", "comment")
instance="live"
config_section = read_init_file(fpath, instance_name=instance)
elif endpoint:
if verbose: printDebug(f"Searching config file credentials for '{endpoint}' endpoint..", "comment")
config_section = read_init_file(fpath, endpoint=endpoint)
else:
if verbose: printDebug(f"Searching config file credentials for '{instance}' instance..", "comment")
config_section = read_init_file(fpath, instance_name=instance)
endpoint = config_section['url'] # OVERRIDE URL USING LOCAL CONFIG
try:
username = config_section['login']
password = config_section['password']
except:
username, password = "", ""
try:
key = config_section['key']
except:
key = ""
URL_AUTH, URL_QUERY = self._get_endpoint_urls(endpoint)
# printDebug(URL_AUTH, URL_QUERY )
login_data = {'username': username, 'password': password, 'key': key}
# POST AUTH REQUEST
response = requests.post(URL_AUTH, json=login_data)
response.raise_for_status()
token = response.json()['token']
self.instance = instance
self.url = URL_QUERY
self.username = username
self.password = password
self.key = key
self.token = token
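# Example (a minimal sketch; the key value is a hypothetical placeholder, a real key
# would come from the user's Dimensions account settings):
#   session = APISession()
#   session.login(key="my-api-key", endpoint="https://app.dimensions.ai")
#   assert session.is_logged_in()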
def _get_endpoint_urls(self, user_url):
"""Infer the proper API endpoints URLs from the (possibly incomplete) URL the use is sending.
CASE 1
User provides a domain URL eg https://app.dimensions.ai.
=> Then the Query URL defaults to `/api/dsl`
CASE 1
User provides a full query URL eg https://app.dimensions.ai/api/dsl/v2.
=> Then no action is needed
Query Endpoints:
* /api/dsl/v1
* /api/dsl/v2
* /api/dsl
* /api/dsl.json
Auth endpoint (always inferred)
* /api/auth.json
https://docs.dimensions.ai/dsl/2.0.0/api.html#endpoints
NOTE
We never try to validate URLs provided by users.
"""
url_auth, url_query = None, None
if "/api/" in user_url:
# savvy user passing the full QUERY URL
domain = user_url.split("/api/")[0]
url_auth = domain + "/api/auth.json"
url_query = user_url
else:
domain = user_url
url_auth = domain + "/api/auth.json"
url_query = domain + "/api/dsl"
self.url_auth, self.url_query = url_auth, url_query
return url_auth, url_query
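# Illustrative mapping, following the two cases documented above:
#   "https://app.dimensions.ai"
#       -> ("https://app.dimensions.ai/api/auth.json", "https://app.dimensions.ai/api/dsl")
#   "https://app.dimensions.ai/api/dsl/v2"
#       -> ("https://app.dimensions.ai/api/auth.json", "https://app.dimensions.ai/api/dsl/v2")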
def refresh_login(self):
"""
Log in again, reusing the previously entered credentials, if the TOKEN has expired.
"""
self.login( instance=self.instance,
username=self.username,
password=self.password,
key=self.key,
endpoint=self.url,
verbose=False
)
def reset_login(self):
""
self.instance = None
self.url = None
self.username = None
self.password = None
self.key = None
self.token = None
def is_logged_in(self):
if self.token:
return True
else:
printDebug("Warning: you are not logged in. Please use `dimcli.login()` before querying.")
return False
###
#
# global connection object and helper methods
#
#
###
CONNECTION = APISession()
def do_global_login(instance="", username="", password="", key="", url=""):
"Login into DSL and set the connection object with token"
global CONNECTION
CONNECTION.login(instance, username, password, key, url)
def get_global_connection():
global CONNECTION
return CONNECTION
def is_logged_in_globally():
"used only internally for magic commands and function wrappers, which always expect a global login"
global CONNECTION
if CONNECTION.token:
return True
else:
printDebug("Warning: you are not logged in. Please use `dimcli.login(username, password)` before querying.")
return False
###
#
# INIT file helpers
#
#
###
def get_init_file():
"""
LOGIC
a) a dsl.ini credentials file in the working directory overrides everything
b) otherwise, try the user-level credentials ("~/.dimensions/")
c) otherwise, navigate up the directory tree looking for dsl.ini
=> a) and c) are useful for Jupyter notebooks without a system-wide installation
=> b) is the usual case for the CLI
===================
BACKGROUND
Authentication details can be stored in a `dsl.ini` file
File contents need to have this structure:
[instance.live]
url=https://app.dimensions.ai
login=your_username
password=your_password
key=your_key
The section name has to start with "instance."
Keyword "live" is the default name for most installations.
If you have access to other Dimensions APIs just add an entry for them with a suitable name.
===================
"""
if os.path.exists(os.getcwd() + "/" + USER_CONFIG_FILE_NAME):
return os.getcwd() + "/" + USER_CONFIG_FILE_NAME
elif os.path.exists(os.path.expanduser(USER_CONFIG_FILE_PATH )):
return os.path.expanduser(USER_CONFIG_FILE_PATH )
else:
for c,d,f in walk_up(os.getcwd()):
if os.path.exists(c + "/" + USER_CONFIG_FILE_NAME):
return c + "/" + USER_CONFIG_FILE_NAME
return None
def read_init_file(fpath, instance_name="", endpoint=""):
"""
parse the credentials file
"""
config = configparser.ConfigParser()
try:
config.read(fpath)
except:
printDebug(f"ERROR: `{USER_CONFIG_FILE_NAME}` credentials file not found." , fg="red")
printDebug("HowTo: https://digital-science.github.io/dimcli/getting-started.html#authentication", fg="red")
sys.exit(0)
# we have a good config file
if instance_name:
try:
section = config['instance.' + instance_name]
except:
printDebug(f"ERROR: Credentials file `{fpath}` does not contain settings for instance: '{instance_name}''", fg="red")
printDebug(f"Available instances are:")
for x in config.sections():
printDebug("'%s'" % x)
printDebug("---\nSee Also: https://digital-science.github.io/dimcli/getting-started.html#authentication")
config.sections()
sys.exit(0)
return section
elif endpoint:
# try all stored configurations
for instance_name in config.sections():
try:
# print(instance_name, config[instance_name]['url'])
if endpoint == config[instance_name]['url']:
return config[instance_name]
except:
pass
printDebug(f"ERROR: Credentials file `{fpath}` does not contain settings for endpoint: '{endpoint}''", fg="red")
sys.exit(0)
###
#
# settings file helpers, e.g. gists key etc.
#
#
###
def get_settings_file():
"""Get the global settings file.
This does not include any authentication info, only other settings, e.g. GitHub keys etc.
It'll be extended in the future as new integrations/functionalities become available.
"""
if os.path.exists(os.getcwd() + "/" + USER_SETTINGS_FILE_NAME):
return os.getcwd() + "/" + USER_SETTINGS_FILE_NAME
elif os.path.exists(os.path.expanduser(USER_SETTINGS_FILE_PATH )):
return os.path.expanduser(USER_SETTINGS_FILE_PATH )
else:
for c,d,f in walk_up(os.getcwd()):
if os.path.exists(c + "/" + USER_SETTINGS_FILE_NAME):
return c + "/" + USER_SETTINGS_FILE_NAME
return None
def read_settings_file(fpath, section_name):
"""
parse the settings file for sections like 'gist' key etc..
"""
config = configparser.ConfigParser()
try:
config.read(fpath)
except:
printDebug(f"ERROR: `{USER_SETTINGS_FILE_NAME}` settings file not found." , fg="red")
printDebug("HowTo: https://digital-science.github.io/dimcli/getting-started.html#github-gists-token", fg="red")
sys.exit(0)
# we have a good config file
try:
section = config[section_name]
except:
printDebug(f"ERROR: Settings file `{fpath}` does not contain settings for: '{section_name}''", fg="red")
printDebug("---\nPlease review the file contents, or see https://digital-science.github.io/dimcli/getting-started.html#github-gists-token")
config.sections()
sys.exit(0)
return section
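# Usage sketch (a minimal example, assuming a local dsl.ini with an [instance.live]
# section as described in get_init_file above):
#
#   do_global_login(instance="live")
#   conn = get_global_connection()
#   if conn.is_logged_in():
#       print(conn.url, conn.token)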
|