| id | content |
|---|---|
120135
|
import argparse
import itertools
import logging
import os
import time
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import waitress
import numpy as np
import json
import re
from torch.utils.data import DataLoader
from tqdm import tqdm
from data import Data
from model import BertSupportNetX
from utils import load_torch_model
from tools.utils import convert_to_tokens
MODEL_MAP={
"bert": BertSupportNetX,
"bertxl": BertSupportNetX
}
logging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')
logger = logging.getLogger()
cors_allow_all = CORS(allow_all_origins=True,
allow_origins_list=['*'],
allow_all_headers=True,
allow_all_methods=True,
allow_credentials_all_origins=True
)
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', default=58081,
help='falcon server port')
parser.add_argument(
'-c', '--config_file', default='config/bert_config-xl.json',
help='model config file')
args = parser.parse_args()
model_config=args.config_file
# def result_to_json(string, tags):
# item = {"string": string, "entities": []}
# entity_name = ""
# entity_start = 0
# idx = 0
# i = -1
# zipped = zip(string, tags)
# listzip = list(zipped)
# last = len(listzip)
# for char, tag in listzip:
# i += 1
# if tag == 3:
# item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
# elif tag == 0:
# entity_name += char
# entity_start = idx
# elif tag == 1:
# if (entity_name != "") and (i == last):
# entity_name += char
# item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
# entity_name = ""
# else:
# entity_name += char
# elif tag == 2: # or i == len(zipped)
# entity_name += char
# item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
# entity_name = ""
# else:
# entity_name = ""
# entity_start = idx
# idx += 1
# return item
#
class TorchResource:
def __init__(self):
logger.info("...")
# 0. Load config
with open(model_config) as fin:
self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# 1. Load data
self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),
max_seq_len=self.config.max_seq_len,
model_type=self.config.model_type, config=self.config)
# 2. Load model
self.model = MODEL_MAP[self.config.model_type](self.config)
self.model = load_torch_model(
self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))
self.model.to(self.device)
logger.info("###")
def flatten(self, ll):
return list(itertools.chain(*ll))
def cleanall(self, content):
return content.replace(" ", "", 10**10)
def process_context(self, line):
line = line.replace("·", "", 100)
spans = re.split('([,。])', line)
if len(spans) <= 2:
spans = re.split('([,。])', line)
if len(spans) <= 2:
spans = re.split('([;;,。,])', line)
assert len(spans) > 2, spans
# spans = [span for span in spans if len(span)>1]
spans_sep = []
for i in range(len(spans) // 2):
spans_sep.append(spans[2 * i] + spans[2 * i + 1])
assert len(spans_sep) > 0, spans
return [[spans_sep[0], spans_sep]]
def bert_classification(self, content, question):
logger.info('1:{}'.format( content))
conv_dic = {}
conv_dic['_id'] = 0
conv_dic['context'] = self.process_context(content)
conv_dic['question'] = question
conv_dic["answer"] = ""
conv_dic['supporting_facts'] = []
rows = [conv_dic]
filename = "data/{}.json".format(time.time())
with open(filename, 'w', encoding='utf8') as fw:
json.dump(rows, fw, ensure_ascii=False, indent=4)
exam, feats, dataset = self.data.load_file(filename, False)
data_loader = DataLoader(dataset, batch_size=self.config.batch_size)
self.model.eval()
answer_dict = {}
sp_dict = {}
tqdm_obj = tqdm(data_loader, ncols=80)
for step, batch in enumerate(tqdm_obj):
batch = tuple(t.to(self.device) for t in batch)
start_logits, end_logits, type_logits, sp_logits, start_position, end_position = self.model(*batch)
batchsize = batch[0].size(0)
# ids
answer_dict_ = convert_to_tokens(exam, feats, batch[5], start_position.data.cpu().numpy().tolist(),
end_position.data.cpu().numpy().tolist(),
np.argmax(type_logits.data.cpu().numpy(), 1))
answer_dict.update(answer_dict_)
predict_support_np = torch.sigmoid(sp_logits).data.cpu().numpy()
for i in range(predict_support_np.shape[0]):
cur_sp_pred = []
cur_id = batch[5][i].item()
cur_sp_logit_pred = [] # for sp logit output
for j in range(predict_support_np.shape[1]):
if j >= len(exam[cur_id].sent_names):
break
if predict_support_np[i, j] > self.config.sp_threshold:
cur_sp_pred.append(exam[cur_id].sent_names[j])
sp_dict.update({cur_id: cur_sp_pred})
new_answer_dict = {}
for key, value in answer_dict.items():
new_answer_dict[key] = value.replace(" ", "")
prediction = {'answer': new_answer_dict, 'sp': sp_dict}
return {"data": prediction}
def on_get(self, req, resp):
logger.info("...")
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials','true')
content = req.get_param('c', True)
question = req.get_param('q', True)
# clean_content =
#clean_content = self.cleanall(content)
resp.media = self.bert_classification(content, question)
logger.info("###")
def on_post(self, req, resp):
"""Handles POST requests"""
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials', 'true')
resp.set_header("Cache-Control", "no-cache")
data = req.stream.read(req.content_length)
data = data.decode('utf-8')
# regex = re.compile(r'\\(?![/u"])')
# data = regex.sub(r"\\", data)
jsondata = json.loads(data)
# clean_title = shortenlines(jsondata['1'])
# clean_content = cleanall(jsondata['2'])
content = jsondata['context']
question = jsondata['question']
# clean_content = self.cleanall(content)
resp.media = self.bert_classification(content, question)
logger.info("###")
if __name__=="__main__":
api = falcon.API(middleware=[cors_allow_all.middleware])
api.req_options.auto_parse_form_urlencoded = True
api.add_route('/z', TorchResource())
waitress.serve(api, port=args.port, threads=48, url_scheme='http')
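# A hedged client sketch (not part of the original service): the resource above
# reads the context from query parameter 'c' and the question from 'q' on the
# '/z' route, so a request against a locally running instance might look like:
#   import requests
#   r = requests.get("http://127.0.0.1:58081/z", params={"c": "...", "q": "..."})
#   print(r.json())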
|
120163
|
import functools
import hashlib
from flask import jsonify, request, url_for, current_app, make_response, g
from .rate_limit import RateLimit
from .errors import too_many_requests, precondition_failed, not_modified
def json(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
rv = f(*args, **kwargs)
status_or_headers = None
headers = None
if isinstance(rv, tuple):
rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
if isinstance(status_or_headers, (dict, list)):
headers, status_or_headers = status_or_headers, None
if not isinstance(rv, dict):
rv = rv.to_json()
rv = jsonify(rv)
if status_or_headers is not None:
rv.status_code = status_or_headers
if headers is not None:
rv.headers.extend(headers)
return rv
return wrapped
def rate_limit(limit, per, scope_func=lambda: request.remote_addr):
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if current_app.config['USE_RATE_LIMITS']:
key = 'rate-limit/%s/%s/' % (f.__name__, scope_func())
limiter = RateLimit(key, limit, per)
if not limiter.over_limit:
rv = f(*args, **kwargs)
else:
rv = too_many_requests('You have exceeded your request rate')
#rv = make_response(rv)
g.headers = {
'X-RateLimit-Remaining': str(limiter.remaining),
'X-RateLimit-Limit': str(limiter.limit),
'X-RateLimit-Reset': str(limiter.reset)
}
return rv
else:
return f(*args, **kwargs)
return wrapped
return decorator
def paginate(max_per_page=10):
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', max_per_page,
type=int), max_per_page)
query = f(*args, **kwargs)
p = query.paginate(page, per_page)
pages = {'page': page, 'per_page': per_page,
'total': p.total, 'pages': p.pages}
if p.has_prev:
pages['prev'] = url_for(request.endpoint, page=p.prev_num,
per_page=per_page,
_external=True, **kwargs)
else:
pages['prev'] = None
if p.has_next:
pages['next'] = url_for(request.endpoint, page=p.next_num,
per_page=per_page,
_external=True, **kwargs)
else:
pages['next'] = None
pages['first'] = url_for(request.endpoint, page=1,
per_page=per_page, _external=True,
**kwargs)
pages['last'] = url_for(request.endpoint, page=p.pages,
per_page=per_page, _external=True,
**kwargs)
return jsonify({
'urls': [item.get_url() for item in p.items],
'meta': pages
})
return wrapped
return decorator
def cache_control(*directives):
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
rv = f(*args, **kwargs)
rv = make_response(rv)
rv.headers['Cache-Control'] = ', '.join(directives)
return rv
return wrapped
return decorator
def no_cache(f):
return cache_control('no-cache', 'no-store', 'max-age=0')(f)
def etag(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
# only for HEAD and GET requests
assert request.method in ['HEAD', 'GET'],\
'@etag is only supported for HEAD and GET requests'
rv = f(*args, **kwargs)
rv = make_response(rv)
etag = '"' + hashlib.md5(rv.get_data()).hexdigest() + '"'
rv.headers['ETag'] = etag
if_match = request.headers.get('If-Match')
if_none_match = request.headers.get('If-None-Match')
if if_match:
etag_list = [tag.strip() for tag in if_match.split(',')]
if etag not in etag_list and '*' not in etag_list:
rv = precondition_failed()
elif if_none_match:
etag_list = [tag.strip() for tag in if_none_match.split(',')]
if etag in etag_list or '*' in etag_list:
rv = not_modified()
return rv
return wrapped
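# A hedged sketch (not from the original module) of how these decorators might be
# stacked on a Flask view; the app, route and config flag below are illustrative:
#   app = Flask(__name__)
#   app.config['USE_RATE_LIMITS'] = False
#
#   @app.route('/items/')
#   @rate_limit(limit=5, per=15)   # at most 5 requests per 15 s per client address
#   @no_cache                      # Cache-Control: no-cache, no-store, max-age=0
#   @etag                          # adds an ETag and honours If-Match / If-None-Match
#   @json                          # jsonify the returned dict
#   def get_items():
#       return {'items': []}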
|
120181
|
from pcraft.PluginsContext import PluginsContext
class PCraftPlugin(PluginsContext):
name = "PrintVariables"
def __init__(self, app, session, plugins_data):
super().__init__(app, session, plugins_data)
def help(self):
helpstr="""
This plugin prints all the variables.
### Example
The usage is trivial, just call it in your flow.
```
printvars:
_plugin: PrintVariables
_next: dnsconnect
```
"""
return helpstr
def run(self, script=None):
self.update_vars_from_script(script)
print("PrintVariables: " + str(self.plugins_data))
return script["_next"], self.plugins_data
|
120190
|
import argparse
from time import time
from xml.dom.minidom import parseString
from block import Block
from grid import Grid
from os.path import join
from pattern import Pattern
from pattern_utils import de_densify, measure_density, pattern_to_svg, shorten_jumps, \
remove_short
from stitch import Stitch
from svgutils import scan_lines, stack_paths, trace_image, sort_paths, overall_bbox, \
get_color, get_stroke_width, make_continuous, write_debug, remove_close_paths, \
path1_is_contained_in_path2, shorter_side, is_concave, draw_fill, posturize, \
make_equidistant, perpendicular, split_subpaths, get_pixel_from_string
from configure import PLOTTING, MINIMUM_STITCH_DISTANCE, OUTPUT_DIRECTORY
from svgwrite import rgb
from svgwrite.shapes import Circle
if PLOTTING:
from scipy.spatial.qhull import Voronoi
import matplotlib.pyplot as plt
else:
plt = None
try:
# potrace is wrapped in a try/except statement because the digitizer might sometimes
# be run on an environment where Ctypes are not allowed
import potrace
from potrace import BezierSegment, CornerSegment
except:
potrace = None
BezierSegment = None
CornerSegment = None
from numpy import argmax, average, ceil
from svgpathtools import svgdoc2paths, Line, Path
from brother import BrotherEmbroideryFile, pattern_to_csv, upload
from configure import MINIMUM_STITCH_LENGTH, MAXIMUM_STITCH, DEBUG
fill_method = "scan" #"grid"#"polygon"#"voronoi
parser = argparse.ArgumentParser(
description='Generate a pes file for brother sewing machines from an svg or png image')
parser.add_argument('--filename', type=str,
help='The filename of the input image.')
parser.add_argument('--fill', dest="fill", action="store_true",
help="Fill the shapes")
class Digitizer(object):
def __init__(self, filename=None, fill=False):
self.fill = fill
# stitches is the stitches that have yet to be added to the pattern
self.stitches = []
self.attributes = []
self.all_paths = []
self.fill_color = None
self.last_color = None
self.last_stitch = None
self.pattern = Pattern()
if not filename:
return
self.filecontents = open(join("workspace", filename), "r").read()
if filename.split(".")[-1] != "svg":
self.image_to_pattern()
else:
self.svg_to_pattern()
def image_to_pattern(self):
self.all_paths, self.attributes = stack_paths(*trace_image(self.filecontents))
self.scale = 2.64583333
self.generate_pattern()
def svg_to_pattern(self):
doc = parseString(self.filecontents)
# make sure the document size is appropriate
root = doc.getElementsByTagName('svg')[0]
root_width = root.attributes.getNamedItem('width')
viewbox = root.getAttribute('viewBox')
if viewbox:
lims = [float(i) for i in viewbox.split(" ")]
width = abs(lims[0] - lims[2])
height = abs(lims[1] - lims[3])
else:
# run through all the coordinates
bbox = overall_bbox(self.all_paths)
width = bbox[1] - bbox[0]
height = bbox[3] - bbox[2]
path_attributes = split_subpaths(*svgdoc2paths(doc))
if self.fill:
self.all_paths, self.attributes = sort_paths(*stack_paths(*path_attributes))
else:
self.all_paths, self.attributes = sort_paths(*path_attributes)
if root_width is not None:
root_width = get_pixel_from_string(root_width.value, width)
size = 4*25.4
# The maximum size is 4 inches - multiplied by 10 for scaling
if root_width:
size = root_width
size *= 10.0
if width > height:
self.scale = size / width
else:
self.scale = size / height
self.generate_pattern()
def add_block(self, clear=True):
if len(self.stitches) == 0:
print("got no stitches in add block!")
return
if self.last_color is not None:
block = Block(stitches=self.stitches, color=self.last_color)
self.pattern.add_block(block)
else:
print("last color was none, not adding the block")
if clear:
self.last_stitch = self.stitches[-1]
self.stitches = []
def generate_pattern(self):
# cut the paths by the paths above
if self.fill:
self.all_paths, self.attributes = stack_paths(self.all_paths, self.attributes)
for k, v in enumerate(self.attributes):
paths = self.all_paths[k]
# first, look for the color from the fill
# if fill is false, change the attributes so that the fill is none but the
# stroke is the fill (if not set)
self.fill_color = get_color(v, "fill")
self.stroke_color = get_color(v, "stroke")
stroke_width = get_stroke_width(v, self.scale)
if not self.fill:
if not self.stroke_color:
self.stroke_color = self.fill_color
stroke_width = stroke_width if stroke_width != MINIMUM_STITCH_LENGTH \
else MINIMUM_STITCH_LENGTH * 3.0
self.fill_color = None
if self.fill_color is None and self.stroke_color is None:
self.fill_color = [0, 0, 0]
# if both the fill color and stroke color are none,
if self.fill_color is not None:
if len(self.pattern.blocks) == 0 and self.fill_color is not None:
self.pattern.add_block(Block([Stitch(["JUMP"], 0, 0)], color=self.fill_color))
self.switch_color(self.fill_color)
if fill_method == "polygon":
full_path = Path(*paths)
if not full_path.iscontinuous():
self.fill_polygon(make_continuous(full_path))
else:
self.fill_polygon(paths)
elif fill_method == "grid":
self.fill_grid(paths)
elif fill_method == "scan":
self.fill_scan(paths)
elif fill_method == "voronoi":
self.fill_voronoi(paths)
self.last_color = self.fill_color
self.add_block()
# then do the stroke
if self.stroke_color is None:
continue
self.switch_color(self.stroke_color)
paths = self.generate_stroke_width(paths, stroke_width)
self.generate_straight_stroke(paths)
self.last_color = self.stroke_color
if len(self.pattern.blocks) == 0 and self.stroke_color is not None:
self.pattern.add_block(
Block([Stitch(["JUMP"], 0, 0)], color=self.stroke_color))
if self.stroke_color:
self.add_block()
if len(self.stitches) > 0:
self.last_color = self.stroke_color
# finally, move the stitches so that it is as close as possible to the next
# location
if len(self.pattern.blocks) > 0 and len(self.pattern.blocks[-1].stitches) > 0:
last_stitch = self.pattern.blocks[-1].stitches[-1]
self.pattern.add_block(
Block(stitches=[Stitch(["END"], last_stitch.x, last_stitch.y)],
color=self.pattern.blocks[-1].color))
def generate_stroke_width(self, paths, stroke_width):
new_paths = []
if stroke_width / MINIMUM_STITCH_DISTANCE <= 1.:
return paths
# how many times can the MINIMUM_STITCH_DISTANCE fit in the stroke width?
# if it is greater 1, duplicate the stitch offset by the minimum stitch
for i in range(0, int(stroke_width / MINIMUM_STITCH_DISTANCE)):
for path in paths:
if i == 0:
new_paths.append(path)
continue
# what is the broad angle of the path? (used to determine the
# perpendicular angle to translate the path by)
num_norm_samples = 10.0
diff = average([path.normal(t / num_norm_samples)
for t in range(int(num_norm_samples))])
diff *= -1 if i % 2 == 0 else 1
diff *= ceil(i / 2.0) * MINIMUM_STITCH_DISTANCE / 2.0
# if i is odd, translate up/left, if even, translate down/right
new_paths.append(path.translated(diff))
return new_paths
def switch_color(self, new_color):
if self.last_color is None or self.last_color == new_color \
or self.last_stitch is None:
return
to = self.last_stitch
block = Block(stitches=[Stitch(["TRIM"], to.x, to.y)],
color=self.last_color)
self.pattern.add_block(block)
block = Block(stitches=[Stitch(["COLOR"], to.x, to.y)],
color=new_color)
self.pattern.add_block(block)
self.stitches = []
def generate_straight_stroke(self, paths):
# sort the paths by the distance to the upper right corner
bbox = overall_bbox(paths)
write_debug("stroke_travel", [(Path(*paths), "none", (0, 0, 0)),
(Circle(center=(bbox[0], bbox[2]), r=1, fill=rgb(255, 0, 0)), "none", "none")])
# discretize the paths
points = []
for i, path in enumerate(paths):
if path.length() == 0:
continue
points.append(path.start*self.scale)
num_segments = ceil(path.length() / MINIMUM_STITCH_LENGTH)
for seg_i in range(int(num_segments + 1)):
points.append(path.point(seg_i / num_segments) * self.scale)
# if the next stitch doesn't start at the end of this stitch, add that one as
# well
end_stitch = path.end * self.scale
if i != len(paths) - 1:
if path.end != paths[i + 1].start:
points.append(end_stitch)
else:
points.append(end_stitch)
if len(points) == 0:
return
# find the point closest to the last stitch
if not self.last_stitch:
last_stitch = points[0]
else:
last_stitch = self.last_stitch.x+self.last_stitch.y*1j
closest = sorted([i for i in range(len(points))], key=lambda dist: abs(points[i]-last_stitch))[0]
points = points[closest:]+points[:closest]
for point in points:
to = Stitch(["STITCH"], point.real, point.imag, color=self.stroke_color)
self.stitches.append(to)
def fill_polygon(self, paths):
rotated = 0
fudge_factor = 0.03
while len(paths) > 2:
if len(paths) < 4:
self.fill_triangle(paths, color="red")
return
shapes = [[Path(*paths), "none", "blue"], [Path(*paths), "none", "green"]]
write_debug("close", shapes)
paths = remove_close_paths(paths)
if len(paths) <= 2:
return
# check whether the next triangle is concave
test_line1 = Line(start=paths[0].start, end=paths[1].end)
test_line1 = Line(start=test_line1.point(fudge_factor),
end=test_line1.point(1 - fudge_factor))
comparison_path = Path(*paths)
if test_line1.length() == 0:
has_intersection = True
else:
has_intersection = len(
[1 for line in paths if len(line.intersect(test_line1)) > 0]) > 0
if not path1_is_contained_in_path2(test_line1,
comparison_path) or has_intersection:
shapes = [[comparison_path, "none", "blue"],
[test_line1, "none", "black"]]
write_debug("anim", shapes)
# rotate the paths
paths = paths[1:] + [paths[0]]
rotated += 1
if rotated >= len(paths):
print("failed to rotate into a concave path -> ",
(test_line1.start.real, test_line1.start.imag),
(test_line1.end.real, test_line1.end.imag),
[(p.start.real, p.start.imag) for p in paths])
return
continue
side = shorter_side(paths)
test_line2 = Line(start=paths[1].start, end=paths[2].end)
test_line2 = Line(start=test_line2.point(fudge_factor),
end=test_line2.point(1 - fudge_factor))
test_line3 = Line(start=paths[-1 + side].end,
end=paths[(3 + side) % len(paths)].start)
test_line3 = Line(start=test_line3.point(fudge_factor),
end=test_line3.point(1 - fudge_factor))
num_intersections = []
for path in comparison_path:
if test_line3.length() == 0:
print("test line 3 is degenerate!")
num_intersections += test_line3.intersect(path)
num_intersections += test_line2.intersect(path)
rect_not_concave = not path1_is_contained_in_path2(test_line2,
comparison_path)
# test for concavity. If concave, fill as triangle
if is_concave(paths) or len(num_intersections) > 0 or rect_not_concave:
self.fill_triangle(paths, color="blue")
shapes = [[Path(*paths), "none", "black"]]
to_remove = []
to_remove.append(paths.pop(0))
to_remove.append(paths.pop(0))
for shape in to_remove:
shapes.append([shape, "none", "blue"])
closing_line = Line(start=paths[-1].end, end=paths[0].start)
shapes.append([closing_line, "none", "green"])
shapes.append([test_line1, "none", "red"])
write_debug("rem", shapes)
else:
# check whether the next triangle is concave
side, side2 = self.fill_trap(paths)
if side:
paths = paths[1:] + [paths[0]]
shapes = [[Path(*paths), "none", "black"]]
to_remove = []
to_remove.append(paths.pop(0))
to_remove.append(paths.pop(0))
to_remove.append(paths.pop(0))
# if the trap was stitched in the vertical (perpendicular to the
# stitches), don't remove that segment
linecolors = ["blue", "purple", "pink"]
for i, shape in enumerate(to_remove):
shapes.append([shape, "none", linecolors[i]])
closing_line = Line(start=paths[-1].end, end=paths[0].start)
shapes.append([closing_line, "none", "green"])
shapes.append([test_line2, "none", "purple"])
write_debug("rem", shapes)
delta = closing_line.length() - (
test_line3.length() / (1.0 - 2.0 * fudge_factor))
if abs(delta) > 1e-14:
print("closing line different than test!", side, test_line3,
closing_line)
rotated = 0
if paths[-1].end != paths[0].start:
# check for intersections
closing_line = Line(start=paths[-1].end, end=paths[0].start)
paths.insert(0, closing_line)
else:
print("removed paths but they connected anyway")
def fill_shape(self, side1, side2, paths, shapes):
if paths[side1].length() == 0:
return
increment = 3 * MINIMUM_STITCH_LENGTH / paths[side1].length()
current_t = 0
# make closed shape
filled_paths = [paths[side1], paths[side2]]
if filled_paths[0].end != filled_paths[1].start:
filled_paths.insert(1, Line(start=filled_paths[0].end,
end=filled_paths[1].start))
if filled_paths[0].start != filled_paths[-1].end:
filled_paths.append(Line(start=filled_paths[-1].end,
end=filled_paths[0].start))
while current_t < 1.0 - increment * 0.5:
point1 = paths[side1].point(current_t)
point2 = paths[side2].point(1 - (current_t + 0.5 * increment))
point3 = paths[side1].point(current_t + increment)
to = Stitch(["STITCH"], point1.real * self.scale,
point1.imag * self.scale,
color=self.fill_color)
self.stitches.append(to)
to = Stitch(["STITCH"], point2.real * self.scale,
point2.imag * self.scale,
color=self.fill_color)
self.stitches.append(to)
current_t += increment
to = Stitch(["STITCH"], point3.real * self.scale,
point3.imag * self.scale,
color=self.fill_color)
self.stitches.append(to)
shapes.append([paths[side1], "none", "orange"])
shapes.append([paths[side2], "none", "red"])
return shapes
def fill_grid(self, paths):
grid = Grid(paths)
draw_fill(grid, paths)
# need to find the next location to stitch to. It needs to zig-zag, so we need to
# keep a record of what direction it was going in
going_east = True
rounds = 1
num_empty = grid.count_empty()
while num_empty > 0:
curr_pos = grid.find_upper_corner()
to = Stitch(["STITCH"], curr_pos.real * self.scale,
curr_pos.imag * self.scale,
color=self.fill_color)
self.stitches.append(to)
blocks_covered = int(MAXIMUM_STITCH / MINIMUM_STITCH_LENGTH)
while grid.grid_available(curr_pos):
for i in range(0, blocks_covered):
sign = 1.0 if going_east else -1.0
test_pos = curr_pos + sign * i * MINIMUM_STITCH_LENGTH
if not grid.grid_available(test_pos):
break
else:
next_pos = test_pos + 1j * MINIMUM_STITCH_LENGTH
going_east = not going_east
to = Stitch(["STITCH"], next_pos.real * self.scale,
next_pos.imag * self.scale,
color=self.fill_color)
self.stitches.append(to)
curr_pos = next_pos
draw_fill(grid, paths)
new_num_empty = grid.count_empty()
if new_num_empty == num_empty:
print("fill was not able to fill any parts of the grid!")
break
else:
num_empty = new_num_empty
rounds += 1
def fill_scan(self, paths):
lines = scan_lines(paths)
self.attributes = [{"stroke": self.fill_color} for i in range(len(lines))]
lines, self.attributes = sort_paths(lines, self.attributes)
if isinstance(lines, list):
if len(lines) == 0:
return
start_point = lines[0].start
else:
start_point = lines.start
to = Stitch(["STITCH"], start_point.real * self.scale,
start_point.imag * self.scale, color=self.fill_color)
self.stitches.append(to)
for line in lines:
to = Stitch(["STITCH"], line.start.real * self.scale,
line.start.imag * self.scale, color=self.fill_color)
self.stitches.append(to)
to = Stitch(["STITCH"], line.end.real * self.scale,
line.end.imag * self.scale, color=self.fill_color)
self.stitches.append(to)
def cross_stitch_to_pattern(self, _image):
# this doesn't work well for images with more than 2-3 colors
max_dimension = max(_image.size)
pixel_ratio = int(max_dimension*MINIMUM_STITCH_LENGTH/(4*25.4))
if pixel_ratio != 0:
_image = _image.resize((_image.size[0] // pixel_ratio, _image.size[1] // pixel_ratio))
pixels = posturize(_image)
paths = []
attrs = []
for color in pixels:
for pixel in pixels[color]:
rgb = "#%02x%02x%02x" % (pixel[2][0], pixel[2][1], pixel[2][2])
x = pixel[0]
y = pixel[1]
attrs.append({"fill": "none", "stroke": rgb})
paths.append(Path(Line(start=x + 1j * y,
end=x + 0.5 * MINIMUM_STITCH_LENGTH + 1j * (y + MINIMUM_STITCH_LENGTH))))
debug_paths = [[path, attrs[i]["fill"], attrs[i]["stroke"]] for i, path in enumerate(paths)]
write_debug("png", debug_paths)
self.all_paths = paths
self.attributes = attrs
self.scale = 1.0
self.generate_pattern()
def fill_voronoi(self, paths):
points = []
for path in paths:
num_stitches = 100.0 * path.length() / MAXIMUM_STITCH
ppoints = [path.point(i / num_stitches) for i in range(int(num_stitches))]
for ppoint in ppoints:
points.append([ppoint.real, ppoint.imag])
points.append([path.end.real, path.end.imag])
vor = Voronoi(points)
vertices = vor.vertices
pxs = [x[0] for x in points]
pys = [-x[1] for x in points]
if PLOTTING:
plt.plot(pxs, pys)
# restrict the points to ones within the shape
vertices = [x for i, x in enumerate(vertices)
if path1_is_contained_in_path2(Line(end=x[0] + x[1] * 1j,
start=x[0] + 0.01 + x[
1] * 1j),
Path(*paths))]
# now sort the vertices. This is close but not quite what is being done in
# sort_paths
new_vertices = []
start_location = points[0]
while len(vertices) > 0:
vertices = sorted(vertices,
key=lambda x: (start_location[0] - x[0]) ** 2
+ (start_location[1] - x[1]) ** 2)
new_vertices.append(vertices.pop(0))
start_location = new_vertices[-1]
vertices = new_vertices
# now smooth out the vertices
vertices = [[[x[0] for x in vertices[i:i + 3]],
[x[1] for x in vertices[i:i + 3]]]
for i in range(0, len(vertices) - 3)]
vertices = [[average(x[0]), average(x[1])] for x in vertices]
# we want each vertice to be about equidistant
vertices = make_equidistant(vertices, MINIMUM_STITCH_LENGTH / 2.0)
xs = [x[0] for x in vertices]
ys = [-x[1] for x in vertices]
if PLOTTING:
plt.plot(xs, ys, 'r-')
stitchx = [vertices[0][0]]
stitchy = [vertices[0][1]]
# make spines
for i in range(len(vertices) - 1):
intersections = perpendicular(vertices[i][0] + vertices[i][1] * 1j,
vertices[i + 1][0] + vertices[i + 1][
1] * 1j,
Path(*paths))
diff = abs(intersections[0] - intersections[1])
if diff > 9:
continue
stitchx.append(intersections[0].real)
stitchy.append(-intersections[0].imag)
stitchx.append(intersections[1].real)
stitchy.append(-intersections[1].imag)
for i in range(len(stitchx)):
to = Stitch(["STITCH"], stitchx[i] * self.scale,
-stitchy[i] * self.scale, color=self.fill_color)
self.stitches.append(to)
if PLOTTING:
plt.plot(stitchx, stitchy, 'g-')
plt.xlim(min(pxs), max(pxs))
plt.ylim(min(pys), max(pys))
# plt.show()
def fill_trap(self, paths, color="gray"):
side = shorter_side(paths)
shapes = [[Path(*paths), "none", "black"],
[Path(*paths[side:side + 3]), color, "none"]]
side2 = side + 2
shapes = self.fill_shape(side, side2, paths, shapes)
write_debug("fill", shapes)
return side, side2
def fill_triangle(self, paths, color="green"):
triangle_sides = [paths[0], paths[1],
Line(start=paths[2].start, end=paths[0].start)]
shapes = [[Path(*paths), "none", "black"],
[Path(*triangle_sides), color, "none"]]
lengths = [p.length() for p in triangle_sides]
side1 = argmax(lengths)
lengths[side1] = 0
side2 = argmax(lengths)
shapes = self.fill_shape(side1, side2, triangle_sides, shapes)
write_debug("fill", shapes)
if __name__ == "__main__":
start = time()
args = parser.parse_args()
filename = args.filename
dig = Digitizer(filename=filename, fill=args.fill)
end = time()
filename += ".fill" if args.fill else ""
print("digitizer time: %s" % (end - start))
# remove previous density files
try:
measure_density(dig.pattern)
except ValueError as e:
pass
pattern = remove_short(dig.pattern)
pattern = de_densify(pattern)
measure_density(pattern)
shorten_jumps(dig.pattern)
pattern_to_csv(pattern, join(OUTPUT_DIRECTORY, filename + ".csv"))
pattern_to_svg(pattern, join(OUTPUT_DIRECTORY, filename + ".svg"))
pes_filename = join(OUTPUT_DIRECTORY, filename + ".pes")
bef = BrotherEmbroideryFile(pes_filename)
bef.write_pattern(pattern)
upload(pes_filename)
|
120197
|
from pyunity import Behaviour, ShowInInspector, RectTransform, Screen, Vector2, Input, CheckBox, Text, SceneManager, GameObject, Canvas, Texture2D, Gui, RectOffset, Logger, Image2D, FontLoader, RGB
import os
class Mover2D(Behaviour):
rectTransform = ShowInInspector(RectTransform)
speed = ShowInInspector(float, 300)
def Start(self):
self.rectTransform.offset.Move(Screen.size / 2)
def Update(self, dt):
movement = Vector2(Input.GetAxis("Horizontal"), -Input.GetAxis("Vertical"))
self.rectTransform.offset.Move(movement * dt * self.speed)
self.rectTransform.rotation += 270 * dt
class FPSTracker(Behaviour):
text = ShowInInspector(Text)
def Start(self):
self.a = 0
def Update(self, dt):
self.a += dt
if self.a > 0.05:
self.text.text = str(1 / dt)
self.a = 0
class CheckboxTracker(Behaviour):
check = ShowInInspector(CheckBox)
text = ShowInInspector(Text)
def Update(self, dt):
self.text.text = "On" if self.check.checked else "Off"
def main():
scene = SceneManager.AddScene("Scene")
canvas = GameObject("Canvas")
canvas.AddComponent(Canvas)
scene.Add(canvas)
imgObject = GameObject("Image", canvas)
rectTransform = imgObject.AddComponent(RectTransform)
rectTransform.offset = RectOffset.Rectangle(100)
imgObject.AddComponent(Mover2D).rectTransform = rectTransform
img = imgObject.AddComponent(Image2D)
img.depth = -0.1
img.texture = Texture2D(os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), "example8", "logo.png"))
scene.Add(imgObject)
rect, button, text = Gui.MakeButton(
"Button", scene, "Click me", FontLoader.LoadFont("Consolas", 20))
rect.transform.ReparentTo(canvas.transform)
rect.offset = RectOffset(Vector2(40, 25), Vector2(190, 50))
button.callback = lambda: Logger.Log("Clicked")
rect, checkbox = Gui.MakeCheckBox("Checkbox", scene)
rect.transform.ReparentTo(canvas.transform)
rect.offset = RectOffset(Vector2(300, 50), Vector2(325, 75))
label = GameObject("Label")
text = label.AddComponent(Text)
text.text = "Off"
text.color = RGB(0, 0, 0)
label.AddComponent(RectTransform).offset = RectOffset(
Vector2(330, 50), Vector2(425, 75))
label.transform.ReparentTo(canvas.transform)
scene.Add(label)
tracker = rect.AddComponent(CheckboxTracker)
tracker.text = text
tracker.check = checkbox
t = GameObject("Text", canvas)
rect = t.AddComponent(RectTransform)
rect.anchors.SetPoint(Vector2(1, 0))
rect.offset.min = Vector2(-150, 25)
text = t.AddComponent(Text)
text.text = "60"
text.color = RGB(0, 0, 0)
t.AddComponent(FPSTracker).text = text
scene.Add(t)
SceneManager.LoadScene(scene)
if __name__ == "__main__":
main()
|
120213
|
import unittest
from FakeSocket import FakeSocket
import tHome as T
#===========================================================================
#===========================================================================
class TestAcTotalPower ( T.util.test.Case ) :
def test_acTotalPower( self ):
reply = """
53 4D 41 00 00 04 02 A0 00 00
00 01 00 42 00 10 60 65 10 90
7D 00 AB 94 40 3B 00 A0 F7 00
E0 27 06 72 00 00 00 00 00 00
12 80 01 02 00 51 00 00 00 00
00 00 00 00 01 3F 26 40 86 22
AF 53 6A 0F 00 00 6A 0F 00 00
6A 0F 00 00 6A 0F 00 00 01 00
00 00 00 00 00 00
"""
l = T.sma.Link( "fake", connect=False )
try:
l.socket = FakeSocket( T.util.hex.toBytes( reply ) )
o1 = l.acTotalPower()
l.decode = False
buf, decoder = l.acTotalPower()
o2 = decoder( buf )
finally:
l.socket = None
right = T.util.Data(
acPower = 3946.0,
)
print(o1)
for k in right.keys():
r = right[k]
self.eq( getattr( o1, k ), r, k )
self.eq( getattr( o2, k ), r, k )
#===========================================================================
|
120237
|
import os, imaplib, email, email.policy, subprocess, time
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import decode_header
from ansi2html import Ansi2HTMLConverter
from flask import Flask
app = Flask(__name__)
mail = ''
pas = ''
number = ''
imap_server = ''
def get_text(mail,pas,number,imap_host):
imap = imaplib.IMAP4_SSL(imap_host)
imap.login(mail, pas)
imap.select('Inbox')
result,data = imap.uid('search',None,"ALL")
inbox_item = data[0].split()
most_recent = inbox_item[-1]
result2,email_data = imap.uid('fetch',most_recent,'(RFC822)')
raw_email = email_data[0][1].decode("utf-8")
b = email.message_from_string(raw_email,policy = email.policy.default)
mail_bytes = []
if b['From'] == number and b.is_multipart() == True:
print('You received a message from: ' + b['From'])
for p in b.get_payload():
mail_bytes.append(p.get_payload(decode=True))
return mail_bytes[1].decode('utf-8')
elif b['From'] == number:
return b.get_payload(decode=True).decode('utf-8').strip()
else:
return "No command from email yet try refreshing the page"
def command_to_html(cmd):
print(str(cmd))
command = os.popen(cmd)
conv = Ansi2HTMLConverter()
ansi = "".join(command.read())
html = conv.convert(ansi)
return html
@app.route('/')
def index():
time.sleep(10)
command = get_text(mail,pas,number,imap_server)
return f"You ran {command}\n\n {command_to_html(command)}"
if __name__ == '__main__':
app.run()
|
120271
|
from rest_framework import viewsets
from rest_framework.permissions import AllowAny
from mliyweb.api.v2.serializers import InstanceSerializer
from mliyweb.models import Instance
class InstanceViewSet(viewsets.ModelViewSet):
queryset = Instance.objects.all().exclude(state__iexact='terminated')
serializer_class = InstanceSerializer
permission_classes = [AllowAny]
|
120294
|
import datetime
import json
import sys
from caresjpsutil import PythonLogger
from pyproj import Proj, transform
import admsTest
from admsAplWriterShip import admsAplWriter
from admsInputDataRetrieverChimney import admsInputDataRetriever
from config import Constants
from adms_apl_builder import *
pythonLogger = PythonLogger('admsTest.py')
def get_input(*args):
sourceCRS = Proj(init='epsg:4326')
targetCRS = Proj(init=args[5][:4].lower() + args[5][4:])
bdn_data = json.loads(args[1].replace("'", '"'))
coor_data = str(args[2]).replace("'", '"')
ships = json.loads(args[3])
working_dir = str(args[4])
coord_sys = args[5][5:]
precipitation = float(str(args[6]))
chimney_iri = str(args[7])
BDN = admsTest.get_bdn(bdn_data)
coords = admsTest.get_coordinates(coor_data)
pollutants = [Constants.POL_CO2, Constants.POL_CO, Constants.POL_NO2, Constants.POL_HC, Constants.POL_NOX,
Constants.POL_PART_001, Constants.POL_PART_SO2, Constants.POL_PART_O3]
ship_coordinates_list = []
chimney_iri_list = []
for ship in ships:
x_coordinate_value = float(ship['lon'])
y_coordinate_value = float(ship['lat'])
ship_coordinates_list.append(list(transform(sourceCRS, targetCRS, x_coordinate_value, y_coordinate_value)))
chimney_iri_list.append(chimney_iri)
test = admsInputDataRetriever(chimney_iri_list, Constants.BLD_TOPNODE, coords, pollutants, 2,
Constants.BLD_LIMIT,
False, BDN, targetCRS)
result = test.get()
pythonLogger.postInfoToLogServer('calling admsAplWriter ...')
result['Bdn'] = BDN
result['CoordiSys'] = coord_sys
latitudemid = (float(coords[Constants.KEY_MIN_Y]) + float(coords[Constants.KEY_MAX_Y])) / 2
longitudemid = (float(coords[Constants.KEY_MIN_X]) + float(coords[Constants.KEY_MAX_X])) / 2
xmid, ymid = transform(targetCRS, sourceCRS, longitudemid, latitudemid)
result['Met'] = working_dir + '/test.met'
result['Lat'] = ymid
result['Bkg'] = working_dir + '/testbackgrnd.bgd'
if "2326" in args[5][5:]:
result['terrindicator'] = "1"
else:
result['terrindicator'] = "0"
result['chemindicator'] = "1"
result['wetindicator'] = "1"
now = datetime.datetime.now()
hournow = now.hour + 1
if not (6 <= hournow <= 18):
result['night'] = "1"
result['dirnight'] = "C:\JPS_DATA\working_dir\JPS\ADMS\chemistrynight.AAI"
else:
result['night'] = "0"
result['dirnight'] = ""
annualprecipitation = precipitation * 365 * 24
if annualprecipitation < 103:
so2washout = 0.000001 / 500 * annualprecipitation
else:
so2washout = 0.0000019 + annualprecipitation * 0.0000000008
if precipitation < 0.5:
pm10washout = 0.0016
elif precipitation > 4:
pm10washout = 0.0072
else:
pm10washout = 0.00363
result['so2washout'] = so2washout
result['pm10washout'] = pm10washout
for idx in range(len(ship_coordinates_list)):
result['Src'][idx].setCoordinates(ship_coordinates_list[idx])
result['Src'][idx].SrcName = "Chimney-{0}".format(idx + 1)
return result, working_dir
def save_apl(*args):
# get_input returns a (result, working_dir) tuple; the call below assumes the
# writer takes the data and the output file path (hedged: admsAplWriter's exact
# signature is not shown in this record)
result, working_dir = get_input(*args)
writer = admsAplWriter(result, working_dir + Constants.FILE_NAME_APL)
writer.write()
def main(*args):
try:
builder = AdmsAplShipBuilder()
director = AplDirector()
director.set_builder(builder)
apl = director.get_apl()
apl.specification()
save_apl(*args)
except Exception as e:
pythonLogger.postErrorToLogServer(e)
if __name__ == "__main__":
main(*sys.argv)
|
120309
|
import unittest
from dojo import primes
class DojoTest(unittest.TestCase):
def test_zero(self):
primes_list = list(primes(0))
self.assertListEqual(primes_list, [])
def test_one(self):
primes_list = list(primes(3))
self.assertListEqual(primes_list, [2])
def test_two(self):
primes_list = list(primes(12))
self.assertListEqual(primes_list, [2,3,5,7,11])
if __name__ == '__main__':
unittest.main()
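# The dojo module itself is not included in this record; a minimal primes(n)
# generator consistent with the assertions above (all primes strictly below n)
# could look like this -- the real dojo.primes may differ:
#   def primes(n):
#       found = []
#       for candidate in range(2, n):
#           if all(candidate % p for p in found):
#               found.append(candidate)
#               yield candidate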
|
120339
|
import os
import re
re_class = re.compile((
r'class\s+(\w+)(?:\s*<[\w\.,\s]+>)?'
r'(?:\s+implements\s+(?:[\w\.]+)(?:\s*<[\w\.,\s]+>)?)*'
r'(?:\s+extends\s+([\w\.]+)(?:\s*<[\w\.,\s]+>)?)?'
r'(?:\s+implements\s+(?:[\w\.]+)(?:\s*<[\w\.,\s]+>)?)*'
), re.M)
re_comments = re.compile(
r'(//[^\n\r]*?[\n\r]|/\*(.*?)\*/)', re.MULTILINE | re.DOTALL)
re_import = re.compile(r'import\s+([\w\.*]+)[^;]*;', re.MULTILINE)
re_package = re.compile(r'package\s*([a-z0-9.]*);', re.I | re.M)
re_type_decl = re.compile(
r'(?:abstract|class|interface|enum|typedef)\s+(\w+)', re.M)
def find_class_declarations(src):
return [mo for mo in re_class.finditer(src)]
def find_comment_regions(src):
regions = []
for mo in re_comments.finditer(src):
regions.append((mo.start(0), mo.end(0)))
return regions
def find_field_declaration(src, field_name, type_name=None):
mo = re.search(
r'((?:override|static|macro|inline|public|private|#if.*?#end)\s+)*'
r'(?:(?:var|function))\s+%s' % field_name,
src)
if mo:
return mo.group(0)
return None
def find_type_path(type_name, type_map, imported_type_map, package_path):
if '.' in type_name:
return type_name
if type_name in type_map:
package = type_map[type_name]
if is_string(package):
return join_type(package, type_name)
else:
for p in package:
if p == '':
continue
for imp in imported_type_map:
if imp == '*':
continue
print(p, imp, imported_type_map[imp])
if p == imported_type_map[imp].rpartition('.')[0]:
return join_type(p, type_name)
if '*' in imported_type_map:
for imp in imported_type_map['*']:
imp_pk = imp.rpartition('.')[0]
for p in package:
if p == imp_pk:
return join_type(p, type_name)
if package_path in package:
return join_type(package_path, type_name)
if '' in package:
return type_name
return None
def find_line_positions(src):
lines = src.split('\n')
pos = 0
positions = []
for line in lines:
pos += len(line) + 1
positions.append(pos)
return positions
def find_module_filepath(type_name, classpaths):
rel_module_filepath = to_module_filepath(type_name)
for cp in classpaths:
if not cp:
continue
module_filepath = os.path.join(cp, rel_module_filepath)
if os.path.isfile(module_filepath):
return module_filepath
return None
def get_package(path):
parts = path.split('.')
if parts and not is_type(parts[-1]):
parts.pop()
if len(parts) > 1 and is_type(parts[-1]) and is_type(parts[-2]):
parts.pop()
if parts and is_type(parts[-1]):
parts.pop()
return '.'.join(parts)
def get_parent_path(path):
return path.rpartition('.')[0]
def has_module_in_path(type_path):
parts = type_path.split('.')
return len(parts) > 1 and is_type(parts[-1]) and is_type(parts[-2])
def is_imported(type_paths, type_map, imported_type_map, all=True):
for type_path in type_paths:
type_pk, _, type_name = type_path.rpartition('.')
if has_module_in_path(type_path):
type_paths.append(type_path.rpartition('.')[0])
if type_name in imported_type_map and \
imported_type_map[type_name] == type_path:
if not all:
return True
continue
tp_is_imported = False
if '*' in imported_type_map:
package = type_map[type_name]
if is_string(package):
tp_is_imported = package in imported_type_map['*']
else:
for imp in imported_type_map['*']:
imp_pk = imp.rpartition('.')[0]
for p in package:
if p == type_pk and p == imp_pk:
tp_is_imported = True
break
if tp_is_imported:
break
if not tp_is_imported and all:
return False
if tp_is_imported and not all:
return True
return all
def is_in_package(type_path, package):
type_package = get_package(type_path)
return type_package == package
def is_string(value):
val = False
try:
val = isinstance(value, basestring)
except:
val = isinstance(value, str)
return val
def is_type(type_name, type_map=None):
c = type_name[0]
result = c != '_' and c.upper() == c
if result and type_map:
result = type_name in type_map
return result
def join_type(package, type_name):
if package:
type_name = package + '.' + type_name
return type_name
def parse_declared_type_names(src, as_dict):
lst = None if as_dict else []
dct = {} if as_dict else None
for mo in re_type_decl.finditer(src):
if as_dict:
dct[mo.group(1)] = True
else:
lst.append(mo.group(1))
return dct if as_dict else lst
def parse_imports(src, as_dict=False):
lst = None if as_dict else []
dct = {} if as_dict else None
for mo in re_import.finditer(src):
imp_path = mo.group(1)
if as_dict:
imp_name = imp_path.rpartition('.')[2]
if imp_name == '*':
if imp_name in dct:
dct[imp_name].append(imp_path)
else:
dct[imp_name] = [imp_path]
else:
dct[imp_name] = imp_path
else:
lst.append(imp_path)
return dct if as_dict else lst
def parse_package(src):
mo = re_package.search(src)
if mo:
return mo.group(1)
return ''
def remove_comments(text):
return re_comments.sub('', text)
def to_module_filepath(type_path):
if has_module_in_path(type_path):
type_path = type_path.rpartition('.')[0]
path = os.sep.join(type_path.split('.'))
return '%s.hx' % path
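# A hedged example of how these helpers compose (the Haxe snippet is made up
# purely for illustration):
#   src = "package com.example;\nimport haxe.ds.StringMap;\nclass Foo extends Bar {}"
#   src = remove_comments(src)
#   parse_package(src)                      # -> 'com.example'
#   parse_imports(src, as_dict=True)        # -> {'StringMap': 'haxe.ds.StringMap'}
#   [m.group(1) for m in find_class_declarations(src)]   # -> ['Foo']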
|
120377
|
import os
import shutil
import json
root_dir = os.pardir
libmem_dir = f"{root_dir}{os.sep}libmem"
project_dir = os.curdir
project_src_dir = f"{project_dir}{os.sep}src/libmem-py"
clean_script = "clean.py"
print(f"[+] Creating '{clean_script}'...")
keep_dirs = []
keep_files = []
for (path, dirs, files) in os.walk(os.curdir):
keep_dirs.append(path)
keep_files.extend([f"{path}{os.sep}{f}" for f in files])
json_dict = {
"dirs" : keep_dirs,
"files" : keep_files
}
json_data = json.dumps(json_dict)
with open("tree.json", "w") as tree_file:
tree_file.write(json_data)
tree_file.close()
print(f"[-] Creation complete")
print("[+] Configuring files...")
project_files = {
# (src_dir : dst_dir) [ files ]
(root_dir, project_dir) : [
"README.md",
"LICENSE"
],
(libmem_dir, project_src_dir) : [
"libmem.h",
"libmem.c"
]
}
for i in project_files:
src_dir = i[0]
dst_dir = i[1]
files = project_files[i]
print(f"[*] Source Directory: {src_dir}")
print(f"[*] Destination Directory: {dst_dir}")
print(f"[*] Files: {files}")
for f in files:
shutil.copy(f"{src_dir}{os.sep}{f}", dst_dir)
print("====================")
print("[-] Configuration complete")
|
120398
|
import pytest
from dbt.tests.util import run_dbt
models_get__any_model_sql = """
-- models/any_model.sql
select {{ config.get('made_up_nonexistent_key', 'default_value') }} as col_value
"""
class TestConfigGetDefault:
@pytest.fixture(scope="class")
def models(self):
return {"any_model.sql": models_get__any_model_sql}
def test_config_with_get_default(
self,
project,
):
# This test runs a model with a config.get(key, default)
# The default value is 'default_value' and causes an error
results = run_dbt(["run"], expect_pass=False)
assert len(results) == 1
assert str(results[0].status) == "error"
assert 'column "default_value" does not exist' in results[0].message
|
120412
|
import util
def test_is_gpu_available():
# for i in range(4):
if util.tf.is_gpu_available():
print("GPU is available, %s CUDA installed" % ('with' if util.tf.is_gpu_available(True) else 'without'))
def test_get_available_gpus():
devices = util.tf.get_available_gpus()
for d in devices:
print(d)
if util.mod.is_main(__name__):
test_is_gpu_available()
test_get_available_gpus()
|
120413
|
from hwt.doc_markers import internal
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.codeBlockContainer import HdlStmCodeBlockContainer
from hwt.hdl.statements.statement import HdlStatement
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
@internal
def getMaxStmIdForStm(stm):
"""
Get maximum _instId from all assignments in statement,
used for sorting of processes in architecture
"""
maxId = 0
if isinstance(stm, HdlAssignmentContainer):
return stm._instId
else:
for _stm in stm._iter_stms():
maxId = max(maxId, getMaxStmIdForStm(_stm))
return maxId
def RtlSignal_sort_key(s: RtlSignalBase):
return (s.name, s._instId)
def HdlStatement_sort_key(stm: HdlStatement):
if isinstance(stm, HdlStmCodeBlockContainer) and stm.name is not None:
return (stm.name, getMaxStmIdForStm(stm))
else:
return ("", getMaxStmIdForStm(stm))
|
120415
|
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, List, Optional, Set, Union, overload
class LLVMType(metaclass=ABCMeta):
@abstractmethod
def to_json(self) -> Any: pass
class LLVMIntType(LLVMType):
def __init__(self, width : int) -> None:
self.width = width
def to_json(self) -> Any:
return {'type': 'primitive type', 'primitive': 'integer', 'size': self.width}
class LLVMArrayType(LLVMType):
def __init__(self, elemtype : 'LLVMType', size : int) -> None:
self.size = size
self.elemtype = elemtype
def to_json(self) -> Any:
return { 'type': 'array',
'element type': self.elemtype.to_json(),
'size': self.size }
class LLVMPointerType(LLVMType):
def __init__(self, points_to : 'LLVMType') -> None:
self.points_to = points_to
def to_json(self) -> Any:
return {'type': 'pointer', 'to type': self.points_to.to_json()}
class LLVMAliasType(LLVMType):
def __init__(self, name : str) -> None:
self.name = name
def to_json(self) -> Any:
return {'type': 'type alias',
'alias of': self.name}
class LLVMStructType(LLVMType):
def __init__(self, field_types : List[LLVMType]) -> None:
self.field_types = field_types
def to_json(self) -> Any:
return {'type': 'struct',
'fields': [fld_ty.to_json() for fld_ty in self.field_types]}
class LLVMPackedStructType(LLVMType):
def __init__(self, field_types : List[LLVMType]) -> None:
self.field_types = field_types
def to_json(self) -> Any:
return {'type': 'packed struct',
'fields': [fld_ty.to_json() for fld_ty in self.field_types]}
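# Hedged usage sketch (illustrative only): the JSON description of an i8**,
# i.e. a pointer to a pointer to an 8-bit integer:
#   ty = LLVMPointerType(LLVMPointerType(LLVMIntType(8)))
#   ty.to_json()
#   # -> {'type': 'pointer', 'to type': {'type': 'pointer', 'to type':
#   #      {'type': 'primitive type', 'primitive': 'integer', 'size': 8}}}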
|
120429
|
from struct import unpack
from math import ceil, floor
def unsigned_int(_bytes, pointer):
return unpack('I', _bytes[pointer:pointer + 4])[0]
def unsigned_char(_bytes, pointer):
return unpack('B', _bytes[pointer:pointer + 1])[0]
def float_(_bytes, pointer):
return unpack('f', _bytes[pointer:pointer + 4])[0]
def bin32(num):
return f'{bin(num)[2:]:>32}'.replace(' ', '0')
def bin16(num):
return f'{bin(num)[2:]:>16}'.replace(' ', '0')
class Ref(object):
pass
def entry(file):
buffer = bytearray(0x100000)
with open(file, 'rb') as f:
_buffer = f.read()
flength = len(_buffer)
buffer[:flength] = _buffer
return buffer
# Collapse consecutive held-down frames into single key-press frames
# Only used for the Shift, Z and X keys
# (Perfect Cherry Blossom, Imperishable Night, Phantasmagoria of Flower View)
def filter_constant_frame(frame_list):
result_frame_list=[]
for i, frame in enumerate(frame_list):
if i==0 or (frame!=frame_list[i-1]+1):
result_frame_list.append(frame)
return result_frame_list
# Derive the correct frame count from the data length
def true_frame(llength):
frame = floor(llength / (6 + 1/30))
if frame * 6 + ceil(frame / 30) == llength:
return frame
# Brute-force search around the estimate
for i in range(frame - 10, frame + 10):
if i * 6 + ceil(i / 30) == llength:
return i
raise Exception("Can't correct the frame length")
def correct_true_frame(llength):
try:
return true_frame(llength)
except Exception:
# Keep adding 65536 until a valid frame count can be derived
return correct_true_frame(llength + 65536)
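# Hedged sanity check of the arithmetic above: a 30-frame stream occupies
# 30 * 6 + ceil(30 / 30) = 181 bytes, so true_frame(181) == 30, and
# filter_constant_frame([10, 11, 12, 20, 21]) == [10, 20].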
|
120430
|
a = [int(x) for x in input().split()]
time = None
# a[0] initial hour
# a[1] initial min
# a[2] final hour
# a[3] final min
start = 60 * a[0] + a[1]
finish = 60 * a[2] + a[3]
if finish <= start:
finish += 1440 # 24 * 60
time = finish - start
print(f"O JOGO DUROU {int(time / 60)} HORA(S) E {int(time % 60)} MINUTO(S)")
|
120446
|
try:
import unittest2 as unittest
except ImportError:
import unittest
class TestCase(unittest.TestCase):
'''
We use this base class for all the tests in this package.
If necessary, we can put common utility or setup code in here.
'''
# vim:set ft=python:
|
120459
|
import threading
# Notes: 1. Correctness relies on the GIL
#        2. The iterator is not thread-safe, so do not access it from multiple threads at the same time
class _SimplePrefetcherIterator:
def __init__(self, iterable, low_limit: int, high_limit: int):
super(_SimplePrefetcherIterator, self).__init__()
self.iterable = iterable
self.queue = []
self.low_limit = low_limit
self.high_limit = high_limit
self.low_limit_condition = threading.Condition(threading.Lock())
self.produced_condition = threading.Condition(threading.Lock())
self.end_flag = False
self.thread_exit_flag = False
self.thread = threading.Thread(target=self.worker, daemon=True)
self.thread.start()
self.exp = None
def __del__(self):
if self.thread.is_alive():
self.thread_exit_flag = True
with self.low_limit_condition:
    self.low_limit_condition.notify()
self.thread.join()
def __next__(self):
if len(self.queue) == 0:
if self.end_flag:
raise StopIteration
else:
with self.produced_condition:
while True:
# Release GIL
if self.produced_condition.wait(0.5):
break
else:
if len(self.queue) != 0:
break
elif self.end_flag:
if self.exp is not None:
raise self.exp
raise StopIteration
# Release GIL
if not self.thread.is_alive():
if self.exp is not None:
raise self.exp
else:
raise Exception('Worker exited unexpectedly')
item = self.queue.pop(0)
if len(self.queue) <= self.low_limit:
with self.low_limit_condition:
self.low_limit_condition.notify()
return item
def worker(self):
try:
iterator = iter(self.iterable)
while True:
if self.thread_exit_flag:
return
if len(self.queue) >= self.high_limit:
with self.low_limit_condition:
self.low_limit_condition.wait()
continue
try:
item = next(iterator)
self.queue.append(item)
if len(self.queue) == 1:
with self.produced_condition:
self.produced_condition.notify()
except (StopIteration, IndexError):
break
except Exception as e:
self.exp = e
finally:
self.end_flag = True
class SimplePrefetcher:
def __init__(self, iterable, buffer_low_limit: int = 1, buffer_high_limit: int = 3):
assert buffer_low_limit < buffer_high_limit
assert buffer_low_limit >= 0
self.iterable = iterable
self.low_limit = buffer_low_limit
self.high_limit = buffer_high_limit
def __iter__(self):
return _SimplePrefetcherIterator(self.iterable, self.low_limit, self.high_limit)
def __len__(self):
return len(self.iterable)
def __getattr__(self, item):
return getattr(self.iterable, item)
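# Hedged usage sketch (names below are illustrative): wrap any iterable so items
# are produced by a background thread while the consumer does other work:
#   def slow_source():
#       for i in range(5):
#           yield i * i
#   for item in SimplePrefetcher(slow_source(), buffer_low_limit=1, buffer_high_limit=3):
#       print(item)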
|
120491
|
import os
import codecs
import numpy as np
#class CoNLL_Sentence:
# def __init__(self, tokens):
# self.tokens = tokens
#class Simplified_CoNLL_Token:
# def __init__(self, token, token_label, sentence_label):
# self.token = token
# self.token_label = token_label
# self.sentence_label = sentence_label
def parse_conll_file(file, multiple=False):
tokens = []
sentences = []
for line in file.readlines():
if(line == "\n"):
if len(tokens) != 0:
#sentence = CoNLL_Sentence(tokens=tokens)
sentences.append(tokens)
tokens = []
else:
print("That should not happen.")
else:
parts = line.split("\t")
if multiple == False:
token = [parts[0], parts[1], parts[2]]#Simplified_CoNLL_Token(token=parts[0], token_label=parts[1], sentence_label=parts[2])
else:
token = [parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]]
tokens.append(token)
return sentences
def parse_conll_files(path, multiple=False):
sentences = []
for subdir, dirs, files in os.walk(path):
for file in files:
with codecs.open(os.path.join(subdir, file), "r", "utf8") as f:
file_sentences = parse_conll_file(f, multiple=multiple)
sentences.append(file_sentences)
return sentences
def transform_to_model_input(sentences):
x = []
y_arg = []
y_rhet = []
for sentence in sentences:
x_sentence = []
y_sentence_arg = []
y_sentence_rhet = []
for token in sentence:
x_sentence.append(token[0])
y_sentence_arg.append(token[1])
y_sentence_rhet.append(token[2])
x.append(np.array(x_sentence))
y_arg.append(np.array(y_sentence_arg))
y_rhet.append(np.array(y_sentence_rhet))
return np.array(x), np.array(y_arg), np.array(y_rhet)
def transform_to_model_input_multiple(sentences):
x = []
y_arg = []
y_rhet = []
y_aspect = []
y_summary = []
y_citation = []
for sentence in sentences:
x_sentence = []
y_sentence_arg = []
y_sentence_rhet = []
y_sentence_aspect = []
y_sentence_summary = []
y_sentence_citation = []
for token in sentence:
x_sentence.append(token[0])
y_sentence_arg.append(token[1])
y_sentence_rhet.append(token[2])
y_sentence_aspect.append(token[3])
y_sentence_summary.append(token[4])
y_sentence_citation.append(token[5])
x.append(np.array(x_sentence))
y_arg.append(np.array(y_sentence_arg))
y_rhet.append(np.array(y_sentence_rhet))
y_aspect.append(np.array(y_sentence_aspect))
y_summary.append(np.array(y_sentence_summary))
y_citation.append(np.array(y_sentence_citation))
return np.array(x), np.array(y_arg), np.array(y_rhet), np.array(y_aspect), np.array(y_summary), np.array(y_citation)
def load_data(path="./../annotations_conll"):
sentences = parse_conll_files(path)
flat_sentences = [item for sublist in sentences for item in sublist]
x, y_arg, y_rhet = transform_to_model_input(flat_sentences)
print("Data size: " + str(len(x)))
return x, y_arg, y_rhet
def load_data_multiple(path=""):
sentences = parse_conll_files(path, multiple=True)
flat_sentences = [item for sublist in sentences for item in sublist]
x, y_arg, y_rhet, y_aspect, y_summary, y_citation = transform_to_model_input_multiple(flat_sentences)
print("Data size: " + str(len(x)))
return x, y_arg, y_rhet, y_aspect, y_summary, y_citation
def main():
print("Process started")
sentences = parse_conll_files("./annotations_conll")
flat_sentences = [item for sublist in sentences for item in sublist]
x, y_arg, y_rhet = transform_to_model_input(flat_sentences)
print("Process ended")
if __name__ == "__main__":
main()
|
120525
|
import tensorflow as tf
from typing import Tuple
# Copyright 2019 Bisonai Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of paper Searching for MobileNetV3, https://arxiv.org/abs/1905.02244
MobileNetV3 Large
https://github.com/Bisonai/mobilenetv3-tensorflow/blob/master/mobilenetv3_large.py
"""
class MobileNetV3(tf.keras.Model):
def __init__(
self,
num_classes: int=1001,
width_multiplier: float=1.0,
name: str="MobileNetV3_Large",
divisible_by: int=8,
l2_reg: float=1e-5,
):
super().__init__(name=name)
# First layer
self.first_layer = ConvNormAct(
16,
kernel_size=3,
stride=2,
padding=1,
norm_layer="bn",
act_layer="hswish",
use_bias=False,
l2_reg=l2_reg,
name="FirstLayer",
)
# Bottleneck layers
self.bneck_settings = [
# k exp out SE NL s
[ 3, 16, 16, False, "relu", 1 ],
[ 3, 64, 24, False, "relu", 2 ],
[ 3, 72, 24, False, "relu", 1 ],
[ 5, 72, 40, True, "relu", 2 ],
[ 5, 120, 40, True, "relu", 1 ],
[ 5, 120, 40, True, "relu", 1 ],
[ 3, 240, 80, False, "hswish", 2 ],
[ 3, 200, 80, False, "hswish", 1 ],
[ 3, 184, 80, False, "hswish", 1 ],
[ 3, 184, 80, False, "hswish", 1 ],
[ 3, 480, 112, True, "hswish", 1 ],
[ 3, 672, 112, True, "hswish", 1 ],
[ 5, 672, 160, True, "hswish", 2 ],
[ 5, 960, 160, True, "hswish", 1 ],
[ 5, 960, 160, True, "hswish", 1 ],
]
self.bneck = tf.keras.Sequential(name="Bneck")
for idx, (k, exp, out, SE, NL, s) in enumerate(self.bneck_settings):
out_channels = _make_divisible(out * width_multiplier, divisible_by)
exp_channels = _make_divisible(exp * width_multiplier, divisible_by)
self.bneck.add(
LayerNamespaceWrapper(
Bneck(
out_channels=out_channels,
exp_channels=exp_channels,
kernel_size=k,
stride=s,
use_se=SE,
act_layer=NL,
),
name=f"Bneck{idx}")
)
# Last stage
penultimate_channels = _make_divisible(960 * width_multiplier, divisible_by)
last_channels = _make_divisible(1_280 * width_multiplier, divisible_by)
self.last_stage = LastStage(
penultimate_channels,
last_channels,
num_classes,
l2_reg=l2_reg,
)
def call(self, input):
x = self.first_layer(input)
x = self.bneck(x)
x = self.last_stage(x)
return x
def _make_divisible(v, divisor, min_value=None):
"""https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
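# Worked examples of the rounding behaviour (illustrative only, not part of the original source):
#   _make_divisible(84, 8) -> 88   # int(84 + 4) // 8 * 8 = 88
#   _make_divisible(10, 8) -> 16   # 8 would undershoot 10 by more than 10%, so bump by one divisor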
class LayerNamespaceWrapper(tf.keras.layers.Layer):
    """`LayerNamespaceWrapper` is an auxiliary layer that wraps the given `layer`
    under the given `name` namespace. This is useful for better visualization of
    the network in TensorBoard: by default, namespaces defined with nested
    `tf.keras.Sequential` layers keep only the top-level `tf.keras.Sequential` name.
    """
def __init__(
self,
layer: tf.keras.layers.Layer,
name: str,
):
super().__init__(name=name)
self.wrapped_layer = tf.keras.Sequential(
[
layer,
],
name=name,
)
def call(self, input):
return self.wrapped_layer(input)
def get_layer(layer_name, layer_dict, default_layer):
if layer_name is None:
return default_layer
if layer_name in layer_dict.keys():
return layer_dict.get(layer_name)
else:
raise NotImplementedError(f"Layer [{layer_name}] is not implemented")
class ConvNormAct(tf.keras.layers.Layer):
def __init__(
self,
filters: int,
kernel_size: int=3,
stride: int=1,
padding: int=0,
norm_layer: str=None,
act_layer: str="relu",
use_bias: bool=True,
l2_reg: float=1e-5,
name: str="ConvNormAct",
):
super().__init__(name=name)
if padding > 0:
self.pad = tf.keras.layers.ZeroPadding2D(
padding=padding,
name=f"Padding{padding}x{padding}",
)
else:
self.pad = Identity()
self.conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=stride,
name=f"Conv{kernel_size}x{kernel_size}",
kernel_regularizer=tf.keras.regularizers.l2(l2_reg),
use_bias=use_bias,
)
_available_normalization = {
"bn": BatchNormalization(),
}
self.norm = get_layer(norm_layer, _available_normalization, Identity())
_available_activation = {
"relu": tf.keras.layers.ReLU(name="ReLU"),
"relu6": ReLU6(),
"hswish": HardSwish(),
"hsigmoid": HardSigmoid(),
"softmax": tf.keras.layers.Softmax(name="Softmax"),
}
self.act = get_layer(act_layer, _available_activation, Identity())
def call(self, input):
x = self.pad(input)
x = self.conv(x)
x = self.norm(x)
x = self.act(x)
return x
class Bneck(tf.keras.layers.Layer):
def __init__(
self,
out_channels: int,
exp_channels: int,
kernel_size: int,
stride: int,
use_se: bool,
act_layer: str,
l2_reg: float=1e-5,
):
super().__init__(name="Bneck")
self.out_channels = out_channels
self.stride = stride
self.use_se = use_se
# Expand
self.expand = ConvNormAct(
exp_channels,
kernel_size=1,
norm_layer="bn",
act_layer=act_layer,
use_bias=False,
l2_reg=l2_reg,
name="Expand",
)
# Depthwise
dw_padding = (kernel_size - 1) // 2
self.pad = tf.keras.layers.ZeroPadding2D(
padding=dw_padding,
name=f"Depthwise/Padding{dw_padding}x{dw_padding}",
)
self.depthwise = tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=stride,
name=f"Depthwise/DWConv{kernel_size}x{kernel_size}",
depthwise_regularizer=tf.keras.regularizers.l2(l2_reg),
use_bias=False,
)
self.bn = BatchNormalization(name="Depthwise/BatchNormalization")
if self.use_se:
self.se = SEBottleneck(
l2_reg=l2_reg,
name="Depthwise/SEBottleneck",
)
_available_activation = {
"relu": tf.keras.layers.ReLU(name="Depthwise/ReLU"),
"hswish": HardSwish(name="Depthwise/HardSwish"),
}
self.act = get_layer(act_layer, _available_activation, Identity())
# Project
self.project = ConvNormAct(
out_channels,
kernel_size=1,
norm_layer="bn",
act_layer=None,
use_bias=False,
l2_reg=l2_reg,
name="Project",
)
def build(self, input_shape):
self.in_channels = int(input_shape[3])
super().build(input_shape)
def call(self, input):
x = self.expand(input)
x = self.pad(x)
x = self.depthwise(x)
x = self.bn(x)
if self.use_se:
x = self.se(x)
x = self.act(x)
x = self.project(x)
if self.stride == 1 and self.in_channels == self.out_channels:
return input + x
else:
return x
class SEBottleneck(tf.keras.layers.Layer):
def __init__(
self,
reduction: int=4,
l2_reg: float=0.01,
name: str="SEBottleneck",
):
super().__init__(name=name)
self.reduction = reduction
self.l2_reg = l2_reg
def build(self, input_shape):
input_channels = int(input_shape[3])
self.gap = GlobalAveragePooling2D()
self.conv1 = ConvNormAct(
input_channels // self.reduction,
kernel_size=1,
norm_layer=None,
act_layer="relu",
use_bias=False,
l2_reg=self.l2_reg,
name="Squeeze",
)
self.conv2 = ConvNormAct(
input_channels,
kernel_size=1,
norm_layer=None,
act_layer="hsigmoid",
use_bias=False,
l2_reg=self.l2_reg,
name="Excite",
)
super().build(input_shape)
def call(self, input):
x = self.gap(input)
x = self.conv1(x)
x = self.conv2(x)
return input * x
class LastStage(tf.keras.layers.Layer):
def __init__(
self,
penultimate_channels: int,
last_channels: int,
num_classes: int,
l2_reg: float,
):
super().__init__(name="LastStage")
self.conv1 = ConvNormAct(
penultimate_channels,
kernel_size=1,
stride=1,
norm_layer="bn",
act_layer="hswish",
use_bias=False,
l2_reg=l2_reg,
)
self.gap = GlobalAveragePooling2D()
self.conv2 = ConvNormAct(
last_channels,
kernel_size=1,
norm_layer=None,
act_layer="hswish",
l2_reg=l2_reg,
)
self.dropout = tf.keras.layers.Dropout(
rate=0.2,
name="Dropout",
)
self.conv3 = ConvNormAct(
num_classes,
kernel_size=1,
norm_layer=None,
act_layer="softmax",
l2_reg=l2_reg,
)
self.squeeze = Squeeze()
def call(self, input):
x = self.conv1(input)
x = self.gap(x)
x = self.conv2(x)
x = self.dropout(x)
x = self.conv3(x)
x = self.squeeze(x)
return x
class Identity(tf.keras.layers.Layer):
def __init__(self):
super().__init__(name="Identity")
def call(self, input):
return input
class ReLU6(tf.keras.layers.Layer):
def __init__(self):
super().__init__(name="ReLU6")
self.relu6 = tf.keras.layers.ReLU(max_value=6, name="ReLU6")
def call(self, input):
return self.relu6(input)
class HardSigmoid(tf.keras.layers.Layer):
def __init__(self):
super().__init__(name="HardSigmoid")
self.relu6 = ReLU6()
def call(self, input):
return self.relu6(input + 3.0) / 6.0
class HardSwish(tf.keras.layers.Layer):
def __init__(self, name="HardSwish"):
super().__init__(name=name)
self.hard_sigmoid = HardSigmoid()
def call(self, input):
return input * self.hard_sigmoid(input)
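# Note (from the MobileNetV3 paper): HardSigmoid(x) = ReLU6(x + 3) / 6 and
# HardSwish(x) = x * HardSigmoid(x), piecewise-linear approximations of the
# sigmoid and swish activations that are cheap to compute on mobile hardware.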
class Squeeze(tf.keras.layers.Layer):
"""Squeeze the second and third dimensions of given tensor.
(batch, 1, 1, channels) -> (batch, channels)
"""
def __init__(self):
super().__init__(name="Squeeze")
def call(self, input):
x = tf.keras.backend.squeeze(input, 1)
x = tf.keras.backend.squeeze(x, 1)
return x
class GlobalAveragePooling2D(tf.keras.layers.Layer):
    """Return a tensor of shape (batch_size, rows, cols, channels) where rows
    and cols are equal to 1, unlike `tf.keras.layers.GlobalAveragePooling2D`,
    whose output shape is (batch_size, channels).
    """
def __init__(self):
super().__init__(name="GlobalAveragePooling2D")
def build(self, input_shape):
pool_size = tuple(map(int, input_shape[1:3]))
self.gap = tf.keras.layers.AveragePooling2D(
pool_size=pool_size,
name=f"AvgPool{pool_size[0]}x{pool_size[1]}",
)
super().build(input_shape)
def call(self, input):
return self.gap(input)
class BatchNormalization(tf.keras.layers.Layer):
    """Searching for MobileNetV3: All our convolutional layers
    use batch-normalization layers with average decay of 0.99.
    """
def __init__(
self,
momentum: float=0.99,
name="BatchNormalization",
):
super().__init__(name=name)
self.bn = tf.keras.layers.BatchNormalization(
            momentum=momentum,
name="BatchNormalization",
)
def call(self, input):
return self.bn(input)
def build_mobilenet(
input_shape: Tuple[int, int, int]=(64, 64, 3), # (224,224,3)
num_classes: int=11, # 1001
width_multiplier: float=1.0,
l2_reg: float=1e-5,):
assert len(input_shape) == 3, "`input_shape` should be a tuple representing input data shape (height, width, channels)"
model = MobileNetV3(
num_classes=num_classes,
width_multiplier=width_multiplier,
l2_reg=l2_reg,
)
input_tensor = tf.keras.layers.Input(shape=input_shape)
output_tensor = model(input_tensor)
model = tf.keras.Model(
        inputs=[input_tensor],
        outputs=[output_tensor],
)
return model
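# Minimal usage sketch (illustrative; the shapes and class count below are arbitrary assumptions):
#   model = build_mobilenet(input_shape=(64, 64, 3), num_classes=11)
#   model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
#   probs = model(tf.zeros((1, 64, 64, 3)))   # -> shape (1, 11), softmax probabilities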
|
120562
|
import tables as PT
# This describes indexes in the "pt_undistorted" tuple. These are
# used in MainBrain.py, flydra_tracker.py, and kalmanize.py
PT_TUPLE_IDX_X = 0
PT_TUPLE_IDX_Y = 1
PT_TUPLE_IDX_AREA = 2
PT_TUPLE_IDX_SLOPE = 3
PT_TUPLE_IDX_ECCENTRICITY = 4
# 3D coordinates of plane formed by camera center and slope line
# centered on object.
PT_TUPLE_IDX_P1 = 5
PT_TUPLE_IDX_P2 = 6
PT_TUPLE_IDX_P3 = 7
PT_TUPLE_IDX_P4 = 8
PT_TUPLE_IDX_LINE_FOUND = 9
PT_TUPLE_IDX_FRAME_PT_IDX = 10
PT_TUPLE_IDX_CUR_VAL_IDX = 11
PT_TUPLE_IDX_MEAN_VAL_IDX = 12
PT_TUPLE_IDX_SUMSQF_VAL_IDX = 13
WIRE_ORDER_CUR_VAL_IDX = 6
WIRE_ORDER_MEAN_VAL_IDX = 7
WIRE_ORDER_SUMSQF_VAL_IDX = 8
# 2D data format for PyTables:
class Info2D(PT.IsDescription):
camn = PT.UInt16Col(pos=0)
frame = PT.Int64Col(pos=1)
timestamp = PT.FloatCol(
pos=2
) # when the image trigger happened (returned by timestamp modeler on MainBrain)
cam_received_timestamp = PT.FloatCol(
pos=3
) # when the image was acquired by flydra software (on camera computer)
x = PT.Float32Col(pos=4)
y = PT.Float32Col(pos=5)
area = PT.Float32Col(pos=6)
slope = PT.Float32Col(pos=7)
eccentricity = PT.Float32Col(pos=8)
frame_pt_idx = PT.UInt8Col(
pos=9
) # index of point if there were > 1 points in frame
cur_val = PT.UInt8Col(pos=10)
mean_val = PT.Float32Col(pos=11)
sumsqf_val = PT.Float32Col(pos=12) # estimate of <x^2> (running_sumsqf)
class TextLogDescription(PT.IsDescription):
mainbrain_timestamp = PT.FloatCol(pos=0)
cam_id = PT.StringCol(255, pos=1)
host_timestamp = PT.FloatCol(pos=2)
message = PT.StringCol(255, pos=3)
class CamSyncInfo(PT.IsDescription):
cam_id = PT.StringCol(256, pos=0)
camn = PT.UInt16Col(pos=1)
hostname = PT.StringCol(2048, pos=2)
class HostClockInfo(PT.IsDescription):
remote_hostname = PT.StringCol(255, pos=0)
start_timestamp = PT.FloatCol(pos=1)
remote_timestamp = PT.FloatCol(pos=2)
stop_timestamp = PT.FloatCol(pos=3)
class TriggerClockInfo(PT.IsDescription):
start_timestamp = PT.FloatCol(pos=0)
framecount = PT.Int64Col(pos=1)
tcnt = PT.UInt16Col(pos=2)
stop_timestamp = PT.FloatCol(pos=3)
class MovieInfo(PT.IsDescription):
cam_id = PT.StringCol(16, pos=0)
filename = PT.StringCol(255, pos=1)
approx_start_frame = PT.Int64Col(pos=2)
approx_stop_frame = PT.Int64Col(pos=3)
class ExperimentInfo(PT.IsDescription):
uuid = PT.StringCol(32, pos=0)
|
120570
|
import os
import yaml
from functools import reduce
CONFIG_PATH = os.path.dirname(__file__)
def load_yaml(config_name):
with open(os.path.join(CONFIG_PATH, config_name)+ '.yaml') as file:
config = yaml.safe_load(file)
return config
class DotDict(dict):
    def __getattr__(self, k):
        try:
            v = self[k]
        except KeyError:
            raise AttributeError(k)
if isinstance(v, dict):
return DotDict(v)
return v
def __getitem__(self, k):
if isinstance(k, str) and '.' in k:
k = k.split('.')
if isinstance(k, (list, tuple)):
return reduce(lambda d, kk: d[kk], k, self)
return super().__getitem__(k)
def get(self, k, default=None):
if isinstance(k, str) and '.' in k:
try:
return self[k]
except KeyError:
return default
return super().get(k, default=default)
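# Minimal usage sketch (illustrative only): nested and dotted access behave the same.
if __name__ == "__main__":
    cfg = DotDict({"model": {"lr": 1e-3, "optim": {"name": "adam"}}})
    assert cfg.model.lr == 1e-3                      # attribute-style access
    assert cfg["model.lr"] == 1e-3                   # dotted-key access
    assert cfg.get("model.optim.name") == "adam"     # nested dotted get
    assert cfg.get("model.missing", 42) == 42        # default for a missing dotted key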
|
120667
|
from cStringIO import StringIO
import mock
from changes.artifacts.base import ArtifactParseError
from changes.artifacts.collection_artifact import CollectionArtifactHandler
from changes.config import db
from changes.constants import Result
from changes.models.failurereason import FailureReason
from changes.models.jobplan import JobPlan
from changes.testutils import TestCase
class CollectionArtifactHandlerTest(TestCase):
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_valid_json(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
handler.process(StringIO("{}"), artifact)
buildstep.expand_jobs.assert_called_once_with(jobstep, {})
# make sure changes were committed
db.session.rollback()
assert not FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_invalid_json(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
handler.process(StringIO(""), artifact)
assert buildstep.call_count == 0
# make sure changes were committed
db.session.rollback()
assert FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_parse_error(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
buildstep.expand_jobs.side_effect = ArtifactParseError('bad file')
handler.process(StringIO("{}"), artifact)
buildstep.expand_jobs.assert_called_once_with(jobstep, {})
# make sure changes were committed
db.session.rollback()
assert FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_expand_jobs_error(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
buildstep.expand_jobs.side_effect = Exception('error')
handler.process(StringIO("{}"), artifact)
buildstep.expand_jobs.assert_called_once_with(jobstep, {})
# make sure changes were committed
db.session.rollback()
assert jobstep.result == Result.infra_failed
assert not FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
|
120675
|
import numpy as np
import pandas as pd
from functools import reduce
from itertools import combinations_with_replacement
from amlearn.featurize.base import create_featurizer_backend
from amlearn.utils.packing import load_radii
from sklearn.base import BaseEstimator, TransformerMixin
try:
from amlearn.featurize.src import bp_symmfunc
except Exception:
    print("Failed to import Fortran extension bp_symmfunc!\n")
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class BPRadialFunction(BaseEstimator, TransformerMixin):
def __init__(self, bds, atom_type_symbols, pbc=None,
delta_r=0.2, n_r=50, cutoff=6.5,
id_col='id', type_col='type', coords_cols=None,
backend=None, verbose=1, save=True, output_path=None,
output_file_prefix='feature_bp_radial_function',
print_freq=1000):
self.bds = bds
self.pbc = np.array([1, 1, 1]) if pbc is None else pbc
self.atom_type_symbols = atom_type_symbols
self.delta_r = delta_r
self.n_r = n_r
self.cutoff = cutoff
self.id_col = id_col
self.type_col = type_col
self.coords_cols = ["x", "y", "z"] if coords_cols is None \
else coords_cols
self.save = save
self.verbose = verbose
self.backend = backend if backend is not None \
else create_featurizer_backend(output_path=output_path)
self.output_file_prefix = output_file_prefix
self.print_freq = print_freq
@classmethod
def default_from_system(cls, bds, atom_type_symbols, ref_atom_number,
delta_r=0.1, n_r=50, cutoff=None, pbc=None,
sigma_AA=None, radii=None, radius_type="miracle_radius",
id_col='id', type_col='type', coords_cols=None,
backend=None, verbose=1, save=True, output_path=None,
output_file_prefix='feature_bp_radial_function',
print_freq=1000):
radii = load_radii() if radii is None else radii
if sigma_AA is None:
sigma_AA = \
radii[str(ref_atom_number)][radius_type] * 2
delta_r = sigma_AA * delta_r
cutoff = (2.5 * sigma_AA) if cutoff is None else cutoff
return cls(bds=bds, atom_type_symbols=atom_type_symbols, pbc=pbc,
delta_r=delta_r, n_r=n_r, cutoff=cutoff,
id_col=id_col, type_col=type_col, coords_cols=coords_cols,
backend=backend, verbose=verbose, save=save,
output_path=output_path,
output_file_prefix=output_file_prefix,
print_freq=print_freq)
def fit_transform(self, X, y=None, **fit_params):
return self.transform(X)
def transform(self, X):
n_atoms = len(X)
atom_ids = X[[self.id_col]].values
atom_types = X[[self.type_col]].values
atom_coords = X[self.coords_cols].values
        radial_funcs = np.zeros(
            (n_atoms, self.n_r * len(self.atom_type_symbols)), dtype=np.float64)
radial_funcs = bp_symmfunc.bp_radial(
center_atom_ids=atom_ids, center_atom_coords=atom_coords,
atom_ids=atom_ids, atom_types=atom_types,
atom_type_symbols=self.atom_type_symbols,
atom_coords=atom_coords, pbc=self.pbc, bds=self.bds,
cutoff=self.cutoff, delta_r=self.delta_r, n_r=self.n_r,
radial_funcs=radial_funcs, print_freq=self.print_freq)
radial_funcs_df = pd.DataFrame(radial_funcs,
index=atom_ids.transpose().tolist()[0],
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=radial_funcs_df, name=self.output_file_prefix)
return radial_funcs_df
def get_feature_names(self):
return reduce(list.__add__,
([["{}_{:.3f}".format(str(t), i)
for i in np.arange(0, self.n_r) * self.delta_r]
for t in self.atom_type_symbols]))
class BPAngularFunction(BaseEstimator, TransformerMixin):
def __init__(self, bds, atom_type_symbols, ksaais, lambdas,
zetas, pbc=None, cutoff=6.5,
id_col='id', type_col='type', coords_cols=None,
backend=None, verbose=1, save=True, output_path=None,
output_file_prefix='feature_bp_angular_function',
print_freq=1000):
self.bds = bds
self.atom_type_symbols = atom_type_symbols
self.ksaais = ksaais
self.lambdas = lambdas
self.zetas = zetas
self.pbc = np.array([1, 1, 1]) if pbc is None else pbc
self.cutoff = cutoff
self.id_col = id_col
self.type_col = type_col
self.coords_cols = ["x", "y", "z"] if coords_cols is None \
else coords_cols
self.save = save
self.verbose = verbose
self.backend = backend if backend is not None \
else create_featurizer_backend(output_path=output_path)
self.output_file_prefix = output_file_prefix
self.print_freq = print_freq
@classmethod
def default_from_system(cls, ref_atom_number, atom_type_symbols, ksaais,
lambdas, zetas, bds, cutoff=None, pbc=None, sigma_AA=None,
radii=None, radius_type="miracle_radius",
id_col='id', type_col='type', coords_cols=None,
backend=None, verbose=1, save=True, output_path=None,
output_file_prefix='feature_bp_angular_function',
print_freq=1000):
radii = load_radii() if radii is None else radii
sigma_AA = sigma_AA if sigma_AA is not None else \
radii[str(ref_atom_number)][radius_type] * 2
ksaais = ksaais * sigma_AA # in this case, ksaais are in the unit of sigma_AA
cutoff = (2.5 * sigma_AA) if cutoff is None else cutoff
return cls(bds=bds, atom_type_symbols=atom_type_symbols,
ksaais=ksaais, lambdas=lambdas, zetas=zetas,
pbc=pbc, cutoff=cutoff,
id_col=id_col, type_col=type_col, coords_cols=coords_cols,
backend=backend, verbose=verbose, save=save,
output_path=output_path,
output_file_prefix=output_file_prefix,
print_freq=print_freq)
def fit_transform(self, X, y=None, **fit_params):
return self.transform(X)
def transform(self, X):
n_atoms = len(X)
n_atom_types = len(self.atom_type_symbols)
atom_ids = X[[self.id_col]].values
atom_types = X[[self.type_col]].values
atom_coords = X[self.coords_cols].values
        angular_funcs = \
            np.zeros((n_atoms, int(n_atom_types * (n_atom_types + 1) /
                                   2 * len(self.ksaais))),
                     dtype=np.float64)
angular_funcs = bp_symmfunc.bp_angular(
center_atom_ids=atom_ids, center_atom_coords=atom_coords,
atom_ids=atom_ids, atom_types=atom_types,
atom_type_symbols=self.atom_type_symbols,
atom_coords=atom_coords, pbc=self.pbc, bds=self.bds,
ksaais=self.ksaais, lambdas=self.lambdas, zetas=self.zetas,
cutoff=self.cutoff, angular_funcs=angular_funcs,
print_freq=self.print_freq)
angular_funcs_df = pd.DataFrame(angular_funcs,
index=atom_ids.transpose().tolist()[0],
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=angular_funcs_df, name=self.output_file_prefix)
return angular_funcs_df
def get_feature_names(self):
return reduce(list.__add__,
([["{}_{}_{:.3f}_{:.3f}_{:.3f}".format(
str(t1), str(t2), i, j, k)
for i, j, k in zip(self.ksaais,
self.lambdas, self.zetas)]
for t1, t2 in combinations_with_replacement(
self.atom_type_symbols, 2)]))
|
120719
|
import pytz
from datetime import datetime
from mock import patch
from nose.tools import eq_
from django.utils.timezone import make_aware
from mozillians.announcements.models import Announcement
from mozillians.announcements.tests import AnnouncementFactory, TestCase
class AnnouncementManagerTests(TestCase):
def setUp(self):
AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 12), pytz.UTC),
publish_until=make_aware(datetime(2013, 2, 18), pytz.UTC))
AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 15), pytz.UTC),
publish_until=make_aware(datetime(2013, 2, 17), pytz.UTC))
AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 21), pytz.UTC),
publish_until=make_aware(datetime(2013, 2, 23), pytz.UTC))
@patch('mozillians.announcements.managers.now')
def test_published(self, mock_obj):
"""Test published() of Announcement Manager."""
mock_obj.return_value = make_aware(datetime(2013, 2, 10), pytz.UTC)
eq_(Announcement.objects.published().count(), 0)
mock_obj.return_value = make_aware(datetime(2013, 2, 13), pytz.UTC)
eq_(Announcement.objects.published().count(), 1)
mock_obj.return_value = make_aware(datetime(2013, 2, 16), pytz.UTC)
eq_(Announcement.objects.published().count(), 2)
mock_obj.return_value = make_aware(datetime(2013, 2, 19), pytz.UTC)
eq_(Announcement.objects.published().count(), 0)
mock_obj.return_value = make_aware(datetime(2013, 2, 24), pytz.UTC)
eq_(Announcement.objects.published().count(), 0)
@patch('mozillians.announcements.managers.now')
def test_unpublished(self, mock_obj):
"""Test unpublished() of Announcement Manager."""
mock_obj.return_value = make_aware(datetime(2013, 2, 10), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 3)
mock_obj.return_value = make_aware(datetime(2013, 2, 13), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 2)
mock_obj.return_value = make_aware(datetime(2013, 2, 16), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 1)
mock_obj.return_value = make_aware(datetime(2013, 2, 19), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 3)
mock_obj.return_value = make_aware(datetime(2013, 2, 24), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 3)
|
120730
|
import logging
from pydantic import BaseModel
from .exceptions import JsonErrors, RequestError
from .settings import GREPAPTCHA_TEST_SECRET, BaseSettings
from .utils import get_ip, remove_port
logger = logging.getLogger('atoolbox.auth')
async def check_grecaptcha(m: BaseModel, request, *, error_headers=None):
settings: BaseSettings = request.app['settings']
client_ip = get_ip(request)
if not m.grecaptcha_token:
logger.warning('grecaptcha not provided, path="%s" ip=%s', request.path, client_ip)
raise JsonErrors.HTTPBadRequest(message='No recaptcha value', headers=error_headers)
post_data = {'secret': settings.grecaptcha_secret, 'response': m.grecaptcha_token, 'remoteip': client_ip}
async with request.app['http_client'].post(settings.grecaptcha_url, data=post_data) as r:
if r.status != 200:
raise RequestError(r.status, settings.grecaptcha_url, text=await r.text())
data = await r.json()
if data['success']:
hostname = data['hostname']
if remove_port(request.host) == hostname:
logger.info('grecaptcha success')
if hostname == 'testkey.google.com' and settings.grecaptcha_secret == GREPAPTCHA_TEST_SECRET:
logger.info('grecaptcha test key success')
else:
logger.warning(
'grecaptcha failure, path="%s" ip=%s response=%s',
request.path,
client_ip,
data,
extra={'data': {'grecaptcha_response': data}},
)
raise JsonErrors.HTTPBadRequest(message='Invalid recaptcha value', headers=error_headers)
|
120736
|
import py
import pytest
import sys
import os
# XXX: copied from pypy/tool/cpyext/extbuild.py
if os.name != 'nt':
so_ext = 'so'
else:
so_ext = 'dll'
def _build(cfilenames, outputfilename, compile_extra, link_extra,
include_dirs, libraries, library_dirs):
try:
# monkeypatch distutils for some versions of msvc compiler
import setuptools
except ImportError:
# XXX if this fails and is required,
# we must call pypy -mensurepip after translation
pass
from distutils.ccompiler import new_compiler
from distutils import sysconfig
# XXX for Darwin running old versions of CPython 2.7.x
sysconfig.get_config_vars()
compiler = new_compiler(force=1)
sysconfig.customize_compiler(compiler) # XXX
objects = []
for cfile in cfilenames:
cfile = py.path.local(cfile)
old = cfile.dirpath().chdir()
try:
res = compiler.compile([cfile.basename],
include_dirs=include_dirs, extra_preargs=compile_extra)
assert len(res) == 1
cobjfile = py.path.local(res[0])
assert cobjfile.check()
objects.append(str(cobjfile))
finally:
old.chdir()
compiler.link_shared_object(
objects, str(outputfilename),
libraries=libraries,
extra_preargs=link_extra,
library_dirs=library_dirs)
def c_compile(cfilenames, outputfilename,
compile_extra=None, link_extra=None,
include_dirs=None, libraries=None, library_dirs=None):
compile_extra = compile_extra or []
link_extra = link_extra or []
include_dirs = include_dirs or []
libraries = libraries or []
library_dirs = library_dirs or []
if sys.platform == 'win32':
link_extra = link_extra + ['/DEBUG'] # generate .pdb file
if sys.platform == 'darwin':
# support Fink & Darwinports
for s in ('/sw/', '/opt/local/'):
if (s + 'include' not in include_dirs
and os.path.exists(s + 'include')):
include_dirs.append(s + 'include')
if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
library_dirs.append(s + 'lib')
outputfilename = py.path.local(outputfilename).new(ext=so_ext)
saved_environ = os.environ.copy()
try:
_build(
cfilenames, outputfilename,
compile_extra, link_extra,
include_dirs, libraries, library_dirs)
finally:
# workaround for a distutils bugs where some env vars can
# become longer and longer every time it is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
# end copy
def compile_so_file(udir):
cfile = py.path.local(__file__).dirpath().join("_ctypes_test.c")
if sys.platform == 'win32':
libraries = ['oleaut32']
else:
libraries = []
return c_compile([cfile], str(udir / '_ctypes_test'), libraries=libraries)
@pytest.fixture(scope='session')
def sofile(tmpdir_factory):
udir = tmpdir_factory.mktemp('_ctypes_test')
return str(compile_so_file(udir))
@pytest.fixture
def dll(sofile):
from ctypes import CDLL
return CDLL(str(sofile))
|
120746
|
from . import foo
from .. import bar
from ... import baz
import ...snurp
def blah(...):
pass
class Foo(tuple):
@staticmethod
def __new__(cls, initialValue=(0,0)):
return tuple.__new__(cls, initialValue)
class Bar(tuple):
def __init__(self, initialValue=(0,0)):
super(tuple, self).__init__(initialValue)
super(tuple, self).count(items, stuff="thneed")
@staticmethod
def somefunc(arg, arrg=pirate):
pass
@property
list(map(str, range(100)))
[str(i) for i in range(100)]
sâomething(arg=22)
def sâomething():
pass
class sâomething(object):
pass
__init__(something)
list(map(str, range(100)))
# ^^^ support.type.python
[str(i) for i in range(100)]
#^^^ support.function.builtin.call.python
super(arg1=1)(arg2=2)
list abs, map
some.None NotImplemented
print (file=None)
print __class__
print keyword
print __init__
print(foobar)
__init__ . something()
callback(print)
callback(range)
.__init__
__init__ [2]
@ deco . __init__ (call=some) # rator
def function():
pass
@classmethod
def from_foo(cls, foo):
return cls(foo, bar='baz')
# highlighting for arguments broken
mod.func()
mod.submod.func()
func().func2().attr
mod.func().attr.attr2.func2()
range(foo, bar)
(...)
__exit__
print foobar
Привет_мир() or 我喜欢奶酪() or Kolbász_finom()
bool
class MyWarning(Warning):
def __init__(self, text='first part'
'second part'):
self.args = (text,)
foo(text="line 1"
"line 2")
foobar(param=r"()")
def f():
print(r"()")
f()
def hi(name):
r""" Say hi!
with a \) special characters.
"""
return 'hi ' + name
print(hi('avi'))
|
120778
|
from brownie import accounts, interface, Contract
from brownie import (Bank, SimpleBankConfig, SimplePriceOracle, PancakeswapPool1Goblin,
StrategyAllBNBOnly, StrategyLiquidate, StrategyWithdrawMinimizeTrading, StrategyAddTwoSidesOptimal, PancakeswapGoblinConfig, TripleSlopeModel, ConfigurableInterestBankConfig, ProxyAdminImpl, TransparentUpgradeableProxyImpl)
from .utils import *
import eth_abi
def main():
admin = accounts[0]
alice = accounts[1]
triple_slope_model = TripleSlopeModel.deploy({'from': admin})
# min debt 0.2 BNB at 10 gwei gas price (killBps 5% -> at least 0.01BNB bonus)
# reserve pool bps 1000 (10%)
# kill bps 500 (5%)
bank_config = ConfigurableInterestBankConfig.deploy(
2 * 10**17, 1000, 500, triple_slope_model, {'from': admin})
proxy_admin = ProxyAdminImpl.deploy({'from': admin})
bank_impl = Bank.deploy({'from': admin})
bank = TransparentUpgradeableProxyImpl.deploy(
bank_impl, proxy_admin, bank_impl.initialize.encode_input(bank_config), {'from': admin})
bank = interface.IAny(bank)
###################################################################
# deposit & withdraw
deposit_amt = 10**18
prevBNBBal = alice.balance()
prevIbBNBBal = bank.balanceOf(alice)
bank.deposit({'from': alice, 'value': deposit_amt})
curBNBBal = alice.balance()
curIbBNBBal = bank.balanceOf(alice)
print('∆ bnb alice', curBNBBal - prevBNBBal)
print('∆ ibBNB alice', curIbBNBBal - prevIbBNBBal)
assert curBNBBal - prevBNBBal == -(curIbBNBBal - prevIbBNBBal), 'first depositor should get 1:1'
assert curBNBBal - prevBNBBal == -deposit_amt
# withdraw 1/3
prevBNBBal = alice.balance()
prevIbBNBBal = bank.balanceOf(alice)
bank.withdraw(curIbBNBBal // 3, {'from': alice})
curBNBBal = alice.balance()
curIbBNBBal = bank.balanceOf(alice)
print('∆ bnb alice', curBNBBal - prevBNBBal)
print('∆ ibBNB alice', curIbBNBBal - prevIbBNBBal)
assert curBNBBal - prevBNBBal == -(curIbBNBBal - prevIbBNBBal), 'first depositor should get 1:1'
assert curBNBBal - prevBNBBal == deposit_amt // 3
# withdraw remaining
prevBNBBal = alice.balance()
prevIbBNBBal = bank.balanceOf(alice)
bank.withdraw(curIbBNBBal, {'from': alice})
curBNBBal = alice.balance()
curIbBNBBal = bank.balanceOf(alice)
print('∆ bnb alice', curBNBBal - prevBNBBal)
print('∆ ibBNB alice', curIbBNBBal - prevIbBNBBal)
assert curBNBBal - prevBNBBal == -(curIbBNBBal - prevIbBNBBal), 'first depositor should get 1:1'
assert curBNBBal - prevBNBBal == deposit_amt - deposit_amt // 3
|
120796
|
import time
from leek_demo.tasks.low import failed_task, succeeded_task, rejected_task, parent_task
from leek_demo.tasks.high import critical_task, revoked_expired_task, recovered_task, revoked_terminated_task
while True:
succeeded_task.delay(4, 4)
failed_task.delay()
rejected_task.delay()
critical_task.delay()
revoked_expired_task.delay()
revoked_terminated_task.delay()
recovered_task.delay()
parent_task.delay()
time.sleep(1)
|
120807
|
from django.db import models
from accounts.models import Account
from store.models import Product
class Payment(models.Model):
user = models.ForeignKey(Account, on_delete=models.CASCADE)
payment_id = models.CharField(max_length=100)
payment_method = models.CharField(max_length=100)
amount_paid = models.CharField(max_length=100)
status = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.payment_id
class Order(models.Model):
STATUS = (
('New', 'New'),
('Accepted', 'Accepted'),
('Completed', 'Completed'),
('Cancelled', 'Cancelled'),
)
user = models.ForeignKey(Account, on_delete=models.SET_NULL, null=True)
payment = models.ForeignKey(Payment, on_delete=models.SET_NULL, blank=True, null=True)
order_number = models.CharField(max_length=20)
f_name = models.CharField(max_length=50)
l_name = models.CharField(max_length=50)
email = models.EmailField(max_length=100)
tel = models.CharField(max_length=50)
address = models.CharField(max_length=50)
country = models.CharField(max_length=50, blank=True) # blank=True in case the customer is within the store's country
state = models.CharField(max_length=50)
city = models.CharField(max_length=50)
zipcode = models.CharField(max_length=20)
order_note = models.CharField(max_length=100, blank=True)
order_total = models.FloatField()
tax = models.FloatField()
status = models.CharField(max_length=10, choices=STATUS, default='New')
ip = models.CharField(blank=True, max_length=20)
is_ordered = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def full_name(self):
return f"{self.f_name} {self.l_name}"
def thecountry(self):
if self.country:
return f"{self.country}, {self.state}, {self.city}"
else:
return f"{self.state}, {self.city}"
def __str__(self):
return self.f_name
class OrderProduct(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
payment = models.ForeignKey(Payment, on_delete=models.SET_NULL, blank=True, null=True)
user = models.ForeignKey(Account, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.IntegerField()
product_price = models.FloatField()
ordered = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.product.name
|
120831
|
from torchvision.models.segmentation import deeplabv3_resnet50
def setup_model(config):
return deeplabv3_resnet50(num_classes=config.num_classes)
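# Illustrative usage sketch (assumes `config` simply exposes a `num_classes` attribute):
#   from types import SimpleNamespace
#   import torch
#   model = setup_model(SimpleNamespace(num_classes=21)).eval()
#   with torch.no_grad():
#       out = model(torch.randn(1, 3, 256, 256))["out"]   # -> (1, 21, 256, 256)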
|
120859
|
import json
import pytest
from indy.ledger import build_acceptance_mechanisms_request
from indy_common.authorize.auth_actions import ADD_PREFIX
from indy_common.authorize.auth_constraints import AuthConstraint, IDENTITY_OWNER
from indy_common.types import Request
from indy_node.test.auth_rule.auth_framework.basic import AuthTest
from indy_node.test.helper import build_auth_rule_request_json
from plenum.common.constants import TXN_AUTHOR_AGREEMENT_DISABLE, TXN_TYPE
from plenum.common.exceptions import RequestRejectedException
from plenum.common.util import randomString, get_utc_epoch
from plenum.test.helper import sdk_get_and_check_replies, sdk_sign_request_from_dict
from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_sign_and_send_prepared_request
from plenum.test.txn_author_agreement.helper import sdk_send_txn_author_agreement
class TAADisableTest(AuthTest):
def __init__(self, env, action_id):
super().__init__(env, action_id)
def prepare(self):
self.default_auth_rule = self.get_default_auth_rule()
self.changed_auth_rule = self.get_changed_auth_rule()
req = self.taa_aml_request()
rep = sdk_sign_and_send_prepared_request(self.looper, self.trustee_wallet, self.sdk_pool_handle, req)
sdk_get_and_check_replies(self.looper, [rep])
self.send_taa()
def taa_aml_request(self):
return self.looper.loop.run_until_complete(build_acceptance_mechanisms_request(
self.trustee_wallet[1],
json.dumps({
'Nice way': 'very good way to accept agreement'}),
randomString(), randomString()))
def send_taa(self):
sdk_send_txn_author_agreement(self.looper, self.sdk_pool_handle, self.trustee_wallet,
randomString(10), randomString(5), ratified=get_utc_epoch())
def get_changed_auth_rule(self):
self.new_default_wallet = sdk_add_new_nym(self.looper, self.sdk_pool_handle, self.trustee_wallet,
role=IDENTITY_OWNER)
constraint = AuthConstraint(role=IDENTITY_OWNER,
sig_count=1,
need_to_be_owner=False)
return build_auth_rule_request_json(
self.looper, self.trustee_wallet[1],
auth_action=ADD_PREFIX,
auth_type=TXN_AUTHOR_AGREEMENT_DISABLE,
field='*',
new_value='*',
constraint=constraint.as_dict
)
def send_taa_disable_req(self, wallet):
operation = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_DISABLE}
req = sdk_sign_request_from_dict(self.looper, wallet, operation)
self.send_and_check(json.dumps(req), wallet)
def run(self):
# Step 1. Change auth rule
self.send_and_check(self.changed_auth_rule, wallet=self.trustee_wallet)
# Step 2. Check, that we cannot do txn the old way
with pytest.raises(RequestRejectedException):
self.send_taa_disable_req(self.trustee_wallet)
# Step 3. Check, that new auth rule is used
self.send_taa_disable_req(self.new_default_wallet)
# Step 4. Return default auth rule
self.send_and_check(self.default_auth_rule, wallet=self.trustee_wallet)
# Step 5. Check, that default auth rule works
self.send_taa()
self.send_taa_disable_req(self.trustee_wallet)
def result(self):
pass
def down(self):
pass
|
120885
|
DosTags = {
# System
33: "SYS_Input",
34: "SYS_Output",
35: "SYS_Asynch",
36: "SYS_UserShell",
37: "SYS_CustomShell",
# CreateNewProc
1001: "NP_SegList",
1002: "NP_FreeSegList",
1003: "NP_Entry",
1004: "NP_Input",
1005: "NP_Output",
1006: "NP_CloseInput",
1007: "NP_CloseOutput",
1008: "NP_Error",
1009: "NP_CloseError",
1010: "NP_CurrentDir",
1011: "NP_StackSize",
1012: "NP_Name",
1013: "NP_Priority",
1014: "NP_ConsoleTask",
1015: "NP_WindowPtr",
1016: "NP_HomeDir",
1017: "NP_CopyVars",
1018: "NP_Cli",
1019: "NP_Path",
1020: "NP_CommandName",
1021: "NP_Arguments",
1022: "NP_NotifyOnDeath",
1023: "NP_Synchronous",
1024: "NP_ExitCode",
1025: "NP_ExitData",
# AllocDosObject
2001: "ADO_FH_Mode",
2002: "ADO_DirLen",
2003: "ADR_CommNameLen",
2004: "ADR_CommFileLen",
2005: "ADR_PromptLen",
}
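# Illustrative helper (not part of the original table): reverse lookup from tag
# name back to its numeric code, e.g. DosTagCodes["NP_StackSize"] == 1011.
DosTagCodes = {name: code for code, name in DosTags.items()}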
|
120923
|
import logging
from typing import Optional
from colorlog import ColoredFormatter
class levelFilter(logging.Filter):
r"""Log level filter.
Arguments:
level (int): filter log level. Only logs with level higher than ``level`` will be kept.
"""
def __init__(self, level: int):
self.level = level
def filter(self, record: logging.LogRecord) -> bool:
"""Filter the log record whose level is greater than the preset log level.
Arguments:
record (logging.LogRecord): callback function input record items.
"""
return record.levelno > self.level
STREAM_LOG_FORMAT = "%(log_color)s%(asctime)s %(levelname)-8s%(reset)s %(blue)s[%(filename)s:%(lineno)d]%(reset)s %(log_color)s%(message)s"
FILE_LOG_FORMAT = "%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"
DEFAULT_LOGGER_NAME = "log"
_default_logger = None
LOG_COLOR = {
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
}
COLOR_FORMATTER = ColoredFormatter(
STREAM_LOG_FORMAT,
datefmt=None,
reset=True,
log_colors=LOG_COLOR,
secondary_log_colors={},
style="%",
)
FORMATTER = logging.Formatter(
FILE_LOG_FORMAT,
datefmt=None,
style="%",
)
def setLogger(
name: str,
log_level: int = logging.DEBUG,
log_filename: str = "train.log",
enable_file_logger: bool = False,
err_redirect_filepath: str = "error.log",
enable_err_redirect: bool = False,
err_redirect_level: int = logging.INFO,
) -> logging.Logger:
r"""Helper function to simplify the logger setup process with provided
log_level and log_filename. Also makes it possible to redirect logs
above a certain level to a different file.
Arguments:
name (str): logger name
log_filename (str): log filename
enable_file_logger (bool): whether enable save log into file
err_redirect_filepath (str): err log redirect filepath
enable_err_redirect (bool): whether enable err log redirect
err_redirect_level (int): error redirect log level
"""
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(COLOR_FORMATTER)
logger.addHandler(handler)
logger.setLevel(log_level)
if enable_file_logger:
file_normal_handler = logging.FileHandler(log_filename, mode="a")
file_normal_handler.setFormatter(FORMATTER)
logger.addHandler(file_normal_handler)
if enable_err_redirect:
file_error_handler = logging.FileHandler(err_redirect_filepath, mode="a")
file_error_handler.setFormatter(FORMATTER)
file_error_handler.addFilter(levelFilter(err_redirect_level))
logger.addHandler(file_error_handler)
return logger
def get_logger(name: str) -> logging.Logger:
r"""Get logger by name.
Arguments:
name (str): logger name.
"""
return logging.getLogger(name)
def _set_default_logger(name: str, **kwargs) -> logging.Logger:
    r"""Set the default logger.
    Arguments:
        name (str): default logger name.
    Returns:
        logging.Logger
    """
global _default_logger
if not _default_logger:
_default_logger = setLogger(name, **kwargs)
return _default_logger
def get_default_logger(name: Optional[str] = None, **kwargs) -> logging.Logger:
r"""Get the default logger. If default logger is not set, init the default by
the given name.
Arguments:
name (str, optional): logger name.
"""
if _default_logger is None:
_set_default_logger(name or DEFAULT_LOGGER_NAME, **kwargs)
return _default_logger
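# Minimal usage sketch (illustrative; the file names below are arbitrary assumptions):
if __name__ == "__main__":
    log = setLogger(
        "demo",
        log_level=logging.DEBUG,
        log_filename="train.log",
        enable_file_logger=True,
        err_redirect_filepath="error.log",
        enable_err_redirect=True,
        err_redirect_level=logging.INFO,
    )
    log.debug("goes to the console and train.log")
    log.error("also redirected to error.log (level > INFO)")
    assert get_default_logger("demo2") is get_default_logger()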
|
120956
|
from typing import NamedTuple
import torch
from kmtools import structure_tools
class ProteinData(NamedTuple):
sequence: str
row_index: torch.LongTensor
col_index: torch.LongTensor
distances: torch.FloatTensor
def extract_seq_and_adj(structure, chain_id, remove_hetatms=False):
domain, result_df = get_interaction_dataset_wdistances(
structure, 0, chain_id, r_cutoff=12, remove_hetatms=remove_hetatms
)
domain_sequence = structure_tools.get_chain_sequence(domain)
assert max(result_df["residue_idx_1"].values) < len(domain_sequence)
assert max(result_df["residue_idx_2"].values) < len(domain_sequence)
data = ProteinData(
domain_sequence,
result_df["residue_idx_1"].values,
result_df["residue_idx_2"].values,
result_df["distance"].values,
# result_df["distance_backbone"].values,
# result_df["orientation_1"].values,
# result_df["orientation_2"].values,
# result_df["orientation_3"].values,
)
return data
def get_interaction_dataset_wdistances(
structure, model_id, chain_id, r_cutoff=12, remove_hetatms=False
):
chain = structure[0][chain_id]
num_residues = len(list(chain.residues))
dd = structure_tools.DomainDef(model_id, chain_id, 1, num_residues)
domain = structure_tools.extract_domain(structure, [dd], remove_hetatms=remove_hetatms)
distances_core = structure_tools.get_distances(
domain.to_dataframe(), r_cutoff, groupby="residue"
)
assert (distances_core["residue_idx_1"] <= distances_core["residue_idx_2"]).all()
return domain, distances_core
|
120962
|
import sys
import os
import time
sys.path.append(os.getcwd())
from cluster.prepare_data import get_headers_pairs_list, write_dist_matrix
from cluster.token_edit_distance import get_distance_matrix
if len(sys.argv) < 3:
print(
"Too few arguments. You should provide: \n1. dataset_filename" +
"\n2. output_data_filename"
)
sys.exit()
start = time.perf_counter()
dataset_filename_ = sys.argv[1]
output_data_filename_ = sys.argv[2]
headers_pairs = get_headers_pairs_list(dataset_filename_, verbose=True)
dist_matrix, max_dist = get_distance_matrix(list(map(lambda x: x[1],
headers_pairs)),
verbose=True)
write_dist_matrix(dist_matrix, max_dist, output_data_filename_, verbose=True)
end = time.perf_counter()
print("\nWorking time: %f sec." % (end - start))
|
120975
|
from django.contrib import admin
from .models import BgpPeering
@admin.register(BgpPeering)
class BgpPeeringAdmin(admin.ModelAdmin):
list_display = ("device", "peer_name", "remote_as", "remote_ip")
|
120999
|
from gitpandas import Repository
import time
__author__ = 'willmcginnis'
if __name__ == '__main__':
g = Repository(working_dir='..')
st = time.time()
blame = g.cumulative_blame(branch='master', include_globs=['*.py', '*.html', '*.sql', '*.md'], limit=None, skip=None)
print(blame.head())
print(time.time() - st)
st = time.time()
blame = g.parallel_cumulative_blame(branch='master', include_globs=['*.py', '*.html', '*.sql', '*.md'], limit=None, skip=None, workers=4)
print(blame.head())
print(time.time() - st)
|
121021
|
from blenderneuron.section import Section
import numpy as np
import math
import numpy as np
class BlenderSection(Section):
def __init__(self):
super(BlenderSection, self).__init__()
self.was_split = False
self.split_sections = []
def from_full_NEURON_section_dict(self, nrn_section_dict):
self.name = nrn_section_dict["name"]
self.nseg = nrn_section_dict["nseg"]
self.point_count = nrn_section_dict["point_count"]
self.coords = nrn_section_dict["coords"]
self.radii = nrn_section_dict["radii"]
self.parent_connection_loc = nrn_section_dict["parent_connection_loc"]
self.connection_end = nrn_section_dict["connection_end"]
# Parse the children
self.children = []
for nrn_child in nrn_section_dict["children"]:
child = BlenderSection()
child.from_full_NEURON_section_dict(nrn_child)
self.children.append(child)
self.segments_3D = []
if "activity" in nrn_section_dict:
self.activity.from_dict(nrn_section_dict["activity"])
def make_split_sections(self, max_length):
"""
Splits a section into smaller chained sub-sections if the arc length of the points
exceeds the specified length. This is used to temporarily split the sections for
confining dendrites between layers.
:param max_length: maximum allowed section length in um
:return: None
"""
arc_lengths = self.arc_lengths()
total_length = arc_lengths[-1]
num_sections = int(math.ceil(total_length / max_length))
is_too_long = num_sections > 1
if not is_too_long:
return None
        # Mark the section as having been split
self.was_split = True
# Get the maximum length of the new sections
new_length = total_length / num_sections
# Create new sections
self.split_sections = [BlenderSection() for i in range(num_sections)]
old_coords = np.array(self.coords).reshape((-1, 3))
old_radii = np.array(self.radii)
# Split the coords and radii
split_length = 0
point_i = 0
for split_sec_i, split_sec in enumerate(self.split_sections):
split_length += new_length
split_sec_coords = []
split_sec_radii = []
# Start a 2nd+ split section with the most recent point
if split_sec_i > 0:
prev_sec = self.split_sections[split_sec_i-1]
split_sec_coords.append(prev_sec.coords[-1])
split_sec_radii.append(prev_sec.radii[-1])
exact_length_match = False
# Add 3d points to the split until reached the end of the split
while arc_lengths[point_i] <= split_length:
split_sec_coords.append(old_coords[point_i])
split_sec_radii.append(old_radii[point_i])
exact_length_match = abs(arc_lengths[point_i] - split_length) < 0.001
point_i += 1
if point_i == len(arc_lengths):
break
            # If we reached the end of the sub-section but the last real point does not
            # lie exactly at its end, create a virtual point that lies exactly at the
            # end of the sub-section
if not exact_length_match:
virtual_arc_length_delta = split_length - arc_lengths[point_i-1]
pt_segment_arc_length_delta = arc_lengths[point_i] - arc_lengths[point_i - 1]
pt_segment_vector = old_coords[point_i] - old_coords[point_i-1]
fraction_along = virtual_arc_length_delta / pt_segment_arc_length_delta
virtual_coord = old_coords[point_i-1] + pt_segment_vector * fraction_along
pt_segment_radius_delta = old_radii[point_i] - old_radii[point_i-1]
virtual_radius = old_radii[point_i-1] + pt_segment_radius_delta * fraction_along
split_sec_coords.append(virtual_coord)
split_sec_radii.append(virtual_radius)
split_sec.coords = np.array(split_sec_coords)
split_sec.radii = np.array(split_sec_radii)
split_sec.point_count = len(split_sec.radii)
split_sec.name = self.name + "["+str(split_sec_i)+"]"
return self.split_sections
def update_coords_from_split_sections(self):
if not self.was_split:
return
# Reassemble the coords and radii, skipping identical consecutive points
prev_coord, prev_radius = None, None
coords, radii = [], []
for split_i, split_sec in enumerate(self.split_sections):
for coord_i, coord in enumerate(np.reshape(split_sec.coords, (-1, 3))):
radius = split_sec.radii[coord_i]
# Skip if identical to previous point
if prev_coord is not None and radius == prev_radius and \
np.all(np.isclose(coord, prev_coord, rtol=0.001)):
continue
else:
coords.append(coord)
radii.append(radius)
prev_coord = coord
prev_radius = radius
self.coords = np.array(coords).reshape(-1)
self.radii = np.array(radii).reshape(-1)
self.point_count = len(self.radii)
def arc_lengths(self):
coords = np.array(self.coords).reshape(-1, 3)
start = coords[0:-1]
end = coords[1:]
diff = end - start
sq = np.square(diff)
sum = np.sum(sq, axis=1)
dist = np.sqrt(sum)
tot_len = np.concatenate(([0],np.cumsum(dist)))
return tot_len
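    # Illustrative example: for coords (0,0,0), (3,0,0), (3,4,0) the cumulative
    # arc lengths returned are [0., 3., 7.].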
def dist_to_closest_coord(self, target):
coords = np.array(self.coords).reshape(-1, 3)
target = np.array(target).reshape((1, 3))
diff = coords - target
sq = np.square(diff)
sum = np.sum(sq, axis=1)
dists = np.sqrt(sum)
return np.min(dists)
def remove_split_sections(self, recursive=True):
if self.was_split:
self.split_sections = []
self.was_split = False
if recursive:
for child_sec in self.children:
child_sec.remove_split_sections(recursive=True)
class BlenderRoot(BlenderSection):
def __init__(self, index, name, group=None):
super(BlenderRoot, self).__init__()
self.index = index
self.name = name
self.group = group
@property
def ui_root(self):
return self.group.ui_group.root_entries[self.name]
def remove(self, node):
# Remove view container objects if any
if self.group is not None and self.group.view is not None:
self.group.view.remove_container(self.name)
# remove from UI and from node groups
self.remove_from_group(delete=True)
# remove from index
node.root_index.pop(self.name)
def remove_from_group(self, delete=False):
if self.group is None:
return
# Keep a reference to group
current_group = self.group
# Remove group from 3D view
if self.group.view is not None:
self.group.view.remove()
self.group.view = None
# Set group to none in the root_index
self.group = None
# remove from node group
current_group.roots.pop(self.name)
# from ui group
root_entry = current_group.ui_group.root_entries.get(self.name)
if root_entry is not None and root_entry.selected:
root_entry.selected = False
if delete:
# Remove the root entry from all the UI groups
for group in current_group.node.groups.values():
entries = group.ui_group.root_entries
ui_root = entries.get(self.name)
if ui_root is not None:
remove_idx = entries.find(self.name)
entries.remove(remove_idx)
def add_to_UI_group(self, ui_group):
ui_root = ui_group.root_entries.add()
ui_root.index = self.index
ui_root.name = self.name
return ui_root
def add_to_group(self, group):
if self.group == group:
return
if self.group is not None:
self.remove_from_group()
# index
self.group = group
if group is None:
return
# node group
self.group.roots[self.name] = self
# ui
group.highlight()
ui_group = self.group.ui_group
root_entry = ui_group.root_entries.get(self.name)
# If not on the list of cells (e.g. when newly added in NRN)
if root_entry is None:
root_entry = self.add_to_UI_group(ui_group)
if root_entry is not None and not root_entry.selected:
root_entry.selected = True
|
121037
|
import pickle
import time
import numpy as np
import torch
import tqdm
from liga.models import load_data_to_gpu
from liga.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
metric['recall_roi_%s' % str(cur_thresh)] += ret_dict.get('roi_%s' % str(cur_thresh), 0)
metric['recall_rcnn_%s' % str(cur_thresh)] += ret_dict.get('rcnn_%s' % str(cur_thresh), 0)
metric['gt_num'] += ret_dict.get('gt', 0)
metric['num'] += 1
min_thresh = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST[0]
disp_dict['recall_%s' % str(min_thresh)] = \
'(%d, %d) / %d' % (metric['recall_roi_%s' % str(min_thresh)], metric['recall_rcnn_%s' % str(min_thresh)], metric['gt_num'])
# depth evaluation for stereo detection
for k, v in ret_dict.items():
if k.startswith('depth_error_'):
if k.endswith('perbox'):
if k not in metric:
metric[k] = []
metric[k].extend(v)
else:
metric[k] = metric.get(k, 0.) + ret_dict[k]
if k in ['depth_error_fg_median', 'depth_error_median']:
disp_dict[k] = '%.3f' % (metric[k] / metric['num'])
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
result_dir.mkdir(parents=True, exist_ok=True)
final_output_dir = result_dir / 'final_result' / 'data'
final_2d_output_dir = result_dir / 'final_result' / 'data2d'
if save_to_file:
final_output_dir.mkdir(parents=True, exist_ok=True)
final_2d_output_dir.mkdir(parents=True, exist_ok=True)
metric = {
'num': 0,
'gt_num': 0,
# 'depth_error_mean': 0.,
# 'depth_error_median': 0.,
}
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
metric['recall_roi_%s' % str(cur_thresh)] = 0
metric['recall_rcnn_%s' % str(cur_thresh)] = 0
dataset = dataloader.dataset
class_names = dataset.class_names
det_annos = []
det_annos_2d = []
iou_results = []
logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
if dist_test:
num_gpus = torch.cuda.device_count()
local_rank = cfg.LOCAL_RANK % num_gpus
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
broadcast_buffers=False
)
model.eval()
if cfg.LOCAL_RANK == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
start_time = time.time()
for i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
pred_dicts, ret_dict = model(batch_dict)
disp_dict = {}
statistics_info(cfg, ret_dict, metric, disp_dict)
if 'gt_boxes' in batch_dict and 'iou_results' in pred_dicts[0]:
iou_results.extend([x['iou_results'] for x in pred_dicts])
annos_2d = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_2d_output_dir if save_to_file else None,
mode_2d=True
) if 'pred_scores_2d' in pred_dicts[0] else None
annos = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_output_dir if save_to_file else None
) if 'pred_scores' in pred_dicts[0] else None
if annos_2d is not None:
det_annos_2d += annos_2d
if annos is not None:
det_annos += annos
if cfg.LOCAL_RANK == 0:
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.LOCAL_RANK == 0:
progress_bar.close()
if dist_test:
rank, world_size = common_utils.get_dist_info()
iou_results = common_utils.merge_results_dist(iou_results, len(dataset), tmpdir=result_dir / 'tmpdir')
det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
det_annos_2d = common_utils.merge_results_dist(det_annos_2d, len(dataset), tmpdir=result_dir / 'tmpdir')
metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished (sec_per_example: %.4f second).' % sec_per_example)
if cfg.LOCAL_RANK != 0:
return {}
ret_dict = {}
if dist_test:
for key, val in metric[0].items():
for k in range(1, world_size):
metric[0][key] += metric[k][key]
metric = metric[0]
gt_num_cnt = metric['gt_num']
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
for k in metric:
if k.startswith('depth_error_'):
if not k.endswith('perbox'):
metric[k] /= metric['num']
logger.info('%s: %f' % (k, metric[k]))
ret_dict['depth_error/%s' % (k)] = metric[k]
else:
for kk in metric[k][0]:
if kk.startswith("err_"):
values = [item[kk] for item in metric[k]]
mean_value = np.mean(values)
logger.info('%s: %f' % (k + "_" + kk, mean_value))
ret_dict['%s' % (k + "_" + kk)] = mean_value
# copy iou into metric[k]
if not iou_results:
continue
for x in metric[k]:
x['iou'] = iou_results[x['image_idx']][x['idx']]
total_pred_objects = 0
for anno in det_annos:
total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects (%d samples): %.3f'
% (len(det_annos), total_pred_objects / max(1, len(det_annos))))
with open(result_dir / 'result.pkl', 'wb') as f:
pickle.dump(det_annos, f)
with open(result_dir / 'metric_result.pkl', 'wb') as f:
pickle.dump(metric, f)
if det_annos and 'gt_boxes' in batch_dict:
logger.info('---- 3d box evaluation ---- ')
result_str, result_dict = dataset.evaluation(
det_annos, class_names,
eval_metric='3d',
output_path=final_output_dir
)
logger.info(result_str)
ret_dict.update(result_dict)
if det_annos_2d and 'gt_boxes_2d' in batch_dict:
logger.info('---- 2d box evaluation ---- ')
result_str, _ = dataset.evaluation(
det_annos_2d, class_names,
eval_metric='2d',
output_path=final_2d_output_dir
)
logger.info(result_str)
else:
logger.info(f"no 2d eval: {'gt_boxes_2d' in batch_dict} / {det_annos_2d}")
    logger.info('Result is saved to %s' % result_dir)
logger.info('****************Evaluation done.*****************')
return ret_dict
if __name__ == '__main__':
pass
|
121058
|
def test13(a, b):
"""Test for Docstring"""
a.b[1]
ziffern = "0123456789"
ziffern[a:b]
|
121088
|
import Inline
info = {
"friendly_name": "Link (External)",
"example_template": "scheme://authority/path?query|Text To Display",
"summary": "A more flexible way of linking to an external resource.",
"details": """
<p>Links to external resources can be embedded in the page by
<i>naked linking</i>, just mentioning the URL itself, or by using
this plugin. The text within the span is split at the first "|" character.
The first part is interpreted as the URL to link to, and
the second part is used as the display text for the link.</p>
<p>For example, [link http://www.google.com|Google] is rendered to
<a href="http://www.google.com">Google</a>.</p>
"""
}
def SpanHandler(rest, acc):
(text, rest) = Inline.collectSpan(rest)
textparts = text.split('|', 1)
if len(textparts) > 1:
target = textparts[0]
vistext = textparts[1]
else:
target = text
vistext = target
acc.append(Inline.ExternalLink(target, vistext))
return rest
|
121116
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import pdb
from textcnn import TextCNN
class Discriminator(nn.Module):
def __init__(self, vocab_size, emb_dim, filter_num, filter_sizes, dropout=0.0):
super(Discriminator, self).__init__()
self.query_cnn = TextCNN(emb_dim, filter_num, filter_sizes)
self.response_cnn = TextCNN(emb_dim, filter_num, filter_sizes)
self.dropout = nn.Dropout(p=dropout)
self.embeddings = nn.Embedding(vocab_size, emb_dim)
#
self.judger = nn.Sequential(
nn.Linear(2 * filter_num * len(filter_sizes), 128),
nn.ReLU(),
self.dropout,
nn.Linear(128, 2),
nn.Softmax(dim=1)
)
# self.query_h0 = nn.Parameter(torch.zeros(1, 1, 300))
# self.query_c0 = nn.Parameter(torch.zeros(1, 1, 300))
# self.response_h0 = nn.Parameter(torch.zeros(1, 1, 300))
# self.response_c0 = nn.Parameter(torch.zeros(1, 1, 300))
#
# self.query_rnn = torch.nn.LSTM(emb_dim, 300, 1)
# self.response_rnn = torch.nn.LSTM(emb_dim, 300, 1)
# self.judger = nn.Sequential(
# nn.Linear(2 * 300, 128),
# nn.ReLU(),
# self.dropout,
# nn.Linear(128, 2),
# nn.Softmax(dim=1)
#
# )
def forward(self, query, response):
'''
Args:
query: bsz x query_len
response: bsz x response_len
Returns:
'''
bsz = query.size(0)
query_emb = self.embeddings(query) # bsz x query_len x emb_size
response_emb = self.embeddings(response) # bsz x response_len x emb_size
query_features = self.query_cnn(query_emb) # [B, T, D] -> [B, all_features]
response_features = self.response_cnn(response_emb)
# h0 = self.query_h0.expand(1, bsz, 300).contiguous()
# c0 = self.query_c0.expand(1, bsz, 300).contiguous()
# _, (h_n, c_n) = self.query_rnn(query_emb.transpose(0, 1), (h0, c0))
# query_features = h_n.squeeze(0) # bsz x 300
#
# h0 = self.response_h0.expand(1, bsz, 300).contiguous()
# c0 = self.response_c0.expand(1, bsz, 300).contiguous()
# _, (h_n, c_n) = self.response_rnn(response_emb.transpose(0, 1), (h0, c0))
# response_features = h_n.squeeze(0) # bsz x 300
inputs = torch.cat((query_features, response_features), 1)
prob = self.judger(inputs)[:, 1]
return prob
def batchClassify(self, query, response):
'''
Args:
query: bsz x query_len
response: bsz x response_len
Returns:
out: bsz
'''
out = self.forward(query, response)
return out
def batchBCEloss(self, query, response, target):
'''
Returns Binary Cross Entropy Loss for discriminator.
Args:
query: bsz x query_len
response: bsz x response_len
target: bsz (binary 0/1)
Returns:
'''
loss_fn = nn.BCELoss()
out = self.forward(query, response)
return loss_fn(out, target)
def validate(self, valid_set):
acc = 0
total = 0
for i in range(len(valid_set)):
src, rep, target = valid_set[i]
bsz, _ = src.size()
out = self.batchClassify(src, rep)
acc += torch.sum((out > 0.5) == (target > 0.5)).data.item()
total += bsz
return acc / total
def save_model(self, save_path, **kwargs):
kwargs['state_dict'] = self.state_dict()
torch.save(kwargs, save_path)
def load_model(self, save_path):
saved_stuff = torch.load(save_path)
# remove the 'module' prefix
clean_state = {}
saved_state = saved_stuff["state_dict"]
for k, v in saved_state.items():
nk = k[7:] if k.startswith('module.') else k
clean_state[nk] = v
# self.load_state_dict(saved_stuff['state_dict'])
self.load_state_dict(clean_state)
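# A minimal smoke-test sketch (illustrative only). It assumes that TextCNN, as
# imported above, maps a [bsz, seq_len, emb_dim] tensor to a flat
# [bsz, filter_num * len(filter_sizes)] feature tensor, which is what the
# judger's input size presupposes; the sizes below are arbitrary.
if __name__ == '__main__':
    vocab_size, emb_dim, filter_num, filter_sizes = 100, 32, 8, [2, 3, 4]
    disc = Discriminator(vocab_size, emb_dim, filter_num, filter_sizes, dropout=0.1)
    query = torch.randint(0, vocab_size, (4, 10))      # bsz x query_len
    response = torch.randint(0, vocab_size, (4, 12))   # bsz x response_len
    print(disc.batchClassify(query, response).shape)   # expected: torch.Size([4])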
|
121135
|
import unittest
from pymatgen.core import Structure
from veidt.monte_carlo.base import StateDict, StaticState
from veidt.monte_carlo.state import AtomNumberState, IsingState
from veidt.monte_carlo.state import SpinStructure, Chain
import os
file_path = os.path.dirname(__file__)
def unequal_site_number(list1, list2):
return sum([i != j for i, j in zip(list1, list2)])
class TestMonteCarlo(unittest.TestCase):
def test_ising_state(self):
ising_state = IsingState([0, 1, 0, 1])
new_ising_state = ising_state.copy()
self.assertListEqual(ising_state.state, new_ising_state.state)
self.assertEqual(ising_state, IsingState([0, 1, 0, 1], 'ising2'))
self.assertEqual(ising_state.n, 4)
self.assertListEqual(ising_state.state, [0, 1, 0, 1])
self.assertEqual(ising_state.name, 'ising')
ising_state.change()
self.assertEqual(unequal_site_number(ising_state.state, [0, 1, 0, 1]), 1)
def test_atom_number_state(self):
atom_number = AtomNumberState(10)
self.assertEqual(atom_number.state, 10)
atom_number.change()
self.assertIn(atom_number.state, [9, 11])
def test_spin_structure(self):
species_map = {0: 'K', 1: 'Na'}
structure = Structure.from_file(os.path.join(file_path, 'test_NaCoO2.cif'))
state_dict = StateDict([StaticState(100, 'temperature'),
AtomNumberState(10),
IsingState([0]*22+[1, 1])])
spin_struct = SpinStructure(structure, state_dict, species_map)
self.assertListEqual(spin_struct.state_dict['ising'].state, [0] * 22 + [1, 1])
orig_specie_list = spin_struct.to_specie_list()
# test move method
spin_struct = SpinStructure(structure, state_dict, species_map)
spin_struct.change()
self.assertEqual(unequal_site_number(spin_struct.state_dict['ising'].state, [0] * 22 + [1, 1]), 1)
specie_list = spin_struct.to_specie_list()
self.assertEqual(unequal_site_number(orig_specie_list, specie_list), 1)
# test from_states
spin_struct.from_states(
StateDict([StaticState(1000, 'temperature'),
AtomNumberState(10), IsingState([0]*20+[1, 1] + [0, 0])]))
self.assertEqual(unequal_site_number(spin_struct.to_specie_list(), orig_specie_list), 4)
self.assertEqual(unequal_site_number(spin_struct.to_states()['ising'].state, [0]*22 + [1, 1]), 4)
# test structure to states
self.assertListEqual(spin_struct.structure_to_states(structure)['ising'].state,
[0] * 22 + [1, 1])
def test_chain(self):
spin_state = IsingState([0, 1, 0])
atom_state = AtomNumberState(10)
state_dict = StateDict([spin_state, atom_state])
chain = Chain()
chain.append(state_dict)
chain.append(StateDict([AtomNumberState(20), IsingState([1, 1, 1])]))
self.assertListEqual(chain.chain['ising'][0], [0, 1, 0])
self.assertListEqual(chain.chain['ising'][1], [1, 1, 1])
self.assertListEqual(chain.chain['atom_number'], [10, 20])
self.assertIs(spin_state._chain, chain)
self.assertEqual(spin_state._chain.length, 2)
if __name__ == '__main__':
unittest.main()
|
121196
|
import operator
import threading
import functools
import itertools
import contextlib
import collections
import numpy as np
from ..autoray import (
get_lib_fn,
infer_backend,
get_dtype_name,
register_function,
astype,
)
_EMPTY_DICT = {}
class LazyArray:
"""A lazy array representing a shaped node in a computational graph.
"""
__slots__ = (
"_backend",
"_fn",
"_args",
"_kwargs",
"_shape",
"_dtype",
"_data",
"_deps",
)
def __init__(
self, backend, fn, args, kwargs, shape, dtype, deps=None,
):
# info required to perform the computation
self._backend = backend
self._fn = fn
self._args = args
if kwargs is None:
self._kwargs = _EMPTY_DICT
else:
self._kwargs = kwargs
# resulting array information
self._shape = shape
self._dtype = dtype
self._data = None
# lazy arrays this ``LazyArray`` depends on
if deps is None:
# automatically find them
self._deps = (*find_lazy(self._args), *find_lazy(self._kwargs))
else:
# manually specified (more efficient)
self._deps = deps
@classmethod
def from_data(cls, data):
"""Create a new ``LazyArray`` directly from a concrete array.
"""
obj = cls.__new__(cls)
obj._backend = infer_backend(data)
obj._fn = obj._args = obj._kwargs = None
obj._shape = tuple(map(int, data.shape))
obj._dtype = get_dtype_name(data)
obj._data = data
obj._deps = ()
return obj
@classmethod
def from_shape(cls, shape, backend='numpy', dtype=None):
"""Create a new ``LazyArray`` with a given shape.
"""
obj = cls.__new__(cls)
obj._backend = backend
obj._fn = obj._args = obj._kwargs = None
obj._shape = tuple(map(int, shape))
obj._dtype = dtype
obj._data = '__PLACEHOLDER__'
obj._deps = ()
return obj
def to(
self,
fn,
args,
kwargs=None,
backend=None,
shape=None,
dtype=None,
deps=None,
):
"""Create a new ``LazyArray``, by default propagating backend, shape,
        dtype and deps from the current LazyArray.
"""
return LazyArray(
fn=fn,
args=args,
kwargs=kwargs,
backend=backend if backend is not None else self._backend,
shape=shape if shape is not None else self.shape,
dtype=dtype if dtype is not None else self.dtype,
deps=deps if deps is not None else (self,),
)
def _materialize(self):
"""Recursively compute all required args and kwargs for this node
before computing itself and dereferencing dependencies. Note using this
to materialize a large computation from scratch should be avoided due
to the recursion limit, use ``x.compute()`` instead.
"""
if self._data is None:
# materialize any actual array args
args = (maybe_materialize(x) for x in self._args)
kwargs = {k: maybe_materialize(v) for k, v in self._kwargs.items()}
self._data = self._fn(*args, **kwargs)
# free any references to deps
self._fn = self._args = self._kwargs = None
self._deps = ()
return self._data
def __iter__(self):
"""Generate each unique computational node. Use ``ascend`` if you need
to visit children before parents.
"""
seen = set()
queue = [self]
queue_pop = queue.pop
queue_extend = queue.extend
seen_add = seen.add
while queue:
node = queue_pop()
nid = id(node)
if nid not in seen:
yield node
queue_extend(node._deps)
seen_add(nid)
def ascend(self):
"""Generate each unique computational node, from leaves to root.
"""
seen = set()
ready = set()
queue = [self]
queue_extend = queue.extend
queue_pop = queue.pop
ready_add = ready.add
seen_add = seen.add
while queue:
node = queue[-1]
need_to_visit = [c for c in node._deps if id(c) not in ready]
if need_to_visit:
queue_extend(need_to_visit)
else:
node = queue_pop()
nid = id(node)
ready_add(nid)
if nid not in seen:
yield node
seen_add(nid)
def compute(self):
"""Compute the value of this lazy array.
Unlike ``self._materialize()`` this avoids deep recursion.
"""
for node in self.ascend():
node._materialize()
return self._data
def compute_constants(self, variables):
"""Fold constant arrays - everything not dependent on ``variables`` -
into the graph.
"""
if isinstance(variables, LazyArray):
variables = (variables,)
variables = set(variables)
# must ascend
for node in self.ascend():
if not any(c in variables for c in node._deps):
# can fold
node._materialize()
else:
# mark as variable
variables.add(node)
def as_string(self, params):
"""Create a string which evaluates to the lazy array creation.
"""
# name function and store in locals
fn_name = f"{getattr(self._fn, '__name__', 'fn')}{id(self._fn)}"
params.setdefault(fn_name, self._fn)
# string of args and kwargs
str_call = ", ".join(
itertools.chain(
(stringify(x, params) for x in self._args),
(
f"{k}: {stringify(v, params)}"
for k, v in self._kwargs.items()
),
)
)
# assign function call to new variable
return f"x{id(self)} = {fn_name}({str_call})"
def get_source(self, params=None):
"""Write the source code of an unravelled version of the computational
graph, injecting required runtime objects into ``params``.
"""
if params is None:
# locals space mapping LazyArray names to values
params = {}
delete_checked = set()
s = [] # source code lines
for node in reversed(tuple(self.ascend())):
# when *descending*, the first encounter of a node is the
# *last* time it is referenced in forward pass -> delete,
# need to do this for GC since running in single big function
for c in node._deps:
if c not in delete_checked:
if c._deps:
# is an intermediate - safe to delete
s.append(f"del x{id(c)}")
delete_checked.add(c)
if node._data is None:
# create the array via computation
s.append(node.as_string(params))
else:
# inject the already computed data as constant
params[f"x{id(node)}"] = node._data
# reverse (ascend) into source code
return "\n".join(reversed(s))
def get_compiled(self, optimize=1):
"""Compile the function into a code object using ``compile``,
returning a wrapper that executes it using ``exec`` and the 'locals'
        dict specifying inputs, which can be modified. It should be called
like:
fn, params = x.get_compiled()
# modify params e.g. inject new arrays here before call
...
fn(params)
"""
# write source and populate locals mapping that function will run under
params = {}
source = self.get_source(params)
# compile source
code = compile(source, f"code{id(self)}", "exec", optimize=optimize)
compiled = functools.partial(
_code_exec_fn, code=code, out_name=f"x{id(self)}"
)
# need both function and locals mapping to run it with / modify args
return compiled, params
def get_function(self, variables, fold_constants=True):
"""Get a compiled function that computes ``fn(arrays)``, with ``fn``
describing the computational graph of this ``LazyArray`` and ``arrays``
corresponding to the downstream ``LazyArray`` nodes ``variables``.
Parameters
----------
variables : sequence of LazyArray
Input nodes whose data can change between calls.
fold_constants : bool, optional
Compute all intermediates which do not depend on ``variables``
prior to compilation.
Returns
-------
fn : callable
Function with signature ``fn(arrays)``.
"""
if fold_constants:
self.compute_constants(variables=variables)
var_names = tuple(f"x{id(v)}" for v in variables)
fn, params = self.get_compiled()
return functools.partial(
_array_fn, var_names=var_names, params=params, fn=fn
)
def history_max_size(self):
"""Get the largest single tensor size appearing in this computation.
"""
return max(node.size for node in self)
def history_size_footprint(self):
"""Get the combined size of intermediates at each step of the
computation. Note this assumes that intermediates are immediately
garbage collected when they are no longer required.
"""
delete_checked = set()
sizes = []
for node in reversed(tuple(self.ascend())):
for c in node._deps:
if c not in delete_checked:
# last time a dependency is seen, subtract the size
if c._deps:
sizes.append(-c.size)
delete_checked.add(c)
if node._data is None:
# this is a new intermediate, add the size
sizes.append(+node.size)
sizes.reverse()
return list(itertools.accumulate(sizes))
def history_peak_size(self):
"""Get the peak combined intermediate size of this computation.
"""
return max(self.history_size_footprint())
def history_total_size(self):
"""The the total size of all unique arrays in the computational graph,
possibly relevant e.g. for back-propagation algorithms.
"""
return sum(node.size for node in self)
def plot_history_size_footprint(
self,
log=None,
figsize=(8, 2),
color='purple',
alpha=0.5,
ax=None,
return_fig=False,
):
"""Plot the memory footprint throughout this computation.
Parameters
----------
log : None or int, optional
If not None, display the sizes in base ``log``.
figsize : tuple, optional
Size of the figure.
color : str, optional
Color of the line.
alpha : float, optional
Alpha of the line.
ax : matplotlib.axes.Axes, optional
Axes to plot on, will be created if not provided.
return_fig : bool, optional
If True, return the figure object, else just show and close it.
"""
import matplotlib.pyplot as plt
y = np.array(self.history_size_footprint())
if log:
y = np.log2(y) / np.log2(log)
            ylabel = f'$\\log_{{{log}}}[SIZE]$'
else:
ylabel = 'SIZE'
x = np.arange(y.size)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
ax.fill_between(x, 0, y, alpha=alpha, color=color)
if fig is not None:
ax.grid(True, c=(0.95, 0.95, 0.95), which='both')
ax.set_axisbelow(True)
ax.set_xlim(0, np.max(x))
ax.set_ylim(0, np.max(y))
ax.set_ylabel(ylabel)
if return_fig or fig is None:
return fig
else:
plt.show()
plt.close(fig)
def to_nx_digraph(
self,
variables=None,
var_color=(0, 0.5, 0.25),
const_color=(0, 0.5, 1.0),
root_color=(1, 0, 0.5),
node_scale=5,
):
"""Convert this ``LazyArray`` into a ``networkx.DiGraph``, injecting
various plotting information as properties.
"""
import networkx as nx
if variables is not None:
if isinstance(variables, LazyArray):
variables = (variables,)
variables = set(variables)
def is_variable(node):
return node in variables
else:
def is_variable(_):
return False
def extract_props(node, **kwargs):
v = is_variable(node)
d = {
"variable": v,
"fn": getattr(node._fn, "__name__", "CONST"),
"size": node_scale * np.log2(node.size) + node_scale,
"color": var_color if v else const_color,
}
d.update(kwargs)
if not node._deps:
d["color"] = tuple(x ** 0.2 for x in d["color"])
return d
G = nx.DiGraph()
for node in self.ascend():
if any(is_variable(child) for child in node._deps):
variables.add(node)
G.add_node(node, **extract_props(node))
for x in node._deps:
G.add_edge(x, node)
G.nodes[self]["color"] = root_color
return G
def plot(
self,
variables=None,
initial_layout="spiral",
iterations=0,
k=None,
connectionstyle="arc3,rad=0.2",
arrowsize=5,
edge_color=None,
var_color=(0, 0.5, 0.25),
const_color=(0, 0.5, 1.0),
root_color=(1, 0, 0.5),
node_scale=5,
node_alpha=1.0,
show_labels=True,
label_alpha=0.2,
label_color=None,
font_size=8,
figsize=(6, 6),
ax=None,
return_fig=False,
**layout_opts,
):
"""Plot the computational graph of this ``LazyArray``.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb
import networkx as nx
isdark = sum(to_rgb(mpl.rcParams["figure.facecolor"])) / 3 < 0.5
if isdark:
draw_color = (0.75, 0.77, 0.80, 1.0)
else:
draw_color = (0.45, 0.47, 0.50, 1.0)
if edge_color is None:
edge_color = draw_color
if label_color is None:
label_color = mpl.rcParams["axes.labelcolor"]
created_fig = ax is None
if created_fig:
fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)
ax.axis("off")
ax.set_aspect("equal")
G = self.to_nx_digraph(
variables=variables,
var_color=var_color,
const_color=const_color,
root_color=root_color,
node_scale=node_scale,
)
if initial_layout == "spiral":
layout_opts.setdefault("equidistant", True)
pos = getattr(nx, initial_layout + "_layout")(G, **layout_opts)
if iterations:
pos = nx.layout.spring_layout(
G, pos=pos, k=k, iterations=iterations
)
nx.draw_networkx_edges(
G,
pos=pos,
ax=ax,
edge_color=draw_color,
connectionstyle=connectionstyle,
arrowsize=arrowsize,
arrows=True,
)
nx.draw_networkx_nodes(
G,
pos=pos,
ax=ax,
node_color=[G.nodes[x]["color"] for x in G.nodes],
node_size=[G.nodes[x]["size"] for x in G.nodes],
alpha=node_alpha,
)
if show_labels:
nx.draw_networkx_labels(
G,
pos=pos,
ax=ax,
labels={x: G.nodes[x]["fn"] for x in G.nodes},
font_color=label_color,
font_size=font_size,
alpha=label_alpha,
bbox={
"color": to_rgb(mpl.rcParams["figure.facecolor"]),
"alpha": label_alpha,
},
)
if not created_fig:
return
if return_fig:
return fig
else:
plt.show()
plt.close(fig)
@property
def fn(self):
return self._fn
@property
def fn_name(self):
return getattr(self._fn, "__name__", "None")
@property
def args(self):
return self._args
@property
def kwargs(self):
return self._kwargs
@property
def shape(self):
return self._shape
@property
def ndim(self):
return len(self._shape)
@property
def size(self):
return functools.reduce(operator.mul, self.shape, 1)
@property
def dtype(self):
return self._dtype
@property
def backend(self):
return self._backend
@property
def deps(self):
return self._deps
def __getitem__(self, key):
return getitem(self, key)
# this makes numpy operations delegate to __rmatmul__ etc.
__array_ufunc__ = None
def __mul__(self, other):
return multiply(self, other)
def __rmul__(self, other):
return multiply(self, other)
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return add(self, other)
def __sub__(self, other):
return sub(self, other)
def __rsub__(self, other):
return sub(other, self)
def __floordiv__(self, other):
return floordivide(self, other)
def __rfloordiv__(self, other):
return floordivide(other, self)
def __truediv__(self, other):
return truedivide(self, other)
def __rtruediv__(self, other):
return truedivide(other, self)
def __pow__(self, other):
return pow_(self, other)
def __rpow__(self, other):
return pow_(other, self)
def __matmul__(self, other):
return matmul(self, other)
def __rmatmul__(self, other):
return matmul(other, self)
def __abs__(self):
return abs_(self)
@property
def T(self):
return transpose(self)
@property
def H(self):
return conj(transpose(self))
@property
def real(self):
return real(self)
@property
def imag(self):
return imag(self)
def __repr__(self):
return (
f"<{self.__class__.__name__}("
f"fn={self.fn_name}, "
f"shape={self.shape}, "
f"dtype={self.dtype}, "
f"backend='{self.backend}')>"
)
def ensure_lazy(array):
if not isinstance(array, LazyArray):
return LazyArray.from_data(array)
return array
def find_lazy(x):
"""Recursively search for ``LazyArray`` instances in pytrees.
"""
if isinstance(x, LazyArray):
yield x
return
if isinstance(x, (tuple, list)):
for subx in x:
yield from find_lazy(subx)
return
if isinstance(x, dict):
for subx in x.values():
yield from find_lazy(subx)
return
# --------------------- recursively evaluating 'pytrees' --------------------- #
def materialize_larray(x):
return x._materialize()
def materialize_tuple(x):
return tuple(map(maybe_materialize, x))
def materialize_list(x):
return list(map(maybe_materialize, x))
def materialize_dict(x):
return {k: maybe_materialize(v) for k, v in x.items()}
def materialize_identity(x):
return x
_materialize_dispatch = collections.defaultdict(lambda: materialize_identity, {
LazyArray: materialize_larray,
tuple: materialize_tuple,
list: materialize_list,
dict: materialize_dict,
})
def maybe_materialize(x):
"""Recursively evaluate LazyArray instances in tuples, lists and dicts.
"""
return _materialize_dispatch[x.__class__](x)
# -------------------- recursively stringifying 'pytrees' -------------------- #
def stringify_larray(x, params):
name = f"x{id(x)}"
if x._data is not None:
params.setdefault(name, x._data)
return name
def stringify_tuple(x, params):
if not x:
return "()"
return f"({', '.join(stringify(xi, params) for xi in x)},)"
def stringify_list(x, params):
return f"[{', '.join(stringify(xi, params) for xi in x)}]"
def stringify_dict(x, params):
entries = (f"{k}: {stringify(v, params)}" for k, v in x.items())
return f"{{{', '.join(entries)}}}"
def stringify_identity(x, params):
if isinstance(x, (int, float, complex, bool, slice, range)):
return f"{x}"
if isinstance(x, str):
return f"'{x}'"
name = f"c{id(x)}"
params.setdefault(name, x)
return name
_stringify_dispatch = collections.defaultdict(lambda: stringify_identity, {
LazyArray: stringify_larray,
tuple: stringify_tuple,
list: stringify_list,
dict: stringify_dict,
})
def stringify(x, params):
"""Recursively stringify LazyArray instances in tuples, lists and dicts.
"""
return _stringify_dispatch[x.__class__](x, params)
def _code_exec_fn(params, code, out_name):
exec(code, None, params)
return params[out_name]
def _array_fn(arrays, var_names, fn, params):
# inject the new arrays
for name, array in zip(var_names, arrays):
params[name] = array
# run the byte-compiled function with the new locals
return fn(params)
# --------------------------------- caching --------------------------------- #
_SHARING_STACK = collections.defaultdict(list)
def currently_sharing():
"""Check if we are currently sharing a cache -- thread specific.
"""
return threading.get_ident() in _SHARING_STACK
def get_sharing_cache():
"""Return the most recent sharing cache -- thread specific.
"""
return _SHARING_STACK[threading.get_ident()][-1]
def _add_sharing_cache(cache):
_SHARING_STACK[threading.get_ident()].append(cache)
def _remove_sharing_cache():
tid = threading.get_ident()
_SHARING_STACK[tid].pop()
if not _SHARING_STACK[tid]:
del _SHARING_STACK[tid]
@contextlib.contextmanager
def shared_intermediates(cache=None):
"""Context in which contract intermediate results are shared.
Note that intermediate computations will not be garbage collected until
1. this context exits, and
2. the yielded cache is garbage collected (if it was captured).
Parameters
----------
cache : dict
If specified, a user-stored dict in which intermediate results will
be stored. This can be used to interleave sharing contexts.
Returns
-------
cache : dict
A dictionary in which sharing results are stored. If ignored,
sharing results will be garbage collected when this context is
exited. This dict can be passed to another context to resume
sharing.
"""
if cache is None:
cache = {}
_add_sharing_cache(cache)
try:
yield cache
finally:
_remove_sharing_cache()
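# Usage sketch for the sharing machinery (assuming the lazy wrappers defined
# further below, e.g. ``tensordot``): inside the context, calls with identical
# arguments hit the cache and return the *same* LazyArray node, so shared
# intermediates are built and computed at most once.
#
#     with shared_intermediates() as cache:
#         t1 = tensordot(x, y, axes=1)
#         t2 = tensordot(x, y, axes=1)
#         assert t1 is t2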
def maybe_id(x):
if hasattr(x, "shape"):
return id(x)
return x
def hash_args_kwargs(fn_name, *args, **kwargs):
hargs = tuple(map(maybe_id, args))
if kwargs:
hkwargs = tuple(sorted((k, maybe_id(v)) for k, v in kwargs.items()))
else:
hkwargs = None
return f"{fn_name}-{hash((hargs, hkwargs))}"
def lazy_cache(fn_name, hasher=None):
if hasher is None:
hasher = hash_args_kwargs
def wrapper(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
if not currently_sharing():
return fn(*args, **kwargs)
cache = get_sharing_cache()
key = hasher(fn_name, *args, **kwargs)
if key not in cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return wrapped
return wrapper
_DTYPES_REAL_EQUIV = {"complex128": "float64", "complex64": "float32"}
_DTYPES_COMPLEX_EQUIV = {"float64": "complex128", "float32": "complex64"}
@functools.lru_cache(None)
def dtype_real_equiv(dtype_name):
return _DTYPES_REAL_EQUIV.get(dtype_name, dtype_name)
@functools.lru_cache(None)
def dtype_complex_equiv(dtype_name):
return _DTYPES_COMPLEX_EQUIV.get(dtype_name, dtype_name)
@functools.lru_cache(None)
def _find_common_dtype(array_types, scalar_types):
return np.find_common_type(array_types, scalar_types).name
def find_common_dtype(*xs):
return _find_common_dtype(tuple(map(get_dtype_name, xs)), ())
def find_common_backend(*xs):
backend = None
# prefer inferring from LazyArray
for x in xs:
b = getattr(x, "backend", None)
if b == "autoray.lazy":
# check if any LazyArray is *itself* backed by LazyArray
return b
# else default to first backend seen
elif (backend is None) and (b is not None):
backend = b
# if no LazyArray args, check raw arrays
if backend is None:
backend = next(iter(
infer_backend(x) for x in xs if hasattr(x, "shape")
), None)
return backend
@functools.lru_cache(1024)
def find_broadcast_shape(xshape, yshape):
xndim = len(xshape)
yndim = len(yshape)
if xndim < yndim:
        xshape = (1,) * (yndim - xndim) + xshape
    elif yndim < xndim:
        yshape = (1,) * (xndim - yndim) + yshape
return tuple(max(d1, d2) for d1, d2 in zip(xshape, yshape))
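# Shape-only sketch of NumPy-style broadcasting: the shorter shape is
# left-padded with 1s before taking elementwise maxima, e.g.
#     find_broadcast_shape((3, 1, 5), (4, 5))  ->  (3, 4, 5)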
# -------------------------------- interface -------------------------------- #
def Variable(shape, backend=None, dtype=None):
"""Create a ``LazyArray`` from a shape only, representing a leaf node
in the computational graph. It can only act as a placeholder for data.
"""
return LazyArray.from_shape(shape, backend=backend, dtype=dtype)
@lazy_cache("array")
def array(x):
"""Create a ``LazyArray`` from an input array, representing a leaf node
in the computational graph.
"""
return LazyArray.from_data(x)
@lazy_cache("transpose")
def transpose(a, axes=None):
a = ensure_lazy(a)
fn_transpose = get_lib_fn(a.backend, "transpose")
if axes is None:
axes = range(a.ndim)[::-1]
newshape = tuple(a.shape[i] for i in axes)
# check for chaining transpositions
if a._fn is fn_transpose:
b = a._args[0]
if isinstance(b, LazyArray):
axes_prev = a._args[1]
axes_chained = tuple(axes_prev[k] for k in axes)
return b.to(fn_transpose, (b, axes_chained), shape=newshape)
return a.to(fn_transpose, (a, axes), shape=newshape)
@lazy_cache("reshape")
def _reshape_tuple(a, newshape):
a = ensure_lazy(a)
fn_reshape = get_lib_fn(a.backend, "reshape")
# check for redundant reshapes
if a._fn is fn_reshape:
b = a._args[0]
if isinstance(b, LazyArray):
a = b
return a.to(fn_reshape, (a, newshape), shape=newshape)
@functools.lru_cache(1024)
def find_full_reshape(newshape, size):
try:
expand = newshape.index(-1)
before = newshape[:expand]
after = newshape[expand + 1:]
d = size // functools.reduce(
operator.mul, itertools.chain(before, after), 1
)
return (*before, d, *after)
except ValueError:
return newshape
def reshape(a, newshape):
newshape = find_full_reshape(tuple(newshape), a.size)
return _reshape_tuple(a, newshape)
def getitem_hasher(_, a, key):
if not isinstance(key, tuple):
key = (key,)
hkey = tuple(
str(k) if isinstance(k, slice) else id(k) if hasattr(k, "shape") else k
for k in key
)
return f"getitem-{hash((id(a), hkey))}"
@lazy_cache("getitem", hasher=getitem_hasher)
def getitem(a, key):
a = ensure_lazy(a)
deps = (a,)
if not isinstance(key, tuple):
key = (key,)
try:
# expand ellipsis
expand = key.index(...)
ndiff = a.ndim - len(key) + 1
key = key[:expand] + (slice(None),) * ndiff + key[expand + 1:]
except ValueError:
# else pad trailing slices if necessary
ndiff = a.ndim - len(key)
if ndiff:
key = key + (slice(None),) * ndiff
newshape = []
for k, d in zip(key, a.shape):
if isinstance(k, LazyArray):
newshape.append(k.size)
deps += (k,)
elif isinstance(k, slice):
newshape.append(len(range(d)[k]))
else:
try:
newshape.append(len(k))
except TypeError:
pass
# TODO: np.newaxis == None
newshape = tuple(newshape)
return a.to(operator.getitem, (a, key), shape=newshape, deps=deps)
@lazy_cache("tensordot")
def tensordot(a, b, axes=2):
if isinstance(axes, int):
        # contract the last ``axes`` dims of ``a`` with the first ``axes`` dims of ``b``
        axes = (tuple(range(a.ndim - axes, a.ndim)), tuple(range(axes)))
newshape = tuple(
d for i, d in enumerate(a.shape) if i not in axes[0]
) + tuple(d for i, d in enumerate(b.shape) if i not in axes[1])
newdtype = find_common_dtype(a, b)
backend = find_common_backend(a, b)
fn_tensordot = get_lib_fn(backend, "tensordot")
return LazyArray(
backend=backend,
fn=fn_tensordot,
args=(a, b, axes),
kwargs=None,
shape=newshape,
dtype=newdtype,
deps=tuple(x for x in (a, b) if isinstance(x, LazyArray)),
)
@lazy_cache("einsum")
def einsum(*operands):
from opt_einsum.parser import parse_einsum_input
deps, output, larrays = parse_einsum_input(operands)
size_dict = {}
for term, op in zip(deps.split(","), larrays):
for i, char in enumerate(term):
size_dict[char] = max(size_dict.get(char, 1), op.shape[i])
eq = deps + "->" + output
newshape = tuple(size_dict[char] for char in output)
backend = find_common_backend(*larrays)
newdtype = find_common_dtype(*larrays)
fn_einsum = get_lib_fn(backend, "einsum")
return LazyArray(
backend=backend,
fn=fn_einsum,
args=(eq, *larrays),
kwargs=None,
shape=newshape,
dtype=newdtype,
deps=tuple(x for x in larrays if isinstance(x, LazyArray)),
)
@lazy_cache("trace")
def trace(a):
a = ensure_lazy(a)
return a.to(fn=get_lib_fn(a.backend, "trace"), args=(a,), shape=(),)
@lazy_cache("matmul")
def matmul(x1, x2):
backend = find_common_backend(x1, x2)
newdtype = find_common_dtype(x1, x2)
newshape = (*x1.shape[:-1], *x2.shape[1:])
return LazyArray(
backend=backend,
fn=operator.matmul,
args=(x1, x2),
kwargs=None,
shape=newshape,
dtype=newdtype,
deps=tuple(x for x in (x1, x2) if isinstance(x, LazyArray)),
)
@lazy_cache("clip")
def clip(a, a_min, a_max):
a = ensure_lazy(a)
fn_clip = get_lib_fn(a.backend, "clip")
return a.to(fn_clip, (a, a_min, a_max))
@lazy_cache("flip")
def flip(a, axis=None):
a = ensure_lazy(a)
fn_flip = get_lib_fn(a.backend, "flip")
return a.to(fn_flip, (a, axis))
@lazy_cache("sort")
def sort(a, axis=-1):
a = ensure_lazy(a)
return a.to(get_lib_fn(a.backend, "sort"), (a, axis))
@lazy_cache("argsort")
def argsort(a, axis=-1):
a = ensure_lazy(a)
return a.to(
fn=get_lib_fn(a.backend, "argsort"), args=(a, axis), dtype="int",
)
@lazy_cache("stack")
def stack(arrays, axis=0):
arrays = tuple(arrays)
newdtype = find_common_dtype(*arrays)
newshape = list(arrays[0].shape)
    newshape.insert(axis if axis >= 0 else len(newshape) + 1 + axis, len(arrays))
backend = find_common_backend(*arrays)
fn = get_lib_fn(backend, "stack")
return LazyArray(
backend=backend,
fn=fn,
args=(arrays, axis),
kwargs=None,
shape=tuple(newshape),
dtype=newdtype,
deps=tuple(x for x in arrays if isinstance(x, LazyArray)),
)
def make_binary_func(name, fn):
@lazy_cache(name)
def binary_func(x1, x2):
newdtype = find_common_dtype(x1, x2)
x1shape = getattr(x1, "shape", ())
x2shape = getattr(x2, "shape", ())
newshape = find_broadcast_shape(x1shape, x2shape)
return LazyArray(
backend=find_common_backend(x1, x2),
fn=fn,
args=(x1, x2),
kwargs=None,
shape=newshape,
dtype=newdtype,
deps=tuple(x for x in (x1, x2) if isinstance(x, LazyArray)),
)
return binary_func
multiply = make_binary_func("multiply", operator.mul)
add = make_binary_func("add", operator.add)
sub = make_binary_func("sub", operator.sub)
floordivide = make_binary_func("floordivide", operator.floordiv)
truedivide = make_binary_func("truedivide", operator.truediv)
pow_ = make_binary_func("pow", operator.pow)
def make_unary_func(name, to_real=False):
if to_real:
def get_newdtype(x):
return dtype_real_equiv(x.dtype)
else:
def get_newdtype(x):
return None
@lazy_cache(name)
def unary_func(x):
x = ensure_lazy(x)
newdtype = get_newdtype(x)
return x.to(fn=get_lib_fn(x.backend, name), args=(x,), dtype=newdtype,)
return unary_func
sin = make_unary_func("sin")
cos = make_unary_func("cos")
tan = make_unary_func("tan")
arcsin = make_unary_func("arcsin")
arccos = make_unary_func("arccos")
arctan = make_unary_func("arctan")
sinh = make_unary_func("sinh")
cosh = make_unary_func("cosh")
tanh = make_unary_func("tanh")
arcsinh = make_unary_func("arcsinh")
arccosh = make_unary_func("arccosh")
arctanh = make_unary_func("arctanh")
exp = make_unary_func("exp")
log = make_unary_func("log")
log2 = make_unary_func("log2")
log10 = make_unary_func("log10")
conj = make_unary_func("conj")
sign = make_unary_func("sign")
abs_ = make_unary_func("abs", to_real=True)
angle = make_unary_func("angle", to_real=True)
real = make_unary_func("real", to_real=True)
imag = make_unary_func("imag", to_real=True)
def make_reduction_func(name):
@lazy_cache(name)
def reduction_func(a, axis=None):
a = ensure_lazy(a)
fn = get_lib_fn(a.backend, name)
nd = a.ndim
if axis is None:
return a.to(fn=fn, args=(a,), shape=(),)
elif not hasattr(axis, "__len__"):
axis = (axis,)
        axis = tuple(nd + i if i < 0 else i for i in axis)
newshape = tuple(d for i, d in enumerate(a.shape) if i not in axis)
return a.to(fn=fn, args=(a, axis), shape=newshape)
return reduction_func
sum_ = make_reduction_func("sum")
prod = make_reduction_func("prod")
min_ = make_reduction_func("min")
max_ = make_reduction_func("max")
# # XXX: still missing
# allclose, complex, diag
# dot, vdot, kron, inner, outer
# pad, eye
# squeeze, expand_dims
# to_numpy
# ---------------------------- autoray specials ----------------------------- #
def lazy_get_dtype_name(x):
return x.dtype
@lazy_cache("astype")
def lazy_astype(x, dtype_name):
x = ensure_lazy(x)
return x.to(fn=astype, args=(x, dtype_name), dtype=dtype_name,)
register_function("autoray.lazy", "get_dtype_name", lazy_get_dtype_name)
register_function("autoray.lazy", "astype", lazy_astype)
|
121197
|
from django.apps import AppConfig
class AutoModelConf(AppConfig):
name = 'tests.inspectdb'
label = 'auto_model'
class DependentModelConf(AppConfig):
name = 'tests.inspectdb.dependent_model'
label = 'dependent_model'
|
121286
|
import json
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from helpers.text import devectorize
from helpers.training import load_checkpoint
from models.translate import prior_model_from_checkpoint
from modules.data.collates import Seq2SeqCollate
from modules.data.datasets import SequenceDataset, TranslationDataset
from modules.helpers import sequence_mask
from modules.models import Seq2SeqTransformer
def _ce_loss(logits, labels, lengths, ignore_index=0):
_logits = logits.contiguous().view(-1, logits.size(-1))
if ignore_index >= 0:
_labels = labels.contiguous().view(-1)
else:
assert lengths is not None
mask = ~sequence_mask(lengths, labels.size(1))
_labels = labels.masked_fill_(mask, -1).contiguous().view(-1)
_loss = F.cross_entropy(_logits, _labels, ignore_index=ignore_index,
reduction='none')
_loss_per_step = _loss.view(labels.size())
loss = _loss_per_step.sum(-1) / lengths.float()
return loss, _loss_per_step
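# Shape sketch: for logits [bsz, seq_len, vocab], labels [bsz, seq_len] and
# lengths [bsz], ``_ce_loss`` returns a per-sentence loss [bsz] (token losses
# summed and divided by the true length) plus the per-step losses [bsz, seq_len].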
def _load_model(checkpoint, device="cuda"):
cp = load_checkpoint(checkpoint)
x_vocab, y_vocab = cp["vocab"]
model = Seq2SeqTransformer(len(x_vocab), len(y_vocab),
**cp["config"]["model"])
model.load_state_dict(cp["model"])
model.to(device)
model.eval()
return model, x_vocab, y_vocab, cp["config"]
def _logits2dist(logits, vocab, topk=5):
top_p, top_i = torch.softmax(logits, -1).topk(topk, -1)
t = []
for probs, tokids in zip(top_p.tolist(), top_i.tolist()):
        t.append([(vocab.id2tok[tok], p) for p, tok in zip(probs, tokids)])
return t
device = "cuda"
# --------------------------------------
# load model
# --------------------------------------
model_prior, _, _, _ = _load_model("final.trans.deen_prior_3M_kl_best.pt")
model_postnorm, _, _, _ = _load_model("final.trans.deen_postnorm_best.pt")
model_base, src_vocab, trg_vocab, cnf = _load_model("final.trans.deen_base_best.pt")
# --------------------------------------
# load lm
# --------------------------------------
lm_cp = "../checkpoints/prior.lm_news_en_30M_trans_best.pt"
lm_cp = load_checkpoint(lm_cp)
lm = prior_model_from_checkpoint(lm_cp)
lm.to(device)
lm.eval()
# --------------------------------------
# dataset
# --------------------------------------
src_path = "de.txt"
trg_path = "en.txt"
val_src = SequenceDataset(src_path, vocab=src_vocab, **{**cnf["data"],
**cnf["data"]["src"]})
val_trg = SequenceDataset(trg_path, vocab=trg_vocab, **{**cnf["data"],
**cnf["data"]["trg"]})
val_set = TranslationDataset(val_src, val_trg)
data_loader = DataLoader(
val_set,
batch_size=32,
# batch_sampler=TokenBatchSampler(val_src.lengths * 2, 1000),
collate_fn=Seq2SeqCollate())
results = []
def _is_failed(gold, y_ids, lm_ids, tm_ids):
return [True if (g != y and g == t) else False
for g, y, l, t in zip(gold, y_ids, lm_ids, tm_ids)]
def _batch_forward(batch):
batch = list(map(lambda x: x.to(device), batch))
x_sos, x_eos, x_len, y_sos, y_eos, y_len = batch
# prior
_, dec_prior = model_prior(x_eos, y_sos, x_len, y_len)
dec_prior["lm"] = lm(y_sos, y_len)["logits"]
# shallow
_, dec_shallow = model_base(x_eos, y_sos, x_len, y_len,
**{"fusion": "shallow",
"fusion_a": 0.1, "lm": lm})
# postnorm
_, dec_postnorm = model_postnorm(x_eos, y_sos, x_len, y_len,
**{"fusion": "postnorm", "lm": lm})
# --------------------------------------------------------------------
_inputs = devectorize(x_eos.tolist(), src_vocab.id2tok, src_vocab.EOS_id,
strip_eos=True)
_targets = devectorize(y_eos.tolist(), trg_vocab.id2tok, trg_vocab.EOS_id,
strip_eos=True)
# --------------------------------------------------------------------
# prior
# --------------------------------------------------------------------
_prior_ids = dec_prior["logits"].max(2)[1].tolist()
_prior_lm_ids = dec_prior["lm"].max(2)[1].tolist()
_prior_tokens = devectorize(_prior_ids, trg_vocab.id2tok)
_prior_lm_tokens = devectorize(_prior_lm_ids, trg_vocab.id2tok)
# --------------------------------------------------------------------
# shallow
# --------------------------------------------------------------------
_shallow_ids = dec_shallow["logits"].max(2)[1].tolist()
_shallow_tm_ids = dec_shallow["dec"].max(2)[1].tolist()
_shallow_lm_ids = dec_shallow["lm"].max(2)[1].tolist()
_shallow_fails = [_is_failed(y_eos[i], _shallow_ids[i],
_shallow_lm_ids[i], _shallow_tm_ids[i])
for i in range(x_eos.size(0))]
_shallow_tokens = devectorize(_shallow_ids, trg_vocab.id2tok)
_shallow_tm_tokens = devectorize(_shallow_tm_ids, trg_vocab.id2tok)
_shallow_lm_tokens = devectorize(_shallow_lm_ids, trg_vocab.id2tok)
# --------------------------------------------------------------------
# postnorm
# --------------------------------------------------------------------
_postnorm_ids = dec_postnorm["logits"].max(2)[1].tolist()
_postnorm_tm_ids = dec_postnorm["dec"].max(2)[1].tolist()
_postnorm_lm_ids = dec_postnorm["lm"].max(2)[1].tolist()
_postnorm_fails = [_is_failed(y_eos[i], _postnorm_ids[i],
_postnorm_lm_ids[i], _postnorm_tm_ids[i])
for i in range(x_eos.size(0))]
_postnorm_tokens = devectorize(_postnorm_ids, trg_vocab.id2tok)
_postnorm_tm_tokens = devectorize(_postnorm_tm_ids, trg_vocab.id2tok)
_postnorm_lm_tokens = devectorize(_postnorm_lm_ids, trg_vocab.id2tok)
# --------------------------------------------------------------------
for i in range(x_eos.size(0)):
if y_len[i].item() > 20:
continue
row = {
"source": _inputs[i][:x_len[i]],
"target": _targets[i][:y_len[i]],
"prior_toks": _prior_tokens[i][:y_len[i]],
"prior_toks_lm": _prior_lm_tokens[i][:y_len[i]],
"prior_dist": _logits2dist(dec_prior["logits"][i], trg_vocab)[
:y_len[i]],
"prior_dist_lm": _logits2dist(dec_prior["lm"][i], trg_vocab)[
:y_len[i]],
"postnorm_toks": _postnorm_tokens[i][:y_len[i]],
"postnorm_toks_lm": _postnorm_lm_tokens[i][:y_len[i]],
"postnorm_toks_tm": _postnorm_tm_tokens[i][:y_len[i]],
"postnorm_dist": _logits2dist(dec_postnorm["logits"][i], trg_vocab)[
:y_len[i]],
"postnorm_dist_tm": _logits2dist(dec_postnorm["dec"][i], trg_vocab)[
:y_len[i]],
"postnorm_dist_lm": _logits2dist(dec_postnorm["lm"][i], trg_vocab)[
:y_len[i]],
"postnorm_fails": _postnorm_fails[i],
"shallow_toks": _shallow_tokens[i][:y_len[i]],
"shallow_toks_lm": _shallow_lm_tokens[i][:y_len[i]],
"shallow_toks_tm": _shallow_tm_tokens[i][:y_len[i]],
"shallow_dist": _logits2dist(dec_shallow["logits"][i], trg_vocab)[
:y_len[i]],
"shallow_dist_tm": _logits2dist(dec_shallow["dec"][i], trg_vocab)[
:y_len[i]],
"shallow_dist_lm": _logits2dist(dec_shallow["lm"][i], trg_vocab)[
:y_len[i]],
"shallow_fails": _shallow_fails[i],
}
if any(_postnorm_fails[i]) or any(_shallow_fails[i]):
yield row
del batch
_results = []
with torch.no_grad():
for batch in tqdm(data_loader, total=len(data_loader),
desc="Translating..."):
_results.extend(list(_batch_forward(batch)))
with open("samples.json", "w") as f:
json.dump(_results, f)
|
121297
|
from torchtext import data
from torch.utils.data import DataLoader
from graph import MTInferBatcher, get_mt_dataset, MTDataset, DocumentMTDataset
from modules import make_translate_infer_model
from utils import tensor_to_sequence, average_model
import torch as th
import argparse
import yaml
max_length = 1024
def run(dev_id, config):
_dataset = config['dataset']
if _dataset == 'iwslt':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('iwslt')
_, _, test = dataset.splits(exts=('.tc.zh', '.tc.en'), fields=TEXT, root='./data')
test = DocumentMTDataset(test, context_length=config['context_len'])
vocab_zh, vocab_en = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_zh), len(vocab_en))
vocab_sizes = [len(vocab_zh), len(vocab_en)]
TEXT[0].vocab = vocab_zh
TEXT[1].vocab = vocab_en
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'wmt':
TEXT = data.Field(batch_first=True)
dataset = get_mt_dataset('wmt14')
_, _, test = dataset.splits(exts=['.en', '.de'], fields=[TEXT, TEXT], root='./data')
test = MTDataset(test)
vocab = dataset.load_vocab(root='./data')[0]
print('vocab size: ', len(vocab))
vocab_sizes = [len(vocab)]
TEXT.vocab = vocab
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'multi':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('multi30k')
_, _, test = dataset.splits(exts=['.en.atok', '.de.atok'], fields=TEXT, root='./data')
test = MTDataset(test)
vocab_en, vocab_de = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_en), len(vocab_de))
vocab_sizes = [len(vocab_en), len(vocab_de)]
TEXT[0].vocab = vocab_en
TEXT[1].vocab = vocab_de
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
dim_model = config['dim_model']
dim_ff = config['dim_ff']
num_heads = config['num_heads']
n_layers = config['n_layers']
m_layers = config['m_layers']
dropouti = config['dropouti']
dropouth = config['dropouth']
dropouta = config['dropouta']
dropoutc = config['dropoutc']
rel_pos = config['rel_pos']
model = make_translate_infer_model(vocab_sizes, dim_model, dim_ff, num_heads,
n_layers, m_layers,
dropouti=dropouti, dropouth=dropouth,
dropouta=dropouta, dropoutc=dropoutc,
rel_pos=rel_pos)
device = th.device(dev_id)
model.load_state_dict(
average_model(['{}-{}.pkl'.format(epoch, config['save_name']) for epoch in range(config['n_epochs'] - 5, config['n_epochs'])]))
model = model.to(device)
model.eval()
if _dataset == 'iwslt':
vocab_trg = vocab_en
elif _dataset == 'wmt':
vocab_trg = vocab
elif _dataset == 'multi':
vocab_trg = vocab_de
for batch in test_loader:
with th.no_grad():
batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
for j in range(batcher.k):
batch.g_dec[j].edata['etype'] = batch.g_dec[j].edata['etype'].to(device)
batch.g_dec[j].ndata['pos'] = batch.g_dec[j].ndata['pos'].to(device)
batch.g_dec[j].ndata['x'] = batch.g_dec[j].ndata['x'].to(device)
output = model(batch, vocab_trg.stoi[MTDataset.EOS_TOKEN], sent_max_len=config['sent_max_len'])
for sequence in tensor_to_sequence(vocab_trg.itos, output, batch.n_sent_ctx):
print(sequence)
if __name__ == '__main__':
argparser = argparse.ArgumentParser("machine translation inference")
argparser.add_argument('--config', type=str)
argparser.add_argument('--gpu', type=int, default=0)
args = argparser.parse_args()
with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
run(args.gpu, config)
|
121302
|
import numpy as np
from scipy.spatial.distance import squareform
from random import randint
# there are more efficient algorithms for this
# https://people.csail.mit.edu/virgi/6.890/papers/APBP.pdf
def max_min(A, B):
'''max-min product of two square matrices
params:
A, B: NxN numpy arrays '''
assert A.shape == B.shape
return np.max(np.minimum(A[:, :, None], B[None, :, :]), axis=1)
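# Worked example: C[i, j] = max_k min(A[i, k], B[k, j]).
# With A = [[1, 5], [2, 0]] and B = [[3, 1], [4, 2]]:
#   C[0, 0] = max(min(1, 3), min(5, 4)) = 4,  C[0, 1] = max(min(1, 1), min(5, 2)) = 2
#   C[1, 0] = max(min(2, 3), min(0, 4)) = 2,  C[1, 1] = max(min(2, 1), min(0, 2)) = 1
# so max_min(A, B) = [[4, 2], [2, 1]].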
def mat_gromov_prod(dists, base):
'''Gromov products of N-point metric space relative to base point
Args:
dists (ndarray): NxN matrix of pairwise distances
base (int): index of the basepoint in 0...N-1 '''
assert dists.shape[0] == dists.shape[1] and 0 <= base < dists.shape[0]
row = dists[base, :][None, :]
col = dists[:, base][:, None]
return 0.5*(row+col-dists)
def delta_rel(dists, base=None):
''' Measure the delta-hyperbolicity constant of data
with respect to basepoint, normalized by the diameter (max dist).
Args:
dists (ndarray): NxN matrix of pairwise distances
base (int): index of basepoint in 0...N-1 (default = random)
'''
if base is None:
        base = randint(0, dists.shape[0] - 1)
assert is_metric(dists) and 0 <= base < dists.shape[0]
G = mat_gromov_prod(dists, base)
delta = np.max(max_min(G,G)-G)
diam = np.max(dists)
return delta/diam
def delta_sample(X, **kwargs):
bs = kwargs.get("bs", X.shape[0])
tries = kwargs.get("tries", 10)
dist = kwargs.get("dist", None)
deltas = []
for i in range(tries):
idx = np.random.choice(X.shape[0], bs)
batch = X[idx]
if dist is None:
dists = np.linalg.norm(
                batch[None, :] - batch[:, None],
axis=-1)
else:
dists = dist(batch,batch)
deltas.append(
delta_rel(dists,randint(0,bs-1))
)
return deltas
def is_metric(X, tol=1e-8):
return len(X.shape) == 2 and \
np.all( np.abs(X-X.T)<tol ) and\
np.all( np.abs(np.diag(X))<tol ) and\
np.all(X >= 0)
def avg_distortion(metric1, metric2):
''' Average distortion between two metrics.
Args:
metric1, metric2 (ndarray): N x N distance matrices,
or length N*(N-1)//2 compressed distance matrices
Returns:
average distortion (float)
'''
assert metric1.shape == metric2.shape
if len(metric1.shape) > 1:
assert is_metric(metric1)
X = squareform(metric1)
else:
X = metric1
if len(metric2.shape) > 1:
assert is_metric(metric2)
Y = squareform(metric2)
else:
Y = metric2
return np.mean( np.abs(X-Y)/Y )
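if __name__ == '__main__':
    # Minimal sketch: estimate the relative delta-hyperbolicity of random
    # Euclidean points; values close to 0 suggest a more tree-like metric.
    pts = np.random.rand(200, 3)
    deltas = delta_sample(pts, bs=100, tries=5)
    print('mean relative delta:', np.mean(deltas))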
|
121338
|
import os
import unittest
import jwt
from dataservice.app import app
from flask_webtest import TestApp as _TestApp
_HERE = os.path.dirname(__file__)
with open(os.path.join(_HERE, 'privkey.pem')) as f:
_KEY = f.read()
def create_token(data):
return jwt.encode(data, _KEY, algorithm='RS512')
_TOKEN = {'iss': 'runnerly',
'aud': 'runnerly.io'}
class TestViews(unittest.TestCase):
def setUp(self):
self.app = _TestApp(app)
self.token = create_token(_TOKEN).decode('ascii')
self.headers = {'Authorization': 'Bearer ' + self.token}
def test_one(self):
resp = self.app.get('/', headers=self.headers)
self.assertEqual(resp.status_code, 200)
|
121339
|
from MLlib.models import Agglomerative_clustering
import numpy as np
X = np.genfromtxt('datasets/agglomerative_clustering.txt')
model = Agglomerative_clustering()
model.work(X, 4)
model.plot(X)
|
121383
|
import time
import logging
import greenlet
import gevent
import contextlib
_logger = logging.getLogger(__name__)
class BlockDebugger(object):
def __init__(self, timeout=1000):
self.timeout = timeout
self._active_greenlet = None
self._greenlet_switch_counter = 0
self._greenlet_last_switch_time = None
greenlet.settrace(self._greenlet_switch_tracer)
    def _greenlet_switch_tracer(self, what, origin_target):
        # Python 3: tuple parameters are invalid, so unpack (origin, target) here.
        origin, target = origin_target
        self._active_greenlet = target
self._greenlet_switch_counter += 1
then = self._greenlet_last_switch_time
now = self._greenlet_last_switch_time = time.time()
if then is not None:
blocking_time = int(round((now - then) * 1000))
if origin is not gevent.hub.get_hub():
if blocking_time > self.timeout:
_logger.warning("Greenlet blocked for %s ms" % blocking_time)
@contextlib.contextmanager
def check_block(self):
# Remember the time of last switch before entering the context.
old_switch_time = self._greenlet_last_switch_time
yield None
# If the time of last switch has not changed when exiting the context,
# then we obviously didn't yield back to the event loop.
if old_switch_time is not None:
if old_switch_time == self._greenlet_last_switch_time:
raise RuntimeError("Code did not yield to gevent")
|
121427
|
from urllib.parse import quote_plus
from flask import url_for
from sopy import db
from sopy.ext.models import IDModel
from sopy.se_data.models import ChatMessage
class Transcript(IDModel):
title = db.Column(db.String, nullable=False)
ts = db.Column(db.DateTime, nullable=False)
body = db.Column(db.String, nullable=False, default='')
messages = db.relationship(ChatMessage, lambda: transcript_message, order_by=ChatMessage.id, cascade='all')
def __str__(self):
return self.title
@property
def detail_url(self):
return url_for('transcript.detail', id=self)
@property
def update_url(self):
return url_for('transcript.update', id=self.id)
@property
def delete_url(self):
return url_for('transcript.delete', id=self.id)
@property
def local_time_url(self):
query = quote_plus('{} utc in local time'.format(self.ts.strftime('%Y-%m-%d %H:%M')))
return 'http://www.wolframalpha.com/input?i={}'.format(query)
transcript_message = db.Table(
'transcript_message',
db.Column('transcript_id', db.Integer, db.ForeignKey(Transcript.id), primary_key=True),
db.Column('message_id', db.Integer, db.ForeignKey(ChatMessage.id), primary_key=True)
)
|
121428
|
import numpy as np
import pybullet as p
import gym
import roboverse.bullet as bullet
import os
from tqdm import tqdm
import argparse
import time
import roboverse
import datetime
# =========================================================
# Index corresponds to POSITION, ORIENTATION, BUTTONS, etc.
POSITION = 1
ORIENTATION = 2
ANALOG = 3
BUTTONS = 6
ORIENTATION_ENABLED = True
EPSILON = 0.005
# =========================================================
def collect_one_trajectory(env, num_timesteps):
prev_vr_theta = 0
def get_gripper_input(e):
# Detect change in button, and change trigger state
if e[BUTTONS][33] & p.VR_BUTTON_IS_DOWN:
trigger = -0.8
elif e[BUTTONS][33] & p.VR_BUTTON_WAS_RELEASED:
trigger = 0.8
else:
trigger = 0
return trigger
def accept_traj(info):
return info["grasp_success"] # TODO: just grasping for now; will add info["push_success"] etc
# get VR controller output at one timestamp
def get_vr_output():
nonlocal prev_vr_theta
ee_pos, ee_theta = bullet.get_link_state(
env.robot_id, env.end_effector_index)
events = p.getVREvents()
# detect input from controllers
assert events, "no input from controller!"
e = events[0]
# obtain gripper state from controller trigger
trigger = get_gripper_input(e)
# pass controller position and orientation into the environment
cont_pos = e[POSITION]
cont_orient = bullet.deg_to_quat([180, 0, 0])
if ORIENTATION_ENABLED:
cont_orient = e[ORIENTATION]
cont_orient = bullet.quat_to_deg(list(cont_orient))
action = [cont_pos[0] - ee_pos[0],
cont_pos[1] - ee_pos[1],
cont_pos[2] - ee_pos[2]]
action = np.array(action) * 3.5 # to make grasp success < 20 timesteps
grip = trigger
for _ in range(2):
action = np.append(action, 0)
wrist_theta = cont_orient[2] - prev_vr_theta
action = np.append(action, wrist_theta)
action = np.append(action, grip)
action = np.append(action, 0)
# ===========================================================
# Add noise during actual data collection
noise = 0.1
noise_scalings = [noise] * 3 + [0.1 * noise] * 3 + [noise] * 2
action += np.random.normal(scale=noise_scalings)
# ===========================================================
action = np.clip(action, -1 + EPSILON, 1 - EPSILON)
prev_vr_theta = cont_orient[2]
return action
o = env.reset()
time.sleep(1.5)
images = []
accept = False
traj = dict(
observations=[],
actions=[],
rewards=[],
next_observations=[],
terminals=[],
agent_infos=[],
env_infos=[],
original_object_positions=env.original_object_positions,
)
first_time = True
# Collect a fixed length of trajectory
for i in range(num_timesteps):
action = get_vr_output()
observation = env.get_observation()
traj["observations"].append(observation)
next_state, reward, done, info = env.step(action)
traj["next_observations"].append(next_state)
traj["actions"].append(action)
traj["rewards"].append(reward)
traj["terminals"].append(done)
traj["agent_infos"].append(info)
traj["env_infos"].append(info)
time.sleep(0.03)
if accept_traj(info) and first_time:
print("num_timesteps: ", i)
first_time = False
# ===========================================================
if accept_traj(info):
accept = "y"
# ===========================================================
return accept, images, traj
def timestamp(divider='-', datetime_divider='T'):
now = datetime.datetime.now()
    return now.strftime(
        '%Y{d}%m{d}%d{dtd}%H{d}%M{d}%S'.format(d=divider, dtd=datetime_divider))
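# e.g. timestamp() -> '2021-03-05T14-02-33' with the defaults above, and
# timestamp(divider='.') -> '2021.03.05T14.02.33' (dates here are illustrative).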
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--num-trajectories", type=int, required=True)
parser.add_argument("-t", "--num-timesteps", type=int, required=True)
parser.add_argument("-e", "--env-name", type=str, required=True)
parser.add_argument("--task-name", type=str, required=True)
args = parser.parse_args()
timestamp = timestamp()
data_save_path = os.path.join(__file__, "../..", 'data', timestamp)
data_save_path = os.path.abspath(data_save_path)
if not os.path.exists(data_save_path):
os.makedirs(data_save_path)
data = []
env = roboverse.make(args.env_name,
gui=True,
control_mode='discrete_gripper')
env.reset()
for j in tqdm(range(args.num_trajectories)):
success, images, traj = collect_one_trajectory(env, args.num_timesteps)
while success != 'y' and success != 'Y':
print("failed for trajectory {}, collect again".format(j))
success, images, traj = collect_one_trajectory(env, args.num_timesteps)
data.append(traj)
if j % 50 == 0:
path = os.path.join(data_save_path, "{}_{}_{}_{}.npy".format(args.env_name, args.task_name, timestamp, j))
np.save(path, data)
path = os.path.join(data_save_path, "{}_{}_{}.npy".format(args.env_name, args.task_name, timestamp))
np.save(path, data)
|
121442
|
from pikachu.fingerprinting.daylight import Daylight
from pikachu.fingerprinting.hashing import hash_32_bit_integer
from pikachu.chem.bond_properties import BOND_PROPERTIES
from pikachu.chem.chirality import find_chirality_from_nonh
class ECFP:
def __init__(self, structure, iterations=2):
self.structure = structure
self.iterations = iterations
self.identifiers = {}
self.bonds = {}
self.fingerprint = set()
self.seen_atoms = {}
self.features = {}
self.hash_to_feature = {}
self.set_initial_identifiers()
self.ecfp()
def set_initial_identifiers(self):
for atom in self.structure.graph:
if atom.type != 'H' and atom.type != '*':
self.identifiers[atom] = {}
self.seen_atoms[atom] = {}
self.seen_atoms[atom][0] = {atom}
daylight_properties = Daylight(atom, self.structure)
initial_identifier = hash_32_bit_integer(daylight_properties.daylight)
self.identifiers[atom][0] = initial_identifier
self.fingerprint.add(initial_identifier)
bonds = set(atom.get_non_hydrogen_bonds())
self.bonds[initial_identifier] = bonds
feature = tuple(sorted(list(bonds) + [atom], key=lambda x: (x.nr, x.type)))
self.features[feature] = (initial_identifier, 0, atom)
self.hash_to_feature[initial_identifier] = feature
def ecfp(self):
for i in range(self.iterations):
new_features = []
for atom in self.identifiers:
identifier = self.identifiers[atom][i]
array = [i + 1, identifier]
array_to_add = []
neighbouring_bonds = []
seen_atoms = list(self.seen_atoms[atom][i])
for neighbour in atom.get_non_hydrogen_neighbours():
bond = self.structure.bond_lookup[atom][neighbour]
bond_order = BOND_PROPERTIES.bond_type_to_order[bond.type]
neighbour_identifier = self.identifiers[neighbour][i]
for neighbouring_bond in self.bonds[neighbour_identifier]:
neighbouring_bonds.append(neighbouring_bond)
array_to_add.append((bond_order, neighbour_identifier, neighbour))
for seen_atom in self.seen_atoms[neighbour][i]:
seen_atoms.append(seen_atom)
self.seen_atoms[atom][i + 1] = set(seen_atoms)
neighbouring_bonds = set(neighbouring_bonds)
array_to_add.sort(key=lambda x: (x[0], x[1]))
attachment_order = []
for bond_order, atom_id, neighbour in array_to_add:
array.append(bond_order)
array.append(atom_id)
attachment_order.append(neighbour)
if atom.chiral:
chirality = find_chirality_from_nonh(atom.neighbours, attachment_order, atom.chiral)
if chirality == 'clockwise':
array.append(1)
else:
array.append(0)
new_identifier = hash_32_bit_integer(array)
self.identifiers[atom][i + 1] = new_identifier
bonds_core_previous = self.bonds[identifier]
bonds_attachment = atom.get_non_hydrogen_bonds()
bond_set = bonds_core_previous.union(bonds_attachment)
bond_set = bond_set.union(neighbouring_bonds)
self.bonds[new_identifier] = bond_set
feature = tuple(sorted(list(bond_set) + list(self.seen_atoms[atom][i + 1]), key=lambda x: (x.nr, x.type)))
if feature not in self.features:
new_features.append((feature, new_identifier, atom))
new_features.sort(key=lambda x: tuple([y.nr for y in x[0]] + [x[1]]))
#new_features.sort()
previous_feature = None
previous_atom = None
for new_feature, identifier, atom in new_features:
if new_feature == previous_feature:
continue
else:
self.features[new_feature] = (identifier, i + 1, atom)
self.hash_to_feature[identifier] = new_feature
self.fingerprint.add(identifier)
previous_feature = new_feature
previous_atom = atom
def build_ecfp_bitvector(structures, depth=2, bits=1024):
fingerprints = []
identifier_to_feature = {}
for structure in structures:
ecfp = ECFP(structure, iterations=depth)
fingerprints.append(ecfp.fingerprint)
identifier_to_feature.update(ecfp.hash_to_feature)
substructure_to_count = {}
for fingerprint in fingerprints:
for identifier in fingerprint:
if identifier not in substructure_to_count:
substructure_to_count[identifier] = 0
substructure_to_count[identifier] += 1
substructures = sorted(list(substructure_to_count.items()), key=lambda x: x[1], reverse=True)
bitvector_substructures = [x[0] for x in substructures[:bits]]
bitvector_mapping = {}
for substructure in bitvector_substructures:
bitvector_mapping[substructure] = identifier_to_feature[substructure]
return bitvector_substructures, bitvector_mapping
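# --- Usage sketch (illustrative only) ---
# Turning a few structures into a shared ECFP bitvector; `read_smiles` and the
# SMILES strings below are assumptions for illustration, not part of this module.
#
#   from pikachu.general import read_smiles
#   structures = [read_smiles('CCO'), read_smiles('CC(=O)O')]
#   substructures, mapping = build_ecfp_bitvector(structures, depth=2, bits=1024)
#   fp = ECFP(structures[0]).fingerprint
#   bits = [1 if identifier in fp else 0 for identifier in substructures]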
|
121448
|
n=int(input("enter number of elements: "))
l=[]
for i in range(n):
l.append(int(input(f"enter l[{i}]: ")))
print(l)
'''
output:
enter number of elements: 6
enter l[0]: 23
enter l[1]: 11
enter l[2]: 67
enter l[3]: 889
enter l[4]: 342
enter l[5]: 23
[23, 11, 67, 889, 342, 23]
'''
|
121452
|
import boto3
import os
from botocore.exceptions import ClientError
from moto import mock_s3
from piprepo.models import S3Index
from piprepo.utils import get_project_name_from_file
from .conftest import PACKAGES
def assert_s3_bucket_contents(conn, bucket, prefix=''):
for package in PACKAGES:
package_obj = conn.Object(bucket.name, os.path.join(prefix, package))
package_index_obj = conn.Object(
bucket.name, os.path.join(prefix, 'simple', get_project_name_from_file(package), 'index.html')
)
root_index_obj = conn.Object(bucket.name, os.path.join(prefix, 'simple', 'index.html'))
assert s3_object_exists(package_obj)
assert s3_object_exists(package_index_obj)
assert s3_object_exists(root_index_obj)
assert get_project_name_from_file(package).encode() in root_index_obj.get()['Body'].read()
assert package.encode() in package_index_obj.get()['Body'].read()
@mock_s3
def assert_s3_sync(source, destination):
conn = boto3.resource("s3")
bucket = conn.create_bucket(Bucket='piprepo')
with S3Index(source, destination) as index:
prefix = index.prefix
assert_s3_bucket_contents(conn, bucket, prefix)
def test_s3_sync_with_prefix(tempindex):
assert_s3_sync(tempindex['source'], 's3://piprepo/prefix')
def test_s3_sync_with_prefix_trailing_slash(tempindex):
assert_s3_sync(tempindex['source'] + '/', 's3://piprepo/prefix/')
def test_s3_sync_with_prefix_from_source_dir(tempindex):
os.chdir(tempindex['source'])
assert_s3_sync('.', 's3://piprepo/prefix')
def test_s3_sync_without_prefix(tempindex):
assert_s3_sync(tempindex['source'], 's3://piprepo')
def test_s3_sync_without_prefix_trailing_slash(tempindex):
assert_s3_sync(tempindex['source'] + '/', 's3://piprepo/')
def test_s3_sync_without_prefix_from_source_dir(tempindex):
os.chdir(tempindex['source'])
assert_s3_sync('.', 's3://piprepo')
def s3_object_exists(obj):
try:
obj.load()
except ClientError as e:
if e.response['Error']['Code'] == "404":
return False
else:
raise
return True
|
121453
|
import math
import numpy as np
import random
from collections import namedtuple, deque, defaultdict
import matplotlib.pyplot as plt
from mdp import *
import pdb
import util
import json
import pprint
import logging
import utils_nn as utils
BUFFER_SIZE = int(1e4)
BATCH_SIZE = 1
LR = 1e-3
class ReplayBuffer:
def __init__(self, buffer_size, batch_size, seed):
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
e = (state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
experiences = random.sample(self.memory, k=self.batch_size)
return experiences
def __len__(self):
return len(self.memory)
# Performs Q-learning. Read util.RLAlgorithm for more information.
# actions: a function that takes a state and returns a list of actions.
# discount: a number between 0 and 1, which determines the discount factor
# featureExtractor: a function that takes a state and action and returns a list of (feature name, feature value) pairs.
# explorationProb: the epsilon value indicating how frequently the policy
# returns a random action
class QLearningAlgorithm(util.RLAlgorithm):
def __init__(self, actions, discount, featureExtractor, mdp, explorationProb=0.2):
self.actions = actions
self.discount = discount
self.featureExtractor = featureExtractor
self.explorationProb = explorationProb
self.weights = defaultdict(float)
self.numIters = 0
self.mdp = mdp
# Return the Q function associated with the weights and features
def getQ(self, state, action):
score = 0
for f, v in self.featureExtractor(state, action, self.mdp):
score += self.weights[f] * v
return score
# This algorithm will produce an action given a state.
# Here we use the epsilon-greedy algorithm: with probability
# |explorationProb|, take a random action.
def getAction(self, state, eps):
self.numIters += 1
#if random.random() < self.explorationProb:
if random.random() < eps: # align qlearning and dqn exploration strategy
return random.choice(self.actions(state))
else:
return max((self.getQ(state, action), action) for action in self.actions(state))[1]
# Call this function to get the step size to update the weights.
def getStepSize(self):
return LR
        # alternative decaying step size: 1e-4 / math.sqrt(self.numIters)
# We will call this function with (s, a, r, s'), which you should use to update |weights|.
# Note that if s is a terminal state, then s' will be None. Remember to check for this.
# You should update the weights using self.getStepSize(); use
# self.getQ() to compute the current estimate of the parameters.
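    # With linear function approximation this implements the TD(0) update
    # w <- w - stepSize * (Q(s, a) - (r + discount * max_a' Q(s', a'))) * phi(s, a),
    # where phi(s, a) is the feature vector returned by featureExtractor.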
def incorporateFeedback(self, state, action, reward, newState, done=False):
if newState is None or done:
error = self.getQ(state, action) - reward
else:
error = self.getQ(state, action) - (reward + self.discount * max([self.getQ(newState, a) for a in self.actions(newState)]))
loss = error
#print("error={}".format(error))
error = min(10, error)
error = max(-10, error)
error *= self.getStepSize()
for f, v in self.featureExtractor(state, action, self.mdp):
self.weights[f] = self.weights[f] - error * v
return loss
def dumpWeights(self):
pprint.pprint(json.loads(json.dumps(self.weights)), weightsFile)
#print(dict(self.weights))
def actFeatureExtractor(state, action, mdp):
features = []
order = 1 # polynomial approx
dmax = 200
vmax = 30
amax = 2
ttcmax = 100
pos, speed, ttc_info = state[1], state[3], mdp._get_smallest_TTC(state)
ttc, nobj = ttc_info
idx = 4+nobj*4
ttcX, ttcY, ttcVx, ttcVy = state[idx:idx+4]
ttcX, ttcY, ttcVx, ttcVy = ttcX/dmax, ttcY/dmax, ttcVx/vmax, ttcVy/vmax
features.append(('bias', 1))
    # NB: trying to play with these features. I had to lower the learning rate (cf. LR)
#for i in range(1,order+1):
# features.append(('ttcX'+str(i), ttcX**i))
# features.append(('ttcY'+str(i), ttcY**i))
# features.append(('ttcVx'+str(i), ttcVx**i))
# features.append(('ttcVy'+str(i), ttcVy**i))
#features.append(('ttcR', 1 - math.exp(-ttc/100.)))
#features.append(('speedR', 1 - abs((speed-20.)/20.)))
# normalize features, otherwise it does not work at all
ttc = min(ttc,ttcmax)
pos, speed, ttc, action = pos/dmax, speed/vmax, ttc/ttcmax, action/amax
for i in range(1,order+1):
#features.append(('pos'+str(i), pos**i))
features.append(('speed'+str(i), speed**i))
features.append(('ttc'+str(i), ttc**i))
features.append(('action'+str(i), action**i))
return features
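# Example of the returned feature list for a hypothetical normalized state
# (values are illustrative only):
#   [('bias', 1), ('speed1', 0.66), ('ttc1', 0.45), ('action1', 0.5)]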
def qlearning(mdp, n_epochs=20, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
rl = QLearningAlgorithm(mdp.actions, mdp.discount(), actFeatureExtractor, mdp, 0.2)
memory = ReplayBuffer(BUFFER_SIZE, batch_size=BATCH_SIZE, seed=0)
best_score = -math.inf
mean_score = -math.inf
avg_tr_loss = 0
eps = eps_start
iters = 0
for num_epoch in range(n_epochs):
random.shuffle(mdp.train_set)
tr_scores_window = deque(maxlen=100) # last 100 scores
for num_s, s in enumerate(mdp.train()):
score = 0
for t in range(max_t):
iters += 1
#a = agent.act(mdp.reduce_state(s), eps) # a is an index !!!
a = rl.getAction(s, eps)
sp, r = mdp.sampleSuccReward(s, a)
done = mdp.isEnd(sp)[0]
memory.add(s, a, r, sp, done)
if len(memory) > BATCH_SIZE:
samples = memory.sample()
for sample in samples:
state, action, reward, next_state, isDone = sample
l = rl.incorporateFeedback(state, action, reward, next_state, isDone)
else:
l = rl.incorporateFeedback(s, a, r, sp, done)
avg_tr_loss += l
score += r
if done:
break
s = sp
if iters%100 == 99:
logging.info("Epoch no {}: sample {} iter {} avg_tr_loss: {:0.4f} tr_mean_score: {:.2f}".format(num_epoch, num_s, iters, avg_tr_loss/100, mean_score))
avg_tr_loss = 0
tr_scores_window.append(score)
mean_score = np.mean(tr_scores_window)
eps = max(eps_end, eps_decay*eps)
dev_scores_window = deque(maxlen=100) # last 100 scores
for num_s, s in enumerate(mdp.dev()):
score = 0
for t in range(max_t):
#a = agent.act(mdp.reduce_state(s), eps=0.) # a is an index !!!
a = rl.getAction(s, eps)
sp, r = mdp.sampleSuccReward(s, a)
done = mdp.isEnd(sp)[0]
score += r
if done:
break
s = sp
dev_scores_window.append(score)
dev_mean_score = np.mean(dev_scores_window)
logging.info("Epoch no {}: dev_mean_score: {:.2f}".format(num_epoch, dev_mean_score))
if dev_mean_score > best_score:
weightsFile.write("Epoch {} dev_mean_score: {:.2f}\n".format(num_epoch, dev_mean_score))
rl.dumpWeights()
best_score = dev_mean_score
# scores_window = deque(maxlen=100) # last 100 scores
# eps = eps_start
# for i_episode in range(1, n_episodes+1):
# s = mdp.startState()
# score = 0
# for t in range(max_t):
# #a = agent.act(s, eps)
# a = rl.getAction(s, eps)
# #pdb.set_trace()
# sp, r = mdp.sampleSuccReward(s, a)
# done = mdp.isEnd(sp)[0]
# #agent.step(s, a, r, sp, done)
# memory.add(s, a, r, sp, done)
# if len(memory) > BATCH_SIZE:
# samples = memory.sample()
# for sample in samples:
# state, action, reward, next_state, isDone = sample
# rl.incorporateFeedback(state, action, reward, next_state, isDone)
# else:
# rl.incorporateFeedback(s, a, r, sp, done)
# score += r
# if done:
# break
# s = sp
# scores_window.append(score)
# eps = max(eps_end, eps_decay*eps)
# avg_sliding_score = np.mean(scores_window)
# print("Episode {} Average sliding score: {:.2f}".format(i_episode, avg_sliding_score))
# if avg_sliding_score > -10:
# weightsFile.write("Episode {} Average sliding score: {:.2f}\n".format(i_episode, avg_sliding_score))
# rl.dumpWeights()
utils.set_logger('qlearning.log')
weightsFile = open("models/qlearning.weights", "a")
mdp = ActMDP()
qlearning(mdp)
|
121480
|
from ipywidgets import interact
def myfunction(x):
return x
interact(myfunction, x=('red','green'));
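# Passing a tuple of strings makes interact render a dropdown widget; picking
# 'red' or 'green' re-invokes myfunction with the selected value. A tuple of two
# numbers, e.g. interact(myfunction, x=(0, 10)), would render a slider instead.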
|
121513
|
import os
import re
import netaddr
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils import timezone
TOOL_VERSION = 'v1.0'
BANNER = '\n'.join([
'=================== SPARCS SSO Log Inspection Report ===================',
' >> CONFIDENTIAL - Authorized Person Only << ',
    'This report contains personal information. This report may be read only ',
    'by SPARCS SSO system operators, and by persons within law enforcement ',
    'agencies who reasonably need to. Copying, disclosing or sharing this ',
    'report with others is strictly forbidden, unless permitted to do so by ',
    'SPARCS SSO system operators or by existing laws. ',
'------------------------------------------------------------------------',
])
class Command(BaseCommand):
help = 'Inspect a user'
def add_arguments(self, parser):
parser.add_argument('--uid', dest='uid', help='user unique id')
parser.add_argument('--sid', dest='sid', help='service map id')
parser.add_argument('--email', dest='email', help='email address')
parser.add_argument('--ip', dest='ip', help='valid ipv4 address')
parser.add_argument('--limit', dest='limit', help='fetch limit')
def check_options(self, options):
count, target_prop = 0, ''
for prop in ['uid', 'sid', 'email', 'ip']:
if options[prop]:
count += 1
target_prop = prop
if count > 1:
self.stdout.write('* Query: invalid (two or more target)')
return
elif count == 0:
self.stdout.write('* Query: no item specified')
return
target_val = options[target_prop]
self.stdout.write(f'* Query: {target_prop}={target_val}')
if target_prop == 'ip':
if not netaddr.valid_ipv4(target_val, netaddr.INET_PTON):
self.stdout.write(' - Invalid IP')
self.stdout.write('=' * 72)
return
return f'({target_val}'
if options['uid']:
users = User.objects.filter(
username__contains=options['uid'],
)
elif options['sid']:
users = User.objects.filter(
services__sid__contains=options['sid'],
)
elif options['email']:
users = User.objects.filter(
email__contains=options['email'],
)
if len(users) > 100:
self.stdout.write(' - Too Many Users')
return
elif not users:
self.stdout.write(' - No Such User')
return
for user in users:
self.stdout.write(''.join([
f' - User: {user.username}, {user.first_name}',
                f'/{user.last_name}, {user.email}',
]))
if len(users) != 1:
return
return f'{users[0].username})'
def search_logs(self, log_files, search_str, limit):
logs, logs_count = [], 0
for log_file in log_files:
part_logs = []
with open(log_file, 'r') as f:
for log in f.readlines():
if logs_count > limit:
break
elif search_str in log:
part_logs.append(log)
logs_count += 1
logs = part_logs + logs
return logs, logs_count
def print_logs(self, search_str, limit):
try:
limit = int(limit)
except ValueError:
limit = 500
log_buffer_files = list(map(
lambda x: os.path.join(settings.LOG_BUFFER_DIR, x),
filter(
lambda x: re.match(r'\d{8}\.\d+\.log', x),
os.listdir(settings.LOG_BUFFER_DIR),
),
))
logs_buffer, logs_buffer_count = self.search_logs(
log_buffer_files, search_str, limit,
)
logs_buffer.sort()
log_files = reversed(sorted(list(map(
lambda x: os.path.join(settings.LOG_DIR, x),
filter(
lambda x: re.match(r'\d{8}\.log', x),
os.listdir(settings.LOG_DIR),
),
))))
logs, logs_count = self.search_logs(
log_files, search_str, limit - logs_buffer_count,
)
for log in logs:
self.stdout.write(log)
for log in logs_buffer:
self.stdout.write(log)
def handle(self, *args, **options):
self.stdout.write(BANNER)
self.stdout.write('\n'.join([
f'* SSO Version: {settings.VERSION}',
f'* Inspection Tool Version: {TOOL_VERSION}',
f'* Time: {timezone.now().isoformat()}',
'-' * 72,
]))
search_str = self.check_options(options)
self.stdout.write('=' * 72)
self.stdout.write('')
if search_str:
self.print_logs(search_str, options['limit'])
|
121516
|
import os
import six
import numpy as np
import pandas as pd
from math import pi
from copy import copy
from abc import ABCMeta
from functools import lru_cache
from collections import defaultdict
from scipy.spatial import ConvexHull
from amlearn.featurize.base import BaseFeaturize
from amlearn.featurize.nearest_neighbor import VoroNN, DistanceNN, BaseNN
from amlearn.utils.verbose import VerboseReporter
from amlearn.utils.data import read_imd, read_lammps_dump, \
get_isometric_lists, list_like
from amlearn.utils.packing import load_radii, pbc_image_nn_coords, \
solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume
try:
from amlearn.featurize.src import voronoi_stats, boop
except Exception:
    print("Failed to import Fortran extensions voronoi_stats/boop!\n")
module_dir = os.path.dirname(os.path.abspath(__file__))
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class PackingOfSite(object):
def __init__(self, pbc, bds, atom_type, coords, neighbors_type,
neighbors_coords, radii=None, radius_type="miracle_radius"):
self.pbc = pbc
self.bds = bds
self.atom_type = atom_type
self.coords = coords.astype(float)
self.neighbors_type = neighbors_type
self.neighbors_coords = neighbors_coords
self.radii = load_radii() if radii is None else radii
self.radius_type = radius_type
def nn_coords(self):
if not hasattr(self, 'nn_coords_'):
self.nn_coords_ = [pbc_image_nn_coords(self.coords,
neighbor_coords,
self.bds, self.pbc)
for neighbor_coords in self.neighbors_coords]
return self.nn_coords_
def convex_hull(self):
if not hasattr(self, 'convex_hull_'):
self.convex_hull_ = ConvexHull(self.nn_coords())
return self.convex_hull_
def convex_hull_simplices(self):
if not hasattr(self, 'convex_hull_simplices_'):
self.convex_hull_simplices_ = self.convex_hull().simplices
return self.convex_hull_simplices_
def analyze_area_interstice(self):
area_list = list()
area_interstice_list = list()
triplet_array = np.array([[0, 1, 2], [1, 0, 2], [2, 0, 1]])
for facet_indices in self.convex_hull_simplices():
packed_area = 0
facet_coords = np.array(self.nn_coords())[facet_indices]
for facet_idx, triplet in zip(facet_indices, triplet_array):
triangle_angle = triangular_angle(*facet_coords[triplet])
r = self.radii[str(self.neighbors_type[facet_idx])][
self.radius_type]
packed_area += triangle_angle / 2 * pow(r, 2)
area = triangle_area(*facet_coords)
area_list.append(area)
area_interstice = 1 - packed_area/area
area_interstice_list.append(
area_interstice if area_interstice > 0 else 0)
self.area_list_ = area_list
self.area_interstice_list_ = area_interstice_list
def get_solid_angle_lists(self):
if not hasattr(self, 'solid_angle_lists_'):
solid_angle_lists = list()
triplet_array = np.array([[0, 1, 2], [1, 0, 2], [2, 0, 1]])
for facet_indices in self.convex_hull_simplices():
solid_angle_list = list()
facet_coords = np.array(self.nn_coords())[facet_indices]
for triplet in triplet_array:
solid_angle_ = solid_angle(*facet_coords[triplet],
self.coords)
solid_angle_list.append(solid_angle_)
solid_angle_lists.append(solid_angle_list)
self.solid_angle_lists_ = solid_angle_lists
return self.solid_angle_lists_
def analyze_vol_interstice(self):
volume_list = list()
volume_interstice_list = list()
for facet_indices, solid_angle_list in \
zip(self.convex_hull_simplices(), self.get_solid_angle_lists()):
packed_volume = 0
facet_coords = np.array(self.nn_coords())[facet_indices]
# calculate neighbors' packed_volume
for facet_idx, sol_angle in zip(facet_indices, solid_angle_list):
if sol_angle == 0:
continue
r = self.radii[str(self.neighbors_type[facet_idx])][
self.radius_type]
packed_volume += sol_angle / 3 * pow(r, 3)
# add center's packed_volume
center_solid_angle = solid_angle(self.coords, *facet_coords)
center_r = self.radii[str(self.atom_type)][self.radius_type]
packed_volume += center_solid_angle / 3 * pow(center_r, 3)
volume = tetra_volume(self.coords, *facet_coords)
volume_list.append(volume)
volume_interstice = 1 - packed_volume/volume
volume_interstice_list.append(
volume_interstice if volume_interstice > 0 else 0)
self.volume_list_ = volume_list
self.volume_interstice_list_ = volume_interstice_list
def cluster_packed_volume(self):
"""
Calculate the cluster volume that is packed with atoms, including the
volume of center atoms plus the volume cones (from solid angle) of
all the neighbors.
Returns:
packed_volume
"""
types_solid_angle = [0] * len(self.neighbors_type)
for facet_indices, solid_angle_list in \
zip(self.convex_hull_simplices(), self.get_solid_angle_lists()):
for facet_idx, solid_angle_ in zip(facet_indices, solid_angle_list):
types_solid_angle[facet_idx] += solid_angle_
packed_volume = 4/3 * pi * pow(
self.radii[str(self.atom_type)][self.radius_type], 3)
for neighbor_type, type_solid_angle in \
zip(self.neighbors_type, types_solid_angle):
if type_solid_angle == 0:
continue
packed_volume += type_solid_angle * 1/3 * pow(
self.radii[str(int(neighbor_type))][self.radius_type], 3)
return packed_volume
def cluster_packing_efficiency(self):
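        # Fraction of the neighbor convex hull volume that is filled by atomic
        # spheres: packed volume (center atom plus the solid-angle cones of the
        # neighbors) divided by the hull volume.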
return self.cluster_packed_volume() / self.convex_hull().volume
def atomic_packing_efficiency(self):
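        # Deviation of the actual radius ratio R_center / <R_neighbor> from the
        # ideal ratio for this coordination number (efficient-packing model of
        # Laws et al.); ideal_ratio_ maps coordination number -> ideal ratio.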
ideal_ratio_ = {3: 0.154701, 4: 0.224745, 5: 0.361654, 6: 0.414214,
7: 0.518145, 8: 0.616517, 9: 0.709914, 10: 0.798907,
11: 0.884003, 12: 0.902113, 13: 0.976006, 14: 1.04733,
15: 1.11632, 16: 1.18318, 17: 1.2481, 18: 1.31123,
19: 1.37271, 20: 1.43267, 21: 1.49119, 22: 1.5484,
23: 1.60436, 24: 1.65915}
nn_type_dict = defaultdict(int)
for neighbor_type in self.neighbors_type:
nn_type_dict[neighbor_type] += 1
r = 0
for t, n in nn_type_dict.items():
r += self.radii[str(t)][self.radius_type] * n
r = r / len(self.neighbors_type)
return self.radii[str(self.atom_type)][self.radius_type] / r - \
ideal_ratio_[len(self.neighbors_type)]
@lru_cache(maxsize=10)
def get_nn_instance(dependent_name, backend, **nn_kwargs):
"""
    Get a Nearest Neighbor instance. Since most SRO featurizers depend on the
    same Nearest Neighbor instance, we cache the most recently used instance
    with lru_cache.
Args:
dependent_name (str): "voro"/"voronoi" or "dist"/"distance".
backend (Backend): Amlearn Backend object, to prepare amlearn needed
paths and define the common amlearn's load/save method.
nn_kwargs: Nearest Neighbor class's keyword arguments.
Returns:
dependent_class (object): Nearest Neighbor instance.
"""
    if dependent_name in ("voro", "voronoi"):
        dependent_class = VoroNN(backend=backend, **nn_kwargs)
    elif dependent_name in ("dist", "distance"):
dependent_class = DistanceNN(backend=backend, **nn_kwargs)
else:
raise ValueError('dependent name {} is unknown, Possible values '
'are {}'.format(dependent_name,
'[voro, voronoi, '
'dist, distance]'))
return dependent_class
class BaseSRO(six.with_metaclass(ABCMeta, BaseFeaturize)):
"""
    Base class of Short Range Order (SRO) featurizers. Most SRO featurizers
    depend on the output of a Nearest Neighbor class, so this base class
    implements dependency checking. Since most SRO featurizers depend on the
    same Nearest Neighbor instance, we cache the most recently used instance
    with lru_cache.
Args:
save (Boolean): save file or not.
backend (object): Amlearn Backend object, to prepare amlearn needed
paths and define the common amlearn's load/save method.
dependent_class (object or str):
if object, it can be "VoroNN()" or "DistanceNN()";
if str, it can be "voro"/"voronoi" or "dist"/"distance"
nn_kwargs: Nearest Neighbor class's keyword arguments.
"""
def __init__(self, save=True, backend=None, dependent_class=None,
verbose=1, output_path=None, **nn_kwargs):
super(BaseSRO, self).__init__(save=save,
verbose=verbose,
backend=backend,
output_path=output_path)
self.calculated_X = None
if dependent_class is None:
self.dependent_class_ = None
self.dependent_name_ = 'voro'
elif isinstance(dependent_class, BaseNN):
self.dependent_class_ = dependent_class
self.dependent_name_ = dependent_class.__class__.__name__.lower()[:4]
elif isinstance(dependent_class, str):
self.dependent_name_ = dependent_class[:4]
self.dependent_class_ = get_nn_instance(
self.dependent_name_, getattr(self, 'backend', None),
save=self.save, **nn_kwargs)
else:
raise ValueError(
'dependent_class {} is unknown, Possible values are {} or '
'voro/dist object.'.format(dependent_class,
'[voro, voronoi, dist, distance]'))
self.neighbor_num_col = \
'neighbor_num_{}'.format(self.dependent_name_)
self.neighbor_ids_col = \
'neighbor_ids_{}'.format(self.dependent_name_)
self.neighbor_dists_col = \
'neighbor_dists_{}'.format(self.dependent_name_)
self.neighbor_areas_col = \
'neighbor_areas_{}'.format(self.dependent_name_)
self.neighbor_vols_col = \
'neighbor_vols_{}'.format(self.dependent_name_)
self.neighbor_edges_col = \
'neighbor_edges_{}'.format(self.dependent_name_)
def fit(self, X=None):
self.dependent_class_ = self.check_dependency(X)
if self.dependent_class_:
if self.save:
                self.backend.context.logger_.info(
                    "Input X doesn't have its dependent columns, "
                    "now calculating them automatically")
else:
                print("Input X doesn't have its dependent columns, "
                      "now calculating them automatically")
self.calculated_X = self.dependent_class_.fit_transform(X)
return self
@property
def category(self):
return 'sro'
class BaseInterstice(six.with_metaclass(ABCMeta, BaseSRO)):
def __init__(self, backend=None, dependent_class="voro", type_col='type',
atomic_number_list=None, neighbor_num_limit=80,
save=True, radii=None, radius_type="miracle_radius",
verbose=1, output_path=None, **nn_kwargs):
super(BaseInterstice, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.type_col = type_col
self.atomic_number_list = atomic_number_list
self.neighbor_num_limit = neighbor_num_limit
self.radii = load_radii() if radii is None else radii
self.radius_type = radius_type
self.verbose = verbose
def fit(self, X=None, lammps_df=None, bds=None, lammps_path=None):
"""
Args:
            X (DataFrame): X can be a DataFrame composed of partial columns of
                the Nearest Neighbor class's output; or X can be the input of
                the Nearest Neighbor class, which should contain
                ['type', 'x', 'y', 'z'...] columns. We will automatically call
                the Nearest Neighbor class to calculate X's output by the
                self.fit() method, then feed it as input to the transform() method.
            lammps_df (DataFrame): Constructed from the output of lammps; its
                common columns are ['type', 'x', 'y', 'z'...].
            bds (list like): X, y, z boundaries.
            lammps_path (str): If lammps_df is None, then we automatically
                construct the DataFrame from the lammps output path.
Returns:
self (object): Interstice or Packing instance.
"""
if lammps_df is None and lammps_path is not None \
and os.path.exists(lammps_path):
self.lammps_df, self.bds = read_lammps_dump(lammps_path)
else:
self.lammps_df = copy(lammps_df)
self.bds = bds
if self.atomic_number_list is not None:
self.lammps_df[self.type_col] = self.lammps_df[self.type_col].apply(
lambda x: self.atomic_number_list[x-1])
self.dependent_class_ = self.check_dependency(X)
if self.dependent_class_:
self.calculated_X = self.dependent_class_.fit_transform(X)
self.calculated_X = self.calculated_X.join(self.lammps_df)
return self
@property
def category(self):
return 'interstice_sro'
class DistanceInterstice(BaseInterstice):
def __init__(self, backend=None, dependent_class="voro", type_col='type',
atomic_number_list=None, neighbor_num_limit=80,
save=True, radii=None, radius_type="miracle_radius",
verbose=1, output_path=None, output_file_prefix=None,
stat_ops='all', **nn_kwargs):
super(DistanceInterstice, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
type_col=type_col, atomic_number_list=atomic_number_list,
neighbor_num_limit=neighbor_num_limit,
radii=radii, radius_type = radius_type,
verbose = verbose, output_path=output_path, **nn_kwargs)
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_{}_distance'.format(
self.category, self.dependent_name_,
self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
self.stat_ops = stat_ops if stat_ops != 'all' \
else ['sum', 'mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col,
self.neighbor_dists_col]
def transform(self, X):
"""
Args:
            X (DataFrame): X can be a DataFrame composed of partial columns of
                the Nearest Neighbor class's output; or X can be the input of
                the Nearest Neighbor class, which should contain
                ['type', 'x', 'y', 'z'...] columns. We will automatically call
                the Nearest Neighbor class to calculate X's output by the
                self.fit() method, then feed it as input to this transform() method.
Returns:
            dist_interstice_df (DataFrame): Distance interstice DataFrame whose
                index is the same as X's index; its column is
                [neighbor_dists_interstice_voro] or
                [neighbor_dists_interstice_dist], depending on dependent_class.
"""
X = X.join(self.lammps_df) \
if self.calculated_X is None else self.calculated_X
# define print verbose
if self.verbose > 0 and self.save:
vr = VerboseReporter(self.backend, total_stage=1,
verbose=self.verbose, max_verbose_mod=10000)
vr.init(total_epoch=len(X), start_epoch=0,
init_msg='Calculating DistanceInterstice features.',
epoch_name='Atoms', stage=1)
feature_lists = list()
for idx, row in X.iterrows():
neighbor_dist_interstice_list = list()
for neighbor_id, neighbor_dist in zip(row[self.neighbor_ids_col],
row[self.neighbor_dists_col]):
if neighbor_id > 0:
neighbor_dist_interstice_list.append(
neighbor_dist / (
self.radii[str(int(X.loc[idx][self.type_col]))][
self.radius_type] +
self.radii[
str(int(X.loc[neighbor_id][self.type_col]))][
self.radius_type]) - 1)
else:
continue
feature_lists.append(calc_stats(neighbor_dist_interstice_list,
self.stat_ops))
if self.verbose > 0 and self.save:
vr.update(idx - 1)
dist_interstice_df = \
pd.DataFrame(feature_lists, columns=self.get_feature_names(),
index=X.index)
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=dist_interstice_df, name=self.output_file_prefix)
return dist_interstice_df
def get_feature_names(self):
return ['Dist_interstice_{}_{}'.format(
stat, self.dependent_name_) for stat in self.stat_ops]
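# --- Usage sketch (illustrative only; the file path and inputs below are
# hypothetical, and the exact call pattern may differ in practice) ---
#
#   lammps_df, bds = read_lammps_dump('dump.atom')
#   featurizer = DistanceInterstice(dependent_class='voro', stat_ops='all')
#   featurizer.fit(X=lammps_df, lammps_df=lammps_df, bds=bds)
#   features_df = featurizer.transform(featurizer.calculated_X)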
class VolumeAreaInterstice(BaseInterstice):
def __init__(self, pbc=None, backend=None, dependent_class="voro",
coords_cols=None, type_col='type',
atomic_number_list=None,
neighbor_num_limit=80, save=True,
radii=None, radius_type="miracle_radius",
calc_volume_area='all', verbose=1,
volume_types=None, area_types=None,
output_path=None, output_file_prefix=None,
calc_indices='all', stat_ops='all', **nn_kwargs):
"""
Args:
            volume_types (list like): Can be one or several of
                ["volume_interstice",
                 "fractional_volume_interstice_tetrahedra",
                 "fractional_volume_interstice_tetrahedra_avg",
                 "fractional_volume_interstice_center_v"];
                default is ["fractional_volume_interstice_tetrahedra"].
            area_types (list like): Can be one or several of
                ["area_interstice",
                 "fractional_area_interstice_triangle",
                 "fractional_area_interstice_triangle_avg",
                 "fractional_area_interstice_center_slice_a"];
                default is ["fractional_area_interstice_triangle"].
"""
assert dependent_class == "voro" or dependent_class == "voronoi"
super(VolumeAreaInterstice, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
type_col=type_col, atomic_number_list=atomic_number_list,
neighbor_num_limit=neighbor_num_limit,
radii=radii, radius_type = radius_type,
verbose = verbose, output_path=output_path, **nn_kwargs)
self.pbc = pbc if pbc is not None else [1, 1, 1]
self.calc_volume_area = calc_volume_area
self.coords_cols = coords_cols \
if coords_cols is not None else ['x', 'y', 'z']
self.area_list = list()
self.area_interstice_list = list()
self.volume_list = list()
self.volume_interstice_list = list()
self.calc_indices = calc_indices
self.stat_ops = stat_ops if stat_ops != 'all' \
else ['sum', 'mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
self.volume_types = \
volume_types if isinstance(volume_types, list_like()) \
else [volume_types] if volume_types is not None \
else ['fractional_volume_interstice_tetrahedra']
self.area_types = \
area_types if isinstance(area_types, list_like()) \
else [area_types] if area_types is not None \
else ['fractional_area_interstice_triangle']
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_{}_volume_area'.format(
self.category, self.dependent_name_,
self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
def transform(self, X):
"""
Args:
            X (DataFrame): X can be a DataFrame composed of partial columns of
                the Nearest Neighbor class's output; or X can be the input of
                the Nearest Neighbor class, which should contain
                ['type', 'x', 'y', 'z'...] columns. We will automatically call
                the Nearest Neighbor class to calculate X's output by the
                self.fit() method, then feed it as input to this transform() method.
Returns:
            volume_area_interstice_df (DataFrame): Volume/Area interstice
                DataFrame whose index is the same as X's index; see the
                get_feature_names() method for column names.
"""
X = X.join(self.lammps_df) if self.calculated_X is None \
else self.calculated_X
# define print verbose
if self.verbose > 0 and self.save:
vr = VerboseReporter(self.backend, total_stage=1,
verbose=self.verbose, max_verbose_mod=10000)
vr.init(total_epoch=len(X), start_epoch=0,
init_msg='Calculating VolumeAreaInterstice features.',
epoch_name='Atoms', stage=1)
if self.calc_indices == 'all':
self.calc_indices = list(X.index)
feature_lists = list()
for idx, row in X.iterrows():
if idx not in self.calc_indices:
continue
neighbor_type = list()
neighbor_coords = list()
for neighbor_id in row[self.neighbor_ids_col]:
if neighbor_id > 0:
neighbor_type.append(X.loc[neighbor_id][self.type_col])
neighbor_coords.append(
X.loc[neighbor_id][self.coords_cols].astype(float))
else:
continue
pos_ = PackingOfSite(self.pbc, self.bds, row[self.type_col],
row[self.coords_cols].values.astype(float),
neighbor_type, neighbor_coords,
radii=self.radii, radius_type=self.radius_type)
if len(neighbor_type) < 4:
feature_lists.append([0] * len(self.get_feature_names()))
else:
feature_list = list()
if self.calc_volume_area == 'volume' or \
self.calc_volume_area == 'all':
pos_.analyze_vol_interstice()
volume_interstice_list = pos_.volume_interstice_list_
volume_list = pos_.volume_list_
volume_total = pos_.convex_hull().volume
volume_interstice_original_array = \
np.array(volume_interstice_list)*np.array(volume_list)
center_volume = 4/3 * pi * pow(
pos_.radii[str(pos_.atom_type)][pos_.radius_type], 3)
for volume_type in self.volume_types:
# fractional volume_interstices in relative to the
# tetrahedra volume
if volume_type == \
"fractional_volume_interstice_tetrahedra":
feature_list.extend(
calc_stats(volume_interstice_list,
self.stat_ops))
# original volume_interstices (in the units of volume)
elif volume_type == "volume_interstice":
feature_list.extend(
calc_stats(volume_interstice_original_array,
self.stat_ops))
# fractional volume_interstices in relative to the
# entire volume
elif volume_type == \
"fractional_volume_interstice_tetrahedra_avg":
feature_list.extend(
calc_stats(volume_interstice_original_array /
volume_total * len(volume_list),
self.stat_ops))
# fractional volume_interstices in relative to the
# center atom volume
elif volume_type == \
"fractional_volume_interstice_center_v":
feature_list.extend(
calc_stats(volume_interstice_original_array /
center_volume, self.stat_ops))
if self.calc_volume_area == 'area' or \
self.calc_volume_area == 'all':
pos_.analyze_area_interstice()
area_interstice_list = pos_.area_interstice_list_
area_list = pos_.area_list_
area_total = pos_.convex_hull().area
area_interstice_original_array = \
np.array(area_interstice_list) * np.array(area_list)
center_slice_area = pi * pow(
pos_.radii[str(pos_.atom_type)][pos_.radius_type], 2)
for area_type in self.area_types:
# fractional area_interstices in relative to the
# tetrahedra area
if area_type == "fractional_area_interstice_triangle":
feature_list.extend(
calc_stats(area_interstice_list, self.stat_ops))
# original area_interstices (in the units of area)
if area_type == "area_interstice":
feature_list.extend(
calc_stats(area_interstice_original_array,
self.stat_ops))
# fractional area_interstices in relative to the
# entire area
if area_type == \
"fractional_area_interstice_triangle_avg":
feature_list.extend(
calc_stats(area_interstice_original_array /
area_total * len(area_list),
self.stat_ops))
# fractional area_interstices in relative to the center
# atom volume
if area_type == \
"fractional_area_interstice_center_slice_a":
feature_list.extend(
calc_stats(area_interstice_original_array /
center_slice_area, self.stat_ops))
feature_lists.append(feature_list)
if self.verbose > 0 and self.save:
vr.update(idx - 1)
volume_area_interstice_df = \
pd.DataFrame(feature_lists, index=self.calc_indices,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=volume_area_interstice_df,
name=self.output_file_prefix)
return volume_area_interstice_df
def get_feature_names(self):
feature_names = list()
feature_prefixs = list()
if self.calc_volume_area == 'volume' or self.calc_volume_area == 'all':
volume_type_names = ['Volume_interstice'] \
if len(self.volume_types) == 1 else self.volume_types
feature_prefixs += volume_type_names
if self.calc_volume_area == 'area' or self.calc_volume_area == 'all':
volume_type_names = ['Area_interstice'] \
if len(self.area_types) == 1 else self.area_types
feature_prefixs += volume_type_names
feature_names += ['{}_{}_{}'.format(feature_prefix, stat,
self.dependent_name_)
for feature_prefix in feature_prefixs
for stat in self.stat_ops]
return feature_names
class ClusterPackingEfficiency(BaseInterstice):
"""
<NAME>. et al. Atomic-Scale Mechanisms of the Glass-Forming Ability
in Metallic Glasses. Phys. Rev. Lett. 109, 105502 (2012).
The authors also term this metric as "Atomic Packing Efficiency" in the
original paper. Here we name it as "Cluster Packing Efficiency" to
distinguish this with that proposed in Laws, K. J. et al. Nat. Commun.
6, 8123 (2015).
"""
def __init__(self, pbc=None, backend=None, dependent_class="voro",
coords_cols=None, type_col='type',
atomic_number_list=None,
neighbor_num_limit=80, save=True,
radii=None, radius_type="miracle_radius",
verbose=1, output_path=None, output_file_prefix=None,
**nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(ClusterPackingEfficiency, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
type_col=type_col, atomic_number_list=atomic_number_list,
neighbor_num_limit=neighbor_num_limit, radii=radii,
radius_type = radius_type, verbose = verbose,
output_path=output_path, **nn_kwargs)
self.pbc = pbc if pbc is not None else [1, 1, 1]
self.coords_cols = coords_cols \
if coords_cols is not None else ['x', 'y', 'z']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_{}_cpe'.format(
self.category.replace('interstice_', ''), self.dependent_name_,
self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
def transform(self, X):
"""
Args:
            X (DataFrame): X can be a DataFrame composed of partial columns of
                the Nearest Neighbor class's output; or X can be the input of
                the Nearest Neighbor class, which should contain
                ['type', 'x', 'y', 'z'...] columns. We will automatically call
                the Nearest Neighbor class to calculate X's output by the
                self.fit() method, then feed it as input to this transform() method.
Returns:
            cluster_packing_efficiency_df (DataFrame): Cluster Packing
                Efficiency DataFrame whose index is the same as X's index;
                see the get_feature_names() method for column names.
"""
X = X.join(self.lammps_df) \
if self.calculated_X is None else self.calculated_X
# define print verbose
if self.verbose > 0 and self.save:
vr = VerboseReporter(self.backend, total_stage=1,
verbose=self.verbose, max_verbose_mod=10000)
vr.init(total_epoch=len(X), start_epoch=0,
init_msg='Calculating Cluster Packing Efficiency features.',
epoch_name='Atoms', stage=1)
feature_lists = list()
for idx, row in X.iterrows():
neighbor_type = list()
neighbor_coords = list()
for neighbor_id in row[self.neighbor_ids_col]:
if neighbor_id > 0:
neighbor_type.append(X.loc[neighbor_id][self.type_col])
neighbor_coords.append(X.loc[neighbor_id][self.coords_cols])
else:
continue
pos_ = PackingOfSite(self.pbc, self.bds,
row[self.type_col], row[self.coords_cols],
neighbor_type, neighbor_coords,
radii=self.radii, radius_type=self.radius_type)
if len(neighbor_type) < 4:
feature_lists.append([0] * len(self.get_feature_names()))
else:
feature_lists.append([pos_.cluster_packing_efficiency()])
if self.verbose > 0 and self.save:
vr.update(idx - 1)
cluster_packing_efficiency_df = pd.DataFrame(
feature_lists, index=X.index, columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=cluster_packing_efficiency_df,
name=self.output_file_prefix)
return cluster_packing_efficiency_df
def get_feature_names(self):
feature_names = ['Cluster_packing_efficiency_{}_{}'.format(
self.radius_type.replace("_radius", ""), self.dependent_name_)]
return feature_names
class AtomicPackingEfficiency(BaseInterstice):
"""
Laws, <NAME>., <NAME>. & <NAME>. A predictive structural model for
bulk metallic glasses. Nat. Commun. 6, 8123 (2015).
"""
def __init__(self, pbc=None, backend=None, dependent_class="voro",
coords_cols=None, type_col='type',
atomic_number_list=None,
neighbor_num_limit=80, save=True,
radii=None, radius_type="miracle_radius",
verbose=1, output_path=None, output_file_prefix=None,
**nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(AtomicPackingEfficiency, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
type_col=type_col, atomic_number_list=atomic_number_list,
neighbor_num_limit=neighbor_num_limit, radii=radii,
radius_type = radius_type, verbose = verbose,
output_path=output_path, **nn_kwargs)
self.pbc = pbc if pbc is not None else [1, 1, 1]
self.coords_cols = coords_cols \
if coords_cols is not None else ['x', 'y', 'z']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_{}_ape'.format(
self.category.replace('interstice_', ''), self.dependent_name_,
self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
def transform(self, X):
"""
Args:
            X (DataFrame): X can be a DataFrame composed of partial columns of
                the Nearest Neighbor class's output; or X can be the input of
                the Nearest Neighbor class, which should contain
                ['type', 'x', 'y', 'z'...] columns. We will automatically call
                the Nearest Neighbor class to calculate X's output by the
                self.fit() method, then feed it as input to this transform() method.
Returns:
            atomic_packing_efficiency_df (DataFrame): Atomic Packing Efficiency
                DataFrame whose index is the same as X's index; see the
                get_feature_names() method for column names.
"""
X = X.join(self.lammps_df) \
if self.calculated_X is None else self.calculated_X
# define print verbose
if self.verbose > 0 and self.save:
vr = VerboseReporter(self.backend, total_stage=1,
verbose=self.verbose, max_verbose_mod=10000)
vr.init(total_epoch=len(X), start_epoch=0,
init_msg='Calculating Atomic Packing Efficiency features.',
epoch_name='Atoms', stage=1)
feature_lists = list()
for idx, row in X.iterrows():
neighbor_type = list()
neighbor_coords = list()
for neighbor_id in row[self.neighbor_ids_col]:
if neighbor_id > 0:
neighbor_type.append(X.loc[neighbor_id][self.type_col])
neighbor_coords.append(X.loc[neighbor_id][self.coords_cols])
else:
continue
pos_ = PackingOfSite(self.pbc, self.bds,
row[self.type_col], row[self.coords_cols],
neighbor_type, neighbor_coords,
radii=self.radii, radius_type=self.radius_type)
if len(neighbor_type) < 4:
feature_lists.append([0] * len(self.get_feature_names()))
else:
feature_lists.append([pos_.atomic_packing_efficiency()])
if self.verbose > 0 and self.save:
vr.update(idx - 1)
atomic_packing_efficiency_df = pd.DataFrame(
feature_lists, index=X.index, columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=atomic_packing_efficiency_df,
name=self.output_file_prefix)
return atomic_packing_efficiency_df
def get_feature_names(self):
feature_names = ['Atomic_packing_efficiency_{}_{}'.format(
self.radius_type.replace("_radius", ""), self.dependent_name_)]
return feature_names
class CN(BaseSRO):
def __init__(self, backend=None, dependent_class="voro", save=True,
output_path=None, output_file_prefix=None, **nn_kwargs):
super(CN, self).__init__(save=save, backend=backend,
dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.dependent_cols_ = [self.neighbor_num_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_cn'.format(self.category, self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
cn_df = pd.DataFrame(X[self.dependent_cols_].values,
index=X.index, columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=cn_df, name=self.output_file_prefix)
return cn_df
def get_feature_names(self):
feature_names = ['CN_{}'.format(self.dependent_name_)]
return feature_names
class VoroIndex(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
save=True, edge_min=3, edge_max=7, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi" or \
isinstance(dependent_class, VoroNN)
super(VoroIndex, self).__init__(save=save, backend=backend,
dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.edge_min = edge_min
self.edge_max = edge_max
self.neighbor_num_limit = neighbor_num_limit
self.include_beyond_edge_max = include_beyond_edge_max
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_voronoi_index'.format(self.category,
self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
edge_num = self.edge_max - self.edge_min + 1
edge_lists = get_isometric_lists(X[self.neighbor_edges_col].values,
limit_width=self.neighbor_num_limit)
voro_index_list = np.zeros((len(X), edge_num))
voro_index_list = voronoi_stats.voronoi_index(
voro_index_list, X[self.neighbor_num_col].values, edge_lists,
self.edge_min, self.edge_max, self.include_beyond_edge_max,
n_atoms=len(X), neighbor_num_limit=self.neighbor_num_limit)
voro_index_df = pd.DataFrame(voro_index_list, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=voro_index_df, name=self.output_file_prefix)
return voro_index_df
def get_feature_names(self):
return ['Voronoi_idx_{}_{}'.format(idx, self.dependent_name_)
for idx in range(self.edge_min, self.edge_max + 1)]
class CharacterMotif(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
edge_min=3, target_voro_idx=None, frank_kasper=1,
save=True, output_path=None, output_file_prefix=None,
**nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(CharacterMotif, self).__init__(save=save,
backend=backend,
dependent_class=dependent_class,
output_path=output_path,
**nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.include_beyond_edge_max = include_beyond_edge_max
        if target_voro_idx is None:
            self.target_voro_idx = np.array([[0, 0, 12, 0, 0],
                                             [0, 0, 12, 4, 0]],
                                            dtype=np.longdouble)
        else:
            self.target_voro_idx = np.array(target_voro_idx,
                                            dtype=np.longdouble)
self.frank_kasper = frank_kasper
self.edge_min = edge_min
self.dependent_cols_ = ['Voronoi_idx_{}_voro'.format(idx)
for idx in range(3, 8)]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_voro_character_motif'.format(self.category)
def fit(self, X=None):
self.dependent_class_ = self.check_dependency(X)
        # This class only depends on the 'Voronoi_idx_{}_voro' columns, so if
        # X doesn't have these columns, this method calculates them automatically.
if self.dependent_class_ is not None:
voro_index = \
VoroIndex(neighbor_num_limit=self.neighbor_num_limit,
include_beyond_edge_max=self.include_beyond_edge_max,
                          dependent_class=self.dependent_class_, save=False,
backend=getattr(self, 'backend', None))
self.calculated_X = voro_index.fit_transform(X)
return self
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
voro_idx_lists = get_isometric_lists(
X[self.dependent_cols_].values, len(self.target_voro_idx[0]))
motif_one_hot = np.zeros((len(X),
len(self.target_voro_idx) + self.frank_kasper))
motif_one_hot = \
voronoi_stats.character_motif(motif_one_hot, voro_idx_lists,
self.edge_min, self.target_voro_idx,
self.frank_kasper, n_atoms=len(X))
motif_one_hot_array = np.array(motif_one_hot)
is_120_124 = motif_one_hot_array[:, 0] | motif_one_hot_array[:, 1]
motif_one_hot_array = np.append(motif_one_hot_array,
np.array([is_120_124]).T, axis=1)
character_motif_df = pd.DataFrame(motif_one_hot_array, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=character_motif_df, name=self.output_file_prefix)
return character_motif_df
def get_feature_names(self):
feature_names = ['is_<0,0,12,0,0>_voro', 'is_<0,0,12,4,0>_voro'] + \
["_".join(map(str, v)) + "_voro"
for v in self.target_voro_idx[2:]] + \
['is_polytetrahedral_voro', 'is_<0,0,12,0/4,0>_voro']
return feature_names
class IFoldSymmetry(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
edge_min=3, edge_max=7, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(IFoldSymmetry, self).__init__(save=save,
backend=backend,
dependent_class=dependent_class,
output_path=output_path,
**nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.include_beyond_edge_max = include_beyond_edge_max
self.edge_min = edge_min
self.edge_max = edge_max
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_voro_i_fold_symmetry'.format(self.category)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
edge_num = self.edge_max - self.edge_min + 1
edge_lists = get_isometric_lists(X[self.neighbor_edges_col].values,
limit_width=self.neighbor_num_limit)
i_symm_list = np.zeros((len(X), edge_num))
i_symm_list = voronoi_stats.i_fold_symmetry(
i_symm_list, X[self.neighbor_num_col].values, edge_lists,
self.edge_min, self.edge_max, self.include_beyond_edge_max,
n_atoms=len(X), neighbor_num_limit=self.neighbor_num_limit)
i_symm_df = pd.DataFrame(i_symm_list, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=i_symm_df, name=self.output_file_prefix)
return i_symm_df
def get_feature_names(self):
feature_names = ['{}_fold_symm_idx_voro'.format(edge)
for edge in range(self.edge_min, self.edge_max+1)]
return feature_names
class AreaWtIFoldSymmetry(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
edge_min=3, edge_max=7, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(AreaWtIFoldSymmetry, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.include_beyond_edge_max = include_beyond_edge_max
self.edge_min = edge_min
self.edge_max = edge_max
self.dependent_cols_ = [self.neighbor_num_col,
self.neighbor_edges_col,
self.neighbor_areas_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_area_wt_i_fold_symmetry'.format(self.category,
self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
edge_lists = get_isometric_lists(X[self.neighbor_edges_col].values,
limit_width=self.neighbor_num_limit)
area_lists = get_isometric_lists(
X[self.neighbor_areas_col].values,
limit_width=self.neighbor_num_limit).astype(np.longdouble)
edge_num = self.edge_max - self.edge_min + 1
area_wt_i_symm_list = np.zeros((len(X), edge_num))
area_wt_i_symm_list = voronoi_stats.area_wt_i_fold_symmetry(
area_wt_i_symm_list, X[self.neighbor_num_col].values,
edge_lists, area_lists, self.edge_min, self.edge_max,
self.include_beyond_edge_max, n_atoms=len(X),
neighbor_num_limit=self.neighbor_num_limit)
area_wt_i_symm_df = \
pd.DataFrame(area_wt_i_symm_list, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=area_wt_i_symm_df, name=self.output_file_prefix)
return area_wt_i_symm_df
def get_feature_names(self):
feature_names = ['Area_wt_{}_fold_symm_idx_voro'.format(edge)
for edge in range(self.edge_min, self.edge_max + 1)]
return feature_names
class VolWtIFoldSymmetry(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
edge_min=3, edge_max=7, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(VolWtIFoldSymmetry, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.include_beyond_edge_max = include_beyond_edge_max
self.edge_min = edge_min
self.edge_max = edge_max
self.dependent_cols_ = [self.neighbor_num_col,
self.neighbor_edges_col,
self.neighbor_vols_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_vol_wt_i_fold_symmetry'.format(
self.category, self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
edge_lists = get_isometric_lists(X[self.neighbor_edges_col].values,
limit_width=self.neighbor_num_limit)
vol_lists = get_isometric_lists(
X[self.neighbor_vols_col].values,
limit_width=self.neighbor_num_limit).astype(np.longdouble)
edge_num = self.edge_max - self.edge_min + 1
vol_wt_i_symm_list = np.zeros((len(X), edge_num))
vol_wt_i_symm_list = \
voronoi_stats.vol_wt_i_fold_symmetry(
vol_wt_i_symm_list, X[self.neighbor_num_col].values, edge_lists,
vol_lists, self.edge_min, self.edge_max,
self.include_beyond_edge_max, n_atoms=len(X),
neighbor_num_limit=self.neighbor_num_limit)
vol_wt_i_symm_df = pd.DataFrame(vol_wt_i_symm_list, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=vol_wt_i_symm_df, name=self.output_file_prefix)
return vol_wt_i_symm_df
def get_feature_names(self):
feature_names = ['Vol_wt_{}_fold_symm_idx_voro'.format(edge)
for edge in range(self.edge_min, self.edge_max + 1)]
return feature_names
class VoroAreaStats(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(VoroAreaStats, self).__init__(save=save,
backend=backend,
dependent_class=dependent_class,
output_path=output_path,
**nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.stats = ['mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_areas_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_area_stats'.format(self.category,
self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
area_lists = get_isometric_lists(
X[self.neighbor_areas_col].values,
limit_width=self.neighbor_num_limit).astype(np.longdouble)
area_stats = np.zeros((len(X), len(self.stats) + 1))
        area_stats = voronoi_stats.voronoi_area_stats(
            area_stats, X[self.neighbor_num_col].values, area_lists,
            n_atoms=len(X), neighbor_num_limit=self.neighbor_num_limit)
area_stats_df = pd.DataFrame(area_stats, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=area_stats_df, name=self.output_file_prefix)
return area_stats_df
def get_feature_names(self):
feature_names = ['Facet_area_sum_voro'] + \
['Facet_area_{}_voro'.format(stat)
for stat in self.stats]
return feature_names
class VoroAreaStatsSeparate(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
edge_min=3, edge_max=7, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(VoroAreaStatsSeparate, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.edge_min = edge_min
self.edge_max = edge_max
self.edge_num = edge_max - edge_min + 1
self.include_beyond_edge_max = include_beyond_edge_max
self.stats = ['sum', 'mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col,
self.neighbor_areas_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_area_stats_separate'.format(
self.category, self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
edge_lists = get_isometric_lists(
X[self.neighbor_edges_col].values,
limit_width=self.neighbor_num_limit)
area_lists = get_isometric_lists(
X[self.neighbor_areas_col].values,
limit_width=self.neighbor_num_limit).astype(np.longdouble)
area_stats_separate = \
np.zeros((len(X), self.edge_num * len(self.stats)))
area_stats_separate = \
voronoi_stats.voronoi_area_stats_separate(
area_stats_separate, X[self.neighbor_num_col].values,
edge_lists, area_lists, self.edge_min, self.edge_max,
self.include_beyond_edge_max, n_atoms=len(X),
neighbor_num_limit=self.neighbor_num_limit)
area_stats_separate_df = pd.DataFrame(area_stats_separate,
index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=area_stats_separate_df, name=self.output_file_prefix)
return area_stats_separate_df
def get_feature_names(self):
feature_names = ['{}_edged_area_{}_voro'.format(edge, stat)
for edge in range(self.edge_min, self.edge_max + 1)
for stat in self.stats]
return feature_names
class VoroVolStats(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(VoroVolStats, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.neighbor_num_limit = neighbor_num_limit
self.stats = ['mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_vols_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_vol_stats'.format(self.category,
self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
vol_lists = get_isometric_lists(
X[self.neighbor_vols_col].values,
limit_width=self.neighbor_num_limit).astype(np.longdouble)
vol_stats = np.zeros((len(X), len(self.stats) + 1))
        vol_stats = voronoi_stats.voronoi_vol_stats(
            vol_stats, X[self.neighbor_num_col].values, vol_lists,
            n_atoms=len(X), neighbor_num_limit=self.neighbor_num_limit)
vol_stats_df = pd.DataFrame(vol_stats, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=vol_stats_df, name=self.output_file_prefix)
return vol_stats_df
def get_feature_names(self):
feature_names = ['Subpolyhedra_vol_sum_voro'] + \
['Subpolyhedra_vol_{}_voro'.format(stat)
for stat in self.stats]
return feature_names
class VoroVolStatsSeparate(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
neighbor_num_limit=80, include_beyond_edge_max=True,
edge_min=3, edge_max=7, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
assert dependent_class == "voro" or dependent_class == "voronoi"
super(VoroVolStatsSeparate, self).__init__(
save=save, backend=backend, dependent_class=dependent_class,
output_path=output_path, **nn_kwargs)
self.edge_min = edge_min
self.edge_max = edge_max
self.edge_num = edge_max - edge_min + 1
self.neighbor_num_limit = neighbor_num_limit
self.include_beyond_edge_max = include_beyond_edge_max
self.stats = ['sum', 'mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col,
self.neighbor_vols_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_vol_stats_separate'.format(self.category,
self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
edge_lists = get_isometric_lists(
X[self.neighbor_edges_col].values,
limit_width=self.neighbor_num_limit)
vol_lists = get_isometric_lists(
X[self.neighbor_vols_col].values,
limit_width=self.neighbor_num_limit).astype(np.longdouble)
vol_stats_separate = np.zeros((len(X),
self.edge_num * len(self.stats)))
vol_stats_separate = \
voronoi_stats.voronoi_vol_stats_separate(
vol_stats_separate, X[self.neighbor_num_col].values,
edge_lists, vol_lists, self.edge_min, self.edge_max,
self.include_beyond_edge_max, n_atoms=len(X),
neighbor_num_limit=self.neighbor_num_limit)
vol_stats_separate_df = pd.DataFrame(vol_stats_separate,
index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=vol_stats_separate_df, name=self.output_file_prefix)
return vol_stats_separate_df
def get_feature_names(self):
feature_names = ['{}_edged_vol_{}_voro'.format(edge, stat)
for edge in range(self.edge_min, self.edge_max + 1)
for stat in self.stats]
return feature_names
class DistStats(BaseSRO):
def __init__(self, backend=None, dependent_class="voro",
dist_type='distance', neighbor_num_limit=80, save=True,
output_path=None, output_file_prefix=None, **nn_kwargs):
super(DistStats, self).__init__(save=save, backend=backend,
dependent_class=dependent_class,
output_path=output_path,
**nn_kwargs)
self.dist_type = dist_type
self.neighbor_num_limit = neighbor_num_limit
self.stats = ['sum', 'mean', 'std', 'min', 'max']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_dists_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_{}_stats'.format(
self.category, self.dependent_name_, self.dist_type)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
dist_lists = get_isometric_lists(
X[self.neighbor_dists_col].values,
limit_width=self.neighbor_num_limit)
dist_stats = np.zeros((len(X), len(self.stats)))
dist_stats = \
voronoi_stats.voronoi_distance_stats(
dist_stats, X[self.neighbor_num_col].values, dist_lists,
n_atoms=len(X), neighbor_num_limit=self.neighbor_num_limit)
dist_stats_df = pd.DataFrame(dist_stats, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=dist_stats_df, name=self.output_file_prefix)
return dist_stats_df
def get_feature_names(self):
feature_names = ['{}_{}_{}'.format(self.dist_type, stat,
self.dependent_name_)
for stat in self.stats]
return feature_names
@property
def double_dependency(self):
return False
class BOOP(BaseSRO):
def __init__(self, backend=None, dependent_class="voro", coords_path=None,
atom_coords=None, bds=None, pbc=None, low_order=1,
higher_order=1, coarse_lower_order=1, coarse_higher_order=1,
neighbor_num_limit=80, save=True, output_path=None,
output_file_prefix=None, **nn_kwargs):
super(BOOP, self).__init__(save=save, backend=backend,
dependent_class=dependent_class,
output_path=output_path,
**nn_kwargs)
self.low_order = low_order
self.higher_order = higher_order
self.coarse_lower_order = coarse_lower_order
self.coarse_higher_order = coarse_higher_order
if coords_path is not None and os.path.exists(coords_path):
_, _, self.atom_coords, self.bds = read_imd(coords_path)
else:
self.atom_coords = atom_coords
self.bds = bds
if self.atom_coords is None or self.bds is None:
raise ValueError("Please make sure atom_coords and bds are not None"
" or coords_path is not None")
self.pbc = pbc if pbc else [1, 1, 1]
self.neighbor_num_limit = neighbor_num_limit
self.bq_tags = ['4', '6', '8', '10']
self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
self.output_file_prefix = output_file_prefix \
if output_file_prefix is not None \
else 'feature_{}_{}_boop'.format(self.category,
self.dependent_name_)
def transform(self, X=None):
X = X if self.calculated_X is None else self.calculated_X
n_atoms = len(X)
        id_lists = get_isometric_lists(
            X[self.neighbor_ids_col].values,
            limit_width=self.neighbor_num_limit)
Ql = np.zeros((n_atoms, 4), dtype=np.longdouble)
Wlbar = np.zeros((n_atoms, 4), dtype=np.longdouble)
coarse_Ql = np.zeros((n_atoms, 4), dtype=np.longdouble)
coarse_Wlbar = np.zeros((n_atoms, 4), dtype=np.longdouble)
Ql, Wlbar, coarse_Ql, coarse_Wlbar = \
boop.calculate_boop(
self.atom_coords.astype(np.longdouble),
self.pbc, np.array(self.bds, dtype=np.longdouble),
                X[self.neighbor_num_col].values, id_lists,
self.low_order, self.higher_order, self.coarse_lower_order,
self.coarse_higher_order, Ql, Wlbar, coarse_Ql, coarse_Wlbar,
n_atoms=n_atoms, n_neighbor_limit=self.neighbor_num_limit)
concat_array = np.append(Ql, Wlbar, axis=1)
concat_array = np.append(concat_array, coarse_Ql, axis=1)
concat_array = np.append(concat_array, coarse_Wlbar, axis=1)
boop_df = pd.DataFrame(concat_array, index=X.index,
columns=self.get_feature_names())
if self.save:
self.backend.save_featurizer_as_dataframe(
output_df=boop_df, name=self.output_file_prefix)
return boop_df
def get_feature_names(self):
feature_names = ['q_{}_{}'.format(num, self.dependent_name_)
for num in self.bq_tags] + \
['w_{}_{}'.format(num, self.dependent_name_)
for num in self.bq_tags] + \
['Coarse_grained_q_{}_{}'.format(num,
self.dependent_name_)
for num in self.bq_tags] + \
['Coarse_grained_w_{}_{}'.format(num,
self.dependent_name_)
for num in self.bq_tags]
return feature_names
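
# A minimal usage sketch of one of the featurizers above (illustrative only: it
# assumes a configured `backend` object and a DataFrame `df` that already holds
# the Voronoi neighbor columns this class depends on):
#
#     featurizer = VoroAreaStats(backend=backend, save=False)
#     area_df = featurizer.transform(df)
#     list(area_df.columns)
#     # ['Facet_area_sum_voro', 'Facet_area_mean_voro', 'Facet_area_std_voro',
#     #  'Facet_area_min_voro', 'Facet_area_max_voro']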
|
121565
|
import os
import sys
from robot_server.service.protocol import contents
from contextlib import contextmanager
@contextmanager
def protocol_environment(protocol: contents.Contents):
"""
Context manager used for setting up an environment to run a
UploadProtocol.
"""
old_cwd = os.getcwd()
# Change working directory to temp dir
os.chdir(protocol.directory.name)
# Add temp dir to path after caching path
old_path = sys.path.copy()
sys.path.append(protocol.directory.name)
try:
yield contents
finally:
os.chdir(old_cwd)
sys.path = old_path
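
# Hedged usage sketch (assumes `protocol` is a contents.Contents instance whose
# `directory` is a tempfile.TemporaryDirectory; names are taken from the code above):
#
#     with protocol_environment(protocol):
#         # cwd and sys.path now point at the protocol's temp directory,
#         # so the uploaded protocol and its support files are importable.
#         run_uploaded_protocol()   # hypothetical runner, not part of this module
#     # cwd and sys.path are restored afterwards, even on error.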
|
121570
|
import torch
from mmcv.runner import OptimizerHook
from mmdet.core.utils.dist_utils import allreduce_grads
from collections import OrderedDict
import torch.distributed as dist
from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
_take_tensors)
class ArchOptimizerHook(OptimizerHook):
def after_train_iter(self, runner):
runner.arch_optimizer.zero_grad()
if runner.sub_obj_cfg.if_sub_obj:
loss_sub_obj = torch.log(runner.outputs['sub_obj']) / \
torch.log(torch.tensor(runner.sub_obj_cfg.log_base))
runner.outputs['loss'] += loss_sub_obj * runner.sub_obj_cfg.sub_loss_factor
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.arch_optimizer.step()
self.rescale_arch_params(runner.super_backbone)
def rescale_arch_params(self, model):
"""
        Rescale the architecture parameters.

        Adds the rescale_value (a bias) to the updated architecture parameters so
        that the softmax outputs of the non-updated parameters keep their magnitude.
"""
def comp_rescale_value(old_weights, new_weights, index, block_id, branch_id):
old_exp_sum = old_weights.exp().sum()
new_drop_arch_params = [new_weights[block_id][branch_id][h_idx
] for h_idx in index]
new_exp_sum = torch.stack(new_drop_arch_params).exp().sum()
rescale_value = torch.log(old_exp_sum / new_exp_sum)
return rescale_value
if hasattr(model, 'module'):
model = model.module
alpha_head_index = model.alpha_head_index
alpha_head_weights_drop = model.alpha_head_weights_drop
alpha_stack_index = model.alpha_stack_index
alpha_stack_weights_drop = model.alpha_stack_weights_drop
# rescale the arch params for head layers
for i, (alpha_head_weights_drop_block, alpha_head_index_block) in enumerate(
zip(alpha_head_weights_drop, alpha_head_index)):
for j, (alpha_head_weights_drop_branch, alpha_head_index_branch) in enumerate(
zip(alpha_head_weights_drop_block, alpha_head_index_block)):
rescale_value = comp_rescale_value(alpha_head_weights_drop_branch,
model.alpha_head_weights,
alpha_head_index_branch, i, j)
for idx in alpha_head_index_branch:
model.alpha_head_weights[i].data[j][idx] += rescale_value
# rescale the arch params for stack layers
for i, (alpha_stack_weights_drop_block, alpha_stack_index_block) in enumerate(
zip(alpha_stack_weights_drop, alpha_stack_index)):
for j, (alpha_stack_weights_drop_branch, alpha_stack_index_branch) in enumerate(
zip(alpha_stack_weights_drop_block, alpha_stack_index_block)):
rescale_value = comp_rescale_value(alpha_stack_weights_drop_branch,
model.alpha_stack_weights,
alpha_stack_index_branch, i, j)
for idx in alpha_stack_index_branch:
model.alpha_stack_weights[i].data[j][idx] += rescale_value
class ArchDistOptimizerHook(ArchOptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.arch_optimizer.zero_grad()
if runner.sub_obj_cfg.if_sub_obj:
loss_sub_obj = torch.log(runner.outputs['sub_obj']) / \
torch.log(torch.tensor(runner.sub_obj_cfg.log_base))
runner.outputs['loss'] += loss_sub_obj * runner.sub_obj_cfg.sub_loss_factor
runner.outputs['loss'].backward()
allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.arch_optimizer.step()
# self.rescale_arch_params(runner.super_backbone)
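
if __name__ == '__main__':
    # Toy, standalone illustration of the rescaling trick used in
    # rescale_arch_params above (made-up values, no runner involved): adding
    # log(sum(exp(old)) / sum(exp(kept_new))) to the kept weights restores the
    # old exp-sum, so softmax outputs of the non-updated weights are unchanged.
    old = torch.tensor([1.0, 2.0, 3.0])    # architecture weights before the update
    kept = torch.tensor([2.5, 3.5])        # the subset that was updated
    rescale_value = torch.log(old.exp().sum() / kept.exp().sum())
    print(torch.allclose((kept + rescale_value).exp().sum(), old.exp().sum()))  # True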
|
121587
|
import json
import logging
import requests
import os
from text_analytics.abstract_nlp_service import NLPService
from text_analytics.enhance import *
from text_analytics.quickUMLS.semtype_lookup import lookup
from text_analytics.quickUMLS.semtype_lookup import get_semantic_type_list
logger = logging.getLogger()
class QuickUMLSService(NLPService):
types_can_handle = {'AllergyIntolerance': enhance_allergy_intolerance_payload_to_fhir,
'Immunization': enhance_immunization_payload_to_fhir,
'DiagnosticReport': enhance_diagnostic_report_payload_to_fhir,
'DocumentReference': enhance_document_reference_payload_to_fhir
}
PROCESS_TYPE_UNSTRUCTURED = "QuickUMLS Unstructured"
PROCESS_TYPE_STRUCTURED = "QuickUMLS Structured"
def __init__(self, json_string):
config_dict = json.loads(json_string)
self.quickUMLS_url = config_dict["config"]["endpoint"]
self.jsonString = json_string
self.config_name = config_dict["name"]
def process(self, text):
if type(text) is bytes:
request_body = {"text": text.decode('utf-8')}
else:
request_body = {"text": text}
logger.info("Calling QUICKUMLS-" + self.config_name)
resp = requests.post(self.quickUMLS_url, json=request_body)
concepts = json.loads(resp.text)
conceptsList = []
if concepts is not None:
for concept in concepts:
conceptsList.append(self.concept_to_dict(concept))
return {"concepts": conceptsList}
@staticmethod
def concept_to_dict(concept):
output = {"Structure": "Concept"}
output["generatingService"] = "quickUMLS"
output["coveredText"] = concept["ngram"] if "ngram" in concept else None
output["cui"] = concept["cui"] if "cui" in concept else None
output["begin"] = concept["start"] if "start" in concept else None
output["end"] = concept["end"] if "end" in concept else None
output["preferredName"] = concept["term"] if "term" in concept else None
output["type"] = get_semantic_type_list(concept["semtypes"]) if "semtypes" in concept and len(concept["semtypes"]) > 0 else None
output["negated"] = False
return output
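
# Illustrative input/output for concept_to_dict (the field values below are made
# up; the resulting "type" entry depends on the semtype_lookup tables imported above):
#
#     QuickUMLSService.concept_to_dict(
#         {"ngram": "type 2 diabetes", "cui": "C0011860", "start": 12, "end": 27,
#          "term": "Diabetes Mellitus, Non-Insulin-Dependent", "semtypes": ["T047"]})
#     # -> {"Structure": "Concept", "generatingService": "quickUMLS",
#     #     "coveredText": "type 2 diabetes", "cui": "C0011860", "begin": 12,
#     #     "end": 27, "preferredName": "Diabetes Mellitus, Non-Insulin-Dependent",
#     #     "type": <semantic types for T047>, "negated": False}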
|
121642
|
from starlette.applications import Starlette
from starlette.responses import UJSONResponse
import gpt_2_simple as gpt2
import uvicorn
import os
app = Starlette(debug=False)
sess = gpt2.start_tf_sess(threads=1)
gpt2.load_gpt2(sess)
@app.route('/', methods=['GET', 'POST'])
async def homepage(request):
if request.method == 'GET':
params = request.query_params
elif request.method == 'POST':
params = await request.json()
text = gpt2.generate(sess,
length=100,
temperature=float(params.get('temperature', 0.7)),
top_k=int(params.get('top_k', 0)),
prefix='<|startoftext|>' + params.get('prefix', ''),
truncate='<|endoftext|>',
include_prefix=str(params.get(
'include_prefix', True)).lower() == 'true',
return_as_list=True
)[0]
# strip <|startoftext|>
text = text[len('<|startoftext|>'):]
return UJSONResponse({'text': text})
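
# Hypothetical client-side sketch (assumes the server above is running locally on
# port 8080; parameter names mirror the handler):
#
#     import requests
#     resp = requests.post("http://localhost:8080/",
#                          json={"prefix": "Once upon a time",
#                                "temperature": 0.7, "top_k": 40,
#                                "include_prefix": True})
#     print(resp.json()["text"])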
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
|
121645
|
import mock
import unittest
from nsq.sockets.base import SocketWrapper
class TestSocketWrapper(unittest.TestCase):
'''Test the SocketWrapper class'''
def setUp(self):
self.socket = mock.Mock()
self.wrapped = SocketWrapper.wrap_socket(self.socket)
def test_wrap_socket(self):
'''Passes through objects to the constructor'''
with mock.patch.object(SocketWrapper, '__init__') as mock_init:
mock_init.return_value = None
SocketWrapper.wrap_socket(5, hello='foo')
mock_init.assert_called_with(5, hello='foo')
def test_method_pass_through(self):
'''Passes through most methods directly to the underlying socket'''
self.assertEqual(self.wrapped.accept, self.socket.accept)
def test_send(self):
        '''SocketWrapper.send raises NotImplementedError'''
self.assertRaises(NotImplementedError, self.wrapped.send, 'foo')
def test_sendall(self):
'''Repeatedly calls send until everything has been sent'''
with mock.patch.object(self.wrapped, 'send') as mock_send:
# Only sends one byte at a time
mock_send.return_value = 1
self.wrapped.sendall('hello')
self.assertEqual(mock_send.call_count, 5)
def test_recv(self):
        '''SocketWrapper.recv raises NotImplementedError'''
self.assertRaises(NotImplementedError, self.wrapped.recv, 5)
def test_recv_into(self):
        '''SocketWrapper.recv_into raises NotImplementedError'''
self.assertRaises(NotImplementedError, self.wrapped.recv_into, 'foo', 5)
def test_inheritance_overrides(self):
'''Classes that inherit can override things like accept'''
class Foo(SocketWrapper):
def close(self):
pass
wrapped = Foo.wrap_socket(self.socket)
self.assertNotEqual(wrapped.close, self.socket.close)
|
121705
|
import os
dirpath = os.getcwd()
print("Current working directory is : %s" % dirpath)
APP_NAME = 'OSINT SAN Геолокация'  # "Геолокация" means "Geolocation"
#------<IMAGES PATH>-------------------------------------------------------------
IMG_FD = 'img'
ICO_PATH = os.path.join(dirpath, IMG_FD, "geoIcon.ico")
BGIMG_PATH = os.path.join(dirpath, IMG_FD, "background.jpg")
DC_POS_PATH = os.path.join(dirpath, "awsRecord.txt")
#-------<GLOBAL PARAMETERS>-----------------------------------------------------
iCtrlPanel = None   # control panel
iMapPanel = None    # panel to display the google map.
iGeoMgr = None      # program control manager.
iDCPosMgr = None    # data center position manager.
|
121708
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.FunctionAppsEnableAuthentication import check
from checkov.common.models.enums import CheckResult
class TestFunctionAppsEnableAuthentication(unittest.TestCase):
def test_failure_missing_authentication_block(self):
hcl_res = hcl2.loads("""
resource "azurerm_function_app" "example" {
name = "test-azure-functions"
location = "azurerm_resource_group.example.location"
resource_group_name = "azurerm_resource_group.example.name"
app_service_plan_id = "azurerm_app_service_plan.example.id"
storage_account_name = "azurerm_storage_account.example.name"
storage_account_access_key = "azurerm_storage_account.example.primary_access_key"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_function_app']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "azurerm_function_app" "example" {
name = "test-azure-functions"
location = "azurerm_resource_group.example.location"
resource_group_name = "azurerm_resource_group.example.name"
app_service_plan_id = "azurerm_app_service_plan.example.id"
storage_account_name = "azurerm_storage_account.example.name"
storage_account_access_key = "azurerm_storage_account.example.primary_access_key"
auth_settings {
enabled = true
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_function_app']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_failed(self):
hcl_res = hcl2.loads("""
resource "azurerm_function_app" "example" {
name = "test-azure-functions"
location = "azurerm_resource_group.example.location"
resource_group_name = "azurerm_resource_group.example.name"
app_service_plan_id = "azurerm_app_service_plan.example.id"
storage_account_name = "azurerm_storage_account.example.name"
storage_account_access_key = "azurerm_storage_account.example.primary_access_key"
auth_settings {
enabled = false
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_function_app']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
if __name__ == '__main__':
unittest.main()
|
121734
|
from typing import *
from .row_set import RowSet
from ..column import Column
if TYPE_CHECKING:
from ..row import Row
class EmptyRowSet(RowSet):
def columns(self) -> List[Column]:
return []
def iter(self) -> Iterator['Row']:
yield from ()
|
121746
|
from virtool.hmm.fake import create_fake_hmms
async def test_fake_hmms(app, snapshot, tmp_path, dbi, example_path, pg):
hmm_dir = tmp_path / "hmm"
hmm_dir.mkdir()
await create_fake_hmms(app)
assert await dbi.hmm.find().to_list(None) == snapshot
with open(hmm_dir / "profiles.hmm", "r") as f_result:
with open(example_path / "hmms/profiles.hmm") as f_expected:
assert f_result.read() == f_expected.read()
|
121769
|
from mongogogo import *
import datetime
class PageError(Exception):
"""something was wrong with a page related class"""
def __init__(self, msg):
self.msg = msg
    def __repr__(self):
        return "<PageError: %s>" % self.msg

    def __str__(self):
        return "<PageError: %s>" % self.msg
class PageSchema(Schema):
    """schema describing a single page (title, slug, content, layout, slot etc.)"""
created = DateTime()
updated = DateTime()
created_by = String() # TODO: should be ref to user
title = String(required=True)
menu_title = String(required=True)
slug = String(required=True)
image = String(required=False) # asset id
layout = String() # name of layout
content = String()
barcamp = String() # or empty for homepage
index = Integer() # sequence number in list of pages
slot = String() # slot id of the page
class Page(Record):
schema = PageSchema()
default_values = {
'created' : datetime.datetime.utcnow,
'updated' : datetime.datetime.utcnow,
'title' : "",
'slug' : "",
'content' : "",
'layout' : "default",
'slot' : "default",
        'index' : 1,
        'image' : None
}
layouts = ['default', 'left', 'right']
def set_layout(self, layout):
if layout not in self.layouts:
return
self.layout = layout
@property
def has_image(self):
"""return whether a page has an image attached or not"""
return self.image is not None and self.image!=""
class Pages(Collection):
data_class = Page
    def reorder_slot(self, slot, indexes, barcamp = None):
        """reorder a slot. You pass the slot id in ``slot`` and the new sequence
        order as a list in ``indexes``. Passing in [2,3,1] will reorder the existing
        pages into that order (see the small sketch at the end of this module).
        """
pages = self.for_slot(slot, barcamp = barcamp)
# do some checks
if len(indexes) != pages.count():
raise PageError("length of indexes (%s) does not match amount of pages (%s)" %(len(indexes), pages.count()))
pages = list(pages)
for page in pages:
if page.index not in indexes:
raise PageError("page with index %s missing in new indexes list" %(page.index))
page.index = indexes.index(page.index)
# finally save it
for page in pages:
page.put()
def add_to_slot(self, slot, page, barcamp = None):
"""adds a new page into a slot at the end of it. You can give the page object without the slot set and it will do the rest"""
page.slot = slot
if barcamp is not None:
page.barcamp = unicode(barcamp._id)
page.index = self.find({'slot' : page.slot, 'barcamp' : unicode(barcamp._id)}).count()
else:
page.barcamp = "___"
page.index = self.find({'slot' : page.slot, 'barcamp' : "___"}).count()
return self.put(page)
def by_slug(self, slug, barcamp = None):
"""return a page by only using the slug (and barcamp if given)"""
if barcamp is None:
return self.find_one({'slug' : slug, 'barcamp' : "___"})
else:
return self.find_one({'slug' : slug, 'barcamp' : unicode(barcamp._id)})
def remove_from_slot(self, slot, index, barcamp=None):
"""removes a page at index ``index``"""
if barcamp is None:
page = self.find_one({'slot' : slot, 'index' : index, 'barcamp' : "___"})
else:
page = self.find_one({'slot' : slot, 'index' : index, 'barcamp' : unicode(barcamp._id)})
self.remove(page)
def for_slot(self, slot, barcamp=None):
"""return all the pages for a slot"""
if barcamp is None:
return self.find({'slot' : slot, 'barcamp' : "___"}).sort("index", 1)
else:
return self.find({'slot' : slot, 'barcamp' : unicode(barcamp['_id'])}).sort("index", 1)
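
# Small, plain-Python sketch of the index remapping performed by
# Pages.reorder_slot above (no database involved; values are made up):
#
#     indexes = [2, 3, 1]                 # desired new order, given as old indexes
#     old_page_indexes = [1, 2, 3]
#     [indexes.index(i) for i in old_page_indexes]
#     # -> [2, 0, 1]: the page that had index 2 becomes first (new index 0),
#     #    the page with index 3 second, and the page with index 1 last.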
|
121791
|
from dolfin import *
from xii import *
def heat(n, dt, f, u0, gD):
'''BE u_t - (u_xx + u_yy) = f with u = gD on bdry and u(0, x) = u0'''
mesh = UnitSquareMesh(n, n)
facet_f = MeshFunction('size_t', mesh, 1, 0)
CompiledSubDomain('near(x[0], 0)').mark(facet_f, 1)
CompiledSubDomain('near(x[0], 1)').mark(facet_f, 2)
CompiledSubDomain('near(x[1], 0)').mark(facet_f, 3)
CompiledSubDomain('near(x[1], 1)').mark(facet_f, 4)
bmesh = EmbeddedMesh(facet_f, (1, 2, 3))
V = FunctionSpace(mesh, 'CG', 1)
Q = FunctionSpace(bmesh, 'CG', 1)
W = [V, Q]
u, p = list(map(TrialFunction, W))
v, q = list(map(TestFunction, W))
dx_ = Measure('dx', domain=bmesh)
Tu, Tv = Trace(u, bmesh), Trace(v, bmesh)
dt = Constant(dt)
a = block_form(W, 2)
a[0][0] = inner(grad(u), grad(v))*dx + (1/dt)*inner(u, v)*dx
a[0][1] = inner(p, Tv)*dx_
a[1][0] = inner(q, Tu)*dx_
u0 = interpolate(u0, V)
L = block_form(W, 1)
L[0] = (1/dt)*inner(u0, v)*dx + inner(f, v)*dx
L[1] = inner(gD, q)*dx_
bcs = [[DirichletBC(V, gD, facet_f, 4)],
[DirichletBC(Q, Constant(0), 'on_boundary')]]
AA, bb = list(map(ii_assemble, (a, L)))
AA, bb, apply_b = apply_bc(AA, bb, bcs, return_apply_b=True)
wh = ii_Function(W)
A = ii_convert(AA)
print(('Symmetry', as_backend_type(A).mat().isHermitian(1E-4)))
solver = LUSolver(A)
t = 0
while t < 1:
t += dt(0)
gD.t = t
f.t = t
bb = ii_assemble(L)
apply_b(bb)
solver.solve(wh.vector(), ii_convert(bb))
u0.assign(wh[0])
return t, u0
# --------------------------------------------------------------------
if __name__ == '__main__':
import sympy as sp
x, y, t = sp.symbols('x[0] x[1] t')
u = sp.sin(sp.pi*x*(x**2 + y**2)*t)
f = u.diff(t, 1) - (u.diff(x, 2) + u.diff(y, 2))
# Wrap
u = Expression(sp.printing.ccode(u).replace('M_PI', 'pi'), t=0, degree=4)
f = Expression(sp.printing.ccode(f).replace('M_PI', 'pi'), t=0, degree=4)
table = []
dt = 1E-1
for nrefs in range(4):
dt_row = []
for n in (8, 16, 32, 64):
u.t, f.t = 0., 0.
t, uh = heat(n, dt, f=f, u0=u, gD=u)
u.t = t
dt_row.append(errornorm(u, uh, 'H1'))
table.append(dt_row)
print((dt, '->', dt_row))
dt /= 2.
|
121839
|
import os
def get_nodes():
f = open("/rpicluster/config/nodes","r")
line = f.readline()
machines = []
while(line!=''):
split = line.split(',')
machines.append((split[0].rstrip(), split[2].rstrip()))
line = f.readline()
return machines
def get_ip(ip_output, interface):
output = ip_output.split("\n")
for x in range(len(output)):
if (interface in output[x]):
content = output[x+2].split(" ")
if(len(content) > 1):
return content[5]
return None
def network_type(network):
switcher = {
1: "Wifi-Wifi",
2: "Wifi-Switch",
3: "Wifi-OTG",
}
return switcher.get(network)
# get the IPs of all machines with a DHCP lease (from dnsmasq)
def get_machines():
machines = []
leases = open("/var/lib/misc/dnsmasq.leases", "r")
leases = leases.readlines()
for lease in leases:
ip = lease.split(" ")[2]
machines.append(ip)
return machines
#takes output of "ip addr" and desired interface, returns interface mac
def getmac(ip_output, interface):
output = ip_output.split("\n")
for x in range(len(output)):
if(interface in output[x]):
content = output[x+1].split(" ")
return content[len(content)-3]
def ping_node(hostname):
command = "ping -c 1 " + hostname + " > /dev/null"
return os.system(command)
#read the stamp and give back the network name and password
def read_stamp(magic_num, file):
pos = 0
fd = open(file, 'rb')
    byte = fd.read(1)
    first = ""  # first half of the image, up to and including the magic number
    while byte:  # stop at end-of-file instead of calling ord() on an empty read
        char = ord(byte)
        first += chr(char)
        if char == ord(magic_num[pos]) and pos == len(magic_num) - 1:
            break
        elif char == ord(magic_num[pos]):
            pos = pos + 1
        else:
            pos = 0
        byte = fd.read(1)
len_network = fd.read(8)
len_network = int(len_network.decode('utf-8'), 2)
len_pass = fd.read(8)
len_pass = int(len_pass.decode('utf-8'), 2)
name = fd.read(len_network)
passw = fd.read(len_pass)
return [bin_to_string(name), bin_to_string(passw)]
def bin_to_string(binary_string):
return ''.join(format(chr(int(binary_string[i:i+8], 2))) for i in range(0, len(binary_string), 8))
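
# Quick illustrative check of bin_to_string (example value only):
#
#     bin_to_string('0110100001101001')
#     # '01101000' -> chr(104) = 'h', '01101001' -> chr(105) = 'i'  =>  'hi'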
|
121891
|
import torch
from .num_nodes import maybe_num_nodes
def contains_self_loops(edge_index):
row, col = edge_index
mask = row == col
return mask.sum().item() > 0
def remove_self_loops(edge_index, edge_attr=None):
row, col = edge_index
mask = row != col
edge_attr = edge_attr if edge_attr is None else edge_attr[mask]
mask = mask.unsqueeze(0).expand_as(edge_index)
edge_index = edge_index[mask].view(2, -1)
return edge_index, edge_attr
def add_self_loops(edge_index, num_nodes=None):
num_nodes = maybe_num_nodes(edge_index, num_nodes)
dtype, device = edge_index.dtype, edge_index.device
loop = torch.arange(0, num_nodes, dtype=dtype, device=device)
loop = loop.unsqueeze(0).repeat(2, 1)
edge_index = torch.cat([edge_index, loop], dim=1)
return edge_index
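
# Minimal illustration of the helpers above on a toy graph (values chosen for the
# example, not taken from any test suite):
#
#     edge_index = torch.tensor([[0, 1, 1],
#                                [1, 1, 2]])
#     contains_self_loops(edge_index)            # True (the edge 1 -> 1)
#     remove_self_loops(edge_index)[0]           # tensor([[0, 1], [1, 2]])
#     add_self_loops(remove_self_loops(edge_index)[0], num_nodes=3)
#     # appends the loops 0->0, 1->1 and 2->2 as three extra columns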
|
121919
|
import math
import torch
import torch.nn as nn
from .layers import ConvLayer2d, ConvResBlock2d, EqualLinear
class DiscriminatorHead(nn.Module):
def __init__(self, in_channel, disc_stddev=False):
super().__init__()
self.disc_stddev = disc_stddev
stddev_dim = 1 if disc_stddev else 0
self.conv_stddev = ConvLayer2d(
in_channel=in_channel + stddev_dim, out_channel=in_channel, kernel_size=3, activate=True
)
self.final_linear = nn.Sequential(
nn.Flatten(),
EqualLinear(in_channel=in_channel * 4 * 4, out_channel=in_channel, activate=True),
EqualLinear(in_channel=in_channel, out_channel=1),
)
def cat_stddev(self, x, stddev_group=4, stddev_feat=1):
perm = torch.randperm(len(x))
inv_perm = torch.argsort(perm)
batch, channel, height, width = x.shape
x = x[perm] # shuffle inputs so that all views in a single trajectory don't get put together
group = min(batch, stddev_group)
stddev = x.view(group, -1, stddev_feat, channel // stddev_feat, height, width)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
stddev = stddev.repeat(group, 1, height, width)
stddev = stddev[inv_perm] # reorder inputs
x = x[inv_perm]
out = torch.cat([x, stddev], 1)
return out
def forward(self, x):
if self.disc_stddev:
x = self.cat_stddev(x)
x = self.conv_stddev(x)
out = self.final_linear(x)
return out
class ConvDecoder(nn.Module):
def __init__(self, in_channel, out_channel, in_res, out_res):
super().__init__()
log_size_in = int(math.log(in_res, 2))
log_size_out = int(math.log(out_res, 2))
self.layers = []
in_ch = in_channel
for i in range(log_size_in, log_size_out):
out_ch = in_ch // 2
self.layers.append(
ConvLayer2d(
in_channel=in_ch, out_channel=out_ch, kernel_size=3, upsample=True, bias=True, activate=True
)
)
in_ch = out_ch
self.layers.append(
ConvLayer2d(in_channel=in_ch, out_channel=out_channel, kernel_size=3, bias=True, activate=False)
)
self.layers = nn.Sequential(*self.layers)
def forward(self, x):
return self.layers(x)
class StyleDiscriminator(nn.Module):
def __init__(self, in_channel, in_res, ch_mul=64, ch_max=512, **kwargs):
super().__init__()
log_size_in = int(math.log(in_res, 2))
log_size_out = int(math.log(4, 2))
self.conv_in = ConvLayer2d(in_channel=in_channel, out_channel=ch_mul, kernel_size=3)
# each resblock will half the resolution and double the number of features (until a maximum of ch_max)
self.layers = []
in_channels = ch_mul
for i in range(log_size_in, log_size_out, -1):
out_channels = int(min(in_channels * 2, ch_max))
self.layers.append(ConvResBlock2d(in_channel=in_channels, out_channel=out_channels, downsample=True))
in_channels = out_channels
self.layers = nn.Sequential(*self.layers)
self.disc_out = DiscriminatorHead(in_channel=in_channels, disc_stddev=True)
self.decoder = ConvDecoder(in_channel=in_channels, out_channel=in_channel, in_res=4, out_res=in_res)
def forward(self, x):
x = self.conv_in(x)
x = self.layers(x)
out = self.disc_out(x)
recon = self.decoder(x)
return out, recon
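
# Rough shape-check sketch (assumes the ConvLayer2d / ConvResBlock2d / EqualLinear
# implementations imported above behave as standard StyleGAN-style layers; the
# numbers are illustrative):
#
#     disc = StyleDiscriminator(in_channel=3, in_res=64)
#     logits, recon = disc(torch.randn(8, 3, 64, 64))
#     logits.shape   # -> torch.Size([8, 1])
#     recon.shape    # -> torch.Size([8, 3, 64, 64])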
|
121933
|
from __future__ import absolute_import, division, unicode_literals
import re
from six.moves import zip
# FASTA
def read_fasta(infile, include_other_letters=False, return_headers=False):
sequences = []
if return_headers:
headers = []
currseq = []
for line in infile:
line = line.strip()
if isinstance(line, bytes) and str != bytes:
line = line.decode()
if not line or line[0] == '>':
            if return_headers and line.startswith('>'):
headers.append(line)
if currseq:
sequences.append(''.join(currseq))
currseq = []
else:
if not include_other_letters:
line = re.sub('[^ACGT]', '', line)
currseq.append(line)
if currseq:
sequences.append(''.join(currseq))
if return_headers:
return sequences, headers
else:
return sequences
def import_fasta(filename, **kwargs):
with open(filename, 'r') as infile:
return read_fasta(infile, **kwargs)
def write_fasta(outfile, sequences, headers=iter(str, 0)):
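    # Note: iter(str, 0) is an endless iterator of '' (str() never equals 0), so by
    # default every record is written with an empty header line after '>'.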
for header, seq in zip(headers, sequences):
outfile.write('>' + header + '\n')
outfile.write(seq)
outfile.write('\n')
def export_fasta(filename, sequences, **kwargs):
with open(filename, 'w') as outfile:
write_fasta(outfile, sequences, **kwargs)
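
if __name__ == '__main__':
    # Tiny self-check with in-memory data (example sequences only): non-ACGT
    # letters are stripped because include_other_letters defaults to False.
    from io import StringIO
    demo = StringIO('>seq1\nACGT\nACGT\n>seq2\nTTNT\n')
    print(read_fasta(demo))  # ['ACGTACGT', 'TTT']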
|
121995
|
import unittest
import mock
import os
from tornado.web import StaticFileHandler
import sandstone
from sandstone.app import SandstoneApplication
from sandstone.lib import ui_methods
from sandstone.lib.handlers.main import MainHandler
from sandstone.lib.handlers.pam_auth import PAMLoginHandler
from sandstone import settings as default_settings
TEST_PREFIX = '/test/prefix'
INSTALLED_APPS = (
'sandstone.lib',
'sandstone.apps.codeeditor',
'sandstone.apps.filebrowser',
)
APP_SPECS = []
for mod_name in ['sandstone.apps.codeeditor.settings','sandstone.apps.filebrowser.settings']:
mod = __import__(mod_name,fromlist=[''])
APP_SPECS.append(mod.APP_SPECIFICATION)
class MainAppTestCase(unittest.TestCase):
@mock.patch('sandstone.settings.URL_PREFIX','')
@mock.patch('sandstone.settings.INSTALLED_APPS',INSTALLED_APPS)
@mock.patch('sandstone.settings.APP_SPECIFICATIONS',APP_SPECS)
def test_app_settings(self):
app = SandstoneApplication()
self.assertEqual(type(app.settings),type({}))
expd = dict(
project_dir=sandstone.__path__[0],
static_dir=os.path.join(sandstone.__path__[0],'client/sandstone'),
login_url='/auth/login',
cookie_secret = default_settings.COOKIE_SECRET,
xsrf_cookies=True,
ui_methods=ui_methods,
)
self.assertDictContainsSubset(expd,app.settings)
@mock.patch('sandstone.settings.URL_PREFIX',TEST_PREFIX)
@mock.patch('sandstone.settings.INSTALLED_APPS',INSTALLED_APPS)
@mock.patch('sandstone.settings.APP_SPECIFICATIONS',APP_SPECS)
def test_app_settings_prefixed(self):
app = SandstoneApplication()
self.assertEqual(type(app.settings),type({}))
expd = dict(
login_url='{}/auth/login'.format(TEST_PREFIX),
)
self.assertDictContainsSubset(expd,app.settings)
@mock.patch('sandstone.settings.URL_PREFIX','')
@mock.patch('sandstone.settings.INSTALLED_APPS',INSTALLED_APPS)
@mock.patch('sandstone.settings.APP_SPECIFICATIONS',APP_SPECS)
def test_app_handlers(self):
app = SandstoneApplication()
handlers = app.handlers[0][1]
hpaths = [h._path for h in handlers]
self.assertEqual(handlers[0]._path,'/static/core/%s')
self.assertTrue(issubclass(handlers[0].handler_class,StaticFileHandler))
self.assertTrue('/?' in hpaths)
i = hpaths.index('/?')
self.assertTrue(issubclass(handlers[i].handler_class,MainHandler))
self.assertTrue('/auth/login' in hpaths)
i = hpaths.index('/auth/login')
self.assertTrue(issubclass(handlers[i].handler_class,PAMLoginHandler))
self.assertTrue('/auth/logout' in hpaths)
self.assertTrue('/a/deps' in hpaths)
self.assertTrue('/static/editor/%s' in hpaths)
self.assertTrue('/static/filebrowser/%s' in hpaths)
self.assertTrue('/a/filesystem/' in hpaths)
self.assertTrue('/a/filesystem/directories/%s/' in hpaths)
self.assertTrue('/a/filesystem/files/%s/' in hpaths)
self.assertTrue('/a/filesystem/files/%s/contents/' in hpaths)
@mock.patch('sandstone.settings.URL_PREFIX',TEST_PREFIX)
@mock.patch('sandstone.settings.INSTALLED_APPS',INSTALLED_APPS)
@mock.patch('sandstone.settings.APP_SPECIFICATIONS',APP_SPECS)
def test_app_handlers_prefixed(self):
app = SandstoneApplication()
handlers = app.handlers[0][1]
hpaths = [h._path for h in handlers]
self.assertEqual(handlers[0]._path,'{}/static/core/%s'.format(TEST_PREFIX))
self.assertTrue(issubclass(handlers[0].handler_class,StaticFileHandler))
self.assertTrue('{}/?'.format(TEST_PREFIX) in hpaths)
i = hpaths.index('{}/?'.format(TEST_PREFIX))
self.assertTrue(issubclass(handlers[i].handler_class,MainHandler))
self.assertTrue('{}/auth/login'.format(TEST_PREFIX) in hpaths)
i = hpaths.index('{}/auth/login'.format(TEST_PREFIX))
self.assertTrue(issubclass(handlers[i].handler_class,PAMLoginHandler))
self.assertTrue('{}/auth/logout'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/a/deps'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/static/editor/%s'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/static/filebrowser/%s'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/a/filesystem/'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/a/filesystem/directories/%s/'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/a/filesystem/files/%s/'.format(TEST_PREFIX) in hpaths)
self.assertTrue('{}/a/filesystem/files/%s/contents/'.format(TEST_PREFIX) in hpaths)
|
121996
|
import tensorflow as tf
from absl import flags
from absl import app
from absl import logging
from tokenization import FullTokenizer
from tokenization_en import load_subword_vocab
from transformer import Transformer, FileConfig
FLAGS = flags.FLAGS
MODEL_DIR = "/Users/livingmagic/Documents/deeplearning/models/bert-nmt/zh-en_bert-tf2_L6-D256/"
flags.DEFINE_string("bert_config_file", MODEL_DIR + "bert_config.json", "The bert config file")
flags.DEFINE_string("bert_vocab_file", MODEL_DIR + "vocab.txt",
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string("init_checkpoint", MODEL_DIR + "bert_nmt_ckpt", "")
flags.DEFINE_string("config_file", MODEL_DIR + "config.json", "The transformer config file except bert")
flags.DEFINE_string("vocab_file", MODEL_DIR + "vocab_en", "The english vocabulary file")
flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length")
flags.DEFINE_string("inp_sentence", None, "")
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions so that we can add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
"""
The look-ahead mask is used to mask the future tokens in a sequence.
In other words, the mask indicates which entries should not be used.
"""
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
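# Illustrative output (values for size=3; not part of the original code):
#
#     create_look_ahead_mask(3)
#     # [[0., 1., 1.],
#     #  [0., 0., 1.],
#     #  [0., 0., 0.]]
#     # i.e. position i may only attend to positions <= i.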
def create_masks(inp, tar):
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
dec_padding_mask = create_padding_mask(inp)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return combined_mask, dec_padding_mask
def encode_zh(tokenizer_zh, zh):
tokens_zh = tokenizer_zh.tokenize(zh)
lang1 = tokenizer_zh.convert_tokens_to_ids(['[CLS]'] + tokens_zh + ['[SEP]'])
return lang1
def evaluate(transformer,
tokenizer_zh,
tokenizer_en,
inp_sentence,
max_seq_length):
    # encode the input sentence (tokenize and add [CLS]/[SEP])
inp_sentence = encode_zh(tokenizer_zh, inp_sentence)
encoder_input = tf.expand_dims(inp_sentence, 0)
# as the target is english, the first word to the transformer should be the
# english start token.
decoder_input = [tokenizer_en.vocab_size]
output = tf.expand_dims(decoder_input, 0)
for i in range(max_seq_length):
combined_mask, dec_padding_mask = create_masks(
encoder_input, output)
# predictions.shape == (batch_size, seq_len, vocab_size)
predictions, attention_weights = transformer(encoder_input,
output,
False,
combined_mask,
dec_padding_mask)
# select the last word from the seq_len dimension
predictions = predictions[:, -1:, :] # (batch_size, 1, vocab_size)
predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
# return the result if the predicted_id is equal to the end token
if tf.equal(predicted_id, tokenizer_en.vocab_size + 1):
return tf.squeeze(output, axis=0), attention_weights
        # concatenate the predicted_id to the output which is given to the decoder
# as its input.
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0), attention_weights
def main(_):
tokenizer_zh = FullTokenizer(
vocab_file=FLAGS.bert_vocab_file, do_lower_case=True)
tokenizer_en = load_subword_vocab(FLAGS.vocab_file)
target_vocab_size = tokenizer_en.vocab_size + 2
config = FileConfig(FLAGS.config_file)
transformer = Transformer(config=config,
target_vocab_size=target_vocab_size,
bert_config_file=FLAGS.bert_config_file)
inp = tf.random.uniform((1, FLAGS.max_seq_length))
tar_inp = tf.random.uniform((1, FLAGS.max_seq_length))
fn_out, _ = transformer(inp, tar_inp,
True,
look_ahead_mask=None,
dec_padding_mask=None)
transformer.load_weights(FLAGS.init_checkpoint)
print(transformer.encoder.weights[0])
result, _ = evaluate(transformer,
tokenizer_zh,
tokenizer_en,
FLAGS.inp_sentence,
FLAGS.max_seq_length)
predicted_sentence = tokenizer_en.decode([i for i in result
if i < tokenizer_en.vocab_size])
print('Input: {}'.format(FLAGS.inp_sentence))
print('Predicted translation: {}'.format(predicted_sentence))
if __name__ == "__main__":
flags.mark_flag_as_required("inp_sentence")
app.run(main)
|
122009
|
import argparse
import datetime
import netrc
import os
import subprocess
import threading
import time
import typing
import google.auth
import googleapiclient.discovery
from src.context import DataContext
TIMEOUT_MULTIPLIER = 10
API = googleapiclient.discovery.build('tpu', 'v1')
_, PROJECT = google.auth.default()
OLD_DATA_PATH = DataContext.path.replace("/", "\\/")[:-1] # remove * at the end
GLOBAL_DICT = {}
CACHE_TIME = 10
def exec_command(wandb_key: str, sweep_id: str, data_path: str, pretrained_path: str, branch: str):
data_path = data_path.replace("/", "\\/")
# Bottom one doesn't use , on purpose
return ' && '.join(("sudo apt --fix-missing --fix-broken install -y git python3 python3-pip",
"(rm -rf HomebrewNLP-Jax ; pkill -f python3 ; exit 0)",
f"git clone --depth 1 --branch {branch} https://github.com/HomebrewNLP/HomebrewNLP-Jax/",
"cd HomebrewNLP-Jax", "(bash setup.sh ; exit 0)",
f"/home/ubuntu/.local/bin/wandb login {wandb_key}",
f'sed -i "s/{OLD_DATA_PATH}/{data_path}/g" src/context.py',
f'screen -dmS model '
f'bash -c "cd HomebrewNLP-Jax ; /home/ubuntu/.local/bin/wandb agent {sweep_id}"'))
def send_to_tpu(zone: str, host: str, filename: str, command: str):
with open(host, 'w') as f:
f.write(command)
os.system(f"gcloud alpha compute tpus tpu-vm scp {host} ubuntu@{host}:~/{filename} --zone {zone}")
os.remove(host)
def send_commands_to_tpu(wandb_key: str, sweep_id: str, host: str, zone: str, data_path: str, pretrained_path: str,
branch: str):
command = exec_command(wandb_key, sweep_id, data_path, pretrained_path, branch)
send_to_tpu(zone, host, "setup.sh", command)
def exec_tpu(host: str, zone: str, command: str):
print(f"running '{command}' ...", end='')
start_time = time.time()
ret = subprocess.call(["gcloud", "alpha", "compute", "tpus", "tpu-vm", "ssh", f"ubuntu@{host}",
f"--zone", zone, "--command", command])
if not ret:
print(f"done after {time.time() - start_time:.1f}s")
return
delete_one_tpu(host, host, zone)
def all_tpus(zone: str):
zone = 'projects/' + PROJECT + '/locations/' + zone
if GLOBAL_DICT.get(f"last_write_{zone}", 0) < time.time() - CACHE_TIME:
GLOBAL_DICT[f"last_write_{zone}"] = time.time()
GLOBAL_DICT[f"tpus_{zone}"] = API.projects().locations().nodes().list(parent=zone).execute().get('nodes', [])
return GLOBAL_DICT[f"tpus_{zone}"]
def tpu_names(zone: str, preempted: bool = True, deleting: bool = False, prefix: str = ''):
while True:
try:
tpus = all_tpus(zone)
tpus = [t['name'].split('/')[-1] for t in tpus if
"state" in t
and (deleting or t['state'] != "DELETING")
and (preempted or t['state'] != "PREEMPTED")]
return [t for t in tpus if t.startswith(prefix)]
except KeyboardInterrupt as exc:
raise exc
except:
pass
def delete_one_tpu(prefix: str, host: str, zone: str):
if prefix not in host:
return
print(f"\x1b[32;1m DELETING {host}\x1b[0m")
os.system(f"echo y | gcloud alpha compute tpus tpu-vm delete {host} --zone {zone} --async")
def synchronous_deletion(prefix: str, host: str, zone: str):
if prefix not in host:
return
while host in tpu_names(zone, deleting=True):
if host in tpu_names(zone):
delete_one_tpu(prefix, host, zone)
time.sleep(CACHE_TIME)
def delete_all(prefix: str, zone: str):
while tpu_names(zone, prefix=prefix):
threads = [threading.Thread(target=synchronous_deletion, args=(prefix, host, zone), daemon=True) for host in
tpu_names(zone, prefix=prefix)]
for t in threads:
t.start()
for t in threads:
t.join()
def create_tpu(host: str, zone: str, tpu_version: int, preemptible: bool, service_account: str,
semaphore: threading.Semaphore):
with semaphore:
os.system(f'while ! gcloud alpha compute tpus tpu-vm create {host} --service-account {service_account} '
f'--zone {zone} --accelerator-type v{tpu_version}-8 --version v2-alpha '
f'{"--preemptible" * preemptible}; do echo; done')
def start_single(prefix: str, tpu_id: int, sweep_id: str, wandb_key: str, tpu_version: int, zone: str,
data_path: str, pretrained_path: str, preemptible: bool, timeout_multiplier: int, service_account: str,
branch: str, creation_semaphore: threading.Semaphore):
host = f"{prefix}-{tpu_id}"
time.sleep((tpu_id - 1) * TIMEOUT_MULTIPLIER * timeout_multiplier)
if host in tpu_names(zone, preempted=True, deleting=True):
if host not in tpu_names(zone, preempted=False, deleting=False):
synchronous_deletion(prefix, host, zone)
create_tpu(host, zone, tpu_version, preemptible, service_account, creation_semaphore)
else:
create_tpu(host, zone, tpu_version, preemptible, service_account, creation_semaphore)
while True:
try:
send_commands_to_tpu(wandb_key, sweep_id, host, zone, data_path, pretrained_path, branch)
exec_tpu(host, zone, "bash setup.sh")
while host in tpu_names(zone, preempted=False):
time.sleep(CACHE_TIME)
synchronous_deletion(prefix, host, zone)
create_tpu(host, zone, tpu_version, preemptible, service_account, creation_semaphore)
except KeyboardInterrupt:
print(f"{host} - {datetime.datetime.now()}: KeyboardInterrupt received. Killing TPU, then self.")
delete_one_tpu(prefix, host, zone)
return
def start_multiple(prefix: str, tpus: int, sweep_id: str, tpu_version: int, zone: str, data_path: str,
pretrained_path: str, preemptible: bool, timeout_multiplier: int, service_account: str,
branch: str):
_, _, wandb_key = netrc.netrc().authenticators("api.wandb.ai")
procs = []
creation_semaphore = threading.Semaphore(2)
for tpu_id in range(tpus):
proc = threading.Thread(target=start_single, daemon=True,
args=(prefix, tpu_id + 1, sweep_id, wandb_key, tpu_version, zone, data_path,
pretrained_path, preemptible, timeout_multiplier, service_account,
branch, creation_semaphore))
proc.start()
procs.append(proc)
while all(t.is_alive() for t in procs):
try:
time.sleep(10)
except KeyboardInterrupt:
print(f"MAIN - {datetime.datetime.now()}: KeyboardInterrupt received. Killing All TPUs, then self.")
delete_all(prefix, zone)
return
def parse_args() -> typing.Tuple[int, int, str, str, str, str, str, bool, bool, int, str, str]:
parser = argparse.ArgumentParser()
parser.add_argument("--tpus", type=int, default=1, help="How many TPUs should be launched")
parser.add_argument("--tpu-version", type=int, default=3, help="Which TPU version to create (v2-8 or v3-8)")
parser.add_argument("--prefix", type=str, default="homebrewnlp-preemptible-tuning", help="Name prefix for TPUs")
parser.add_argument("--zone", type=str, default="europe-west4-a", help="GCP Zone TPUs get created in")
parser.add_argument("--data-path", type=str, default="gs://ggpt4/the-char-pile/",
help="Where the data is stored. Should be changed to a bucket in the correct region")
parser.add_argument("--pretrained-path", type=str, default="",
help="Where the pretrained embeddings are stored. Should be changed to a bucket in the correct "
"region")
parser.add_argument("--sweep", type=str, help="ID of the Weights and Biases sweep that'll be resumed")
parser.add_argument("--cleanup", default=0, type=int,
help="Instead of running something new, kill all tpus. 1 or 0 for y/n")
parser.add_argument("--preemptible", default=1, type=int,
help="Whether to create preemptible or non-preemptible TPUs")
parser.add_argument("--timeout-multiplier", default=1, type=int,
                        help="additional timeout multiplier (for launching many scripts in parallel)")
parser.add_argument("--service-account", type=str,
help="Service account that controls permissions of TPU (for example, to ensure EU TPUs won't "
"use US data)")
parser.add_argument("--branch", type=str, help="Branch on github to use")
args = parser.parse_args()
return (args.tpus, args.tpu_version, args.prefix, args.zone, args.sweep, args.data_path, args.pretrained_path,
bool(args.cleanup), bool(args.preemptible), args.timeout_multiplier, args.service_account, args.branch)
def main():
(tpus, tpu_version, prefix, zone, sweep_id, data_path, pretrained_path, cleanup, preemptible, timeout_multiplier,
service_account, branch) = parse_args()
if cleanup:
delete_all(prefix, zone)
else:
start_multiple(prefix, tpus, sweep_id, tpu_version, zone, data_path, pretrained_path, preemptible,
timeout_multiplier, service_account, branch)
if __name__ == '__main__':
main()
|
122017
|
from __future__ import division
import numpy as np
import scipy.stats as st
from numpy.testing import assert_array_almost_equal
from tensorprob import (
Exponential,
MigradOptimizer,
Mix2,
Mix3,
MixN,
Model,
Normal,
Parameter,
Poisson
)
def test_mix2_fit():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1)
a = Parameter(lower=0)
f = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, np.inf)])
X12 = Mix2(f, X1, X2, bounds=[(6, 17), (18, 36)])
model.observed(X12)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
f: 0.3,
})
# Generate some data to fit
np.random.seed(42)
exp_data = np.random.exponential(10, 200000)
exp_data = exp_data[(exp_data < 8) | (10 < exp_data)]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 5e-3
assert abs(model.state[sigma] - 2) < 5e-3
assert abs(model.state[a] - 0.1) < 5e-4
assert abs(model.state[f] - (len(norm_data)/len(data))) < 5e-4
def test_mix2_fit_with_mix2_input():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1, upper=4)
a = Parameter(lower=0.06)
b = Parameter(lower=0)
f_1 = Parameter(lower=0, upper=1)
f_2 = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
X12 = Mix2(f_1, X1, X2, bounds=[(6, 17), (18, 36)])
X3 = Exponential(b)
X123 = Mix2(f_2, X12, X3, bounds=[(6, 17), (18, 36)])
model.observed(X123)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
b: 0.04,
f_1: 0.3,
f_2: 0.4
})
# Generate some data to fit
np.random.seed(42)
exp_1_data = np.random.exponential(10, 200000)
exp_1_data = exp_1_data[
(6 < exp_1_data) &
((exp_1_data < 8) | (10 < exp_1_data)) &
((exp_1_data < 17) | (18 < exp_1_data)) &
((exp_1_data < 27) | (31 < exp_1_data)) &
(exp_1_data < 36)
]
exp_2_data = np.random.exponential(20, 200000)
exp_2_data = exp_2_data[
(6 < exp_2_data) &
((exp_2_data < 17) | (18 < exp_2_data)) &
(exp_2_data < 36)
]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_1_data, exp_2_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 1e-3
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[b] - 0.05) < 3e-4
assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
# Check if we can access the individual components
xs = np.linspace(0, 41, 1001)
def allowed_point(x, bounds):
@np.vectorize
def allowed_point(x):
for l, u in bounds:
if l < x and x < u:
return 1
return 0
return allowed_point(x)
# Normal
bounds = [(6, 17), (18, 21), (22, 36)]
out1 = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, bounds)
integral = sum(
st.norm.cdf(u, model.state[mu], model.state[sigma]) -
st.norm.cdf(l, model.state[mu], model.state[sigma])
for l, u in bounds
)
out1 *= model.state[f_1] * model.state[f_2] / integral
out2 = model[X1].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 1
bounds = [(6, 8), (10, 17), (18, 27), (31, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[a]) -
st.expon.cdf(l, 0, 1/model.state[a])
for l, u in bounds
)
out1 *= (1-model.state[f_1]) * model.state[f_2] / integral
out2 = model[X2].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 2
bounds = [(6, 17), (18, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[b]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[b]) -
st.expon.cdf(l, 0, 1/model.state[b])
for l, u in bounds
)
out1 *= (1-model.state[f_2]) / integral
out2 = model[X3].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
def test_mix3_fit():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1, upper=4)
a = Parameter(lower=0.06)
b = Parameter(lower=0)
f_1 = Parameter(lower=0, upper=1)
f_2 = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
X3 = Exponential(b)
X123 = Mix3(f_1, f_2, X1, X2, X3, bounds=[(6, 17), (18, 36)])
model.observed(X123)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
b: 0.04,
f_1: 0.3,
f_2: 0.4
})
# Generate some data to fit
np.random.seed(42)
exp_1_data = np.random.exponential(10, 200000)
exp_1_data = exp_1_data[
(6 < exp_1_data) &
((exp_1_data < 8) | (10 < exp_1_data)) &
((exp_1_data < 17) | (18 < exp_1_data)) &
((exp_1_data < 27) | (31 < exp_1_data)) &
(exp_1_data < 36)
]
exp_2_data = np.random.exponential(20, 200000)
exp_2_data = exp_2_data[
(6 < exp_2_data) &
((exp_2_data < 17) | (18 < exp_2_data)) &
(exp_2_data < 36)
]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_1_data, exp_2_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 1e-3
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[b] - 0.05) < 3e-4
assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
# Check if we can access the individual components
xs = np.linspace(0, 41, 1001)
def allowed_point(x, bounds):
@np.vectorize
def allowed_point(x):
for l, u in bounds:
if l < x and x < u:
return 1
return 0
return allowed_point(x)
# Normal
bounds = [(6, 17), (18, 21), (22, 36)]
out1 = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, bounds)
integral = sum(
st.norm.cdf(u, model.state[mu], model.state[sigma]) -
st.norm.cdf(l, model.state[mu], model.state[sigma])
for l, u in bounds
)
out1 *= model.state[f_1] * model.state[f_2] / integral
out2 = model[X1].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 1
bounds = [(6, 8), (10, 17), (18, 27), (31, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[a]) -
st.expon.cdf(l, 0, 1/model.state[a])
for l, u in bounds
)
out1 *= (1-model.state[f_1]) * model.state[f_2] / integral
out2 = model[X2].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 2
bounds = [(6, 17), (18, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[b]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[b]) -
st.expon.cdf(l, 0, 1/model.state[b])
for l, u in bounds
)
out1 *= (1-model.state[f_2]) / integral
out2 = model[X3].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
def test_mixn_fit():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1, upper=4)
a = Parameter(lower=0.06)
b = Parameter(lower=0)
f_1 = Parameter(lower=0, upper=1)
f_2 = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
X3 = Exponential(b)
X123 = MixN([f_1, f_2], [X1, X2, X3], bounds=[(6, 17), (18, 36)])
model.observed(X123)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
b: 0.04,
f_1: 0.3,
f_2: 0.4
})
# Generate some data to fit
np.random.seed(42)
exp_1_data = np.random.exponential(10, 200000)
exp_1_data = exp_1_data[
(6 < exp_1_data) &
((exp_1_data < 8) | (10 < exp_1_data)) &
((exp_1_data < 17) | (18 < exp_1_data)) &
((exp_1_data < 27) | (31 < exp_1_data)) &
(exp_1_data < 36)
]
exp_2_data = np.random.exponential(20, 200000)
exp_2_data = exp_2_data[
(6 < exp_2_data) &
((exp_2_data < 17) | (18 < exp_2_data)) &
(exp_2_data < 36)
]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_1_data, exp_2_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 1e-3
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[b] - 0.05) < 3e-4
assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
# Check if we can access the individual components
xs = np.linspace(0, 41, 1001)
def allowed_point(x, bounds):
@np.vectorize
def allowed_point(x):
for l, u in bounds:
if l < x and x < u:
return 1
return 0
return allowed_point(x)
# Normal
bounds = [(6, 17), (18, 21), (22, 36)]
out1 = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, bounds)
integral = sum(
st.norm.cdf(u, model.state[mu], model.state[sigma]) -
st.norm.cdf(l, model.state[mu], model.state[sigma])
for l, u in bounds
)
out1 *= model.state[f_1] * model.state[f_2] / integral
out2 = model[X1].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 1
bounds = [(6, 8), (10, 17), (18, 27), (31, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[a]) -
st.expon.cdf(l, 0, 1/model.state[a])
for l, u in bounds
)
out1 *= (1-model.state[f_1]) * model.state[f_2] / integral
out2 = model[X2].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 2
bounds = [(6, 17), (18, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[b]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[b]) -
st.expon.cdf(l, 0, 1/model.state[b])
for l, u in bounds
)
out1 *= (1-model.state[f_2]) / integral
out2 = model[X3].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
def test_mix2_extended():
np.random.seed(0)
exp_data = np.random.exponential(10, 20000)
exp_data = exp_data[(6 < exp_data) & (exp_data < 36)]
norm1_data = np.random.normal(19, 2, 10000)
norm1_data = norm1_data[(6 < norm1_data) & (norm1_data < 36)]
data = np.concatenate([exp_data, norm1_data])
data = data[((6 < data) & (data < 36))]
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1)
a = Parameter(lower=0)
N1 = Parameter(lower=0)
N2 = Parameter(lower=0)
N = Poisson(N1+N2)
X1 = Normal(mu, sigma)
X2 = Exponential(a)
X12 = Mix2(N1/(N1+N2), X1, X2, bounds=[(6, 36)])
model.observed(X12, N)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
N1: len(data)/5,
N2: len(data)*4/5
})
result = model.fit(data, np.ones_like(data)*len(data), optimizer=MigradOptimizer())
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 3e-2
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[N1] - len(norm1_data)) < np.sqrt(len(norm1_data))
assert abs(model.state[N2] - len(exp_data)) < np.sqrt(len(exp_data))
# Check if the pdf is correct
xs = np.linspace(0, 41, 101)
def allowed_point(x, bounds):
@np.vectorize
def allowed_point(x):
for l, u in bounds:
if l < x and x < u:
return 1
return 0
return allowed_point(x)
out1a = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, [(6, 36)])
integral = st.norm.cdf(36, model.state[mu], model.state[sigma])
integral -= st.norm.cdf(6, model.state[mu], model.state[sigma])
out1a *= model.state[N1] / (model.state[N1]+model.state[N2]) / integral
out1b = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, [(6, 36)])
integral = st.expon.cdf(36, 0, 1/model.state[a]) - st.expon.cdf(6, 0, 1/model.state[a])
out1b *= model.state[N2] / (model.state[N1]+model.state[N2]) / integral
out1 = out1a + out1b
out2 = model.pdf(xs, None)
assert_array_almost_equal(out1, out2, 16)
|
122069
|
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import torch.nn.functional as F
from hdbscan import HDBSCAN
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
import time
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
# from scipy.special import softmax
from abmt import datasets
from abmt import models
from abmt.trainers import ABMTTrainer
from abmt.evaluators import Evaluator, extract_features
from abmt.utils.data import IterLoader
from abmt.utils.data import transforms as T
from abmt.utils.data.sampler import RandomMultipleGallerySampler
from abmt.utils.data.preprocessor import Preprocessor
from abmt.utils.logging import Logger
from abmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from abmt.utils.rerank import compute_jaccard_dist
start_epoch = best_mAP = 0
def get_data(name, data_dir):
root = osp.join(data_dir, name)
dataset = datasets.create(name, root)
return dataset
def get_train_loader(dataset, height, width, batch_size, workers,
num_instances, iters, mutual=False):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
# print(dataset)
train_set = dataset.train
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=mutual),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=1)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=1)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
if args.no_source:
print('No source pre-training')
else:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
for param in model_1_ema.parameters():
param.detach_()
return model_1, model_1_ema
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
dataset_target = get_data(args.dataset_target, args.data_dir)
ori_train = dataset_target.train
if not args.no_source:
dataset_source = get_data(args.dataset_source, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
# Create model
model_1, model_1_ema = create_model(args)
# Evaluator
evaluator_1_ema = Evaluator(model_1_ema)
best_mAP = 0
for nc in range(args.epochs):
cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dict_f, _ = extract_features(model_1_ema, cluster_loader, print_freq=50)
cf_1 = torch.stack(list(dict_f.values()))
# DBSCAN cluster
if args.no_source:
rerank_dist = compute_jaccard_dist(cf_1, lambda_value=0, source_features=None,
use_gpu=False).numpy()
else:
cluster_loader_source = get_test_loader(dataset_source, args.height, args.width, args.batch_size,
args.workers, testset=dataset_source.train)
dict_f_source, _ = extract_features(model_1_ema, cluster_loader_source, print_freq=50)
cf_1_source = torch.stack(list(dict_f_source.values()))
rerank_dist = compute_jaccard_dist(cf_1, lambda_value=args.lambda_value, source_features=cf_1_source,
use_gpu=False).numpy()
del cf_1_source
tri_mat = np.triu(rerank_dist, 1) # tri_mat.dim=2
tri_mat = tri_mat[np.nonzero(tri_mat)] # tri_mat.dim=1
tri_mat = np.sort(tri_mat, axis=None)
top_num = np.round(args.rho * tri_mat.size).astype(int)
eps = tri_mat[:top_num].mean()
print('eps in cluster: {:.3f}'.format(eps))
print('Clustering and labeling...')
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
labels = cluster.fit_predict(rerank_dist)
num_ids = len(set(labels)) -1
print('Epoch {} have {} training ids'.format(nc, num_ids))
# generate new dataset
labeled_ind, unlabeled_ind = [], []
for ind, label in enumerate(labels):
if label == -1:
unlabeled_ind.append(ind)
else:
labeled_ind.append(ind)
# print('Epoch {} have {} labeled samples and {} unlabeled samples'.format(nc + 1, len(labeled_ind), len(unlabeled_ind)))
cf_1 = cf_1.numpy()
centers = []
for id in range(num_ids):
centers.append(np.mean(cf_1[labels == id], axis=0))
centers = np.stack(centers, axis=0)
del cf_1, rerank_dist
model_1.module.classifier = nn.Linear(2048, num_ids, bias=False).cuda()
model_1_ema.module.classifier = nn.Linear(2048, num_ids, bias=False).cuda()
model_1.module.classifier_max = nn.Linear(2048, num_ids, bias=False).cuda()
model_1_ema.module.classifier_max = nn.Linear(2048, num_ids, bias=False).cuda()
model_1.module.classifier.weight.data.copy_(
torch.from_numpy(normalize(centers[:, :2048], axis=1)).float().cuda())
model_1_ema.module.classifier.weight.data.copy_(
torch.from_numpy(normalize(centers[:, :2048], axis=1)).float().cuda())
model_1.module.classifier_max.weight.data.copy_(
torch.from_numpy(normalize(centers[:, 2048:], axis=1)).float().cuda())
model_1_ema.module.classifier_max.weight.data.copy_(
torch.from_numpy(normalize(centers[:, 2048:], axis=1)).float().cuda())
del centers
target_label = labels
for i in range(len(dataset_target.train)):
dataset_target.train[i] = list(dataset_target.train[i])
dataset_target.train[i][1] = int(target_label[i])
dataset_target.train[i] = tuple(dataset_target.train[i])
# Optimizer
params = []
for key, value in model_1.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = ABMTTrainer(model_1, model_1_ema, num_cluster=num_ids, alpha=args.alpha)
epoch = nc
# # DBSCAN
dataset_target.train = [ori_train[i] for i in labeled_ind]
print(len(dataset_target.train), 'are labeled.')
labeled_loader_target = get_train_loader(dataset_target, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters, mutual=True)
labeled_loader_target.new_epoch()
trainer.train(epoch, labeled_loader_target, optimizer,
print_freq=args.print_freq, train_iters=len(labeled_loader_target))
def save_model(model_ema, is_best, best_mAP, mid, num_ids):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
'num_ids': num_ids
}, is_best, fpath=osp.join(args.logs_dir, 'model'+str(mid)+'_checkpoint.pth.tar'))
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
print('Evaluating teacher net:')
cmc, mAP_1 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
is_best = (mAP_1>best_mAP)
best_mAP = max(mAP_1, best_mAP)
save_model(model_1_ema, is_best, best_mAP, 1, num_ids)
dataset_target.train = ori_train
print ('Test on the best model.')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model_best = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=checkpoint['num_ids'])
model_best.cuda()
model_best = nn.DataParallel(model_best)
evaluator_best = Evaluator(model_best)
model_best.load_state_dict(checkpoint['state_dict'])
evaluator_best.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="ABMT Training")
# data
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc-reid',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=40)
parser.add_argument('--iters', type=int, default=800)
# training configs
parser.add_argument('--no-source', action='store_true')
parser.add_argument('--init-1', type=str, default='', metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=1)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
# cluster
parser.add_argument('--lambda_value', type=float, default=0.1,
help="balancing parameter, default: 0.1")
parser.add_argument('--rho', type=float, default=2e-3,
help="rho percentage, default: 2e-3")
main()
|
122103
|
# Each of the N intervals [a, b] contributes c per unit position; every position's
# combined rate is capped at C, and the answer sums min(total rate, C) over all positions.
N, C = map(int, input().split())
l = [tuple(map(int, input().split())) for i in range(N)]
# Coordinate-compress the interval endpoints (b + 1 is the exclusive end).
s = set()
for a, b, c in l:
s.add(a)
s.add(b + 1)
d = {}
s = sorted(list(s))
for i, j in enumerate(s):
d[j] = i
# Difference array over the compressed coordinates.
m = [0] * (len(s) + 1)
for a, b, c in l:
m[d[a]] += c
m[d[b + 1]] -= c
# Sweep: accumulate the running rate and cap each segment's contribution at C.
ans = 0
for i in range(len(s) - 1):
c = m[i] if m[i] < C else C
ans += (s[i + 1] - s[i]) * c
m[i + 1] += m[i]
print(ans)
|
122111
|
import mediapipe as mp
import pandas as pd
import numpy as np
import cv2
mp_pose = mp.solutions.pose
# returns an angle value as a result of the given points
def calculate_angle(a, b, c):
a = np.array(a) # First
b = np.array(b) # Mid
c = np.array(c) # End
radians = np.arctan2(c[1] - b[1], c[0] - b[0]) -\
np.arctan2(a[1] - b[1], a[0] - b[0])
angle = np.abs(radians * 180.0 / np.pi)
# keep the angle within the [0, 180] degree range
if angle > 180.0:
angle = 360 - angle
return angle
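# Example with made-up normalized landmark coordinates (not real detector output):
# shoulder, elbow, wrist = [0.50, 0.30], [0.50, 0.45], [0.50, 0.60]
# calculate_angle(shoulder, elbow, wrist) -> 180.0, since the three points are collinear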
# return body part x,y value
def detection_body_part(landmarks, body_part_name):
return [
landmarks[mp_pose.PoseLandmark[body_part_name].value].x,
landmarks[mp_pose.PoseLandmark[body_part_name].value].y,
landmarks[mp_pose.PoseLandmark[body_part_name].value].visibility
]
# return body_part, x, y as dataframe
def detection_body_parts(landmarks):
body_parts = pd.DataFrame(columns=["body_part", "x", "y"])
for i, lndmrk in enumerate(mp_pose.PoseLandmark):
lndmrk = str(lndmrk).split(".")[1]
cord = detection_body_part(landmarks, lndmrk)
body_parts.loc[i] = lndmrk, cord[0], cord[1]
return body_parts
def score_table(exercise, counter, status):
score_table = cv2.imread("./images/score_table.png")
cv2.putText(score_table, "Activity : " + exercise.replace("-", " "),
(10, 65), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (182, 158, 128), 2,
cv2.LINE_AA)
cv2.putText(score_table, "Counter : " + str(counter), (10, 100),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (182, 158, 128), 2, cv2.LINE_AA)
cv2.putText(score_table, "Status : " + str(status), (10, 135),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (182, 158, 128), 2, cv2.LINE_AA)
cv2.imshow("Score Table", score_table)
|
122137
|
from unittest import TestCase, mock
from unittest.mock import MagicMock
import numpy as np
from source.constants import Constants
from source.preprocessing.epoch import Epoch
from source.preprocessing.heart_rate.heart_rate_collection import HeartRateCollection
from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService
class TestHeartRateFeatureService(TestCase):
@mock.patch('source.preprocessing.heart_rate.heart_rate_feature_service.pd')
def test_load(self, mock_pd):
mock_pd.read_csv.return_value = mock_return = MagicMock()
mock_return.values = expected_return = np.array([1, 2, 3, 4, 5])
actual_returned_value = HeartRateFeatureService.load("subjectA")
self.assertListEqual(expected_return.tolist(), actual_returned_value.tolist())
mock_pd.read_csv.assert_called_once_with(str(HeartRateFeatureService.get_path("subjectA")), delimiter=' ')
def test_get_path(self):
expected_path = Constants.FEATURE_FILE_PATH.joinpath("subjectA" + '_hr_feature.out')
self.assertEqual(expected_path, HeartRateFeatureService.get_path("subjectA"))
@mock.patch('source.preprocessing.heart_rate.heart_rate_feature_service.np')
def test_write(self, mock_np):
feature_to_write = np.array([1, 2, 3, 4])
subject_id = "subjectA"
HeartRateFeatureService.write(subject_id, feature_to_write)
mock_np.savetxt.assert_called_once_with(HeartRateFeatureService.get_path(subject_id), feature_to_write,
fmt='%f')
def test_get_window(self):
timestamps = np.array([-1000, -500, 32, 50, 60, 800, 1000])
epoch = Epoch(timestamp=55, index=120)
expected_indices_in_range = np.array([2, 3, 4])
actual_indices_in_range = HeartRateFeatureService.get_window(timestamps, epoch)
self.assertEqual(expected_indices_in_range.tolist(), actual_indices_in_range.tolist())
@mock.patch.object(HeartRateFeatureService, 'get_feature')
@mock.patch('source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateService')
def test_build_feature_array(self, mock_heart_rate_service, mock_get_feature):
subject_id = "subjectA"
data = np.array(
[[1, 10], [10, 220], [20, 0], [40, 500], [70, 200], [90, 0], [100, 0], [400, 4]])
motion_collection = HeartRateCollection(subject_id=subject_id, data=data)
mock_heart_rate_service.load_cropped.return_value = motion_collection
expected_features = [np.array([0.1]), np.array([0.2])]
mock_get_feature.side_effect = expected_features
expected_feature_array = np.array(expected_features)
valid_epochs = [Epoch(timestamp=4, index=1), Epoch(timestamp=50, index=2)]
returned_feature_array = HeartRateFeatureService.build(subject_id, valid_epochs)
self.assertEqual(expected_feature_array.tolist(), returned_feature_array.tolist())
|
122161
|
import os
port = os.environ.get('PORT', 5000)
bind = f"0.0.0.0:{port}"
# Copied from gunicorn.glogging.CONFIG_DEFAULTS
logconfig_dict = {
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"gunicorn.error": {
"propagate": True,
},
"gunicorn.access": {
"propagate": True,
},
"app.app": {
"propagate": False,
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": "ext://sys.stdout"
},
},
"formatters": {
"generic": {
"format": "[%(name)s] [%(process)s] [%(levelname)s] %(message)s",
"class": "logging.Formatter"
}
}
}
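# Usage note (a sketch, assuming this module is passed as the Gunicorn config file):
# gunicorn -c <this_file>.py app.app:app
# Gunicorn then applies `bind` and feeds `logconfig_dict` to logging.config.dictConfig;
# the `app.app:app` target is only a guess based on the logger name configured above.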
|
122163
|
import plotly.plotly as py
from plotly.graph_objs import *
import plotly
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import csv
def singleDatafram(data, column):
new_df = data[['timeline',column]].dropna()
new_df.reset_index(drop=True, inplace=True)
return new_df
|
122168
|
from sanic import Sanic
from aoiklivereload import LiveReloader
import asyncio
import uvloop
import logging
import config
from blueprints import Blueprints
from database import init_db
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
app = Sanic(__name__)
app.blueprint(Blueprints.auth, url_prefix='/api/auth')
loop.create_task(init_db())
# TODO: Put somewhere for better code style
if config.DEBUG:
reloader = LiveReloader()
reloader.start_watcher_thread()
@app.middleware('response')
async def request_log(request, response):
logging.info(
f'{request.method} - {request.url} - {response.status}')
if __name__ == '__main__':
app.go_fast(port=config.PORT, workers=config.WORKERS,
debug=config.DEBUG, loop=loop)
|
122189
|
import requests
import ast
import adal
from utilities.models import ConnectionInfo
from common.methods import set_progress
from infrastructure.models import CustomField
RESOURCE_IDENTIFIER = "userPrincipalName"
def create_custom_fields():
CustomField.objects.get_or_create(
name='first_name', type='STR',
defaults={'label': 'first name', 'description': 'Used by the Office 365 blueprints', 'show_as_attribute': True})
CustomField.objects.get_or_create(
name='last_name', type='STR',
defaults={'label': 'last name', 'description': 'Used by the Office 365 blueprints', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='userPrincipalName', type='STR',
defaults={'label': '<NAME>', 'description': 'Used by the Office 365 blueprints',
'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='user_id', type='STR',
defaults={'label': 'ID', 'description': 'Used by the Office 365 blueprints', 'show_as_attribute': True}
)
def discover_resources(**kwargs):
create_custom_fields()
discovered_users = []
set_progress("Discovering office365 users...")
CI = ConnectionInfo.objects.get(name='Office365')
headers = ast.literal_eval(CI.headers)
authority = 'https://login.microsoftonline.com/'
resource_ = '{}://{}'.format(CI.protocol, CI.ip)
url = f'{CI.protocol}://{CI.ip}:{CI.port}/v1.0/users/'
tenant_id = headers.get('tenant_id')
client_id = headers.get('client_id')
client_secret = headers.get('client_secret')
context = adal.AuthenticationContext(authority + tenant_id)
token = context.acquire_token_with_client_credentials(resource_, client_id, client_secret)
headers = {'Authorization': 'Bearer {0}'.format(token['accessToken']), 'Content-Type': 'application/json'}
response = requests.get(url, headers=headers)
if response.ok:
users = response.json().get('value')
set_progress(f"Discovered {len(users)} office365 users.")
for user in users:
discovered_users.append({
"name": user.get('displayName'),
"first_name": user.get('surname'),
"last_name": user.get('givenName'),
'userPrincipalName': user.get('userPrincipalName'),
'user_id': user.get('id')
})
return discovered_users
set_progress("Error occured while trying to discover users")
return []
|
122196
|
import os  # used by ModelEMA.after_train_epoch for checkpoint cleanup
import os.path as osp
import mmcv
import math
from copy import deepcopy
from mmcv.runner import Hook
from mmcv.runner.dist_utils import master_only, get_dist_info
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from mmcv.runner.checkpoint import save_checkpoint, load_checkpoint
class EvalHook(Hook):
"""Evaluation hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, interval=1, **eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError(
'dataloader must be a pytorch DataLoader, but got {}'.format(
type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalHook(EvalHook):
"""Distributed evaluation hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
**eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError(
'dataloader must be a pytorch DataLoader, but got {}'.format(
type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
# EMA test
# if runner.rank == 0:
# results = multi_gpu_test(
# runner.ema.ema,
# self.dataloader,
# tmpdir=osp.join(runner.work_dir, '.eval_hook'),
# gpu_collect=self.gpu_collect)
# if runner.rank == 0:
# print('\n')
# self.evaluate(runner, results)
class ModelEMA(Hook):
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
"""
def __init__(self,
runner,
filename=None,
decay=0.9998,
out_dir=None,
interval=-1,
save_optimizer=True,
max_keep_ckpts=-1,
meta=None,
device='',
**kwargs):
# setting checkpoints
self.interval = interval
self.out_dir = out_dir
self.save_optimizer = save_optimizer
self.max_keep_ckpts = max_keep_ckpts
self.args = kwargs
self.create_symlink = True
self.filename = filename
self.meta = meta
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(runner.model)
if self.filename:
runner.logger.info('load EMA checkpoint for EMA model from %s', filename)
load_checkpoint(self.ema, self.filename, map_location='cpu', strict=False)
self.ema.eval()
# self.updates = 0 # number of EMA updates
self.updates = runner.iter # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
for p in self.ema.parameters():
p.requires_grad_(False)
runner.ema = self
def update(self, model):
self.updates += 1
d = self.decay(self.updates)
with torch.no_grad():
if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel):
msd, esd = model.module.state_dict(), self.ema.module.state_dict()
else:
msd, esd = model.state_dict(), self.ema.state_dict()
for k, v in esd.items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
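# i.e. ema = decay * ema + (1 - decay) * model, applied in place tensor-by-tensor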
def update_attr(self, model):
# Assign attributes (which may change during training)
for k in model.__dict__.keys():
if not k.startswith('_'):
setattr(self.ema, k, getattr(model, k))
# @master_only
def after_train_iter(self, runner):
# rank, _ = get_dist_info()
# for testing, master_only is intentionally not used here
self.update(runner.model)
@master_only
def after_train_epoch(self, runner):
self.update_attr(runner.model)
# save ema model
if not self.every_n_epochs(runner, self.interval):
return
if not self.out_dir:
self.out_dir = runner.work_dir
meta = runner.meta
if meta is None:
meta = dict(epoch=runner.epoch + 1, iter=runner.iter)
else:
meta.update(epoch=runner.epoch + 1, iter=runner.iter)
filename = 'epoch_ema_{}.pth'.format(runner.epoch + 1)
filepath = osp.join(self.out_dir, filename)
optimizer = runner.optimizer if self.save_optimizer else None
save_checkpoint(
self.ema,
filepath,
optimizer=optimizer,
meta=meta)
if self.create_symlink:
mmcv.symlink(filename, osp.join(self.out_dir, 'latest_ema.pth'))
# remove other checkpoints
if self.max_keep_ckpts > 0:
filename_tmpl = self.args.get('filename_tmpl', 'epoch_ema_{}.pth')
current_epoch = runner.epoch + 1
for epoch in range(current_epoch - self.max_keep_ckpts, 0, -1):
ckpt_path = os.path.join(self.out_dir,
filename_tmpl.format(epoch))
if os.path.exists(ckpt_path):
os.remove(ckpt_path)
else:
break
|
122207
|
import os
import argparse
import pandas as pd
from azureml.core import Run
import aml_utils
def main(dataset_name, output_train_data, output_test_data):
run = Run.get_context()
ws = aml_utils.retrieve_workspace()
data_raw = aml_utils.get_dataset(ws, dataset_name)
print(f"Loaded dataset with {len(data_raw)} rows:")
print(data_raw.head(2))
print("Preprocessing data...")
data = preprocessing(data_raw)
print("Splitting data into a training and a testing set...")
data_train, data_test = train_test_split(data)
print(f"Saving train dataset in folder {output_train_data}...")
write_output(data_train, output_train_data)
print(f"Saving test dataset in folder {output_test_data}...")
write_output(data_test, output_test_data)
print("Finished.")
def preprocessing(data):
# Do preprocessing here
return data
def train_test_split(data):
# Do train-test split here
train_data, test_data = data.copy(), data.copy()
return train_data, test_data
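# A possible concrete split (illustrative sketch only; assumes scikit-learn is available
# and that a random 80/20 split is acceptable for this template):
# from sklearn.model_selection import train_test_split as sk_split
# train_data, test_data = sk_split(data, test_size=0.2, random_state=42)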
def write_output(data, output_dir, file_name='dataset.csv'):
os.makedirs(output_dir, exist_ok=True)
file_path = os.path.join(output_dir, file_name)
data.to_csv(file_path)
print('OK')
def parse_args(args_list=None):
parser = argparse.ArgumentParser()
parser.add_argument('--dataset-name', type=str, required=True)
parser.add_argument('--output-train-data', type=str, default='./outputs/train')
parser.add_argument('--output-test-data', type=str, default='./outputs/test')
args_parsed = parser.parse_args(args_list)
return args_parsed
if __name__ == '__main__':
args = parse_args()
main(
dataset_name=args.dataset_name,
output_train_data=args.output_train_data,
output_test_data=args.output_test_data
)
|
122224
|
from questionnaire import Questionnaire
import requests
q = Questionnaire(show_answers=False, can_go_back=False)
q.raw('user', prompt='Username:')
q.raw('pass', prompt='Password:', secret=True)
q.run()
r = requests.get('https://api.github.com/user/repos', auth=(q.answers.get('user'), q.answers.get('pass')))
if not(r.ok):
import sys
print('username/password incorrect')
sys.exit()
repos = [repo.get('url') for repo in r.json()]
q.one('repo', *repos, prompt='Choose a repo')
q.run()
print(q.answers.get('repo'))
|
122251
|
from functools import reduce
class Solution:
def superPow(self, a: 'int', b: 'List[int]') -> 'int':
p = reduce(lambda x, y: (10*x + y)%1140, b)
return pow(a, p, 1337)
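# Why this works: 1337 = 7 * 191, so phi(1337) = 6 * 190 = 1140; for a coprime to 1337,
# Euler's theorem gives a^b = a^(b mod 1140) (mod 1337). The reduce() folds the digit
# list b into b mod 1140. Quick check: Solution().superPow(2, [1, 0]) == 1024, since
# 2^10 = 1024 and 1024 % 1337 == 1024.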
|
122262
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import subprocess
import shlex
import pipes
import pexpect
import random
import select
import fcntl
import pwd
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' ssh based connections with expect '''
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self.host = self._play_context.remote_addr
self.connection_retry_interval = 60
@property
def transport(self):
''' used to identify this connection object from other classes '''
return 'onie'
# The connection is created by running expect from the exec_command, so we don't
# need to do any connection management here.
def _connect(self):
self._connected = True  # mark as connected without clobbering the _connect method
return self
def _build_command(self):
self._ssh_command = ['ssh', '-tt', '-q']
ansible_ssh_args = C.ANSIBLE_SSH_ARGS
if ansible_ssh_args:
self._ssh_command += shlex.split(ansible_ssh_args)
else:
self._ssh_command += ['-o', 'ControlMaster=auto',
'-o', 'ControlPersist=60s',
'-o', 'ControlPath=/tmp/ansible-ssh-%h-%p-%r']
if not C.HOST_KEY_CHECKING:
self._ssh_command += ['-o', 'StrictHostKeyChecking=no']
self._ssh_command += ['-o', 'UserKnownHostsFile=/dev/null']
self._ssh_command += ['-o', 'GSSAPIAuthentication=no',
'-o', 'PubkeyAuthentication=no']
self._ssh_command += ['-o', 'ConnectTimeout=30']
def _spawn_connect(self):
client = None
cmd = self._ssh_command + ['-l', "root", self.host]
client = pexpect.spawn(' '.join(cmd), env={'TERM': 'dumb'})
client.expect(['#'])
self.before_backup = client.before.split()
return client
def exec_command(self, *args, **kwargs):
self.template = kwargs['template']
if kwargs['host'] is not None:
self.host = kwargs['host']
self.url = kwargs['url']
self.install = kwargs['install']
self.nretry = kwargs['retry']
self._build_command()
client = self._spawn_connect()
# Set command timeout after connection is spawned
if kwargs['timeout']:
client.timeout = int(kwargs['timeout'])
prompts = ["ONIE:.+ #", pexpect.EOF]
stdout = ""
if self.template:
cmds = self.template.split('\n')
else:
cmds = []
for cmd in cmds:
self._display.vvv('> %s' % (cmd), host=self.host)
client.sendline(cmd)
client.expect(prompts)
stdout += client.before
self._display.vvv('< %s' % (client.before), host=self.host)
if self.install:
client.sendline('onie-discovery-stop')
client.expect(prompts)
stdout += client.before
attempt = 0
while attempt < self.nretry:
client.sendline("onie-nos-install %s" % self.url)
i = client.expect(["Installed SONiC base image SONiC-OS successfully"] + prompts)
stdout += client.before
if i == 0:
break
elif i == 1:
attempt += 1
self._display.vvv("Installation fails, retry %d..." % attempt, host=self.host)
else:
raise AnsibleError("Failed to install sonic image. %s" % stdout)
self._display.vvv("SONiC installed.", host=self.host)
# for some platform, e.g., DELL S6000, it will do hard reboot,
# which will not give EOF
client.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=15)
stdout += client.before
self._display.vvv("ONIE Rebooted. %s" % stdout, host=self.host)
return stdout
def put_file(self, in_path, out_path):
pass
def fetch_file(self, in_path, out_path):
pass
def close(self):
self._connected = False
|
122271
|
import collections
import os
import random
from pathlib import Path
import logging
import shutil
from packaging import version
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.backends.cudnn as cudnn
from param import parse_args
# from pretrain_data import get_loader
from pretrain_vcr_data import get_loader
# from pretrain_data_dist import get_loader
from utils import load_state_dict, LossMeter, count_parameters, set_global_logging_level
from dist_utils import reduce_dict
set_global_logging_level(logging.ERROR, ["transformers"])
import wandb
_use_native_amp = False
_use_apex = False
# Check if Pytorch version >= 1.6 to switch between Native AMP and Apex
if version.parse(torch.__version__) < version.parse("1.6"):
from transformers.file_utils import is_apex_available
if is_apex_available():
from apex import amp
_use_apex = True
else:
_use_native_amp = True
from torch.cuda.amp import autocast
from trainer_base import TrainerBase
class Trainer(TrainerBase):
def __init__(self, args, train_loader=None, val_loader=None, test_loader=None, train=True):
super().__init__(
args,
train_loader=train_loader,
val_loader=val_loader,
test_loader=test_loader,
train=train)
if not self.verbose:
set_global_logging_level(logging.ERROR, ["transformers"])
from pretrain_model import VLT5Pretraining, VLBartPretraining
model_kwargs = {}
if 't5' in args.backbone:
model_class = VLT5Pretraining
elif 'bart' in args.backbone:
model_class = VLBartPretraining
config = self.create_config()
self.tokenizer = self.create_tokenizer()
if 'bart' in self.args.tokenizer:
num_added_toks = 0
if config.use_vis_order_embedding:
additional_special_tokens = [f'<extra_id_{i}>' for i in range(100-1, -1, -1)] + \
[f'<vis_extra_id_{i}>' for i in range(100-1, -1, -1)]
special_tokens_dict = {
'additional_special_tokens': additional_special_tokens}
num_added_toks = self.tokenizer.add_special_tokens(
special_tokens_dict)
config.default_obj_order_ids = self.tokenizer.convert_tokens_to_ids(
[f'<vis_extra_id_{i}>' for i in range(100)])
self.model = self.create_model(model_class, config, **model_kwargs)
if 't5' in self.args.tokenizer:
self.model.resize_token_embeddings(self.tokenizer.vocab_size)
elif 'bart' in self.args.tokenizer:
self.model.resize_token_embeddings(
self.model.model.shared.num_embeddings + num_added_toks)
self.model.tokenizer = self.tokenizer
# Load Checkpoint
self.start_epoch = None
if args.load is not None:
ckpt_path = args.load + '.pth'
self.load_checkpoint(ckpt_path)
if self.args.from_scratch:
self.init_weights()
# GPU Options
print(f'Model Launching at GPU {self.args.gpu}')
if self.verbose:
from time import time
start = time()
self.model = self.model.to(args.gpu)
# Optimizer
if train:
self.optim, self.lr_scheduler = self.create_optimizer_and_scheduler()
if self.args.fp16 and _use_native_amp:
self.scaler = torch.cuda.amp.GradScaler()
elif _use_apex:
self.model, self.optim = amp.initialize(
self.model, self.optim, opt_level='O1', verbosity=self.verbose)
if args.multiGPU:
if args.distributed:
self.model = DDP(self.model, device_ids=[args.gpu],
find_unused_parameters=True
)
if self.verbose:
print(f'It took {time() - start:.1f}s')
def train(self):
LOSSES_NAME = self.args.LOSSES_NAME
if self.args.dry:
results = self.evaluate_epoch(epoch=0)
if self.verbose:
loss_meters = [LossMeter() for _ in range(len(LOSSES_NAME))]
best_eval_loss = 9595.
if 't5' in self.args.backbone:
project_name = "VLT5_VCR_Pretrain"
elif 'bart' in self.args.backbone:
project_name = "VLBart_VCR_Pretrain"
wandb.init(project=project_name)
wandb.run.name = self.args.run_name
wandb.config.update(self.args)
wandb.watch(self.model)
src_dir = Path(__file__).resolve().parent
base_path = str(src_dir.parent)
src_dir = str(src_dir)
wandb.save(os.path.join(src_dir + "/*.py"), base_path=base_path)
if self.args.distributed:
dist.barrier()
# n_update = 0
global_step = 0
for epoch in range(self.args.epochs):
if self.start_epoch is not None:
epoch += self.start_epoch
if self.args.distributed:
self.train_loader.sampler.set_epoch(epoch)
# Train
self.model.train()
if self.verbose:
pbar = tqdm(total=len(self.train_loader), ncols=240)
epoch_results = {}
for loss_name in LOSSES_NAME:
epoch_results[loss_name] = 0.
epoch_results[f'{loss_name}_count'] = 0
for step_i, batch in enumerate(self.train_loader):
if self.args.fp16 and _use_native_amp:
with autocast():
if self.args.distributed:
results = self.model.module.train_step(batch)
else:
results = self.model.train_step(batch)
else:
if self.args.distributed:
results = self.model.module.train_step(batch)
else:
results = self.model.train_step(batch)
loss = results['loss']
if self.args.fp16 and _use_native_amp:
self.scaler.scale(loss).backward()
elif self.args.fp16 and _use_apex:
with amp.scale_loss(loss, self.optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
loss = loss.detach()
# Update Parameters
if self.args.clip_grad_norm > 0:
if self.args.fp16 and _use_native_amp:
self.scaler.unscale_(self.optim)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
elif self.args.fp16 and _use_apex:
torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim), self.args.clip_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
if self.args.fp16 and _use_native_amp:
self.scaler.step(self.optim)
self.scaler.update()
else:
self.optim.step()
if self.lr_scheduler:
self.lr_scheduler.step()
# self.model.zero_grad()
for param in self.model.parameters():
param.grad = None
global_step += 1
if self.lr_scheduler:
if version.parse(torch.__version__) >= version.parse("1.4"):
lr = self.lr_scheduler.get_last_lr()[0]
else:
lr = self.lr_scheduler.get_lr()[0]
else:
try:
lr = self.optim.get_lr()[0]
except AttributeError:
lr = self.args.lr
for k, v in results.items():
if k in epoch_results:
if isinstance(v, int):
epoch_results[k] += v
elif isinstance(v, torch.Tensor):
epoch_results[k] += v.item()
if self.verbose:
desc_str = f'Epoch {epoch} | LR {lr:.6f} |'
for i, (loss_name, loss_meter) in enumerate(zip(LOSSES_NAME, loss_meters)):
if loss_name in results:
loss_meter.update(results[f'{loss_name}'] / results[f'{loss_name}_count'])
if len(loss_meter) > 0:
loss_count = epoch_results[f'{loss_name}_count']
desc_str += f' {loss_name} ({loss_count}) {loss_meter.val:.3f}'
pbar.set_description(desc_str)
pbar.update(1)
if self.verbose:
pbar.close()
dist.barrier()
results = reduce_dict(epoch_results, self.args.gpu)
if self.verbose:
train_loss = results['total_loss']
train_loss_count = results['total_loss_count']
avg_train_loss = train_loss / train_loss_count
losses_str = f"Train Loss: {avg_train_loss:.3f}\n"
for name, loss in results.items():
if name[-4:] == 'loss':
loss_count = int(results[name+'_count'])
if loss_count > 0:
avg_loss = loss/loss_count
losses_str += f"{name} ({loss_count}): {avg_loss:.3f} "
wandb.log({f'Train Loss/{name}': avg_loss}, step=epoch)
losses_str += '\n'
print(losses_str)
dist.barrier()
# Validation
valid_results, valid_uid2ans = self.evaluate_epoch(epoch=epoch)
valid_results = reduce_dict(valid_results, self.args.gpu)
if self.verbose:
valid_loss = valid_results['total_loss']
valid_loss_count = valid_results['total_loss_count']
avg_valid_loss = valid_loss / valid_loss_count
losses_str = f"Valid Loss: {avg_valid_loss:.3f}\n"
for name, loss in valid_results.items():
if name[-4:] == 'loss':
loss_count = int(valid_results[name+'_count'])
if loss_count > 0:
avg_loss = loss / loss_count
losses_str += f"{name} ({loss_count}): {avg_loss:.3f} "
wandb.log({f'Valid Loss/{name}': avg_loss}, step=epoch)
losses_str += '\n'
print(losses_str)
dist.barrier()
if self.verbose:
# Save
if avg_valid_loss < best_eval_loss:
best_eval_loss = avg_valid_loss
# self.save("BEST_EVAL_LOSS")
self.save("Epoch%02d" % (epoch + 1))
dist.barrier()
if self.verbose:
wandb.log({'finished': True})
def evaluate_epoch(self, epoch):
LOSSES_NAME = self.args.LOSSES_NAME
epoch_results = {}
for loss_name in LOSSES_NAME:
epoch_results[loss_name] = 0.
epoch_results[f'{loss_name}_count'] = 0
uid2ans = {}
self.model.eval()
with torch.no_grad():
if self.verbose:
loss_meter = LossMeter()
loss_meters = [LossMeter() for _ in range(len(LOSSES_NAME))]
pbar = tqdm(total=len(self.val_loader), ncols=240)
for step_i, batch in enumerate(self.val_loader):
if self.args.distributed:
results = self.model.module.valid_step(batch)
else:
results = self.model.valid_step(batch)
for k, v in results.items():
if k in epoch_results:
if isinstance(v, int):
epoch_results[k] += v
elif isinstance(v, torch.Tensor):
epoch_results[k] += v.item()
if self.verbose:
desc_str = f'Valid Epoch {epoch} |'
for i, (loss_name, loss_meter) in enumerate(zip(LOSSES_NAME, loss_meters)):
if loss_name in results:
loss_meter.update(results[f'{loss_name}'] / results[f'{loss_name}_count'])
if len(loss_meter) > 0:
loss_count = epoch_results[f'{loss_name}_count']
desc_str += f' {loss_name} ({loss_count}) {loss_meter.val:.3f}'
pbar.set_description(desc_str)
pbar.update(1)
dist.barrier()
if self.verbose:
pbar.close()
dist.barrier()
if 'qa' not in self.args.losses:
uid2ans = None
return epoch_results, uid2ans
def main_worker(gpu, args):
# GPU is assigned
args.gpu = gpu
args.rank = gpu
print(f'Process Launching at GPU {gpu}')
if args.distributed:
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl')
# print(f'Building data loader at GPU {gpu}')
print(f'Building train loader at GPU {gpu}')
train_loader = get_loader(
args,
# 'mscoco_minival', mode='train', batch_size=args.batch_size,
split=args.train, mode='train', batch_size=args.batch_size,
distributed=args.distributed, gpu=args.gpu,
workers=args.num_workers,
topk=args.train_topk,)
print(f'Building val loader at GPU {gpu}')
val_loader = get_loader(
args,
split=args.valid, mode='val', batch_size=args.batch_size,
distributed=args.distributed, gpu=args.gpu,
workers=args.num_workers,
topk=args.valid_topk,)
test_loader = None
trainer = Trainer(args, train_loader, val_loader, test_loader, train=True)
trainer.train()
if __name__ == "__main__":
cudnn.benchmark = True
args = parse_args()
ngpus_per_node = torch.cuda.device_count()
args.world_size = ngpus_per_node
LOSSES_NAME = [f'{name}_loss' for name in args.losses.split(',')]
if args.local_rank in [0, -1]:
print(LOSSES_NAME)
LOSSES_NAME.append('total_loss') # total loss
args.LOSSES_NAME = LOSSES_NAME
if args.local_rank in [0, -1]:
comments = []
if args.backbone:
comments.append(args.backbone)
comments.append(''.join(args.losses.split(',')))
if args.comment != '':
comments.append(args.comment)
comment = '_'.join(comments)
from datetime import datetime
current_time = datetime.now().strftime('%b%d_%H-%M')
run_name = f'{current_time}_GPU{args.world_size}'
if len(comments) > 0:
run_name += f'_{comment}'
args.run_name = run_name
if args.distributed:
main_worker(args.local_rank, args)
|
122342
|
import pickle
from collections import Counter
from itertools import chain
import numpy as np
from data.dataimport import import_data
from data.featuredict import FeatureDictionary
from encoders.baseencoder import AbstractEncoder
class TfidfEncoder(AbstractEncoder):
def decoder_loss(self, data: tuple, representation: np.array) -> float:
raise Exception("TfidfEncoder has no decoder loss")
def get_representation_vector_size(self) -> int:
return len(self.__feature_dict)
def get_encoding(self, data: tuple) -> np.array:
document_tokens = Counter(self.__feature_dict.get_id_or_unk(t) for t in data[0])
vect = np.zeros(len(self.__feature_dict), dtype=float)  # np.float alias removed in NumPy 1.24+
for word_id, count in document_tokens.items():
vect[word_id] = count * self.__idfs[word_id]
vect /= len(data[0])
return vect
def __init__(self, train_file):
data = import_data(train_file)
def document_tokens():
for snippet in data.values():
yield snippet['original'][0]
all_document_tokens = [s for s in document_tokens()]
self.__feature_dict = FeatureDictionary.get_feature_dictionary_for(chain(*all_document_tokens),
count_threshold=10)
self.__idfs = np.ones(len(self.__feature_dict), dtype=int)  # use 1s for smoothing
for document in all_document_tokens:
document_word_ids = set(self.__feature_dict.get_id_or_unk(t) for t in document)
for word_id in document_word_ids:
self.__idfs[word_id] += 1
self.__idfs = np.log(self.__idfs.astype(float))
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
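# A minimal, self-contained sketch of the same smoothed TF-IDF scheme (illustrative only;
# it uses a plain dict instead of the project's FeatureDictionary and omits the UNK and
# count-threshold handling used above):
#
# import numpy as np
# docs = [["return", "x"], ["return", "y", "y"]]
# vocab = {t: i for i, t in enumerate(sorted({t for d in docs for t in d}))}
# idf = np.ones(len(vocab))                     # 1s for smoothing, as in __init__
# for d in docs:
#     for t in set(d):
#         idf[vocab[t]] += 1
# idf = np.log(idf)
# def encode(doc):
#     v = np.zeros(len(vocab))
#     for t in doc:
#         v[vocab[t]] += idf[vocab[t]]          # equivalent to count * idf per word
#     return v / len(doc)                       # normalize by document length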
|
122396
|
from django.utils import timezone
from datetime import timedelta
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from ..models import Duo
from ..app_settings import NewDuoSerializer, ActivateDuoSerializer, DeleteDuoSerializer
from ..utils import encrypt_with_db_secret
from ..authentication import TokenAuthentication
class UserDuo(GenericAPIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
serializer_class = NewDuoSerializer
allowed_methods = ('GET', 'PUT', 'DELETE', 'OPTIONS', 'HEAD')
def get(self, request, *args, **kwargs):
"""
Checks the REST Token and returns a list of a all duo
:param request:
:type request:
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: 200
:rtype:
"""
duos = []
for duo in Duo.objects.filter(user=request.user).all():
duos.append({
'id': duo.id,
'active': duo.active,
'title': duo.title,
})
return Response({
"duos": duos
},
status=status.HTTP_200_OK)
def put(self, request, *args, **kwargs):
"""
Checks the REST Token and sets a new duo for multifactor authentication
:param request:
:type request:
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: 201 / 400
:rtype:
"""
serializer = NewDuoSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
title = serializer.validated_data.get('title')
use_system_wide_duo = serializer.validated_data.get('use_system_wide_duo')
duo_integration_key = serializer.validated_data.get('integration_key')
duo_secret_key = serializer.validated_data.get('secret_key')
duo_host = serializer.validated_data.get('host')
enrollment_user_id = serializer.validated_data.get('enrollment_user_id')
enrollment_activation_code = serializer.validated_data.get('enrollment_activation_code')
validity_in_seconds = serializer.validated_data.get('validity_in_seconds')
if use_system_wide_duo:
new_duo = Duo.objects.create(
user = request.user,
title = 'System wide',
duo_integration_key = '',
duo_secret_key = encrypt_with_db_secret(''),
duo_host = '',
enrollment_user_id = enrollment_user_id,
enrollment_activation_code = enrollment_activation_code,
enrollment_expiration_date = timezone.now() + timedelta(seconds=validity_in_seconds),
active=False
)
else:
new_duo = Duo.objects.create(
user = request.user,
title = title,
duo_integration_key = duo_integration_key,
duo_secret_key = encrypt_with_db_secret(duo_secret_key),
duo_host = duo_host,
enrollment_user_id = enrollment_user_id,
enrollment_activation_code = enrollment_activation_code,
enrollment_expiration_date = timezone.now() + timedelta(seconds=validity_in_seconds),
active=False
)
return Response({
"id": new_duo.id,
"activation_code": new_duo.enrollment_activation_code,
},
status=status.HTTP_201_CREATED)
def post(self, request, *args, **kwargs):
"""
Validates a duo and activates it
:param request:
:type request:
:param args:
:type args:
:param kwargs:
:type kwargs:
        :return: 200 / 400
:rtype:
"""
serializer = ActivateDuoSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
duo = serializer.validated_data.get('duo')
        # activate it
duo.active = True
duo.save()
request.user.duo_enabled = True
request.user.save()
return Response(status=status.HTTP_200_OK)
def delete(self, request, *args, **kwargs):
"""
Deletes a duo
:param request:
:param args:
:param kwargs:
:return: 200 / 400
"""
serializer = DeleteDuoSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
duo = serializer.validated_data.get('duo')
duo_count = serializer.validated_data.get('duo_count')
# Update the user attribute if we only had 1 duo
if duo_count < 2 and duo.active:
request.user.duo_enabled = False
request.user.save()
# delete it
duo.delete()
return Response(status=status.HTTP_200_OK)
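
# --- Usage sketch (not part of the original module) ---
# A hedged example of exercising the GET handler directly with DRF's test
# utilities. The request path is hypothetical and only used for routing
# metadata; the project's custom IsAuthenticated permission may also require a
# token, which force_authenticate() can supply via its token argument.
def list_duos_for(user):
    from rest_framework.test import APIRequestFactory, force_authenticate
    factory = APIRequestFactory()
    request = factory.get('/user/duo/')  # hypothetical path
    force_authenticate(request, user=user)  # bypasses TokenAuthentication for this sketch
    response = UserDuo.as_view()(request)
    return response.data['duos']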
|
122408
|
from tenable_io.api.base import BaseApi, BaseRequest
from tenable_io.api.models import BulkOpTask
class BulkOperationsApi(BaseApi):
def bulk_add_agent(self, group_id, bulk_add_agent, scanner_id=1):
"""Creates a bulk operation task to add agents to a group.
:param group_id: The agent group ID.
        :param bulk_add_agent: An instance of :class:`BulkOpAddAgentRequest`.
        :param scanner_id: The scanner ID.
        :raise TenableIOApiException: When an API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.post('scanners/%(scanner_id)s/agent-groups/%(group_id)s/agents/_bulk/add',
bulk_add_agent,
path_params={
'scanner_id': scanner_id,
'group_id': group_id
})
return BulkOpTask.from_json(response.text)
def bulk_remove_agent(self, group_id, bulk_remove_agent, scanner_id=1):
"""Create a bulk operation task to remove agents from a group.
:param group_id: The agent group ID.
        :param bulk_remove_agent: An instance of :class:`BulkOpRemoveAgentRequest`.
        :param scanner_id: The scanner ID.
        :raise TenableIOApiException: When an API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.post('scanners/%(scanner_id)s/agent-groups/%(group_id)s/agents/_bulk/remove',
bulk_remove_agent,
path_params={
'scanner_id': scanner_id,
'group_id': group_id
})
return BulkOpTask.from_json(response.text)
def bulk_unlink_agent(self, bulk_unlink_agent, scanner_id=1):
"""Creates a bulk operation task to unlink (delete) agents.
        :param bulk_unlink_agent: An instance of :class:`BulkOpUnlinkAgentRequest`.
        :param scanner_id: The scanner ID.
        :raise TenableIOApiException: When an API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.post('scanners/%(scanner_id)s/agents/_bulk/unlink',
bulk_unlink_agent,
path_params={
'scanner_id': scanner_id,
})
return BulkOpTask.from_json(response.text)
def bulk_agent_group_status(self, group_id, task_uuid, scanner_id=1):
"""Check the status of a bulk operation on an agent group.
:param group_id: The agent group ID.
:param task_uuid: The uuid of the task.
:param scanner_id: The scanner ID.
        :raise TenableIOApiException: When an API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.get('scanners/%(scanner_id)s/agent-groups/%(group_id)s/agents/_bulk/%(task_uuid)s',
path_params={
'scanner_id': scanner_id,
'group_id': group_id,
'task_uuid': task_uuid
})
return BulkOpTask.from_json(response.text)
def bulk_agent_status(self, task_uuid, scanner_id=1):
"""Check the status of a bulk operation on an agent.
:param task_uuid: The uuid of the task.
:param scanner_id: The scanner ID.
        :raise TenableIOApiException: When an API error is encountered.
:return: An instance of :class:`tenable_io.api.models.BulkOpTask`.
"""
response = self._client.get('scanners/%(scanner_id)s/agents/_bulk/%(task_uuid)s',
path_params={
'scanner_id': scanner_id,
'task_uuid': task_uuid
})
return BulkOpTask.from_json(response.text)
class BulkOpAddAgentRequest(BaseRequest):
def __init__(
self,
items=None
):
"""Request for BulkOperationsApi.bulk_add_agent.
:param items: list of agent ids or uuids to add to the group.
:type items: list[int].
"""
self.items = items
class BulkOpRemoveAgentRequest(BaseRequest):
def __init__(
self,
items=None
):
"""Request for BulkOperationsApi.bulk_remove_agent.
        :param items: list of agent ids or uuids to remove from the group.
:type items: list[int].
"""
self.items = items
class BulkOpUnlinkAgentRequest(BaseRequest):
def __init__(
self,
items=None
):
"""Request for BulkOperationsApi.bulk_unlink_agent.
        :param items: list of agent ids or uuids to unlink.
:type items: list[int].
"""
self.items = items
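
# --- Usage sketch (not part of the original module) ---
# A hedged example of starting a bulk "add agents to a group" task. The
# TenableIOClient construction and its key values are assumptions, and the
# group and agent ids are placeholders.
if __name__ == '__main__':
    from tenable_io.client import TenableIOClient

    client = TenableIOClient(access_key='ACCESS_KEY', secret_key='SECRET_KEY')
    bulk_api = BulkOperationsApi(client)
    task = bulk_api.bulk_add_agent(group_id=42,
                                   bulk_add_agent=BulkOpAddAgentRequest(items=[101, 102]))
    # `task` is a tenable_io.api.models.BulkOpTask; its task identifier can be
    # passed to bulk_agent_group_status() to poll the operation until it completes.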
|