| code (string, length 22-1.05M) | apis (list, length 1-3.31k) | extract_api (string, length 75-3.25M) |
|---|---|---|
import io
import torch
from torchvision import ops
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from collections import OrderedDict
# onnxruntime requires python 3.5 or above
try:
import onnxruntime
except ImportError:
onnxruntime = None
import unittest
@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable')
class ONNXExporterTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
torch.manual_seed(123)
def run_model(self, model, inputs_list):
model.eval()
onnx_io = io.BytesIO()
# export to onnx with the first input
torch.onnx.export(model, inputs_list[0], onnx_io, do_constant_folding=True, opset_version=10)
# validate the exported model with onnx runtime
for test_inputs in inputs_list:
with torch.no_grad():
if isinstance(test_inputs, torch.Tensor) or \
isinstance(test_inputs, list):
test_inputs = (test_inputs,)
                test_outputs = model(*test_inputs)
                if isinstance(test_outputs, torch.Tensor):
                    test_outputs = (test_outputs,)
                self.ort_validate(onnx_io, test_inputs, test_outputs)
def ort_validate(self, onnx_io, inputs, outputs):
inputs, _ = torch.jit._flatten(inputs)
outputs, _ = torch.jit._flatten(outputs)
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
else:
return tensor.cpu().numpy()
inputs = list(map(to_numpy, inputs))
outputs = list(map(to_numpy, outputs))
ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
# compute onnxruntime output prediction
ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))
ort_outs = ort_session.run(None, ort_inputs)
for i in range(0, len(outputs)):
torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)
def test_nms(self):
boxes = torch.rand(5, 4)
boxes[:, 2:] += torch.rand(5, 2)
scores = torch.randn(5)
class Module(torch.nn.Module):
def forward(self, boxes, scores):
return ops.nms(boxes, scores, 0.5)
self.run_model(Module(), [(boxes, scores)])
def test_roi_align(self):
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
model = ops.RoIAlign((5, 5), 1, 2)
self.run_model(model, [(x, single_roi)])
def test_roi_pool(self):
x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
pool_h = 5
pool_w = 5
model = ops.RoIPool((pool_h, pool_w), 2)
self.run_model(model, [(x, rois)])
@unittest.skip("Disable test until Resize opset 11 is implemented in ONNX Runtime")
def test_transform_images(self):
class TransformModule(torch.nn.Module):
def __init__(self_module):
super(TransformModule, self_module).__init__()
min_size = 800
max_size = 1333
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
self_module.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
def forward(self_module, images):
return self_module.transform(images)[0].tensors
input = [torch.rand(3, 800, 1280), torch.rand(3, 800, 800)]
input_test = [torch.rand(3, 800, 1280), torch.rand(3, 800, 800)]
self.run_model(TransformModule(), [input, input_test])
def test_multi_scale_roi_align(self):
class TransformModule(torch.nn.Module):
def __init__(self):
super(TransformModule, self).__init__()
self.model = ops.MultiScaleRoIAlign(['feat1', 'feat2'], 3, 2)
self.image_sizes = [(512, 512)]
def forward(self, input, boxes):
return self.model(input, boxes, self.image_sizes)
i = OrderedDict()
i['feat1'] = torch.rand(1, 5, 64, 64)
i['feat2'] = torch.rand(1, 5, 16, 16)
boxes = torch.rand(6, 4) * 256
boxes[:, 2:] += boxes[:, :2]
i1 = OrderedDict()
i1['feat1'] = torch.rand(1, 5, 64, 64)
i1['feat2'] = torch.rand(1, 5, 16, 16)
boxes1 = torch.rand(6, 4) * 256
boxes1[:, 2:] += boxes1[:, :2]
self.run_model(TransformModule(), [(i, [boxes],), (i1, [boxes1],)])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"unittest.skipIf",
"io.BytesIO",
"torch.jit._flatten",
"torchvision.ops.RoIAlign",
"torchvision.ops.RoIPool",
"torch.testing.assert_allclose",
"torch.manual_seed",
"torchvision.ops.MultiScaleRoIAlign",
"torch.randn",
"torchvision.models.detection.transform.GeneralizedRCNNTransform",
"unittest.skip",
"torch.rand",
"collections.OrderedDict",
"torchvision.ops.nms",
"torch.no_grad",
"torch.onnx.export",
"torch.tensor"
] |
[((299, 363), 'unittest.skipIf', 'unittest.skipIf', (['(onnxruntime is None)', '"""ONNX Runtime unavailable"""'], {}), "(onnxruntime is None, 'ONNX Runtime unavailable')\n", (314, 363), False, 'import unittest\n'), ((2946, 3033), 'unittest.skip', 'unittest.skip', (['"""Disable test until Resize opset 11 is implemented in ONNX Runtime"""'], {}), "(\n 'Disable test until Resize opset 11 is implemented in ONNX Runtime')\n", (2959, 3033), False, 'import unittest\n'), ((4729, 4744), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4742, 4744), False, 'import unittest\n'), ((459, 481), 'torch.manual_seed', 'torch.manual_seed', (['(123)'], {}), '(123)\n', (476, 481), False, 'import torch\n'), ((568, 580), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (578, 580), False, 'import io\n'), ((635, 732), 'torch.onnx.export', 'torch.onnx.export', (['model', 'inputs_list[0]', 'onnx_io'], {'do_constant_folding': '(True)', 'opset_version': '(10)'}), '(model, inputs_list[0], onnx_io, do_constant_folding=True,\n opset_version=10)\n', (652, 732), False, 'import torch\n'), ((1319, 1345), 'torch.jit._flatten', 'torch.jit._flatten', (['inputs'], {}), '(inputs)\n', (1337, 1345), False, 'import torch\n'), ((1367, 1394), 'torch.jit._flatten', 'torch.jit._flatten', (['outputs'], {}), '(outputs)\n', (1385, 1394), False, 'import torch\n'), ((2119, 2135), 'torch.rand', 'torch.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (2129, 2135), False, 'import torch\n'), ((2160, 2176), 'torch.rand', 'torch.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (2170, 2176), False, 'import torch\n'), ((2194, 2208), 'torch.randn', 'torch.randn', (['(5)'], {}), '(5)\n', (2205, 2208), False, 'import torch\n'), ((2442, 2487), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(10)', '(10)'], {'dtype': 'torch.float32'}), '(1, 1, 10, 10, dtype=torch.float32)\n', (2452, 2487), False, 'import torch\n'), ((2509, 2561), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 4, 4]]'], {'dtype': 'torch.float32'}), '([[0, 0, 0, 4, 4]], dtype=torch.float32)\n', (2521, 2561), False, 'import torch\n'), ((2578, 2604), 'torchvision.ops.RoIAlign', 'ops.RoIAlign', (['(5, 5)', '(1)', '(2)'], {}), '((5, 5), 1, 2)\n', (2590, 2604), False, 'from torchvision import ops\n'), ((2696, 2741), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(10)', '(10)'], {'dtype': 'torch.float32'}), '(1, 1, 10, 10, dtype=torch.float32)\n', (2706, 2741), False, 'import torch\n'), ((2757, 2809), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 4, 4]]'], {'dtype': 'torch.float32'}), '([[0, 0, 0, 4, 4]], dtype=torch.float32)\n', (2769, 2809), False, 'import torch\n'), ((2864, 2896), 'torchvision.ops.RoIPool', 'ops.RoIPool', (['(pool_h, pool_w)', '(2)'], {}), '((pool_h, pool_w), 2)\n', (2875, 2896), False, 'from torchvision import ops\n'), ((4236, 4249), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4247, 4249), False, 'from collections import OrderedDict\n'), ((4271, 4295), 'torch.rand', 'torch.rand', (['(1)', '(5)', '(64)', '(64)'], {}), '(1, 5, 64, 64)\n', (4281, 4295), False, 'import torch\n'), ((4317, 4341), 'torch.rand', 'torch.rand', (['(1)', '(5)', '(16)', '(16)'], {}), '(1, 5, 16, 16)\n', (4327, 4341), False, 'import torch\n'), ((4432, 4445), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4443, 4445), False, 'from collections import OrderedDict\n'), ((4468, 4492), 'torch.rand', 'torch.rand', (['(1)', '(5)', '(64)', '(64)'], {}), '(1, 5, 64, 64)\n', (4478, 4492), False, 'import torch\n'), ((4515, 4539), 'torch.rand', 'torch.rand', (['(1)', '(5)', '(16)', '(16)'], {}), '(1, 5, 16, 
16)\n', (4525, 4539), False, 'import torch\n'), ((1999, 2077), 'torch.testing.assert_allclose', 'torch.testing.assert_allclose', (['outputs[i]', 'ort_outs[i]'], {'rtol': '(0.001)', 'atol': '(1e-05)'}), '(outputs[i], ort_outs[i], rtol=0.001, atol=1e-05)\n', (2028, 2077), False, 'import torch\n'), ((3618, 3642), 'torch.rand', 'torch.rand', (['(3)', '(800)', '(1280)'], {}), '(3, 800, 1280)\n', (3628, 3642), False, 'import torch\n'), ((3644, 3667), 'torch.rand', 'torch.rand', (['(3)', '(800)', '(800)'], {}), '(3, 800, 800)\n', (3654, 3667), False, 'import torch\n'), ((3691, 3715), 'torch.rand', 'torch.rand', (['(3)', '(800)', '(1280)'], {}), '(3, 800, 1280)\n', (3701, 3715), False, 'import torch\n'), ((3717, 3740), 'torch.rand', 'torch.rand', (['(3)', '(800)', '(800)'], {}), '(3, 800, 800)\n', (3727, 3740), False, 'import torch\n'), ((4358, 4374), 'torch.rand', 'torch.rand', (['(6)', '(4)'], {}), '(6, 4)\n', (4368, 4374), False, 'import torch\n'), ((4557, 4573), 'torch.rand', 'torch.rand', (['(6)', '(4)'], {}), '(6, 4)\n', (4567, 4573), False, 'import torch\n'), ((843, 858), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (856, 858), False, 'import torch\n'), ((2318, 2345), 'torchvision.ops.nms', 'ops.nms', (['boxes', 'scores', '(0.5)'], {}), '(boxes, scores, 0.5)\n', (2325, 2345), False, 'from torchvision import ops\n'), ((3421, 3488), 'torchvision.models.detection.transform.GeneralizedRCNNTransform', 'GeneralizedRCNNTransform', (['min_size', 'max_size', 'image_mean', 'image_std'], {}), '(min_size, max_size, image_mean, image_std)\n', (3445, 3488), False, 'from torchvision.models.detection.transform import GeneralizedRCNNTransform\n'), ((4014, 4062), 'torchvision.ops.MultiScaleRoIAlign', 'ops.MultiScaleRoIAlign', (["['feat1', 'feat2']", '(3)', '(2)'], {}), "(['feat1', 'feat2'], 3, 2)\n", (4036, 4062), False, 'from torchvision import ops\n')]
|
from cloudify.decorators import workflow
from cloudify.workflows import ctx
import re
from fabric.api import run,env
#
# Run an image on the cluster pointed to by the master arg
#
@workflow
def kube_run(**kwargs):
setfabenv(kwargs)
optstr=buildopts(kwargs,{"dry_run":"dry-run"},{"port":"not _val_ == -1"},["dry_run"],['name','master'])
ctx.logger.info("Running: {}".format(optstr))
run("./kubectl -s http://localhost:8080 run "+" "+kwargs['name']+optstr)
#
# Expose an app
#
@workflow
def kube_expose(**kwargs):
setfabenv(kwargs)
optstr=buildopts(kwargs,{"target_port":"target-port","service_name":"service-name"},{"target_port":"not _val_ == -1"},[],['name','master','resource'])
runstr="./kubectl -s http://localhost:8080 expose {} {} {}".format(kwargs['resource'],kwargs['name'],optstr)
ctx.logger.info("Running: {}".format(runstr))
run(runstr)
#
# Stop a resource (by name)
#
@workflow
def kube_stop(**kwargs):
setfabenv(kwargs)
optstr=buildopts(kwargs,{},{},["all"],['name','master','resource'])
runstr="./kubectl -s http://localhost:8080 stop {} {} {}".format(kwargs['resource'],kwargs['name'],optstr)
ctx.logger.info("Running: {}".format(runstr))
run(runstr)
#
# Delete a resource (by name)
#
@workflow
def kube_delete(**kwargs):
setfabenv(kwargs)
optstr=buildopts(kwargs,{},{},["all"],['name','master','resource'])
runstr="./kubectl -s http://localhost:8080 delete {} {} {}".format(kwargs['resource'],kwargs['name'],optstr)
ctx.logger.info("Running: {}".format(runstr))
run(runstr)
##################################################
#
# UTILITY
#
##################################################
# Construct the fabric environment from the supplied master
# node in kwargs
def setfabenv(kwargs):
master=get_ip(kwargs['master'])
masternode=ctx.get_node(kwargs['master'])
url='http://'+master
fabenv={}
fabenv['user']=masternode.properties['ssh_username']
fabenv['password']=masternode.properties['ssh_password']
fabenv['key_filename']=masternode.properties['ssh_keyfilename']
fabenv['host_string']=masternode.properties['ssh_username']+'@'+masternode.properties['ip']
fabenv['port']=masternode.properties['ssh_port']
env.update(fabenv)
# utility class to process options in the form
# specific to kubectl
class Option(object):
def __init__(self,arg,val,cond=None,option_name=None):
self._arg=arg
self._option_name=option_name
self._cond=cond
self._val=val
def __str__(self):
if(self._cond):
_val_=self._val
if(not eval(self._cond)):
return ''
return "--"+(self._option_name or self._arg)+"="+str(self._val)
def buildopts(kwargs,namedict={},conddict={},flags=[],ignore=[]):
outstr=''
for k,v in kwargs.iteritems():
if(k.startswith('_') or k=='ctx' or k in ignore):
continue
if(not v):
continue
if(k in conddict):
_val_=v
if(not eval(conddict[k])):
continue
if(k in namedict):
outstr=outstr+" --"+namedict[k]
else:
outstr=outstr+" --"+k
if(not k in flags):
outstr=outstr+"="+str(v)
return outstr
def get_ip(master):
if(ctx.local):
return ctx.get_node(master).properties['ip']
else:
        raise NotImplementedError('not implemented') # need to get default instance in cloud case
|
[
"fabric.api.env.update",
"fabric.api.run",
"cloudify.workflows.ctx.get_node"
] |
[((392, 470), 'fabric.api.run', 'run', (["('./kubectl -s http://localhost:8080 run ' + ' ' + kwargs['name'] + optstr)"], {}), "('./kubectl -s http://localhost:8080 run ' + ' ' + kwargs['name'] + optstr)\n", (395, 470), False, 'from fabric.api import run, env\n'), ((857, 868), 'fabric.api.run', 'run', (['runstr'], {}), '(runstr)\n', (860, 868), False, 'from fabric.api import run, env\n'), ((1188, 1199), 'fabric.api.run', 'run', (['runstr'], {}), '(runstr)\n', (1191, 1199), False, 'from fabric.api import run, env\n'), ((1525, 1536), 'fabric.api.run', 'run', (['runstr'], {}), '(runstr)\n', (1528, 1536), False, 'from fabric.api import run, env\n'), ((1802, 1832), 'cloudify.workflows.ctx.get_node', 'ctx.get_node', (["kwargs['master']"], {}), "(kwargs['master'])\n", (1814, 1832), False, 'from cloudify.workflows import ctx\n'), ((2195, 2213), 'fabric.api.env.update', 'env.update', (['fabenv'], {}), '(fabenv)\n', (2205, 2213), False, 'from fabric.api import run, env\n'), ((3153, 3173), 'cloudify.workflows.ctx.get_node', 'ctx.get_node', (['master'], {}), '(master)\n', (3165, 3173), False, 'from cloudify.workflows import ctx\n')]
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import IndexLocator, FormatStrFormatter
'''
<NAME>, Dec 2015
Plotting routine for initial 'test' runs of ELC.
It will make a plot that has both light curve data w/fit and RV data w/fit.
There are also residuals in the plots!
(Strongly based on ELCplotter_unfold.py)
***IMPORTANT***
This version assumes the files above are NOT yet folded in phase, and are in time.
This would happen if you are using ELCgap.inp, or anytime when ELC.inp has itime = 2.
So we need to fold them.
(If you want to plot already-folded data, use ELCplotter_new.py)
***ALSO IMPORTANT***
This version assumes you haven't run demcmcELC yet, but just ELC, to get an initial
clue whether or not your input parameters are half decent.
In other words, it doesn't need .fold files or fitparm.all, but it does need ELC.out.
'''
# Colors for plots. Selected with help from colorbrewer.
red = '#e34a33' # red, star 1
yel = '#fdbb84' # yellow, star 2
# Columns in fitparm file that correspond to T0 and Period
#tconj_col = 0
#porb_col = 15
# Read in everything
f1 = 'modelU.mag'
#f2 = 'ELCdataU.fold'
f3 = 'star1.RV'
f4 = 'star2.RV'
ELCoutfile = 'ELC.out'
gridloop = 'gridloop.opt'
#f5 = 'ELCdataRV1.fold'
#f6 = 'ELCdataRV2.fold'
#fitparm = 'fitparm.all'
# OPTIONAL ADJUSTMENT B/C FINAL ELC RV MODEL OUTPUT IS SHIFTED BY GAMMA
#gamma = 0
gamma = float(input("Enter gamma adjustment (0 for none): "))
phase_mod,mag_mod = np.loadtxt(f1, comments='#', dtype=np.float64, usecols=(0,1), unpack=True)
#phase_dat,mag_dat = np.loadtxt(f2, comments='#', dtype=np.float64, usecols=(0,1), unpack=True)
phase_rv1,rv1 = np.loadtxt(f3, comments='#', dtype=np.float64, usecols=(0,1), unpack=True)
phase_rv2,rv2 = np.loadtxt(f4, comments='#', dtype=np.float64, usecols=(0,1), unpack=True)
#phase_rv1dat,rv1dat,rv1err = np.loadtxt(f5, comments='#', dtype=np.float64, usecols=(0,1,2), unpack=True)
#phase_rv2dat,rv2dat,rv2err = np.loadtxt(f6, comments='#', dtype=np.float64, usecols=(0,1,2), unpack=True)
# FUNCTION TO FOLD STUFF so phases are actually phases ... and then sort all the arrays.
def phasecalc(times, period=100, BJD0=2454833):
phases = []
#cycles = []
for i in range(0, len(times)):
fracP = (times[i] - BJD0) / period
if fracP < 0:
phases.append(fracP % 1)
#cycles.append(int(fracP))
else:
phases.append(fracP % 1)
#cycles.append(int(fracP) + 1)
#print(fracP, phases[i])
return np.array(phases)
# GET PERIOD AND T0 from ELC.out file
with open(ELCoutfile) as f:
for i, row in enumerate(f):
if i == 27: # 28th row
columns = row.split()
period = float(columns[0]) # 1st column
#if i == 38: # 39th row, i.e. T0 # this one has a funny zeropoint (ok if circular)
if i == 133: # 134th row, i.e. Tconj # this one puts primary eclipse at phase 0
columns = row.split()
Tconj = float(columns[0]) #1st column
#periods, tconjs = np.loadtxt(fitparm, usecols=(porb_col, tconj_col), unpack=True)
#period = np.median(periods)
#Tconj = np.median(tconjs)
print(period, Tconj)
Tconj = Tconj + 0.5*period
with open(gridloop) as f:
for i, row in enumerate(f):
if i == 0:
LCinfile = row.split()[0]
if i == 8:
RV1infile = row.split()[0]
if i == 9:
RV2infile = row.split()[0]
# Read in observed times, magnitudes, and RVs (calling time 'phase' but that's a lie)
phase_dat,mag_dat = np.loadtxt(LCinfile, comments='#', dtype=np.float64, usecols=(0,1), unpack=True)
phase_rv1dat,rv1dat,rv1err = np.loadtxt(RV1infile, comments='#', dtype=np.float64, usecols=(0,1,2), unpack=True)
phase_rv2dat,rv2dat,rv2err = np.loadtxt(RV2infile, comments='#', dtype=np.float64, usecols=(0,1,2), unpack=True)
# Fold everything (observations and model)
phase_mod = phasecalc(phase_mod, period=period, BJD0=Tconj)
phase_dat = phasecalc(phase_dat, period=period, BJD0=Tconj)
phase_rv1 = phasecalc(phase_rv1, period=period, BJD0=Tconj)
phase_rv2 = phasecalc(phase_rv2, period=period, BJD0=Tconj)
phase_rv1dat = phasecalc(phase_rv1dat, period=period, BJD0=Tconj)
phase_rv2dat = phasecalc(phase_rv2dat, period=period, BJD0=Tconj)
p1 = phase_mod.argsort()
p2 = phase_dat.argsort()
p3 = phase_rv1.argsort()
p4 = phase_rv2.argsort()
p5 = phase_rv1dat.argsort()
p6 = phase_rv2dat.argsort()
phase_mod = phase_mod[p1]
phase_dat = phase_dat[p2]
phase_rv1 = phase_rv1[p3]
phase_rv2 = phase_rv2[p4]
phase_rv1dat = phase_rv1dat[p5]
phase_rv2dat = phase_rv2dat[p6]
mag_mod = mag_mod[p1]
mag_dat = mag_dat[p2]
rv1 = rv1[p3]
rv2 = rv2[p4]
rv1dat = rv1dat[p5]
rv2dat = rv2dat[p6]
# OPTIONAL ADJUSTMENT B/C FINAL ELC RV MODEL OUTPUT IS SHIFTED BY GAMMA
#gamma = input("Enter gamma adjustment (0 for none): ")
rv1 = rv1 + gamma
rv2 = rv2 + gamma
print ("Done reading (and folding) data!")
if np.abs(np.median(mag_mod) - np.median(mag_dat)) > 1:
print('Adjusting magnitude of model light curve...')
mag_mod = mag_mod + (np.median(mag_dat) - np.median(mag_mod))
# Interpolate model onto data phase grid, for residuals
newmag_model = np.interp(phase_dat, phase_mod, mag_mod)
newrv1 = np.interp(phase_rv1dat, phase_rv1, rv1)
newrv2 = np.interp(phase_rv2dat, phase_rv2, rv2)
lcresid = mag_dat - newmag_model
rv1resid = rv1dat - newrv1
rv2resid = rv2dat - newrv2
print ("Done interpolating!")
# Make plots
# First, define some handy global parameters for the plots
phasemin = 0
phasemax = 1
magdim = np.max(mag_dat) + 0.02 #11.97 # remember magnitudes are backwards, dangit
magbright = np.min(mag_dat) - 0.02 #11.861
rvmin = np.min([np.min(rv1dat), np.min(rv2dat)]) - 5 #-79
rvmax = np.max([np.max(rv1dat), np.max(rv2dat)]) + 5 #-1
primary_phasemin = 0.48 #0.09 #0.48
primary_phasemax = 0.52 #0.14 #0.52
secondary_phasemin = 0.98 #0.881
secondary_phasemax = 1.01 #0.921
magresid_min = 0.006 # remember magnitudes are backwards, dangit
magresid_max = -0.006
rvresid_min = -5
rvresid_max = 5
# Light curve
ax1 = plt.subplot2grid((12,1),(4,0), rowspan=3)
plt.axis([phasemin, phasemax, magdim, magbright])
plt.tick_params(axis='both', which='major')
plt.plot(phase_dat, mag_dat, color=red, marker='.', ls='None', ms=6, mew=0) #lc data
plt.plot(phase_mod, mag_mod, 'k', lw=1.5, label='ELC Model') #lc model
ax1.set_ylabel('Magnitude', size=18)
ax1.set_xticklabels([])
# Radial velocities
ax2 = plt.subplot2grid((12,1),(1,0), rowspan=3)
plt.subplots_adjust(wspace = 0.0001, hspace=0.0001)
plt.axis([phasemin, phasemax, rvmin, rvmax])
plt.errorbar(phase_rv1dat, rv1dat, yerr=rv1err, marker='o', color=yel, ms=9, mec='None', ls='None') #rv1 data
plt.errorbar(phase_rv2dat, rv2dat, yerr=rv2err, marker='o', color=red, ms=9, mec='None', ls='None') #rv2 data
plt.plot(phase_rv1, rv1, color='k', lw=1.5) #rv1 model
plt.plot(phase_rv2, rv2, color='k', lw=1.5) #rv2 model
ax2.set_ylabel('Radial Velocity (km s$^{-1}$)', size=18)
ax2.set_xticklabels([])
# Light curve residuals
axr1 = plt.subplot2grid((12,1),(7,0))
axr1.axis([phasemin, phasemax, magresid_min, magresid_max])
axr1.set_yticks([-0.004, 0, 0.004])
plt.axhline(y=0, xmin=phasemin, xmax=phasemax, color='0.75', ls=':')
plt.plot(phase_dat, lcresid, color=red, marker='.', ls='None', ms=4, mew=0) #lc residual
# Radial velocity residuals
axr2 = plt.subplot2grid((12,1),(0,0))
axr2.axis([phasemin, phasemax, rvresid_min, rvresid_max])
#axr2.set_yticks([-2,0,2])
plt.axhline(y=0, xmin=phasemin, xmax=phasemax, color='0.75', ls=':')
plt.errorbar(phase_rv1dat, rv1resid, yerr=rv1err, marker='o', color=yel, ms=9, mec='None', ls='None') #rv1 residual
plt.errorbar(phase_rv2dat, rv2resid, yerr=rv2err, marker='o', color=red, ms=9, mec='None', ls='None') #rv2 residual
#plt.xlabel('Orbital Phase (conjunction at $\phi = 0.5$)', size=20) # EXTRA LABEL
axr2.set_xticklabels([])
# Zoom-in of shallower (secondary) eclipse
ax3 = plt.subplot2grid((12,2),(9,1), rowspan=2)
plt.axis([secondary_phasemin, secondary_phasemax, magdim, magbright])
ax3.set_xticks([0.89, 0.90, 0.91, 0.92])
plt.plot(phase_dat, mag_dat, color=yel, marker='.', ls='None', ms=6, mew=0) #lc data
plt.plot(phase_mod, mag_mod, color='k', lw=1.5) #lc model
ax3.set_ylabel('Magnitude')
ax3.set_xticklabels([])
ax3.set_yticklabels([])
# Zoom-in of deeper (primary) eclipse
ax4 = plt.subplot2grid((12,2),(9,0), rowspan=2)
plt.axis([primary_phasemin, primary_phasemax, magdim, magbright])
ax4.set_xticks([0.49, 0.50, 0.51, 0.52])
plt.plot(phase_dat, mag_dat, color=red, marker='.', ls='None', ms=6, mew=0) #lc data
plt.plot(phase_mod, mag_mod, color='k', lw=1.5) #lc model
ax4.set_xticklabels([])
#ax4.set_yticklabels([])
# Zoom plot residuals, shallower (secondary) eclipse
axr3 = plt.subplot2grid((12,2),(11,1))
plt.axis([secondary_phasemin, secondary_phasemax, magresid_min, magresid_max])
axr3.set_yticks([-0.004, 0, 0.004])
axr3.set_xticks([0.89, 0.90, 0.91, 0.92])
plt.axhline(y=0, xmin=0, xmax=2, color='0.75', ls=':')
plt.plot(phase_dat, lcresid, color=red, marker='.', ls='None', ms=4, mew=0) #lc residual
axr3.set_yticklabels([])
# Zoom plot residuals, deeper (primary) eclipse
axr4 = plt.subplot2grid((12,2),(11,0))
plt.axis([primary_phasemin, primary_phasemax, magresid_min, magresid_max])
axr4.set_yticks([-0.004, 0, 0.004])
axr4.set_xticks([0.49, 0.50, 0.51, 0.52])
plt.axhline(y=0, xmin=0, xmax=2, color='0.75', ls=':')
plt.plot(phase_dat, lcresid, color=red, marker='.', ls='None', ms=4, mew=0) #lc residual
#axr4.set_yticklabels([])
# Labels using overall figure as a reference
plt.figtext(0.5, 0.04, 'Orbital Phase (conjunction at $\phi = 0.5$)', ha='center', va='center', size=25)
#plt.figtext(0.135, 0.18, 'Secondary')
#plt.figtext(0.535, 0.18, 'Primary')
plt.figtext(0.06, 0.86, '$\Delta$')
plt.figtext(0.04, 0.395, '$\Delta$')
plt.figtext(0.04, 0.125, '$\Delta$')
ax1.legend(loc='lower right', frameon=False, prop={'size':20})
print ("Done preparing plot!")
plt.show()
#outfile = 'testplot1.png'
#plt.savefig(outfile)
#print ("Plot saved to %s!" % outfile)
|
[
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.median",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figtext",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.loadtxt",
"numpy.interp",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.subplots_adjust"
] |
[((1544, 1619), 'numpy.loadtxt', 'np.loadtxt', (['f1'], {'comments': '"""#"""', 'dtype': 'np.float64', 'usecols': '(0, 1)', 'unpack': '(True)'}), "(f1, comments='#', dtype=np.float64, usecols=(0, 1), unpack=True)\n", (1554, 1619), True, 'import numpy as np\n'), ((1731, 1806), 'numpy.loadtxt', 'np.loadtxt', (['f3'], {'comments': '"""#"""', 'dtype': 'np.float64', 'usecols': '(0, 1)', 'unpack': '(True)'}), "(f3, comments='#', dtype=np.float64, usecols=(0, 1), unpack=True)\n", (1741, 1806), True, 'import numpy as np\n'), ((1822, 1897), 'numpy.loadtxt', 'np.loadtxt', (['f4'], {'comments': '"""#"""', 'dtype': 'np.float64', 'usecols': '(0, 1)', 'unpack': '(True)'}), "(f4, comments='#', dtype=np.float64, usecols=(0, 1), unpack=True)\n", (1832, 1897), True, 'import numpy as np\n'), ((3491, 3577), 'numpy.loadtxt', 'np.loadtxt', (['LCinfile'], {'comments': '"""#"""', 'dtype': 'np.float64', 'usecols': '(0, 1)', 'unpack': '(True)'}), "(LCinfile, comments='#', dtype=np.float64, usecols=(0, 1), unpack\n =True)\n", (3501, 3577), True, 'import numpy as np\n'), ((3601, 3690), 'numpy.loadtxt', 'np.loadtxt', (['RV1infile'], {'comments': '"""#"""', 'dtype': 'np.float64', 'usecols': '(0, 1, 2)', 'unpack': '(True)'}), "(RV1infile, comments='#', dtype=np.float64, usecols=(0, 1, 2),\n unpack=True)\n", (3611, 3690), True, 'import numpy as np\n'), ((3714, 3803), 'numpy.loadtxt', 'np.loadtxt', (['RV2infile'], {'comments': '"""#"""', 'dtype': 'np.float64', 'usecols': '(0, 1, 2)', 'unpack': '(True)'}), "(RV2infile, comments='#', dtype=np.float64, usecols=(0, 1, 2),\n unpack=True)\n", (3724, 3803), True, 'import numpy as np\n'), ((5109, 5149), 'numpy.interp', 'np.interp', (['phase_dat', 'phase_mod', 'mag_mod'], {}), '(phase_dat, phase_mod, mag_mod)\n', (5118, 5149), True, 'import numpy as np\n'), ((5159, 5198), 'numpy.interp', 'np.interp', (['phase_rv1dat', 'phase_rv1', 'rv1'], {}), '(phase_rv1dat, phase_rv1, rv1)\n', (5168, 5198), True, 'import numpy as np\n'), ((5208, 5247), 'numpy.interp', 'np.interp', (['phase_rv2dat', 'phase_rv2', 'rv2'], {}), '(phase_rv2dat, phase_rv2, rv2)\n', (5217, 5247), True, 'import numpy as np\n'), ((5988, 6032), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 1)', '(4, 0)'], {'rowspan': '(3)'}), '((12, 1), (4, 0), rowspan=3)\n', (6004, 6032), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6079), 'matplotlib.pyplot.axis', 'plt.axis', (['[phasemin, phasemax, magdim, magbright]'], {}), '([phasemin, phasemax, magdim, magbright])\n', (6038, 6079), True, 'import matplotlib.pyplot as plt\n'), ((6080, 6123), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""'}), "(axis='both', which='major')\n", (6095, 6123), True, 'import matplotlib.pyplot as plt\n'), ((6124, 6199), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_dat', 'mag_dat'], {'color': 'red', 'marker': '"""."""', 'ls': '"""None"""', 'ms': '(6)', 'mew': '(0)'}), "(phase_dat, mag_dat, color=red, marker='.', ls='None', ms=6, mew=0)\n", (6132, 6199), True, 'import matplotlib.pyplot as plt\n'), ((6209, 6269), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_mod', 'mag_mod', '"""k"""'], {'lw': '(1.5)', 'label': '"""ELC Model"""'}), "(phase_mod, mag_mod, 'k', lw=1.5, label='ELC Model')\n", (6217, 6269), True, 'import matplotlib.pyplot as plt\n'), ((6368, 6412), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 1)', '(1, 0)'], {'rowspan': '(3)'}), '((12, 1), (1, 0), rowspan=3)\n', (6384, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6410, 6459), 
'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.0001)', 'hspace': '(0.0001)'}), '(wspace=0.0001, hspace=0.0001)\n', (6429, 6459), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6506), 'matplotlib.pyplot.axis', 'plt.axis', (['[phasemin, phasemax, rvmin, rvmax]'], {}), '([phasemin, phasemax, rvmin, rvmax])\n', (6470, 6506), True, 'import matplotlib.pyplot as plt\n'), ((6507, 6610), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['phase_rv1dat', 'rv1dat'], {'yerr': 'rv1err', 'marker': '"""o"""', 'color': 'yel', 'ms': '(9)', 'mec': '"""None"""', 'ls': '"""None"""'}), "(phase_rv1dat, rv1dat, yerr=rv1err, marker='o', color=yel, ms=9,\n mec='None', ls='None')\n", (6519, 6610), True, 'import matplotlib.pyplot as plt\n'), ((6617, 6720), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['phase_rv2dat', 'rv2dat'], {'yerr': 'rv2err', 'marker': '"""o"""', 'color': 'red', 'ms': '(9)', 'mec': '"""None"""', 'ls': '"""None"""'}), "(phase_rv2dat, rv2dat, yerr=rv2err, marker='o', color=red, ms=9,\n mec='None', ls='None')\n", (6629, 6720), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6770), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_rv1', 'rv1'], {'color': '"""k"""', 'lw': '(1.5)'}), "(phase_rv1, rv1, color='k', lw=1.5)\n", (6735, 6770), True, 'import matplotlib.pyplot as plt\n'), ((6782, 6825), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_rv2', 'rv2'], {'color': '"""k"""', 'lw': '(1.5)'}), "(phase_rv2, rv2, color='k', lw=1.5)\n", (6790, 6825), True, 'import matplotlib.pyplot as plt\n'), ((6950, 6983), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 1)', '(7, 0)'], {}), '((12, 1), (7, 0))\n', (6966, 6983), True, 'import matplotlib.pyplot as plt\n'), ((7077, 7145), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'xmin': 'phasemin', 'xmax': 'phasemax', 'color': '"""0.75"""', 'ls': '""":"""'}), "(y=0, xmin=phasemin, xmax=phasemax, color='0.75', ls=':')\n", (7088, 7145), True, 'import matplotlib.pyplot as plt\n'), ((7146, 7221), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_dat', 'lcresid'], {'color': 'red', 'marker': '"""."""', 'ls': '"""None"""', 'ms': '(4)', 'mew': '(0)'}), "(phase_dat, lcresid, color=red, marker='.', ls='None', ms=4, mew=0)\n", (7154, 7221), True, 'import matplotlib.pyplot as plt\n'), ((7271, 7304), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 1)', '(0, 0)'], {}), '((12, 1), (0, 0))\n', (7287, 7304), True, 'import matplotlib.pyplot as plt\n'), ((7387, 7455), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'xmin': 'phasemin', 'xmax': 'phasemax', 'color': '"""0.75"""', 'ls': '""":"""'}), "(y=0, xmin=phasemin, xmax=phasemax, color='0.75', ls=':')\n", (7398, 7455), True, 'import matplotlib.pyplot as plt\n'), ((7456, 7562), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['phase_rv1dat', 'rv1resid'], {'yerr': 'rv1err', 'marker': '"""o"""', 'color': 'yel', 'ms': '(9)', 'mec': '"""None"""', 'ls': '"""None"""'}), "(phase_rv1dat, rv1resid, yerr=rv1err, marker='o', color=yel, ms\n =9, mec='None', ls='None')\n", (7468, 7562), True, 'import matplotlib.pyplot as plt\n'), ((7572, 7678), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['phase_rv2dat', 'rv2resid'], {'yerr': 'rv2err', 'marker': '"""o"""', 'color': 'red', 'ms': '(9)', 'mec': '"""None"""', 'ls': '"""None"""'}), "(phase_rv2dat, rv2resid, yerr=rv2err, marker='o', color=red, ms\n =9, mec='None', ls='None')\n", (7584, 7678), True, 'import matplotlib.pyplot as plt\n'), ((7845, 7889), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 
2)', '(9, 1)'], {'rowspan': '(2)'}), '((12, 2), (9, 1), rowspan=2)\n', (7861, 7889), True, 'import matplotlib.pyplot as plt\n'), ((7887, 7956), 'matplotlib.pyplot.axis', 'plt.axis', (['[secondary_phasemin, secondary_phasemax, magdim, magbright]'], {}), '([secondary_phasemin, secondary_phasemax, magdim, magbright])\n', (7895, 7956), True, 'import matplotlib.pyplot as plt\n'), ((7998, 8073), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_dat', 'mag_dat'], {'color': 'yel', 'marker': '"""."""', 'ls': '"""None"""', 'ms': '(6)', 'mew': '(0)'}), "(phase_dat, mag_dat, color=yel, marker='.', ls='None', ms=6, mew=0)\n", (8006, 8073), True, 'import matplotlib.pyplot as plt\n'), ((8083, 8130), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_mod', 'mag_mod'], {'color': '"""k"""', 'lw': '(1.5)'}), "(phase_mod, mag_mod, color='k', lw=1.5)\n", (8091, 8130), True, 'import matplotlib.pyplot as plt\n'), ((8262, 8306), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 2)', '(9, 0)'], {'rowspan': '(2)'}), '((12, 2), (9, 0), rowspan=2)\n', (8278, 8306), True, 'import matplotlib.pyplot as plt\n'), ((8304, 8369), 'matplotlib.pyplot.axis', 'plt.axis', (['[primary_phasemin, primary_phasemax, magdim, magbright]'], {}), '([primary_phasemin, primary_phasemax, magdim, magbright])\n', (8312, 8369), True, 'import matplotlib.pyplot as plt\n'), ((8411, 8486), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_dat', 'mag_dat'], {'color': 'red', 'marker': '"""."""', 'ls': '"""None"""', 'ms': '(6)', 'mew': '(0)'}), "(phase_dat, mag_dat, color=red, marker='.', ls='None', ms=6, mew=0)\n", (8419, 8486), True, 'import matplotlib.pyplot as plt\n'), ((8496, 8543), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_mod', 'mag_mod'], {'color': '"""k"""', 'lw': '(1.5)'}), "(phase_mod, mag_mod, color='k', lw=1.5)\n", (8504, 8543), True, 'import matplotlib.pyplot as plt\n'), ((8664, 8698), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 2)', '(11, 1)'], {}), '((12, 2), (11, 1))\n', (8680, 8698), True, 'import matplotlib.pyplot as plt\n'), ((8696, 8774), 'matplotlib.pyplot.axis', 'plt.axis', (['[secondary_phasemin, secondary_phasemax, magresid_min, magresid_max]'], {}), '([secondary_phasemin, secondary_phasemax, magresid_min, magresid_max])\n', (8704, 8774), True, 'import matplotlib.pyplot as plt\n'), ((8853, 8907), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'xmin': '(0)', 'xmax': '(2)', 'color': '"""0.75"""', 'ls': '""":"""'}), "(y=0, xmin=0, xmax=2, color='0.75', ls=':')\n", (8864, 8907), True, 'import matplotlib.pyplot as plt\n'), ((8908, 8983), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_dat', 'lcresid'], {'color': 'red', 'marker': '"""."""', 'ls': '"""None"""', 'ms': '(4)', 'mew': '(0)'}), "(phase_dat, lcresid, color=red, marker='.', ls='None', ms=4, mew=0)\n", (8916, 8983), True, 'import matplotlib.pyplot as plt\n'), ((9078, 9112), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 2)', '(11, 0)'], {}), '((12, 2), (11, 0))\n', (9094, 9112), True, 'import matplotlib.pyplot as plt\n'), ((9110, 9184), 'matplotlib.pyplot.axis', 'plt.axis', (['[primary_phasemin, primary_phasemax, magresid_min, magresid_max]'], {}), '([primary_phasemin, primary_phasemax, magresid_min, magresid_max])\n', (9118, 9184), True, 'import matplotlib.pyplot as plt\n'), ((9263, 9317), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'xmin': '(0)', 'xmax': '(2)', 'color': '"""0.75"""', 'ls': '""":"""'}), "(y=0, xmin=0, xmax=2, color='0.75', ls=':')\n", (9274, 9317), True, 'import matplotlib.pyplot as 
plt\n'), ((9318, 9393), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_dat', 'lcresid'], {'color': 'red', 'marker': '"""."""', 'ls': '"""None"""', 'ms': '(4)', 'mew': '(0)'}), "(phase_dat, lcresid, color=red, marker='.', ls='None', ms=4, mew=0)\n", (9326, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9479, 9589), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.5)', '(0.04)', '"""Orbital Phase (conjunction at $\\\\phi = 0.5$)"""'], {'ha': '"""center"""', 'va': '"""center"""', 'size': '(25)'}), "(0.5, 0.04, 'Orbital Phase (conjunction at $\\\\phi = 0.5$)', ha=\n 'center', va='center', size=25)\n", (9490, 9589), True, 'import matplotlib.pyplot as plt\n'), ((9660, 9696), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.06)', '(0.86)', '"""$\\\\Delta$"""'], {}), "(0.06, 0.86, '$\\\\Delta$')\n", (9671, 9696), True, 'import matplotlib.pyplot as plt\n'), ((9696, 9733), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.04)', '(0.395)', '"""$\\\\Delta$"""'], {}), "(0.04, 0.395, '$\\\\Delta$')\n", (9707, 9733), True, 'import matplotlib.pyplot as plt\n'), ((9733, 9770), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.04)', '(0.125)', '"""$\\\\Delta$"""'], {}), "(0.04, 0.125, '$\\\\Delta$')\n", (9744, 9770), True, 'import matplotlib.pyplot as plt\n'), ((9866, 9876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9874, 9876), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2540), 'numpy.array', 'np.array', (['phases'], {}), '(phases)\n', (2532, 2540), True, 'import numpy as np\n'), ((5475, 5490), 'numpy.max', 'np.max', (['mag_dat'], {}), '(mag_dat)\n', (5481, 5490), True, 'import numpy as np\n'), ((5563, 5578), 'numpy.min', 'np.min', (['mag_dat'], {}), '(mag_dat)\n', (5569, 5578), True, 'import numpy as np\n'), ((4874, 4892), 'numpy.median', 'np.median', (['mag_mod'], {}), '(mag_mod)\n', (4883, 4892), True, 'import numpy as np\n'), ((4895, 4913), 'numpy.median', 'np.median', (['mag_dat'], {}), '(mag_dat)\n', (4904, 4913), True, 'import numpy as np\n'), ((4996, 5014), 'numpy.median', 'np.median', (['mag_dat'], {}), '(mag_dat)\n', (5005, 5014), True, 'import numpy as np\n'), ((5017, 5035), 'numpy.median', 'np.median', (['mag_mod'], {}), '(mag_mod)\n', (5026, 5035), True, 'import numpy as np\n'), ((5610, 5624), 'numpy.min', 'np.min', (['rv1dat'], {}), '(rv1dat)\n', (5616, 5624), True, 'import numpy as np\n'), ((5626, 5640), 'numpy.min', 'np.min', (['rv2dat'], {}), '(rv2dat)\n', (5632, 5640), True, 'import numpy as np\n'), ((5668, 5682), 'numpy.max', 'np.max', (['rv1dat'], {}), '(rv1dat)\n', (5674, 5682), True, 'import numpy as np\n'), ((5684, 5698), 'numpy.max', 'np.max', (['rv2dat'], {}), '(rv2dat)\n', (5690, 5698), True, 'import numpy as np\n')]
|
'''
@date: 22/11/2020
@author: <NAME>
@email: <EMAIL>
'''
from configparser import ConfigParser
import os
DEFAULT_CONFIG_FILE = 'settings.ini'
def get_config_file():
return os.environ.get('CONFIG_FILE', DEFAULT_CONFIG_FILE)
CONFIG_FILE = get_config_file()
def create_config(config_file=None):
parser = ConfigParser()
parser.read(config_file or CONFIG_FILE)
return parser
CONFIG = create_config()
|
[
"os.environ.get",
"configparser.ConfigParser"
] |
[((178, 228), 'os.environ.get', 'os.environ.get', (['"""CONFIG_FILE"""', 'DEFAULT_CONFIG_FILE'], {}), "('CONFIG_FILE', DEFAULT_CONFIG_FILE)\n", (192, 228), False, 'import os\n'), ((313, 327), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (325, 327), False, 'from configparser import ConfigParser\n')]
|
#!/usr/local/bin/python3
from notionscripts.notion_api import NotionApi
api = NotionApi()
def app_url(browser_url):
return browser_url.replace("https://", "notion://")
message = None
for row in api.get_active_tasks():
message = "%s | href=%s" % (row.title, app_url(row.get_browseable_url()))
print(message)
if message is None:
message = "Working on Nothing | href=%s" % app_url(api.current_day().get_browseable_url())
print(message)
|
[
"notionscripts.notion_api.NotionApi"
] |
[((79, 90), 'notionscripts.notion_api.NotionApi', 'NotionApi', ([], {}), '()\n', (88, 90), False, 'from notionscripts.notion_api import NotionApi\n')]
|
from setuptools import setup, find_packages
with open("README.md") as readme_file:
readme = readme_file.read()
# FIXME: Convert to ReST
requirements = [
"exifread",
"humanfriendly",
"jsonschema",
"lxml",
"pandas",
"pillow",
"pyyaml",
"tqdm",
"numpy",
"pycocotools",
"imagehash",
"requests",
"elasticsearch==7.13.*",
"joblib",
]
setup(
name="weedcoco",
version="0.1.0",
description="Tools for WeedCOCO agricultural image annotation interchange",
author="<NAME>, <NAME>, Sydney Informatics Hub",
long_description=readme,
include_package_data=True,
packages=find_packages(include=["weedcoco", "weedcoco.*"]),
license="BSD",
author_email="<EMAIL>",
keywords=[""],
python_requires=">=3.6",
install_requires=requirements,
extras_require={
"test": [
"pytest==6.2.*",
"pytest-cov",
"elasticmock==1.8.*",
]
},
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development",
],
zip_safe=False, # since we use __file__
)
|
[
"setuptools.find_packages"
] |
[((651, 700), 'setuptools.find_packages', 'find_packages', ([], {'include': "['weedcoco', 'weedcoco.*']"}), "(include=['weedcoco', 'weedcoco.*'])\n", (664, 700), False, 'from setuptools import setup, find_packages\n')]
|
import contextlib
import random
import re
import time
import unittest.mock as um
from typing import List
import pytest
from .. import io, logger
def test_add_duration():
event: logger.Event = {}
with logger.add_duration(event):
time.sleep(0.01)
assert event["duration"] > 0
def test_set_time():
header: logger.Event = {}
func = logger.set_time(header)
assert re.search(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", header["time"])
fields = func({})
assert fields is not None and fields["elapsed"] >= 0
fields = logger.set_time(None)({})
assert fields is not None and fields["elapsed"] >= 0
def test_set_id():
header: logger.Event = {}
random.seed(123)
logger.set_id(header)
assert isinstance(header["id"], int)
header2: logger.Event = {}
random.seed(123) # set_id should not respect the global random seed
logger.set_id(header2)
assert (
header2["id"] != header["id"]
), "OK, this can fail randomly - but the chance should be extremely low"
def _test_set_id(header):
if header is not None:
header["id"] = "TEST_ID"
def _test_set_time(header):
if header is not None:
header["time"] = "TEST_TIME"
counter = 0
def handler(event):
nonlocal counter
event["elapsed"] = counter
counter += 1
return handler
@contextlib.contextmanager
def _test_add_duration(event):
yield
event["duration"] = "TEST_DURATION"
def test_log():
writer = um.Mock()
with logger.Log(
writer,
dict(learning_rate=0.01),
annotate=(_test_set_id, _test_set_time),
default_annotate=False,
) as log:
log.add(kind="step", loss=5.0)
with log.adding(
"eval",
partition="valid",
_scopes=(_test_add_duration,),
_default_scopes=False,
) as line:
line.set(loss=4.5)
line.set(error_rate=0.9)
writer.write.assert_has_calls(
[
um.call(
dict(
kind="header",
learning_rate=0.01,
id="TEST_ID",
time="TEST_TIME",
elapsed=0,
)
),
um.call(dict(kind="step", loss=5.0, elapsed=1)),
um.call(
dict(
kind="eval",
partition="valid",
loss=4.5,
error_rate=0.9,
duration="TEST_DURATION",
elapsed=2,
)
),
]
)
writer.close.assert_called_once()
def test_log_errors():
with logger.Log(um.Mock()) as log:
with log.adding() as line:
pass
with pytest.raises(ValueError):
line.set(foo="bar")
with pytest.raises(ValueError):
line.add_to_log()
def _assert_has(dict_, **mappings):
for key, value in mappings.items():
assert dict_[key] == value
def test_file_log(tmp_path):
path = tmp_path / "test.jsonl"
with logger.open(path, name="my_test") as log:
log.add(kind="step", loss=5.0)
log.add(kind="step", loss=4.5)
with log.adding(kind="eval") as line:
line.set(loss=4.7)
events: List[logger.Event] = list(io.read_jsonlines(tmp_path / "test.jsonl.gz"))
_assert_has(events[0], kind="header", name="my_test")
_assert_has(events[1], kind="step", loss=5.0)
_assert_has(events[2], kind="step", loss=4.5)
_assert_has(events[3], kind="eval", loss=4.7)
assert len(events) == 4
def test_file_log_no_header_no_gzip(tmp_path):
path = tmp_path / "test.jsonl"
with logger.open(path, _add_header=False, _gzip_on_close=False) as log:
log.add(kind="step", loss=5.0)
events: List[logger.Event] = list(io.read_jsonlines(tmp_path / "test.jsonl"))
_assert_has(events[0], kind="step", loss=5.0)
assert len(events) == 1
def test_file_log_error(tmp_path):
path = tmp_path / "bad.jsonl"
with pytest.raises(ValueError) as error:
logger.open(path, custom_name="my_test", _add_header=False)
assert "custom_name" in str(error.value)
assert not path.exists()
|
[
"unittest.mock.Mock",
"time.sleep",
"pytest.raises",
"random.seed",
"re.search"
] |
[((397, 467), 're.search', 're.search', (['"""\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}"""', "header['time']"], {}), "('\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}', header['time'])\n", (406, 467), False, 'import re\n'), ((695, 711), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (706, 711), False, 'import random\n'), ((815, 831), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (826, 831), False, 'import random\n'), ((1504, 1513), 'unittest.mock.Mock', 'um.Mock', ([], {}), '()\n', (1511, 1513), True, 'import unittest.mock as um\n'), ((248, 264), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (258, 264), False, 'import time\n'), ((4072, 4097), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4085, 4097), False, 'import pytest\n'), ((2712, 2721), 'unittest.mock.Mock', 'um.Mock', ([], {}), '()\n', (2719, 2721), True, 'import unittest.mock as um\n'), ((2796, 2821), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2809, 2821), False, 'import pytest\n'), ((2868, 2893), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2881, 2893), False, 'import pytest\n')]
|
"""coBib's Redo command.
This command can be used to re-apply the changes *of a previously undone* command:
```
cobib redo
```
This command takes *no* additional arguments!
Note, that if you have not used `cobib undo` previously, this command will have no effect!
Furthermore, this command is *only* available if coBib's git-integration has been enabled and
initialized.
Refer to the documentation of `cobib.commands.init.InitCommand` for more details on that topic.
You can also trigger this command from the `cobib.tui.tui.TUI`.
By default, it is bound to the `r` key.
"""
from __future__ import annotations
import argparse
import logging
import subprocess
import sys
from typing import IO, TYPE_CHECKING, Any, List
from cobib.config import Event, config
from cobib.database import Database
from cobib.utils.rel_path import RelPath
from .base_command import ArgumentParser, Command
LOGGER = logging.getLogger(__name__)
if TYPE_CHECKING:
import cobib.tui
class RedoCommand(Command):
"""The Redo Command."""
name = "redo"
def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
"""Redoes the last undone change.
This command is *only* available if coBib's git-integration has been enabled via
`config.database.git` *and* initialized properly (see `cobib.commands.init.InitCommand`).
If that is the case, this command will re-apply the changes *of a previously undone* command
(see `cobib.commands.undo.UndoCommand`).
Args:
args: a sequence of additional arguments used for the execution. The following values
are allowed for this command:
* **no** additional arguments are required for this subcommand!
out: the output IO stream. This defaults to `sys.stdout`.
"""
git_tracked = config.database.git
if not git_tracked:
msg = (
"You must enable coBib's git-tracking in order to use the `Redo` command."
"\nPlease refer to the man-page for more information on how to do so."
)
LOGGER.error(msg)
return
file = RelPath(config.database.file).path
root = file.parent
if not (root / ".git").exists():
msg = (
"You have configured, but not initialized coBib's git-tracking."
"\nPlease consult `cobib init --help` for more information on how to do so."
)
LOGGER.error(msg)
return
LOGGER.debug("Starting Redo command.")
parser = ArgumentParser(prog="redo", description="Redo subcommand parser.")
try:
# pylint: disable=unused-variable
largs = parser.parse_args(args)
except argparse.ArgumentError as exc:
LOGGER.error(exc.message)
return
Event.PreRedoCommand.fire(largs)
LOGGER.debug("Obtaining git log.")
lines = subprocess.check_output(
[
"git",
"--no-pager",
"-C",
f"{root}",
"log",
"--oneline",
"--no-decorate",
"--no-abbrev",
]
)
redone_shas = set()
for commit in lines.decode().strip().split("\n"):
LOGGER.debug("Processing commit %s", commit)
sha, *message = commit.split()
if message[0] == "Redo":
# Store already redone commit sha
LOGGER.debug("Storing redone commit sha: %s", message[-1])
redone_shas.add(message[-1])
continue
if sha in redone_shas:
LOGGER.info("Skipping %s as it was already redone", sha)
continue
if message[0] == "Undo":
LOGGER.debug("Attempting to redo %s.", sha)
commands = [
f"git -C {root} revert --no-commit {sha}",
f"git -C {root} commit --no-gpg-sign --quiet --message 'Redo {sha}'",
]
with subprocess.Popen(
"; ".join(commands), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as redo:
redo.communicate()
if redo.returncode != 0:
LOGGER.error( # pragma: no cover
"Redo was unsuccessful. Please consult the logs and git history of your"
" database for more information."
)
else:
# update Database
Database().read()
break
else:
msg = "Could not find a commit to redo. You must have undone something first!"
LOGGER.warning(msg)
sys.exit(1)
Event.PostRedoCommand.fire(root, sha)
@staticmethod
def tui(tui: cobib.tui.TUI) -> None:
# pdoc will inherit the docstring from the base class
# noqa: D102
LOGGER.debug("Redo command triggered from TUI.")
tui.execute_command(["redo"], skip_prompt=True)
# update database list
LOGGER.debug("Updating list after Redo command.")
tui.viewport.update_list()
|
[
"cobib.database.Database",
"subprocess.check_output",
"sys.exit",
"cobib.config.Event.PreRedoCommand.fire",
"cobib.utils.rel_path.RelPath",
"cobib.config.Event.PostRedoCommand.fire",
"logging.getLogger"
] |
[((902, 929), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (919, 929), False, 'import logging\n'), ((2883, 2915), 'cobib.config.Event.PreRedoCommand.fire', 'Event.PreRedoCommand.fire', (['largs'], {}), '(largs)\n', (2908, 2915), False, 'from cobib.config import Event, config\n'), ((2976, 3095), 'subprocess.check_output', 'subprocess.check_output', (["['git', '--no-pager', '-C', f'{root}', 'log', '--oneline', '--no-decorate',\n '--no-abbrev']"], {}), "(['git', '--no-pager', '-C', f'{root}', 'log',\n '--oneline', '--no-decorate', '--no-abbrev'])\n", (2999, 3095), False, 'import subprocess\n'), ((4904, 4941), 'cobib.config.Event.PostRedoCommand.fire', 'Event.PostRedoCommand.fire', (['root', 'sha'], {}), '(root, sha)\n', (4930, 4941), False, 'from cobib.config import Event, config\n'), ((2175, 2204), 'cobib.utils.rel_path.RelPath', 'RelPath', (['config.database.file'], {}), '(config.database.file)\n', (2182, 2204), False, 'from cobib.utils.rel_path import RelPath\n'), ((4883, 4894), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4891, 4894), False, 'import sys\n'), ((4694, 4704), 'cobib.database.Database', 'Database', ([], {}), '()\n', (4702, 4704), False, 'from cobib.database import Database\n')]
|
# -*- coding: utf-8 -*-
"""
idfy_rest_client.models.jwt_payload
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
from idfy_rest_client.api_helper import APIHelper
import idfy_rest_client.models.signature_error
import idfy_rest_client.models.sign_success
class JwtPayload(object):
"""Implementation of the 'JwtPayload' model.
TODO: type model description here.
Attributes:
account_id (uuid|string): Account Id
document_id (uuid|string): Document Id
external_id (string): External document Id
signer_id (uuid|string): Signer Id
external_signer_id (string): External signer Id
error (SignatureError): Error object, will be included on error
sign_success (SignSuccess): Success object, will be included on sign
success
expires (datetime): When the jwt expires (ISO 8601)
aborted (bool): Set to true if user aborted
"""
# Create a mapping from Model property names to API property names
_names = {
"account_id":'accountId',
"document_id":'documentId',
"external_id":'externalId',
"signer_id":'signerId',
"external_signer_id":'externalSignerId',
"error":'error',
"sign_success":'signSuccess',
"expires":'expires',
"aborted":'aborted'
}
def __init__(self,
account_id=None,
document_id=None,
external_id=None,
signer_id=None,
external_signer_id=None,
error=None,
sign_success=None,
expires=None,
aborted=None,
additional_properties = {}):
"""Constructor for the JwtPayload class"""
# Initialize members of the class
self.account_id = account_id
self.document_id = document_id
self.external_id = external_id
self.signer_id = signer_id
self.external_signer_id = external_signer_id
self.error = error
self.sign_success = sign_success
self.expires = APIHelper.RFC3339DateTime(expires) if expires else None
self.aborted = aborted
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
account_id = dictionary.get('accountId')
document_id = dictionary.get('documentId')
external_id = dictionary.get('externalId')
signer_id = dictionary.get('signerId')
external_signer_id = dictionary.get('externalSignerId')
error = idfy_rest_client.models.signature_error.SignatureError.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None
sign_success = idfy_rest_client.models.sign_success.SignSuccess.from_dictionary(dictionary.get('signSuccess')) if dictionary.get('signSuccess') else None
expires = APIHelper.RFC3339DateTime.from_value(dictionary.get("expires")).datetime if dictionary.get("expires") else None
aborted = dictionary.get('aborted')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(account_id,
document_id,
external_id,
signer_id,
external_signer_id,
error,
sign_success,
expires,
aborted,
dictionary)
|
[
"idfy_rest_client.api_helper.APIHelper.RFC3339DateTime"
] |
[((2202, 2236), 'idfy_rest_client.api_helper.APIHelper.RFC3339DateTime', 'APIHelper.RFC3339DateTime', (['expires'], {}), '(expires)\n', (2227, 2236), False, 'from idfy_rest_client.api_helper import APIHelper\n')]
|
import os
from collections import namedtuple
from conans import Options
from conans.model.conan_file import ConanFile
from conans.model.options import PackageOptions
from conans.test.utils.tools import TestBufferConanOutput
class MockSettings(object):
def __init__(self, values):
self.values = values
def get_safe(self, value):
return self.values.get(value, None)
MockOptions = MockSettings
class MockDepsCppInfo(object):
def __init__(self):
self.include_paths = []
self.lib_paths = []
self.libs = []
self.defines = []
self.cflags = []
self.cxxflags = []
self.sharedlinkflags = []
self.exelinkflags = []
self.sysroot = ""
class MockConanfile(ConanFile):
def __init__(self, settings, options=None, runner=None):
self.deps_cpp_info = MockDepsCppInfo()
self.settings = settings
self.runner = runner
self.options = options or MockOptions({})
self.generators = []
self.output = TestBufferConanOutput()
self.should_configure = True
self.should_build = True
self.should_install = True
self.should_test = True
self.package_folder = None
def run(self, *args, **kwargs):
if self.runner:
kwargs["output"] = None
self.runner(*args, **kwargs)
class TestConanFile(object):
def __init__(self, name="Hello", version="0.1", settings=None, requires=None, options=None,
default_options=None, package_id=None, build_requires=None, info=None,
private_requires=None):
self.name = name
self.version = version
self.settings = settings
self.requires = requires
self.private_requires = private_requires
self.build_requires = build_requires
self.options = options
self.default_options = default_options
self.package_id = package_id
self.info = info
def __repr__(self):
base = """from conans import ConanFile
class {name}Conan(ConanFile):
name = "{name}"
version = "{version}"
""".format(name=self.name, version=self.version)
if self.settings:
base += " settings = %s\n" % self.settings
if self.requires or self.private_requires:
reqs_list = ['"%s"' % r for r in self.requires or []]
reqs_list.extend(["('%s', 'private')" % r for r in self.private_requires or []])
reqs_list.append("")
base += " requires = %s\n" % (", ".join(reqs_list))
if self.build_requires:
base += " build_requires = %s\n" % (", ".join('"%s"' % r
for r in self.build_requires))
if self.options:
base += " options = %s\n" % str(self.options)
if self.default_options:
if isinstance(self.default_options, str):
base += " default_options = '%s'\n" % str(self.default_options)
else:
base += " default_options = %s\n" % str(self.default_options)
if self.package_id:
base += " def package_id(self):\n %s\n" % self.package_id
if self.info:
base += """
def package_info(self):
self.cpp_info.libs = ["mylib{name}{version}lib"]
self.env_info.MYENV = ["myenv{name}{version}env"]
""".format(name=self.name, version=self.version)
return base
class ConanFileMock(ConanFile):
def __init__(self, shared=None, options=None, options_values=None):
options = options or ""
self.command = None
self.path = None
self.source_folder = self.build_folder = "."
self.settings = None
self.options = Options(PackageOptions.loads(options))
if options_values:
for var, value in options_values.items():
self.options._data[var] = value
self.deps_cpp_info = namedtuple("deps_cpp_info", "sysroot")("/path/to/sysroot")
self.output = TestBufferConanOutput()
self.in_local_cache = False
self.install_folder = "myinstallfolder"
if shared is not None:
self.options = namedtuple("options", "shared")(shared)
self.should_configure = True
self.should_build = True
self.should_install = True
self.should_test = True
self.generators = []
self.captured_env = {}
def run(self, command):
self.command = command
self.path = os.environ["PATH"]
self.captured_env = {key: value for key, value in os.environ.items()}
|
[
"conans.test.utils.tools.TestBufferConanOutput",
"os.environ.items",
"conans.model.options.PackageOptions.loads",
"collections.namedtuple"
] |
[((1038, 1061), 'conans.test.utils.tools.TestBufferConanOutput', 'TestBufferConanOutput', ([], {}), '()\n', (1059, 1061), False, 'from conans.test.utils.tools import TestBufferConanOutput\n'), ((4070, 4093), 'conans.test.utils.tools.TestBufferConanOutput', 'TestBufferConanOutput', ([], {}), '()\n', (4091, 4093), False, 'from conans.test.utils.tools import TestBufferConanOutput\n'), ((3800, 3829), 'conans.model.options.PackageOptions.loads', 'PackageOptions.loads', (['options'], {}), '(options)\n', (3820, 3829), False, 'from conans.model.options import PackageOptions\n'), ((3989, 4027), 'collections.namedtuple', 'namedtuple', (['"""deps_cpp_info"""', '"""sysroot"""'], {}), "('deps_cpp_info', 'sysroot')\n", (3999, 4027), False, 'from collections import namedtuple\n'), ((4236, 4267), 'collections.namedtuple', 'namedtuple', (['"""options"""', '"""shared"""'], {}), "('options', 'shared')\n", (4246, 4267), False, 'from collections import namedtuple\n'), ((4630, 4648), 'os.environ.items', 'os.environ.items', ([], {}), '()\n', (4646, 4648), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import os
import sys
import traceback
sys.path.append("..") # Since examples are buried one level into source tree
from asyncpushbullet import AsyncPushbullet
__author__ = "<NAME>"
__email__ = "<EMAIL>"
API_KEY = "" # YOUR API KEY
PROXY = os.environ.get("https_proxy") or os.environ.get("http_proxy")
def main():
# pb = AsyncPushbullet(API_KEY, proxy=PROXY)
msg = {"foo": "bar", 42: "23", "type": "synchronous_example", "a_list_of_none":[None]}
# pb.push_ephemeral(msg) # Synchronous IO
async def _run():
try:
async with AsyncPushbullet(API_KEY, proxy=PROXY) as pb:
msg["type"] = "asynchronous_example"
await pb.async_push_ephemeral(msg) # Asynchronous IO
# await pb.async_close()
except Exception as ex:
print("ERROR:", ex, file=sys.stderr, flush=True)
traceback.print_tb(sys.exc_info()[2])
loop = asyncio.get_event_loop()
loop.run_until_complete(_run())
if __name__ == "__main__":
if API_KEY == "":
with open("../api_key.txt") as f:
API_KEY = f.read().strip()
main()
|
[
"sys.path.append",
"asyncio.get_event_loop",
"asyncpushbullet.AsyncPushbullet",
"os.environ.get",
"sys.exc_info"
] |
[((101, 122), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (116, 122), False, 'import sys\n'), ((306, 335), 'os.environ.get', 'os.environ.get', (['"""https_proxy"""'], {}), "('https_proxy')\n", (320, 335), False, 'import os\n'), ((339, 367), 'os.environ.get', 'os.environ.get', (['"""http_proxy"""'], {}), "('http_proxy')\n", (353, 367), False, 'import os\n'), ((994, 1018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1016, 1018), False, 'import asyncio\n'), ((629, 666), 'asyncpushbullet.AsyncPushbullet', 'AsyncPushbullet', (['API_KEY'], {'proxy': 'PROXY'}), '(API_KEY, proxy=PROXY)\n', (644, 666), False, 'from asyncpushbullet import AsyncPushbullet\n'), ((963, 977), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (975, 977), False, 'import sys\n')]
|
# Generated by Django 3.2.8 on 2021-11-20 20:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Inventory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_type', models.IntegerField(choices=[(1, 'Red'), (2, 'White'), (3, 'Yellow'), (4, 'Blue')])),
('stored', models.IntegerField(choices=[(0, 'Left'), (1, 'Middle'), (2, 'Right'), (3, 'Shipment')])),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sender', models.IntegerField(
choices=[(0, 'User'), (1, 'Cloud'), (11, '[Edge] Classification'), (12, '[Edge] Repository'),
(13, '[Edge] Shipment'), (21, '[Machine] Classification'), (22, '[Machine] Repository-1'),
(23, '[Machine] Repository-2'), (24, '[Machine] Repository-3'),
(25, '[Machine] Shipment')])),
('title', models.CharField(default='', max_length=50)),
('msg', models.TextField(blank=True, default='', null=True)),
('datetime', models.DateTimeField(default=datetime.datetime.now)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('made', models.DateTimeField(default=datetime.datetime.now)),
('completed', models.DateTimeField(blank=True, null=True)),
('item_type', models.IntegerField(choices=[(1, 'Red'), (2, 'White'), (3, 'Yellow'), (4, 'Blue')])),
('dest', models.IntegerField(choices=[(0, 'Left'), (1, 'Middle'), (2, 'Right'), (3, 'Shipment')])),
('status', models.IntegerField(
choices=[(1, 'Order Received'), (2, 'Repository Processing'), (3, 'Shipment Processing'),
(4, 'Order Completed')], default=1)),
],
),
migrations.CreateModel(
name='Sensory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sensorID', models.CharField(max_length=50)),
('value', models.FloatField()),
('datetime', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Status',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.BooleanField(default=False)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Verification',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('property_name', models.TextField(default='')),
('verification_result', models.BooleanField(default=True)),
('verified', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"django.db.models.TextField",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((321, 417), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (340, 417), False, 'from django.db import migrations, models\n'), ((446, 533), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Red'), (2, 'White'), (3, 'Yellow'), (4, 'Blue')]"}), "(choices=[(1, 'Red'), (2, 'White'), (3, 'Yellow'), (4,\n 'Blue')])\n", (465, 533), False, 'from django.db import migrations, models\n'), ((559, 651), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Left'), (1, 'Middle'), (2, 'Right'), (3, 'Shipment')]"}), "(choices=[(0, 'Left'), (1, 'Middle'), (2, 'Right'), (3,\n 'Shipment')])\n", (578, 651), False, 'from django.db import migrations, models\n'), ((678, 713), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (698, 713), False, 'from django.db import migrations, models\n'), ((846, 942), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (865, 942), False, 'from django.db import migrations, models\n'), ((968, 1281), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'User'), (1, 'Cloud'), (11, '[Edge] Classification'), (12,\n '[Edge] Repository'), (13, '[Edge] Shipment'), (21,\n '[Machine] Classification'), (22, '[Machine] Repository-1'), (23,\n '[Machine] Repository-2'), (24, '[Machine] Repository-3'), (25,\n '[Machine] Shipment')]"}), "(choices=[(0, 'User'), (1, 'Cloud'), (11,\n '[Edge] Classification'), (12, '[Edge] Repository'), (13,\n '[Edge] Shipment'), (21, '[Machine] Classification'), (22,\n '[Machine] Repository-1'), (23, '[Machine] Repository-2'), (24,\n '[Machine] Repository-3'), (25, '[Machine] Shipment')])\n", (987, 1281), False, 'from django.db import migrations, models\n'), ((1402, 1445), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(50)'}), "(default='', max_length=50)\n", (1418, 1445), False, 'from django.db import migrations, models\n'), ((1472, 1523), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""', 'null': '(True)'}), "(blank=True, default='', null=True)\n", (1488, 1523), False, 'from django.db import migrations, models\n'), ((1555, 1606), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (1575, 1606), False, 'from django.db import migrations, models\n'), ((1737, 1833), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1756, 1833), False, 'from django.db import migrations, models\n'), ((1857, 1908), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (1877, 1908), False, 'from django.db import migrations, models\n'), ((1941, 1984), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1961, 1984), False, 'from django.db import migrations, models\n'), ((2017, 2104), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Red'), (2, 'White'), (3, 'Yellow'), (4, 'Blue')]"}), "(choices=[(1, 'Red'), (2, 'White'), (3, 'Yellow'), (4,\n 'Blue')])\n", (2036, 2104), False, 'from django.db import migrations, models\n'), ((2128, 2220), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Left'), (1, 'Middle'), (2, 'Right'), (3, 'Shipment')]"}), "(choices=[(0, 'Left'), (1, 'Middle'), (2, 'Right'), (3,\n 'Shipment')])\n", (2147, 2220), False, 'from django.db import migrations, models\n'), ((2246, 2399), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Order Received'), (2, 'Repository Processing'), (3,\n 'Shipment Processing'), (4, 'Order Completed')]", 'default': '(1)'}), "(choices=[(1, 'Order Received'), (2,\n 'Repository Processing'), (3, 'Shipment Processing'), (4,\n 'Order Completed')], default=1)\n", (2265, 2399), False, 'from django.db import migrations, models\n'), ((2574, 2670), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2593, 2670), False, 'from django.db import migrations, models\n'), ((2698, 2729), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2714, 2729), False, 'from django.db import migrations, models\n'), ((2758, 2777), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (2775, 2777), False, 'from django.db import migrations, models\n'), ((2809, 2831), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2829, 2831), False, 'from django.db import migrations, models\n'), ((2963, 3059), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2982, 3059), False, 'from django.db import migrations, models\n'), ((3085, 3119), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3104, 3119), False, 'from django.db import migrations, models\n'), ((3150, 3185), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3170, 3185), False, 'from django.db import migrations, models\n'), ((3323, 3419), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3342, 3419), False, 'from django.db import migrations, models\n'), ((3452, 3480), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (3468, 3480), False, 'from django.db import migrations, models\n'), ((3523, 3556), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3542, 3556), False, 'from django.db import migrations, models\n'), ((3588, 3623), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3608, 3623), False, 'from django.db import migrations, models\n')]
|
"""Unit tests for code in urllib.response."""
import socket
import tempfile
import urllib.response
import unittest
class TestResponse(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.fp = self.sock.makefile('rb')
self.test_headers = {"Host": "www.python.org",
"Connection": "close"}
def test_with(self):
addbase = urllib.response.addbase(self.fp)
self.assertIsInstance(addbase, tempfile._TemporaryFileWrapper)
def f():
with addbase as spam:
pass
self.assertFalse(self.fp.closed)
f()
self.assertTrue(self.fp.closed)
self.assertRaises(ValueError, f)
def test_addclosehook(self):
closehook_called = False
def closehook():
nonlocal closehook_called
closehook_called = True
closehook = urllib.response.addclosehook(self.fp, closehook)
closehook.close()
self.assertTrue(self.fp.closed)
self.assertTrue(closehook_called)
def test_addinfo(self):
info = urllib.response.addinfo(self.fp, self.test_headers)
self.assertEqual(info.info(), self.test_headers)
def test_addinfourl(self):
url = "http://www.python.org"
code = 200
infourl = urllib.response.addinfourl(self.fp, self.test_headers,
url, code)
self.assertEqual(infourl.info(), self.test_headers)
self.assertEqual(infourl.geturl(), url)
self.assertEqual(infourl.getcode(), code)
def tearDown(self):
self.sock.close()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"socket.socket"
] |
[((1770, 1785), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1783, 1785), False, 'import unittest\n'), ((208, 257), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (221, 257), False, 'import socket\n')]
|
from richkit.retrieve.cert_sh import DomainCertificates
from richkit.retrieve.x509 import X509
def get_logs(domain):
"""
Get a list of certificates with all the features
:param domain: Input domain
"""
try:
certs = DomainCertificates(domain)
return certs.get_all()
except Exception as e:
print(e)
def get_certificates(domain):
"""
Get just the list of certificates of the domain
:param domain: Input domain
"""
try:
certs = DomainCertificates(domain)
return certs.get_certificates_list()
except Exception as e:
print(e)
def get_certificates_features(cert_id):
"""
Get the certificate features by certificate ID
:param cert_id: crt.sh certificate ID
"""
try:
cert = X509(cert_id)
return cert.certificates_features
except Exception as e:
print(e)
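# Illustrative calls (the domain and the certificate ID below are placeholders):
#   get_certificates("example.com")       # certificate list crt.sh reports for the domain
#   get_certificates_features(123456)     # features of a single certificate by crt.sh ID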
|
[
"richkit.retrieve.cert_sh.DomainCertificates",
"richkit.retrieve.x509.X509"
] |
[((245, 271), 'richkit.retrieve.cert_sh.DomainCertificates', 'DomainCertificates', (['domain'], {}), '(domain)\n', (263, 271), False, 'from richkit.retrieve.cert_sh import DomainCertificates\n'), ((504, 530), 'richkit.retrieve.cert_sh.DomainCertificates', 'DomainCertificates', (['domain'], {}), '(domain)\n', (522, 530), False, 'from richkit.retrieve.cert_sh import DomainCertificates\n'), ((795, 808), 'richkit.retrieve.x509.X509', 'X509', (['cert_id'], {}), '(cert_id)\n', (799, 808), False, 'from richkit.retrieve.x509 import X509\n')]
|
from django.urls import path, reverse_lazy
from . import views
app_name = "home"
urlpatterns = [
path("", views.home_view, name="home_page"),
path("unauthorized", views.unauthorized_view, name="unauthorized_view"),
path("reset_password", views.reset_password, name="reset_password"),
]
|
[
"django.urls.path"
] |
[((102, 145), 'django.urls.path', 'path', (['""""""', 'views.home_view'], {'name': '"""home_page"""'}), "('', views.home_view, name='home_page')\n", (106, 145), False, 'from django.urls import path, reverse_lazy\n'), ((151, 222), 'django.urls.path', 'path', (['"""unauthorized"""', 'views.unauthorized_view'], {'name': '"""unauthorized_view"""'}), "('unauthorized', views.unauthorized_view, name='unauthorized_view')\n", (155, 222), False, 'from django.urls import path, reverse_lazy\n'), ((228, 295), 'django.urls.path', 'path', (['"""reset_password"""', 'views.reset_password'], {'name': '"""reset_password"""'}), "('reset_password', views.reset_password, name='reset_password')\n", (232, 295), False, 'from django.urls import path, reverse_lazy\n')]
|
import math
import subprocess
import sys
# Run with arg "a" to analyze, "r" to run.
# Run executes the algorithm (edit source code here for different configurations)
# Analyze will compile a summary of the outputs. Again, edit this file to extract different details.
assert len(sys.argv) == 2
do_summarize = sys.argv[1] == "a" # Analyze
out_dir = "run_output"
def source_file(graph):
return "graphs/"+graph+".graph"
def in_partition_file(graph):
return "partitions/5/"+graph+".2.ptn"
def out_partition_file(graph):
return out_dir + "/" + graph+".ptn"
def output_file(graph):
return out_dir + "/" + graph+".out"
def num_nodes_in_graph_file(name):
fo = open(name)
ln = fo.readline()
fo.close()
firstword = ln.split()[0]
return int(firstword)
def num_edges_in_graph_file(name):
fo = open(name)
ln = fo.readline()
fo.close()
firstword = ln.split()[1]
return int(firstword)
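# Both helpers above appear to assume the first line of a ".graph" file is a
# header of the form "<num_nodes> <num_edges> ...", as in the Walshaw/Chaco-style
# benchmark graphs referenced by source_file().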
def run_cut_matching(graph_file, out_partition_file, print_file, g_phi, h_phi, multi, rounds, timeout, in_partition_file):
#process = subprocess.run(["echo", graph_file, out_partition_file, str(g_phi), str(h_phi)], timeout=2)
try:
command = f"time -o {print_file}.time " + " ".join(["cmake-build-debug/a.out", "-f", graph_file, "-p", in_partition_file, "" if multi else "--ignore-multi",f"--timeout_m={timeout}" , "-r", f"{rounds}", "-s", f"--H_phi={h_phi}", f"--G_phi={g_phi}", "--vol", "0.1", "-o", out_partition_file, ">>", print_file])
f = open(print_file, "w")
f.write(command)
f.close()
process = subprocess.run(command, shell=True)
print("Completed")
print(process.returncode)
except subprocess.TimeoutExpired:
print("timeout")
name_colors = {
"uk": "150 150 0",
"add32": "0 150 150",
"bcsstk33": "150 0 150",
"whitaker3": "0 150 0",
"wing_nodal": "150 0 0",
"fe_4elt2": "0 0 150",
"vibrobox": "100 150 0",
"4elt": "150 150 100",
"fe_sphere": "150 100 0",
"brack2": "100 150 100",
"finan512": "150 100 100",
"fe_tooth": "100 150 150",
"144": "150 150 100",
"auto": "100 100 150"
}
def colorof(name):
if name in name_colors:
return name_colors[name]
return "0 0 0"
if do_summarize:
print("Graph_name\t"
f"vertices\t"
f"edges\t"
f"g_phi\t"
f"h_phi\t"
f"timed_out\t"
f"spent_time\t"
f"allowed_time\t"
f"read_as_multi\t"
f"CASE\t"
f"best_cut_conductance\t"
f"best_cut_expansion\t"
f"edges_crossing\t"
f"size1\t"
f"size2\t"
f"diff_total\t"
f"diff_div_nodes\t"
f"vol1\t"
f"vol2\t"
f"best_round\t"
f"last_round\t"
f"walshaw_conductance\t"
f"walshaw_imbalance\t"
f"colR\t"
f"colG\t"
f"colB\t"
f"h_cond\t"
f"h_cond_last\t"
f"walsh_cross\t"
)
def deciformat(n):
normal = f"{n:9.4}"
if 'e' in normal:
return f"{n:9.4f}"
return normal
def myform(n):
ret = deciformat(n).strip()
if n == 0:
return ret
if ret[0] == '0':
return ret[1:]
return ret
def get_round_conductance(lines, round_n):
hook = f"== End round {round_n}"
round_index = next(i for i, v in enumerate(lines) if (hook in v))
assert(0 <= round_index < len(lines)-4)
assert(hook in lines[round_index])
h_line = lines[round_index+2]
h_cond = float(h_line.split()[2][:-1])
return h_cond
def summarize(graph_name, g_phi, h_phi, multi, timeout, graph_file, print_file, time_file):
timed_out = False
runtime = -1
tf = open(time_file)
time_line = tf.readline()
if(time_line.find("Command") == 0):
timed_out = True
time_line = tf.readline()
tf.close()
user_index = time_line.find("user")
time_string = time_line[:user_index]
time_used = float(time_string)
line_int = "-"
cond = "-"
expansion = "-"
size1 = "-"
size2 = "-"
diff_abs = "-"
diff_fact = "-"
vol1 = "-"
vol2 = "-"
crossing_edges = "-"
best_round = "-"
last_round = "-"
walsh_cond = "-"
walsh_imb = "-"
h_cond = "-"
h_cond_last = "-"
walsh_cross = "-"
# TODO consider these...
if not timed_out:
# Example output
# CASE2 G Expansion target reached with a cut that is relatively balanced. Cut-matching game has found a balanced cut as good as you wanted it.
# Claimed g conductance: 0.010989
# R0 cond 0.010989
# The best with best expansion was found on round0
# final_Edge crossings (E) : 1
# final_cut size: (10 | 10)
# diff: 0 (factor 0 of total n vertices)
# final_cut volumes: (91 | 91)
# final_expansion: 0.1
# final_conductance: 0.010989
# CASE2 Goodenough balanced cut
case_line = ""
cond_line = ""
exp_line = ""
vol_line = ""
size_line = ""
diff_line = ""
cross_line = ""
bestround_line = ""
lastround_line = ""
walsh_cond_line = ""
walsh_imb_line = ""
walsh_cross_line = ""
with open(print_file) as pf:
lines = pf.read().splitlines()
walsh_cond_line = lines[-1]
walsh_imb_line = lines[-4]
walsh_cross_line = lines[-6]
lines = lines[:-8] # skip partition output
case_line = lines[-1]
cond_line = lines[-2]
exp_line = lines[-3]
vol_line = lines[-4]
diff_line = lines[-5]
size_line = lines[-6]
cross_line = lines[-7]
bestround_line = lines[-8]
lastround_line = lines[-9]
# Compute all the values from within the lines
line_int = case_line[4]
cond_word = cond_line.split()[1]
cond = float(cond_word)
expansion = float(exp_line.split()[1])
size1 = int(size_line.split()[3])
size2 = int(size_line.split()[5])
diff_abs = int(diff_line.split()[1])
diff_fact = float(diff_line.split()[3])
vol1 = int(vol_line.split()[3])
vol2 = int(vol_line.split()[5])
crossing_edges = int(cross_line.split()[4])
best_round = int(bestround_line.split()[-1][5:])
last_round = int(lastround_line.split()[0][1:])
walsh_cond = float(walsh_cond_line.split()[1])
walsh_imb = float(walsh_imb_line.split()[3])
# If we now have bestround, find h cond for that round
h_cond = get_round_conductance(lines, best_round)
h_cond_last = get_round_conductance(lines, last_round)
walsh_cross = int(walsh_cross_line.split()[4])
print(f"{graph_name}\t"
f"{num_nodes_in_graph_file(graph_file)}\t"
f"{num_edges_in_graph_file(graph_file)}\t"
f"{g_phi}\t"
f"{h_phi}\t"
f"{timed_out}\t"
f"{time_used}\t"
f"{timeout}\t"
f"{multi}\t"
f"{line_int}\t"
f"{myform(cond)}\t"
f"{expansion}\t"
f"{crossing_edges}\t"
f"{size1}\t"
f"{size2}\t"
f"{diff_abs}\t"
f"{myform(diff_fact)}\t"
f"{vol1}\t"
f"{vol2}\t"
f"{best_round}\t"
f"{last_round}\t"
f"{myform(walsh_cond)}\t"
f"{myform(walsh_imb)}\t"
f"{colorof(graph_name)}\t"
f"{myform(h_cond)}\t"
f"{myform(h_cond_last)}\t"
f"{walsh_cross}\t"
)
#pf = open(print_file)
def run_with(graph, g_phi, h_phi, multi, rounds, timeout):
graph_with_postfix = f"{graph}-h-{h_phi}-g-{g_phi}-t-{timeout}"
if do_summarize:
summarize(graph, g_phi, h_phi, multi, timeout, source_file(graph), output_file(graph_with_postfix), output_file(graph_with_postfix)+".time")
else:
print(f"Running on h_phi {h_phi} g_phi {g_phi} timeout {timeout}")
run_cut_matching(source_file(graph), out_partition_file(graph_with_postfix), output_file(graph_with_postfix), g_phi, h_phi, multi, rounds, timeout, in_partition_file(graph))
def default_analyze(graph, multi):
n_nodes = num_nodes_in_graph_file(source_file(graph))
rounds = 0
g_phi = 0.0
#g_phi = 1.0/math.log(math.log(n_nodes))
#g_phi = 1.0/math.log(n_nodes)**2
#g_phi = 1.0/math.sqrt(n_nodes)
#g_phi = 1.0/n_nodes
#for h_phi in [0.1, 0.55]:
for h_phi in [1]: # Unreachable
run_with(graph, g_phi, h_phi, multi, rounds, "120")
#default_analyze("barbell10-10", True)
#default_analyze("barbell100-100", True)
#default_analyze("barbell1000-1000", True)
#default_analyze("complete10", True)
#default_analyze("complete100", True)
#default_analyze("complete1000", True)
#default_analyze("expander4", True)
#default_analyze("expander16", True)
#default_analyze("expander64", True)
#default_analyze("expander256", True)
#default_analyze("expander1024", True)
#default_analyze("looploop8", True)
#default_analyze("multi8", True)
#default_analyze("144", False)
#default_analyze("4elt", False)
#default_analyze("add32", False)
#default_analyze("auto", False)
#default_analyze("bcsstk33", False)
#default_analyze("brack2", False)
#default_analyze("fe_4elt2", False)
#default_analyze("fe_sphere", False)
#default_analyze("fe_tooth", False)
#default_analyze("finan512", False)
#default_analyze("uk", False)
#default_analyze("vibrobox", False)
#default_analyze("whitaker3", False)
#default_analyze("wing_nodal", False)
run_with("144", 0.0, 1, False, 0, "30")
run_with("4elt", 0.0, 1, False, 0, "60")
run_with("add32", 0.0, 1, False, 0, "30")
run_with("auto", 0.0, 1, False, 0, "60")
run_with("bcsstk33", 0.0, 1, False, 0, "60")
run_with("brack2", 0.0, 1, False, 0, "120")
run_with("fe_4elt2", 0.0, 1, False, 0, "60")
run_with("fe_sphere", 0.0, 1, False, 0, "120")
run_with("fe_tooth", 0.0, 1, False, 0, "120")
run_with("finan512", 0.0, 1, False, 0, "60")
run_with("uk", 0.0, 1, False, 0, "30")
run_with("vibrobox", 0.0, 1, False, 0, "30")
run_with("whitaker3", 0.0, 1, False, 0, "60")
run_with("wing_nodal", 0.0, 1, False, 0, "60")
|
[
"subprocess.run"
] |
[((1588, 1623), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1602, 1623), False, 'import subprocess\n')]
|
import argparse
import csv
from os import listdir
from os.path import isfile, join, basename
class Table:
name: str
tuples: list = []
attributes: list = []
def file_to_table(file_path: str) -> Table:
table: Table = Table()
table.name = basename(file_path).split(".")[0]
with open(file_path) as csv_file:
rows = [row for row in csv.reader(csv_file)]
table.attributes = rows[0]
table.tuples = rows[1:]
return table
def format_table(table: Table) -> str:
return f"{table.name} = {{\n" \
+ ", ".join(table.attributes) \
+ "\n\n" \
+ "\n".join(", ".join(map(format_tuple, tuple)) for tuple in table.tuples) \
+ "\n}\n"
def format_tuple(value: str) -> str:
if value.lower() == "null":
return "null"
elif value.isnumeric():
return value
else:
return f"'{value}'"
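# Illustrative result (hypothetical "person.csv" with header "id,name" and one
# data row "1,Alice"): format_table() would emit
#   person = {
#   id, name
#
#   1, 'Alice'
#   }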
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts CSV files to RelaX Dataset.')
parser.add_argument('-i', metavar='dir_path', required=True, dest='input', help='The directory with CSV files')
parser.add_argument('-o', metavar='out_file', required=True, dest='output', help='Path of the output file')
parser.add_argument('-g', metavar='name_of_group', dest='group', help='Name of the RelaX Dataset Group')
parser.add_argument('-d', metavar='desc', dest='desc', help='RelaX Dataset Group description')
args = parser.parse_args()
files_path = [join(args.input, file) for file in listdir(args.input) if isfile(join(args.input, file))]
tables = [file_to_table(file) for file in files_path]
formatted_rels = [format_table(table) for table in tables]
with open(args.output, 'w') as output_file:
output = "group: " \
+ (f"{args.group}" if args.group is not None else basename(args.output).split(".")[0]) \
+ "\n" \
+ (f"description: {args.desc}\n" if args.desc is not None else "") \
+ "\n" \
+ "\n".join(formatted_rels)
output_file.write(output)
|
[
"csv.reader",
"argparse.ArgumentParser",
"os.path.basename",
"os.path.join",
"os.listdir"
] |
[((942, 1017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts CSV files to RelaX Dataset."""'}), "(description='Converts CSV files to RelaX Dataset.')\n", (965, 1017), False, 'import argparse\n'), ((1505, 1527), 'os.path.join', 'join', (['args.input', 'file'], {}), '(args.input, file)\n', (1509, 1527), False, 'from os.path import isfile, join, basename\n'), ((1540, 1559), 'os.listdir', 'listdir', (['args.input'], {}), '(args.input)\n', (1547, 1559), False, 'from os import listdir\n'), ((260, 279), 'os.path.basename', 'basename', (['file_path'], {}), '(file_path)\n', (268, 279), False, 'from os.path import isfile, join, basename\n'), ((363, 383), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (373, 383), False, 'import csv\n'), ((1570, 1592), 'os.path.join', 'join', (['args.input', 'file'], {}), '(args.input, file)\n', (1574, 1592), False, 'from os.path import isfile, join, basename\n'), ((1861, 1882), 'os.path.basename', 'basename', (['args.output'], {}), '(args.output)\n', (1869, 1882), False, 'from os.path import isfile, join, basename\n')]
|
# -*- coding: utf-8 -*-
__all__ = [
'predict',
'redistribute',
'simulate',
'walk_probability'
]
###########
# IMPORTS #
###########
# Libraries
import numpy as _np
# Internal
from .custom_types import (
oint as _oint,
olist_int as _olist_int,
tarray as _tarray,
tlist_int as _tlist_int,
tmc as _tmc,
trand as _trand,
tredists as _tredists
)
#############
# FUNCTIONS #
#############
def predict(mc: _tmc, steps: int, initial_state: int) -> _olist_int:
current_state = initial_state
value = [initial_state]
for _ in range(steps):
d = mc.p[current_state, :]
d_max = _np.argwhere(d == _np.max(d))
if d_max.size > 1:
return None
current_state = d_max.item()
value.append(current_state)
return value
def redistribute(mc: _tmc, steps: int, initial_status: _tarray, output_last: bool) -> _tredists:
value = _np.zeros((steps + 1, mc.size), dtype=float)
value[0, :] = initial_status
for i in range(1, steps + 1):
value[i, :] = value[i - 1, :].dot(mc.p)
value[i, :] /= _np.sum(value[i, :])
if output_last:
return value[-1]
value = [_np.ravel(distribution) for distribution in _np.split(value, value.shape[0])]
return value
def simulate(mc: _tmc, steps: int, initial_state: int, final_state: _oint, rng: _trand) -> _tlist_int:
current_state = initial_state
value = [initial_state]
for _ in range(steps):
w = mc.p[current_state, :]
current_state = rng.choice(mc.size, size=1, p=w).item()
value.append(current_state)
if final_state is not None and current_state == final_state:
break
return value
def walk_probability(mc: _tmc, walk: _tlist_int) -> float:
p = 0.0
for (i, j) in zip(walk[:-1], walk[1:]):
if mc.p[i, j] > 0.0:
p += _np.log(mc.p[i, j])
else:
p = -_np.inf
break
value = _np.exp(p)
return value
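# Usage sketch (illustrative, not part of the module): the helpers above only
# read `p` and `size` from the chain object, so any stand-in exposing those two
# attributes works; `_demo` below is a hypothetical helper.
def _demo() -> None:
    from types import SimpleNamespace
    chain = SimpleNamespace(p=_np.array([[0.9, 0.1], [0.2, 0.8]]), size=2)
    print(predict(chain, steps=3, initial_state=0))    # [0, 0, 0, 0]
    print(walk_probability(chain, [0, 0, 1]))             # 0.9 * 0.1
    rng = _np.random.default_rng(0)
    print(simulate(chain, steps=5, initial_state=0, final_state=None, rng=rng))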
|
[
"numpy.sum",
"numpy.log",
"numpy.ravel",
"numpy.zeros",
"numpy.split",
"numpy.max",
"numpy.exp"
] |
[((935, 979), 'numpy.zeros', '_np.zeros', (['(steps + 1, mc.size)'], {'dtype': 'float'}), '((steps + 1, mc.size), dtype=float)\n', (944, 979), True, 'import numpy as _np\n'), ((1990, 2000), 'numpy.exp', '_np.exp', (['p'], {}), '(p)\n', (1997, 2000), True, 'import numpy as _np\n'), ((1119, 1139), 'numpy.sum', '_np.sum', (['value[i, :]'], {}), '(value[i, :])\n', (1126, 1139), True, 'import numpy as _np\n'), ((1200, 1223), 'numpy.ravel', '_np.ravel', (['distribution'], {}), '(distribution)\n', (1209, 1223), True, 'import numpy as _np\n'), ((1244, 1276), 'numpy.split', '_np.split', (['value', 'value.shape[0]'], {}), '(value, value.shape[0])\n', (1253, 1276), True, 'import numpy as _np\n'), ((1900, 1919), 'numpy.log', '_np.log', (['mc.p[i, j]'], {}), '(mc.p[i, j])\n', (1907, 1919), True, 'import numpy as _np\n'), ((667, 677), 'numpy.max', '_np.max', (['d'], {}), '(d)\n', (674, 677), True, 'import numpy as _np\n')]
|
""" Test database backends """
from numpy.testing import *
from PyMC2 import Model, database
from PyMC2.examples import DisasterModel
class test_no_trace(NumpyTestCase):
def check(self):
M = Model(DisasterModel, db='no_trace')
M.sample(1000,500,2, verbose=False)
try:
assert_equal(M.e.trace().shape, (0,))
except AttributeError:
pass
class test_ram(NumpyTestCase):
def check(self):
M = Model(DisasterModel, db='ram')
M.sample(300,100,2, verbose=False)
assert_equal(M.e.trace().shape, (150,))
class test_txt(NumpyTestCase):
def check(self):
M = Model(DisasterModel, db='txt')
M.sample(300,100,2, verbose=False)
assert_equal(M.e.trace().shape, (150,))
class test_mysql(NumpyTestCase):
def check(self):
M = Model(DisasterModel, db='mysql')
M.sample(300,100,2, verbose=False)
class test_sqlite(NumpyTestCase):
def check(self):
M = Model(DisasterModel, db='sqlite')
M.sample(300,100,2, verbose=False)
class test_hdf5(NumpyTestCase):
def check(self):
M = Model(DisasterModel, db='hdf5')
M.sample(300,100,2, verbose=False)
class test_hdf5_tables(NumpyTestCase):
def check(self):
S = Sampler(DisasterModel, db='hdf5_tables')
S.sample(300,100,2, verbose=False)
assert_array_equal(S.e.trace().shape, (150,))
S.db.close()
if __name__ == '__main__':
NumpyTest().run()
|
[
"PyMC2.Model"
] |
[((206, 241), 'PyMC2.Model', 'Model', (['DisasterModel'], {'db': '"""no_trace"""'}), "(DisasterModel, db='no_trace')\n", (211, 241), False, 'from PyMC2 import Model, database\n'), ((470, 500), 'PyMC2.Model', 'Model', (['DisasterModel'], {'db': '"""ram"""'}), "(DisasterModel, db='ram')\n", (475, 500), False, 'from PyMC2 import Model, database\n'), ((661, 691), 'PyMC2.Model', 'Model', (['DisasterModel'], {'db': '"""txt"""'}), "(DisasterModel, db='txt')\n", (666, 691), False, 'from PyMC2 import Model, database\n'), ((850, 882), 'PyMC2.Model', 'Model', (['DisasterModel'], {'db': '"""mysql"""'}), "(DisasterModel, db='mysql')\n", (855, 882), False, 'from PyMC2 import Model, database\n'), ((998, 1031), 'PyMC2.Model', 'Model', (['DisasterModel'], {'db': '"""sqlite"""'}), "(DisasterModel, db='sqlite')\n", (1003, 1031), False, 'from PyMC2 import Model, database\n'), ((1145, 1176), 'PyMC2.Model', 'Model', (['DisasterModel'], {'db': '"""hdf5"""'}), "(DisasterModel, db='hdf5')\n", (1150, 1176), False, 'from PyMC2 import Model, database\n')]
|
# Generated by Django 3.0.8 on 2020-08-11 08:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth_access_admin', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='familymember',
name='address',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='auth_access_admin.Address'),
preserve_default=False,
),
migrations.AlterField(
model_name='address',
name='city_name',
field=models.CharField(max_length=100, verbose_name='Ville'),
),
migrations.AlterField(
model_name='address',
name='number',
field=models.PositiveIntegerField(blank=True, verbose_name='Numéro'),
),
migrations.AlterField(
model_name='address',
name='place_name',
field=models.CharField(max_length=100, verbose_name='nom de voie'),
),
migrations.AlterField(
model_name='address',
name='place_type',
field=models.CharField(max_length=20, verbose_name='Type de voie'),
),
migrations.AlterField(
model_name='address',
name='postal_code',
field=models.PositiveIntegerField(verbose_name='Code postal'),
),
migrations.AlterField(
model_name='address',
name='remarks',
field=models.CharField(max_length=200, verbose_name='Compléments'),
),
migrations.AlterField(
model_name='employee',
name='Is_manager',
field=models.BooleanField(verbose_name='Direction'),
),
migrations.AlterField(
model_name='employee',
name='diploma',
field=models.CharField(max_length=100, verbose_name='Plus haut diplôme obtenu'),
),
migrations.AlterField(
model_name='employee',
name='employee_contract',
field=models.ImageField(upload_to='e_contracts', verbose_name='Scanner du contrat de travail'),
),
migrations.AlterField(
model_name='employee',
name='employee_nr',
field=models.PositiveSmallIntegerField(primary_key=True, serialize=False, verbose_name="Numéro d'employé"),
),
migrations.AlterField(
model_name='employee',
name='occupation',
field=models.CharField(max_length=100, verbose_name='Métier'),
),
migrations.AlterField(
model_name='familymember',
name='IdScan',
field=models.ImageField(upload_to='ids', verbose_name="Pièce d'identité"),
),
migrations.AlterField(
model_name='familymember',
name='phone',
field=models.CharField(max_length=14, verbose_name="Téléphone d'urgence"),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.BooleanField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.ImageField"
] |
[((373, 482), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(0)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""auth_access_admin.Address"""'}), "(default=0, on_delete=django.db.models.deletion.CASCADE,\n to='auth_access_admin.Address')\n", (390, 482), False, 'from django.db import migrations, models\n'), ((640, 694), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Ville"""'}), "(max_length=100, verbose_name='Ville')\n", (656, 694), False, 'from django.db import migrations, models\n'), ((817, 879), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'verbose_name': '"""Numéro"""'}), "(blank=True, verbose_name='Numéro')\n", (844, 879), False, 'from django.db import migrations, models\n'), ((1006, 1066), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""nom de voie"""'}), "(max_length=100, verbose_name='nom de voie')\n", (1022, 1066), False, 'from django.db import migrations, models\n'), ((1193, 1253), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'verbose_name': '"""Type de voie"""'}), "(max_length=20, verbose_name='Type de voie')\n", (1209, 1253), False, 'from django.db import migrations, models\n'), ((1381, 1436), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Code postal"""'}), "(verbose_name='Code postal')\n", (1408, 1436), False, 'from django.db import migrations, models\n'), ((1560, 1620), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Compléments"""'}), "(max_length=200, verbose_name='Compléments')\n", (1576, 1620), False, 'from django.db import migrations, models\n'), ((1748, 1793), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""Direction"""'}), "(verbose_name='Direction')\n", (1767, 1793), False, 'from django.db import migrations, models\n'), ((1918, 1991), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Plus haut diplôme obtenu"""'}), "(max_length=100, verbose_name='Plus haut diplôme obtenu')\n", (1934, 1991), False, 'from django.db import migrations, models\n'), ((2126, 2219), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""e_contracts"""', 'verbose_name': '"""Scanner du contrat de travail"""'}), "(upload_to='e_contracts', verbose_name=\n 'Scanner du contrat de travail')\n", (2143, 2219), False, 'from django.db import migrations, models\n'), ((2343, 2447), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""Numéro d\'employé"""'}), '(primary_key=True, serialize=False,\n verbose_name="Numéro d\'employé")\n', (2375, 2447), False, 'from django.db import migrations, models\n'), ((2571, 2626), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Métier"""'}), "(max_length=100, verbose_name='Métier')\n", (2587, 2626), False, 'from django.db import migrations, models\n'), ((2754, 2821), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""ids"""', 'verbose_name': '"""Pièce d\'identité"""'}), '(upload_to=\'ids\', verbose_name="Pièce d\'identité")\n', (2771, 2821), False, 'from django.db import migrations, models\n'), ((2948, 3015), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'verbose_name': '"""Téléphone d\'urgence"""'}), '(max_length=14, verbose_name="Téléphone d\'urgence")\n', (2964, 3015), False, 'from django.db import migrations, models\n')]
|
# This example demonstrates a prototype of the update model that Ryan, Alex, and Chip discussed.
################################################################################################################################
# Everything here would be part of a DH library
################################################################################################################################
import numpy as np
from deephaven import QueryScope
#TODO: this should be java
class IndexSet:
def __init__(self, max_size):
self.current = -1
self.idx = np.zeros(max_size, dtype=np.int64)
def clear(self):
self.current = -2
self.idx = None
def add(self, kk):
        if self.current + 1 >= self.idx.size:  # the set already holds max_size indices
raise Exception("Adding more indices than can fit")
self.current += 1
self.idx[self.current] = kk
def __len__(self):
return self.current + 1
def __getitem__(self, i):
if i >= len(self):
raise Exception("Index out of bounds")
return self.idx[i]
#TODO: this should be java
class Gatherer:
def __init__(self, batch_size):
self.batch_size = batch_size
self.current = None
def clear(self):
self.current = None
def gather(self, kk):
        if self.current is None or len(self.current) >= self.batch_size:
self.current = IndexSet(self.batch_size)
self.current.add(kk)
return self.current
#TODO: this should probably be java
class Future:
def __init__(self, func, index_set):
self.func = func
self.index_set = index_set
self.called = False
self.result = None
def clear(self):
self.func = None
self.index_set = None
self.result = None
def get(self):
if not self.called:
# data should be gathered here and then passed to model instead of doing it all in one go. That means were
# going to need to get the input objects here somehow, and I think that complicates things quite a bit.
# self.func gets passed an index set, but I think it should get passed the gathered data.
# otherwise, how does the interface not change?
self.result = self.func(self.index_set)
self.index_set.clear()
self.called = True
return self.result
#TODO: this should probably be java
class Computer:
def __init__(self, func):
self.func = func
self.futures = {}
def compute(self, index_set):
if index_set in self.futures:
return self.futures[index_set]
f = Future(self.func, index_set)
        self.futures[index_set] = f
return f
#TODO: this should be java
class Scatterer:
def __init__(self, batch_size, scatter_func):
self.batch_size = batch_size
self.count = -1
self.scatter_func = scatter_func
def clear(self):
self.count = -1
def scatter(self, data):
self.count += 1
offset = self.count % self.batch_size
return self.scatter_func(data, offset)
def do_magic(table, model, scatter_func, batch_size):
#TODO: horrible hack
def gather_it(index_set):
print("Calling gather_it")
data = np.zeros([len(index_set), 3], dtype=np.float64)
for i in range(len(index_set)):
data[i,0] = table.getColumnSource("A", index_set[i])
data[i,1] = table.getColumnSource("B", index_set[i])
data[i,2] = table.getColumnSource("C", index_set[i])
return data
#TODO: horrible hack
def eval_func(index_set):
print("Calling eval_func")
data = gather_it(index_set)
return model(data)
gatherer = Gatherer(batch_size)
computer = Computer(eval_func)
scatterer_x = Scatterer(batch_size, scatter_func)
#TODO: python is having major problems. It doesn't resolve these variables inside of a function, and when you try to add them, it complains they aren't java
#TODO: may need to implement this function in Java as well to avoid some problems. Right now, it doesn't run.
QueryScope.addParam("gatherer", gatherer)
QueryScope.addParam("computer", computer)
QueryScope.addParam("scatterer_x", scatterer_x)
def cleanup(future):
gatherer.clear()
computer.clear()
future.clear()
scatterer_x.clear()
return table.update("IndexSet = gatherer.gather(kk)", "Future = computer.compute(IndexSet)", "X = (double) scatterer_x.scatter(Future.get())", "Clean = cleanup(Future)") \
.dropColumns("IndexSet", "Future", "Clean")
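# How do_magic wires the helpers together (per row, in row order):
#   - gatherer.gather(kk) appends the row key to the current IndexSet and starts a
#     fresh set every batch_size rows;
#   - computer.compute(IndexSet) hands back one memoized Future per IndexSet, so
#     eval_func runs once per batch, the first time Future.get() is called;
#   - scatterer_x.scatter(...) picks this row's value out of the batch result via
#     the row's offset within its batch.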
################################################################################################################################
# Everything here would be part of user code
################################################################################################################################
def model(data):
return np.sum(data, axis=1)
def scatter(data,i):
return data[i]
from deephaven.TableTools import timeTable
source = timeTable("00:00:01").update("A=i", "B=sqrt(i)", "C=i*i")
batch_size = 10
result = do_magic(source, model, scatter, batch_size)
|
[
"numpy.zeros",
"deephaven.TableTools.timeTable",
"numpy.sum",
"deephaven.QueryScope.addParam"
] |
[((4181, 4222), 'deephaven.QueryScope.addParam', 'QueryScope.addParam', (['"""gatherer"""', 'gatherer'], {}), "('gatherer', gatherer)\n", (4200, 4222), False, 'from deephaven import QueryScope\n'), ((4227, 4268), 'deephaven.QueryScope.addParam', 'QueryScope.addParam', (['"""computer"""', 'computer'], {}), "('computer', computer)\n", (4246, 4268), False, 'from deephaven import QueryScope\n'), ((4273, 4320), 'deephaven.QueryScope.addParam', 'QueryScope.addParam', (['"""scatterer_x"""', 'scatterer_x'], {}), "('scatterer_x', scatterer_x)\n", (4292, 4320), False, 'from deephaven import QueryScope\n'), ((5012, 5032), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (5018, 5032), True, 'import numpy as np\n'), ((579, 613), 'numpy.zeros', 'np.zeros', (['max_size'], {'dtype': 'np.int64'}), '(max_size, dtype=np.int64)\n', (587, 613), True, 'import numpy as np\n'), ((5128, 5149), 'deephaven.TableTools.timeTable', 'timeTable', (['"""00:00:01"""'], {}), "('00:00:01')\n", (5137, 5149), False, 'from deephaven.TableTools import timeTable\n')]
|
import pandas as pd # type: ignore
import json
from datetime import datetime
from json import JSONEncoder
from typing import Any, Dict, List, Union, TYPE_CHECKING
from aat.config import Side
from aat.core import Order, Trade, Instrument, ExchangeType, Position
if TYPE_CHECKING:
from aat.strategy import Strategy
class _Serializer(JSONEncoder):
def default(self, obj: Any) -> Union[dict, float]:
if isinstance(obj, (Trade, Instrument, Position, ExchangeType)):
return obj.json()
elif isinstance(obj, datetime):
return obj.timestamp()
else:
raise Exception("Unknown type: {} - {}".format(type(obj), obj))
class Portfolio(object):
"""The portfolio object keeps track of a collection of positions attributed
to a collection of strategies"""
def __init__(self) -> None:
# Track prices over time
self._prices: Dict = {}
# Track trades
self._trades: Dict[Instrument, List[Trade]] = {}
# List of all running strategies
self._strategies: List[str] = []
# Cash on hand
self._cash: List[Position] = []
# Track active positions by instrument
self._active_positions_by_instrument: Dict[Instrument, List[Position]] = {}
# Track active positions by strategy and instrument
self._active_positions_by_strategy: Dict[str, Dict[Instrument, Position]] = {}
# *****************
# Manager Methods #
# *****************
def updateStrategies(self, strategies: List) -> None:
"""update with list of strategies"""
self._strategies.extend([s.name() for s in strategies])
for strategy in self._strategies:
self._active_positions_by_strategy[strategy] = {}
def updateAccount(self, positions: List[Position]) -> None:
"""update positions tracking with a position from the exchange"""
options = {i: s for i, s in enumerate(self._strategies)}
if positions:
print("Attribute positions:")
for position in positions:
print("Position:\n{}".format(position))
try:
choice = int(
input("Select a strategy to attribute to:\n{}\n...".format(options))
)
except KeyboardInterrupt:
raise
except BaseException:
print("Ignoring position...")
continue
if choice not in options:
print("Ignoring position...")
continue
else:
print("Attributing to strategy: {}".format(options[choice]))
self._active_positions_by_instrument[position.instrument] = [position]
if (
position.instrument
not in self._active_positions_by_strategy[options[choice]]
):
self._active_positions_by_strategy[options[choice]] = {}
self._active_positions_by_strategy[options[choice]][
position.instrument
] = position
def updateCash(self, positions: List[Position]) -> None:
"""update cash positions from exchange"""
self._cash.extend(positions)
def newPosition(self, trade: Trade, strategy: "Strategy") -> None:
my_order: Order = trade.my_order
if (
trade.instrument in self._active_positions_by_instrument
and strategy.name() in self._active_positions_by_strategy
and trade.instrument in self._active_positions_by_strategy[strategy.name()]
):
# update position
cur_pos = self._active_positions_by_strategy[strategy.name()][
trade.instrument
]
cur_pos.trades.append(trade)
# TODO update notional/size/price etc
prev_size: float = cur_pos.size
prev_price: float = cur_pos.price
prev_notional: float = prev_size * prev_price
cur_pos.size = (
cur_pos.size # type: ignore # TODO why is this flagging
+ (
my_order.volume
if my_order.side == Side.BUY
else -1 * my_order.volume
),
trade.timestamp,
)
if (prev_size >= 0 and cur_pos.size > prev_size) or (
prev_size <= 0 and cur_pos.size < prev_size
): # type: ignore
# increasing position size
# update average price
cur_pos.price = (
(prev_notional + (my_order.volume * trade.price)) / cur_pos.size, # type: ignore # TODO why is this flagging
trade.timestamp,
)
elif (prev_size > 0 and cur_pos.size < 0) or (
prev_size < 0 and cur_pos.size > 0
): # type: ignore
# decreasing position size in one direction, increasing position size in other
# update realized pnl
pnl = prev_size * (trade.price - prev_price)
cur_pos.pnl = (
cur_pos.pnl + pnl, # type: ignore # TODO why is this flagging
trade.timestamp,
) # update realized pnl with closing position
# deduct from unrealized pnl
cur_pos.unrealizedPnl = (cur_pos.unrealizedPnl - pnl, trade.timestamp) # type: ignore # TODO why is this flagging
# update average price
cur_pos.price = (trade.price, trade.timestamp) # type: ignore # TODO why is this flagging
else:
# decreasing position size
# update realized pnl
pnl = prev_size * (trade.price - prev_price)
cur_pos.pnl = (
cur_pos.pnl + pnl, # type: ignore # TODO why is this flagging
trade.timestamp,
) # update realized pnl with closing position
# deduct from unrealized pnl
cur_pos.unrealizedPnl = (cur_pos.unrealizedPnl - pnl, trade.timestamp) # type: ignore # TODO why is this flagging
# TODO close if side is 0?
else:
# If strategy has no positions yet, make a new dict
if strategy.name() not in self._active_positions_by_strategy:
self._active_positions_by_strategy[strategy.name()] = {}
# if not tracking instrument yet, add
if trade.instrument not in self._active_positions_by_instrument:
self._active_positions_by_instrument[trade.instrument] = []
# Map position in by strategy
self._active_positions_by_strategy[strategy.name()][
trade.instrument
] = Position(
price=trade.price,
size=trade.volume,
timestamp=trade.timestamp,
instrument=trade.instrument,
exchange=trade.exchange,
trades=[trade],
)
# map a single position by instrument
self._active_positions_by_instrument[trade.instrument].append(
self._active_positions_by_strategy[strategy.name()][trade.instrument]
)
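    # Worked example of the bookkeeping above (illustrative numbers only):
    #   buy 10 @ 100        -> new position: size 10, average price 100
    #   buy 10 more @ 110   -> increasing: size 20, average price (10*100 + 10*110) / 20 = 105
    #   sell 5 @ 120        -> decreasing: size 15, realized pnl grows by
    #                          prev_size * (120 - 105) (note the rule uses the full
    #                          previous size, not the traded volume) and the same
    #                          amount leaves unrealized pnl; the average price stays 105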
def onTrade(self, trade: Trade) -> None:
if trade.instrument in self._active_positions_by_instrument:
for pos in self._active_positions_by_instrument[trade.instrument]:
pos.unrealizedPnl = (
pos.size * (trade.price - pos.price), # type: ignore # TODO why is this flagging
trade.timestamp,
)
pos.pnl = (pos.pnl, trade.timestamp) # type: ignore # TODO why is this flagging
pos.instrumentPrice = (trade.price, trade.timestamp) # type: ignore # TODO why is this flagging
if trade.instrument not in self._prices:
self._prices[trade.instrument] = [(trade.price, trade.timestamp)]
self._trades[trade.instrument] = [trade]
else:
self._prices[trade.instrument].append((trade.price, trade.timestamp))
self._trades[trade.instrument].append(trade)
def onTraded(self, trade: Trade, strategy: "Strategy") -> None:
self.newPosition(trade, strategy)
# ******************
# Strategy Methods #
# ******************
def positions(
self,
strategy: "Strategy",
instrument: Instrument = None,
exchange: ExchangeType = None,
) -> List[Position]:
ret = {}
for position in self._active_positions_by_strategy.get(
strategy.name(), {}
).values():
if instrument and position.instrument != instrument:
# Skip if not asking for this instrument
continue
if exchange and position.exchange != exchange:
# Skip if not asking for this exchange
continue
ret[position.instrument] = position
return list(ret.values())
def allPositions(
self, instrument: Instrument = None, exchange: ExchangeType = None
) -> List[Position]:
ret = {}
for position_list in self._active_positions_by_instrument.values():
for position in position_list:
if instrument and position.instrument != instrument:
# Skip if not asking for this instrument
continue
if exchange and position.exchange != exchange:
# Skip if not asking for this exchange
continue
if position.instrument not in ret:
ret[position.instrument] = position
else:
ret[position.instrument] += position
return list(ret.values())
def priceHistory(self, instrument: Instrument = None) -> Union[pd.DataFrame, dict]:
if instrument:
return pd.DataFrame(
self._prices[instrument], columns=[instrument.name, "when"]
)
return {
i: pd.DataFrame(self._prices[i], columns=[i.name, "when"])
for i in self._prices
}
def _constructDf(
self, dfs: List[pd.DataFrame], drop_duplicates: bool = True
) -> pd.DataFrame:
# join along time axis
if dfs:
df = pd.concat(dfs, sort=True)
df.sort_index(inplace=True)
df = df.groupby(df.index).last()
if drop_duplicates:
df.drop_duplicates(inplace=True)
df.fillna(method="ffill", inplace=True)
else:
df = pd.DataFrame()
return df
def getPnl(self, strategy: "Strategy") -> pd.DataFrame:
portfolio = []
pnl_cols = []
total_pnl_cols = []
for position in self.positions(strategy):
instrument = position.instrument
#######
# Pnl #
#######
total_pnl_col = "pnl:{}".format(instrument.name)
unrealized_pnl_col = "ur:{}".format(instrument.name)
pnl_cols.append(unrealized_pnl_col)
unrealized_pnl_history = pd.DataFrame(
position.unrealizedPnlHistory, columns=[unrealized_pnl_col, "when"]
)
unrealized_pnl_history.set_index("when", inplace=True)
realized_pnl_col = "r:{}".format(instrument.name)
pnl_cols.append(realized_pnl_col)
realized_pnl_history = pd.DataFrame(
position.pnlHistory, columns=[realized_pnl_col, "when"]
)
realized_pnl_history.set_index("when", inplace=True)
unrealized_pnl_history[realized_pnl_col] = realized_pnl_history[
realized_pnl_col
]
unrealized_pnl_history[total_pnl_col] = unrealized_pnl_history.sum(axis=1)
total_pnl_cols.append(total_pnl_col)
portfolio.append(unrealized_pnl_history)
df_pnl = self._constructDf(
portfolio, drop_duplicates=False
) # dont drop duplicates
################
# calculations #
################
# calculate total pnl
df_pnl["alpha"] = df_pnl[
[c for c in df_pnl.columns if c.startswith("pnl:")]
].sum(axis=1)
return df_pnl
def getPnlAll(self) -> pd.DataFrame:
portfolio = []
pnl_cols = []
total_pnl_cols = []
for position in self.allPositions():
instrument = position.instrument
#######
# Pnl #
#######
total_pnl_col = "pnl:{}".format(instrument.name)
unrealized_pnl_col = "ur:{}".format(instrument.name)
pnl_cols.append(unrealized_pnl_col)
unrealized_pnl_history = pd.DataFrame(
position.unrealizedPnlHistory, columns=[unrealized_pnl_col, "when"]
)
unrealized_pnl_history.set_index("when", inplace=True)
realized_pnl_col = "r:{}".format(instrument.name)
pnl_cols.append(realized_pnl_col)
realized_pnl_history = pd.DataFrame(
position.pnlHistory, columns=[realized_pnl_col, "when"]
)
realized_pnl_history.set_index("when", inplace=True)
unrealized_pnl_history[realized_pnl_col] = realized_pnl_history[
realized_pnl_col
]
unrealized_pnl_history[total_pnl_col] = unrealized_pnl_history.sum(axis=1)
total_pnl_cols.append(total_pnl_col)
portfolio.append(unrealized_pnl_history)
df_pnl = self._constructDf(
portfolio, drop_duplicates=False
        ) # don't drop duplicates
################
# calculations #
################
# calculate total pnl
df_pnl["alpha"] = df_pnl[
[c for c in df_pnl.columns if c.startswith("pnl:")]
].sum(axis=1)
return df_pnl
def getInstruments(self, strategy: "Strategy") -> None:
raise NotImplementedError()
def getPrice(self) -> pd.DataFrame:
portfolio = []
price_cols = []
for instrument, price_history in self.priceHistory().items():
#########
# Price #
#########
price_col = instrument.name
price_cols.append(price_col)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)
def getAssetPrice(self, strategy: "Strategy") -> pd.DataFrame:
portfolio = []
price_cols = []
for position in self.allPositions():
instrument = position.instrument
#########
# Price #
#########
price_col = instrument.name
price_cols.append(price_col)
price_history = pd.DataFrame(
position.instrumentPriceHistory, columns=[price_col, "when"]
)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)
def getSize(self, strategy: "Strategy") -> pd.DataFrame:
portfolio = []
size_cols = []
for position in self.positions(strategy):
instrument = position.instrument
#################
# Position Size #
#################
size_col = "s:{}".format(instrument.name)
size_cols.append(size_col)
size_history = pd.DataFrame(
position.sizeHistory, columns=[size_col, "when"]
)
size_history.set_index("when", inplace=True)
portfolio.append(size_history)
price_col = instrument.name
price_history = pd.DataFrame(
position.instrumentPriceHistory, columns=[price_col, "when"]
)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)[size_cols]
def getSizeAll(self) -> pd.DataFrame:
portfolio = []
size_cols = []
for position in self.allPositions():
instrument = position.instrument
#################
# Position Size #
#################
size_col = "s:{}".format(instrument.name)
size_cols.append(size_col)
size_history = pd.DataFrame(
position.sizeHistory, columns=[size_col, "when"]
)
size_history.set_index("when", inplace=True)
portfolio.append(size_history)
price_col = instrument.name
price_history = pd.DataFrame(
position.instrumentPriceHistory, columns=[price_col, "when"]
)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)[size_cols]
def getNotional(self, strategy: "Strategy") -> pd.DataFrame:
portfolio = []
notional_cols = []
for position in self.positions(strategy):
instrument = position.instrument
            ############
            # Notional #
            ############
notional_col = "n:{}".format(instrument.name)
notional_cols.append(notional_col)
notional_history = pd.DataFrame(
position.notionalHistory, columns=[notional_col, "when"]
)
notional_history.set_index("when", inplace=True)
portfolio.append(notional_history)
price_col = instrument.name
price_history = pd.DataFrame(
position.instrumentPriceHistory, columns=[price_col, "when"]
)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)[notional_cols]
def getNotionalAll(
self,
) -> pd.DataFrame:
portfolio = []
notional_cols = []
for position in self.allPositions():
instrument = position.instrument
            ############
            # Notional #
            ############
notional_col = "n:{}".format(instrument.name)
notional_cols.append(notional_col)
notional_history = pd.DataFrame(
position.notionalHistory, columns=[notional_col, "when"]
)
notional_history.set_index("when", inplace=True)
portfolio.append(notional_history)
price_col = instrument.name
price_history = pd.DataFrame(
position.instrumentPriceHistory, columns=[price_col, "when"]
)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)[notional_cols]
def getInvestment(self, strategy: "Strategy") -> pd.DataFrame:
portfolio = []
investment_cols = []
for position in self.positions(strategy):
instrument = position.instrument
            ##############
            # Investment #
            ##############
investment_col = "i:{}".format(instrument.name)
investment_cols.append(investment_col)
investment_history = pd.DataFrame(
position.investmentHistory, columns=[investment_col, "when"]
)
investment_history.set_index("when", inplace=True)
portfolio.append(investment_history)
price_col = instrument.name
price_history = pd.DataFrame(
position.instrumentPriceHistory, columns=[price_col, "when"]
)
price_history.set_index("when", inplace=True)
portfolio.append(price_history)
return self._constructDf(portfolio)[investment_cols]
def save(self, filename_prefix: str) -> None:
with open("{}.prices.json".format(filename_prefix), "w") as fp:
json.dump(
{json.dumps(k.json()): v for k, v in self._prices.items()},
fp,
cls=_Serializer,
)
with open("{}.trades.json".format(filename_prefix), "w") as fp:
json.dump(
{json.dumps(k.json()): v for k, v in self._trades.items()},
fp,
cls=_Serializer,
)
with open("{}.active_by_inst.json".format(filename_prefix), "w") as fp:
json.dump(
{
json.dumps(k.json()): v
for k, v in self._active_positions_by_instrument.items()
},
fp,
cls=_Serializer,
)
with open("{}.active_by_strat.json".format(filename_prefix), "w") as fp:
json.dump(
{
k: {json.dumps(kk.json()): vv for kk, vv in v.items()}
for k, v in self._active_positions_by_strategy.items()
},
fp,
cls=_Serializer,
)
def restore(self, filename_prefix: str) -> None:
with open("{}.prices.json".format(filename_prefix), "r") as fp:
jsn = json.load(fp)
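            # keys are JSON-encoded Instruments; values are (price, unix timestamp) pairs converted back to datetimes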
self._prices = {
Instrument.fromJson(json.loads(k)): [
(p1, datetime.fromtimestamp(p2)) for p1, p2 in v
]
for k, v in jsn.items()
}
with open("{}.trades.json".format(filename_prefix), "r") as fp:
jsn = json.load(fp)
self._trades = {
Instrument.fromJson(json.loads(k)): [Trade.fromJson(x) for x in v]
for k, v in jsn.items()
}
with open("{}.active_by_inst.json".format(filename_prefix), "r") as fp:
jsn = json.load(fp)
self._active_positions_by_instrument = {
Instrument.fromJson(json.loads(k)): [Position.fromJson(vv) for vv in v]
for k, v in jsn.items()
}
with open("{}.active_by_strat.json".format(filename_prefix), "r") as fp:
jsn = json.load(fp)
self._active_positions_by_strategy = {
k: {
Instrument.fromJson(json.loads(kk)): Position.fromJson(vv)
for kk, vv in v.items()
}
for k, v in jsn.items()
}
|
[
"pandas.DataFrame",
"json.load",
"json.loads",
"aat.core.Position",
"aat.core.Position.fromJson",
"datetime.datetime.fromtimestamp",
"aat.core.Trade.fromJson",
"pandas.concat"
] |
[((6856, 7003), 'aat.core.Position', 'Position', ([], {'price': 'trade.price', 'size': 'trade.volume', 'timestamp': 'trade.timestamp', 'instrument': 'trade.instrument', 'exchange': 'trade.exchange', 'trades': '[trade]'}), '(price=trade.price, size=trade.volume, timestamp=trade.timestamp,\n instrument=trade.instrument, exchange=trade.exchange, trades=[trade])\n', (6864, 7003), False, 'from aat.core import Order, Trade, Instrument, ExchangeType, Position\n'), ((10051, 10124), 'pandas.DataFrame', 'pd.DataFrame', (['self._prices[instrument]'], {'columns': "[instrument.name, 'when']"}), "(self._prices[instrument], columns=[instrument.name, 'when'])\n", (10063, 10124), True, 'import pandas as pd\n'), ((10187, 10242), 'pandas.DataFrame', 'pd.DataFrame', (['self._prices[i]'], {'columns': "[i.name, 'when']"}), "(self._prices[i], columns=[i.name, 'when'])\n", (10199, 10242), True, 'import pandas as pd\n'), ((10465, 10490), 'pandas.concat', 'pd.concat', (['dfs'], {'sort': '(True)'}), '(dfs, sort=True)\n', (10474, 10490), True, 'import pandas as pd\n'), ((10742, 10756), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10754, 10756), True, 'import pandas as pd\n'), ((11276, 11361), 'pandas.DataFrame', 'pd.DataFrame', (['position.unrealizedPnlHistory'], {'columns': "[unrealized_pnl_col, 'when']"}), "(position.unrealizedPnlHistory, columns=[unrealized_pnl_col,\n 'when'])\n", (11288, 11361), True, 'import pandas as pd\n'), ((11599, 11668), 'pandas.DataFrame', 'pd.DataFrame', (['position.pnlHistory'], {'columns': "[realized_pnl_col, 'when']"}), "(position.pnlHistory, columns=[realized_pnl_col, 'when'])\n", (11611, 11668), True, 'import pandas as pd\n'), ((12919, 13004), 'pandas.DataFrame', 'pd.DataFrame', (['position.unrealizedPnlHistory'], {'columns': "[unrealized_pnl_col, 'when']"}), "(position.unrealizedPnlHistory, columns=[unrealized_pnl_col,\n 'when'])\n", (12931, 13004), True, 'import pandas as pd\n'), ((13242, 13311), 'pandas.DataFrame', 'pd.DataFrame', (['position.pnlHistory'], {'columns': "[realized_pnl_col, 'when']"}), "(position.pnlHistory, columns=[realized_pnl_col, 'when'])\n", (13254, 13311), True, 'import pandas as pd\n'), ((15014, 15088), 'pandas.DataFrame', 'pd.DataFrame', (['position.instrumentPriceHistory'], {'columns': "[price_col, 'when']"}), "(position.instrumentPriceHistory, columns=[price_col, 'when'])\n", (15026, 15088), True, 'import pandas as pd\n'), ((15679, 15741), 'pandas.DataFrame', 'pd.DataFrame', (['position.sizeHistory'], {'columns': "[size_col, 'when']"}), "(position.sizeHistory, columns=[size_col, 'when'])\n", (15691, 15741), True, 'import pandas as pd\n'), ((15941, 16015), 'pandas.DataFrame', 'pd.DataFrame', (['position.instrumentPriceHistory'], {'columns': "[price_col, 'when']"}), "(position.instrumentPriceHistory, columns=[price_col, 'when'])\n", (15953, 16015), True, 'import pandas as pd\n'), ((16594, 16656), 'pandas.DataFrame', 'pd.DataFrame', (['position.sizeHistory'], {'columns': "[size_col, 'when']"}), "(position.sizeHistory, columns=[size_col, 'when'])\n", (16606, 16656), True, 'import pandas as pd\n'), ((16856, 16930), 'pandas.DataFrame', 'pd.DataFrame', (['position.instrumentPriceHistory'], {'columns': "[price_col, 'when']"}), "(position.instrumentPriceHistory, columns=[price_col, 'when'])\n", (16868, 16930), True, 'import pandas as pd\n'), ((17557, 17627), 'pandas.DataFrame', 'pd.DataFrame', (['position.notionalHistory'], {'columns': "[notional_col, 'when']"}), "(position.notionalHistory, columns=[notional_col, 'when'])\n", (17569, 17627), True, 'import 
pandas as pd\n'), ((17835, 17909), 'pandas.DataFrame', 'pd.DataFrame', (['position.instrumentPriceHistory'], {'columns': "[price_col, 'when']"}), "(position.instrumentPriceHistory, columns=[price_col, 'when'])\n", (17847, 17909), True, 'import pandas as pd\n'), ((18531, 18601), 'pandas.DataFrame', 'pd.DataFrame', (['position.notionalHistory'], {'columns': "[notional_col, 'when']"}), "(position.notionalHistory, columns=[notional_col, 'when'])\n", (18543, 18601), True, 'import pandas as pd\n'), ((18809, 18883), 'pandas.DataFrame', 'pd.DataFrame', (['position.instrumentPriceHistory'], {'columns': "[price_col, 'when']"}), "(position.instrumentPriceHistory, columns=[price_col, 'when'])\n", (18821, 18883), True, 'import pandas as pd\n'), ((19525, 19599), 'pandas.DataFrame', 'pd.DataFrame', (['position.investmentHistory'], {'columns': "[investment_col, 'when']"}), "(position.investmentHistory, columns=[investment_col, 'when'])\n", (19537, 19599), True, 'import pandas as pd\n'), ((19811, 19885), 'pandas.DataFrame', 'pd.DataFrame', (['position.instrumentPriceHistory'], {'columns': "[price_col, 'when']"}), "(position.instrumentPriceHistory, columns=[price_col, 'when'])\n", (19823, 19885), True, 'import pandas as pd\n'), ((21440, 21453), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (21449, 21453), False, 'import json\n'), ((21769, 21782), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (21778, 21782), False, 'import json\n'), ((22048, 22061), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (22057, 22061), False, 'import json\n'), ((22357, 22370), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (22366, 22370), False, 'import json\n'), ((21519, 21532), 'json.loads', 'json.loads', (['k'], {}), '(k)\n', (21529, 21532), False, 'import json\n'), ((21848, 21861), 'json.loads', 'json.loads', (['k'], {}), '(k)\n', (21858, 21861), False, 'import json\n'), ((21865, 21882), 'aat.core.Trade.fromJson', 'Trade.fromJson', (['x'], {}), '(x)\n', (21879, 21882), False, 'from aat.core import Order, Trade, Instrument, ExchangeType, Position\n'), ((22151, 22164), 'json.loads', 'json.loads', (['k'], {}), '(k)\n', (22161, 22164), False, 'import json\n'), ((22168, 22189), 'aat.core.Position.fromJson', 'Position.fromJson', (['vv'], {}), '(vv)\n', (22185, 22189), False, 'from aat.core import Order, Trade, Instrument, ExchangeType, Position\n'), ((22500, 22521), 'aat.core.Position.fromJson', 'Position.fromJson', (['vv'], {}), '(vv)\n', (22517, 22521), False, 'from aat.core import Order, Trade, Instrument, ExchangeType, Position\n'), ((21562, 21588), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['p2'], {}), '(p2)\n', (21584, 21588), False, 'from datetime import datetime\n'), ((22483, 22497), 'json.loads', 'json.loads', (['kk'], {}), '(kk)\n', (22493, 22497), False, 'import json\n')]
|
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.simple_article import SimpleArticle
from tests.classes.simple_language import SimpleLanguage
from tests.classes.simple_project import SimpleProject
from tests.classes.simple_chart import SimpleChart
from tests.classes.author import Author
from tests.classes.linked_profile import LinkedProfile
from tests.classes.linked_user import LinkedUser
class TestValidate(TestCase):
def test_validate_does_not_raise_and_returns_self_for_valid_object(self):
article = SimpleArticle(title='U Ia Huê', content='Bê Tshua Bo')
self.assertEqual(article, article.validate())
def test_validate_raises_if_object_is_not_valid(self):
article = SimpleArticle()
self.assertRaises(ValidationException, article.validate)
def test_is_valid_returns_false_if_object_is_not_valid(self):
article = SimpleArticle()
self.assertEqual(False, article.is_valid)
def test_is_valid_returns_true_if_object_is_valid(self):
article = SimpleArticle(title='U Ia Huê', content='Bê Tshua Bo')
self.assertEqual(True, article.is_valid)
def test_validate_by_default_validates_one_field(self):
article = SimpleArticle()
with self.assertRaises(ValidationException) as context:
article.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['title'],
"value required")
def test_validate_validates_all_fields_if_option_is_passed(self):
article = SimpleArticle()
with self.assertRaises(ValidationException) as context:
article.validate(all_fields=True)
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 2)
self.assertEqual(exception.keypath_messages['title'],
"value required")
self.assertEqual(exception.keypath_messages['content'],
"value required")
def test_validate_validates_all_fields_if_class_config_is_on(self):
language = SimpleLanguage()
with self.assertRaises(ValidationException) as context:
language.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 2)
self.assertEqual(exception.keypath_messages['name'],
"value required")
self.assertEqual(exception.keypath_messages['code'],
"value required")
def test_validate_validates_one_field_if_explicitly_specified(self):
language = SimpleLanguage()
with self.assertRaises(ValidationException) as context:
language.validate(all_fields=False)
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['name'],
"value required")
def test_validate_validates_one_list_field_by_default(self):
project = SimpleProject(name='Teo', attendees=['A', 'B', 'C', 'D'])
with self.assertRaises(ValidationException) as context:
project.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['attendees.0'],
"length of value is not greater than or equal 2")
def test_validate_validates_all_list_fields_if_required(self):
project = SimpleProject(name='Teo', attendees=['A', 'B', 'C', 'D'])
with self.assertRaises(ValidationException) as context:
project.validate(all_fields=True)
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 4)
self.assertEqual(exception.keypath_messages['attendees.0'],
"length of value is not greater than or equal 2")
self.assertEqual(exception.keypath_messages['attendees.1'],
"length of value is not greater than or equal 2")
self.assertEqual(exception.keypath_messages['attendees.2'],
"length of value is not greater than or equal 2")
self.assertEqual(exception.keypath_messages['attendees.3'],
"length of value is not greater than or equal 2")
def test_validate_validates_one_dict_field_by_default(self):
chart = SimpleChart(name='Teo', partitions={
'a': 2, 'b': 3, 'c': 4, 'd': 5})
with self.assertRaises(ValidationException) as context:
chart.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['partitions.a'],
"value is not less than or equal 1")
def test_validate_validates_all_dict_fields_if_required(self):
chart = SimpleChart(name='Teo', partitions={
'a': 2, 'b': 3, 'c': 4, 'd': 5})
with self.assertRaises(ValidationException) as context:
chart.validate(all_fields=True)
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 4)
self.assertEqual(exception.keypath_messages['partitions.a'],
"value is not less than or equal 1")
self.assertEqual(exception.keypath_messages['partitions.b'],
"value is not less than or equal 1")
self.assertEqual(exception.keypath_messages['partitions.c'],
"value is not less than or equal 1")
self.assertEqual(exception.keypath_messages['partitions.d'],
"value is not less than or equal 1")
def test_validate_validates_with_class_config_by_default(self):
author = Author(name='A', articles=[{}, {}])
with self.assertRaises(ValidationException) as context:
author.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 2)
self.assertEqual(exception.keypath_messages['articles.0.title'],
"value required")
self.assertEqual(exception.keypath_messages['articles.0.content'],
"value required")
def test_validate_validates_one_field_inside_nested_if_required(self):
author = Author(name='A', articles=[{}, {}])
with self.assertRaises(ValidationException) as context:
author.validate(all_fields=False)
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['articles.0.title'],
"value required")
def test_validate_validates_all_field_inside_nested_if_required(self):
author = Author(name='A', articles=[{}, {}])
with self.assertRaises(ValidationException) as context:
author.validate(all_fields=True)
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 4)
self.assertEqual(exception.keypath_messages['articles.0.title'],
"value required")
self.assertEqual(exception.keypath_messages['articles.0.content'],
"value required")
self.assertEqual(exception.keypath_messages['articles.1.title'],
"value required")
self.assertEqual(exception.keypath_messages['articles.1.content'],
"value required")
def test_validate_only_validate_modified_fields_for_non_new_object(self):
article = SimpleArticle(title='my', content='side')
article._mark_not_new()
article.content = None
article._modified_fields = []
article.validate(all_fields=True)
def test_validate_validates_linked_objects_anyway(self):
author = Author(name='<NAME>',
articles=[{'title': 'Khua Tioh Sê Kai',
'content': 'Ai Gua Tsuê'},
{'title': 'Thên Ha Si Lan Ê',
'content': 'Tsiu Ho Lang Tsai'}])
author._mark_not_new()
author.articles[0]._mark_not_new()
author.articles[1]._mark_not_new()
author.articles[0].content = None
with self.assertRaises(ValidationException) as context:
author.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['articles.0.content'],
"value required")
def test_validate_linked_objects_no_infinite_loop(self):
profile = LinkedProfile(name='<NAME>')
user = LinkedUser(name='<NAME>')
user.profile = profile
user.validate()
|
[
"tests.classes.author.Author",
"tests.classes.linked_profile.LinkedProfile",
"tests.classes.simple_language.SimpleLanguage",
"tests.classes.simple_article.SimpleArticle",
"tests.classes.simple_chart.SimpleChart",
"tests.classes.linked_user.LinkedUser",
"tests.classes.simple_project.SimpleProject"
] |
[((605, 659), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {'title': '"""U Ia Huê"""', 'content': '"""Bê Tshua Bo"""'}), "(title='U Ia Huê', content='Bê Tshua Bo')\n", (618, 659), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((792, 807), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {}), '()\n', (805, 807), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((958, 973), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {}), '()\n', (971, 973), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((1104, 1158), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {'title': '"""U Ia Huê"""', 'content': '"""Bê Tshua Bo"""'}), "(title='U Ia Huê', content='Bê Tshua Bo')\n", (1117, 1158), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((1287, 1302), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {}), '()\n', (1300, 1302), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((1691, 1706), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {}), '()\n', (1704, 1706), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((2220, 2236), 'tests.classes.simple_language.SimpleLanguage', 'SimpleLanguage', ([], {}), '()\n', (2234, 2236), False, 'from tests.classes.simple_language import SimpleLanguage\n'), ((2733, 2749), 'tests.classes.simple_language.SimpleLanguage', 'SimpleLanguage', ([], {}), '()\n', (2747, 2749), False, 'from tests.classes.simple_language import SimpleLanguage\n'), ((3149, 3206), 'tests.classes.simple_project.SimpleProject', 'SimpleProject', ([], {'name': '"""Teo"""', 'attendees': "['A', 'B', 'C', 'D']"}), "(name='Teo', attendees=['A', 'B', 'C', 'D'])\n", (3162, 3206), False, 'from tests.classes.simple_project import SimpleProject\n'), ((3630, 3687), 'tests.classes.simple_project.SimpleProject', 'SimpleProject', ([], {'name': '"""Teo"""', 'attendees': "['A', 'B', 'C', 'D']"}), "(name='Teo', attendees=['A', 'B', 'C', 'D'])\n", (3643, 3687), False, 'from tests.classes.simple_project import SimpleProject\n'), ((4551, 4619), 'tests.classes.simple_chart.SimpleChart', 'SimpleChart', ([], {'name': '"""Teo"""', 'partitions': "{'a': 2, 'b': 3, 'c': 4, 'd': 5}"}), "(name='Teo', partitions={'a': 2, 'b': 3, 'c': 4, 'd': 5})\n", (4562, 4619), False, 'from tests.classes.simple_chart import SimpleChart\n'), ((5040, 5108), 'tests.classes.simple_chart.SimpleChart', 'SimpleChart', ([], {'name': '"""Teo"""', 'partitions': "{'a': 2, 'b': 3, 'c': 4, 'd': 5}"}), "(name='Teo', partitions={'a': 2, 'b': 3, 'c': 4, 'd': 5})\n", (5051, 5108), False, 'from tests.classes.simple_chart import SimpleChart\n'), ((5939, 5974), 'tests.classes.author.Author', 'Author', ([], {'name': '"""A"""', 'articles': '[{}, {}]'}), "(name='A', articles=[{}, {}])\n", (5945, 5974), False, 'from tests.classes.author import Author\n'), ((6495, 6530), 'tests.classes.author.Author', 'Author', ([], {'name': '"""A"""', 'articles': '[{}, {}]'}), "(name='A', articles=[{}, {}])\n", (6501, 6530), False, 'from tests.classes.author import Author\n'), ((6949, 6984), 'tests.classes.author.Author', 'Author', ([], {'name': '"""A"""', 'articles': '[{}, {}]'}), "(name='A', articles=[{}, {}])\n", (6955, 6984), False, 'from tests.classes.author import Author\n'), ((7758, 7799), 'tests.classes.simple_article.SimpleArticle', 'SimpleArticle', ([], {'title': '"""my"""', 'content': '"""side"""'}), "(title='my', content='side')\n", (7771, 
7799), False, 'from tests.classes.simple_article import SimpleArticle\n'), ((8022, 8182), 'tests.classes.author.Author', 'Author', ([], {'name': '"""<NAME>"""', 'articles': "[{'title': 'Khua Tioh Sê Kai', 'content': 'Ai Gua Tsuê'}, {'title':\n 'Thên Ha Si Lan Ê', 'content': 'Tsiu Ho Lang Tsai'}]"}), "(name='<NAME>', articles=[{'title': 'Khua Tioh Sê Kai', 'content':\n 'Ai Gua Tsuê'}, {'title': 'Thên Ha Si Lan Ê', 'content':\n 'Tsiu Ho Lang Tsai'}])\n", (8028, 8182), False, 'from tests.classes.author import Author\n'), ((8853, 8881), 'tests.classes.linked_profile.LinkedProfile', 'LinkedProfile', ([], {'name': '"""<NAME>"""'}), "(name='<NAME>')\n", (8866, 8881), False, 'from tests.classes.linked_profile import LinkedProfile\n'), ((8897, 8922), 'tests.classes.linked_user.LinkedUser', 'LinkedUser', ([], {'name': '"""<NAME>"""'}), "(name='<NAME>')\n", (8907, 8922), False, 'from tests.classes.linked_user import LinkedUser\n')]
|
#
# Test for API infrastructure
#
import os
import multiprocessing as mp
import tempfile
import unittest
import osbuild
from osbuild.util import jsoncomm
class APITester(osbuild.api.BaseAPI):
"""Records the number of messages and if it got cleaned up"""
def __init__(self, sockaddr):
super().__init__(sockaddr)
self.clean = False
self.messages = 0
endpoint = "test-api"
def _message(self, msg, _fds, sock):
self.messages += 1
if msg["method"] == "echo":
msg["method"] = "reply"
sock.send(msg)
def _cleanup(self):
self.clean = True
class TestAPI(unittest.TestCase):
"""Check API infrastructure"""
def setUp(self):
self.tmp = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp.cleanup()
def test_basic(self):
# Basic API communication and cleanup checks
socket = os.path.join(self.tmp.name, "socket")
api = APITester(socket)
with api:
with jsoncomm.Socket.new_client(socket) as client:
req = {'method': 'echo', 'data': 'Hello'}
client.send(req)
msg, _, _ = client.recv()
self.assertEqual(msg["method"], "reply")
self.assertEqual(req["data"], msg["data"])
self.assertEqual(api.clean, True)
self.assertEqual(api.messages, 1)
# Assert proper cleanup
self.assertIsNone(api.thread)
self.assertIsNone(api.event_loop)
def test_reentrancy_guard(self):
socket = os.path.join(self.tmp.name, "socket")
api = APITester(socket)
with api:
with self.assertRaises(AssertionError):
with api:
pass
def test_exception(self):
# Check that 'api.exception' correctly sets 'API.exception'
tmpdir = self.tmp.name
path = os.path.join(tmpdir, "osbuild-api")
def exception(path):
with osbuild.api.exception_handler(path):
raise ValueError("osbuild test exception")
assert False, "api.exception should exit process"
api = osbuild.api.API(socket_address=path)
with api:
p = mp.Process(target=exception, args=(path, ))
p.start()
p.join()
self.assertEqual(p.exitcode, 2)
self.assertIsNotNone(api.error, "Error not set")
self.assertIn("type", api.error, "Error has no 'type' set")
self.assertEqual("exception", api.error["type"], "Not an exception")
e = api.error["data"]
for field in ("type", "value", "traceback"):
self.assertIn(field, e, f"Exception needs '{field}'")
self.assertEqual(e["value"], "osbuild test exception")
self.assertEqual(e["type"], "ValueError")
self.assertIn("exception", e["traceback"])
def test_metadata(self):
# Check that `api.metadata` leads to `API.metadata` being
# set correctly
tmpdir = self.tmp.name
path = os.path.join(tmpdir, "osbuild-api")
def metadata(path):
data = {"meta": "42"}
osbuild.api.metadata(data, path=path)
return 0
api = osbuild.api.API(socket_address=path)
with api:
p = mp.Process(target=metadata, args=(path, ))
p.start()
p.join()
self.assertEqual(p.exitcode, 0)
metadata = api.metadata # pylint: disable=no-member
assert metadata
self.assertEqual(metadata, {"meta": "42"})
|
[
"osbuild.api.API",
"osbuild.api.metadata",
"tempfile.TemporaryDirectory",
"osbuild.api.exception_handler",
"osbuild.util.jsoncomm.Socket.new_client",
"multiprocessing.Process",
"os.path.join"
] |
[((742, 771), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (769, 771), False, 'import tempfile\n'), ((922, 959), 'os.path.join', 'os.path.join', (['self.tmp.name', '"""socket"""'], {}), "(self.tmp.name, 'socket')\n", (934, 959), False, 'import os\n'), ((1575, 1612), 'os.path.join', 'os.path.join', (['self.tmp.name', '"""socket"""'], {}), "(self.tmp.name, 'socket')\n", (1587, 1612), False, 'import os\n'), ((1911, 1946), 'os.path.join', 'os.path.join', (['tmpdir', '"""osbuild-api"""'], {}), "(tmpdir, 'osbuild-api')\n", (1923, 1946), False, 'import os\n'), ((2167, 2203), 'osbuild.api.API', 'osbuild.api.API', ([], {'socket_address': 'path'}), '(socket_address=path)\n', (2182, 2203), False, 'import osbuild\n'), ((3046, 3081), 'os.path.join', 'os.path.join', (['tmpdir', '"""osbuild-api"""'], {}), "(tmpdir, 'osbuild-api')\n", (3058, 3081), False, 'import os\n'), ((3231, 3267), 'osbuild.api.API', 'osbuild.api.API', ([], {'socket_address': 'path'}), '(socket_address=path)\n', (3246, 3267), False, 'import osbuild\n'), ((2238, 2280), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'exception', 'args': '(path,)'}), '(target=exception, args=(path,))\n', (2248, 2280), True, 'import multiprocessing as mp\n'), ((3157, 3194), 'osbuild.api.metadata', 'osbuild.api.metadata', (['data'], {'path': 'path'}), '(data, path=path)\n', (3177, 3194), False, 'import osbuild\n'), ((3302, 3343), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'metadata', 'args': '(path,)'}), '(target=metadata, args=(path,))\n', (3312, 3343), True, 'import multiprocessing as mp\n'), ((1027, 1061), 'osbuild.util.jsoncomm.Socket.new_client', 'jsoncomm.Socket.new_client', (['socket'], {}), '(socket)\n', (1053, 1061), False, 'from osbuild.util import jsoncomm\n'), ((1994, 2029), 'osbuild.api.exception_handler', 'osbuild.api.exception_handler', (['path'], {}), '(path)\n', (2023, 2029), False, 'import osbuild\n')]
|
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Workflow-Engine-yadage command line interface."""
from __future__ import absolute_import, print_function
import base64
import json
import logging
import os
import yaml
import click
import yadageschemas
from reana_commons.config import (
REANA_LOG_FORMAT,
REANA_LOG_LEVEL,
REANA_WORKFLOW_UMASK,
SHARED_VOLUME_PATH,
)
from reana_commons.utils import check_connection_to_job_controller
from yadage.steering_api import steering_ctx
from yadage.utils import setupbackend_fromstring
from .config import LOGGING_MODULE
from .tracker import REANATracker
from .utils import REANAWorkflowStatusPublisher
logging.basicConfig(level=REANA_LOG_LEVEL, format=REANA_LOG_FORMAT)
log = logging.getLogger(LOGGING_MODULE)
def load_json(ctx, param, value):
"""Decode and load json for click option."""
value = value[1:]
return json.loads(base64.standard_b64decode(value).decode())
def load_yadage_operational_options(ctx, param, operational_options):
"""Decode and prepare operational options."""
operational_options = load_json(ctx, param, operational_options)
workflow_workspace = ctx.params.get("workflow_workspace")
workflow_workspace = "{0}/{1}".format(SHARED_VOLUME_PATH, workflow_workspace)
toplevel = operational_options.get("toplevel", "")
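    # resolve toplevel inside the workflow workspace unless it refers to a remote "github:" spec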
if not toplevel.startswith("github:"):
toplevel = os.path.join(workflow_workspace, toplevel)
operational_options["toplevel"] = toplevel
operational_options["initdir"] = os.path.join(
workflow_workspace, operational_options.get("initdir", "")
)
operational_options["initfiles"] = [
os.path.join(workflow_workspace, initfile)
for initfile in operational_options.get("initfiles", [])
]
return operational_options
@click.command()
@click.option("--workflow-uuid", required=True, help="UUID of workflow to be run.")
@click.option(
"--workflow-workspace",
required=True,
help="Name of workspace in which workflow should run.",
)
@click.option(
"--workflow-file",
required=True,
help="Path to the workflow file. This field is used when"
" no workflow JSON has been passed.",
)
@click.option(
"--workflow-parameters",
help="JSON representation of workflow_parameters received by" " the workflow.",
callback=load_json,
)
@click.option(
"--operational-options",
help="Options to be passed to the workflow engine" " (e.g. initdir).",
callback=load_yadage_operational_options,
)
def run_yadage_workflow(
workflow_uuid,
workflow_workspace,
workflow_file,
workflow_parameters=None,
operational_options={},
):
"""Run a ``yadage`` workflow."""
log.info("getting socket..")
workflow_workspace = "{0}/{1}".format(SHARED_VOLUME_PATH, workflow_workspace)
# use some shared object between tasks.
os.environ["workflow_uuid"] = workflow_uuid
os.environ["workflow_workspace"] = workflow_workspace
os.umask(REANA_WORKFLOW_UMASK)
cap_backend = setupbackend_fromstring("fromenv")
workflow_file_abs_path = os.path.join(workflow_workspace, workflow_file)
publisher = REANAWorkflowStatusPublisher()
try:
if not os.path.exists(workflow_file_abs_path):
message = f"Workflow file {workflow_file} does not exist"
raise Exception(message)
else:
schema_name = "yadage/workflow-schema"
schemadir = None
specopts = {
"toplevel": operational_options["toplevel"],
"schema_name": schema_name,
"schemadir": schemadir,
"load_as_ref": False,
}
validopts = {
"schema_name": schema_name,
"schemadir": schemadir,
}
workflow_json = yadageschemas.load(
spec=workflow_file,
specopts=specopts,
validopts=validopts,
validate=True,
)
workflow_kwargs = dict(workflow_json=workflow_json)
dataopts = {"initdir": operational_options["initdir"]}
initdata = {}
for initfile in operational_options["initfiles"]:
initdata.update(**yaml.safe_load(open(initfile)))
initdata.update(workflow_parameters)
check_connection_to_job_controller()
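            # drive the workflow with yadage's steering context; the REANATracker attached below reports progress back to REANA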
with steering_ctx(
dataarg=workflow_workspace,
dataopts=dataopts,
initdata=initdata,
visualize=True,
updateinterval=5,
loginterval=5,
backend=cap_backend,
accept_metadir="accept_metadir" in operational_options,
**workflow_kwargs,
) as ys:
log.info("running workflow on context: {0}".format(locals()))
publisher.publish_workflow_status(workflow_uuid, 1)
ys.adage_argument(
additional_trackers=[REANATracker(identifier=workflow_uuid)]
)
publisher.publish_workflow_status(workflow_uuid, 2)
log.info(
"Workflow {workflow_uuid} finished. Files available "
"at {workflow_workspace}.".format(
workflow_uuid=workflow_uuid, workflow_workspace=workflow_workspace
)
)
except Exception as e:
log.error("Workflow failed: {0}".format(e), exc_info=True)
if publisher:
publisher.publish_workflow_status(
workflow_uuid, 3, logs="workflow failed: {0}".format(e)
)
else:
log.error(
"Workflow {workflow_uuid} failed but status "
"could not be published.".format(workflow_uuid=workflow_uuid)
)
|
[
"base64.standard_b64decode",
"logging.basicConfig",
"yadage.utils.setupbackend_fromstring",
"click.option",
"os.path.exists",
"click.command",
"os.umask",
"yadage.steering_api.steering_ctx",
"yadageschemas.load",
"os.path.join",
"reana_commons.utils.check_connection_to_job_controller",
"logging.getLogger"
] |
[((855, 922), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'REANA_LOG_LEVEL', 'format': 'REANA_LOG_FORMAT'}), '(level=REANA_LOG_LEVEL, format=REANA_LOG_FORMAT)\n', (874, 922), False, 'import logging\n'), ((929, 962), 'logging.getLogger', 'logging.getLogger', (['LOGGING_MODULE'], {}), '(LOGGING_MODULE)\n', (946, 962), False, 'import logging\n'), ((2001, 2016), 'click.command', 'click.command', ([], {}), '()\n', (2014, 2016), False, 'import click\n'), ((2018, 2105), 'click.option', 'click.option', (['"""--workflow-uuid"""'], {'required': '(True)', 'help': '"""UUID of workflow to be run."""'}), "('--workflow-uuid', required=True, help=\n 'UUID of workflow to be run.')\n", (2030, 2105), False, 'import click\n'), ((2102, 2214), 'click.option', 'click.option', (['"""--workflow-workspace"""'], {'required': '(True)', 'help': '"""Name of workspace in which workflow should run."""'}), "('--workflow-workspace', required=True, help=\n 'Name of workspace in which workflow should run.')\n", (2114, 2214), False, 'import click\n'), ((2226, 2375), 'click.option', 'click.option', (['"""--workflow-file"""'], {'required': '(True)', 'help': '"""Path to the workflow file. This field is used when no workflow JSON has been passed."""'}), "('--workflow-file', required=True, help=\n 'Path to the workflow file. This field is used when no workflow JSON has been passed.'\n )\n", (2238, 2375), False, 'import click\n'), ((2389, 2532), 'click.option', 'click.option', (['"""--workflow-parameters"""'], {'help': '"""JSON representation of workflow_parameters received by the workflow."""', 'callback': 'load_json'}), "('--workflow-parameters', help=\n 'JSON representation of workflow_parameters received by the workflow.',\n callback=load_json)\n", (2401, 2532), False, 'import click\n'), ((2543, 2700), 'click.option', 'click.option', (['"""--operational-options"""'], {'help': '"""Options to be passed to the workflow engine (e.g. initdir)."""', 'callback': 'load_yadage_operational_options'}), "('--operational-options', help=\n 'Options to be passed to the workflow engine (e.g. 
initdir).', callback\n =load_yadage_operational_options)\n", (2555, 2700), False, 'import click\n'), ((3163, 3193), 'os.umask', 'os.umask', (['REANA_WORKFLOW_UMASK'], {}), '(REANA_WORKFLOW_UMASK)\n', (3171, 3193), False, 'import os\n'), ((3213, 3247), 'yadage.utils.setupbackend_fromstring', 'setupbackend_fromstring', (['"""fromenv"""'], {}), "('fromenv')\n", (3236, 3247), False, 'from yadage.utils import setupbackend_fromstring\n'), ((3277, 3324), 'os.path.join', 'os.path.join', (['workflow_workspace', 'workflow_file'], {}), '(workflow_workspace, workflow_file)\n', (3289, 3324), False, 'import os\n'), ((1587, 1629), 'os.path.join', 'os.path.join', (['workflow_workspace', 'toplevel'], {}), '(workflow_workspace, toplevel)\n', (1599, 1629), False, 'import os\n'), ((1852, 1894), 'os.path.join', 'os.path.join', (['workflow_workspace', 'initfile'], {}), '(workflow_workspace, initfile)\n', (1864, 1894), False, 'import os\n'), ((4509, 4545), 'reana_commons.utils.check_connection_to_job_controller', 'check_connection_to_job_controller', ([], {}), '()\n', (4543, 4545), False, 'from reana_commons.utils import check_connection_to_job_controller\n'), ((3396, 3434), 'os.path.exists', 'os.path.exists', (['workflow_file_abs_path'], {}), '(workflow_file_abs_path)\n', (3410, 3434), False, 'import os\n'), ((4012, 4110), 'yadageschemas.load', 'yadageschemas.load', ([], {'spec': 'workflow_file', 'specopts': 'specopts', 'validopts': 'validopts', 'validate': '(True)'}), '(spec=workflow_file, specopts=specopts, validopts=\n validopts, validate=True)\n', (4030, 4110), False, 'import yadageschemas\n'), ((4560, 4798), 'yadage.steering_api.steering_ctx', 'steering_ctx', ([], {'dataarg': 'workflow_workspace', 'dataopts': 'dataopts', 'initdata': 'initdata', 'visualize': '(True)', 'updateinterval': '(5)', 'loginterval': '(5)', 'backend': 'cap_backend', 'accept_metadir': "('accept_metadir' in operational_options)"}), "(dataarg=workflow_workspace, dataopts=dataopts, initdata=\n initdata, visualize=True, updateinterval=5, loginterval=5, backend=\n cap_backend, accept_metadir='accept_metadir' in operational_options, **\n workflow_kwargs)\n", (4572, 4798), False, 'from yadage.steering_api import steering_ctx\n'), ((1092, 1124), 'base64.standard_b64decode', 'base64.standard_b64decode', (['value'], {}), '(value)\n', (1117, 1124), False, 'import base64\n')]
|
import pytest
from fdsolver.classes import FD, FDSet
from tests.fixtures import fdsets
def test_fd_equality():
rel_abc = set('ABC')
rel_cde = set('CDE')
rel_ad = set('AD')
rel_de = set('DE')
rel_b = set('B')
rel_e = set('E')
fd_abc_de = FD(rel_abc, rel_de)
fd_abc_de2 = FD(rel_abc, rel_de)
fd_abc_e = FD(rel_abc, rel_e)
fd_abc_cde = FD(rel_abc, rel_cde)
fd_de_b = FD(rel_de, rel_b)
assert fd_abc_de == fd_abc_de2
assert fd_abc_e != fd_abc_de
assert fd_abc_de != fd_abc_cde
assert fd_de_b != fd_abc_e
def test_fdset_equality(fdsets):
fdset_1, fdset_2, fdset_3 = fdsets
assert fdset_1 == fdset_2
assert fdset_2 == fdset_1
assert fdset_1 != fdset_3
assert fdset_2 != fdset_3
def test_fdset_indexing(fdsets):
fdset_1, _, fdset_3 = fdsets
fd_cde_bc = FD(set('CDE'), set('BC'))
fd_de_b = FD(set('DE'), set('B'))
assert fdset_1[1] == fd_cde_bc
assert fdset_3[-1] == fd_de_b
def test_fdset_contains(fdsets):
fdset_1, _, fdset_3 = fdsets
fd_cde_bc = FD(set('CDE'), set('BC'))
fd_de_b = FD(set('DE'), set('B'))
assert fd_cde_bc in fdset_1
assert fd_de_b in fdset_3
|
[
"fdsolver.classes.FD"
] |
[((267, 286), 'fdsolver.classes.FD', 'FD', (['rel_abc', 'rel_de'], {}), '(rel_abc, rel_de)\n', (269, 286), False, 'from fdsolver.classes import FD, FDSet\n'), ((304, 323), 'fdsolver.classes.FD', 'FD', (['rel_abc', 'rel_de'], {}), '(rel_abc, rel_de)\n', (306, 323), False, 'from fdsolver.classes import FD, FDSet\n'), ((339, 357), 'fdsolver.classes.FD', 'FD', (['rel_abc', 'rel_e'], {}), '(rel_abc, rel_e)\n', (341, 357), False, 'from fdsolver.classes import FD, FDSet\n'), ((375, 395), 'fdsolver.classes.FD', 'FD', (['rel_abc', 'rel_cde'], {}), '(rel_abc, rel_cde)\n', (377, 395), False, 'from fdsolver.classes import FD, FDSet\n'), ((410, 427), 'fdsolver.classes.FD', 'FD', (['rel_de', 'rel_b'], {}), '(rel_de, rel_b)\n', (412, 427), False, 'from fdsolver.classes import FD, FDSet\n')]
|
#!/usr/bin/env python
import PySimpleGUI as sg
import sys
import os
import csv
import pandas as pd
import re
# TODOS:
# Add props at run time
# sg.theme('Dark Red')
sg.theme('Dark Blue 3')
# print = sg.Print
bigfont = ("Arial", 16)
rule = ("Arial", 10)
def pad(s, l=12):
return s.ljust(l, " ")
def getlines(filename):
with open(filename, "r") as f:
return [s.strip() for s in f.readlines()]
def choose_main():
layout = [[sg.Text('Params file')],
[sg.Input(key='-FILE-', visible=False, enable_events=True), sg.FileBrowse()]]
event, values = sg.Window('File Compare', layout).read(close=True)
mainwin(values['-FILE-'])
# print(f'You chose: {values["-FILE-"]}')
def generate_scripts(values):
grid = {}
template = None
experiments = None
output_dir = None
r = re.compile(r'\[([^\]]*)\]')
for k, v in values.items():
if "-PROP-" in str(k):
k = k[6:]
if values['-CHECK-' + k]:
try:
min_, max_, step = [int(x) for x in [values[a + k] for a in ['-MIN-', '-MAX-', '-STEP-']]]
max_ = max_ + 1
except:
raise ValueError("Invalid values for min/max/step")
grid[k] = range(min_, max_, step)
else:
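                # a value like "[values.txt]" expands to the lines of that file; otherwise split it as a comma-separated list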
match = re.search(r, v)
if match:
if not os.path.exists(match.group(1)):
raise ValueError(f"File {match.group(1)} specified as value not found.")
grid[k] = getlines(match.group(1))
else:
grid[k] = v.split(",")
elif k == "-OUTPUT-":
output_dir = v
elif k == "-TEMPLATE-":
template = v
elif k == "-EXPERIMENT-":
experiments = v
keys = list(grid.keys())
results = _iterate([], keys, grid, {})
if not os.path.exists(template):
raise ValueError(f"File {template} not found.")
exp_idx = 1
if not os.path.exists(experiments):
create = sg.popup_ok_cancel('Experiments file not found. Create new?')
if create != "OK":
raise ValueError(f"Experiments file {experiments} not found.")
else:
exp_df = pd.DataFrame(columns=["id"]).set_index("id")
exp_idx = len(exp_df)
else:
try:
exp_df = pd.read_csv(experiments, index_col="id")
exp_idx += len(exp_df)
except:
raise ValueError("Invalid experiments file {experiments}")
try:
with open(template, "r") as f:
template_lines = f.readlines()
except:
raise ValueError("Error reading template file.")
    if not (os.path.exists(output_dir) and os.path.isdir(output_dir)):
raise ValueError(f"Output dir {output_dir} not valid.")
sim_jobs = 9e9
if values['-LIMIT-']:
sim_jobs = int(values['-JOBS_SLIDER-'])
firstjobs = []
mem = int(values['-MEM-'])
hours = int(values['-TIME-'])
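    # write one sbatch script per grid combination; when a job limit is set, each script chain-submits the one sim_jobs positions ahead so at most sim_jobs run at once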
for idx in range(len(results)):
script_id = str(exp_idx + idx).rjust(5, "0")
next_job_id = None
if len(firstjobs) < sim_jobs:
firstjobs.append(f"sbatch {script_id}_batch.sh")
if idx + sim_jobs < len(results):
next_job_id = str(exp_idx + idx + sim_jobs).rjust(5, "0")
prop_string = " ".join([f"--{k} {v}" for k, v in results[idx].items()])
prop_string = prop_string.replace("{jobid}", script_id)
for k, v in results[idx].items():
exp_df.loc[exp_idx + idx, k] = v
with open(f"{output_dir}/{script_id}_job.sh", "w") as f:
for line in template_lines:
if "SBATCH --mem=" in line:
line = f"#SBATCH --mem={mem}G\n"
if "SBATCH --time" in line:
line = f"#SBATCH --time={hours}:00:00\n"
if "{jobid}" in line:
line = line.replace("{jobid}", script_id)
if "{props}" in line:
line = line.replace("{props}", prop_string)
for k, v in results[idx].items():
line = line.replace("{" + k + "}", str(v))
f.write(line)
if next_job_id is not None:
f.write(f"\nsbatch {output_dir}/{next_job_id}_job.sh\n")
exp_df.to_csv(experiments)
if values['-SUBMIT-']:
for job in firstjobs:
os.system(job)
sg.Print("\n".join(firstjobs))
def _iterate(results, keys, grid, props):
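    """Recursively expand grid into a list of property dicts, one per combination of values (Cartesian product over keys)."""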
k = keys[0]
if len(keys) > 1:
for x in range(len(grid[k])):
p = props.copy()
p[k] = grid[k][x]
_iterate(results, keys[1:], grid, p)
else:
for x in range(len(grid[keys[0]])):
props[k] = grid[k][x]
results.append(props.copy())
return results
def mainwin(pfile):
# def mainwin(pfile):
layout = []
with open(pfile, "r") as f:
lines = f.readlines()
if "arg,values" in lines[0]:
lines = lines[1:]
lines = [x for x in lines if x[0] != "#"]
options = {}
fields = []
## Setup
layout.append([sg.Text("OzSTAR GRID MAKER", size=(50, 1), justification="center", font=bigfont)])
layout.append([sg.Text("JOB", font=bigfont)])
layout.append([
sg.Text("Mem:"), sg.InputText("12", key="-MEM-", size=(5, 1)),
sg.Text("GB"),
sg.Text("Time:"), sg.InputText("16", key="-TIME-", size=(5, 1)),
sg.Text("Hours"),
])
layout.append([
sg.CBox("", default=False, key="-LIMIT-"),
sg.Text("Limit to concurrent jobs:"),
sg.Slider(range=(0, 30), key='-JOBS_SLIDER-', default_value=5, orientation="h")
])
layout.append([
sg.CBox("Submit jobs", default=False, key="-SUBMIT-"),
])
layout.append([sg.Text("", font=rule)])
layout.append([sg.Text("GRID", font=bigfont)])
checkbox = []
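    # each params row is: arg, default, min, max, step; the grid checkbox defaults on when default is empty but min/max/step are all given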
for toks in csv.reader(lines, quotechar='"', delimiter=',',
quoting=csv.QUOTE_ALL, skipinitialspace=True):
f = toks[0]
fields.append(f)
props = {}
options[f] = props
props['default'] = toks[1]
props['min'] = toks[2]
props['max'] = toks[3]
props['step'] = toks[4]
checkbox.append(toks[1]=="" and toks[2] != "" and toks[3] != "" and toks[4] != "")
layout.append([
sg.Text("Property", size=(18, 1)),
sg.Text("Values", size=(16, 1)),
sg.Text("grid", size=(4, 1)),
sg.Text("min", size=(3, 1)),
sg.Text("max", size=(4, 1)),
sg.Text("step", size=(4, 1)),
]
)
for n, prop in enumerate(fields):
layout.append(
[
sg.Text(f"{pad(prop)}:"),
sg.InputText(f"{options[prop]['default']}", size=(22, 1), key=f"-PROP-{prop}"),
sg.CBox('', default=checkbox[n], key=f"-CHECK-{prop}"),
sg.InputText(options[prop]['min'], size=(4, 1), key=f"-MIN-{prop}"),
sg.InputText(options[prop]['max'], size=(4, 1), key=f"-MAX-{prop}"),
sg.InputText(options[prop]['step'], size=(4, 1), key=f"-STEP-{prop}"),
]
)
layout.append([sg.Text("", font=rule)])
layout.append([sg.Text("TEMPLATE", font=bigfont)])
layout.append([
sg.Text('Template', size=(12, 1)), sg.Input("template.sh", key="-TEMPLATE-"),
sg.FileBrowse(button_text="Select"),
])
layout.append([
sg.Text('Output dir', size=(12, 1)), sg.Input(".", key="-OUTPUT-"),
sg.FolderBrowse(button_text="Select", initial_folder=".", ),
])
layout.append([
sg.Text('Record file', size=(12, 1)), sg.Input("experiments.csv", key="-EXPERIMENT-"),
sg.FolderBrowse(button_text="Select", initial_folder=".", ),
])
statusbar = sg.StatusBar(" ", key="-STATUS-")
layout.append([sg.Button("Generate"), sg.Button("Quit")])
layout.append([statusbar])
window = sg.Window('SLURM Gridder', layout)
while True:
if window.was_closed():
break
event, values = window.read(close=False)
if event == "Quit":
break
try:
generate_scripts(values)
except ValueError as e:
sg.popup('Error:', str(e))
        else:
            window['-STATUS-'].update("Success.")
window.close()
if __name__ == "__main__":
if len(sys.argv) == 1:
choose_main()
else:
mainwin(sys.argv[1])
|
[
"csv.reader",
"pandas.read_csv",
"PySimpleGUI.StatusBar",
"pandas.DataFrame",
"PySimpleGUI.theme",
"PySimpleGUI.InputText",
"os.path.exists",
"PySimpleGUI.FolderBrowse",
"PySimpleGUI.Window",
"re.search",
"PySimpleGUI.popup_ok_cancel",
"os.system",
"PySimpleGUI.FileBrowse",
"re.compile",
"PySimpleGUI.Button",
"PySimpleGUI.Input",
"os.path.isdir",
"PySimpleGUI.CBox",
"PySimpleGUI.Slider",
"PySimpleGUI.Text"
] |
[((167, 190), 'PySimpleGUI.theme', 'sg.theme', (['"""Dark Blue 3"""'], {}), "('Dark Blue 3')\n", (175, 190), True, 'import PySimpleGUI as sg\n'), ((832, 861), 're.compile', 're.compile', (['"""\\\\[([^\\\\]]*)\\\\]"""'], {}), "('\\\\[([^\\\\]]*)\\\\]')\n", (842, 861), False, 'import re\n'), ((5970, 6067), 'csv.reader', 'csv.reader', (['lines'], {'quotechar': '"""\\""""', 'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL', 'skipinitialspace': '(True)'}), '(lines, quotechar=\'"\', delimiter=\',\', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n', (5980, 6067), False, 'import csv\n'), ((7875, 7919), 'PySimpleGUI.StatusBar', 'sg.StatusBar', (['""" """'], {'key': '"""-STATUS-"""'}), "(' ', key='-STATUS-')\n", (7887, 7919), True, 'import PySimpleGUI as sg\n'), ((8027, 8061), 'PySimpleGUI.Window', 'sg.Window', (['"""SLURM Gridder"""', 'layout'], {}), "('SLURM Gridder', layout)\n", (8036, 8061), True, 'import PySimpleGUI as sg\n'), ((1919, 1943), 'os.path.exists', 'os.path.exists', (['template'], {}), '(template)\n', (1933, 1943), False, 'import os\n'), ((2029, 2056), 'os.path.exists', 'os.path.exists', (['experiments'], {}), '(experiments)\n', (2043, 2056), False, 'import os\n'), ((2075, 2136), 'PySimpleGUI.popup_ok_cancel', 'sg.popup_ok_cancel', (['"""Experiments file not found. Create new?"""'], {}), "('Experiments file not found. Create new?')\n", (2093, 2136), True, 'import PySimpleGUI as sg\n'), ((2764, 2789), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (2777, 2789), False, 'import os\n'), ((449, 471), 'PySimpleGUI.Text', 'sg.Text', (['"""Params file"""'], {}), "('Params file')\n", (456, 471), True, 'import PySimpleGUI as sg\n'), ((489, 546), 'PySimpleGUI.Input', 'sg.Input', ([], {'key': '"""-FILE-"""', 'visible': '(False)', 'enable_events': '(True)'}), "(key='-FILE-', visible=False, enable_events=True)\n", (497, 546), True, 'import PySimpleGUI as sg\n'), ((548, 563), 'PySimpleGUI.FileBrowse', 'sg.FileBrowse', ([], {}), '()\n', (561, 563), True, 'import PySimpleGUI as sg\n'), ((586, 619), 'PySimpleGUI.Window', 'sg.Window', (['"""File Compare"""', 'layout'], {}), "('File Compare', layout)\n", (595, 619), True, 'import PySimpleGUI as sg\n'), ((2397, 2437), 'pandas.read_csv', 'pd.read_csv', (['experiments'], {'index_col': '"""id"""'}), "(experiments, index_col='id')\n", (2408, 2437), True, 'import pandas as pd\n'), ((2733, 2759), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (2747, 2759), False, 'import os\n'), ((4461, 4475), 'os.system', 'os.system', (['job'], {}), '(job)\n', (4470, 4475), False, 'import os\n'), ((5185, 5270), 'PySimpleGUI.Text', 'sg.Text', (['"""OzSTAR GRID MAKER"""'], {'size': '(50, 1)', 'justification': '"""center"""', 'font': 'bigfont'}), "('OzSTAR GRID MAKER', size=(50, 1), justification='center', font=bigfont\n )\n", (5192, 5270), True, 'import PySimpleGUI as sg\n'), ((5287, 5315), 'PySimpleGUI.Text', 'sg.Text', (['"""JOB"""'], {'font': 'bigfont'}), "('JOB', font=bigfont)\n", (5294, 5315), True, 'import PySimpleGUI as sg\n'), ((5346, 5361), 'PySimpleGUI.Text', 'sg.Text', (['"""Mem:"""'], {}), "('Mem:')\n", (5353, 5361), True, 'import PySimpleGUI as sg\n'), ((5363, 5407), 'PySimpleGUI.InputText', 'sg.InputText', (['"""12"""'], {'key': '"""-MEM-"""', 'size': '(5, 1)'}), "('12', key='-MEM-', size=(5, 1))\n", (5375, 5407), True, 'import PySimpleGUI as sg\n'), ((5417, 5430), 'PySimpleGUI.Text', 'sg.Text', (['"""GB"""'], {}), "('GB')\n", (5424, 5430), True, 'import PySimpleGUI as sg\n'), ((5440, 5456), 
'PySimpleGUI.Text', 'sg.Text', (['"""Time:"""'], {}), "('Time:')\n", (5447, 5456), True, 'import PySimpleGUI as sg\n'), ((5458, 5503), 'PySimpleGUI.InputText', 'sg.InputText', (['"""16"""'], {'key': '"""-TIME-"""', 'size': '(5, 1)'}), "('16', key='-TIME-', size=(5, 1))\n", (5470, 5503), True, 'import PySimpleGUI as sg\n'), ((5513, 5529), 'PySimpleGUI.Text', 'sg.Text', (['"""Hours"""'], {}), "('Hours')\n", (5520, 5529), True, 'import PySimpleGUI as sg\n'), ((5566, 5607), 'PySimpleGUI.CBox', 'sg.CBox', (['""""""'], {'default': '(False)', 'key': '"""-LIMIT-"""'}), "('', default=False, key='-LIMIT-')\n", (5573, 5607), True, 'import PySimpleGUI as sg\n'), ((5617, 5653), 'PySimpleGUI.Text', 'sg.Text', (['"""Limit to concurrent jobs:"""'], {}), "('Limit to concurrent jobs:')\n", (5624, 5653), True, 'import PySimpleGUI as sg\n'), ((5663, 5742), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 30)', 'key': '"""-JOBS_SLIDER-"""', 'default_value': '(5)', 'orientation': '"""h"""'}), "(range=(0, 30), key='-JOBS_SLIDER-', default_value=5, orientation='h')\n", (5672, 5742), True, 'import PySimpleGUI as sg\n'), ((5778, 5831), 'PySimpleGUI.CBox', 'sg.CBox', (['"""Submit jobs"""'], {'default': '(False)', 'key': '"""-SUBMIT-"""'}), "('Submit jobs', default=False, key='-SUBMIT-')\n", (5785, 5831), True, 'import PySimpleGUI as sg\n'), ((5860, 5882), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': 'rule'}), "('', font=rule)\n", (5867, 5882), True, 'import PySimpleGUI as sg\n'), ((5904, 5933), 'PySimpleGUI.Text', 'sg.Text', (['"""GRID"""'], {'font': 'bigfont'}), "('GRID', font=bigfont)\n", (5911, 5933), True, 'import PySimpleGUI as sg\n'), ((6432, 6465), 'PySimpleGUI.Text', 'sg.Text', (['"""Property"""'], {'size': '(18, 1)'}), "('Property', size=(18, 1))\n", (6439, 6465), True, 'import PySimpleGUI as sg\n'), ((6475, 6506), 'PySimpleGUI.Text', 'sg.Text', (['"""Values"""'], {'size': '(16, 1)'}), "('Values', size=(16, 1))\n", (6482, 6506), True, 'import PySimpleGUI as sg\n'), ((6516, 6544), 'PySimpleGUI.Text', 'sg.Text', (['"""grid"""'], {'size': '(4, 1)'}), "('grid', size=(4, 1))\n", (6523, 6544), True, 'import PySimpleGUI as sg\n'), ((6554, 6581), 'PySimpleGUI.Text', 'sg.Text', (['"""min"""'], {'size': '(3, 1)'}), "('min', size=(3, 1))\n", (6561, 6581), True, 'import PySimpleGUI as sg\n'), ((6591, 6618), 'PySimpleGUI.Text', 'sg.Text', (['"""max"""'], {'size': '(4, 1)'}), "('max', size=(4, 1))\n", (6598, 6618), True, 'import PySimpleGUI as sg\n'), ((6628, 6656), 'PySimpleGUI.Text', 'sg.Text', (['"""step"""'], {'size': '(4, 1)'}), "('step', size=(4, 1))\n", (6635, 6656), True, 'import PySimpleGUI as sg\n'), ((7256, 7278), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': 'rule'}), "('', font=rule)\n", (7263, 7278), True, 'import PySimpleGUI as sg\n'), ((7301, 7334), 'PySimpleGUI.Text', 'sg.Text', (['"""TEMPLATE"""'], {'font': 'bigfont'}), "('TEMPLATE', font=bigfont)\n", (7308, 7334), True, 'import PySimpleGUI as sg\n'), ((7365, 7398), 'PySimpleGUI.Text', 'sg.Text', (['"""Template"""'], {'size': '(12, 1)'}), "('Template', size=(12, 1))\n", (7372, 7398), True, 'import PySimpleGUI as sg\n'), ((7400, 7441), 'PySimpleGUI.Input', 'sg.Input', (['"""template.sh"""'], {'key': '"""-TEMPLATE-"""'}), "('template.sh', key='-TEMPLATE-')\n", (7408, 7441), True, 'import PySimpleGUI as sg\n'), ((7451, 7486), 'PySimpleGUI.FileBrowse', 'sg.FileBrowse', ([], {'button_text': '"""Select"""'}), "(button_text='Select')\n", (7464, 7486), True, 'import PySimpleGUI as sg\n'), ((7523, 7558), 'PySimpleGUI.Text', 'sg.Text', 
(['"""Output dir"""'], {'size': '(12, 1)'}), "('Output dir', size=(12, 1))\n", (7530, 7558), True, 'import PySimpleGUI as sg\n'), ((7560, 7589), 'PySimpleGUI.Input', 'sg.Input', (['"""."""'], {'key': '"""-OUTPUT-"""'}), "('.', key='-OUTPUT-')\n", (7568, 7589), True, 'import PySimpleGUI as sg\n'), ((7599, 7656), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', ([], {'button_text': '"""Select"""', 'initial_folder': '"""."""'}), "(button_text='Select', initial_folder='.')\n", (7614, 7656), True, 'import PySimpleGUI as sg\n'), ((7695, 7731), 'PySimpleGUI.Text', 'sg.Text', (['"""Record file"""'], {'size': '(12, 1)'}), "('Record file', size=(12, 1))\n", (7702, 7731), True, 'import PySimpleGUI as sg\n'), ((7733, 7780), 'PySimpleGUI.Input', 'sg.Input', (['"""experiments.csv"""'], {'key': '"""-EXPERIMENT-"""'}), "('experiments.csv', key='-EXPERIMENT-')\n", (7741, 7780), True, 'import PySimpleGUI as sg\n'), ((7790, 7847), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', ([], {'button_text': '"""Select"""', 'initial_folder': '"""."""'}), "(button_text='Select', initial_folder='.')\n", (7805, 7847), True, 'import PySimpleGUI as sg\n'), ((7939, 7960), 'PySimpleGUI.Button', 'sg.Button', (['"""Generate"""'], {}), "('Generate')\n", (7948, 7960), True, 'import PySimpleGUI as sg\n'), ((7962, 7979), 'PySimpleGUI.Button', 'sg.Button', (['"""Quit"""'], {}), "('Quit')\n", (7971, 7979), True, 'import PySimpleGUI as sg\n'), ((1340, 1355), 're.search', 're.search', (['r', 'v'], {}), '(r, v)\n', (1349, 1355), False, 'import re\n'), ((6803, 6881), 'PySimpleGUI.InputText', 'sg.InputText', (['f"""{options[prop][\'default\']}"""'], {'size': '(22, 1)', 'key': 'f"""-PROP-{prop}"""'}), '(f"{options[prop][\'default\']}", size=(22, 1), key=f\'-PROP-{prop}\')\n', (6815, 6881), True, 'import PySimpleGUI as sg\n'), ((6899, 6953), 'PySimpleGUI.CBox', 'sg.CBox', (['""""""'], {'default': 'checkbox[n]', 'key': 'f"""-CHECK-{prop}"""'}), "('', default=checkbox[n], key=f'-CHECK-{prop}')\n", (6906, 6953), True, 'import PySimpleGUI as sg\n'), ((6971, 7038), 'PySimpleGUI.InputText', 'sg.InputText', (["options[prop]['min']"], {'size': '(4, 1)', 'key': 'f"""-MIN-{prop}"""'}), "(options[prop]['min'], size=(4, 1), key=f'-MIN-{prop}')\n", (6983, 7038), True, 'import PySimpleGUI as sg\n'), ((7056, 7123), 'PySimpleGUI.InputText', 'sg.InputText', (["options[prop]['max']"], {'size': '(4, 1)', 'key': 'f"""-MAX-{prop}"""'}), "(options[prop]['max'], size=(4, 1), key=f'-MAX-{prop}')\n", (7068, 7123), True, 'import PySimpleGUI as sg\n'), ((7141, 7210), 'PySimpleGUI.InputText', 'sg.InputText', (["options[prop]['step']"], {'size': '(4, 1)', 'key': 'f"""-STEP-{prop}"""'}), "(options[prop]['step'], size=(4, 1), key=f'-STEP-{prop}')\n", (7153, 7210), True, 'import PySimpleGUI as sg\n'), ((2274, 2302), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['id']"}), "(columns=['id'])\n", (2286, 2302), True, 'import pandas as pd\n')]
|
from solarlib.error_handling import (
ValueOutOfRange,
raise_type_error,
type_check)
from solarlib.solar import (
sunrise,
sunset,
estimated_irradiance,
solar_noon
)
import pytz
import datetime
class Location:
def __init__(self, latitude, longitude, timezone=None):
self.set_timezone(timezone)
self.set_latitude(latitude)
self.set_longitude(longitude)
def set_latitude(self, latitude):
if not type_check(latitude, [int, float]):
raise_type_error('latitude', type(latitude))
elif -90 <= latitude <= 90:
self.__latitude = latitude
else:
raise ValueError(
'latitude must be between -90 and 90 degrees ' +
                f'but received {latitude}'
)
def set_longitude(self, longitude):
if not type_check(longitude, [int, float]):
raise_type_error('longitude', type(longitude))
elif -180 <= longitude <= 180:
self.__longitude = longitude
else:
raise ValueOutOfRange(
'longitude must be between -180 and 180 ' +
                f'degrees but received {longitude}'
)
def set_timezone(self, timezone):
if timezone is None:
self.__timezone = pytz.timezone('UTC')
elif isinstance(timezone, str):
self.__timezone = pytz.timezone(timezone)
elif isinstance(timezone, float) or isinstance(timezone, int):
timezone_hours = round(timezone*2)/2
timedelta = datetime.timedelta(hours=timezone_hours)
self.__timezone = datetime.timezone(timedelta)
else:
raise_type_error('timezone', type(timezone))
@property
def latitude(self):
return self.__latitude
@property
def longitude(self):
return self.__longitude
@property
def timezone(self):
return self.__timezone
def __repr__(self):
lat = self.__latitude
lon = self.__longitude
tz = self.__timezone
output = f'''Location(latitude={lat},
longitude={lon},
timezone={str(tz)})'''
return output.replace(' '*4, '').replace('\n', '')
def __str__(self):
return repr(self)
def sunrise(self, date, fmt='%Y-%m-%d'):
        date = self.__parse_date(date, fmt=fmt)
lat = self.latitude
lon = self.longitude
base = sunrise(date, lat, lon)
return sunrise(base, lat, lon)
def sunset(self, date, fmt='%Y-%m-%d'):
date = self.__parse_date(date, fmt=fmt)
lat = self.latitude
lon = self.longitude
base = sunset(date, lat, lon)
return sunset(base, lat, lon)
def solar_irradiance(self, time, fmt='%Y-%m-%d %H:%M:%S'):
time = self.__parse_time(time, fmt=fmt)
lat = self.latitude
lon = self.longitude
return estimated_irradiance(time, lat, lon)
def daily_irradiance(self, date, fmt='%Y-%m-%d', freq_min=30):
date = self.__parse_date(date, fmt=fmt)
delta = datetime.timedelta(minutes=freq_min)
n = 1440//freq_min + 1
output = [
(
date+i*delta,
self.solar_irradiance(date+i*delta)
) for i in range(n)
]
return output
def day_length(self, date, fmt='%Y-%m-%d'):
date = self.__parse_date(date, fmt=fmt)
rise_time = self.sunrise(date, fmt)
set_time = self.sunset(date, fmt)
return set_time-rise_time
def solar_noon(self, date, fmt='%Y-%m-%d'):
date = self.__parse_date(date, fmt=fmt)
lon = self.longitude
base = solar_noon(date, lon)
return solar_noon(base, lon)
def __parse_date(self, date, fmt='%Y-%m-%d'):
if isinstance(date, str):
return datetime.datetime.strptime(date, fmt)
elif isinstance(date, datetime.datetime):
return date
elif isinstance(date, datetime.date):
time = datetime.time()
return datetime.datetime.combine(date, time)
else:
raise_type_error('date', datetime.datetime)
def __parse_time(self, date, fmt='%Y-%m-%d %H:%M:%S'):
if isinstance(date, str):
return datetime.datetime.strptime(date, fmt)
elif isinstance(date, datetime.datetime):
return date
else:
raise_type_error('date', datetime.datetime)
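# --- Hypothetical usage sketch (added for illustration; the coordinates, date and
# timezone below are invented examples, not values from the original module) ---
if __name__ == '__main__':
    boston = Location(42.36, -71.06, timezone='US/Eastern')
    print(boston)
    print(boston.sunrise('2021-06-21'))
    print(boston.day_length('2021-06-21'))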
|
[
"solarlib.solar.solar_noon",
"solarlib.error_handling.type_check",
"solarlib.solar.estimated_irradiance",
"solarlib.solar.sunset",
"solarlib.error_handling.raise_type_error",
"solarlib.solar.sunrise",
"datetime.datetime.strptime",
"solarlib.error_handling.ValueOutOfRange",
"datetime.timedelta",
"pytz.timezone",
"datetime.timezone",
"datetime.time",
"datetime.datetime.combine"
] |
[((2456, 2479), 'solarlib.solar.sunrise', 'sunrise', (['date', 'lat', 'lon'], {}), '(date, lat, lon)\n', (2463, 2479), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((2495, 2518), 'solarlib.solar.sunrise', 'sunrise', (['base', 'lat', 'lon'], {}), '(base, lat, lon)\n', (2502, 2518), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((2684, 2706), 'solarlib.solar.sunset', 'sunset', (['date', 'lat', 'lon'], {}), '(date, lat, lon)\n', (2690, 2706), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((2722, 2744), 'solarlib.solar.sunset', 'sunset', (['base', 'lat', 'lon'], {}), '(base, lat, lon)\n', (2728, 2744), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((2929, 2965), 'solarlib.solar.estimated_irradiance', 'estimated_irradiance', (['time', 'lat', 'lon'], {}), '(time, lat, lon)\n', (2949, 2965), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((3098, 3134), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'freq_min'}), '(minutes=freq_min)\n', (3116, 3134), False, 'import datetime\n'), ((3703, 3724), 'solarlib.solar.solar_noon', 'solar_noon', (['date', 'lon'], {}), '(date, lon)\n', (3713, 3724), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((3740, 3761), 'solarlib.solar.solar_noon', 'solar_noon', (['base', 'lon'], {}), '(base, lon)\n', (3750, 3761), False, 'from solarlib.solar import sunrise, sunset, estimated_irradiance, solar_noon\n'), ((468, 502), 'solarlib.error_handling.type_check', 'type_check', (['latitude', '[int, float]'], {}), '(latitude, [int, float])\n', (478, 502), False, 'from solarlib.error_handling import ValueOutOfRange, raise_type_error, type_check\n'), ((858, 893), 'solarlib.error_handling.type_check', 'type_check', (['longitude', '[int, float]'], {}), '(longitude, [int, float])\n', (868, 893), False, 'from solarlib.error_handling import ValueOutOfRange, raise_type_error, type_check\n'), ((1307, 1327), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (1320, 1327), False, 'import pytz\n'), ((3866, 3903), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', 'fmt'], {}), '(date, fmt)\n', (3892, 3903), False, 'import datetime\n'), ((4299, 4336), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', 'fmt'], {}), '(date, fmt)\n', (4325, 4336), False, 'import datetime\n'), ((1066, 1166), 'solarlib.error_handling.ValueOutOfRange', 'ValueOutOfRange', (["('longitude must be between -180 and 180 ' +\n f'degrees but recieved {longitude}')"], {}), "('longitude must be between -180 and 180 ' +\n f'degrees but recieved {longitude}')\n", (1081, 1166), False, 'from solarlib.error_handling import ValueOutOfRange, raise_type_error, type_check\n'), ((1398, 1421), 'pytz.timezone', 'pytz.timezone', (['timezone'], {}), '(timezone)\n', (1411, 1421), False, 'import pytz\n'), ((4437, 4480), 'solarlib.error_handling.raise_type_error', 'raise_type_error', (['"""date"""', 'datetime.datetime'], {}), "('date', datetime.datetime)\n", (4453, 4480), False, 'from solarlib.error_handling import ValueOutOfRange, raise_type_error, type_check\n'), ((1566, 1606), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'timezone_hours'}), '(hours=timezone_hours)\n', (1584, 1606), False, 'import datetime\n'), ((1637, 1665), 'datetime.timezone', 'datetime.timezone', (['timedelta'], {}), 
'(timedelta)\n', (1654, 1665), False, 'import datetime\n'), ((4043, 4058), 'datetime.time', 'datetime.time', ([], {}), '()\n', (4056, 4058), False, 'import datetime\n'), ((4078, 4115), 'datetime.datetime.combine', 'datetime.datetime.combine', (['date', 'time'], {}), '(date, time)\n', (4103, 4115), False, 'import datetime\n'), ((4142, 4185), 'solarlib.error_handling.raise_type_error', 'raise_type_error', (['"""date"""', 'datetime.datetime'], {}), "('date', datetime.datetime)\n", (4158, 4185), False, 'from solarlib.error_handling import ValueOutOfRange, raise_type_error, type_check\n')]
|
'''Prediction and plotting routine
In preparation for prediction and plotting, this script will:
1) Load the obs_dimensions
2) Specify the input_dimensions and latent_dimensions
3) Instantiate the DataHandler class
4) Instantiate the neural network
5) Load the trained neural network weights
6) Select and prepare an illustrative test example
7) Draw from the predicted posterior by utilizing nn.iaf_chain_posterior as
well as the encoder
8) Predict the state using the draw from the posterior either using the
modelled or learned (decoder) parameter-to-observable map
9) Plot the prediction
Inputs:
- hyperp: dictionary storing set hyperparameter values
- options: dictionary storing the set options
- filepaths: instance of the FilePaths class storing the default strings for
importing and exporting required objects.
Author: <NAME>, Oden Institute, Austin, Texas 2020
'''
import sys
sys.path.append('../../../../..')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.ioff() # Turn interactive plotting off
# Import src code
from utils_data.data_handler import DataHandler
from neural_networks.nn_vaeiaf import VAEIAF
from utils_misc.positivity_constraints import positivity_constraint_log_exp
# Import FEM Code
from Finite_Element_Method.src.load_mesh import load_mesh
from utils_project.plot_fem_function import plot_fem_function
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
###############################################################################
# Plot Predictions #
###############################################################################
def predict_and_plot(hyperp, options, filepaths):
#=== Load Observation Indices ===#
if options.obs_type == 'full':
obs_dimensions = options.parameter_dimensions
if options.obs_type == 'obs':
obs_dimensions = options.num_obs_points
print('Loading Boundary Indices')
df_obs_indices = pd.read_csv(filepaths.project.obs_indices + '.csv')
obs_indices = df_obs_indices.to_numpy()
#=== Data and Latent Dimensions of Autoencoder ===#
input_dimensions = obs_dimensions
latent_dimensions = options.parameter_dimensions
#=== Prepare Data ===#
data = DataHandler(hyperp, options, filepaths,
options.parameter_dimensions, obs_dimensions)
data.load_data_test()
if options.add_noise == 1:
data.add_noise_qoi_test()
parameter_test = data.poi_test
state_obs_test = data.qoi_test
#=== Load Trained Neural Network ===#
nn = VAEIAF(hyperp, options,
input_dimensions, latent_dimensions,
None, None,
None, None,
positivity_constraint_log_exp)
nn.load_weights(filepaths.trained_nn)
#=== Selecting Samples ===#
sample_number = 105
parameter_test_sample = np.expand_dims(parameter_test[sample_number,:], 0)
state_obs_test_sample = np.expand_dims(state_obs_test[sample_number,:], 0)
#=== Predictions ===#
parameter_pred_sample, _ = nn.iaf_chain_posterior(
nn.encoder(state_obs_test_sample))
state_obs_pred_sample = nn.decoder(parameter_test_sample)
parameter_pred_sample = parameter_pred_sample.numpy().flatten()
state_obs_pred_sample = state_obs_pred_sample.numpy().flatten()
#=== Plotting Prediction ===#
print('================================')
print(' Plotting Predictions ')
print('================================')
#=== Load Mesh ===#
nodes, elements, _, _, _, _, _, _ = load_mesh(filepaths.project)
#=== Plot FEM Functions ===#
plot_fem_function(filepaths.figures_savefile_name_parameter_test,
'True Parameter', 7.0,
nodes, elements,
parameter_test_sample)
plot_fem_function(filepaths.figures_savefile_name_parameter_pred,
'Parameter Prediction', 7.0,
nodes, elements,
parameter_pred_sample)
if options.obs_type == 'full':
plot_fem_function(filepaths.figures_savefile_name_state_test,
'True State', 2.6,
nodes, elements,
state_obs_test_sample)
plot_fem_function(filepaths.figures_savefile_name_state_pred,
'State Prediction', 2.6,
nodes, elements,
state_obs_pred_sample)
print('Predictions plotted')
###############################################################################
# Plot Metrics #
###############################################################################
def plot_and_save_metrics(hyperp, options, filepaths):
print('================================')
print(' Plotting Metrics ')
print('================================')
#=== Load Metrics ===#
print('Loading Metrics')
df_metrics = pd.read_csv(filepaths.trained_nn + "_metrics" + '.csv')
array_metrics = df_metrics.to_numpy()
####################
# Load Metrics #
####################
storage_array_loss_train = array_metrics[:,0]
storage_array_loss_train_VAE = array_metrics[:,1]
storage_array_loss_train_encoder = array_metrics[:,2]
storage_array_relative_error_input_VAE = array_metrics[:,10]
storage_array_relative_error_latent_encoder = array_metrics[:,11]
storage_array_relative_error_input_decoder = array_metrics[:,12]
storage_array_relative_gradient_norm = array_metrics[:,13]
################
# Plotting #
################
#=== Loss Train ===#
fig_loss = plt.figure()
x_axis = np.linspace(1, hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, np.log(storage_array_loss_train))
plt.title('Log-Loss for Training Neural Network')
plt.xlabel('Epochs')
plt.ylabel('Log-Loss')
figures_savefile_name = filepaths.directory_figures + '/' +\
'loss.png'
plt.savefig(figures_savefile_name)
plt.close(fig_loss)
#=== Loss Autoencoder ===#
fig_loss = plt.figure()
x_axis = np.linspace(1, hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, np.log(storage_array_loss_train_VAE))
plt.title('Log-Loss for VAE')
plt.xlabel('Epochs')
plt.ylabel('Log-Loss')
figures_savefile_name = filepaths.directory_figures + '/' +\
'loss_autoencoder.png'
plt.savefig(figures_savefile_name)
plt.close(fig_loss)
#=== Loss Encoder ===#
fig_loss = plt.figure()
x_axis = np.linspace(1, hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, np.log(storage_array_loss_train_encoder))
plt.title('Log-Loss for Encoder')
plt.xlabel('Epochs')
plt.ylabel('Log-Loss')
figures_savefile_name = filepaths.directory_figures + '/' +\
'loss_encoder.png'
plt.savefig(figures_savefile_name)
plt.close(fig_loss)
#=== Relative Error Autoencoder ===#
fig_accuracy = plt.figure()
x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, storage_array_relative_error_input_VAE)
plt.title('Relative Error for Autoencoder')
plt.xlabel('Epochs')
plt.ylabel('Relative Error')
figures_savefile_name = filepaths.directory_figures + '/' +\
'relative_error_autoencoder.png'
plt.savefig(figures_savefile_name)
plt.close(fig_accuracy)
#=== Relative Error Encoder ===#
fig_accuracy = plt.figure()
x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, storage_array_relative_error_latent_encoder)
plt.title('Relative Error for Encoder')
plt.xlabel('Epochs')
plt.ylabel('Relative Error')
figures_savefile_name = filepaths.directory_figures + '/' +\
'relative_error_encoder.png'
plt.savefig(figures_savefile_name)
plt.close(fig_accuracy)
#=== Relative Error Decoder ===#
fig_accuracy = plt.figure()
x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, storage_array_relative_error_input_decoder)
plt.title('Relative Error for Decoder')
plt.xlabel('Epochs')
plt.ylabel('Relative Error')
figures_savefile_name = filepaths.directory_figures + '/' +\
'relative_error_decoder.png'
plt.savefig(figures_savefile_name)
plt.close(fig_accuracy)
#=== Relative Gradient Norm ===#
fig_gradient_norm = plt.figure()
x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, storage_array_relative_gradient_norm)
plt.title('Relative Gradient Norm')
plt.xlabel('Epochs')
plt.ylabel('Relative Error')
figures_savefile_name = filepaths.directory_figures + '/' +\
'relative_error_gradient_norm.png'
plt.savefig(figures_savefile_name)
plt.close(fig_gradient_norm)
if options.model_augmented == 1:
        #=== Loss Forward Model ===#
fig_loss = plt.figure()
x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)
plt.plot(x_axis, storage_array_loss_train_forward_model)
plt.title('Log-loss Forward Model')
plt.xlabel('Epochs')
plt.ylabel('Relative Error')
figures_savefile_name = filepaths.directory_figures + '/' +\
'loss_forward_model.png'
plt.savefig(figures_savefile_name)
plt.close(fig_loss)
print('Plotting complete')
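# --- Hypothetical driver sketch (added for illustration): this script is normally
# invoked by a project-specific pipeline that supplies the hyperp, options and
# filepaths objects described in the module docstring; the loader name below is a
# placeholder, not the project's real API.
# if __name__ == '__main__':
#     hyperp, options, filepaths = load_config()   # placeholder
#     predict_and_plot(hyperp, options, filepaths)
#     plot_and_save_metrics(hyperp, options, filepaths)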
|
[
"sys.path.append",
"matplotlib.pyplot.title",
"numpy.log",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.close",
"neural_networks.nn_vaeiaf.VAEIAF",
"numpy.expand_dims",
"matplotlib.pyplot.figure",
"utils_data.data_handler.DataHandler",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"utils_project.plot_fem_function.plot_fem_function",
"matplotlib.pyplot.savefig",
"Finite_Element_Method.src.load_mesh.load_mesh"
] |
[((968, 1001), 'sys.path.append', 'sys.path.append', (['"""../../../../.."""'], {}), "('../../../../..')\n", (983, 1001), False, 'import sys\n'), ((1074, 1084), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1082, 1084), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2463), 'utils_data.data_handler.DataHandler', 'DataHandler', (['hyperp', 'options', 'filepaths', 'options.parameter_dimensions', 'obs_dimensions'], {}), '(hyperp, options, filepaths, options.parameter_dimensions,\n obs_dimensions)\n', (2385, 2463), False, 'from utils_data.data_handler import DataHandler\n'), ((2696, 2815), 'neural_networks.nn_vaeiaf.VAEIAF', 'VAEIAF', (['hyperp', 'options', 'input_dimensions', 'latent_dimensions', 'None', 'None', 'None', 'None', 'positivity_constraint_log_exp'], {}), '(hyperp, options, input_dimensions, latent_dimensions, None, None,\n None, None, positivity_constraint_log_exp)\n', (2702, 2815), False, 'from neural_networks.nn_vaeiaf import VAEIAF\n'), ((3003, 3054), 'numpy.expand_dims', 'np.expand_dims', (['parameter_test[sample_number, :]', '(0)'], {}), '(parameter_test[sample_number, :], 0)\n', (3017, 3054), True, 'import numpy as np\n'), ((3082, 3133), 'numpy.expand_dims', 'np.expand_dims', (['state_obs_test[sample_number, :]', '(0)'], {}), '(state_obs_test[sample_number, :], 0)\n', (3096, 3133), True, 'import numpy as np\n'), ((3697, 3725), 'Finite_Element_Method.src.load_mesh.load_mesh', 'load_mesh', (['filepaths.project'], {}), '(filepaths.project)\n', (3706, 3725), False, 'from Finite_Element_Method.src.load_mesh import load_mesh\n'), ((3764, 3896), 'utils_project.plot_fem_function.plot_fem_function', 'plot_fem_function', (['filepaths.figures_savefile_name_parameter_test', '"""True Parameter"""', '(7.0)', 'nodes', 'elements', 'parameter_test_sample'], {}), "(filepaths.figures_savefile_name_parameter_test,\n 'True Parameter', 7.0, nodes, elements, parameter_test_sample)\n", (3781, 3896), False, 'from utils_project.plot_fem_function import plot_fem_function\n'), ((3962, 4100), 'utils_project.plot_fem_function.plot_fem_function', 'plot_fem_function', (['filepaths.figures_savefile_name_parameter_pred', '"""Parameter Prediction"""', '(7.0)', 'nodes', 'elements', 'parameter_pred_sample'], {}), "(filepaths.figures_savefile_name_parameter_pred,\n 'Parameter Prediction', 7.0, nodes, elements, parameter_pred_sample)\n", (3979, 4100), False, 'from utils_project.plot_fem_function import plot_fem_function\n'), ((5159, 5214), 'pandas.read_csv', 'pd.read_csv', (["(filepaths.trained_nn + '_metrics' + '.csv')"], {}), "(filepaths.trained_nn + '_metrics' + '.csv')\n", (5170, 5214), True, 'import pandas as pd\n'), ((5866, 5878), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5876, 5878), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5959), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (5903, 5959), True, 'import numpy as np\n'), ((6021, 6070), 'matplotlib.pyplot.title', 'plt.title', (['"""Log-Loss for Training Neural Network"""'], {}), "('Log-Loss for Training Neural Network')\n", (6030, 6070), True, 'import matplotlib.pyplot as plt\n'), ((6075, 6095), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (6085, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6100, 6122), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log-Loss"""'], {}), "('Log-Loss')\n", (6110, 6122), True, 'import matplotlib.pyplot as plt\n'), 
((6215, 6249), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (6226, 6249), True, 'import matplotlib.pyplot as plt\n'), ((6254, 6273), 'matplotlib.pyplot.close', 'plt.close', (['fig_loss'], {}), '(fig_loss)\n', (6263, 6273), True, 'import matplotlib.pyplot as plt\n'), ((6321, 6333), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6331, 6333), True, 'import matplotlib.pyplot as plt\n'), ((6347, 6414), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (6358, 6414), True, 'import numpy as np\n'), ((6480, 6509), 'matplotlib.pyplot.title', 'plt.title', (['"""Log-Loss for VAE"""'], {}), "('Log-Loss for VAE')\n", (6489, 6509), True, 'import matplotlib.pyplot as plt\n'), ((6514, 6534), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (6524, 6534), True, 'import matplotlib.pyplot as plt\n'), ((6539, 6561), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log-Loss"""'], {}), "('Log-Loss')\n", (6549, 6561), True, 'import matplotlib.pyplot as plt\n'), ((6666, 6700), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (6677, 6700), True, 'import matplotlib.pyplot as plt\n'), ((6705, 6724), 'matplotlib.pyplot.close', 'plt.close', (['fig_loss'], {}), '(fig_loss)\n', (6714, 6724), True, 'import matplotlib.pyplot as plt\n'), ((6768, 6780), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6778, 6780), True, 'import matplotlib.pyplot as plt\n'), ((6794, 6861), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (6805, 6861), True, 'import numpy as np\n'), ((6931, 6964), 'matplotlib.pyplot.title', 'plt.title', (['"""Log-Loss for Encoder"""'], {}), "('Log-Loss for Encoder')\n", (6940, 6964), True, 'import matplotlib.pyplot as plt\n'), ((6969, 6989), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (6979, 6989), True, 'import matplotlib.pyplot as plt\n'), ((6994, 7016), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log-Loss"""'], {}), "('Log-Loss')\n", (7004, 7016), True, 'import matplotlib.pyplot as plt\n'), ((7117, 7151), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (7128, 7151), True, 'import matplotlib.pyplot as plt\n'), ((7156, 7175), 'matplotlib.pyplot.close', 'plt.close', (['fig_loss'], {}), '(fig_loss)\n', (7165, 7175), True, 'import matplotlib.pyplot as plt\n'), ((7237, 7249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7247, 7249), True, 'import matplotlib.pyplot as plt\n'), ((7263, 7330), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (7274, 7330), True, 'import numpy as np\n'), ((7336, 7392), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'storage_array_relative_error_input_VAE'], {}), '(x_axis, storage_array_relative_error_input_VAE)\n', (7344, 7392), True, 'import matplotlib.pyplot as plt\n'), ((7397, 7440), 'matplotlib.pyplot.title', 'plt.title', (['"""Relative Error for Autoencoder"""'], {}), "('Relative Error for Autoencoder')\n", (7406, 7440), True, 'import matplotlib.pyplot as plt\n'), ((7445, 7465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Epochs"""'], {}), "('Epochs')\n", (7455, 7465), True, 'import matplotlib.pyplot as plt\n'), ((7470, 7498), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error"""'], {}), "('Relative Error')\n", (7480, 7498), True, 'import matplotlib.pyplot as plt\n'), ((7613, 7647), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (7624, 7647), True, 'import matplotlib.pyplot as plt\n'), ((7652, 7675), 'matplotlib.pyplot.close', 'plt.close', (['fig_accuracy'], {}), '(fig_accuracy)\n', (7661, 7675), True, 'import matplotlib.pyplot as plt\n'), ((7733, 7745), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7743, 7745), True, 'import matplotlib.pyplot as plt\n'), ((7759, 7826), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (7770, 7826), True, 'import numpy as np\n'), ((7832, 7893), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'storage_array_relative_error_latent_encoder'], {}), '(x_axis, storage_array_relative_error_latent_encoder)\n', (7840, 7893), True, 'import matplotlib.pyplot as plt\n'), ((7898, 7937), 'matplotlib.pyplot.title', 'plt.title', (['"""Relative Error for Encoder"""'], {}), "('Relative Error for Encoder')\n", (7907, 7937), True, 'import matplotlib.pyplot as plt\n'), ((7942, 7962), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (7952, 7962), True, 'import matplotlib.pyplot as plt\n'), ((7967, 7995), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error"""'], {}), "('Relative Error')\n", (7977, 7995), True, 'import matplotlib.pyplot as plt\n'), ((8106, 8140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (8117, 8140), True, 'import matplotlib.pyplot as plt\n'), ((8145, 8168), 'matplotlib.pyplot.close', 'plt.close', (['fig_accuracy'], {}), '(fig_accuracy)\n', (8154, 8168), True, 'import matplotlib.pyplot as plt\n'), ((8226, 8238), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8236, 8238), True, 'import matplotlib.pyplot as plt\n'), ((8252, 8319), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (8263, 8319), True, 'import numpy as np\n'), ((8325, 8385), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'storage_array_relative_error_input_decoder'], {}), '(x_axis, storage_array_relative_error_input_decoder)\n', (8333, 8385), True, 'import matplotlib.pyplot as plt\n'), ((8390, 8429), 'matplotlib.pyplot.title', 'plt.title', (['"""Relative Error for Decoder"""'], {}), "('Relative Error for Decoder')\n", (8399, 8429), True, 'import matplotlib.pyplot as plt\n'), ((8434, 8454), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (8444, 8454), True, 'import matplotlib.pyplot as plt\n'), ((8459, 8487), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error"""'], {}), "('Relative Error')\n", (8469, 8487), True, 'import matplotlib.pyplot as plt\n'), ((8598, 8632), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (8609, 8632), True, 'import matplotlib.pyplot as plt\n'), ((8637, 8660), 'matplotlib.pyplot.close', 'plt.close', (['fig_accuracy'], {}), '(fig_accuracy)\n', (8646, 8660), True, 'import matplotlib.pyplot as plt\n'), ((8723, 8735), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8733, 8735), True, 'import matplotlib.pyplot as plt\n'), ((8749, 8816), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (8760, 8816), True, 'import numpy as np\n'), ((8822, 8876), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'storage_array_relative_gradient_norm'], {}), '(x_axis, storage_array_relative_gradient_norm)\n', (8830, 8876), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8916), 'matplotlib.pyplot.title', 'plt.title', (['"""Relative Gradient Norm"""'], {}), "('Relative Gradient Norm')\n", (8890, 8916), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (8931, 8941), True, 'import matplotlib.pyplot as plt\n'), ((8946, 8974), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error"""'], {}), "('Relative Error')\n", (8956, 8974), True, 'import matplotlib.pyplot as plt\n'), ((9091, 9125), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (9102, 9125), True, 'import matplotlib.pyplot as plt\n'), ((9130, 9158), 'matplotlib.pyplot.close', 'plt.close', (['fig_gradient_norm'], {}), '(fig_gradient_norm)\n', (9139, 9158), True, 'import matplotlib.pyplot as plt\n'), ((2087, 2138), 'pandas.read_csv', 'pd.read_csv', (["(filepaths.project.obs_indices + '.csv')"], {}), "(filepaths.project.obs_indices + '.csv')\n", (2098, 2138), True, 'import pandas as pd\n'), ((4206, 4330), 'utils_project.plot_fem_function.plot_fem_function', 'plot_fem_function', (['filepaths.figures_savefile_name_state_test', '"""True State"""', '(2.6)', 'nodes', 'elements', 'state_obs_test_sample'], {}), "(filepaths.figures_savefile_name_state_test, 'True State',\n 2.6, nodes, elements, state_obs_test_sample)\n", (4223, 4330), False, 'from utils_project.plot_fem_function import plot_fem_function\n'), ((4413, 4543), 'utils_project.plot_fem_function.plot_fem_function', 'plot_fem_function', (['filepaths.figures_savefile_name_state_pred', '"""State Prediction"""', '(2.6)', 'nodes', 'elements', 'state_obs_pred_sample'], {}), "(filepaths.figures_savefile_name_state_pred,\n 'State Prediction', 2.6, nodes, elements, state_obs_pred_sample)\n", (4430, 4543), False, 'from utils_project.plot_fem_function import plot_fem_function\n'), ((5983, 6015), 'numpy.log', 'np.log', (['storage_array_loss_train'], {}), '(storage_array_loss_train)\n', (5989, 6015), True, 'import numpy as np\n'), ((6438, 6474), 'numpy.log', 'np.log', (['storage_array_loss_train_VAE'], {}), '(storage_array_loss_train_VAE)\n', (6444, 6474), True, 'import numpy as np\n'), ((6885, 6925), 'numpy.log', 'np.log', (['storage_array_loss_train_encoder'], {}), '(storage_array_loss_train_encoder)\n', (6891, 6925), True, 'import numpy as np\n'), ((9257, 9269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9267, 9269), True, 'import matplotlib.pyplot as plt\n'), ((9287, 9354), 'numpy.linspace', 'np.linspace', (['(1)', 'hyperp.num_epochs', 'hyperp.num_epochs'], {'endpoint': '(True)'}), '(1, hyperp.num_epochs, hyperp.num_epochs, endpoint=True)\n', (9298, 9354), True, 'import numpy as np\n'), ((9364, 9420), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'storage_array_loss_train_forward_model'], {}), '(x_axis, storage_array_loss_train_forward_model)\n', (9372, 9420), True, 'import matplotlib.pyplot as plt\n'), ((9429, 9464), 
'matplotlib.pyplot.title', 'plt.title', (['"""Log-loss Forward Model"""'], {}), "('Log-loss Forward Model')\n", (9438, 9464), True, 'import matplotlib.pyplot as plt\n'), ((9473, 9493), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (9483, 9493), True, 'import matplotlib.pyplot as plt\n'), ((9502, 9530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error"""'], {}), "('Relative Error')\n", (9512, 9530), True, 'import matplotlib.pyplot as plt\n'), ((9649, 9683), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figures_savefile_name'], {}), '(figures_savefile_name)\n', (9660, 9683), True, 'import matplotlib.pyplot as plt\n'), ((9692, 9711), 'matplotlib.pyplot.close', 'plt.close', (['fig_loss'], {}), '(fig_loss)\n', (9701, 9711), True, 'import matplotlib.pyplot as plt\n')]
|
from Welcome import WelcomeCog
from MinimumMessages import MinimumMessagesCog
from SupporterRoles import SupporterRolesCog
from ReactionRoles import ReactionRolesCog
import discord
from discord.ext import commands, tasks
import asyncio
from discord.abc import GuildChannel
from discord.guild import Guild
from discord.member import Member
from discord.role import Role
from dotenv import load_dotenv
import os
import json
import emoji
load_dotenv()
BOT_TOKEN = os.environ.get("bot-token")
intents: discord.Intents = discord.Intents.default()
intents.members = True
intents.reactions = True
intents.guilds = True
bot = commands.Bot(command_prefix='.', intents=intents)
@bot.event
async def on_member_join(member: discord.Member):
print(f'{member.display_name} has joined the server')
@bot.event
async def on_member_remove(member: discord.Member):
print(f'{member.display_name} has left a server')
@bot.command()
@commands.has_permissions(administrator=True)
async def die(context: commands.Context):
await bot.close()
# initialize
@bot.event
async def on_ready():
print('Bot has started.')
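    # data.json holds the shared configuration that is passed to every cog below.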
json_file = open('data.json')
json_data = json.load(json_file)
json_file.close()
bot.add_cog(ReactionRolesCog(bot, json_data))
bot.add_cog(SupporterRolesCog(bot, json_data))
bot.add_cog(MinimumMessagesCog(bot, json_data))
bot.add_cog(WelcomeCog(bot, json_data))
bot.run(BOT_TOKEN)
|
[
"json.load",
"Welcome.WelcomeCog",
"ReactionRoles.ReactionRolesCog",
"discord.ext.commands.has_permissions",
"SupporterRoles.SupporterRolesCog",
"dotenv.load_dotenv",
"os.environ.get",
"discord.ext.commands.Bot",
"MinimumMessages.MinimumMessagesCog",
"discord.Intents.default"
] |
[((438, 451), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (449, 451), False, 'from dotenv import load_dotenv\n'), ((464, 491), 'os.environ.get', 'os.environ.get', (['"""bot-token"""'], {}), "('bot-token')\n", (478, 491), False, 'import os\n'), ((521, 546), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (544, 546), False, 'import discord\n'), ((624, 673), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""."""', 'intents': 'intents'}), "(command_prefix='.', intents=intents)\n", (636, 673), False, 'from discord.ext import commands, tasks\n'), ((935, 979), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (959, 979), False, 'from discord.ext import commands, tasks\n'), ((1169, 1189), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1178, 1189), False, 'import json\n'), ((1221, 1253), 'ReactionRoles.ReactionRolesCog', 'ReactionRolesCog', (['bot', 'json_data'], {}), '(bot, json_data)\n', (1237, 1253), False, 'from ReactionRoles import ReactionRolesCog\n'), ((1267, 1300), 'SupporterRoles.SupporterRolesCog', 'SupporterRolesCog', (['bot', 'json_data'], {}), '(bot, json_data)\n', (1284, 1300), False, 'from SupporterRoles import SupporterRolesCog\n'), ((1314, 1348), 'MinimumMessages.MinimumMessagesCog', 'MinimumMessagesCog', (['bot', 'json_data'], {}), '(bot, json_data)\n', (1332, 1348), False, 'from MinimumMessages import MinimumMessagesCog\n'), ((1362, 1388), 'Welcome.WelcomeCog', 'WelcomeCog', (['bot', 'json_data'], {}), '(bot, json_data)\n', (1372, 1388), False, 'from Welcome import WelcomeCog\n')]
|
import logging
logging.basicConfig(format="[%(asctime)s] %(filename)s [line:%(lineno)d] %(message)s", datefmt="%m-%d %H:%M:%S")
import os
import sys
import urllib
import pprint
import tarfile
import tensorflow as tf
import json
import datetime
import dateutil.tz
import numpy as np
import scipy.misc
pp = pprint.PrettyPrinter().pprint
logger = logging.getLogger(__name__)
def setup_model_saving(model_name, data, hyperparams=None, root_dir='run/'):
# construct the model directory template name
name = os.path.join(root_dir, data, model_name + '%s')
# iterate until we find an index that hasn't been taken yet.
i = 0
while os.path.exists(name % i):
i += 1
name = name % i
# create the folder
os.makedirs(name)
return name
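# Hypothetical usage (illustrative only): successive calls with the same arguments
# create 'run/<data>/<model_name>0', 'run/<data>/<model_name>1', ... and return the path.
# model_dir = setup_model_saving('pixelcnn', 'mnist')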
def mprint(matrix, pivot=0.5):
for array in matrix:
print("".join("#" if i > pivot else " " for i in array))
def show_all_variables():
total_count = 0
for idx, op in enumerate(tf.trainable_variables()):
shape = op.get_shape()
count = np.prod(shape)
print("[%2d] %s %s = %s" % (idx, op.name, shape, count))
total_count += int(count)
print("[Total] variable size: %s" % "{:,}".format(total_count))
def get_timestamp():
now = datetime.datetime.now(dateutil.tz.tzlocal())
return now.strftime('%Y_%m_%d_%H_%M_%S')
def binarize(images):
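    # Stochastic binarization: each pixel becomes 1 with probability equal to its
    # (assumed [0, 1]-scaled) intensity, and 0 otherwise.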
return (np.random.uniform(size=images.shape) < images).astype('float32')
def save_images_in(images,cmin=0.0, cmax=1.0,directory="./",prefix="sample"):
index = 1
for i in np.arange(images.shape[0]):
filename = '%s_%s_%s.jpg' % (i,prefix, get_timestamp())
scipy.misc.toimage(images[i].reshape(images[i].shape[0],images[i].shape[1]), cmin=0, cmax=255).save(filename)
def save_images(images, height, width,channel, n_row, n_col, cmin=0.0, cmax=1.0, directory="./", prefix="sample"):
if channel == 1:
images = images.reshape((n_row, n_col, height, width))
images = images.transpose(1, 2, 0, 3)
images = images.reshape((height * n_row, width * n_col))
filename = '%s_%s.jpg' % (prefix, get_timestamp())
scipy.misc.toimage(images, cmin=cmin, cmax=cmax).save(os.path.join(directory, filename))
elif channel == 3:
images = images.reshape((n_row, n_col, height, width,channel))
images = images.transpose(1, 2, 0, 3, 4)
images = images.reshape((height * n_row, width * n_col,channel))
filename = '%s_%s.jpg' % (prefix, get_timestamp())
scipy.misc.toimage(images).save(os.path.join(directory, filename))
def get_model_dir(conf, exceptions=None):
# attrs = conf.__dict__['__flags']
# pp(attrs)
keys = conf.flag_values_dict()
# keys.remove('data')
# keys = ['data'] + keys
names =[]
for key in keys:
# Only use useful flags
if key not in exceptions:
names.append("%s=%s" % (key, ",".join([str(i) for i in conf[key]])
if type(conf[key]) == list else conf[key]))
return os.path.join('checkpoints', *names) + '/'
def preprocess_conf(conf):
options = conf.__flags
for option, value in options.items():
option = option.lower()
def check_and_create_dir(directory):
if not os.path.exists(directory):
logger.info('Creating directory: %s' % directory)
os.makedirs(directory)
else:
logger.info('Skip creating directory: %s' % directory)
def maybe_download_and_extract(dest_directory):
"""
Download and extract the tarball from Alex's website.
From https://github.com/tensorflow/tensorflow/blob/r0.9/tensorflow/models/image/cifar10/cifar10.py
"""
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
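        # NOTE: urllib.urlretrieve is the Python 2 interface; under Python 3 the
        # equivalent call lives at urllib.request.urlretrieve.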
filepath, _ = urllib.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
[
"numpy.random.uniform",
"os.makedirs",
"logging.basicConfig",
"tensorflow.trainable_variables",
"os.stat",
"os.path.exists",
"numpy.prod",
"pprint.PrettyPrinter",
"numpy.arange",
"sys.stdout.flush",
"urllib.urlretrieve",
"tarfile.open",
"os.path.join",
"logging.getLogger"
] |
[((15, 137), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s] %(filename)s [line:%(lineno)d] %(message)s"""', 'datefmt': '"""%m-%d %H:%M:%S"""'}), "(format=\n '[%(asctime)s] %(filename)s [line:%(lineno)d] %(message)s', datefmt=\n '%m-%d %H:%M:%S')\n", (34, 137), False, 'import logging\n'), ((347, 374), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'import logging\n'), ((308, 330), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (328, 330), False, 'import pprint\n'), ((510, 557), 'os.path.join', 'os.path.join', (['root_dir', 'data', "(model_name + '%s')"], {}), "(root_dir, data, model_name + '%s')\n", (522, 557), False, 'import os\n'), ((637, 661), 'os.path.exists', 'os.path.exists', (['(name % i)'], {}), '(name % i)\n', (651, 661), False, 'import os\n'), ((716, 733), 'os.makedirs', 'os.makedirs', (['name'], {}), '(name)\n', (727, 733), False, 'import os\n'), ((1492, 1518), 'numpy.arange', 'np.arange', (['images.shape[0]'], {}), '(images.shape[0])\n', (1501, 1518), True, 'import numpy as np\n'), ((3692, 3730), 'os.path.join', 'os.path.join', (['dest_directory', 'filename'], {}), '(dest_directory, filename)\n', (3704, 3730), False, 'import os\n'), ((936, 960), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (958, 960), True, 'import tensorflow as tf\n'), ((1002, 1016), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1009, 1016), True, 'import numpy as np\n'), ((2893, 2928), 'os.path.join', 'os.path.join', (['"""checkpoints"""', '*names'], {}), "('checkpoints', *names)\n", (2905, 2928), False, 'import os\n'), ((3104, 3129), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3118, 3129), False, 'import os\n'), ((3189, 3211), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (3200, 3211), False, 'import os\n'), ((3577, 3607), 'os.path.exists', 'os.path.exists', (['dest_directory'], {}), '(dest_directory)\n', (3591, 3607), False, 'import os\n'), ((3613, 3640), 'os.makedirs', 'os.makedirs', (['dest_directory'], {}), '(dest_directory)\n', (3624, 3640), False, 'import os\n'), ((3741, 3765), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (3755, 3765), False, 'import os\n'), ((3991, 4040), 'urllib.urlretrieve', 'urllib.urlretrieve', (['DATA_URL', 'filepath', '_progress'], {}), '(DATA_URL, filepath, _progress)\n', (4009, 4040), False, 'import urllib\n'), ((4068, 4085), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (4075, 4085), False, 'import os\n'), ((2114, 2147), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (2126, 2147), False, 'import os\n'), ((3954, 3972), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3970, 3972), False, 'import sys\n'), ((1325, 1361), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'images.shape'}), '(size=images.shape)\n', (1342, 1361), True, 'import numpy as np\n'), ((2453, 2486), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (2465, 2486), False, 'import os\n'), ((4165, 4195), 'tarfile.open', 'tarfile.open', (['filepath', '"""r:gz"""'], {}), "(filepath, 'r:gz')\n", (4177, 4195), False, 'import tarfile\n')]
|
from dataclasses import dataclass
from datetime import date
from typing import Optional, List
from domain import events
@dataclass(frozen=True)
class OrderLine:
orderid: str
sku: str
qty: int
class Batch:
def __init__(self, ref: str, sku: str, qty:int, eta: Optional[date]):
self.reference = ref
self.sku = sku
self.eta = eta
self._purchased_quantity = qty
self._allocations = set()
def __eq__(self, other):
if not isinstance(other, Batch):
return False
return other.reference == self.reference
def __hash__(self):
return hash(self.reference)
def __gt__(self, other):
if self.eta is None:
return False
if other.eta is None:
return True
return self.eta > other.eta
def allocate(self, line: OrderLine):
if self.can_allocate(line):
self._allocations.add(line)
def deallocate(self, line: OrderLine):
if line in self._allocations:
self._allocations.remove(line)
def deallocate_one(self) -> OrderLine:
return self._allocations.pop()
@property
def allocated_quantity(self) -> int:
return sum(line.qty for line in self._allocations)
@property
def available_quantity(self) -> int:
return self._purchased_quantity - self.allocated_quantity
def can_allocate(self, line: OrderLine) -> bool:
return self.sku == line.sku and self.available_quantity >= line.qty
class Product:
def __init__(self, sku: str, batches: List[Batch], version_number: int = 0):
self.sku = sku
        self.batches = batches
self.version_number = version_number
self.events = []
def allocate(self, line: OrderLine):
try:
batch = next(
b for b in sorted(self.batches) if b.can_allocate(line)
)
batch.allocate(line)
            self.version_number += 1
self.events.append(events.Allocated(
orderid=line.orderid, sku=line.sku, qty=line.qty, batchref=batch.reference
))
return batch.reference
except StopIteration:
self.events.append(events.OutOfStock(line.sku))
return None
def change_batch_quantity(self, ref: str, qty: int):
batch = next(b for b in self.batches if b.reference == ref)
batch._purchased_quantity = qty
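        # Shrinking the batch can leave it over-allocated; pop allocated lines until
        # available_quantity is non-negative and re-emit them as AllocationRequired events.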
while batch.available_quantity < 0:
line = batch.deallocate_one()
self.events.append(
events.AllocationRequired(line.orderid, line.sku, line.qty)
)
class OutOfStock(Exception):
pass
def allocate(line: OrderLine, batches: List[Batch]) -> str:
try:
batch = next(b for b in sorted(batches) if b.can_allocate(line))
batch.allocate(line)
return batch.reference
except StopIteration:
raise OutOfStock(f'Lack of SKU {line.sku}')
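# --- Hypothetical usage sketch (added for illustration; the batch/order identifiers
# and SKU below are invented examples, not values from the original module) ---
if __name__ == '__main__':
    in_stock = Batch('batch-001', 'SMALL-TABLE', qty=20, eta=None)
    shipment = Batch('batch-002', 'SMALL-TABLE', qty=20, eta=date(2021, 1, 2))
    line = OrderLine('order-123', 'SMALL-TABLE', 2)
    # Batches with eta=None sort first (see Batch.__gt__), so the in-stock batch is preferred.
    print(allocate(line, [shipment, in_stock]))  # expected: 'batch-001'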
|
[
"domain.events.Allocated",
"domain.events.OutOfStock",
"domain.events.AllocationRequired",
"dataclasses.dataclass"
] |
[((123, 145), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (132, 145), False, 'from dataclasses import dataclass\n'), ((2000, 2097), 'domain.events.Allocated', 'events.Allocated', ([], {'orderid': 'line.orderid', 'sku': 'line.sku', 'qty': 'line.qty', 'batchref': 'batch.reference'}), '(orderid=line.orderid, sku=line.sku, qty=line.qty, batchref\n =batch.reference)\n', (2016, 2097), False, 'from domain import events\n'), ((2573, 2632), 'domain.events.AllocationRequired', 'events.AllocationRequired', (['line.orderid', 'line.sku', 'line.qty'], {}), '(line.orderid, line.sku, line.qty)\n', (2598, 2632), False, 'from domain import events\n'), ((2220, 2247), 'domain.events.OutOfStock', 'events.OutOfStock', (['line.sku'], {}), '(line.sku)\n', (2237, 2247), False, 'from domain import events\n')]
|
import json
import sqlite3
import datetime
import math
import requests
import string
import time
visits_db = '__HOME__/locations.db'
st_pop = dict()
st_pop["AK"] = 731545
st_pop["AL"] = 4903000
st_pop["AR"] = 3018000
st_pop["AZ"] = 7279000
st_pop["CA"] = 39510000
st_pop["CO"] = 5759000
st_pop["CT"] = 3565000
st_pop["DC"] = 702455
st_pop["DE"] = 973764
st_pop["FL"] = 21480000
st_pop["GA"] = 10620000
st_pop["HI"] = 1416000
st_pop["IA"] = 3155000
st_pop["ID"] = 1787000
st_pop["IL"] = 12670000
st_pop["IN"] = 6732000
st_pop["KS"] = 2913000
st_pop["KY"] = 4468000
st_pop["LA"] = 4649000
st_pop["MA"] = 6893000
st_pop["MD"] = 6046000
st_pop["ME"] = 1344000
st_pop["MI"] = 9987000
st_pop["MN"] = 5640000
st_pop["MO"] = 6137000
st_pop["MS"] = 2976000
st_pop["MT"] = 1069000
st_pop["NC"] = 1049000
st_pop["ND"] = 762062
st_pop["NE"] = 1934000
st_pop["NH"] = 1360000
st_pop["NJ"] = 8882000
st_pop["NM"] = 2097000
st_pop["NV"] = 3080000
st_pop["NY"] = 19450000
st_pop["OH"] = 11690000
st_pop["OK"] = 3957000
st_pop["OR"] = 4218000
st_pop["PA"] = 12800000
st_pop["RI"] = 1059000
st_pop["SC"] = 5149000
st_pop["SD"] = 884659
st_pop["TN"] = 6829000
st_pop["TX"] = 29000000
st_pop["UT"] = 3206000
st_pop["VA"] = 8536000
st_pop["VT"] = 623989
st_pop["WA"] = 7615000
st_pop["WI"] = 5822000
st_pop["WV"] = 1792000
st_pop["WY"] = 578759
def request_handler(request):
def hl_func(time_now,time_entry):
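        # Exponential decay weight for a visit: halves every 390 minutes (6.5 hours)
        # since the recorded entry time, so recent visits count more.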
fmt = '%Y-%m-%d %H:%M:%S.%f'
time_then = datetime.datetime.strptime(time_entry, fmt)
td = time_now - time_then
td_mins = int(round(td.total_seconds()/60))
return 0.5**(td_mins/390)
if (request['method']=='GET'):
if 'user' in request['args'] and 'password' in request['args'] and 'location' in request['args']:
if 'admin'==request['values']['user'] and 'adminpassword'==request['values']['password']:
location_list = request['values']['location']
location_list = location_list.split(",")
location_list = list(map(float, location_list))
lat = location_list[0]
lon = location_list[1]
time_now = datetime.datetime.now()
conn = sqlite3.connect(visits_db) # connect to that database (will create if it doesn't already exist)
c = conn.cursor() # move cursor into database (allows us to execute commands)
# find all dangerous points within 1km bounding box
dangerous_points = ""
R = 6371 #radius of the earth
lat_kil = math.degrees(0.5/R) #0.5km in latitude degree (constant)
lon_kil = math.degrees(0.5/R/math.cos(math.radians(lat))) #0.5km in longitude degree (changes based on latitude)
entries = c.execute('''SELECT * FROM locations_table
WHERE latitude BETWEEN ?-? AND ?+?
AND
longitude BETWEEN ?-? AND ?+?;''', (lat,lat_kil,lat,lat_kil,lon,lon_kil,lon,lon_kil)).fetchall()
# find weights of all points within 1km bounding box and add to dangerous_points if > 0.5
if len(entries) > 0:
for entry in entries:
try:
# Get state of person
loc_string = str(entry[1]) + "," + str(entry[2])
r = requests.get("""https://maps.googleapis.com/maps/api/geocode/json?latlng={}&key=<KEY>""".format(loc_string))
response = json.loads(r.text)
state = response['results'][0]['address_components'][5]['short_name']
state_pop = st_pop[state]
# Get information on # of infections in state of interest
r2 = requests.get("""https://covidtracking.com/api/v1/states/current.json""")
response2 = json.loads(r2.text)
for s in response2:
if s['state'] == state:
infected_pop = s['positive']
# Find percent of population infected
percent_infected = infected_pop/state_pop
except:
percent_infected = 0.09
if entry[4]==1:
weight = hl_func(time_now,entry[3])
if weight > 0.8:
return_latitude = str(entry[1])
return_longitude = str(entry[2])
if len(return_latitude)<9:
for i in range(8-len(return_latitude)):
return_latitude+='0'
if len(return_longitude)<9:
for i in range(8-len(return_longitude)):
return_longitude+='0'
loc = str(return_latitude)[:8] + "," + str(return_longitude)[:8] + "\n"
dangerous_points+=loc
else:
weight = hl_func(time_now,entry[3])*(percent_infected)
if weight > 0.006:
return_latitude = str(entry[1])
return_longitude = str(entry[2])
if len(return_latitude)<9:
for i in range(8-len(return_latitude)):
return_latitude+='0'
if len(return_longitude)<9:
for i in range(8-len(return_longitude)):
return_longitude+='0'
loc = str(return_latitude)[:8] + "," + str(return_longitude)[:8] + "\n"
dangerous_points+=loc
conn.commit()
conn.close()
return dangerous_points
else:
conn.commit()
conn.close()
return "No dangerous points near me."
else:
return "Not Authorized"
return "Invalid Request"
|
[
"json.loads",
"math.radians",
"datetime.datetime.strptime",
"sqlite3.connect",
"requests.get",
"math.degrees",
"datetime.datetime.now"
] |
[((1452, 1495), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['time_entry', 'fmt'], {}), '(time_entry, fmt)\n', (1478, 1495), False, 'import datetime\n'), ((2149, 2172), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2170, 2172), False, 'import datetime\n'), ((2196, 2222), 'sqlite3.connect', 'sqlite3.connect', (['visits_db'], {}), '(visits_db)\n', (2211, 2222), False, 'import sqlite3\n'), ((2584, 2605), 'math.degrees', 'math.degrees', (['(0.5 / R)'], {}), '(0.5 / R)\n', (2596, 2605), False, 'import math\n'), ((2695, 2712), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (2707, 2712), False, 'import math\n'), ((3610, 3628), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3620, 3628), False, 'import json\n'), ((3902, 3970), 'requests.get', 'requests.get', (['"""https://covidtracking.com/api/v1/states/current.json"""'], {}), "('https://covidtracking.com/api/v1/states/current.json')\n", (3914, 3970), False, 'import requests\n'), ((4015, 4034), 'json.loads', 'json.loads', (['r2.text'], {}), '(r2.text)\n', (4025, 4034), False, 'import json\n')]
|
"""
Builds Deformable DETR package.
"""
import glob
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "models/ops/src")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
raise NotImplementedError("CUDA is not available.")
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"deformable_detr.models.ops.MultiScaleDeformableAttention",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
packages = find_packages(exclude=("*datasets", "models.ops"))
packages = [f'deformable_detr.{package}' for package in packages]
package_dir = {'deformable_detr': ''}
setup(
package_dir=package_dir,
packages=packages,
python_requires=">=3.7",
install_requires=[
"torch>=1.5.1",
"torchvision>=0.6.1",
"pycocotools>=2.0.2",
"tqdm>4.29.0",
"cython",
"scipy"
],
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
|
[
"os.path.abspath",
"torch.cuda.is_available",
"os.path.join",
"setuptools.find_packages"
] |
[((1634, 1684), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('*datasets', 'models.ops')"}), "(exclude=('*datasets', 'models.ops'))\n", (1647, 1684), False, 'from setuptools import find_packages, setup\n'), ((375, 415), 'os.path.join', 'os.path.join', (['this_dir', '"""models/ops/src"""'], {}), "(this_dir, 'models/ops/src')\n", (387, 415), False, 'import os\n'), ((327, 352), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (342, 352), False, 'import os\n'), ((443, 480), 'os.path.join', 'os.path.join', (['extensions_dir', '"""*.cpp"""'], {}), "(extensions_dir, '*.cpp')\n", (455, 480), False, 'import os\n'), ((509, 553), 'os.path.join', 'os.path.join', (['extensions_dir', '"""cpu"""', '"""*.cpp"""'], {}), "(extensions_dir, 'cpu', '*.cpp')\n", (521, 553), False, 'import os\n'), ((583, 627), 'os.path.join', 'os.path.join', (['extensions_dir', '"""cuda"""', '"""*.cu"""'], {}), "(extensions_dir, 'cuda', '*.cu')\n", (595, 627), False, 'import os\n'), ((764, 789), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (787, 789), False, 'import torch\n'), ((1232, 1263), 'os.path.join', 'os.path.join', (['extensions_dir', 's'], {}), '(extensions_dir, s)\n', (1244, 1263), False, 'import os\n')]
|
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import binascii
import json
import os
import os.path
import subprocess
import unittest
from pywatchman import bser, encoding
from watchman.integration.lib import WatchmanInstance
class TestDashJCliOption(unittest.TestCase):
def getSockPath(self):
return WatchmanInstance.getSharedInstance().getSockPath()
def doJson(self, addNewLine, pretty: bool = False) -> None:
sockpath = self.getSockPath()
if pretty:
watchman_cmd = b'[\n"get-sockname"\n]'
else:
watchman_cmd = json.dumps(["get-sockname"])
watchman_cmd = watchman_cmd.encode("ascii")
if addNewLine:
watchman_cmd = watchman_cmd + b"\n"
cli_cmd = [
os.environ.get("WATCHMAN_BINARY", "watchman"),
"--unix-listener-path={0}".format(sockpath.unix_domain),
"--named-pipe-path={0}".format(sockpath.named_pipe),
"--logfile=/BOGUS",
"--statefile=/BOGUS",
"--no-spawn",
"--no-local",
"-j",
]
proc = subprocess.Popen(
cli_cmd,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
stdout, stderr = proc.communicate(input=watchman_cmd)
self.assertEqual(proc.poll(), 0, stderr)
# the response should be json because that is the default
result = json.loads(stdout.decode("utf-8"))
self.assertEqual(result["unix_domain"], sockpath.unix_domain)
def test_jsonInputNoNewLine(self) -> None:
self.doJson(False)
def test_jsonInputNewLine(self) -> None:
self.doJson(True)
def test_jsonInputPretty(self) -> None:
self.doJson(True, True)
def test_bserInput(self) -> None:
sockpath = self.getSockPath()
# pyre-fixme[16]: Module `pywatchman` has no attribute `bser`.
watchman_cmd = bser.dumps(["get-sockname"])
cli_cmd = [
os.environ.get("WATCHMAN_BINARY", "watchman"),
"--unix-listener-path={0}".format(sockpath.unix_domain),
"--named-pipe-path={0}".format(sockpath.named_pipe),
"--logfile=/BOGUS",
"--statefile=/BOGUS",
"--no-spawn",
"--no-local",
"-j",
]
proc = subprocess.Popen(
cli_cmd,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
stdout, stderr = proc.communicate(input=watchman_cmd)
self.assertEqual(proc.poll(), 0, stderr)
# the response should be bser to match our input
# pyre-fixme[16]: Module `pywatchman` has no attribute `bser`.
result = bser.loads(stdout)
result_sockname = result["unix_domain"]
result_sockname = encoding.decode_local(result_sockname)
self.assertEqual(
result_sockname,
sockpath.unix_domain,
binascii.hexlify(stdout).decode("ascii"),
)
|
[
"pywatchman.bser.loads",
"subprocess.Popen",
"pywatchman.bser.dumps",
"binascii.hexlify",
"watchman.integration.lib.WatchmanInstance.getSharedInstance",
"pywatchman.encoding.decode_local",
"json.dumps",
"os.environ.get"
] |
[((1266, 1366), 'subprocess.Popen', 'subprocess.Popen', (['cli_cmd'], {'stdin': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(cli_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n', (1282, 1366), False, 'import subprocess\n'), ((2117, 2145), 'pywatchman.bser.dumps', 'bser.dumps', (["['get-sockname']"], {}), "(['get-sockname'])\n", (2127, 2145), False, 'from pywatchman import bser, encoding\n'), ((2520, 2620), 'subprocess.Popen', 'subprocess.Popen', (['cli_cmd'], {'stdin': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(cli_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n', (2536, 2620), False, 'import subprocess\n'), ((2933, 2951), 'pywatchman.bser.loads', 'bser.loads', (['stdout'], {}), '(stdout)\n', (2943, 2951), False, 'from pywatchman import bser, encoding\n'), ((3026, 3064), 'pywatchman.encoding.decode_local', 'encoding.decode_local', (['result_sockname'], {}), '(result_sockname)\n', (3047, 3064), False, 'from pywatchman import bser, encoding\n'), ((735, 763), 'json.dumps', 'json.dumps', (["['get-sockname']"], {}), "(['get-sockname'])\n", (745, 763), False, 'import json\n'), ((924, 969), 'os.environ.get', 'os.environ.get', (['"""WATCHMAN_BINARY"""', '"""watchman"""'], {}), "('WATCHMAN_BINARY', 'watchman')\n", (938, 969), False, 'import os\n'), ((2178, 2223), 'os.environ.get', 'os.environ.get', (['"""WATCHMAN_BINARY"""', '"""watchman"""'], {}), "('WATCHMAN_BINARY', 'watchman')\n", (2192, 2223), False, 'import os\n'), ((470, 506), 'watchman.integration.lib.WatchmanInstance.getSharedInstance', 'WatchmanInstance.getSharedInstance', ([], {}), '()\n', (504, 506), False, 'from watchman.integration.lib import WatchmanInstance\n'), ((3166, 3190), 'binascii.hexlify', 'binascii.hexlify', (['stdout'], {}), '(stdout)\n', (3182, 3190), False, 'import binascii\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from module.controller import BaseController
from module.dao import AuthorDao
class AuthorController (BaseController):
"""
    Controller for the operations related to authors.
    Implements the CRUD operations on the tables.
    @author <NAME>
    @contact <EMAIL>
"""
@property
def dao(self):
"""
        Reference to the repository DAO
"""
return self.__dao
def __init__(self, config):
"""
        Class constructor; initializes the controller
"""
self.__dao = AuthorDao(config)
def listar_repositorios(self, id):
"""
        Returns the list of resources.
        """
        return self.dao.get_repositories(id)
|
[
"module.dao.AuthorDao"
] |
[((632, 649), 'module.dao.AuthorDao', 'AuthorDao', (['config'], {}), '(config)\n', (641, 649), False, 'from module.dao import AuthorDao\n')]
|
#!/usr/bin/env python3
import os
import sys
import re
import argparse
from pathlib import Path
from collections import Counter
from enum import Enum
import itertools
import json
from operator import itemgetter
import math
import datetime
import logging
import multiprocessing
import time
import signal
from typing import Tuple, Union
import io
import cv2
from numpy import ndarray
import numpy as np
import pytesseract
from PIL import Image
from PIL.ExifTags import TAGS
import pageinfo
PROGNAME = "FGOスクショカウント"
VERSION = "0.4.0"
DEFAULT_ITEM_LANG = "jpn" # "jpn": Japanese, "eng": English
logger = logging.getLogger(__name__)
watcher_running = True
class CustomAdapter(logging.LoggerAdapter):
"""
    When logging through this adapter, "[target] " is automatically prepended
    to the log message. The target must be fixed when the adapter instance is created.
"""
def process(self, msg, kwargs):
return f"[{self.extra['target']}] {msg}", kwargs
class Ordering(Enum):
"""
    Constants describing the order in which files are processed
"""
    NOTSPECIFIED = 'notspecified' # not specified
    FILENAME = 'filename' # file name
    TIMESTAMP = 'timestamp' # creation time
def __str__(self):
return str(self.value)
basedir = Path(__file__).resolve().parent
Item_dir = basedir / Path("item/equip/")
CE_dir = basedir / Path("item/ce/")
Point_dir = basedir / Path("item/point/")
train_item = basedir / Path("item.xml") # item stack & bonus
train_chest = basedir / Path("chest.xml") # drop_count (Old UI)
train_dcnt = basedir / Path("dcnt.xml") # drop_count (New UI)
train_card = basedir / Path("card.xml") # card name
drop_file = basedir / Path("fgoscdata/hash_drop.json")
eventquest_dir = basedir / Path("fgoscdata/data/json/")
items_img = basedir / Path("data/misc/items_img.png")
hasher = cv2.img_hash.PHash_create()
FONTSIZE_UNDEFINED = -1
FONTSIZE_NORMAL = 0
FONTSIZE_SMALL = 1
FONTSIZE_TINY = 2
FONTSIZE_NEWSTYLE = 99
PRIORITY_CE = 9000
PRIORITY_POINT = 3000
PRIORITY_ITEM = 700
PRIORITY_GEM_MIN = 6094
PRIORITY_MAGIC_GEM_MIN = 6194
PRIORITY_SECRET_GEM_MIN = 6294
PRIORITY_PIECE_MIN = 5194
PRIORITY_REWARD_QP = 9012
ID_START = 9500000
ID_QP = 1
ID_REWARD_QP = 5
ID_GEM_MIN = 6001
ID_GEM_MAX = 6007
ID_MAGIC_GEM_MIN = 6101
ID_MAGIC_GEM_MAX = 6107
ID_SECRET_GEM_MIN = 6201
ID_SECRET_GEM_MAX = 6207
ID_PIECE_MIN = 7001
ID_MONUMENT_MAX = 7107
ID_EXP_MIN = 9700100
ID_EXP_MAX = 9707500
ID_2ZORO_DICE = 94047708
ID_3ZORO_DICE = 94047709
ID_NORTH_AMERICA = 93000500
ID_SYURENJYO = 94006800
ID_EVNET = 94000000
TIMEOUT = 15
QP_UNKNOWN = -1
DEFAULT_POLL_FREQ = 60
DEFAULT_AMT_PROCESSES = 1
class FgosccntError(Exception):
pass
class GainedQPandDropMissMatchError(FgosccntError):
pass
with open(drop_file, encoding='UTF-8') as f:
drop_item = json.load(f)
# Build the lookup dictionaries from the JSON file
item_name = {item["id"]: item["name"] for item in drop_item}
item_name_eng = {item["id"]: item["name_eng"] for item in drop_item
if "name_eng" in item.keys()}
item_shortname = {item["id"]: item["shortname"] for item in drop_item
if "shortname" in item.keys()}
item_dropPriority = {item["id"]: item["dropPriority"] for item in drop_item}
item_background = {item["id"]: item["background"] for item in drop_item
if "background" in item.keys()}
item_type = {item["id"]: item["type"] for item in drop_item}
dist_item = {item["phash_battle"]: item["id"] for item in drop_item
if item["type"] == "Item" and "phash_battle" in item.keys()}
dist_ce = {item["phash"]: item["id"] for item in drop_item
if item["type"] == "Craft Essence"}
dist_ce_narrow = {item["phash_narrow"]: item["id"] for item in drop_item
if item["type"] == "Craft Essence"}
dist_secret_gem = {item["id"]: item["phash_class"] for item in drop_item
if 6200 < item["id"] < 6208
and "phash_class" in item.keys()}
dist_magic_gem = {item["id"]: item["phash_class"] for item in drop_item
if 6100 < item["id"] < 6108 and "phash_class" in item.keys()}
dist_gem = {item["id"]: item["phash_class"] for item in drop_item
if 6000 < item["id"] < 6008 and "phash_class" in item.keys()}
dist_exp_rarity = {item["phash_rarity"]: item["id"] for item in drop_item
if item["type"] == "Exp. UP"
and "phash_rarity" in item.keys()}
dist_exp_rarity_sold = {item["phash_rarity_sold"]: item["id"] for item
in drop_item if item["type"] == "Exp. UP"
and "phash_rarity_sold" in item.keys()}
dist_exp_rarity.update(dist_exp_rarity_sold)
dist_exp_class = {item["phash_class"]: item["id"] for item in drop_item
if item["type"] == "Exp. UP"
and "phash_class" in item.keys()}
dist_exp_class_sold = {item["phash_class_sold"]: item["id"]
for item in drop_item
if item["type"] == "Exp. UP" and "phash_class_sold"
in item.keys()}
dist_exp_class.update(dist_exp_class_sold)
dist_point = {item["phash_battle"]: item["id"]
for item in drop_item
if item["type"] == "Point" and "phash_battle" in item.keys()}
freequest = []
evnetfiles = eventquest_dir.glob('**/*.json')
for evnetfile in evnetfiles:
try:
with open(evnetfile, encoding='UTF-8') as f:
event = json.load(f)
freequest = freequest + event
except (OSError, UnicodeEncodeError) as e:
logger.exception(e)
npz = np.load(basedir / Path('background.npz'))
hist_zero = npz["hist_zero"]
hist_gold = npz["hist_gold"]
hist_silver = npz["hist_silver"]
hist_bronze = npz["hist_bronze"]
def has_intersect(a, b):
"""
    Intersection test between two rectangles.
    Touching edges do not count as an intersection.
"""
return max(a[0], b[0]) < min(a[2], b[2]) \
and max(a[1], b[1]) < min(a[3], b[3])
class State():
def set_screen(self):
self.screen_type = "normal"
def set_char_position(self):
logger.debug("JP Standard Position")
def set_font_size(self):
logger.debug("JP Standard Font Size")
def set_max_qp(self):
self.max_qp = 999999999
logger.debug("999,999,999")
class JpNov2020(State):
def set_screen(self):
self.screen_type = "wide"
class JpAug2021(JpNov2020):
def set_font_size(self):
logger.debug("JP New Font Size")
def set_max_qp(self):
self.max_qp = 2000000000
logger.debug("2,000,000,000")
class NaState(State):
def set_char_position(self):
logger.debug("NA Standard Position")
class Context:
def __init__(self):
self.jp_aug_2021 = JpAug2021()
self.jp_nov_2020 = JpNov2020()
self.jp = State()
self.na = NaState()
self.state = self.jp_aug_2021
self.set_screen()
self.set_font_size()
self.set_char_position()
self.set_max_qp()
def change_state(self, mode):
if mode == "jp":
self.state = self.jp_aug_2021
elif mode == "na":
self.state = self.na
else:
raise ValueError("change_state method must be in {}".format(["jp", "na"]))
self.set_screen()
self.set_font_size()
self.set_char_position()
self.set_max_qp()
def set_screen(self):
self.state.set_screen()
def set_char_position(self):
self.state.set_char_position()
def set_font_size(self):
self.state.set_font_size()
def set_max_qp(self):
self.state.set_max_qp()
def get_coodinates(img: ndarray,
display: bool = False) -> Tuple[Tuple[int, int],
Tuple[int, int]]:
threshold: int = 30
height, width = img.shape[:2]
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if display:
cv2.imshow('image', img_gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
_, inv = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY_INV)
if display:
cv2.imshow('image', inv)
cv2.waitKey(0)
cv2.destroyAllWindows()
contours, _ = cv2.findContours(inv, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours2 = []
for cnt in contours:
_, _, w, h = cv2.boundingRect(cnt)
area = cv2.contourArea(cnt)
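        # keep only contours shaped like the game frame: aspect ratio ~1.82 and larger than a quarter of the image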
if 1.81 < w/h < 1.83 and area > height / 2 * width / 2 and height/h > 1080/910:
contours2.append(cnt)
if len(contours2) == 0:
raise ValueError("Game screen not found.")
max_contour = max(contours2, key=lambda x: cv2.contourArea(x))
x, y, width, height = cv2.boundingRect(max_contour)
return ((x, y), (x + width, y + height))
def standardize_size(frame_img: ndarray,
display: bool = False) -> Tuple[ndarray, float]:
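    # width that the fixed pixel coordinates used later assume; frames are rescaled to this width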
TRAINING_WIDTH: int = 1754
height, width = frame_img.shape[:2]
if display:
pass
logger.debug("height: %d", height)
logger.debug("width: %d", width)
_, width, _ = frame_img.shape
resize_scale: float = TRAINING_WIDTH / width
logger.debug("resize_scale: %f", resize_scale)
if resize_scale > 1:
frame_img = cv2.resize(frame_img, (0, 0),
fx=resize_scale, fy=resize_scale,
interpolation=cv2.INTER_CUBIC)
elif resize_scale < 1:
frame_img = cv2.resize(frame_img, (0, 0),
fx=resize_scale, fy=resize_scale,
interpolation=cv2.INTER_AREA)
if display:
cv2.imshow('image', frame_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return frame_img, resize_scale
def area_decision(frame_img: ndarray,
display: bool = False) -> str:
"""
    Determine the region of the FGO app.
    Supports 'na' and 'jp'.
    Decided by template matching against 'items_img.png'.
"""
img = frame_img[0:100, 0:500]
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if display:
cv2.imshow('image', img_gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
template = imread(items_img, 0)
res = cv2.matchTemplate(
img_gray,
template,
cv2.TM_CCOEFF_NORMED
)
threshold = 0.9
loc = np.where(res >= threshold)
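    # any match above the threshold means the NA-specific UI element was found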
for pt in zip(*loc[::-1]):
return 'na'
return 'jp'
def check_page_mismatch(page_items: int, chestnum: int, pagenum: int, pages: int, lines: int) -> bool:
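    """
    Sanity check: the number of items found on this page must be consistent with
    the OCR'd drop count (up to 21 items per page, 7 per line).
    """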
if pages == 1:
if chestnum + 1 != page_items:
return False
return True
if not (pages - 1) * 21 <= chestnum <= pages * 21 - 1:
return False
if pagenum == pages:
item_count = chestnum - ((pages - 1) * 21 - 1) + (pages * 3 - lines) * 7
if item_count != page_items:
return False
return True
class ScreenShot:
"""
    Class representing a battle-drop screenshot
"""
def __init__(self, args, img_rgb, svm, svm_chest, svm_dcnt, svm_card,
fileextention, exLogger, reward_only=False):
self.exLogger = exLogger
threshold = 80
try:
self.pagenum, self.pages, self.lines = pageinfo.guess_pageinfo(img_rgb)
except pageinfo.TooManyAreasDetectedError:
self.pagenum, self.pages, self.lines = (-1, -1, -1)
self.img_rgb_orig = img_rgb
img_blue, img_green, img_red = cv2.split(img_rgb)
if (img_blue==img_green).all() & (img_green==img_red ).all():
raise ValueError("Input image is grayscale")
self.img_gray_orig = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
self.img_hsv_orig = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
_, self.img_th_orig = cv2.threshold(self.img_gray_orig,
threshold, 255, cv2.THRESH_BINARY)
((self.x1, self.y1), (self.x2, self.y2)) = get_coodinates(self.img_rgb_orig)
frame_img: ndarray = self.img_rgb_orig[self.y1: self.y2, self.x1: self.x2]
img_resize, resize_scale = standardize_size(frame_img)
self.img_rgb = img_resize
mode = area_decision(img_resize)
logger.debug("lang: %s", mode)
        # decide the UI mode
sc = Context()
sc.change_state(mode)
self.max_qp = sc.state.max_qp
self.screen_type = sc.state.screen_type
dcnt_old, dcnt_new = self.drop_count_area(self.img_rgb_orig, resize_scale, sc)
if logger.isEnabledFor(logging.DEBUG):
cv2.imwrite('frame_img.png', img_resize)
if logger.isEnabledFor(logging.DEBUG):
if self.screen_type == "normal":
cv2.imwrite('dcnt_old.png', dcnt_old)
cv2.imwrite('dcnt_new.png', dcnt_new)
self.img_gray = cv2.cvtColor(self.img_rgb, cv2.COLOR_BGR2GRAY)
_, self.img_th = cv2.threshold(self.img_gray,
threshold, 255, cv2.THRESH_BINARY)
self.svm = svm
self.svm_chest = svm_chest
self.svm_dcnt = svm_dcnt
self.height, self.width = self.img_rgb.shape[:2]
if self.screen_type == "normal":
self.chestnum = self.ocr_tresurechest(dcnt_old)
if self.chestnum == -1:
self.chestnum = self.ocr_dcnt(dcnt_new)
else:
self.chestnum = self.ocr_dcnt(dcnt_new)
self.asr_y, self.actual_height = self.detect_scroll_bar()
logger.debug("Total Drop (OCR): %d", self.chestnum)
item_pts = self.img2points(mode)
logger.debug("item_pts:%s", item_pts)
self.items = []
self.current_dropPriority = PRIORITY_REWARD_QP
if reward_only:
            # used by qpsplit.py
item_pts = item_pts[0:1]
prev_item = None
for i, pt in enumerate(item_pts):
lx, _ = self.find_edge(self.img_th[pt[1]: pt[3],
pt[0]: pt[2]], reverse=True)
logger.debug("lx: %d", lx)
item_img_th = self.img_th[pt[1] + 37: pt[3] - 30,
pt[0] + lx: pt[2] + lx]
if self.is_empty_box(item_img_th):
break
item_img_rgb = self.img_rgb[pt[1]: pt[3],
pt[0] + lx: pt[2] + lx]
item_img_gray = self.img_gray[pt[1]: pt[3],
pt[0] + lx: pt[2] + lx]
if logger.isEnabledFor(logging.DEBUG):
cv2.imwrite('item' + str(i) + '.png', item_img_rgb)
dropitem = Item(args, i, prev_item, item_img_rgb, item_img_gray,
svm, svm_card, fileextention,
self.current_dropPriority, self.exLogger, mode)
if dropitem.id == -1:
break
self.current_dropPriority = item_dropPriority[dropitem.id]
self.items.append(dropitem)
prev_item = dropitem
self.itemlist = self.makeitemlist()
try:
self.total_qp = self.get_qp(mode)
self.qp_gained = self.get_qp_gained(mode)
asr_y, actual_height = self.detect_scroll_bar()
if asr_y == -1 or actual_height == -1:
self.scroll_position = -1
else:
entire_height = 649 # from correct_pageinfo()
self.scroll_position = asr_y / entire_height
except Exception as e:
self.total_qp = -1
self.qp_gained = -1
self.exLogger.warning("QP detection fails")
logger.exception(e)
if self.qp_gained > 0 and len(self.itemlist) == 0:
raise GainedQPandDropMissMatchError
self.pagenum, self.pages, self.lines = self.correct_pageinfo()
if not reward_only:
self.check_page_mismatch()
# Determine scrollbar's position. AtlasAcademy processing pipeline uses this to group drop-pages
asr_y, actual_height = self.detect_scroll_bar()
if asr_y == -1 or actual_height == -1:
self.scroll_position = -1
else:
entire_height = 649 # from correct_pageinfo()
self.scroll_position = asr_y / entire_height
def check_page_mismatch(self):
valid = check_page_mismatch(
len(self.itemlist),
self.chestnum,
self.pagenum,
self.pages,
self.lines,
)
if not valid:
self.exLogger.warning("drops_count is a mismatch:")
self.exLogger.warning("drops_count = %d", self.chestnum)
self.exLogger.warning("drops_found = %d", len(self.itemlist))
def find_notch(self):
"""
        Detect the edge (notch) width that line detection missed
"""
edge_width = 150
height, width = self.img_hsv_orig.shape[:2]
target_color = 0
for rx in range(edge_width):
img_hsv_x = self.img_hsv_orig[:, width - rx - 1: width - rx]
            # compute the histogram
hist = cv2.calcHist([img_hsv_x], [0], None, [256], [0, 256])
            # get the min/max values and their locations
_, maxVal, _, maxLoc = cv2.minMaxLoc(hist)
if not (maxLoc[1] == target_color and maxVal > height * 0.7):
break
return rx
def drop_count_area(self, img: ndarray,
resize_scale,
sc,
display: bool = False) -> Tuple[Union[ndarray, None], ndarray]:
        # behavior depends on whether the screen is widescreen
if resize_scale > 1:
img = cv2.resize(img, (0, 0),
fx=resize_scale, fy=resize_scale,
interpolation=cv2.INTER_CUBIC)
elif resize_scale < 1:
img = cv2.resize(img, (0, 0),
fx=resize_scale, fy=resize_scale,
interpolation=cv2.INTER_AREA)
# ((x1, y1), (_, _)) = get_coodinates(img)
        # relative coordinates (old UI)
dcnt_old = None
if sc.state.screen_type == "normal":
dcnt_old = img[int(self.y1*resize_scale) - 81: int(self.y1*resize_scale) - 44,
int(self.x1*resize_scale) + 1446: int(self.x1*resize_scale) + 1505]
if display:
cv2.imshow('image', dcnt_old)
cv2.waitKey(0)
cv2.destroyAllWindows()
        # relative coordinates (new UI)
rx = self.find_notch()
height, width = img.shape[:2]
if width/height > 16/8.96: # Issue #317
dcnt_new = img[int(self.y1*resize_scale) - 20: int(self.y1*resize_scale) + 14,
width - 495 - rx: width - 415 - int(rx*resize_scale)]
else:
dcnt_new = img[int(self.y1*resize_scale) - 20: int(self.y1*resize_scale) + 14,
width - 430: width - 350]
if display:
cv2.imshow('image', dcnt_new)
cv2.waitKey(0)
cv2.destroyAllWindows()
return dcnt_old, dcnt_new
def detect_scroll_bar(self):
'''
Modified from determine_scroll_position()
'''
width = self.img_rgb.shape[1]
topleft = (width - 90, 81)
bottomright = (width, 2 + 753)
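        # fixed band along the right edge of the normalized image where the scroll bar appears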
if logger.isEnabledFor(logging.DEBUG):
img_copy = self.img_rgb.copy()
cv2.rectangle(img_copy, topleft, bottomright, (0, 0, 255), 3)
cv2.imwrite("./scroll_bar_selected2.jpg", img_copy)
gray_image = self.img_gray[
topleft[1]: bottomright[1],
topleft[0]: bottomright[0]
]
_, binary = cv2.threshold(gray_image, 200, 255, cv2.THRESH_BINARY)
if logger.isEnabledFor(logging.DEBUG):
cv2.imwrite("scroll_bar_binary2.png", binary)
contours = cv2.findContours(
binary,
cv2.RETR_LIST,
cv2.CHAIN_APPROX_NONE
)[0]
pts = []
for cnt in contours:
ret = cv2.boundingRect(cnt)
pt = [ret[0], ret[1], ret[0] + ret[2], ret[1] + ret[3]]
if ret[3] > 10:
pts.append(pt)
if len(pts) == 0:
logger.debug("Can't find scroll bar")
return -1, -1
elif len(pts) > 1:
self.exLogger.warning("Too many objects.")
return -1, -1
else:
return pt[1], pt[3] - pt[1]
def valid_pageinfo(self):
'''
        Check whether the detected pageinfo is consistent; correct_pageinfo() fixes it when this fails
'''
if self.pagenum == -1 or self.pages == -1 or self.lines == -1:
return False
if (self.pagenum == 1 and self.pages == 1 and self.lines == 0) and self.chestnum > 20:
return False
elif self.itemlist[0]["id"] != ID_REWARD_QP and self.pagenum == 1:
return False
elif self.chestnum != -1 and self.pagenum != 1 \
and self.lines != int(self.chestnum/7) + 1:
return False
return True
def correct_pageinfo(self):
if self.valid_pageinfo() is False:
self.exLogger.warning("pageinfo validation failed")
if self.asr_y == -1 or self.actual_height == -1:
return 1, 1, 0
entire_height = 649
esr_y = 17
            cap_height = 14 # computed assuming the normalized im.height is 1155
pagenum = pageinfo.guess_pagenum(self.asr_y, esr_y, self.actual_height, entire_height, cap_height)
pages = pageinfo.guess_pages(self.actual_height, entire_height, cap_height)
lines = pageinfo.guess_lines(self.actual_height, entire_height, cap_height)
return pagenum, pages, lines
else:
return self.pagenum, self.pages, self.lines
def calc_black_whiteArea(self, bw_image):
image_size = bw_image.size
whitePixels = cv2.countNonZero(bw_image)
whiteAreaRatio = (whitePixels / image_size) * 100 # [%]
return whiteAreaRatio
def is_empty_box(self, img_th):
"""
        Determine whether the item box contains no item
"""
if self.calc_black_whiteArea(img_th) < 1:
return True
return False
def get_qp_from_text(self, text):
"""
        Adapted from capy-drop-parser
"""
qp = 0
power = 1
# re matches left to right so reverse the list
# to process lower orders of magnitude first.
for match in re.findall("[0-9]+", text)[::-1]:
qp += int(match) * power
power *= 1000
return qp
def extract_text_from_image(self, image):
"""
        Adapted from capy-drop-parser
"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, qp_image = cv2.threshold(gray, 65, 255, cv2.THRESH_BINARY_INV)
# '+' is needed to ensure that tesseract doesn't force a recognition on it,
# which results in a '4' most of the time.
return pytesseract.image_to_string(
qp_image,
config="-l eng --oem 1 --psm 7 -c tessedit_char_whitelist=+,0123456789",
)
def get_qp(self, mode):
"""
        Adapted from capy-drop-parser.
        tesseract-OCR is quite slow, so SVM is used instead.
"""
use_tesseract = False
pt = pageinfo.detect_qp_region(self.img_rgb_orig, mode)
logger.debug('pt from pageinfo: %s', pt)
if pt is None:
use_tesseract = True
qp_total = -1
if use_tesseract is False: # use SVM
im_th = cv2.bitwise_not(
self.img_th_orig[pt[0][1]: pt[1][1], pt[0][0]: pt[1][0]]
)
qp_total = self.ocr_text(im_th)
if use_tesseract or qp_total == -1:
if self.screen_type == "normal":
pt = ((288, 948), (838, 1024))
else:
pt = ((288, 838), (838, 914))
logger.debug('Use tesseract')
qp_total_text = self.extract_text_from_image(
self.img_rgb[pt[0][1]: pt[1][1], pt[0][0]: pt[1][0]]
)
logger.debug('qp_total_text from text: %s', qp_total_text)
qp_total = self.get_qp_from_text(qp_total_text)
logger.debug('qp_total from text: %s', qp_total)
if qp_total > self.max_qp:
self.exLogger.warning(
"qp_total exceeds the system's maximum: %s", qp_total
)
if qp_total == 0:
return QP_UNKNOWN
return qp_total
def get_qp_gained(self, mode):
use_tesseract = False
bounds = pageinfo.detect_qp_region(self.img_rgb_orig, mode)
if bounds is None:
# fall back on hardcoded bound
if self.screen_type == "normal":
bounds = ((398, 858), (948, 934))
else:
bounds = ((398, 748), (948, 824))
use_tesseract = True
else:
# Detecting the QP box with different shading is "easy", while detecting the absence of it
# for the gain QP amount is hard. However, the 2 values have the same font and thus roughly
# the same height (please NA...). You can consider them to be 2 same-sized boxes on top of
# each other.
(topleft, bottomright) = bounds
height = bottomright[1] - topleft[1]
topleft = (topleft[0], topleft[1] - height + int(height*0.12))
bottomright = (bottomright[0], bottomright[1] - height)
bounds = (topleft, bottomright)
logger.debug('Gained QP bounds: %s', bounds)
if logger.isEnabledFor(logging.DEBUG):
img_copy = self.img_rgb.copy()
cv2.rectangle(img_copy, bounds[0], bounds[1], (0, 0, 255), 3)
cv2.imwrite("./qp_gain_detection.jpg", img_copy)
qp_gain = -1
if use_tesseract is False:
im_th = cv2.bitwise_not(
self.img_th_orig[topleft[1]: bottomright[1],
topleft[0]: bottomright[0]]
)
qp_gain = self.ocr_text(im_th)
if use_tesseract or qp_gain == -1:
logger.debug('Use tesseract')
(topleft, bottomright) = bounds
qp_gain_text = self.extract_text_from_image(
self.img_rgb[topleft[1]: bottomright[1],
topleft[0]: bottomright[0]]
)
qp_gain = self.get_qp_from_text(qp_gain_text)
logger.debug('qp from text: %s', qp_gain)
if qp_gain == 0:
qp_gain = QP_UNKNOWN
return qp_gain
def find_edge(self, img_th, reverse=False):
"""
        Detect the edge width that line detection missed
"""
edge_width = 4
_, width = img_th.shape[:2]
target_color = 255 if reverse else 0
for i in range(edge_width):
img_th_x = img_th[:, i:i + 1]
            # compute the histogram
hist = cv2.calcHist([img_th_x], [0], None, [256], [0, 256])
            # get the min/max values and their locations
_, _, _, maxLoc = cv2.minMaxLoc(hist)
if maxLoc[1] == target_color:
break
lx = i
for j in range(edge_width):
img_th_x = img_th[:, width - j - 1: width - j]
            # compute the histogram
hist = cv2.calcHist([img_th_x], [0], None, [256], [0, 256])
            # get the min/max values and their locations
_, _, _, maxLoc = cv2.minMaxLoc(hist)
if maxLoc[1] == 0:
break
rx = j
return lx, rx
def makeitemlist(self):
"""
        Output the list of items
"""
itemlist = []
for item in self.items:
tmp = {}
tmp['id'] = item.id
tmp['name'] = item.name
tmp['dropPriority'] = item_dropPriority[item.id]
tmp['stack'] = int(item.dropnum[1:])
tmp['bonus'] = item.bonus
tmp['category'] = item.category
tmp['x'] = item.position % 7
tmp['y'] = item.position//7
itemlist.append(tmp)
return itemlist
def ocr_text(self, im_th):
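        """
        Recognize a number from a binarized image: detect digit contours, then
        classify each digit with HOG features and the chest-count SVM.
        """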
h, w = im_th.shape[:2]
        # object detection
im_th = cv2.bitwise_not(im_th)
contours = cv2.findContours(im_th,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[0]
item_pts = []
for cnt in contours:
ret = cv2.boundingRect(cnt)
area = cv2.contourArea(cnt)
pt = [ret[0], ret[1], ret[0] + ret[2], ret[1] + ret[3]]
if ret[2] < int(w/2) and area > 80 and ret[1] < h/2 \
and 0.3 < ret[2]/ret[3] < 0.85 and ret[3] > h * 0.45:
flag = False
for p in item_pts:
if has_intersect(p, pt):
                        # remove one of the two
p_area = (p[2]-p[0])*(p[3]-p[1])
pt_area = ret[2]*ret[3]
if p_area < pt_area:
item_pts.remove(p)
else:
flag = True
if flag is False:
item_pts.append(pt)
if len(item_pts) == 0:
            # recognition failed
return -1
item_pts.sort()
if len(item_pts) > len(str(self.max_qp)):
            # QP may be misrecognized at the 10th digit or beyond, so trim the extras
item_pts = item_pts[len(item_pts) - len(str(self.max_qp)):]
logger.debug("ocr item_pts: %s", item_pts)
logger.debug("ドロップ桁数(OCR): %d", len(item_pts))
        # HOG feature parameters
win_size = (120, 60)
block_size = (16, 16)
block_stride = (4, 4)
cell_size = (4, 4)
bins = 9
res = ""
for pt in item_pts:
test = []
if pt[0] == 0:
tmpimg = im_th[pt[1]:pt[3], pt[0]:pt[2]+1]
else:
tmpimg = im_th[pt[1]:pt[3], pt[0]-1:pt[2]+1]
tmpimg = cv2.resize(tmpimg, (win_size))
hog = cv2.HOGDescriptor(win_size, block_size,
block_stride, cell_size, bins)
            test.append(hog.compute(tmpimg)) # store the feature vector
test = np.array(test)
pred = self.svm_chest.predict(test)
res = res + str(int(pred[1][0][0]))
return int(res)
def ocr_tresurechest(self, drop_count_img):
"""
        OCR the treasure chest (drop) count
"""
threshold = 80
img_gray = cv2.cvtColor(drop_count_img, cv2.COLOR_BGR2GRAY)
_, img_num = cv2.threshold(img_gray,
threshold, 255, cv2.THRESH_BINARY)
im_th = cv2.bitwise_not(img_num)
h, w = im_th.shape[:2]
        # remove the area where the info window overlaps the digits
for y in range(h):
im_th[y, 0] = 255
        for x in range(w): # workaround for a bug when the drop count is 7 (#54)
im_th[0, x] = 255
return self.ocr_text(im_th)
def pred_dcnt(self, img):
"""
for JP new UI
"""
        # HOG feature parameters
win_size = (120, 60)
block_size = (16, 16)
block_stride = (4, 4)
cell_size = (4, 4)
bins = 9
char = []
tmpimg = cv2.resize(img, (win_size))
hog = cv2.HOGDescriptor(win_size, block_size,
block_stride, cell_size, bins)
        char.append(hog.compute(tmpimg)) # store the feature vector
char = np.array(char)
pred = self.svm_dcnt.predict(char)
res = str(int(pred[1][0][0]))
return int(res)
def img2num(self, img, img_th, pts, char_w, end):
"""実際より小さく切り抜かれた数字画像を補正して認識させる
"""
height, width = img.shape[:2]
c_center = int(pts[0] + (pts[2] - pts[0])/2)
# newimg = img[:, item_pts[-1][0]-1:item_pts[-1][2]+1]
newimg = img[:, max(int(c_center - char_w/2), 0):min(int(c_center + char_w/2), width)]
threshold2 = 10
ret, newimg_th = cv2.threshold(newimg,
threshold2,
255,
cv2.THRESH_BINARY)
        # overwrite the top part with the original image
# for w in range(item_pts[-1][2] - item_pts[-1][0] + 2):
for w in range(min(int(c_center + char_w/2), width) - max(int(c_center - char_w/2), 0)):
for h in range(end):
newimg_th[h, w] = img_th[h, w + int(c_center - char_w/2)]
# newimg_th[h, w] = img_th[h, w + item_pts[-1][0]]
newimg_th[height - 1, w] = 0
newimg_th[height - 2, w] = 0
newimg_th[height - 3, w] = 0
res = self.pred_dcnt(newimg_th)
return res
def ocr_dcnt(self, drop_count_img):
"""
ocr drop_count (for New UI)
"""
char_w = 28
threshold = 80
kernel = np.ones((4, 4), np.uint8)
img = cv2.cvtColor(drop_count_img, cv2.COLOR_BGR2GRAY)
_, img_th = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
img_th = cv2.dilate(img_th, kernel, iterations=1)
height, width = img_th.shape[:2]
end = -1
for i in range(height):
if end == -1 and img_th[height - i - 1, width - 1] == 255:
end = height - i
break
start = end - 7
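        # blank the 7-pixel band just above 'end' (presumably the underline) so it does not merge with the digits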
for j in range(width):
for k in range(end - start):
img_th[start + k, j] = 0
contours = cv2.findContours(img_th,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[0]
item_pts = []
for cnt in contours:
ret = cv2.boundingRect(cnt)
pt = [ret[0], ret[1], ret[0] + ret[2], ret[1] + ret[3]]
if ret[1] > 0 and ret[3] > 8 and ret[1] + ret[3] == start \
and 12 < ret[2] < char_w + 4 and ret[0] + ret[2] != width:
item_pts.append(pt)
if len(item_pts) == 0:
return -1
item_pts.sort()
res = self.img2num(img, img_th, item_pts[-1], char_w, end)
if len(item_pts) >= 2:
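            # a gap smaller than 1.5 * char_w means the digit belongs to the same number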
if item_pts[-1][0] - item_pts[-2][2] < char_w / (2 / 3):
res2 = self.img2num(img, img_th, item_pts[-2], char_w, end)
res = res2 * 10 + res
if len(item_pts) == 3:
if item_pts[-2][0] - item_pts[-3][2] < char_w / (2 / 3):
res3 = self.img2num(img, img_th, item_pts[-3], char_w, end)
res = res3 * 100 + res2 * 10 + res
return res
def calc_offset(self, pts, std_pts, margin_x):
"""
        Apply the offset
"""
if len(pts) == 0:
return std_pts
        # sort by the Y coordinate
pts.sort(key=lambda x: x[1])
if len(pts) > 1: # fix #107
if (pts[1][3] - pts[1][1]) - (pts[0][3] - pts[0][1]) > 0:
pts = pts[1:]
        # compute the offset
offset_x = pts[0][0] - margin_x
offset_y = pts[0][1] - std_pts[0][1]
if offset_y > (std_pts[7][3] - std_pts[7][1])*2:
            # beyond this, treat it as a third-row coordinate
offset_y = pts[0][1] - std_pts[14][1]
elif offset_y > 30:
            # beyond this, treat it as a second-row coordinate
offset_y = pts[0][1] - std_pts[7][1]
        # apply the offset
item_pts = []
for pt in std_pts:
ptl = list(pt)
ptl[0] = ptl[0] + offset_x
ptl[1] = ptl[1] + offset_y
ptl[3] = ptl[3] + offset_y
ptl[2] = ptl[2] + offset_x
item_pts.append(ptl)
return item_pts
def img2points(self, mode):
"""
        Find the Y coordinate of the left-most drop column, correct the offset
        from the standard coordinates, and return the item coordinates
"""
std_pts = self.booty_pts()
        row_size = 7 # maximum number of item columns shown
        col_size = 3 # maximum number of item rows shown
        margin_x = 15
        area_size_lower = 37000 # minimum area of an item frame
img_1strow = self.img_th[0:self.height,
std_pts[0][0] - margin_x:
std_pts[0][2] + margin_x]
SCROLLBAR_WIDTH4ONEPAGE = 610
POSITION_TOP = 16
POSITION_BOTTOM_JP = 42 # JP
POSITION_BOTTOM_NA = 52 # NA
SCROLL_OFFSET = 28
        # extract contours
contours = cv2.findContours(img_1strow, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)[0]
leftcell_pts = []
for cnt in contours:
area = cv2.contourArea(cnt)
if area > area_size_lower \
and area < self.height * self.width / (row_size * col_size):
epsilon = 0.01*cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
                if 4 <= len(approx) <= 6: # accept only hexagon-like shapes
ret = cv2.boundingRect(cnt)
if ret[1] > self.height * 0.15 - 101 \
and ret[1] + ret[3] < self.height * 0.76 - 101:
                        # the fractional values are rough measurements
pts = [ret[0], ret[1],
ret[0] + ret[2], ret[1] + ret[3]]
leftcell_pts.append(pts)
if len(leftcell_pts) != 0:
item_pts = self.calc_offset(leftcell_pts, std_pts, margin_x)
logger.debug("leftcell_pts: %s", leftcell_pts)
else:
if (self.asr_y == POSITION_TOP and self.actual_height == SCROLLBAR_WIDTH4ONEPAGE) or self.actual_height == -1:
# case: normal
if mode == "na":
leftcell_pts = [[25, 109, 214, 315]]
else:
leftcell_pts = [[14, 97, 202, 303]]
item_pts = self.calc_offset(leftcell_pts, std_pts, margin_x)
elif POSITION_BOTTOM_JP <= self.asr_y <= POSITION_BOTTOM_NA and self.actual_height == SCROLLBAR_WIDTH4ONEPAGE:
# case: scrolling down by mistake
if mode == "na":
leftcell_pts = [[25, 299, 214, 504]]
else:
leftcell_pts = [[14, 97 - SCROLL_OFFSET, 202, 303 - SCROLL_OFFSET]]
item_pts = self.calc_offset(leftcell_pts, std_pts, margin_x)
return item_pts
def booty_pts(self):
"""
        The 21 coordinates [left, top, right, bottom] where drops appear.
        Configured per resolution.
"""
criteria_left = 102
criteria_top = 99
item_width = 188
item_height = 206
margin_width = 32
margin_height = 21
pts = generate_booty_pts(criteria_left, criteria_top,
item_width, item_height,
margin_width, margin_height)
return pts
def generate_booty_pts(criteria_left, criteria_top, item_width, item_height,
margin_width, margin_height):
"""
    Generate the coordinate list that ScreenShot#booty_pts() should return.
    Assumes that all drop images are laid out at equal intervals.
    criteria_left ... left coordinate of the top-left drop
    criteria_top ... top coordinate of the top-left drop
    item_width ... width of a drop image
    item_height ... height of a drop image
    margin_width ... horizontal gap between drop images
    margin_height ... vertical gap between drop images
"""
pts = []
current = (criteria_left, criteria_top, criteria_left + item_width,
criteria_top + item_height)
for j in range(3):
        # compute the top and bottom y coordinates
current_top = criteria_top + (item_height + margin_height) * j
current_bottom = current_top + item_height
        # fix the x coordinate to the left edge
current = (criteria_left, current_top,
criteria_left + item_width, current_bottom)
for i in range(7):
            # slide the x coordinate while keeping the y coordinate fixed
current_left = criteria_left + (item_width + margin_width) * i
current_right = current_left + item_width
current = (current_left, current_top,
current_right, current_bottom)
pts.append(current)
return pts
class Item:
def __init__(self, args, pos, prev_item, img_rgb, img_gray, svm, svm_card,
fileextention, current_dropPriority, exLogger, mode='jp'):
self.position = pos
self.prev_item = prev_item
self.img_rgb = img_rgb
self.img_gray = img_gray
self.img_hsv = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
_, img_th = cv2.threshold(self.img_gray, 174, 255, cv2.THRESH_BINARY)
self.img_th = cv2.bitwise_not(img_th)
self.fileextention = fileextention
self.exLogger = exLogger
self.dropnum_cache = []
self.margin_left = 5
self.height, self.width = img_rgb.shape[:2]
logger.debug("pos: %d", pos)
self.identify_item(args, prev_item, svm_card,
current_dropPriority)
if self.id == -1:
return
logger.debug("id: %d", self.id)
logger.debug("background: %s", self.background)
logger.debug("dropPriority: %s", item_dropPriority[self.id])
logger.debug("Category: %s", self.category)
logger.debug("Name: %s", self.name)
self.svm = svm
self.bonus = ""
if self.category != "Craft Essence" and self.category != "Exp. UP":
self.ocr_digit(mode)
else:
self.dropnum = "x1"
logger.debug("Bonus: %s", self.bonus)
logger.debug("Stack: %s", self.dropnum)
def identify_item(self, args, prev_item, svm_card,
current_dropPriority):
self.background = classify_background(self.img_rgb)
self.hash_item = compute_hash(self.img_rgb) # 画像の距離
if prev_item is not None:
# [Requirements for Caching]
# 1. previous item is not a reward QP.
# 2. Same background as the previous item
# 3. Not (similarity is close) dice, gem or EXP
if prev_item.id != ID_REWARD_QP \
and prev_item.background == self.background \
and not (ID_GEM_MIN <= prev_item.id <= ID_SECRET_GEM_MAX or
ID_2ZORO_DICE <= prev_item.id <= ID_3ZORO_DICE or
ID_EXP_MIN <= prev_item.id <= ID_EXP_MAX):
d = hasher.compare(self.hash_item, prev_item.hash_item)
if d <= 4:
self.category = prev_item.category
self.id = prev_item.id
self.name = prev_item.name
return
self.category = self.classify_category(svm_card)
self.id = self.classify_card(self.img_rgb, current_dropPriority)
if args.lang == "jpn":
self.name = item_name[self.id]
else:
if self.id in item_name_eng.keys():
self.name = item_name_eng[self.id]
else:
self.name = item_name[self.id]
if self.category == "":
if self.id in item_type:
self.category = item_type[self.id]
else:
self.category = "Item"
def conflictcheck(self, pts, pt):
"""
        If pt intersects any rectangle in pts, keep whichever has the larger area
"""
flag = False
for p in list(pts):
if has_intersect(p, pt):
                # remove one of the two
p_area = (p[2]-p[0])*(p[3]-p[1])
pt_area = (pt[2]-pt[0])*(pt[3]-pt[1])
if p_area < pt_area:
pts.remove(p)
else:
flag = True
if flag is False:
pts.append(pt)
return pts
def extension(self, pts):
"""
        Fine-tune each character area by 1 pixel
"""
new_pts = []
for pt in pts:
if pt[0] == 0 and pt[1] == 0:
pt = [pt[0], pt[1], pt[2], pt[3] + 1]
elif pt[0] == 0 and pt[1] != 0:
pt = [pt[0], pt[1] - 1, pt[2], pt[3] + 1]
elif pt[0] != 0 and pt[1] == 0:
pt = [pt[0] - 1, pt[1], pt[2], pt[3] + 1]
else:
pt = [pt[0] - 1, pt[1] - 1, pt[2], pt[3] + 1]
new_pts.append(pt)
return new_pts
def extension_straighten(self, pts):
"""
        Align the Y extents to the maximum while fine-tuning each character area by 1 pixel
        """
        base_top = 6 # force a minimum height
base_bottom = 10
for pt in pts:
if base_top > pt[1]:
base_top = pt[1]
if base_bottom < pt[3]:
base_bottom = pt[3]
        # workaround for the 5th digit getting corrupted
new_pts = []
pts.reverse()
for i, pt in enumerate(pts):
if len(pts) > 6 and i == 4:
pt = [pts[5][2], base_top, pts[3][0], base_bottom]
else:
pt = [pt[0], base_top, pt[2], base_bottom]
new_pts.append(pt)
new_pts.reverse()
return new_pts
def detect_bonus_char4jpg(self, mode):
"""
        [JP] Spec before Ver.2.37.0.
        Extract the coordinates of the yellow bonus text on the lower row for drop-count OCR.
        Used for recognizing non-PNG images.
        """
        # For QP and points the height changes when the bonus has 6 digits;
        # otherwise it should change at 3 digits (unconfirmed).
        # margin_right here is the distance to the last digit of the drop count.
base_line = 181 if mode == "na" else 179
pattern_tiny = r"^\(\+\d{4,5}0\)$"
pattern_small = r"^\(\+\d{5}0\)$"
pattern_normal = r"^\(\+[1-9]\d*\)$"
        # read 1-5 digits
font_size = FONTSIZE_NORMAL
if mode == 'na':
margin_right = 20
else:
margin_right = 26
line, pts = self.get_number4jpg(base_line, margin_right, font_size)
logger.debug("Read BONUS NORMAL: %s", line)
m_normal = re.match(pattern_normal, line)
if m_normal:
logger.debug("Font Size: %d", font_size)
return line, pts, font_size
        # read 6 digits
if mode == 'na':
margin_right = 19
else:
margin_right = 25
font_size = FONTSIZE_SMALL
line, pts = self.get_number4jpg(base_line, margin_right, font_size)
logger.debug("Read BONUS SMALL: %s", line)
m_small = re.match(pattern_small, line)
if m_small:
logger.debug("Font Size: %d", font_size)
return line, pts, font_size
        # read 7 digits
font_size = FONTSIZE_TINY
if mode == 'na':
margin_right = 18
else:
margin_right = 26
line, pts = self.get_number4jpg(base_line, margin_right, font_size)
logger.debug("Read BONUS TINY: %s", line)
m_tiny = re.match(pattern_tiny, line)
if m_tiny:
logger.debug("Font Size: %d", font_size)
return line, pts, font_size
else:
font_size = FONTSIZE_UNDEFINED
logger.debug("Font Size: %d", font_size)
line = ""
pts = []
return line, pts, font_size
def detect_bonus_char4jpg2(self, mode):
"""
        [JP] Spec from Ver.2.37.0 onward.
        Extract the coordinates of the yellow bonus text on the lower row for drop-count OCR.
        Used for recognizing non-PNG images.
        """
        # For QP and points the height changes when the bonus has 6 digits;
        # otherwise it should change at 3 digits (unconfirmed).
        # margin_right here is the distance to the last digit of the drop count.
base_line = 181 if mode == "na" else 179
pattern_tiny = r"^\(\+\d{4,5}0\)$"
pattern_small = r"^\(\+\d{5}0\)$"
pattern_normal = r"^\(\+[1-9]\d*\)$"
font_size = FONTSIZE_NEWSTYLE
if mode == 'na':
margin_right = 20
else:
margin_right = 26
        # read 1-5 digits
cut_width = 21
comma_width = 5
line, pts = self.get_number4jpg2(base_line, margin_right, cut_width, comma_width)
logger.debug("Read BONUS NORMAL: %s", line)
m_normal = re.match(pattern_normal, line)
if m_normal:
logger.debug("Font Size: %d", font_size)
return line, pts, font_size
        # read 6 digits
cut_width = 19
comma_width = 5
line, pts = self.get_number4jpg2(base_line, margin_right, cut_width, comma_width)
logger.debug("Read BONUS SMALL: %s", line)
m_small = re.match(pattern_small, line)
if m_small:
logger.debug("Font Size: %d", font_size)
return line, pts, font_size
        # read 7 digits
cut_width = 18
comma_width = 5
line, pts = self.get_number4jpg2(base_line, margin_right, cut_width, comma_width)
logger.debug("Read BONUS TINY: %s", line)
m_tiny = re.match(pattern_tiny, line)
if m_tiny:
logger.debug("Font Size: %d", font_size)
return line, pts, font_size
else:
font_size = FONTSIZE_UNDEFINED
logger.debug("Font Size: %d", font_size)
line = ""
pts = []
return line, pts, font_size
def detect_bonus_char(self):
"""
        Extract the coordinates of the yellow bonus text on the lower row for drop-count OCR.
        Objects are detected by masking yellow in HSV.
        Little noise; accuracy is quite good.
"""
margin_top = int(self.height*0.72)
margin_bottom = int(self.height*0.11)
margin_left = 8
margin_right = 8
img_hsv_lower = self.img_hsv[margin_top: self.height - margin_bottom,
margin_left: self.width - margin_right]
h, w = img_hsv_lower.shape[:2]
        # range that works for the screenshots at hand
        # the key is whether the yellow text can be masked with these values
        # changed [25,180,119] -> [25,175,119] because an unsupported device appeared
lower_yellow = np.array([25, 175, 119])
upper_yellow = np.array([37, 255, 255])
img_hsv_lower_mask = cv2.inRange(img_hsv_lower,
lower_yellow, upper_yellow)
contours = cv2.findContours(img_hsv_lower_mask, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)[0]
bonus_pts = []
        # success depends entirely on how well the detection mask works
for cnt in contours:
ret = cv2.boundingRect(cnt)
area = cv2.contourArea(cnt)
pt = [ret[0] + margin_left, ret[1] + margin_top,
ret[0] + ret[2] + margin_left, ret[1] + ret[3] + margin_top]
            # ")" can split vertically, so drop the upper piece
if ret[2] < int(w/2) and ret[1] < int(h*3/5) \
and ret[1] + ret[3] > h*0.65 and area > 3:
bonus_pts = self.conflictcheck(bonus_pts, pt)
bonus_pts.sort()
if len(bonus_pts) > 0:
if self.width - bonus_pts[-1][2] > int((22*self.width/188)):
                # the yellow text is always right-aligned, so if the last character is far from the edge it is all garbage
bonus_pts = []
return self.extension(bonus_pts)
def define_fontsize(self, font_size):
if font_size == FONTSIZE_NORMAL:
cut_width = 20
cut_height = 28
comma_width = 9
elif font_size == FONTSIZE_SMALL:
cut_width = 18
cut_height = 25
comma_width = 8
else:
cut_width = 16
cut_height = 22
comma_width = 6
return cut_width, cut_height, comma_width
def get_number4jpg(self, base_line, margin_right, font_size):
"""[JP]Ver.2.37.0以前の仕様
"""
cut_width, cut_height, comma_width = self.define_fontsize(font_size)
top_y = base_line - cut_height
        # first, find at which digit position the '+' or 'x' sign sits
pts = []
if font_size == FONTSIZE_TINY:
max_digits = 8
elif font_size == FONTSIZE_SMALL:
max_digits = 8
else:
max_digits = 7
for i in range(max_digits):
if i == 0:
continue
pt = [self.width - margin_right - cut_width * (i + 1)
- comma_width * int((i - 1)/3),
top_y,
self.width - margin_right - cut_width * i
- comma_width * int((i - 1)/3),
base_line]
result = self.read_char(pt)
if i == 1 and ord(result) == 0:
                # error handling for when no "x1" label is shown at all
return "", pts
if result in ['x', '+']:
break
        # output up to the position found above
line = ""
for j in range(i):
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int(j/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int(j/3),
base_line]
c = self.read_char(pt)
            if ord(c) == 0: # guard against NUL characters
line = line + '?'
break
line = line + c
pts.append(pt)
j = j + 1
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int((j - 1)/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int((j - 1)/3),
base_line]
c = self.read_char(pt)
        if ord(c) == 0: # guard against NUL characters
c = '?'
line = line + c
line = "(" + line[::-1] + ")"
pts.append(pt)
pts.sort()
        # correction to close the gap with the PNG mask method
        new_pts = [[pts[0][0]-10, pts[0][1],
                    pts[0][0]-1, pts[0][3]]] # corresponds to "("
        new_pts.append("") # corresponds to ")"
return line, new_pts
def get_number4jpg2(self, base_line, margin_right, cut_width, comma_width):
"""[JP]Ver.2.37.0以降の仕様
"""
cut_height = 30
top_y = base_line - cut_height
        # first, find at which digit position the '+' or 'x' sign sits
pts = []
max_digits = 7
for i in range(max_digits):
if i == 0:
continue
pt = [self.width - margin_right - cut_width * (i + 1)
- comma_width * int((i - 1)/3),
top_y,
self.width - margin_right - cut_width * i
- comma_width * int((i - 1)/3),
base_line]
result = self.read_char(pt)
if i == 1 and ord(result) == 0:
                # error handling for when no "x1" label is shown at all
return "", pts
if result in ['x', '+']:
break
        # output up to the position found above
line = ""
for j in range(i):
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int(j/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int(j/3),
base_line]
c = self.read_char(pt)
            if ord(c) == 0: # guard against NUL characters
line = line + '?'
break
line = line + c
pts.append(pt)
j = j + 1
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int((j - 1)/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int((j - 1)/3),
base_line]
c = self.read_char(pt)
        if ord(c) == 0: # guard against NUL characters
c = '?'
line = line + c
line = "(" + line[::-1] + ")"
pts.append(pt)
pts.sort()
        # correction to close the gap with the PNG mask method
        new_pts = [[pts[0][0]-10, pts[0][1],
                    pts[0][0]-1, pts[0][3]]] # corresponds to "("
        new_pts.append("") # corresponds to ")"
return line, new_pts
def get_number(self, base_line, margin_right, font_size):
"""[JP]Ver.2.37.0以前の仕様
"""
cut_width, cut_height, comma_width = self.define_fontsize(font_size)
top_y = base_line - cut_height
        # first, find at which digit position the '+' or 'x' sign sits
        for i in range(8): # there are never 8 or more digits
if i == 0:
continue
elif (self.id == ID_REWARD_QP
or self.category in ["Point"]) and i <= 2:
                # reward QP and points have at least 3 digits
continue
elif self.name == "QP" and i <= 3:
                # QP has at least 4 digits
continue
pt = [self.width - margin_right - cut_width * (i + 1)
- comma_width * int((i - 1)/3),
top_y,
self.width - margin_right - cut_width * i
- comma_width * int((i - 1)/3),
base_line]
if pt[0] < 0:
break
result = self.read_char(pt)
if i == 1 and ord(result) == 0:
                # error handling for when no "x1" label is shown at all
return ""
if result in ['x', '+']:
self.margin_left = pt[0]
break
        # output up to the position found above
line = ""
for j in range(i):
if (self.id == ID_REWARD_QP) and j < 1:
                # the last digit of reward QP is 0
line += '0'
continue
elif (self.name == "QP" or self.category in ["Point"]) and j < 2:
                # the last two digits of QP and points are 00
line += '0'
continue
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int(j/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int(j/3),
base_line]
if pt[0] < 0:
break
c = self.read_char(pt)
            if ord(c) == 0: # guard against NUL characters
c = '?'
line = line + c
j = j + 1
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int((j - 1)/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int((j - 1)/3),
base_line]
if pt[0] > 0:
c = self.read_char(pt)
            if ord(c) == 0: # guard against NUL characters
c = '?'
line = line + c
line = line[::-1]
return line
def get_number2(self, cut_width, comma_width, base_line=147, margin_right=15):
"""[JP]Ver.2.37.0以降の仕様
"""
cut_height = 26
# base_line = 147
# margin_right = 15
top_y = base_line - cut_height
        # first, find at which digit position the '+' or 'x' sign sits
        for i in range(8): # there are never 8 or more digits
if i == 0:
continue
elif (self.id == ID_REWARD_QP
or self.category in ["Point"]) and i <= 2:
                # reward QP and points have at least 3 digits
continue
elif self.name == "QP" and i <= 3:
                # QP has at least 4 digits
continue
pt = [self.width - margin_right - cut_width * (i + 1)
- comma_width * int((i - 1)/3),
top_y,
self.width - margin_right - cut_width * i
- comma_width * int((i - 1)/3),
base_line]
if pt[0] < 0:
break
result = self.read_char(pt)
if i == 1 and ord(result) == 0:
                # error handling for when no "x1" label is shown at all
return ""
if result in ['x', '+']:
self.margin_left = pt[0]
break
        # output up to the position found above
line = ""
for j in range(i):
if (self.id == ID_REWARD_QP) and j < 1:
                # the last digit of reward QP is 0
line += '0'
continue
elif (self.name == "QP" or self.category in ["Point"]) and j < 2:
                # the last two digits of QP and points are 00
line += '0'
continue
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int(j/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int(j/3),
base_line]
if pt[0] < 0:
break
c = self.read_char(pt)
            if ord(c) == 0: # guard against NUL characters
c = '?'
line = line + c
j = j + 1
pt = [self.width - margin_right - cut_width * (j + 1)
- comma_width * int((j - 1)/3),
top_y,
self.width - margin_right - cut_width * j
- comma_width * int((j - 1)/3),
base_line]
if pt[0] > 0:
c = self.read_char(pt)
            if ord(c) == 0: # guard against NUL characters
                c = '?'
            line = line + c
line = line[::-1]
return line
def detect_white_char(self, base_line, margin_right, mode="jp"):
"""
        Unified routine for finding the white digits on the upper and lower rows.
        [JP] The display spec changed in Ver.2.37.0 for the case where a bonus is present.
"""
pattern_tiny = r"^[\+x][12]\d{4}00$"
pattern_tiny_qp = r"^\+[12]\d{4,5}00$"
pattern_small = r"^[\+x]\d{4}00$"
pattern_small_qp = r"^\+\d{4,5}00$"
pattern_normal = r"^[\+x][1-9]\d{0,5}$"
pattern_normal_qp = r"^\+[1-9]\d{0,4}0$"
logger.debug("base_line: %d", base_line)
if mode == "jp" and base_line < 170:
            # new spec for JP from Ver.2.37.0 onward
            # read 1-6 digits
font_size = FONTSIZE_NEWSTYLE
cut_width = 21
comma_width = 5
line = self.get_number2(cut_width, comma_width)
logger.debug("Read NORMAL: %s", line)
if self.id == ID_QP or self.category == "Point":
pattern_normal = pattern_normal_qp
m_normal = re.match(pattern_normal, line)
if m_normal:
logger.debug("Font Size: %d", font_size)
self.font_size = font_size
return line
            # read 6 digits
cut_width = 19
comma_width = 5
line = self.get_number2(cut_width, comma_width)
logger.debug("Read SMALL: %s", line)
if self.id == ID_QP or self.category == "Point":
pattern_small = pattern_small_qp
m_small = re.match(pattern_small, line)
if m_small:
logger.debug("Font Size: %d", font_size)
self.font_size = font_size
return line
            # read 7 digits
cut_width = 19
comma_width = 4
line = self.get_number2(cut_width, comma_width)
logger.debug("Read TINY: %s", line)
if self.id == ID_QP or self.category == "Point":
pattern_tiny = pattern_tiny_qp
m_tiny = re.match(pattern_tiny, line)
if m_tiny:
logger.debug("Font Size: %d", font_size)
self.font_size = font_size
return line
elif mode == "jp" and self.id not in [ID_QP, ID_REWARD_QP] and self.category != "Point":
cut_width = 21
comma_width = 5
line = self.get_number2(cut_width, comma_width, base_line=base_line, margin_right=margin_right)
logger.debug("line: %s", line)
if len(line) <= 1:
return ""
elif not line[1:].isdigit():
return ""
return line
else:
            # old spec for JP before Ver.2.37.0
if self.font_size != FONTSIZE_UNDEFINED:
line = self.get_number(base_line, margin_right, self.font_size)
logger.debug("line: %s", line)
if len(line) <= 1:
return ""
elif not line[1:].isdigit():
return ""
return line
else:
                # read 1-6 digits
font_size = FONTSIZE_NORMAL
line = self.get_number(base_line, margin_right, font_size)
logger.debug("Read NORMAL: %s", line)
if self.id == ID_QP or self.category == "Point":
pattern_normal = pattern_normal_qp
m_normal = re.match(pattern_normal, line)
if m_normal:
logger.debug("Font Size: %d", font_size)
self.font_size = font_size
return line
                # read 6 digits
font_size = FONTSIZE_SMALL
line = self.get_number(base_line, margin_right, font_size)
logger.debug("Read SMALL: %s", line)
if self.id == ID_QP or self.category == "Point":
pattern_small = pattern_small_qp
m_small = re.match(pattern_small, line)
if m_small:
logger.debug("Font Size: %d", font_size)
self.font_size = font_size
return line
                # read 7 digits
font_size = FONTSIZE_TINY
line = self.get_number(base_line, margin_right, font_size)
logger.debug("Read TINY: %s", line)
if self.id == ID_QP or self.category == "Point":
pattern_tiny = pattern_tiny_qp
m_tiny = re.match(pattern_tiny, line)
if m_tiny:
logger.debug("Font Size: %d", font_size)
self.font_size = font_size
return line
return ""
def read_item(self, pts):
"""
        OCR the bonus number (with error correction)
"""
win_size = (120, 60)
block_size = (16, 16)
block_stride = (4, 4)
cell_size = (4, 4)
bins = 9
lines = ""
for pt in pts:
char = []
tmpimg = self.img_gray[pt[1]:pt[3], pt[0]:pt[2]]
tmpimg = cv2.resize(tmpimg, (win_size))
hog = cv2.HOGDescriptor(win_size, block_size, block_stride,
cell_size, bins)
char.append(hog.compute(tmpimg))
char = np.array(char)
pred = self.svm.predict(char)
result = int(pred[1][0][0])
if result != 0:
lines = lines + chr(result)
logger.debug("OCR Result: %s", lines)
        # error correction below
if not lines.endswith(")"):
lines = lines[:-1] + ")"
if not lines.startswith("(+") and not lines.startswith("(x"):
if lines[0] in ["+", 'x']:
lines = "(" + lines
elif lines.startswith("("):
lines = lines.replace("(", "(+")
else:
lines = ""
lines = lines.replace("()", "0")
if len(lines) > 1:
            # error correction on the left-hand side of the string
            # event point drops in particular leave garbage on the left, but once a
            # specific symbol appears there should be no data before it, so drop everything before it
point_lbra = lines.rfind("(")
point_plus = lines.rfind("+")
point_x = lines.rfind("x")
if point_lbra != -1:
lines = lines[point_lbra:]
elif point_plus != -1:
lines = lines[point_plus:]
elif point_x != -1:
lines = lines[point_x:]
if lines.isdigit():
if int(lines) == 0:
lines = "xErr"
elif self.name == "QP" or self.name == "クエストクリア報酬QP":
lines = '+' + lines
else:
if int(lines) >= 100:
lines = '+' + lines
else:
lines = 'x' + lines
if len(lines) == 1:
lines = "xErr"
return lines
def read_char(self, pt):
"""
        OCR a single digit character of the drop count.
        Used for white-text detection.
"""
win_size = (120, 60)
block_size = (16, 16)
block_stride = (4, 4)
cell_size = (4, 4)
bins = 9
char = []
tmpimg = self.img_gray[pt[1]:pt[3], pt[0]:pt[2]]
tmpimg = cv2.resize(tmpimg, (win_size))
hog = cv2.HOGDescriptor(win_size, block_size, block_stride,
cell_size, bins)
char.append(hog.compute(tmpimg))
char = np.array(char)
pred = self.svm.predict(char)
result = int(pred[1][0][0])
return chr(result)
def ocr_digit(self, mode='jp'):
"""
        Drop-count OCR
"""
self.font_size = FONTSIZE_UNDEFINED
if self.prev_item is None:
prev_id = -1
else:
prev_id = self.prev_item.id
logger.debug("self.id: %d", self.id)
logger.debug("prev_id: %d", prev_id)
if prev_id == self.id:
self.dropnum_cache = self.prev_item.dropnum_cache
if prev_id == self.id \
and not (ID_GEM_MAX <= self.id <= ID_MONUMENT_MAX):
            # skip OCR if the image matches the cached one
logger.debug("dropnum_cache: %s", self.prev_item.dropnum_cache)
for dropnum_cache in self.prev_item.dropnum_cache:
pts = dropnum_cache["pts"]
img_gray = self.img_gray[pts[0][1]-2:pts[1][1]+2,
pts[0][0]-2:pts[1][0]+2]
template = dropnum_cache["img"]
res = cv2.matchTemplate(img_gray, template,
cv2.TM_CCOEFF_NORMED)
threshold = 0.97
loc = np.where(res >= threshold)
find_match = False
for pt in zip(*loc[::-1]):
find_match = True
break
if find_match:
logger.debug("find_match")
self.bonus = dropnum_cache["bonus"]
self.dropnum = dropnum_cache["dropnum"]
self.bonus_pts = dropnum_cache["bonus_pts"]
return
logger.debug("not find_match")
if ID_GEM_MAX <= self.id <= ID_MONUMENT_MAX:
            # items that never have a bonus
self.bonus_pts = []
self.bonus = ""
self.font_size = FONTSIZE_NORMAL
elif prev_id == self.id \
and self.category != "Point" and self.name != "QP":
self.bonus_pts = self.prev_item.bonus_pts
self.bonus = self.prev_item.bonus
self.font_size = self.prev_item.font_size
elif self.fileextention.lower() == '.png':
self.bonus_pts = self.detect_bonus_char()
self.bonus = self.read_item(self.bonus_pts)
            # determine the font size
if len(self.bonus_pts) > 0:
y_height = self.bonus_pts[-1][3] - self.bonus_pts[-1][1]
logger.debug("y_height: %s", y_height)
if self.position >= 14:
self.font_size = FONTSIZE_UNDEFINED
elif y_height < 25:
self.font_size = FONTSIZE_TINY
elif y_height > 27:
self.font_size = FONTSIZE_NORMAL
else:
self.font_size = FONTSIZE_SMALL
else:
if mode == "jp":
self.bonus, self.bonus_pts, self.font_size = self.detect_bonus_char4jpg2(mode)
else:
self.bonus, self.bonus_pts, self.font_size = self.detect_bonus_char4jpg(mode)
logger.debug("Bonus Font Size: %s", self.font_size)
        # decide whether the actual (bonus-less) drop count is on the upper or lower row
offsset_y = 2 if mode == 'na' else 0
if (self.category in ["Quest Reward", "Point"] or self.name == "QP") \
                and len(self.bonus) >= 5: # because the bonus looks like "(+*0)"
            # set the distance from the top for the first digit
base_line = self.bonus_pts[-2][1] - 3 + offsset_y
else:
base_line = int(180/206*self.height)
self.__bonus_string_into_int()
        # determine the right-edge position of the actual (bonus-less) drop count
offset_x = -7 if mode == "na" else 0
if self.category in ["Quest Reward", "Point"] or self.name == "QP":
margin_right = 15 + offset_x
elif len(self.bonus_pts) > 0:
margin_right = self.width - self.bonus_pts[0][0] + 2
else:
margin_right = 15 + offset_x
logger.debug("margin_right: %d", margin_right)
self.dropnum = self.detect_white_char(base_line, margin_right, mode)
logger.debug("self.dropnum: %s", self.dropnum)
if len(self.dropnum) == 0:
self.dropnum = "x1"
if self.id != ID_REWARD_QP \
and not (ID_GEM_MAX <= self.id <= ID_MONUMENT_MAX):
dropnum_found = False
for cache_item in self.dropnum_cache:
if cache_item["dropnum"] == self.dropnum:
dropnum_found = True
break
if dropnum_found is False:
                # capture the image for caching
_, width = self.img_gray.shape
_, cut_height, _ = self.define_fontsize(self.font_size)
logger.debug("base_line: %d", base_line)
logger.debug("cut_height: %d", cut_height)
logger.debug("margin_right: %d", margin_right)
pts = ((self.margin_left, base_line - cut_height),
(width - margin_right, base_line))
cached_img = self.img_gray[pts[0][1]:pts[1][1],
pts[0][0]:pts[1][0]]
tmp = {}
tmp["dropnum"] = self.dropnum
tmp["img"] = cached_img
tmp["pts"] = pts
tmp["bonus"] = self.bonus
tmp["bonus_pts"] = self.bonus_pts
self.dropnum_cache.append(tmp)
def __bonus_string_into_int(self):
try:
self.bonus = int(re.sub(r"\(|\)|\+", "", self.bonus))
        except Exception:
self.bonus = 0
def gem_img2id(self, img, gem_dict):
hash_gem = self.compute_gem_hash(img)
gems = {}
for i in gem_dict.keys():
d2 = hasher.compare(hash_gem, hex2hash(gem_dict[i]))
if d2 <= 20:
gems[i] = d2
gems = sorted(gems.items(), key=lambda x: x[1])
gem = next(iter(gems))
return gem[0]
def classify_item(self, img, currnet_dropPriority):
""")
imgとの距離を比較して近いアイテムを求める
id を返すように変更
"""
        hash_item = self.hash_item # image hash distance
if logger.isEnabledFor(logging.DEBUG):
hex = ""
for h in hash_item[0]:
hex = hex + "{:02x}".format(h)
logger.debug("phash: %s", hex)
def compare_distance(hash_item, background=True):
ids = {}
            # compare the distance against known items
for i in dist_item.keys():
itemid = dist_item[i]
item_bg = item_background[itemid]
d = hasher.compare(hash_item, hex2hash(i))
if background:
if d <= 13 and item_bg == self.background:
                        # e.g. point vs. seed distance of 8 (IMG_0274) -> threshold raised to 16
                        # e.g. burger vs. fat distance of 10 (IMG_2354) -> threshold raised to 14
ids[dist_item[i]] = d
else:
if d <= 13:
ids[dist_item[i]] = d
if len(ids) > 0:
ids = sorted(ids.items(), key=lambda x: x[1])
id_tupple = next(iter(ids))
id = id_tupple[0]
if ID_SECRET_GEM_MIN <= id <= ID_SECRET_GEM_MAX:
if currnet_dropPriority >= PRIORITY_SECRET_GEM_MIN:
id = self.gem_img2id(img, dist_secret_gem)
else:
return ""
elif ID_MAGIC_GEM_MIN <= id <= ID_MAGIC_GEM_MAX:
if currnet_dropPriority >= PRIORITY_MAGIC_GEM_MIN:
id = self.gem_img2id(img, dist_magic_gem)
else:
return ""
elif ID_GEM_MIN <= id <= ID_GEM_MAX:
if currnet_dropPriority >= PRIORITY_GEM_MIN:
id = self.gem_img2id(img, dist_gem)
else:
return ""
return id
return ""
id = compare_distance(hash_item, background=True)
if id == "":
id = compare_distance(hash_item, background=False)
return id
def classify_ce_sub(self, img, hasher_prog, dist_dic, threshold):
"""
        Find the closest known item by comparing hash distances to img.
"""
hash_item = hasher_prog(img) # 画像の距離
itemfiles = {}
if logger.isEnabledFor(logging.DEBUG):
hex = ""
for h in hash_item[0]:
hex = hex + "{:02x}".format(h)
        # Compare the distance to every known item
for i in dist_dic.keys():
d = hasher.compare(hash_item, hex2hash(i))
if d <= threshold:
itemfiles[dist_dic[i]] = d
if len(itemfiles) > 0:
itemfiles = sorted(itemfiles.items(), key=lambda x: x[1])
logger.debug("itemfiles: %s", itemfiles)
item = next(iter(itemfiles))
return item[0]
return ""
def classify_ce(self, img):
itemid = self.classify_ce_sub(img, compute_hash_ce, dist_ce, 12)
if itemid == "":
logger.debug("use narrow image")
itemid = self.classify_ce_sub(
img, compute_hash_ce_narrow, dist_ce_narrow, 15
)
return itemid
def classify_point(self, img):
"""
        Find the closest known item by comparing hash distances to img.
"""
hash_item = compute_hash(img) # 画像の距離
itemfiles = {}
if logger.isEnabledFor(logging.DEBUG):
hex = ""
for h in hash_item[0]:
hex = hex + "{:02x}".format(h)
logger.debug("phash: %s", hex)
        # Compare the distance to every known item
for i in dist_point.keys():
itemid = dist_point[i]
item_bg = item_background[itemid]
d = hasher.compare(hash_item, hex2hash(i))
if d <= 16 and item_bg == self.background:
itemfiles[itemid] = d
if len(itemfiles) > 0:
itemfiles = sorted(itemfiles.items(), key=lambda x: x[1])
item = next(iter(itemfiles))
return item[0]
return ""
def classify_exp(self, img):
        hash_item = self.compute_exp_rarity_hash(img)  # perceptual hash of the exp card
exps = {}
for i in dist_exp_rarity.keys():
dt = hasher.compare(hash_item, hex2hash(i))
            if dt <= 15:  # 11 for IMG_1833, 15 for IMG_1837
exps[i] = dt
exps = sorted(exps.items(), key=lambda x: x[1])
if len(exps) > 0:
exp = next(iter(exps))
hash_exp_class = self.compute_exp_class_hash(img)
exp_classes = {}
for j in dist_exp_class.keys():
dtc = hasher.compare(hash_exp_class, hex2hash(j))
exp_classes[j] = dtc
exp_classes = sorted(exp_classes.items(), key=lambda x: x[1])
exp_class = next(iter(exp_classes))
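            # The exp card id is assembled from the first four digits of the
            # matched class id, the fifth digit of the matched rarity id, and "00".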
return int(str(dist_exp_class[exp_class[0]])[:4]
+ str(dist_exp_rarity[exp[0]])[4] + "00")
return ""
def make_new_file(self, img, search_dir, dist_dic, dropPriority, category):
"""
        Find an unused file name for the new item image.
"""
i_dic = {"Item": "item", "Craft Essence": "ce", "Point": "point"}
initial = i_dic[category]
for i in range(999):
itemfile = search_dir / (initial + '{:0=3}'.format(i + 1) + '.png')
if itemfile.is_file():
continue
else:
cv2.imwrite(itemfile.as_posix(), img)
                # pick an unused id
for j in range(99999):
id = j + ID_START
if id in item_name.keys():
continue
break
if category == "Craft Essence":
hash = compute_hash_ce(img)
else:
hash = compute_hash(img)
hash_hex = ""
for h in hash[0]:
hash_hex = hash_hex + "{:02x}".format(h)
dist_dic[hash_hex] = id
if category == "Craft Essence":
hash_narrow = compute_hash_ce_narrow(img)
hash_hex_narrow = ""
for h in hash_narrow[0]:
                        hash_hex_narrow = hash_hex_narrow + "{:02x}".format(h)
dist_ce_narrow[hash_hex_narrow] = id
item_name[id] = itemfile.stem
item_background[id] = classify_background(img)
item_dropPriority[id] = dropPriority
item_type[id] = category
break
return id
def classify_category(self, svm_card):
"""
        Card type classifier (HOG features + SVM on the label area).
"""
        # HOG feature parameters
win_size = (120, 60)
block_size = (16, 16)
block_stride = (4, 4)
cell_size = (4, 4)
bins = 9
test = []
carddic = {0: 'Quest Reward', 1: 'Item', 2: 'Point',
3: 'Craft Essence', 4: 'Exp. UP', 99: ""}
tmpimg = self.img_rgb[int(189/206*self.height):
int(201/206*self.height),
int(78/188*self.width):
int(115/188*self.width)]
tmpimg = cv2.resize(tmpimg, (win_size))
hog = cv2.HOGDescriptor(win_size, block_size, block_stride,
cell_size, bins)
        test.append(hog.compute(tmpimg))  # store the feature vector
test = np.array(test)
pred = svm_card.predict(test)
return carddic[pred[1][0][0]]
def classify_card(self, img, currnet_dropPriority):
"""
        Item classifier.
"""
if self.category == "Point":
id = self.classify_point(img)
if id == "":
id = self.make_new_file(img, Point_dir, dist_point,
PRIORITY_POINT, self.category)
return id
elif self.category == "Quest Reward":
return 5
elif self.category == "Craft Essence":
id = self.classify_ce(img)
if id == "":
id = self.make_new_file(img, CE_dir, dist_ce,
PRIORITY_CE, self.category)
return id
elif self.category == "Exp. UP":
return self.classify_exp(img)
elif self.category == "Item":
id = self.classify_item(img, currnet_dropPriority)
if id == "":
id = self.make_new_file(img, Item_dir, dist_item,
PRIORITY_ITEM, self.category)
else:
            # The category cannot be determined here when the item is on the
            # third row and the bottom label is cut off by the scroll position
id = self.classify_item(img, currnet_dropPriority)
if id != "":
return id
id = self.classify_point(img)
if id != "":
return id
id = self.classify_ce(img)
if id != "":
return id
id = self.classify_exp(img)
if id != "":
return id
if id == "":
id = self.make_new_file(img, Item_dir, dist_item,
PRIORITY_ITEM, "Item")
return id
def compute_exp_rarity_hash(self, img_rgb):
"""
        Exp card (ember) rarity classifier.
        Hashes (almost) the whole card image.
"""
img = img_rgb[int(53/189*self.height):int(136/189*self.height),
int(37/206*self.width):int(149/206*self.width)]
return hasher.compute(img)
def compute_exp_class_hash(self, img_rgb):
"""
        Exp card (ember) class classifier.
        Hashes a tight crop around the class mark in the upper left.
        The ratios below were measured on iPhone 6S screenshots.
"""
img = img_rgb[int((5+9)/135*self.height):int((30+2)/135*self.height),
int(5/135*self.width):int((30+6)/135*self.width)]
return hasher.compute(img)
def compute_gem_hash(self, img_rgb):
"""
        Skill gem class classifier.
        Hashes a tight crop around the class mark in the center.
        The ratios below were measured on iPhone 6S screenshots.
"""
height, width = img_rgb.shape[:2]
img = img_rgb[int((145-16-60*0.8)/2/145*height)+2:
int((145-16+60*0.8)/2/145*height)+2,
int((132-52*0.8)/2/132*width):
int((132+52*0.8)/2/132*width)]
return hasher.compute(img)
def classify_background(img_rgb):
"""
    Background (item frame) classification.
"""
_, width = img_rgb.shape[:2]
img = img_rgb[30:119, width - 25:width - 7]
target_hist = img_hist(img)
bg_score = []
score_z = calc_hist_score(target_hist, hist_zero)
bg_score.append({"background": "zero", "dist": score_z})
score_g = calc_hist_score(target_hist, hist_gold)
bg_score.append({"background": "gold", "dist": score_g})
score_s = calc_hist_score(target_hist, hist_silver)
bg_score.append({"background": "silver", "dist": score_s})
score_b = calc_hist_score(target_hist, hist_bronze)
bg_score.append({"background": "bronze", "dist": score_b})
bg_score = sorted(bg_score, key=lambda x: x['dist'])
# logger.debug("background dist: %s", bg_score)
return (bg_score[0]["background"])
def compute_hash(img_rgb):
"""
    Item hash.
    Compares the card image excluding the drop-count area at the bottom.
    The ratios below were measured on iPhone 6S screenshots.
"""
height, width = img_rgb.shape[:2]
img = img_rgb[int(22/135*height):
int(77/135*height),
int(23/135*width):
int(112/135*width)]
return hasher.compute(img)
def compute_hash_ce(img_rgb):
"""
    Craft Essence hash.
    Compares the card image excluding the drop-count area at the bottom.
    The ratios below were measured on iPad (2018) screenshots.
"""
img = img_rgb[12:176, 9:182]
return hasher.compute(img)
def compute_hash_ce_narrow(img_rgb):
"""
CE Identifier for scrolled down screenshot
"""
height, width = img_rgb.shape[:2]
img = img_rgb[int(30/206*height):int(155/206*height),
int(5/188*width):int(183/188*width)]
return hasher.compute(img)
def search_file(search_dir, dist_dic, dropPriority, category):
"""
    Register every Item, Craft Essence or Point image file found under search_dir.
"""
files = search_dir.glob('**/*.png')
for fname in files:
img = imread(fname)
        # pick an id
        # reuse the existing id if this file name is already registered
if fname.stem in item_name.values():
id = [k for k, v in item_name.items() if v == fname.stem][0]
elif fname.stem in item_shortname.values():
id = [k for k, v in item_shortname.items() if v == fname.stem][0]
else:
for j in range(99999):
id = j + ID_START
if id in item_name.keys():
continue
break
        # the drop priority is fixed
item_name[id] = fname.stem
item_dropPriority[id] = dropPriority
item_type[id] = category
if category == "Craft Essence":
hash = compute_hash_ce(img)
else:
hash = compute_hash(img)
hash_hex = ""
for h in hash[0]:
hash_hex = hash_hex + "{:02x}".format(h)
dist_dic[hash_hex] = id
if category == "Item" or category == "Point":
item_background[id] = classify_background(img)
if category == "Craft Essence":
hash_narrow = compute_hash_ce_narrow(img)
hash_hex_narrow = ""
for h in hash_narrow[0]:
hash_hex_narrow = hash_hex_narrow + "{:02x}".format(h)
dist_ce_narrow[hash_hex_narrow] = id
def calc_hist_score(hist1, hist2):
scores = []
for channel1, channel2 in zip(hist1, hist2):
score = cv2.compareHist(channel1, channel2, cv2.HISTCMP_BHATTACHARYYA)
scores.append(score)
return np.mean(scores)
def img_hist(src_img):
img = cv2.cvtColor(src_img, cv2.COLOR_BGR2HSV)
hist1 = cv2.calcHist([img], [0], None, [256], [0, 256])
hist2 = cv2.calcHist([img], [1], None, [256], [0, 256])
hist3 = cv2.calcHist([img], [2], None, [256], [0, 256])
return hist1, hist2, hist3
def calc_dist_local():
"""
    Build and keep dictionaries of hash values (1-D arrays) for the already-registered item images.
"""
search_file(Item_dir, dist_item, PRIORITY_ITEM, "Item")
search_file(CE_dir, dist_ce, PRIORITY_CE, "Craft Essence")
search_file(Point_dir, dist_point, PRIORITY_POINT, "Point")
def hex2hash(hexstr):
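    # Converts a 16-hex-digit string into the 1x8 uint8 array layout expected by
    # hasher.compare(), e.g. "ff00a1..." -> np.array([[0xff, 0x00, 0xa1, ...]]).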
hashlist = []
for i in range(8):
hashlist.append(int('0x' + hexstr[i*2:i*2+2], 0))
return np.array([hashlist], dtype='uint8')
def out_name(args, id):
if args.lang == "eng":
if id in item_name_eng.keys():
return item_name_eng[id]
if id in item_shortname.keys():
name = item_shortname[id]
else:
name = item_name[id]
return name
def imread(filename, flags=cv2.IMREAD_COLOR, dtype=np.uint8):
"""
    Workaround: cv2.imread cannot read file names containing Japanese characters.
"""
try:
n = np.fromfile(filename, dtype)
img = cv2.imdecode(n, flags)
return img
except Exception as e:
logger.exception(e)
return None
def get_exif(img):
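    # Returns the EXIF DateTimeOriginal of the image as a datetime, or the
    # sentinel string "NON" when the image carries no usable EXIF data.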
exif = img._getexif()
try:
for id, val in exif.items():
tg = TAGS.get(id, id)
if tg == "DateTimeOriginal":
return datetime.datetime.strptime(val, '%Y:%m:%d %H:%M:%S')
except AttributeError:
return "NON"
return "NON"
def get_output(filenames, args):
"""
    Build the output records.
"""
calc_dist_local()
if train_item.exists() is False:
logger.critical("item.xml is not found")
logger.critical("Try to run 'python makeitem.py'")
sys.exit(1)
if train_chest.exists() is False:
logger.critical("chest.xml is not found")
logger.critical("Try to run 'python makechest.py'")
sys.exit(1)
if train_dcnt.exists() is False:
logger.critical("dcnt.xml is not found")
logger.critical("Try to run 'python makedcnt.py'")
sys.exit(1)
if train_card.exists() is False:
logger.critical("card.xml is not found")
logger.critical("Try to run 'python makecard.py'")
sys.exit(1)
svm = cv2.ml.SVM_load(str(train_item))
svm_chest = cv2.ml.SVM_load(str(train_chest))
svm_dcnt = cv2.ml.SVM_load(str(train_dcnt))
svm_card = cv2.ml.SVM_load(str(train_card))
    fileoutput = []  # output records
output = {}
prev_pages = 0
prev_pagenum = 0
prev_total_qp = QP_UNKNOWN
prev_itemlist = []
prev_datetime = datetime.datetime(year=2015, month=7, day=30, hour=0)
prev_qp_gained = 0
prev_chestnum = 0
all_list = []
for filename in filenames:
exLogger = CustomAdapter(logger, {"target": filename})
logger.debug("filename: %s", filename)
f = Path(filename)
if f.exists() is False:
output = {'filename': str(filename) + ': not found'}
all_list.append([])
elif f.is_dir(): # for ZIP file from MacOS
continue
elif f.suffix.upper() not in ['.PNG', '.JPG', '.JPEG']:
output = {'filename': str(filename) + ': Not Supported'}
all_list.append([])
else:
img_rgb = imread(filename)
fileextention = Path(filename).suffix
try:
sc = ScreenShot(args, img_rgb,
svm, svm_chest, svm_dcnt, svm_card,
fileextention, exLogger)
if sc.itemlist[0]["id"] != ID_REWARD_QP and sc.pagenum == 1:
logger.warning(
"Page count recognition is failing: %s",
filename
)
                # Skip as a duplicate when the drop list matches the previous image and
                # either QP is not capped and the total QP equals the previous value,
                # or QP is capped and the EXIF timestamps are less than 15 seconds apart
pilimg = Image.open(filename)
dt = get_exif(pilimg)
if dt == "NON" or prev_datetime == "NON":
td = datetime.timedelta(days=1)
else:
td = dt - prev_datetime
if sc.pages - sc.pagenum == 0:
sc.itemlist = sc.itemlist[14-(sc.lines+2) % 3*7:]
if prev_itemlist == sc.itemlist:
if (sc.total_qp != -1 and sc.total_qp != 2000000000
and sc.total_qp == prev_total_qp) \
or ((sc.total_qp == -1 or sc.total_qp == 2000000000)
and td.total_seconds() < args.timeout):
logger.debug("args.timeout: %s", args.timeout)
logger.debug("filename: %s", filename)
logger.debug("prev_itemlist: %s", prev_itemlist)
logger.debug("sc.itemlist: %s", sc.itemlist)
logger.debug("sc.total_qp: %s", sc.total_qp)
logger.debug("prev_total_qp: %s", prev_total_qp)
logger.debug("datetime: %s", dt)
logger.debug("prev_datetime: %s", prev_datetime)
logger.debug("td.total_second: %s", td.total_seconds())
fileoutput.append(
{'filename': str(filename) + ': duplicate'})
all_list.append([])
continue
                # Output "missing" when an earlier page's screenshot is absent:
                # 1. the previous page was not the last one and this page does not continue it,
                #    or the previous page was the last one but this page is not page 1
                # 2. this page continues the previous one but the gained QP differs
if (
prev_pages - prev_pagenum > 0
and sc.pagenum - prev_pagenum != 1) \
or (prev_pages - prev_pagenum == 0
and sc.pagenum != 1) \
or sc.pagenum != 1 \
and sc.pagenum - prev_pagenum == 1 \
and (
prev_qp_gained != sc.qp_gained
):
logger.debug("prev_pages: %s", prev_pages)
logger.debug("prev_pagenum: %s", prev_pagenum)
logger.debug("sc.pagenum: %s", sc.pagenum)
logger.debug("prev_qp_gained: %s", prev_qp_gained)
logger.debug("sc.qp_gained: %s", sc.qp_gained)
logger.debug("prev_chestnum: %s", prev_chestnum)
logger.debug("sc.chestnum: %s", sc.chestnum)
fileoutput.append({'filename': 'missing'})
all_list.append([])
all_list.append(sc.itemlist)
prev_pages = sc.pages
prev_pagenum = sc.pagenum
prev_total_qp = sc.total_qp
prev_itemlist = sc.itemlist
prev_datetime = dt
prev_qp_gained = sc.qp_gained
prev_chestnum = sc.chestnum
sumdrop = len([d for d in sc.itemlist
if d["id"] != ID_REWARD_QP])
if args.lang == "jpn":
drop_count = "ドロ数"
item_count = "アイテム数"
else:
drop_count = "item_count"
item_count = "item_count"
output = {'filename': str(filename), drop_count: sc.chestnum, item_count: sumdrop}
except Exception as e:
logger.error(filename)
logger.error(e, exc_info=True)
output = ({'filename': str(filename) + ': not valid'})
all_list.append([])
fileoutput.append(output)
return fileoutput, all_list
def load_svms():
svm = cv2.ml.SVM_load(str(train_item))
svm_chest = cv2.ml.SVM_load(str(train_chest))
svm_dcnt = cv2.ml.SVM_load(str(train_dcnt))
svm_card = cv2.ml.SVM_load(str(train_card))
return (svm, svm_chest, svm_dcnt, svm_card)
def parse_img(
program_args,
svm,
svm_chest,
svm_dcnt,
svm_card,
file_path,
prev_pages=0,
prev_pagenum=0,
prev_total_qp=QP_UNKNOWN,
prev_gained_qp=QP_UNKNOWN,
prev_itemlist=[],
prev_datetime=datetime.datetime(year=2015, month=7, day=30, hour=0)):
parsed_img_data = {"status": "Incomplete"}
logger.debug("filename: %s", file_path)
file_loc = Path(file_path).resolve()
parsed_img_data["image_path"] = str(file_loc)
if not file_loc.exists():
# TODO: is this needed?
parsed_img_data["status"] = "File not found"
return parsed_img_data
img_rgb = imread(file_path)
file_extention = file_loc.suffix
exLogger = CustomAdapter(logger, {"target": file_loc.name})
try:
screenshot = ScreenShot(
program_args, img_rgb, svm, svm_chest, svm_dcnt, svm_card, file_extention, exLogger)
        # If the previous image indicated more pages were coming, check whether this one continues it.
if (prev_pages - prev_pagenum > 0 and screenshot.pagenum - prev_pagenum != 1) \
or (prev_pages - prev_pagenum == 0 and screenshot.pagenum != 1):
parsed_img_data["status"] = "Missing page before this"
# Detect whether image is a duplicate
# Image is a candidate duplicate if drops and gained QP match previous image.
# Duplicate is confirmed if:
# - QP is not capped and drops are the same as in the previous image
# - QP is capped and previous image was taken within 15sec
# TODO: is this needed?
pilimg = Image.open(file_path)
date_time = get_exif(pilimg)
if date_time == "NON" or prev_datetime == "NON":
time_delta = datetime.timedelta(days=1)
else:
time_delta = date_time - prev_datetime
if prev_itemlist == screenshot.itemlist and prev_gained_qp == screenshot.qp_gained:
if (screenshot.total_qp != 999999999 and screenshot.total_qp == prev_total_qp) \
or (screenshot.total_qp == 999999999 and time_delta.total_seconds() < args.timeout):
logger.debug("args.timeout: {}".format(args.timeout))
logger.debug("filename: {}".format(file_path))
logger.debug("prev_itemlist: {}".format(prev_itemlist))
logger.debug("screenshot.itemlist: {}".format(
screenshot.itemlist))
logger.debug("screenshot.total_qp: {}".format(
screenshot.total_qp))
logger.debug("prev_total_qp: {}".format(prev_total_qp))
logger.debug("datetime: {}".format(date_time))
logger.debug("prev_datetime: {}".format(prev_datetime))
logger.debug("td.total_second: {}".format(
time_delta.total_seconds()))
parsed_img_data["status"] = "Duplicate file"
return parsed_img_data
# Prep next iter
prev_pages = screenshot.pages
prev_pagenum = screenshot.pagenum
prev_total_qp = screenshot.total_qp
prev_gained_qp = screenshot.qp_gained
prev_itemlist = screenshot.itemlist
prev_datetime = date_time
# Gather data
parsed_img_data["qp_total"] = screenshot.total_qp
parsed_img_data["qp_gained"] = screenshot.qp_gained
parsed_img_data["scroll_position"] = screenshot.scroll_position
parsed_img_data["drop_count"] = screenshot.chestnum
parsed_img_data["drops_found"] = len(screenshot.itemlist)
parsed_img_data["drops"] = screenshot.itemlist
parsed_img_data["status"] = "OK" if parsed_img_data["status"] == "Incomplete" else parsed_img_data["status"]
return parsed_img_data
except Exception as e:
logger.error("Error during parsing of {}\n{}\n".format(
file_path, e), exc_info=True)
parsed_img_data["status"] = "Invalid file"
return parsed_img_data
def move_file_to_out_dir(src_file_path, out_dir):
if out_dir is not None:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
src_file_path = Path(src_file_path)
if not src_file_path.exists():
print("Cannot move {}. It does not exist.".format(src_file_path))
exit(1)
dst_file_path = "{}/{}".format(out_dir, src_file_path.name)
os.rename(src_file_path, dst_file_path)
return dst_file_path
return src_file_path
def check_svms_trained():
if train_item.exists() is False:
logger.critical("item.xml is not found")
logger.critical("Try to run 'python makeitem.py'")
sys.exit(1)
if train_chest.exists() is False:
logger.critical("chest.xml is not found")
logger.critical("Try to run 'python makechest.py'")
sys.exit(1)
if train_dcnt.exists() is False:
logger.critical("dcnt.xml is not found")
logger.critical("Try to run 'python makedcnt.py'")
sys.exit(1)
if train_card.exists() is False:
logger.critical("card.xml is not found")
logger.critical("Try to run 'python makecard.py'")
sys.exit(1)
def parse_into_json(input_file_paths, args):
"""
The version of output gathering used by AtlasAcademy. Made to resemble capy's output.
"""
calc_dist_local()
check_svms_trained()
(svm, svm_chest, svm_dcnt, svm_card) = load_svms()
prev_pages = 0
prev_pagenum = 0
prev_total_qp = QP_UNKNOWN
prev_gained_qp = QP_UNKNOWN
prev_itemlist = []
prev_datetime = datetime.datetime(year=2015, month=7, day=30, hour=0)
all_parsed_output = []
for file_path in input_file_paths:
file_path = move_file_to_out_dir(file_path, args.out_folder)
all_parsed_output.append(parse_img(
args,
svm,
svm_chest,
svm_dcnt,
svm_card,
file_path,
prev_pages,
prev_pagenum,
prev_total_qp,
prev_gained_qp,
prev_itemlist,
prev_datetime))
return all_parsed_output
def __parse_into_json_process(input_queue, args):
(svm, svm_chest, svm_dcnt, svm_card) = load_svms()
global watcher_running
while watcher_running or not input_queue.empty():
input_file_path = input_queue.get()
# Detection of missing screenshots/pages (e.g. scrolled down image with no previous
    # image to go along with it), is disabled with `prev_pages=-1`. This is because
# the technique depends on having the images sorted in chronological order. Sorting
# files and processing them in order is not possible in a multiprocess environment.
parsed_output = parse_img(
args,
svm,
svm_chest,
svm_dcnt,
svm_card,
input_file_path,
prev_pages=-1)
output_json([parsed_output], args.out_folder)
def __signal_handling(*_):
"""
Taken from capy-drop-parser
"""
global watcher_running
if not watcher_running:
sys.exit(1)
watcher_running = False
print(
"Notice: app may take up to polling frequency time and however long it takes to finish the queue before exiting."
)
def watch_parse_output_into_json(args):
"""
Continuously watch the given input directory for new files.
Processes any new images by parsing them, moving them to output dir, and writing parsed json to
output dir.
Works with a producer/consumer multiprocessing approach. This function watches and
fills the queue, while spawned processes use `__parse_into_json_process` to consume the
items.
"""
calc_dist_local()
check_svms_trained()
signal.signal(signal.SIGINT, __signal_handling)
# We estimate roughly 2secs per image parsing. Queue can hold as many images as can be
# processed by the given amount of processes in the given amount of poll time.
input_queue = multiprocessing.Queue(maxsize=int(
args.num_processes * args.polling_frequency / 2))
pool = multiprocessing.Pool(
args.num_processes, initializer=__parse_into_json_process, initargs=(input_queue, args))
global watcher_running
while watcher_running:
for f in Path(args.folder).iterdir():
if not f.is_file():
continue
file_path = move_file_to_out_dir(f, args.out_folder)
input_queue.put(file_path) # blocks when queue is full
time.sleep(int(args.polling_frequency))
input_queue.close()
input_queue.join_thread()
pool.close()
pool.join()
def sort_files(files, ordering):
if ordering == Ordering.NOTSPECIFIED:
return files
elif ordering == Ordering.FILENAME:
return sorted(files)
elif ordering == Ordering.TIMESTAMP:
return sorted(files, key=lambda f: Path(f).stat().st_ctime)
raise ValueError(f'Unsupported ordering: {ordering}')
def change_value(args, line):
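    # Abbreviate trailing zeros for display, e.g. 3000000 -> "3M" (or "3百万"
    # with --lang jpn) and 90000 -> "90K" ("9万").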
if args.lang == 'jpn':
line = re.sub('000000$', "百万", str(line))
line = re.sub('0000$', "万", str(line))
line = re.sub('000$', "千", str(line))
else:
line = re.sub('000000$', "M", str(line))
line = re.sub('000$', "K", str(line))
return line
def make_quest_output(quest):
output = ""
if quest != "":
quest_list = [q["name"] for q in freequest
if q["place"] == quest["place"]]
if math.floor(quest["id"]/100)*100 == ID_NORTH_AMERICA:
output = quest["place"] + " " + quest["name"]
elif math.floor(quest["id"]/100)*100 == ID_SYURENJYO:
output = quest["chapter"] + " " + quest["place"]
elif math.floor(quest["id"]/100000)*100000 == ID_EVNET:
output = quest["shortname"]
else:
            # For the first quest of a place, output the place; otherwise output the quest name
if quest_list.index(quest["name"]) == 0:
output = quest["chapter"] + " " + quest["place"]
else:
output = quest["chapter"] + " " + quest["name"]
return output
UNKNOWN = -1
OTHER = 0
NOVICE = 1
INTERMEDIATE = 2
ADVANCED = 3
EXPERT = 4
MASTER = 5
def tv_quest_type(item_list):
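    # Guess the Treasure Vault difficulty from the reward QP amount
    # (1400 novice, 2900 intermediate, 4400 advanced, 6400 expert, 7400 master);
    # more than one reward entry or any other amount yields OTHER.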
quest_type = UNKNOWN
for item in item_list:
if item["id"] == ID_REWARD_QP:
if quest_type != UNKNOWN:
quest_type = OTHER
break
if item["dropnum"] == 1400:
quest_type = NOVICE
elif item["dropnum"] == 2900:
quest_type = INTERMEDIATE
elif item["dropnum"] == 4400:
quest_type = ADVANCED
elif item["dropnum"] == 6400:
quest_type = EXPERT
elif item["dropnum"] == 7400:
quest_type = MASTER
else:
quest_type = OTHER
break
return quest_type
def deside_tresure_valut_quest(item_list):
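    # Treasure Vault runs are expected to drop nothing but QP, so the run is
    # identified by the difficulty guessed from the reward QP plus the exact set
    # of QP drop amounts; returns "" when the drops do not fit any difficulty.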
quest_type = tv_quest_type(item_list)
if quest_type in [UNKNOWN, OTHER]:
quest_candidate = ""
return quest_candidate
item_set = set()
for item in item_list:
if item["id"] == ID_REWARD_QP:
continue
elif item["id"] != ID_QP:
quest_candidate = ""
break
else:
item_set.add(item["dropnum"])
if quest_type == NOVICE and item_set == {10000, 15000, 30000, 45000}:
quest_candidate = {
"id": 94061636,
"name": "宝物庫の扉を開け 初級",
"place": "",
"chapter": "",
"qp": 1400,
"shortname": "宝物庫 初級",
}
elif quest_type == INTERMEDIATE and item_set == {10000, 15000, 30000, 45000, 90000, 135000}:
quest_candidate = {
"id": 94061637,
"name": "宝物庫の扉を開け 中級",
"place": "",
"chapter": "",
"qp": 2900,
"shortname": "宝物庫 中級",
}
elif quest_type == ADVANCED and item_set == {30000, 45000, 90000, 135000, 270000, 405000}:
quest_candidate = {
"id": 94061638,
"name": "宝物庫の扉を開け 上級",
"place": "",
"chapter": "",
"qp": 4400,
"shortname": "宝物庫 上級",
}
elif quest_type == EXPERT and item_set == {90000, 135000, 270000, 405000}:
quest_candidate = {
"id": 94061639,
"name": "宝物庫の扉を開け 超級",
"place": "",
"chapter": "",
"qp": 7400,
"shortname": "宝物庫 超級",
}
elif quest_type == MASTER and item_set == {270000, 405000, 1500000}:
quest_candidate = {
"id": 94061640,
"name": "宝物庫の扉を開け 極級",
"place": "",
"chapter": "",
"qp": 7400,
"shortname": "宝物庫 極級",
}
else:
quest_candidate = ""
return quest_candidate
def deside_quest(item_list):
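    # Identify the free quest by matching the set of dropped item names (plus a
    # "QP(+reward)" entry) against the known drop tables; Craft Essence drops and
    # some event items are skipped when building the set.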
quest_name = deside_tresure_valut_quest(item_list)
if quest_name != "":
return quest_name
item_set = set()
for item in item_list:
if item["id"] == ID_REWARD_QP:
item_set.add("QP(+" + str(item["dropnum"]) + ")")
elif item["id"] == 1 \
or item["category"] == "Craft Essence" \
or (9700 <= math.floor(item["id"]/1000) <= 9707
and str(item["id"])[4] not in ["4", "5"]):
continue
else:
item_set.add(item["name"])
quest_candidate = ""
for quest in reversed(freequest):
dropset = {i["name"] for i in quest["drop"]
if i["type"] != "Craft Essence"}
dropset.add("QP(+" + str(quest["qp"]) + ")")
if dropset == item_set:
quest_candidate = quest
break
return quest_candidate
def quest_name_recognition(item_list):
"""アイテムが全て埋まっていない場合のクエスト名の判別
Args:
item_list ([type]): [description]
Returns:
[type]: [description]
"""
reward_qp = 0
item_set = set()
for item in item_list:
if item["id"] == ID_REWARD_QP:
item_set.add("QP(+" + str(item["dropnum"]) + ")")
reward_qp = item["dropnum"]
elif item["id"] == 1 \
or item["category"] == "Craft Essence" \
or (9700 <= math.floor(item["id"]/1000) <= 9707
and str(item["id"])[4] not in ["4", "5"]):
continue
else:
item_set.add(item["name"])
if reward_qp == 0:
return "", []
quest_candidate = []
    # build the list of quests whose reward QP matches
for quest in reversed(freequest):
if quest["qp"] == reward_qp:
quest_candidate.append(quest)
    # check which candidate quests contain all the observed drops
    # and output only when exactly one candidate matches
quest_candidate2 = []
missing_items = []
for quest in quest_candidate:
dropset = {i["name"] for i in quest["drop"]
if i["type"] != "Craft Essence"}
dropset.add("QP(+" + str(quest["qp"]) + ")")
diff = item_set - dropset
if len(diff) == 0:
tmp_items = []
diff2 = dropset - item_set
quest_candidate2.append(quest)
for item in quest["drop"]:
if item["name"] in diff2:
item["dropnum"] = 0
item["category"] = "Item"
tmp_items.append(item)
missing_items.append(tmp_items)
if len(quest_candidate2) == 1:
return quest_candidate2[0], missing_items[0]
else:
return "", []
def make_csv_header(args, item_list):
"""
    Build the CSV header.
    Add a zero Craft Essence column when no CE dropped but non-permanent (event) items did.
"""
if args.lang == 'jpn':
drop_count = 'ドロ数'
item_count = 'アイテム数'
ce_str = '礼装'
else:
drop_count = 'drop_count'
item_count = 'item_count'
ce_str = 'CE'
if item_list == [[]]:
return ['filename', drop_count], False, ""
    # flatten the list of lists
flat_list = list(itertools.chain.from_iterable(item_list))
    # keep only the fields we need
short_list = [{"id": a["id"], "name": a["name"], "category": a["category"],
"dropPriority": a["dropPriority"], "dropnum": a["dropnum"]}
for a in flat_list]
    # no Craft Essence category item dropped, but an event item (id > ID_EXP_MAX) is present
if args.lang == 'jpn':
no_ce_exp_list = [
k for k in flat_list
if not k["name"].startswith("概念礼装EXPカード:")
]
else:
no_ce_exp_list = [
k for k in flat_list
if not k["name"].startswith("CE EXP Card:")
]
ce0_flag = ("Craft Essence"
not in [
d.get('category') for d in no_ce_exp_list
]
) and (
max([d.get("id") for d in flat_list]) > ID_EXP_MAX
)
if ce0_flag:
short_list.append({"id": 99999990, "name": ce_str,
"category": "Craft Essence",
"dropPriority": 9005, "dropnum": 0})
    # remove duplicate entries
unique_list = list(map(json.loads, set(map(json.dumps, short_list))))
    # determine the quest name
quest = deside_quest(unique_list)
if quest == "":
quest, items2 = quest_name_recognition(unique_list)
unique_list.extend(items2)
quest_output = make_quest_output(quest)
    # sort by drop priority, then id, then drop count
new_list = sorted(sorted(sorted(unique_list, key=itemgetter('dropnum')),
key=itemgetter('id'), reverse=True),
key=itemgetter('dropPriority'), reverse=True)
header = []
for nlist in new_list:
if nlist['category'] in ['Quest Reward', 'Point'] \
or nlist["name"] == "QP":
tmp = out_name(args, nlist['id']) \
+ "(+" + change_value(args, nlist["dropnum"]) + ")"
elif nlist["dropnum"] > 1:
tmp = out_name(args, nlist['id']) \
+ "(x" + change_value(args, nlist["dropnum"]) + ")"
elif nlist["name"] == ce_str:
tmp = ce_str
else:
tmp = out_name(args, nlist['id'])
header.append(tmp)
return ['filename', drop_count, item_count] + header, ce0_flag, quest_output
def make_csv_data(args, sc_list, ce0_flag):
if sc_list == []:
return [{}], [{}]
csv_data = []
allitem = []
for sc in sc_list:
tmp = []
for item in sc:
if item['category'] in ['Quest Reward', 'Point'] \
or item["name"] == "QP":
tmp.append(out_name(args, item['id'])
+ "(+" + change_value(args, item["dropnum"]) + ")")
elif item["dropnum"] > 1:
tmp.append(out_name(args, item['id'])
+ "(x" + change_value(args, item["dropnum"]) + ")")
else:
tmp.append(out_name(args, item['id']))
allitem = allitem + tmp
csv_data.append(dict(Counter(tmp)))
csv_sum = dict(Counter(allitem))
if ce0_flag:
if args.lang == 'jpn':
ce_str = '礼装'
else:
ce_str = 'CE'
csv_sum.update({ce_str: 0})
return csv_sum, csv_data
def output_json(parsed_output, out_folder):
if out_folder is None:
sys.stdout.buffer.write(json.dumps(
parsed_output, ensure_ascii=False).encode('utf8'))
else:
if not os.path.exists(out_folder):
os.makedirs(out_folder)
for parsed_file in parsed_output:
title = Path(parsed_file["image_path"]).stem
with open(Path("{}/{}.json".format(out_folder, title)), "w", encoding="utf-8") as f:
json.dump(parsed_file, f, indent=4, ensure_ascii=False)
if __name__ == '__main__':
    # parse the command-line options
parser = argparse.ArgumentParser(
description='Image Parse for FGO Battle Results'
)
    # register the arguments accepted on the command line
parser.add_argument(
        '-i', '--filenames', help='image file to parse', nargs='+')  # input image files
parser.add_argument('--lang', default=DEFAULT_ITEM_LANG,
choices=('jpn', 'eng'),
help='Language to be used for output: Default '
+ DEFAULT_ITEM_LANG)
parser.add_argument('-f', '--folder', help='Specify by folder')
parser.add_argument('-o', '--out_folder',
help='folder to write parsed data to. If specified, parsed images will also be moved to here. Else, output will simply be written to stdout')
parser.add_argument('--ordering',
help='The order in which files are processed ',
type=Ordering,
choices=list(Ordering), default=Ordering.NOTSPECIFIED)
text_timeout = 'Duplicate check interval at QP MAX (sec): Default '
parser.add_argument('-t', '--timeout', type=int, default=TIMEOUT,
help=text_timeout + str(TIMEOUT) + ' sec')
parser.add_argument('--version', action='version',
version=PROGNAME + " " + VERSION)
parser.add_argument('-l', '--loglevel',
choices=('debug', 'info'), default='info')
subparsers = parser.add_subparsers(
title='subcommands', description='{subcommand} --help: show help message for the subcommand',)
watcher_parser = subparsers.add_parser(
'watch', help='continuously watch the folder specified by [-f FOLDER]')
watcher_parser.add_argument(
"-j",
"--num_processes",
required=False,
default=DEFAULT_AMT_PROCESSES,
type=int,
help="number of processes to allocate in the process pool. Default: {}".format(
DEFAULT_AMT_PROCESSES),
)
watcher_parser.add_argument(
"-p",
"--polling_frequency",
required=False,
default=DEFAULT_POLL_FREQ,
type=int,
help="how often to check for new images (in seconds). Default: {}s".format(
DEFAULT_POLL_FREQ),
)
    args = parser.parse_args()  # parse the arguments
lformat = '%(name)s <%(filename)s-L%(lineno)s> [%(levelname)s] %(message)s'
logging.basicConfig(
level=logging.INFO,
format=lformat,
)
logger.setLevel(args.loglevel.upper())
if args.out_folder is not None and not Path(args.out_folder):
print("{} is not a valid path".format(args.out_folder))
exit(1)
for ndir in [Item_dir, CE_dir, Point_dir]:
if not ndir.is_dir():
ndir.mkdir(parents=True)
# Attributes are only present if the watch subcommand has been invoked.
if hasattr(args, "num_processes") and hasattr(args, "polling_frequency"):
if args.folder is None or not Path(args.folder).exists():
print(
"The watch subcommands requires a valid input directory. Provide one with --folder.")
exit(1)
watch_parse_output_into_json(args)
else:
if args.filenames is None and args.folder is None:
print(
"No input files specified. Use --filenames or --folder to do so.")
exit(1)
# gather input image files
if args.folder:
inputs = [x for x in Path(args.folder).iterdir()]
else:
inputs = args.filenames
inputs = sort_files(inputs, args.ordering)
parsed_output = parse_into_json(inputs, args)
output_json(parsed_output, args.out_folder)
|
[
"argparse.ArgumentParser",
"cv2.approxPolyDP",
"cv2.arcLength",
"cv2.imdecode",
"numpy.ones",
"logging.getLogger",
"pageinfo.guess_lines",
"json.dumps",
"pathlib.Path",
"numpy.mean",
"cv2.rectangle",
"cv2.compareHist",
"cv2.HOGDescriptor",
"cv2.minMaxLoc",
"cv2.imshow",
"cv2.inRange",
"PIL.ExifTags.TAGS.get",
"cv2.matchTemplate",
"cv2.contourArea",
"cv2.img_hash.PHash_create",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"cv2.split",
"re.findall",
"datetime.timedelta",
"collections.Counter",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"re.sub",
"cv2.resize",
"pageinfo.guess_pagenum",
"json.dump",
"cv2.bitwise_not",
"cv2.countNonZero",
"cv2.waitKey",
"cv2.calcHist",
"os.rename",
"re.match",
"datetime.datetime",
"datetime.datetime.strptime",
"multiprocessing.Pool",
"signal.signal",
"sys.exit",
"json.load",
"os.makedirs",
"logging.basicConfig",
"pageinfo.guess_pages",
"numpy.fromfile",
"cv2.threshold",
"math.floor",
"pytesseract.image_to_string",
"PIL.Image.open",
"pageinfo.guess_pageinfo",
"numpy.where",
"numpy.array",
"pageinfo.detect_qp_region",
"operator.itemgetter",
"itertools.chain.from_iterable",
"cv2.findContours"
]
(['args.folder'], {}), '(args.folder)\n', (118588, 118601), False, 'from pathlib import Path\n'), ((103713, 103720), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (103717, 103720), False, 'from pathlib import Path\n')]
|
import pytest
from easel.site import Site
from easel.site.defaults import Key
from easel.site.errors import Error, SiteConfigError
from easel.site.globals import Globals
from tests.test_configs import TestSites
# -----------------------------------------------------------------------------
# Site
# -----------------------------------------------------------------------------
def test__valid() -> None:
Globals.init(root=TestSites.valid)
site = Site()
site.build()
repr(site)
assert site.index is not None
assert site.get_page("page-layout") is not None
assert site.get_page("page-layout-gallery") is not None
assert site.get_page("page-lazy") is not None
assert site.get_page("page-lazy-gallery") is not None
assert site.get_page("page-missing") is None
assert site.config.title == "Testing Title"
assert site.config.author == "Testing Author"
assert site.config.copyright == "Testing Copyright"
assert site.config.description == "Testing Description"
assert site.config.favicon == "./testing-favicon.ico"
assert site.config.header == {
Key.TITLE: {
Key.LABEL: "Testing Header Title Label",
Key.IMAGE: "./testing-header-title-image.jpg",
},
}
assert Globals.site_paths.static_url_path == "/site"
def test__not_built() -> None:
Globals.init(root=TestSites.valid)
site = Site()
with pytest.raises(Error):
site.pages
with pytest.raises(Error):
site.menu
with pytest.raises(Error):
site.index
def test__rebuild_cache() -> None:
Globals.init(root=TestSites.valid)
site = Site()
site.build()
site.rebuild_cache()
def test__config_menu_empty() -> None:
Globals.init(root=TestSites.config_menu_empty)
site = Site()
site.build()
assert site.menu == []
def test__config_menu_missing_page() -> None:
Globals.init(root=TestSites.config_menu_missing_page)
site = Site()
with pytest.raises(SiteConfigError):
site.build()
def test__index_missing() -> None:
Globals.init(root=TestSites.index_missing)
site = Site()
with pytest.raises(SiteConfigError):
site.build()
def test__index_overload() -> None:
Globals.init(root=TestSites.index_overload)
site = Site()
with pytest.raises(SiteConfigError):
site.build()
# -----------------------------------------------------------------------------
# SitePaths
# -----------------------------------------------------------------------------
def test__SitePaths__root_not_set() -> None:
with pytest.raises(SiteConfigError):
Globals.site_paths.root
def test__SitePaths__root_missing() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root="/path/to/missing-site")
def test__SitePaths__assets() -> None:
Globals.init(root=TestSites.valid)
Globals.site_paths.assets
def test__SitePaths__missing_site_yaml() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.missing_site_yaml)
def test__SitePaths__contents_directory_missing() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.contents_directory_missing)
def test__SitePaths__pages_directory_missing() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.pages_directory_missing)
def test__SitePaths__pages_directory_empty() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.pages_directory_empty)
# -----------------------------------------------------------------------------
# SiteConfig
# -----------------------------------------------------------------------------
def test__SiteConfig__config_menu_type_invalid() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.config_menu_type_invalid)
def test__SiteConfig__config_header_type_invalid() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.config_header_type_invalid)
def test__SiteConfig__config_theme_type_invalid() -> None:
with pytest.raises(SiteConfigError):
Globals.init(root=TestSites.config_theme_type_invalid)
|
[
"pytest.raises",
"easel.site.globals.Globals.init",
"easel.site.Site"
] |
[((415, 449), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.valid'}), '(root=TestSites.valid)\n', (427, 449), False, 'from easel.site.globals import Globals\n'), ((462, 468), 'easel.site.Site', 'Site', ([], {}), '()\n', (466, 468), False, 'from easel.site import Site\n'), ((1363, 1397), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.valid'}), '(root=TestSites.valid)\n', (1375, 1397), False, 'from easel.site.globals import Globals\n'), ((1410, 1416), 'easel.site.Site', 'Site', ([], {}), '()\n', (1414, 1416), False, 'from easel.site import Site\n'), ((1611, 1645), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.valid'}), '(root=TestSites.valid)\n', (1623, 1645), False, 'from easel.site.globals import Globals\n'), ((1658, 1664), 'easel.site.Site', 'Site', ([], {}), '()\n', (1662, 1664), False, 'from easel.site import Site\n'), ((1753, 1799), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.config_menu_empty'}), '(root=TestSites.config_menu_empty)\n', (1765, 1799), False, 'from easel.site.globals import Globals\n'), ((1812, 1818), 'easel.site.Site', 'Site', ([], {}), '()\n', (1816, 1818), False, 'from easel.site import Site\n'), ((1917, 1970), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.config_menu_missing_page'}), '(root=TestSites.config_menu_missing_page)\n', (1929, 1970), False, 'from easel.site.globals import Globals\n'), ((1983, 1989), 'easel.site.Site', 'Site', ([], {}), '()\n', (1987, 1989), False, 'from easel.site import Site\n'), ((2095, 2137), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.index_missing'}), '(root=TestSites.index_missing)\n', (2107, 2137), False, 'from easel.site.globals import Globals\n'), ((2150, 2156), 'easel.site.Site', 'Site', ([], {}), '()\n', (2154, 2156), False, 'from easel.site import Site\n'), ((2263, 2306), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.index_overload'}), '(root=TestSites.index_overload)\n', (2275, 2306), False, 'from easel.site.globals import Globals\n'), ((2319, 2325), 'easel.site.Site', 'Site', ([], {}), '()\n', (2323, 2325), False, 'from easel.site import Site\n'), ((2870, 2904), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.valid'}), '(root=TestSites.valid)\n', (2882, 2904), False, 'from easel.site.globals import Globals\n'), ((1427, 1447), 'pytest.raises', 'pytest.raises', (['Error'], {}), '(Error)\n', (1440, 1447), False, 'import pytest\n'), ((1478, 1498), 'pytest.raises', 'pytest.raises', (['Error'], {}), '(Error)\n', (1491, 1498), False, 'import pytest\n'), ((1528, 1548), 'pytest.raises', 'pytest.raises', (['Error'], {}), '(Error)\n', (1541, 1548), False, 'import pytest\n'), ((2000, 2030), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (2013, 2030), False, 'import pytest\n'), ((2167, 2197), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (2180, 2197), False, 'import pytest\n'), ((2336, 2366), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (2349, 2366), False, 'import pytest\n'), ((2620, 2650), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (2633, 2650), False, 'import pytest\n'), ((2741, 2771), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (2754, 2771), False, 'import pytest\n'), ((2781, 2823), 
'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': '"""/path/to/missing-site"""'}), "(root='/path/to/missing-site')\n", (2793, 2823), False, 'from easel.site.globals import Globals\n'), ((2998, 3028), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (3011, 3028), False, 'import pytest\n'), ((3038, 3084), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.missing_site_yaml'}), '(root=TestSites.missing_site_yaml)\n', (3050, 3084), False, 'from easel.site.globals import Globals\n'), ((3156, 3186), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (3169, 3186), False, 'import pytest\n'), ((3196, 3251), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.contents_directory_missing'}), '(root=TestSites.contents_directory_missing)\n', (3208, 3251), False, 'from easel.site.globals import Globals\n'), ((3320, 3350), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (3333, 3350), False, 'import pytest\n'), ((3360, 3412), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.pages_directory_missing'}), '(root=TestSites.pages_directory_missing)\n', (3372, 3412), False, 'from easel.site.globals import Globals\n'), ((3479, 3509), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (3492, 3509), False, 'import pytest\n'), ((3519, 3569), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.pages_directory_empty'}), '(root=TestSites.pages_directory_empty)\n', (3531, 3569), False, 'from easel.site.globals import Globals\n'), ((3815, 3845), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (3828, 3845), False, 'import pytest\n'), ((3855, 3908), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.config_menu_type_invalid'}), '(root=TestSites.config_menu_type_invalid)\n', (3867, 3908), False, 'from easel.site.globals import Globals\n'), ((3981, 4011), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (3994, 4011), False, 'import pytest\n'), ((4021, 4076), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.config_header_type_invalid'}), '(root=TestSites.config_header_type_invalid)\n', (4033, 4076), False, 'from easel.site.globals import Globals\n'), ((4148, 4178), 'pytest.raises', 'pytest.raises', (['SiteConfigError'], {}), '(SiteConfigError)\n', (4161, 4178), False, 'import pytest\n'), ((4188, 4242), 'easel.site.globals.Globals.init', 'Globals.init', ([], {'root': 'TestSites.config_theme_type_invalid'}), '(root=TestSites.config_theme_type_invalid)\n', (4200, 4242), False, 'from easel.site.globals import Globals\n')]
|
import json
from tqdm import tqdm
import re, string
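# Rebuilds word/stem text files and morph-map lookup tables from JSON-per-line
# morphological analyses. Each input line holds a list of sentences; each token
# is a dict with "word", "prefixes", "stem" and "suffixes" fields.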
def normalize(s):
#return re.sub('[%s]' % chars, '0', s).lower()
    return re.sub(r"\d", "0", s).lower().replace("-LRB-", "(").replace("-RRB-", ")")
seg_morph_data = "../segmentations/morph_data/"
morph_maps = {
'list_SW_TL_new' : [
('../../data/IARPA/SW/swahili-bitext-morph_sw.txt', '../segmentations/morph_SW_TL/sw.word.sw', '../segmentations/morph_SW_TL/sw.stem.sw', '../segmentations/morph_SW_TL/sw_morph_3p_map.sw', '../segmentations/morph_SW_TL/sw_morph_3_map.sw'),
('../../data/IARPA/SW/english-bitext-morph_sw.txt', '../segmentations/morph_SW_TL/sw.word.en', '../segmentations/morph_SW_TL/sw.stem.en', '../segmentations/morph_SW_TL/sw_morph_3p_map.en', '../segmentations/morph_SW_TL/sw_morph_3_map.en'),
('../../data/IARPA/TL/tagalog-bitext-morph_tl.txt', '../segmentations/morph_SW_TL/tl.word.tl', '../segmentations/morph_SW_TL/tl.stem.tl', '../segmentations/morph_SW_TL/tl_morph_3p_map.tl', '../segmentations/morph_SW_TL/tl_morph_3_map.tl'),
('../../data/IARPA/TL/english-bitext-morph_tl.txt', '../segmentations/morph_SW_TL/tl.word.en', '../segmentations/morph_SW_TL/tl.stem.en', '../segmentations/morph_SW_TL/tl_morph_3p_map.en', '../segmentations/morph_SW_TL/tl_morph_3_map.en')]
}
for list_morph in morph_maps:
if(list_morph == 'list_SW_TL_new'):
#for file_src, file_in, file_out_3p, file_out_3 in morph_maps[list_morph]:
for file_in, file_word, file_stem, file_out_3p, file_out_3 in morph_maps[list_morph]:
print("{}:\t{}\t{}\t{}\t{}\t{}".format(list_morph, file_in, file_word, file_stem, file_out_3p, file_out_3))
word_map = {}
word_map_3 = {}
#f1_src = open(file_src, "r")
f_o_word_recreate = open(file_word, "w")
f_o_stem_recreate = open(file_stem, "w")
for line in open(file_in):
a = json.loads(line)
new_recreate_stem_line = ""
new_recreate_word_line = ""
for sentence_id in range(len(a)):
for word_id in range(len(a[sentence_id])):
prefix = ""
stem = ""
postfix = ""
word = a[sentence_id][word_id]["word"]
word = normalize(word)
#word = '\'' if word == '`' else word
word_map[word] = [word]
word_map_3[word] = [word]
if(a[sentence_id][word_id]["prefixes"]):
a[sentence_id][word_id]["prefixes"] = normalize(a[sentence_id][word_id]["prefixes"])
for word_abc in a[sentence_id][word_id]["prefixes"].split('+'):
word_map[word].append(word_abc)
prefix = prefix + word_abc
word_map_3[word].append(prefix)
if(a[sentence_id][word_id]["stem"]):
stem = normalize(a[sentence_id][word_id]["stem"])
#stem = '\'' if stem == '`' else stem
word_map[word].append(stem)
word_map_3[word].append(stem)
if(stem == ""):
stem = "<EMPTY>"
if(a[sentence_id][word_id]["suffixes"]):
a[sentence_id][word_id]["suffixes"] = normalize(a[sentence_id][word_id]["suffixes"])
for word_abc in a[sentence_id][word_id]["suffixes"].split('+'):
word_map[word].append(word_abc)
postfix = postfix + word_abc
word_map_3[word].append(postfix)
new_recreate_stem_line = new_recreate_stem_line + stem + ' '
new_recreate_word_line = new_recreate_word_line + word + ' '
f_o_stem_recreate.write(new_recreate_stem_line[:-1] + "\n")
f_o_word_recreate.write(new_recreate_word_line[:-1] + "\n")
with open(file_out_3p, 'w') as f_o:
for key, value in word_map.items():
for word_morph_parts in value:
f_o.write(word_morph_parts + "\t")
f_o.write('\n')
with open(file_out_3, 'w') as f_o:
for key, value in word_map_3.items():
for word_morph_parts in value:
f_o.write(word_morph_parts + "\t")
f_o.write('\n')
f_o_stem_recreate.close()
f_o_word_recreate.close()
|
[
"re.sub",
"json.loads"
] |
[((1800, 1816), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1810, 1816), False, 'import json\n'), ((133, 154), 're.sub', 're.sub', (['"""\\\\d"""', '"""0"""', 's'], {}), "('\\\\d', '0', s)\n", (139, 154), False, 'import re, string\n')]
|
#!/usr/bin/python
# A simple python utility to summarize an fastavn run
import os
import fnmatch
import re
import sys
inp = sys.argv[1]
#print ("inp is ", inp)
for root, dirs, files in os.walk(inp):
#print ("*************** " + root + " *************")
    fm = fnmatch.filter(files, "smvexecute.log")
if (fm):
#find the full name
fpath = os.path.join(root,fm[0])
stats = [root]
header = ["procedure"]
        for line in open(fpath, "r"):
            if "AV_STATS" in line:
r = re.compile(r'.*AV_STATS](.*) : (.*)')
g = re.search(r,line)
if (g):
r1 = re.compile(r'explain.*|blocked.*|bug.count.*')
g1 = re.search(r1,line)
if not g1:
#stats.append((g.group(1),g.group(2)))
stats.append(g.group(2))
header.append(g.group(1))
        if len(stats) > 1:
print (stats)
print (header)
|
[
"fnmatch.filter",
"os.walk",
"re.search",
"os.path.join",
"re.compile"
] |
[((187, 199), 'os.walk', 'os.walk', (['inp'], {}), '(inp)\n', (194, 199), False, 'import os\n'), ((276, 314), 'fnmatch.filter', 'fnmatch.filter', (['file', '"""smvexecute.log"""'], {}), "(file, 'smvexecute.log')\n", (290, 314), False, 'import fnmatch\n'), ((392, 417), 'os.path.join', 'os.path.join', (['root', 'fm[0]'], {}), '(root, fm[0])\n', (404, 417), False, 'import os\n'), ((628, 664), 're.compile', 're.compile', (['""".*AV_STATS](.*) : (.*)"""'], {}), "('.*AV_STATS](.*) : (.*)')\n", (638, 664), False, 'import re\n'), ((702, 720), 're.search', 're.search', (['r', 'line'], {}), '(r, line)\n', (711, 720), False, 'import re\n'), ((801, 846), 're.compile', 're.compile', (['"""explain.*|blocked.*|bug.count.*"""'], {}), "('explain.*|blocked.*|bug.count.*')\n", (811, 846), False, 'import re\n'), ((889, 908), 're.search', 're.search', (['r1', 'line'], {}), '(r1, line)\n', (898, 908), False, 'import re\n')]
|
import unittest
import utils
# O(n) time. O(1) space. Bit manipulation.
class Solution:
def binaryGap(self, n):
"""
:type n: int
:rtype: int
"""
result = 0
count = 1
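        # Skip trailing zero bits, then consume the lowest set bit.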
while n > 0 and n & 1 == 0:
n >>= 1
n >>= 1
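        # Track the distance between consecutive set bits, keeping the maximum.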
while n > 0:
if n & 1 == 0:
count += 1
else:
result = max(result, count)
count = 1
n >>= 1
return result
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().binaryGap(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"utils.load_test_json"
] |
[((833, 848), 'unittest.main', 'unittest.main', ([], {}), '()\n', (846, 848), False, 'import unittest\n'), ((570, 600), 'utils.load_test_json', 'utils.load_test_json', (['__file__'], {}), '(__file__)\n', (590, 600), False, 'import utils\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execution Callbacks for Eager Mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.platform import tf_logging as logging
_DEFAULT_CALLBACK_ACTION = "raise"
_VALID_CALLBACK_ACTIONS = (None, "ignore", "print", "raise", "warn")
# TODO(cais): Consider moving this exception class to errors_impl.py.
class InfOrNanError(Exception):
"""Exception for inf and/or nan being present in tensor."""
def __init__(self,
op_type,
op_name,
output_index,
num_outputs,
value):
"""Constructor of InfOrNanError.
Args:
      op_type: Type name of the op that generated the tensor containing the
        `inf`(s) or `nan`(s) (e.g., `Div`).
op_name: Name of the op that generated the tensor with `inf`(s) or
        `nan`(s). This name is set by the client and can be `None` if it is unset.
output_index: The 0-based output index of the tensor that contains
`inf`(s) or `nan`(s).
num_outputs: Total number of outputs of the operation.
value: The tensor value that contains `inf`(s) or `nan`(s).
"""
self._op_type = op_type
self._op_name = op_name
self._output_index = output_index
self._num_outputs = num_outputs
self._value = value
self._total_count = np.size(value)
self._inf_count = np.count_nonzero(np.isinf(value))
self._nan_count = np.count_nonzero(np.isnan(value))
super(InfOrNanError, self).__init__(self._get_error_message())
def _get_error_message(self):
"""Get the error message describing this InfOrNanError object."""
name_str = (("'%s'" % self._op_name) if self._op_name is not None
else str(self._op_name))
msg = "Output %d of %d of TFE operation %s (name: %s) contains " % (
self._output_index + 1, self._num_outputs, self._op_type, name_str)
if self._inf_count and self._nan_count:
msg += "%d inf(s) and %d nan(s) " % (self._inf_count, self._nan_count)
elif self._inf_count:
msg += "%d inf(s) " % self._inf_count
else:
msg += "%d nan(s) " % self._nan_count
msg += "out of a total of %d element(s). Tensor value: %s" % (
self._total_count, self._value)
return msg
@property
def op_type(self):
return self._op_type
@property
def op_name(self):
return self._op_name
@property
def output_index(self):
return self._output_index
@property
def num_outputs(self):
return self._num_outputs
@property
def value(self):
return self._value
def inf_nan_callback(op_type,
op_name,
attrs,
inputs,
outputs,
check_inf=True,
check_nan=True,
action=_DEFAULT_CALLBACK_ACTION):
"""An execution callback that checks for `inf`s and `nan`s in output tensors.
This callback can be used with `tfe.add_execute_callback` to check for invalid
numeric values. E.g.,
```python
tfe.add_execute_callback(tfe.inf_nan_callback)
```
Args:
op_type: Name of the TFE operation type (e.g., `MatMul`).
    op_name: Name of the TFE operation. This name is set by the client and can
      be `None` if it is unset.
attrs: Attributes of the TFE operation, as a tuple of alternating attribute
names and attribute values.
inputs: The `list` of input tensors to the operation, currently unused by
this callback.
outputs: The `list` of output tensors from the operation, checked by this
callback for `inf` and `nan` values.
check_inf: (`bool`) Whether this callback should check for `inf` values in
the output tensor values.
check_nan: (`bool`) Whether this callback should check for `nan` values in
the output tensor values.
action: (`str`) Action to be taken by the callback when `inf` or `nan`
values are detected. Possible values {"raise", "warn", "print"}
`"raise"`: Raise a `InfOrNanError`.
`"warn"`: Log a warning using `tf.logging.warn`.
`"print"`: Print a message to `sys.stdout`.
Raises:
InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
`action` is `"raise"`.
ValueError: iff the value of `action` is invalid.
"""
del attrs, inputs # Not used.
ctx = context.get_default_context()
for index, output in enumerate(outputs):
if not output.dtype.is_numpy_compatible:
continue
numpy_dtype = output.dtype.as_numpy_dtype
if (np.issubdtype(numpy_dtype, np.float) or
np.issubdtype(numpy_dtype, np.complex) or
np.issubdtype(numpy_dtype, np.integer)):
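      # Run the CheckNumerics op on the output tensor; a non-OK status from the
      # C API indicates that inf and/or nan values are present.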
try:
check_numerics_op_attrs = (
"message", "Eager-mode inf/nan check",
"T", outputs[0].dtype.as_datatype_enum)
# TODO(cais): Consider moving this into execute.py.
# pylint: disable=protected-access
pywrap_tensorflow.TFE_Py_Execute(
ctx._handle, output.device, "CheckNumerics", [output],
check_numerics_op_attrs, 1)
# pylint: enable=protected-access
except core._NotOkStatusException: # pylint: disable=protected-access
value = output.numpy()
inf_detected = np.any(np.isinf(value)) and check_inf
nan_detected = np.any(np.isnan(value)) and check_nan
if not inf_detected and not nan_detected:
continue
error = InfOrNanError(op_type, op_name, index, len(outputs), value)
if action == "print":
print("Warning: %s" % str(error))
elif action == "warn":
logging.warn(str(error))
elif action == "raise":
raise error
else:
raise ValueError(
"Invalid action for inf_nan_callback: %s. Valid actions are: "
"{print | warn | raise}" % action)
def inf_callback(op_type,
op_name,
attrs,
inputs,
outputs,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `inf`s only."""
inf_nan_callback(
op_type, op_name, attrs, inputs, outputs, check_inf=True, check_nan=False,
action=action)
def nan_callback(op_type,
op_name,
attrs,
inputs,
outputs,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `nan`s only."""
inf_nan_callback(
op_type, op_name, attrs, inputs, outputs, check_inf=False, check_nan=True,
action=action)
def add_execution_callback(callback):
"""Add an execution callback to the default eager context.
An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added. To clear all execution callbacks that have been added, use
`clear_execution_callbacks()`.
Example:
```python
def print_even_callback(op_type, op_name, attrs, inputs, outputs):
# A callback that prints only the even output values.
if outputs[0].numpy() % 2 == 0:
print("Even output from %s: %s" % (op_name or op_type, outputs))
tfe.add_execution_callback(print_even_callback)
x = tf.pow(2.0, 3.0) - 3.0
y = tf.multiply(x, tf.add(1.0, 5.0))
# When the line above is run, you will see all intermediate outputs that are
# even numbers printed to the console.
tfe.clear_execution_callbacks()
```
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
      `op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute name and attribute value.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
context.get_default_context().add_post_execution_callback(callback)
def clear_execution_callbacks():
"""Clear all execution callbacks from the default eager context."""
context.get_default_context().clear_post_execution_callbacks()
def seterr(inf_or_nan=None):
"""Set how abnormal conditions are handled by the default eager context.
Example:
```python
tfe.seterr(inf_or_nan="raise")
a = tf.constant(10.0)
b = tf.constant(0.0)
try:
c = a / b # <-- Raises InfOrNanError.
except Exception as e:
print("Caught Exception: %s" % e)
tfe.seterr(inf_or_nan="ignore")
c = a / b # <-- Does NOT raise exception anymore.
```
Args:
inf_or_nan: Set action for infinity (`inf`) and NaN (`nan`) values.
Possible values: `{"ignore", "print", "raise", "warn"}`.
`"ignore"`: take no action when `inf` values appear.
`"print"`: print a warning to `stdout`.
`"raise"`: raise an `InfOrNanError`.
`"warn"`: print a warning using `tf.logging.warn`.
A value of `None` leads to no change in the action of the condition.
Returns:
A dictionary of old actions.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
if inf_or_nan not in _VALID_CALLBACK_ACTIONS:
raise ValueError(
"Invalid action value for inf_or_nan: %s. "
"Valid actions are %s." % (inf_or_nan, _VALID_CALLBACK_ACTIONS))
old_settings = {"inf_or_nan": "ignore"}
default_context = context.get_default_context()
carryover_callbacks = []
for callback in default_context.post_execution_callbacks:
# Check whether the callback is inf_nan_callback or a partial object of
# inf_nan_callback.
if (callback == inf_nan_callback or
isinstance(callback, functools.partial) and
callback.func == inf_nan_callback):
if callback == inf_nan_callback:
old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
else:
old_settings["inf_or_nan"] = callback.keywords.get(
"action", _DEFAULT_CALLBACK_ACTION)
elif inf_or_nan is not None:
carryover_callbacks.append(callback)
if inf_or_nan is not None:
default_context.clear_post_execution_callbacks()
for callback in carryover_callbacks:
default_context.add_post_execution_callback(callback)
if inf_or_nan != "ignore":
default_context.add_post_execution_callback(
functools.partial(inf_nan_callback, action=inf_or_nan))
return old_settings
|
[
"functools.partial",
"numpy.size",
"tensorflow.python.eager.context.get_default_context",
"tensorflow.python.pywrap_tensorflow.TFE_Py_Execute",
"numpy.isinf",
"numpy.isnan",
"numpy.issubdtype"
] |
[((5244, 5273), 'tensorflow.python.eager.context.get_default_context', 'context.get_default_context', ([], {}), '()\n', (5271, 5273), False, 'from tensorflow.python.eager import context\n'), ((10708, 10737), 'tensorflow.python.eager.context.get_default_context', 'context.get_default_context', ([], {}), '()\n', (10735, 10737), False, 'from tensorflow.python.eager import context\n'), ((2236, 2250), 'numpy.size', 'np.size', (['value'], {}), '(value)\n', (2243, 2250), True, 'import numpy as np\n'), ((2290, 2305), 'numpy.isinf', 'np.isinf', (['value'], {}), '(value)\n', (2298, 2305), True, 'import numpy as np\n'), ((2346, 2361), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (2354, 2361), True, 'import numpy as np\n'), ((5433, 5469), 'numpy.issubdtype', 'np.issubdtype', (['numpy_dtype', 'np.float'], {}), '(numpy_dtype, np.float)\n', (5446, 5469), True, 'import numpy as np\n'), ((5481, 5519), 'numpy.issubdtype', 'np.issubdtype', (['numpy_dtype', 'np.complex'], {}), '(numpy_dtype, np.complex)\n', (5494, 5519), True, 'import numpy as np\n'), ((5531, 5569), 'numpy.issubdtype', 'np.issubdtype', (['numpy_dtype', 'np.integer'], {}), '(numpy_dtype, np.integer)\n', (5544, 5569), True, 'import numpy as np\n'), ((9243, 9272), 'tensorflow.python.eager.context.get_default_context', 'context.get_default_context', ([], {}), '()\n', (9270, 9272), False, 'from tensorflow.python.eager import context\n'), ((9418, 9447), 'tensorflow.python.eager.context.get_default_context', 'context.get_default_context', ([], {}), '()\n', (9445, 9447), False, 'from tensorflow.python.eager import context\n'), ((5833, 5952), 'tensorflow.python.pywrap_tensorflow.TFE_Py_Execute', 'pywrap_tensorflow.TFE_Py_Execute', (['ctx._handle', 'output.device', '"""CheckNumerics"""', '[output]', 'check_numerics_op_attrs', '(1)'], {}), "(ctx._handle, output.device,\n 'CheckNumerics', [output], check_numerics_op_attrs, 1)\n", (5865, 5952), False, 'from tensorflow.python import pywrap_tensorflow\n'), ((11635, 11689), 'functools.partial', 'functools.partial', (['inf_nan_callback'], {'action': 'inf_or_nan'}), '(inf_nan_callback, action=inf_or_nan)\n', (11652, 11689), False, 'import functools\n'), ((6154, 6169), 'numpy.isinf', 'np.isinf', (['value'], {}), '(value)\n', (6162, 6169), True, 'import numpy as np\n'), ((6215, 6230), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (6223, 6230), True, 'import numpy as np\n')]
|
import numpy as np
from .distribution import NoDistribution
__all__ = ["Simulator"]
class Simulator(NoDistribution):
def __init__(self, function, *args, **kwargs):
"""
This class stores a function defined by the user in python language.
function : function
Simulation function defined by the user.
*args and **kwargs :
            Arguments and keyword arguments that the function takes.
"""
self.function = function
observed = self.data
super().__init__(shape=np.prod(observed.shape), dtype=observed.dtype, *args, **kwargs)
def random(self, point=None, size=None):
"""
Draw random values from Simulator
Parameters
----------
point : dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size : int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
raise NotImplementedError("Not implemented yet")
def _repr_latex_(self, name=None, dist=None):
if dist is None:
dist = self
name = r"\text{%s}" % name
function = dist.function
params = dist.parameters
sum_stat = dist.sum_stat
return r"${} \sim \text{{Simulator}}(\mathit{{function}}={},~\mathit{{parameters}}={},~\mathit{{summary statistics}}={})$".format(
name, function, params, sum_stat
)
|
[
"numpy.prod"
] |
[((556, 579), 'numpy.prod', 'np.prod', (['observed.shape'], {}), '(observed.shape)\n', (563, 579), True, 'import numpy as np\n')]
|
import os
import time
from plyer import notification
import caretakers.time_moniter as time_moniter
class caretaker:
def __init__(self) -> None:
print("Instance created: Eyes Caretaker is running")
print(
f"Starting Time : {self.formatted_date_time(time_moniter.time_moniter.get_current_time(self))}")
pass
# Format given datetime as dd-mm-yyyy hh:mm:ss
# return datetime.strftime(datetime.now(), "%d-%m-%Y %H:%M:%S")
    def formatted_date_time(self, current_time) -> str:
return current_time.strftime("%d-%m-%Y %H:%M:%S")
def notify(self, title: str, message: str) -> None:
notification.notify(
title=title,
message=message,
app_name="Eyes Caretaker",
app_icon=os.path.realpath(".") + "/icons/eyes.png",
timeout=5, # hide notification after 5 seconds
)
def run(self) -> None:
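        # Every 20 minutes, remind the user to look away, then reset the shared timer.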
while True:
self.notify("Eyes Caretaker", "Watch away for 20 seconds\n\
I will be back after 20 minuts.")
time.sleep(60 * 20) # 20 min
time_moniter.time_moniter().reset_time() # reset time
print(
f"Time counter is reseted at \
{time_moniter.time_moniter.start_t(self)}")
|
[
"caretakers.time_moniter.time_moniter",
"caretakers.time_moniter.time_moniter.start_t",
"os.path.realpath",
"time.sleep",
"caretakers.time_moniter.time_moniter.get_current_time"
] |
[((1077, 1096), 'time.sleep', 'time.sleep', (['(60 * 20)'], {}), '(60 * 20)\n', (1087, 1096), False, 'import time\n'), ((798, 819), 'os.path.realpath', 'os.path.realpath', (['"""."""'], {}), "('.')\n", (814, 819), False, 'import os\n'), ((1119, 1146), 'caretakers.time_moniter.time_moniter', 'time_moniter.time_moniter', ([], {}), '()\n', (1144, 1146), True, 'import caretakers.time_moniter as time_moniter\n'), ((285, 333), 'caretakers.time_moniter.time_moniter.get_current_time', 'time_moniter.time_moniter.get_current_time', (['self'], {}), '(self)\n', (327, 333), True, 'import caretakers.time_moniter as time_moniter\n'), ((1278, 1317), 'caretakers.time_moniter.time_moniter.start_t', 'time_moniter.time_moniter.start_t', (['self'], {}), '(self)\n', (1311, 1317), True, 'import caretakers.time_moniter as time_moniter\n')]
|
import os
import subprocess
import sys
import time
import pytest
@pytest.fixture
def mqtt_server():
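    """Start a local mosquitto broker for the duration of a test and tear it down afterwards."""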
print('Starting MQTT server')
mosquitto = subprocess.Popen('mosquitto')
time.sleep(1) # Let's wait a bit before it's started
yield mosquitto
print('Tearing down MQTT server')
mosquitto.kill()
try:
# This environment variable is used by Travis CI to define which
# dependencies are installed. Pytest uses it to define which modules
# are checked.
requirements = os.environ['SNIPSKIT_REQUIREMENTS']
if requirements == 'common':
collect_ignore = ['mqtt', 'hermes']
elif requirements == 'mqtt':
collect_ignore = ['hermes']
elif requirements == 'hermes':
collect_ignore = ['mqtt']
elif requirements == 'all':
# Run all the tests
pass
else:
        sys.exit('Unknown value for SNIPSKIT_REQUIREMENTS environment variable: {}'.format(requirements))
except KeyError:
# Run all the tests
pass
|
[
"subprocess.Popen",
"time.sleep"
] |
[((153, 182), 'subprocess.Popen', 'subprocess.Popen', (['"""mosquitto"""'], {}), "('mosquitto')\n", (169, 182), False, 'import subprocess\n'), ((187, 200), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (197, 200), False, 'import time\n')]
|
import copy
import os
import random
import re
import time
from os.path import join
from threading import Thread
import cv2
import gym
import numpy as np
from filelock import FileLock, Timeout
from gym.utils import seeding
from vizdoom.vizdoom import ScreenResolution, DoomGame, Mode, AutomapMode
from algorithms.utils.spaces.discretized import Discretized
from utils.utils import log, project_tmp_dir
def doom_lock_file(max_parallel):
"""
Doom instances tend to have problems starting when a lot of them are initialized in parallel.
This is not a problem during normal execution once the envs are initialized.
The "sweet spot" for the number of envs that can be initialized in parallel is about 5-10.
Here we use file locking mechanism to ensure that only a limited amount of envs are being initialized at the same
time.
This tends to be more of a problem for multiplayer envs.
This also has an advantage of working across completely independent process groups, e.g. different experiments.
"""
lock_filename = f'doom_{random.randrange(0, max_parallel):03d}.lockfile'
tmp_dir = project_tmp_dir()
lock_path = join(tmp_dir, lock_filename)
return lock_path
def key_to_action_default(key):
"""
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_WEAPON1
SELECT_WEAPON2
SELECT_WEAPON3
SELECT_WEAPON4
SELECT_WEAPON5
SELECT_WEAPON6
SELECT_WEAPON7
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
from pynput.keyboard import Key
# health gathering
action_table = {
Key.left: 0,
Key.right: 1,
Key.up: 2,
Key.down: 3,
}
# action_table = {
# Key.up: 0,
# Key.down: 1,
# Key.alt: 6,
# Key.ctrl: 11,
# Key.shift: 12,
# Key.space: 13,
# Key.right: 'turn_right',
# Key.left: 'turn_left',
# }
return action_table.get(key, None)
class VizdoomEnv(gym.Env):
def __init__(self,
action_space,
config_file,
coord_limits=None,
max_histogram_length=200,
show_automap=False,
skip_frames=1,
async_mode=False,
record_to=None):
self.initialized = False
# essential game data
self.game = None
self.state = None
self.curr_seed = 0
self.rng = None
self.skip_frames = skip_frames
self.async_mode = async_mode
# optional - for topdown view rendering and visitation heatmaps
self.show_automap = show_automap
self.coord_limits = coord_limits
        # can be adjusted after the environment is created (but before any reset() call) via an observation space wrapper
self.screen_w, self.screen_h, self.channels = 640, 480, 3
self.screen_resolution = ScreenResolution.RES_640X480
self.calc_observation_space()
self.black_screen = None
# provided as a part of environment definition, since these depend on the scenario and
# can be quite complex multi-discrete spaces
self.action_space = action_space
self.composite_action_space = hasattr(self.action_space, 'spaces')
self.delta_actions_scaling_factor = 7.5
scenarios_dir = join(os.path.dirname(__file__), 'scenarios')
self.config_path = join(scenarios_dir, config_file)
self.variable_indices = self._parse_variable_indices(self.config_path)
# only created if we call render() method
self.viewer = None
# record full episodes using VizDoom recording functionality
self.record_to = record_to
self.is_multiplayer = False # overridden in derived classes
# (optional) histogram to track positional coverage
# do not pass coord_limits if you don't need this, to avoid extra calculation
self.max_histogram_length = max_histogram_length
self.current_histogram, self.previous_histogram = None, None
if self.coord_limits:
x = (self.coord_limits[2] - self.coord_limits[0])
y = (self.coord_limits[3] - self.coord_limits[1])
if x > y:
len_x = self.max_histogram_length
len_y = int((y / x) * self.max_histogram_length)
else:
len_x = int((x / y) * self.max_histogram_length)
len_y = self.max_histogram_length
self.current_histogram = np.zeros((len_x, len_y), dtype=np.int32)
self.previous_histogram = np.zeros_like(self.current_histogram)
# helpers for human play with pynput keyboard input
self._terminate = False
self._current_actions = []
self._actions_flattened = None
self._prev_info = None
self._last_episode_info = None
self._num_episodes = 0
self.mode = 'algo'
self.seed()
def seed(self, seed=None):
self.curr_seed = seeding.hash_seed(seed, max_bytes=4)
self.rng, _ = seeding.np_random(seed=self.curr_seed)
return [self.curr_seed, self.rng]
def calc_observation_space(self):
self.observation_space = gym.spaces.Box(0, 255, (self.screen_h, self.screen_w, self.channels), dtype=np.uint8)
def _set_game_mode(self, mode):
if mode == 'replay':
self.game.set_mode(Mode.PLAYER)
else:
if self.async_mode:
log.info('Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster')
self.game.set_mode(Mode.ASYNC_PLAYER)
else:
self.game.set_mode(Mode.PLAYER)
def _create_doom_game(self, mode):
self.game = DoomGame()
self.game.load_config(self.config_path)
self.game.set_screen_resolution(self.screen_resolution)
self.game.set_seed(self.rng.randint(0, 2**32 - 1))
if mode == 'algo':
self.game.set_window_visible(False)
elif mode == 'human' or mode == 'replay':
self.game.add_game_args('+freelook 1')
self.game.set_window_visible(True)
else:
raise Exception('Unsupported mode')
self._set_game_mode(mode)
def _game_init(self, with_locking=True, max_parallel=10):
lock_file = lock = None
if with_locking:
lock_file = doom_lock_file(max_parallel)
lock = FileLock(lock_file)
init_attempt = 0
while True:
init_attempt += 1
try:
if with_locking:
with lock.acquire(timeout=20):
self.game.init()
else:
self.game.init()
break
except Timeout:
if with_locking:
log.debug(
'Another process currently holds the lock %s, attempt: %d', lock_file, init_attempt,
)
except Exception as exc:
log.warning('VizDoom game.init() threw an exception %r. Terminate process...', exc)
from envs.env_utils import EnvCriticalError
raise EnvCriticalError()
def initialize(self):
self._create_doom_game(self.mode)
# (optional) top-down view provided by the game engine
if self.show_automap:
self.game.set_automap_buffer_enabled(True)
self.game.set_automap_mode(AutomapMode.OBJECTS)
self.game.set_automap_rotate(False)
self.game.set_automap_render_textures(False)
# self.game.add_game_args("+am_restorecolors")
# self.game.add_game_args("+am_followplayer 1")
background_color = 'ffffff'
self.game.add_game_args('+viz_am_center 1')
self.game.add_game_args('+am_backcolor ' + background_color)
self.game.add_game_args('+am_tswallcolor dddddd')
# self.game.add_game_args("+am_showthingsprites 0")
self.game.add_game_args('+am_yourcolor ' + background_color)
self.game.add_game_args('+am_cheat 0')
self.game.add_game_args('+am_thingcolor 0000ff') # player color
self.game.add_game_args('+am_thingcolor_item 00ff00')
# self.game.add_game_args("+am_thingcolor_citem 00ff00")
self._game_init()
self.initialized = True
def _ensure_initialized(self):
if not self.initialized:
self.initialize()
@staticmethod
def _parse_variable_indices(config):
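        """Map each game variable name listed in the scenario config to its index in state.game_variables."""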
with open(config, 'r') as config_file:
lines = config_file.readlines()
lines = [l.strip() for l in lines]
variable_indices = {}
for line in lines:
if line.startswith('#'):
continue # comment
variables_syntax = r'available_game_variables[\s]*=[\s]*\{(.*)\}'
match = re.match(variables_syntax, line)
if match is not None:
variables_str = match.groups()[0]
variables_str = variables_str.strip()
variables = variables_str.split(' ')
for i, variable in enumerate(variables):
variable_indices[variable] = i
break
return variable_indices
def _black_screen(self):
if self.black_screen is None:
self.black_screen = np.zeros(self.observation_space.shape, dtype=np.uint8)
return self.black_screen
def _game_variables_dict(self, state):
game_variables = state.game_variables
variables = {}
for variable, idx in self.variable_indices.items():
variables[variable] = game_variables[idx]
return variables
def demo_path(self, episode_idx):
demo_name = f'e{episode_idx:03d}.lmp'
demo_path = join(self.record_to, demo_name)
demo_path = os.path.normpath(demo_path)
return demo_path
def reset(self):
self._ensure_initialized()
if self.record_to is not None and not self.is_multiplayer:
# does not work in multiplayer (uses different mechanism)
if not os.path.exists(self.record_to):
os.makedirs(self.record_to)
demo_path = self.demo_path(self._num_episodes)
log.warning('Recording episode demo to %s', demo_path)
self.game.new_episode(demo_path)
else:
if self._num_episodes > 0:
# no demo recording (default)
self.game.new_episode()
self.state = self.game.get_state()
img = None
try:
img = self.state.screen_buffer
except AttributeError:
# sometimes Doom does not return screen buffer at all??? Rare bug
pass
if img is None:
log.error('Game returned None screen buffer! This is not supposed to happen!')
img = self._black_screen()
# Swap current and previous histogram
if self.current_histogram is not None and self.previous_histogram is not None:
swap = self.current_histogram
self.current_histogram = self.previous_histogram
self.previous_histogram = swap
self.current_histogram.fill(0)
self._actions_flattened = None
self._last_episode_info = copy.deepcopy(self._prev_info)
self._prev_info = None
self._num_episodes += 1
return np.transpose(img, (1, 2, 0))
def _convert_actions(self, actions):
"""Convert actions from gym action space to the action space expected by Doom game."""
if self.composite_action_space:
# composite action space with multiple subspaces
spaces = self.action_space.spaces
else:
# simple action space, e.g. Discrete. We still treat it like composite of length 1
spaces = (self.action_space, )
actions = (actions, )
actions_flattened = []
for i, action in enumerate(actions):
if isinstance(spaces[i], Discretized):
# discretized continuous action
# check discretized first because it's a subclass of gym.spaces.Discrete
# the order of if clauses here matters! DON'T CHANGE THE ORDER OF IFS!
continuous_action = spaces[i].to_continuous(action)
actions_flattened.append(continuous_action)
elif isinstance(spaces[i], gym.spaces.Discrete):
# standard discrete action
num_non_idle_actions = spaces[i].n - 1
action_one_hot = np.zeros(num_non_idle_actions, dtype=np.uint8)
if action > 0:
action_one_hot[action - 1] = 1 # 0th action in each subspace is a no-op
actions_flattened.extend(action_one_hot)
elif isinstance(spaces[i], gym.spaces.Box):
# continuous action
actions_flattened.extend(list(action * self.delta_actions_scaling_factor))
else:
raise NotImplementedError(f'Action subspace type {type(spaces[i])} is not supported!')
return actions_flattened
def _vizdoom_variables_bug_workaround(self, info, done):
"""Some variables don't get reset to zero on game.new_episode(). This fixes it (also check overflow?)."""
if done and 'DAMAGECOUNT' in info:
log.info('DAMAGECOUNT value on done: %r', info.get('DAMAGECOUNT'))
if self._last_episode_info is not None:
bugged_vars = ['DEATHCOUNT', 'HITCOUNT', 'DAMAGECOUNT']
for v in bugged_vars:
if v in info:
info[v] -= self._last_episode_info.get(v, 0)
def _process_game_step(self, state, done, info):
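        """Build the observation and info dict; on the terminal frame fall back to the previous frame's info."""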
if not done:
observation = np.transpose(state.screen_buffer, (1, 2, 0))
game_variables = self._game_variables_dict(state)
info.update(self.get_info(game_variables))
self._update_histogram(info)
self._prev_info = copy.deepcopy(info)
else:
observation = self._black_screen()
# when done=True Doom does not allow us to call get_info, so we provide info from the last frame
info.update(self._prev_info)
self._vizdoom_variables_bug_workaround(info, done)
return observation, done, info
def step(self, actions):
"""
Action is either a single value (discrete, one-hot), or a tuple with an action for each of the
discrete action subspaces.
"""
if self._actions_flattened is not None:
# provided externally, e.g. via human play
actions_flattened = self._actions_flattened
self._actions_flattened = None
else:
actions_flattened = self._convert_actions(actions)
default_info = {'num_frames': self.skip_frames}
reward = self.game.make_action(actions_flattened, self.skip_frames)
state = self.game.get_state()
done = self.game.is_episode_finished()
observation, done, info = self._process_game_step(state, done, default_info)
return observation, reward, done, info
def render(self, mode='human'):
try:
img = self.game.get_state().screen_buffer
img = np.transpose(img, [1, 2, 0])
if mode == 'rgb_array':
return img
h, w = img.shape[:2]
render_w = 1280
if w < render_w:
render_h = int(render_w * h / w)
img = cv2.resize(img, (render_w, render_h))
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer(maxwidth=render_w)
self.viewer.imshow(img)
return img
except AttributeError:
return None
def close(self):
try:
if self.game is not None:
self.game.close()
except RuntimeError as exc:
log.warning('Runtime error in VizDoom game close(): %r', exc)
if self.viewer is not None:
self.viewer.close()
def get_info(self, variables=None):
if variables is None:
variables = self._game_variables_dict(self.game.get_state())
info_dict = {'pos': self.get_positions(variables)}
info_dict.update(variables)
return info_dict
def get_info_all(self, variables=None):
if variables is None:
variables = self._game_variables_dict(self.game.get_state())
info = self.get_info(variables)
if self.previous_histogram is not None:
info['previous_histogram'] = self.previous_histogram
return info
def get_positions(self, variables):
return self._get_positions(variables)
@staticmethod
def _get_positions(variables):
have_coord_data = True
required_vars = ['POSITION_X', 'POSITION_Y', 'ANGLE']
for required_var in required_vars:
if required_var not in variables:
have_coord_data = False
break
x = y = a = np.nan
if have_coord_data:
x = variables['POSITION_X']
y = variables['POSITION_Y']
a = variables['ANGLE']
return {'agent_x': x, 'agent_y': y, 'agent_a': a}
def get_automap_buffer(self):
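        """Return the automap (top-down view) buffer as a channels-last array, or None if the episode is over."""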
if self.game.is_episode_finished():
return None
state = self.game.get_state()
map_ = state.automap_buffer
map_ = np.swapaxes(map_, 0, 2)
map_ = np.swapaxes(map_, 0, 1)
return map_
def _update_histogram(self, info, eps=1e-8):
if self.current_histogram is None:
return
agent_x, agent_y = info['pos']['agent_x'], info['pos']['agent_y']
# Get agent coordinates normalized to [0, 1]
dx = (agent_x - self.coord_limits[0]) / (self.coord_limits[2] - self.coord_limits[0])
dy = (agent_y - self.coord_limits[1]) / (self.coord_limits[3] - self.coord_limits[1])
# Rescale coordinates to histogram dimensions
# Subtract eps to exclude upper bound of dx, dy
dx = int((dx - eps) * self.current_histogram.shape[0])
dy = int((dy - eps) * self.current_histogram.shape[1])
self.current_histogram[dx, dy] += 1
def _key_to_action(self, key):
if hasattr(self.action_space, 'key_to_action'):
return self.action_space.key_to_action(key)
else:
return key_to_action_default(key)
def _keyboard_on_press(self, key):
from pynput.keyboard import Key
if key == Key.esc:
self._terminate = True
return False
action = self._key_to_action(key)
if action is not None:
if action not in self._current_actions:
self._current_actions.append(action)
def _keyboard_on_release(self, key):
action = self._key_to_action(key)
if action is not None:
if action in self._current_actions:
self._current_actions.remove(action)
# noinspection PyProtectedMember
@staticmethod
def play_human_mode(env, skip_frames=1, num_episodes=3, num_actions=None):
from pynput.keyboard import Listener
doom = env.unwrapped
doom.skip_frames = 1 # handled by this script separately
# noinspection PyProtectedMember
def start_listener():
with Listener(on_press=doom._keyboard_on_press, on_release=doom._keyboard_on_release) as listener:
listener.join()
listener_thread = Thread(target=start_listener)
listener_thread.start()
for episode in range(num_episodes):
doom.mode = 'human'
env.reset()
last_render_time = time.time()
time_between_frames = 1.0 / 35.0
total_rew = 0.0
while not doom.game.is_episode_finished() and not doom._terminate:
num_actions = 14 if num_actions is None else num_actions
turn_delta_action_idx = num_actions - 1
actions = [0] * num_actions
for action in doom._current_actions:
if isinstance(action, int):
actions[action] = 1 # 1 for buttons currently pressed, 0 otherwise
else:
if action == 'turn_left':
actions[turn_delta_action_idx] = -doom.delta_actions_scaling_factor
elif action == 'turn_right':
actions[turn_delta_action_idx] = doom.delta_actions_scaling_factor
for frame in range(skip_frames):
doom._actions_flattened = actions
_, rew, _, _ = env.step(actions)
new_total_rew = total_rew + rew
if new_total_rew != total_rew:
log.info('Reward: %.3f, total: %.3f', rew, new_total_rew)
total_rew = new_total_rew
state = doom.game.get_state()
verbose = True
if state is not None and verbose:
info = doom.get_info()
print(
'Health:', info['HEALTH'],
# 'Weapon:', info['SELECTED_WEAPON'],
# 'ready:', info['ATTACK_READY'],
# 'ammo:', info['SELECTED_WEAPON_AMMO'],
# 'pc:', info['PLAYER_COUNT'],
# 'dmg:', info['DAMAGECOUNT'],
)
time_since_last_render = time.time() - last_render_time
time_wait = time_between_frames - time_since_last_render
if doom.show_automap and state.automap_buffer is not None:
map_ = state.automap_buffer
map_ = np.swapaxes(map_, 0, 2)
map_ = np.swapaxes(map_, 0, 1)
cv2.imshow('ViZDoom Automap Buffer', map_)
if time_wait > 0:
cv2.waitKey(int(time_wait) * 1000)
else:
if time_wait > 0:
time.sleep(time_wait)
last_render_time = time.time()
if doom.show_automap:
cv2.destroyAllWindows()
log.debug('Press ESC to exit...')
listener_thread.join()
# noinspection PyProtectedMember
@staticmethod
def replay(env, rec_path):
doom = env.unwrapped
doom.mode = 'replay'
doom._ensure_initialized()
doom.game.replay_episode(rec_path)
episode_reward = 0
start = time.time()
while not doom.game.is_episode_finished():
doom.game.advance_action()
r = doom.game.get_last_reward()
episode_reward += r
log.info('Episode reward: %.3f, time so far: %.1f s', episode_reward, time.time() - start)
log.info('Finishing replay')
doom.close()
|
[
"vizdoom.vizdoom.DoomGame",
"utils.utils.log.warning",
"utils.utils.log.info",
"cv2.imshow",
"os.path.join",
"gym.utils.seeding.np_random",
"numpy.zeros_like",
"os.path.dirname",
"numpy.transpose",
"os.path.exists",
"utils.utils.log.debug",
"gym.utils.seeding.hash_seed",
"os.path.normpath",
"numpy.swapaxes",
"cv2.destroyAllWindows",
"cv2.resize",
"gym.envs.classic_control.rendering.SimpleImageViewer",
"threading.Thread",
"copy.deepcopy",
"envs.env_utils.EnvCriticalError",
"pynput.keyboard.Listener",
"re.match",
"time.sleep",
"utils.utils.project_tmp_dir",
"os.makedirs",
"filelock.FileLock",
"numpy.zeros",
"time.time",
"random.randrange",
"gym.spaces.Box",
"utils.utils.log.error"
] |
[((1128, 1145), 'utils.utils.project_tmp_dir', 'project_tmp_dir', ([], {}), '()\n', (1143, 1145), False, 'from utils.utils import log, project_tmp_dir\n'), ((1162, 1190), 'os.path.join', 'join', (['tmp_dir', 'lock_filename'], {}), '(tmp_dir, lock_filename)\n', (1166, 1190), False, 'from os.path import join\n'), ((3471, 3503), 'os.path.join', 'join', (['scenarios_dir', 'config_file'], {}), '(scenarios_dir, config_file)\n', (3475, 3503), False, 'from os.path import join\n'), ((5063, 5099), 'gym.utils.seeding.hash_seed', 'seeding.hash_seed', (['seed'], {'max_bytes': '(4)'}), '(seed, max_bytes=4)\n', (5080, 5099), False, 'from gym.utils import seeding\n'), ((5122, 5160), 'gym.utils.seeding.np_random', 'seeding.np_random', ([], {'seed': 'self.curr_seed'}), '(seed=self.curr_seed)\n', (5139, 5160), False, 'from gym.utils import seeding\n'), ((5275, 5365), 'gym.spaces.Box', 'gym.spaces.Box', (['(0)', '(255)', '(self.screen_h, self.screen_w, self.channels)'], {'dtype': 'np.uint8'}), '(0, 255, (self.screen_h, self.screen_w, self.channels), dtype\n =np.uint8)\n', (5289, 5365), False, 'import gym\n'), ((5813, 5823), 'vizdoom.vizdoom.DoomGame', 'DoomGame', ([], {}), '()\n', (5821, 5823), False, 'from vizdoom.vizdoom import ScreenResolution, DoomGame, Mode, AutomapMode\n'), ((9939, 9970), 'os.path.join', 'join', (['self.record_to', 'demo_name'], {}), '(self.record_to, demo_name)\n', (9943, 9970), False, 'from os.path import join\n'), ((9991, 10018), 'os.path.normpath', 'os.path.normpath', (['demo_path'], {}), '(demo_path)\n', (10007, 10018), False, 'import os\n'), ((11442, 11472), 'copy.deepcopy', 'copy.deepcopy', (['self._prev_info'], {}), '(self._prev_info)\n', (11455, 11472), False, 'import copy\n'), ((11553, 11581), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (11565, 11581), True, 'import numpy as np\n'), ((17710, 17733), 'numpy.swapaxes', 'np.swapaxes', (['map_', '(0)', '(2)'], {}), '(map_, 0, 2)\n', (17721, 17733), True, 'import numpy as np\n'), ((17749, 17772), 'numpy.swapaxes', 'np.swapaxes', (['map_', '(0)', '(1)'], {}), '(map_, 0, 1)\n', (17760, 17772), True, 'import numpy as np\n'), ((19791, 19820), 'threading.Thread', 'Thread', ([], {'target': 'start_listener'}), '(target=start_listener)\n', (19797, 19820), False, 'from threading import Thread\n'), ((22663, 22696), 'utils.utils.log.debug', 'log.debug', (['"""Press ESC to exit..."""'], {}), "('Press ESC to exit...')\n", (22672, 22696), False, 'from utils.utils import log, project_tmp_dir\n'), ((22995, 23006), 'time.time', 'time.time', ([], {}), '()\n', (23004, 23006), False, 'import time\n'), ((23286, 23314), 'utils.utils.log.info', 'log.info', (['"""Finishing replay"""'], {}), "('Finishing replay')\n", (23294, 23314), False, 'from utils.utils import log, project_tmp_dir\n'), ((1064, 1097), 'random.randrange', 'random.randrange', (['(0)', 'max_parallel'], {}), '(0, max_parallel)\n', (1080, 1097), False, 'import random\n'), ((3404, 3429), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3419, 3429), False, 'import os\n'), ((4570, 4610), 'numpy.zeros', 'np.zeros', (['(len_x, len_y)'], {'dtype': 'np.int32'}), '((len_x, len_y), dtype=np.int32)\n', (4578, 4610), True, 'import numpy as np\n'), ((4649, 4686), 'numpy.zeros_like', 'np.zeros_like', (['self.current_histogram'], {}), '(self.current_histogram)\n', (4662, 4686), True, 'import numpy as np\n'), ((6509, 6528), 'filelock.FileLock', 'FileLock', (['lock_file'], {}), '(lock_file)\n', (6517, 6528), False, 'from filelock import FileLock, Timeout\n'), ((9007, 9039), 're.match', 're.match', (['variables_syntax', 'line'], {}), '(variables_syntax, line)\n', (9015, 9039), False, 'import re\n'), ((9494, 9548), 'numpy.zeros', 'np.zeros', (['self.observation_space.shape'], {'dtype': 'np.uint8'}), '(self.observation_space.shape, dtype=np.uint8)\n', (9502, 9548), True, 'import numpy as np\n'), ((10406, 10460), 'utils.utils.log.warning', 'log.warning', (['"""Recording episode demo to %s"""', 'demo_path'], {}), "('Recording episode demo to %s', demo_path)\n", (10417, 10460), False, 'from utils.utils import log, project_tmp_dir\n'), ((10927, 11005), 'utils.utils.log.error', 'log.error', (['"""Game returned None screen buffer! This is not supposed to happen!"""'], {}), "('Game returned None screen buffer! This is not supposed to happen!')\n", (10936, 11005), False, 'from utils.utils import log, project_tmp_dir\n'), ((13938, 13982), 'numpy.transpose', 'np.transpose', (['state.screen_buffer', '(1, 2, 0)'], {}), '(state.screen_buffer, (1, 2, 0))\n', (13950, 13982), True, 'import numpy as np\n'), ((14171, 14190), 'copy.deepcopy', 'copy.deepcopy', (['info'], {}), '(info)\n', (14184, 14190), False, 'import copy\n'), ((15447, 15475), 'numpy.transpose', 'np.transpose', (['img', '[1, 2, 0]'], {}), '(img, [1, 2, 0])\n', (15459, 15475), True, 'import numpy as np\n'), ((19985, 19996), 'time.time', 'time.time', ([], {}), '()\n', (19994, 19996), False, 'import time\n'), ((5533, 5642), 'utils.utils.log.info', 'log.info', (['"""Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster"""'], {}), "(\n 'Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster'\n )\n", (5541, 5642), False, 'from utils.utils import log, project_tmp_dir\n'), ((10258, 10288), 'os.path.exists', 'os.path.exists', (['self.record_to'], {}), '(self.record_to)\n', (10272, 10288), False, 'import os\n'), ((10306, 10333), 'os.makedirs', 'os.makedirs', (['self.record_to'], {}), '(self.record_to)\n', (10317, 10333), False, 'import os\n'), ((15702, 15739), 'cv2.resize', 'cv2.resize', (['img', '(render_w, render_h)'], {}), '(img, (render_w, render_h))\n', (15712, 15739), False, 'import cv2\n'), ((15870, 15916), 'gym.envs.classic_control.rendering.SimpleImageViewer', 'rendering.SimpleImageViewer', ([], {'maxwidth': 'render_w'}), '(maxwidth=render_w)\n', (15897, 15916), False, 'from gym.envs.classic_control import rendering\n'), ((16186, 16247), 'utils.utils.log.warning', 'log.warning', (['"""Runtime error in VizDoom game close(): %r"""', 'exc'], {}), "('Runtime error in VizDoom game close(): %r', exc)\n", (16197, 16247), False, 'from utils.utils import log, project_tmp_dir\n'), ((19638, 19723), 'pynput.keyboard.Listener', 'Listener', ([], {'on_press': 'doom._keyboard_on_press', 'on_release': 'doom._keyboard_on_release'}), '(on_press=doom._keyboard_on_press, on_release=doom._keyboard_on_release\n )\n', (19646, 19723), False, 'from pynput.keyboard import Listener\n'), ((22630, 22653), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (22651, 22653), False, 'import cv2\n'), ((7105, 7192), 'utils.utils.log.warning', 'log.warning', (['"""VizDoom game.init() threw an exception %r. Terminate process..."""', 'exc'], {}), "('VizDoom game.init() threw an exception %r. Terminate process...',\n    exc)", (7116, 7192), False, 'from utils.utils import log, project_tmp_dir\n'), ((7271, 7289), 'envs.env_utils.EnvCriticalError', 'EnvCriticalError', ([], {}), '()\n', (7287, 7289), False, 'from envs.env_utils import EnvCriticalError\n'), ((12726, 12772), 'numpy.zeros', 'np.zeros', (['num_non_idle_actions'], {'dtype': 'np.uint8'}), '(num_non_idle_actions, dtype=np.uint8)\n', (12734, 12772), True, 'import numpy as np\n'), ((22567, 22578), 'time.time', 'time.time', ([], {}), '()\n', (22576, 22578), False, 'import time\n'), ((23256, 23267), 'time.time', 'time.time', ([], {}), '()\n', (23265, 23267), False, 'import time\n'), ((6910, 7008), 'utils.utils.log.debug', 'log.debug', (['"""Another process currently holds the lock %s, attempt: %d"""', 'lock_file', 'init_attempt'], {}), "('Another process currently holds the lock %s, attempt: %d',\n lock_file, init_attempt)\n", (6919, 7008), False, 'from utils.utils import log, project_tmp_dir\n'), ((21123, 21180), 'utils.utils.log.info', 'log.info', (['"""Reward: %.3f, total: %.3f"""', 'rew', 'new_total_rew'], {}), "('Reward: %.3f, total: %.3f', rew, new_total_rew)\n", (21131, 21180), False, 'from utils.utils import log, project_tmp_dir\n'), ((21887, 21898), 'time.time', 'time.time', ([], {}), '()\n', (21896, 21898), False, 'import time\n'), ((22158, 22181), 'numpy.swapaxes', 'np.swapaxes', (['map_', '(0)', '(2)'], {}), '(map_, 0, 2)\n', (22169, 22181), True, 'import numpy as np\n'), ((22213, 22236), 'numpy.swapaxes', 'np.swapaxes', (['map_', '(0)', '(1)'], {}), '(map_, 0, 1)\n', (22224, 22236), True, 'import numpy as np\n'), ((22261, 22303), 'cv2.imshow', 'cv2.imshow', (['"""ViZDoom Automap Buffer"""', 'map_'], {}), "('ViZDoom Automap Buffer', map_)\n", (22271, 22303), False, 'import cv2\n'), ((22505, 22526), 'time.sleep', 'time.sleep', (['time_wait'], {}), '(time_wait)\n', (22515, 22526), False, 'import time\n')]
|
from dotenv import load_dotenv, find_dotenv
import requests
import base64
import json
import os
load_dotenv(find_dotenv())
REFRESH_TOKEN = os.environ.get("REFRESH_TOKEN").strip()
CLIENT_ID = os.environ.get("CLIENT_ID").strip()
CLIENT_SECRET = os.environ.get("CLIENT_SECRET").strip()
DISCOVER_WEEKLY_ID = os.environ.get("DISCOVER_WEEKLY_ID").strip()
SAVE_TO_ID = os.environ.get("SAVE_TO_ID").strip()
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
def refresh_access_token():
payload = {
"refresh_token": REFRESH_TOKEN,
"grant_type": "refresh_token",
"client_id": CLIENT_ID,
}
encoded_client = base64.b64encode((CLIENT_ID + ":" + CLIENT_SECRET).encode('ascii'))
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic %s" % encoded_client.decode('ascii')
}
response = requests.post(OAUTH_TOKEN_URL, data=payload, headers=headers)
return response.json()
def get_playlist(access_token):
url = "https://api.spotify.com/v1/playlists/%s" % DISCOVER_WEEKLY_ID
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % access_token
}
response = requests.get(url, headers=headers)
return response.json()
def add_to_playlist(access_token, tracklist):
url = "https://api.spotify.com/v1/playlists/%s/tracks" % SAVE_TO_ID
payload = {
"uris" : tracklist
}
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % access_token
}
response = requests.post(url, data=json.dumps(payload), headers=headers)
return response.json()
def main():
if REFRESH_TOKEN is None or CLIENT_ID is None or CLIENT_SECRET is None or DISCOVER_WEEKLY_ID is None or SAVE_TO_ID is None:
print("Environment variables have not been loaded!")
return
access_token = refresh_access_token()['access_token']
tracks = get_playlist(access_token)['tracks']['items']
tracklist = []
for item in tracks:
tracklist.append(item['track']['uri'])
response = add_to_playlist(access_token, tracklist)
if "snapshot_id" in response:
print("Successfully added all songs")
else:
print(response)
main()
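# Note: the script expects REFRESH_TOKEN, CLIENT_ID, CLIENT_SECRET, DISCOVER_WEEKLY_ID and
# SAVE_TO_ID to be supplied via a .env file (or the environment). If any of them is missing,
# the .strip() calls at import time raise AttributeError before main()'s None check ever runs.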
|
[
"dotenv.find_dotenv",
"json.dumps",
"os.environ.get",
"requests.get",
"requests.post"
] |
[((109, 122), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (120, 122), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((877, 938), 'requests.post', 'requests.post', (['OAUTH_TOKEN_URL'], {'data': 'payload', 'headers': 'headers'}), '(OAUTH_TOKEN_URL, data=payload, headers=headers)\n', (890, 938), False, 'import requests\n'), ((1204, 1238), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1216, 1238), False, 'import requests\n'), ((140, 171), 'os.environ.get', 'os.environ.get', (['"""REFRESH_TOKEN"""'], {}), "('REFRESH_TOKEN')\n", (154, 171), False, 'import os\n'), ((192, 219), 'os.environ.get', 'os.environ.get', (['"""CLIENT_ID"""'], {}), "('CLIENT_ID')\n", (206, 219), False, 'import os\n'), ((244, 275), 'os.environ.get', 'os.environ.get', (['"""CLIENT_SECRET"""'], {}), "('CLIENT_SECRET')\n", (258, 275), False, 'import os\n'), ((305, 341), 'os.environ.get', 'os.environ.get', (['"""DISCOVER_WEEKLY_ID"""'], {}), "('DISCOVER_WEEKLY_ID')\n", (319, 341), False, 'import os\n'), ((363, 391), 'os.environ.get', 'os.environ.get', (['"""SAVE_TO_ID"""'], {}), "('SAVE_TO_ID')\n", (377, 391), False, 'import os\n'), ((1589, 1608), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1599, 1608), False, 'import json\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
from .backend import RedisBackend, FileBackend
from .tache import Tache
RedisCache = functools.partial(Tache, RedisBackend)
FileCache = functools.partial(Tache, FileBackend)
__all__ = ['RedisCache', 'FileCache']
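# Both aliases are functools.partial objects: calling RedisCache(*args, **kwargs) is
# equivalent to Tache(RedisBackend, *args, **kwargs), and likewise for FileCache.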
|
[
"functools.partial"
] |
[((150, 188), 'functools.partial', 'functools.partial', (['Tache', 'RedisBackend'], {}), '(Tache, RedisBackend)\n', (167, 188), False, 'import functools\n'), ((201, 238), 'functools.partial', 'functools.partial', (['Tache', 'FileBackend'], {}), '(Tache, FileBackend)\n', (218, 238), False, 'import functools\n')]
|
# one way to create a home without an app
from django.shortcuts import render
def index(request):
return render(request, 'home/index.html')
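# Hypothetical wiring (not part of this file): in the project's urls.py, something like
# path('', index, name='index') would serve this view at the site root.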
|
[
"django.shortcuts.render"
] |
[((110, 144), 'django.shortcuts.render', 'render', (['request', '"""home/index.html"""'], {}), "(request, 'home/index.html')\n", (116, 144), False, 'from django.shortcuts import render\n')]
|
import unittest
from shexer.shaper import Shaper
from test.const import G1_NT, G1, BASE_FILES, default_namespaces, G1_ALL_CLASSES_NO_COMMENTS
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import TURTLE, NT
_BASE_DIR = BASE_FILES + "namespaces_dict" + pth.sep
class TestNamespacesDict(unittest.TestCase):
def test_same_namespaces_as_source_ttl_file(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=G1_ALL_CLASSES_NO_COMMENTS,
str_target=str_result))
def test_no_foaf(self):
namespaces = {"http://example.org/" : "ex",
"http://www.w3.org/XML/1998/namespace/" : "xml",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://www.w3.org/2000/01/rdf-schema#" : "rdfs",
"http://www.w3.org/2001/XMLSchema#": "xsd"
# "http://xmlns.com/foaf/0.1/": "foaf"
}
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_NT,
namespaces_dict=namespaces,
all_classes_mode=False,
input_format=NT,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "\\no_foaf.shex",
str_target=str_result))
def test_overwrite_empty(self):
namespaces = default_namespaces()
namespaces["http://unuseful.but.yet/here/"] = ""
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1,
namespaces_dict=namespaces,
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "\\overwrite_empty.shex",
str_target=str_result))
def test_overwrite_some_namespaces(self):
namespaces = {"http://example.org/": "ex",
"http://www.w3.org/XML/1998/namespace/": "xml",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://www.w3.org/2000/01/rdf-schema#": "rdfs",
"http://www.w3.org/2001/XMLSchema#": "xxssdd",
"http://xmlns.com/foaf/0.1/": "fooo"
}
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1,
namespaces_dict=namespaces,
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "\\overwrite.shex",
str_target=str_result))
|
[
"shexer.shaper.Shaper",
"test.const.default_namespaces",
"test.t_utils.file_vs_str_tunned_comparison"
] |
[((1564, 1789), 'shexer.shaper.Shaper', 'Shaper', ([], {'target_classes': "['http://xmlns.com/foaf/0.1/Person', 'http://xmlns.com/foaf/0.1/Document']", 'graph_file_input': 'G1_NT', 'namespaces_dict': 'namespaces', 'all_classes_mode': '(False)', 'input_format': 'NT', 'disable_comments': '(True)'}), "(target_classes=['http://xmlns.com/foaf/0.1/Person',\n 'http://xmlns.com/foaf/0.1/Document'], graph_file_input=G1_NT,\n namespaces_dict=namespaces, all_classes_mode=False, input_format=NT,\n disable_comments=True)\n", (1570, 1789), False, 'from shexer.shaper import Shaper\n'), ((2227, 2247), 'test.const.default_namespaces', 'default_namespaces', ([], {}), '()\n', (2245, 2247), False, 'from test.const import G1_NT, G1, BASE_FILES, default_namespaces, G1_ALL_CLASSES_NO_COMMENTS\n'), ((2323, 2549), 'shexer.shaper.Shaper', 'Shaper', ([], {'target_classes': "['http://xmlns.com/foaf/0.1/Person', 'http://xmlns.com/foaf/0.1/Document']", 'graph_file_input': 'G1', 'namespaces_dict': 'namespaces', 'all_classes_mode': '(False)', 'input_format': 'TURTLE', 'disable_comments': '(True)'}), "(target_classes=['http://xmlns.com/foaf/0.1/Person',\n 'http://xmlns.com/foaf/0.1/Document'], graph_file_input=G1,\n namespaces_dict=namespaces, all_classes_mode=False, input_format=TURTLE,\n disable_comments=True)\n", (2329, 2549), False, 'from shexer.shaper import Shaper\n'), ((3422, 3648), 'shexer.shaper.Shaper', 'Shaper', ([], {'target_classes': "['http://xmlns.com/foaf/0.1/Person', 'http://xmlns.com/foaf/0.1/Document']", 'graph_file_input': 'G1', 'namespaces_dict': 'namespaces', 'all_classes_mode': '(False)', 'input_format': 'TURTLE', 'disable_comments': '(True)'}), "(target_classes=['http://xmlns.com/foaf/0.1/Person',\n 'http://xmlns.com/foaf/0.1/Document'], graph_file_input=G1,\n namespaces_dict=namespaces, all_classes_mode=False, input_format=TURTLE,\n disable_comments=True)\n", (3428, 3648), False, 'from shexer.shaper import Shaper\n'), ((897, 991), 'test.t_utils.file_vs_str_tunned_comparison', 'file_vs_str_tunned_comparison', ([], {'file_path': 'G1_ALL_CLASSES_NO_COMMENTS', 'str_target': 'str_result'}), '(file_path=G1_ALL_CLASSES_NO_COMMENTS,\n str_target=str_result)\n', (926, 991), False, 'from test.t_utils import file_vs_str_tunned_comparison\n'), ((2021, 2117), 'test.t_utils.file_vs_str_tunned_comparison', 'file_vs_str_tunned_comparison', ([], {'file_path': "(_BASE_DIR + '\\\\no_foaf.shex')", 'str_target': 'str_result'}), "(file_path=_BASE_DIR + '\\\\no_foaf.shex',\n str_target=str_result)\n", (2050, 2117), False, 'from test.t_utils import file_vs_str_tunned_comparison\n'), ((2781, 2885), 'test.t_utils.file_vs_str_tunned_comparison', 'file_vs_str_tunned_comparison', ([], {'file_path': "(_BASE_DIR + '\\\\overwrite_empty.shex')", 'str_target': 'str_result'}), "(file_path=_BASE_DIR +\n '\\\\overwrite_empty.shex', str_target=str_result)\n", (2810, 2885), False, 'from test.t_utils import file_vs_str_tunned_comparison\n'), ((3880, 3978), 'test.t_utils.file_vs_str_tunned_comparison', 'file_vs_str_tunned_comparison', ([], {'file_path': "(_BASE_DIR + '\\\\overwrite.shex')", 'str_target': 'str_result'}), "(file_path=_BASE_DIR + '\\\\overwrite.shex',\n str_target=str_result)\n", (3909, 3978), False, 'from test.t_utils import file_vs_str_tunned_comparison\n'), ((652, 672), 'test.const.default_namespaces', 'default_namespaces', ([], {}), '()\n', (670, 672), False, 'from test.const import G1_NT, G1, BASE_FILES, default_namespaces, G1_ALL_CLASSES_NO_COMMENTS\n')]
|
import unittest
import tensorflow.contrib.keras as keras
import numpy as np
from vixstructure.models import term_structure_to_spread_price, term_structure_to_spread_price_v2
from vixstructure.models import term_structure_to_single_spread_price
from vixstructure.models import mask_output
from vixstructure.data import LongPricesDataset
class TestModels(unittest.TestCase):
def setUp(self):
self.dataset = LongPricesDataset("../../data/8_m_settle.csv", "../../data/expirations.csv")
def test_term_structure_to_spread_price(self):
model = term_structure_to_spread_price(5, 9)
self.assertEqual(len(model.layers), 7)
def test_mask_output_function_for_lambda_layers(self):
input = keras.layers.Input(shape=(9,))
output = keras.layers.Lambda(mask_output)(input)
model = keras.models.Model(inputs=input, outputs=output)
x, y = self.dataset.dataset()
preds = model.predict(x)
self.assertEqual(preds.shape, (2655, 6))
self.assertEqual(np.all(preds, axis=0).sum(), 5)
self.assertEqual(np.all(preds, axis=1).sum(), 2529)
self.assertEqual((preds == 0.).sum(), 126)
def test_term_structure_to_spread_prices_v2(self):
model = term_structure_to_spread_price_v2(5, 9)
x, y = self.dataset.dataset()
preds = model.predict(x)
self.assertEqual(preds.shape, (2655, 6))
self.assertEqual(np.all(preds, axis=0).sum(), 5)
self.assertEqual(np.all(preds, axis=1).sum(), 2529)
def test_term_structure_to_single_spread_price(self):
"""Just test model construction."""
model = term_structure_to_single_spread_price(5, 9)
self.assertEqual([layer.output_shape[1] for layer in model.layers], [8, 9, 9, 9, 9, 9, 1])
for distribution in (layer.kernel_initializer.distribution for layer in model.layers
if isinstance(layer, keras.layers.Dense)):
self.assertEqual(distribution, "uniform")
model_reduced_widths = term_structure_to_single_spread_price(5, 9, reduce_width=True)
self.assertEqual([layer.output_shape[1] for layer in model_reduced_widths.layers], [8, 9, 7, 6, 4, 3, 1])
for distribution in (layer.kernel_initializer.distribution for layer in model_reduced_widths.layers
if isinstance(layer, keras.layers.Dense)):
self.assertEqual(distribution, "uniform")
def test_term_structure_to_single_spread_price_with_selu(self):
model = term_structure_to_single_spread_price(5, 9, activation_function="selu")
self.assertEqual([layer.output_shape[1] for layer in model.layers], [8, 9, 9, 9, 9, 9, 1])
vars = [np.square(layer.kernel_initializer.stddev) for layer in model.layers
if isinstance(layer, keras.layers.Dense)]
self.assertAlmostEqual(1 / vars[0], 8 / 2)
for fst, snd in zip(vars[1:], [9, 9, 9, 9, 9]):
self.assertAlmostEqual(1 / fst, snd)
model_reduced_widths = term_structure_to_single_spread_price(5, 9, reduce_width=True, activation_function="selu")
self.assertEqual([layer.output_shape[1] for layer in model_reduced_widths.layers], [8, 9, 7, 6, 4, 3, 1])
vars_reduced_widths = [np.square(layer.kernel_initializer.stddev) for layer in model_reduced_widths.layers
if isinstance(layer, keras.layers.Dense)]
self.assertAlmostEqual(1 / vars[0], 8 / 2)
for fst, snd in zip(vars_reduced_widths[1:], [9, 7, 6, 4, 3]):
self.assertAlmostEqual(1 / fst, snd)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.square",
"vixstructure.models.term_structure_to_single_spread_price",
"tensorflow.contrib.keras.layers.Lambda",
"numpy.all",
"tensorflow.contrib.keras.layers.Input",
"tensorflow.contrib.keras.models.Model",
"vixstructure.models.term_structure_to_spread_price_v2",
"vixstructure.data.LongPricesDataset",
"vixstructure.models.term_structure_to_spread_price"
] |
[((3622, 3637), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3635, 3637), False, 'import unittest\n'), ((421, 497), 'vixstructure.data.LongPricesDataset', 'LongPricesDataset', (['"""../../data/8_m_settle.csv"""', '"""../../data/expirations.csv"""'], {}), "('../../data/8_m_settle.csv', '../../data/expirations.csv')\n", (438, 497), False, 'from vixstructure.data import LongPricesDataset\n'), ((566, 602), 'vixstructure.models.term_structure_to_spread_price', 'term_structure_to_spread_price', (['(5)', '(9)'], {}), '(5, 9)\n', (596, 602), False, 'from vixstructure.models import term_structure_to_spread_price, term_structure_to_spread_price_v2\n'), ((726, 756), 'tensorflow.contrib.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(9,)'}), '(shape=(9,))\n', (744, 756), True, 'import tensorflow.contrib.keras as keras\n'), ((830, 878), 'tensorflow.contrib.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input', 'outputs': 'output'}), '(inputs=input, outputs=output)\n', (848, 878), True, 'import tensorflow.contrib.keras as keras\n'), ((1239, 1278), 'vixstructure.models.term_structure_to_spread_price_v2', 'term_structure_to_spread_price_v2', (['(5)', '(9)'], {}), '(5, 9)\n', (1272, 1278), False, 'from vixstructure.models import term_structure_to_spread_price, term_structure_to_spread_price_v2\n'), ((1635, 1678), 'vixstructure.models.term_structure_to_single_spread_price', 'term_structure_to_single_spread_price', (['(5)', '(9)'], {}), '(5, 9)\n', (1672, 1678), False, 'from vixstructure.models import term_structure_to_single_spread_price\n'), ((2028, 2090), 'vixstructure.models.term_structure_to_single_spread_price', 'term_structure_to_single_spread_price', (['(5)', '(9)'], {'reduce_width': '(True)'}), '(5, 9, reduce_width=True)\n', (2065, 2090), False, 'from vixstructure.models import term_structure_to_single_spread_price\n'), ((2524, 2595), 'vixstructure.models.term_structure_to_single_spread_price', 'term_structure_to_single_spread_price', (['(5)', '(9)'], {'activation_function': '"""selu"""'}), "(5, 9, activation_function='selu')\n", (2561, 2595), False, 'from vixstructure.models import term_structure_to_single_spread_price\n'), ((3025, 3119), 'vixstructure.models.term_structure_to_single_spread_price', 'term_structure_to_single_spread_price', (['(5)', '(9)'], {'reduce_width': '(True)', 'activation_function': '"""selu"""'}), "(5, 9, reduce_width=True,\n activation_function='selu')\n", (3062, 3119), False, 'from vixstructure.models import term_structure_to_single_spread_price\n'), ((774, 806), 'tensorflow.contrib.keras.layers.Lambda', 'keras.layers.Lambda', (['mask_output'], {}), '(mask_output)\n', (793, 806), True, 'import tensorflow.contrib.keras as keras\n'), ((2711, 2753), 'numpy.square', 'np.square', (['layer.kernel_initializer.stddev'], {}), '(layer.kernel_initializer.stddev)\n', (2720, 2753), True, 'import numpy as np\n'), ((3261, 3303), 'numpy.square', 'np.square', (['layer.kernel_initializer.stddev'], {}), '(layer.kernel_initializer.stddev)\n', (3270, 3303), True, 'import numpy as np\n'), ((1024, 1045), 'numpy.all', 'np.all', (['preds'], {'axis': '(0)'}), '(preds, axis=0)\n', (1030, 1045), True, 'import numpy as np\n'), ((1081, 1102), 'numpy.all', 'np.all', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (1087, 1102), True, 'import numpy as np\n'), ((1424, 1445), 'numpy.all', 'np.all', (['preds'], {'axis': '(0)'}), '(preds, axis=0)\n', (1430, 1445), True, 'import numpy as np\n'), ((1481, 1502), 'numpy.all', 'np.all', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (1487, 1502), True, 'import numpy as np\n')]
|
import socket
def getLocalIP():
return socket.gethostbyname(socket.gethostname())
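# Note: gethostbyname(gethostname()) resolves the machine's own hostname, so it may return
# 127.0.0.1 when the hostname maps to loopback (e.g. via /etc/hosts) rather than a LAN address.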
|
[
"socket.gethostname"
] |
[((66, 86), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (84, 86), False, 'import socket\n')]
|
#!/usr/bin/env python
import os
import sys
from setuptools import find_packages, setup
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")
# When executing the setup.py, we need to be able to import ourselves, this
# means that we need to add the src/ directory to the sys.path.
sys.path.insert(0, src_dir)
def read(fname):
"""read file from same path as setup.py"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setuptools_scm_template = """\
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
version = '{version}'
"""
setup(
name='python-escpos',
use_scm_version={
'write_to': 'src/escpos/version.py',
'write_to_template': setuptools_scm_template,
},
url='https://github.com/python-escpos/python-escpos',
download_url='https://github.com/python-escpos/python-escpos/archive/master.zip',
description='Python library to manipulate ESC/POS Printers',
license='MIT',
long_description=read('README.rst'),
author='<NAME> and others',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
keywords=[
'ESC/POS',
'thermoprinter',
'voucher printer',
'printing',
'receipt,',
],
platforms='any',
package_dir={"": "src"},
packages=find_packages(where="src", exclude=["tests", "tests.*"]),
package_data={'': ['COPYING', 'src/escpos/capabilities.json']},
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business :: Financial :: Point-Of-Sale',
],
install_requires=[
'pyusb>=1.0.0',
'Pillow>=2.0',
'qrcode>=4.0',
'pyserial',
'six',
'appdirs',
'PyYAML',
'argparse',
'argcomplete',
'future',
'viivakoodi>=0.8'
],
setup_requires=[
'setuptools_scm',
],
tests_require=[
'jaconv',
'tox',
'pytest!=3.2.0,!=3.3.0',
'pytest-cov',
'pytest-mock',
'nose',
'scripttest',
'mock',
'hypothesis!=3.56.9',
'flake8'
],
entry_points={
'console_scripts': [
'python-escpos = escpos.cli:main'
]
},
)
|
[
"os.path.dirname",
"os.path.join",
"sys.path.insert",
"setuptools.find_packages"
] |
[((101, 126), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (116, 126), False, 'import os\n'), ((137, 166), 'os.path.join', 'os.path.join', (['base_dir', '"""src"""'], {}), "(base_dir, 'src')\n", (149, 166), False, 'import os\n'), ((308, 335), 'sys.path.insert', 'sys.path.insert', (['(0)', 'src_dir'], {}), '(0, src_dir)\n', (323, 335), False, 'import sys\n'), ((1525, 1581), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""', 'exclude': "['tests', 'tests.*']"}), "(where='src', exclude=['tests', 'tests.*'])\n", (1538, 1581), False, 'from setuptools import find_packages, setup\n'), ((431, 456), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (446, 456), False, 'import os\n')]
|
import tensorflow as tf
import tensorflow.keras.layers as KL
from utils import conv_block
def protonet_graph(inputs, cfg):
P3, P4, P5 = inputs
# refine
R1 = conv_block(P3, 128)
R2 = conv_block(P4, 128)
R2 = KL.UpSampling2D(size=(2, 2), interpolation='bilinear', name="protonet_refine2upsampled")(R2)
R3 = conv_block(P5, 128)
R3 = KL.UpSampling2D(size=(4, 4), interpolation='bilinear', name="protonet_refine3upsampled")(R3)
bases = KL.Add(name="protonet_add")([R1, R2, R3])
# tower
for _ in range(3):
bases = conv_block(bases, 128)
bases = KL.UpSampling2D(2, interpolation='bilinear')(bases)
bases = conv_block(bases, 128)
bases = KL.Conv2D(4, kernel_size=(1, 1), strides=(1, 1), name='bases_out')(bases)
# seg_head
sem_out = P3
for _ in range(2):
sem_out = conv_block(sem_out, 128)
sem_out = KL.Conv2D(cfg.DATA.NUM_CLASSES, kernel_size=(1, 1), strides=(1, 1))(sem_out)
return bases, sem_out
def ProtoNet(inputs, cfg):
"""
"""
outputs = protonet_graph(inputs, cfg)
model = tf.keras.Model(inputs, outputs, name='protonet')
return model
|
[
"tensorflow.keras.layers.Conv2D",
"utils.conv_block",
"tensorflow.keras.Model",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Add"
] |
[((172, 191), 'utils.conv_block', 'conv_block', (['P3', '(128)'], {}), '(P3, 128)\n', (182, 191), False, 'from utils import conv_block\n'), ((201, 220), 'utils.conv_block', 'conv_block', (['P4', '(128)'], {}), '(P4, 128)\n', (211, 220), False, 'from utils import conv_block\n'), ((332, 351), 'utils.conv_block', 'conv_block', (['P5', '(128)'], {}), '(P5, 128)\n', (342, 351), False, 'from utils import conv_block\n'), ((658, 680), 'utils.conv_block', 'conv_block', (['bases', '(128)'], {}), '(bases, 128)\n', (668, 680), False, 'from utils import conv_block\n'), ((1083, 1131), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""protonet"""'}), "(inputs, outputs, name='protonet')\n", (1097, 1131), True, 'import tensorflow as tf\n'), ((230, 323), 'tensorflow.keras.layers.UpSampling2D', 'KL.UpSampling2D', ([], {'size': '(2, 2)', 'interpolation': '"""bilinear"""', 'name': '"""protonet_refine2upsampled"""'}), "(size=(2, 2), interpolation='bilinear', name=\n 'protonet_refine2upsampled')\n", (245, 323), True, 'import tensorflow.keras.layers as KL\n'), ((361, 454), 'tensorflow.keras.layers.UpSampling2D', 'KL.UpSampling2D', ([], {'size': '(4, 4)', 'interpolation': '"""bilinear"""', 'name': '"""protonet_refine3upsampled"""'}), "(size=(4, 4), interpolation='bilinear', name=\n 'protonet_refine3upsampled')\n", (376, 454), True, 'import tensorflow.keras.layers as KL\n'), ((466, 493), 'tensorflow.keras.layers.Add', 'KL.Add', ([], {'name': '"""protonet_add"""'}), "(name='protonet_add')\n", (472, 493), True, 'import tensorflow.keras.layers as KL\n'), ((559, 581), 'utils.conv_block', 'conv_block', (['bases', '(128)'], {}), '(bases, 128)\n', (569, 581), False, 'from utils import conv_block\n'), ((594, 638), 'tensorflow.keras.layers.UpSampling2D', 'KL.UpSampling2D', (['(2)'], {'interpolation': '"""bilinear"""'}), "(2, interpolation='bilinear')\n", (609, 638), True, 'import tensorflow.keras.layers as KL\n'), ((693, 759), 'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['(4)'], {'kernel_size': '(1, 1)', 'strides': '(1, 1)', 'name': '"""bases_out"""'}), "(4, kernel_size=(1, 1), strides=(1, 1), name='bases_out')\n", (702, 759), True, 'import tensorflow.keras.layers as KL\n'), ((840, 864), 'utils.conv_block', 'conv_block', (['sem_out', '(128)'], {}), '(sem_out, 128)\n', (850, 864), False, 'from utils import conv_block\n'), ((879, 946), 'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['cfg.DATA.NUM_CLASSES'], {'kernel_size': '(1, 1)', 'strides': '(1, 1)'}), '(cfg.DATA.NUM_CLASSES, kernel_size=(1, 1), strides=(1, 1))\n', (888, 946), True, 'import tensorflow.keras.layers as KL\n')]
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial port support for Windows.
Requires PySerial and pywin32.
"""
# system imports
from serial import PARITY_NONE
from serial import STOPBITS_ONE
from serial import EIGHTBITS
import win32file, win32event
# twisted imports
from twisted.internet import abstract
# sibling imports
from serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
"""A serial device, acting as a transport, that uses a win32 event."""
connected = 1
def __init__(self, protocol, deviceNameOrPortNumber, reactor,
baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
stopbits = STOPBITS_ONE, xonxoff = 0, rtscts = 0):
self._serial = self._serialFactory(
deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
parity=parity, stopbits=stopbits, timeout=None,
xonxoff=xonxoff, rtscts=rtscts)
self.flushInput()
self.flushOutput()
self.reactor = reactor
self.protocol = protocol
self.outQueue = []
self.closed = 0
self.closedNotifies = 0
self.writeInProgress = 0
self.protocol = protocol
self._overlappedRead = win32file.OVERLAPPED()
self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32file.OVERLAPPED()
self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
self.reactor.addEvent(self._overlappedRead.hEvent, self, 'serialReadEvent')
self.reactor.addEvent(self._overlappedWrite.hEvent, self, 'serialWriteEvent')
self.protocol.makeConnection(self)
self._finishPortSetup()
def _finishPortSetup(self):
"""
Finish setting up the serial port.
This is a separate method to facilitate testing.
"""
flags, comstat = win32file.ClearCommError(self._serial.hComPort)
rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(1),
self._overlappedRead)
def serialReadEvent(self):
#get that character we set up
n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 0)
if n:
first = str(self.read_buf[:n])
#now we should get everything that is already in the buffer
flags, comstat = win32file.ClearCommError(self._serial.hComPort)
if comstat.cbInQue:
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(comstat.cbInQue),
self._overlappedRead)
n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 1)
#handle all the received data:
self.protocol.dataReceived(first + str(buf[:n]))
else:
#handle all the received data:
self.protocol.dataReceived(first)
#set up next one
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(1),
self._overlappedRead)
def write(self, data):
if data:
if self.writeInProgress:
self.outQueue.append(data)
else:
self.writeInProgress = 1
win32file.WriteFile(self._serial.hComPort, data, self._overlappedWrite)
def serialWriteEvent(self):
try:
dataToWrite = self.outQueue.pop(0)
except IndexError:
self.writeInProgress = 0
return
else:
win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite)
def connectionLost(self, reason):
"""
Called when the serial port disconnects.
Will call C{connectionLost} on the protocol that is handling the
serial data.
"""
self.reactor.removeEvent(self._overlappedRead.hEvent)
self.reactor.removeEvent(self._overlappedWrite.hEvent)
abstract.FileDescriptor.connectionLost(self, reason)
self._serial.close()
self.protocol.connectionLost(reason)
|
[
"win32event.ResetEvent",
"win32file.OVERLAPPED",
"win32file.WriteFile",
"win32event.CreateEvent",
"win32file.ClearCommError",
"twisted.internet.abstract.FileDescriptor.connectionLost",
"win32file.GetOverlappedResult",
"win32file.AllocateReadBuffer"
] |
[((1270, 1292), 'win32file.OVERLAPPED', 'win32file.OVERLAPPED', ([], {}), '()\n', (1290, 1292), False, 'import win32file, win32event\n'), ((1331, 1371), 'win32event.CreateEvent', 'win32event.CreateEvent', (['None', '(1)', '(0)', 'None'], {}), '(None, 1, 0, None)\n', (1353, 1371), False, 'import win32file, win32event\n'), ((1404, 1426), 'win32file.OVERLAPPED', 'win32file.OVERLAPPED', ([], {}), '()\n', (1424, 1426), False, 'import win32file, win32event\n'), ((1466, 1506), 'win32event.CreateEvent', 'win32event.CreateEvent', (['None', '(0)', '(0)', 'None'], {}), '(None, 0, 0, None)\n', (1488, 1506), False, 'import win32file, win32event\n'), ((1938, 1985), 'win32file.ClearCommError', 'win32file.ClearCommError', (['self._serial.hComPort'], {}), '(self._serial.hComPort)\n', (1962, 1985), False, 'import win32file, win32event\n'), ((2288, 2365), 'win32file.GetOverlappedResult', 'win32file.GetOverlappedResult', (['self._serial.hComPort', 'self._overlappedRead', '(0)'], {}), '(self._serial.hComPort, self._overlappedRead, 0)\n', (2317, 2365), False, 'import win32file, win32event\n'), ((3257, 3307), 'win32event.ResetEvent', 'win32event.ResetEvent', (['self._overlappedRead.hEvent'], {}), '(self._overlappedRead.hEvent)\n', (3278, 3307), False, 'import win32file, win32event\n'), ((4423, 4475), 'twisted.internet.abstract.FileDescriptor.connectionLost', 'abstract.FileDescriptor.connectionLost', (['self', 'reason'], {}), '(self, reason)\n', (4461, 4475), False, 'from twisted.internet import abstract\n'), ((2103, 2134), 'win32file.AllocateReadBuffer', 'win32file.AllocateReadBuffer', (['(1)'], {}), '(1)\n', (2131, 2134), False, 'import win32file, win32event\n'), ((2524, 2571), 'win32file.ClearCommError', 'win32file.ClearCommError', (['self._serial.hComPort'], {}), '(self._serial.hComPort)\n', (2548, 2571), False, 'import win32file, win32event\n'), ((3425, 3456), 'win32file.AllocateReadBuffer', 'win32file.AllocateReadBuffer', (['(1)'], {}), '(1)\n', (3453, 3456), False, 'import win32file, win32event\n'), ((4003, 4081), 'win32file.WriteFile', 'win32file.WriteFile', (['self._serial.hComPort', 'dataToWrite', 'self._overlappedWrite'], {}), '(self._serial.hComPort, dataToWrite, self._overlappedWrite)\n', (4022, 4081), False, 'import win32file, win32event\n'), ((2620, 2670), 'win32event.ResetEvent', 'win32event.ResetEvent', (['self._overlappedRead.hEvent'], {}), '(self._overlappedRead.hEvent)\n', (2641, 2670), False, 'import win32file, win32event\n'), ((2918, 2995), 'win32file.GetOverlappedResult', 'win32file.GetOverlappedResult', (['self._serial.hComPort', 'self._overlappedRead', '(1)'], {}), '(self._serial.hComPort, self._overlappedRead, 1)\n', (2947, 2995), False, 'import win32file, win32event\n'), ((3728, 3799), 'win32file.WriteFile', 'win32file.WriteFile', (['self._serial.hComPort', 'data', 'self._overlappedWrite'], {}), '(self._serial.hComPort, data, self._overlappedWrite)\n', (3747, 3799), False, 'import win32file, win32event\n'), ((2784, 2829), 'win32file.AllocateReadBuffer', 'win32file.AllocateReadBuffer', (['comstat.cbInQue'], {}), '(comstat.cbInQue)\n', (2812, 2829), False, 'import win32file, win32event\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 17:37:51 2020
@author: sawleen
"""
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
os.chdir('/Users/sawleen/Documents/Leen/Python/stock_analysis')
import data.get_yf_data as get_yf_data #Configured to the same root folder where display_webpg.py resides
import data.get_sgi_data as get_sgi_data #Configured to the same root folder where display_webpg.py resides
import data.get_morningstar_data as get_ms_data
import time
import math
class Update():
#### Get sector summaries (generate main list)
def prep_sector_summaries(self, stocks_map, stock_sectors, new_sectors, new_stocks=None):
summary_all_df = pd.DataFrame([]) # To track for all sectors
start_time = time.time()
# New entries detected
if new_sectors != 'All':
summary_all_df = pd.read_csv('data/sector_summaries/All.csv', index_col=None)
# Get all health metrics first
# Health metrics require selenium, which is prone to disconnections
health_metrics_dict_all = self.get_all_health_metrics(new_stocks)
for sector_to_update in new_sectors:
print('Sector to update: {}'.format(sector_to_update))
summary_df = pd.read_csv('data/sector_summaries/{}.csv'.format(sector_to_update), index_col=None)
for symbol in new_stocks:
# Update CSV for indiv sector
current_sector = stocks_map.loc[stocks_map['SGX_Symbol'] == symbol, ['Sector']].values[0][0]
print('Current stock sector: {}'.format(current_sector))
if current_sector == sector_to_update:
stocks_map_filtered = stocks_map.loc[stocks_map['SGX_Symbol'] == symbol, stocks_map.columns]
[summary_df, summary_all_df] = self.get_summary_df(sector_to_update, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df)
# Sector summary
summary_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_df.to_csv('data/sector_summaries/{}.csv'.format(sector_to_update), index=False)
summary_all_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_all_df.to_csv('data/sector_summaries/All.csv',index=False)
# No new entries but update for ALL sectors
else:
#expected_runtime = int(len(stocks_map)/60*15) # expected time to print to screen
print('Updating summary for all sectors...')
#print('Please hold on for about {}min...'.format(expected_runtime))
summary_all_df = pd.DataFrame([])
# Get all health metrics first
# Health metrics require selenium, which is prone to disconnections
symbols=stocks_map['SGX_Symbol']
health_metrics_dict_all = self.get_all_health_metrics(symbols)
for sector in stock_sectors:
summary_df = pd.DataFrame([])
if sector!= 'All':
stocks_map_filtered = stocks_map.loc[stocks_map['Sector'] == sector, stocks_map.columns]
[summary_df, summary_all_df] = self.get_summary_df(sector, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df)
# Sector summary
summary_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_df.to_csv('data/sector_summaries/{}.csv'.format(sector), index=False)
# All stocks summary
print('Sorting sector summary for ALL stocks...')
summary_all_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_all_df.to_csv('data/sector_summaries/All.csv', index=False)
total_time = round((time.time() - start_time)/60,2)
print('Total time taken: {}'.format(total_time))
#### End of prep_sector_summaries
def get_summary_df(self, sector_to_update, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df):
print('Prepping sector summary for {}...'.format(sector_to_update))
for sgx_symbol in stocks_map_filtered['SGX_Symbol']:
print('{}...'.format(sgx_symbol))
yf_data = get_yf_data.Data(sgx_symbol)
industry = yf_data.get_industry()
stats = yf_data.get_basic_stats()
[inc_yoy_avg_growth_df, inc_yrly_growth_df] = yf_data.process_inc_statement()
dividends_df = yf_data.get_dividends()
try:
div_fwd = dividends_df.loc[dividends_df['Dividend Type']=='Forward',['Values']].values[0][0]
except:
print('! Warning: No forward dividend data fetched for {}'.format(sgx_symbol))
div_fwd = math.nan
short_name = yf_data.get_name_short()
disp_name = yf_data.get_name_disp()
if '.SI' in sgx_symbol and type(short_name)==str:
sgi_data = get_sgi_data.Data(sgx_symbol, short_name)
url_tprice = sgi_data.get_sginvestor_url(sgx_symbol, short_name, industry)
#print(url_tprice)
soup_tprice = sgi_data.get_soup_tprice(url_tprice)
tpcalls = sgi_data.get_tpcalls(soup_tprice)
tpcalls_df = sgi_data.get_tpcalls_df(tpcalls)
strategies_summary = sgi_data.get_strategies_summary(tpcalls_df)
else: # create empty dataframe
strategies_summary = pd.DataFrame(index=[0],columns=['Strategy','Tally(%)','Tally'])
health_metrics = health_metrics_dict_all[sgx_symbol]
info={'Name':disp_name,
'Symbol':sgx_symbol,
'Market Cap (bil)':stats['Market Cap (bil)'],
'PB Ratio': stats['PB Ratio'],
'PE Ratio': stats['PE Ratio'],
'Dividend Payout Ratio': stats['Dividend Payout Ratio'],
'Income Growth (Avg YoY)':inc_yoy_avg_growth_df['Income'].values[0],
'ROE': stats['% Return on Equity'],
'Dividend (fwd)': div_fwd,
'Strategy': strategies_summary.at[0,'Strategy'],
'Tally(%)': strategies_summary.at[0,'Tally(%)'],
'Tally': strategies_summary.at[0,'Tally'],
'Price/Cash Flow':health_metrics['Price/Cash Flow'],
'Debt/Equity':health_metrics['Debt/Equity'],
'Interest Coverage':health_metrics['Interest Coverage']}
# Stock summary
info_df = pd.DataFrame.from_dict(info, orient='columns')
# Sector summary
if summary_df.empty:
summary_df = info_df
else:
summary_df = pd.concat([summary_df, info_df])
# All sector summary
if summary_all_df.empty:
summary_all_df = info_df
else:
summary_all_df = pd.concat([summary_all_df, info_df])
return [summary_df, summary_all_df]
def get_all_health_metrics(self, symbols):
print('Do you want to read from pre-generated health metrics?')
user_pref = input()
print('... Getting health metrics...')
# Read from stored data if user wants to save time
if 'y' in user_pref.lower():
print('...from CSV...')
health_metrics_all_df = pd.read_csv('data/health_metrics_all_df.csv',index_col='symbol')
health_metrics_dict_all = health_metrics_all_df.to_dict('index')
else:
# Initialize driver
driver_options = Options()
driver_options.add_argument("--headless") #for chromedriver to work remotely
chromedriver_path = '/usr/local/bin/chromedriver'
driver = webdriver.Chrome(chromedriver_path,options=driver_options)
# Get health metrics
health_metrics_dict_all={}
for sgx_symbol in symbols:
print('...{}...'.format(sgx_symbol))
health_metrics_dict = get_ms_data.Data().get_health_metrics_dict(sgx_symbol, driver)
health_metrics_dict_all[sgx_symbol] = health_metrics_dict
# Close driver
driver.quit()
print('... Metrics stored...')
print(health_metrics_dict_all)
# Option to save to CSV if user wants
print('... Do you want to save to local disk?')
save_health_metrics = input()
if 'y' in save_health_metrics.lower():
#print(health_metrics_dict_all)
# Write to CSV in case want to refer in future
health_metrics_dict_df = pd.DataFrame.from_dict(health_metrics_dict_all).T
health_metrics_dict_df.index.rename('symbol',inplace=True)
#health_metrics_dict_df.reset_index(inplace=True)
saved_health_metrics = pd.read_csv('data/health_metrics_all_df.csv', index_col=['symbol'])
for sgx_symbol in symbols:
print(sgx_symbol)
# Add to saved list if not already inside
if not sgx_symbol in saved_health_metrics.index:
health_metric_symbol = health_metrics_dict_df[health_metrics_dict_df.index==sgx_symbol]
saved_health_metrics = pd.concat([saved_health_metrics, health_metric_symbol])
# Update list if already inside
else:
saved_health_metrics[saved_health_metrics.index==sgx_symbol] = health_metrics_dict_df[health_metrics_dict_df.index==sgx_symbol]
#health_metrics_dict_df.sort_index(inplace=True)
#health_metrics_dict_df.to_csv('data/health_metrics_all_df.csv')
saved_health_metrics.sort_index(inplace=True)
saved_health_metrics.to_csv('data/health_metrics_all_df.csv')
return health_metrics_dict_all
|
[
"pandas.DataFrame",
"selenium.webdriver.chrome.options.Options",
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"data.get_yf_data.Data",
"time.time",
"data.get_sgi_data.Data",
"selenium.webdriver.Chrome",
"data.get_morningstar_data.Data",
"pandas.concat",
"os.chdir"
] |
[((224, 287), 'os.chdir', 'os.chdir', (['"""/Users/sawleen/Documents/Leen/Python/stock_analysis"""'], {}), "('/Users/sawleen/Documents/Leen/Python/stock_analysis')\n", (232, 287), False, 'import os\n'), ((761, 777), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (773, 777), True, 'import pandas as pd\n'), ((826, 837), 'time.time', 'time.time', ([], {}), '()\n', (835, 837), False, 'import time\n'), ((932, 992), 'pandas.read_csv', 'pd.read_csv', (['"""data/sector_summaries/All.csv"""'], {'index_col': 'None'}), "('data/sector_summaries/All.csv', index_col=None)\n", (943, 992), True, 'import pandas as pd\n'), ((2906, 2922), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (2918, 2922), True, 'import pandas as pd\n'), ((4658, 4686), 'data.get_yf_data.Data', 'get_yf_data.Data', (['sgx_symbol'], {}), '(sgx_symbol)\n', (4674, 4686), True, 'import data.get_yf_data as get_yf_data\n'), ((6985, 7031), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['info'], {'orient': '"""columns"""'}), "(info, orient='columns')\n", (7007, 7031), True, 'import pandas as pd\n'), ((7819, 7884), 'pandas.read_csv', 'pd.read_csv', (['"""data/health_metrics_all_df.csv"""'], {'index_col': '"""symbol"""'}), "('data/health_metrics_all_df.csv', index_col='symbol')\n", (7830, 7884), True, 'import pandas as pd\n'), ((8037, 8046), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (8044, 8046), False, 'from selenium.webdriver.chrome.options import Options\n'), ((8219, 8278), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['chromedriver_path'], {'options': 'driver_options'}), '(chromedriver_path, options=driver_options)\n', (8235, 8278), False, 'from selenium import webdriver\n'), ((3238, 3254), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (3250, 3254), True, 'import pandas as pd\n'), ((5384, 5425), 'data.get_sgi_data.Data', 'get_sgi_data.Data', (['sgx_symbol', 'short_name'], {}), '(sgx_symbol, short_name)\n', (5401, 5425), True, 'import data.get_sgi_data as get_sgi_data\n'), ((5902, 5968), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[0]', 'columns': "['Strategy', 'Tally(%)', 'Tally']"}), "(index=[0], columns=['Strategy', 'Tally(%)', 'Tally'])\n", (5914, 5968), True, 'import pandas as pd\n'), ((7178, 7210), 'pandas.concat', 'pd.concat', (['[summary_df, info_df]'], {}), '([summary_df, info_df])\n', (7187, 7210), True, 'import pandas as pd\n'), ((7373, 7409), 'pandas.concat', 'pd.concat', (['[summary_all_df, info_df]'], {}), '([summary_all_df, info_df])\n', (7382, 7409), True, 'import pandas as pd\n'), ((9343, 9410), 'pandas.read_csv', 'pd.read_csv', (['"""data/health_metrics_all_df.csv"""'], {'index_col': "['symbol']"}), "('data/health_metrics_all_df.csv', index_col=['symbol'])\n", (9354, 9410), True, 'import pandas as pd\n'), ((4202, 4213), 'time.time', 'time.time', ([], {}), '()\n', (4211, 4213), False, 'import time\n'), ((9112, 9159), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['health_metrics_dict_all'], {}), '(health_metrics_dict_all)\n', (9134, 9159), True, 'import pandas as pd\n'), ((8480, 8498), 'data.get_morningstar_data.Data', 'get_ms_data.Data', ([], {}), '()\n', (8496, 8498), True, 'import data.get_morningstar_data as get_ms_data\n'), ((9782, 9837), 'pandas.concat', 'pd.concat', (['[saved_health_metrics, health_metric_symbol]'], {}), '([saved_health_metrics, health_metric_symbol])\n', (9791, 9837), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Methods to validate the database integrity and fix violations."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
WARNING_BORDER = '*' * 120
def write_database_integrity_violation(results, headers, reason_message, action_message=None):
"""Emit a integrity violation warning and write the violating records to a log file in the current directory
:param results: a list of tuples representing the violating records
:param headers: a tuple of strings that will be used as a header for the log file. Should have the same length
as each tuple in the results list.
:param reason_message: a human readable message detailing the reason of the integrity violation
:param action_message: an optional human readable message detailing a performed action, if any
"""
# pylint: disable=duplicate-string-formatting-argument
from datetime import datetime
from tabulate import tabulate
from tempfile import NamedTemporaryFile
from aiida.cmdline.utils import echo
from aiida.manage import configuration
if configuration.PROFILE.is_test_profile:
return
if action_message is None:
action_message = 'nothing'
with NamedTemporaryFile(prefix='migration-', suffix='.log', dir='.', delete=False, mode='w+') as handle:
echo.echo('')
echo.echo_warning(
'\n{}\nFound one or multiple records that violate the integrity of the database\nViolation reason: {}\n'
'Performed action: {}\nViolators written to: {}\n{}\n'.format(
WARNING_BORDER, reason_message, action_message, handle.name, WARNING_BORDER
)
)
handle.write('# {}\n'.format(datetime.utcnow().isoformat()))
handle.write('# Violation reason: {}\n'.format(reason_message))
handle.write('# Performed action: {}\n'.format(action_message))
handle.write('\n')
handle.write(tabulate(results, headers))
|
[
"tempfile.NamedTemporaryFile",
"tabulate.tabulate",
"aiida.cmdline.utils.echo.echo",
"datetime.datetime.utcnow"
] |
[((1913, 2006), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'prefix': '"""migration-"""', 'suffix': '""".log"""', 'dir': '"""."""', 'delete': '(False)', 'mode': '"""w+"""'}), "(prefix='migration-', suffix='.log', dir='.', delete=\n False, mode='w+')\n", (1931, 2006), False, 'from tempfile import NamedTemporaryFile\n'), ((2021, 2034), 'aiida.cmdline.utils.echo.echo', 'echo.echo', (['""""""'], {}), "('')\n", (2030, 2034), False, 'from aiida.cmdline.utils import echo\n'), ((2632, 2658), 'tabulate.tabulate', 'tabulate', (['results', 'headers'], {}), '(results, headers)\n', (2640, 2658), False, 'from tabulate import tabulate\n'), ((2408, 2425), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2423, 2425), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python
fork_pushback_block_count = {
'Byzantium': 3000000,
'Constantinople': 5000000,
'MuirGlacier': 9000000,
'London': 9700000,
'ArrowGlacier': 10700000
}
emptyOmmersHash = "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
nonEmptyOmmersHash = "0x2dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
def adj_factor(parent_uncles, delta_timestamp):
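    # Adjustment factor (EIP-100 style): start from 2 if the parent block had
    # uncles else 1, subtract one for every 9 seconds between blocks, and never
    # go below -99.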
return max((2 if parent_uncles else 1) - (delta_timestamp // 9), -99)
def periodCount(block_delta):
return block_delta // 100000
def calcBomb(block_delta):
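    # Difficulty bomb term: zero until the first 100000-block period has passed,
    # afterwards 2 ** (periodCount - 2).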
return 0 if periodCount(block_delta) <= 0 else int(2 ** (periodCount(block_delta) - 2))
BLOCK_DIFF_FACTOR = 2048
def diff_minus_bomb(parent_difficulty, adjustment_factor):
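    # Difficulty without the bomb: parent difficulty shifted by
    # (parent_difficulty // 2048) * adjustment_factor.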
return (parent_difficulty +
(parent_difficulty // BLOCK_DIFF_FACTOR) * adjustment_factor )
def diff_minus_bomb_timestamp(parent_difficulty, parent_uncles, delta_timestamp):
return diff_minus_bomb(parent_difficulty, adj_factor(parent_uncles, delta_timestamp))
def calc_new_difficulty(parent_difficulty, parent_uncles, delta_timestamp, block_delta):
return diff_minus_bomb_timestamp(parent_difficulty, parent_uncles, delta_timestamp) + calcBomb(block_delta)
def unclesTimestampFromAdjustmentFactor(step, adjFactor):
if adjFactor > 2 or adjFactor < -99:
raise Exception('Invalid adjustment factor')
timestampDelta = None
uncles = None
if adjFactor <= 0:
step = step % 18 # For each adjustment Factor, there are 18 uncles/timestampDelta combinations that produce a certain adjustment factor
timestampDelta = (((adjFactor * -1) + 1) * 9) + step # the range is 9 - 26, for adjFactor 0, 18 - 35 for adjFactor -1 and so on...
        uncles = (step >= 9) # The lower half of the step range (0 - 8) is satisfied without uncles; the upper half requires uncles to produce the same adjFactor
elif adjFactor == 1: # For adjustment factor of 1, there are only 17 combinations (zero is excluded)
timestampDelta = (step % 17) + 1
uncles = timestampDelta >= 9
    elif adjFactor == 2: # For adjustment factor of 2, there are only 8 combinations (zero is excluded)
uncles = True # For adjFactor two, uncles are always necessary
timestampDelta = (step % 8) + 1
return uncles, timestampDelta
def adjustmentFactorFromStep(step, adjFactorShares):
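    # adjFactorShares is a delimited string of 'factor,share' pairs separated by ';'
    # (outer delimiters stripped below); the step is mapped to one adjustment factor
    # in proportion to its share of the total.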
adjFactorShares = adjFactorShares[1:-1].split(';')
adjFactorShares = [tuple([int(y) for y in x.split(',')]) for x in adjFactorShares]
totalShares = sum([x[1] for x in adjFactorShares])
step = step % totalShares
currentTotal = 0
for adjFactor in adjFactorShares:
currentTotal += adjFactor[1]
if step < currentTotal:
return adjFactor[0]
return None
def unclesTimestampFromAdjustmentFactorShares(step, adjFactorShares):
nextAdjFactor = adjustmentFactorFromStep(step, adjFactorShares)
uncles, timestampDelta = unclesTimestampFromAdjustmentFactor(step, nextAdjFactor)
return uncles, timestampDelta
def fillTestCase(testCase):
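    # Walk the configured block ranges, evolving difficulty and timestamp block by
    # block, and emit one DifficultyTest entry every `step` blocks.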
parentTimestamp = testCase["startTimestamp"]
parentDifficulty = testCase["startDifficutly"]
retDict = {}
parentBlock = None
tc_count = 1
if "comment" in testCase:
retDict["_info"] = {"comment": testCase["comment"]}
for br in testCase["blockRanges"]:
startBlock = br["start"]
endBlock = br["end"]
stepBlocks = br["step"]
if startBlock > endBlock:
raise Exception('invalid start-end range')
currentFork = br["network"]
if currentFork not in fork_pushback_block_count:
raise Exception("Invalid fork")
currentPushbackBlocks = fork_pushback_block_count[currentFork]
if not parentBlock:
parentBlock = startBlock - 1
while parentBlock < startBlock - 1:
blockNum = parentBlock + 1
block_delta = blockNum - currentPushbackBlocks
uncles, timestampDelta = unclesTimestampFromAdjustmentFactorShares(blockNum, testCase["adjustmentFactor"])
parentDifficulty = hex(calc_new_difficulty(int(parentDifficulty, 16), uncles, timestampDelta, block_delta))
parentTimestamp = hex(int(parentTimestamp, 16) + timestampDelta)
parentBlock += 1
for blockNum in range(startBlock, endBlock + 1):
if parentBlock > blockNum:
raise Exception('invalid range')
block_delta = blockNum - currentPushbackBlocks
uncles, timestampDelta = unclesTimestampFromAdjustmentFactorShares(blockNum, testCase["adjustmentFactor"])
currentDifficulty = hex(calc_new_difficulty(int(parentDifficulty, 16), uncles, timestampDelta, block_delta))
currentTimestamp = hex(int(parentTimestamp, 16) + timestampDelta)
if ((blockNum - startBlock) % stepBlocks) == 0:
new_tc = {
"network": currentFork,
"currentBlockNumber": hex(blockNum),
"currentTimestamp": currentTimestamp,
"currentDifficulty": currentDifficulty,
"parentTimestamp" : str(parentTimestamp),
"parentDifficulty" : parentDifficulty,
"parentUncles": "0x01" if uncles else "0x00"
}
retDict["DifficultyTest{}".format(tc_count)] = new_tc
tc_count += 1
parentDifficulty = currentDifficulty
parentTimestamp = currentTimestamp
parentBlock = blockNum
return retDict
if __name__ == "__main__":
import sys
import json
from pprint import pprint
if len(sys.argv) != 3:
raise Exception('Incorrect number of arguments')
(_, tc_path, test_name) = sys.argv
with open(tc_path, 'r') as f:
tc = json.load(f)
print(json.dumps({"test_name": fillTestCase(tc)}, indent=4))
|
[
"json.load"
] |
[((5955, 5967), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5964, 5967), False, 'import json\n')]
|
from django.contrib import admin
from .models import Priority
admin.site.register(Priority)
|
[
"django.contrib.admin.site.register"
] |
[((64, 93), 'django.contrib.admin.site.register', 'admin.site.register', (['Priority'], {}), '(Priority)\n', (83, 93), False, 'from django.contrib import admin\n')]
|
import os
import sys
from types import ModuleType
import unittest
import warnings
from django.conf import LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (SimpleTestCase, TransactionTestCase, TestCase,
modify_settings, override_settings, signals)
from django.utils import six
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3],
TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@modify_settings(ITEMS={
'append': ['e', 'f'],
'prepend': ['a'],
'remove': ['d', 'c'],
})
def test_method_list_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
@modify_settings(ITEMS={
'append': ['b'],
'prepend': ['d'],
'remove': ['a', 'c', 'e'],
})
def test_method_list_override_no_ops(self):
self.assertListEqual(settings.ITEMS, ['b', 'd'])
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
def test_method_list_override_strings(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
@modify_settings(ITEMS={'remove': ['b', 'd']})
@modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
def test_method_list_override_nested_order(self):
self.assertListEqual(settings.ITEMS, ['d', 'c', 'b'])
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertEqual(settings.TEST, 'override')
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
@override_settings(TEST='override2')
def test_method_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
"""
Dummy class for testing max recursion error in child class call to
super(). Refs #17011.
"""
def test_max_recursion_error(self):
pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
def test_override(self):
self.assertEqual(settings.TEST, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
try:
super(ClassDecoratedTestCase, self).test_max_recursion_error()
except RuntimeError:
self.fail()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
def test_override_settings_inheritance(self):
self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(TestCase):
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
self.assertRaises(AttributeError, getattr, settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override = override_settings(TEST='override')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_class_decorator(self):
# SimpleTestCase can be decorated by override_settings, but not ut.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with six.assertRaisesRegex(self, Exception,
"Only subclasses of Django SimpleTestCase*"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
self.assertEqual(self.testvalue, None)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_settings_delete_wrapped(self):
self.assertRaises(TypeError, delattr, settings, '_wrapped')
def test_override_settings_delete(self):
"""
Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
previous_l10n = settings.USE_L10N
with self.settings(USE_I18N=False):
del settings.USE_I18N
self.assertRaises(AttributeError, getattr, settings, 'USE_I18N')
# Should also work for a non-overridden setting
del settings.USE_L10N
self.assertRaises(AttributeError, getattr, settings, 'USE_L10N')
self.assertEqual(settings.USE_I18N, previous_i18n)
self.assertEqual(settings.USE_L10N, previous_l10n)
def test_override_settings_nested(self):
"""
Test that override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not those when the class was instantiated
self.assertEqual('override', settings.TEST)
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
def test_allowed_include_roots_string(self):
"""
        Setting ALLOWED_INCLUDE_ROOTS to a string rather than a tuple must be
        rejected with a ValueError.
"""
self.assertRaises(ValueError, setattr, settings,
'ALLOWED_INCLUDE_ROOTS', '/var/www/ssi/')
class TestComplexSettingOverride(TestCase):
def setUp(self):
self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')
def tearDown(self):
signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
self.assertFalse('TEST_WARN' in signals.COMPLEX_OVERRIDE_SETTINGS)
def test_complex_override_warning(self):
"""Regression test for #19031"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with override_settings(TEST_WARN='override'):
self.assertEqual(settings.TEST_WARN, 'override')
self.assertEqual(len(w), 1)
            # File extension may be .py, .pyc, etc. Compare only basename.
self.assertEqual(os.path.splitext(w[0].filename)[0],
os.path.splitext(__file__)[0])
self.assertEqual(str(w[0].message),
'Overriding setting TEST_WARN can lead to unexpected behavior.')
class TrailingSlashURLTests(TestCase):
"""
Tests for the MEDIA_URL and STATIC_URL settings.
They must end with a slash to ensure there's a deterministic way to build
paths in templates.
"""
settings_module = settings
def setUp(self):
self._original_media_url = self.settings_module.MEDIA_URL
self._original_static_url = self.settings_module.STATIC_URL
def tearDown(self):
self.settings_module.MEDIA_URL = self._original_media_url
self.settings_module.STATIC_URL = self._original_static_url
def test_blank(self):
"""
The empty string is accepted, even though it doesn't end in a slash.
"""
self.settings_module.MEDIA_URL = ''
self.assertEqual('', self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = ''
self.assertEqual('', self.settings_module.STATIC_URL)
def test_end_slash(self):
"""
It works if the value ends in a slash.
"""
self.settings_module.MEDIA_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/'
self.assertEqual('http://media.foo.com/',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/'
self.assertEqual('http://static.foo.com/',
self.settings_module.STATIC_URL)
def test_no_end_slash(self):
"""
An ImproperlyConfigured exception is raised if the value doesn't end
in a slash.
"""
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = 'http://media.foo.com'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = 'http://static.foo.com'
def test_double_slash(self):
"""
If the value ends in more than one slash, presume they know what
they're doing.
"""
self.settings_module.MEDIA_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/stupid//'
self.assertEqual('http://media.foo.com/stupid//',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/stupid//'
self.assertEqual('http://static.foo.com/stupid//',
self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(TestCase):
settings_module = settings
def setUp(self):
self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
def tearDown(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
def test_none(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = None
req = HttpRequest()
self.assertEqual(req.is_secure(), False)
def test_set_without_xheader(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
self.assertEqual(req.is_secure(), False)
def test_set_with_xheader_wrong(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
self.assertEqual(req.is_secure(), False)
def test_set_with_xheader_right(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
self.assertEqual(req.is_secure(), True)
class IsOverriddenTest(TestCase):
def test_configure(self):
s = LazySettings()
s.configure(SECRET_KEY='foo')
self.assertTrue(s.is_overridden('SECRET_KEY'))
def test_module(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
sys.modules['fake_settings_module'] = settings_module
try:
s = Settings('fake_settings_module')
self.assertTrue(s.is_overridden('SECRET_KEY'))
self.assertFalse(s.is_overridden('TEMPLATE_LOADERS'))
finally:
del sys.modules['fake_settings_module']
def test_override(self):
self.assertFalse(settings.is_overridden('TEMPLATE_LOADERS'))
with override_settings(TEMPLATE_LOADERS=[]):
self.assertTrue(settings.is_overridden('TEMPLATE_LOADERS'))
|
[
"django.test.signals.COMPLEX_OVERRIDE_SETTINGS.add",
"warnings.simplefilter",
"django.test.signals.COMPLEX_OVERRIDE_SETTINGS.copy",
"django.test.modify_settings",
"django.conf.Settings",
"django.test.signals.setting_changed.connect",
"django.http.HttpRequest",
"django.conf.settings.is_overridden",
"types.ModuleType",
"warnings.catch_warnings",
"django.utils.six.assertRaisesRegex",
"django.conf.LazySettings",
"os.path.splitext",
"django.test.signals.setting_changed.disconnect",
"django.test.override_settings"
] |
[((385, 470), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e']}"}), "(ITEMS={'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e']}\n )\n", (400, 470), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((481, 586), 'django.test.override_settings', 'override_settings', ([], {'ITEMS': "['a', 'c', 'e']", 'ITEMS_OUTER': '[1, 2, 3]', 'TEST': '"""override"""', 'TEST_OUTER': '"""outer"""'}), "(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3], TEST=\n 'override', TEST_OUTER='outer')\n", (498, 586), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((2365, 2450), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e']}"}), "(ITEMS={'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e']}\n )\n", (2380, 2450), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((2461, 2518), 'django.test.override_settings', 'override_settings', ([], {'ITEMS': "['a', 'c', 'e']", 'TEST': '"""override"""'}), "(ITEMS=['a', 'c', 'e'], TEST='override')\n", (2478, 2518), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((3225, 3259), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (3242, 3259), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((3893, 3936), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': 'mother'}"}), "(ITEMS={'append': 'mother'})\n", (3908, 3936), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((3938, 3997), 'django.test.override_settings', 'override_settings', ([], {'ITEMS': "['father']", 'TEST': '"""override-parent"""'}), "(ITEMS=['father'], TEST='override-parent')\n", (3955, 3997), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((4051, 4095), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': ['child']}"}), "(ITEMS={'append': ['child']})\n", (4066, 4095), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((4097, 4137), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override-child"""'}), "(TEST='override-child')\n", (4114, 4137), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((948, 1038), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': ['e', 'f'], 'prepend': ['a'], 'remove': ['d', 'c']}"}), "(ITEMS={'append': ['e', 'f'], 'prepend': ['a'], 'remove': [\n 'd', 'c']})\n", (963, 1038), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((1241, 1330), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': ['b'], 'prepend': ['d'], 'remove': ['a', 'c', 'e']}"}), "(ITEMS={'append': ['b'], 'prepend': ['d'], 'remove': ['a',\n 'c', 'e']})\n", (1256, 1330), False, 'from django.test import SimpleTestCase, 
TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((1469, 1538), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': 'e', 'prepend': 'a', 'remove': 'c'}"}), "(ITEMS={'append': 'e', 'prepend': 'a', 'remove': 'c'})\n", (1484, 1538), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((1692, 1737), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'remove': ['b', 'd']}"}), "(ITEMS={'remove': ['b', 'd']})\n", (1707, 1737), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((1743, 1801), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': ['b'], 'prepend': ['d']}"}), "(ITEMS={'append': ['b'], 'prepend': ['d']})\n", (1758, 1801), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((1924, 1959), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override2"""'}), "(TEST='override2')\n", (1941, 1959), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((2709, 2778), 'django.test.modify_settings', 'modify_settings', ([], {'ITEMS': "{'append': 'e', 'prepend': 'a', 'remove': 'c'}"}), "(ITEMS={'append': 'e', 'prepend': 'a', 'remove': 'c'})\n", (2724, 2778), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((2815, 2850), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override2"""'}), "(TEST='override2')\n", (2832, 2850), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((3406, 3441), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override2"""'}), "(TEST='override2')\n", (3423, 3441), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((5673, 5707), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (5690, 5707), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((7129, 7163), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (7146, 7163), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((4465, 4518), 'django.test.signals.setting_changed.connect', 'signals.setting_changed.connect', (['self.signal_callback'], {}), '(self.signal_callback)\n', (4496, 4518), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((4552, 4608), 'django.test.signals.setting_changed.disconnect', 'signals.setting_changed.disconnect', (['self.signal_callback'], {}), '(self.signal_callback)\n', (4586, 4608), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((5915, 5949), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (5932, 5949), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, 
signals\n'), ((8675, 8710), 'django.test.override_settings', 'override_settings', ([], {'TEST2': '"""override"""'}), "(TEST2='override')\n", (8692, 8710), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((9784, 9824), 'django.test.signals.COMPLEX_OVERRIDE_SETTINGS.copy', 'signals.COMPLEX_OVERRIDE_SETTINGS.copy', ([], {}), '()\n', (9822, 9824), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((9833, 9883), 'django.test.signals.COMPLEX_OVERRIDE_SETTINGS.add', 'signals.COMPLEX_OVERRIDE_SETTINGS.add', (['"""TEST_WARN"""'], {}), "('TEST_WARN')\n", (9870, 9883), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((14106, 14119), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (14117, 14119), False, 'from django.http import HttpRequest\n'), ((14318, 14331), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (14329, 14331), False, 'from django.http import HttpRequest\n'), ((14533, 14546), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (14544, 14546), False, 'from django.http import HttpRequest\n'), ((14809, 14822), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (14820, 14822), False, 'from django.http import HttpRequest\n'), ((15005, 15019), 'django.conf.LazySettings', 'LazySettings', ([], {}), '()\n', (15017, 15019), False, 'from django.conf import LazySettings, Settings, settings\n'), ((15168, 15202), 'types.ModuleType', 'ModuleType', (['"""fake_settings_module"""'], {}), "('fake_settings_module')\n", (15178, 15202), False, 'from types import ModuleType\n'), ((6483, 6517), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (6500, 6517), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((6666, 6753), 'django.utils.six.assertRaisesRegex', 'six.assertRaisesRegex', (['self', 'Exception', '"""Only subclasses of Django SimpleTestCase*"""'], {}), "(self, Exception,\n 'Only subclasses of Django SimpleTestCase*')\n", (6687, 6753), False, 'from django.utils import six\n'), ((8724, 8758), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (8741, 8758), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((10160, 10196), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (10183, 10196), False, 'import warnings\n'), ((10215, 10246), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (10236, 10246), False, 'import warnings\n'), ((15337, 15369), 'django.conf.Settings', 'Settings', (['"""fake_settings_module"""'], {}), "('fake_settings_module')\n", (15345, 15369), False, 'from django.conf import LazySettings, Settings, settings\n'), ((15620, 15662), 'django.conf.settings.is_overridden', 'settings.is_overridden', (['"""TEMPLATE_LOADERS"""'], {}), "('TEMPLATE_LOADERS')\n", (15642, 15662), False, 'from django.conf import LazySettings, Settings, settings\n'), ((15677, 15715), 'django.test.override_settings', 'override_settings', ([], {'TEMPLATE_LOADERS': '[]'}), '(TEMPLATE_LOADERS=[])\n', (15694, 15715), False, 'from django.test import SimpleTestCase, TransactionTestCase, 
TestCase, modify_settings, override_settings, signals\n'), ((6791, 6825), 'django.test.override_settings', 'override_settings', ([], {'TEST': '"""override"""'}), "(TEST='override')\n", (6808, 6825), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((10265, 10304), 'django.test.override_settings', 'override_settings', ([], {'TEST_WARN': '"""override"""'}), "(TEST_WARN='override')\n", (10282, 10304), False, 'from django.test import SimpleTestCase, TransactionTestCase, TestCase, modify_settings, override_settings, signals\n'), ((15745, 15787), 'django.conf.settings.is_overridden', 'settings.is_overridden', (['"""TEMPLATE_LOADERS"""'], {}), "('TEMPLATE_LOADERS')\n", (15767, 15787), False, 'from django.conf import LazySettings, Settings, settings\n'), ((10516, 10547), 'os.path.splitext', 'os.path.splitext', (['w[0].filename'], {}), '(w[0].filename)\n', (10532, 10547), False, 'import os\n'), ((10581, 10607), 'os.path.splitext', 'os.path.splitext', (['__file__'], {}), '(__file__)\n', (10597, 10607), False, 'import os\n')]
|
from django import forms
NIVEL_CHOICES =(
("Elementary", "Elementary"),
("Intermediate", "Intermediate"),
("Advanced", "Advanced")
)
class LogInForm(forms.Form):
username = forms.CharField(label="Username")
password = forms.CharField(label="Password")
class RegisterForm(forms.Form):
username = forms.CharField(label="Username")
email = forms.EmailField(label="Email")
password = forms.CharField(label="Password")
confirmation = forms.CharField(label="Confirm Password")
class ProfileForm(forms.Form):
first_name = forms.CharField(label="First Name", required=False)
last_name = forms.CharField(label="Last Name", required=False)
native_lan = forms.CharField(label="Native Language", required=False)
foto = forms.ImageField(required=False)
pais = forms.CharField(label="Country", required=False)
franja = forms.CharField(label="Time Zone", required=False)
nivel = forms.ChoiceField(choices = NIVEL_CHOICES, label="Level", required=False)
study_lan = forms.CharField(label="Study Language", required=False)
phone = forms.CharField(label="Phone", required=False)
|
[
"django.forms.ChoiceField",
"django.forms.CharField",
"django.forms.EmailField",
"django.forms.ImageField"
] |
[((191, 224), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Username"""'}), "(label='Username')\n", (206, 224), False, 'from django import forms\n'), ((240, 273), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Password"""'}), "(label='Password')\n", (255, 273), False, 'from django import forms\n'), ((322, 355), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Username"""'}), "(label='Username')\n", (337, 355), False, 'from django import forms\n'), ((368, 399), 'django.forms.EmailField', 'forms.EmailField', ([], {'label': '"""Email"""'}), "(label='Email')\n", (384, 399), False, 'from django import forms\n'), ((415, 448), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Password"""'}), "(label='Password')\n", (430, 448), False, 'from django import forms\n'), ((468, 509), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Confirm Password"""'}), "(label='Confirm Password')\n", (483, 509), False, 'from django import forms\n'), ((559, 610), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""First Name"""', 'required': '(False)'}), "(label='First Name', required=False)\n", (574, 610), False, 'from django import forms\n'), ((627, 677), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Last Name"""', 'required': '(False)'}), "(label='Last Name', required=False)\n", (642, 677), False, 'from django import forms\n'), ((695, 751), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Native Language"""', 'required': '(False)'}), "(label='Native Language', required=False)\n", (710, 751), False, 'from django import forms\n'), ((763, 795), 'django.forms.ImageField', 'forms.ImageField', ([], {'required': '(False)'}), '(required=False)\n', (779, 795), False, 'from django import forms\n'), ((807, 855), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Country"""', 'required': '(False)'}), "(label='Country', required=False)\n", (822, 855), False, 'from django import forms\n'), ((869, 919), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Time Zone"""', 'required': '(False)'}), "(label='Time Zone', required=False)\n", (884, 919), False, 'from django import forms\n'), ((932, 1003), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'NIVEL_CHOICES', 'label': '"""Level"""', 'required': '(False)'}), "(choices=NIVEL_CHOICES, label='Level', required=False)\n", (949, 1003), False, 'from django import forms\n'), ((1022, 1077), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Study Language"""', 'required': '(False)'}), "(label='Study Language', required=False)\n", (1037, 1077), False, 'from django import forms\n'), ((1090, 1136), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Phone"""', 'required': '(False)'}), "(label='Phone', required=False)\n", (1105, 1136), False, 'from django import forms\n')]
|
import zmq
import logging
import socket
import json
import sys
import pickle
from logging.handlers import RotatingFileHandler
from os import path
from os import makedirs
from datetime import datetime
# from logging.handlers import RotatingFileHandler
try:
from fusionlog_config import init
except Exception as e:
print(f'failed to load init from config: {e}')
sys.exit(-1)
try:
from interval_logger import (calc_interval_from_timestamp, get_interval_file_handle, update_interval_file_handle)
except Exception as e:
print(f'failed to import from interval_logger: {e}')
sys.exit(-1)
data_files = dict() # should contain file_handle, current_interval, log_dir
def main():
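    # Subscribe to every topic on the configured zeromq socket and append each
    # received sample to a per-sensor, interval-rotated log file.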
config = init()
print('msb_fusionlog.py starting up')
connect_to = f'{config["ipc_protocol"]}:{config["ipc_port"]}'
print(f'trying to bind zmq to {connect_to}')
ctx = zmq.Context()
zmq_socket = ctx.socket(zmq.SUB)
try:
zmq_socket.connect(connect_to)
except Exception as e:
logging.fatal(f'failed to bind to zeromq socket: {e}')
sys.exit(-1)
# let fusionlog subscribe to all available data
zmq_socket.setsockopt(zmq.SUBSCRIBE, b'')
print('successfully bound to zeroMQ receiver socket as subscriber')
# create new logger instance
data_dir = config['base_data_dir']
# data_file_prefix = f'{socket.gethostname()}'
if not path.exists(data_dir):
try:
makedirs(data_dir, exist_ok=True)
except Exception as e:
logging.fatal(f'failed to create log file dir: {data_dir}: {e}')
sys.exit(-1)
print(f'saving data to {data_dir}')
print(f'entering endless loop')
while True:
# recv = zmq_socket.recv_pyobj()
# [topic, data] = socket.recv_multipart()
try:
(id, payload) = zmq_socket.recv_multipart()
except Exception as e:
print(f'failed to receive message: {e}')
continue
id = id.decode('utf-8')
try:
payload = pickle.loads(payload)
except Exception as e:
print(f'failed to load pickle message, skipping: {e}')
continue
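        # each payload is expected to be a single-entry dict mapping a topic to its data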
topic, data = [(t, d) for (t, d) in payload.items()][0]
# check if a file handle exists for the given id
if id not in data_files:
print(f'{id} not in data_files, creating...')
# create empty dict
data_files[id] = dict()
data_files[id]['log_dir'] = path.join(config['base_data_dir'], id)
if not path.exists(data_files[id]['log_dir']):
try:
makedirs(data_files[id]['log_dir'], exist_ok=True)
except Exception as e:
print(f'failed to create log file dir: {data_files[id]["log_dir"]}: {e}')
sys.exit(-1)
data_files[id]['current_interval'] = calc_interval_from_timestamp(
data[0],
dt_interval=config['logfile_interval']
)
data_files[id]['file_handle'] = get_interval_file_handle(
interval = data_files[id]['current_interval'],
log_file_prefix = id,
log_dir = data_files[id]['log_dir'],
)
print(f'created data_file for {id}: {data_files[id]}')
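        # data[0] is the sample timestamp; once it reaches the current interval
        # boundary, roll over to a fresh log file handle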
if data[0] >= data_files[id]['current_interval']:
data_files[id]['current_interval'], data_files[id]['file_handle'] = update_interval_file_handle(
current_interval = data_files[id]['current_interval'],
current_file_handle = data_files[id]['file_handle'],
log_file_prefix = id,
log_dir = data_files[id]['log_dir']
)
data_files[id]['file_handle'].write(f'{json.dumps({topic : data})}\n')
# id is the motion sensor box id
# data is a dictionary where the key denotes the type of data and
if config['print']:
print(f'{id} : {topic} : {data}')
if __name__ == '__main__':
main()
|
[
"pickle.loads",
"os.makedirs",
"fusionlog_config.init",
"interval_logger.calc_interval_from_timestamp",
"os.path.exists",
"interval_logger.update_interval_file_handle",
"json.dumps",
"logging.fatal",
"interval_logger.get_interval_file_handle",
"os.path.join",
"sys.exit",
"zmq.Context"
] |
[((726, 732), 'fusionlog_config.init', 'init', ([], {}), '()\n', (730, 732), False, 'from fusionlog_config import init\n'), ((904, 917), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (915, 917), False, 'import zmq\n'), ((388, 400), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (396, 400), False, 'import sys\n'), ((609, 621), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (617, 621), False, 'import sys\n'), ((1427, 1448), 'os.path.exists', 'path.exists', (['data_dir'], {}), '(data_dir)\n', (1438, 1448), False, 'from os import path\n'), ((1039, 1093), 'logging.fatal', 'logging.fatal', (['f"""failed to bind to zeromq socket: {e}"""'], {}), "(f'failed to bind to zeromq socket: {e}')\n", (1052, 1093), False, 'import logging\n'), ((1102, 1114), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1110, 1114), False, 'import sys\n'), ((1475, 1508), 'os.makedirs', 'makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (1483, 1508), False, 'from os import makedirs\n'), ((2076, 2097), 'pickle.loads', 'pickle.loads', (['payload'], {}), '(payload)\n', (2088, 2097), False, 'import pickle\n'), ((2542, 2580), 'os.path.join', 'path.join', (["config['base_data_dir']", 'id'], {}), "(config['base_data_dir'], id)\n", (2551, 2580), False, 'from os import path\n'), ((2973, 3050), 'interval_logger.calc_interval_from_timestamp', 'calc_interval_from_timestamp', (['data[0]'], {'dt_interval': "config['logfile_interval']"}), "(data[0], dt_interval=config['logfile_interval'])\n", (3001, 3050), False, 'from interval_logger import calc_interval_from_timestamp, get_interval_file_handle, update_interval_file_handle\n'), ((3142, 3270), 'interval_logger.get_interval_file_handle', 'get_interval_file_handle', ([], {'interval': "data_files[id]['current_interval']", 'log_file_prefix': 'id', 'log_dir': "data_files[id]['log_dir']"}), "(interval=data_files[id]['current_interval'],\n log_file_prefix=id, log_dir=data_files[id]['log_dir'])\n", (3166, 3270), False, 'from interval_logger import calc_interval_from_timestamp, get_interval_file_handle, update_interval_file_handle\n'), ((3544, 3739), 'interval_logger.update_interval_file_handle', 'update_interval_file_handle', ([], {'current_interval': "data_files[id]['current_interval']", 'current_file_handle': "data_files[id]['file_handle']", 'log_file_prefix': 'id', 'log_dir': "data_files[id]['log_dir']"}), "(current_interval=data_files[id][\n 'current_interval'], current_file_handle=data_files[id]['file_handle'],\n log_file_prefix=id, log_dir=data_files[id]['log_dir'])\n", (3571, 3739), False, 'from interval_logger import calc_interval_from_timestamp, get_interval_file_handle, update_interval_file_handle\n'), ((1552, 1616), 'logging.fatal', 'logging.fatal', (['f"""failed to create log file dir: {data_dir}: {e}"""'], {}), "(f'failed to create log file dir: {data_dir}: {e}')\n", (1565, 1616), False, 'import logging\n'), ((1629, 1641), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1637, 1641), False, 'import sys\n'), ((2613, 2651), 'os.path.exists', 'path.exists', (["data_files[id]['log_dir']"], {}), "(data_files[id]['log_dir'])\n", (2624, 2651), False, 'from os import path\n'), ((2694, 2744), 'os.makedirs', 'makedirs', (["data_files[id]['log_dir']"], {'exist_ok': '(True)'}), "(data_files[id]['log_dir'], exist_ok=True)\n", (2702, 2744), False, 'from os import makedirs\n'), ((3873, 3898), 'json.dumps', 'json.dumps', (['{topic: data}'], {}), '({topic: data})\n', (3883, 3898), False, 'import json\n'), ((2898, 2910), 'sys.exit', 'sys.exit', (['(-1)'], 
{}), '(-1)\n', (2906, 2910), False, 'import sys\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
import logging
import os
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.models.ema import build_ema
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from fairseq.utils import safe_hasattr
from omegaconf import OmegaConf
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):
if isinstance(cfg, Namespace):
logger.warning(
"argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf"
)
cfg = convert_namespace_to_omegaconf(cfg)
self.cfg = cfg
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.tpu = cfg.common.tpu
self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu
if self.cuda:
self.device = torch.device("cuda")
elif self.tpu:
self.device = utils.get_tpu_device()
else:
self.device = torch.device("cpu")
if self.is_fsdp:
import fairscale
if self.cfg.common.bf16:
raise ValueError(
"FullyShardedDataParallel is not compatible with --bf16 or "
"--memory-efficient-bf16"
)
if self.cfg.distributed_training.zero_sharding != "none":
raise ValueError(
"FullyShardedDataParallel is not compatible with --zero-sharding "
"option (it's already built in)"
)
if (
max(self.cfg.optimization.update_freq) > 1
and fairscale.__version__ < "0.4.0"
):
raise RuntimeError(
"Please update to fairscale 0.4.0 or newer when combining "
"--update-freq with FullyShardedDataParallel"
)
else:
if (
hasattr(self.cfg.distributed_training, "cpu_offload")
and self.cfg.distributed_training.cpu_offload
):
raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded")
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if not self.is_fsdp:
if cfg.common.fp16:
assert not cfg.common.amp, "Cannot use fp16 and AMP together"
self._criterion = self._criterion.half()
self._model = self._model.half()
elif cfg.common.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
elif cfg.common.amp:
self._amp_retries = 0
if (
not cfg.distributed_training.pipeline_model_parallel
# the DistributedFairseqModel wrapper will handle moving to device,
# so only handle cases which don't use the wrapper
and not self.use_distributed_wrapper
):
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel
self.last_device = None
if self.cuda and self.pipeline_model_parallel:
self.last_device = torch.device(
cfg.distributed_training.pipeline_devices[-1]
)
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
"detected shared parameter: {} <- {}".format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = None # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
self._ema = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
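            # per-rank buffer used to gather gradient norms from all data-parallel
            # workers (e.g. for cross-worker consistency checks)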
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(
self.cuda_env, group=distributed_utils.get_global_group()
)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_process_group(self):
return distributed_utils.get_data_parallel_group()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
@property
def is_data_parallel_master(self):
# NOTE: this returns true for all model parallel replicas with data
# parallel rank 0
return self.data_parallel_rank == 0
@property
def use_distributed_wrapper(self) -> bool:
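        """Whether the model/criterion should be wrapped in DistributedFairseqModel."""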
return (
self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf
) or (self.is_fsdp and self.cfg.distributed_training.cpu_offload)
@property
def should_save_checkpoint_on_current_rank(self) -> bool:
"""Indicates whether to save checkpoints on the current DDP rank."""
if (
self.is_fsdp and self.cfg.distributed_training.use_sharded_state
) or getattr(self.cfg.model, "base_layers", 0) > 0:
return True
else:
return self.is_data_parallel_master
@property
def always_call_state_dict_during_save_checkpoint(self) -> bool:
if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state:
# FSDP calls communication collective when consolidating checkpoints
return True
else:
return False
@property
def checkpoint_suffix(self) -> str:
"""Suffix to add to the checkpoint file name."""
if self.is_fsdp and self.cfg.distributed_training.use_sharded_state:
return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format(
self.data_parallel_rank
)
else:
return self.cfg.checkpoint.checkpoint_suffix or ""
@property
def criterion(self):
if self._wrapped_criterion is None:
if utils.has_parameters(self._criterion) and self.use_distributed_wrapper:
self._wrapped_criterion = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._criterion,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if self.use_distributed_wrapper:
self._wrapped_model = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._model,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def ema(self):
if self._ema is None:
self._build_ema()
return self._ema
def _build_ema(self):
if self.cfg.ema.store_ema:
self._ema = build_ema(self._model, self.cfg.ema, self.device)
logger.info("Exponential Moving Average Shadow Model is initialized.")
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.is_fsdp and self.cfg.common.fp16:
# FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper,
# mostly for the grad scaling. But if we don't have the
# --memory-efficient-fp16 flag set, then we're effectively doing
# regular --fp16 and can allow the use of optimizers that would
# otherwise be unsupported by MemoryEfficientFP16Optimizer.
allow_unsupported = not self.cfg.common.memory_efficient_fp16
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params, allow_unsupported=allow_unsupported
)
elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16 or --amp, "
"please switch to FP32 which is likely to be faster"
)
if (
self.cfg.common.memory_efficient_fp16
or self.cfg.common.memory_efficient_bf16
):
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params
)
elif self.cfg.common.amp:
self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info(
"NOTE: your device may support faster training with --fp16 or --amp"
)
self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
if self.is_fsdp:
assert (
not self.cfg.optimization.use_bmuf
), "--ddp-backend=fully_sharded is not compatible with BMUF"
assert self._optimizer.supports_flat_params, (
"--ddp-backend=fully_sharded is only compatible with pointwise "
"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). "
"However, the sharding will result in slightly different results when "
"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)"
)
if self.cfg.optimization.use_bmuf:
self._optimizer = optim.FairseqBMUF(
self.cfg.bmuf,
self._optimizer,
)
if self.cfg.distributed_training.zero_sharding == "os":
if (
self.cfg.common.fp16
and not self.cfg.common.memory_efficient_fp16
and not self.cfg.common.memory_efficient_bf16
) and not self.cfg.common.fp16_no_flatten_grads:
raise ValueError(
"ZeRO is incomptabile with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(self._optimizer, self.data_parallel_process_group)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(
self.cfg.lr_scheduler,
self.optimizer,
)
self._lr_scheduler.step_update(0)
@property
def is_fsdp(self):
return self.cfg.distributed_training.ddp_backend == "fully_sharded"
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
if self.cfg.checkpoint.no_save_optimizer_state:
return
self._gathered_optim_state = None
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
elif self.is_fsdp and not self.model.use_sharded_state:
st = self.model.gather_full_optim_state_dict(
self.optimizer
) # only returns on rank 0
self._gathered_optim_state = st
def state_dict(self):
state_dict = {
"args": None, # legacy
"cfg": (
OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True)
if OmegaConf.is_config(self.cfg)
else self.cfg
),
"model": self.model.state_dict(),
"criterion": (
self.criterion.state_dict()
if utils.has_parameters(self.criterion)
else None
),
"optimizer_history": (self._optim_history or [])
+ [
{
"criterion_name": self.get_criterion().__class__.__name__,
"optimizer_name": self.optimizer.__class__.__name__,
"lr_scheduler_state": self.lr_scheduler.state_dict(),
"num_updates": self.get_num_updates(),
}
],
"task_state": self.task.state_dict() if self.task is not None else {},
"extra_state": {
"metrics": metrics.state_dict(),
"previous_training_time": self.cumulative_training_time(),
},
}
if self.cfg.ema.store_ema:
# Save EMA model state as extra state
state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict()
if self.cfg.ema.ema_fp32:
# Save EMA params in fp32
state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params
if not self.cfg.checkpoint.no_save_optimizer_state:
if self._gathered_optim_state is not None:
state_dict["last_optimizer_state"] = self._gathered_optim_state
self._gathered_optim_state = None
else:
state_dict["last_optimizer_state"] = self.optimizer.state_dict()
if self.is_fsdp:
# save meta data for recombining checkpoint upon loading
state_dict["fsdp_metadata"] = self.model.local_metadata_dict()
return state_dict
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
logger.info(f"Saving checkpoint to {os.path.abspath(filename)}")
# call state_dict on all ranks in case it needs internal communication
state_dict = utils.move_to_cpu(self.state_dict())
state_dict["extra_state"].update(extra_state)
if self.should_save_checkpoint_on_current_rank:
checkpoint_utils.torch_persistent_save(
state_dict,
filename,
async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,
)
logger.info(f"Finished saving checkpoint to {os.path.abspath(filename)}")
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""
Load all training state from a checkpoint file.
rank = 0 will load the checkpoint, and then broadcast it to all
other ranks.
"""
extra_state, self._optim_history, last_optim_state = None, [], None
logger.info(f"Preparing to load checkpoint {filename}")
is_distributed = self.data_parallel_world_size > 1
bexists = PathManager.isfile(filename)
if bexists:
load_on_all_ranks = (
self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks
# TPUs don't support broadcast yet, so load checkpoints
# on every worker for now
or self.tpu
# FSDP requires loading checkpoint shards on all ranks
or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state)
or getattr(self.cfg.model, "base_layers", 0) > 0
)
if load_on_all_ranks or self.data_parallel_rank == 0:
state = checkpoint_utils.load_checkpoint_to_cpu(
filename, load_on_all_ranks=load_on_all_ranks
)
last_optim_state = state.get("last_optimizer_state", None)
# If doing zero_sharding, do not broadcast global optimizer
# state. Later we will broadcast sharded states to each rank
# to avoid memory from exploding.
if (
not load_on_all_ranks
and self.cfg.distributed_training.zero_sharding == "os"
and "last_optimizer_state" in state
and is_distributed
):
state["last_optimizer_state"] = "SHARDED"
else:
last_optim_state = None
state = None
if is_distributed and not load_on_all_ranks:
state = distributed_utils.broadcast_object(
state,
src_rank=0,
group=self.data_parallel_process_group,
dist_device=self.device,
)
if self.data_parallel_rank > 0:
last_optim_state = state.get("last_optimizer_state", None)
# load model parameters
try:
if (
"optimizer_history" in state
and len(state["optimizer_history"]) > 0
and "num_updates" in state["optimizer_history"][-1]
):
self.model.set_num_updates(
state["optimizer_history"][-1]["num_updates"]
)
                # this is the code related to AdaPrune
                # In short, it removes redundant heads in the multi-head attention module based on the provided head-importance scores.
                # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI
                # The idea of pruning in mha can be summarized as:
                # Fine-tune the model (e.g. a roberta encoder) on a certain dataset with regularization.
                # After the model is trained, the user can use the _get_reserve_head_index and _adaptive_prune_heads functions to get the top X heads with the most importance.
                # The user then uses this ranking to prune a new roberta encoder and saves the pruned ckpt manually.
                # The user will fine-tune the new roberta encoder via the ckpt saved above.
                # To avoid registering a different pruned version of Roberta, the argument --mha-heads-to-keep prunes the Roberta model into a pruned version which matches the pruned ckpt.
if (
safe_hasattr(self.model, "args")
and safe_hasattr(self.model.args, "mha_heads_to_keep")
and self.model.args.mha_heads_to_keep != -1
):
logger.info(
f"Prune model: keep {self.model.args.mha_heads_to_keep} heads for each multihead attention module"
)
for layer in self.model.encoder.sentence_encoder.layers:
reserve_head_index = layer.self_attn._get_reserve_head_index(
num_heads_to_keep=self.model.args.mha_heads_to_keep
)
layer.self_attn._adaptive_prune_heads(
reserve_head_index=reserve_head_index
)
layer.self_attn._set_skip_embed_dim_check()
logger.info(self.model)
                # this is the code related to AdaPrune
                # In short, it removes redundant units in the feedforward layer of each transformer layer based on their importance.
                # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI
                # The idea of pruning in ffn can be summarized as:
                # Fine-tune the model (e.g. a roberta encoder) on a certain dataset with regularization.
                # After the model is trained, the user can use the _get_fc_rank and _prune_fc_layer functions to get the top X units with the most importance.
                # The user then uses this ranking to prune a new roberta encoder and saves the pruned ckpt manually.
                # The user will fine-tune the new roberta encoder via the ckpt saved above.
                # To avoid registering a different pruned version of Roberta, the argument --ffn-blocks-to-remove prunes the Roberta model into a pruned version which matches the pruned ckpt.
if (
safe_hasattr(self.model, "args")
and safe_hasattr(self.model.args, "ffn_blocks_to_remove")
and self.model.args.ffn_blocks_to_remove != -1
):
logger.info(
f"Prune model: remove {self.model.args.ffn_blocks_to_remove} ffn blocks for each transformer layer"
)
for layer in self.model.encoder.sentence_encoder.layers:
remove_index = layer._get_fc_rank(
remove_num=self.model.args.ffn_blocks_to_remove
)
layer._prune_fc_layer(remove_index=remove_index)
logger.info(self.model)
self.model.load_state_dict(
state["model"], strict=True, model_cfg=self.cfg.model
)
# save memory for later steps
del state["model"]
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
del state["criterion"]
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}"
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}"
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
if self.is_fsdp and not self.model.use_sharded_state:
# if use_sharded_state, the last_optim_state is already sharded, skip this
last_optim_state = self.model.get_shard_from_optim_state_dict(
last_optim_state
)
elif not load_on_all_ranks and is_distributed:
last_optim_state = self.optimizer.broadcast_global_state_dict(
last_optim_state
)
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
itr_state = extra_state["train_iterator"]
epoch = itr_state["epoch"]
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if (
itr_state.get("version", 1) >= 2
and itr_state["iterations_in_epoch"] == 0
):
# reset meters at start of epoch
reset_meters = True
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
if self.cfg.ema.store_ema:
if "ema" not in extra_state:
                    logger.warning(
"EMA not found in checkpoint. But store_ema is True. "
"EMA is re-initialized from checkpoint."
)
self.ema.restore(
state["model"], build_fp32_params=self.cfg.ema.ema_fp32
)
else:
logger.info("Loading EMA from checkpoint")
self.ema.restore(extra_state["ema"], build_fp32_params=False)
if self.cfg.ema.ema_fp32:
if "ema_fp32_params" in extra_state:
logger.info("Loading EMA fp32 params from checkpoint")
self.ema.build_fp32_params(extra_state["ema_fp32_params"])
else:
logger.info(
"Building EMA fp32 params from EMA model in checkpoint"
)
self.ema.build_fp32_params()
logger.info(
"Loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
else:
logger.info("No existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.cfg.dataset.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
tpu=self.tpu,
)
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.train_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.cfg.dataset.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=(self.cfg.common.seed + epoch)
if self.cfg.dataset.update_ordered_indices_seed
else self.cfg.common.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.cfg.dataset.num_workers,
epoch=epoch,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=self.cfg.optimization.skip_remainder_batch,
grouped_shuffling=self.cfg.dataset.grouped_shuffling,
update_epoch_batch_itr=self.cfg.dataset.update_epoch_batch_itr,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.cfg.dataset.max_tokens_valid,
max_sentences=self.cfg.dataset.batch_size_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
# always pass a fixed "epoch" to keep validation data consistent
# across training epochs
epoch=1,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=False,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
def begin_valid_epoch(self, epoch):
"""Called at the beginning of each validation epoch."""
# task specific setup per validation epoch
self.task.begin_valid_epoch(epoch, self.get_model())
def reset_dummy_batch(self, batch):
self._dummy_batch = batch
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# If EMA is enabled through store_ema=True
# and task.uses_ema is True, pass the EMA model as a keyword
# argument to the task.
extra_kwargs = {}
if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False):
extra_kwargs["ema_model"] = self.ema.get_model()
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
for i, sample in enumerate(samples): # delayed update loop
sample, is_dummy_batch = self._prepare_sample(sample)
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
# The no_sync context manager results in increased memory
# usage with FSDP, since full-size gradients will be
# accumulated on each GPU. It's typically a better tradeoff
# to do the extra communication with FSDP.
and not self.is_fsdp
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
**extra_kwargs,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.cfg.distributed_training.distributed_world_size == 1:
return None
else:
raise e
except Exception:
self.consolidate_optimizer()
self.save_checkpoint(
os.path.join(self.cfg.checkpoint.save_dir, "crash.pt"), {}
)
raise
if self.tpu and i < len(samples) - 1:
# tpu-comment: every XLA operation before marking step is
# appended to the IR graph, and processing too many batches
# before marking step can lead to OOM errors.
# To handle gradient accumulation use case, we explicitly
# mark step here for every forward pass without a backward pass
self._xla_markstep_and_send_to_cpu()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
(
logging_outputs,
(
sample_size,
ooms,
total_train_time,
),
) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch
)
self._cumulative_training_time = (
total_train_time / self.data_parallel_world_size
)
overflow = False
try:
with torch.autograd.profiler.record_function("reduce-grads"):
# reduce gradients across workers
self.optimizer.all_reduce_grads(self.model)
if utils.has_parameters(self.criterion):
self.optimizer.all_reduce_grads(self.criterion)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (data_parallel_size / sample_size) since
# DDP normalizes by the number of data parallel workers for
# improved fp16 precision.
# Thus we get (sum_of_gradients / sample_size) at the end.
# In case of fp16, this step also undoes loss scaling.
# (Debugging note: Some optimizers perform this scaling on the
# fly, so inspecting model.parameters() or optimizer.params may
# still show the original, unscaled gradients.)
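                # Worked example (illustrative): with 8 data-parallel workers
                # and an aggregated sample_size of 4096, DDP has already divided
                # the summed gradients by 8, so multiplying by 8 / 4096 yields
                # sum_of_gradients / 4096.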
numer = (
self.data_parallel_world_size
if not self.cfg.optimization.use_bmuf or self._sync_stats()
else 1
)
self.optimizer.multiply_grads(numer / (sample_size or 1.0))
# Note: (sample_size or 1.0) handles the case of a zero gradient, in a
# way that avoids CPU/device transfers in case sample_size is a GPU or
# TPU object. The assumption is that the gradient itself is also 0.
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)
# check that grad norms are consistent across workers
# on tpu check tensor is slow
if not self.tpu:
if (
not self.cfg.optimization.use_bmuf
and self.cfg.distributed_training.ddp_backend != "slowmo"
):
self._check_grad_norms(grad_norm)
if not torch.isfinite(grad_norm).all():
# in case of AMP, if gradients are Nan/Inf then
# optimizer step is still required
if self.cfg.common.amp:
overflow = True
else:
# check local gradnorm single GPU case, trigger NanDetector
raise FloatingPointError("gradients are Nan/Inf")
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.task.optimizer_step(
self.optimizer, model=self.model, update_num=self.get_num_updates()
)
if self.cfg.common.amp and overflow:
if self._amp_retries == self.cfg.common.amp_batch_retries:
logger.info("AMP: skipping this batch.")
self._amp_retries = 0
else:
self._amp_retries += 1
return self.train_step(
samples, raise_oom
) # recursion to feed in same batch
except FloatingPointError:
self.consolidate_optimizer()
self.save_checkpoint(
os.path.join(self.cfg.checkpoint.save_dir, "crash.pt"), {}
)
# re-run the forward and backward pass with hooks attached to print
# out where it fails
self.zero_grad()
with NanDetector(self.get_model()):
for _, sample in enumerate(samples):
sample, _ = self._prepare_sample(sample)
self.task.train_step(
sample,
self.model,
self.criterion,
self.optimizer,
self.get_num_updates(),
ignore_grad=False,
**extra_kwargs,
)
raise
except OverflowError as e:
overflow = True
logger.info(
f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}"
)
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer
# after the step
if hasattr(self.model, "perform_slowmo"):
self.model.perform_slowmo(
self.optimizer.optimizer, getattr(self.optimizer, "fp32_params", None)
)
logging_output = None
if not overflow or self.cfg.distributed_training.ddp_backend == "slowmo":
self.set_num_updates(self.get_num_updates() + 1)
if self.cfg.ema.store_ema:
# Step EMA forward with new model.
self.ema.step(
self.get_model(),
self.get_num_updates(),
)
metrics.log_scalar(
"ema_decay",
self.ema.get_decay(),
priority=10000,
round=5,
weight=0,
)
if self.tpu:
import torch_xla.core.xla_model as xm
# mark step on TPUs
self._xla_markstep_and_send_to_cpu()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.cfg.common.log_interval == 0:
# log memory usage
mem_info = xm.get_memory_info(self.device)
gb_free = mem_info["kb_free"] / 1024 / 1024
gb_total = mem_info["kb_total"] / 1024 / 1024
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
metrics.log_scalar(
"gb_total", gb_total, priority=1600, round=1, weight=0
)
logging_outputs = self._xla_markstep_and_send_to_cpu(
logging_outputs
)
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else:
if self.cuda and self.cuda_env is not None:
# log minimum free memory over the iteration
gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
torch.cuda.reset_peak_memory_stats()
gb_free = self.cuda_env.total_memory_in_GB - gb_used
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.cfg.common.empty_cache_freq > 0
and (
(self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)
% self.cfg.common.empty_cache_freq
)
== 0
):
torch.cuda.empty_cache()
if self.cfg.common.fp16 or self.cfg.common.amp:
metrics.log_scalar(
"loss_scale",
(
self.optimizer.scaler.loss_scale
if self.cfg.common.fp16
else self.optimizer.scaler.get_scale()
),
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("valid_step") # wait for all workers
# If EMA is enabled through store_ema=True
# and task.uses_ema is True, pass the EMA model as a keyword
# argument to the task.
extra_kwargs = {}
if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False):
extra_kwargs["ema_model"] = self.ema.get_model()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample, is_dummy_batch = self._prepare_sample(sample)
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion, **extra_kwargs
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size,) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
# log validation stats
if self.tpu:
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
"""Adjust the learning rate at the beginning of the epoch."""
self.lr_scheduler.step_begin_epoch(epoch)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
if isinstance(new_lr, dict):
for k, v in new_lr.items():
metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300)
new_lr = new_lr.get("default", next(iter(new_lr.values())))
else:
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if "get_meter" not in self._warn_once:
self._warn_once.add("get_meter")
utils.deprecation_warning(
"Trainer.get_meter is deprecated. Please use fairseq.metrics instead."
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_") :]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
def agg_norm_fn(total_norm):
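            # With FSDP each rank holds only a shard of the parameters, so the
            # local norm is squared, summed across the data-parallel group, and
            # the square root of that total is the global gradient norm.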
total_norm = total_norm.cuda().float() ** 2
total_norm = distributed_utils.all_reduce(
total_norm, group=self.data_parallel_process_group
)
return total_norm ** 0.5
should_agg_norm = self.is_fsdp and (
self.data_parallel_process_group is not None
or torch.distributed.is_initialized()
)
return self.optimizer.clip_grad_norm(
clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None
)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _fp_convert_sample(self, sample):
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _prepare_sample(self, sample, is_dummy=False):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
assert (
self._dummy_batch is not None and len(self._dummy_batch) > 0
), "Invalid dummy batch: {}".format(self._dummy_batch)
sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)
return sample, True
# Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth
# it makes sense to do the format conversion on the CPU and then transfer
# a smaller buffer to the device. This also saves GPU memory capacity.
if self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
if self.cuda:
if self.pipeline_model_parallel:
if "target" in sample:
sample["target"] = utils.move_to_cuda(
sample["target"], device=self.last_device
)
else:
sample = utils.move_to_cuda(sample)
elif self.tpu and is_dummy:
# the dummy batch may not be on the appropriate device
sample = utils.move_to_cuda(sample, device=self.device)
if not self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
return sample, False
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.cfg.common.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
        # Return True if stats should be synced across workers on this update:
        # either we are running plain DDP across multiple GPUs, or we are using
        # BMUF and this update is a BMUF sync step (i.e. past the warmup
        # iterations and at a global-sync interval).
if self.data_parallel_world_size == 1:
return False
elif self.cfg.optimization.use_bmuf:
return (
self.get_num_updates() + 1
) % self.cfg.bmuf.global_sync_iter == 0 and (
self.get_num_updates() + 1
) > self.cfg.bmuf.warmup_iterations
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(
zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.cfg.common, "all_gather_list_size", 16384),
group=self.data_parallel_process_group,
)
)
)
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
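                    # dummy batch: contribute zeros of the matching type/shape
                    # so all_reduce_dict below still sees the same keys on
                    # every worker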
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data, device=self.device, group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf, group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
(
torch.isfinite(tensor).all()
and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()
)
or (self.cfg.common.amp and not torch.isfinite(tensor).all())
# in case of amp non-finite grads are fine
)
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(
pretty_detail
)
# use FloatingPointError to trigger NanDetector
raise FloatingPointError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=legacy_ddp. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if grad_norm is not None and (
not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)
):
metrics.log_speed("ups", 1.0, priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.cfg.optimization.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.cfg.optimization.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data("CompileTime")
if compile_stats is None:
return
num_xla_compiles = compile_stats[0]
if num_xla_compiles > self._num_xla_compiles:
logger.warning(
"XLA compilation detected on device #{}; too many of these can lead "
"to slow training, but we expect a few in the beginning".format(
self.cfg.distributed_training.distributed_rank
)
)
self._num_xla_compiles = num_xla_compiles
def _xla_markstep_and_send_to_cpu(self, data=None):
import torch_xla.core.xla_model as xm
xm.mark_step()
if data is not None:
from fairseq.utils import xla_device_to_cpu
return xla_device_to_cpu(data)
def _catalog_shared_params(module, memo=None, prefix=""):
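    """
    Catalog parameters that are shared between modules. On the first call this
    returns a list of groups of parameter paths, one group per parameter object
    that appears under more than one name in *module*.
    """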
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1]
def _get_module_by_path(module, path):
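    """Resolve a dotted attribute path (e.g. "encoder.layers.0") on *module* and return the submodule."""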
path = path.split(".")
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module, path, value):
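    """Set the attribute addressed by a dotted *path* on *module* to *value*."""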
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
from sklearn.utils.sparsefuncs import mean_variance_axis
from scipy.sparse import issparse
from scipy.special import loggamma
import numpy as np
def log_binom(n, k):
"""
log (n choose k)
Parameters
----------
n, k: int
Output
------
log_binom: float
"""
return loggamma(n + 1) - loggamma(k + 1) - loggamma(n - k + 1)
# TODO: add this test
# import numpy as np
# from scipy.special import binom
# n = 10
# k = 3
# abs(np.log(binom(n, k)) - log_binom(n, k)) < 1e-8
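# A minimal sketch of that test (added here for illustration, not part of the
# original module); it assumes scipy.special.binom is an acceptable reference.
def _check_log_binom(n=10, k=3, tol=1e-8):
    from scipy.special import binom
    assert abs(np.log(binom(n, k)) - log_binom(n, k)) < tol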
def weighted_mean_std(X, sample_weight=None, ddof=0):
"""
    Computes the (possibly weighted) mean and standard deviation of each column of a data matrix. It is safe to call this function on either a sparse or a dense matrix.
Parameters
-----------
X: array-like, shape (n_samples, n_features)
The data matrix.
sample_weight: None, array-like shape (n_samples)
The optional sample weights to use.
ddof: int
The divisor used in calculations
is ``TOT_WEIGHT - ddof``, where ``TOT_WEIGHT`` is the total weight.
        If sample_weight is None then TOT_WEIGHT = n_samples. Otherwise the
        weights are rescaled to sum to n_samples, so TOT_WEIGHT = n_samples as well.
Output
------
mean, std
mean: array-like, shape (n_features, )
The weighted mean for each feature.
std: array-like, shape (n_features, )
The weighted standard deviation for each feature.
"""
n_samples = X.shape[0]
# process sample weights
if sample_weight is not None:
_sample_weight = np.array(sample_weight).reshape(-1).astype(X.dtype)
assert len(_sample_weight) == n_samples
# normalize the weights
_sample_weight /= _sample_weight.sum()
_sample_weight *= n_samples
TOT_WEIGHT = _sample_weight.sum()
else:
TOT_WEIGHT = n_samples
_sample_weight = None
# sklearn has this built in for sparse matrices
# TODO: can we find this somewhere for dense?
if issparse(X):
# TODO: handle ddof
MEAN, VAR, SUM_WEIGHTS = \
mean_variance_axis(X=X, axis=0, weights=_sample_weight,
return_sum_weights=True)
VAR *= SUM_WEIGHTS / (TOT_WEIGHT - ddof)
return MEAN, np.sqrt(VAR)
# unweighted, dense case
if sample_weight is None:
return X.mean(axis=0), X.std(axis=0, ddof=ddof)
else: # weighted, dense case
MEAN = X.T @ _sample_weight / TOT_WEIGHT
VAR = ((X - MEAN) ** 2).T @ _sample_weight
VAR = VAR / (TOT_WEIGHT - ddof)
return MEAN, np.sqrt(VAR)
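# Usage sketch (added here for illustration, not part of the original module):
# without weights the function should agree with numpy's own mean/std, and the
# same data can be passed in CSR form.
if __name__ == "__main__":
    from scipy.sparse import csr_matrix
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    mean_d, std_d = weighted_mean_std(X, ddof=1)
    mean_s, std_s = weighted_mean_std(csr_matrix(X), ddof=1)
    assert np.allclose(mean_d, X.mean(axis=0))
    assert np.allclose(std_d, X.std(axis=0, ddof=1))
    assert np.allclose(mean_d, mean_s) and np.allclose(std_d, std_s)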
|
[
"scipy.special.loggamma",
"scipy.sparse.issparse",
"numpy.array",
"sklearn.utils.sparsefuncs.mean_variance_axis",
"numpy.sqrt"
] |
[((1997, 2008), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (2005, 2008), False, 'from scipy.sparse import issparse\n'), ((341, 360), 'scipy.special.loggamma', 'loggamma', (['(n - k + 1)'], {}), '(n - k + 1)\n', (349, 360), False, 'from scipy.special import loggamma\n'), ((2086, 2171), 'sklearn.utils.sparsefuncs.mean_variance_axis', 'mean_variance_axis', ([], {'X': 'X', 'axis': '(0)', 'weights': '_sample_weight', 'return_sum_weights': '(True)'}), '(X=X, axis=0, weights=_sample_weight, return_sum_weights=True\n )\n', (2104, 2171), False, 'from sklearn.utils.sparsefuncs import mean_variance_axis\n'), ((305, 320), 'scipy.special.loggamma', 'loggamma', (['(n + 1)'], {}), '(n + 1)\n', (313, 320), False, 'from scipy.special import loggamma\n'), ((323, 338), 'scipy.special.loggamma', 'loggamma', (['(k + 1)'], {}), '(k + 1)\n', (331, 338), False, 'from scipy.special import loggamma\n'), ((2269, 2281), 'numpy.sqrt', 'np.sqrt', (['VAR'], {}), '(VAR)\n', (2276, 2281), True, 'import numpy as np\n'), ((2595, 2607), 'numpy.sqrt', 'np.sqrt', (['VAR'], {}), '(VAR)\n', (2602, 2607), True, 'import numpy as np\n'), ((1556, 1579), 'numpy.array', 'np.array', (['sample_weight'], {}), '(sample_weight)\n', (1564, 1579), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import datetime
import os
from pyvirtualdisplay import Display
from selenium import webdriver
import constants
# Choose and configure the browser of your choice
def get_browser():
# # These work on Mac
# return webdriver.Chrome()
# return webdriver.Firefox()
# On Linux you need to initialize a display
global display
display = Display(visible=0, size=(1024, 768))
display.start()
return webdriver.Firefox()
# If present and callable, it will be called at the end of the whole test suite
def teardown():
global display
display.stop()
# A failed login by a provider will be retried so many times as set here
MAX_LOGIN_ATTEMPTS = 3
# Multiplies the wait times set in expected values
WAIT_MULTIPLIER = 1
# Minimum wait time
MIN_WAIT = 0
# The host and port where the tested app should listen.
HOST = '127.0.0.1'
PORT = 80
# The host alias set in the /etc/hosts file.
# The actual tests will navigate selenium browser to this host.
# This is necessary because some providers don't support localhost as the
# callback url.
HOST_ALIAS = 'authomatic.org'
# Only frameworks included here will be tested.
INCLUDE_FRAMEWORKS = [
# 'django',
'flask', # Runs with https
# 'pyramid',
]
# Only providers included here will be tested.
INCLUDE_PROVIDERS = [
# OAuth 1.0a
'bitbucket',
'flickr',
'plurk',
'twitter',
'tumblr',
# 'ubuntuone', # UbuntuOne service is no longer available
'vimeo',
# Xero requires creation of a new trial project every month which makes
# the setup of the automated test too laborious to support it.
# 'xero',
'xing',
'yahoo',
# OAuth 2.0
# 'amazon', # Asks for a captcha (cannot be automated)
# 'behance', # doesn't support third party authorization anymore.
'bitly',
'deviantart',
'facebook',
'foursquare',
'google',
'github',
    'linkedin', # Asks for verification when running in Travis CI environment
'paypal',
'reddit',
'vk',
'windowslive',
'yammer',
'yandex',
# OpenID
# 'openid_livejournal', # Login and password elements are not visible.
'openid_verisignlabs',
'openid_wordpress',
'openid_yahoo',
]
# Recommended setup for Travis CI environment.
if os.environ.get('TRAVIS'):
MAX_LOGIN_ATTEMPTS = 20
WAIT_MULTIPLIER = 2
MIN_WAIT = 2
# LinkedIn and WindowsLive include a captcha in the login form
# if a user logs in from an unusual location.
INCLUDE_PROVIDERS = list(set(INCLUDE_PROVIDERS) -
set(['linkedin', 'windowslive']))
def get_browser():
# Eventbrite has problems with the login form on Firefox
return webdriver.Chrome()
def teardown():
pass
# Use these constants if you have the same user info by all tested providers.
EMAIL = '<EMAIL>'
FIRST_NAME = 'Andy'
LAST_NAME = 'Pipkin'
NAME = FIRST_NAME + ' ' + LAST_NAME
USERNAME = 'andypipkin'
USERNAME_REVERSE = 'pipkinandy'
NICKNAME = 'Mr. Pipkin'
BIRTH_YEAR = 1979
BIRTH_MONTH = 11
BIRTH_DAY = 18
BIRTH_DATE = datetime.datetime(BIRTH_YEAR, BIRTH_MONTH, BIRTH_DAY)
CITY = 'London'
COUNTRY = 'Great Britain'
COUNTRY_ISO2 = 'gb'
POSTAL_CODE = 'EC1A1DH'
PHONE = '??????????'
PHONE_INTERNATIONAL = '0044??????????'
GENDER = constants.GENDER_MALE
LOCALE = 'en_UK'
LOCATION = CITY + ', ' + COUNTRY
# Common values for all providers
COMMON = {
# Could be same if the user sets it so
'user_birth_date': BIRTH_DATE,
'user_birth_day': BIRTH_DAY,
'user_birth_month': BIRTH_MONTH,
'user_birth_year': BIRTH_YEAR,
'user_login': EMAIL,
'user_email': EMAIL,
'user_first_name': FIRST_NAME,
'user_last_name': LAST_NAME,
'user_name': NAME,
'user_username': USERNAME,
'user_username_reverse': USERNAME_REVERSE,
'user_nickname': NICKNAME,
'user_birth_year': BIRTH_YEAR,
'user_city': CITY,
'user_country': COUNTRY,
'user_gender': GENDER,
'user_phone': PHONE,
'user_postal_code': POSTAL_CODE,
'user_locale': LOCALE,
'user_location': LOCATION,
# It is not a good idea to have the same password for all providers
# 'user_password': '##########',
# Provider and user specific value
# 'user_id': '',
# 'user_locale': None,
# 'user_timezone': None,
# Provider specific format
# 'user_picture': '',
# 'user_link': '',
# Provider specific value
# 'consumer_key': '',
# 'consumer_secret': '',
}
# Values from COMMON will be overridden by values from PROVIDERS[provider_name]
# if set.
PROVIDERS = {
# OAuth 1.0a
'bitbucket': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': USERNAME,
},
'flickr': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'meetup': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_login': EMAIL,
'user_id': '??????????',
'user_country': COUNTRY_ISO2,
'user_location': '{0}, {1}'.format(CITY, COUNTRY_ISO2),
},
'plurk': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'twitter': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
# Twitter considers selenium login attempts suspicious and occasionally
# asks a security challenge question. This will be used as the answer.
'user_challenge_answer': '??????????',
},
'tumblr': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': USERNAME,
},
'vimeo': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'xero': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
},
'xing': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'yahoo': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
},
# OAuth 2.0
'amazon': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_id': '??????????',
'user_password': '##########',
},
# Behance doesn't support third party authorization anymore.
# 'behance': {
# 'consumer_key': '##########',
# 'consumer_secret': '##########',
# 'user_password': '##########',
# 'user_id': '??????????',
# },
'bitly': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'deviantart': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
},
'eventbrite': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'facebook': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'foursquare': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'google': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'github': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'linkedin': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'paypal': {
'consumer_key': '##########',
'consumer_secret': '##########',
},
'reddit': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_id': '??????????',
},
# Viadeo doesn't support access to its API
# http://dev.viadeo.com/documentation/authentication/request-an-api-key/
# 'viadeo': {
# 'consumer_key': '##########',
# 'consumer_secret': '##########',
# },
'vk': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'windowslive': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'yammer': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
'user_timezone': '??????????', # e.g. 'Pacific Time (US & Canada)'
},
'yandex': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
# OpenID
'openid_livejournal': {
'user_login': USERNAME,
'user_password': '##########',
},
'openid_wordpress': {
'user_login': EMAIL,
# user_username is used in the OpenID identifier
'user_password': '##########',
},
'openid_verisignlabs': {
'user_login': USERNAME,
'user_password': '##########',
},
'openid_yahoo': {
'user_id': 'https://me.yahoo.com/a/???',
'user_login': USERNAME,
'user_password': '##########',
},
}
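# Editor's illustration (not part of the original config): the expected-values
# framework presumably merges COMMON and PROVIDERS so that a provider-specific
# entry overrides the shared default, roughly along these lines:
def _resolved_expected_values(provider_name):
    merged = dict(COMMON)
    merged.update(PROVIDERS.get(provider_name, {}))
    return merged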
|
[
"selenium.webdriver.Firefox",
"datetime.datetime",
"os.environ.get",
"pyvirtualdisplay.Display",
"selenium.webdriver.Chrome"
] |
[((2297, 2321), 'os.environ.get', 'os.environ.get', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (2311, 2321), False, 'import os\n'), ((3101, 3154), 'datetime.datetime', 'datetime.datetime', (['BIRTH_YEAR', 'BIRTH_MONTH', 'BIRTH_DAY'], {}), '(BIRTH_YEAR, BIRTH_MONTH, BIRTH_DAY)\n', (3118, 3154), False, 'import datetime\n'), ((381, 417), 'pyvirtualdisplay.Display', 'Display', ([], {'visible': '(0)', 'size': '(1024, 768)'}), '(visible=0, size=(1024, 768))\n', (388, 417), False, 'from pyvirtualdisplay import Display\n'), ((449, 468), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (466, 468), False, 'from selenium import webdriver\n'), ((2731, 2749), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (2747, 2749), False, 'from selenium import webdriver\n')]
|
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common image attention utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention
from tensor2tensor.utils import hparam
import tensorflow.compat.v1 as tf
class CommonImageAttentionTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth):
batch = 1
rows = 8
cols = 24
hparams = hparam.HParams(
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.TRAIN,
num_mixtures=num_mixtures,
)
inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
minval=-1., maxval=1.)
outputs = common_image_attention.postprocess_image(
inputs, rows, cols, hparams)
self.assertEqual(outputs.shape, (batch, rows, cols, depth))
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth):
batch = 1
rows = 8
cols = 24
block_length = 4
block_width = 2
hparams = hparam.HParams(
block_raster_scan=True,
hidden_size=2,
likelihood=likelihood,
mode=tf.estimator.ModeKeys.PREDICT,
num_mixtures=num_mixtures,
query_shape=[block_length, block_width],
)
inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
minval=-1., maxval=1.)
outputs = common_image_attention.postprocess_image(
inputs, rows, cols, hparams)
num_blocks_rows = rows // block_length
num_blocks_cols = cols // block_width
self.assertEqual(outputs.shape,
(batch, num_blocks_rows, num_blocks_cols,
block_length, block_width, depth))
@parameterized.parameters(
(common_image_attention.DistributionType.DMOL, 5, 50),
(common_image_attention.DistributionType.CAT, None, 256),
)
def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth):
batch = 1
height = 8
width = 8
channels = 3
rows = height
if likelihood == common_image_attention.DistributionType.CAT:
cols = channels * width
else:
cols = width
hparams = hparam.HParams(
hidden_size=2,
likelihood=likelihood,
num_channels=channels,
mode=tf.estimator.ModeKeys.TRAIN,
num_mixtures=num_mixtures,
)
decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])
targets = tf.random_uniform([batch, height, width, channels],
minval=-1., maxval=1.)
output = common_image_attention.create_output(
decoder_output, rows, cols, targets, hparams)
if hparams.likelihood == common_image_attention.DistributionType.CAT:
self.assertEqual(output.shape, (batch, height, width, channels, depth))
else:
self.assertEqual(output.shape, (batch, height, width, depth))
def testTransformerDecoderLayersGlobal(self):
one_hot_data = tf.constant([[[0., 1.], [1., 0.]],
[[0., 1.], [1., 0.]],
[[1., 0.], [1., 0.]]])
hparams = common_hparams.basic_params1()
hparams.hidden_size = 4
hparams.num_layers = 1
hparams.layer_prepostprocess_dropout = 0.
hparams.add_hparam("attention_key_channels", None)
hparams.add_hparam("attention_value_channels", None)
hparams.add_hparam("num_heads", 1)
hparams.add_hparam("attention_dropout", 0.)
hparams.add_hparam("shared_rel", False)
hparams.add_hparam("block_width", 1)
hparams.add_hparam("block_length", 1)
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("filter_size", 16)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("relu_dropout", 0.)
conv_1d = tf.keras.layers.Conv1D(filters=hparams.hidden_size,
kernel_size=1,
use_bias=False)
shifted_data = tf.pad(one_hot_data, [[0, 0], [1, 0], [0, 0]])[..., :-1, :]
net = conv_1d(shifted_data)
output = common_image_attention.transformer_decoder_layers(
inputs=net,
encoder_output=None,
num_layers=hparams.num_layers,
hparams=hparams,
self_attention_bias=common_image_attention.get_self_attention_bias(net),
attention_type=common_image_attention.AttentionType.GLOBAL)
self.evaluate(tf.global_variables_initializer())
output_val = self.evaluate(output)
# The outputs for the padded dimension should be equal across all data.
self.assertAllEqual(output_val[0, 0], output_val[1, 0])
self.assertAllEqual(output_val[1, 0], output_val[2, 0])
# The first and second elements of the batch are identical, so they should
# have the same outputs for the second latent dimension as well.
self.assertAllEqual(output_val[0, 1], output_val[1, 1])
if __name__ == "__main__":
tf.test.main()
|
[
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.constant",
"tensor2tensor.utils.hparam.HParams",
"absl.testing.parameterized.parameters",
"tensor2tensor.layers.common_image_attention.postprocess_image",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.keras.layers.Conv1D",
"tensor2tensor.layers.common_image_attention.get_self_attention_bias",
"tensorflow.compat.v1.random_normal",
"tensorflow.compat.v1.random_uniform",
"tensor2tensor.layers.common_hparams.basic_params1",
"tensor2tensor.layers.common_image_attention.create_output",
"tensorflow.compat.v1.global_variables_initializer"
] |
[((1064, 1206), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(common_image_attention.DistributionType.DMOL, 5, 50)', '(common_image_attention.DistributionType.CAT, None, 256)'], {}), '((common_image_attention.DistributionType.DMOL, 5, \n 50), (common_image_attention.DistributionType.CAT, None, 256))\n', (1088, 1206), False, 'from absl.testing import parameterized\n'), ((1791, 1933), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(common_image_attention.DistributionType.DMOL, 5, 50)', '(common_image_attention.DistributionType.CAT, None, 256)'], {}), '((common_image_attention.DistributionType.DMOL, 5, \n 50), (common_image_attention.DistributionType.CAT, None, 256))\n', (1815, 1933), False, 'from absl.testing import parameterized\n'), ((2819, 2961), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(common_image_attention.DistributionType.DMOL, 5, 50)', '(common_image_attention.DistributionType.CAT, None, 256)'], {}), '((common_image_attention.DistributionType.DMOL, 5, \n 50), (common_image_attention.DistributionType.CAT, None, 256))\n', (2843, 2961), False, 'from absl.testing import parameterized\n'), ((6035, 6049), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (6047, 6049), True, 'import tensorflow.compat.v1 as tf\n'), ((1350, 1468), 'tensor2tensor.utils.hparam.HParams', 'hparam.HParams', ([], {'hidden_size': '(2)', 'likelihood': 'likelihood', 'mode': 'tf.estimator.ModeKeys.TRAIN', 'num_mixtures': 'num_mixtures'}), '(hidden_size=2, likelihood=likelihood, mode=tf.estimator.\n ModeKeys.TRAIN, num_mixtures=num_mixtures)\n', (1364, 1468), False, 'from tensor2tensor.utils import hparam\n'), ((1516, 1604), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[batch, rows, cols, hparams.hidden_size]'], {'minval': '(-1.0)', 'maxval': '(1.0)'}), '([batch, rows, cols, hparams.hidden_size], minval=-1.0,\n maxval=1.0)\n', (1533, 1604), True, 'import tensorflow.compat.v1 as tf\n'), ((1644, 1713), 'tensor2tensor.layers.common_image_attention.postprocess_image', 'common_image_attention.postprocess_image', (['inputs', 'rows', 'cols', 'hparams'], {}), '(inputs, rows, cols, hparams)\n', (1684, 1713), False, 'from tensor2tensor.layers import common_image_attention\n'), ((2118, 2306), 'tensor2tensor.utils.hparam.HParams', 'hparam.HParams', ([], {'block_raster_scan': '(True)', 'hidden_size': '(2)', 'likelihood': 'likelihood', 'mode': 'tf.estimator.ModeKeys.PREDICT', 'num_mixtures': 'num_mixtures', 'query_shape': '[block_length, block_width]'}), '(block_raster_scan=True, hidden_size=2, likelihood=likelihood,\n mode=tf.estimator.ModeKeys.PREDICT, num_mixtures=num_mixtures,\n query_shape=[block_length, block_width])\n', (2132, 2306), False, 'from tensor2tensor.utils import hparam\n'), ((2367, 2455), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[batch, rows, cols, hparams.hidden_size]'], {'minval': '(-1.0)', 'maxval': '(1.0)'}), '([batch, rows, cols, hparams.hidden_size], minval=-1.0,\n maxval=1.0)\n', (2384, 2455), True, 'import tensorflow.compat.v1 as tf\n'), ((2495, 2564), 'tensor2tensor.layers.common_image_attention.postprocess_image', 'common_image_attention.postprocess_image', (['inputs', 'rows', 'cols', 'hparams'], {}), '(inputs, rows, cols, hparams)\n', (2535, 2564), False, 'from tensor2tensor.layers import common_image_attention\n'), ((3263, 3403), 'tensor2tensor.utils.hparam.HParams', 'hparam.HParams', ([], {'hidden_size': '(2)', 'likelihood': 'likelihood', 'num_channels': 
'channels', 'mode': 'tf.estimator.ModeKeys.TRAIN', 'num_mixtures': 'num_mixtures'}), '(hidden_size=2, likelihood=likelihood, num_channels=channels,\n mode=tf.estimator.ModeKeys.TRAIN, num_mixtures=num_mixtures)\n', (3277, 3403), False, 'from tensor2tensor.utils import hparam\n'), ((3468, 3526), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[batch, rows, cols, hparams.hidden_size]'], {}), '([batch, rows, cols, hparams.hidden_size])\n', (3484, 3526), True, 'import tensorflow.compat.v1 as tf\n'), ((3541, 3617), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[batch, height, width, channels]'], {'minval': '(-1.0)', 'maxval': '(1.0)'}), '([batch, height, width, channels], minval=-1.0, maxval=1.0)\n', (3558, 3617), True, 'import tensorflow.compat.v1 as tf\n'), ((3661, 3747), 'tensor2tensor.layers.common_image_attention.create_output', 'common_image_attention.create_output', (['decoder_output', 'rows', 'cols', 'targets', 'hparams'], {}), '(decoder_output, rows, cols, targets,\n hparams)\n', (3697, 3747), False, 'from tensor2tensor.layers import common_image_attention\n'), ((4051, 4147), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[[0.0, 1.0], [1.0, 0.0]], [[0.0, 1.0], [1.0, 0.0]], [[1.0, 0.0], [1.0, 0.0]]]'], {}), '([[[0.0, 1.0], [1.0, 0.0]], [[0.0, 1.0], [1.0, 0.0]], [[1.0, 0.0\n ], [1.0, 0.0]]])\n', (4062, 4147), True, 'import tensorflow.compat.v1 as tf\n'), ((4210, 4240), 'tensor2tensor.layers.common_hparams.basic_params1', 'common_hparams.basic_params1', ([], {}), '()\n', (4238, 4240), False, 'from tensor2tensor.layers import common_hparams\n'), ((4914, 5001), 'tensorflow.compat.v1.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': 'hparams.hidden_size', 'kernel_size': '(1)', 'use_bias': '(False)'}), '(filters=hparams.hidden_size, kernel_size=1, use_bias\n =False)\n', (4936, 5001), True, 'import tensorflow.compat.v1 as tf\n'), ((5090, 5136), 'tensorflow.compat.v1.pad', 'tf.pad', (['one_hot_data', '[[0, 0], [1, 0], [0, 0]]'], {}), '(one_hot_data, [[0, 0], [1, 0], [0, 0]])\n', (5096, 5136), True, 'import tensorflow.compat.v1 as tf\n'), ((5526, 5559), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5557, 5559), True, 'import tensorflow.compat.v1 as tf\n'), ((5387, 5438), 'tensor2tensor.layers.common_image_attention.get_self_attention_bias', 'common_image_attention.get_self_attention_bias', (['net'], {}), '(net)\n', (5433, 5438), False, 'from tensor2tensor.layers import common_image_attention\n')]
|
"""Tests for distutils.util."""
import os
import sys
import unittest
from copy import copy
from test.support import run_unittest
from unittest import mock
from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
from distutils.util import (get_platform, convert_path, change_root,
check_environ, split_quoted, strtobool,
rfc822_escape, byte_compile,
grok_environment_error)
from distutils import util # used to patch _environ_checked
from distutils.sysconfig import get_config_vars
from distutils import sysconfig
from distutils.tests import support
import _osx_support
class UtilTestCase(support.EnvironGuard, unittest.TestCase):
def setUp(self):
super(UtilTestCase, self).setUp()
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = copy(sysconfig._config_vars)
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._uname = None
os.uname = self._get_uname
def tearDown(self):
# getting back the environment
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
sysconfig._config_vars = copy(self._config_vars)
super(UtilTestCase, self).tearDown()
def _set_uname(self, uname):
self._uname = uname
def _get_uname(self):
return self._uname
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
cursize = sys.maxsize
sys.maxsize = (2 ** 31)-1
try:
self.assertEqual(get_platform(), 'macosx-10.3-i386')
finally:
sys.maxsize = cursize
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3'%(arch,))
self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_convert_path(self):
# linux/mac
os.sep = '/'
def _join(path):
return '/'.join(path)
os.path.join = _join
self.assertEqual(convert_path('/home/to/my/stuff'),
'/home/to/my/stuff')
# win
os.sep = '\\'
def _join(*path):
return '\\'.join(path)
os.path.join = _join
self.assertRaises(ValueError, convert_path, '/home/to/my/stuff')
self.assertRaises(ValueError, convert_path, 'home/to/my/stuff/')
self.assertEqual(convert_path('home/to/my/stuff'),
'home\\to\\my\\stuff')
self.assertEqual(convert_path('.'),
os.curdir)
def test_change_root(self):
# linux/mac
os.name = 'posix'
def _isabs(path):
return path[0] == '/'
os.path.isabs = _isabs
def _join(*path):
return '/'.join(path)
os.path.join = _join
self.assertEqual(change_root('/root', '/old/its/here'),
'/root/old/its/here')
self.assertEqual(change_root('/root', 'its/here'),
'/root/its/here')
# windows
os.name = 'nt'
def _isabs(path):
return path.startswith('c:\\')
os.path.isabs = _isabs
def _splitdrive(path):
if path.startswith('c:'):
return ('', path.replace('c:', ''))
return ('', path)
os.path.splitdrive = _splitdrive
def _join(*path):
return '\\'.join(path)
os.path.join = _join
self.assertEqual(change_root('c:\\root', 'c:\\old\\its\\here'),
'c:\\root\\old\\its\\here')
self.assertEqual(change_root('c:\\root', 'its\\here'),
'c:\\root\\its\\here')
# BugsBunny os (it's a great os)
os.name = 'BugsBunny'
self.assertRaises(DistutilsPlatformError,
change_root, 'c:\\root', 'its\\here')
# XXX platforms to be covered: mac
@unittest.skipUnless((os.name == 'posix' and hasattr(os, 'fork') and os.allows_subprocesses),
"distutils cannot spawn child processes")
def test_check_environ(self):
util._environ_checked = 0
os.environ.pop('HOME', None)
check_environ()
self.assertEqual(os.environ['PLAT'], get_platform())
self.assertEqual(util._environ_checked, 1)
@unittest.skipUnless(os.name == 'posix', 'specific to posix')
def test_check_environ_getpwuid(self):
util._environ_checked = 0
os.environ.pop('HOME', None)
import pwd
# only set pw_dir field, other fields are not used
result = pwd.struct_passwd((None, None, None, None, None,
'/home/distutils', None))
with mock.patch.object(pwd, 'getpwuid', return_value=result):
check_environ()
self.assertEqual(os.environ['HOME'], '/home/distutils')
util._environ_checked = 0
os.environ.pop('HOME', None)
# bpo-10496: Catch pwd.getpwuid() error
with mock.patch.object(pwd, 'getpwuid', side_effect=KeyError):
check_environ()
self.assertNotIn('HOME', os.environ)
def test_split_quoted(self):
self.assertEqual(split_quoted('""one"" "two" \'three\' \\four'),
['one', 'two', 'three', 'four'])
def test_strtobool(self):
yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1')
no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N')
for y in yes:
self.assertTrue(strtobool(y))
for n in no:
self.assertFalse(strtobool(n))
def test_rfc822_escape(self):
header = 'I am a\npoor\nlonesome\nheader\n'
res = rfc822_escape(header)
wanted = ('I am a%(8s)spoor%(8s)slonesome%(8s)s'
'header%(8s)s') % {'8s': '\n'+8*' '}
self.assertEqual(res, wanted)
def test_dont_write_bytecode(self):
# makes sure byte_compile raise a DistutilsError
# if sys.dont_write_bytecode is True
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
self.assertRaises(DistutilsByteCompileError, byte_compile, [])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
def test_grok_environment_error(self):
# test obsolete function to ensure backward compat (#4931)
exc = IOError("Unable to find batch file")
msg = grok_environment_error(exc)
self.assertEqual(msg, "error: Unable to find batch file")
def test_suite():
return unittest.makeSuite(UtilTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
[
"unittest.mock.patch.object",
"distutils.util.strtobool",
"distutils.util.rfc822_escape",
"distutils.util.grok_environment_error",
"os.uname",
"unittest.makeSuite",
"distutils.util.check_environ",
"copy.copy",
"distutils.sysconfig.get_config_vars",
"unittest.skipUnless",
"distutils.util.convert_path",
"distutils.util.change_root",
"distutils.util.split_quoted",
"distutils.util.get_platform",
"os.environ.pop",
"pwd.struct_passwd"
] |
[((9373, 9433), 'unittest.skipUnless', 'unittest.skipUnless', (["(os.name == 'posix')", '"""specific to posix"""'], {}), "(os.name == 'posix', 'specific to posix')\n", (9392, 9433), False, 'import unittest\n'), ((11645, 11677), 'unittest.makeSuite', 'unittest.makeSuite', (['UtilTestCase'], {}), '(UtilTestCase)\n', (11663, 11677), False, 'import unittest\n'), ((1103, 1131), 'copy.copy', 'copy', (['sysconfig._config_vars'], {}), '(sysconfig._config_vars)\n', (1107, 1131), False, 'from copy import copy\n'), ((1820, 1843), 'copy.copy', 'copy', (['self._config_vars'], {}), '(self._config_vars)\n', (1824, 1843), False, 'from copy import copy\n'), ((9200, 9228), 'os.environ.pop', 'os.environ.pop', (['"""HOME"""', 'None'], {}), "('HOME', None)\n", (9214, 9228), False, 'import os\n'), ((9238, 9253), 'distutils.util.check_environ', 'check_environ', ([], {}), '()\n', (9251, 9253), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((9519, 9547), 'os.environ.pop', 'os.environ.pop', (['"""HOME"""', 'None'], {}), "('HOME', None)\n", (9533, 9547), False, 'import os\n'), ((9645, 9719), 'pwd.struct_passwd', 'pwd.struct_passwd', (["(None, None, None, None, None, '/home/distutils', None)"], {}), "((None, None, None, None, None, '/home/distutils', None))\n", (9662, 9719), False, 'import pwd\n'), ((9965, 9993), 'os.environ.pop', 'os.environ.pop', (['"""HOME"""', 'None'], {}), "('HOME', None)\n", (9979, 9993), False, 'import os\n'), ((10765, 10786), 'distutils.util.rfc822_escape', 'rfc822_escape', (['header'], {}), '(header)\n', (10778, 10786), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((11520, 11547), 'distutils.util.grok_environment_error', 'grok_environment_error', (['exc'], {}), '(exc)\n', (11542, 11547), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((1254, 1264), 'os.uname', 'os.uname', ([], {}), '()\n', (1262, 1264), False, 'import os\n'), ((2263, 2277), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (2275, 2277), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((2511, 2525), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (2523, 2525), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((3031, 3048), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (3046, 3048), False, 'from distutils.sysconfig import get_config_vars\n'), ((3058, 3075), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (3073, 3075), False, 'from distutils.sysconfig import get_config_vars\n'), ((3122, 3139), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (3137, 3139), False, 'from distutils.sysconfig import get_config_vars\n'), ((3571, 3588), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (3586, 3588), False, 'from distutils.sysconfig import get_config_vars\n'), ((3598, 3615), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (3613, 3615), False, 'from 
distutils.sysconfig import get_config_vars\n'), ((3661, 3678), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (3676, 3678), False, 'from distutils.sysconfig import get_config_vars\n'), ((3969, 3983), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (3981, 3983), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((4050, 4067), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (4065, 4067), False, 'from distutils.sysconfig import get_config_vars\n'), ((4150, 4164), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (4162, 4164), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((4232, 4249), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (4247, 4249), False, 'from distutils.sysconfig import get_config_vars\n'), ((4259, 4276), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (4274, 4276), False, 'from distutils.sysconfig import get_config_vars\n'), ((4570, 4584), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (4582, 4584), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((4653, 4670), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (4668, 4670), False, 'from distutils.sysconfig import get_config_vars\n'), ((4680, 4697), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (4695, 4697), False, 'from distutils.sysconfig import get_config_vars\n'), ((5000, 5014), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (5012, 5014), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((5082, 5099), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (5097, 5099), False, 'from distutils.sysconfig import get_config_vars\n'), ((5109, 5126), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (5124, 5126), False, 'from distutils.sysconfig import get_config_vars\n'), ((5441, 5455), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (5453, 5455), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((5528, 5545), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (5543, 5545), False, 'from distutils.sysconfig import get_config_vars\n'), ((5555, 5572), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (5570, 5572), False, 'from distutils.sysconfig import get_config_vars\n'), ((5867, 5881), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (5879, 5881), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((6788, 6802), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (6800, 6802), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, 
grok_environment_error\n'), ((7051, 7084), 'distutils.util.convert_path', 'convert_path', (['"""/home/to/my/stuff"""'], {}), "('/home/to/my/stuff')\n", (7063, 7084), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((7432, 7464), 'distutils.util.convert_path', 'convert_path', (['"""home/to/my/stuff"""'], {}), "('home/to/my/stuff')\n", (7444, 7464), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((7539, 7556), 'distutils.util.convert_path', 'convert_path', (['"""."""'], {}), "('.')\n", (7551, 7556), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((7879, 7916), 'distutils.util.change_root', 'change_root', (['"""/root"""', '"""/old/its/here"""'], {}), "('/root', '/old/its/here')\n", (7890, 7916), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((7990, 8022), 'distutils.util.change_root', 'change_root', (['"""/root"""', '"""its/here"""'], {}), "('/root', 'its/here')\n", (8001, 8022), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((8517, 8562), 'distutils.util.change_root', 'change_root', (['"""c:\\\\root"""', '"""c:\\\\old\\\\its\\\\here"""'], {}), "('c:\\\\root', 'c:\\\\old\\\\its\\\\here')\n", (8528, 8562), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((8642, 8678), 'distutils.util.change_root', 'change_root', (['"""c:\\\\root"""', '"""its\\\\here"""'], {}), "('c:\\\\root', 'its\\\\here')\n", (8653, 8678), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((9300, 9314), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (9312, 9314), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((9769, 9824), 'unittest.mock.patch.object', 'mock.patch.object', (['pwd', '"""getpwuid"""'], {'return_value': 'result'}), "(pwd, 'getpwuid', return_value=result)\n", (9786, 9824), False, 'from unittest import mock\n'), ((9838, 9853), 'distutils.util.check_environ', 'check_environ', ([], {}), '()\n', (9851, 9853), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((10056, 10112), 'unittest.mock.patch.object', 'mock.patch.object', (['pwd', '"""getpwuid"""'], {'side_effect': 'KeyError'}), "(pwd, 'getpwuid', side_effect=KeyError)\n", (10073, 10112), False, 'from unittest import mock\n'), ((10126, 10141), 'distutils.util.check_environ', 'check_environ', ([], {}), '()\n', (10139, 10141), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((10250, 10296), 
'distutils.util.split_quoted', 'split_quoted', (['"""""one"" "two" \'three\' \\\\four"""'], {}), '(\'""one"" "two" \\\'three\\\' \\\\four\')\n', (10262, 10296), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((3376, 3390), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (3388, 3390), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((6010, 6027), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (6025, 6027), False, 'from distutils.sysconfig import get_config_vars\n'), ((6041, 6058), 'distutils.sysconfig.get_config_vars', 'get_config_vars', ([], {}), '()\n', (6056, 6058), False, 'from distutils.sysconfig import get_config_vars\n'), ((6361, 6375), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (6373, 6375), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((10585, 10597), 'distutils.util.strtobool', 'strtobool', (['y'], {}), '(y)\n', (10594, 10597), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n'), ((10650, 10662), 'distutils.util.strtobool', 'strtobool', (['n'], {}), '(n)\n', (10659, 10662), False, 'from distutils.util import get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error\n')]
|
import pip
from subprocess import call
for dist in pip.get_installed_distributions():
call("pip install --upgrade " + dist.project_name, shell=True)
# This is intended for Linux systems only.
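# A hedged alternative sketch (added here for illustration, not part of the
# original snippet): pip.get_installed_distributions was removed in newer pip
# releases, where the same loop can be driven by importlib.metadata instead.
def upgrade_all_with_importlib_metadata():
    from importlib.metadata import distributions  # Python 3.8+
    for dist in distributions():
        call("pip install --upgrade " + dist.metadata["Name"], shell=True)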
|
[
"subprocess.call",
"pip.get_installed_distributions"
] |
[((55, 88), 'pip.get_installed_distributions', 'pip.get_installed_distributions', ([], {}), '()\n', (86, 88), False, 'import pip\n'), ((95, 157), 'subprocess.call', 'call', (["('pip install --upgrade ' + dist.project_name)"], {'shell': '(True)'}), "('pip install --upgrade ' + dist.project_name, shell=True)\n", (99, 157), False, 'from subprocess import call\n')]
|
from django.db import models
class Location(models.Model):
place = models.CharField(max_length=30)
def __str__(self):
return self.place
class Meta:
ordering = ['place']
def save_location(self):
self.save()
class Category(models.Model):
category = models.CharField(max_length=30)
def __str__(self):
return self.category
def save_category(self):
self.save()
class Image(models.Model):
image = models.ImageField(upload_to='gallery/')
image_name = models.CharField(max_length=25)
image_description = models.TextField(max_length=300)
image_location = models.ForeignKey(Location)
image_category = models.ForeignKey(Category)
def __str__(self):
return self.image_name
def save_image(self):
self.save()
def delete_image(self):
        self.delete()
def update_image(self, id):
pass
def get_image_by_id(id):
pass
def search_image(image_category):
pass
def filter_by_location(image_location):
pass
@classmethod
def search_by_category(cls,search_term):
photos=cls.objects.filter(image_category__category__contains=search_term)
return photos
@classmethod
def get_one_image(cls,id):
try:
image=Image.objects.get(id=id)
return image
        except Image.DoesNotExist:
return Image.objects.get(id=1)
@classmethod
def get_all_images(cls):
all_images = Image.objects.all()
return all_images
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ImageField"
] |
[((71, 102), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (87, 102), False, 'from django.db import models\n'), ((284, 315), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (300, 315), False, 'from django.db import models\n'), ((450, 489), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""gallery/"""'}), "(upload_to='gallery/')\n", (467, 489), False, 'from django.db import models\n'), ((505, 536), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (521, 536), False, 'from django.db import models\n'), ((559, 591), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (575, 591), False, 'from django.db import models\n'), ((611, 638), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Location'], {}), '(Location)\n', (628, 638), False, 'from django.db import models\n'), ((658, 685), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {}), '(Category)\n', (675, 685), False, 'from django.db import models\n')]
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('delete_item/<name>', views.delete_item, name='delete_item'),
path('add_drink/', views.add_drink, name='add_drink'),
path('add_snack/', views.add_snack, name='add_snack'),
path('edit_item/<name>', views.edit_item, name='edit_item'),
path('features/<name>', views.features, name='features'),
path('items/<type>', views.items, name='items')
]
|
[
"django.urls.path"
] |
[((71, 106), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (75, 106), False, 'from django.urls import path\n'), ((112, 177), 'django.urls.path', 'path', (['"""delete_item/<name>"""', 'views.delete_item'], {'name': '"""delete_item"""'}), "('delete_item/<name>', views.delete_item, name='delete_item')\n", (116, 177), False, 'from django.urls import path\n'), ((183, 236), 'django.urls.path', 'path', (['"""add_drink/"""', 'views.add_drink'], {'name': '"""add_drink"""'}), "('add_drink/', views.add_drink, name='add_drink')\n", (187, 236), False, 'from django.urls import path\n'), ((242, 295), 'django.urls.path', 'path', (['"""add_snack/"""', 'views.add_snack'], {'name': '"""add_snack"""'}), "('add_snack/', views.add_snack, name='add_snack')\n", (246, 295), False, 'from django.urls import path\n'), ((301, 360), 'django.urls.path', 'path', (['"""edit_item/<name>"""', 'views.edit_item'], {'name': '"""edit_item"""'}), "('edit_item/<name>', views.edit_item, name='edit_item')\n", (305, 360), False, 'from django.urls import path\n'), ((366, 422), 'django.urls.path', 'path', (['"""features/<name>"""', 'views.features'], {'name': '"""features"""'}), "('features/<name>', views.features, name='features')\n", (370, 422), False, 'from django.urls import path\n'), ((428, 475), 'django.urls.path', 'path', (['"""items/<type>"""', 'views.items'], {'name': '"""items"""'}), "('items/<type>', views.items, name='items')\n", (432, 475), False, 'from django.urls import path\n')]
|
#
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mib_db_api import *
from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, \
MibDeviceData, MibAttributeData, MessageType, ManagedEntity
from voltha.extensions.omci.omci_entities import *
from voltha.extensions.omci.omci_fields import *
from scapy.fields import StrField, FieldListField, PacketField
class MibDbStatistic(object):
"""
For debug/tuning purposes.
With etcd around the v1.5 time frame, seeing the following:
o Creates: Avg: 57.1 mS, Min: 76 mS, Max: 511 mS (146 samples)
o Sets: Avg: 303.9 mS, Min: 126 mS, Max: 689 mS (103 samples)
o Gets: Avg: 3.3 mS, Min: 0 mS, Max: 8 mS ( 9 samples)
o Deletes: No samples
"""
def __init__(self, name):
self._name = name
self._count = 0
self._total_time = 0 # Total milliseconds
self._min_time = 99999999
self._max_time = 0
def get_statistics(self):
return {
'name': self._name,
'count': self._count,
'total_time': self._total_time,
'min_time': self._min_time,
'max_time': self._max_time,
'avg_time': self._total_time / self._count if self._count > 0 else 0
}
def clear_statistics(self):
self._count = 0
self._total_time = 0 # Total milliseconds
self._min_time = 99999999
self._max_time = 0
def increment(self, time):
self._count += 1
self._total_time += time # Total milliseconds
if self._min_time > time:
self._min_time = time
if self._max_time < time:
self._max_time = time
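def _mib_db_statistic_example():
    """Editor's usage sketch, not part of the original module: time an
    operation and feed the elapsed milliseconds into a MibDbStatistic."""
    from datetime import datetime
    stat = MibDbStatistic('get')
    start = datetime.utcnow()
    # ... the timed 'get' operation would run here ...
    elapsed_ms = (datetime.utcnow() - start).total_seconds() * 1000
    stat.increment(int(elapsed_ms))
    return stat.get_statistics()  # e.g. {'name': 'get', 'count': 1, ...}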
class MibDbExternal(MibDbApi):
"""
A persistent external OpenOMCI MIB Database
"""
CURRENT_VERSION = 1 # VOLTHA v1.3.0 release
_TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
# Paths from root proxy
MIB_PATH = '/omci_mibs'
DEVICE_PATH = MIB_PATH + '/{}' # .format(device_id)
# Classes, Instances, and Attributes as lists from root proxy
CLASSES_PATH = DEVICE_PATH + '/classes' # .format(device_id)
INSTANCES_PATH = DEVICE_PATH + '/classes/{}/instances' # .format(device_id, class_id)
ATTRIBUTES_PATH = DEVICE_PATH + '/classes/{}/instances/{}/attributes' # .format(device_id, class_id, instance_id)
# Single Class, Instance, and Attribute as objects from device proxy
CLASS_PATH = '/classes/{}' # .format(class_id)
INSTANCE_PATH = '/classes/{}/instances/{}' # .format(class_id, instance_id)
ATTRIBUTE_PATH = '/classes/{}/instances/{}/attributes/{}' # .format(class_id, instance_id
# attribute_name)
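    # Illustrative examples (added for clarity, not in the original source) of the
    # concrete keys these templates produce, assuming a device id of 'onu-1',
    # class id 256 and instance id 0:
    #   DEVICE_PATH.format('onu-1')          -> '/omci_mibs/onu-1'
    #   INSTANCES_PATH.format('onu-1', 256)  -> '/omci_mibs/onu-1/classes/256/instances'
    #   INSTANCE_PATH.format(256, 0)         -> '/classes/256/instances/0'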
def __init__(self, omci_agent):
"""
Class initializer
:param omci_agent: (OpenOMCIAgent) OpenOMCI Agent
"""
super(MibDbExternal, self).__init__(omci_agent)
self._core = omci_agent.core
# Some statistics to help with debug/tuning/...
self._statistics = {
'get': MibDbStatistic('get'),
'set': MibDbStatistic('set'),
'create': MibDbStatistic('create'),
'delete': MibDbStatistic('delete')
}
def start(self):
"""
Start up/restore the database
"""
self.log.debug('start')
if not self._started:
super(MibDbExternal, self).start()
root_proxy = self._core.get_proxy('/')
try:
base = root_proxy.get(MibDbExternal.MIB_PATH)
self.log.info('db-exists', num_devices=len(base))
except Exception as e:
self.log.exception('start-failure', e=e)
raise
def stop(self):
"""
        Stop the database
"""
self.log.debug('stop')
if self._started:
super(MibDbExternal, self).stop()
# TODO: Delete this method if nothing else is done except calling the base class
def _time_to_string(self, time):
return time.strftime(MibDbExternal._TIME_FORMAT) if time is not None else ''
def _string_to_time(self, time):
return datetime.strptime(time, MibDbExternal._TIME_FORMAT) if len(time) else None
def _attribute_to_string(self, device_id, class_id, attr_name, value, old_value = None):
"""
Convert an ME's attribute value to string representation
:param device_id: (str) ONU Device ID
:param class_id: (int) Class ID
:param attr_name: (str) Attribute Name (see EntityClasses)
:param value: (various) Attribute Value
:return: (str) String representation of the value
:raises KeyError: Device, Class ID, or Attribute does not exist
"""
try:
me_map = self._omci_agent.get_device(device_id).me_map
if class_id in me_map:
entity = me_map[class_id]
attr_index = entity.attribute_name_to_index_map[attr_name]
eca = entity.attributes[attr_index]
field = eca.field
else:
# Here for auto-defined MEs (ones not defined in ME Map)
from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
field = StrFixedLenField(UNKNOWN_CLASS_ATTRIBUTE_KEY, None, 24)
if isinstance(field, StrFixedLenField):
from scapy.base_classes import Packet_metaclass
if hasattr(value, 'to_json') and not isinstance(value, basestring):
# Packet Class to string
str_value = value.to_json()
elif isinstance(field.default, Packet_metaclass) \
and hasattr(field.default, 'json_from_value'):
#and not isinstance(value, basestring):
# Value/hex of Packet Class to string
str_value = field.default.json_from_value(value)
else:
str_value = str(value)
elif isinstance(field, (StrField, MACField, IPField)):
# For StrField, value is an str already
# For MACField, value is a string in ':' delimited form
# For IPField, value is a string in '.' delimited form
str_value = str(value)
elif isinstance(field, (ByteField, ShortField, IntField, LongField)):
# For ByteField, ShortField, IntField, and LongField value is an int
str_value = str(value)
elif isinstance(field, BitField):
# For BitField, value is a long
#
str_value = str(value)
elif hasattr(field, 'to_json'):
str_value = field.to_json(value, old_value)
elif isinstance(field, FieldListField):
str_value = json.dumps(value, separators=(',', ':'))
else:
self.log.warning('default-conversion', type=type(field),
class_id=class_id, attribute=attr_name, value=str(value))
str_value = str(value)
return str_value
except Exception as e:
self.log.exception('attr-to-string', device_id=device_id,
class_id=class_id, attr=attr_name,
value=value, e=e)
raise
def _string_to_attribute(self, device_id, class_id, attr_name, str_value):
"""
Convert an ME's attribute value-string to its Scapy decode equivalent
:param device_id: (str) ONU Device ID
:param class_id: (int) Class ID
:param attr_name: (str) Attribute Name (see EntityClasses)
:param str_value: (str) Attribute Value in string form
:return: (various) String representation of the value
:raises KeyError: Device, Class ID, or Attribute does not exist
"""
try:
me_map = self._omci_agent.get_device(device_id).me_map
if class_id in me_map:
entity = me_map[class_id]
attr_index = entity.attribute_name_to_index_map[attr_name]
eca = entity.attributes[attr_index]
field = eca.field
else:
# Here for auto-defined MEs (ones not defined in ME Map)
from voltha.extensions.omci.omci_cc import UNKNOWN_CLASS_ATTRIBUTE_KEY
field = StrFixedLenField(UNKNOWN_CLASS_ATTRIBUTE_KEY, None, 24)
if isinstance(field, StrFixedLenField):
from scapy.base_classes import Packet_metaclass
default = field.default
if isinstance(default, Packet_metaclass) and \
hasattr(default, 'to_json'):
value = json.loads(str_value)
else:
value = str_value
elif isinstance(field, MACField):
value = str_value
elif isinstance(field, IPField):
value = str_value
elif isinstance(field, (ByteField, ShortField, IntField, LongField)):
if str_value.lower() in ('true', 'false'):
str_value = '1' if str_value.lower() == 'true' else '0'
value = int(str_value)
elif isinstance(field, BitField):
value = long(str_value)
elif hasattr(field, 'load_json'):
value = field.load_json(str_value)
elif isinstance(field, FieldListField):
value = json.loads(str_value)
else:
self.log.warning('default-conversion', type=type(field),
class_id=class_id, attribute=attr_name, value=str_value)
value = None
return value
except Exception as e:
self.log.exception('attr-to-string', device_id=device_id,
class_id=class_id, attr=attr_name,
value=str_value, e=e)
raise
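    # Rough sketch (illustrative only; the attribute name and value are
    # hypothetical): _attribute_to_string() and _string_to_attribute() are
    # intended to be inverses for a given (device_id, class_id, attr_name), so a
    # value survives the store/load round trip through the key/value store:
    #
    #     text = db._attribute_to_string(device_id, class_id, 'port_id', 5)   # -> '5'
    #     value = db._string_to_attribute(device_id, class_id, 'port_id', text)
    #     assert value == 5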
def add(self, device_id, overwrite=False):
"""
Add a new ONU to database
:param device_id: (str) Device ID of ONU to add
:param overwrite: (bool) Overwrite existing entry if found.
:raises KeyError: If device already exists and 'overwrite' is False
"""
self.log.debug('add-device', device_id=device_id, overwrite=overwrite)
now = datetime.utcnow()
found = False
root_proxy = self._core.get_proxy('/')
data = MibDeviceData(device_id=device_id,
created=self._time_to_string(now),
last_sync_time='',
mib_data_sync=0,
version=MibDbExternal.CURRENT_VERSION)
try:
dev_proxy = self._device_proxy(device_id)
found = True
if not overwrite:
# Device already exists
raise KeyError('Device with ID {} already exists in MIB database'.
format(device_id))
# Overwrite with new data
data = dev_proxy.get('/', depth=0)
self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id), data)
self._modified = now
except KeyError:
if found:
raise
# Did not exist, add it now
root_proxy.add(MibDbExternal.MIB_PATH, data)
self._created = now
self._modified = now
def remove(self, device_id):
"""
Remove an ONU from the database
:param device_id: (str) Device ID of ONU to remove from database
"""
self.log.debug('remove-device', device_id=device_id)
if not self._started:
raise DatabaseStateError('The Database is not currently active')
if not isinstance(device_id, basestring):
            raise TypeError('Device ID should be a string')
try:
# self._root_proxy.get(MibDbExternal.DEVICE_PATH.format(device_id))
self._root_proxy.remove(MibDbExternal.DEVICE_PATH.format(device_id))
self._modified = datetime.utcnow()
except KeyError:
# Did not exists, which is not a failure
pass
except Exception as e:
self.log.exception('remove-exception', device_id=device_id, e=e)
raise
@property
def _root_proxy(self):
return self._core.get_proxy('/')
def _device_proxy(self, device_id):
"""
Return a config proxy to the OMCI MIB_DB leaf for a given device
:param device_id: (str) ONU Device ID
:return: (ConfigProxy) Configuration proxy rooted at OMCI MIB DB
:raises KeyError: If the device does not exist in the database
"""
if not isinstance(device_id, basestring):
            raise TypeError('Device ID should be a string')
if not self._started:
raise DatabaseStateError('The Database is not currently active')
return self._core.get_proxy(MibDbExternal.DEVICE_PATH.format(device_id))
def _class_proxy(self, device_id, class_id, create=False):
"""
Get a config proxy to a specific managed entity class
:param device_id: (str) ONU Device ID
:param class_id: (int) Class ID
:param create: (bool) If true, create default instance (and class)
:return: (ConfigProxy) Class configuration proxy
:raises DatabaseStateError: If database is not started
:raises KeyError: If Instance does not exist and 'create' is False
"""
if not self._started:
raise DatabaseStateError('The Database is not currently active')
if not 0 <= class_id <= 0xFFFF:
raise ValueError('class-id is 0..0xFFFF')
fmt = MibDbExternal.DEVICE_PATH + MibDbExternal.CLASS_PATH
path = fmt.format(device_id, class_id)
try:
return self._core.get_proxy(path)
except KeyError:
if not create:
# This can occur right after a MIB Reset if the ONU publishes AVCs right away
# and during the MIB audit resync for ONU created MEs in response to an OLT
# created ME. Fail since for these test cases they occur during a verification
# 'query' and not the ME creation during resync. Calling code should handle
                # the exception if it is expected to occur on occasion.
self.log.debug('class-proxy-does-not-exist', device_id=device_id,
class_id=class_id)
raise
# Create class
data = MibClassData(class_id=class_id)
root_path = MibDbExternal.CLASSES_PATH.format(device_id)
self._root_proxy.add(root_path, data)
return self._core.get_proxy(path)
def _instance_proxy(self, device_id, class_id, instance_id, create=False):
"""
Get a config proxy to a specific managed entity instance
:param device_id: (str) ONU Device ID
:param class_id: (int) Class ID
:param instance_id: (int) Instance ID
:param create: (bool) If true, create default instance (and class)
:return: (ConfigProxy) Instance configuration proxy
:raises DatabaseStateError: If database is not started
:raises KeyError: If Instance does not exist and 'create' is False
"""
if not self._started:
raise DatabaseStateError('The Database is not currently active')
if not isinstance(device_id, basestring):
            raise TypeError('Device ID should be a string')
if not 0 <= class_id <= 0xFFFF:
raise ValueError('class-id is 0..0xFFFF')
if not 0 <= instance_id <= 0xFFFF:
raise ValueError('instance-id is 0..0xFFFF')
fmt = MibDbExternal.DEVICE_PATH + MibDbExternal.INSTANCE_PATH
path = fmt.format(device_id, class_id, instance_id)
try:
return self._core.get_proxy(path)
except KeyError:
if not create:
# This can occur right after a MIB Reset if the ONU publishes AVCs right away
# and during the MIB audit resync for ONU created MEs in response to an OLT
# created ME. Fail since for these test cases they occur during a verification
# 'query' and not the ME creation during resync. Calling code should handle
                # the exception if it is expected to occur on occasion.
self.log.info('instance-proxy-does-not-exist', device_id=device_id,
class_id=class_id, instance_id=instance_id)
raise
# Create instance, first make sure class exists
self._class_proxy(device_id, class_id, create=True)
now = self._time_to_string(datetime.utcnow())
data = MibInstanceData(instance_id=instance_id, created=now, modified=now)
root_path = MibDbExternal.INSTANCES_PATH.format(device_id, class_id)
self._root_proxy.add(root_path, data)
return self._core.get_proxy(path)
def on_mib_reset(self, device_id):
"""
Reset/clear the database for a specific Device
:param device_id: (str) ONU Device ID
:raises DatabaseStateError: If the database is not enabled
:raises KeyError: If the device does not exist in the database
"""
self.log.debug('on-mib-reset', device_id=device_id)
try:
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=2)
# Wipe out any existing class IDs
class_ids = [c.class_id for c in data.classes]
if len(class_ids):
for class_id in class_ids:
device_proxy.remove(MibDbExternal.CLASS_PATH.format(class_id))
# Reset MIB Data Sync to zero
now = datetime.utcnow()
data = MibDeviceData(device_id=device_id,
created=data.created,
last_sync_time=data.last_sync_time,
mib_data_sync=0,
version=MibDbExternal.CURRENT_VERSION)
# Update
self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
data)
self._modified = now
self.log.debug('mib-reset-complete', device_id=device_id)
except Exception as e:
self.log.exception('mib-reset-exception', device_id=device_id, e=e)
raise
def save_mib_data_sync(self, device_id, value):
"""
Save the MIB Data Sync to the database in an easy location to access
:param device_id: (str) ONU Device ID
:param value: (int) Value to save
"""
self.log.debug('save-mds', device_id=device_id, value=value)
try:
if not isinstance(value, int):
raise TypeError('MIB Data Sync is an integer')
if not 0 <= value <= 255:
raise ValueError('Invalid MIB-data-sync value {}. Must be 0..255'.
format(value))
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=0)
now = datetime.utcnow()
data.mib_data_sync = value
# Update
self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
data)
self._modified = now
self.log.debug('save-mds-complete', device_id=device_id)
except Exception as e:
self.log.exception('save-mds-exception', device_id=device_id, e=e)
raise
def get_mib_data_sync(self, device_id):
"""
Get the MIB Data Sync value last saved to the database for a device
:param device_id: (str) ONU Device ID
:return: (int) The Value or None if not found
"""
self.log.debug('get-mds', device_id=device_id)
try:
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=0)
return int(data.mib_data_sync)
except KeyError:
return None # OMCI MIB_DB entry has not yet been created
except Exception as e:
self.log.exception('get-mds-exception', device_id=device_id, e=e)
raise
def save_last_sync(self, device_id, value):
"""
Save the Last Sync time to the database in an easy location to access
:param device_id: (str) ONU Device ID
:param value: (DateTime) Value to save
"""
self.log.debug('save-last-sync', device_id=device_id, time=str(value))
try:
if not isinstance(value, datetime):
                raise TypeError('Expected a datetime object, got {}'.
                                format(type(value)))
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=0)
now = datetime.utcnow()
data.last_sync_time = self._time_to_string(value)
# Update
self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
data)
self._modified = now
            self.log.debug('save-last-sync-complete', device_id=device_id)
except Exception as e:
self.log.exception('save-last-sync-exception', device_id=device_id, e=e)
raise
def get_last_sync(self, device_id):
"""
Get the Last Sync Time saved to the database for a device
:param device_id: (str) ONU Device ID
        :return: (datetime) The value or None if not found
"""
self.log.debug('get-last-sync', device_id=device_id)
try:
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=0)
return self._string_to_time(data.last_sync_time)
except KeyError:
return None # OMCI MIB_DB entry has not yet been created
except Exception as e:
self.log.exception('get-last-sync-exception', e=e)
raise
def _add_new_class(self, device_id, class_id, instance_id, attributes):
"""
Create an entry for a new class in the external database
:param device_id: (str) ONU Device ID
:param class_id: (int) ME Class ID
:param instance_id: (int) ME Entity ID
:param attributes: (dict) Attribute dictionary
:returns: (bool) True if the value was saved to the database. False if the
value was identical to the current instance
"""
self.log.debug('add', device_id=device_id, class_id=class_id,
instance_id=instance_id, attributes=attributes)
now = self._time_to_string(datetime.utcnow())
attrs = []
for k, v in attributes.items():
if k == 'serial_number':
vendor_id = str(v[0:4])
vendor_specific = v[4:]
vendor_specific = str(vendor_specific.encode('hex'))
str_value = vendor_id + vendor_specific
attrs.append(MibAttributeData(name=k, value=str_value))
else:
str_value = self._attribute_to_string(device_id, class_id, k, v)
attrs.append(MibAttributeData(name=k, value=str_value))
class_data = MibClassData(class_id=class_id,
instances=[MibInstanceData(instance_id=instance_id,
created=now,
modified=now,
attributes=attrs)])
self._root_proxy.add(MibDbExternal.CLASSES_PATH.format(device_id), class_data)
self.log.debug('set-complete', device_id=device_id, class_id=class_id,
entity_id=instance_id, attributes=attributes)
return True
def _add_new_instance(self, device_id, class_id, instance_id, attributes):
"""
        Create an entry for an instance of an existing class in the external database
:param device_id: (str) ONU Device ID
:param class_id: (int) ME Class ID
:param instance_id: (int) ME Entity ID
:param attributes: (dict) Attribute dictionary
:returns: (bool) True if the value was saved to the database. False if the
value was identical to the current instance
"""
self.log.debug('add', device_id=device_id, class_id=class_id,
instance_id=instance_id, attributes=attributes)
now = self._time_to_string(datetime.utcnow())
attrs = []
for k, v in attributes.items():
if k == 'serial_number':
vendor_id = str(v[0:4])
vendor_specific = v[4:]
vendor_specific = str(vendor_specific.encode('hex'))
str_value = vendor_id+vendor_specific
attrs.append(MibAttributeData(name=k, value=str_value))
else:
str_value = self._attribute_to_string(device_id, class_id, k, v)
attrs.append(MibAttributeData(name=k, value=str_value))
instance_data = MibInstanceData(instance_id=instance_id,
created=now,
modified=now,
attributes=attrs)
self._root_proxy.add(MibDbExternal.INSTANCES_PATH.format(device_id, class_id),
instance_data)
self.log.debug('set-complete', device_id=device_id, class_id=class_id,
entity_id=instance_id, attributes=attributes)
return True
def set(self, device_id, class_id, instance_id, attributes):
"""
Set a database value. This should only be called by the MIB synchronizer
and its related tasks
:param device_id: (str) ONU Device ID
:param class_id: (int) ME Class ID
:param instance_id: (int) ME Entity ID
:param attributes: (dict) Attribute dictionary
:returns: (bool) True if the value was saved to the database. False if the
value was identical to the current instance
:raises KeyError: If device does not exist
:raises DatabaseStateError: If the database is not enabled
"""
self.log.debug('set', device_id=device_id, class_id=class_id,
instance_id=instance_id, attributes=attributes)
try:
if not isinstance(device_id, basestring):
raise TypeError('Device ID should be a string')
if not 0 <= class_id <= 0xFFFF:
raise ValueError("Invalid Class ID: {}, should be 0..65535".format(class_id))
if not 0 <= instance_id <= 0xFFFF:
raise ValueError("Invalid Instance ID: {}, should be 0..65535".format(instance_id))
if not isinstance(attributes, dict):
raise TypeError("Attributes should be a dictionary")
if not self._started:
raise DatabaseStateError('The Database is not currently active')
# Determine the best strategy to add the information
dev_proxy = self._device_proxy(device_id)
operation = 'set'
start_time = None
try:
class_data = dev_proxy.get(MibDbExternal.CLASS_PATH.format(class_id), deep=True)
inst_data = next((inst for inst in class_data.instances
if inst.instance_id == instance_id), None)
if inst_data is None:
operation = 'create'
start_time = datetime.utcnow()
return self._add_new_instance(device_id, class_id, instance_id, attributes)
# Possibly adding to or updating an existing instance
# Get instance proxy, creating it if needed
modified = False
new_attributes = []
exist_attr_indexes = dict()
attr_len = len(inst_data.attributes)
for index in xrange(0, attr_len):
name = inst_data.attributes[index].name
value = inst_data.attributes[index].value
exist_attr_indexes[name] = index
new_attributes.append(MibAttributeData(name=name, value=value))
for k, v in attributes.items():
try:
old_value = None if k not in exist_attr_indexes \
else new_attributes[exist_attr_indexes[k]].value
str_value = self._attribute_to_string(device_id, class_id, k, v, old_value)
if k not in exist_attr_indexes:
new_attributes.append(MibAttributeData(name=k, value=str_value))
modified = True
elif new_attributes[exist_attr_indexes[k]].value != str_value:
new_attributes[exist_attr_indexes[k]].value = str_value
modified = True
except Exception as e:
self.log.exception('save-error', e=e, class_id=class_id,
attr=k, value_type=type(v))
if modified:
now = datetime.utcnow()
start_time = now
new_data = MibInstanceData(instance_id=instance_id,
created=inst_data.created,
modified=self._time_to_string(now),
attributes=new_attributes)
dev_proxy.remove(MibDbExternal.INSTANCE_PATH.format(class_id, instance_id))
self._root_proxy.add(MibDbExternal.INSTANCES_PATH.format(device_id,
class_id), new_data)
return modified
except KeyError:
# Here if the class-id does not yet exist in the database
self.log.debug("adding-key-not-found", class_id=class_id)
return self._add_new_class(device_id, class_id, instance_id,
attributes)
finally:
if start_time is not None:
diff = datetime.utcnow() - start_time
# NOTE: Change to 'debug' when checked in, manually change to 'info'
# for development testing.
self.log.debug('db-{}-time'.format(operation), milliseconds=diff.microseconds/1000)
self._statistics[operation].increment(diff.microseconds/1000)
except Exception as e:
self.log.exception('set-exception', device_id=device_id, class_id=class_id,
instance_id=instance_id, attributes=attributes, e=e)
raise
def delete(self, device_id, class_id, entity_id):
"""
Delete an entity from the database if it exists. If all instances
of a class are deleted, the class is deleted as well.
:param device_id: (str) ONU Device ID
:param class_id: (int) ME Class ID
:param entity_id: (int) ME Entity ID
:returns: (bool) True if the instance was found and deleted. False
if it did not exist.
:raises KeyError: If device does not exist
:raises DatabaseStateError: If the database is not enabled
"""
self.log.debug('delete', device_id=device_id, class_id=class_id,
entity_id=entity_id)
if not self._started:
raise DatabaseStateError('The Database is not currently active')
if not isinstance(device_id, basestring):
            raise TypeError('Device ID should be a string')
if not 0 <= class_id <= 0xFFFF:
raise ValueError('class-id is 0..0xFFFF')
if not 0 <= entity_id <= 0xFFFF:
raise ValueError('instance-id is 0..0xFFFF')
start_time = datetime.utcnow()
try:
# Remove instance
self._instance_proxy(device_id, class_id, entity_id).remove('/')
now = datetime.utcnow()
# If resulting class has no instance, remove it as well
class_proxy = self._class_proxy(device_id, class_id)
class_data = class_proxy.get('/', depth=1)
if len(class_data.instances) == 0:
class_proxy.remove('/')
self._modified = now
return True
except KeyError:
return False # Not found
except Exception as e:
self.log.exception('get-last-sync-exception', device_id=device_id, e=e)
raise
finally:
diff = datetime.utcnow() - start_time
# NOTE: Change to 'debug' when checked in, manually change to 'info'
# for development testing.
self.log.debug('db-delete-time', milliseconds=diff.microseconds/1000)
self._statistics['delete'].increment(diff.microseconds/1000)
def query(self, device_id, class_id=None, instance_id=None, attributes=None):
"""
        Get database information.
        This method can be used to request information from the database down to the
        level of detail requested
:param device_id: (str) ONU Device ID
:param class_id: (int) Managed Entity class ID
:param instance_id: (int) Managed Entity instance
:param attributes: (list/set or str) Managed Entity instance's attributes
:return: (dict) The value(s) requested. If class/inst/attribute is
not found, an empty dictionary is returned
:raises KeyError: If the requested device does not exist
:raises DatabaseStateError: If the database is not enabled
"""
self.log.debug('query', device_id=device_id, class_id=class_id,
instance_id=instance_id, attributes=attributes)
start_time = datetime.utcnow()
end_time = None
try:
if class_id is None:
# Get full device info
dev_data = self._device_proxy(device_id).get('/', depth=-1)
end_time = datetime.utcnow()
data = self._device_to_dict(dev_data)
elif instance_id is None:
# Get all instances of the class
try:
cls_data = self._class_proxy(device_id, class_id).get('/', depth=-1)
end_time = datetime.utcnow()
data = self._class_to_dict(device_id, cls_data)
except KeyError:
data = dict()
else:
# Get all attributes of a specific ME
try:
inst_data = self._instance_proxy(device_id, class_id, instance_id).\
get('/', depth=-1)
end_time = datetime.utcnow()
if attributes is None:
# All Attributes
data = self._instance_to_dict(device_id, class_id, inst_data)
else:
# Specific attribute(s)
if isinstance(attributes, basestring):
attributes = {attributes}
data = {
attr.name: self._string_to_attribute(device_id,
class_id,
attr.name,
attr.value)
for attr in inst_data.attributes if attr.name in attributes}
except KeyError:
data = dict()
return data
except KeyError:
self.log.warn('query-no-device', device_id=device_id)
raise
except Exception as e:
self.log.exception('get-last-sync-exception', device_id=device_id, e=e)
raise
finally:
if end_time is not None:
                diff = end_time - start_time
# NOTE: Change to 'debug' when checked in, manually change to 'info'
# for development testing.
self.log.debug('db-get-time', milliseconds=diff.microseconds/1000, class_id=class_id,
instance_id=instance_id)
self._statistics['get'].increment(diff.microseconds/1000)
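    # Query granularity sketch (illustrative; 'onu-1' and the class/instance/attribute
    # ids are hypothetical, matching the query() signature above):
    #
    #     db.query('onu-1')                      # full device MIB as a dict
    #     db.query('onu-1', 256)                 # every instance of class 256
    #     db.query('onu-1', 256, 0)              # all attributes of instance 0
    #     db.query('onu-1', 256, 0, 'port_id')   # just that one attribute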
def _instance_to_dict(self, device_id, class_id, instance):
if not isinstance(instance, MibInstanceData):
raise TypeError('{} is not of type MibInstanceData'.format(type(instance)))
data = {
INSTANCE_ID_KEY: instance.instance_id,
CREATED_KEY: self._string_to_time(instance.created),
MODIFIED_KEY: self._string_to_time(instance.modified),
ATTRIBUTES_KEY: dict()
}
for attribute in instance.attributes:
data[ATTRIBUTES_KEY][attribute.name] = self._string_to_attribute(device_id,
class_id,
attribute.name,
attribute.value)
return data
def _class_to_dict(self, device_id, val):
if not isinstance(val, MibClassData):
raise TypeError('{} is not of type MibClassData'.format(type(val)))
data = {
CLASS_ID_KEY: val.class_id,
}
for instance in val.instances:
data[instance.instance_id] = self._instance_to_dict(device_id,
val.class_id,
instance)
return data
def _device_to_dict(self, val):
if not isinstance(val, MibDeviceData):
raise TypeError('{} is not of type MibDeviceData'.format(type(val)))
data = {
DEVICE_ID_KEY: val.device_id,
CREATED_KEY: self._string_to_time(val.created),
LAST_SYNC_KEY: self._string_to_time(val.last_sync_time),
MDS_KEY: val.mib_data_sync,
VERSION_KEY: val.version,
ME_KEY: dict(),
MSG_TYPE_KEY: set()
}
for class_data in val.classes:
data[class_data.class_id] = self._class_to_dict(val.device_id,
class_data)
for managed_entity in val.managed_entities:
data[ME_KEY][managed_entity.class_id] = managed_entity.name
for msg_type in val.message_types:
data[MSG_TYPE_KEY].add(msg_type.message_type)
return data
def _managed_entity_to_name(self, device_id, class_id):
me_map = self._omci_agent.get_device(device_id).me_map
entity = me_map.get(class_id)
return entity.__name__ if entity is not None else 'UnknownManagedEntity'
def update_supported_managed_entities(self, device_id, managed_entities):
"""
Update the supported OMCI Managed Entities for this device
:param device_id: (str) ONU Device ID
:param managed_entities: (set) Managed Entity class IDs
"""
try:
me_list = [ManagedEntity(class_id=class_id,
name=self._managed_entity_to_name(device_id,
class_id))
for class_id in managed_entities]
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=0)
now = datetime.utcnow()
data.managed_entities.extend(me_list)
# Update
self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
data)
self._modified = now
self.log.debug('save-me-list-complete', device_id=device_id)
except Exception as e:
self.log.exception('add-me-failure', e=e, me_list=managed_entities)
raise
def update_supported_message_types(self, device_id, msg_types):
"""
Update the supported OMCI Managed Entities for this device
:param device_id: (str) ONU Device ID
:param msg_types: (set) Message Type values (ints)
"""
try:
msg_type_list = [MessageType(message_type=msg_type.value)
for msg_type in msg_types]
device_proxy = self._device_proxy(device_id)
data = device_proxy.get(depth=0)
now = datetime.utcnow()
data.message_types.extend(msg_type_list)
# Update
self._root_proxy.update(MibDbExternal.DEVICE_PATH.format(device_id),
data)
self._modified = now
self.log.debug('save-msg-types-complete', device_id=device_id)
except Exception as e:
self.log.exception('add-msg-types-failure', e=e, msg_types=msg_types)
raise
|
[
"voltha.protos.omci_mib_db_pb2.MibInstanceData",
"voltha.protos.omci_mib_db_pb2.MibAttributeData",
"voltha.protos.omci_mib_db_pb2.MibDeviceData",
"voltha.protos.omci_mib_db_pb2.MessageType",
"voltha.protos.omci_mib_db_pb2.MibClassData"
] |
[((15510, 15541), 'voltha.protos.omci_mib_db_pb2.MibClassData', 'MibClassData', ([], {'class_id': 'class_id'}), '(class_id=class_id)\n', (15522, 15541), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((17736, 17803), 'voltha.protos.omci_mib_db_pb2.MibInstanceData', 'MibInstanceData', ([], {'instance_id': 'instance_id', 'created': 'now', 'modified': 'now'}), '(instance_id=instance_id, created=now, modified=now)\n', (17751, 17803), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((26267, 26356), 'voltha.protos.omci_mib_db_pb2.MibInstanceData', 'MibInstanceData', ([], {'instance_id': 'instance_id', 'created': 'now', 'modified': 'now', 'attributes': 'attrs'}), '(instance_id=instance_id, created=now, modified=now,\n attributes=attrs)\n', (26282, 26356), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((18812, 18970), 'voltha.protos.omci_mib_db_pb2.MibDeviceData', 'MibDeviceData', ([], {'device_id': 'device_id', 'created': 'data.created', 'last_sync_time': 'data.last_sync_time', 'mib_data_sync': '(0)', 'version': 'MibDbExternal.CURRENT_VERSION'}), '(device_id=device_id, created=data.created, last_sync_time=\n data.last_sync_time, mib_data_sync=0, version=MibDbExternal.CURRENT_VERSION\n )\n', (18825, 18970), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((41893, 41933), 'voltha.protos.omci_mib_db_pb2.MessageType', 'MessageType', ([], {'message_type': 'msg_type.value'}), '(message_type=msg_type.value)\n', (41904, 41933), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((24129, 24170), 'voltha.protos.omci_mib_db_pb2.MibAttributeData', 'MibAttributeData', ([], {'name': 'k', 'value': 'str_value'}), '(name=k, value=str_value)\n', (24145, 24170), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((24300, 24341), 'voltha.protos.omci_mib_db_pb2.MibAttributeData', 'MibAttributeData', ([], {'name': 'k', 'value': 'str_value'}), '(name=k, value=str_value)\n', (24316, 24341), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((24442, 24531), 'voltha.protos.omci_mib_db_pb2.MibInstanceData', 'MibInstanceData', ([], {'instance_id': 'instance_id', 'created': 'now', 'modified': 'now', 'attributes': 'attrs'}), '(instance_id=instance_id, created=now, modified=now,\n attributes=attrs)\n', (24457, 24531), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((26028, 26069), 'voltha.protos.omci_mib_db_pb2.MibAttributeData', 'MibAttributeData', ([], {'name': 'k', 'value': 'str_value'}), '(name=k, value=str_value)\n', (26044, 26069), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((26199, 26240), 'voltha.protos.omci_mib_db_pb2.MibAttributeData', 'MibAttributeData', ([], {'name': 'k', 'value': 'str_value'}), '(name=k, value=str_value)\n', (26215, 26240), False, 
'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((29471, 29511), 'voltha.protos.omci_mib_db_pb2.MibAttributeData', 'MibAttributeData', ([], {'name': 'name', 'value': 'value'}), '(name=name, value=value)\n', (29487, 29511), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n'), ((29946, 29987), 'voltha.protos.omci_mib_db_pb2.MibAttributeData', 'MibAttributeData', ([], {'name': 'k', 'value': 'str_value'}), '(name=k, value=str_value)\n', (29962, 29987), False, 'from voltha.protos.omci_mib_db_pb2 import MibInstanceData, MibClassData, MibDeviceData, MibAttributeData, MessageType, ManagedEntity\n')]
|
import warnings
from pygromos.files._basics import _general_gromos_file, parser
from pygromos.files.blocks import topology_blocks as blocks
class Distance_restraints(_general_gromos_file._general_gromos_file):
required_blocks = ["TITLE", "DISTANCERESPEC"]
_gromos_file_ending:str = "disres"
def __init__(self, in_value:(str or dict)=None):
self.blocksset = []
self.block_names = {"TITLE": "title_block", "DISTANCERESSPEC":"distance_res_spec_block"}
super().__init__(in_value=in_value)
"""
if(type(path) is str):
self.path = path
self.read_disres(path)
elif(path==None):
warnings.warn("Warning!: generated empty disres obj!")
else:
raise IOError("disres class got "+str(type(path))+" as input. Unknown input type for disres.")
"""
def read_blocks(self):
#parse file into dicts
data = parser.read_disres(self.path)
#add _blocks as attribute to objects
for key, sub_content in data.items():
#print(sub_content)
self.add_block(block=sub_content)
class Disres(Distance_restraints):
pass
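# Illustrative usage sketch (not part of the original file; the path below is
# hypothetical): constructing the class from an existing disres file parses the
# TITLE and DISTANCERESSPEC blocks and attaches them as attributes of the object.
#
#     disres = Disres(in_value="example/system.disres")
#     print(disres.TITLE)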
|
[
"pygromos.files._basics.parser.read_disres"
] |
[((934, 963), 'pygromos.files._basics.parser.read_disres', 'parser.read_disres', (['self.path'], {}), '(self.path)\n', (952, 963), False, 'from pygromos.files._basics import _general_gromos_file, parser\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# A tf.keras implementation of ghostnet
#
#
import os, sys
import warnings
import math
from keras_applications.imagenet_utils import _obtain_input_shape
from keras_applications.imagenet_utils import preprocess_input as _preprocess_input
from tensorflow.keras.utils import get_source_inputs, get_file
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply
from tensorflow.keras import backend as K
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization
BASE_WEIGHT_PATH = (
'https://github.com/david8862/tf-keras-image-classifier/'
'releases/download/v1.0.0/')
def preprocess_input(x):
"""
"mode" option description in preprocess_input
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
"""
# here we use pytorch mode preprocess to align with origin
#x = _preprocess_input(x, mode='torch', backend=K)
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
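# Illustrative example (added for clarity): preprocess_input expects a float
# array scaled 0..255 in RGB channel-last layout and normalizes it in place,
# e.g.
#
#     import numpy as np
#     batch = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
#     batch = preprocess_input(batch)   # roughly zero-mean, unit-variance per channel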
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
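# Worked examples (added for illustration; values follow directly from the
# rounding rule above):
#
#     >>> _make_divisible(17, 8)    # 21 // 8 * 8 -> nearest multiple of 8
#     16
#     >>> _make_divisible(40, 32)   # 32 would undershoot 40 by more than 10%,
#     64                            # so a full divisor is added back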
def hard_sigmoid(x):
return ReLU(6.0)(x + 3.0) / 6.0
def primary_conv(x, output_filters, kernel_size, strides=(1,1), padding='same', act=True, use_bias=False, name=None):
x = YoloConv2D(filters=output_filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name + '_0')(x)
x = CustomBatchNormalization(name=name+'_1')(x)
x = ReLU(name=name+'_relu')(x) if act else x
return x
def cheap_operations(x, output_filters, kernel_size, strides=(1,1), padding='same', act=True, use_bias=False, name=None):
x = YoloDepthwiseConv2D(kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name+'_0')(x)
x = CustomBatchNormalization(name=name+'_1')(x)
x = ReLU(name=name+'_relu')(x) if act else x
return x
def SqueezeExcite(input_x, se_ratio=0.25, reduced_base_chs=None, divisor=4, name=None):
    reduce_chs = _make_divisible((reduced_base_chs or int(input_x.shape[-1]))*se_ratio, divisor)
x = GlobalAveragePooling2D(name=name+'_avg_pool2d')(input_x)
if K.image_data_format() == 'channels_first':
x = Reshape((int(input_x.shape[-1]), 1, 1))(x)
else:
x = Reshape((1, 1, int(input_x.shape[-1])))(x)
x = YoloConv2D(filters=reduce_chs, kernel_size=1, use_bias=True, name=name+'_conv_reduce')(x)
x = ReLU(name=name+'_act')(x)
x = YoloConv2D(filters=int(input_x.shape[-1]), kernel_size=1, use_bias=True, name=name+'_conv_expand')(x)
x = Activation(hard_sigmoid, name=name+'_hard_sigmoid')(x)
x = Multiply()([input_x, x])
return x
def ConvBnAct(input_x, out_chs, kernel_size, stride=(1,1), name=None):
x = YoloConv2D(filters=out_chs,
kernel_size=kernel_size,
strides=stride,
padding='valid',
use_bias=False,
name=name+'_conv')(input_x)
x = CustomBatchNormalization(name=name+'_bn1')(x)
x = ReLU(name=name+'_relu')(x)
return x
def GhostModule(input_x, output_chs, kernel_size=1, ratio=2, dw_size=3, stride=(1,1), act=True, name=None):
init_channels = int(math.ceil(output_chs / ratio))
new_channels = int(init_channels * (ratio - 1))
x1 = primary_conv(input_x,
init_channels,
kernel_size=kernel_size,
strides=stride,
padding='valid',
act=act,
name = name + '_primary_conv')
x2 = cheap_operations(x1,
new_channels,
kernel_size=dw_size,
strides=(1,1),
padding= 'same',
act=act,
name = name + '_cheap_operation')
x = Concatenate(axis=3,name=name+'_concat')([x1,x2])
return x
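# Channel bookkeeping for the ghost module above (illustrative): with
# output_chs=64 and ratio=2, the primary conv produces ceil(64/2)=32 feature
# maps and the cheap depthwise branch adds 32*(2-1)=32 more, so the concat
# returns the requested 64 channels at a fraction of the convolution cost.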
def GhostBottleneck(input_x, mid_chs, out_chs, dw_kernel_size=3, stride=(1,1), se_ratio=0., name=None):
'''ghostnet bottleneck w/optional se'''
has_se = se_ratio is not None and se_ratio > 0.
#1st ghost bottleneck
x = GhostModule(input_x, mid_chs, act=True, name=name+'_ghost1')
#depth_with convolution
if stride[0] > 1:
x = YoloDepthwiseConv2D(kernel_size=dw_kernel_size,
strides=stride,
padding='same',
use_bias=False,
name=name+'_conv_dw')(x)
x = CustomBatchNormalization(name=name+'_bn_dw')(x)
#Squeeze_and_excitation
if has_se:
x = SqueezeExcite(x, se_ratio=se_ratio, name=name+'_se')
#2nd ghost bottleneck
x = GhostModule(x, out_chs, act=False, name=name+'_ghost2')
#short cut
if (input_x.shape[-1] == out_chs and stride[0] == 1):
sc = input_x
else:
name1 = name + '_shortcut'
sc = YoloDepthwiseConv2D(kernel_size=dw_kernel_size,
strides=stride,
padding='same',
use_bias=False,
name=name1+'_0')(input_x)
sc = CustomBatchNormalization(name=name1+'_1')(sc)
sc = YoloConv2D(filters=out_chs,
kernel_size=1,
strides=(1,1),
padding='valid',
use_bias=False,
name=name1+'_2')(sc)
sc = CustomBatchNormalization(name=name1+'_3')(sc)
x = Add(name=name+'_add')([x, sc])
return x
DEFAULT_CFGS = [
# k, t, c, SE, s
# stage1
[[3, 16, 16, 0, 1]],
# stage2
[[3, 48, 24, 0, 2]],
[[3, 72, 24, 0, 1]],
# stage3
[[5, 72, 40, 0.25, 2]],
[[5, 120, 40, 0.25, 1]],
# stage4
[[3, 240, 80, 0, 2]],
[[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1]
],
# stage5
[[5, 672, 160, 0.25, 2]],
[[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1]
]
]
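# Reading one DEFAULT_CFGS row (illustrative): [5, 72, 40, 0.25, 2] means a 5x5
# depthwise kernel, an expansion (hidden) size of 72, 40 output channels, a
# squeeze-excite ratio of 0.25 and stride 2 -- i.e. the k, exp_size, c,
# se_ratio, s unpacking used inside the GhostNet() builder below.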
def GhostNet(input_shape=None,
include_top=True,
weights='imagenet',
input_tensor=None,
cfgs=DEFAULT_CFGS,
width=1.0,
dropout_rate=0.2,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the GhostNet architecture.
# Arguments
input_shape: optional shape tuple, to be specified if you would
like to use a model with an input img resolution that is not
(224, 224, 3).
            It should have exactly 3 input channels (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
input_shape will be used if they match, if the shapes
do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
cfgs: model structure config list
width: controls the width of the network
dropout_rate: fraction of the input units to drop on the last layer
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape or invalid alpha, rows when
weights='imagenet'
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
# If input_shape is None and input_tensor is None using standard shape
if input_shape is None and input_tensor is None:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
#if not K.is_keras_tensor(input_tensor):
#img_input = Input(tensor=input_tensor, shape=input_shape)
#else:
#img_input = input_tensor
img_input = input_tensor
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
# building first layer
output_channel = int(_make_divisible(16 * width, 4))
x = YoloConv2D(filters=output_channel,
kernel_size=3,
strides=(2, 2),
padding='same',
use_bias=False,
name='conv_stem')(img_input)
x = CustomBatchNormalization(name='bn1')(x)
x = ReLU(name='Conv2D_1_act')(x)
# building inverted residual blocks
for index, cfg in enumerate(cfgs):
sub_index = 0
for k,exp_size,c,se_ratio,s in cfg:
output_channel = int(_make_divisible(c * width, 4))
hidden_channel = int(_make_divisible(exp_size * width, 4))
x = GhostBottleneck(x, hidden_channel, output_channel, k, (s,s),
se_ratio=se_ratio,
name='blocks_'+str(index)+'_'+str(sub_index))
sub_index += 1
output_channel = _make_divisible(exp_size * width, 4)
x = ConvBnAct(x, output_channel, kernel_size=1, name='blocks_9_0')
if include_top:
x = GlobalAveragePooling2D(name='global_avg_pooling2D')(x)
if K.image_data_format() == 'channels_first':
x = Reshape((output_channel, 1, 1))(x)
else:
x = Reshape((1, 1, output_channel))(x)
# building last several layers
output_channel = 1280
x = YoloConv2D(filters=output_channel,
kernel_size=1,
strides=(1,1),
padding='valid',
use_bias=True,
name='conv_head')(x)
x = ReLU(name='relu_head')(x)
if dropout_rate > 0.:
x = Dropout(dropout_rate, name='dropout_1')(x)
x = Flatten()(x)
x = Dense(units=classes, activation='softmax',
use_bias=True, name='classifier')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='ghostnet_%0.2f' % (width))
# Load weights.
if weights == 'imagenet':
if include_top:
file_name = 'ghostnet_weights_tf_dim_ordering_tf_kernels_224.h5'
weight_path = BASE_WEIGHT_PATH + file_name
else:
file_name = 'ghostnet_weights_tf_dim_ordering_tf_kernels_224_no_top.h5'
weight_path = BASE_WEIGHT_PATH + file_name
weights_path = get_file(file_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
if __name__ == '__main__':
input_tensor = Input(shape=(None, None, 3), name='image_input')
#model = GhostNet(include_top=False, input_tensor=input_tensor, weights='imagenet')
model = GhostNet(include_top=True, input_shape=(224, 224, 3), weights='imagenet')
model.summary()
K.set_learning_phase(0)
import numpy as np
from tensorflow.keras.applications.resnet50 import decode_predictions
from keras_preprocessing import image
img = image.load_img('../../example/eagle.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
|
[
"keras_preprocessing.image.load_img",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Multiply",
"common.backbones.layers.YoloConv2D",
"tensorflow.keras.backend.set_learning_phase",
"common.backbones.layers.YoloDepthwiseConv2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Concatenate",
"os.path.exists",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"tensorflow.keras.utils.get_file",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Dropout",
"math.ceil",
"os.path.realpath",
"keras_preprocessing.image.img_to_array",
"tensorflow.keras.models.Model",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.applications.resnet50.decode_predictions",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"common.backbones.layers.CustomBatchNormalization",
"tensorflow.keras.layers.ReLU",
"numpy.expand_dims",
"tensorflow.keras.utils.get_source_inputs",
"tensorflow.keras.layers.Add"
] |
[((14029, 14076), 'tensorflow.keras.models.Model', 'Model', (['inputs', 'x'], {'name': "('ghostnet_%0.2f' % width)"}), "(inputs, x, name='ghostnet_%0.2f' % width)\n", (14034, 14076), False, 'from tensorflow.keras.models import Model\n'), ((14692, 14740), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)', 'name': '"""image_input"""'}), "(shape=(None, None, 3), name='image_input')\n", (14697, 14740), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((14939, 14962), 'tensorflow.keras.backend.set_learning_phase', 'K.set_learning_phase', (['(0)'], {}), '(0)\n', (14959, 14962), True, 'from tensorflow.keras import backend as K\n'), ((15114, 15179), 'keras_preprocessing.image.load_img', 'image.load_img', (['"""../../example/eagle.jpg"""'], {'target_size': '(224, 224)'}), "('../../example/eagle.jpg', target_size=(224, 224))\n", (15128, 15179), False, 'from keras_preprocessing import image\n'), ((15188, 15211), 'keras_preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (15206, 15211), False, 'from keras_preprocessing import image\n'), ((15220, 15245), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (15234, 15245), True, 'import numpy as np\n'), ((2664, 2798), 'common.backbones.layers.YoloConv2D', 'YoloConv2D', ([], {'filters': 'output_filters', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': 'padding', 'use_bias': 'use_bias', 'name': "(name + '_0')"}), "(filters=output_filters, kernel_size=kernel_size, strides=strides,\n padding=padding, use_bias=use_bias, name=name + '_0')\n", (2674, 2798), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((2881, 2923), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': "(name + '_1')"}), "(name=name + '_1')\n", (2905, 2923), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((3119, 3239), 'common.backbones.layers.YoloDepthwiseConv2D', 'YoloDepthwiseConv2D', ([], {'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': 'padding', 'use_bias': 'use_bias', 'name': "(name + '_0')"}), "(kernel_size=kernel_size, strides=strides, padding=\n padding, use_bias=use_bias, name=name + '_0')\n", (3138, 3239), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((3340, 3382), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': "(name + '_1')"}), "(name=name + '_1')\n", (3364, 3382), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((3641, 3690), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'name': "(name + '_avg_pool2d')"}), "(name=name + '_avg_pool2d')\n", (3663, 3690), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((3705, 3726), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3724, 3726), True, 'from tensorflow.keras import backend as K\n'), ((3877, 3969), 'common.backbones.layers.YoloConv2D', 'YoloConv2D', ([], {'filters': 'reduce_chs', 'kernel_size': '(1)', 'use_bias': '(True)', 'name': "(name + '_conv_reduce')"}), "(filters=reduce_chs, kernel_size=1, use_bias=True, name=name +\n '_conv_reduce')\n", (3887, 
3969), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((3975, 3999), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': "(name + '_act')"}), "(name=name + '_act')\n", (3979, 3999), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((4120, 4173), 'tensorflow.keras.layers.Activation', 'Activation', (['hard_sigmoid'], {'name': "(name + '_hard_sigmoid')"}), "(hard_sigmoid, name=name + '_hard_sigmoid')\n", (4130, 4173), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((4183, 4193), 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (4191, 4193), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((4303, 4429), 'common.backbones.layers.YoloConv2D', 'YoloConv2D', ([], {'filters': 'out_chs', 'kernel_size': 'kernel_size', 'strides': 'stride', 'padding': '"""valid"""', 'use_bias': '(False)', 'name': "(name + '_conv')"}), "(filters=out_chs, kernel_size=kernel_size, strides=stride,\n padding='valid', use_bias=False, name=name + '_conv')\n", (4313, 4429), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((4516, 4560), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': "(name + '_bn1')"}), "(name=name + '_bn1')\n", (4540, 4560), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((4570, 4595), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': "(name + '_relu')"}), "(name=name + '_relu')\n", (4574, 4595), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((4744, 4773), 'math.ceil', 'math.ceil', (['(output_chs / ratio)'], {}), '(output_chs / ratio)\n', (4753, 4773), False, 'import math\n'), ((5407, 5449), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)', 'name': "(name + '_concat')"}), "(axis=3, name=name + '_concat')\n", (5418, 5449), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((7066, 7089), 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(name + '_add')"}), "(name=name + '_add')\n", (7069, 7089), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((11460, 11484), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (11465, 11484), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((11869, 11989), 'common.backbones.layers.YoloConv2D', 'YoloConv2D', ([], {'filters': 'output_channel', 'kernel_size': '(3)', 'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""conv_stem"""'}), "(filters=output_channel, kernel_size=3, strides=(2, 2), padding=\n 'same', use_bias=False, name='conv_stem')\n", (11879, 11989), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((12079, 12115), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': '"""bn1"""'}), "(name='bn1')\n", (12103, 12115), False, 
'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((12127, 12152), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': '"""Conv2D_1_act"""'}), "(name='Conv2D_1_act')\n", (12131, 12152), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((13927, 13958), 'tensorflow.keras.utils.get_source_inputs', 'get_source_inputs', (['input_tensor'], {}), '(input_tensor)\n', (13944, 13958), False, 'from tensorflow.keras.utils import get_source_inputs, get_file\n'), ((14463, 14518), 'tensorflow.keras.utils.get_file', 'get_file', (['file_name', 'weight_path'], {'cache_subdir': '"""models"""'}), "(file_name, weight_path, cache_subdir='models')\n", (14471, 14518), False, 'from tensorflow.keras.utils import get_source_inputs, get_file\n'), ((15328, 15353), 'tensorflow.keras.applications.resnet50.decode_predictions', 'decode_predictions', (['preds'], {}), '(preds)\n', (15346, 15353), False, 'from tensorflow.keras.applications.resnet50 import decode_predictions\n'), ((726, 752), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (742, 752), False, 'import os, sys\n'), ((2511, 2520), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (2515, 2520), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((2933, 2958), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': "(name + '_relu')"}), "(name=name + '_relu')\n", (2937, 2958), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((3392, 3417), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': "(name + '_relu')"}), "(name=name + '_relu')\n", (3396, 3417), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((5830, 5954), 'common.backbones.layers.YoloDepthwiseConv2D', 'YoloDepthwiseConv2D', ([], {'kernel_size': 'dw_kernel_size', 'strides': 'stride', 'padding': '"""same"""', 'use_bias': '(False)', 'name': "(name + '_conv_dw')"}), "(kernel_size=dw_kernel_size, strides=stride, padding=\n 'same', use_bias=False, name=name + '_conv_dw')\n", (5849, 5954), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((6075, 6121), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': "(name + '_bn_dw')"}), "(name=name + '_bn_dw')\n", (6099, 6121), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((6476, 6595), 'common.backbones.layers.YoloDepthwiseConv2D', 'YoloDepthwiseConv2D', ([], {'kernel_size': 'dw_kernel_size', 'strides': 'stride', 'padding': '"""same"""', 'use_bias': '(False)', 'name': "(name1 + '_0')"}), "(kernel_size=dw_kernel_size, strides=stride, padding=\n 'same', use_bias=False, name=name1 + '_0')\n", (6495, 6595), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((6727, 6770), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': "(name1 + '_1')"}), "(name=name1 + '_1')\n", (6751, 6770), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((6786, 6900), 'common.backbones.layers.YoloConv2D', 'YoloConv2D', ([], {'filters': 'out_chs', 
'kernel_size': '(1)', 'strides': '(1, 1)', 'padding': '"""valid"""', 'use_bias': '(False)', 'name': "(name1 + '_2')"}), "(filters=out_chs, kernel_size=1, strides=(1, 1), padding='valid',\n use_bias=False, name=name1 + '_2')\n", (6796, 6900), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((7011, 7054), 'common.backbones.layers.CustomBatchNormalization', 'CustomBatchNormalization', ([], {'name': "(name1 + '_3')"}), "(name=name1 + '_3')\n", (7035, 7054), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((10347, 10370), 'os.path.exists', 'os.path.exists', (['weights'], {}), '(weights)\n', (10361, 10370), False, 'import os, sys\n'), ((11097, 11118), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (11116, 11118), True, 'from tensorflow.keras import backend as K\n'), ((11726, 11747), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (11745, 11747), True, 'from tensorflow.keras import backend as K\n'), ((12833, 12884), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'name': '"""global_avg_pooling2D"""'}), "(name='global_avg_pooling2D')\n", (12855, 12884), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((12899, 12920), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (12918, 12920), True, 'from tensorflow.keras import backend as K\n'), ((13140, 13260), 'common.backbones.layers.YoloConv2D', 'YoloConv2D', ([], {'filters': 'output_channel', 'kernel_size': '(1)', 'strides': '(1, 1)', 'padding': '"""valid"""', 'use_bias': '(True)', 'name': '"""conv_head"""'}), "(filters=output_channel, kernel_size=1, strides=(1, 1), padding=\n 'valid', use_bias=True, name='conv_head')\n", (13150, 13260), False, 'from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization\n'), ((13365, 13387), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': '"""relu_head"""'}), "(name='relu_head')\n", (13369, 13387), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((13493, 13502), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13500, 13502), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((13518, 13594), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'classes', 'activation': '"""softmax"""', 'use_bias': '(True)', 'name': '"""classifier"""'}), "(units=classes, activation='softmax', use_bias=True, name='classifier')\n", (13523, 13594), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((12958, 12989), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(output_channel, 1, 1)'], {}), '((output_channel, 1, 1))\n', (12965, 12989), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((13023, 13054), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, output_channel)'], {}), '((1, 1, output_channel))\n', (13030, 13054), False, 'from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, BatchNormalization, Dense, Flatten, ReLU, Reshape, Activation\n'), ((13438, 13477), 
'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout_rate'], {'name': '"""dropout_1"""'}), "(dropout_rate, name='dropout_1')\n", (13445, 13477), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((13678, 13702), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (13700, 13702), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n'), ((13753, 13773), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {}), '()\n', (13771, 13773), False, 'from tensorflow.keras.layers import Input, GlobalAveragePooling2D, GlobalMaxPooling2D, Concatenate, Dropout, Add, Multiply\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Unit tests for the pydicom.charset module."""
import pytest
import pydicom.charset
from pydicom import dcmread, config
from pydicom.data import get_charset_files, get_testdata_file
from pydicom.dataelem import DataElement
from pydicom.filebase import DicomBytesIO
from pydicom.valuerep import PersonName
# The file names (without '.dcm' extension) of most of the character test
# files, together with the respective decoded PatientName tag values.
# Most of these (except the Korean file) are taken from <NAME>'s
# charset example files.
FILE_PATIENT_NAMES = [
('chrArab', 'قباني^لنزار'),
('chrFren', 'Buc^Jérôme'),
('chrFrenMulti', 'Buc^Jérôme'),
('chrGerm', 'Äneas^Rüdiger'),
('chrGreek', 'Διονυσιος'),
('chrH31', 'Yamada^Tarou=山田^太郎=やまだ^たろう'),
('chrH32', 'ヤマダ^タロウ=山田^太郎=やまだ^たろう'),
('chrHbrw', 'שרון^דבורה'),
('chrI2', 'Hong^Gildong=洪^吉洞=홍^길동'),
('chrJapMulti', 'やまだ^たろう'),
('chrJapMultiExplicitIR6', 'やまだ^たろう'),
('chrKoreanMulti', '김희중'),
('chrRuss', 'Люкceмбypг'),
('chrX1', 'Wang^XiaoDong=王^小東'),
('chrX2', 'Wang^XiaoDong=王^小东'),
]
# Test data for all single-byte coding extensions.
# Mostly taken from the same example files.
ENCODED_NAMES = [
('ISO 2022 IR 13', 'ヤマダ^タロウ',
b'\x1b\x29\x49\xd4\xcf\xc0\xde\x5e\xc0\xdb\xb3'),
('ISO 2022 IR 100', 'Buc^Jérôme',
b'\x1b\x2d\x41\x42\x75\x63\x5e\x4a\xe9\x72\xf4\x6d\x65'),
('ISO 2022 IR 101', 'Wałęsa',
b'\x1b\x2d\x42\x57\x61\xb3\xea\x73\x61'),
('ISO 2022 IR 109', 'antaŭnomo',
b'\x1b\x2d\x43\x61\x6e\x74\x61\xfd\x6e\x6f\x6d\x6f'),
('ISO 2022 IR 110', 'vārds',
b'\x1b\x2d\x44\x76\xe0\x72\x64\x73'),
('ISO 2022 IR 127', 'قباني^لنزار',
b'\x1b\x2d\x47\xe2\xc8\xc7\xe6\xea\x5e\xe4\xe6\xd2\xc7\xd1'),
('ISO 2022 IR 126', 'Διονυσιος',
b'\x1b\x2d\x46\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2'),
('ISO 2022 IR 138', 'שרון^דבורה',
b'\x1b\x2d\x48\xf9\xf8\xe5\xef\x5e\xe3\xe1\xe5\xf8\xe4'),
('ISO 2022 IR 144', 'Люкceмбypг',
b'\x1b\x2d\x4c\xbb\xee\xda\x63\x65\xdc\xd1\x79\x70\xd3'),
('ISO 2022 IR 148', 'Çavuşoğlu',
b'\x1b\x2d\x4d\xc7\x61\x76\x75\xfe\x6f\xf0\x6c\x75'),
('ISO 2022 IR 166', 'นามสกุล',
b'\x1b\x2d\x54\xb9\xd2\xc1\xca\xa1\xd8\xc5'),
]
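# Illustrative sketch (not part of the original test data) of how one entry above
# is exercised by the parametrized test_single_byte_code_extensions below:
#
#   elem = DataElement(0x00081039, 'LO', b'ASCII+' + ENCODED_NAMES[1][2])
#   pydicom.charset.decode_element(elem, ['', 'ISO 2022 IR 100'])
#   assert elem.value == 'ASCII+Buc^Jérôme'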
class TestCharset:
def test_encodings(self):
test_string = 'Hello World'
for x in pydicom.charset.python_encoding.items():
test_string.encode(x[1])
def test_nested_character_sets(self):
"""charset: can read and decode SQ with different encodings........."""
ds = dcmread(get_charset_files("chrSQEncoding.dcm")[0])
ds.decode()
# These datasets inside of the SQ cannot be decoded with
# default_encoding OR UTF-8 (the parent dataset's encoding).
# Instead, we make sure that it is decoded using the
# (0008,0005) tag of the dataset
sequence = ds[0x32, 0x1064][0]
assert ['shift_jis', 'iso2022_jp'] == sequence._character_set
assert 'ヤマダ^タロウ=山田^太郎=やまだ^たろう' == sequence.PatientName
def test_inherited_character_set_in_sequence(self):
"""charset: can read and decode SQ with parent encoding............."""
ds = dcmread(get_charset_files('chrSQEncoding1.dcm')[0])
ds.decode()
# These datasets inside of the SQ shall be decoded with the parent
# dataset's encoding
sequence = ds[0x32, 0x1064][0]
assert ['shift_jis', 'iso2022_jp'] == sequence._character_set
assert 'ヤマダ^タロウ=山田^太郎=やまだ^たろう' == sequence.PatientName
def test_standard_file(self):
"""charset: can read and decode standard file without special char.."""
ds = dcmread(get_testdata_file("CT_small.dcm"))
ds.decode()
assert 'CompressedSamples^CT1' == ds.PatientName
def test_invalid_character_set(self, allow_invalid_values):
"""charset: replace invalid encoding with default encoding"""
ds = dcmread(get_testdata_file("CT_small.dcm"))
ds.read_encoding = None
ds.SpecificCharacterSet = 'Unsupported'
with pytest.warns(
UserWarning,
match=("Unknown encoding 'Unsupported' "
"- using default encoding instead")
):
ds.decode()
assert 'CompressedSamples^CT1' == ds.PatientName
def test_invalid_character_set_enforce_valid(self, enforce_valid_values):
"""charset: raise on invalid encoding"""
ds = dcmread(get_testdata_file("CT_small.dcm"))
ds.read_encoding = None
ds.SpecificCharacterSet = 'Unsupported'
with pytest.raises(LookupError,
match="Unknown encoding 'Unsupported'"):
ds.decode()
def test_decoding_with_specific_tags(self):
"""Decoding is correctly applied even if Specific Character Set
is not in specific tags..."""
rus_file = get_charset_files("chrRuss.dcm")[0]
ds = dcmread(rus_file, specific_tags=['PatientName'])
ds.decode()
assert 2 == len(ds) # specific character set is always decoded
assert 'Люкceмбypг' == ds.PatientName
def test_bad_charset(self):
"""Test bad charset defaults to ISO IR 6"""
# elem.value is PersonName
elem = DataElement(0x00100010, 'PN', 'CITIZEN')
pydicom.charset.decode_element(elem, ['ISO 2022 IR 126'])
assert 'iso_ir_126' in elem.value.encodings
assert 'iso8859' not in elem.value.encodings
# default encoding is iso8859
pydicom.charset.decode_element(elem, [])
assert 'iso8859' in elem.value.encodings
def test_empty_charset(self):
"""Empty charset defaults to ISO IR 6"""
elem = DataElement(0x00100010, 'PN', 'CITIZEN')
pydicom.charset.decode_element(elem, [''])
assert ('iso8859',) == elem.value.encodings
elem = DataElement(0x00100010, 'PN', 'CITIZEN')
pydicom.charset.decode_element(elem, None)
assert ('iso8859',) == elem.value.encodings
def test_bad_encoded_single_encoding(self, allow_invalid_values):
"""Test handling bad encoding for single encoding"""
elem = DataElement(0x00100010, 'PN',
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
with pytest.warns(UserWarning, match="Failed to decode byte string "
"with encoding 'UTF8'"):
pydicom.charset.decode_element(elem, ['ISO_IR 192'])
assert '���������' == elem.value
def test_bad_encoded_single_encoding_enforce_standard(
self, enforce_valid_values):
"""Test handling bad encoding for single encoding if
config.enforce_valid_values is set"""
elem = DataElement(0x00100010, 'PN',
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
msg = ("'utf.?8' codec can't decode byte 0xc4 in position 0: "
"invalid continuation byte")
with pytest.raises(UnicodeDecodeError, match=msg):
pydicom.charset.decode_element(elem, ['ISO_IR 192'])
def test_code_extensions_not_allowed(self):
"""Test that UTF8 does not allow code extensions"""
elem = DataElement(0x00100010, 'PN', b'Buc^J\xc3\xa9r\xc3\xb4me')
msg = ("Value 'ISO_IR 192' for Specific Character Set does not "
"allow code extensions, ignoring: ISO 2022 IR 100, "
"ISO 2022 IR 144")
with pytest.warns(UserWarning, match=msg):
pydicom.charset.decode_element(
elem,
['ISO_IR 192', 'ISO 2022 IR 100', 'ISO 2022 IR 144']
)
assert 'Buc^Jérôme' == elem.value
def test_convert_encodings_warnings(self):
"""Test warning if stand-alone encodings are used as code extension"""
with pytest.warns(UserWarning, match="Value 'GBK' cannot be used as "
"code extension, ignoring it"):
encodings = pydicom.charset.convert_encodings(
['ISO_IR 126', 'GBK', 'ISO 2022 IR 144', 'ISO_IR 192'])
assert ['iso_ir_126', 'iso_ir_144'] == encodings
def test_convert_python_encodings(self):
"""Test that unknown encodings are returned unchanged by
`convert_encodings`"""
encodings = ['iso_ir_126', 'iso_ir_144']
assert encodings == pydicom.charset.convert_encodings(encodings)
def test_convert_empty_encoding(self):
"""Test that empty encodings are handled as default encoding"""
encodings = ''
assert ['iso8859'] == pydicom.charset.convert_encodings(encodings)
encodings = ['']
assert ['iso8859'] == pydicom.charset.convert_encodings(encodings)
encodings = None
assert ['iso8859'] == pydicom.charset.convert_encodings(encodings)
def test_bad_decoded_multi_byte_encoding(self, allow_invalid_values):
"""Test handling bad encoding for single encoding"""
elem = DataElement(0x00100010, 'PN',
b'\x1b$(D\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
with pytest.warns(UserWarning, match='Failed to decode byte string '
'with encodings: iso2022_jp_2'):
pydicom.charset.decode_element(elem, ['ISO 2022 IR 159'])
assert '���������' == elem.value
def test_bad_decoded_multi_byte_encoding_enforce_standard(
self, enforce_valid_values):
"""Test handling bad encoding for single encoding if
`config.enforce_valid_values` is set"""
elem = DataElement(0x00100010, 'PN',
b'\x1b$(D\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
msg = ("'iso2022_jp_2' codec can't decode byte 0xc4 in position 4: "
"illegal multibyte sequence")
with pytest.raises(UnicodeDecodeError, match=msg):
pydicom.charset.decode_element(elem, ['ISO 2022 IR 159'])
def test_unknown_escape_sequence(self, allow_invalid_values):
"""Test handling bad encoding for single encoding"""
elem = DataElement(0x00100010, 'PN',
b'\x1b\x2d\x46\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
with pytest.warns(UserWarning, match='Found unknown escape sequence '
'in encoded string value'):
pydicom.charset.decode_element(elem, ['ISO_IR 100'])
assert '\x1b-FÄéïíõóéïò' == elem.value
def test_unknown_escape_sequence_enforce_standard(
self, enforce_valid_values):
"""Test handling bad encoding for single encoding if
`config.enforce_valid_values` is set"""
elem = DataElement(0x00100010, 'PN',
b'\x1b\x2d\x46\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
with pytest.raises(ValueError, match='Found unknown escape sequence '
'in encoded string value'):
pydicom.charset.decode_element(elem, ['ISO_IR 100'])
def test_patched_charset(self, allow_invalid_values):
"""Test some commonly misspelled charset values"""
elem = DataElement(0x00100010, 'PN', b'Buc^J\xc3\xa9r\xc3\xb4me')
pydicom.charset.decode_element(elem, ['ISO_IR 192'])
# correct encoding
assert 'Buc^Jérôme' == elem.value
# patched encoding shall behave correctly, but a warning is issued
elem = DataElement(0x00100010, 'PN', b'Buc^J\xc3\xa9r\xc3\xb4me')
with pytest.warns(UserWarning,
match='Incorrect value for Specific Character Set '
"'ISO IR 192' - assuming 'ISO_IR 192'"):
pydicom.charset.decode_element(elem, ['ISO IR 192'])
assert 'Buc^Jérôme' == elem.value
elem = DataElement(0x00100010, 'PN', b'Buc^J\xe9r\xf4me')
with pytest.warns(UserWarning,
match='Incorrect value for Specific Character Set '
"'ISO-IR 144' - assuming 'ISO_IR 144'") as w:
pydicom.charset.decode_element(elem, ['ISO_IR 100', 'ISO-IR 144'])
# make sure no warning is issued for the correct value
assert 1 == len(w)
# not patched incorrect encoding is replaced by default encoding
elem = DataElement(0x00100010, 'PN', b'Buc^J\xc3\xa9r\xc3\xb4me')
with pytest.warns(UserWarning,
match="Unknown encoding 'ISOIR 192' - "
"using default encoding instead"):
pydicom.charset.decode_element(elem, ['ISOIR 192'])
# Python encoding also can be used directly
elem = DataElement(0x00100010, 'PN', b'Buc^J\xc3\xa9r\xc3\xb4me')
pydicom.charset.decode_element(elem, ['utf8'])
assert 'Buc^Jérôme' == elem.value
def test_patched_code_extension_charset(self):
"""Test some commonly misspelled charset values for code extensions."""
elem = DataElement(0x00100010, 'PN',
b'Dionysios=\x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
# correct encoding
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 126']
)
assert 'Dionysios=Διονυσιος' == elem.value
# patched encoding shall behave correctly, but a warning is issued
with pytest.warns(UserWarning,
match='Incorrect value for Specific Character Set '
"'ISO_2022-IR 100' - assuming "
"'ISO 2022 IR 100'"):
elem = DataElement(0x00100010, 'PN',
b'Dionysios=\x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
pydicom.charset.decode_element(
elem, ['ISO_2022-IR 100', 'ISO 2022 IR 126']
)
assert 'Dionysios=Διονυσιος' == elem.value
with pytest.warns(UserWarning,
match=r'Incorrect value for Specific Character Set '
r"'ISO_2022_IR\+126' - assuming "
r"'ISO 2022 IR 126'"):
elem = DataElement(0x00100010, 'PN',
b'Dionysios=\x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO_2022_IR+126']
)
assert 'Dionysios=Διονυσιος' == elem.value
def test_multi_charset_default_value(self):
"""Test that the first value is used if no escape code is given"""
# regression test for #707
elem = DataElement(0x00100010, 'PN', b'Buc^J\xe9r\xf4me')
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 144']
)
assert 'Buc^Jérôme' == elem.value
elem = DataElement(0x00081039, 'LO', b'R\xf6ntgenaufnahme')
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 144']
)
assert 'Röntgenaufnahme' == elem.value
def test_single_byte_multi_charset_personname(self):
# component groups with different encodings
elem = DataElement(0x00100010, 'PN',
b'Dionysios=\x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 126']
)
assert 'Dionysios=Διονυσιος' == elem.value
# multiple values with different encodings
encoded = (b'Buc^J\xe9r\xf4me\\\x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2\\'
b'\x1b\x2d\x4C'
b'\xbb\xee\xda\x63\x65\xdc\xd1\x79\x70\xd3')
elem = DataElement(0x00100060, 'PN', encoded)
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 144', 'ISO 2022 IR 126']
)
assert ['Buc^Jérôme', 'Διονυσιος', 'Люкceмбypг'] == elem.value
def test_single_byte_multi_charset_text(self):
# changed encoding inside the string
elem = DataElement(0x00081039, 'LO',
b'Dionysios is \x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2')
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 126']
)
assert 'Dionysios is Διονυσιος' == elem.value
# multiple values with different encodings
elem = DataElement(0x00081039, 'LO',
b'Buc^J\xe9r\xf4me\\\x1b\x2d\x46'
b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2\\'
b'\x1b\x2d\x4C'
b'\xbb\xee\xda\x63\x65\xdc\xd1\x79\x70\xd3')
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 144', 'ISO 2022 IR 126']
)
assert ['Buc^Jérôme', 'Διονυσιος', 'Люкceмбypг'] == elem.value
@pytest.mark.parametrize('encoding, decoded, raw_data', ENCODED_NAMES)
def test_single_byte_code_extensions(self, encoding, decoded, raw_data):
# single-byte encoding as code extension
elem = DataElement(0x00081039, 'LO', b'ASCII+' + raw_data)
pydicom.charset.decode_element(elem, ['', encoding])
assert 'ASCII+' + decoded == elem.value
@pytest.mark.parametrize('filename, patient_name', FILE_PATIENT_NAMES)
def test_charset_patient_names(self, filename, patient_name):
"""Test patient names are correctly decoded and encoded."""
# check that patient names are correctly read
file_path = get_charset_files(filename + '.dcm')[0]
ds = dcmread(file_path)
ds.decode()
assert patient_name == ds.PatientName
# check that patient names are correctly written back
fp = DicomBytesIO()
fp.is_implicit_VR = False
fp.is_little_endian = True
ds.save_as(fp, write_like_original=False)
fp.seek(0)
ds = dcmread(fp)
assert patient_name == ds.PatientName
# check that patient names are correctly written back
# without original byte string (PersonName only)
if hasattr(ds.PatientName, 'original_string'):
ds.PatientName.original_string = None
fp = DicomBytesIO()
fp.is_implicit_VR = False
fp.is_little_endian = True
ds.save_as(fp, write_like_original=False)
fp.seek(0)
ds = dcmread(fp)
assert patient_name == ds.PatientName
def test_changed_character_set(self):
# Regression test for #629
multiPN_name = get_charset_files("chrFrenMulti.dcm")[0]
ds = dcmread(multiPN_name) # is Latin-1
ds.SpecificCharacterSet = 'ISO_IR 192'
from pydicom.filebase import DicomBytesIO
fp = DicomBytesIO()
ds.save_as(fp, write_like_original=False)
fp.seek(0)
ds_out = dcmread(fp)
# we expect UTF-8 encoding here
assert b'Buc^J\xc3\xa9r\xc3\xb4me' == ds_out.get_item(0x00100010).value
def test_invalid_second_encoding(self, allow_invalid_values):
# regression test for #850
elem = DataElement(0x00100010, 'PN', 'CITIZEN')
with pytest.warns(UserWarning,
match="Unknown encoding 'ISO 2022 IR 146' "
"- using default encoding instead"):
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 146'])
def test_invalid_second_encoding_strict(self, enforce_valid_values):
elem = DataElement(0x00100010, 'PN', 'CITIZEN')
with pytest.raises(LookupError,
match="Unknown encoding 'ISO 2022 IR 146'"):
pydicom.charset.decode_element(
elem, ['ISO 2022 IR 100', 'ISO 2022 IR 146'])
def test_japanese_multi_byte_personname(self):
"""Test japanese person name which has multi byte strings are
correctly encoded."""
file_path = get_charset_files('chrH32.dcm')[0]
ds = dcmread(file_path)
ds.decode()
if hasattr(ds.PatientName, 'original_string'):
original_string = ds.PatientName.original_string
ds.PatientName.original_string = None
fp = DicomBytesIO()
fp.is_implicit_VR = False
fp.is_little_endian = True
ds.save_as(fp, write_like_original=False)
fp.seek(0)
ds_out = dcmread(fp)
assert original_string == ds_out.PatientName.original_string
japanese_pn = PersonName("Mori^Ogai=森^鷗外=もり^おうがい")
pyencs = pydicom.charset.convert_encodings(["ISO 2022 IR 6",
"ISO 2022 IR 87",
"ISO 2022 IR 159"])
actual_encoded = bytes(japanese_pn.encode(pyencs))
expect_encoded = (
b"\x4d\x6f\x72\x69\x5e\x4f\x67\x61\x69\x3d\x1b\x24\x42\x3f"
b"\x39\x1b\x28\x42\x5e\x1b\x24\x28\x44\x6c\x3f\x1b\x24\x42"
b"\x33\x30\x1b\x28\x42\x3d\x1b\x24\x42\x24\x62\x24\x6a\x1b"
b"\x28\x42\x5e\x1b\x24\x42\x24\x2a\x24\x26\x24\x2c\x24\x24"
b"\x1b\x28\x42"
)
assert expect_encoded == actual_encoded
def test_japanese_multi_byte_encoding(self):
"""Test japanese multi byte strings are correctly encoded."""
encoded = pydicom.charset.encode_string('あaアア齩', ['shift_jis',
'iso2022_jp', 'iso2022_jp_2'])
expect = b'\x1b$B$"\x1b(Ja\x1b)I\xb1\x1b$B%"\x1b$(DmN\x1b(J'
assert expect == bytes(encoded)
def test_bad_japanese_encoding(self):
"""Test japanese multi byte strings are not correctly encoded."""
with pytest.warns(UserWarning,
match="Failed to encode value with encodings"
": shift_jis - using replacement character"
"s in encoded string"):
encoded = pydicom.charset.encode_string('あaアア', ['shift_jis'])
assert b'?a??' == encoded
|
[
"pydicom.dcmread",
"pydicom.filebase.DicomBytesIO",
"pydicom.data.get_charset_files",
"pytest.warns",
"pydicom.data.get_testdata_file",
"pytest.raises",
"pydicom.dataelem.DataElement",
"pytest.mark.parametrize",
"pydicom.valuerep.PersonName"
] |
[((17192, 17261), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""encoding, decoded, raw_data"""', 'ENCODED_NAMES'], {}), "('encoding, decoded, raw_data', ENCODED_NAMES)\n", (17215, 17261), False, 'import pytest\n'), ((17570, 17639), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filename, patient_name"""', 'FILE_PATIENT_NAMES'], {}), "('filename, patient_name', FILE_PATIENT_NAMES)\n", (17593, 17639), False, 'import pytest\n'), ((5042, 5090), 'pydicom.dcmread', 'dcmread', (['rus_file'], {'specific_tags': "['PatientName']"}), "(rus_file, specific_tags=['PatientName'])\n", (5049, 5090), False, 'from pydicom import dcmread, config\n'), ((5364, 5401), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', '"""CITIZEN"""'], {}), "(1048592, 'PN', 'CITIZEN')\n", (5375, 5401), False, 'from pydicom.dataelem import DataElement\n'), ((5811, 5848), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', '"""CITIZEN"""'], {}), "(1048592, 'PN', 'CITIZEN')\n", (5822, 5848), False, 'from pydicom.dataelem import DataElement\n'), ((5970, 6007), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', '"""CITIZEN"""'], {}), "(1048592, 'PN', 'CITIZEN')\n", (5981, 6007), False, 'from pydicom.dataelem import DataElement\n'), ((6261, 6328), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN', b'\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (6272, 6328), False, 'from pydicom.dataelem import DataElement\n'), ((6840, 6907), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN', b'\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (6851, 6907), False, 'from pydicom.dataelem import DataElement\n'), ((7301, 7356), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xc3\\xa9r\\xc3\\xb4me'"], {}), "(1048592, 'PN', b'Buc^J\\xc3\\xa9r\\xc3\\xb4me')\n", (7312, 7356), False, 'from pydicom.dataelem import DataElement\n'), ((9085, 9159), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'\\x1b$(D\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN', b'\\x1b$(D\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (9096, 9159), False, 'from pydicom.dataelem import DataElement\n'), ((9690, 9764), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'\\x1b$(D\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN', b'\\x1b$(D\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (9701, 9764), False, 'from pydicom.dataelem import DataElement\n'), ((10189, 10262), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN', b'\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (10200, 10262), False, 'from pydicom.dataelem import DataElement\n'), ((10788, 10861), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN', b'\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (10799, 10861), False, 'from pydicom.dataelem import DataElement\n'), ((11247, 11302), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xc3\\xa9r\\xc3\\xb4me'"], {}), "(1048592, 'PN', b'Buc^J\\xc3\\xa9r\\xc3\\xb4me')\n", (11258, 11302), False, 'from 
pydicom.dataelem import DataElement\n'), ((11527, 11582), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xc3\\xa9r\\xc3\\xb4me'"], {}), "(1048592, 'PN', b'Buc^J\\xc3\\xa9r\\xc3\\xb4me')\n", (11538, 11582), False, 'from pydicom.dataelem import DataElement\n'), ((11903, 11950), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xe9r\\xf4me'"], {}), "(1048592, 'PN', b'Buc^J\\xe9r\\xf4me')\n", (11914, 11950), False, 'from pydicom.dataelem import DataElement\n'), ((12415, 12470), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xc3\\xa9r\\xc3\\xb4me'"], {}), "(1048592, 'PN', b'Buc^J\\xc3\\xa9r\\xc3\\xb4me')\n", (12426, 12470), False, 'from pydicom.dataelem import DataElement\n'), ((12778, 12833), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xc3\\xa9r\\xc3\\xb4me'"], {}), "(1048592, 'PN', b'Buc^J\\xc3\\xa9r\\xc3\\xb4me')\n", (12789, 12833), False, 'from pydicom.dataelem import DataElement\n'), ((13081, 13168), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN',\n b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (13092, 13168), False, 'from pydicom.dataelem import DataElement\n'), ((14847, 14894), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Buc^J\\xe9r\\xf4me'"], {}), "(1048592, 'PN', b'Buc^J\\xe9r\\xf4me')\n", (14858, 14894), False, 'from pydicom.dataelem import DataElement\n'), ((15063, 15111), 'pydicom.dataelem.DataElement', 'DataElement', (['(528441)', '"""LO"""', "b'R\\xf6ntgenaufnahme'"], {}), "(528441, 'LO', b'R\\xf6ntgenaufnahme')\n", (15074, 15111), False, 'from pydicom.dataelem import DataElement\n'), ((15395, 15482), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN',\n b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (15406, 15482), False, 'from pydicom.dataelem import DataElement\n'), ((15984, 16019), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048672)', '"""PN"""', 'encoded'], {}), "(1048672, 'PN', encoded)\n", (15995, 16019), False, 'from pydicom.dataelem import DataElement\n'), ((16332, 16421), 'pydicom.dataelem.DataElement', 'DataElement', (['(528441)', '"""LO"""', "b'Dionysios is \\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(528441, 'LO',\n b'Dionysios is \\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (16343, 16421), False, 'from pydicom.dataelem import DataElement\n'), ((16714, 16849), 'pydicom.dataelem.DataElement', 'DataElement', (['(528441)', '"""LO"""', "b'Buc^J\\xe9r\\xf4me\\\\\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2\\\\\\x1b-L\\xbb\\xee\\xdace\\xdc\\xd1yp\\xd3'"], {}), "(528441, 'LO',\n b'Buc^J\\xe9r\\xf4me\\\\\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2\\\\\\x1b-L\\xbb\\xee\\xdace\\xdc\\xd1yp\\xd3'\n )\n", (16725, 16849), False, 'from pydicom.dataelem import DataElement\n'), ((17403, 17450), 'pydicom.dataelem.DataElement', 'DataElement', (['(528441)', '"""LO"""', "(b'ASCII+' + raw_data)"], {}), "(528441, 'LO', b'ASCII+' + raw_data)\n", (17414, 17450), False, 'from pydicom.dataelem import DataElement\n'), ((17901, 17919), 'pydicom.dcmread', 'dcmread', (['file_path'], {}), '(file_path)\n', (17908, 17919), False, 'from pydicom import dcmread, config\n'), ((18062, 18076), 
'pydicom.filebase.DicomBytesIO', 'DicomBytesIO', ([], {}), '()\n', (18074, 18076), False, 'from pydicom.filebase import DicomBytesIO\n'), ((18228, 18239), 'pydicom.dcmread', 'dcmread', (['fp'], {}), '(fp)\n', (18235, 18239), False, 'from pydicom import dcmread, config\n'), ((18931, 18952), 'pydicom.dcmread', 'dcmread', (['multiPN_name'], {}), '(multiPN_name)\n', (18938, 18952), False, 'from pydicom import dcmread, config\n'), ((19077, 19091), 'pydicom.filebase.DicomBytesIO', 'DicomBytesIO', ([], {}), '()\n', (19089, 19091), False, 'from pydicom.filebase import DicomBytesIO\n'), ((19178, 19189), 'pydicom.dcmread', 'dcmread', (['fp'], {}), '(fp)\n', (19185, 19189), False, 'from pydicom import dcmread, config\n'), ((19427, 19464), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', '"""CITIZEN"""'], {}), "(1048592, 'PN', 'CITIZEN')\n", (19438, 19464), False, 'from pydicom.dataelem import DataElement\n'), ((19841, 19878), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', '"""CITIZEN"""'], {}), "(1048592, 'PN', 'CITIZEN')\n", (19852, 19878), False, 'from pydicom.dataelem import DataElement\n'), ((20320, 20338), 'pydicom.dcmread', 'dcmread', (['file_path'], {}), '(file_path)\n', (20327, 20338), False, 'from pydicom import dcmread, config\n'), ((20841, 20877), 'pydicom.valuerep.PersonName', 'PersonName', (['"""Mori^Ogai=森^鷗外=もり^おうがい"""'], {}), "('Mori^Ogai=森^鷗外=もり^おうがい')\n", (20851, 20877), False, 'from pydicom.valuerep import PersonName\n'), ((3779, 3812), 'pydicom.data.get_testdata_file', 'get_testdata_file', (['"""CT_small.dcm"""'], {}), "('CT_small.dcm')\n", (3796, 3812), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((4047, 4080), 'pydicom.data.get_testdata_file', 'get_testdata_file', (['"""CT_small.dcm"""'], {}), "('CT_small.dcm')\n", (4064, 4080), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((4175, 4278), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Unknown encoding \'Unsupported\' - using default encoding instead"""'}), '(UserWarning, match=\n "Unknown encoding \'Unsupported\' - using default encoding instead")\n', (4187, 4278), False, 'import pytest\n'), ((4567, 4600), 'pydicom.data.get_testdata_file', 'get_testdata_file', (['"""CT_small.dcm"""'], {}), "('CT_small.dcm')\n", (4584, 4600), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((4695, 4761), 'pytest.raises', 'pytest.raises', (['LookupError'], {'match': '"""Unknown encoding \'Unsupported\'"""'}), '(LookupError, match="Unknown encoding \'Unsupported\'")\n', (4708, 4761), False, 'import pytest\n'), ((4993, 5025), 'pydicom.data.get_charset_files', 'get_charset_files', (['"""chrRuss.dcm"""'], {}), "('chrRuss.dcm')\n", (5010, 5025), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((6373, 6462), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Failed to decode byte string with encoding \'UTF8\'"""'}), '(UserWarning, match=\n "Failed to decode byte string with encoding \'UTF8\'")\n', (6385, 6462), False, 'import pytest\n'), ((7066, 7110), 'pytest.raises', 'pytest.raises', (['UnicodeDecodeError'], {'match': 'msg'}), '(UnicodeDecodeError, match=msg)\n', (7079, 7110), False, 'import pytest\n'), ((7549, 7585), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': 'msg'}), '(UserWarning, match=msg)\n', (7561, 7585), False, 'import pytest\n'), ((7922, 8019), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Value \'GBK\' cannot be 
used as code extension, ignoring it"""'}), '(UserWarning, match=\n "Value \'GBK\' cannot be used as code extension, ignoring it")\n', (7934, 8019), False, 'import pytest\n'), ((9204, 9301), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Failed to decode byte string with encodings: iso2022_jp_2"""'}), "(UserWarning, match=\n 'Failed to decode byte string with encodings: iso2022_jp_2')\n", (9216, 9301), False, 'import pytest\n'), ((9930, 9974), 'pytest.raises', 'pytest.raises', (['UnicodeDecodeError'], {'match': 'msg'}), '(UnicodeDecodeError, match=msg)\n', (9943, 9974), False, 'import pytest\n'), ((10313, 10406), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Found unknown escape sequence in encoded string value"""'}), "(UserWarning, match=\n 'Found unknown escape sequence in encoded string value')\n", (10325, 10406), False, 'import pytest\n'), ((10911, 11004), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Found unknown escape sequence in encoded string value"""'}), "(ValueError, match=\n 'Found unknown escape sequence in encoded string value')\n", (10924, 11004), False, 'import pytest\n'), ((11599, 11723), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Incorrect value for Specific Character Set \'ISO IR 192\' - assuming \'ISO_IR 192\'"""'}), '(UserWarning, match=\n "Incorrect value for Specific Character Set \'ISO IR 192\' - assuming \'ISO_IR 192\'"\n )\n', (11611, 11723), False, 'import pytest\n'), ((11967, 12091), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Incorrect value for Specific Character Set \'ISO-IR 144\' - assuming \'ISO_IR 144\'"""'}), '(UserWarning, match=\n "Incorrect value for Specific Character Set \'ISO-IR 144\' - assuming \'ISO_IR 144\'"\n )\n', (11979, 12091), False, 'import pytest\n'), ((12487, 12588), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Unknown encoding \'ISOIR 192\' - using default encoding instead"""'}), '(UserWarning, match=\n "Unknown encoding \'ISOIR 192\' - using default encoding instead")\n', (12499, 12588), False, 'import pytest\n'), ((13506, 13640), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Incorrect value for Specific Character Set \'ISO_2022-IR 100\' - assuming \'ISO 2022 IR 100\'"""'}), '(UserWarning, match=\n "Incorrect value for Specific Character Set \'ISO_2022-IR 100\' - assuming \'ISO 2022 IR 100\'"\n )\n', (13518, 13640), False, 'import pytest\n'), ((13747, 13834), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN',\n b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (13758, 13834), False, 'from pydicom.dataelem import DataElement\n'), ((14095, 14231), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Incorrect value for Specific Character Set \'ISO_2022_IR\\\\+126\' - assuming \'ISO 2022 IR 126\'"""'}), '(UserWarning, match=\n "Incorrect value for Specific Character Set \'ISO_2022_IR\\\\+126\' - assuming \'ISO 2022 IR 126\'"\n )\n', (14107, 14231), False, 'import pytest\n'), ((14340, 14427), 'pydicom.dataelem.DataElement', 'DataElement', (['(1048592)', '"""PN"""', "b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2'"], {}), "(1048592, 'PN',\n b'Dionysios=\\x1b-F\\xc4\\xe9\\xef\\xed\\xf5\\xf3\\xe9\\xef\\xf2')\n", (14351, 14427), False, 'from pydicom.dataelem import DataElement\n'), ((17848, 17884), 'pydicom.data.get_charset_files', 'get_charset_files', 
(["(filename + '.dcm')"], {}), "(filename + '.dcm')\n", (17865, 17884), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((18528, 18542), 'pydicom.filebase.DicomBytesIO', 'DicomBytesIO', ([], {}), '()\n', (18540, 18542), False, 'from pydicom.filebase import DicomBytesIO\n'), ((18714, 18725), 'pydicom.dcmread', 'dcmread', (['fp'], {}), '(fp)\n', (18721, 18725), False, 'from pydicom import dcmread, config\n'), ((18877, 18914), 'pydicom.data.get_charset_files', 'get_charset_files', (['"""chrFrenMulti.dcm"""'], {}), "('chrFrenMulti.dcm')\n", (18894, 18914), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((19481, 19588), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Unknown encoding \'ISO 2022 IR 146\' - using default encoding instead"""'}), '(UserWarning, match=\n "Unknown encoding \'ISO 2022 IR 146\' - using default encoding instead")\n', (19493, 19588), False, 'import pytest\n'), ((19895, 19965), 'pytest.raises', 'pytest.raises', (['LookupError'], {'match': '"""Unknown encoding \'ISO 2022 IR 146\'"""'}), '(LookupError, match="Unknown encoding \'ISO 2022 IR 146\'")\n', (19908, 19965), False, 'import pytest\n'), ((20272, 20303), 'pydicom.data.get_charset_files', 'get_charset_files', (['"""chrH32.dcm"""'], {}), "('chrH32.dcm')\n", (20289, 20303), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((20543, 20557), 'pydicom.filebase.DicomBytesIO', 'DicomBytesIO', ([], {}), '()\n', (20555, 20557), False, 'from pydicom.filebase import DicomBytesIO\n'), ((20733, 20744), 'pydicom.dcmread', 'dcmread', (['fp'], {}), '(fp)\n', (20740, 20744), False, 'from pydicom import dcmread, config\n'), ((22058, 22200), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Failed to encode value with encodings: shift_jis - using replacement characters in encoded string"""'}), "(UserWarning, match=\n 'Failed to encode value with encodings: shift_jis - using replacement characters in encoded string'\n )\n", (22070, 22200), False, 'import pytest\n'), ((2669, 2707), 'pydicom.data.get_charset_files', 'get_charset_files', (['"""chrSQEncoding.dcm"""'], {}), "('chrSQEncoding.dcm')\n", (2686, 2707), False, 'from pydicom.data import get_charset_files, get_testdata_file\n'), ((3301, 3340), 'pydicom.data.get_charset_files', 'get_charset_files', (['"""chrSQEncoding1.dcm"""'], {}), "('chrSQEncoding1.dcm')\n", (3318, 3340), False, 'from pydicom.data import get_charset_files, get_testdata_file\n')]
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
# <NAME>
from abc import ABCMeta, abstractmethod
import m5
from m5.objects import *
from m5.proxy import *
m5.util.addToPath('../configs/common')
import FSConfig
from Caches import *
_have_kvm_support = 'BaseKvmCPU' in globals()
class BaseSystem(object):
"""Base system builder.
This class provides some basic functionality for creating an ARM
system with the usual peripherals (caches, GIC, etc.). It allows
customization by defining separate methods for different parts of
the initialization process.
"""
__metaclass__ = ABCMeta
def __init__(self, mem_mode='timing', mem_class=SimpleMemory,
cpu_class=TimingSimpleCPU, num_cpus=1, num_threads=1,
checker=False,
mem_size=None):
"""Initialize a simple base system.
Keyword Arguments:
mem_mode -- String describing the memory mode (timing or atomic)
mem_class -- Memory controller class to use
cpu_class -- CPU class to use
num_cpus -- Number of CPUs to instantiate
checker -- Set to True to add checker CPUs
mem_size -- Override the default memory size
"""
self.mem_mode = mem_mode
self.mem_class = mem_class
self.cpu_class = cpu_class
self.num_cpus = num_cpus
self.num_threads = num_threads
self.checker = checker
def create_cpus(self, cpu_clk_domain):
"""Return a list of CPU objects to add to a system."""
cpus = [ self.cpu_class(clk_domain=cpu_clk_domain,
numThreads=self.num_threads,
cpu_id=i)
for i in range(self.num_cpus) ]
if self.checker:
for c in cpus:
c.addCheckerCpu()
return cpus
def create_caches_private(self, cpu):
"""Add private caches to a CPU.
Arguments:
cpu -- CPU instance to work on.
"""
cpu.addPrivateSplitL1Caches(L1_ICache(size='32kB', assoc=1),
L1_DCache(size='32kB', assoc=4))
def create_caches_shared(self, system):
"""Add shared caches to a system.
Arguments:
system -- System to work on.
Returns:
A bus that CPUs should use to connect to the shared cache.
"""
system.toL2Bus = L2XBar(clk_domain=system.cpu_clk_domain)
system.l2c = L2Cache(clk_domain=system.cpu_clk_domain,
size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.master
system.l2c.mem_side = system.membus.slave
return system.toL2Bus
def init_cpu(self, system, cpu, sha_bus):
"""Initialize a CPU.
Arguments:
system -- System to work on.
cpu -- CPU to initialize.
"""
if not cpu.switched_out:
self.create_caches_private(cpu)
cpu.createInterruptController()
cpu.connectAllPorts(sha_bus if sha_bus != None else system.membus,
system.membus)
def init_kvm(self, system):
"""Do KVM-specific system initialization.
Arguments:
system -- System to work on.
"""
system.vm = KvmVM()
def init_system(self, system):
"""Initialize a system.
Arguments:
system -- System to initialize.
"""
self.create_clk_src(system)
system.cpu = self.create_cpus(system.cpu_clk_domain)
if _have_kvm_support and \
any([isinstance(c, BaseKvmCPU) for c in system.cpu]):
self.init_kvm(system)
sha_bus = self.create_caches_shared(system)
for cpu in system.cpu:
self.init_cpu(system, cpu, sha_bus)
def create_clk_src(self,system):
# Create system clock domain. This provides clock value to every
# clocked object that lies beneath it unless explicitly overwritten
# by a different clock domain.
system.voltage_domain = VoltageDomain()
system.clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain =
system.voltage_domain)
        # Create a separate clock domain for components that should
        # run at the CPU frequency
system.cpu_clk_domain = SrcClockDomain(clock = '2GHz',
voltage_domain =
system.voltage_domain)
@abstractmethod
def create_system(self):
"""Create an return an initialized system."""
pass
@abstractmethod
def create_root(self):
"""Create and return a simulation root using the system
defined by this class."""
pass
class BaseSESystem(BaseSystem):
"""Basic syscall-emulation builder."""
def __init__(self, **kwargs):
BaseSystem.__init__(self, **kwargs)
def init_system(self, system):
BaseSystem.init_system(self, system)
def create_system(self):
system = System(physmem = self.mem_class(),
membus = SystemXBar(),
mem_mode = self.mem_mode,
multi_thread = (self.num_threads > 1))
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
self.init_system(system)
return system
def create_root(self):
system = self.create_system()
m5.ticks.setGlobalFrequency('1THz')
return Root(full_system=False, system=system)
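# Hedged usage sketch (assumption: the usual m5 driver entry points are available
# to the caller of this module), showing how a test might consume create_root():
#
#   root = BaseSESystem(cpu_class=TimingSimpleCPU).create_root()
#   m5.instantiate()
#   exit_event = m5.simulate()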
class BaseSESystemUniprocessor(BaseSESystem):
"""Basic syscall-emulation builder for uniprocessor systems.
Note: This class is only really needed to provide backwards
compatibility in existing test cases.
"""
def __init__(self, **kwargs):
BaseSESystem.__init__(self, **kwargs)
def create_caches_private(self, cpu):
# The atomic SE configurations do not use caches
if self.mem_mode == "timing":
# @todo We might want to revisit these rather enthusiastic L1 sizes
cpu.addTwoLevelCacheHierarchy(L1_ICache(size='128kB'),
L1_DCache(size='256kB'),
L2Cache(size='2MB'))
def create_caches_shared(self, system):
return None
class BaseFSSystem(BaseSystem):
"""Basic full system builder."""
def __init__(self, **kwargs):
BaseSystem.__init__(self, **kwargs)
def init_system(self, system):
BaseSystem.init_system(self, system)
# create the memory controllers and connect them, stick with
# the physmem name to avoid bumping all the reference stats
system.physmem = [self.mem_class(range = r)
for r in system.mem_ranges]
for i in xrange(len(system.physmem)):
system.physmem[i].port = system.membus.master
# create the iocache, which by default runs at the system clock
system.iocache = IOCache(addr_ranges=system.mem_ranges)
system.iocache.cpu_side = system.iobus.master
system.iocache.mem_side = system.membus.slave
def create_root(self):
system = self.create_system()
m5.ticks.setGlobalFrequency('1THz')
return Root(full_system=True, system=system)
class BaseFSSystemUniprocessor(BaseFSSystem):
"""Basic full system builder for uniprocessor systems.
Note: This class is only really needed to provide backwards
compatibility in existing test cases.
"""
def __init__(self, **kwargs):
BaseFSSystem.__init__(self, **kwargs)
def create_caches_private(self, cpu):
cpu.addTwoLevelCacheHierarchy(L1_ICache(size='32kB', assoc=1),
L1_DCache(size='32kB', assoc=4),
L2Cache(size='4MB', assoc=8))
def create_caches_shared(self, system):
return None
class BaseFSSwitcheroo(BaseFSSystem):
"""Uniprocessor system prepared for CPU switching"""
def __init__(self, cpu_classes, **kwargs):
BaseFSSystem.__init__(self, **kwargs)
self.cpu_classes = tuple(cpu_classes)
def create_cpus(self, cpu_clk_domain):
cpus = [ cclass(clk_domain = cpu_clk_domain,
cpu_id=0,
switched_out=True)
for cclass in self.cpu_classes ]
cpus[0].switched_out = False
return cpus
|
[
"m5.ticks.setGlobalFrequency",
"m5.util.addToPath"
] |
[((2211, 2249), 'm5.util.addToPath', 'm5.util.addToPath', (['"""../configs/common"""'], {}), "('../configs/common')\n", (2228, 2249), False, 'import m5\n'), ((7628, 7663), 'm5.ticks.setGlobalFrequency', 'm5.ticks.setGlobalFrequency', (['"""1THz"""'], {}), "('1THz')\n", (7655, 7663), False, 'import m5\n'), ((9403, 9438), 'm5.ticks.setGlobalFrequency', 'm5.ticks.setGlobalFrequency', (['"""1THz"""'], {}), "('1THz')\n", (9430, 9438), False, 'import m5\n')]
|
import os
from pathlib import Path
import logging
import configparser
from . import settings
CONFIG_FILE = '/etc/margot'
def init():
# logging
logger = logging.getLogger('margot config') # unless worker
# load config
cfg = configparser.ConfigParser()
logger.debug('loading config from {}'.format(CONFIG_FILE))
cfg.read(CONFIG_FILE)
if len(cfg.sections()):
for section in cfg.sections():
setattr(settings, section, dict())
for key, val in cfg.items(section):
getattr(settings, section)[key] = val
else:
logger.warning('config not found at: {}.'.format(CONFIG_FILE))
return
# find home folder
base_folder = settings.paths.get('base_folder')
home = Path.home().joinpath(base_folder)
logger.debug('using margot_home {}'.format(home))
settings.paths = {
"home": home,
"algo_folder": home.joinpath('algos'),
"log_folder": home.joinpath('logs'),
"journal_folder": home.joinpath('journal'),
"venv_folder": home.joinpath('venvs'),
"cache": home.joinpath('cache')
}
# create directory structure if it doesn't already exist.
for folder in settings.paths.values():
if not folder.exists():
logger.info('creating new directory {}'.format(folder))
folder.mkdir()
# look for algos
algo_files = list(settings.paths['algo_folder'].glob('*.cfg'))
logger.info('found {} cfg files in {}'.format(
len(algo_files),
settings.paths['algo_folder']))
# create a dict to hold the algo configs (ConfigParser objects)
settings.algos = dict()
for algo_file in algo_files:
logger.debug('parsing {}'.format(algo_file))
algo_config = configparser.ConfigParser()
algo_config.read(algo_file)
algo_name = algo_config.get('python', 'file')
settings.algos[algo_name] = dict()
if len(algo_config.sections()):
for section in algo_config.sections():
settings.algos[algo_name][section] = dict()
for key, val in algo_config.items(section):
settings.algos[algo_name][section][key] = val
else:
            logger.warning('config not found at: {}.'.format(algo_file))
return
    # let's collect the OS environment variables here too.
if not hasattr(settings, 'env'):
settings.env = dict()
for key in os.environ:
settings.env[key] = os.environ[key]
if not settings.INITED:
init()
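# Illustrative sketch (assumption: /etc/margot carries a [paths] section with a
# base_folder key, as implied by settings.paths.get('base_folder') above):
#
#   [paths]
#   base_folder = .margot
#
# After init(), settings.paths maps names such as 'home', 'algo_folder' and
# 'cache' to pathlib.Path objects under ~/.margot, and settings.algos maps each
# algo's 'file' value to its parsed per-section key/value configuration.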
|
[
"configparser.ConfigParser",
"pathlib.Path.home",
"logging.getLogger"
] |
[((164, 198), 'logging.getLogger', 'logging.getLogger', (['"""margot config"""'], {}), "('margot config')\n", (181, 198), False, 'import logging\n'), ((245, 272), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (270, 272), False, 'import configparser\n'), ((1777, 1804), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1802, 1804), False, 'import configparser\n'), ((762, 773), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (771, 773), False, 'from pathlib import Path\n')]
|
import ruamel.yaml as yaml
import re
import json
class TestConfigAPI():
def test_config_api_user_role_error(self, hge_ctx):
admin_secret = hge_ctx.hge_key
auth_hook = hge_ctx.hge_webhook
jwt_conf = hge_ctx.hge_jwt_conf
if jwt_conf is not None:
jwt_conf_dict = json.loads(hge_ctx.hge_jwt_conf)
headers = { 'x-hasura-role': 'user' }
if admin_secret is not None:
headers['x-hasura-admin-secret'] = admin_secret
resp = hge_ctx.http.get(hge_ctx.hge_url + '/v1alpha1/config', headers=headers)
assert resp.status_code == 400, resp
def test_config_api(self, hge_ctx):
admin_secret = hge_ctx.hge_key
auth_hook = hge_ctx.hge_webhook
jwt_conf = hge_ctx.hge_jwt_conf
if jwt_conf is not None:
jwt_conf_dict = json.loads(hge_ctx.hge_jwt_conf)
headers = { 'x-hasura-role': 'admin' }
if admin_secret is not None:
headers['x-hasura-admin-secret'] = admin_secret
resp = hge_ctx.http.get(hge_ctx.hge_url + '/v1alpha1/config', headers=headers)
assert resp.status_code == 200, resp
body = resp.json()
        # The tree may be dirty because we're developing tests locally while
        # graphql-engine was built earlier, when the tree was still clean. If we're
        # modifying graphql-engine as well, then both versions will be tagged dirty,
        # since a rebuild would necessarily be forced:
assert body['version'] in (hge_ctx.version, re.sub('-dirty$', '', hge_ctx.version))
assert body['is_admin_secret_set'] == (admin_secret is not None)
assert body['is_auth_hook_set'] == (auth_hook is not None)
assert body['is_jwt_set'] == (jwt_conf is not None)
if jwt_conf is not None:
claims_format = "json"
if 'claims_namespace_path' in jwt_conf_dict:
assert body['jwt']['claims_namespace_path'] == jwt_conf_dict['claims_namespace_path']
assert body['jwt']['claims_format'] == claims_format
else:
claims_namespace = "https://hasura.io/jwt/claims"
if 'claims_namespace' in jwt_conf_dict:
claims_namespace = jwt_conf_dict['claims_namespace']
if 'claims_format' in jwt_conf_dict:
claims_format = jwt_conf_dict['claims_format']
assert body['jwt']['claims_namespace'] == claims_namespace
assert body['jwt']['claims_format'] == claims_format
else:
assert body['jwt'] == []
# test if the request fails without auth headers if admin secret is set
if admin_secret is not None:
resp = hge_ctx.http.get(hge_ctx.hge_url + '/v1alpha1/config')
body = resp.json()
assert ((resp.status_code == 401) or (resp.status_code == 400))
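# For orientation, a hedged sketch of the /v1alpha1/config response shape implied
# by the assertions above (field values are illustrative, not taken from a real
# server):
#
#   {
#     "version": "v2.x.y",
#     "is_admin_secret_set": true,
#     "is_auth_hook_set": false,
#     "is_jwt_set": true,
#     "jwt": {"claims_namespace": "https://hasura.io/jwt/claims",
#             "claims_format": "json"}
#   }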
|
[
"re.sub",
"json.loads"
] |
[((309, 341), 'json.loads', 'json.loads', (['hge_ctx.hge_jwt_conf'], {}), '(hge_ctx.hge_jwt_conf)\n', (319, 341), False, 'import json\n'), ((841, 873), 'json.loads', 'json.loads', (['hge_ctx.hge_jwt_conf'], {}), '(hge_ctx.hge_jwt_conf)\n', (851, 873), False, 'import json\n'), ((1521, 1559), 're.sub', 're.sub', (['"""-dirty$"""', '""""""', 'hge_ctx.version'], {}), "('-dirty$', '', hge_ctx.version)\n", (1527, 1559), False, 'import re\n')]
|
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
USE_TZ = True
SITE_ID = 1
SECRET_KEY = 'secret_key_for_django'
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
USE_I18N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
('de', 'German'),
)
ROOT_URLCONF = 'multilingual_news.tests.urls'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../../static/')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../../media/')
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), 'test_static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(os.path.dirname(__file__), '../templates')],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'sekizai.context_processors.sekizai',
)
}
}]
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
os.path.dirname(__file__), 'coverage')
COVERAGE_MODULE_EXCLUDES = [
'__init__$', 'tests$', 'settings$', 'urls$', 'locale$',
'south_migrations', 'fixtures', 'admin$', 'django_extensions',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
)
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'djangocms_text_ckeditor', # must come before cms
'cms',
'djangocms_link',
'django_libs',
'sekizai',
'menus',
'mptt',
'filer',
'easy_thumbnails',
'hvad',
'document_library',
'people',
'multilingual_tags',
'treebeard',
]
INTERNAL_APPS = [
'multilingual_news.tests.test_app',
'multilingual_news',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
CMS_SOFTROOT = True
CMS_PERMISSION = False
CMS_SEO_FIELDS = True
CMS_MENU_TITLE_OVERWRITE = True
CMS_FRONTEND_LANGUAGES = ('en', 'de', )
CMS_TEMPLATES = (
('base.html', 'Standard'),
)
# settings for localized_names
LONG_NAME_FORMAT = '{g} {L}, {f}'
LONG_NAME_FORMAT_NON_ROMAN = '{g} {x}, {a} ({L}, {f})'
|
[
"os.path.dirname"
] |
[((490, 515), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (505, 515), False, 'import os\n'), ((560, 585), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (575, 585), False, 'import os\n'), ((1302, 1327), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1317, 1327), False, 'import os\n'), ((642, 667), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (657, 667), False, 'import os\n'), ((817, 842), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (832, 842), False, 'import os\n')]
|
import requests
url = "http://mockbin.com/har"
payload = { "foo": "bar" }
headers = { "content-type": "application/json" }
response = requests.post(url, json=payload, headers=headers)
print(response.text)
|
[
"requests.post"
] |
[((137, 186), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (150, 186), False, 'import requests\n')]
|
# Copyright 2020 NXP Semiconductors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Tests for simulation module
'''
import unittest
from unittest.mock import Mock, call
from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim
class SomeObj:
pass
class EventForwarder:
    ''' A basic forwarder which forwards events from the time_process into a waiting
        process; a kind of DSProducer which produces data for exactly one running process.
    '''
def __init__(self, test_unit_running_sim, receiving_process):
self.testunit = test_unit_running_sim
self.receiving_process = receiving_process
def signal(self, **data):
self.testunit.sim.signal(self.receiving_process, **data)
class TestDSSchedulable(unittest.TestCase):
@DSSchedulable
def __fcn(self):
return 'Success'
@DSSchedulable
def __process(self):
yield 'First return'
return 'Success'
def test0_fcn(self):
process = self.__fcn()
try:
next(process)
except StopIteration as e:
retval = e.value
self.assertEqual(retval, 'Success')
def test1_generator(self):
process = self.__process()
retval = next(process)
self.assertEqual(retval, 'First return')
try:
next(process)
except StopIteration as e:
retval = e.value
self.assertEqual(retval, 'Success')
class TestSim(unittest.TestCase):
''' Test the time queue class behavior '''
def __my_time_process(self):
self.__time_process_event('kick-on')
while True:
event = yield
if isinstance(event, DSAbortException):
self.__time_process_event(self.sim.time_queue.time, abort=True, **event.info)
break
else:
# note: we cannot use self.sim.time because we are not running simulation
# so self.sim.time is always 0
self.__time_process_event(self.sim.time_queue.time, **event)
def __my_wait_process(self):
event = yield from self.sim.wait(2)
self.__time_process_event(self.sim.time, None)
event = yield from self.sim.wait(cond=lambda e: 'data' in e)
self.__time_process_event(self.sim.time, **event)
event = yield from self.sim.wait(cond=lambda e: True)
self.__time_process_event(self.sim.time, **event)
def __my_handler(self):
return True
@DSSchedulable
def __my_schedulable_handler(self):
return True
def setUp(self):
self.__time_process_event = Mock()
def test0_simple_event(self):
''' Assert kicking and pushing events '''
self.sim = DSSimulation()
self.assertIsNotNone(sim.time_process)
sim.time_process = self.__my_time_process()
sim._kick(sim.time_process) # kick on the time process
self.__time_process_event.assert_called_once_with('kick-on')
self.__time_process_event.reset_mock()
sim.signal(sim.time_process, data=1)
self.__time_process_event.assert_called_once_with(0, data=1)
self.__time_process_event.reset_mock()
def test1_time_process(self):
''' Assert correct time process and pushing events to time process '''
sim = DSSimulation()
self.assertIsNotNone(sim.time_process)
p = SomeObj()
p.signal = Mock()
sim.signal(sim.time_process, producer=p, data=1)
p.signal.assert_called_once_with(producer=p, data=1)
p.signal.reset_mock()
def test2_scheduling_events(self):
''' Assert working with time queue when pushing events '''
sim = DSSimulation()
sim.time_queue.add_element = Mock()
event_obj = {'producer': None, 'data': 1}
sim.schedule_event(10, event_obj)
sim.time_queue.add_element.assert_called_once_with(10, (sim.time_process, event_obj))
sim.time_queue.add_element.reset_mock()
sim.schedule_event(0, event_obj)
sim.time_queue.add_element.assert_called_once_with(0, (sim.time_process, event_obj))
sim.time_queue.add_element.reset_mock()
with self.assertRaises(ValueError):
sim.schedule_event(-0.5, event_obj)
def test3_deleting_events(self):
''' Assert deleting from time queue when deleting events '''
sim = DSSimulation()
sim.time_queue.delete = Mock()
condition = lambda x: 'A' * x
sim.delete(condition)
sim.time_queue.delete.assert_called_once_with(condition)
sim.time_queue.delete.reset_mock()
def test4_scheduling(self):
''' Assert working with time queue when pushing events '''
self.sim = DSSimulation()
my_process = self.__my_time_process()
# schedule a process
with self.assertRaises(ValueError):
# negative time
self.sim.schedule(-0.5, my_process)
with self.assertRaises(ValueError):
# missing producer
self.sim.schedule(1, self.__my_handler())
parent_process = sim.schedule(0, my_process)
self.assertNotEqual(parent_process, my_process)
self.__time_process_event.assert_called_once_with('kick-on')
self.__time_process_event.reset_mock()
# schedule an event
with self.assertRaises(ValueError):
# negative time
self.sim.schedule_event(-0.5, {'producer': parent_process, 'data': 1})
with self.assertRaises(ValueError):
# missing producer
self.sim.schedule_event(1, {'data': 1})
self.sim.schedule_event(2, {'producer': parent_process, 'data': 1})
time, (process, event_obj) = self.sim.time_queue.pop()
self.assertEqual((time, process), (2, self.sim.time_process))
self.assertEqual(event_obj, {'producer': parent_process, 'data': 1})
retval = self.sim._signal_object(event_obj['producer'], event_obj)
self.__time_process_event.assert_called_once_with(2, producer=event_obj['producer'], data=1)
self.__time_process_event.reset_mock()
self.assertEqual(retval, True)
retval = self.sim.abort(parent_process, testing=-1)
self.__time_process_event.assert_called_once_with(2, abort=True, testing=-1)
self.__time_process_event.reset_mock()
self.assertEqual(retval, False)
def test5_scheduling(self):
''' Assert the delay of scheduled process '''
self.sim = DSSimulation()
my_process = self.__my_time_process()
# schedule a process
with self.assertRaises(ValueError):
# scheduling with negative time delta
parent_process = self.sim.schedule(-0.5, my_process)
parent_process = self.sim.schedule(2, my_process)
self.assertEqual(len(self.sim.time_queue), 1)
def test6_schedulable_fcn(self):
self.sim = DSSimulation()
# The following has to pass without raising an error
self.sim._kick(self.__my_schedulable_handler())
def test7_run_infinite_process(self):
''' Assert event loop behavior '''
self.sim = DSSimulation()
producer = SomeObj()
producer.signal = Mock()
self.sim.schedule_event(1, {'producer': producer, 'data': 1})
self.sim.schedule_event(2, {'producer': producer, 'data': 2})
self.sim.schedule_event(3, {'producer': producer, 'data': 3})
num_events = self.sim.run()
self.assertEqual(num_events, 3)
calls = [call(producer=producer, data=1), call(producer=producer, data=2), call(producer=producer, data=3),]
producer.signal.assert_has_calls(calls)
producer.signal.reset_mock()
num_events = len(self.sim.time_queue)
self.assertEqual(num_events, 0)
def test8_run_finite_process(self):
self.sim = DSSimulation()
producer = SomeObj()
producer.signal = Mock()
self.sim.schedule_event(1, {'producer': producer, 'data': 1})
self.sim.schedule_event(2, {'producer': producer, 'data': 2})
self.sim.schedule_event(3, {'producer': producer, 'data': 3})
num_events = self.sim.run(2.5)
self.assertEqual(num_events, 2)
calls = [call(producer=producer, data=1), call(producer=producer, data=2),]
producer.signal.assert_has_calls(calls)
producer.signal.reset_mock()
num_events = len(self.sim.time_queue)
self.assertEqual(num_events, 1)
def test9_waiting(self):
self.sim = DSSimulation()
# the following process will create events for the time queue process
process = self.__my_wait_process()
# those events are required to contain a producer
producer = EventForwarder(self, process)
self.sim.parent_process = process
self.sim._kick(process)
self.sim.schedule_event(1, {'producer': producer, 'data': 1})
self.sim.schedule_event(2, {'producer': producer, 'data': 2})
self.sim.schedule_event(3, {'producer': producer, 'data': 3})
num_events = self.sim.run(5)
self.assertEqual(num_events, 4)
        # the first event is dropped: although it was taken by the time_process, the
        # waiting process' condition was to wait until the timeout
calls = [
call(2, None), # timeout logged
call(2, producer=producer, data=2), # real event logged
call(3, producer=producer, data=3), # real event logged after time
]
self.__time_process_event.assert_has_calls(calls)
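# --- Hedged usage sketch (an assumption, not part of the original test suite) ---
# Minimal stand-alone illustration of the DSSimulation API exercised above: a
# producer only needs a signal(**data) method, every scheduled event must carry
# a 'producer' key, and run(time_limit) returns the number of processed events.
if __name__ == '__main__':
    class DemoProducer:
        def signal(self, **data):
            print('received', data)

    demo_sim = DSSimulation()
    demo_producer = DemoProducer()
    demo_sim.schedule_event(1, {'producer': demo_producer, 'data': 'hello'})
    demo_sim.schedule_event(2, {'producer': demo_producer, 'data': 'world'})
    print('processed events:', demo_sim.run(5))  # expected: 2, as in test8 above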
|
[
"dssim.simulation.sim.time_queue.delete.assert_called_once_with",
"dssim.simulation.sim.signal",
"dssim.simulation.sim.time_queue.add_element.assert_called_once_with",
"unittest.mock.Mock",
"dssim.simulation.sim.schedule_event",
"dssim.simulation.sim.time_queue.delete.reset_mock",
"dssim.simulation.sim.delete",
"dssim.simulation.sim.schedule",
"dssim.simulation.sim.time_queue.add_element.reset_mock",
"dssim.simulation.DSSimulation",
"dssim.simulation.sim._kick",
"unittest.mock.call"
] |
[((3168, 3174), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3172, 3174), False, 'from unittest.mock import Mock, call\n'), ((3279, 3293), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (3291, 3293), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((3401, 3428), 'dssim.simulation.sim._kick', 'sim._kick', (['sim.time_process'], {}), '(sim.time_process)\n', (3410, 3428), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((3581, 3617), 'dssim.simulation.sim.signal', 'sim.signal', (['sim.time_process'], {'data': '(1)'}), '(sim.time_process, data=1)\n', (3591, 3617), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((3862, 3876), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (3874, 3876), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((3965, 3971), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3969, 3971), False, 'from unittest.mock import Mock, call\n'), ((3980, 4028), 'dssim.simulation.sim.signal', 'sim.signal', (['sim.time_process'], {'producer': 'p', 'data': '(1)'}), '(sim.time_process, producer=p, data=1)\n', (3990, 4028), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4241, 4255), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (4253, 4255), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4293, 4299), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (4297, 4299), False, 'from unittest.mock import Mock, call\n'), ((4358, 4391), 'dssim.simulation.sim.schedule_event', 'sim.schedule_event', (['(10)', 'event_obj'], {}), '(10, event_obj)\n', (4376, 4391), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4400, 4489), 'dssim.simulation.sim.time_queue.add_element.assert_called_once_with', 'sim.time_queue.add_element.assert_called_once_with', (['(10)', '(sim.time_process, event_obj)'], {}), '(10, (sim.time_process,\n event_obj))\n', (4450, 4489), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4494, 4533), 'dssim.simulation.sim.time_queue.add_element.reset_mock', 'sim.time_queue.add_element.reset_mock', ([], {}), '()\n', (4531, 4533), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4542, 4574), 'dssim.simulation.sim.schedule_event', 'sim.schedule_event', (['(0)', 'event_obj'], {}), '(0, event_obj)\n', (4560, 4574), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4583, 4671), 'dssim.simulation.sim.time_queue.add_element.assert_called_once_with', 'sim.time_queue.add_element.assert_called_once_with', (['(0)', '(sim.time_process, event_obj)'], {}), '(0, (sim.time_process,\n event_obj))\n', (4633, 4671), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4676, 4715), 'dssim.simulation.sim.time_queue.add_element.reset_mock', 'sim.time_queue.add_element.reset_mock', ([], {}), '()\n', (4713, 4715), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4929, 4943), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (4941, 4943), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4976, 4982), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (4980, 
4982), False, 'from unittest.mock import Mock, call\n'), ((5029, 5050), 'dssim.simulation.sim.delete', 'sim.delete', (['condition'], {}), '(condition)\n', (5039, 5050), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((5059, 5115), 'dssim.simulation.sim.time_queue.delete.assert_called_once_with', 'sim.time_queue.delete.assert_called_once_with', (['condition'], {}), '(condition)\n', (5104, 5115), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((5124, 5158), 'dssim.simulation.sim.time_queue.delete.reset_mock', 'sim.time_queue.delete.reset_mock', ([], {}), '()\n', (5156, 5158), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((5278, 5292), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (5290, 5292), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((5643, 5670), 'dssim.simulation.sim.schedule', 'sim.schedule', (['(0)', 'my_process'], {}), '(0, my_process)\n', (5655, 5670), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((7041, 7055), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (7053, 7055), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((7459, 7473), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (7471, 7473), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((7696, 7710), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (7708, 7710), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((7766, 7772), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (7770, 7772), False, 'from unittest.mock import Mock, call\n'), ((8407, 8421), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (8419, 8421), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((8477, 8483), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (8481, 8483), False, 'from unittest.mock import Mock, call\n'), ((9077, 9091), 'dssim.simulation.DSSimulation', 'DSSimulation', ([], {}), '()\n', (9089, 9091), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((4772, 4807), 'dssim.simulation.sim.schedule_event', 'sim.schedule_event', (['(-0.5)', 'event_obj'], {}), '(-0.5, event_obj)\n', (4790, 4807), False, 'from dssim.simulation import DSSimulation, DSAbortException, DSSchedulable, sim\n'), ((8076, 8107), 'unittest.mock.call', 'call', ([], {'producer': 'producer', 'data': '(1)'}), '(producer=producer, data=1)\n', (8080, 8107), False, 'from unittest.mock import Mock, call\n'), ((8109, 8140), 'unittest.mock.call', 'call', ([], {'producer': 'producer', 'data': '(2)'}), '(producer=producer, data=2)\n', (8113, 8140), False, 'from unittest.mock import Mock, call\n'), ((8142, 8173), 'unittest.mock.call', 'call', ([], {'producer': 'producer', 'data': '(3)'}), '(producer=producer, data=3)\n', (8146, 8173), False, 'from unittest.mock import Mock, call\n'), ((8790, 8821), 'unittest.mock.call', 'call', ([], {'producer': 'producer', 'data': '(1)'}), '(producer=producer, data=1)\n', (8794, 8821), False, 'from unittest.mock import Mock, call\n'), ((8823, 8854), 'unittest.mock.call', 'call', ([], {'producer': 'producer', 'data': '(2)'}), '(producer=producer, data=2)\n', (8827, 8854), False, 'from unittest.mock import Mock, 
call\n'), ((9851, 9864), 'unittest.mock.call', 'call', (['(2)', 'None'], {}), '(2, None)\n', (9855, 9864), False, 'from unittest.mock import Mock, call\n'), ((9896, 9930), 'unittest.mock.call', 'call', (['(2)'], {'producer': 'producer', 'data': '(2)'}), '(2, producer=producer, data=2)\n', (9900, 9930), False, 'from unittest.mock import Mock, call\n'), ((9965, 9999), 'unittest.mock.call', 'call', (['(3)'], {'producer': 'producer', 'data': '(3)'}), '(3, producer=producer, data=3)\n', (9969, 9999), False, 'from unittest.mock import Mock, call\n')]
|
import os
from setuptools import setup, find_packages
BASEDIR = os.path.dirname(os.path.abspath(__file__))
VERSION = open(os.path.join(BASEDIR, 'VERSION')).read().strip()
# Dependencies (format is 'PYPI_PACKAGE_NAME[>]=VERSION_NUMBER')
BASE_DEPENDENCIES = [
'pandas>=1.3',
'numpy>=1.19'
]
# TEST_DEPENDENCIES = [
# ]
# DEVELOPMENT_DEPENDENCIES = [
# ]
# LOCAL_DEPENDENCIES = [
# ]
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(BASEDIR))
setup(
name='wf-wida-utils',
packages=find_packages(),
version=VERSION,
include_package_data=True,
description='Tools for working with WIDA assessments data',
long_description=open('README.md').read(),
url='https://github.com/WildflowerSchools/wf-wida-utils',
author='<NAME>',
author_email='<EMAIL>',
install_requires=BASE_DEPENDENCIES,
# tests_require=TEST_DEPENDENCIES,
# extras_require = {
# 'test': TEST_DEPENDENCIES,
# 'development': DEVELOPMENT_DEPENDENCIES,
# 'local': LOCAL_DEPENDENCIES
# },
# entry_points={
# "console_scripts": [
# "COMMAND_NAME = MODULE_PATH:METHOD_NAME"
# ]
# },
keywords=['database'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
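# --- Hedged usage note (not part of the original setup script) ---
# Typical ways to consume this file from a source checkout:
#   pip install .            # install the package and its dependencies
#   python setup.py sdist    # build a source distribution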
|
[
"os.path.abspath",
"os.path.join",
"os.path.normpath",
"setuptools.find_packages"
] |
[((81, 106), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (96, 106), False, 'import os\n'), ((445, 470), 'os.path.normpath', 'os.path.normpath', (['BASEDIR'], {}), '(BASEDIR)\n', (461, 470), False, 'import os\n'), ((519, 534), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (532, 534), False, 'from setuptools import setup, find_packages\n'), ((123, 155), 'os.path.join', 'os.path.join', (['BASEDIR', '"""VERSION"""'], {}), "(BASEDIR, 'VERSION')\n", (135, 155), False, 'import os\n')]
|
"""
Copyright (c) 2019 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import numpy as np
def get_wav(in_channels, pool=True):
"""wavelet decomposition using conv2d"""
harr_wav_L = 1 / np.sqrt(2) * np.ones((1, 2))
harr_wav_H = 1 / np.sqrt(2) * np.ones((1, 2))
harr_wav_H[0, 0] = -1 * harr_wav_H[0, 0]
harr_wav_LL = np.transpose(harr_wav_L) * harr_wav_L
harr_wav_LH = np.transpose(harr_wav_L) * harr_wav_H
harr_wav_HL = np.transpose(harr_wav_H) * harr_wav_L
harr_wav_HH = np.transpose(harr_wav_H) * harr_wav_H
filter_LL = torch.from_numpy(harr_wav_LL).unsqueeze(0)
filter_LH = torch.from_numpy(harr_wav_LH).unsqueeze(0)
filter_HL = torch.from_numpy(harr_wav_HL).unsqueeze(0)
filter_HH = torch.from_numpy(harr_wav_HH).unsqueeze(0)
if pool:
net = nn.Conv2d
else:
net = nn.ConvTranspose2d
LL = net(in_channels, in_channels,
kernel_size=2, stride=2, padding=0, bias=False,
groups=in_channels)
LH = net(in_channels, in_channels,
kernel_size=2, stride=2, padding=0, bias=False,
groups=in_channels)
HL = net(in_channels, in_channels,
kernel_size=2, stride=2, padding=0, bias=False,
groups=in_channels)
HH = net(in_channels, in_channels,
kernel_size=2, stride=2, padding=0, bias=False,
groups=in_channels)
LL.weight.requires_grad = False
LH.weight.requires_grad = False
HL.weight.requires_grad = False
HH.weight.requires_grad = False
LL.weight.data = filter_LL.float().unsqueeze(0).expand(in_channels, -1, -1, -1).clone()
LH.weight.data = filter_LH.float().unsqueeze(0).expand(in_channels, -1, -1, -1).clone()
HL.weight.data = filter_HL.float().unsqueeze(0).expand(in_channels, -1, -1, -1).clone()
HH.weight.data = filter_HH.float().unsqueeze(0).expand(in_channels, -1, -1, -1).clone()
return LL, LH, HL, HH
class WavePool(nn.Module):
def __init__(self, in_channels):
super(WavePool, self).__init__()
self.LL, self.LH, self.HL, self.HH = get_wav(in_channels)
def forward(self, x):
return self.LL(x), self.LH(x), self.HL(x), self.HH(x)
class WaveUnpool(nn.Module):
def __init__(self, in_channels, option_unpool='cat5'):
super(WaveUnpool, self).__init__()
self.in_channels = in_channels
self.option_unpool = option_unpool
self.LL, self.LH, self.HL, self.HH = get_wav(self.in_channels, pool=False)
def forward(self, LL, LH, HL, HH, original=None):
if self.option_unpool == 'sum':
return self.LL(LL) + self.LH(LH) + self.HL(HL) + self.HH(HH)
elif self.option_unpool == 'cat5' and original is not None:
return torch.cat([self.LL(LL), self.LH(LH), self.HL(HL), self.HH(HH), original], dim=1)
else:
raise NotImplementedError
class WaveEncoder(nn.Module):
def __init__(self, option_unpool):
super(WaveEncoder, self).__init__()
self.option_unpool = option_unpool
self.pad = nn.ReflectionPad2d(1)
self.relu = nn.ReLU(inplace=True)
self.conv0 = nn.Conv2d(3, 3, 1, 1, 0)
self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 0)
self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
self.pool1 = WavePool(64)
self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 0)
self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
self.pool2 = WavePool(128)
self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 0)
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
self.pool3 = WavePool(256)
self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 0)
def forward(self, x):
skips = {}
for level in [1, 2, 3, 4]:
x = self.encode(x, skips, level)
return x
def encode(self, x, skips, level):
assert level in {1, 2, 3, 4}
if self.option_unpool == 'sum':
if level == 1:
out = self.conv0(x)
out = self.relu(self.conv1_1(self.pad(out)))
out = self.relu(self.conv1_2(self.pad(out)))
skips['conv1_2'] = out
LL, LH, HL, HH = self.pool1(out)
skips['pool1'] = [LH, HL, HH]
return LL
elif level == 2:
out = self.relu(self.conv2_1(self.pad(x)))
out = self.relu(self.conv2_2(self.pad(out)))
skips['conv2_2'] = out
LL, LH, HL, HH = self.pool2(out)
skips['pool2'] = [LH, HL, HH]
return LL
elif level == 3:
out = self.relu(self.conv3_1(self.pad(x)))
out = self.relu(self.conv3_2(self.pad(out)))
out = self.relu(self.conv3_3(self.pad(out)))
out = self.relu(self.conv3_4(self.pad(out)))
skips['conv3_4'] = out
LL, LH, HL, HH = self.pool3(out)
skips['pool3'] = [LH, HL, HH]
return LL
else:
return self.relu(self.conv4_1(self.pad(x)))
elif self.option_unpool == 'cat5':
if level == 1:
out = self.conv0(x)
out = self.relu(self.conv1_1(self.pad(out)))
return out
elif level == 2:
out = self.relu(self.conv1_2(self.pad(x)))
skips['conv1_2'] = out
LL, LH, HL, HH = self.pool1(out)
skips['pool1'] = [LH, HL, HH]
out = self.relu(self.conv2_1(self.pad(LL)))
return out
elif level == 3:
out = self.relu(self.conv2_2(self.pad(x)))
skips['conv2_2'] = out
LL, LH, HL, HH = self.pool2(out)
skips['pool2'] = [LH, HL, HH]
out = self.relu(self.conv3_1(self.pad(LL)))
return out
else:
out = self.relu(self.conv3_2(self.pad(x)))
out = self.relu(self.conv3_3(self.pad(out)))
out = self.relu(self.conv3_4(self.pad(out)))
skips['conv3_4'] = out
LL, LH, HL, HH = self.pool3(out)
skips['pool3'] = [LH, HL, HH]
out = self.relu(self.conv4_1(self.pad(LL)))
return out
else:
raise NotImplementedError
class WaveDecoder(nn.Module):
def __init__(self, option_unpool):
super(WaveDecoder, self).__init__()
self.option_unpool = option_unpool
if option_unpool == 'sum':
multiply_in = 1
elif option_unpool == 'cat5':
multiply_in = 5
else:
raise NotImplementedError
self.pad = nn.ReflectionPad2d(1)
self.relu = nn.ReLU(inplace=True)
self.conv4_1 = nn.Conv2d(512, 256, 3, 1, 0)
self.recon_block3 = WaveUnpool(256, option_unpool)
if option_unpool == 'sum':
self.conv3_4 = nn.Conv2d(256*multiply_in, 256, 3, 1, 0)
else:
self.conv3_4_2 = nn.Conv2d(256*multiply_in, 256, 3, 1, 0)
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_1 = nn.Conv2d(256, 128, 3, 1, 0)
self.recon_block2 = WaveUnpool(128, option_unpool)
if option_unpool == 'sum':
self.conv2_2 = nn.Conv2d(128*multiply_in, 128, 3, 1, 0)
else:
self.conv2_2_2 = nn.Conv2d(128*multiply_in, 128, 3, 1, 0)
self.conv2_1 = nn.Conv2d(128, 64, 3, 1, 0)
self.recon_block1 = WaveUnpool(64, option_unpool)
if option_unpool == 'sum':
self.conv1_2 = nn.Conv2d(64*multiply_in, 64, 3, 1, 0)
else:
self.conv1_2_2 = nn.Conv2d(64*multiply_in, 64, 3, 1, 0)
self.conv1_1 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x, skips):
for level in [4, 3, 2, 1]:
x = self.decode(x, skips, level)
return x
def decode(self, x, skips, level):
assert level in {4, 3, 2, 1}
if level == 4:
out = self.relu(self.conv4_1(self.pad(x)))
LH, HL, HH = skips['pool3']
original = skips['conv3_4'] if 'conv3_4' in skips.keys() else None
out = self.recon_block3(out, LH, HL, HH, original)
_conv3_4 = self.conv3_4 if self.option_unpool == 'sum' else self.conv3_4_2
out = self.relu(_conv3_4(self.pad(out)))
out = self.relu(self.conv3_3(self.pad(out)))
return self.relu(self.conv3_2(self.pad(out)))
elif level == 3:
out = self.relu(self.conv3_1(self.pad(x)))
LH, HL, HH = skips['pool2']
original = skips['conv2_2'] if 'conv2_2' in skips.keys() else None
out = self.recon_block2(out, LH, HL, HH, original)
_conv2_2 = self.conv2_2 if self.option_unpool == 'sum' else self.conv2_2_2
return self.relu(_conv2_2(self.pad(out)))
elif level == 2:
out = self.relu(self.conv2_1(self.pad(x)))
LH, HL, HH = skips['pool1']
original = skips['conv1_2'] if 'conv1_2' in skips.keys() else None
out = self.recon_block1(out, LH, HL, HH, original)
_conv1_2 = self.conv1_2 if self.option_unpool == 'sum' else self.conv1_2_2
return self.relu(_conv1_2(self.pad(out)))
else:
return self.conv1_1(self.pad(x))
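# --- Hedged usage sketch (an assumption, not part of the original module) ---
# Round trip through the Haar wavelet pooling defined above: WavePool splits a
# feature map into four half-resolution sub-bands, and WaveUnpool with
# option_unpool='sum' recombines them at the original resolution. The tensor
# sizes below are arbitrary.
if __name__ == '__main__':
    x = torch.rand(1, 64, 32, 32)
    pool = WavePool(64)
    unpool = WaveUnpool(64, option_unpool='sum')
    LL, LH, HL, HH = pool(x)        # each sub-band: (1, 64, 16, 16)
    y = unpool(LL, LH, HL, HH)      # recombined:    (1, 64, 32, 32)
    print(LL.shape, y.shape)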
|
[
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.ReflectionPad2d",
"torch.nn.Conv2d",
"numpy.transpose",
"numpy.ones",
"numpy.sqrt"
] |
[((1237, 1252), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (1244, 1252), True, 'import numpy as np\n'), ((1287, 1302), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (1294, 1302), True, 'import numpy as np\n'), ((1367, 1391), 'numpy.transpose', 'np.transpose', (['harr_wav_L'], {}), '(harr_wav_L)\n', (1379, 1391), True, 'import numpy as np\n'), ((1423, 1447), 'numpy.transpose', 'np.transpose', (['harr_wav_L'], {}), '(harr_wav_L)\n', (1435, 1447), True, 'import numpy as np\n'), ((1479, 1503), 'numpy.transpose', 'np.transpose', (['harr_wav_H'], {}), '(harr_wav_H)\n', (1491, 1503), True, 'import numpy as np\n'), ((1535, 1559), 'numpy.transpose', 'np.transpose', (['harr_wav_H'], {}), '(harr_wav_H)\n', (1547, 1559), True, 'import numpy as np\n'), ((4091, 4112), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (4109, 4112), True, 'import torch.nn as nn\n'), ((4133, 4154), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4140, 4154), True, 'import torch.nn as nn\n'), ((4177, 4201), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)', '(1)', '(1)', '(0)'], {}), '(3, 3, 1, 1, 0)\n', (4186, 4201), True, 'import torch.nn as nn\n'), ((4225, 4250), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)', '(1)', '(0)'], {}), '(3, 64, 3, 1, 0)\n', (4234, 4250), True, 'import torch.nn as nn\n'), ((4274, 4300), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(0)'], {}), '(64, 64, 3, 1, 0)\n', (4283, 4300), True, 'import torch.nn as nn\n'), ((4359, 4386), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(1)', '(0)'], {}), '(64, 128, 3, 1, 0)\n', (4368, 4386), True, 'import torch.nn as nn\n'), ((4410, 4438), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(0)'], {}), '(128, 128, 3, 1, 0)\n', (4419, 4438), True, 'import torch.nn as nn\n'), ((4498, 4526), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)', '(1)', '(0)'], {}), '(128, 256, 3, 1, 0)\n', (4507, 4526), True, 'import torch.nn as nn\n'), ((4550, 4578), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (4559, 4578), True, 'import torch.nn as nn\n'), ((4602, 4630), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (4611, 4630), True, 'import torch.nn as nn\n'), ((4654, 4682), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (4663, 4682), True, 'import torch.nn as nn\n'), ((4742, 4770), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)', '(1)', '(0)'], {}), '(256, 512, 3, 1, 0)\n', (4751, 4770), True, 'import torch.nn as nn\n'), ((7842, 7863), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (7860, 7863), True, 'import torch.nn as nn\n'), ((7884, 7905), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7891, 7905), True, 'import torch.nn as nn\n'), ((7929, 7957), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)', '(3)', '(1)', '(0)'], {}), '(512, 256, 3, 1, 0)\n', (7938, 7957), True, 'import torch.nn as nn\n'), ((8228, 8256), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (8237, 8256), True, 'import torch.nn as nn\n'), ((8280, 8308), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (8289, 8308), True, 'import torch.nn as nn\n'), ((8332, 8360), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)', 
'(1)', '(0)'], {}), '(256, 128, 3, 1, 0)\n', (8341, 8360), True, 'import torch.nn as nn\n'), ((8631, 8658), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)', '(1)', '(0)'], {}), '(128, 64, 3, 1, 0)\n', (8640, 8658), True, 'import torch.nn as nn\n'), ((8924, 8949), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)', '(3)', '(1)', '(0)'], {}), '(64, 3, 3, 1, 0)\n', (8933, 8949), True, 'import torch.nn as nn\n'), ((1224, 1234), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1231, 1234), True, 'import numpy as np\n'), ((1274, 1284), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1281, 1284), True, 'import numpy as np\n'), ((1590, 1619), 'torch.from_numpy', 'torch.from_numpy', (['harr_wav_LL'], {}), '(harr_wav_LL)\n', (1606, 1619), False, 'import torch\n'), ((1649, 1678), 'torch.from_numpy', 'torch.from_numpy', (['harr_wav_LH'], {}), '(harr_wav_LH)\n', (1665, 1678), False, 'import torch\n'), ((1708, 1737), 'torch.from_numpy', 'torch.from_numpy', (['harr_wav_HL'], {}), '(harr_wav_HL)\n', (1724, 1737), False, 'import torch\n'), ((1767, 1796), 'torch.from_numpy', 'torch.from_numpy', (['harr_wav_HH'], {}), '(harr_wav_HH)\n', (1783, 1796), False, 'import torch\n'), ((8080, 8122), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * multiply_in)', '(256)', '(3)', '(1)', '(0)'], {}), '(256 * multiply_in, 256, 3, 1, 0)\n', (8089, 8122), True, 'import torch.nn as nn\n'), ((8164, 8206), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * multiply_in)', '(256)', '(3)', '(1)', '(0)'], {}), '(256 * multiply_in, 256, 3, 1, 0)\n', (8173, 8206), True, 'import torch.nn as nn\n'), ((8483, 8525), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * multiply_in)', '(128)', '(3)', '(1)', '(0)'], {}), '(128 * multiply_in, 128, 3, 1, 0)\n', (8492, 8525), True, 'import torch.nn as nn\n'), ((8567, 8609), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * multiply_in)', '(128)', '(3)', '(1)', '(0)'], {}), '(128 * multiply_in, 128, 3, 1, 0)\n', (8576, 8609), True, 'import torch.nn as nn\n'), ((8780, 8820), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * multiply_in)', '(64)', '(3)', '(1)', '(0)'], {}), '(64 * multiply_in, 64, 3, 1, 0)\n', (8789, 8820), True, 'import torch.nn as nn\n'), ((8862, 8902), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * multiply_in)', '(64)', '(3)', '(1)', '(0)'], {}), '(64 * multiply_in, 64, 3, 1, 0)\n', (8871, 8902), True, 'import torch.nn as nn\n')]
|
# Generated by Django 3.0.7 on 2021-02-16 12:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('payment_system', '0034_auto_20210216_0923'),
]
operations = [
migrations.AlterModelOptions(
name='customsubscriptionrequest',
options={'ordering': ['is_processed', '-created_at'], 'verbose_name': 'custom subscription request', 'verbose_name_plural': 'custom subscription requests'},
),
migrations.AlterModelOptions(
name='invitation',
options={'verbose_name': 'invitation', 'verbose_name_plural': 'invitations'},
),
]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((234, 466), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""customsubscriptionrequest"""', 'options': "{'ordering': ['is_processed', '-created_at'], 'verbose_name':\n 'custom subscription request', 'verbose_name_plural':\n 'custom subscription requests'}"}), "(name='customsubscriptionrequest', options={\n 'ordering': ['is_processed', '-created_at'], 'verbose_name':\n 'custom subscription request', 'verbose_name_plural':\n 'custom subscription requests'})\n", (262, 466), False, 'from django.db import migrations\n'), ((498, 627), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""invitation"""', 'options': "{'verbose_name': 'invitation', 'verbose_name_plural': 'invitations'}"}), "(name='invitation', options={'verbose_name':\n 'invitation', 'verbose_name_plural': 'invitations'})\n", (526, 627), False, 'from django.db import migrations\n')]
|
#!/usr/bin/env python3
"""
Take the output of Cargo.toml and print target name.
"""
import sys
import json
def main(inp):
    try:
        o = json.loads(inp)
    except ValueError:
        print("Failed to interpret JSON", file=sys.stderr)
        return  # nothing to inspect without valid JSON
if 'targets' in o:
for target in o['targets']:
if 'kind' in target and (target['kind'] == 'bin' or 'bin' in target['kind']):
print(target['name'])
    else:
        print("No targets found")
if __name__=="__main__":
if len(sys.argv) < 2:
print("Usage: %s json"%sys.argv[0], file=sys.stderr)
sys.exit(1)
if len(sys.argv) == 2 and sys.argv[1] == '-':
inp = sys.stdin.read()
main(inp)
else:
main(' '.join(sys.argv[1:]))
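# --- Hedged usage example (an assumption, not part of the original script) ---
# The JSON input is typically produced by Cargo itself, e.g.:
#   cargo read-manifest --manifest-path Cargo.toml | ./print_bin_targets.py -
# which prints the name of every binary target declared in the manifest.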
|
[
"sys.stdin.read",
"json.loads",
"sys.exit"
] |
[((144, 159), 'json.loads', 'json.loads', (['inp'], {}), '(inp)\n', (154, 159), False, 'import json\n'), ((596, 607), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (604, 607), False, 'import sys\n'), ((672, 688), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (686, 688), False, 'import sys\n')]
|
import numpy
class ReverseProjection:
"""
    Assumptions:
    the photo lies in the xy plane at z=0;
    a point of the laser plane at laser_position=0 has coordinates (p_1, p_2, p_3);
    the normal vector of the laser plane is (n_1, n_2, n_3);
    the focal point is at (width/2, height/2, -f).
"""
def __init__(self, laser_plane_normal, a_point_at_laser_plane, photo_size_px, up, focal_point_z):
self.laser_plane_normal = laser_plane_normal
self.a_point_at_laser_plane = a_point_at_laser_plane
self.photo_size = photo_size_px
# focal point
self.focal_point = numpy.array([photo_size_px[0]/2, photo_size_px[1]/2, focal_point_z])
self.normal_unit = laser_plane_normal / numpy.linalg.norm(laser_plane_normal)
self.up = up
# Normalized up vector
self.up_unit = up / numpy.linalg.norm(up)
# axis X is orthogonal to Y and Z
self.axis_x = numpy.cross(self.up_unit, self.normal_unit)
self.axis_x_unit = self.axis_x / numpy.linalg.norm(self.axis_x)
def get3D(self, photo_x, photo_y, laser_position):
"""
        :param photo_x: x pixel coordinate of the point on the photo
        :param photo_y: y pixel coordinate of the point on the photo
        :param laser_position: offset of the laser plane along its normal vector
        :return: 3D coordinates of the corresponding point on the laser plane
"""
laser_point = self.a_point_at_laser_plane + laser_position * self.normal_unit # is a point in laser plane
photo_point = numpy.array([photo_x, photo_y, 0])
line_direction = photo_point - self.focal_point
# line containing points photo_point and focus has equation focus + line_param * line_direction
# laser plane has equation normal.(x, y, z) - normal.laser_point = 0, so the intersection is defined by
line_param = (numpy.dot(self.normal_unit,laser_point) - numpy.dot(self.normal_unit, self.focal_point)) / (numpy.dot(self.normal_unit, line_direction))
coordinates = self.focal_point + line_param * line_direction
return coordinates
def coordinate_change(self, old_coordinates):
"""
        :param old_coordinates: coordinates of a point in the photo basis
        :return: coordinates of the same point in the (axis_x, up, normal) basis
"""
# from photo coordinates the cuboid coordinates are obtained using change of basis matrix
new_basis = numpy.transpose(numpy.array([self.axis_x, self.up, self.laser_plane_normal]))
print(new_basis)
new_basis_inversion = numpy.linalg.inv(new_basis)
print(new_basis_inversion)
new_coordinates = numpy.dot(new_basis_inversion, old_coordinates)
return new_coordinates
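# --- Hedged usage sketch (an assumption, not part of the original module) ---
# Example values: a 640x480 photo, a laser plane orthogonal to the z axis
# passing through (0, 0, 100), camera "up" along y, and the focal point 500
# units behind the image plane.
if __name__ == '__main__':
    projection = ReverseProjection(
        laser_plane_normal=numpy.array([0.0, 0.0, 1.0]),
        a_point_at_laser_plane=numpy.array([0.0, 0.0, 100.0]),
        photo_size_px=(640, 480),
        up=numpy.array([0.0, 1.0, 0.0]),
        focal_point_z=-500.0)
    # 3D point seen at the photo centre when the laser plane is not shifted
    print(projection.get3D(320, 240, laser_position=0.0))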
|
[
"numpy.cross",
"numpy.linalg.inv",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot"
] |
[((602, 674), 'numpy.array', 'numpy.array', (['[photo_size_px[0] / 2, photo_size_px[1] / 2, focal_point_z]'], {}), '([photo_size_px[0] / 2, photo_size_px[1] / 2, focal_point_z])\n', (613, 674), False, 'import numpy\n'), ((923, 966), 'numpy.cross', 'numpy.cross', (['self.up_unit', 'self.normal_unit'], {}), '(self.up_unit, self.normal_unit)\n', (934, 966), False, 'import numpy\n'), ((1352, 1386), 'numpy.array', 'numpy.array', (['[photo_x, photo_y, 0]'], {}), '([photo_x, photo_y, 0])\n', (1363, 1386), False, 'import numpy\n'), ((2291, 2318), 'numpy.linalg.inv', 'numpy.linalg.inv', (['new_basis'], {}), '(new_basis)\n', (2307, 2318), False, 'import numpy\n'), ((2380, 2427), 'numpy.dot', 'numpy.dot', (['new_basis_inversion', 'old_coordinates'], {}), '(new_basis_inversion, old_coordinates)\n', (2389, 2427), False, 'import numpy\n'), ((719, 756), 'numpy.linalg.norm', 'numpy.linalg.norm', (['laser_plane_normal'], {}), '(laser_plane_normal)\n', (736, 756), False, 'import numpy\n'), ((837, 858), 'numpy.linalg.norm', 'numpy.linalg.norm', (['up'], {}), '(up)\n', (854, 858), False, 'import numpy\n'), ((1008, 1038), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.axis_x'], {}), '(self.axis_x)\n', (1025, 1038), False, 'import numpy\n'), ((1773, 1816), 'numpy.dot', 'numpy.dot', (['self.normal_unit', 'line_direction'], {}), '(self.normal_unit, line_direction)\n', (1782, 1816), False, 'import numpy\n'), ((2174, 2234), 'numpy.array', 'numpy.array', (['[self.axis_x, self.up, self.laser_plane_normal]'], {}), '([self.axis_x, self.up, self.laser_plane_normal])\n', (2185, 2234), False, 'import numpy\n'), ((1681, 1721), 'numpy.dot', 'numpy.dot', (['self.normal_unit', 'laser_point'], {}), '(self.normal_unit, laser_point)\n', (1690, 1721), False, 'import numpy\n'), ((1723, 1768), 'numpy.dot', 'numpy.dot', (['self.normal_unit', 'self.focal_point'], {}), '(self.normal_unit, self.focal_point)\n', (1732, 1768), False, 'import numpy\n')]
|
#!/usr/bin/env python
from common import open_example_serial_interface
from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear
with open_example_serial_interface() as interface:
# Clear the entire screen, except status line.
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(80), LoadMask(0x00), Clear(0x00)])
status = interface.execute(ReadStatus())
print(status)
while status.busy:
status = interface.execute(ReadStatus())
print(status)
input('Press ENTER...')
# Write something...
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(80)])
interface.execute(WriteData(bytes.fromhex('a7 84 8b 8b 8e 33 00 96 8e 91 8b 83 19')))
input('Press ENTER...')
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(81), LoadMask(0xf0), Clear(0x30)])
status = interface.execute(ReadStatus())
print(status)
while status.busy:
status = interface.execute(ReadStatus())
print(status)
[hi, lo] = interface.execute([ReadAddressCounterHi(), ReadAddressCounterLo()])
print(f'hi = {hi}, lo = {lo}')
|
[
"coax.LoadAddressCounterHi",
"coax.LoadAddressCounterLo",
"coax.Clear",
"coax.ReadStatus",
"common.open_example_serial_interface",
"coax.ReadAddressCounterHi",
"coax.ReadAddressCounterLo",
"coax.LoadMask"
] |
[((223, 254), 'common.open_example_serial_interface', 'open_example_serial_interface', ([], {}), '()\n', (252, 254), False, 'from common import open_example_serial_interface\n'), ((456, 468), 'coax.ReadStatus', 'ReadStatus', ([], {}), '()\n', (466, 468), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((972, 984), 'coax.ReadStatus', 'ReadStatus', ([], {}), '()\n', (982, 984), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((343, 366), 'coax.LoadAddressCounterHi', 'LoadAddressCounterHi', (['(0)'], {}), '(0)\n', (363, 366), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((368, 392), 'coax.LoadAddressCounterLo', 'LoadAddressCounterLo', (['(80)'], {}), '(80)\n', (388, 392), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((394, 405), 'coax.LoadMask', 'LoadMask', (['(0)'], {}), '(0)\n', (402, 405), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((410, 418), 'coax.Clear', 'Clear', (['(0)'], {}), '(0)\n', (415, 418), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((548, 560), 'coax.ReadStatus', 'ReadStatus', ([], {}), '()\n', (558, 560), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((663, 686), 'coax.LoadAddressCounterHi', 'LoadAddressCounterHi', (['(0)'], {}), '(0)\n', (683, 686), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((688, 712), 'coax.LoadAddressCounterLo', 'LoadAddressCounterLo', (['(80)'], {}), '(80)\n', (708, 712), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((859, 882), 'coax.LoadAddressCounterHi', 'LoadAddressCounterHi', (['(0)'], {}), '(0)\n', (879, 882), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((884, 908), 'coax.LoadAddressCounterLo', 'LoadAddressCounterLo', (['(81)'], {}), '(81)\n', (904, 908), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((910, 923), 'coax.LoadMask', 'LoadMask', (['(240)'], {}), '(240)\n', (918, 923), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((926, 935), 'coax.Clear', 'Clear', (['(48)'], {}), '(48)\n', (931, 935), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((1064, 1076), 'coax.ReadStatus', 'ReadStatus', ([], {}), '()\n', (1074, 1076), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, 
LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((1136, 1158), 'coax.ReadAddressCounterHi', 'ReadAddressCounterHi', ([], {}), '()\n', (1156, 1158), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n'), ((1160, 1182), 'coax.ReadAddressCounterLo', 'ReadAddressCounterLo', ([], {}), '()\n', (1180, 1182), False, 'from coax import ReadAddressCounterHi, ReadAddressCounterLo, ReadStatus, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, LoadMask, Clear\n')]
|
#!/usr/bin/env python
'''
Feature homography
==================
Example of using features2d framework for interactive video homography matching.
AKAZE features and FLANN matcher are used. The actual tracking is implemented by
PlaneTracker class in plane_tracker.py
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
# local modules
from tst_scene_render import TestSceneRender
def intersectionRate(s1, s2):
x1, y1, x2, y2 = s1
s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, intersection = cv2.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2)))
from tests_common import NewOpenCVTests
class feature_homography_test(NewOpenCVTests):
render = None
tracker = None
framesCounter = 0
frame = None
def test_feature_homography(self):
self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
self.get_sample('samples/data/box.png'), noise = 0.5, speed = 0.5)
self.frame = self.render.getNextFrame()
self.tracker = PlaneTracker()
self.tracker.clear()
self.tracker.add_target(self.frame, self.render.getCurrentRect())
while self.framesCounter < 100:
self.framesCounter += 1
tracked = self.tracker.track(self.frame)
if len(tracked) > 0:
tracked = tracked[0]
self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6)
else:
self.assertEqual(0, 1, 'Tracking error')
self.frame = self.render.getNextFrame()
# built-in modules
from collections import namedtuple
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
MIN_MATCH_COUNT = 10
'''
image - image to track
rect - tracked rectangle (x1, y1, x2, y2)
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
'''
PlanarTarget = namedtuple('PlaneTarget', 'image, rect, keypoints, descrs, data')
'''
target - reference to PlanarTarget
p0 - matched points coords in target image
p1 - matched points coords in input frame
H - homography matrix from p0 to p1
quad - target boundary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv2.AKAZE_create(threshold = 0.003)
self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []
def add_target(self, image, rect, data=None):
'''Add a new tracking target.'''
x0, y0, x1, y1 = rect
raw_points, raw_descrs = self.detect_features(image)
points, descs = [], []
for kp, desc in zip(raw_points, raw_descrs):
x, y = kp.pt
if x0 <= x <= x1 and y0 <= y <= y1:
points.append(kp)
descs.append(desc)
descs = np.uint8(descs)
self.matcher.add([descs])
target = PlanarTarget(image = image, rect=rect, keypoints = points, descrs=descs, data=data)
self.targets.append(target)
def clear(self):
'''Remove all targets'''
self.targets = []
self.matcher.clear()
def track(self, frame):
'''Returns a list of detected TrackedTarget objects'''
self.frame_points, frame_descrs = self.detect_features(frame)
if len(self.frame_points) < MIN_MATCH_COUNT:
return []
matches = self.matcher.knnMatch(frame_descrs, k = 2)
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
if len(matches) < MIN_MATCH_COUNT:
return []
matches_by_id = [[] for _ in xrange(len(self.targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < MIN_MATCH_COUNT:
continue
target = self.targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
p0, p1 = p0[status], p1[status]
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.p0), reverse=True)
return tracked
def detect_features(self, frame):
'''detect_features(self, frame) -> keypoints, descrs'''
keypoints, descrs = self.detector.detectAndCompute(frame, None)
if descrs is None: # detectAndCompute returns descs=None if no keypoints found
descrs = []
return keypoints, descrs
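# --- Hedged usage sketch (an assumption, not part of the original test module) ---
# Stand-alone illustration of the PlaneTracker API exercised by the test above;
# the image path and the tracked rectangle are placeholders, any sufficiently
# textured image will do.
if __name__ == '__main__':
    frame0 = cv2.imread('graf1.png')                    # assumed sample image
    if frame0 is not None:
        h, w = frame0.shape[:2]
        tracker = PlaneTracker()
        tracker.add_target(frame0, (0, 0, w // 2, h // 2))
        for tracked in tracker.track(frame0):           # re-detect in the same frame
            print('matched quad:', np.int32(tracked.quad))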
|
[
"cv2.contourArea",
"numpy.uint8",
"numpy.float32",
"cv2.AKAZE_create",
"cv2.FlannBasedMatcher",
"numpy.array",
"collections.namedtuple",
"numpy.int32",
"cv2.findHomography"
] |
[((2274, 2339), 'collections.namedtuple', 'namedtuple', (['"""PlaneTarget"""', '"""image, rect, keypoints, descrs, data"""'], {}), "('PlaneTarget', 'image, rect, keypoints, descrs, data')\n", (2284, 2339), False, 'from collections import namedtuple\n'), ((2588, 2642), 'collections.namedtuple', 'namedtuple', (['"""TrackedTarget"""', '"""target, p0, p1, H, quad"""'], {}), "('TrackedTarget', 'target, p0, p1, H, quad')\n", (2598, 2642), False, 'from collections import namedtuple\n'), ((563, 613), 'numpy.array', 'np.array', (['[[x1, y1], [x2, y1], [x2, y2], [x1, y2]]'], {}), '([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])\n', (571, 613), True, 'import numpy as np\n'), ((669, 681), 'numpy.array', 'np.array', (['s2'], {}), '(s2)\n', (677, 681), True, 'import numpy as np\n'), ((2712, 2745), 'cv2.AKAZE_create', 'cv2.AKAZE_create', ([], {'threshold': '(0.003)'}), '(threshold=0.003)\n', (2728, 2745), False, 'import cv2\n'), ((2771, 2810), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['flann_params', '{}'], {}), '(flann_params, {})\n', (2792, 2810), False, 'import cv2\n'), ((3334, 3349), 'numpy.uint8', 'np.uint8', (['descs'], {}), '(descs)\n', (3342, 3349), True, 'import numpy as np\n'), ((706, 725), 'cv2.contourArea', 'cv2.contourArea', (['s1'], {}), '(s1)\n', (721, 725), False, 'import cv2\n'), ((4578, 4598), 'numpy.float32', 'np.float32', (['(p0, p1)'], {}), '((p0, p1))\n', (4588, 4598), True, 'import numpy as np\n'), ((4623, 4666), 'cv2.findHomography', 'cv2.findHomography', (['p0', 'p1', 'cv2.RANSAC', '(3.0)'], {}), '(p0, p1, cv2.RANSAC, 3.0)\n', (4641, 4666), False, 'import cv2\n'), ((4885, 4937), 'numpy.float32', 'np.float32', (['[[x0, y0], [x1, y0], [x1, y1], [x0, y1]]'], {}), '([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])\n', (4895, 4937), True, 'import numpy as np\n'), ((744, 756), 'numpy.array', 'np.array', (['s2'], {}), '(s2)\n', (752, 756), True, 'import numpy as np\n'), ((1597, 1619), 'numpy.int32', 'np.int32', (['tracked.quad'], {}), '(tracked.quad)\n', (1605, 1619), True, 'import numpy as np\n')]
|
"""Tests for josepy.jws."""
import base64
import unittest
from unittest import mock
import OpenSSL
from josepy import errors, json_util, jwa, jwk, test_util
CERT = test_util.load_comparable_cert('cert.pem')
KEY = jwk.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
class MediaTypeTest(unittest.TestCase):
"""Tests for josepy.jws.MediaType."""
def test_decode(self):
from josepy.jws import MediaType
self.assertEqual('application/app', MediaType.decode('application/app'))
self.assertEqual('application/app', MediaType.decode('app'))
self.assertRaises(
errors.DeserializationError, MediaType.decode, 'app;foo')
def test_encode(self):
from josepy.jws import MediaType
self.assertEqual('app', MediaType.encode('application/app'))
self.assertEqual('application/app;foo',
MediaType.encode('application/app;foo'))
class HeaderTest(unittest.TestCase):
"""Tests for josepy.jws.Header."""
def setUp(self):
from josepy.jws import Header
self.header1 = Header(jwk='foo')
self.header2 = Header(jwk='bar')
self.crit = Header(crit=('a', 'b'))
self.empty = Header()
def test_add_non_empty(self):
from josepy.jws import Header
self.assertEqual(Header(jwk='foo', crit=('a', 'b')),
self.header1 + self.crit)
def test_add_empty(self):
self.assertEqual(self.header1, self.header1 + self.empty)
self.assertEqual(self.header1, self.empty + self.header1)
def test_add_overlapping_error(self):
self.assertRaises(TypeError, self.header1.__add__, self.header2)
def test_add_wrong_type_error(self):
self.assertRaises(TypeError, self.header1.__add__, 'xxx')
def test_crit_decode_always_errors(self):
from josepy.jws import Header
self.assertRaises(errors.DeserializationError, Header.from_json,
{'crit': ['a', 'b']})
def test_x5c_decoding(self):
from josepy.jws import Header
header = Header(x5c=(CERT, CERT))
jobj = header.to_partial_json()
cert_asn1 = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1, CERT.wrapped)
cert_b64 = base64.b64encode(cert_asn1)
self.assertEqual(jobj, {'x5c': [cert_b64, cert_b64]})
self.assertEqual(header, Header.from_json(jobj))
jobj['x5c'][0] = base64.b64encode(b'xxx' + cert_asn1)
self.assertRaises(errors.DeserializationError, Header.from_json, jobj)
def test_find_key(self):
self.assertEqual('foo', self.header1.find_key())
self.assertEqual('bar', self.header2.find_key())
self.assertRaises(errors.Error, self.crit.find_key)
class SignatureTest(unittest.TestCase):
"""Tests for josepy.jws.Signature."""
def test_from_json(self):
from josepy.jws import Header
from josepy.jws import Signature
self.assertEqual(
Signature(signature=b'foo', header=Header(alg=jwa.RS256)),
Signature.from_json(
{'signature': 'Zm9v', 'header': {'alg': 'RS256'}}))
def test_from_json_no_alg_error(self):
from josepy.jws import Signature
self.assertRaises(errors.DeserializationError,
Signature.from_json, {'signature': 'foo'})
class JWSTest(unittest.TestCase):
"""Tests for josepy.jws.JWS."""
def setUp(self):
self.privkey = KEY
self.pubkey = self.privkey.public_key()
from josepy.jws import JWS
self.unprotected = JWS.sign(
payload=b'foo', key=self.privkey, alg=jwa.RS256)
self.protected = JWS.sign(
payload=b'foo', key=self.privkey, alg=jwa.RS256,
protect=frozenset(['jwk', 'alg']))
self.mixed = JWS.sign(
payload=b'foo', key=self.privkey, alg=jwa.RS256,
protect=frozenset(['alg']))
def test_pubkey_jwk(self):
self.assertEqual(self.unprotected.signature.combined.jwk, self.pubkey)
self.assertEqual(self.protected.signature.combined.jwk, self.pubkey)
self.assertEqual(self.mixed.signature.combined.jwk, self.pubkey)
def test_sign_unprotected(self):
self.assertIs(self.unprotected.verify(), True)
def test_sign_protected(self):
self.assertIs(self.protected.verify(), True)
def test_sign_mixed(self):
self.assertIs(self.mixed.verify(), True)
def test_compact_lost_unprotected(self):
compact = self.mixed.to_compact()
self.assertEqual(
b'<KEY>'
b'_893n1zQjpim_eLS5J1F61lkvrCrCDErTEJnBGOGesJ72M7b6Ve1cAJA',
compact)
from josepy.jws import JWS
mixed = JWS.from_compact(compact)
self.assertNotEqual(self.mixed, mixed)
self.assertEqual(
set(['alg']), set(mixed.signature.combined.not_omitted()))
def test_from_compact_missing_components(self):
from josepy.jws import JWS
self.assertRaises(errors.DeserializationError, JWS.from_compact, b'.')
def test_json_omitempty(self):
protected_jobj = self.protected.to_partial_json(flat=True)
unprotected_jobj = self.unprotected.to_partial_json(flat=True)
self.assertNotIn('protected', unprotected_jobj)
self.assertNotIn('header', protected_jobj)
unprotected_jobj['header'] = unprotected_jobj['header'].to_json()
from josepy.jws import JWS
self.assertEqual(JWS.from_json(protected_jobj), self.protected)
self.assertEqual(JWS.from_json(unprotected_jobj), self.unprotected)
def test_json_flat(self):
jobj_to = {
'signature': json_util.encode_b64jose(
self.mixed.signature.signature),
'payload': json_util.encode_b64jose(b'foo'),
'header': self.mixed.signature.header,
'protected': json_util.encode_b64jose(
self.mixed.signature.protected.encode('utf-8')),
}
jobj_from = jobj_to.copy()
jobj_from['header'] = jobj_from['header'].to_json()
self.assertEqual(self.mixed.to_partial_json(flat=True), jobj_to)
from josepy.jws import JWS
self.assertEqual(self.mixed, JWS.from_json(jobj_from))
def test_json_not_flat(self):
jobj_to = {
'signatures': (self.mixed.signature,),
'payload': json_util.encode_b64jose(b'foo'),
}
jobj_from = jobj_to.copy()
jobj_from['signatures'] = [jobj_to['signatures'][0].to_json()]
self.assertEqual(self.mixed.to_partial_json(flat=False), jobj_to)
from josepy.jws import JWS
self.assertEqual(self.mixed, JWS.from_json(jobj_from))
def test_from_json_mixed_flat(self):
from josepy.jws import JWS
self.assertRaises(errors.DeserializationError, JWS.from_json,
{'signatures': (), 'signature': 'foo'})
def test_from_json_hashable(self):
from josepy.jws import JWS
hash(JWS.from_json(self.mixed.to_json()))
class CLITest(unittest.TestCase):
def setUp(self):
self.key_path = test_util.vector_path('rsa512_key.pem')
def test_unverified(self):
from josepy.jws import CLI
with mock.patch('sys.stdin') as sin:
sin.read.return_value = '{"payload": "foo", "signature": "xxx"}'
with mock.patch('sys.stdout'):
self.assertEqual(-1, CLI.run(['verify']))
def test_json(self):
from josepy.jws import CLI
with mock.patch('sys.stdin') as sin:
sin.read.return_value = 'foo'
with mock.patch('sys.stdout') as sout:
CLI.run(['sign', '-k', self.key_path, '-a', 'RS256',
'-p', 'jwk'])
sin.read.return_value = sout.write.mock_calls[0][1][0]
self.assertEqual(0, CLI.run(['verify']))
def test_compact(self):
from josepy.jws import CLI
with mock.patch('sys.stdin') as sin:
sin.read.return_value = 'foo'
with mock.patch('sys.stdout') as sout:
CLI.run(['--compact', 'sign', '-k', self.key_path])
sin.read.return_value = sout.write.mock_calls[0][1][0]
self.assertEqual(0, CLI.run([
'--compact', 'verify', '--kty', 'RSA',
'-k', self.key_path]))
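# --- Illustrative sketch (editor's addition, not part of the test suite). ---
# A minimal JWS round trip using the josepy APIs exercised by JWSTest above.
# Generating a fresh RSA key via the `cryptography` package is an assumption;
# the tests load a fixture key (rsa512_key.pem) instead.
def _example_jws_roundtrip():
    from cryptography.hazmat.primitives.asymmetric import rsa
    from josepy import jwa, jwk
    from josepy.jws import JWS
    key = jwk.JWKRSA(key=rsa.generate_private_key(
        public_exponent=65537, key_size=2048))
    signed = JWS.sign(payload=b'foo', key=key, alg=jwa.RS256,
                      protect=frozenset(['jwk', 'alg']))
    # Protecting the jwk keeps it in the compact serialization, so the
    # signature can still be verified after the round trip.
    return JWS.from_compact(signed.to_compact()).verify()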
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
[
"unittest.main",
"josepy.jws.JWS.sign",
"josepy.jws.MediaType.decode",
"josepy.jws.MediaType.encode",
"josepy.jws.Signature.from_json",
"josepy.json_util.encode_b64jose",
"josepy.test_util.vector_path",
"josepy.jws.JWS.from_compact",
"josepy.jws.Header",
"base64.b64encode",
"unittest.mock.patch",
"josepy.jws.CLI.run",
"josepy.jws.Header.from_json",
"josepy.test_util.load_comparable_cert",
"josepy.jws.JWS.from_json",
"OpenSSL.crypto.dump_certificate",
"josepy.test_util.load_vector"
] |
[((167, 209), 'josepy.test_util.load_comparable_cert', 'test_util.load_comparable_cert', (['"""cert.pem"""'], {}), "('cert.pem')\n", (197, 209), False, 'from josepy import errors, json_util, jwa, jwk, test_util\n'), ((232, 271), 'josepy.test_util.load_vector', 'test_util.load_vector', (['"""rsa512_key.pem"""'], {}), "('rsa512_key.pem')\n", (253, 271), False, 'from josepy import errors, json_util, jwa, jwk, test_util\n'), ((8447, 8462), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8460, 8462), False, 'import unittest\n'), ((1086, 1103), 'josepy.jws.Header', 'Header', ([], {'jwk': '"""foo"""'}), "(jwk='foo')\n", (1092, 1103), False, 'from josepy.jws import Header\n'), ((1127, 1144), 'josepy.jws.Header', 'Header', ([], {'jwk': '"""bar"""'}), "(jwk='bar')\n", (1133, 1144), False, 'from josepy.jws import Header\n'), ((1165, 1188), 'josepy.jws.Header', 'Header', ([], {'crit': "('a', 'b')"}), "(crit=('a', 'b'))\n", (1171, 1188), False, 'from josepy.jws import Header\n'), ((1210, 1218), 'josepy.jws.Header', 'Header', ([], {}), '()\n', (1216, 1218), False, 'from josepy.jws import Header\n'), ((2086, 2110), 'josepy.jws.Header', 'Header', ([], {'x5c': '(CERT, CERT)'}), '(x5c=(CERT, CERT))\n', (2092, 2110), False, 'from josepy.jws import Header\n'), ((2171, 2246), 'OpenSSL.crypto.dump_certificate', 'OpenSSL.crypto.dump_certificate', (['OpenSSL.crypto.FILETYPE_ASN1', 'CERT.wrapped'], {}), '(OpenSSL.crypto.FILETYPE_ASN1, CERT.wrapped)\n', (2202, 2246), False, 'import OpenSSL\n'), ((2279, 2306), 'base64.b64encode', 'base64.b64encode', (['cert_asn1'], {}), '(cert_asn1)\n', (2295, 2306), False, 'import base64\n'), ((2451, 2487), 'base64.b64encode', 'base64.b64encode', (["(b'xxx' + cert_asn1)"], {}), "(b'xxx' + cert_asn1)\n", (2467, 2487), False, 'import base64\n'), ((3604, 3661), 'josepy.jws.JWS.sign', 'JWS.sign', ([], {'payload': "b'foo'", 'key': 'self.privkey', 'alg': 'jwa.RS256'}), "(payload=b'foo', key=self.privkey, alg=jwa.RS256)\n", (3612, 3661), False, 'from josepy.jws import JWS\n'), ((4755, 4780), 'josepy.jws.JWS.from_compact', 'JWS.from_compact', (['compact'], {}), '(compact)\n', (4771, 4780), False, 'from josepy.jws import JWS\n'), ((7158, 7197), 'josepy.test_util.vector_path', 'test_util.vector_path', (['"""rsa512_key.pem"""'], {}), "('rsa512_key.pem')\n", (7179, 7197), False, 'from josepy import errors, json_util, jwa, jwk, test_util\n'), ((470, 505), 'josepy.jws.MediaType.decode', 'MediaType.decode', (['"""application/app"""'], {}), "('application/app')\n", (486, 505), False, 'from josepy.jws import MediaType\n'), ((551, 574), 'josepy.jws.MediaType.decode', 'MediaType.decode', (['"""app"""'], {}), "('app')\n", (567, 574), False, 'from josepy.jws import MediaType\n'), ((774, 809), 'josepy.jws.MediaType.encode', 'MediaType.encode', (['"""application/app"""'], {}), "('application/app')\n", (790, 809), False, 'from josepy.jws import MediaType\n'), ((884, 923), 'josepy.jws.MediaType.encode', 'MediaType.encode', (['"""application/app;foo"""'], {}), "('application/app;foo')\n", (900, 923), False, 'from josepy.jws import MediaType\n'), ((1317, 1351), 'josepy.jws.Header', 'Header', ([], {'jwk': '"""foo"""', 'crit': "('a', 'b')"}), "(jwk='foo', crit=('a', 'b'))\n", (1323, 1351), False, 'from josepy.jws import Header\n'), ((2402, 2424), 'josepy.jws.Header.from_json', 'Header.from_json', (['jobj'], {}), '(jobj)\n', (2418, 2424), False, 'from josepy.jws import Header\n'), ((3074, 3144), 'josepy.jws.Signature.from_json', 'Signature.from_json', (["{'signature': 'Zm9v', 'header': {'alg': 'RS256'}}"], 
{}), "({'signature': 'Zm9v', 'header': {'alg': 'RS256'}})\n", (3093, 3144), False, 'from josepy.jws import Signature\n'), ((5511, 5540), 'josepy.jws.JWS.from_json', 'JWS.from_json', (['protected_jobj'], {}), '(protected_jobj)\n', (5524, 5540), False, 'from josepy.jws import JWS\n'), ((5583, 5614), 'josepy.jws.JWS.from_json', 'JWS.from_json', (['unprotected_jobj'], {}), '(unprotected_jobj)\n', (5596, 5614), False, 'from josepy.jws import JWS\n'), ((5710, 5766), 'josepy.json_util.encode_b64jose', 'json_util.encode_b64jose', (['self.mixed.signature.signature'], {}), '(self.mixed.signature.signature)\n', (5734, 5766), False, 'from josepy import errors, json_util, jwa, jwk, test_util\n'), ((5808, 5840), 'josepy.json_util.encode_b64jose', 'json_util.encode_b64jose', (["b'foo'"], {}), "(b'foo')\n", (5832, 5840), False, 'from josepy import errors, json_util, jwa, jwk, test_util\n'), ((6260, 6284), 'josepy.jws.JWS.from_json', 'JWS.from_json', (['jobj_from'], {}), '(jobj_from)\n', (6273, 6284), False, 'from josepy.jws import JWS\n'), ((6415, 6447), 'josepy.json_util.encode_b64jose', 'json_util.encode_b64jose', (["b'foo'"], {}), "(b'foo')\n", (6439, 6447), False, 'from josepy import errors, json_util, jwa, jwk, test_util\n'), ((6712, 6736), 'josepy.jws.JWS.from_json', 'JWS.from_json', (['jobj_from'], {}), '(jobj_from)\n', (6725, 6736), False, 'from josepy.jws import JWS\n'), ((7278, 7301), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdin"""'], {}), "('sys.stdin')\n", (7288, 7301), False, 'from unittest import mock\n'), ((7563, 7586), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdin"""'], {}), "('sys.stdin')\n", (7573, 7586), False, 'from unittest import mock\n'), ((8002, 8025), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdin"""'], {}), "('sys.stdin')\n", (8012, 8025), False, 'from unittest import mock\n'), ((7404, 7428), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdout"""'], {}), "('sys.stdout')\n", (7414, 7428), False, 'from unittest import mock\n'), ((7654, 7678), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdout"""'], {}), "('sys.stdout')\n", (7664, 7678), False, 'from unittest import mock\n'), ((7704, 7770), 'josepy.jws.CLI.run', 'CLI.run', (["['sign', '-k', self.key_path, '-a', 'RS256', '-p', 'jwk']"], {}), "(['sign', '-k', self.key_path, '-a', 'RS256', '-p', 'jwk'])\n", (7711, 7770), False, 'from josepy.jws import CLI\n'), ((8093, 8117), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdout"""'], {}), "('sys.stdout')\n", (8103, 8117), False, 'from unittest import mock\n'), ((8143, 8194), 'josepy.jws.CLI.run', 'CLI.run', (["['--compact', 'sign', '-k', self.key_path]"], {}), "(['--compact', 'sign', '-k', self.key_path])\n", (8150, 8194), False, 'from josepy.jws import CLI\n'), ((3038, 3059), 'josepy.jws.Header', 'Header', ([], {'alg': 'jwa.RS256'}), '(alg=jwa.RS256)\n', (3044, 3059), False, 'from josepy.jws import Header\n'), ((7467, 7486), 'josepy.jws.CLI.run', 'CLI.run', (["['verify']"], {}), "(['verify'])\n", (7474, 7486), False, 'from josepy.jws import CLI\n'), ((7903, 7922), 'josepy.jws.CLI.run', 'CLI.run', (["['verify']"], {}), "(['verify'])\n", (7910, 7922), False, 'from josepy.jws import CLI\n'), ((8302, 8371), 'josepy.jws.CLI.run', 'CLI.run', (["['--compact', 'verify', '--kty', 'RSA', '-k', self.key_path]"], {}), "(['--compact', 'verify', '--kty', 'RSA', '-k', self.key_path])\n", (8309, 8371), False, 'from josepy.jws import CLI\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.dns import dns_service
from openstack.dns.v2 import _proxy
from openstack.dns.v2 import ptr as _ptr
from openstack.dns.v2 import recordset as _recordset
from openstack.dns.v2 import zone as _zone
from openstack.tests.unit.test_proxy_base3 import BaseProxyTestCase
class TestDNSProxy(BaseProxyTestCase):
def __init__(self, *args, **kwargs):
super(TestDNSProxy, self).__init__(
*args,
proxy_class=_proxy.Proxy,
service_class=dns_service.DNSService,
**kwargs)
class TestZone(TestDNSProxy):
def test_list_zones(self):
query = {
'zone_type': 'public',
'limit': 10
}
self.mock_response_json_file_values('list_zone.json')
zones = list(self.proxy.zones(**query))
self.assert_session_list_with('/zones', {'type': 'public',
'limit': 10})
self.assertEqual(2, len(zones))
zone = zones[0]
self.assertEqual(zone.id, '2c9eb155587194ec01587224c9f90149')
self.assertEqual(zone.name, 'example.com.')
self.assertEqual(zone.description, "This is an example zone.")
self.assertEqual(zone.email, "<EMAIL>")
self.assertEqual(zone.ttl, 300)
self.assertEqual(zone.serial, 0)
self.assertEqual(zone.masters, [])
self.assertEqual(zone.status, "ACTIVE")
self.assertEqual(zone.pool_id, "00000000570e54ee01570e9939b20019")
self.assertEqual(zone.zone_type, "public")
self.assertEqual(zone.created_at, "2016-11-17T11:56:03.439")
self.assertEqual(zone.record_num, 2)
def test_create_public_zone(self):
attrs = {
"name": "example.com.",
"description": "This is an example zone.",
"zone_type": "public",
"email": "<EMAIL>"
}
self.mock_response_json_file_values('create_public_zone.json')
zone = self.proxy.create_zone(**attrs)
self.assert_session_post_with('/zones', json=attrs, headers={})
self.assertEqual(zone.name, 'example.com.')
self.assertEqual(zone.zone_type, 'public')
self.assertEqual(zone.email, '<EMAIL>')
self.assertIsNotNone(zone.id)
def test_create_private_zone(self):
attrs = {
"name": "example.com.",
"description": "This is an example zone.",
"zone_type": "private",
"email": "<EMAIL>",
"router": {
"router_id": "19664294-0bf6-4271-ad3a-94b8c79c6558",
"router_region": "eu-de"
}
}
self.mock_response_json_file_values('create_private_zone.json')
zone = self.proxy.create_zone(**attrs)
self.assert_session_post_with('/zones', json=attrs, headers={})
self.assertEqual(zone.name, 'example.com.')
self.assertEqual(zone.zone_type, 'private')
self.assertEqual(zone.email, '<EMAIL>')
self.assertIsNotNone(zone.router)
self.assertEqual(zone.router.router_id,
"19664294-0bf6-4271-ad3a-94b8c79c6558")
self.assertEqual(zone.router.router_region, "eu-de")
self.assertIsNotNone(zone.id)
def test_get_zone_with_id(self):
self.mock_response_json_file_values("get_zone_response.json")
zone = self.proxy.get_zone("zone-id")
self.session.get.assert_called_once_with(
"zones/zone-id",
endpoint_filter=self.service,
endpoint_override=self.service.get_endpoint_override(),
)
self.assertIsInstance(zone, _zone.Zone)
self.assertEqual("2c9eb155587194ec01587224c9f90149", zone.id)
self.assertEqual("example.com.", zone.name)
self.assertEqual("This is an example zone.", zone.description)
self.assertEqual("<EMAIL>", zone.email)
self.assertEqual(300, zone.ttl)
self.assertEqual(0, zone.serial)
self.assertEqual([], zone.masters)
self.assertEqual("ACTIVE", zone.status)
self.assertEqual("00000000570e54ee01570e9939b20019", zone.pool_id)
self.assertEqual("e55c6f3dc4e34c9f86353b664ae0e70c", zone.project_id)
self.assertEqual("public", zone.zone_type)
self.assertEqual("2016-11-17T11:56:03.439", zone.created_at)
self.assertEqual(2, zone.record_num)
def test_delete_zone_with_id(self):
self.proxy.delete_zone("zone-id")
self.assert_session_delete("zones/zone-id")
def test_delete_zone_with_instance(self):
self.proxy.delete_zone(_zone.Zone(id="zone-id"))
self.assert_session_delete("zones/zone-id")
def test_list_public_zone_nameservers(self):
self.mock_response_json_file_values(
"list_public_zone_ns_response.json")
nameservers = list(self.proxy.nameservers("zone-id"))
self.assert_session_list_with("/zones/zone-id/nameservers")
self.assertEqual(2, len(nameservers))
ns = nameservers[0]
self.assertEqual("ns1.huawei.com.", ns.hostname)
self.assertEqual(1, ns.priority)
def test_list_private_zone_nameservers(self):
self.mock_response_json_file_values(
"list_private_zone_ns_response.json")
nameservers = list(self.proxy.nameservers("zone-id"))
self.assert_session_list_with("/zones/zone-id/nameservers")
self.assertEqual(2, len(nameservers))
ns = nameservers[0]
self.assertEqual("192.168.3.11", ns.address)
self.assertEqual(1, ns.priority)
def test_add_router_to_zone(self):
self.mock_response_json_values({
"status": "PENDING_CREATE",
"router_id": "f0791650-db8c-4a20-8a44-a06c6e24b15b",
"router_region": "xx"
})
router = {
"router_id": "f0791650-db8c-4a20-8a44-a06c6e24b15b",
"router_region": "xx"
}
result = self.proxy.add_router_to_zone("zone-id", **router)
self.assert_session_post_with("/zones/zone-id/associaterouter",
dict(router=router))
self.assertEqual("f0791650-db8c-4a20-8a44-a06c6e24b15b",
result.router_id)
self.assertEqual("xx", result.router_region)
self.assertEqual("PENDING_CREATE", result.status)
def test_remove_router_from_zone(self):
self.mock_response_json_values({
"status": "PENDING_DELETE",
"router_id": "f0791650-db8c-4a20-8a44-a06c6e24b15b",
"router_region": "xx"
})
router = {
"router_id": "f0791650-db8c-4a20-8a44-a06c6e24b15b",
"router_region": "xx"
}
result = self.proxy.remove_router_from_zone("zone-id", **router)
self.assert_session_post_with("/zones/zone-id/disassociaterouter",
dict(router=router))
self.assertEqual("f0791650-db8c-4a20-8a44-a06c6e24b15b",
result.router_id)
self.assertEqual("xx", result.router_region)
self.assertEqual("PENDING_DELETE", result.status)
class TestRecordset(TestDNSProxy):
def __init__(self, *args, **kwargs):
super(TestRecordset, self).__init__(*args, **kwargs)
def test_create_recordset(self):
self.mock_response_json_file_values(
"create_recordset_response.json")
data = {
"name": "www.example.com.",
"description": "This is an example record set.",
"type": "A",
"ttl": 3600,
"records": [
"192.168.10.1",
"192.168.10.2"
]
}
recordset = self.proxy.create_recordset("zone-id", **data)
self.assert_session_post_with("/zones/zone-id/recordsets",
json=data)
self.assertIsInstance(recordset, _recordset.Recordset)
self.assertEqual("2c9eb155587228570158722b6ac30007", recordset.id)
self.assertEqual("www.example.com.", recordset.name)
self.assertEqual("This is an example record set.",
recordset.description)
self.assertEqual("A", recordset.type)
self.assertEqual(300, recordset.ttl)
self.assertEqual(["192.168.10.1", "192.168.10.2"], recordset.records)
self.assertEqual("PENDING_CREATE", recordset.status)
self.assertEqual("zone-id", recordset.zone_id)
self.assertEqual("example.com.", recordset.zone_name)
self.assertEqual("2016-11-17T12:03:17.827", recordset.create_at)
self.assertEqual("e55c6f3dc4e34c9f86353b664ae0e70c",
recordset.project_id)
self.assertFalse(recordset.is_default)
def test_list_recordset(self):
query = {
"limit": 20,
"marker": "recordset-id"
}
self.mock_response_json_file_values("list_recordset_response.json")
recordsets = list(self.proxy.recordsets("zone-id", **query))
self.assert_session_list_with("/zones/zone-id/recordsets",
params=query)
self.assertEqual(5, len(recordsets))
recordset = recordsets[0]
self.verify_recordset(recordset)
def test_list_all_recordset(self):
query = {
"limit": 20,
"marker": "recordset-id",
"zone_type": "private"
}
self.mock_response_json_file_values("list_all_recordset_response.json")
recordsets = list(self.proxy.all_recordsets(**query))
self.assert_session_list_with("/recordsets", params=query)
self.assertEqual(5, len(recordsets))
recordset = recordsets[0]
self.verify_recordset(recordset)
def verify_recordset(self, recordset):
self.assertIsInstance(recordset, _recordset.Recordset)
self.assertEqual("2c9eb155587194ec01587224c9f9014a", recordset.id)
self.assertEqual("example.com.", recordset.name)
self.assertIsNone(recordset.description)
self.assertEqual("SOA", recordset.type)
self.assertEqual(300, recordset.ttl)
self.assertEqual(
["ns1.hotrot.de. xx.example.com. (1 7200 900 1209600 300)"],
recordset.records)
self.assertEqual("ACTIVE", recordset.status)
self.assertEqual("2c9eb155587194ec01587224c9f90149", recordset.zone_id)
self.assertEqual("example.com.", recordset.zone_name)
self.assertEqual("2016-11-17T11:56:03.439", recordset.create_at)
self.assertEqual("e55c6f3dc4e34c9f86353b664ae0e70c",
recordset.project_id)
self.assertTrue(recordset.is_default)
def test_get_recordset_with_id(self):
self.mock_response_json_file_values("get_recordset_response.json")
recordset = self.proxy.get_recordset("zone-id", "recordset-id")
self.session.get.assert_called_once_with(
"zones/zone-id/recordsets/recordset-id",
endpoint_filter=self.service,
endpoint_override=self.service.get_endpoint_override(),
)
self.assertIsInstance(recordset, _recordset.Recordset)
self.assertEqual("2c9eb155587228570158722b6ac30007", recordset.id)
self.assertEqual("www.example.com.", recordset.name)
self.assertEqual("This is an example record set.",
recordset.description)
self.assertEqual("A", recordset.type)
self.assertEqual(300, recordset.ttl)
self.assertEqual(["192.168.10.2", "192.168.10.1"], recordset.records)
self.assertEqual("PENDING_CREATE", recordset.status)
self.assertEqual("zone-id", recordset.zone_id)
self.assertEqual("example.com.", recordset.zone_name)
self.assertEqual("2016-11-17T12:03:17.827", recordset.create_at)
self.assertEqual("e55c6f3dc4e34c9f86353b664ae0e70c",
recordset.project_id)
self.assertFalse(recordset.is_default)
def test_delete_recordset_with_id(self):
self.proxy.delete_recordset("zone-id", "recordset-id")
self.assert_session_delete("zones/zone-id/recordsets/recordset-id")
def test_delete_recordset_with_id2(self):
self.proxy.delete_recordset(_zone.Zone(id="zone-id"), "recordset-id")
self.assert_session_delete("zones/zone-id/recordsets/recordset-id")
def test_delete_recordset_with_instance(self):
self.proxy.delete_recordset("zone-id",
_recordset.Recordset(id="recordset-id"))
self.assert_session_delete("zones/zone-id/recordsets/recordset-id")
def test_delete_recordset_with_instance2(self):
self.proxy.delete_recordset(_zone.Zone(id="zone-id"),
_recordset.Recordset(id="recordset-id"))
self.assert_session_delete("zones/zone-id/recordsets/recordset-id")
class TestPTR(TestDNSProxy):
def __init__(self, *args, **kwargs):
super(TestPTR, self).__init__(*args, **kwargs)
def test_create_ptr(self):
self.mock_response_json_file_values("create_ptr_response.json")
data = {
'region': 'eu-de',
'floating_ip_id': '9e9c6d33-51a6-4f84-b504-c13301f1cc8c',
'ptrdname': 'www.turnbig.net',
'description': 'HaveFun.lee - For Test',
'ttl': 300,
}
expect = {
'region': 'eu-de',
'floatingip_id': '9e9c6d33-51a6-4f84-b504-c13301f1cc8c',
'ptrdname': 'www.turnbig.net',
'description': 'HaveFun.lee - For Test',
'ttl': 300,
}
ptr = self.proxy.create_ptr(**data)
self.assert_session_patch_with(
"reverse/floatingips/eu-de:9e9c6d33-51a6-4f84-b504-c13301f1cc8c",
json=expect)
self.assertIsInstance(ptr, _ptr.PTR)
self.assertEqual("region_id:c5504932-bf23-4171-b655-b87a6bc59334",
ptr.id)
self.assertEqual("www.example.com.", ptr.ptrdname)
self.assertEqual("Description for this PTR record",
ptr.description)
self.assertEqual("10.154.52.138", ptr.address)
self.assertEqual("CREATE", ptr.action)
self.assertEqual(300, ptr.ttl)
self.assertEqual("PENDING_CREATE", ptr.status)
def test_list_ptr(self):
query = {
"marker": "last-ptr-id",
"limit": 20
}
self.mock_response_json_file_values("list_ptr_response.json")
ptrs = list(self.proxy.ptrs(**query))
self.assert_session_list_with("/reverse/floatingips", params=query)
self.assertEqual(1, len(ptrs))
ptr = ptrs[0]
self.assertIsInstance(ptr, _ptr.PTR)
self.assertEqual("region_id:c5504932-bf23-4171-b655-b87a6bc59334",
ptr.id)
self.assertEqual("www.example.com.", ptr.ptrdname)
self.assertEqual("Description for this PTR record",
ptr.description)
self.assertEqual("10.154.52.138", ptr.address)
self.assertEqual("NONE", ptr.action)
self.assertEqual(300, ptr.ttl)
self.assertEqual("ACTIVE", ptr.status)
def test_get_ptr_with_id(self):
self.mock_response_json_file_values("get_ptr_response.json")
region = 'eu-de'
floating_ip_id = '9e9c6d33-51a6-4f84-b504-c13301f1cc8c'
ptr = self.proxy.get_ptr(region, floating_ip_id)
self.assert_session_get_with(
"reverse/floatingips/eu-de:9e9c6d33-51a6-4f84-b504-c13301f1cc8c")
self.assertIsInstance(ptr, _ptr.PTR)
self.assertEqual("region_id:c5504932-bf23-4171-b655-b87a6bc59334",
ptr.id)
self.assertEqual("www.example.com.", ptr.ptrdname)
self.assertEqual("Description for this PTR record",
ptr.description)
self.assertEqual("10.154.52.138", ptr.address)
self.assertEqual("CREATE", ptr.action)
self.assertEqual(300, ptr.ttl)
self.assertEqual("ACTIVE", ptr.status)
def test_restore_ptr_with_id(self):
region = 'eu-de'
floating_ip_id = '9e9c6d33-51a6-4f84-b504-c13301f1cc8c'
self.proxy.restore_ptr(region, floating_ip_id)
self.assert_session_patch_with(
"reverse/floatingips/eu-de:9e9c6d33-51a6-4f84-b504-c13301f1cc8c",
json={"ptrdname": None}
)
|
[
"openstack.dns.v2.recordset.Recordset",
"openstack.dns.v2.zone.Zone"
] |
[((5111, 5135), 'openstack.dns.v2.zone.Zone', '_zone.Zone', ([], {'id': '"""zone-id"""'}), "(id='zone-id')\n", (5121, 5135), True, 'from openstack.dns.v2 import zone as _zone\n'), ((12767, 12791), 'openstack.dns.v2.zone.Zone', '_zone.Zone', ([], {'id': '"""zone-id"""'}), "(id='zone-id')\n", (12777, 12791), True, 'from openstack.dns.v2 import zone as _zone\n'), ((13020, 13059), 'openstack.dns.v2.recordset.Recordset', '_recordset.Recordset', ([], {'id': '"""recordset-id"""'}), "(id='recordset-id')\n", (13040, 13059), True, 'from openstack.dns.v2 import recordset as _recordset\n'), ((13226, 13250), 'openstack.dns.v2.zone.Zone', '_zone.Zone', ([], {'id': '"""zone-id"""'}), "(id='zone-id')\n", (13236, 13250), True, 'from openstack.dns.v2 import zone as _zone\n'), ((13288, 13327), 'openstack.dns.v2.recordset.Recordset', '_recordset.Recordset', ([], {'id': '"""recordset-id"""'}), "(id='recordset-id')\n", (13308, 13327), True, 'from openstack.dns.v2 import recordset as _recordset\n')]
|
import re
from string import ascii_letters
import numpy as np
import pandas as pd
import pytest
import anndata as ad
ADATA_ATTRS = ("obs", "var", "varm", "obsm", "layers", "obsp", "varp", "uns")
@pytest.fixture
def adata():
return ad.AnnData(
np.zeros((20, 10)),
obs=pd.DataFrame(
dict(obs_key=list(ascii_letters[:20])),
index=[f"cell{i}" for i in range(20)],
),
var=pd.DataFrame(
dict(var_key=np.arange(10)), index=[f"gene{i}" for i in range(10)]
),
varm=dict(varm_key=np.zeros((10, 20))),
obsm=dict(obsm_key=np.zeros((20, 20))),
layers=dict(layers_key=np.zeros((20, 10))),
obsp=dict(obsp_key=np.zeros((20, 20))),
varp=dict(varp_key=np.zeros((10, 10))),
uns=dict(uns_key=dict(zip("abc", range(3)))),
)
@pytest.fixture(params=ADATA_ATTRS)
def adata_attr(request):
return request.param
def test_anndata_repr(adata):
assert f"{adata.n_obs} × {adata.n_vars}" in repr(adata)
for idxr in [
(slice(10, 20), 9),
(12, 9),
(["cell1", "cell2"], slice(10, 15)),
]:
v = adata[idxr]
v_repr = repr(v)
assert f"{v.n_obs} × {v.n_vars}" in v_repr
assert "View of" in v_repr
for attr in ADATA_ATTRS:
assert re.search(
rf"^\s+{attr}:[^$]*{attr}_key.*$", v_repr, flags=re.MULTILINE
)
def test_removal(adata, adata_attr):
attr = adata_attr
assert re.search(rf"^\s+{attr}:.*$", repr(adata), flags=re.MULTILINE)
delattr(adata, attr)
assert re.search(rf"^\s+{attr}:.*$", repr(adata), flags=re.MULTILINE) is None
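# --- Illustrative sketch (editor's addition, not part of the test module). ---
# The deletion behaviour exercised by test_removal, shown outside pytest:
# removing an attribute drops its line from the AnnData repr entirely.
def _example_removal():
    a = ad.AnnData(
        np.zeros((5, 3)),
        obsm=dict(obsm_key=np.zeros((5, 2))),
    )
    assert "obsm_key" in repr(a)
    del a.obsm  # same effect as delattr(a, "obsm") used in the test above
    assert "obsm_key" not in repr(a)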
|
[
"numpy.zeros",
"pytest.fixture",
"numpy.arange",
"re.search"
] |
[((843, 877), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'ADATA_ATTRS'}), '(params=ADATA_ATTRS)\n', (857, 877), False, 'import pytest\n'), ((260, 278), 'numpy.zeros', 'np.zeros', (['(20, 10)'], {}), '((20, 10))\n', (268, 278), True, 'import numpy as np\n'), ((1323, 1395), 're.search', 're.search', (['f"""^\\\\s+{attr}:[^$]*{attr}_key.*$"""', 'v_repr'], {'flags': 're.MULTILINE'}), "(f'^\\\\s+{attr}:[^$]*{attr}_key.*$', v_repr, flags=re.MULTILINE)\n", (1332, 1395), False, 'import re\n'), ((563, 581), 'numpy.zeros', 'np.zeros', (['(10, 20)'], {}), '((10, 20))\n', (571, 581), True, 'import numpy as np\n'), ((611, 629), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (619, 629), True, 'import numpy as np\n'), ((663, 681), 'numpy.zeros', 'np.zeros', (['(20, 10)'], {}), '((20, 10))\n', (671, 681), True, 'import numpy as np\n'), ((711, 729), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (719, 729), True, 'import numpy as np\n'), ((759, 777), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (767, 777), True, 'import numpy as np\n'), ((471, 484), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (480, 484), True, 'import numpy as np\n')]
|
# Copyright 2019 Graphcore Ltd.
"""Networks for VAE implementation in VCD paper"""
import numpy as np
import tensorflow as tf
def encoder(X_input, Z_dim, dtype, n_hidden=200):
"""As in paper"""
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE, use_resource=True):
# Calculate sqrt(n_hidden) - for initialisers
sqrt_n_hid_inv = 1. / np.sqrt(float(n_hidden))
# Separate networks for approx posterior mean and log std
with tf.variable_scope('mean', use_resource=True, reuse=tf.AUTO_REUSE):
relu0_mean = tf.layers.dense(
X_input,
units=n_hidden,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
bias_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
name='relu0_mu')
relu1_mean = tf.layers.dense(
relu0_mean,
units=n_hidden,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
bias_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
name='relu1_mu')
Z_cond_X_mean = tf.layers.dense(
relu1_mean,
units=Z_dim,
activation=None,
kernel_initializer=tf.random_normal_initializer(dtype=dtype),
bias_initializer=tf.random_normal_initializer(dtype=dtype),
name='posterior_mean')
with tf.variable_scope('std', use_resource=True, reuse=tf.AUTO_REUSE):
relu0_std = tf.layers.dense(
X_input,
units=n_hidden,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
bias_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
name='relu0_std')
relu1_std = tf.layers.dense(
relu0_std,
units=n_hidden,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
bias_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
name='relu1_std')
Z_cond_X_log_std = tf.layers.dense(
relu1_std,
units=Z_dim,
activation=None,
kernel_initializer=tf.random_normal_initializer(dtype=dtype),
bias_initializer=tf.random_normal_initializer(dtype=dtype),
name='posterior_log_std')
        # Numerically-stable softplus, split by sign so that exp() never
        # overflows; used to map the raw log-std output to a positive std.
def _pos_softplus(x):
return x + tf.log(1. + tf.exp(1e-4 - x))
def _neg_softplus(x):
return 1e-4 + tf.log(1. + tf.exp(x - 1e-4))
Z_cond_X_std = tf.where(Z_cond_X_log_std >= 0,
_pos_softplus(Z_cond_X_log_std),
_neg_softplus(Z_cond_X_log_std))
return Z_cond_X_mean, Z_cond_X_std
def decoder(Z_cond_X_samples, output_dims, dtype, n_hidden=200):
"""As in paper"""
# Calculate sqrt(n_hidden) - for initialisers
sqrt_n_hid_inv = 1. / np.sqrt(float(n_hidden))
with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE, use_resource=True):
relu0_dec = tf.layers.dense(
Z_cond_X_samples,
units=n_hidden,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
bias_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
name='relu0_dec')
relu1_dec = tf.layers.dense(
relu0_dec,
units=n_hidden,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
bias_initializer=tf.random_normal_initializer(stddev=sqrt_n_hid_inv, dtype=dtype),
name='relu1_dec')
lin_out = tf.layers.dense(
relu1_dec,
units=np.prod(output_dims),
activation=None,
kernel_initializer=tf.random_normal_initializer(dtype=dtype),
bias_initializer=tf.random_normal_initializer(dtype=dtype),
name='dec_lin_out')
return {'logits': lin_out}
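# --- Illustrative sketch (editor's addition, not from the original file). ---
# Wires the encoder and decoder above into one reparameterised forward pass.
# Assumes a flattened input X_input of shape [batch, prod(output_dims)] and
# the TF1-style graph-mode usage implied by the code in this file.
def vae_forward(X_input, Z_dim, output_dims, dtype=tf.float32):
    z_mean, z_std = encoder(X_input, Z_dim, dtype)
    # Reparameterisation trick: z = mu + sigma * eps with eps ~ N(0, I)
    eps = tf.random_normal(tf.shape(z_mean), dtype=dtype)
    z_sample = z_mean + z_std * eps
    logits = decoder(z_sample, output_dims, dtype)['logits']
    return z_mean, z_std, z_sample, logits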
|
[
"tensorflow.exp",
"tensorflow.random_normal_initializer",
"tensorflow.variable_scope",
"numpy.prod"
] |
[((209, 277), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {'reuse': 'tf.AUTO_REUSE', 'use_resource': '(True)'}), "('encoder', reuse=tf.AUTO_REUSE, use_resource=True)\n", (226, 277), True, 'import tensorflow as tf\n'), ((3424, 3492), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {'reuse': 'tf.AUTO_REUSE', 'use_resource': '(True)'}), "('decoder', reuse=tf.AUTO_REUSE, use_resource=True)\n", (3441, 3492), True, 'import tensorflow as tf\n'), ((469, 534), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mean"""'], {'use_resource': '(True)', 'reuse': 'tf.AUTO_REUSE'}), "('mean', use_resource=True, reuse=tf.AUTO_REUSE)\n", (486, 534), True, 'import tensorflow as tf\n'), ((1624, 1688), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""std"""'], {'use_resource': '(True)', 'reuse': 'tf.AUTO_REUSE'}), "('std', use_resource=True, reuse=tf.AUTO_REUSE)\n", (1641, 1688), True, 'import tensorflow as tf\n'), ((3655, 3719), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (3683, 3719), True, 'import tensorflow as tf\n'), ((3750, 3814), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (3778, 3814), True, 'import tensorflow as tf\n'), ((4001, 4065), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (4029, 4065), True, 'import tensorflow as tf\n'), ((4096, 4160), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (4124, 4160), True, 'import tensorflow as tf\n'), ((4269, 4289), 'numpy.prod', 'np.prod', (['output_dims'], {}), '(output_dims)\n', (4276, 4289), True, 'import numpy as np\n'), ((4351, 4392), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (4379, 4392), True, 'import tensorflow as tf\n'), ((4423, 4464), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (4451, 4464), True, 'import tensorflow as tf\n'), ((709, 773), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (737, 773), True, 'import tensorflow as tf\n'), ((808, 872), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (836, 872), True, 'import tensorflow as tf\n'), ((1084, 1148), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (1112, 1148), True, 'import tensorflow as tf\n'), ((1183, 1247), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (1211, 1247), True, 'import tensorflow as tf\n'), ((1452, 1493), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (1480, 1493), True, 'import tensorflow as tf\n'), ((1528, 1569), 'tensorflow.random_normal_initializer', 
'tf.random_normal_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (1556, 1569), True, 'import tensorflow as tf\n'), ((1862, 1926), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (1890, 1926), True, 'import tensorflow as tf\n'), ((1961, 2025), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (1989, 2025), True, 'import tensorflow as tf\n'), ((2236, 2300), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (2264, 2300), True, 'import tensorflow as tf\n'), ((2335, 2399), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'sqrt_n_hid_inv', 'dtype': 'dtype'}), '(stddev=sqrt_n_hid_inv, dtype=dtype)\n', (2363, 2399), True, 'import tensorflow as tf\n'), ((2608, 2649), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (2636, 2649), True, 'import tensorflow as tf\n'), ((2684, 2725), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (2712, 2725), True, 'import tensorflow as tf\n'), ((2890, 2908), 'tensorflow.exp', 'tf.exp', (['(0.0001 - x)'], {}), '(0.0001 - x)\n', (2896, 2908), True, 'import tensorflow as tf\n'), ((2977, 2995), 'tensorflow.exp', 'tf.exp', (['(x - 0.0001)'], {}), '(x - 0.0001)\n', (2983, 2995), True, 'import tensorflow as tf\n')]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from lndgrpc.compiled import signer_pb2 as lndgrpc_dot_compiled_dot_signer__pb2
class SignerStub(object):
"""Signer is a service that gives access to the signing functionality of the
daemon's wallet.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SignOutputRaw = channel.unary_unary(
'/signrpc.Signer/SignOutputRaw',
request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString,
response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignResp.FromString,
)
self.ComputeInputScript = channel.unary_unary(
'/signrpc.Signer/ComputeInputScript',
request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString,
response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.FromString,
)
self.SignMessage = channel.unary_unary(
'/signrpc.Signer/SignMessage',
request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.SerializeToString,
response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.FromString,
)
self.VerifyMessage = channel.unary_unary(
'/signrpc.Signer/VerifyMessage',
request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.SerializeToString,
response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.FromString,
)
self.DeriveSharedKey = channel.unary_unary(
'/signrpc.Signer/DeriveSharedKey',
request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.SerializeToString,
response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.FromString,
)
class SignerServicer(object):
"""Signer is a service that gives access to the signing functionality of the
daemon's wallet.
"""
def SignOutputRaw(self, request, context):
"""
SignOutputRaw is a method that can be used to generated a signature for a
set of inputs/outputs to a transaction. Each request specifies details
concerning how the outputs should be signed, which keys they should be
signed with, and also any optional tweaks. The return value is a fixed
64-byte signature (the same format as we use on the wire in Lightning).
If we are unable to sign using the specified keys, then an error will be
returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ComputeInputScript(self, request, context):
"""
ComputeInputScript generates a complete InputIndex for the passed
transaction with the signature as defined within the passed SignDescriptor.
This method should be capable of generating the proper input script for
both regular p2wkh output and p2wkh outputs nested within a regular p2sh
output.
Note that when using this method to sign inputs belonging to the wallet,
the only items of the SignDescriptor that need to be populated are pkScript
in the TxOut field, the value in that same field, and finally the input
index.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SignMessage(self, request, context):
"""
SignMessage signs a message with the key specified in the key locator. The
returned signature is fixed-size LN wire format encoded.
The main difference to SignMessage in the main RPC is that a specific key is
used to sign the message instead of the node identity private key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VerifyMessage(self, request, context):
"""
VerifyMessage verifies a signature over a message using the public key
provided. The signature must be fixed-size LN wire format encoded.
The main difference to VerifyMessage in the main RPC is that the public key
used to sign the message does not have to be a node known to the network.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeriveSharedKey(self, request, context):
"""
DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key
derivation between the ephemeral public key in the request and the node's
key specified in the key_desc parameter. Either a key locator or a raw
public key is expected in the key_desc, if neither is supplied, defaults to
the node's identity private key:
P_shared = privKeyNode * ephemeralPubkey
The resulting shared public key is serialized in the compressed format and
hashed with sha256, resulting in the final key length of 256bit.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SignerServicer_to_server(servicer, server):
rpc_method_handlers = {
'SignOutputRaw': grpc.unary_unary_rpc_method_handler(
servicer.SignOutputRaw,
request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.FromString,
response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignResp.SerializeToString,
),
'ComputeInputScript': grpc.unary_unary_rpc_method_handler(
servicer.ComputeInputScript,
request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.FromString,
response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.SerializeToString,
),
'SignMessage': grpc.unary_unary_rpc_method_handler(
servicer.SignMessage,
request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.FromString,
response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.SerializeToString,
),
'VerifyMessage': grpc.unary_unary_rpc_method_handler(
servicer.VerifyMessage,
request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.FromString,
response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.SerializeToString,
),
'DeriveSharedKey': grpc.unary_unary_rpc_method_handler(
servicer.DeriveSharedKey,
request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.FromString,
response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'signrpc.Signer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Signer(object):
"""Signer is a service that gives access to the signing functionality of the
daemon's wallet.
"""
@staticmethod
def SignOutputRaw(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/SignOutputRaw',
lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString,
lndgrpc_dot_compiled_dot_signer__pb2.SignResp.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ComputeInputScript(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/ComputeInputScript',
lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString,
lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SignMessage(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/SignMessage',
lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.SerializeToString,
lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def VerifyMessage(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/VerifyMessage',
lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.SerializeToString,
lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeriveSharedKey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/DeriveSharedKey',
lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.SerializeToString,
lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
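# --- Illustrative sketch (editor's addition; the generated code above is left
# untouched). Shows how the SignerStub defined in this module could be driven.
# The endpoint address and the insecure channel are assumptions: a real lnd
# node requires TLS credentials and a macaroon on every call.
def _example_sign_message(address='localhost:10009'):
    channel = grpc.insecure_channel(address)
    stub = SignerStub(channel)
    # `msg` follows lnd's signer.proto; a `key_loc` would normally be supplied
    # as well to select the signing key (see the SignMessage docstring above).
    request = lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq(msg=b'hello')
    return stub.SignMessage(request)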
|
[
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler",
"grpc.experimental.unary_unary"
] |
[((7612, 7687), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""signrpc.Signer"""', 'rpc_method_handlers'], {}), "('signrpc.Signer', rpc_method_handlers)\n", (7648, 7687), False, 'import grpc\n'), ((5906, 6142), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.SignOutputRaw'], {'request_deserializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SignReq.FromString', 'response_serializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SignResp.SerializeToString'}), '(servicer.SignOutputRaw,\n request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.\n FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.\n SignResp.SerializeToString)\n', (5941, 6142), False, 'import grpc\n'), ((6239, 6487), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.ComputeInputScript'], {'request_deserializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SignReq.FromString', 'response_serializer': 'lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.SerializeToString'}), '(servicer.ComputeInputScript,\n request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.\n FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.\n InputScriptResp.SerializeToString)\n', (6274, 6487), False, 'import grpc\n'), ((6577, 6825), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.SignMessage'], {'request_deserializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.FromString', 'response_serializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.SerializeToString'}), '(servicer.SignMessage,\n request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.\n SignMessageReq.FromString, response_serializer=\n lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.SerializeToString)\n', (6612, 6825), False, 'import grpc\n'), ((6917, 7171), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.VerifyMessage'], {'request_deserializer': 'lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.FromString', 'response_serializer': 'lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.SerializeToString'}), '(servicer.VerifyMessage,\n request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.\n VerifyMessageReq.FromString, response_serializer=\n lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.SerializeToString)\n', (6952, 7171), False, 'import grpc\n'), ((7265, 7521), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.DeriveSharedKey'], {'request_deserializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.FromString', 'response_serializer': 'lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.SerializeToString'}), '(servicer.DeriveSharedKey,\n request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.\n SharedKeyRequest.FromString, response_serializer=\n lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.SerializeToString)\n', (7300, 7521), False, 'import grpc\n'), ((8264, 8589), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/signrpc.Signer/SignOutputRaw"""', 'lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString', 'lndgrpc_dot_compiled_dot_signer__pb2.SignResp.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/signrpc.Signer/SignOutputRaw', lndgrpc_dot_compiled_dot_signer__pb2.\n 
SignReq.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.\n SignResp.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (8293, 8589), False, 'import grpc\n'), ((8952, 9291), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/signrpc.Signer/ComputeInputScript"""', 'lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString', 'lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/signrpc.Signer/ComputeInputScript',\n lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString,\n lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (8981, 9291), False, 'import grpc\n'), ((9645, 9982), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/signrpc.Signer/SignMessage"""', 'lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.SerializeToString', 'lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/signrpc.Signer/SignMessage', lndgrpc_dot_compiled_dot_signer__pb2.\n SignMessageReq.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.\n SignMessageResp.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (9674, 9982), False, 'import grpc\n'), ((10340, 10686), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/signrpc.Signer/VerifyMessage"""', 'lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.SerializeToString', 'lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/signrpc.Signer/VerifyMessage', lndgrpc_dot_compiled_dot_signer__pb2.\n VerifyMessageReq.SerializeToString,\n lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (10369, 10686), False, 'import grpc\n'), ((11043, 11391), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/signrpc.Signer/DeriveSharedKey"""', 'lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.SerializeToString', 'lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/signrpc.Signer/DeriveSharedKey', lndgrpc_dot_compiled_dot_signer__pb2\n .SharedKeyRequest.SerializeToString,\n lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (11072, 11391), False, 'import grpc\n')]
|
from django import template
register = template.Library()
@register.inclusion_tag('datatables/css.html')
def css_bundle():
return {}
@register.inclusion_tag('datatables/js.html')
def js_bundle():
return {}
|
[
"django.template.Library"
] |
[((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')]
|
import cv2
import os
"""
This is a simple script that finds the smallest safe dimensions
to which all images can be cropped. The result is a tuple of the
smallest height and the smallest width found across the dataset.
This is necessary because the standardized dimensions chosen for
the dataset need to fit within this tuple.
"""
root_dir = os.getcwd()
downloads_dir = root_dir + '\\downloads'
smallest_dim = [10000, 10000]
for subdir, dirs, files in os.walk(downloads_dir):
for file in files:
try:
image = cv2.imread(os.path.join(subdir, file))
width = int(image.shape[1])
height = int(image.shape[0])
if smallest_dim[0] > height:
smallest_dim[0] = height
print('new smallest dim:', tuple(smallest_dim))
if smallest_dim[1] > width:
smallest_dim[1] = width
print('new smallest dim:', tuple(smallest_dim))
        except AttributeError:
            # cv2.imread returns None for files it cannot decode, so reading
            # image.shape raises AttributeError; such files are simply skipped.
            print("Weird attribute error that's not worth worrying about")
print("smallest dim:", tuple(smallest_dim))
|
[
"os.getcwd",
"os.walk",
"os.path.join"
] |
[((358, 369), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (367, 369), False, 'import os\n'), ((470, 492), 'os.walk', 'os.walk', (['downloads_dir'], {}), '(downloads_dir)\n', (477, 492), False, 'import os\n'), ((543, 569), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (555, 569), False, 'import os\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_role
short_description: Manages user roles on Apache CloudStack based clouds.
description:
- Create, update, delete user roles.
version_added: "2.3"
author: "<NAME> (@resmo)"
options:
name:
description:
- Name of the role.
required: true
id:
description:
- ID of the role.
- If provided, C(id) is used as key.
aliases: [ 'uuid' ]
role_type:
description:
- Type of the role.
- Only considered for creation.
default: User
choices: [ 'User', 'DomainAdmin', 'ResourceAdmin', 'Admin' ]
description:
description:
- Description of the role.
state:
description:
- State of the role.
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure an user role is present
- local_action:
module: cs_role
name: myrole_user
# Ensure the role with a particular ID is named myrole_user
- local_action:
module: cs_role
name: myrole_user
id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
# Ensure a role is absent
- local_action:
module: cs_role
name: myrole_user
state: absent
'''
RETURN = '''
---
id:
description: UUID of the role.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the role.
returned: success
type: string
sample: myrole
description:
description: Description of the role.
returned: success
type: string
sample: "This is my role description"
role_type:
description: Type of the role.
returned: success
type: string
sample: User
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackRole(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackRole, self).__init__(module)
self.returns = {
'type': 'role_type',
}
def get_role(self):
uuid = self.module.params.get('uuid')
if uuid:
args = {
'id': uuid,
}
roles = self.query_api('listRoles', **args)
if roles:
return roles['role'][0]
else:
args = {
'name': self.module.params.get('name'),
}
roles = self.query_api('listRoles', **args)
if roles:
return roles['role'][0]
return None
def present_role(self):
role = self.get_role()
if role:
role = self._update_role(role)
else:
role = self._create_role(role)
return role
def _create_role(self, role):
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'type': self.module.params.get('role_type'),
'description': self.module.params.get('description'),
}
if not self.module.check_mode:
res = self.query_api('createRole', **args)
role = res['role']
return role
def _update_role(self, role):
args = {
'id': role['id'],
'name': self.module.params.get('name'),
'description': self.module.params.get('description'),
}
if self.has_changed(args, role):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateRole', **args)
# The API as in 4.9 does not return an updated role yet
if 'role' not in res:
role = self.get_role()
else:
role = res['role']
return role
def absent_role(self):
role = self.get_role()
if role:
self.result['changed'] = True
args = {
'id': role['id'],
}
if not self.module.check_mode:
self.query_api('deleteRole', **args)
return role
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
uuid=dict(aliases=['id']),
name=dict(required=True),
description=dict(),
role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_role = AnsibleCloudStackRole(module)
state = module.params.get('state')
if state == 'absent':
role = acs_role.absent_role()
else:
role = acs_role.present_role()
result = acs_role.get_result(role)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
[
"ansible.module_utils.cloudstack.cs_required_together",
"ansible.module_utils.cloudstack.cs_argument_spec"
] |
[((4987, 5005), 'ansible.module_utils.cloudstack.cs_argument_spec', 'cs_argument_spec', ([], {}), '()\n', (5003, 5005), False, 'from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together\n'), ((5402, 5424), 'ansible.module_utils.cloudstack.cs_required_together', 'cs_required_together', ([], {}), '()\n', (5422, 5424), False, 'from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together\n')]
|
# Copyright 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import logging
import sys
import docutils.frontend
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
from typist import _py
logger = logging.getLogger('typist')
# TODO: maybe Sphinx can do this better?
class ParamType(object):
def __init__(self):
self.types = []
self.callable = False
def validate(self, argument):
if not self.types:
return True
if isinstance(argument, tuple(self.types)):
return True
if self.callable and callable(argument):
return True
return False
@classmethod
def from_list(cls, obj, type_list):
ptype = cls()
for part in type_list:
if part == 'or':
continue
part = part.rstrip(',')
if part == 'callable':
ptype.callable = True
_type = _resolve_type(part)
if _type:
ptype.types.append(_type)
else:
                logger.error('could not resolve type declaration for %r' % obj)
return ptype
class _FieldVisitor(docutils.nodes.SparseNodeVisitor):
def __init__(self, document):
self.parts = []
docutils.nodes.NodeVisitor.__init__(self, document)
def visit_Text(self, node):
self.parts.extend(node.strip().split(' '))
class _DocStringVisitor(docutils.nodes.SparseNodeVisitor):
# Others are possible, but in Keystone we only use 'param'. For more
# info see: http://sphinx-doc.org/domains.html#info-field-lists
PARAM_FIELD_TAGS = set(['param'])
TYPE_FIELD_TAGS = set(['type'])
RTYPE_FIELD_TAGS = set(['rtype'])
UNKNOWN = object()
def __init__(self, document, obj):
self.obj = obj
self.params = {}
self.returns = []
docutils.nodes.NodeVisitor.__init__(self, document)
def visit_field(self, node):
name_node, value_node = node.children
tag = name_node.children[0].astext().split()[0]
if tag in self.PARAM_FIELD_TAGS:
self._process_params(name_node)
elif tag in self.TYPE_FIELD_TAGS:
try:
self._process_param_type(name_node, value_node)
except Exception as e:
logger.warning('failed parsing :type: for %r: %r', self.obj, e)
raise
elif tag in self.RTYPE_FIELD_TAGS:
self._process_return_type(value_node)
raise docutils.nodes.SkipChildren
def _multi_values(self, parts):
for part in parts:
if part == 'or':
continue
part = part.rstrip(',')
_type = _resolve_type(part)
if _type:
yield _type
else:
# TODO: maybe a log message here?
pass
def _process_params(self, name_node):
visitor = _FieldVisitor(self.document)
name_node.walk(visitor)
try:
tag, _type, name = visitor.parts
self.params[name] = ParamType.from_list(self.obj, visitor.parts)
except ValueError:
# TODO: maybe a warning here? or UNKNOWN?
pass # hopefully this is defined with a 'type' later
# raise
def _process_param_type(self, name_node, value_node):
visitor = _FieldVisitor(self.document)
name_node.walk(visitor)
tag, name = visitor.parts
visitor = _FieldVisitor(self.document)
value_node.walk(visitor)
self.params[name] = ParamType.from_list(self.obj, visitor.parts)
def _process_return_type(self, value_node):
visitor = _FieldVisitor(self.document)
value_node.walk(visitor)
self.returns = tuple(self._multi_values(visitor.parts))
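# Parse an object's docstring and return ({param_name: ParamType}, (return_types, ...)).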
def parse(obj):
docstring = inspect.getdoc(obj)
if docstring is None:
return {}, ()
parser = docutils.parsers.rst.Parser()
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)).get_default_values()
document = docutils.utils.new_document('', settings)
visitor = _DocStringVisitor(document, obj)
parser.parse(docstring, document)
document.walk(visitor)
return visitor.params, tuple(visitor.returns)
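# Best-effort resolution of a type name found in a docstring; returns None when the
# name cannot be resolved to anything usable.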
def _resolve_type(_type): # noqa
if isinstance(_type, type):
return _type
if _type == 'None':
return type(None)
if _type == 'string':
_type = 'str'
if _py.PY2 and _type == 'str':
return unicode
if _type in __builtins__:
return __builtins__[_type]
try:
type_ = eval(_type)
if isinstance(type_, type):
return type_
except:
pass
try:
module = __import__(_type)
except ImportError:
if '.' not in _type:
# TODO: maybe an error here?
return
else:
return sys.modules[_type]
module_name, object_name = _type.rsplit('.', 1)
module = _resolve_type(module_name)
try:
attr = getattr(module, object_name)
except AttributeError:
# TODO: the docstring is likely incorrect
return None
if isinstance(attr, type):
return attr
return None
|
[
"inspect.getdoc",
"logging.getLogger"
] |
[((731, 758), 'logging.getLogger', 'logging.getLogger', (['"""typist"""'], {}), "('typist')\n", (748, 758), False, 'import logging\n'), ((4438, 4457), 'inspect.getdoc', 'inspect.getdoc', (['obj'], {}), '(obj)\n', (4452, 4457), False, 'import inspect\n')]
|
from functools import reduce
from itertools import product
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
class _Domain:
__slots__ = ['_dimensions_discrete',
'_avg_distance',
'_dimensions_continuous',
'_dimension_length',
'_dimensions',
'_domain',
'_total_size']
def __init__(self,
discrete_domain: Optional[List[Iterable]] = None,
continuous_bounds: Optional[List[Tuple]] = None,
default_resolution: int = 30):
domain_discrete: Dict[str, Any] = {
f'd{i}': list(d) for i, d in enumerate(discrete_domain or [])}
self._dimensions_discrete = list(domain_discrete.keys())
        # Each continuous bound is (low, high) or (low, high, num); pad 2-tuples with
        # the default resolution so c[2] and np.linspace(*c) below always work.
        continuous_bounds = [c if len(c) == 3 else (*c, default_resolution)
                             for c in (continuous_bounds or [])]
self._dimension_length = {
f'c{i}': (c[1] - c[0])
for i, c in enumerate(continuous_bounds)}
self._avg_distance = {f'c{i}': (c[1] - c[0]) / c[2]
for i, c in enumerate(continuous_bounds)}
domain_continuous = {f'c{i}': np.linspace(*c)
for i, c in enumerate(continuous_bounds or [])}
self._dimensions_continuous = list(domain_continuous.keys())
self._dimensions = (self._dimensions_discrete +
self._dimensions_continuous)
self._domain = domain_discrete
self._domain.update(domain_continuous)
self._total_size = reduce(lambda x, y: x * y,
[len(v) for v in self._domain.values()])
assert self._total_size > 0, 'Please define domain components!'
assert default_resolution > 0, 'default_resolution must be > 0!'
def __iter__(self) -> Iterable[str]:
return self._domain.keys().__iter__()
def __getitem__(self, item: str) -> Union[List[Any], np.ndarray]:
return self._domain[item]
@property
def dimensions(self):
return self._dimensions
@property
def dimensions_discrete(self):
return self._dimensions_discrete
@property
def dimensions_continuous(self):
return self._dimensions_continuous
@property
def dimension_length(self):
return self._dimension_length
@property
def avg_distance(self):
return self._avg_distance
@property
def total_size(self):
return self._total_size
def keys(self) -> List[str]:
return list(self._domain.keys())
def values(self) -> List[Any]:
return list(self._domain.values())
def items(self) -> List[Tuple[str, Any]]:
return list(zip(self.keys(), self.values()))
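# Illustrative example (values are made up, not from the original):
#   domain = _Domain(discrete_domain=[['a', 'b']], continuous_bounds=[(0.0, 1.0, 11)])
#   domain.dimensions   -> ['d0', 'c0']
#   domain.total_size   -> 2 * 11 = 22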
class _Individual:
__slots__ = ['_values',
'_scores',
'_payload']
def __init__(self,
values: Dict[str, Any]):
self._values = values
self._scores: Optional[np.ndarray] = None
self._payload: Dict[str, Any] = {}
def __eq__(self,
other: '_Individual') -> bool:
return self._values == other._values
@property
def values(self) -> Dict[str, Any]:
return self._values
@property
def scores(self) -> Union[np.ndarray, None]:
return self._scores
@scores.setter
def scores(self, scores: np.ndarray):
self._scores = scores
@property
def payload(self) -> Dict[str, Any]:
return self._payload
def __getitem__(self, item: str) -> Any:
return self._payload[item]
def __setitem__(self, key: str, value: Any):
self._payload[key] = value
class _Population:
__slots__ = ['_individuals']
def __init__(self,
individuals: Optional[List[_Individual]] = None):
unique_individuals = []
if individuals is not None:
for individual in individuals:
                # Only keep individuals that are not already present (deduplicate by value).
                if not any(ind == individual for ind in unique_individuals):
                    unique_individuals.append(individual)
self._individuals: List[_Individual] = unique_individuals
def __len__(self) -> int:
return len(self._individuals)
def __iter__(self) -> Iterable[_Individual]:
return self._individuals.__iter__()
def __add__(self,
other: '_Population'):
return _Population(self.individuals + other.individuals)
def __getitem__(self,
n: Union[int, slice]
) -> Union[_Individual, '_Population']:
if isinstance(n, int):
return self._individuals[n]
return _Population(self._individuals[n])
@property
def individuals(self) -> List[_Individual]:
return self._individuals
class _DistanceCalculator:
__slots__ = ['_domain']
def __init__(self,
domain: _Domain):
self._domain = domain
def distance_values(self,
individual1: _Individual,
individual2: _Individual) -> float:
s1 = individual1.values
s2 = individual2.values
# Hamming-distance:
distance_discrete = float(np.sum([
s1[k] != s2[k] for k in self._domain.dimensions_discrete]))
# L1-distance:
distance_continuous = float(np.sum([
abs(s1[k] - s2[k]) for k in self._domain.dimensions_continuous]))
return distance_discrete + distance_continuous
class _Scorer:
__slots__ = ['_domain',
'_objectives']
def __init__(self,
domain: _Domain,
objectives: List[Callable]):
self._domain = domain
self._objectives = objectives
def score_individual(self,
individual: _Individual):
if individual.scores is None:
individual.scores = np.array([
obj(individual, self._domain) for obj in self._objectives
])
def score_population(self,
population: _Population):
for ind in population.individuals:
self.score_individual(ind)
class _SimpleSorter:
__slots__ = []
@staticmethod
def sort_population(population: _Population,
score_index: int = 0
) -> _Population:
individuals_sorted = sorted(population,
key=lambda i: i.scores[score_index])
return _Population(individuals_sorted)
class _GridSampler:
__slots__ = ['_domain']
def __init__(self,
domain: _Domain):
self._domain = domain
def get_grid_positions(self) -> List[Dict[str, Any]]:
keys = self._domain.keys()
combinations = product(*self._domain.values())
return [dict(zip(keys, values)) for values in combinations]
def get_grid_population(self) -> _Population:
individuals = [_Individual(values)
for values in self.get_grid_positions()]
return _Population(individuals=individuals)
class _RandomSampler:
__slots__ = ['_domain',
'_rng']
def __init__(self,
domain: _Domain,
seed: int = 111):
self._rng = np.random.default_rng(seed=seed)
self._domain = domain
def get_random_position(self) -> Dict[str, Any]:
return {k: self._rng.choice(self._domain[k]) for k in self._domain}
def get_random_individual(self) -> _Individual:
values = self.get_random_position()
return _Individual(values)
def get_random_population(self,
n_individuals: int) -> _Population:
"""
Get a random population with (unique) samples. If less than the
desired number of individuals are available in the grid, the full
grid is returned.
"""
if n_individuals >= self._domain.total_size:
# If n_individuals exhausts the space, just return the full grid:
return _GridSampler(self._domain).get_grid_population()
individuals = []
while len(individuals) < n_individuals:
new_individual = self.get_random_individual()
if not any([new_individual == ind for ind in individuals]):
individuals.append(new_individual)
return _Population(individuals=individuals)
class _Mutator:
__slots__ = ['_domain',
'_mutation_probability',
'_crossover_probability',
'_selection_probability',
'_seed',
'_rng']
def __init__(self,
domain: _Domain,
mutation_probability: float = 0.2,
crossover_probability: float = 0.2,
selection_probability: float = 0.3,
seed: int = 112):
self._domain = domain
self._mutation_probability = mutation_probability
self._crossover_probability = crossover_probability
self._selection_probability = selection_probability
self._seed = seed
self._rng = np.random.default_rng(seed=seed)
def crossover_individuals(self,
individual1: _Individual,
individual2: _Individual
) -> Tuple[_Individual, _Individual]:
values1 = individual1.values.copy()
values2 = individual2.values.copy()
for d in self._domain.dimensions_discrete:
if self._rng.random() < self._crossover_probability:
values1[d], values2[d] = values2[d], values1[d]
for d in self._domain.dimensions_continuous:
if self._rng.random() < self._crossover_probability:
v1 = values1[d]
v2 = values2[d]
dist = v2 - v1
rand = self._rng.random()
v1 += dist * rand
v2 += dist * rand
# Match new values to grid:
arg_min1 = np.argmin(np.abs(self._domain[d] - v1))
values1[d] = self._domain[d][arg_min1]
arg_min2 = np.argmin(np.abs(self._domain[d] - v2))
values2[d] = self._domain[d][arg_min2]
return _Individual(values1), _Individual(values2)
def mutate_individual(self,
individual: _Individual) -> _Individual:
values = individual.values.copy()
for d in self._domain.dimensions_discrete:
if self._rng.random() < self._mutation_probability:
# Update discrete variables uniform by random:
values[d] = np.random.choice(self._domain[d])
for d in self._domain.dimensions_continuous:
if self._rng.random() < self._mutation_probability:
# Normally distributed mutation with std equal to
# 2x the typical distance between points:
new_value = (values[d] + self._rng.normal() *
2 * self._domain.avg_distance[d])
# Match new value to grid:
arg_min = np.argmin(np.abs(self._domain[d] - new_value))
values[d] = self._domain[d][arg_min]
return _Individual(values=values)
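    # Grow the population towards twice its size: parents are drawn with rank-weighted
    # probabilities, crossed over when distinct, mutated, and appended only if not
    # already present; a counter bounds the number of attempts.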
def mutate_population(self, population: _Population) -> _Population:
p = 1.0 - self._selection_probability
weights = np.array([p ** i for i in range(len(population))])
weights = np.cumsum(weights)
weights /= weights[-1]
counter = 0
new_individuals = population.individuals.copy()
while len(new_individuals) < 2 * len(population):
idx1 = int(np.where(self._rng.random() < weights)[0][0])
idx2 = int(np.where(self._rng.random() < weights)[0][0])
individual1 = population[idx1]
individual2 = population[idx2]
if idx1 != idx2:
individual1, individual2 = self.crossover_individuals(
individual1=individual1,
individual2=individual2)
individual1 = self.mutate_individual(individual1)
individual2 = self.mutate_individual(individual2)
if not np.any([individual1 == ind for ind in new_individuals]):
new_individuals.append(individual1)
if not np.any([individual2 == ind for ind in new_individuals]):
new_individuals.append(individual2)
counter += 1
if counter > 10 * len(population):
break
return _Population(new_individuals)
|
[
"numpy.sum",
"numpy.abs",
"numpy.random.default_rng",
"numpy.cumsum",
"numpy.any",
"numpy.linspace",
"numpy.random.choice"
] |
[((7364, 7396), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed'}), '(seed=seed)\n', (7385, 7396), True, 'import numpy as np\n'), ((9225, 9257), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed'}), '(seed=seed)\n', (9246, 9257), True, 'import numpy as np\n'), ((11582, 11600), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (11591, 11600), True, 'import numpy as np\n'), ((1240, 1255), 'numpy.linspace', 'np.linspace', (['*c'], {}), '(*c)\n', (1251, 1255), True, 'import numpy as np\n'), ((5292, 5360), 'numpy.sum', 'np.sum', (['[(s1[k] != s2[k]) for k in self._domain.dimensions_discrete]'], {}), '([(s1[k] != s2[k]) for k in self._domain.dimensions_discrete])\n', (5298, 5360), True, 'import numpy as np\n'), ((10763, 10796), 'numpy.random.choice', 'np.random.choice', (['self._domain[d]'], {}), '(self._domain[d])\n', (10779, 10796), True, 'import numpy as np\n'), ((12327, 12384), 'numpy.any', 'np.any', (['[(individual1 == ind) for ind in new_individuals]'], {}), '([(individual1 == ind) for ind in new_individuals])\n', (12333, 12384), True, 'import numpy as np\n'), ((12455, 12512), 'numpy.any', 'np.any', (['[(individual2 == ind) for ind in new_individuals]'], {}), '([(individual2 == ind) for ind in new_individuals])\n', (12461, 12512), True, 'import numpy as np\n'), ((10148, 10176), 'numpy.abs', 'np.abs', (['(self._domain[d] - v1)'], {}), '(self._domain[d] - v1)\n', (10154, 10176), True, 'import numpy as np\n'), ((10270, 10298), 'numpy.abs', 'np.abs', (['(self._domain[d] - v2)'], {}), '(self._domain[d] - v2)\n', (10276, 10298), True, 'import numpy as np\n'), ((11242, 11277), 'numpy.abs', 'np.abs', (['(self._domain[d] - new_value)'], {}), '(self._domain[d] - new_value)\n', (11248, 11277), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np
import chainer
from chainer import cuda, serializers, Variable # , optimizers, training
import cv2
import os.path
#import chainer.functions as F
#import chainer.links as L
#import six
#import os
#from chainer.training import extensions
#from train import Image2ImageDataset
from img2imgDataset import ImageAndRefDataset
import unet
import lnet
class Painter:
def __init__(self, gpu=0):
print("start")
self.root = "./images/"
self.batchsize = 1
self.outdir = self.root + "out/"
self.outdir_min = self.root + "out_min/"
self.gpu = gpu
self._dtype = np.float32
if not os.path.isfile("./models/unet_128_standard"):
print("./models/unet_128_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/")
if not os.path.isfile("./models/unet_512_standard"):
print("./models/unet_512_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/")
print("load model")
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
cuda.set_max_workspace_size(64 * 1024 * 1024) # 64MB
chainer.Function.type_check_enable = False
self.cnn_128 = unet.UNET()
self.cnn_512 = unet.UNET()
if self.gpu >= 0:
self.cnn_128.to_gpu()
self.cnn_512.to_gpu()
#lnn = lnet.LNET()
#serializers.load_npz("./cgi-bin/wnet/models/model_cnn_128_df_4", cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2", cnn_128)
serializers.load_npz(
"./models/unet_128_standard", self.cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1", self.cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_1.6", self.cnn)
serializers.load_npz(
"./models/unet_512_standard", self.cnn_512)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_p2_1", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_10000", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/liner_f", lnn)
def save_as_img(self, array, name):
array = array.transpose(1, 2, 0)
array = array.clip(0, 255).astype(np.uint8)
array = cuda.to_cpu(array)
(major, minor, _) = cv2.__version__.split(".")
if major == '3':
img = cv2.cvtColor(array, cv2.COLOR_YUV2RGB)
else:
img = cv2.cvtColor(array, cv2.COLOR_YUV2BGR)
cv2.imwrite(name, img)
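    # Run the line-extraction network (lnet.LNET) on a grayscale input and save the result.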
def liner(self, id_str):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
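        # FIXME: `path1` is not defined anywhere in this snippet; it must point to the
        # grayscale source image for cv2.imread below to succeed.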
image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
image1 = np.asarray(image1, self._dtype)
if image1.ndim == 2:
image1 = image1[:, :, np.newaxis]
img = image1.transpose(2, 0, 1)
x = np.zeros((1, 3, img.shape[1], img.shape[2]), dtype='f')
if self.gpu >= 0:
x = cuda.to_gpu(x)
lnn = lnet.LNET()
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
y = lnn.calc(Variable(x))
self.save_as_img(y.data[0], self.root + "line/" + id_str + ".jpg")
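    # step selects the stage: 'S' runs only the 128-px net and writes to out_min/,
    # 'L' runs the 512-px net on the minimized output, and 'C' chains both nets.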
def colorize(self, id_str, step='C', blur=0, s_size=128,colorize_format="jpg"):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
_ = {'S': "ref/", 'L': "out_min/", 'C': "ref/"}
dataset = ImageAndRefDataset(
[id_str + ".png"], self.root + "line/", self.root + _[step])
_ = {'S': True, 'L': False, 'C': True}
sample = dataset.get_example(0, minimize=_[step], blur=blur, s_size=s_size)
_ = {'S': 0, 'L': 1, 'C': 0}[step]
sample_container = np.zeros(
(1, 4, sample[_].shape[1], sample[_].shape[2]), dtype='f')
sample_container[0, :] = sample[_]
if self.gpu >= 0:
sample_container = cuda.to_gpu(sample_container)
cnn = {'S': self.cnn_128, 'L': self.cnn_512, 'C': self.cnn_128}
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
image_conv2d_layer = cnn[step].calc(Variable(sample_container))
del sample_container
if step == 'C':
input_bat = np.zeros((1, 4, sample[1].shape[1], sample[1].shape[2]), dtype='f')
print(input_bat.shape)
input_bat[0, 0, :] = sample[1]
output = cuda.to_cpu(image_conv2d_layer.data[0])
del image_conv2d_layer # release memory
for channel in range(3):
input_bat[0, 1 + channel, :] = cv2.resize(
output[channel, :],
(sample[1].shape[2], sample[1].shape[1]),
interpolation=cv2.INTER_CUBIC)
if self.gpu >= 0:
link = cuda.to_gpu(input_bat, None)
else:
link = input_bat
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
image_conv2d_layer = self.cnn_512.calc(Variable(link))
del link # release memory
image_out_path = {
'S': self.outdir_min + id_str + ".png",
'L': self.outdir + id_str + ".jpg",
'C': self.outdir + id_str + "_0." + colorize_format}
self.save_as_img(image_conv2d_layer.data[0], image_out_path[step])
del image_conv2d_layer
if __name__ == '__main__':
for n in range(1):
p = Painter()
print(n)
        p.colorize(str(n * p.batchsize))  # colorize expects a string image id
|
[
"lnet.LNET",
"chainer.cuda.set_max_workspace_size",
"cv2.resize",
"chainer.Variable",
"chainer.cuda.get_device",
"chainer.serializers.load_npz",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.asarray",
"numpy.zeros",
"chainer.cuda.to_cpu",
"cv2.imread",
"unet.UNET",
"cv2.__version__.split",
"chainer.cuda.to_gpu",
"chainer.no_backprop_mode",
"chainer.using_config",
"img2imgDataset.ImageAndRefDataset"
] |
[((1298, 1309), 'unet.UNET', 'unet.UNET', ([], {}), '()\n', (1307, 1309), False, 'import unet\n'), ((1333, 1344), 'unet.UNET', 'unet.UNET', ([], {}), '()\n', (1342, 1344), False, 'import unet\n'), ((1649, 1713), 'chainer.serializers.load_npz', 'serializers.load_npz', (['"""./models/unet_128_standard"""', 'self.cnn_128'], {}), "('./models/unet_128_standard', self.cnn_128)\n", (1669, 1713), False, 'from chainer import cuda, serializers, Variable\n'), ((1918, 1982), 'chainer.serializers.load_npz', 'serializers.load_npz', (['"""./models/unet_512_standard"""', 'self.cnn_512'], {}), "('./models/unet_512_standard', self.cnn_512)\n", (1938, 1982), False, 'from chainer import cuda, serializers, Variable\n'), ((2394, 2412), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['array'], {}), '(array)\n', (2405, 2412), False, 'from chainer import cuda, serializers, Variable\n'), ((2441, 2467), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (2462, 2467), False, 'import cv2\n'), ((2629, 2651), 'cv2.imwrite', 'cv2.imwrite', (['name', 'img'], {}), '(name, img)\n', (2640, 2651), False, 'import cv2\n'), ((2770, 2809), 'cv2.imread', 'cv2.imread', (['path1', 'cv2.IMREAD_GRAYSCALE'], {}), '(path1, cv2.IMREAD_GRAYSCALE)\n', (2780, 2809), False, 'import cv2\n'), ((2827, 2858), 'numpy.asarray', 'np.asarray', (['image1', 'self._dtype'], {}), '(image1, self._dtype)\n', (2837, 2858), True, 'import numpy as np\n'), ((2986, 3041), 'numpy.zeros', 'np.zeros', (['(1, 3, img.shape[1], img.shape[2])'], {'dtype': '"""f"""'}), "((1, 3, img.shape[1], img.shape[2]), dtype='f')\n", (2994, 3041), True, 'import numpy as np\n'), ((3114, 3125), 'lnet.LNET', 'lnet.LNET', ([], {}), '()\n', (3123, 3125), False, 'import lnet\n'), ((3570, 3649), 'img2imgDataset.ImageAndRefDataset', 'ImageAndRefDataset', (["[id_str + '.png']", "(self.root + 'line/')", '(self.root + _[step])'], {}), "([id_str + '.png'], self.root + 'line/', self.root + _[step])\n", (3588, 3649), False, 'from img2imgDataset import ImageAndRefDataset\n'), ((3866, 3933), 'numpy.zeros', 'np.zeros', (['(1, 4, sample[_].shape[1], sample[_].shape[2])'], {'dtype': '"""f"""'}), "((1, 4, sample[_].shape[1], sample[_].shape[2]), dtype='f')\n", (3874, 3933), True, 'import numpy as np\n'), ((1166, 1211), 'chainer.cuda.set_max_workspace_size', 'cuda.set_max_workspace_size', (['(64 * 1024 * 1024)'], {}), '(64 * 1024 * 1024)\n', (1193, 1211), False, 'from chainer import cuda, serializers, Variable\n'), ((2511, 2549), 'cv2.cvtColor', 'cv2.cvtColor', (['array', 'cv2.COLOR_YUV2RGB'], {}), '(array, cv2.COLOR_YUV2RGB)\n', (2523, 2549), False, 'import cv2\n'), ((2582, 2620), 'cv2.cvtColor', 'cv2.cvtColor', (['array', 'cv2.COLOR_YUV2BGR'], {}), '(array, cv2.COLOR_YUV2BGR)\n', (2594, 2620), False, 'import cv2\n'), ((3084, 3098), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x'], {}), '(x)\n', (3095, 3098), False, 'from chainer import cuda, serializers, Variable\n'), ((3139, 3165), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (3163, 3165), False, 'import chainer\n'), ((4048, 4077), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['sample_container'], {}), '(sample_container)\n', (4059, 4077), False, 'from chainer import cuda, serializers, Variable\n'), ((4164, 4190), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (4188, 4190), False, 'import chainer\n'), ((4405, 4472), 'numpy.zeros', 'np.zeros', (['(1, 4, sample[1].shape[1], sample[1].shape[2])'], {'dtype': '"""f"""'}), "((1, 4, sample[1].shape[1], sample[1].shape[2]), dtype='f')\n", 
(4413, 4472), True, 'import numpy as np\n'), ((4573, 4612), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['image_conv2d_layer.data[0]'], {}), '(image_conv2d_layer.data[0])\n', (4584, 4612), False, 'from chainer import cuda, serializers, Variable\n'), ((3184, 3220), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (3204, 3220), False, 'import chainer\n'), ((4209, 4245), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (4229, 4245), False, 'import chainer\n'), ((4751, 4858), 'cv2.resize', 'cv2.resize', (['output[channel, :]', '(sample[1].shape[2], sample[1].shape[1])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(output[channel, :], (sample[1].shape[2], sample[1].shape[1]),\n interpolation=cv2.INTER_CUBIC)\n', (4761, 4858), False, 'import cv2\n'), ((4972, 5000), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['input_bat', 'None'], {}), '(input_bat, None)\n', (4983, 5000), False, 'from chainer import cuda, serializers, Variable\n'), ((5069, 5095), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (5093, 5095), False, 'import chainer\n'), ((1122, 1147), 'chainer.cuda.get_device', 'cuda.get_device', (['self.gpu'], {}), '(self.gpu)\n', (1137, 1147), False, 'from chainer import cuda, serializers, Variable\n'), ((2720, 2745), 'chainer.cuda.get_device', 'cuda.get_device', (['self.gpu'], {}), '(self.gpu)\n', (2735, 2745), False, 'from chainer import cuda, serializers, Variable\n'), ((3251, 3262), 'chainer.Variable', 'Variable', (['x'], {}), '(x)\n', (3259, 3262), False, 'from chainer import cuda, serializers, Variable\n'), ((3463, 3488), 'chainer.cuda.get_device', 'cuda.get_device', (['self.gpu'], {}), '(self.gpu)\n', (3478, 3488), False, 'from chainer import cuda, serializers, Variable\n'), ((4299, 4325), 'chainer.Variable', 'Variable', (['sample_container'], {}), '(sample_container)\n', (4307, 4325), False, 'from chainer import cuda, serializers, Variable\n'), ((5118, 5154), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (5138, 5154), False, 'import chainer\n'), ((5215, 5229), 'chainer.Variable', 'Variable', (['link'], {}), '(link)\n', (5223, 5229), False, 'from chainer import cuda, serializers, Variable\n')]
|
import datetime
import time
import logging
from common.utils import getSiteDBCollection
from common.utils import getSiteDB
from common.utils import getLatestUserOrderDatetime
from api.mongo_client import MongoClient
def insertUserOrderFromRawLog(connection, site_id, raw_log):
c_user_orders = getSiteDBCollection(connection, site_id, "user_orders")
amount = 0
for order_item in raw_log["order_content"]:
amount += float(order_item["price"]) * int(order_item["amount"])
c_user_orders.insert({"user_id": raw_log["filled_user_id"], "order_datetime": raw_log["created_on"],
"raw_log_id": raw_log["_id"], "amount": amount})
def doUpdateUserOrdersCollection(connection, site_id):
c_raw_logs = getSiteDBCollection(connection, site_id, "raw_logs")
c_user_orders = getSiteDBCollection(connection, site_id, "user_orders")
latest_order_datetime = getLatestUserOrderDatetime(connection, site_id)
query_condition = {"behavior": "PLO"}
if latest_order_datetime is not None:
query_condition["created_on"] = {"$gt": latest_order_datetime}
# scan for and add new user_orders
# NOTE: sort "created_on" to ensure scanning from oldest to newest (otherwise we will miss some logs next time if this process fails on the half way)
for raw_log in c_raw_logs.find(query_condition).sort("created_on", 1):
        if "filled_user_id" in raw_log and not raw_log["filled_user_id"].startswith("ANO_"):
insertUserOrderFromRawLog(connection, site_id, raw_log)
# process those raw_logs which was previously filled with an "ANO_" user id and now got identified as a registered user.
c_tmp_user_identified_logs_plo = getSiteDBCollection(connection, site_id, "tmp_user_identified_logs_plo")
for tmp_user_identified_log_plo in c_tmp_user_identified_logs_plo.find():
raw_log = c_raw_logs.find_one({"_id": tmp_user_identified_log_plo["log_id"]})
insertUserOrderFromRawLog(connection, site_id, raw_log)
c_tmp_user_identified_logs_plo.remove({"_id": tmp_user_identified_log_plo["_id"]})
# NOTE: This function is only for small set of data
EMAILING_USER_ORDERS_MAX_DAY = 14
EXPECTED_RECOMMENDATION_ITEMS = 5
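# Rebuild the edm_emailing_list collection: users with orders inside the recent window
# are included only when a full set of EXPECTED_RECOMMENDATION_ITEMS items can be recommended.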
def generateEdmEmailingList(connection, site_id):
logger = logging.getLogger("EDMCalculations")
c_user_orders = getSiteDBCollection(connection, site_id, "user_orders")
latest_order_datetime = getLatestUserOrderDatetime(connection, site_id)
if latest_order_datetime is None:
query = {}
else:
query = {"order_datetime": {"$gte": latest_order_datetime \
- datetime.timedelta(days=EMAILING_USER_ORDERS_MAX_DAY)}}
db = getSiteDB(connection, site_id)
result = db.command({"distinct": "user_orders", "key": "user_id",
"query": query})
user_ids = result["values"]
mongo_client = MongoClient(connection)
c_edm_emailing_list = getSiteDBCollection(connection, site_id, "edm_emailing_list")
c_edm_emailing_list.drop()
c_edm_emailing_list = getSiteDBCollection(connection, site_id, "edm_emailing_list")
count = 0
t0 = time.time()
for user_id in user_ids:
count += 1
if count % 100 == 0:
logger.info("Count: %s, %s users/sec" % (count, count/(time.time() - t0)))
recommendation_result, _ = mongo_client.recommend_for_edm(site_id, user_id,
max_amount=EXPECTED_RECOMMENDATION_ITEMS)
if len(recommendation_result) == EXPECTED_RECOMMENDATION_ITEMS:
c_edm_emailing_list.insert({"user_id": user_id, "recommendation_result": recommendation_result})
|
[
"common.utils.getLatestUserOrderDatetime",
"common.utils.getSiteDB",
"common.utils.getSiteDBCollection",
"time.time",
"api.mongo_client.MongoClient",
"datetime.timedelta",
"logging.getLogger"
] |
[((299, 354), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""user_orders"""'], {}), "(connection, site_id, 'user_orders')\n", (318, 354), False, 'from common.utils import getSiteDBCollection\n'), ((745, 797), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""raw_logs"""'], {}), "(connection, site_id, 'raw_logs')\n", (764, 797), False, 'from common.utils import getSiteDBCollection\n'), ((818, 873), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""user_orders"""'], {}), "(connection, site_id, 'user_orders')\n", (837, 873), False, 'from common.utils import getSiteDBCollection\n'), ((902, 949), 'common.utils.getLatestUserOrderDatetime', 'getLatestUserOrderDatetime', (['connection', 'site_id'], {}), '(connection, site_id)\n', (928, 949), False, 'from common.utils import getLatestUserOrderDatetime\n'), ((1702, 1774), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""tmp_user_identified_logs_plo"""'], {}), "(connection, site_id, 'tmp_user_identified_logs_plo')\n", (1721, 1774), False, 'from common.utils import getSiteDBCollection\n'), ((2279, 2315), 'logging.getLogger', 'logging.getLogger', (['"""EDMCalculations"""'], {}), "('EDMCalculations')\n", (2296, 2315), False, 'import logging\n'), ((2336, 2391), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""user_orders"""'], {}), "(connection, site_id, 'user_orders')\n", (2355, 2391), False, 'from common.utils import getSiteDBCollection\n'), ((2420, 2467), 'common.utils.getLatestUserOrderDatetime', 'getLatestUserOrderDatetime', (['connection', 'site_id'], {}), '(connection, site_id)\n', (2446, 2467), False, 'from common.utils import getLatestUserOrderDatetime\n'), ((2702, 2732), 'common.utils.getSiteDB', 'getSiteDB', (['connection', 'site_id'], {}), '(connection, site_id)\n', (2711, 2732), False, 'from common.utils import getSiteDB\n'), ((2893, 2916), 'api.mongo_client.MongoClient', 'MongoClient', (['connection'], {}), '(connection)\n', (2904, 2916), False, 'from api.mongo_client import MongoClient\n'), ((2943, 3004), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""edm_emailing_list"""'], {}), "(connection, site_id, 'edm_emailing_list')\n", (2962, 3004), False, 'from common.utils import getSiteDBCollection\n'), ((3062, 3123), 'common.utils.getSiteDBCollection', 'getSiteDBCollection', (['connection', 'site_id', '"""edm_emailing_list"""'], {}), "(connection, site_id, 'edm_emailing_list')\n", (3081, 3123), False, 'from common.utils import getSiteDBCollection\n'), ((3147, 3158), 'time.time', 'time.time', ([], {}), '()\n', (3156, 3158), False, 'import time\n'), ((2637, 2690), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'EMAILING_USER_ORDERS_MAX_DAY'}), '(days=EMAILING_USER_ORDERS_MAX_DAY)\n', (2655, 2690), False, 'import datetime\n'), ((3303, 3314), 'time.time', 'time.time', ([], {}), '()\n', (3312, 3314), False, 'import time\n')]
|
import asyncio
# TODO: consider implementing subsets, unions, intersections, etc. later; the minimal
# element to consider is the mutually exclusive period.
from loguru import logger
class AsyncExclusivePeriod:
obj_has_async_exclusive_periods = {}
@classmethod
def create_obj_periods(cls, obj, *period_names: str):
'''
Initially create periods for some object.
:param obj:
        :param period_names: Period names. The first one becomes the initial period.
:return:
'''
if obj not in cls.obj_has_async_exclusive_periods.keys():
cls.obj_has_async_exclusive_periods[obj] = cls.obj_has_async_exclusive_periods.get(obj, {})
for period_name in period_names:
cls.obj_has_async_exclusive_periods[obj][period_name] = AsyncExclusivePeriod(period_name)
cls.set_obj_period(obj, period_names[0])
else:
raise KeyError(f'{repr(obj)} has already got some periods! Please use add_period.')
@classmethod
def add_period(cls, obj, new_period_name: str):
'''
Dynamically add a period for some object.
:return:
'''
if obj not in cls.obj_has_async_exclusive_periods.keys():
cls.create_obj_periods(obj, new_period_name)
else:
cls.obj_has_async_exclusive_periods[obj][new_period_name] = AsyncExclusivePeriod(new_period_name)
@classmethod
def _get_obj_period(cls, obj, period_name: str):
if obj in cls.obj_has_async_exclusive_periods.keys() and \
period_name in cls.obj_has_async_exclusive_periods[obj].keys():
return cls.obj_has_async_exclusive_periods[obj][period_name]
else:
raise KeyError(f'You did not create {period_name} for {repr(obj)}!')
@classmethod
def get_obj_present_period(cls, obj):
for name, period in cls._get_obj_periods(obj).items():
if period._get_state():
return name
@classmethod
def get_obj_period_names(cls, obj):
if obj in cls.obj_has_async_exclusive_periods.keys():
return cls.obj_has_async_exclusive_periods[obj].keys()
else:
raise KeyError(f'You did not create any AsyncExclusivePeriod for {repr(obj)}!')
@classmethod
def _get_obj_periods(cls, obj):
if obj in cls.obj_has_async_exclusive_periods.keys():
return cls.obj_has_async_exclusive_periods[obj]
@classmethod
def set_obj_period(cls, obj, period_name: str):
for name, period in cls._get_obj_periods(obj).items():
            # Target period: enable it; all other periods are disabled below.
if name == period_name:
period._ensure_state(True)
else:
period._ensure_state(False)
logger.debug(f'set {repr(obj)} to period {period_name}.')
@classmethod
async def wait_inside_period(cls, obj, period_name: str):
period: cls = cls._get_obj_period(obj, period_name)
await period._wait_true()
@classmethod
async def wait_outside_period(cls, obj, period_name: str):
period: cls = cls._get_obj_period(obj, period_name)
await period._wait_false()
@classmethod
async def wait_enter_period(cls, obj, period_name: str):
period: cls = cls._get_obj_period(obj, period_name)
await period._wait_change_into_true()
@classmethod
async def wait_exit_period(cls, obj, period_name: str):
period: cls = cls._get_obj_period(obj, period_name)
await period._wait_change_into_false()
def __init__(self, name):
self._true_event = asyncio.Event()
self._false_event = asyncio.Event()
self._name = name
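    # Two complementary events encode the period state, so coroutines can await either
    # the "inside" edge (_true_event) or the "outside" edge (_false_event).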
def _ensure_state(self, state: bool):
if state:
if not self._true_event.is_set():
self._true_event.set()
if self._false_event.is_set():
self._false_event.clear()
else:
if self._true_event.is_set():
self._true_event.clear()
if not self._false_event.is_set():
self._false_event.set()
def _get_state(self):
return self._true_event.is_set() and not self._false_event.is_set()
async def _wait_true(self):
await self._true_event.wait()
async def _wait_false(self):
await self._false_event.wait()
async def _wait_change_into_true(self):
if self._get_state():
await self._wait_false()
await self._wait_true()
else:
await self._wait_true()
async def _wait_change_into_false(self):
if self._get_state():
await self._wait_false()
else:
await self._wait_true()
await self._wait_false()
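# Illustrative usage (the period names are examples, not from the original):
#   AsyncExclusivePeriod.create_obj_periods(obj, 'idle', 'busy')   # 'idle' is the initial period
#   AsyncExclusivePeriod.set_obj_period(obj, 'busy')
#   await AsyncExclusivePeriod.wait_enter_period(obj, 'idle')      # from inside a coroutine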
|
[
"asyncio.Event"
] |
[((3497, 3512), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (3510, 3512), False, 'import asyncio\n'), ((3541, 3556), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (3554, 3556), False, 'import asyncio\n')]
|