id (string, 3-8 chars) | content (string, 100-981k chars)
|---|---|
11524648
|
import mxnet as mx
from mxnet import gluon
from mxnet import nd
from mxnet.gluon.loss import Loss, _reshape_like
import horovod.mxnet as hvd
def _as_list(arr):
"""Make sure input is a list of mxnet NDArray"""
if not isinstance(arr, (list, tuple)):
return [arr]
return arr
class SSDMultiBoxLoss(Loss):
r"""Single-Shot Multibox Object Detection Loss.
.. note::
Since cross device synchronization is required to compute batch-wise statistics,
it is slightly sub-optimal compared with non-sync version. However, we find this
is better for converged model performance.
Parameters
----------
local_batch_size: int
The size of mini-batch.
batch_axis : int, default 0
The axis that represents mini-batch.
weight : float or None
Global scalar weight for loss.
negative_mining_ratio : float, default is 3
Ratio of negative vs. positive samples.
rho : float, default is 1.0
Threshold for trimmed mean estimator. This is the smooth parameter for the
L1-L2 transition.
lambd : float, default is 1.0
Relative weight between classification and box regression loss.
The overall loss is computed as :math:`L = loss_{class} + \lambda \times loss_{loc}`.
min_hard_negatives : int, default is 0
Minimum number of negatives samples.
"""
def __init__(self, net, local_batch_size, bulk_last_wgrad=False,
batch_axis=0, weight=None, negative_mining_ratio=3,
rho=1.0, lambd=1.0, min_hard_negatives=0, **kwargs):
super(SSDMultiBoxLoss, self).__init__(weight, batch_axis, **kwargs)
self.net = net
self.bulk_last_wgrad = bulk_last_wgrad
self._negative_mining_ratio = max(0, negative_mining_ratio)
self._rho = rho
self._lambd = lambd
self._min_hard_negatives = max(0, min_hard_negatives)
        # precompute arange index tensors for scatter_nd
self.s0 = local_batch_size
self.s1 = 8732 # TODO(ahmadki): hard coded :(
r_init = mx.nd.arange(0, self.s1).tile(reps=(self.s0, 1)).reshape((1,-1)).squeeze(axis=0)
idx_r_init = mx.nd.arange(0, self.s0).repeat(repeats=self.s1) # row indices
with self.name_scope():
self.cls_target = self.params.get('cls_target',
shape=(local_batch_size, self.s1),
differentiable=False)
self.box_target = self.params.get('box_target',
shape=(local_batch_size, self.s1, 4),
differentiable=False)
self.r = self.params.get('r', shape=r_init.shape,
init=mx.initializer.Constant(r_init),
differentiable=False)
self.idx_r = self.params.get('idx_r', shape=idx_r_init.shape,
init=mx.initializer.Constant(idx_r_init),
differentiable=False)
def hybrid_forward(self, F, images, cls_target, box_target, r, idx_r):
if self.bulk_last_wgrad:
# make the last wgrad use the copy of the input
# so it joins the bulk
images = F.identity(images)
cls_pred, box_pred = self.net(images)
# loss needs to be done in FP32
cls_pred = cls_pred.astype(dtype='float32')
box_pred = box_pred.astype(dtype='float32')
pred = F.log_softmax(cls_pred, axis=-1)
pos = cls_target > 0
pos_num = pos.sum(axis=1)
cls_loss = -F.pick(pred, cls_target, axis=-1, keepdims=False)
idx = (cls_loss * (pos - 1)).argsort(axis=1)
# use scatter_nd to save one argsort
idx_c = idx.reshape((1,-1)).squeeze(axis=0) # column indices
idx = F.stack(idx_r, idx_c)
rank = F.scatter_nd(r, idx, (self.s0, self.s1))
hard_negative = F.broadcast_lesser(rank, F.maximum(self._min_hard_negatives, pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1))
# mask out if not positive or negative
cls_loss = F.where((pos + hard_negative) > 0, cls_loss, F.zeros_like(cls_loss))
cls_loss = F.sum(cls_loss, axis=0, exclude=True)
box_pred = _reshape_like(F, box_pred, box_target)
box_loss = F.abs(box_pred - box_target)
box_loss = F.smooth_l1(data=box_loss, scalar=1.0)
# box loss only apply to positive samples
box_loss = F.broadcast_mul(box_loss, pos.expand_dims(axis=-1))
box_loss = F.sum(box_loss, axis=0, exclude=True)
# normalize loss with num_pos_per_image
# see https://github.com/mlperf/training/blob/master/single_stage_detector/ssd/base_model.py#L201-L204
num_mask = (pos_num > 0).astype('float32')
pos_num = pos_num.astype('float32').clip(a_min=1e-6, a_max=8732)
sum_loss = (num_mask * (cls_loss + self._lambd * box_loss) / pos_num).mean(axis=0)
return sum_loss
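
# A minimal NumPy-only sketch of the scatter_nd rank trick used in
# hybrid_forward above: scattering 0..n-1 into the positions given by the
# first argsort yields the inverse permutation, i.e. the same rank tensor a
# second argsort would produce, without the extra sort.
if __name__ == '__main__':
    import numpy as np
    loss = np.random.rand(2, 5)
    idx = loss.argsort(axis=1)                    # first argsort
    rank_twice = idx.argsort(axis=1)              # conventional double argsort
    rank_scatter = np.empty_like(idx)
    rank_scatter[np.arange(2)[:, None], idx] = np.arange(5)  # scatter ranks instead
    assert (rank_twice == rank_scatter).all()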
|
11524649
|
import numpy as np
a = np.arange(3)
print(a)
# [0 1 2]
b = np.arange(3)
print(b)
# [0 1 2]
c = np.arange(1, 4)
print(c)
# [1 2 3]
print(np.all(a == b))
# True
print(np.all(a == c))
# False
print(np.array_equal(a, b))
# True
print(np.array_equal(a, c))
# False
print(np.array_equiv(a, b))
# True
print(np.array_equiv(a, c))
# False
b_f = np.arange(3, dtype=float)
print(b_f)
# [0. 1. 2.]
print(np.array_equal(a, b_f))
# True
print(np.array_equiv(a, b_f))
# True
ones = np.ones(3)
print(ones)
# [1. 1. 1.]
print(np.array_equal(ones, 1))
# False
print(np.array_equiv(ones, 1))
# True
a_2d = np.array([[0, 1, 2], [0, 1, 2], [0, 1, 2]])
print(a_2d)
# [[0 1 2]
# [0 1 2]
# [0 1 2]]
print(np.array_equal(a_2d, b))
# False
print(np.array_equiv(a_2d, b))
# True
a_nan = np.array([np.nan, 1, 2])
print(a_nan)
# [nan 1. 2.]
b_nan = np.array([np.nan, 1, 2])
print(b_nan)
# [nan 1. 2.]
print(np.array_equal(a_nan, b_nan))
# False
print(np.array_equiv(a_nan, b_nan))
# False
print(np.all(a_nan == b_nan))
# False
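# With NumPy 1.19 or newer (version assumption), array_equal can treat NaNs
# in matching positions as equal via the equal_nan flag.
print(np.array_equal(a_nan, b_nan, equal_nan=True))
# True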
|
11524658
|
import pylearn2
import pylearn2.datasets as ds
import pickle
import numpy as np
train_sets = []
valid_sets = []
test_sets = []
for i in range(5):
with open("../../data/pylearn2/train_car_{:02d}.pkl".format(i),'r') as f:
train_sets.append(pickle.load(f))
with open("../../data/pylearn2/valid_car_{:02d}.pkl".format(i),'r') as f:
valid_sets.append(pickle.load(f))
with open("../../data/pylearn2/test_car_{:02d}.pkl".format(i),'r') as f:
test_sets.append(pickle.load(f))
train_X = np.concatenate([train_set.X for train_set in train_sets], axis=0)
valid_X = np.concatenate([valid_set.X for valid_set in valid_sets], axis=0)
test_X = np.concatenate([test_set.X for test_set in test_sets], axis=0)
train_y = np.concatenate([train_set.y for train_set in train_sets], axis=0)
valid_y = np.concatenate([valid_set.y for valid_set in valid_sets], axis=0)
test_y = np.concatenate([test_set.y for test_set in test_sets], axis=0)
print(train_X.shape)
print(valid_X.shape)
print(test_X.shape)
train_set = ds.DenseDesignMatrix(X=train_X,y=train_y)
valid_set = ds.DenseDesignMatrix(X=valid_X,y=valid_y)
test_set = ds.DenseDesignMatrix(X=test_X,y=test_y)
with open("../../data/pylearn2/train_car_all.pkl",'w') as f:
pickle.dump(train_set,f)
with open("../../data/pylearn2/valid_car_all.pkl",'w') as f:
pickle.dump(valid_set,f)
with open("../../data/pylearn2/test_car_all.pkl",'w') as f:
pickle.dump(test_set,f)
|
11524706
|
import sys
import os
import argparse
import winreg
import pathlib
from IPython import embed
from traitlets.config import get_config
import winsandbox
def read_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interactive", default=False, action="store_true",
help="Interactive mode with a `sandbox` instance available. Embeds an IPython shell.")
parser.add_argument("--no-vgpu", default=False, action="store_true", help="Disable the virtual GPU.")
parser.add_argument("-s", "--logon-script", required=False,
help="Logon script for non-interactive (ipy) sandbox.", default="")
parser.add_argument("-f", "--folder", action="append",
help="Folders to map to the sandbox.", default=[])
parser.add_argument("-r", "--register", default=False, action="store_true",
help="Register a Shell extension that allows opening executable files in the sandbox")
parser.add_argument("-u", "--unregister", default=False, action="store_true",
help="Unregister the Shell extension")
return parser.parse_args()
def register_shell_extension(name, cli_flags = ''):
key = winreg.CreateKeyEx(winreg.HKEY_CLASSES_ROOT, r'exefile\shell\{}'.format(name))
# Calculate paths
package_root_dir = pathlib.Path(__file__).absolute().parent
icon_path = package_root_dir / 'shell_extension' / 'sandbox.ico'
# Set icon
winreg.SetValueEx(key, 'Icon', 0, winreg.REG_SZ, str(icon_path))
# Set shell script command
command_key = winreg.CreateKeyEx(key, 'Command')
command = '{} -m winsandbox.shell_extension.open_sandboxed "%1" {}'.format(sys.executable, cli_flags)
winreg.SetValue(command_key, None, winreg.REG_SZ, command)
print("Registered the '{}' shell extension successfully!".format(name))
def unregister_shell_extension(name):
try:
winreg.DeleteKey(winreg.HKEY_CLASSES_ROOT, r'exefile\shell\{}\Command'.format(name))
except FileNotFoundError:
print("Shell extension '{}' is not registered".format(name))
return
winreg.DeleteKey(winreg.HKEY_CLASSES_ROOT, r'exefile\shell\{}'.format(name))
print("Unregistered the '{}' shell extension successfully!".format(name))
def main():
args = read_args()
# Handle shell extension commands
try:
if args.register:
register_shell_extension('Open Sandboxed')
register_shell_extension('Run Sandboxed', cli_flags='--run')
return
elif args.unregister:
unregister_shell_extension('Open Sandboxed')
unregister_shell_extension('Run Sandboxed')
return
except PermissionError:
print("Try running again as an Administrator")
return
# Launch a new sandbox
sandbox = winsandbox.new_sandbox(networking=args.interactive,
virtual_gpu=not args.no_vgpu,
logon_script=args.logon_script,
folder_mappers=[winsandbox.FolderMapper(folder_path=folder, read_only=False)
for folder in args.folder])
if args.interactive:
config = get_config()
config.InteractiveShellEmbed.colors = "Neutral" # Workaround to enable colors in embedded IPy.
embed(config=config,
header='Welcome to the Windows Sandbox!\nUse the `sandbox` object to control the sandbox.')
if __name__ == '__main__':
main()
|
11524721
|
from django.apps import AppConfig
class HealthCenterConfig(AppConfig):
name = 'applications.health_center'
|
11524731
|
from setuptools import setup
import os
setup(
name='FlaskSearch',
version='0.1',
url='https://github.com/dhamaniasad/Flask-Search',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description='Powerful search functionality for Flask apps via ElasticSearch',
py_modules=['flask_search'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[x.strip() for x in
open(os.path.join(os.path.dirname(__file__),
'requirements.txt'))],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
11524748
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from numpy import sqrt, exp, array
from inference.mcmc import HamiltonianChain
"""
# Hamiltonian sampling example
Hamiltonian Monte-Carlo (HMC) is an MCMC algorithm that can efficiently
sample from complex PDFs, such as those with strong non-linear
correlations, which present difficulty for other algorithms.
However, it requires not only the log-posterior probability but also
its gradient in order to function. In cases where this gradient can be
calculated analytically, HMC can be very effective.
The implementation of HMC shown here as HamiltonianChain is somewhat
naive, and should at some point be replaced with a more advanced
self-tuning version, such as the NUTS algorithm.
"""
# define a non-linearly correlated posterior distribution
class ToroidalGaussian(object):
def __init__(self):
self.R0 = 1. # torus major radius
self.ar = 10. # torus aspect ratio
self.w2 = (self.R0/self.ar)**2
def __call__(self, theta):
x, y, z = theta
r = sqrt(z**2 + (sqrt(x**2 + y**2) - self.R0)**2)
return -0.5*r**2 / self.w2
def gradient(self, theta):
x, y, z = theta
R = sqrt(x**2 + y**2)
K = 1 - self.R0/R
g = array([K*x, K*y, z])
return -g/self.w2
# create an instance of our posterior class
posterior = ToroidalGaussian()
# create the chain object
chain = HamiltonianChain(posterior = posterior, grad = posterior.gradient, start = [1,0.1,0.1])
# advance the chain to generate the sample
chain.advance(6000)
# choose how many samples will be thrown away from the start
# of the chain as 'burn-in'
chain.burn = 2000
chain.matrix_plot(filename = 'hmc_matrix_plot.png')
# extract sample and probability data from the chain
probs = chain.get_probabilities()
colors = exp(probs - max(probs))
xs, ys, zs = [ chain.get_parameter(i) for i in [0,1,2] ]
import plotly.graph_objects as go
from plotly import offline
fig = go.Figure(data=[go.Scatter3d(
x=xs,
y=ys,
z=zs,
mode='markers',
marker=dict( size=5, color=colors, colorscale='Viridis', opacity=0.6)
)])
fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) # tight layout
offline.plot(fig, filename='hmc_scatterplot.html')
|
11524749
|
from math import sqrt, floor
from collections import Counter
def center_of_geometry(Coordinates):
'''
    Give me x, y, z coordinates, get back the corresponding center of geometry
'''
return [sum(Coordinate)/len(Coordinate) for Coordinate in zip(*Coordinates)]
def distance(Coordinates):
'''
    Euclidean distance between two x, y, z coordinates
'''
x, y, z = zip(*Coordinates)
return sqrt((x[0] - x[1])**2 + (y[0] - y[1])**2 + (z[0] - z[1])**2)
def density(Distances, MinDist=0, MaxDist=10):
'''
    For a given residue's center of geometry, count the number of other residue
    centers of geometry between a min and max distance
'''
Counts = []
for Distance in Distances:
Counts.append(floor(Distance))
Density = []
for Distance, Count in sorted(Counter(Counts).items(), key=lambda Count: Count[0]):
if Distance in range(MinDist, MaxDist):
Density.append(Count)
return sum(Density)
def cumulative_density(Distances, num_res, MinDist=0, MaxDist=10):
'''
    For a given residue's center of geometry, count the number of other residue
    centers of geometry between a min and max distance
'''
Counts = []
for Distance in Distances:
Counts.append(floor(Distance))
Density = []
dists = []
widths = []
for i, (Distance, Count) in enumerate(sorted(Counter(Counts).items(),
key=lambda Count: Count[0])):
if Distance in range(MinDist, MaxDist):
#dists.append(Distance)
if Density == []:
Density.append(Count)
dists.append(Distance)
else:
Density.append(Density[-1] + Count)
widths.append(Distance - dists[-1])
dists.append(Distance)
widths.append(MaxDist+1-Distance)
cum_dens = [Density[k]*widths[k] for k in range(len(Density))]
return float(sum(cum_dens)) / num_res
def density2(Distances, mutation_cts, MinDist=0, MaxDist=10):
'''
    For a given residue's center of geometry, count the number of other residue
    centers of geometry between a min and max distance
'''
#Counts = []
#for Distance in Distances:
#Counts.append(floor(Distance))
#Density = []
#for Distance, Count in sorted(Counter(Counts).items(), key=lambda Count: Count[0]):
#if Distance in range(MinDist, MaxDist):
#Density.append(Count)
return sum(mutation_cts[d[1]] for d in Distances if MinDist <= int(d[0]) <= MaxDist)
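
# Minimal usage sketch with made-up coordinates: two points one unit apart
# along x, so the center of geometry sits halfway between them.
if __name__ == '__main__':
    coords = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
    print(center_of_geometry(coords))  # [0.5, 0.0, 0.0]
    print(distance(coords))            # 1.0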
|
11524768
|
import traceback
import math
import numpy as np
import litenn as nn
import litenn.core as nc
from litenn.core import CLKernelHelper as ph
def conv2DTranspose (input_t, kernel_t, stride=2, dilation=1, padding='same'):
"""
conv2DTranspose operator.
input_t Tensor shape must be
(batch, in_ch, height, width)
kernel_t Tensor shape must be
(out_ch,in_ch, k_height,k_width)
stride(2) int
dilation(1) int
padding(same) 'valid'
'same'
"""
op = nc.Cacheton.get(_Conv2DTransposeOp,input_t.shape, kernel_t.shape, int(stride), int(dilation), padding)
output_t = nn.Tensor( op.output_shape )
output_t._set_op_name('conv2DTranspose')
output_t._assign_gradfn (input_t, lambda O_t, dO_t: conv2DTranspose_dI_gradfn(op, input_t, kernel_t, O_t, dO_t))
output_t._assign_gradfn (kernel_t, lambda O_t, dO_t: conv2DTranspose_dK_gradfn(op, input_t, kernel_t, O_t, dO_t))
out = nc.op.matmul( kernel_t.reshape( (kernel_t.shape[0], -1) ),
op.im2colT(input_t) )
nc.op.transpose( out.reshape(op.OC_N_OH_OW), (1,0,2,3), output_t=output_t)
return output_t
def conv2DTranspose_dI_gradfn(op, input_t, kernel_t, O, dO_t):
kernel_t = kernel_t.transpose((0,2,3,1))
kernel_t = kernel_t.reshape( (-1,kernel_t.shape[-1]) )
dI = nc.op.matmul(op.im2row(dO_t), kernel_t).reshape(op.N_IH_IW_IC)
nc.op.transpose( dI, (0,3,1,2), output_t=input_t.get_grad(), is_add_to_output=True)
def conv2DTranspose_dK_gradfn(op, input_t, kernel_t, O, dO_t):
nc.op.matmul ( dO_t.transpose((1,0,2,3)).reshape(op.OC_NxOHxOW),
op.im2rowT(input_t), output_t=kernel_t.get_grad(), is_add_to_output=True)
class _Conv2DTransposeOp:
def __init__(self, input_shape, kernel_shape, stride, dilation, padding):
if padding not in ['valid','same']:
raise ValueError('Wrong padding value, only valid or same supported for conv2DTranspose.')
N,IC,IH,IW = input_shape
KO,KI,KH,KW = kernel_shape
if KI != IC:
raise ValueError(f'Kernel input channels {KI} does not match input channels {IC}.')
ci = nc.info.InfoConv2D(IH, IW, KH, KW, stride, dilation, padding)
OC, OH, OW = KO, ci.OH_T, ci.OW_T
self.output_shape = output_shape = nc.TensorShape( (N, OC, OH, OW) )
self.OC_N_OH_OW = (OC,N,OH,OW)
self.OC_NxOHxOW = (OC,N*OH*OW)
self.N_IH_IW_IC = (N,IH,IW,IC)
self.im2colT = lambda x: nc.op.unfold2D(x, N, IC, IH, IW, OH, OW, KH, KW, ci.PADL, ci.PADT, dilation, stride, 'CJI_NHW', is_transpose=True)
self.im2rowT = lambda x: nc.op.unfold2D(x, N, IC, IH, IW, OH, OW, KH, KW, ci.PADL, ci.PADT, dilation, stride, 'NHW_CJI', is_transpose=True)
self.im2row = lambda x: nc.op.unfold2D(x, N, OC, OH, OW, IH, IW, KH, KW, ci.PADL, ci.PADT, dilation, stride, 'NHW_CJI', is_transpose=False)
def conv2DTranspose_test():
for padding in ['same','valid']:
for dilation in [1]:
for stride in [3,2,1]:
for ks in [1,3,5,7]:
for n in [4,1]:
for ic in [1,4]:
for oc in [1,4]:
for ih,iw in zip(*[[16,8,4]]*2):
if padding == 'valid' and iw < ks:
continue
try:
input_shape = (n, ic, ih, iw)
kernel_shape = (oc, ic, ks, ks)
input_n = np.random.randint( 2**4, size=input_shape ).astype(np.float32)
kernel_n = np.random.randint( 2**4, size=kernel_shape ).astype(np.float32)
input_t = nn.Tensor_from_value(input_n)
kernel_t = nn.Tensor_from_value(kernel_n)
conved_t = nn.conv2DTranspose(input_t, kernel_t, stride=stride, dilation=dilation, padding=padding)
conved_n_grad = np.random.randint( 2**4, size=conved_t.shape).astype(np.float32)
conved_n, dI_val, dK_val = _numpy_conv2dtranspose(input_n, kernel_n, conved_n_grad, STRIDE=stride, DILATION=dilation, padding=padding)
if conved_n.shape != conved_t.shape:
raise Exception(f'shape is not equal')
if not all ( np.ndarray.flatten( conved_t.np()== conved_n) ):
raise Exception(f'data is not equal')
input_t.get_grad().fill(1.0)
kernel_t.get_grad().fill(1.0)
nn.backward( {conved_t:conved_n_grad}, grad_for_non_trainables=True )
if not all ( np.ndarray.flatten( (input_t.get_grad().np()-1.0) == dI_val) ):
raise Exception(f'dI not equal')
if not all ( np.ndarray.flatten( (kernel_t.get_grad().np()-1.0) == dK_val) ):
raise Exception(f'dK not equal')
except:
raise Exception(f"""
input_shape : {input_shape}
kernel_shape : {kernel_shape}
padding : {padding}
stride : {stride}
dilation : {dilation}
conved_n.shape : {conved_n.shape}
conved_t.shape : {conved_t.shape}
{traceback.format_exc()}
""")
def _numpy_conv2dtranspose(input_n, kernel_n, conved_n_grad, STRIDE=1, DILATION=1, padding='same'):
N, IC, IH, IW = input_n.shape
KO, KI, KH, KW = kernel_n.shape
ci = nc.info.InfoConv2D(IH, IW, KH, KW, STRIDE, DILATION, padding)
PADL, PADT = ci.PADL, ci.PADT
OC, OH, OW = KO, ci.OH_T, ci.OW_T
O_IK_idxs = { idx:[ [ [], [] ], [ [], [] ] ] for idx in range(OH*OW) }
K_IO_idxs = { idx:[ [ [], [] ], [ [], [] ] ] for idx in range(KH*KW) }
I_KO_idxs = { idx:[ [ [], [] ], [ [], [] ] ] for idx in range(IH*IW) }
for ow in range(OW):
for oh in range(OH):
O_idx = oh*OW + ow
for kh in range(KH):
for kw in range(KW):
iw = (PADL + ow - kw*DILATION ) // STRIDE;
ih = (PADT + oh - kh*DILATION ) // STRIDE;
if (iw >= 0) & (ih >= 0) & (iw < IW) & (ih < IH) \
& (ow == (-PADL + kw*DILATION + iw*STRIDE)) \
& (oh == (-PADT + kh*DILATION + ih*STRIDE)):
O_IK_idxs[O_idx][0][0].append (ih)
O_IK_idxs[O_idx][0][1].append (iw)
O_IK_idxs[O_idx][1][0].append (kh)
O_IK_idxs[O_idx][1][1].append (kw)
K_idx = kh*KW + kw
K_IO_idxs[K_idx][0][0].append (ih)
K_IO_idxs[K_idx][0][1].append (iw)
K_IO_idxs[K_idx][1][0].append (oh)
K_IO_idxs[K_idx][1][1].append (ow)
I_idx = ih*IW + iw
I_KO_idxs[I_idx][0][0].append (kh)
I_KO_idxs[I_idx][0][1].append (kw)
I_KO_idxs[I_idx][1][0].append (oh)
I_KO_idxs[I_idx][1][1].append (ow)
output_shape = (N, OC, OH, OW)
output = np.empty( output_shape, np.float32)
for n in range(N):
for oc in range(OC):
for oh in range(OH):
for ow in range(OW):
O_idx = oh*OW + ow
I_idxs = O_IK_idxs[O_idx][0]
K_idxs = O_IK_idxs[O_idx][1]
ic_range = [*range(IC)]
v = ( input_n[n,ic_range][..., I_idxs[0], I_idxs[1]] *
kernel_n[oc,ic_range][..., K_idxs[0], K_idxs[1]] ).sum()
output[n,oc,oh,ow] = v
dK = np.zeros(kernel_n.shape, np.float32)
for oc in range(OC):
for ic in range(IC):
for kh in range(KH):
for kw in range(KW):
K_idx = kh*KW + kw
I_idxs = K_IO_idxs[K_idx][0]
O_idxs = K_IO_idxs[K_idx][1]
n_range = [*range(N)]
v = ( input_n[n_range][..., ic, I_idxs[0], I_idxs[1]] *
conved_n_grad[n_range][..., oc, O_idxs[0], O_idxs[1]] ).sum()
dK[oc,ic,kh,kw] = v
dI = np.zeros(input_n.shape, np.float32)
for n in range(N):
for ic in range(IC):
for ih in range(IH):
for iw in range(IW):
I_idx = ih*IW + iw
K_idxs = I_KO_idxs[I_idx][0]
O_idxs = I_KO_idxs[I_idx][1]
oc_range = [*range(OC)]
v = (kernel_n[oc_range][...,ic, K_idxs[0], K_idxs[1]] *
conved_n_grad[n,oc_range][..., O_idxs[0], O_idxs[1]] ).sum()
dI[n,ic,ih,iw] = v
return output, dI, dK
|
11524780
|
import tensorflow as tf
import edward as ed
from edward.models import Categorical, Normal, Bernoulli
from keras.utils import to_categorical
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score
############################################
### THIS INTERNAL DEFINITION FOR THE NN ####
############################################
#############################
### REPRODUCIBILITY ######
#############################
ed.set_seed(314159)
balanced = False
#############################
## PARAMETER SIMULATION #####
#############################
N = 10 # number of isolated signals in a minibatch.
D = 15 # number of features.
hidden1 = 1250
inferences = 200
nsamples_total = 200 # 200
nhidden_prob = 1250  ####### HERE
# 42, 142, 442, 12345
my_random = 42
initialization = 'glorot_uniform'
K = 7 #number of classes, in our paper, 7
def neural_network(x):
h = tf.tanh(tf.matmul(x, w0) + b0)
h = tf.matmul(h, w1) + b1
return h
def nn_inference(X_test, w0sampled, w1sampled, b0sampled, b1sampled):
first = tf.tanh(tf.matmul(X_test, w0sampled) + b0sampled)
second = tf.matmul(first, w1sampled) + b1sampled
return second
def run_model(D, hidden1, K, X_train, y_train):
w0 = Normal(loc=tf.zeros([D, hidden1]), scale=tf.ones([D, hidden1]))
w1 = Normal(loc=tf.zeros([hidden1, K]), scale=tf.ones([hidden1, K]))
b0 = Normal(loc=tf.zeros(hidden1), scale=tf.ones(hidden1))
b1 = Normal(loc=tf.zeros(K), scale=tf.ones(K))
x = tf.placeholder(tf.float32, [None, D])
y = Categorical(logits=neural_network(x))
qw0 = Normal(loc=tf.Variable(tf.random_normal([D, hidden1])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, hidden1]))))
qw1 = Normal(loc=tf.Variable(tf.random_normal([hidden1, K])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([hidden1, K]))))
qb0 = Normal(loc=tf.Variable(tf.random_normal([hidden1])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([hidden1]))))
qb1 = Normal(loc=tf.Variable(tf.random_normal([K])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([K]))))
# the posterior
y_ph = tf.placeholder(tf.int32, [N])
inference = ed.KLqp({w0: qw0, b0: qb0, w1: qw1, b1: qb1}, data={y: y_ph})
    # Initialize the inference variables
inference.initialize(n_iter=inferences, n_print=100, scale={y: float(nsamples_training) / N})
# We will use an interactive session.
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
data = generator([X_train, y_train], N)
##### INFERENCE
for _ in range(inference.n_iter):
X_batch, Y_batch = next(data)
Y_batch = np.argmax(Y_batch, axis=1)
# TensorFlow method gives the label data in a one hot vector format. We convert that into a single label.
info_dict = inference.update(feed_dict={x: X_batch, y_ph: Y_batch})
inference.print_progress(info_dict)
# once again, we shall convert to get the single label (not one_hot), otherwise
# we will have mismatch dimensions.
# we measure the progress now
n_samples = nsamples_total
y_post = ed.copy(y, {w0: qw0, b0: qb0, w1: qw1, b1: qb1})
prob_lst = []
w0_samples = []
b0_samples = []
w1_samples = []
b1_samples = []
for _ in range(n_samples):
w0_samp = qw0.sample()
b0_samp = qb0.sample()
w1_samp = qw1.sample()
b1_samp = qb1.sample()
w0_samples.append(w0_samp)
b0_samples.append(b0_samp)
w1_samples.append(w1_samp)
b1_samples.append(b1_samp)
prob = tf.nn.softmax(nn_inference(tf.cast(X_test, tf.float32), w0_samp, w1_samp, b0_samp, b1_samp))
prob_lst.append(prob.eval())
# sample = tf.concat([tf.reshape(w_samp, [-1]), b_samp], 0)
# samples.append(sample.eval())
print "... Testing"
Y_pred = np.argmax(np.mean(prob_lst, axis=0), axis=1)
print("accuracy in predicting the test data = ", (Y_pred == y_test).mean() * 100)
print "... Confusion matrix"
print confusion_matrix(y_true=y_test, y_pred=Y_pred)
confusion = confusion_matrix(y_true=y_test, y_pred=Y_pred)
toSave = "%s%s.npy" % ("BNN", my_random)
np.save(toSave, confusion)
precision = precision_score(y_true=y_test, y_pred=Y_pred, average='weighted')
recall = recall_score(y_true=y_test, y_pred=Y_pred, average='weighted')
f1_score = 2 * (precision * recall) / float(precision + recall)
print "Precision BNN weighted %s" % (precision)
print "Recall BNN weighted %s" % (recall)
print "F1 score BNN %s" % (f1_score)
|
11524787
|
import ast
from collections import defaultdict
from typing import Any, Dict, List, Optional, Union
from flake8_plugin_utils import Visitor
from .errors import (
ImplicitReturn,
ImplicitReturnValue,
UnnecessaryAssign,
UnnecessaryReturnNone,
)
from .utils import is_false, is_none
NameToLines = Dict[str, List[int]]
Function = Union[ast.AsyncFunctionDef, ast.FunctionDef]
ASSIGNS = 'assigns'
REFS = 'refs'
RETURNS = 'returns'
class UnnecessaryAssignMixin(Visitor):
def __init__(self) -> None:
super().__init__()
self._loop_count: int = 0
@property
def assigns(self) -> NameToLines:
return self._stack[-1][ASSIGNS]
@property
def refs(self) -> NameToLines:
return self._stack[-1][REFS]
def visit_For(self, node: ast.For) -> None:
self._visit_loop(node)
def visit_AsyncFor(self, node: ast.AsyncFor) -> None:
self._visit_loop(node)
def visit_While(self, node: ast.While) -> None:
self._visit_loop(node)
def _visit_loop(self, node: ast.AST) -> None:
self._loop_count += 1
self.generic_visit(node)
self._loop_count -= 1
def visit_Assign(self, node: ast.Assign) -> None:
if not self._stack:
return
self.generic_visit(node.value)
target = node.targets[0]
if isinstance(target, ast.Tuple) and not isinstance(
node.value, ast.Tuple
):
# skip unpacking assign e.g: x, y = my_object
return
self._visit_assign_target(target)
def visit_Name(self, node: ast.Name) -> None:
if self._stack:
self.refs[node.id].append(node.lineno)
def _visit_assign_target(self, node: ast.AST) -> None:
if isinstance(node, ast.Tuple):
for n in node.elts:
self._visit_assign_target(n)
return
if not self._loop_count and isinstance(node, ast.Name):
self.assigns[node.id].append(node.lineno)
return
# get item, etc.
self.generic_visit(node)
def _check_unnecessary_assign(self, node: ast.AST) -> None:
if not isinstance(node, ast.Name):
return
var_name = node.id
return_lineno = node.lineno
if var_name not in self.assigns:
return
if var_name not in self.refs:
self.error_from_node(UnnecessaryAssign, node)
return
if self._has_refs_before_next_assign(var_name, return_lineno):
return
self.error_from_node(UnnecessaryAssign, node)
def _has_refs_before_next_assign(
self, var_name: str, return_lineno: int
) -> bool:
before_assign = 0
after_assign: Optional[int] = None
for lineno in sorted(self.assigns[var_name]):
if lineno > return_lineno:
after_assign = lineno
break
if lineno <= return_lineno:
before_assign = lineno
for lineno in self.refs[var_name]:
if lineno == return_lineno:
continue
if after_assign:
if before_assign < lineno <= after_assign:
return True
elif before_assign < lineno:
return True
return False
class UnnecessaryReturnNoneMixin(Visitor):
def _check_unnecessary_return_none(self) -> None:
for node in self.returns:
if is_none(node.value):
self.error_from_node(UnnecessaryReturnNone, node)
class ImplicitReturnValueMixin(Visitor):
def _check_implicit_return_value(self) -> None:
for node in self.returns:
if not node.value:
self.error_from_node(ImplicitReturnValue, node)
class ImplicitReturnMixin(Visitor):
def _check_implicit_return(self, last_node: ast.AST) -> None:
if isinstance(last_node, ast.If):
if not last_node.body or not last_node.orelse:
self.error_from_node(ImplicitReturn, last_node)
return
self._check_implicit_return(last_node.body[-1])
self._check_implicit_return(last_node.orelse[-1])
return
if isinstance(last_node, (ast.For, ast.AsyncFor)) and last_node.orelse:
self._check_implicit_return(last_node.orelse[-1])
return
if isinstance(last_node, (ast.With, ast.AsyncWith)):
self._check_implicit_return(last_node.body[-1])
return
if isinstance(last_node, ast.Assert) and is_false(last_node.test):
return
if not isinstance(
last_node, (ast.Return, ast.Raise, ast.While, ast.Try)
):
self.error_from_node(ImplicitReturn, last_node)
class ReturnVisitor(
UnnecessaryAssignMixin,
UnnecessaryReturnNoneMixin,
ImplicitReturnMixin,
ImplicitReturnValueMixin,
):
def __init__(self) -> None:
super().__init__()
self._stack: List[Any] = []
@property
def returns(self) -> List[ast.Return]:
return self._stack[-1][RETURNS]
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
self._visit_with_stack(node)
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
self._visit_with_stack(node)
def _visit_with_stack(self, node: Function) -> None:
self._stack.append(
{ASSIGNS: defaultdict(list), REFS: defaultdict(list), RETURNS: []}
)
self.generic_visit(node)
self._check_function(node)
self._stack.pop()
def visit_Return(self, node: ast.Return) -> None:
self.returns.append(node)
self.generic_visit(node)
def _check_function(self, node: Function) -> None:
if not self.returns or not node.body:
return
if len(node.body) == 1 and isinstance(node.body[-1], ast.Return):
            # skip functions that consist only of `return None`
return
if not self._result_exists():
self._check_unnecessary_return_none()
return
self._check_implicit_return_value()
self._check_implicit_return(node.body[-1])
for n in self.returns:
if n.value:
self._check_unnecessary_assign(n.value)
def _result_exists(self) -> bool:
for node in self.returns:
value = node.value
if value and not is_none(value):
return True
return False
|
11524789
|
from falcon_unzip.dedup_h_tigs import main
import sys
if __name__ == "__main__":
main(sys.argv)
|
11524792
|
import asyncio
from threading import Thread
class Pool:
def __init__(self):
self.loop = asyncio.get_event_loop()
self.loop_runner = Thread(target=self.loop.run_forever)
self.loop_runner.daemon = True
self.loop_runner.start()
def run(self, coro):
return asyncio.run_coroutine_threadsafe(
coro,
self.loop,
)
    async def shutdown(self):
        # flush async generators while the loop is still running, then let
        # run_forever() return in the runner thread
        await self.loop.shutdown_asyncgens()
        self.loop.stop()
def join(self):
self.loop_runner.join()
pool: Pool = None
def setup():
global pool
if pool is not None:
return
pool = Pool()
def join_initialized():
pool.loop_runner.join()
def join_uninitialized():
pass
def run_initialized(coro):
return pool.run(coro)
def run_uninitialized(coro):
global run
global join
global shutdown
setup()
run = run_initialized
join = join_initialized
shutdown = shutdown_initialized
return run_initialized(coro)
def shutdown_initialized():
global run
global join
global shutdown
global pool
run = run_uninitialized
    join = join_uninitialized
    shutdown = shutdown_uninitialized
coro = pool.shutdown()
pool = None
return coro
def shutdown_uninitialized():
pass
run = run_uninitialized
join = join_uninitialized
shutdown = shutdown_uninitialized
|
11524826
|
import sys
assert sys.version_info >= (3, 5)
import os
import pathlib
import json
import open3d as o3d
if __name__ == '__main__':
base_folder = "/cluster/project/infk/courses/3d_vision_21/group_14/1_data"
model_base_folder = os.path.join(base_folder, "ShapeNetCore.v2")
pcd_base_folder = os.path.join(base_folder, "ShapeNetCore.v2-pcd")
model_pool_json = os.path.join(base_folder, "ScanCADJoint", "model_pool_large.json")
down_point: int = 10000
with open(model_pool_json) as f:
model_pool = json.load(f)
for cat, cat_model_pool in model_pool.items():
print("------------ Begin Category [", cat, "] ------------")
pathlib.Path(os.path.join(pcd_base_folder, cat)).mkdir(parents=True, exist_ok=True)
for cad_str in cat_model_pool:
cad_path = os.path.join(model_base_folder, cad_str, "models", "model_normalized.obj")
cad_mesh = o3d.io.read_triangle_mesh(cad_path)
cad_pcd = cad_mesh.sample_points_uniformly(down_point)
#o3d.visualization.draw_geometries([cad_pcd])
pcd_path = os.path.join(pcd_base_folder, cad_str + ".pcd")
o3d.io.write_point_cloud(pcd_path, cad_pcd)
print("Output [", pcd_path, "]")
|
11524857
|
import torch
def to_batch(state, action, reward, next_state, done, device):
state = torch.FloatTensor(state).unsqueeze(0).to(device)
action = torch.FloatTensor([action]).view(1, -1).to(device)
reward = torch.FloatTensor([reward]).unsqueeze(0).to(device)
next_state = torch.FloatTensor(next_state).unsqueeze(0).to(device)
done = torch.FloatTensor([done]).unsqueeze(0).to(device)
return state, action, reward, next_state, done
def update_params(optim, network, loss, grad_clip=None, retain_graph=False):
optim.zero_grad()
loss.backward(retain_graph=retain_graph)
if grad_clip is not None:
for p in network.modules():
torch.nn.utils.clip_grad_norm_(p.parameters(), grad_clip)
optim.step()
def soft_update(target, source, tau):
for t, s in zip(target.parameters(), source.parameters()):
t.data.copy_(t.data * (1.0 - tau) + s.data * tau)
def hard_update(target, source):
target.load_state_dict(source.state_dict())
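
# Minimal usage sketch (the two Linear layers are stand-ins for any pair of
# identically shaped networks): hard_update copies the weights once, then
# soft_update Polyak-averages a small step towards the source.
if __name__ == '__main__':
    online = torch.nn.Linear(4, 2)
    target = torch.nn.Linear(4, 2)
    hard_update(target, online)
    soft_update(target, online, tau=0.005)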
|
11524917
|
import sys
import subprocess
from xml.dom import minidom
import json
class TestResultParser:
    def parse_file(self, filename):
        with open(filename) as f:
            return self.parse(f.read())
def parse(self, xmltext):
doc = minidom.parseString(xmltext)
root = doc.documentElement
test_suites = [self.process_testsuite(e) for e in root.childNodes]
suite = test_suites[0]
testcases = suite['testcases']
stats = {
"tests": suite.get('tests', '-'),
"passed": sum(1 for t in testcases if t['outcome'] == 'passed'),
"failed": sum(1 for t in testcases if t['outcome'] == 'failed'),
"time_taken": suite.get('time')
}
return {
"testcases": testcases,
"stats": stats
}
def process_testsuite(self, e):
d = self.get_attrs(e)
nodes = e.getElementsByTagName("testcase")
d['testcases'] =[self.process_testcase(node) for node in nodes]
return d
def process_testcase(self, e):
d = self.get_attrs(e)
filename = d['classname'] + ".py"
name = d['name']
time_taken = d['time']
outcome = "passed"
d = {
"filename": filename,
"name": name,
"time_taken": time_taken,
"outcome": outcome
}
nodes = e.getElementsByTagName("failure")
if nodes:
d['outcome'] = "failed"
d.update(self.process_failure(nodes[0]))
return d
def get_attrs(self, e):
return {k: a.value for k, a in dict(e.attributes).items()}
def get_text(self, e):
rc = []
for node in e.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def process_failure(self, e):
return {
"error_message": e.getAttribute("message"),
"error_detail": self.get_text(e)
}
cmd = "py.test -p no:cacheprovider --junitxml /tmp/pytest.xml".split()
p = subprocess.run(cmd, capture_output=True)
d = TestResultParser().parse_file("/tmp/pytest.xml")
print(json.dumps(d, indent=True))
sys.exit(p.returncode)
|
11524927
|
import argparse
import sys
import csv
import os
parser = argparse.ArgumentParser()
parser.add_argument('--primer-snp-bed', type=str, required=True)
parser.add_argument('--amplicon-depth-tsv', type=str, required=True)
parser.add_argument('--sample-name', type=str, default="none")
args, files = parser.parse_known_args()
def main():
print("\t".join(["sample", "contig", "position", "ref", "alt", "primer_name", "amplicon", "depth"]))
# read in the depth of each amplicon
depth_map = dict()
with open(args.amplicon_depth_tsv, 'r') as ifh:
reader = csv.DictReader(ifh, delimiter='\t')
for record in reader:
depth_map[record['amplicon_id']] = record['mean_depth']
# read in the bed file containing mutations that overlap primers
seen = dict()
with(open(args.primer_snp_bed, 'r')) as ifh:
for line in ifh:
fields = line.rstrip().split()
assert(len(fields) == 16)
primer_name = fields[13]
primer_direction_idx = max(primer_name.find("_LEFT"), primer_name.find("_RIGHT"))
if primer_direction_idx == -1:
sys.stderr.write("Could not parse primer name %s" % (primer_name))
sys.exit(1)
amplicon_id = primer_name[0:primer_direction_idx]
# skip duplicate entries when a mutation lands in ALT versions of primers
key = ":".join([fields[0], fields[1], fields[3], fields[4], amplicon_id])
if key not in seen:
print("\t".join([args.sample_name, fields[0], fields[1], fields[3], fields[4], primer_name, amplicon_id, depth_map[amplicon_id]]))
seen[key] = 1
if __name__ == '__main__':
main()
|
11524940
|
from osr2mp4.ImageProcess import imageproc
from osr2mp4.ImageProcess.PrepareFrames.YImage import YImage
scoreboard = "menu-button-background"
def prepare_scoreboard(scale, settings):
"""
:param scale: float
:return: [PIL.Image]
"""
img = YImage(scoreboard, settings, scale).img
img = img.crop((int(img.size[0] * 2/3), 0, img.size[0], img.size[1]))
img = img.resize((int(140 * scale), int(64 * scale)))
imageproc.changealpha(img, 0.3)
playerimg = imageproc.add_color(img, [80, 80, 80])
img = imageproc.add_color(img, [60, 70, 120])
return [img, playerimg]
|
11524983
|
import os
SERVICE_VERSIONS = (
("service-less-equal", "2.2", "3.0"),
("service-greater-equal", "1.5", "2.2"),
("service-equal", "2.2", '2.2'),
('service-less', '2.3', '2.4'),
("service-greater", '1', '2'),
)
CLUSTER_VERSIONS = (
("cluster-less-equal", "1.6", "2.0"),
("cluster-greater-equal", "1.0", "1.6"),
("cluster-equal", "1.6", '1.6'),
('cluster-less', '1.7', '2.4'),
("cluster-greater", '0.5', '0.9'),
)
TEMPLATE_SERVICE = """
-
type: cluster
name: ADH
version: 1.6
upgrade:
- versions:
min: 0.4
max: 1.5
name: upgrade to 1.6
description: New cool upgrade
states:
available: any
on_success: upgradable
- versions:
min: 1.0
max: 1.8
description: Super new upgrade
name: upgrade 2
states:
available: [created, installed, upgradable]
on_success: upgradated
import:
hadoop:
versions:
min_strict: {0}
max_strict: {1}
ADH:
versions:
min_strict: 0.1
max_strict: 4.0
- type: service
name: hadoop
version: 2.2
config:
core-site:
param1:
type: string
required: false
param2:
type: integer
required: false
quorum:
type: integer
default: 3
"""
TEMPLATE_CLUSTER = """
-
type: cluster
name: ADH
version: 1.6
upgrade:
- versions:
min: 0.4
max: 1.5
name: upgrade to 1.6
description: New cool upgrade
states:
available: any
on_success: upgradable
- versions:
min: 1.0
max: 1.8
description: Super new upgrade
name: upgrade 2
states:
available: [created, installed, upgradable]
on_success: upgradated
import:
hadoop:
versions:
min_strict: 1.5
max_strict: 2.5
ADH:
versions:
min_strict: {0}
max_strict: {1}
- type: service
name: hadoop
version: 2.2
config:
core-site:
param1:
type: string
required: false
param2:
type: integer
required: false
quorum:
type: integer
default: 3
"""
for t in SERVICE_VERSIONS:
d_name = f"upgradable_cluster_with_strict_incorrect_version/{t[0]}"
os.makedirs(d_name)
with open(f"{d_name}/config.yaml", "w+", encoding='utf_8') as f:
f.write(TEMPLATE_SERVICE.format(t[1], t[2]))
for t in CLUSTER_VERSIONS:
d_name = f"upgradable_cluster_with_strict_incorrect_version/{t[0]}"
os.makedirs(d_name)
with open(f"{d_name}/config.yaml", "w+", encoding='utf_8') as f:
f.write(TEMPLATE_CLUSTER.format(t[1], t[2]))
|
11524992
|
from __future__ import absolute_import, print_function
import os
import deepthought.spearmint.wrapper as spearmint_wrapper
# Write a function like this called 'main'
def main(job_id, params):
    print('Anything printed here will end up in the output directory for job #:', str(job_id))
    print(params)
    print(os.environ['PYTHONPATH'].split(os.pathsep))
# yaml template and base_config are expected to be in the same directory
    meta_job_path = os.path.dirname(__file__)
    yaml_template_file = os.path.join(meta_job_path, '_template.yaml')
    base_config_path = os.path.join(meta_job_path, '_base_config.properties')
    return spearmint_wrapper.run_job(job_id, meta_job_path, yaml_template_file, base_config_path, params)
|
11525026
|
import os
import sqlalchemy
import datetime
from sqlalchemy import Column, VARCHAR, Integer, String, DateTime, ForeignKey, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.sql import exists
from dotenv import load_dotenv
load_dotenv()
SESSION_DURATION = os.getenv("SESSION_DURATION").split(" ")
drop_timer = int(SESSION_DURATION[0])
check_timer = int(SESSION_DURATION[1])
end_timer = int(SESSION_DURATION[2])
# assigns sqlite for local environment when debug is true and assigns remote heroku database when debug is false
DEBUG = (os.getenv("DEBUG") == 'True')
if DEBUG == True:
SQLITE = 'sqlite:///database/database.db'
engine = create_engine(SQLITE, connect_args={'check_same_thread': False})
if DEBUG == False:
DATABASE_URL = os.getenv("DATABASE_URL")
    if DATABASE_URL is None:
        print("Cannot connect to the Heroku database; check the exposed vars of the Postgres setup")
engine = create_engine(DATABASE_URL)
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
class Users(Base):
"""User class"""
__tablename__ = "users"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, unique=True)
name = Column(String, nullable=False)
username = Column(String)
join_date = Column(DateTime)
warns = Column(Integer)
pool_count = Column(Integer)
blocked = Column(Boolean)
lang = Column(String)
def __init__(self, user_id, name, username=None, join_date=None, warns=0, pool_count=0, blocked=False, lang="en"):
self.user_id = user_id
self.name = name
self.lang = lang
self.username = username
self.join_date = join_date
self.warns = warns
self.pool_count = pool_count
self.blocked = blocked
def commit(self):
"""commits query object to db"""
session.add(self)
session.commit()
def warning(self):
"""increament warn by 1 everytime its called"""
try:
self.warns +=1
except TypeError:
self.warns = 1
finally:
if self.warns>=3:
self.blocked=True
session.commit()
return self.warns
# def blocked(self):
# if self.warns >= 3:
# return True
# else:
# return False
def engaged(self):
try:
self.pool_count +=1
except TypeError:
self.pool_count=1
finally:
session.commit()
return self.pool_count
@classmethod
def get(cls, userid):
"""retrive user from id"""
user = session.query(cls).filter_by(user_id=userid).first()
if user:
return user
else:
return None
@classmethod
def get_username(cls, username):
"""retrive user from username"""
user = session.query(cls).filter_by(username=username).first()
if user:
return user
else:
return None
@classmethod
def get_ids(cls):
userall = session.query(cls).all()
users = [i.user_id for i in userall]
return users
@classmethod
def get_users(cls):
userall = session.query(cls).all()
users = [i for i in userall]
return users
@classmethod
def create(cls, userid, name):
"""create new users by passing user id and name"""
user = cls(
user_id=userid,
name=name
)
session.add(user)
session.commit()
return user
@classmethod
def delete_user(cls, userid):
"""retrive user from id"""
user = session.query(cls).filter_by(user_id=userid).first()
if user:
session.delete(user)
session.commit()
return True
else:
return None
def delete(self):
"""delete user object"""
session.delete(self)
session.commit()
def __repr__(self):
return f"User {self.name} {self.user_id}"
class Rounds(Base):
__tablename__="rounds"
id = Column(Integer, primary_key=True)
start_time = Column(DateTime)
memberlist = relationship("MemberList", uselist=True, backref="round")
def __init__(self, start_time):
"""initializes rounds and set start time"""
if type(start_time) == str:
self.start_time = datetime.datetime.fromisoformat(start_time)
else:
self.start_time = start_time
@classmethod
def create(cls, start_time):
"""create round function by passing in time"""
rounds = cls(
start_time
)
session.add(rounds)
session.commit()
return rounds
@classmethod
def create_now(cls):
"""create round function immediately"""
start_time = datetime.datetime.now()
rounds = cls(
start_time
)
session.add(rounds)
session.commit()
return rounds
def start(self):
"""retrieve the start time of round"""
return self.start_time
def check_time(self):
t = self.end() - datetime.timedelta(minutes=check_timer)
return t
def end(self):
"""retrieve the end time of round"""
return self.start_time + datetime.timedelta(minutes=end_timer)
def drop_duration(self):
"""returns time left time drop username period ends and returns false after it ends"""
delta = self.start_time + datetime.timedelta(minutes=drop_timer)
now = datetime.datetime.now()
if now > delta:
return False
else:
return (delta-now).seconds
def join(self, user):
"""adds user to round by passing in the user object"""
user_id = user.user_id
# if MemberList.exist(user_id):
# self.memberlist
# return True
# else:
entry = MemberList(
round_id=self.id,
user=user
)
session.add(entry)
session.commit()
@classmethod
def get_round(cls, id):
"""get round object by id"""
return session.query(cls).filter_by(id=id).first()
@classmethod
def get_memberList(cls, id):
"""get list of all members in that round"""
return session.query(MemberList).all()
@classmethod
def get_lastRound(cls):
"""get last round"""
return session.query(Rounds).all()[-1]
def commit(self):
"""add and commit session changes to db"""
session.add(self)
session.commit()
def __repr__(self):
"""string representation of object"""
return f"Round {self.id} {str(self.start_time)}"
class MemberList(Base):
"""member list class"""
__tablename__="memberlist"
id = Column(Integer, primary_key=True)
round_id = Column(Integer, ForeignKey("rounds.id"))
#userinfo
user_id = Column(Integer)
name = Column(String)
username = Column(String)
def __init__(self, round_id, user):
self.round_id = round_id
self.user_id = user.user_id
self.name = user.name
self.username = user.username
@classmethod
def all(cls):
return session.query(cls).all()
@classmethod
def exist(cls,user_id):
"""checks if user is on the list returns boolean"""
return session.query(exists().where(cls.user_id==user_id)).scalar()
def __repr__(self):
return f"MemberList {self.name} round{self.round_id}"
Base.metadata.create_all(engine)
|
11525034
|
import pytest
@pytest.mark.parametrize('query, expected_terms', [
# query expected
('the one', ['the one', 'the', 'one', '1']),
('to be or not to be', ['to be', 'be or', 'or not', 'not to', 'to', 'be', 'or']),
('html', ['html', 'Hypertext Markup Language'])
])
def test_search_terms(gb_api, query, expected_terms):
result = gb_api.search(query)
result_terms = []
for result_term in result['queryInfo']['terms']:
result_terms.append(result_term['termStr'])
for expected_term in expected_terms:
assert expected_term in result_terms
|
11525036
|
import tensorflow as tf
from data_utils import Vocabulary, batch_generator
from model import LSTMModel
import os
import codecs
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('checkpoint_path', 'checkpoint/base', 'model path')
tf.flags.DEFINE_integer('batch_size', 100, 'number of seqs in one batch')
tf.flags.DEFINE_integer('num_steps', 100, 'length of one seq')
tf.flags.DEFINE_integer('lstm_size', 128, 'size of hidden state of lstm')
tf.flags.DEFINE_integer('num_layers', 2, 'number of lstm layers')
tf.flags.DEFINE_boolean('use_embedding', False, 'whether to use embedding')
tf.flags.DEFINE_integer('embedding_size', 128, 'size of embedding')
tf.flags.DEFINE_float('learning_rate', 0.001, 'learning_rate')
tf.flags.DEFINE_float('train_keep_prob', 0.5, 'dropout rate during training')
tf.flags.DEFINE_string('input_file', '', 'utf8 encoded text file')
tf.flags.DEFINE_string('vocab_file', '', 'vocabulary pkl file')
tf.flags.DEFINE_integer('max_steps', 100000, 'max steps to train')
tf.flags.DEFINE_integer('save_every_n', 1000, 'save the model every n steps')
tf.flags.DEFINE_integer('log_every_n', 10, 'log to the screen every n steps')
tf.flags.DEFINE_integer('max_vocab', 3500, 'max char number')
def main(_):
    if not os.path.exists(FLAGS.checkpoint_path):
        os.makedirs(FLAGS.checkpoint_path)
    # read the training text
    with open(FLAGS.input_file, 'r', encoding='utf-8') as f:
train_data = f.read()
    # load or build the vocabulary
vocabulary = Vocabulary()
if FLAGS.vocab_file:
vocabulary.load_vocab(FLAGS.vocab_file)
else:
vocabulary.build_vocab(train_data)
vocabulary.save(FLAGS.vocab_file)
input_ids = vocabulary.encode(train_data)
g = batch_generator(input_ids, FLAGS.batch_size, FLAGS.num_steps)
model = LSTMModel(vocabulary.vocab_size,
batch_size=FLAGS.batch_size,
num_steps=FLAGS.num_steps,
lstm_size=FLAGS.lstm_size,
num_layers=FLAGS.num_layers,
learning_rate=FLAGS.learning_rate,
train_keep_prob=FLAGS.train_keep_prob,
use_embedding=FLAGS.use_embedding,
embedding_size=FLAGS.embedding_size
)
model.train(g,
FLAGS.max_steps,
                FLAGS.checkpoint_path,
FLAGS.save_every_n,
FLAGS.log_every_n,
)
if __name__ == '__main__':
tf.app.run()
|
11525100
|
import numpy as np
from scipy.stats import norm
from scipy.special import gammaln
def two_tailed_ztest(success1, success2, total1, total2):
"""
Two-tailed z score for proportions
Parameters
-------
success1 : int
the number of success in `total1` trials/observations
success2 : int
the number of success in `total2` trials/observations
total1 : int
the number of trials or observations of class 1
total2 : int
the number of trials or observations of class 2
Returns
-------
zstat : float
z score for two tailed z-test
p_value : float
p value for two tailed z-test
"""
p1 = success1 / float(total1)
p2 = success2 / float(total2)
p_pooled = (success1 + success2) / float(total1 + total2)
obs_ratio = (1. / total1 + 1. / total2)
var = p_pooled * (1 - p_pooled) * obs_ratio
# calculate z-score using foregoing values
zstat = (p1 - p2) / np.sqrt(var)
# calculate associated p-value for 2-tailed normal distribution
p_value = norm.sf(abs(zstat)) * 2
return zstat, p_value
def dirichln(arr):
"""
Dirichlet gamma function
Albert (2007) Bayesian Computation with R, 1st ed., pg 178
Parameters
----------
arr : array or matrix of float values
Returns
-------
val : float or array,
logged Dirichlet transformed value if array or matrix
"""
val = np.sum(gammaln(arr)) - gammaln(np.sum(arr))
return val
def get_unique_name(new_name, name_list, addendum='_new'):
"""
Utility function to return a new unique name if name is in list.
Parameters
----------
new_name : string
name to be updated
name_list: list
list of existing names
addendum: string
addendum appended to new_name if new_name is in name_list
Returns
-------
new_name : string,
updated name
Example
-------
new_name = 'feat1'
name_list = ['feat1', 'feat2']
first iteration: new_name returned = 'feat1_new'
now with name_list being updated to include new feature:
name_list = ['feat1', 'feat2', 'feat1_new']
second iteration: new_name returned = 'feat1_new_new'
"""
# keep appending "new" until new_name is not in list
while new_name in name_list:
new_name += addendum
return new_name
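
# Minimal usage sketch with made-up counts: 45/100 vs 60/120 successes.
if __name__ == '__main__':
    zstat, p_value = two_tailed_ztest(45, 60, 100, 120)
    print("z = %.3f, p = %.3f" % (zstat, p_value))
    print(get_unique_name('feat1', ['feat1', 'feat2']))  # 'feat1_new'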
|
11525102
|
import numpy as np
import cv2
import scipy.misc
def normalization(img):
# rescale input img within [-1,1]
return img / 127.5 - 1
def inverse_normalization(img):
# rescale output img within [0,1], then saving by 'scipy.misc.imsave'
return (img + 1.) / 2.
def read_one_img(img_dir):
img = cv2.imread(img_dir)[:, :, ::-1]
img = normalization(img)
img_HR = img[:, 256:, :]
img_LR = img[:, :256, :]
return img_HR, img_LR
def gen_batch(X_list, batch_size=32):
idx = np.random.choice(X_list.shape[0], batch_size, replace=False)
X_HR_batch = np.zeros((batch_size, 256, 256, 3), dtype=np.float32)
X_LR_batch = np.zeros((batch_size, 256, 256, 3), dtype=np.float32)
for i in range(batch_size):
X_HR_batch[i], X_LR_batch[i] = read_one_img(X_list[idx[i]])
return X_HR_batch, X_LR_batch
def get_disc_batch(X_HR_batch, X_LR_batch, G_model, batch_counter):
# Create X_disc: alternatively only generated or real images
if batch_counter % 2 == 0:
# Produce an output
X_disc = G_model.predict(X_LR_batch)
y_disc = np.zeros((X_disc.shape[0], 1), dtype=np.uint8)
y_disc[:, 0] = 0
else:
X_disc = X_HR_batch
y_disc = np.zeros((X_disc.shape[0], 1), dtype=np.uint8)
y_disc[:, 0] = 1
return X_disc, y_disc
def plot_generated_batch(X_HR, X_LR, G_model, epoch):
# Generate images
X_SR = G_model.predict(X_LR[:4])
X_SR = inverse_normalization(X_SR)
X_LR = inverse_normalization(X_LR[:4])
X_HR = inverse_normalization(X_HR[:4])
X = np.concatenate((X_LR, X_SR, X_HR), axis=0)
list_rows = []
for i in range(int(X.shape[0] // 4)):
Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=1)
list_rows.append(Xr)
Xr = np.concatenate(list_rows, axis=0)
scipy.misc.imsave("./figures/val_epoch%s.png" % epoch, Xr)
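
# Quick sanity sketch of the value ranges: a pixel value of 255 maps to 1.0
# under normalization and back to 1.0 in the saved [0, 1] range.
if __name__ == '__main__':
    x = np.array([0.0, 127.5, 255.0])
    print(normalization(x))                         # [-1.  0.  1.]
    print(inverse_normalization(normalization(x)))  # [0.  0.5 1. ]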
|
11525130
|
from django.db import models
from .product import Product
class TestCase(models.Model):
PRIORITY_CHOICES = ((0, 'Urgent'), (1, 'High'), (2, 'Medium'), (3, 'Low'))
name = models.CharField(max_length=200)
full_name = models.CharField(max_length=400, default='')
keyword = models.CharField(max_length=100, null=True, blank=True)
priority = models.IntegerField(default=2, choices=PRIORITY_CHOICES)
description = models.TextField(blank=True)
owner = models.CharField(max_length=50, blank=True)
created_by = models.CharField(max_length=50)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='cases')
def product_name(self):
return self.product.name
def team_name(self):
return self.product.team.name
def execution_info(self):
return {
'total': self.results.count(),
'passed': self.results.filter(outcome=0).count(),
'failed': self.results.filter(outcome=1).count(),
'other': self.results.filter(outcome__gt=1).count()
}
def tags_list(self):
return ':'.join([t.name for t in getattr(self, 'tags')])
    def stability(self):
        latest_results = self.results.all()[:10]
        if not latest_results:
            # guard against division by zero when a case has no results yet
            return "N/A"
        passed = len([s for s in latest_results if s.outcome == 0])
        return "%.f%%" % round(passed / len(latest_results) * 100)
class Meta:
ordering = ['-id']
def __str__(self):
return self.full_name or self.name
|
11525140
|
import base64
import secrets
import string
import struct
from itertools import islice, cycle
from encoders.Encoder import Encoder
from engine.component.CallComponent import CallComponent
from engine.component.CodeComponent import CodeComponent
from engine.modules.EncoderModule import EncoderModule
from enums.Language import Language
from Crypto.Util import strxor
class XorEncoder(Encoder):
def __init__(self):
super().__init__()
self.decoder_in = [bytes]
self.decoder_out = [bytes]
self.key = ''.join(secrets.choice(".+-,:;_%=()" + string.ascii_letters + string.digits) for _ in range(12)).encode()
def slow_encode(self, data):
encoded = b""
if isinstance(data, str):
data = bytes(data, 'utf-8')
for i in range(len(data)):
print(f" [>] Progress: {i * 100 / (len(data) - 1):.2f}% ", end='\r')
encoded += struct.pack("B", (data[i] ^ (self.key[i % len(self.key)])))
print()
return encoded
def encode(self, data):
if isinstance(data, str):
data = bytes(data, 'utf-8')
return strxor.strxor(data, bytearray(islice(cycle(self.key), len(data))))
def supported_languages(self):
return [Language.CSHARP, Language.CPP, Language.POWERSHELL]
def decode(self, data):
return self.encode(data)
def translate(self, language=Language.CSHARP, arch=None):
module = EncoderModule()
module.name = self.__class__.__name__
code = self.template(language=language)
if language == Language.CSHARP:
module.call_component = CallComponent("XorEncoder.Decode")
module.components = [
CodeComponent(code.replace("####KEY####", self.key.decode()))
]
elif language == Language.CPP:
module.call_component = CallComponent("length = xor_encode(encoded, length);")
module.components = [
CodeComponent(code.replace("####KEY####", self.key.decode()).replace("####KEY_LENGTH####", str(len(self.key))))
]
elif language == Language.POWERSHELL:
module.call_component = CallComponent("Invoke-Xor")
module.components = [
CodeComponent(code.replace("####KEY####", self.key.decode()))
]
return module
|
11525250
|
from udpwkpf import WuClass, Device
import sys
import mraa
from twisted.protocols import basic
from twisted.internet import reactor, protocol
PORT = 2222
if __name__ == "__main__":
class Number(WuClass): #"Number" WuClass has been defined in ~/wukong-darjeeling/wukong/ComponentDefinitions/WuKongStandardLibrary.xml
def __init__(self):
WuClass.__init__(self)
self.ID = 2014 #2014 is WuClass id of "Number"
print "Number init success"
def update(self,obj,pID,val):
if pID == 0 or pID == 1: # pID is property ID, 0 is the first property of "Number"
print "NUMBER(int) is %d" % val
else:
print "NUMBER(boolean) is ", val
class Light_Actuator(WuClass):
def __init__(self, pin):
WuClass.__init__(self)
self.ID = 2001
self.light_actuator_gpio = mraa.Gpio(pin)
self.light_actuator_gpio.dir(mraa.DIR_OUT)
print "Light Actuator init success"
def update(self,obj,pID,val):
if pID == 0:
if val == True:
self.light_actuator_gpio.write(1)
print "Light Actuator On"
else:
self.light_actuator_gpio.write(0)
print "Light Actuator Off"
else:
print "Light Actuator garbage"
class EEGServerProtocol(basic.LineReceiver):
def connectionMade(self):
print "Got new client!"
self.factory.clients.append(self)
def connectionLost(self, reason):
print "Lost a client!", str(reason)
self.factory.clients.remove(self)
def dataReceived(self, data):
data = data[data.find("#"):data.find("@")]
if len(data) < 3: return
print "received", repr(data), map(ord, data)
output_short = int(data[1])
output_boolean = bool(int(data[2]))
print "output_short: ", output_short, " output_boolean: ", output_boolean
self.factory.obj_eeg_server.setProperty(0, output_short) # setProperty has two parameters, the first is pID, the second is output value
self.factory.obj_eeg_server.setProperty(1, output_boolean)
class EEGServerFactory(protocol.ServerFactory):
protocol = EEGServerProtocol
def __init__(self, obj):
self.clients = []
self.obj_eeg_server = obj
class EEGServer(WuClass):
def __init__(self):
WuClass.__init__(self)
self.ID = 1912
print "EEGServer init success"
def update(self,obj,pID,val):
pass
class MyDevice(Device):
def __init__(self,addr,localaddr):
Device.__init__(self,addr,localaddr)
def init(self):
m1 = Number()
self.addClass(m1,1)
self.obj_num = self.addObject(m1.ID)
Light_Actuator_Pin = 2
m2 = Light_Actuator(Light_Actuator_Pin)
self.addClass(m2,0)
self.obj_light_actuator = self.addObject(m2.ID)
m3 = EEGServer()
self.addClass(m3,1)
self.obj_eeg_server = self.addObject(m3.ID)
if len(sys.argv) <= 2:
print 'python %s <gip> <dip>:<port>' % sys.argv[0]
        print ' <gip>: IP address of gateway'
print ' <dip>: IP address of Python device'
        print ' <port>: A unique port number'
print ' ex. python %s 192.168.4.7 127.0.0.1:3000' % sys.argv[0]
sys.exit(-1)
d = MyDevice(sys.argv[1],sys.argv[2])
factory = EEGServerFactory(d.obj_eeg_server)
reactor.listenTCP(PORT, factory)
reactor.run()
|
11525259
|
from unittest import TestCase
from DivideTwoIntegers import DivideTwoIntegers
class TestDivideTwoIntegers(TestCase):
def test_divide(self):
d = DivideTwoIntegers()
        self.assertEqual(d.divide(1, 1), 1)
        self.assertEqual(d.divide(0, 1), 0)
        self.assertEqual(d.divide(-1, -1), 1)
        self.assertEqual(d.divide(2147483647, 1), 2147483647)
        self.assertEqual(d.divide(2147483647, -2147483648), 0)
        self.assertEqual(d.divide(-2147483648, -1), 2147483647)
        self.assertEqual(d.divide(100, 6), 16)
|
11525264
|
DEBUG = True
SECRET_KEY = "unguessable"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"rest_framework.authtoken",
"redis_pubsub",
"testapp"
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "redis_pubsub",
"USER": "postgres"
}
}
SITE_ID = 1
REDIS_HOST = "localhost", 6379
|
11525269
|
import Cell
class GridWorld:
def __init__(self, width, height, obstacles):
self.width = width
self.height = height
self.cells = [[Cell.Cell(i, j) for j in range(width)] for i in range(height)]
self.obstacles = obstacles
for obstacle in self.obstacles:
self.cells[obstacle[0]][obstacle[1]].obstacle = True
def isstart(self,item):
self.cells[item[0]][item[1]].start = True
def isgoal(self,item):
self.cells[item[0]][item[1]].goal = True
def mark(self,item):
self.cells[item[0]][item[1]].visited = True
def markjump(self,item):
self.cells[item[0]][item[1]].isjp = True
def markpath(self,item):
self.cells[item[0]][item[1]].ispath = True
def get4Neighbors(self, id):
(curX, curY) = id
if self.cells[curX][curY].obstacle == True:
return []
neighbors = [(curX - 1, curY), (curX, curY + 1), (curX + 1, curY), (curX, curY - 1)]
neighbors = filter(self.inBounds, neighbors)
neighbors = filter(self.passable, neighbors)
return neighbors
def get8Neighbors(self, id):
(curX, curY) = id
if self.cells[curX][curY].obstacle == True:
return []
neighbors = [(curX - 1, curY), (curX - 1, curY + 1), (curX, curY + 1), (curX + 1, curY + 1), (curX + 1, curY), (curX + 1, curY - 1), (curX, curY - 1), (curX - 1, curY - 1)]
neighbors = filter(self.inBounds, neighbors)
neighbors = filter(self.passable, neighbors)
return neighbors
def inBounds(self, id):
(x, y) = id
return 0 <= x < self.height and 0 <= y < self.width
def passable(self, id):
(x, y) = id
return not (self.cells[x][y].obstacle)
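# Illustrative usage sketch (not part of the original module): builds a small grid with one
# obstacle and lists the passable 4-neighbours of the top-left cell. Assumes the Cell module
# imported above is available; note the neighbour methods return filter iterators, so they
# are wrapped in list() before printing.
if __name__ == '__main__':
    world = GridWorld(width=3, height=3, obstacles=[(1, 1)])
    print(list(world.get4Neighbors((0, 0))))  # expected: [(0, 1), (1, 0)]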
|
11525282
|
from models import (cluster)
generator_dict = {'standard': cluster.G}
discriminator_dict = {'standard': cluster.D}
|
11525289
|
from .model import Model
import numpy as np
import time
def softmax(x):
r"""Compute softmax values for each sets of scores in $x$.
Args:
x (numpy.ndarray): Input vector to compute softmax
Returns:
numpy.ndarray: softmax(x)
"""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
class Policy(object):
"""A basic policy class"""
TYPE = 'policy'
def __init__(self, obs_n, act_n):
self.obs_n = obs_n
self.act_n = act_n
self.learn_step = 0
def setup(self):
"""Initialize policy model"""
self.model = Model(self.obs_n, self.act_n)
self.model.setup()
def sample(self, obs):
"""Sample actions with respect to the observation
Args:
obs (numpy.ndarray): The observation.
Returns:
np.ndarray: Sampled actions.
"""
logit = self.model.forward(obs)
p = softmax(logit)[0]
a = np.random.choice(self.act_n, 1, p=p)[0]
return a
    def learn(self, sample):
        """Learn with sample
        Args:
            sample (dict): Sample dictionary containing the key 'r' (rewards).
        """
r = sample['r']
r = np.transpose(r)[0]
loss = r[0]
params = self.model.get_params()
grad = loss - np.random.randn(*params.shape)
params += grad
self.model.set_params(params)
time.sleep(0.1)
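# Illustrative sanity check (not part of the original module): confirms that softmax()
# returns a valid probability distribution. Policy itself depends on the separate Model
# class, so only the standalone helper is exercised here.
if __name__ == '__main__':
    _p = softmax(np.array([1.0, 2.0, 3.0]))
    assert np.all(_p > 0) and np.isclose(_p.sum(), 1.0)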
|
11525290
|
try:
import ldap
except ImportError:
pass
import logging
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Group, User
from django.core.exceptions import PermissionDenied
from django_auth_adfs.backend import AdfsAuthCodeBackend
from qatrack.accounts.models import ActiveDirectoryGroupMap, DefaultGroup
class QATrackAccountBackend(ModelBackend):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger('auth.QATrackAccountBackend')
    def authenticate(self, request, username=None, password=None):
self.logger.info("Attempting to authenticate %s" % username)
username = self.clean_username(username)
self.logger.info("Cleaned username: %s" % username)
user = super().authenticate(request, username=username, password=password)
if user:
self.logger.info("Successfully authenticated user: %s" % username)
self.update_user_groups(user)
else:
self.logger.info("Authentication failed for user: %s" % username)
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
if settings.ACCOUNTS_CLEAN_USERNAME and callable(settings.ACCOUNTS_CLEAN_USERNAME):
return settings.ACCOUNTS_CLEAN_USERNAME(username)
return username.replace(settings.CLEAN_USERNAME_STRING, "")
def update_user_groups(self, user):
existing_user_groups = list(user.groups.all())
default_groups = [dg.group for dg in DefaultGroup.objects.select_related("group")]
for qat_group in default_groups:
if qat_group not in existing_user_groups:
self.logger.debug("User added to group '{}'".format(qat_group.name))
user.groups.add(qat_group)
# stripped down version of http://djangosnippets.org/snippets/901/
class ActiveDirectoryGroupMembershipSSLBackend:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger('auth.ActiveDirectoryGroupMembershipSSLBackend')
ldap.set_option(ldap.OPT_REFERRALS, 0) # DO NOT TURN THIS OFF OR SEARCH WON'T WORK!
if settings.AD_CERT_FILE:
self.logger.debug("Setting TLS CERTFILE %s." % settings.AD_CERT_FILE)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.AD_CERT_FILE)
def authenticate(self, request, username=None, password=None):
self.logger.info("Attempting to authenticate %s" % username)
username = self.clean_username(username)
self.logger.info("Cleaned username: %s" % username)
try:
            if not password:
                self.logger.info("Failed to authenticate user. No password provided.")
return None
self.logger.debug("Initializing with ldap url=%s" % settings.AD_LDAP_URL)
l = ldap.initialize(settings.AD_LDAP_URL)
l.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
binddn = "%s@%s" % (username, settings.AD_NT4_DOMAIN)
self.logger.debug("Binding with binddn=%s" % binddn)
l.simple_bind_s(binddn, password)
user_attrs = self.get_user_attrs(l, username)
qualified_groups = ActiveDirectoryGroupMap.qualified_ad_group_names()
if qualified_groups:
if len(set(qualified_groups) & set(user_attrs['member_of'])) == 0:
self.logger.info(
(
"successfully authenticated: %s but they don't belong to a qualified group. "
"Qualified Groups: %s AD member_of: %s"
) %
(username, ', '.join(qualified_groups), ', '.join(user_attrs['member_of']))
)
return None
self.logger.debug("successfully authenticated: %s" % username)
return self.get_or_create_user(username, user_attrs)
except ldap.INVALID_CREDENTIALS:
self.logger.info("Invalid username or password for user: %s" % username)
return None
except ldap.SERVER_DOWN:
self.logger.exception("Unable to contact LDAP server")
except Exception:
self.logger.exception("Exception occurred while trying to authenticate %s" % username)
return None
finally:
try:
l.unbind_s()
except Exception:
pass
def get_user_attrs(self, con, username):
self.logger.debug("Searching active directory for user attributes with search DN: %s" % settings.AD_SEARCH_DN)
result = con.search_ext_s(
settings.AD_SEARCH_DN,
ldap.SCOPE_SUBTREE,
"%s=%s" % (settings.AD_LU_ACCOUNT_NAME, username),
settings.AD_SEARCH_FIELDS,
)[0][1]
email = result.get(settings.AD_LU_MAIL, [""])[0]
last_name = result.get(settings.AD_LU_SURNAME, [""])[0]
first_name = result.get(settings.AD_LU_GIVEN_NAME, [""])[0]
email = email.decode('utf-8') if isinstance(email, bytes) else email
last_name = last_name.decode('utf-8') if isinstance(last_name, bytes) else last_name
first_name = first_name.decode('utf-8') if isinstance(first_name, bytes) else first_name
attrs = {
'email': email,
'last_name': last_name,
'first_name': first_name,
}
memberships = set()
for member_of in result.get(settings.AD_LU_MEMBER_OF, [""]):
if isinstance(member_of, bytes):
member_of = member_of.decode()
# member of comes in format like CN=TestGroup,CN=Users,DC=foo,DC=example,DC=com
for m in member_of.split(","):
if "cn=" not in m.lower():
continue
memberships.add(m.split("=")[1])
attrs['member_of'] = memberships
return attrs
def get_or_create_user(self, username, user_attrs):
try:
self.logger.debug("Looking for existing user with username: %s" % username)
user = User.objects.get(username=username)
self.logger.debug("Found existing user with username: %s" % username)
except User.DoesNotExist:
self.logger.debug("No existing user with username: %s" % username)
user = User(username=username, is_staff=False, is_superuser=False)
user.set_unusable_password()
try:
user.save()
self.logger.info("Created user with username: %s" % username)
except Exception:
self.logger.info("Creation of user failed")
return None
self.update_user_attributes(user, user_attrs)
return user
def update_user_attributes(self, user, user_attrs):
self.logger.info("Updating user info for %s" % user.username)
# get personal info
user.email = user_attrs['email'] or user.email
user.last_name = user_attrs['last_name'] or user.last_name
user.first_name = user_attrs['first_name'] or user.first_name
ad_groups = user_attrs['member_of']
existing_user_groups = list(user.groups.all())
default_groups = [dg.group for dg in DefaultGroup.objects.select_related("group")]
for qat_group in default_groups:
if qat_group not in existing_user_groups:
self.logger.debug("User added to group '{}'".format(qat_group.name))
user.groups.add(qat_group)
ad_group_map = ActiveDirectoryGroupMap.group_map()
for ad_group_name in ad_groups:
qatrack_groups = ad_group_map.get(ad_group_name, [])
try:
                if settings.AD_MIRROR_GROUPS and ad_group_name:
                    group, created = Group.objects.get_or_create(name=ad_group_name)
                    if created:
                        self.logger.debug("Created group '{}'".format(ad_group_name))
else:
group = Group.objects.get(name=ad_group_name)
qatrack_groups.append(group)
except Group.DoesNotExist:
pass
for qat_group in qatrack_groups:
if qat_group not in existing_user_groups:
self.logger.debug("User added to group '{}'".format(qat_group.name))
user.groups.add(qat_group)
user.save()
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
if settings.AD_CLEAN_USERNAME and callable(settings.AD_CLEAN_USERNAME):
return settings.AD_CLEAN_USERNAME(username)
return username.replace(
settings.CLEAN_USERNAME_STRING, ""
).replace(
settings.AD_CLEAN_USERNAME_STRING, ""
)
class WindowsIntegratedAuthenticationBackend(ModelBackend):
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, request, username=None):
"""
The username passed is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not username:
return
username = self.clean_username(username)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = User.objects.get_or_create(username=username)
if created:
user = self.configure_user(user)
else:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
if settings.AD_CLEAN_USERNAME and callable(settings.AD_CLEAN_USERNAME):
return settings.AD_CLEAN_USERNAME(username)
return username.replace(settings.CLEAN_USERNAME_STRING, "").replace(settings.AD_CLEAN_USERNAME_STRING, "")
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
try:
ldap.set_option(ldap.OPT_REFERRALS, 0) # DO NOT TURN THIS OFF OR SEARCH WON'T WORK!
# initialize
l = ldap.initialize(settings.AD_LDAP_URL)
# bind
binddn = "%s@%s" % (settings.AD_LDAP_USER, settings.AD_NT4_DOMAIN)
l.bind_s(binddn, settings.AD_LDAP_PW)
# search
result = l.search_ext_s(
settings.AD_SEARCH_DN,
ldap.SCOPE_SUBTREE,
"%s=%s" % (settings.AD_LU_ACCOUNT_NAME, user),
settings.AD_SEARCH_FIELDS,
)[0][1]
l.unbind_s()
# get personal info
user.email = result.get(settings.AD_LU_MAIL, [None])[0]
user.last_name = result.get(settings.AD_LU_SURNAME, [None])[0]
user.first_name = result.get(settings.AD_LU_GIVEN_NAME, [None])[0]
except Exception:
return None
user.is_staff = False
user.is_superuser = False
user.set_unusable_password()
user.save()
return user
class QATrackAdfsAuthCodeBackend(AdfsAuthCodeBackend):
"""Note https://github.com/jobec/django-auth-adfs/issues/31#issuecomment-384034365
was extremely helpful in getting an Windows Server 2016 ADFS test server set up!"""
def authenticate(self, request=None, authorization_code=None, **kwargs):
try:
return super().authenticate(request=request, authorization_code=authorization_code, **kwargs)
except PermissionDenied:
return None
def create_user(self, claims):
from django_auth_adfs.config import settings as adfs_settings
from django_auth_adfs.backend import logger
username = claims[adfs_settings.USERNAME_CLAIM]
qualified_groups = ActiveDirectoryGroupMap.qualified_ad_group_names()
if qualified_groups and adfs_settings.GROUPS_CLAIM:
if len(set(qualified_groups) & set(claims[adfs_settings.GROUPS_CLAIM])) == 0:
logger.info(
"successfully authenticated: %s but they don't belong to a qualifying group (%s)" %
(username, ', '.join(qualified_groups))
)
raise PermissionDenied
username = self.clean_username(username)
claims[adfs_settings.USERNAME_CLAIM] = username
return super().create_user(claims)
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
if settings.ACCOUNTS_CLEAN_USERNAME and callable(settings.ACCOUNTS_CLEAN_USERNAME):
return settings.ACCOUNTS_CLEAN_USERNAME(username)
return username.replace(settings.CLEAN_USERNAME_STRING, "")
def update_user_groups(self, user, claims):
"""
AdfsAuthCodeBackend syncs Django groups and AD FS groups by default.
That is to say it will remove a user from a group if the group is not
present in the claims. Since we rely so heavily on Django group based
permissions and may be adapting existing QATrack+ installations to use
AD FS, we will not remove users from groups when using this backend.
If you want to use the default behaviour (that is always sync groups)
then you can use the django_adfs.backends.AdfsAuthCodeBackend.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
"""
from django_auth_adfs.backend import logger
from django_auth_adfs.config import settings as adfs_settings
if adfs_settings.GROUPS_CLAIM:
if adfs_settings.GROUPS_CLAIM in claims:
claim_groups = claims[adfs_settings.GROUPS_CLAIM]
if not isinstance(claim_groups, list):
claim_groups = [claim_groups, ]
else:
logger.debug(
"The configured groups claim '{}' was not found in the access token".format(
adfs_settings.GROUPS_CLAIM
),
)
claim_groups = []
claim_groups += [""]
existing_user_groups = list(user.groups.all())
default_groups = [dg.group for dg in DefaultGroup.objects.select_related("group")]
for qat_group in default_groups:
if qat_group not in existing_user_groups:
logger.debug("User added to group '{}'".format(qat_group.name))
user.groups.add(qat_group)
ad_group_map = ActiveDirectoryGroupMap.group_map()
for ad_group_name in claim_groups:
qatrack_groups = ad_group_map.get(ad_group_name, [])
try:
                if adfs_settings.MIRROR_GROUPS and ad_group_name:
                    group, created = Group.objects.get_or_create(name=ad_group_name)
                    if created:
                        logger.debug("Created group '{}'".format(ad_group_name))
else:
group = Group.objects.get(name=ad_group_name)
qatrack_groups.append(group)
except Group.DoesNotExist:
pass
for qat_group in qatrack_groups:
if qat_group not in existing_user_groups:
logger.debug("User added to group '{}'".format(qat_group.name))
user.groups.add(qat_group)
|
11525295
|
import numpy as np
from scipy.special import hyp2f1, gammaln
def get_r2(iv, dv, stack_intercept=True):
""" Regress dv onto iv and return r-squared.
Parameters
----------
iv : numpy array
Array of shape N (samples) x K (features)
dv : numpy array
Array of shape N (samples) x 1
stack_intercept : bool
Whether to stack an intercept (vector with ones of length N).
Returns
-------
r2 : float
R-squared model fit.
"""
if iv.ndim == 1:
# Add axis if shape is (N,)
iv = iv[:, np.newaxis]
if stack_intercept:
iv = np.hstack((np.ones((iv.shape[0], 1)), iv))
    beta = np.linalg.lstsq(iv, dv, rcond=None)[0]
dv_hat = iv.dot(beta).squeeze()
r2 = 1 - (((dv - dv_hat) ** 2).sum() / ((dv - dv.mean()) ** 2).sum())
return r2
def vectorized_corr(arr, arr_2D):
""" Computes the correlation between an array and each column
in a 2D array (each column represents a variable) in a vectorized
way.
Parameters
----------
arr : numpy array
Array of shape (N,)
arr_2D : numpy array
Array of shape (N, P), with P indicating different variables that
will be correlated with arr
Returns
-------
corrs : numpy array
Array of shape (P,) with all correlations between arr and columns in arr_2D
"""
if arr.ndim == 1:
arr = arr[:, np.newaxis]
arr_c, arr_2D_c = arr - arr.mean(), arr_2D - arr_2D.mean(axis=0)
r_num = np.sum(arr_c * arr_2D_c, axis=0)
r_den = np.sqrt(np.sum(arr_c ** 2, axis=0) * np.sum(arr_2D_c ** 2, axis=0))
corrs = r_num / r_den
return corrs
def vectorized_partial_corr(arr, c, arr_2D, stack_intercept=True):
""" Computes the correlation between an array and each column
in a 2D array (each column represents a variable) in a vectorized
way.
Parameters
----------
arr : numpy array
Array of shape (N,)
c : numpy array
Array of shape (N,) that should be partialled out of arr_2D and arr
arr_2D : numpy array
Array of shape (N, P), with P indicating different variables that
will be correlated with arr
Returns
-------
corrs : numpy array
Array of shape (P,) with all correlations between arr and columns in arr_2D
"""
if arr.ndim == 1:
arr = arr[:, np.newaxis]
if c.ndim == 1:
# Add axis if shape is (N,)
c = c[:, np.newaxis]
if stack_intercept:
c = np.hstack((np.ones((c.shape[0], 1)), c))
arr_resid = arr - c.dot(np.linalg.lstsq(c, arr, rcond=None)[0])
arr_2d_resid = arr_2D - c.dot(np.linalg.lstsq(c, arr_2D, rcond=None)[0])
return vectorized_corr(arr_resid, arr_2d_resid)
def vectorized_semipartial_corr(arr, c, arr_2D, which='2D', stack_intercept=True):
""" Computes the semipartial correlation between an array and each column
in a 2D array (each column represents a variable) in a vectorized
way.
Parameters
----------
arr : numpy array
Array of shape (N,)
c : numpy array
Array of shape (N,) that should be partialled out of arr_2D and arr
arr_2D : numpy array
Array of shape (N, P), with P indicating different variables that
will be correlated with arr
Returns
-------
corrs : numpy array
Array of shape (P,) with all correlations between arr and columns in arr_2D
"""
if arr.ndim == 1:
arr = arr[:, np.newaxis]
if c.ndim == 1:
# Add axis if shape is (N,)
c = c[:, np.newaxis]
if stack_intercept:
c = np.hstack((np.ones((c.shape[0], 1)), c))
if which == '2D':
arr_2D_resid = arr_2D - c.dot(np.linalg.lstsq(c, arr_2D, rcond=None)[0])
return vectorized_corr(arr, arr_2D_resid)
else:
        arr_resid = arr - c.dot(np.linalg.lstsq(c, arr, rcond=None)[0])
return vectorized_corr(arr_resid, arr_2D)
def rpdf(rho, n, rs):
""" rho = population correlation coefficient. """
lnum = np.log(n-2) + gammaln(n-1) + np.log((1-rho**2)**(.5*(n-1))) + np.log((1-rs**2)**(.5*(n-4)))
lden = np.log(np.sqrt(2*np.pi)) + gammaln(n-.5) + np.log((1-rho*rs)**(n-3/2))
fac = lnum - lden
hyp = hyp2f1(.5, .5, (2*n-1)/2, (rho*rs+1)/2)
return np.exp(fac) * hyp
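# Illustrative sanity checks (not part of the original module): vectorized_corr is compared
# column-by-column against np.corrcoef, and rpdf is integrated over (-1, 1) with np.trapz to
# confirm it behaves like a probability density. Synthetic data only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x, Y = rng.randn(100), rng.randn(100, 5)
    ref = np.array([np.corrcoef(x, Y[:, j])[0, 1] for j in range(Y.shape[1])])
    assert np.allclose(vectorized_corr(x, Y), ref)
    grid = np.linspace(-0.999, 0.999, 2001)
    assert np.isclose(np.trapz(rpdf(0.3, 20, grid), grid), 1.0, atol=1e-2)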
|
11525310
|
import warnings
from typing import Any
from prefect.executors import LocalExecutor as _LocalExecutor
class LocalExecutor(_LocalExecutor):
def __new__(cls, *args: Any, **kwargs: Any) -> "LocalExecutor":
warnings.warn(
"prefect.engine.executors.LocalExecutor has been moved to "
"`prefect.executors.LocalExecutor`, please update your imports",
stacklevel=2,
)
return super().__new__(cls)
|
11525312
|
import os
import sys
import time
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', stay_alive=True, main_configs=['config/models_config.xml'])
def copy_file_to_container(local_path, dist_path, container_id):
os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path))
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
copy_file_to_container(os.path.join(SCRIPT_DIR, 'model/.'), '/etc/clickhouse-server/model', node.docker_id)
node.restart_clickhouse()
yield cluster
finally:
cluster.shutdown()
def test(started_cluster):
node.query("select modelEvaluate('titanic', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);")
|
11525326
|
import dexy.reporters.nodegraph.d3
import dexy.reporters.nodegraph.text
import dexy.reporters.nodegraph.graphviz
|
11525338
|
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
class SimpleText(models.Model):
"""A Testing app"""
firstname = models.CharField(blank=True, max_length=255)
lastname = models.CharField(blank=True, max_length=255)
favorite_color = models.CharField(blank=True, max_length=255)
def __unicode__(self):
return self.firstname
class SimpleTaggedItem(models.Model):
tag = models.SlugField()
simple_text = models.ForeignKey(SimpleText)
def __unicode__(self):
return self.tag
import objectpermissions
permissions = ['perm1', 'perm2', 'perm3', 'perm4']
objectpermissions.register(SimpleText, permissions)
objectpermissions.register(SimpleTaggedItem, permissions)
from django.contrib import admin
from objectpermissions.admin import TabularUserPermInline, StackedUserPermInline
class SimpleTaggedItemInline(admin.TabularInline):
model = SimpleTaggedItem
class SimpleTextAdmin(admin.ModelAdmin):
list_display = ('firstname','lastname','favorite_color')
inlines = [SimpleTaggedItemInline, TabularUserPermInline, ]
admin.site.register(SimpleText, SimpleTextAdmin)
|
11525361
|
from flask_restplus import Resource, reqparse
from flask_jwt import jwt_required
from models.item import ItemModel
class Item(Resource):
# Adding parser as part of the class
parser = reqparse.RequestParser()
    parser.add_argument('price',
                        type=float,
                        required=True,
                        help="Price is required!")
@jwt_required()
def get(self, name):
item = ItemModel.find_by_name(name)
if item is not None:
return item.json()
else:
return {'message' : 'Item not found.'}, 404
def post(self, name):
if ItemModel.find_by_name(name) is not None:
return {'message' : 'Item already exists.'}, 400
else:
# Parsing
request_data = Item.parser.parse_args()
item = ItemModel(name, request_data['price'])
# Dealing with possible insertion error
try:
item.save_to_db()
            except Exception:
# Returning 500 - Internal Server Error
return {'message' : 'An error occurred inserting the item.'}, 500
return item.json(), 201
    def delete(self, name):
        item = ItemModel.find_by_name(name)
        if item is not None:
            item.delete_from_db()
            return {'message' : 'Item deleted.'}
        return {'message' : 'Item not found.'}, 404
def put(self, name):
# Parsing
request_data = Item.parser.parse_args()
item = ItemModel.find_by_name(name)
if item is None:
item = ItemModel(name, request_data['price'])
else:
item.price = request_data['price']
item.save_to_db()
return item.json()
class ItemList(Resource):
def get(self):
return {'items' : [item.json() for item in ItemModel.query.all()]}
|
11525368
|
import keras.layers as KL
import keras.backend as K
import tensorflow as tf
from lib.nets.resnet_backbone import identity_block as bottleneck
from keras.utils import conv_utils
from keras.engine import InputSpec
import numpy as np
class UpsampleBilinear(KL.Layer):
def call(self, inputs, **kwargs):
source, target = inputs
target_shape = tf.shape(target)
return tf.image.resize_bilinear(source, (target_shape[1], target_shape[2]), align_corners=True)
def compute_output_shape(self, input_shape):
return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)
def _conv_bn_relu(input_tensor, kernel_size, nb_filters,
padding="same", namebase="res", has_act=True, rate=1):
output = KL.Conv2D(nb_filters, kernel_size, \
padding=padding,
dilation_rate=(rate, rate),
name=namebase+"_conv")(input_tensor)
output = KL.BatchNormalization(axis=3, \
name=namebase+"_bn")(output)
if has_act:
output = KL.Activation('relu')(output)
return output
def _bn_relu_conv(input_tensor, kernel_size, nb_filters,
padding="same", namebase="res", has_act=True):
x = input_tensor
x = KL.BatchNormalization(axis=3, \
name=namebase+"_bn")(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filters, kernel_size, \
padding=padding,
name=namebase+"_conv")(x)
return x
def create_global_net(blocks, cfg, has_bn=True, bn_trainable=True):
""" create global net in cpn
# Inputs:
blocks = [C2, C3, C4, C5]
"""
global_fms = []
global_outs = []
last_fm = None
## define pyramid feature maps
for i, block in enumerate(reversed(blocks)):
lateral = _conv_bn_relu(block, (1, 1), 256, "same", 'lateral/res{}'.format(5-i))
if last_fm is not None:
upsample = UpsampleBilinear(\
name='fpn/p{}upsampled'.format(5-i+1))([last_fm, lateral])
upsample = KL.Conv2D(256, (1, 1), \
name='fpn/p{}upsampled_conv'.format(5-i))(upsample)
if has_bn:
upsample = KL.BatchNormalization(name='fpn/p{}upsampled_bn'.format(5-i), axis=3)(upsample)
last_fm = KL.Add(name='fpn/p{}merge'.format(5-i))([\
upsample, lateral])
else:
last_fm = lateral
tmp = _conv_bn_relu(last_fm, (1, 1), 256, "SAME", 'tmp/res{}'.format(5-i))
out = KL.Conv2D(cfg.KEYPOINTS_NUM, (3, 3), padding="SAME", \
name='pyramid/res{}'.format(5-i))(tmp)
if has_bn:
out = KL.BatchNormalization(axis=3, name='pyramid/res{}_bn'.format(5-i))(out)
global_fms.append(last_fm)
out = KL.Lambda(lambda t: tf.image.resize_bilinear(t, \
(cfg.OUTPUT_SHAPE[0], cfg.OUTPUT_SHAPE[1])), \
name='pyramid/res{}up'.format(5-i))(out)
global_outs.append(out)
global_fms.reverse()
global_outs.reverse()
return global_fms, global_outs
## original cpn RefineNet version
def create_refine_net(blocks, cfg, use_bn=True):
refine_fms = []
for i, block in enumerate(blocks):
mid_fm = block
for j in range(i):
mid_fm = bottleneck(mid_fm, 3, [128, 128, 256],
stage=(2+i),
block='refine_conv' + str(j), use_bn=use_bn)
mid_fm = KL.Lambda(lambda t: tf.image.resize_bilinear(t, \
(cfg.OUTPUT_SHAPE[0], cfg.OUTPUT_SHAPE[1]), align_corners=True),\
name='upsample_conv/res{}'.format(2+i))(mid_fm)
refine_fms.append(mid_fm)
refine_fm = KL.Concatenate(axis=3)(refine_fms)
refine_fm = KL.Conv2D(256, (1, 1),
padding="SAME", name="refine_shotcut")(refine_fm)
refine_fm = bottleneck(refine_fm, 3, [128, 128, 256], stage=0, block='final_bottleneck')
res = KL.Conv2D(cfg.KEYPOINTS_NUM, (3, 3),
padding='SAME', name='refine_out')(refine_fm)
if use_bn:
res = KL.BatchNormalization(name='refine_out_bn', axis=3)(res)
return res
|
11525385
|
import FWCore.ParameterSet.Config as cms
pythiaUESettingsBlock = cms.PSet(
pythiaUESettings = cms.vstring(
'MSTU(21)=1 ! Check on possible errors during program execution',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(51)=7 ! structure function chosen (internal PDF CTEQ5L)',
'MSTP(52)=1 ! work with internal PDFs',
'PARP(82)=1.932 ! pt cutoff for multiparton interactions',
'PARP(89)=1800. ! sqrts for which PARP82 is set',
'PARP(90)=0.275 ! Multiple interactions: rescaling power',
'MSTP(95)=6 ! CR (color reconnection parameters)',
'PARP(77)=1.016 ! CR',
'PARP(78)=0.538 ! CR',
'PARP(80)=0.1 ! Prob. colored parton from BBR',
'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter',
'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter',
'PARP(62)=1.025 ! ISR cutoff',
'MSTP(91)=1 ! Gaussian primordial kT',
'PARP(93)=10.0 ! primordial kT-max',
'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model',
)
)
|
11525409
|
try:
import tensorflow as tf
except ImportError:
tf = None
class NegBinOutput(tf.keras.layers.Layer):
"""Negative binomial output layer"""
def __init__(
self,
original_dim=None,
name='neg_bin_output',
**kwargs
):
super().__init__(name=name, **kwargs)
self.means = tf.keras.layers.Dense(original_dim, activation='linear')
self.var = tf.keras.layers.Dense(original_dim, activation='linear')
def call(self, inputs, **kwargs):
activation, sf = inputs
mean, var = self.means(activation), self.var(activation)
# clip to log of largest values supported by log operation
bound = 60.
mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
invlinker_mean = tf.exp(mean_clip + sf)
invlinker_var = tf.exp(var_clip)
return [invlinker_mean, invlinker_var]
class NegBinSharedDispOutput(tf.keras.layers.Layer):
"""Negative binomial output layer with a single dispersion estimate per features"""
def __init__(
self,
original_dim=None,
name='neg_bin_shared_disp_output',
**kwargs
):
super().__init__(name=name, **kwargs)
self.means = tf.keras.layers.Dense(original_dim, activation='linear')
self.var = self.add_weight(
"var_bias",
shape=[1, original_dim]
)
def call(self, inputs, **kwargs):
activation, sf = inputs
mean = self.means(activation)
var = self.var
var = tf.broadcast_to(var, tf.shape(mean))
# clip to log of largest values supported by log operation
bound = 60.
mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
invlinker_mean = tf.exp(mean_clip + sf)
invlinker_var = tf.exp(var_clip)
return [invlinker_mean, invlinker_var]
class NegBinConstDispOutput(tf.keras.layers.Layer):
"""Negative binomial output layer with dispersion set as constant (=1)."""
def __init__(
self,
original_dim=None,
name='neg_bin_const_disp_output',
**kwargs
):
super().__init__(name=name, **kwargs)
self.means = tf.keras.layers.Dense(original_dim, activation='linear')
self.var_constant = 1.
def call(self, inputs, **kwargs):
activation, sf = inputs
mean = self.means(activation)
var = tf.constant([[self.var_constant]], dtype=activation.dtype)
var = tf.broadcast_to(var, tf.shape(mean))
# clip to log of largest values supported by log operation
bound = 60.
mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
invlinker_mean = tf.exp(mean_clip + sf)
invlinker_var = tf.exp(var_clip)
return [invlinker_mean, invlinker_var]
class GaussianOutput(tf.keras.layers.Layer):
"""
Gaussian output layer.
Size factor only makes sense if logged and data is positive and logged.
"""
def __init__(
self,
original_dim=None,
name='gaussian_output',
**kwargs
):
super().__init__(name=name, **kwargs)
self.means = tf.keras.layers.Dense(original_dim, activation='linear')
self.var = tf.keras.layers.Dense(original_dim, activation='linear')
def call(self, inputs, **kwargs):
activation, sf = inputs
mean, var = self.means(activation), self.var(activation)
# clip to log of largest values supported by log operation
bound = 60.
mean_clip = tf.clip_by_value(mean, tf.exp(-bound), tf.exp(bound), "decoder_clip")
var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
invlinker_mean = mean_clip + sf
invlinker_var = tf.exp(var_clip)
return [invlinker_mean, invlinker_var]
class GaussianSharedStdOutput(tf.keras.layers.Layer):
"""
Gaussian output layer with a single standard deviation estimate per features.
Size factor only makes sense if logged and data is positive and logged.
"""
def __init__(
self,
original_dim=None,
name='gaussian_shared_disp_output',
**kwargs
):
super().__init__(name=name, **kwargs)
self.means = tf.keras.layers.Dense(original_dim, activation='linear')
self.var = self.add_weight(
"var_bias",
shape=[1, original_dim]
)
def call(self, inputs, **kwargs):
activation, sf = inputs
mean = self.means(activation)
var = self.var
var = tf.broadcast_to(var, tf.shape(mean))
# clip to log of largest values supported by log operation
bound = 60.
mean_clip = tf.clip_by_value(mean, tf.exp(-bound), tf.exp(bound), "decoder_clip")
var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
invlinker_mean = mean_clip + sf
invlinker_var = tf.exp(var_clip)
return [invlinker_mean, invlinker_var]
class GaussianConstStdOutput(tf.keras.layers.Layer):
"""
Gaussian output layer with standard deviation set as constant (=1).
Size factor only makes sense if logged and data is positive and logged.
"""
def __init__(
self,
original_dim=None,
name='gaussian_const_disp_output',
**kwargs
):
super().__init__(name=name, **kwargs)
self.means = tf.keras.layers.Dense(original_dim, activation='linear')
self.var_constant = 1.
def call(self, inputs, **kwargs):
activation, sf = inputs
mean = self.means(activation)
var = tf.constant([[self.var_constant]], dtype=activation.dtype)
var = tf.broadcast_to(var, tf.shape(mean))
# clip to log of largest values supported by log operation
bound = 60.
mean_clip = tf.clip_by_value(mean, tf.exp(-bound), tf.exp(bound), "decoder_clip")
var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
invlinker_mean = mean_clip + sf
invlinker_var = tf.exp(var_clip)
return [invlinker_mean, invlinker_var]
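# Illustrative usage sketch (not part of the original module): calls one of the output layers
# on dummy tensors, assuming TensorFlow 2.x is installed. `activation` stands in for a decoder
# hidden state and `sf` for per-observation (log) size factors; both names are invented here.
if __name__ == '__main__':
    layer = NegBinOutput(original_dim=4)
    activation = tf.zeros((2, 8))
    sf = tf.zeros((2, 1))
    mean, var = layer([activation, sf])
    print(mean.shape, var.shape)  # (2, 4) (2, 4)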
|
11525429
|
from django.db import models
from data_refinery_common.models.computational_result import ComputationalResult
from data_refinery_common.models.sample import Sample
class SampleResultAssociation(models.Model):
sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
result = models.ForeignKey(
ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
)
class Meta:
db_table = "sample_result_associations"
unique_together = ("result", "sample")
|
11525447
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """Print the list of users."""
def handle(self, *args, **options):
# Print out users.
User = get_user_model()
for x in User.objects.all():
            print('\t'.join(map(str, [x.id, x.username, x.email, x.date_joined, x.last_login])))
|
11525458
|
from __future__ import annotations
from textwrap import dedent
from typing import Callable
import pytest
from pytest_mock import MockerFixture
from tomlkit.toml_document import TOMLDocument
from pyproject_fmt.formatter import format_pyproject
from pyproject_fmt.formatter.config import Config
from tests import Fmt
@pytest.fixture()
def fmt(mocker: MockerFixture) -> Fmt:
def _func(formatter: Callable[[TOMLDocument, Config], None], start: str, expected: str) -> None:
mocker.patch("pyproject_fmt.formatter._perform", formatter)
opts = Config(toml=dedent(start))
result = format_pyproject(opts)
expected = dedent(expected)
assert result == expected
return _func
|
11525474
|
import numpy as np
full_data_dir = '../cifar100/cifar-100-python/train'
vali_dir = '../cifar100/cifar-100-python/test'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
IMG_WIDTH = 32
IMG_HEIGHT = 32
IMG_DEPTH = 3
NUM_CLASS = 100
NUM_TRAIN_BATCH = 1
EPOCH_SIZE = 50000 * NUM_TRAIN_BATCH
def _read_one_batch(path):
    dicts = np.load(path, encoding='latin1', allow_pickle=True)
data = dicts['data']
label = np.array(dicts['fine_labels'])
return data, label
def read_in_all_images(address_list, shuffle=True):
data = np.array([]).reshape([0, IMG_WIDTH * IMG_HEIGHT * IMG_DEPTH])
label = np.array([])
for address in address_list:
batch_data, batch_label = _read_one_batch(address)
data = np.concatenate((data, batch_data))
label = np.concatenate((label, batch_label))
num_data = len(label)
data = data.reshape((num_data, IMG_HEIGHT * IMG_WIDTH, IMG_DEPTH), order='F')
data = data.reshape((num_data, IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH))
if shuffle is True:
order = np.random.permutation(num_data)
data = data[order, ...]
label = label[order]
data = data.astype(np.float32)
return data, label
def horizontal_flip(image, axis):
flip_prop = np.random.randint(low=0, high=2)
if flip_prop == 0:
image = np.flip(image, axis)
return image
def whitening_image(image_np):
for i in range(len(image_np)):
mean = np.mean(image_np[i, ...])
std = np.max([np.std(image_np[i, ...]), 1.0 / np.sqrt(IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH)])
image_np[i, ...] = (image_np[i, ...] - mean) / std
return image_np
def random_crop_and_flip(batch_data, padding_size):
pad_width = ((0, 0), (padding_size, padding_size), (padding_size, padding_size), (0, 0))
batch_data = np.pad(batch_data, pad_width=pad_width, mode='constant', constant_values=0)
cropped_batch = np.zeros(len(batch_data) * IMG_HEIGHT * IMG_WIDTH * IMG_DEPTH).reshape(
len(batch_data), IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH)
for i in range(len(batch_data)):
x_offset = np.random.randint(low=0, high=2 * padding_size, size=1)[0]
y_offset = np.random.randint(low=0, high=2 * padding_size, size=1)[0]
cropped_batch[i, ...] = batch_data[i, ...][x_offset:x_offset + IMG_HEIGHT,
y_offset:y_offset + IMG_WIDTH, :]
cropped_batch[i, ...] = horizontal_flip(image=cropped_batch[i, ...], axis=1)
return cropped_batch
def read_train_data():
path_list = []
path_list.append(full_data_dir)
data, label = read_in_all_images(path_list)
return data, label
def read_vali_data():
validation_array, validation_labels = read_in_all_images([vali_dir])
return validation_array, validation_labels
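# Illustrative usage sketch (not part of the original module): applies the augmentation and
# whitening helpers to a random batch so the expected shapes are visible. Synthetic data only;
# this does not read the CIFAR-100 files referenced above.
if __name__ == '__main__':
    fake_batch = np.random.rand(4, IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH).astype(np.float32)
    cropped = random_crop_and_flip(fake_batch, padding_size=2)
    whitened = whitening_image(cropped)
    print(cropped.shape, whitened.shape)  # (4, 32, 32, 3) (4, 32, 32, 3)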
|
11525480
|
import json
class BaseModel(object):
def to_dict(self):
return self.schema.dump(self).data
@classmethod
def from_dict(cls, dct):
return cls.schema.load(dct).data
|
11525494
|
import datetime
import logging
from django.core.management.base import BaseCommand
from main.models import Localization
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Deletes any localizations marked for deletion with null project, type, version, or media.'
def add_arguments(self, parser):
parser.add_argument('--min_age_days', type=int, default=30,
help="Minimum age in days of localization objects for deletion.")
def handle(self, **options):
BATCH_SIZE = 1000
num_deleted = 0
min_delta = datetime.timedelta(days=options['min_age_days'])
max_datetime = datetime.datetime.now(datetime.timezone.utc) - min_delta
while True:
# We cannot delete with a LIMIT query, so make a separate query
# using IDs.
deleted = Localization.objects.filter(deleted=True,
modified_datetime__lte=max_datetime)
null_project = Localization.objects.filter(project__isnull=True,
modified_datetime__lte=max_datetime)
null_meta = Localization.objects.filter(meta__isnull=True,
modified_datetime__lte=max_datetime)
null_version = Localization.objects.filter(version__isnull=True,
modified_datetime__lte=max_datetime)
null_media = Localization.objects.filter(media__isnull=True,
modified_datetime__lte=max_datetime)
loc_ids = (deleted | null_project | null_meta | null_version | null_media)\
.distinct()\
.values_list('pk', flat=True)[:BATCH_SIZE]
localizations = Localization.objects.filter(pk__in=loc_ids)
num_localizations = localizations.count()
if num_localizations == 0:
break
localizations.delete()
num_deleted += num_localizations
logger.info(f"Deleted a total of {num_deleted} localizations...")
logger.info(f"Deleted a total of {num_deleted} localizations!")
|
11525520
|
from runboat.github import CommitInfo
from runboat.k8s import DeploymentMode, _render_kubefiles, make_deployment_vars
from runboat.settings import BuildSettings
EXPECTED = """\
resources:
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
namespace: runboat-builds
namePrefix: "build-name-"
commonLabels:
runboat/build: "build-name"
commonAnnotations:
runboat/repo: "oca/mis-builder"
runboat/target-branch: "15.0"
runboat/pr: ""
runboat/git-commit: "<KEY>"
images:
- name: odoo
newName: "ghcr.io/oca/oca-ci"
newTag: "py3.8-odoo15.0"
secretGenerator:
- name: odoosecretenv
literals:
- PGPASSWORD=<PASSWORD>
configMapGenerator:
- name: odooenv
literals:
- PGDATABASE=build-name
- ADDONS_DIR=/mnt/data/odoo-addons-dir
- RUNBOAT_GIT_REPO=oca/mis-builder
- RUNBOAT_GIT_REF=abcdef123456789
- name: runboat-scripts
files:
- runboat-clone-and-install.sh
- runboat-initialize.sh
- runboat-cleanup.sh
- runboat-start.sh
generatorOptions:
disableNameSuffixHash: true
patches:
- target:
kind: PersistentVolumeClaim
name: data
patch: |-
- op: replace
path: /spec/storageClassName
value: my-storage-class
- target:
kind: Ingress
name: odoo
patch: |-
- op: replace
path: /spec/rules/0/host
value: build-slug.runboat.odoo-community.org
"""
def test_render_kubefiles() -> None:
deployment_vars = make_deployment_vars(
mode=DeploymentMode.deployment,
build_name="build-name",
slug="build-slug",
commit_info=CommitInfo(
repo="oca/mis-builder",
target_branch="15.0",
pr=None,
git_commit="<KEY>",
),
build_settings=BuildSettings(image="ghcr.io/oca/oca-ci:py3.8-odoo15.0"),
)
with _render_kubefiles(deployment_vars) as tmp_path:
assert (tmp_path / "kustomization.yaml").is_file()
assert (tmp_path / "deployment.yaml").is_file()
kustomization = (tmp_path / "kustomization.yaml").read_text()
assert kustomization.strip() == EXPECTED.strip()
|
11525532
|
import os
import sys
import logging
from pyomo.environ import *
from pyomo.opt import TerminationCondition
import numpy as np
import pandas as pd
class CALVIN():
def __init__(self, linksfile, ic=None, log_name="calvin"):
"""
Initialize CALVIN model object.
:param linksfile: (string) CSV file containing network link information
:param ic: (dict) Initial storage conditions for surface reservoirs
only used for annual optimization
:param log_name: A name for a logger - will be used to keep logs from different model runs separate in files.
Defaults to "calvin", which results in a log file in the current working directory named "calvin.log".
You can change this each time you instantiate the CALVIN class if you want to output separate logs
for different runs. Otherwise, all results will be appended to the log file (not overwritten). If you
run multiple copies of CALVIN simultaneously, make sure to change this, or you could get errors writing
to the log file.
Do not provide a full path to a log file here because this value is also used in a way that is *not* a
file path. If being able to specify a full path is important for your workflow, please raise a GitHub
issue. It could be supported, but there is no need at this moment.
:returns: CALVIN model object
"""
# set up logging code
self.log = logging.getLogger(log_name)
if not self.log.hasHandlers(): # hasHandlers will only be True if someone already called CALVIN with the same log_name in the same session
self.log.setLevel("DEBUG")
screen_handler = logging.StreamHandler(sys.stdout)
screen_handler.setLevel(logging.INFO)
screen_formatter = logging.Formatter('%(levelname)s - %(message)s')
screen_handler.setFormatter(screen_formatter)
self.log.addHandler(screen_handler)
file_handler = logging.FileHandler("{}.log".format(log_name))
file_handler.setLevel(logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_formatter)
self.log.addHandler(file_handler)
df = pd.read_csv(linksfile)
df['link'] = df.i.map(str) + '_' + df.j.map(str) + '_' + df.k.map(str)
df.set_index('link', inplace=True)
self.df = df
self.linksfile = os.path.splitext(linksfile)[0] # filename w/o extension
# self.T = len(self.df)
SR_stats = pd.read_csv('calvin/data/SR_stats.csv', index_col=0).to_dict()
self.min_storage = SR_stats['min']
self.max_storage = SR_stats['max']
if ic:
self.apply_ic(ic)
# a few network fixes to make things work
self.add_ag_region_sinks()
self.fix_hydropower_lbs()
self.nodes = pd.unique(df[['i','j']].values.ravel()).tolist()
self.links = list(zip(df.i,df.j,df.k))
self.networkcheck() # make sure things aren't broken
def apply_ic(self, ic):
"""
Set initial storage conditions.
:param ic: (dict) initial storage values
:returns: nothing, but modifies the model object
"""
for k in ic:
ix = (self.df.i.str.contains('INITIAL') &
self.df.j.str.contains(k))
self.df.loc[ix, ['lower_bound','upper_bound']] = ic[k]
def inflow_multiplier(self, x):
"""
Multiply all network inflows by a constant.
:param x: (float) value to multiply inflows
:returns: nothing, but modifies the model object
"""
ix = self.df.i.str.contains('INFLOW')
self.df.loc[ix, ['lower_bound','upper_bound']] *= x
def eop_constraint_multiplier(self, x):
"""
Set end-of-period storage constraints as a fraction of maximum
available storage. Needed for limited foresight (annual) optimization.
:param x: (float) fraction of maximum storage to set lower bound
:returns: nothing, but modifies the model object
"""
for k in self.max_storage:
ix = (self.df.i.str.contains(k) &
self.df.j.str.contains('FINAL'))
lb = self.min_storage[k] + (self.max_storage[k]-self.min_storage[k])*x
self.df.loc[ix,'lower_bound'] = lb
self.df.loc[ix,'upper_bound'] = self.max_storage[k]
def no_gw_overdraft(self):
"""
Impose constraints to prevent groundwater overdraft
(not currently implemented)
"""
pass
def networkcheck(self):
"""
Confirm constraint feasibility for the model object.
(No inputs or outputs)
:raises: ValueError when infeasibilities are identified.
"""
nodes = self.nodes
links = self.df.values
num_in = {n: 0 for n in nodes}
num_out = {n: 0 for n in nodes}
lb_in = {n: 0 for n in nodes}
lb_out = {n: 0 for n in nodes}
ub_in = {n: 0 for n in nodes}
ub_out = {n: 0 for n in nodes}
# loop over links
for l in links:
lb = float(l[5])
ub = float(l[6])
num_in[l[1]] += 1
lb_in[l[1]] += lb
ub_in[l[1]] += ub
num_out[l[0]] += 1
lb_out[l[0]] += lb
ub_out[l[0]] += ub
if lb > ub:
raise ValueError('lb > ub for link %s' % (l[0]+'-'+l[1]))
for n in nodes:
if num_in[n] == 0 and n not in ['SOURCE','SINK']:
raise ValueError('no incoming link for ' + n)
if num_out[n] == 0 and n not in ['SOURCE','SINK']:
raise ValueError('no outgoing link for ' + n)
if ub_in[n] < lb_out[n]:
raise ValueError('ub_in < lb_out for %s (%d < %d)' % (n, ub_in[n], lb_out[n]))
if lb_in[n] > ub_out[n]:
raise ValueError('lb_in > ub_out for %s (%d > %d)' % (n, lb_in[n], ub_out[n]))
def add_ag_region_sinks(self):
"""
Hack to get rid of surplus water at no cost from agricultural regions.
Called internally when model is initialized.
:returns: nothing, but modifies the model object
"""
df = self.df
links = df[df.i.str.contains('HSU') & ~df.j.str.contains('DBUG')].copy(deep=True)
if not links.empty:
maxub = links.upper_bound.max()
links.j = links.apply(lambda l: 'SINK.'+l.i.split('.')[1], axis=1)
links.cost = 0.0
links.amplitude = 1.0
links.lower_bound = 0.0
links.upper_bound = maxub
links['link'] = links.i.map(str) + '_' + links.j.map(str) + '_' + links.k.map(str)
links.set_index('link', inplace=True)
            self.df = pd.concat([self.df, links.drop_duplicates()])
def fix_hydropower_lbs(self):
"""
Hack to fix lower bound constraints on piecewise hydropower links.
Storage piecewise links > 0 should have 0.0 lower bound, and
the k=0 pieces should always have lb = dead pool.
:returns: nothing, but modifies the model object
"""
def get_lb(link):
if link.i.split('.')[0] == link.j.split('.')[0]:
if link.k > 0:
return 0.0
elif link.i.split('.')[0] in self.min_storage:
return min(self.min_storage[link.i.split('.')[0]], link.lower_bound)
return link.lower_bound
ix = (self.df.i.str.contains('SR_') & self.df.j.str.contains('SR_'))
self.df.loc[ix, 'lower_bound'] = self.df.loc[ix].apply(get_lb, axis=1)
def remove_debug_links(self):
"""
Remove debug links from model object.
:returns: dataframe of links, excluding debug links.
"""
df = self.df
ix = df.index[df.index.str.contains('DBUG')]
df.drop(ix, inplace=True, axis=0)
self.nodes = pd.unique(df[['i','j']].values.ravel()).tolist()
self.links = list(zip(df.i,df.j,df.k))
return df
def create_pyomo_model(self, debug_mode=False, debug_cost=2e7):
"""
Use link data to create Pyomo model (constraints and objective function)
But do not solve yet.
:param debug_mode: (boolean) Whether to run in debug mode.
Use when there may be infeasibilities in the network.
:param debug_cost: When in debug mode, assign this cost ($/AF) to flow on debug links.
This should be an arbitrarily high number.
:returns: nothing, but creates the model object (self.model)
"""
# work on a local copy of the dataframe
if not debug_mode and self.df.index.str.contains('DBUG').any():
# previously ran in debug mode, but now done
df = self.remove_debug_links()
df.to_csv(self.linksfile + '-final.csv')
else:
df = self.df
self.log.info('Creating Pyomo Model (debug=%s)' % debug_mode)
model = ConcreteModel()
model.N = Set(initialize=self.nodes)
model.k = Set(initialize=range(15))
model.A = Set(within=model.N*model.N*model.k,
initialize=self.links, ordered=True)
model.source = Param(initialize='SOURCE')
model.sink = Param(initialize='SINK')
def init_params(p):
if p == 'cost' and debug_mode:
return (lambda model,i,j,k: debug_cost
if ('DBUG' in str(i)+'_'+str(j))
else 1.0)
else:
return lambda model,i,j,k: df.loc[str(i)+'_'+str(j)+'_'+str(k)][p]
model.u = Param(model.A, initialize=init_params('upper_bound'), mutable=True)
model.l = Param(model.A, initialize=init_params('lower_bound'), mutable=True)
model.a = Param(model.A, initialize=init_params('amplitude'))
model.c = Param(model.A, initialize=init_params('cost'))
# The flow over each arc
model.X = Var(model.A, within=Reals)
# Minimize total cost
def obj_fxn(model):
return sum(model.c[i,j,k]*model.X[i,j,k] for (i,j,k) in model.A)
model.total = Objective(rule=obj_fxn, sense=minimize)
# Enforce an upper bound limit on the flow across each arc
def limit_rule_upper(model, i, j, k):
return model.X[i,j,k] <= model.u[i,j,k]
model.limit_upper = Constraint(model.A, rule=limit_rule_upper)
# Enforce a lower bound limit on the flow across each arc
def limit_rule_lower(model, i, j, k):
return model.X[i,j,k] >= model.l[i,j,k]
model.limit_lower = Constraint(model.A, rule=limit_rule_lower)
# To speed up creating the mass balance constraints, first
# create dictionaries of arcs_in and arcs_out of every node
# These are NOT Pyomo data, and Pyomo does not use "model._" at all
arcs_in = {}
arcs_out = {}
def arc_list_hack(model, i,j,k):
if j not in arcs_in:
arcs_in[j] = []
arcs_in[j].append((i,j,k))
if i not in arcs_out:
arcs_out[i] = []
arcs_out[i].append((i,j,k))
return [0]
model._ = Set(model.A, initialize=arc_list_hack)
# Enforce flow through each node (mass balance)
def flow_rule(model, node):
if node in [value(model.source), value(model.sink)]:
return Constraint.Skip
outflow = sum(model.X[i,j,k]/model.a[i,j,k] for i,j,k in arcs_out[node])
inflow = sum(model.X[i,j,k] for i,j,k in arcs_in[node])
return inflow == outflow
model.flow = Constraint(model.N, rule=flow_rule)
model.dual = Suffix(direction=Suffix.IMPORT)
self.model = model
def solve_pyomo_model(self, solver='glpk', nproc=1, debug_mode=False, maxiter=10):
"""
Solve Pyomo model (must be called after create_pyomo_model)
:param solver: (string) solver name. glpk, cplex, cbc, gurobi.
:param nproc: (int) number of processors. 1=serial.
:param debug_mode: (boolean) Whether to run in debug mode.
Use when there may be infeasibilities in the network.
:param maxiter: (int) maximum iterations for debug mode.
:returns: nothing, but assigns results to self.model.solutions.
:raises: RuntimeError, if problem is found to be infeasible.
"""
from pyomo.opt import SolverFactory
opt = SolverFactory(solver)
        if nproc > 1 and solver != 'glpk':
opt.options['threads'] = nproc
if debug_mode:
run_again = True
i = 0
vol_total = 0
while run_again and i < maxiter:
self.log.info('-----Solving Pyomo Model (debug=%s)' % debug_mode)
self.results = opt.solve(self.model)
self.log.info('Finished. Fixing debug flows...')
run_again,vol = self.fix_debug_flows()
i += 1
vol_total += vol
if run_again:
self.log.info(('Warning: Debug mode maximum iterations reached.'
' Will still try to solve without debug mode.'))
else:
self.log.info('All debug flows eliminated (iter=%d, vol=%0.2f)' % (i,vol_total))
else:
self.log.info('-----Solving Pyomo Model (debug=%s)' % debug_mode)
self.results = opt.solve(self.model, tee=False)
if self.results.solver.termination_condition == TerminationCondition.optimal:
self.log.info('Optimal Solution Found (debug=%s).' % debug_mode)
self.model.solutions.load_from(self.results)
else:
raise RuntimeError('Problem Infeasible. Run again starting from debug mode.')
def fix_debug_flows(self, tol=1e-7):
"""
Find infeasible constraints where debug flows occur.
Fix them by either raising the UB, or lowering the LB.
:param tol: (float) Tolerance to identify nonzero debug flows
:returns run_again: (boolean) whether debug mode needs to run again
:returns vol: (float) total volume of constraint changes
also modifies the model object.
"""
df, model = self.df, self.model
dbix = (df.i.str.contains('DBUGSRC') | df.j.str.contains('DBUGSNK'))
debuglinks = df[dbix].values
run_again = False
vol_total = 0
for dbl in debuglinks:
s = tuple(dbl[0:3])
if model.X[s].value > tol:
run_again = True
# if we need to get rid of extra water,
# raise some upper bounds (just do them all)
if 'DBUGSNK' in dbl[1]:
raiselinks = df[(df.i == dbl[0]) & ~ df.j.str.contains('DBUGSNK')].values
for l in raiselinks:
s2 = tuple(l[0:3])
iv = model.u[s2].value
v = model.X[s].value*1.2
model.u[s2].value += v
vol_total += v
self.log.info('%s UB raised by %0.2f (%0.2f%%)' % (l[0]+'_'+l[1], v, v*100/iv))
df.loc['_'.join(str(x) for x in l[0:3]), 'upper_bound'] = model.u[s2].value
# if we need to bring in extra water
# this is a much more common problem
# want to avoid reducing carryover requirements. look downstream instead.
max_depth = 10
if 'DBUGSRC' in dbl[0]:
vol_to_reduce = max(model.X[s].value*1.2, 0.5)
self.log.info('Volume to reduce: %.2e' % vol_to_reduce)
children = [dbl[1]]
for i in range(max_depth):
children += df[df.i.isin(children)
& ~ df.j.str.contains('DBUGSNK')].j.tolist()
children = set(children)
reducelinks = (df[df.i.isin(children)
& (df.lower_bound > 0)]
.sort_values(by='lower_bound', ascending=False).values)
if reducelinks.size == 0:
raise RuntimeError(('Not possible to reduce LB on links'
' with origin %s by volume %0.2f' %
(dbl[1],vol_to_reduce)))
for l in reducelinks:
s2 = tuple(l[0:3])
iv = model.l[s2].value
dl = model.dual[model.limit_lower[s2]] if s2 in model.limit_lower else 0.0
if iv > 0 and vol_to_reduce > 0 and dl > 1e6:
v = min(vol_to_reduce, iv)
# don't allow big reductions on carryover links
carryover = ['SR_', 'INITIAL', 'FINAL', 'GW_']
if any(c in l[0] for c in carryover) and any(c in l[1] for c in carryover):
v = min(v, max(25.0, 0.1*iv))
model.l[s2].value -= v
vol_to_reduce -= v
vol_total += v
self.log.info('%s LB reduced by %.2e (%0.2f%%). Dual=%.2e' % (l[0]+'_'+l[1], v, v*100/iv, dl))
df.loc['_'.join(str(x) for x in l[0:3]), 'lower_bound'] = model.l[s2].value
if vol_to_reduce == 0:
break
if vol_to_reduce > 0:
self.log.info('Debug -> %s: could not reduce full amount (%.2e left)' % (dbl[1],vol_to_reduce))
self.df, self.model = df, model
return run_again, vol_total
|
11525542
|
from flask import render_template
from flask import request
from flask import Blueprint
from flask import flash
from flask import redirect
from flask import url_for
from flask import jsonify
from flaskapp.config import ExampleData
from flaskapp.utils import gmaps_tool as tls
main = Blueprint("main", __name__)
@main.route("/", methods=["GET"])
def home():
return "<h1>Homepage</h1>"
# A route to return all of the available entries in our catalog.
@main.route("/api/v1/resources/books/all", methods=["GET"])
def api_all():
return jsonify(ExampleData.data)
@main.route("/api", methods=["GET"])
def api():
""" Provide parameters as:
    https://www.URL.com/api?location=the-location&name=the-place-name
    or, to look up a single place by its ID:
    https://www.URL.com/api?placeId=the-place-id
    """
if "placeId" in request.args:
place_id = str(request.args["placeId"])
result = tls.get_place_data(place_id)
elif "location" in request.args and "name" in request.args:
location = str(request.args["location"])
name = str(request.args["name"])
result = tls.get_ptimes_data(location, name)
else:
        return "Error: Please specify either a placeId, or both a location and a name."
# Use the jsonify function from Flask to convert our list of
# Python dictionaries to the JSON format.
return jsonify(result)
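# A minimal usage sketch (assumes the app is served locally on port 5000 and that the
# `requests` package is installed; the parameter values below are purely illustrative):
#   import requests
#   requests.get("http://localhost:5000/api", params={"placeId": "some-place-id"}).json()
#   requests.get("http://localhost:5000/api",
#                params={"location": "the-location", "name": "the-place-name"}).json()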
|
11525545
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../pytorch'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import yaml
import pickle
import matplotlib.pyplot as plt
from sklearn import metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from data_generator import DataGenerator, InferenceDataGenerator
from main_pytorch import get_model
from utilities import (create_folder, search_meta_by_mixture_name,
get_sed_from_meta, ideal_binary_mask, target_to_labels,
get_ground_truth_indexes, read_audio, write_audio,
ideal_ratio_mask)
from models_pytorch import get_model, move_data_to_gpu
import config
from features import LogMelExtractor
from get_avg_stats import get_est_event_list, get_ref_event_list
from stft import stft, real_to_complex, istft, get_cola_constant, overlap_add
def plot_waveform(args):
    # Arguments & parameters
workspace = args.workspace
holdout_fold = args.holdout_fold
scene_type = args.scene_type
snr = args.snr
cuda = args.cuda
labels = config.labels
classes_num = len(labels)
sample_rate = config.sample_rate
window_size = config.window_size
overlap = config.overlap
hop_size = window_size-overlap
mel_bins = config.mel_bins
seq_len = config.seq_len
ix_to_lb = config.ix_to_lb
thres = 0.1
batch_size = 24
# Paths
hdf5_path = os.path.join(workspace, 'features', 'logmel',
'scene_type={},snr={}'.format(scene_type, snr), 'development.h5')
yaml_path = os.path.join(workspace, 'mixture.yaml')
audios_dir = os.path.join(workspace, 'mixed_audios',
'scene_type={},snr={}'.format(scene_type, snr))
# Load yaml file
load_yaml_time = time.time()
with open(yaml_path, 'r') as f:
        meta = yaml.load(f, Loader=yaml.FullLoader)
print('Load yaml file time: {:.3f} s'.format(time.time() - load_yaml_time))
# Data generator
generator = InferenceDataGenerator(
hdf5_path=hdf5_path,
batch_size=batch_size,
holdout_fold=holdout_fold)
generate_func = generator.generate_validate(
data_type='validate',
shuffle=False,
max_iteration=None)
# Evaluate on mini-batch
for (iteration, data) in enumerate(generate_func):
print(iteration)
(batch_x, batch_y, batch_audio_names) = data
batch_x = move_data_to_gpu(batch_x, cuda)
batch_gt_masks = []
batch_single_gt_masks = []
batch_mixture_stfts = []
for n in range(len(batch_audio_names)):
curr_meta = search_meta_by_mixture_name(meta, batch_audio_names[n])
curr_events = curr_meta['events']
gt_indexes = get_ground_truth_indexes(curr_events)
gt_sed = get_sed_from_meta(curr_events) # (seq_len, classes_num)
(events_stft, scene_stft, mixture_stft) = \
generator.get_events_scene_mixture_stft(batch_audio_names[n])
gt_mask = ideal_ratio_mask(events_stft, scene_stft) # (seq_len, fft_size)
gt_masks = gt_mask[:, :, None] * gt_sed[:, None, :] # (seq_len, fft_size, classes_num)
gt_masks = gt_masks.astype(np.float32)
batch_gt_masks.append(gt_masks)
batch_single_gt_masks.append(gt_mask)
batch_mixture_stfts.append(mixture_stft)
# Plot waveform & spectrogram & ideal ratio mask
if True:
for n in range(len(batch_x)):
print(batch_audio_names[n])
print(batch_y[n])
target_labels = target_to_labels(batch_y[n], labels)
print(target_labels)
mixed_audio_path = os.path.join(audios_dir, batch_audio_names[n])
(mixed_audio, _) = read_audio(mixed_audio_path, target_fs=config.sample_rate, mono=True)
mixed_audio /= np.max(np.abs(mixed_audio))
fig, axs = plt.subplots(3, 1, figsize=(6, 6))
axs[0].plot(mixed_audio)
axs[0].set_title('Waveform')
axs[0].xaxis.set_ticks([0, len(mixed_audio)])
axs[0].xaxis.set_ticklabels(['0.0', '10.0 s'])
axs[0].set_xlim(0, len(mixed_audio))
axs[0].set_ylim(-1, 1)
axs[0].set_xlabel('time')
axs[0].set_ylabel('Amplitude')
axs[1].matshow(np.log(batch_mixture_stfts[n]).T, origin='lower', aspect='auto', cmap='jet')
axs[1].set_title('Spectrogram')
axs[1].xaxis.set_ticks([0, 310])
axs[1].xaxis.set_ticklabels(['0.0', '10.0 s'])
axs[1].xaxis.tick_bottom()
axs[1].yaxis.set_ticks([0, 1024])
axs[1].yaxis.set_ticklabels(['0', '1025'])
axs[1].set_xlabel('time')
axs[1].set_ylabel('FFT bins')
axs[2].matshow(batch_single_gt_masks[n].T, origin='lower', aspect='auto', cmap='jet')
axs[2].set_title('Ideal ratio mask')
axs[2].xaxis.set_ticks([0, 310])
axs[2].xaxis.set_ticklabels(['0.0', '10.0 s'])
axs[2].xaxis.tick_bottom()
axs[2].yaxis.set_ticks([0, 1024])
axs[2].yaxis.set_ticklabels(['0', '1025'])
axs[2].set_xlabel('time')
axs[2].set_ylabel('FFT bins')
plt.tight_layout()
plt.show()
def plot_mel_masks(args):
    # Arguments & parameters
workspace = args.workspace
holdout_fold = args.holdout_fold
scene_type = args.scene_type
snr = args.snr
iteration = args.iteration
model_type = args.model_type
cuda = args.cuda
labels = config.labels
classes_num = len(labels)
sample_rate = config.sample_rate
window_size = config.window_size
overlap = config.overlap
hop_size = window_size-overlap
mel_bins = config.mel_bins
seq_len = config.seq_len
ix_to_lb = config.ix_to_lb
thres = 0.1
batch_size = 24
# Paths
hdf5_path = os.path.join(workspace, 'features', 'logmel',
'scene_type={},snr={}'.format(scene_type, snr), 'development.h5')
model_path = os.path.join(workspace, 'models', 'main_pytorch',
'model_type={}'.format(model_type), 'scene_type={},snr={}'
''.format(scene_type, snr), 'holdout_fold{}'.format(holdout_fold),
'md_{}_iters.tar'.format(iteration))
yaml_path = os.path.join(workspace, 'mixture.yaml')
audios_dir = os.path.join(workspace, 'mixed_audios',
'scene_type={},snr={}'.format(scene_type, snr))
sep_wavs_dir = os.path.join(workspace, 'separated_wavs', 'main_pytorch',
'model_type={}'.format(model_type),
'scene_type={},snr={}'.format(scene_type, snr),
'holdout_fold{}'.format(holdout_fold))
create_folder(sep_wavs_dir)
# Load yaml file
load_yaml_time = time.time()
with open(yaml_path, 'r') as f:
        meta = yaml.load(f, Loader=yaml.FullLoader)
print('Load yaml file time: {:.3f} s'.format(time.time() - load_yaml_time))
feature_extractor = LogMelExtractor(
sample_rate=sample_rate,
window_size=window_size,
overlap=overlap,
mel_bins=mel_bins)
inverse_melW = feature_extractor.get_inverse_melW()
# Load model
Model = get_model(model_type)
model = Model(classes_num, seq_len, mel_bins, cuda)
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['state_dict'])
if cuda:
model.cuda()
# Data generator
generator = InferenceDataGenerator(
hdf5_path=hdf5_path,
batch_size=batch_size,
holdout_fold=holdout_fold)
generate_func = generator.generate_validate(
data_type='validate',
shuffle=False,
max_iteration=None)
# Evaluate on mini-batch
for (iteration, data) in enumerate(generate_func):
(batch_x, batch_y, batch_audio_names) = data
batch_x = move_data_to_gpu(batch_x, cuda)
# Predict
with torch.no_grad():
model.eval()
(batch_output, batch_bottleneck) = model(
batch_x, return_bottleneck=True)
batch_output = batch_output.data.cpu().numpy()
'''(batch_size, classes_num)'''
batch_bottleneck = batch_bottleneck.data.cpu().numpy()
'''(batch_size, classes_num, seq_len, mel_bins)'''
batch_pred_sed = np.mean(batch_bottleneck, axis=-1)
batch_pred_sed = np.transpose(batch_pred_sed, (0, 2, 1))
'''(batch_size, seq_len, classes_num)'''
batch_gt_masks = []
for n in range(len(batch_audio_names)):
curr_meta = search_meta_by_mixture_name(meta, batch_audio_names[n])
curr_events = curr_meta['events']
pred_indexes = np.where(batch_output[n] > thres)[0]
gt_indexes = get_ground_truth_indexes(curr_events)
gt_sed = get_sed_from_meta(curr_events) # (seq_len, classes_num)
pred_sed = np.zeros((seq_len, classes_num))
pred_sed[:, pred_indexes] = batch_pred_sed[n][:, pred_indexes] # (seq_len, classes_num)
(events_stft, scene_stft, _) = generator.get_events_scene_mixture_stft(batch_audio_names[n])
events_stft = np.dot(events_stft, feature_extractor.melW)
scene_stft = np.dot(scene_stft, feature_extractor.melW)
gt_mask = ideal_binary_mask(events_stft, scene_stft) # (seq_len, fft_size)
gt_masks = gt_mask[:, :, None] * gt_sed[:, None, :] # (seq_len, fft_size, classes_num)
gt_masks = gt_masks.astype(np.float32)
batch_gt_masks.append(gt_masks)
pred_masks = batch_bottleneck[n].transpose(1, 2, 0) # (seq_len, fft_size, classes_num)
# Save out separated audio
if True:
curr_audio_name = curr_meta['mixture_name']
audio_path = os.path.join(audios_dir, curr_audio_name)
(mixed_audio, fs) = read_audio(audio_path, target_fs=sample_rate, mono=True)
out_wav_path = os.path.join(sep_wavs_dir, curr_audio_name)
write_audio(out_wav_path, mixed_audio, sample_rate)
window = np.hamming(window_size)
mixed_stft_cmplx = stft(x=mixed_audio, window_size=window_size, hop_size=hop_size, window=window, mode='complex')
mixed_stft_cmplx = mixed_stft_cmplx[0 : seq_len, :]
mixed_stft = np.abs(mixed_stft_cmplx)
for k in gt_indexes:
masked_stft = np.dot(pred_masks[:, :, k], inverse_melW) * mixed_stft
masked_stft_cmplx = real_to_complex(masked_stft, mixed_stft_cmplx)
frames = istft(masked_stft_cmplx)
cola_constant = get_cola_constant(hop_size, window)
sep_audio = overlap_add(frames, hop_size, cola_constant)
sep_wav_path = os.path.join(sep_wavs_dir, '{}_{}.wav'.format(os.path.splitext(curr_audio_name)[0], ix_to_lb[k]))
write_audio(sep_wav_path, sep_audio, sample_rate)
                    print('Audio written to {}'.format(sep_wav_path))
# Visualize learned representations
if True:
for n in range(len(batch_output)):
# Plot segmentation masks. (00013.wav is used for plot in the paper)
print('audio_name: {}'.format(batch_audio_names[n]))
print('target: {}'.format(batch_y[n]))
target_labels = target_to_labels(batch_y[n], labels)
print('target labels: {}'.format(target_labels))
(events_stft, scene_stft, _) = generator.get_events_scene_mixture_stft(batch_audio_names[n])
fig, axs = plt.subplots(7, 7, figsize=(15, 10))
for k in range(classes_num):
axs[k // 6, k % 6].matshow(batch_bottleneck[n, k].T, origin='lower', aspect='auto', cmap='jet')
if labels[k] in target_labels:
color = 'r'
else:
color = 'k'
axs[k // 6, k % 6].set_title(labels[k], color=color)
axs[k // 6, k % 6].xaxis.set_ticks([])
axs[k // 6, k % 6].yaxis.set_ticks([])
axs[k // 6, k % 6].set_xlabel('time')
axs[k // 6, k % 6].set_ylabel('mel bins')
axs[6, 5].matshow(np.log(events_stft + 1e-8).T, origin='lower', aspect='auto', cmap='jet')
axs[6, 5].set_title('Spectrogram (in log scale)')
axs[6, 5].xaxis.set_ticks([0, 310])
axs[6, 5].xaxis.set_ticklabels(['0.0', '10.0 s'])
axs[6, 5].xaxis.tick_bottom()
axs[6, 5].yaxis.set_ticks([0, 1024])
axs[6, 5].yaxis.set_ticklabels(['0', '1025'])
axs[6, 5].set_xlabel('time')
axs[6, 5].set_ylabel('FFT bins')
axs[6, 6].matshow(np.log(np.dot(events_stft, feature_extractor.melW) + 1e-8).T, origin='lower', aspect='auto', cmap='jet')
                axs[6, 6].set_title('Log mel spectrogram')
axs[6, 6].xaxis.set_ticks([0, 310])
axs[6, 6].xaxis.set_ticklabels(['0.0', '10.0 s'])
axs[6, 6].xaxis.tick_bottom()
axs[6, 6].yaxis.set_ticks([0, 63])
axs[6, 6].yaxis.set_ticklabels(['0', '64'])
axs[6, 6].set_xlabel('time')
axs[6, 6].set_ylabel('mel bins')
plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
plt.show()
# Plot frame-wise SED
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
score_mat = []
for k in range(classes_num):
score = np.mean(batch_bottleneck[n, k], axis=-1)
score_mat.append(score)
score_mat = np.array(score_mat)
ax.matshow(score_mat, origin='lower', aspect='auto', cmap='jet')
ax.set_title('Frame-wise predictions')
ax.xaxis.set_ticks([0, 310])
ax.xaxis.set_ticklabels(['0.0', '10.0 s'])
ax.xaxis.tick_bottom()
ax.set_xlabel('time')
ax.yaxis.set_ticks(np.arange(classes_num))
ax.yaxis.set_ticklabels(config.labels, fontsize='xx-small')
ax.yaxis.grid(color='k', linestyle='solid', linewidth=0.3)
plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
plt.show()
# Plot event-wise SED
est_event_list = get_est_event_list(batch_pred_sed[n:n+1], batch_audio_names[n:n+1], labels)
event_mat = event_list_to_matrix(est_event_list)
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.matshow(event_mat.T, origin='lower', aspect='auto', cmap='jet')
ax.set_title('Event-wise predictions')
ax.xaxis.set_ticks([0, 310])
ax.xaxis.set_ticklabels(['0.0', '10.0 s'])
ax.xaxis.tick_bottom()
ax.set_xlabel('time')
ax.yaxis.set_ticks(np.arange(classes_num))
ax.yaxis.set_ticklabels(config.labels, fontsize='xx-small')
ax.yaxis.grid(color='k', linestyle='solid', linewidth=0.3)
plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
plt.show()
# Plot event-wise ground truth
ref_event_list = get_ref_event_list(meta, batch_audio_names[n:n+1])
event_mat = event_list_to_matrix(ref_event_list)
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.matshow(event_mat.T, origin='lower', aspect='auto', cmap='jet')
ax.set_title('Event-wise ground truth')
ax.xaxis.set_ticks([0, 310])
ax.xaxis.set_ticklabels(['0.0', '10.0 s'])
ax.xaxis.tick_bottom()
ax.set_xlabel('time')
ax.yaxis.set_ticks(np.arange(classes_num))
ax.yaxis.set_ticklabels(config.labels, fontsize='xx-small')
ax.yaxis.grid(color='k', linestyle='solid', linewidth=0.3)
plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
plt.show()
def event_list_to_matrix(event_list):
lb_to_ix = config.lb_to_ix
hop_size = config.window_size - config.overlap
frames_per_second = config.sample_rate / float(hop_size)
mat = np.zeros((config.seq_len, len(config.labels)))
for event in event_list:
onset = int(event['onset'] * frames_per_second) + 1
offset = int(event['offset'] * frames_per_second) + 1
event_label = event['event_label']
ix = lb_to_ix[event_label]
mat[onset : offset, ix] = 1
return mat
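# Note on the frame arithmetic above (derived from the code, no extra assumptions): with a hop of
# config.window_size - config.overlap samples, frames_per_second = config.sample_rate / hop, so an
# event spanning onset t0 to offset t1 seconds marks rows int(t0 * fps) + 1 through int(t1 * fps)
# of the column for its label with ones.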
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_waveform = subparsers.add_parser('waveform')
parser_waveform.add_argument('--workspace', type=str, required=True)
parser_waveform.add_argument('--scene_type', type=str, required=True)
parser_waveform.add_argument('--snr', type=int, required=True)
parser_waveform.add_argument('--holdout_fold', type=int)
parser_waveform.add_argument('--cuda', action='store_true', default=False)
parser_mel_masks = subparsers.add_parser('mel_masks')
parser_mel_masks.add_argument('--workspace', type=str, required=True)
parser_mel_masks.add_argument('--model_type', type=str, required=True)
parser_mel_masks.add_argument('--scene_type', type=str, required=True)
parser_mel_masks.add_argument('--snr', type=int, required=True)
parser_mel_masks.add_argument('--holdout_fold', type=int)
parser_mel_masks.add_argument('--iteration', type=int, required=True)
parser_mel_masks.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'waveform':
plot_waveform(args)
elif args.mode == 'mel_masks':
plot_mel_masks(args)
else:
        raise Exception('Unknown mode: {}'.format(args.mode))
|
11525553
|
import pytest
from dbt.tests.util import run_dbt
from tests.functional.graph_selection.fixtures import SelectionFixtures
def run_schema_and_assert(project, include, exclude, expected_tests):
# deps must run before seed
run_dbt(["deps"])
run_dbt(["seed"])
results = run_dbt(["run", "--exclude", "never_selected"])
assert len(results) == 10
test_args = ["test"]
if include:
test_args += ["--select", include]
if exclude:
test_args += ["--exclude", exclude]
test_results = run_dbt(test_args)
ran_tests = sorted([test.node.name for test in test_results])
expected_sorted = sorted(expected_tests)
assert ran_tests == expected_sorted
class TestSchemaTestGraphSelection(SelectionFixtures):
@pytest.fixture(scope="class")
def packages(self):
return {
"packages": [
{
"git": "https://github.com/dbt-labs/dbt-integration-project",
"revision": "dbt/1.0.0",
}
]
}
def test_schema_tests_no_specifiers(self, project):
run_schema_and_assert(
project,
None,
None,
[
"not_null_emails_email",
"unique_table_model_id",
"unique_users_id",
"unique_users_rollup_gender",
],
)
def test_schema_tests_specify_model(self, project):
run_schema_and_assert(project, "users", None, ["unique_users_id"])
def test_schema_tests_specify_tag(self, project):
run_schema_and_assert(
project, "tag:bi", None, ["unique_users_id", "unique_users_rollup_gender"]
)
def test_schema_tests_specify_model_and_children(self, project):
run_schema_and_assert(
project, "users+", None, ["unique_users_id", "unique_users_rollup_gender"]
)
def test_schema_tests_specify_tag_and_children(self, project):
run_schema_and_assert(
project,
"tag:base+",
None,
["not_null_emails_email", "unique_users_id", "unique_users_rollup_gender"],
)
def test_schema_tests_specify_model_and_parents(self, project):
run_schema_and_assert(
project,
"+users_rollup",
None,
["unique_users_id", "unique_users_rollup_gender"],
)
def test_schema_tests_specify_model_and_parents_with_exclude(self, project):
run_schema_and_assert(project, "+users_rollup", "users_rollup", ["unique_users_id"])
def test_schema_tests_specify_exclude_only(self, project):
run_schema_and_assert(
project,
None,
"users_rollup",
["not_null_emails_email", "unique_table_model_id", "unique_users_id"],
)
def test_schema_tests_specify_model_in_pkg(self, project):
run_schema_and_assert(
project,
"test.users_rollup",
None,
# TODO: change this. there's no way to select only direct ancestors
# atm.
["unique_users_rollup_gender"],
)
def test_schema_tests_with_glob(self, project):
run_schema_and_assert(
project,
"*",
"users",
[
"not_null_emails_email",
"unique_table_model_id",
"unique_users_rollup_gender",
],
)
def test_schema_tests_dep_package_only(self, project):
run_schema_and_assert(project, "dbt_integration_project", None, ["unique_table_model_id"])
def test_schema_tests_model_in_dep_pkg(self, project):
run_schema_and_assert(
project,
"dbt_integration_project.table_model",
None,
["unique_table_model_id"],
)
def test_schema_tests_exclude_pkg(self, project):
run_schema_and_assert(
project,
None,
"dbt_integration_project",
["not_null_emails_email", "unique_users_id", "unique_users_rollup_gender"],
)
|
11525592
|
from amaranth_boards.upduino_v2 import *
from amaranth_boards.upduino_v2 import __all__
import warnings
warnings.warn("instead of nmigen_boards.upduino_v2, use amaranth_boards.upduino_v2",
DeprecationWarning, stacklevel=2)
|
11525596
|
import string
import random
from time import sleep
from decimal import Decimal
from mock import patch, MagicMock
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from django.test import TransactionTestCase
from cc.models import Wallet, Address, Currency, Operation, Transaction, WithdrawTransaction
from cc import tasks
from cc import settings
settings.CC_CONFIRMATIONS = 2
settings.CC_ACCOUNT = ''
URL = 'http://root:toor@litecoind:19335/'
class WalletAddressGet(TransactionTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.coin = AuthServiceProxy(URL)
starting = True
while starting:
try:
cls.coin.generate(101)
except JSONRPCException as e:
if e.code != -28:
raise
else:
starting = False
sleep(1)
@classmethod
def tearDownClass(cls):
cls.coin.stop()
super().tearDownClass()
def setUp(self):
self.currency = Currency.objects.create(label='Litecoin regtest', ticker='tbtc', api_url=URL, magicbyte='58')
tasks.refill_addresses_queue()
def test_address_refill(self):
wallet = Wallet.objects.create(currency=self.currency)
address = wallet.get_address()
self.assertTrue(address)
def test_deposit(self):
wallet_before = Wallet.objects.create(currency=self.currency)
address = wallet_before.get_address().address
self.coin.sendtoaddress(address, Decimal('1'))
self.coin.generate(1)
tasks.query_transactions('tbtc')
wallet_after1 = Wallet.objects.get(id=wallet_before.id)
self.assertEqual(wallet_after1.balance, Decimal('0'))
self.assertEqual(wallet_after1.holded, Decimal('0'))
self.assertEqual(wallet_after1.unconfirmed, Decimal('1'))
self.coin.generate(1)
tasks.query_transactions('tbtc')
wallet_after2 = Wallet.objects.get(id=wallet_before.id)
self.assertEqual(wallet_after2.balance, Decimal('1'))
self.assertEqual(wallet_after2.holded, Decimal('0'))
self.assertEqual(wallet_after2.unconfirmed, Decimal('0'))
def test_withdraw(self):
wallet_before = Wallet.objects.create(currency=self.currency, balance=Decimal('1.0'))
wallet_before.withdraw_to_address('QfMAfiiLioTdkWmSG9v6VjDot9LH1d1nJo', Decimal('0.1'))
wallet_before.withdraw_to_address('QfiFeWcRV5txjDXS5wzzj1t8dD2hRXR78t', Decimal('0.1'))
wallet_before.withdraw_to_address('QfMAfiiLioTdkWmSG9v6VjDot9LH1d1nJo', Decimal('0.1'))
wallet_after1 = Wallet.objects.get(id=wallet_before.id)
self.assertEqual(wallet_after1.balance, Decimal('0.7'))
self.assertEqual(wallet_after1.holded, Decimal('0.3'))
tasks.process_withdraw_transactions('tbtc')
self.coin.generate(2)
wtx = WithdrawTransaction.objects.last()
tx = self.coin.gettransaction(wtx.txid)
wallet_after2 = Wallet.objects.get(id=wallet_before.id)
self.assertEqual(wallet_after2.balance, Decimal('0.7') + tx['fee'])
self.assertEqual(wallet_after2.holded, Decimal('0'))
def test_withdraw_error(self):
wallet_before = Wallet.objects.create(currency=self.currency, balance=Decimal('21000000'))
wallet_before.withdraw_to_address('QfMAfiiLioTdkWmSG9v6VjDot9LH1d1nJo', Decimal('21000000'))
try:
tasks.process_withdraw_transactions('tbtc')
except JSONRPCException:
pass
wallet_after1 = Wallet.objects.get(id=wallet_before.id)
self.assertEqual(wallet_after1.balance, Decimal('0'))
self.assertEqual(wallet_after1.holded, Decimal('21000000'))
wtx = WithdrawTransaction.objects.last()
wallet_after1 = Wallet.objects.get(id=wallet_before.id)
self.assertEqual(wtx.state, wtx.ERROR)
|
11525598
|
import numpy as np
import scipy.stats
import subprocess
import os
import warnings
from genome_integration import simulate_mr
from genome_integration import utils
from genome_integration.association import GeneticAssociation
def read_assocs_from_plink_qassoc(assoc_file):
assocs = {}
with open(assoc_file, "r") as f:
f.readline()
for line in f:
split = line.split()
for i in range(len(split)):
if split[i] == "NA":
split[i] = np.nan
snp_name = split[1]
tmp_assoc = GeneticAssociation(
dependent_name="sim_pheno",
explanatory_name=snp_name,
n_observations = int(split[3]),
beta = float(split[4]),
se = float(split[5]),
r_squared= float(split[6]),
chromosome=split[0],
                position=split[2],  # BP column of the .qassoc file
major_allele=None,
minor_allele=None,
minor_allele_frequency=None,
reference_allele=None,
effect_allele=None
)
tmp_assoc.set_p_val(float(split[8]))
assocs[snp_name] = tmp_assoc
return assocs
def turn_assocs_into_genetic_associations(assocs, ordered_loci, allele_frequency, sample_sizes):
    # Warnings are turned off here because the division below can divide by zero.
warnings.filterwarnings("ignore", category=RuntimeWarning)
z_scores = assocs[:,0] / assocs[:,1]
warnings.filterwarnings("default")
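    # Two-sided p-values from the z-scores via the standard normal survival function.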
p_values = scipy.stats.norm.sf(np.abs(z_scores)) *2
assocs = {ordered_loci[i].snp_name:
GeneticAssociation(dependent_name="simulation",
explanatory_name=ordered_loci[i].snp_name,
n_observations = sample_sizes[i],
beta=assocs[i,0],
se=assocs[i,1],
r_squared = None,
chromosome = ordered_loci[i].chromosome,
position = ordered_loci[i].position,
major_allele = ordered_loci[i].major_allele,
minor_allele = ordered_loci[i].minor_allele,
minor_allele_frequency = allele_frequency[i],
reference_allele = None,
effect_allele = None
)
for i in range(len(assocs))
}
[assocs[ordered_loci[i].snp_name].set_p_val(p_values[i]) for i in range(len(assocs))]
return assocs
def test_compare_plink_assoc():
np.random.seed(13289)
rel_path = '/'.join(('test_resources', 'subset_of_exposure_cohort'))
if len(__file__.split("/")) > 1:
plink_loc = "{}/{}".format("/".join(__file__.split("/")[:-1]), rel_path)
else:
plink_loc = rel_path
temp_data = '/'.join(('temp_data', 'plink_file_cojo_test'))
if len(__file__.split("/")) > 1:
temp_data = "{}/{}".format("/".join(__file__.split("/")[:-1]), temp_data)
plinkfile = utils.PlinkFile(plink_loc)
geno_mat = plinkfile.read_bed_file_into_numpy_array()
    # Three causal SNPs.
beta = [0.5, 0.5, -0.4]
phenotypes = simulate_mr.scale_geno_vec(geno_mat[:,5]) * beta[0]
phenotypes += simulate_mr.scale_geno_vec(geno_mat[:,7]) * beta[1]
phenotypes += simulate_mr.scale_geno_vec(geno_mat[:, 100]) * beta[2]
phenotypes += np.random.normal(size=phenotypes.shape)
phenotypes -= np.mean(phenotypes)
phenotypes /= np.std(phenotypes)
#Write and do the plink association.
pheno_file = temp_data + "_pheno"
assoc_file = temp_data + "_assoc"
with open(pheno_file, "w") as f:
f.write(f"FID\tIID\tPHENO\n")
for sample_name, phenotype in zip(plinkfile.fam_data.sample_names, phenotypes):
sample = plinkfile.fam_data.fam_samples[sample_name]
f.write(f"{sample.fid}\t{sample.iid}\t{phenotype}\n")
subprocess.run(["plink",
"--bfile", plink_loc,
"--assoc", "--allow-no-sex",
"--pheno", pheno_file,
"--out", assoc_file,
], check=True, stdout=subprocess.DEVNULL, stderr = subprocess.DEVNULL)
plink_ref_assocs = read_assocs_from_plink_qassoc(assoc_file + ".qassoc")
own_assocs = np.apply_along_axis(simulate_mr.do_gwas_on_scaled_variants,
axis=0,
arr=geno_mat,
dependent=phenotypes).T
plink_assocs = np.asarray([ [plink_ref_assocs[x].beta, plink_ref_assocs[x].se]
for x in plinkfile.bim_data.snp_names])
plink_assocs[np.isnan(plink_assocs)] = 0.
    # Tolerance is relatively loose: plink reports only about three significant digits.
assert(np.all(np.isclose(own_assocs, plink_assocs, rtol=1e-3, atol = 1e-3)))
#clean up.
np.random.seed()
subprocess.run(["rm", "-f",
pheno_file,
assoc_file + ".log",
assoc_file + ".nosex",
assoc_file + ".qassoc",
])
rel_path = '/'.join(('temp_data', ''))
if len(__file__.split("/")) >1:
test_data_dir = "{}/{}".format("/".join(__file__.split("/")[:-1]), rel_path)
else:
test_data_dir = rel_path
if not os.path.isdir(test_data_dir):
os.mkdir(test_data_dir)
test_compare_plink_assoc()
|
11525607
|
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
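# Note: the 'valid' (unpadded) convolution above shrinks the 16x16 maps back to 14x14, so the
# final upsampling below restores the original 28x28 resolution.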
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
from tensorflow.keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
from tensorflow.keras.callbacks import TensorBoard
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=128,
shuffle=True,
validation_data=(x_test, x_test),
callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
decoded_imgs = autoencoder.predict(x_test)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i+1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
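# A possible follow-up sketch (not part of the original script): the encoder half can be reused
# on its own to inspect the learned 4x4x8 codes for the test digits.
#   encoder = Model(input_img, encoded)
#   encoded_imgs = encoder.predict(x_test)  # shape: (len(x_test), 4, 4, 8)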
|
11525629
|
from bayesnet.sampler.hmc import hmc
from bayesnet.sampler.metropolis import metropolis
__all__ = [
"hmc",
"metropolis"
]
|
11525649
|
from google.appengine.ext import ndb
from werkzeug.test import Client
from backend.api.handlers.helpers.model_properties import simple_team_properties
from backend.common.consts.auth_type import AuthType
from backend.common.models.api_auth_access import ApiAuthAccess
from backend.common.models.event_team import EventTeam
from backend.common.models.team import Team
def validate_nominal_keys(team):
assert set(team.keys()).difference(set(simple_team_properties)) != set()
def validate_simple_keys(team):
assert set(team.keys()).difference(set(simple_team_properties)) == set()
def test_team(ndb_stub, api_client: Client) -> None:
ApiAuthAccess(
id="test_auth_key",
auth_types_enum=[AuthType.READ_API],
).put()
Team(id="frc254", team_number=254).put()
# Nominal response
resp = api_client.get(
"/api/v3/team/frc254", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert resp.json["key"] == "frc254"
validate_nominal_keys(resp.json)
# Simple response
resp = api_client.get(
"/api/v3/team/frc254/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert resp.json["key"] == "frc254"
validate_simple_keys(resp.json)
# Keys response
resp = api_client.get(
"/api/v3/team/frc254/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 404
def test_team_list_all(ndb_stub, api_client: Client) -> None:
ApiAuthAccess(
id="test_auth_key",
auth_types_enum=[AuthType.READ_API],
).put()
Team(id="frc67", team_number=67).put()
Team(id="frc254", team_number=254).put()
Team(id="frc604", team_number=604).put()
Team(id="frc9999", team_number=9999).put()
# Nominal response
resp = api_client.get(
"/api/v3/teams/all", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 4
for team in resp.json:
validate_nominal_keys(team)
assert resp.json[0]["key"] == "frc67"
assert resp.json[1]["key"] == "frc254"
assert resp.json[2]["key"] == "frc604"
assert resp.json[3]["key"] == "frc9999"
# Simple response
resp = api_client.get(
"/api/v3/teams/all/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 4
for team in resp.json:
validate_simple_keys(team)
assert resp.json[0]["key"] == "frc67"
assert resp.json[1]["key"] == "frc254"
assert resp.json[2]["key"] == "frc604"
assert resp.json[3]["key"] == "frc9999"
# Keys response
resp = api_client.get(
"/api/v3/teams/all/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 4
assert resp.json[0] == "frc67"
assert resp.json[1] == "frc254"
assert resp.json[2] == "frc604"
assert resp.json[3] == "frc9999"
def test_team_list(ndb_stub, api_client: Client) -> None:
ApiAuthAccess(
id="test_auth_key",
auth_types_enum=[AuthType.READ_API],
).put()
Team(id="frc67", team_number=67).put()
Team(id="frc254", team_number=254).put()
Team(id="frc604", team_number=604).put()
Team(id="frc9999", team_number=9999).put()
# Nominal response
resp = api_client.get(
"/api/v3/teams/0", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 2
for team in resp.json:
validate_nominal_keys(team)
assert resp.json[0]["key"] == "frc67"
assert resp.json[1]["key"] == "frc254"
resp = api_client.get(
"/api/v3/teams/1", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
for team in resp.json:
validate_nominal_keys(team)
assert resp.json[0]["key"] == "frc604"
resp = api_client.get(
"/api/v3/teams/2", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 0
# Simple response
resp = api_client.get(
"/api/v3/teams/0/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 2
for team in resp.json:
validate_simple_keys(team)
assert resp.json[0]["key"] == "frc67"
assert resp.json[1]["key"] == "frc254"
resp = api_client.get(
"/api/v3/teams/1/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
for team in resp.json:
validate_simple_keys(team)
assert resp.json[0]["key"] == "frc604"
resp = api_client.get(
"/api/v3/teams/2/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 0
# Keys response
resp = api_client.get(
"/api/v3/teams/0/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 2
assert resp.json[0] == "frc67"
assert resp.json[1] == "frc254"
resp = api_client.get(
"/api/v3/teams/1/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
assert resp.json[0] == "frc604"
resp = api_client.get(
"/api/v3/teams/2/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 0
def test_team_list_year(ndb_stub, api_client: Client) -> None:
ApiAuthAccess(
id="test_auth_key",
auth_types_enum=[AuthType.READ_API],
).put()
Team(id="frc67", team_number=67).put()
Team(id="frc254", team_number=254).put()
EventTeam(
id="2020casj_frc67",
event=ndb.Key("Event", "2020casj"),
team=ndb.Key("Team", "frc67"),
year=2020,
).put()
EventTeam(
id="2019casj_frc254",
event=ndb.Key("Event", "2019casj"),
team=ndb.Key("Team", "frc254"),
year=2019,
).put()
# Nominal response
resp = api_client.get(
"/api/v3/teams/2020/0", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
for team in resp.json:
validate_nominal_keys(team)
assert resp.json[0]["key"] == "frc67"
resp = api_client.get(
"/api/v3/teams/2019/0", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
for team in resp.json:
validate_nominal_keys(team)
assert resp.json[0]["key"] == "frc254"
resp = api_client.get(
"/api/v3/teams/2018/0", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 0
# Simple response
resp = api_client.get(
"/api/v3/teams/2020/0/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
for team in resp.json:
validate_simple_keys(team)
assert resp.json[0]["key"] == "frc67"
resp = api_client.get(
"/api/v3/teams/2019/0/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
for team in resp.json:
validate_simple_keys(team)
assert resp.json[0]["key"] == "frc254"
resp = api_client.get(
"/api/v3/teams/2018/0/simple", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 0
# Keys response
resp = api_client.get(
"/api/v3/teams/2020/0/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
assert resp.json[0] == "frc67"
resp = api_client.get(
"/api/v3/teams/2019/0/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 1
assert resp.json[0] == "frc254"
resp = api_client.get(
"/api/v3/teams/2018/0/keys", headers={"X-TBA-Auth-Key": "test_auth_key"}
)
assert resp.status_code == 200
assert len(resp.json) == 0
|
11525667
|
import visdom
import pickle
import os
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
class FeatureVisualizer(object):
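    """Visualize saved deep features with t-SNE and publish the plots to a visdom server.
    `data_root` is expected to contain one sub-folder per class (used to map class names to
    indices), and `feature_root` should point to a pickled dict with 'feature' and 'name' entries.
    """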
def __init__(self, data_root, feature_root, env):
self.data_root = data_root
self.feature_root = feature_root
self.feature_data = pickle.load(open(self.feature_root, 'rb'))
self.vis = visdom.Visdom(env=env)
self.c_index = dict()
cnames = sorted(os.listdir(data_root))
for i, cname in enumerate(cnames):
self.c_index[cname] = i
cname_list, item_num = self._get_feature_info(data_root)
self.cname_list = cname_list
self.item_num = item_num
# for (cname, num) in zip(cname_list, item_num):
# print(cname, ':', num)
# visualize the self.data_root
def visualize(self, max_class, win='Feature Vis'):
calculate_num = 0
for i in range(max_class):
calculate_num += self.item_num[i]
feature = self.feature_data['feature'][:calculate_num]
name = self.feature_data['name'][:calculate_num]
feature_class = []
for f_name in name:
class_name = f_name.split('/')[0]
feature_class.append(self.c_index[class_name])
# print(np.shape(feature))
tsne = TSNE(n_components=2, init='pca', random_state=0)
X_tsen = tsne.fit_transform(feature)
# print(np.shape(X_tsen))
# print(set(feature_class))
plt.figure()
plt.scatter(X_tsen[:, 0], X_tsen[:, 1], c=np.array(feature_class), marker='.', cmap=plt.cm.Spectral)
plt.colorbar()
plt.grid(True)
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Feature Vis')
self.vis.matplot(plt, win=win, opts=dict(title=win))
# embedding vis
def embedding_vis(self, em_data_root, em_feature_root, max_class, min_class=0, win='embedding vis'):
cal_a = 0
cal_b = 0
em_cname_list, em_item_num = self._get_feature_info(em_data_root)
em_feature_data = pickle.load(open(em_feature_root, 'rb'))
start_a = 0
start_b = 0
for i in range(min_class):
start_a += self.item_num[i]
start_b += em_item_num[i]
for i in range(min_class, max_class):
cal_a += self.item_num[i]
cal_b += em_item_num[i]
a_feature = self.feature_data['feature'][start_a:cal_a+start_a]
a_name = self.feature_data['name'][start_a:cal_a+start_a]
print('np.shape(a_feature)', np.shape(a_feature))
b_feature = em_feature_data['feature'][start_b:cal_b+start_b]
b_name = em_feature_data['name'][start_b:cal_b+start_b]
print('np.shape(b_feature)', np.shape(b_feature))
a_class = []
for name in a_name:
c_name = name.split('/')[0]
a_class.append(self.c_index[c_name])
b_class = []
for name in b_name:
c_name = name.split('/')[0]
b_class.append(self.c_index[c_name])
tsne = TSNE(n_components=2, init='pca', random_state=0)
feature = np.append(a_feature, b_feature, axis=0)
print('np.shape(feature) :', np.shape(feature))
X_tsen = tsne.fit_transform(feature)
X_a = X_tsen[:cal_a]
X_b = X_tsen[cal_a:]
print('np.shape(X_b) :', np.shape(X_b))
print('np.shape(b_class) :', np.shape(b_class))
print('a_class :', a_class)
print('b_class :', b_class)
plt.figure()
plt.scatter(X_a[:, 0], X_a[:, 1], c=np.array(a_class), marker='.', cmap=plt.cm.Spectral)
plt.scatter(X_b[:, 0], X_b[:, 1], c=np.array(b_class), marker='s', cmap=plt.cm.Spectral)
plt.colorbar()
plt.grid(True)
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Embedding Vis')
self.vis.matplot(plt, win=win, opts=dict(title=win))
def _get_feature_info(self, data_root):
item_num = []
cname_list = []
cnames = sorted(os.listdir(data_root))
for cname in cnames:
num = len(os.listdir(os.path.join(data_root, cname)))
item_num.append(num)
cname_list.append(cname)
return cname_list, item_num
|
11525668
|
from glob import glob
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLabel, QWidget, QHBoxLayout, QSizePolicy, QApplication
from bauh import __version__, __app_name__, ROOT_DIR
from bauh.context import generate_i18n
from bauh.view.util import resource
PROJECT_URL = 'https://github.com/vinifmor/' + __app_name__
LICENSE_URL = 'https://raw.githubusercontent.com/vinifmor/{}/master/LICENSE'.format(__app_name__)
class AboutDialog(QDialog):
def __init__(self, app_config: dict):
super(AboutDialog, self).__init__()
i18n = generate_i18n(app_config, resource.get_path('locale/about'))
self.setWindowTitle('{} ({})'.format(i18n['about.title'].capitalize(), __app_name__))
layout = QVBoxLayout()
self.setLayout(layout)
logo_container = QWidget()
logo_container.setObjectName('logo_container')
logo_container.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
logo_container.setLayout(QHBoxLayout())
label_logo = QLabel()
label_logo.setObjectName('logo')
logo_container.layout().addWidget(label_logo)
layout.addWidget(logo_container)
label_name = QLabel(__app_name__)
label_name.setObjectName('app_name')
layout.addWidget(label_name)
label_version = QLabel(i18n['about.version'].lower() + ' ' + __version__)
label_version.setObjectName('app_version')
layout.addWidget(label_version)
layout.addWidget(QLabel(''))
line_desc = QLabel(i18n['about.info.desc'])
line_desc.setObjectName('app_description')
layout.addWidget(line_desc)
layout.addWidget(QLabel(''))
available_gems = [f for f in glob('{}/gems/*'.format(ROOT_DIR)) if not f.endswith('.py') and not f.endswith('__pycache__')]
available_gems.sort()
gems_widget = QWidget()
gems_widget.setLayout(QHBoxLayout())
gems_widget.layout().addWidget(QLabel())
gem_logo_size = int(0.032552083 * QApplication.primaryScreen().size().height())
for gem_path in available_gems:
icon = QLabel()
icon.setObjectName('gem_logo')
icon_path = gem_path + '/resources/img/{}.svg'.format(gem_path.split('/')[-1])
icon.setPixmap(QIcon(icon_path).pixmap(gem_logo_size, gem_logo_size))
gems_widget.layout().addWidget(icon)
gems_widget.layout().addWidget(QLabel())
layout.addWidget(gems_widget)
layout.addWidget(QLabel(''))
label_more_info = QLabel()
label_more_info.setObjectName('app_more_information')
label_more_info.setText(i18n['about.info.link'] + " <a href='{url}'>{url}</a>".format(url=PROJECT_URL))
label_more_info.setOpenExternalLinks(True)
layout.addWidget(label_more_info)
label_license = QLabel()
label_license.setObjectName('app_license')
label_license.setText("<a href='{}'>{}</a>".format(LICENSE_URL, i18n['about.info.license']))
label_license.setOpenExternalLinks(True)
layout.addWidget(label_license)
layout.addWidget(QLabel(''))
label_trouble_question = QLabel(i18n['about.info.trouble.question'])
label_trouble_question.setObjectName('app_trouble_question')
layout.addWidget(label_trouble_question)
label_trouble_answer = QLabel(i18n['about.info.trouble.answer'])
label_trouble_answer.setObjectName('app_trouble_answer')
layout.addWidget(label_trouble_answer)
layout.addWidget(QLabel(''))
label_rate_question = QLabel(i18n['about.info.rate.question'])
label_rate_question.setObjectName('app_rate_question')
layout.addWidget(label_rate_question)
label_rate_answer = QLabel(i18n['about.info.rate.answer'])
label_rate_answer.setObjectName('app_rate_answer')
layout.addWidget(label_rate_answer)
layout.addWidget(QLabel(''))
self.adjustSize()
self.setFixedSize(self.size())
def closeEvent(self, event):
event.ignore()
self.hide()
|
11525709
|
from __future__ import annotations
from typing import List, Iterator, Type, Tuple, Union, TYPE_CHECKING
import ast
from ..action import default_actions, actions
if TYPE_CHECKING:
from ..models import (
SchemaActionModel,
MorphActionModel,
CategoryActionModel,
ColumnModel,
CategoryModel,
FieldModel,
)
from ..base import BaseSchemaAction, BaseMorphAction, BaseCategoryAction
from ..schema import Schema
class ParserScript:
"""Parsing utility functions for all types of action scripts.
Supports parsing and validation of any action script of the form:
"ACTION > [term] < [term]"
Where term fields are optional. Has no opinion on what the terms are, or whether they are nested.
"""
###################################################################################################
### ACTION UTILITIES
###################################################################################################
def get_action_model(self, action: str) -> Union[SchemaActionModel, MorphActionModel, CategoryActionModel, None]:
"""Return a specific set of Action definitions in response to an Action name.
Parameters
----------
action: str
An Action name.
Returns
-------
SchemaActionModel, MorphActionModel, CategoryActionModel or None.
For the requested Action name. Or None, if it doesn't exist.
"""
return next((da for da in default_actions if da.name == action.upper()), None)
def get_anchor_action(self, script: str) -> Union[SchemaActionModel, MorphActionModel, CategoryActionModel, None]:
"""Return the first action term from a script as its Model type.
Parameters
----------
script: str
Action script, defined as "ACTION > TERM < TERM"
Raises
------
ValueError if not a valid ACTION.
Returns
-------
SchemaActionModel, MorphActionModel, or CategoryActionModel.
"""
# Get the action, where any of the `<` or `>` referenced terms may be absent.
root = self.get_split_terms(script, "<")
# There must always be a first term and it must *always* be an ACTION. Everything else is optional.
root = self.get_split_terms(root[0], ">")
action = self.get_action_model(root[0])
if not action:
            raise ValueError(f"Term '{root[0]}' is not a recognised ACTION.")
return action
def get_action_class(
self, actn: Union[SchemaActionModel, MorphActionModel, CategoryActionModel]
) -> Type[Union[BaseSchemaAction, BaseMorphAction, BaseCategoryAction]]:
"""Return the ACTION class for an ACTION model.
Parameters
----------
        actn: SchemaActionModel, MorphActionModel or CategoryActionModel
Returns
-------
class of Action
"""
return actions[actn.name]
###################################################################################################
### FIELD UTILITIES
###################################################################################################
def get_field_model(
self, name: str, fields: List[Union[ColumnModel, CategoryModel, FieldModel]]
) -> Union[ColumnModel, CategoryModel, FieldModel]:
"""Recover a field model from a string.
Parameters
----------
name: str
fields: list of ColumnModel, CategoryModel, FieldModel
Returns
-------
Union[ColumnModel, CategoryModel, FieldModel]
"""
# It is statistically almost impossible to have a field name that matches a randomly-generated UUID
# Can be used to recover fields from hex
return next((f for f in fields if f.name == name or f.uuid.hex == name), None)
def get_field_from_script(
self, name: str, fields: List[Union[ColumnModel, FieldModel]], schema: Type[Schema]
) -> Union[ColumnModel, FieldModel]:
"""Recover a field model from a string.
Parameters
----------
name: str
fields: list of ColumnModel, FieldModel
schema: schema
        Raises
        ------
        ValueError
            If the field name is not recognised in either the table columns or the schema fields.
        Returns
-------
Union[ColumnModel, CategoryModel, FieldModel]
"""
# If a column name is the same as a schema name, it's probably best to get the schema first...
# Is it in the schema?
field = schema.get_field(name)
if not field:
# Is it a ColumnModel?
field = self.get_field_model(name, fields)
if not field:
raise ValueError(
f"Field name is not recognised from either of the table columns, or the schema fields ({name})."
)
return field
###################################################################################################
### SCRIPT PARSING UTILITIES
###################################################################################################
def generate_contents(self, text) -> Iterator[Tuple[int, str]]:
"""Generate parenthesized contents in string as pairs (level, contents).
Parameters
----------
text: str
Returns
-------
Generator
References
----------
https://stackoverflow.com/a/4285211/295606
"""
stack = []
for i, c in enumerate(text):
if c == "[":
stack.append(i)
elif c == "]" and stack:
start = stack.pop()
yield (len(stack), text[start + 1 : i])
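    # Example (illustrative): list(self.generate_contents("A[B[C]]")) yields
    # [(1, 'C'), (0, 'B[C]')]; inner-most bracketed contents are emitted first, with their nesting level.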
def get_split_terms(self, script: str, by: str) -> List[str]:
return [s.strip() for s in script.split(by)]
def get_literal(self, text: str) -> str:
literal = text
try:
literal = ast.literal_eval(text)
except ValueError:
pass
return literal
def get_listed_literal(self, text: str) -> List[str]:
listed_literal = []
for t in text.split(","):
try:
listed_literal.append(self.get_literal(t))
except SyntaxError:
if t:
listed_literal.append(t)
return listed_literal
    def get_normalised_script(self, script: str, fields: List[Union[ColumnModel, CategoryModel, FieldModel]]) -> str:
        """Replace field names with their UUID hex. Ensures that any unusual characters in
        field names don't cause havoc during parsing.
Parameters
----------
script: str
fields: list of ColumnModel, CategoryModel, FieldModel
Returns
-------
str
"""
# Going to sort these so the longest is first to avoid replacing partial matches
# https://docs.python.org/3/howto/sorting.html
for f in sorted(fields, key=lambda f: len(f.name), reverse=True):
if f"'{f.name}'" in script:
script = script.replace(f"'{f.name}'", f"'{f.uuid.hex}'")
return script
|
11525717
|
from unittest import TestCase
import numpy as np
import nibabel as nib
from unet3d.utils.resample import resample
from unet3d.utils.augment import scale_affine, generate_permutation_keys, permute_data
class TestAugmentation(TestCase):
def setUp(self):
self.shape = (4, 4, 4)
self.affine = np.diag(np.ones(4))
        self.data = np.arange(np.prod(self.shape), dtype=np.float64).reshape(self.shape)
self.image = nib.Nifti1Image(self.data, self.affine)
def test_scale_affine(self):
scale = (0.5, 0.5, 0.5)
new_affine = scale_affine(self.affine, self.shape, scale)
new_image = resample(self.image, target_affine=new_affine, target_shape=self.shape)
new_data = new_image.get_data()
self.assertEqual(np.sum(new_data[:1]), 0)
self.assertEqual(np.sum(new_data[-1:]), 0)
self.assertEqual(np.sum(new_data[:, :1]), 0)
self.assertEqual(np.sum(new_data[:, -1:]), 0)
self.assertEqual(np.sum(new_data[..., :1]), 0)
self.assertEqual(np.sum(new_data[..., -1:]), 0)
self.affine[0, 0] *= -1
self.image = nib.Nifti1Image(self.data, self.affine)
new_affine = scale_affine(self.affine, self.shape, scale)
new_image = resample(self.image, target_affine=new_affine, target_shape=self.shape)
new_data = new_image.get_data()
print(new_data)
self.assertEqual(np.sum(new_data[:1]), 0)
self.assertEqual(np.sum(new_data[-1:]), 0)
self.assertEqual(np.sum(new_data[:, :1]), 0)
self.assertEqual(np.sum(new_data[:, -1:]), 0)
self.assertEqual(np.sum(new_data[..., :1]), 0)
self.assertEqual(np.sum(new_data[..., -1:]), 0)
def test_permutations(self):
permutation_keys = generate_permutation_keys()
assert len(permutation_keys) == 48
permutations = list()
for key in permutation_keys:
data = permute_data(self.data[None], key)
if any([np.array_equal(data, other) for other in permutations]):
raise ValueError("Key {} generates a permuted data array that is not unique.".format(key))
permutations.append(data)
|
11525729
|
import numpy as np
from skimage.measure import compare_ssim as ssim
import imageio
import os
def calculate_ssim_l1_given_paths(paths):
file_list = os.listdir(paths[0])
ssim_value = 0
l1_value = 0
for f in file_list:
# assert(i[0] == i[1])
fake = load_img(paths[0] + f)
real = load_img(paths[1] + f)
ssim_value += np.mean(
ssim(fake, real, multichannel=True))
l1_value += np.mean(abs(fake - real))
ssim_value = ssim_value/float(len(file_list))
l1_value = l1_value/float(len(file_list))
return ssim_value, l1_value
def calculate_ssim_l1_given_tensor(images_fake, images_real):
bs = images_fake.size(0)
images_fake = images_fake.permute(0, 2, 3, 1).cpu().numpy()
images_real = images_real.permute(0, 2, 3, 1).cpu().numpy()
ssim_value = 0
l1_value = 0
for i in range(bs):
# assert(i[0] == i[1])
fake = images_fake[i]
real = images_real[i]
ssim_value += np.mean(
ssim(fake, real, multichannel=True))
l1_value += np.mean(abs(fake - real))
ssim_value = ssim_value/float(bs)
l1_value = l1_value/float(bs)
return ssim_value, l1_value
def load_img(path):
img = imageio.imread(path)
img = img.astype(np.float64) / 255
if img.ndim == 2:
img = np.stack([img, img, img], axis=-1)
elif img.shape[2] == 1:
img = np.concatenate([img, img, img], axis=-1)
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
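# A minimal usage sketch (directory names are hypothetical; both folders are expected to
# contain identically named images, and the paths must end with a trailing slash):
#   ssim_value, l1_value = calculate_ssim_l1_given_paths(('results/fake/', 'results/real/'))
#   print('SSIM: {:.4f}, L1: {:.4f}'.format(ssim_value, l1_value))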
|
11525767
|
import re
import numpy as np
import pandas as pd
def read_file(path):
with open(path, 'r') as f:
content = f.read()
return content
def tosec(t):
return round(float(re.findall(r'.*(?=s)', t)[0]), 3)
def minsec2sec(t):
m = re.findall(r'\d+(?=min)', t)[0]
_s = re.findall(r'\d+(?=s)', t)
if len(_s) > 0:
s = _s[0]
else:
s = 0
return int(m) * 60 + int(s)
def ms2sec(t):
ms = re.findall(r'.*(?=ms)', t)[0]
return float(ms) / 1000
def sanitize_hms(t):
x = t.split(':')
if len(x) == 3:
h, m, s = x
elif len(x) == 2:
m, s = x
h = 0
else:
        raise NotImplementedError('Unrecognised time format: {}'.format(t))
return int(h) * 60 * 60 + int(m) * 60 + int(s)
def sanitize_py(t):
if re.match(r'\d+min', t):
return minsec2sec(t)
elif re.match(r'\d+(\.\d+)*\s+ms', t):
return ms2sec(t)
else:
return tosec(t)
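# Illustrative conversions (derived from the regexes above):
#   sanitize_py('1min 30s') -> 90, sanitize_py('250 ms') -> 0.25, sanitize_py('3.2 s') -> 3.2
#   sanitize_hms('1:02:03') -> 3723, sanitize_hms('02:03') -> 123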
def get_stan_gp_times(path):
nb_content = read_file(path)
ts = re.findall(r'(?<=Wall time:\s).*(?=\\n)', nb_content)
ts = [sanitize_py(t) for t in ts]
return dict(model='gp', ppl='stan',
advi_compile=ts[0],
hmc_compile=ts[0],
nuts_compile=ts[0],
advi_run=ts[1],
hmc_run=ts[2],
nuts_run=ts[3])
def get_turing_gp_times(path):
nb_content = read_file(path)
t = re.findall(r'\d+\.?\d+\s(?=seconds)', nb_content)
# r = re.findall(r'(?<=Time:\s)\d+:\d+:\d+', nb_content)
t = list(map(float, t))
# r = list(map(sanitize_hms, r))
return dict(model='gp',
ppl='turing',
advi_compile=t[0],
hmc_compile=t[2],
nuts_compile=t[4],
advi_run=t[1],
hmc_run=t[3],
nuts_run=t[5])
def get_pyro_gp_times(path):
nb_content = read_file(path)
t = re.findall(r'(?<=\[)\d+[:\d+]+', nb_content)
t = list(map(sanitize_hms, t))
r = re.findall(r'(?<=Wall time:\s).*(?=\\n)', nb_content)
r = list(map(sanitize_py, r))
return dict(model='gp',
ppl='pyro',
advi_compile=0,
hmc_compile=0,
nuts_compile=0,
advi_run=r[3],
hmc_run=t[1],
nuts_run=t[3])
def get_numpyro_gp_times(path):
nb_content = read_file(path)
r = re.findall(r'(?<=\[)\d+[:\d+]+', nb_content)
t = re.findall(r'(?<=Wall time:\s).*(?=\\n)', nb_content)
r = list(map(sanitize_hms, r))
t = list(map(sanitize_py, t))
return dict(model='gp',
ppl='numpyro',
advi_compile=t[2],
hmc_compile=t[0] - r[0],
nuts_compile=t[1] - r[1],
advi_run=t[3],
hmc_run=r[0],
nuts_run=r[1])
def get_tfp_gp_times(path):
nb_content = read_file(path)
t = re.findall(r'(?<=Wall time:\s).*(?=\\n)', nb_content)
t = list(map(sanitize_py, t))
return dict(model='gp',
ppl='tfp',
advi_compile=0,
hmc_compile=t[0],
nuts_compile=t[2],
advi_run=t[4],
hmc_run=t[1],
nuts_run=t[3])
if __name__ == '__main__':
    path_to_nb = "../notebooks"
    # Collect the timing dicts and build the DataFrame in one go;
    # DataFrame.append is deprecated and was removed in pandas 2.0.
    all_times = []
    # STAN GP Timings.
    stan_nb_path = '{}/gp_stan.ipynb'.format(path_to_nb)
    all_times.append(get_stan_gp_times(stan_nb_path))
    # Turing GP Timings.
    turing_nb_path = '{}/gp_turing.ipynb'.format(path_to_nb)
    all_times.append(get_turing_gp_times(turing_nb_path))
    # Pyro GP Timings.
    pyro_nb_path = '{}/gp_pyro.ipynb'.format(path_to_nb)
    all_times.append(get_pyro_gp_times(pyro_nb_path))
    # Numpyro GP Timings.
    numpyro_nb_path = '{}/gp_numpyro.ipynb'.format(path_to_nb)
    all_times.append(get_numpyro_gp_times(numpyro_nb_path))
    # TFP GP Timings.
    tfp_nb_path = '{}/gp_tfp.ipynb'.format(path_to_nb)
    all_times.append(get_tfp_gp_times(tfp_nb_path))
    # TODO: NIMBLE GP Timings.
    times_df = pd.DataFrame(all_times)
    # Save CSV
times_df = times_df[['ppl',
'advi_run', 'hmc_run', 'nuts_run',
'advi_compile', 'hmc_compile', 'nuts_compile']]
times_df = times_df.rename(columns={"ppl": "PPL",
"advi_run": "ADVI (run)",
"hmc_run": "HMC (run)",
"nuts_run": "NUTS (run)",
"advi_compile": "ADVI (compile)",
"hmc_compile": "HMC (compile)",
"nuts_compile": "NUTS (compile)"})
times_df = times_df.round(3)
times_df.to_csv('../timings/timings.csv', index=False, na_rep='NA')
print(times_df)
print('Written to ../timings/timings.csv')
|
11525773
|
import ntpath
from datetime import datetime as dt
import os
import pandas as pd
import numpy as np
import math
import sqlite3
# clean the original raw data by storing only the columns that we need, and removing the rest.
def clean(from_path, to_path, columns):
def convert_date(date):
if date == '':
return None
else:
if len(date.split('-')) == 3:
return date
year = date.split('/')[-1]
if len(year) == 4:
return dt.strptime(date, '%d/%m/%Y').date()
else:
return dt.strptime(date, '%d/%m/%y').date()
def convert_score(score):
if math.isnan(score):
return score
else:
return int(score)
df = pd.read_csv(from_path, error_bad_lines=False)
df = df[columns]
df = df[pd.notnull(df['Date'])]
df['FTHG'] = df['FTHG'].apply(convert_score)
df['FTAG'] = df['FTAG'].apply(convert_score)
df['Date'] = df['Date'].apply(convert_date)
head, _ = ntpath.split(to_path)
if not os.path.exists(head):
os.makedirs(head)
df.to_csv(to_path, index=False)
def clean_all(from_folder, to_folder, from_year, to_year):
columns = ['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR']
for year in range(from_year, to_year + 1):
csvFile = '{}-{}.csv'.format(year, year + 1)
from_path = os.path.join(from_folder, csvFile)
to_path = os.path.join(to_folder, csvFile)
print("Cleaning ", from_path, "...")
clean(from_path, to_path, columns)
def combine_matches(cleaned_folder_path, final_path, start_year, end_year, make_file=True):
print("Combining matches from {} to {}...".format(start_year, end_year))
dfList = []
for year in range(start_year, end_year + 1):
file = '{}-{}.csv'.format(year, year + 1)
path = os.path.join(cleaned_folder_path, file)
df = pd.read_csv(path)
dfList.append(df)
df = pd.concat(dfList, ignore_index=True, sort=False)
if make_file:
df.to_csv(final_path, index=False)
return df
def get_match_results_against(file_path, cleaned_folder_path, final_path, from_year, to_year):
print("Getting head-to-head results...")
team_detail, match_detail = {}, {}
match_detail_columns = [
'HT_win_rate_against',
'AT_win_rate_against'
]
for item in match_detail_columns:
match_detail[item] = []
# Get head-to-head result from from_year to to_year
df = combine_matches(cleaned_folder_path, final_path, from_year, to_year, make_file=False)
for _, row in df.iterrows():
HT = row['HomeTeam']
AT = row['AwayTeam']
if HT not in team_detail:
team_detail[HT] = {}
if AT not in team_detail:
team_detail[AT] = {}
if AT not in team_detail[HT]:
team_detail[HT][AT] = {
'match_played': 0,
'win': 0
}
if HT not in team_detail[AT]:
team_detail[AT][HT] = {
'match_played': 0,
'win': 0
}
TD_HT_AT = team_detail[HT][AT]
TD_AT_HT = team_detail[AT][HT]
HT_WR = TD_HT_AT['win'] / TD_HT_AT['match_played'] if TD_HT_AT['match_played'] > 0 else np.nan
AT_WR = TD_AT_HT['win'] / TD_AT_HT['match_played'] if TD_AT_HT['match_played'] > 0 else np.nan
match_detail['HT_win_rate_against'].append(HT_WR)
match_detail['AT_win_rate_against'].append(AT_WR)
TD_HT_AT['match_played'] += 1
TD_AT_HT['match_played'] += 1
game_result = row['FTR']
if game_result == 'H':
TD_HT_AT['win'] += 1
elif game_result == 'A':
TD_AT_HT['win'] += 1
    # Only take the last `row_count` rows of match_detail and merge them into filedf,
    # since we don't always want to merge all data from 1993 to 2018.
filedf = pd.read_csv(file_path)
row_count = filedf.shape[0]
filedf['HT_win_rate_against'] = pd.Series(match_detail['HT_win_rate_against'][-row_count:], index=filedf.index)
filedf['AT_win_rate_against'] = pd.Series(match_detail['AT_win_rate_against'][-row_count:], index=filedf.index)
filedf.to_csv(file_path, index=False)
def remove_goal_scores(final_path):
print("Removing Goal Scores...")
df = pd.read_csv(final_path)
df = df.drop(columns=['FTHG','FTAG'])
df.to_csv(final_path, index=False)
def save_new_data_to_database(database_path, final_data_file, prediction_results_file, standing_predictions_file,
final_data_file_name='previous_results', prediction_results_file_name='prediction_results',
standing_predictions_file_name='prediction_rankings'):
conn = sqlite3.connect(database_path)
previous_results_df = pd.read_csv(final_data_file)
previous_results_df = previous_results_df[["Date", "HomeTeam", "AwayTeam", "FTHG", "FTAG", "FTR"]]
previous_results_df = previous_results_df.loc[(previous_results_df['FTHG'] != 0) |
(previous_results_df['FTAG'] != 0) |
((previous_results_df['FTR'] != 'A') &
(previous_results_df['FTR'] != 'H'))]
prediction_results_df = pd.read_csv(prediction_results_file)
prediction_results_df = prediction_results_df[["Date", "HomeTeam", "AwayTeam", "FTR", "prob_A", "prob_D", "prob_H"]]
prediction_results_df = prediction_results_df.loc[prediction_results_df['prob_A'].notna()]
standing_result_df = pd.read_csv(standing_predictions_file)
previous_results_df.to_sql(final_data_file_name, con=conn, if_exists='replace')
prediction_results_df.to_sql(prediction_results_file_name, con=conn, if_exists='replace')
standing_result_df.to_sql(standing_predictions_file_name, con=conn, if_exists='replace')
def save_summary_to_database(database_path, best_clf_average, winner):
conn = sqlite3.connect(database_path)
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS summary')
cur.execute('CREATE TABLE summary (time DATE, accuracy NUMBER, winner TEXT)')
cur.execute('INSERT INTO summary (time, accuracy, winner) VALUES (?, ?, ?)',
(dt.now().strftime('%Y-%m-%d'), best_clf_average, winner))
conn.commit()
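# Hedged usage sketch (not part of the original module): a typical end-to-end run
# over hypothetical folder names; the paths and year range are illustrative only.
#   clean_all('data/raw', 'data/cleaned', 1993, 2018)
#   combine_matches('data/cleaned', 'data/final.csv', 1993, 2018)
#   get_match_results_against('data/final.csv', 'data/cleaned', 'data/final.csv', 1993, 2018)
#   remove_goal_scores('data/final.csv')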
|
11525796
|
import logging
logging.warning('ramiro_analysis is deprecated, use composite analysis instead')
from pycqed.analysis.composite_analysis import *
|
11525801
|
import os
import unittest
import chapter
test_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'test_files')
class ChapterTests(unittest.TestCase):
def setUp(self):
self.factory = chapter.ChapterFactory()
## def test_create_chapter_from_url(self):
## c = self.factory.create_chapter_from_url('http://example.com')
## self.assertEqual(c.title, 'Example Domain')
## self.assertEqual(c.url, 'http://example.com')
## self.assertEqual(c.html_title, 'Example Domain')
## c = self.factory.create_chapter_from_url(
## 'http://www.bothsidesofthetable.com/')
## self.assertEqual(c.title,
## 'Bothsides of the Table | 2x entrepreneur turned VC')
## self.assertEqual(c.url, 'http://www.bothsidesofthetable.com/')
## self.assertEqual(c.html_title,
## 'Bothsides of the Table | 2x entrepreneur turned VC')
def test_create_chapter_from_file(self):
test_file = os.path.join(test_directory, 'example.html')
c = self.factory.create_chapter_from_file(
test_file)
self.assertEqual(c.title, 'Example Domain')
self.assertEqual(c.url, None)
self.assertEqual(c.html_title, 'Example Domain')
def test_html_title(self):
test_file = os.path.join(test_directory, 'strategy&.html')
c = self.factory.create_chapter_from_file(
test_file, 'http://www.strategyand.pwc.com/')
self.assertEqual(c.title,
'Strategy& (Formerly Booz & Company) - A global management and strategy consulting firm')
self.assertEqual(c.url, 'http://www.strategyand.pwc.com/')
self.assertEqual(c.html_title,
'Strategy& (Formerly Booz & Company) - A global management and strategy consulting firm')
def test_chapter_type_errors(self):
self.assertRaises(TypeError, chapter.Chapter, 1, 'Dummy Content')
self.assertRaises(TypeError, chapter.Chapter, 'Dummy Title', 1)
self.assertRaises(ValueError, chapter.Chapter, '', 'Dummy Content')
self.assertRaises(ValueError, chapter.Chapter, 'Dummy Title', '')
def test_chapter_write_error(self):
test_file = os.path.join(test_directory, 'example.html')
c = self.factory.create_chapter_from_file(
test_file)
self.assertRaises(ValueError, c.write, '')
if __name__ == '__main__':
unittest.main()
|
11525804
|
from .helpers import *
import numpy as np
import json
import copy
import scipy.interpolate as interp
import matplotlib.pyplot as plt
class Airfoil:
"""A class defining an airfoil.
Parameters
----------
name : str
Name of the airfoil.
input_dict : dict
Dictionary describing the airfoil.
Returns
-------
Airfoil
A newly created airfoil object.
Raises
------
IOError
If the input is invalid.
"""
def __init__(self, name, input_dict={}):
        raise RuntimeWarning("This airfoil class script is deprecated and no longer used")
self.name = name
self._input_dict = input_dict
self._type = self._input_dict.get("type", "linear")
self._initialize_data()
self._initialize_geometry()
def _initialize_data(self):
# Initializes the necessary data structures for the airfoil
# Linear airfoils are entirely defined by coefficients and coefficient derivatives
if self._type == "linear":
# Load from file
try:
filename = self._input_dict["path"]
check_filepath(filename, ".json")
with open(filename, 'r') as airfoil_file_handle:
params = json.load(airfoil_file_handle)
# Load from input dict
except KeyError:
params = self._input_dict
# Save params
self._aL0 = import_value("aL0", params, "SI", 0.0) # The unit system doesn't matter
self._CLa = import_value("CLa", params, "SI", 2*np.pi)
self._CmL0 = import_value("CmL0", params, "SI", 0.0)
self._Cma = import_value("Cma", params, "SI", 0.0)
self._CD0 = import_value("CD0", params, "SI", 0.0)
self._CD1 = import_value("CD1", params, "SI", 0.0)
self._CD2 = import_value("CD2", params, "SI", 0.0)
self._CL_max = import_value("CL_max", params, "SI", np.inf)
self._CLM = import_value("CLM", params, "SI", 0.0)
self._CLRe = import_value("CLRe", params, "SI", 0.0)
else:
raise IOError("'{0}' is not an allowable airfoil type.".format(self._type))
def _initialize_geometry(self):
# Creates outline splines to use in generating .stl and .stp files
geom_params = self._input_dict.get("geometry", {})
# Check that there's only one geometry definition
points = geom_params.get("outline_points", None)
naca_des = geom_params.get("NACA", None)
if points is not None and naca_des is not None:
            raise IOError("Outline points and a NACA designation may not both be specified for airfoil {0}.".format(self.name))
# Check for user-given points
if points is not None:
if isinstance(points, str): # Filepath
with open(points, 'r') as input_handle:
outline_points = np.genfromtxt(input_handle, delimiter=',')
elif isinstance(points, list) and isinstance(points[0], list): # Array
outline_points = np.array(points)
# NACA definition
elif naca_des is not None:
# Cosine distribution of chord locations
theta = np.linspace(-np.pi, np.pi, 200)
x = 0.5*(1-np.cos(theta))
# 4-digit series
if len(naca_des) == 4:
m = float(naca_des[0])/100
p = float(naca_des[1])/10
t = float(naca_des[2:])/100
# Thickness distribution
y_t = 5*t*(0.2969*np.sqrt(x)-0.1260*x-0.3516*x**2+0.2843*x**3-0.1036*x**4) # Uses formulation to seal trailing edge
# Camber line equations
if abs(m)<1e-10 or abs(p)<1e-10: # Symmetric
y_c = np.zeros_like(x)
dy_c_dx = np.zeros_like(x)
else:
y_c = np.where(x<p, m/p**2*(2*p*x-x**2), m/(1-p)**2*((1-2*p)+2*p*x-x**2))
                    dy_c_dx = np.where(x<p, 2*m/p**2*(p-x), 2*m/(1-p)**2*(p-x)) # Camber slope; (1-p)**2 matches the y_c expression above
# Outline points
X = x-y_t*np.sin(np.arctan(dy_c_dx))*np.sign(theta)
Y = y_c+y_t*np.cos(np.arctan(dy_c_dx))*np.sign(theta)
outline_points = np.concatenate([X[:,np.newaxis], Y[:,np.newaxis]], axis=1)
else:
return
# Create splines defining the outline as a function of distance along the outline
x_diff = np.diff(outline_points[:,0])
y_diff = np.diff(outline_points[:,1])
ds = np.sqrt(x_diff*x_diff+y_diff*y_diff)
ds = np.insert(ds, 0, 0.0)
s = np.cumsum(ds)
s_normed = s/s[-1]
self._x_outline = interp.UnivariateSpline(s_normed, outline_points[:,0], k=5, s=1e-10)
self._y_outline = interp.UnivariateSpline(s_normed, outline_points[:,1], k=5, s=1e-10)
def get_CL(self, inputs):
"""Returns the coefficient of lift.
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Lift coefficient
"""
if self._type == "linear":
CL = self._CLa*(inputs[0]-self._aL0+inputs[3]*inputs[4])
if CL > self._CL_max or CL < -self._CL_max:
CL = np.sign(CL)*self._CL_max
return CL
def get_CD(self, inputs):
"""Returns the coefficient of drag
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Drag coefficient
"""
if self._type == "linear":
delta_flap = inputs[4]
inputs_wo_flap = copy.copy(inputs)
inputs_wo_flap[3:] = 0.0
CL = self.get_CL(inputs_wo_flap)
CD_flap = 0.002*np.abs(delta_flap)*180/np.pi # A rough estimate for flaps
return self._CD0+self._CD1*CL+self._CD2*CL*CL+CD_flap
def get_Cm(self, inputs):
"""Returns the moment coefficient
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Moment coefficient
"""
if self._type == "linear":
return self._Cma*inputs[0]+self._CmL0+inputs[3]*inputs[4]
def get_aL0(self, inputs):
"""Returns the zero-lift angle of attack
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Zero-lift angle of attack
"""
if self._type == "linear":
return self._aL0
def get_CLM(self, inputs):
"""Returns the lift slope with respect to Mach number
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Lift slope with respect to Mach number
"""
if self._type == "linear":
return self._CLM
def get_CLRe(self, inputs):
"""Returns the lift slope with respect to Reynolds number
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Lift slope with respect to Reynolds number
"""
if self._type == "linear":
return self._CLRe
def get_CLa(self, inputs):
"""Returns the lift slope
Parameters
----------
inputs : ndarray
Parameters which can affect the airfoil coefficients. The first
three are always alpha, Reynolds number, and Mach number. Fourth
is flap efficiency and fifth is flap deflection.
Returns
-------
float
Lift slope
"""
if self._type == "linear":
return self._CLa
def get_outline_points(self, N=200, cluster=True):
"""Returns an array of outline points showing the geometry of the airfoil.
Parameters
----------
N : int, optional
The number of outline points to return. Defaults to 200.
cluster : bool, optional
            Whether to use cosine clustering at the leading and trailing edges. Defaults to True.
Returns
-------
ndarray
Outline points in airfoil coordinates.
"""
if hasattr(self, "_x_outline"):
# Determine spacing of points
if cluster:
# Divide points between top and bottom
self._s_le = 0.5
N_t = int(N*self._s_le)
N_b = N-N_t
# Create distributions using cosine clustering
theta_t = np.linspace(0.0, np.pi, N_t)
s_t = 0.5*(1-np.cos(theta_t))*self._s_le
theta_b = np.linspace(0.0, np.pi, N_b)
s_b = 0.5*(1-np.cos(theta_b))*(1-self._s_le)+self._s_le
s = np.concatenate([s_t, s_b])
else:
s = np.linspace(0.0, 1.0, N)
# Get outline
X = self._x_outline(s)
Y = self._y_outline(s)
return np.concatenate([X[:,np.newaxis], Y[:,np.newaxis]], axis=1)
else:
raise RuntimeError("The geometry has not been defined for airfoil {0}.".format(self.name))
|
11525811
|
from typing import Callable, List
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_set import RequirementSet
InstallRequirementProvider = Callable[[str, InstallRequirement], InstallRequirement]
class BaseResolver:
def resolve(
self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
) -> RequirementSet:
raise NotImplementedError()
def get_installation_order(
self, req_set: RequirementSet
) -> List[InstallRequirement]:
raise NotImplementedError()
|
11525823
|
from typing import Any, Dict, Optional
from aiogram import Bot
from aiogram.dispatcher.filters.state import State
from aiogram.types import Chat, User
from .protocols import DialogRegistryProto, BaseDialogManager
from ..context.events import (
Data, Action, DialogStartEvent, DialogSwitchEvent, DialogUpdateEvent, StartMode
)
from ..context.stack import DEFAULT_STACK_ID
class BgManager(BaseDialogManager):
def __init__(
self,
user: User,
chat: Chat,
bot: Bot,
registry: DialogRegistryProto,
intent_id: Optional[str],
stack_id: Optional[str],
):
self.user = user
self.chat = chat
self.bot = bot
self._registry = registry
self.intent_id = intent_id
self.stack_id = stack_id
@property
def registry(self) -> DialogRegistryProto:
return self._registry
def bg(
self,
user_id: Optional[int] = None,
chat_id: Optional[int] = None,
stack_id: Optional[str] = None,
) -> "BaseDialogManager":
if user_id is not None:
user = User(id=user_id)
else:
user = self.user
if chat_id is not None:
chat = Chat(id=chat_id)
else:
chat = self.chat
same_chat = (user.id == self.user.id and chat.id == self.chat.id)
if stack_id is None:
if same_chat:
stack_id = self.stack_id
intent_id = self.intent_id
else:
stack_id = DEFAULT_STACK_ID
intent_id = None
else:
intent_id = None
return BgManager(
user=user,
chat=chat,
bot=self.bot,
registry=self.registry,
intent_id=intent_id,
stack_id=stack_id,
)
def _base_event_params(self):
return {
"bot": self.bot,
"from_user": self.user,
"chat": self.chat,
"intent_id": self.intent_id,
"stack_id": self.stack_id,
}
async def done(self, result: Any = None) -> None:
await self.registry.notify(DialogUpdateEvent(
action=Action.DONE,
data=result,
**self._base_event_params()
))
async def start(self, state: State, data: Data = None,
mode: StartMode = StartMode.NORMAL) -> None:
await self.registry.notify(DialogStartEvent(
action=Action.START,
data=data,
new_state=state,
mode=mode,
**self._base_event_params()
))
async def switch_to(self, state: State) -> None:
await self.registry.notify(DialogSwitchEvent(
action=Action.SWITCH,
data={},
new_state=state,
**self._base_event_params()
))
async def update(self, data: Dict) -> None:
await self.registry.notify(DialogUpdateEvent(
action=Action.UPDATE,
data=data,
**self._base_event_params()
))
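# Hedged usage note (not part of the original module): BgManager.bg() clones the
# manager for another user/chat; the current intent_id and stack_id are carried
# over only when the target chat and user are unchanged, otherwise the default
# stack is used with no intent.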
|
11525834
|
from typing import Optional, List
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch import Tensor
from torch.nn.parameter import Parameter
from . import _reduction as _Reduction
from ..__init__ import _PARALLEL_DIM
from ..__init__ import set_attribute
from ..distributed import scatter, reduce, gather, copy
from ..distributed import get_world_size, get_group_size
from ..distributed import get_group, get_rank
from ..utils import VocabUtility
def parallel_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, dim: Optional[int] = None) -> Tensor:
"""
    Parallel version of :func:`torch.nn.functional.linear`.
    Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
"""
assert weight.dim() == 2, 'weight must be two-dimensional, but got a {}D Tensor'.format(weight.dim())
    if dim is None:
return F.linear(input, weight, bias=bias)
elif dim == 0:
return _row_parallel_linear(input, weight, bias=bias)
elif dim == 1 or dim == -1:
return _col_parallel_linear(input, weight, bias=bias)
else:
raise ValueError(f"parallel linear supports the 2D Tensor slice along the dimension of '0', '1', '-1' and 'None', but got {dim}")
def _row_parallel_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor:
if hasattr(input, _PARALLEL_DIM) and getattr(input, _PARALLEL_DIM) in [-1, 1]:
input = input
else:
input = scatter(input, dim=-1)
output = F.linear(input, weight, bias=None)
output = reduce(output)
if bias is not None:
output = torch.add(output, bias)
set_attribute(input, -1)
set_attribute(output, None)
return output
def _col_parallel_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor:
input = copy(input)
output = F.linear(input, weight, bias=bias)
set_attribute(input, None)
set_attribute(output, -1)
return output
def parallel_cross_entropy(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
ignore_index: int = -100,
reduce: Optional[bool] = None,
reduction: str = "mean"
) -> Tensor:
"""
    Parallel version of :func:`torch.nn.functional.cross_entropy`.
"""
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
if hasattr(input, _PARALLEL_DIM):
if getattr(input, _PARALLEL_DIM) in [-1, 1]:
return ParallelCrossEntropy.apply(input, target, weight, None, ignore_index, None, reduction)
else:
return F.cross_entropy(input, target, weight, None, ignore_index, None, reduction)
else:
return F.cross_entropy(input, target, weight, None, ignore_index, None, reduction)
class ParallelCrossEntropy(torch.autograd.Function):
"""
    Column parallel version of :class:`torch.nn.CrossEntropyLoss`.
"""
@staticmethod
def forward(
ctx: "ParallelCrossEntropy",
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
ignore_index: int = -100,
reduce: Optional[bool] = None,
reduction: str = "mean"
) -> Tensor:
dim = input.dim()
if dim != 2:
raise ValueError(f'Expected 2 dimensional input, but got {dim}')
if input.size(0) != target.size(0):
raise ValueError(f'Expected input batch size ({input.size(0)}) to match target batch size ({target.size(0)}).')
world_size = get_world_size()
group = get_group()
n, c = input.size()
input_max = input.max(dim=-1, keepdim=True)[0]
dist.all_reduce(
input_max,
op=dist.ReduceOp.MAX,
group=group
)
input = input.sub(input_max)
exp_sum = input.exp().sum(dim=-1, keepdim=True)
dist.all_reduce(
exp_sum,
op=dist.ReduceOp.SUM,
group=group
)
softmax = input.exp().div(exp_sum)
one_hot = torch.zeros(
(n, c * world_size),
device=input.device,
requires_grad=False
)
one_hot.scatter_(1, target.unsqueeze(dim=-1), 1)
# scatter one-hot vectors into GPUs
one_hot = scatter(one_hot, dim=-1)
input = input.mul(one_hot).sum(dim=-1, keepdim=True)
dist.all_reduce(
input,
op=dist.ReduceOp.SUM,
group=group
)
out = torch.log(exp_sum).sub(input)
# perform reduction
reduction_enum = _Reduction.get_enum(reduction)
if reduction == 'sum' and reduction_enum == 2:
out = out.sum()
elif reduction == 'mean' and reduction_enum == 1:
out = out.mean()
elif reduction == 'none' and reduction_enum == 0:
pass
else:
raise ValueError(f"reduction must be in 'none' | 'sum' | 'mean', but got {reduction}. Default: 'mean'.")
# TODO:
# apply weight and reduction
# save for backward
ctx.save_for_backward(softmax, one_hot, torch.Tensor([reduction_enum]))
return out
@staticmethod
def backward(
ctx: "ParallelCrossEntropy",
grad_output: Tensor
) -> Tensor:
softmax, one_hot, reduction_enum, = ctx.saved_tensors
grad_input = softmax.sub(one_hot)
if reduction_enum == 1:
grad_input.div_(softmax.shape[0])
grad_input.mul_(grad_output)
return grad_input, None, None, None, None, None, None
def parallel_embedding(input: Tensor, weight: Tensor, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False, sparse: bool = False,
num_embeddings: int = None, dim: Optional[int] = None) -> Tensor:
assert weight.dim() == 2, 'weight must be two-dimensional, but got a {}D Tensor'.format(weight.dim())
    if dim is None:
return F.embedding(input, weight, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse)
elif dim == 0:
return _row_parallel_embedding(input, weight, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse, num_embeddings)
elif dim == 1 or dim == -1:
return _col_parallel_embedding(input, weight, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse, num_embeddings)
else:
        raise ValueError(f"parallel embedding supports the 2D Tensor slice along the dimension of '0', '1', '-1' and 'None', but got {dim}")
def _row_parallel_embedding(input: Tensor, weight: Tensor, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2.,
scale_grad_by_freq: bool = False, sparse: bool = False,
num_embeddings: Optional[int] = None) -> Tensor:
    # Divide the weight matrix along the vocabulary dimension.
vocab_start_index, vocab_end_index = \
VocabUtility.vocab_range_from_global_vocab_size(
num_embeddings,
get_rank(),
get_group_size()
)
if get_group_size() > 1:
# Build the mask.
input_mask = (input < vocab_start_index) | \
(input >= vocab_end_index)
# Mask the input.
masked_input = input.clone() - vocab_start_index
masked_input[input_mask] = 0
else:
masked_input = input
# Get embeddings
output = F.embedding(masked_input, weight, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse)
# Mask the output embedding.
if get_group_size() > 1:
output[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce(output)
return output
def _col_parallel_embedding(input: Tensor, weight: Tensor, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2.,
scale_grad_by_freq: bool = False, sparse: bool = False,
num_embeddings: Optional[int] = None) -> Tensor:
output = F.embedding(input, weight, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse)
# Gather from all the model parallel GPUs.
output = gather(output, dim=-1)
return output
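# Hedged usage note (not part of the original module): with dim=None the call
# reduces to a plain F.linear and needs no initialized process group, e.g.
#   x = torch.randn(4, 16); w = torch.randn(32, 16); b = torch.zeros(32)
#   parallel_linear(x, w, bias=b, dim=None).shape  -> torch.Size([4, 32])
# With dim=0 (row parallel) the input features are scattered across ranks and the
# partial products are summed with reduce(); with dim=1/-1 (column parallel) each
# rank computes a slice of the output features after copying the input to every rank.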
|
11525837
|
import tkinter as tk
import requests
from bs4 import BeautifulSoup
url = 'https://weather.com/en-IN/weather/today/l/32355ced66b7ce3ab7ccafb0a4f45f12e7c915bcf8454f712efa57474ba8d6c8'
frameWindow = tk.Tk()
frameWindow.title("Weather")
frameWindow.config(bg = 'white')
def getWeather():
page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    # The CSS class names below are obfuscated weather.com classes and can change without notice.
    location = soup.find('h1', class_="_1Ayv3").text
temperature = soup.find('span',class_="_3KcTQ").text
airquality = soup.find('text',class_='k2Z7I').text
airqualitytitle = soup.find('span',class_='_1VMr2').text
sunrise = soup.find('div',class_='_2ATeV').text
sunset = soup.find('div',class_='_2_gJb _2ATeV').text
humidity = soup.find('div',class_='_23DP5').text
wind = soup.find('span',class_='_1Va1P undefined').text
pressure = soup.find('span',class_='_3olKd undefined').text
locationlabel.config(text=(location))
templabel.config(text = temperature+"C")
WeatherText = "Sunrise : "+sunrise+"\n"+"SunSet : "+sunset+"\n"+"Pressure : "+pressure+"\n"+"Wind : "+wind+"\n"
weatherPrediction.config(text=WeatherText)
airqualityText = airquality + " "*5 + airqualitytitle + "\n"
airqualitylabel.config(text = airqualityText)
weatherPrediction.after(120000,getWeather)
frameWindow.update()
locationlabel = tk.Label(frameWindow , font = ('Calibri bold',20), bg = 'white')
locationlabel.grid(row = 0,column = 1, sticky='N',padx=20,pady=40)
templabel = tk.Label(frameWindow, font = ('Calibri bold', 40), bg="white")
templabel.grid(row=0,column = 0,sticky="W",padx=17)
weatherPrediction = tk.Label(frameWindow, font = ('Calibri', 15), bg="white")
weatherPrediction.grid(row=2,column=1,sticky="W",padx=40)
tk.Label(frameWindow,text = "Air Quality", font = ('Calibri bold',20), bg = 'white').grid(row = 1,column = 2, sticky='W',padx=20)
airqualitylabel = tk.Label(frameWindow, font = ('Calibri bold', 20), bg="white")
airqualitylabel.grid(row=2,column=2,sticky="W")
getWeather()
frameWindow.mainloop()
|
11525842
|
from __future__ import absolute_import, print_function, unicode_literals
import sys
from collections.abc import Callable
is_ironpython = "IronPython" in sys.version
def is_callable(x):
return isinstance(x, Callable)
def execfile(fname, glob, loc=None):
loc = loc if (loc is not None) else glob
exec(
compile(
open(
fname,
'r',
encoding='utf-8',
).read(),
fname,
'exec',
),
glob,
loc,
)
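# Hedged usage note (not part of the original module): drop-in replacement for the
# Python 2 builtin of the same name, e.g.
#   execfile('settings.py', globals())   # 'settings.py' is a hypothetical path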
|
11525855
|
from __future__ import annotations
from typing import Any, Dict, Optional
from boa3.constants import GAS_SCRIPT
from boa3.model.method import Method
from boa3.model.property import Property
from boa3.model.type.classes.classarraytype import ClassArrayType
from boa3.model.variable import Variable
class GasClass(ClassArrayType):
"""
A class used to represent GAS native contract
"""
def __init__(self):
super().__init__('GAS')
self._variables: Dict[str, Variable] = {}
self._class_methods: Dict[str, Method] = {}
self._constructor: Optional[Method] = None
@property
def instance_variables(self) -> Dict[str, Variable]:
return self._variables.copy()
@property
def class_variables(self) -> Dict[str, Variable]:
return {}
@property
def properties(self) -> Dict[str, Property]:
return {}
@property
def static_methods(self) -> Dict[str, Method]:
return {}
@property
def class_methods(self) -> Dict[str, Method]:
# avoid recursive import
from boa3.model.builtin.native.nep17_methods import (BalanceOfMethod, DecimalsMethod, SymbolMethod,
TotalSupplyMethod, TransferMethod)
if len(self._class_methods) == 0:
self._class_methods = {
'balanceOf': BalanceOfMethod(GAS_SCRIPT),
'decimals': DecimalsMethod(GAS_SCRIPT),
'symbol': SymbolMethod(GAS_SCRIPT),
'totalSupply': TotalSupplyMethod(GAS_SCRIPT),
'transfer': TransferMethod(GAS_SCRIPT)
}
return self._class_methods
@property
def instance_methods(self) -> Dict[str, Method]:
return {}
def constructor_method(self) -> Optional[Method]:
return self._constructor
@classmethod
def build(cls, value: Any = None) -> GasClass:
if value is None or cls._is_type_of(value):
return _Gas
@classmethod
def _is_type_of(cls, value: Any):
return isinstance(value, GasClass)
_Gas = GasClass()
|
11525876
|
import hoverxref
import setuptools
with open('README.rst', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='sphinx-hoverxref',
version=hoverxref.version,
author='<NAME>',
author_email='<EMAIL>',
description='Sphinx extension to embed content in a tooltip on xref hover',
url='https://github.com/readthedocs/sphinx-hoverxref',
license='MIT',
packages=setuptools.find_packages(exclude=['common', 'tests']),
long_description=long_description,
long_description_content_type='text/x-rst',
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Documentation :: Sphinx',
'Topic :: Software Development :: Documentation',
],
keywords='tooltip hoverxref sphinx',
project_urls={
'Documentation': 'https://sphinx-hoverxref.readthedocs.io/',
'Source': 'https://github.com/readthedocs/sphinx-hoverxref',
'Tracker': 'https://github.com/readthedocs/sphinx-hoverxref/issues',
},
)
|
11525881
|
import functools
import re
import parsimonious
from parsimonious import (
expressions,
)
from eth_abi.exceptions import (
ABITypeError,
ParseError,
)
grammar = parsimonious.Grammar(r"""
type = tuple_type / basic_type
tuple_type = components arrlist?
components = non_zero_tuple / zero_tuple
non_zero_tuple = "(" type next_type* ")"
next_type = "," type
zero_tuple = "()"
basic_type = base sub? arrlist?
base = alphas
sub = two_size / digits
two_size = (digits "x" digits)
arrlist = (const_arr / dynam_arr)+
const_arr = "[" digits "]"
dynam_arr = "[]"
alphas = ~"[A-Za-z]+"
digits = ~"[1-9][0-9]*"
""")
class NodeVisitor(parsimonious.NodeVisitor):
"""
Parsimonious node visitor which performs both parsing of type strings and
post-processing of parse trees. Parsing operations are cached.
"""
grammar = grammar
def visit_non_zero_tuple(self, node, visited_children):
# Ignore left and right parens
_, first, rest, _ = visited_children
return (first,) + rest
def visit_tuple_type(self, node, visited_children):
components, arrlist = visited_children
return TupleType(components, arrlist, node=node)
def visit_next_type(self, node, visited_children):
# Ignore comma
_, abi_type = visited_children
return abi_type
def visit_zero_tuple(self, node, visited_children):
return tuple()
def visit_basic_type(self, node, visited_children):
base, sub, arrlist = visited_children
return BasicType(base, sub, arrlist, node=node)
def visit_two_size(self, node, visited_children):
# Ignore "x"
first, _, second = visited_children
return first, second
def visit_const_arr(self, node, visited_children):
# Ignore left and right brackets
_, int_value, _ = visited_children
return (int_value,)
def visit_dynam_arr(self, node, visited_children):
return tuple()
def visit_alphas(self, node, visited_children):
return node.text
def visit_digits(self, node, visited_children):
return int(node.text)
def generic_visit(self, node, visited_children):
if isinstance(node.expr, expressions.OneOf):
# Unwrap value chosen from alternatives
return visited_children[0]
if isinstance(node.expr, expressions.Optional):
# Unwrap optional value or return `None`
if len(visited_children) != 0:
return visited_children[0]
return None
return tuple(visited_children)
@functools.lru_cache(maxsize=None)
def parse(self, type_str):
"""
Parses a type string into an appropriate instance of
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
throws :class:`~eth_abi.exceptions.ParseError`.
:param type_str: The type string to be parsed.
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
information about the parsed type string.
"""
if not isinstance(type_str, str):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr)
visitor = NodeVisitor()
class ABIType:
"""
Base class for results of type string parsing operations.
"""
__slots__ = ('arrlist', 'node')
def __init__(self, arrlist=None, node=None):
self.arrlist = arrlist
"""
The list of array dimensions for a parsed type. Equal to ``None`` if
type string has no array dimensions.
"""
self.node = node
"""
The parsimonious ``Node`` instance associated with this parsed type.
Used to generate error messages for invalid types.
"""
def __repr__(self): # pragma: no cover
return '<{} {}>'.format(
type(self).__qualname__,
repr(self.to_type_str()),
)
def __eq__(self, other):
# Two ABI types are equal if their string representations are equal
return (
type(self) is type(other) and
self.to_type_str() == other.to_type_str()
)
def to_type_str(self): # pragma: no cover
"""
Returns the string representation of an ABI type. This will be equal to
the type string from which it was created.
"""
raise NotImplementedError('Must implement `to_type_str`')
@property
def item_type(self):
"""
If this type is an array type, equal to an appropriate
:class:`~eth_abi.grammar.ABIType` instance for the array's items.
"""
raise NotImplementedError('Must implement `item_type`')
def validate(self): # pragma: no cover
"""
Validates the properties of an ABI type against the solidity ABI spec:
https://solidity.readthedocs.io/en/develop/abi-spec.html
Raises :class:`~eth_abi.exceptions.ABITypeError` if validation fails.
"""
raise NotImplementedError('Must implement `validate`')
def invalidate(self, error_msg):
# Invalidates an ABI type with the given error message. Expects that a
# parsimonious node was provided from the original parsing operation
# that yielded this type.
node = self.node
raise ABITypeError(
"For '{comp_str}' type at column {col} "
"in '{type_str}': {error_msg}".format(
comp_str=node.text,
col=node.start + 1,
type_str=node.full_text,
error_msg=error_msg,
),
)
@property
def is_array(self):
"""
Equal to ``True`` if a type is an array type (i.e. if it has an array
dimension list). Otherwise, equal to ``False``.
"""
return self.arrlist is not None
@property
def is_dynamic(self):
"""
Equal to ``True`` if a type has a dynamically sized encoding.
Otherwise, equal to ``False``.
"""
raise NotImplementedError('Must implement `is_dynamic`')
@property
def _has_dynamic_arrlist(self):
return self.is_array and any(len(dim) == 0 for dim in self.arrlist)
class TupleType(ABIType):
"""
Represents the result of parsing a tuple type string e.g. "(int,bool)".
"""
__slots__ = ('components',)
def __init__(self, components, arrlist=None, *, node=None):
super().__init__(arrlist, node)
self.components = components
"""
A tuple of :class:`~eth_abi.grammar.ABIType` instances for each of the
tuple type's components.
"""
def to_type_str(self):
arrlist = self.arrlist
if isinstance(arrlist, tuple):
arrlist = ''.join(repr(list(a)) for a in arrlist)
else:
arrlist = ''
return '({}){}'.format(
','.join(c.to_type_str() for c in self.components),
arrlist,
)
@property
def item_type(self):
if not self.is_array:
raise ValueError("Cannot determine item type for non-array type '{}'".format(
self.to_type_str(),
))
return type(self)(
self.components,
self.arrlist[:-1] or None,
node=self.node,
)
def validate(self):
for c in self.components:
c.validate()
@property
def is_dynamic(self):
if self._has_dynamic_arrlist:
return True
return any(c.is_dynamic for c in self.components)
class BasicType(ABIType):
"""
Represents the result of parsing a basic type string e.g. "uint", "address",
"ufixed128x19[][2]".
"""
__slots__ = ('base', 'sub')
def __init__(self, base, sub=None, arrlist=None, *, node=None):
super().__init__(arrlist, node)
self.base = base
"""The base of a basic type e.g. "uint" for "uint256" etc."""
self.sub = sub
"""
The sub type of a basic type e.g. ``256`` for "uint256" or ``(128, 18)``
for "ufixed128x18" etc. Equal to ``None`` if type string has no sub
type.
"""
def to_type_str(self):
sub, arrlist = self.sub, self.arrlist
if isinstance(sub, int):
sub = str(sub)
elif isinstance(sub, tuple):
sub = 'x'.join(str(s) for s in sub)
else:
sub = ''
if isinstance(arrlist, tuple):
arrlist = ''.join(repr(list(a)) for a in arrlist)
else:
arrlist = ''
return self.base + sub + arrlist
@property
def item_type(self):
if not self.is_array:
raise ValueError("Cannot determine item type for non-array type '{}'".format(
self.to_type_str(),
))
return type(self)(
self.base,
self.sub,
self.arrlist[:-1] or None,
node=self.node,
)
@property
def is_dynamic(self):
if self._has_dynamic_arrlist:
return True
if self.base == 'string':
return True
if self.base == 'bytes' and self.sub is None:
return True
return False
def validate(self):
base, sub = self.base, self.sub
# Check validity of string type
if base == 'string':
if sub is not None:
self.invalidate('string type cannot have suffix')
# Check validity of bytes type
elif base == 'bytes':
if not (sub is None or isinstance(sub, int)):
self.invalidate('bytes type must have either no suffix or a numerical suffix')
if isinstance(sub, int) and sub > 32:
self.invalidate('maximum 32 bytes for fixed-length bytes')
# Check validity of integer type
elif base in ('int', 'uint'):
if not isinstance(sub, int):
self.invalidate('integer type must have numerical suffix')
if sub < 8 or 256 < sub:
self.invalidate('integer size out of bounds (max 256 bits)')
if sub % 8 != 0:
self.invalidate('integer size must be multiple of 8')
# Check validity of fixed type
elif base in ('fixed', 'ufixed'):
if not isinstance(sub, tuple):
self.invalidate(
'fixed type must have suffix of form <bits>x<exponent>, e.g. 128x19',
)
bits, minus_e = sub
if bits < 8 or 256 < bits:
self.invalidate('fixed size out of bounds (max 256 bits)')
if bits % 8 != 0:
self.invalidate('fixed size must be multiple of 8')
if minus_e < 1 or 80 < minus_e:
self.invalidate(
'fixed exponent size out of bounds, {} must be in 1-80'.format(
minus_e,
),
)
# Check validity of hash type
elif base == 'hash':
if not isinstance(sub, int):
self.invalidate('hash type must have numerical suffix')
# Check validity of address type
elif base == 'address':
if sub is not None:
self.invalidate('address cannot have suffix')
TYPE_ALIASES = {
'int': 'int256',
'uint': 'uint256',
'fixed': 'fixed128x18',
'ufixed': 'ufixed128x18',
'function': 'bytes24',
'byte': 'bytes1',
}
TYPE_ALIAS_RE = re.compile(r'\b({})\b'.format(
'|'.join(re.escape(a) for a in TYPE_ALIASES.keys())
))
def normalize(type_str):
"""
Normalizes a type string into its canonical version e.g. the type string
'int' becomes 'int256', etc.
:param type_str: The type string to be normalized.
:returns: The canonical version of the input type string.
"""
return TYPE_ALIAS_RE.sub(
lambda match: TYPE_ALIASES[match.group(0)],
type_str,
)
parse = visitor.parse
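# Hedged usage sketch (not part of the original module): exercising the
# module-level `parse` and `normalize` helpers defined above.
if __name__ == '__main__':
    t = parse('(int256,bool)[2]')
    print(t.to_type_str())           # (int256,bool)[2]
    print(t.is_array, t.is_dynamic)  # True False
    t.validate()                     # raises ABITypeError only for invalid types
    print(normalize('uint'))         # uint256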
|
11525902
|
from ramda import *
from ramda.private.asserts import *
def remove_test():
assert_equal(remove(2, 3, [1, 2, 3, 4, 5, 6, 7, 8]), [1, 2, 6, 7, 8])
|
11525942
|
from typing import NamedTuple, List, Dict
from ..asserts.asserts import FieldValidator
class Production(NamedTuple):
id: str
children: List[str]
@classmethod
def from_json(cls, d: dict) -> 'Production':
v = FieldValidator(cls, d)
return Production(
id=v.get('id', int),
children=v.get_list('children', int),
)
def to_json(self) -> dict:
return {
'id': self.id,
'children': self.children,
}
class Index(NamedTuple):
productions: Dict[str, Production]
indices: Dict[str, int]
@classmethod
def from_json(cls, d: dict) -> 'Index':
v = FieldValidator(cls, d)
return Index(
productions=v.get_map('productions', str, dict, val_build=Production.from_json),
indices=v.get_map('indices', str, int),
)
def to_json(self) -> dict:
return {
'productions': {k: v.to_json() for k, v in self.productions.items()},
'indices': self.indices,
}
def vocab(self) -> int:
return len(self.indices)
|
11525959
|
from random import randint
class Product(object):
def __init__(self, name, price = 10, weight = 20, flammability = 0.5):
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
        self.identifier = randint(10**6, 10**7 - 1)  # random 7-digit identifier (randint needs int bounds)
def stealability(self):
ratio = self.price / self.weight
if ratio < 0.5:
return "Not so stealable..."
elif ratio < 1.0:
return "Kinda stealable."
else:
return "Very stealable!"
def explode(self):
explosiveness = self.flammability * self.weight
if explosiveness < 10:
return "...fizzle."
elif explosiveness < 50:
return "...boom!"
else:
return "...BABOOM!!"
class BoxingGlove(Product):
def __init__(self, name, price = 10, weight = 10, flammability = 0.5):
super().__init__(name, price, weight, flammability)
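# Hedged usage sketch (not part of the original module): quick check of the
# default attributes and the two report methods.
if __name__ == '__main__':
    prod = Product('A Cool Toy')
    print(prod.price, prod.weight, prod.flammability)  # 10 20 0.5
    print(prod.stealability())  # Kinda stealable.
    print(prod.explode())       # ...boom!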
|
11525991
|
del_items(0x8013CAB4)
SetType(0x8013CAB4, "struct Creds CreditsTitle[6]")
del_items(0x8013CC5C)
SetType(0x8013CC5C, "struct Creds CreditsSubTitle[28]")
del_items(0x8013D0F8)
SetType(0x8013D0F8, "struct Creds CreditsText[35]")
del_items(0x8013D210)
SetType(0x8013D210, "int CreditsTable[224]")
del_items(0x8013E420)
SetType(0x8013E420, "struct DIRENTRY card_dir[16][2]")
del_items(0x8013E920)
SetType(0x8013E920, "struct file_header card_header[16][2]")
del_items(0x8013E380)
SetType(0x8013E380, "struct sjis sjis_table[37]")
del_items(0x80143834)
SetType(0x80143834, "unsigned char save_buffer[106496]")
del_items(0x801437B0)
SetType(0x801437B0, "struct FeTable McLoadGameMenu")
del_items(0x80143790)
SetType(0x80143790, "char *CharFileList[5]")
del_items(0x801437A4)
SetType(0x801437A4, "char *Classes[3]")
del_items(0x801437CC)
SetType(0x801437CC, "struct FeTable McLoadCharMenu")
del_items(0x801437E8)
SetType(0x801437E8, "struct FeTable McLoadCard1Menu")
del_items(0x80143804)
SetType(0x80143804, "struct FeTable McLoadCard2Menu")
|
11525993
|
import numpy as np
import cv2
from lanetracker.window import Window
from lanetracker.line import Line
from lanetracker.gradients import get_edges
from lanetracker.perspective import flatten_perspective
class LaneTracker(object):
"""
Tracks the lane in a series of consecutive frames.
"""
def __init__(self, first_frame, n_windows=9):
"""
Initialises a tracker object.
Parameters
----------
first_frame : First frame of the frame series. We use it to get dimensions and initialise values.
n_windows : Number of windows we use to track each lane edge.
"""
(self.h, self.w, _) = first_frame.shape
self.win_n = n_windows
self.left = None
self.right = None
self.l_windows = []
self.r_windows = []
self.initialize_lines(first_frame)
def initialize_lines(self, frame):
"""
Finds starting points for left and right lines (e.g. lane edges) and initialises Window and Line objects.
Parameters
----------
frame : Frame to scan for lane edges.
"""
# Take a histogram of the bottom half of the image
edges = get_edges(frame)
(flat_edges, _) = flatten_perspective(edges)
histogram = np.sum(flat_edges[int(self.h / 2):, :], axis=0)
nonzero = flat_edges.nonzero()
# Create empty lists to receive left and right lane pixel indices
        l_indices = np.empty([0], dtype=int)
        r_indices = np.empty([0], dtype=int)
window_height = int(self.h / self.win_n)
for i in range(self.win_n):
l_window = Window(
y1=self.h - (i + 1) * window_height,
y2=self.h - i * window_height,
x=self.l_windows[-1].x if len(self.l_windows) > 0 else np.argmax(histogram[:self.w // 2])
)
r_window = Window(
y1=self.h - (i + 1) * window_height,
y2=self.h - i * window_height,
x=self.r_windows[-1].x if len(self.r_windows) > 0 else np.argmax(histogram[self.w // 2:]) + self.w // 2
)
# Append nonzero indices in the window boundary to the lists
l_indices = np.append(l_indices, l_window.pixels_in(nonzero), axis=0)
r_indices = np.append(r_indices, r_window.pixels_in(nonzero), axis=0)
self.l_windows.append(l_window)
self.r_windows.append(r_window)
self.left = Line(x=nonzero[1][l_indices], y=nonzero[0][l_indices], h=self.h, w = self.w)
self.right = Line(x=nonzero[1][r_indices], y=nonzero[0][r_indices], h=self.h, w = self.w)
def scan_frame_with_windows(self, frame, windows):
"""
Scans a frame using initialised windows in an attempt to track the lane edges.
Parameters
----------
frame : New frame
windows : Array of windows to use for scanning the frame.
Returns
-------
A tuple of arrays containing coordinates of points found in the specified windows.
"""
        indices = np.empty([0], dtype=int)
nonzero = frame.nonzero()
window_x = None
for window in windows:
indices = np.append(indices, window.pixels_in(nonzero, window_x), axis=0)
window_x = window.mean_x
return (nonzero[1][indices], nonzero[0][indices])
def process(self, frame, draw_lane=True, draw_statistics=True):
"""
Performs a full lane tracking pipeline on a frame.
Parameters
----------
frame : New frame to process.
draw_lane : Flag indicating if we need to draw the lane on top of the frame.
draw_statistics : Flag indicating if we need to render the debug information on top of the frame.
Returns
-------
Resulting frame.
"""
edges = get_edges(frame)
(flat_edges, unwarp_matrix) = flatten_perspective(edges)
(l_x, l_y) = self.scan_frame_with_windows(flat_edges, self.l_windows)
self.left.process_points(l_x, l_y)
(r_x, r_y) = self.scan_frame_with_windows(flat_edges, self.r_windows)
self.right.process_points(r_x, r_y)
if draw_statistics:
edges = get_edges(frame, separate_channels=True)
debug_overlay = self.draw_debug_overlay(flatten_perspective(edges)[0])
top_overlay = self.draw_lane_overlay(flatten_perspective(frame)[0])
debug_overlay = cv2.resize(debug_overlay, (0, 0), fx=0.3, fy=0.3)
top_overlay = cv2.resize(top_overlay, (0, 0), fx=0.3, fy=0.3)
frame[:250, :, :] = frame[:250, :, :] * .4
(h, w, _) = debug_overlay.shape
frame[20:20 + h, 20:20 + w, :] = debug_overlay
frame[20:20 + h, 20 + 20 + w:20 + 20 + w + w, :] = top_overlay
text_x = 20 + 20 + w + w + 20
self.draw_text(frame, 'Radius of curvature: {} m'.format(self.radius_of_curvature()), text_x, 80)
self.draw_text(frame, 'Distance (left): {:.1f} m'.format(self.left.camera_distance()), text_x, 140)
self.draw_text(frame, 'Distance (right): {:.1f} m'.format(self.right.camera_distance()), text_x, 200)
if draw_lane:
frame = self.draw_lane_overlay(frame, unwarp_matrix)
return frame
def draw_text(self, frame, text, x, y):
cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .8, (255, 255, 255), 2)
def draw_debug_overlay(self, binary, lines=True, windows=True):
"""
Draws an overlay with debugging information on a bird's-eye view of the road (e.g. after applying perspective
transform).
Parameters
----------
binary : Frame to overlay.
lines : Flag indicating if we need to draw lines.
windows : Flag indicating if we need to draw windows.
Returns
-------
        Frame with a debug information overlay.
"""
if len(binary.shape) == 2:
image = np.dstack((binary, binary, binary))
else:
image = binary
if windows:
for window in self.l_windows:
coordinates = window.coordinates()
cv2.rectangle(image, coordinates[0], coordinates[1], (1., 1., 0), 2)
for window in self.r_windows:
coordinates = window.coordinates()
cv2.rectangle(image, coordinates[0], coordinates[1], (1., 1., 0), 2)
if lines:
cv2.polylines(image, [self.left.get_points()], False, (1., 0, 0), 2)
cv2.polylines(image, [self.right.get_points()], False, (1., 0, 0), 2)
return image * 255
def draw_lane_overlay(self, image, unwarp_matrix=None):
"""
Draws an overlay with tracked lane applying perspective unwarp to project it on the original frame.
Parameters
----------
image : Original frame.
unwarp_matrix : Transformation matrix to unwarp the bird's eye view to initial frame. Defaults to `None` (in
which case no unwarping is applied).
Returns
-------
Frame with a lane overlay.
"""
# Create an image to draw the lines on
overlay = np.zeros_like(image).astype(np.uint8)
points = np.vstack((self.left.get_points(), np.flipud(self.right.get_points())))
# Draw the lane onto the warped blank image
cv2.fillPoly(overlay, [points], (0, 255, 0))
if unwarp_matrix is not None:
# Warp the blank back to original image space using inverse perspective matrix (Minv)
overlay = cv2.warpPerspective(overlay, unwarp_matrix, (image.shape[1], image.shape[0]))
# Combine the result with the original image
return cv2.addWeighted(image, 1, overlay, 0.3, 0)
def radius_of_curvature(self):
"""
Calculates radius of the lane curvature by averaging curvature of the edge lines.
Returns
-------
Radius of the lane curvature in meters.
"""
return int(np.average([self.left.radius_of_curvature(), self.right.radius_of_curvature()]))
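# Hedged usage note (not part of the original module): a typical loop feeds frames
# (e.g. read with cv2.VideoCapture) to LaneTracker.process(); the constructor both
# records the frame dimensions and fits the initial left/right lines from that
# first frame.
#   tracker = LaneTracker(first_frame)
#   annotated = tracker.process(next_frame)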
|
11526013
|
import gym
from gym.wrappers import TimeLimit
from rlkit.envs.wrappers import CustomInfoEnv, NormalizedBoxEnv
def make_env(name):
env = gym.make(name)
# Remove TimeLimit Wrapper
if isinstance(env, TimeLimit):
env = env.unwrapped
env = CustomInfoEnv(env)
env = NormalizedBoxEnv(env)
return env
|
11526062
|
from functools import partial
from textwrap import dedent
from unittest import skip, expectedFailure # noqa: F401
import sublime
from SublimeLinter.lint import (
backend,
Linter,
linter as linter_module,
util,
)
from unittesting import DeferrableTestCase
from SublimeLinter.tests.parameterized import parameterized as p
from SublimeLinter.tests.mockito import (
unstub,
verify,
when,
)
class _BaseTestCase(DeferrableTestCase):
@classmethod
def setUpClass(cls):
cls.view = sublime.active_window().new_file()
# make sure we have a window to work with
s = sublime.load_settings("Preferences.sublime-settings")
s.set("close_windows_when_empty", False)
@classmethod
def tearDownClass(cls):
if cls.view:
cls.view.set_scratch(True)
cls.view.window().focus_view(cls.view)
cls.view.window().run_command("close_file")
def setUp(self):
when(util).which('fake_linter_1').thenReturn('fake_linter_1')
# it's just faster if we mock this out
when(linter_module).register_linter(...).thenReturn(None)
def tearDown(self):
unstub()
VIEW_UNCHANGED = lambda: False # noqa: E731
execute_lint_task = partial(
backend.execute_lint_task, offsets=(0, 0, 0), view_has_changed=VIEW_UNCHANGED
)
class TestPostFilterResults(_BaseTestCase):
@p.expand([
# Ensure 'falsy' values do not filter anything
([], [{'line': 0}, {'line': 1}, {'line': 2}, {'line': 3}]),
(None, [{'line': 0}, {'line': 1}, {'line': 2}, {'line': 3}]),
(False, [{'line': 0}, {'line': 1}, {'line': 2}, {'line': 3}]),
(['age'], []),
(['massage'], [{'line': 0}, {'line': 1}, {'line': 2}]),
# For convenience allow strings as input
('age', []),
('massage', [{'line': 0}, {'line': 1}, {'line': 2}]),
# All input is interpreted as regex strings
(['m[ae]ss'], []),
(['mess|mass'], []),
(['mess', 'mas{2}'], []),
# the error code (aka rule name) can be checked
(['W3:'], [{'line': 1}, {'line': 2}, {'line': 3}]),
(['W3: '], [{'line': 1}, {'line': 2}, {'line': 3}]),
(['W3: '], [{'line': 0}, {'line': 1}, {'line': 2}, {'line': 3}]),
([r'W\d*:'], [{'line': 1}, {'line': 2}, {'line': 3}]),
# filter error_type 'error'
(['error'], [{'line': 0}, {'line': 1}]),
(['error:'], [{'line': 0}, {'line': 1}]),
(['error: '], [{'line': 0}, {'line': 1}]),
(['error: '], [{'line': 0}, {'line': 1}, {'line': 2}, {'line': 3}]),
# filter error_type 'warning'
(['warning'], [{'line': 2}, {'line': 3}]),
(['warning:'], [{'line': 2}, {'line': 3}]),
(['warning: '], [{'line': 2}, {'line': 3}]),
(['warning: '], [{'line': 0}, {'line': 1}, {'line': 2}, {'line': 3}]),
], doc_func=lambda f, n, param: repr(param.args[0]))
def test_post_filter_results(self, filter_errors, expected):
class FakeLinter(Linter):
cmd = ('fake_linter_1')
defaults = {'selector': None}
regex = r"""(?x)
^stdin:(?P<line>\d+):(?P<col>\d+)?\s
(\[(?P<warning>[^\]]+)\]\s)?
(?P<message>.*)$
"""
settings = {
'filter_errors': filter_errors
}
linter = FakeLinter(self.view, settings)
INPUT = dedent("""
I
am
the
swan""")
OUTPUT = dedent("""\
stdin:1:1 [w3] The message
stdin:2:1 [S534] The message
stdin:3:1 The mess age
stdin:4:1 The massage
""")
when(linter)._communicate(...).thenReturn(OUTPUT)
result = execute_lint_task(linter, INPUT)
result = [{'line': error['line']} for error in result]
self.assertEqual(result, expected)
@p.expand([
(['d('], "'d(' in 'filter_errors' is not a valid regex pattern: 'unbalanced parenthesis'."),
(True, "'filter_errors' must be set to a string or a list of strings.\nGot 'True' instead"),
(123, "'filter_errors' must be set to a string or a list of strings.\nGot '123' instead"),
])
def test_warn_on_illegal_regex_string(self, filter_errors, message):
class FakeLinter(Linter):
cmd = ('fake_linter_1')
defaults = {'selector': None}
regex = r"""(?x)
^stdin:(?P<line>\d+):(?P<col>\d+)?\s
(\[(?P<warning>[^\]]+)\]\s)?
(?P<message>.*)$
"""
settings = {
'filter_errors': filter_errors
}
linter = FakeLinter(self.view, settings)
INPUT = dedent("""
I
am
the
swan""")
OUTPUT = dedent("""\
stdin:1:1 [w3] The message
stdin:2:1 [S534] The message
stdin:3:1 The mess age
stdin:4:1 The massage
""")
when(linter)._communicate(...).thenReturn(OUTPUT)
when(linter.logger).error(message)
execute_lint_task(linter, INPUT)
verify(linter.logger, times=1).error(message)
|
11526118
|
from .dataloader import *
from .dataset import *
from .openml_download import *
from .mxutils import *
|
11526127
|
from unification import var
from kanren import run, membero
from kanren.arith import lt, gt, lte, gte, add, sub, mul, mod, div
x = var('x')
y = var('y')
def results(g):
return list(g({}))
def test_lt():
assert results(lt(1, 2))
assert not results(lt(2, 1))
assert not results(lt(2, 2))
def test_gt():
assert results(gt(2, 1))
assert not results(gt(1, 2))
assert not results(gt(2, 2))
def test_lte():
assert results(lte(2, 2))
def test_gte():
assert results(gte(2, 2))
def test_add():
assert results(add(1, 2, 3))
assert not results(add(1, 2, 4))
assert results(add(1, 2, 3))
assert results(add(1, 2, x)) == [{x: 3}]
assert results(add(1, x, 3)) == [{x: 2}]
assert results(add(x, 2, 3)) == [{x: 1}]
def test_sub():
assert results(sub(3, 2, 1))
assert not results(sub(4, 2, 1))
assert results(sub(3, 2, x)) == [{x: 1}]
assert results(sub(3, x, 1)) == [{x: 2}]
assert results(sub(x, 2, 1)) == [{x: 3}]
def test_mul():
assert results(mul(2, 3, 6))
assert not results(mul(2, 3, 7))
assert results(mul(2, 3, x)) == [{x: 6}]
assert results(mul(2, x, 6)) == [{x: 3}]
assert results(mul(x, 3, 6)) == [{x: 2}]
assert mul.__name__ == 'mul'
def test_mod():
assert results(mod(5, 3, 2))
def test_div():
assert results(div(6, 2, 3))
assert not results(div(6, 2, 2))
assert results(div(6, 2, x)) == [{x: 3}]
def test_complex():
numbers = tuple(range(10))
results = set(run(0, x, (sub, y, x, 1), (membero, y, numbers), (
mod, y, 2, 0), (membero, x, numbers)))
expected = set((1, 3, 5, 7))
assert results == expected
|
11526136
|
import wx
import re
import platform
import datetime
import Utils
reNonTimeChars = re.compile('[^0-9:.]')
def secsToValue( secs, allow_none, display_seconds, display_milliseconds ):
if secs is None and allow_none:
return secs
v = Utils.formatTime(
secs,
highPrecision=True, extraPrecision=True,
forceHours=True, twoDigitHours=True,
)
if not display_seconds:
v = v[:v.rfind(':')]
elif not display_milliseconds:
if '.' in v:
v = v[:v.find('.')]
return v
def valueToSecs( v, display_seconds, display_milliseconds ):
if v is None:
return v
v = reNonTimeChars.sub( '', '{}'.format(v) )
if not display_seconds:
if len(v.split(':')) < 3:
v += ':00'
elif not display_milliseconds:
v = v[:v.find('.')] if '.' in v else v
return Utils.StrToSeconds( v )
def getSeconds( v, display_seconds, display_milliseconds ):
if isinstance( v, str ):
return valueToSecs( v, display_seconds, display_milliseconds )
elif isinstance( v, wx.DateTime ):
return v.GetHour()*60*60 + v.GetMinute()*60 + v.GetSecond() + v.GetMillisecond()/1000.0
elif isinstance( v, (datetime.datetime, datetime.time) ):
return v.hour*60*60 + v.minute*60 + v.second + v.microsecond/1000000.0
elif isinstance( v, (float, int) ):
return v
else:
return 0.0
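# Illustrative conversions (the Utils.StrToSeconds behaviour is an assumption: it is taken to parse 'HH:MM:SS[.fff]'):
#   valueToSecs('01:02:03.500', display_seconds=True, display_milliseconds=True)        -> 3723.5
#   getSeconds(datetime.time(1, 2, 3), display_seconds=True, display_milliseconds=True) -> 3723.0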
# Masked controls still don't work on anything but Windows. Sigh :(
if platform.system() == 'Windows':
import wx.lib.masked as masked
class HighPrecisionTimeEdit( masked.TextCtrl ):
mask = '##:##:##.###'
        validRegex = r'[0-9][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9]'
def __init__( self, parent, id=wx.ID_ANY, seconds=None, display_seconds=True, display_milliseconds=True, value=None, allow_none=False, style=0, size=wx.DefaultSize ):
# Utils.writeLog( 'HighPrecisionTimeEdit: Windows' )
self.allow_none = allow_none
self.display_seconds = display_seconds
self.display_milliseconds = display_seconds and display_milliseconds
if not display_seconds:
self.mask = '##:##'
self.validRegex = '[0-9][0-9]:[0-5][0-9]'
elif not display_milliseconds:
self.mask = '##:##:##'
self.validRegex = '[0-9][0-9]:[0-5][0-9]:[0-5][0-9]'
self.defaultValue = self.mask.replace('#', '0')
self.emptyValue = self.mask.replace('#', ' ')
super().__init__(
parent, id,
mask = self.mask,
defaultValue = value or self.defaultValue,
validRegex = self.validRegex,
useFixedWidthFont = False,
style = style & ~(wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB|wx.TE_MULTILINE|wx.TE_PASSWORD),
size = size,
)
if seconds is not None:
self.SetSeconds( seconds )
def GetSeconds( self ):
v = self.GetValue()
if self.allow_none and v == self.emptyValue:
return None
return valueToSecs( v, self.display_seconds, self.display_milliseconds )
def SetSeconds( self, secs ):
super().SetValue( secsToValue(secs, self.allow_none, self.display_seconds, self.display_milliseconds) )
def SetValue( self, v ):
if self.allow_none and v is None:
super().SetValue( '' )
else:
self.SetSeconds( getSeconds(v, self.display_seconds, self.display_milliseconds) )
else:
import string
class TextBoxTipPopup( wx.PopupTransientWindow ):
"""Basic Tooltip"""
def __init__(self, parent, style, text):
super().__init__(parent, style)
self.SetBackgroundColour(wx.YELLOW)
border = 10
            st = wx.StaticText(self, label = text, pos=(border//2, border//2))
sz = st.GetBestSize()
self.SetSize( (sz.width+border, sz.height+border) )
class HighPrecisionTimeEdit( wx.TextCtrl ):
defaultValue = '00:00:00.000'
emptyValue = ''
def __init__( self, parent, id=wx.ID_ANY, seconds=None, value=None, display_seconds=True, display_milliseconds=True, allow_none=False, style=0, size=wx.DefaultSize ):
# Utils.writeLog( 'HighPrecisionTimeEdit: Mac/Linux' )
self.allow_none = allow_none
self.display_seconds = display_seconds
self.display_milliseconds = display_seconds and display_milliseconds
if not display_seconds:
self.defaultValue = '00:00'
elif not display_milliseconds:
self.defaultValue = '00:00:00'
value = self.defaultValue if value is None else reNonTimeChars.sub( '', '{}'.format(value) )
super().__init__(
parent, id,
value = value,
style = style & ~(wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB|wx.TE_MULTILINE|wx.TE_PASSWORD),
size = size,
)
seconds = seconds if seconds is not None else valueToSecs( value, self.display_seconds, self.display_milliseconds )
self.SetSeconds( seconds )
self.Bind(wx.EVT_CHAR, self.onKeypress)
self.Bind(wx.EVT_TEXT_PASTE, self.onPaste)
self.Bind(wx.EVT_LEFT_DCLICK, self.onDoubleClick)
def onKeypress(self, event):
keycode = event.GetKeyCode()
obj = event.GetEventObject()
val = obj.GetValue()
# filter unicode characters
if keycode == wx.WXK_NONE:
pass
            # allow digits, tab (9) and colon (58)
            elif chr(keycode) in string.digits or keycode == 9 or keycode == 58:
event.Skip()
# allow special, non-printable keycodes
elif chr(keycode) not in string.printable:
event.Skip() # allow all other special keycode
# allow one '.'
elif chr(keycode) == '.' and '.' not in val:
event.Skip()
return
def onPaste(self, event):
            self.text_data = wx.TextDataObject()
            success = False
            if wx.TheClipboard.Open():
success = wx.TheClipboard.GetData(self.text_data)
wx.TheClipboard.Close()
if success:
self.text_data = self.text_data.GetText()
if self.ValidateTimeFormat(self.text_data):
self.SetValue(self.text_data)
return
else:
WarnTip = TextBoxTipPopup(self, wx.SIMPLE_BORDER,"Incorrect time format on the clipboard")
xPos, yPos = self.GetPosition()
height = WarnTip.GetClientSize()[1]
pos = self.ClientToScreen( (xPos - xPos, yPos - yPos + height) )
WarnTip.Position( pos, (0,0) )
WarnTip.Popup()
def onDoubleClick(self, event):
self.SetSelection(-1,-1)
def ValidateTimeFormat(self, time):
if not time and self.allow_none:
return True
for format in ('%H:%M', '%H:%M:%S', '%H:%M:%S.%f'):
try:
datetime.datetime.strptime(time, format)
return True
except Exception:
pass
return False
def GetSeconds( self ):
v = self.GetValue()
if self.allow_none and v == self.emptyValue:
return None
return valueToSecs( v, self.display_seconds, self.display_milliseconds )
def SetSeconds( self, secs ):
super().SetValue( secsToValue(secs, self.allow_none, self.display_seconds, self.display_milliseconds) )
def SetValue( self, v ):
if self.allow_none and v is None:
super().SetValue( '' )
else:
self.SetSeconds( getSeconds(v, self.display_seconds, self.display_milliseconds) )
def GetValue( self ):
v = super().GetValue()
if v is None:
return None
v = reNonTimeChars.sub( '', v )
if v == self.emptyValue and self.allow_none:
return None
if not self.display_seconds and len(v.split(':')) < 3:
v += ':00'
secs = Utils.StrToSeconds( v )
return secsToValue( secs, self.allow_none, self.display_seconds, self.display_milliseconds )
if __name__ == '__main__':
# Self-test.
app = wx.App(False)
mainWin = wx.Frame(None,title="hpte", size=(1024,600))
vs = wx.BoxSizer( wx.VERTICAL )
hpte1 = HighPrecisionTimeEdit( mainWin, value="10:00:00", size=(200,-1) )
hpte2 = HighPrecisionTimeEdit( mainWin, display_milliseconds=False, value="10:00", size=(200,-1) )
hpte3 = HighPrecisionTimeEdit( mainWin, display_seconds=False, value="10:00", size=(200,-1) )
def getValues( event ):
print( 'hpte1: {}, {}'.format(hpte1.GetValue(), hpte1.GetSeconds()) )
print( 'hpte2: {}, {}'.format(hpte2.GetValue(), hpte2.GetSeconds()) )
print( 'hpte3: {}, {}'.format(hpte3.GetValue(), hpte3.GetSeconds()) )
button = wx.Button( mainWin, label='Get Values' )
button.Bind( wx.EVT_BUTTON, getValues )
vs.Add( hpte1, flag=wx.ALL, border=16 )
vs.Add( hpte2, flag=wx.ALL, border=16 )
vs.Add( hpte3, flag=wx.ALL, border=16 )
vs.Add( button, flag=wx.ALL, border=16 )
mainWin.SetSizerAndFit( vs )
mainWin.Show()
app.MainLoop()
|
11526142
|
from machine import Pin
from time import sleep_ms, sleep_us
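# Drives what looks like a serial-in, parallel-out shift register (74HC595-style wiring is an
# assumption based on the pin names): 'ser' is the serial data line, 'srclock' the shift clock,
# and 'rclock' the latch that copies the shifted bits onto the outputs.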
ser = Pin(5, Pin.OUT)
ser.value(0)
rclock = Pin(6, Pin.OUT)
rclock.value(0)
srclock = Pin(7, Pin.OUT)
srclock.value(0)
def srclock_pulse():
srclock.value(1)
sleep_us(10)
srclock.value(0)
sleep_us(10)
def rclock_pulse():
rclock.value(1)
sleep_us(10)
rclock.value(0)
sleep_us(10)
def cycle():
    # shift in a single 1 bit surrounded by 0s
    srclock_pulse()
    ser.value(1)
    srclock_pulse()
    ser.value(0)
    sleep_ms(10)
    # walk the 1 bit across the outputs: latch, then shift a 0 in behind it
    for j in range(9):
        rclock_pulse()
        sleep_us(10)
        srclock_pulse()
        sleep_ms(50)
while True:
cycle()
|
11526181
|
from casadi import *
x = SX.sym('x')
u = SX.sym('u')
xu = vertcat(x, u)
rhs = x - u
discrete_model = Function('discrete_model', [x, u], [rhs, jacobian(rhs, xu)])
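# Quick sanity check (illustrative, not part of the code generation): rhs = x - u, so its
# Jacobian w.r.t. [x, u] is [1, -1].
#   rhs_val, jac_val = discrete_model(1.0, 0.5)   # -> rhs_val == 0.5, jac_val == [1, -1]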
discrete_model.generate('discrete_model.c', {'with_header': True})
discrete_model_cost = Function('discrete_model_cost', [x, u], [xu, jacobian(xu, xu)])
discrete_model_cost.generate('discrete_model_cost.c', {'with_header': True})
discrete_model_costN = Function('discrete_model_costN', [x], [x, jacobian(x, x)])
discrete_model_costN.generate('discrete_model_costN.c', {'with_header': True})
|
11526218
|
import random
from torch.utils.data.sampler import Sampler
def _batchify(l, batch_size):
for i in range(0, len(l), batch_size):
yield l[i:i + batch_size]
def _flatten(l):
return [item for sublist in l for item in sublist]
class SubsetSequentialSampler(Sampler):
"""
Samples elements sequentially based on a list of indexes.
"""
def __init__(self, indexes):
"""
:param indexes: a list of indexes.
"""
super(SubsetSequentialSampler, self).__init__(None)
self._indexes = indexes
def __iter__(self):
return (self._indexes[i] for i in range(len(self._indexes)))
def __len__(self):
return len(self._indexes)
class OrderedBatchWiseRandomSampler(Sampler):
"""
Semi-randomly samples indexes from a dataset ensuring that the corresponding examples will have similar values.
Values are returned by a callable.
"""
def __init__(self, data_source, get_order_value_callable, batch_size, seed=1234):
"""
:param data_source: a data source (usually a dataset object).
:param get_order_value_callable: a callable that takes as input the example's index and returns the ordering
value.
:param batch_size: the batch size.
:param seed: the initial seed.
"""
super(OrderedBatchWiseRandomSampler, self).__init__(None)
self._sorted_indexes = sorted(list(range(len(data_source))), key=lambda x: get_order_value_callable(x))
self._batch_size = batch_size
self._current_seed = seed
def __iter__(self):
self._current_seed += 1
rand_state = random.Random(self._current_seed)
indexes = list(_batchify(self._sorted_indexes.copy(), self._batch_size))
rand_state.shuffle(indexes)
return iter(_flatten(indexes))
def __len__(self):
return len(self._sorted_indexes)
class SubsetOrderedBatchWiseRandomSampler(Sampler):
"""
Semi-randomly samples indexes from a list ensuring that the corresponding examples will have similar values. Values
are returned by a callable.
"""
def __init__(self, indexes, get_order_value_callable, batch_size, seed=1234):
"""
:param indexes: a list of indexes.
:param get_order_value_callable: a callable that takes as input the example's index and returns the ordering
value.
:param batch_size: the batch size.
:param seed: the initial seed.
"""
super(SubsetOrderedBatchWiseRandomSampler, self).__init__(None)
self._sorted_indexes = sorted(indexes, key=lambda i: get_order_value_callable(i))
self._batch_size = batch_size
self._current_seed = seed
def __iter__(self):
self._current_seed += 1
rand_state = random.Random(self._current_seed)
indexes = list(_batchify(self._sorted_indexes.copy(), self._batch_size))
rand_state.shuffle(indexes)
return iter(_flatten(indexes))
def __len__(self):
return len(self._sorted_indexes)
class OrderedSequentialSampler(Sampler):
"""
Samples elements from a dataset ordered by a value returned by a callable for each example.
"""
def __init__(self, data_source, get_order_value_callable):
"""
:param data_source: a data source (usually a dataset object).
:param get_order_value_callable: a callable that takes as input the example's index and returns the ordering
value.
"""
super(OrderedSequentialSampler, self).__init__(None)
self._sorted_indexes = sorted(list(range(len(data_source))), key=lambda i: get_order_value_callable(i))
def __iter__(self):
return iter(self._sorted_indexes)
def __len__(self):
return len(self._sorted_indexes)
class SubsetOrderedSequentialSampler(Sampler):
"""
Samples elements from a list of indexes ordered by a value returned by a callable for each example.
"""
def __init__(self, indexes, get_order_value_callable):
"""
:param indexes: a list of indexes.
:param get_order_value_callable: a callable that takes as input the example's index and returns the ordering
value.
"""
super(SubsetOrderedSequentialSampler, self).__init__(None)
self._sorted_indexes = sorted(indexes, key=lambda i: get_order_value_callable(i))
def __iter__(self):
return iter(self._sorted_indexes)
def __len__(self):
return len(self._sorted_indexes)
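# Minimal usage sketch (illustrative; 'my_dataset' and the length-based ordering value are assumptions):
#   from torch.utils.data import DataLoader
#   sampler = OrderedBatchWiseRandomSampler(my_dataset, lambda i: len(my_dataset[i]), batch_size=32)
#   loader = DataLoader(my_dataset, batch_size=32, sampler=sampler)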
|
11526232
|
from flask import Flask, render_template, send_file, make_response, url_for, Response,request,redirect
from flask_restful import reqparse, abort, Api, Resource
import pickle
import numpy as np
import werkzeug
from Predictor import *
app = Flask(__name__)
api = Api(app)
predictor = Predictor()
parser = reqparse.RequestParser()
parser.add_argument('url')
parser.add_argument('text')
class PredictLink(Resource):
def get(self):
args = parser.parse_args()
user_query = args['url']
return {"url_type":predictor.predict(user_query)}
class PredictToxicness(Resource):
def get(self):
args = parser.parse_args()
user_query = args['text']
result = predictor.predictToxicComment(user_query)
return {
"toxicity" : str(result['toxicity']),
"severe_toxicity": str(result['severe_toxicity']),
"obscene" : str(result['obscene']),
"threat" : str(result['threat']),
"insult" : str(result["insult"]),
"identity_hate" : str(result['identity_hate'])
}
api.add_resource(PredictLink, '/predict')
api.add_resource(PredictToxicness, '/predict/toxic')
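# Example requests (illustrative; assumes the default Flask development server on port 5000):
#   curl "http://127.0.0.1:5000/predict?url=http%3A%2F%2Fexample.com"
#   curl "http://127.0.0.1:5000/predict/toxic?text=you%20are%20great"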
if __name__ == '__main__':
app.run(debug=True)
|
11526238
|
import json
from ..exceptions import SparkPostAPIException as RequestsSparkPostAPIException
class SparkPostAPIException(RequestsSparkPostAPIException):
def __init__(self, response, *args, **kwargs):
errors = None
# noinspection PyBroadException
try:
data = json.loads(response.body.decode("utf-8"))
if data:
errors = data['errors']
errors = [e['message'] + ': ' + e.get('description', '')
for e in errors]
# TODO: select exception to catch here
except: # noqa: E722
pass
if not errors:
errors = [response.body.decode("utf-8") or ""]
self.status = response.code
self.response = response
self.errors = errors
message = """Call to {uri} returned {status_code}, errors:
{errors}
""".format(
uri=response.effective_url,
status_code=response.code,
errors='\n'.join(errors)
)
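        # Deliberately skip RequestsSparkPostAPIException.__init__ (which presumably expects a
        # requests-style response) and fall through to its base initializer (most likely plain Exception).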
super(RequestsSparkPostAPIException, self).__init__(message, *args,
**kwargs)
|
11526241
|
import unittest
from smsframework import Gateway
from smsframework.providers import NullProvider
from smsframework import OutgoingMessage, IncomingMessage, MessageStatus
class GatewayTest(unittest.TestCase):
""" Test Gateway """
def setUp(self):
self.gw = Gateway()
# Providers
self.gw.add_provider('one', NullProvider)
self.gw.add_provider('two', NullProvider)
self.gw.add_provider('three', NullProvider)
# Router
def router(message, module, method):
if module == 'main':
                return None  # None -> default provider ('one') for module 'main'
            elif method == 'alarm':
                return 'two'  # use 'two' for all alerting methods
            else:
                return 'three'  # use 'three' for everything else
self.gw.router = router
def test_struct(self):
""" Test structure """
# Default provider is fine
self.assertEqual(self.gw.default_provider, 'one')
# Getting providers
self.assertIsInstance(self.gw.get_provider('one'), NullProvider)
self.assertIsInstance(self.gw.get_provider('two'), NullProvider)
self.assertIsInstance(self.gw.get_provider('three'), NullProvider)
self.assertRaises(KeyError, self.gw.get_provider, 'none')
# Redeclare a provider
self.assertRaises(AssertionError, self.gw.add_provider, 'one', NullProvider)
# Pass a non-IProvider class
self.assertRaises(AssertionError, self.gw.add_provider, 'ok', Exception)
def test_routing(self):
""" Test routing """
# Sends through 'one'
msg = self.gw.send(OutgoingMessage('', '').route('main', ''))
self.assertEqual(msg.provider, 'one')
# Sends through 'two'
msg = self.gw.send(OutgoingMessage('', '').route('', 'alarm'))
self.assertEqual(msg.provider, 'two')
# Sends through 'three'
msg = self.gw.send(OutgoingMessage('', '').route('', ''))
self.assertEqual(msg.provider, 'three')
# Send through 'one' (explicitly set)
msg = self.gw.send(OutgoingMessage('', '', provider='one').route('', ''))
self.assertEqual(msg.provider, 'one')
# No routing specified: using the default route
msg = self.gw.send(OutgoingMessage('', '', provider='one'))
self.assertEqual(msg.provider, 'one')
# Wrong provider specified
self.assertRaises(AssertionError, self.gw.send, OutgoingMessage('', '', provider='zzz'))
def test_events(self):
""" Test events """
# Counters
self.recv = 0
self.send = 0
self.status = 0
def inc_recv(message): self.recv += 1
def inc_send(message): self.send += 1
def inc_status(message): self.status += 1
# Hooks
self.gw.onReceive += inc_recv
self.gw.onSend += inc_send
self.gw.onStatus += inc_status
# Emit some events
provider = self.gw.get_provider('one')
self.gw.send(OutgoingMessage('', ''))
provider._receive_message(IncomingMessage('', ''))
provider._receive_status(MessageStatus(''))
# Check
self.assertEqual(self.recv, 1)
self.assertEqual(self.send, 1)
self.assertEqual(self.status, 1)
|
11526245
|
import itertools
import json
import random
import time
from ast import literal_eval as make_tuple
from multiprocessing import Process, Queue
import numpy as np
import psutil
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier, Perceptron, PassiveAggressiveClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.utils import shuffle
from configs import LOGGER, DATA_DIR, BASELINE_PATH, JSON_LOGGER
from utils import mnist_reader
from utils.helper import now_int
class PredictJob:
def __init__(self, clf_name, clf_par, num_repeat: int = 1):
self.clf_name = clf_name
self.clf_par = clf_par
self.result = None
self.start_time = None
self.done_time = None
self.num_repeat = num_repeat
class JobWorker(Process):
def __init__(self, pending_q: Queue) -> None:
super().__init__()
self.pending_q = pending_q
X, self.Y = mnist_reader.load_mnist(path=DATA_DIR, kind='train')
Xt, self.Yt = mnist_reader.load_mnist(path=DATA_DIR, kind='t10k')
scaler = preprocessing.StandardScaler().fit(X)
self.X = scaler.transform(X)
self.Xt = scaler.transform(Xt)
# self.X = X[:100]
# self.Y = self.Y[:100]
def run(self) -> None:
while True:
cur_job = self.pending_q.get() # type: PredictJob
LOGGER.info('job received! repeat: %d classifier: "%s" parameter: "%s"' % (cur_job.num_repeat,
cur_job.clf_name,
cur_job.clf_par))
if cur_job.clf_name in globals():
try:
acc = []
cur_job.start_time = now_int()
for j in range(cur_job.num_repeat):
cur_score = self.get_accuracy(cur_job.clf_name, cur_job.clf_par, j)
acc.append(cur_score)
if len(acc) == 2 and abs(acc[0] - cur_score) < 1e-3:
LOGGER.info('%s is invariant to training data shuffling, will stop repeating!' %
cur_job.clf_name)
break
cur_job.done_time = now_int()
test_info = {
'name': cur_job.clf_name,
'parameter': cur_job.clf_par,
'score': acc,
'start_time': cur_job.start_time,
'done_time': cur_job.done_time,
'num_repeat': len(acc),
'mean_accuracy': np.array(acc).mean(),
'std_accuracy': np.array(acc).std() * 2,
'time_per_repeat': int((cur_job.done_time - cur_job.start_time) / len(acc))
}
JSON_LOGGER.info(json.dumps(test_info, sort_keys=True))
LOGGER.info('done! acc: %0.3f (+/- %0.3f) repeated: %d classifier: "%s" '
'parameter: "%s" ' % (np.array(acc).mean(),
np.array(acc).std() * 2,
len(acc),
cur_job.clf_name,
cur_job.clf_par))
except Exception as e:
LOGGER.error('%s with %s failed! reason: %s' % (cur_job.clf_name, cur_job.clf_par, e))
else:
                LOGGER.error('Cannot find "%s" in scikit-learn, missing import?' % cur_job.clf_name)
def get_accuracy(self, clf_name, clf_par, id):
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
clf = globals()[clf_name](**clf_par)
Xs, Ys = shuffle(self.X, self.Y)
cur_score = clf.fit(Xs, Ys).score(self.Xt, self.Yt)
        duration = time.perf_counter() - start_time
LOGGER.info('#test: %d acc: %0.3f time: %.3fs classifier: "%s" parameter: "%s"' % (id, cur_score,
duration,
clf_name,
clf_par))
return cur_score
class JobManager:
def __init__(self, num_worker: int = 2, num_repeat: int = 2, do_shuffle: bool = False,
respawn_memory_pct: float = 90):
self.pending_q = Queue()
self.num_worker = num_worker
self.num_repeat = num_repeat
self.do_shuffle = do_shuffle
self.valid_jobs = self._sanity_check(self._parse_tasks(BASELINE_PATH))
self.respawn_memory_pct = respawn_memory_pct
for v in self.valid_jobs:
self.pending_q.put(v)
def memory_guard(self):
LOGGER.info('memory usage: %.1f%%, RESPAWN_LIMIT: %.1f%%',
psutil.virtual_memory()[2], self.respawn_memory_pct)
if psutil.virtual_memory()[2] > self.respawn_memory_pct:
            LOGGER.warning('releasing memory now! killing worker processes and restarting!')
self.restart()
def restart(self):
self.close()
self.start()
def _parse_list(self, v):
for idx, vv in enumerate(v):
if isinstance(vv, str) and vv.startswith('('):
v[idx] = make_tuple(vv)
return v
def _parse_tasks(self, fn):
with open(fn) as fp:
tmp = json.load(fp)
def get_par_comb(tmp, clf_name):
all_par_vals = list(itertools.product(*[self._parse_list(vv)
for v in tmp['classifiers'][clf_name]
for vv in v.values()]))
all_par_name = [vv for v in tmp['classifiers'][clf_name] for vv in v.keys()]
return [{all_par_name[idx]: vv for idx, vv in enumerate(v)} for v in all_par_vals]
result = [{v: vv} for v in tmp['classifiers'] for vv in get_par_comb(tmp, v)]
for v in result:
for vv in v.values():
vv.update(tmp['common'])
if self.do_shuffle:
random.shuffle(result)
return result
def close(self):
for w in self.workers:
w.join(timeout=1)
w.terminate()
def start(self):
self.workers = [JobWorker(self.pending_q) for _ in range(self.num_worker)]
for w in self.workers:
w.start()
def _sanity_check(self, all_tasks):
total_clf = 0
failed_clf = 0
Xt, Yt = mnist_reader.load_mnist(path=DATA_DIR, kind='t10k')
Xt = preprocessing.StandardScaler().fit_transform(Xt)
Xs, Ys = shuffle(Xt, Yt)
num_dummy = 10
Xs = Xs[:num_dummy]
Ys = [j for j in range(10)]
valid_jobs = []
for v in all_tasks:
clf_name = list(v.keys())[0]
clf_par = list(v.values())[0]
total_clf += 1
try:
globals()[clf_name](**clf_par).fit(Xs, Ys)
valid_jobs.append(PredictJob(clf_name, clf_par, self.num_repeat))
except Exception as e:
failed_clf += 1
                LOGGER.error('Cannot create classifier "%s" with parameter "%s". Reason: %s' % (clf_name, clf_par, e))
        LOGGER.info('%d classifiers to test, %d failed to create!' % (total_clf, failed_clf))
return valid_jobs
# not used directly; keeps IDEs/linters from stripping the classifier imports referenced via globals()
placeholder = [PassiveAggressiveClassifier,
SGDClassifier,
Perceptron,
DecisionTreeClassifier,
RandomForestClassifier,
LogisticRegression,
MLPClassifier,
KNeighborsClassifier,
SVC,
GaussianNB,
ExtraTreeClassifier,
LinearSVC,
GaussianProcessClassifier,
GradientBoostingClassifier]
if __name__ == "__main__":
# predicting()
jm = JobManager()
jm.start()
# jm.start()
|
11526250
|
import unittest
import torch
from torch.autograd import Variable
from torchsample.metrics import CategoricalAccuracy
class TestMetrics(unittest.TestCase):
def test_categorical_accuracy(self):
metric = CategoricalAccuracy()
predicted = Variable(torch.eye(10))
expected = Variable(torch.LongTensor(list(range(10))))
self.assertEqual(metric(predicted, expected), 100.0)
# Set 1st column to ones
predicted = Variable(torch.zeros(10, 10))
predicted.data[:, 0] = torch.ones(10)
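        # CategoricalAccuracy accumulates over calls: (10 + 1) correct out of 20 predictions -> 55%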
self.assertEqual(metric(predicted, expected), 55.0)
if __name__ == '__main__':
unittest.main()
|
11526258
|
import torch
from torch.autograd import Variable
from unittest import TestCase
# Conv1dExtendable and insert_slice are exercised below; they are assumed to be exported by wavenet_modules as well
from wavenet_modules import dilate, Conv1dExtendable, insert_slice
class Test_Dilation(TestCase):
def test_dilate(self):
input = Variable(torch.linspace(0, 12, steps=13).view(1, 1, 13))
dilated = dilate(input, 1)
assert dilated.size() == (1, 1, 13)
assert dilated.data[0, 0, 4] == 4
print(dilated)
dilated = dilate(input, 2)
assert dilated.size() == (2, 1, 7)
assert dilated.data[1, 0, 2] == 4
print(dilated)
dilated = dilate(dilated, 4, init_dilation=2)
assert dilated.size() == (4, 1, 4)
assert dilated.data[3, 0, 1] == 4
print(dilated)
dilated = dilate(dilated, 1, init_dilation=4)
assert dilated.size() == (1, 1, 16)
assert dilated.data[0, 0, 7] == 4
print(dilated)
def test_dilate_multichannel(self):
input = Variable(torch.linspace(0, 35, steps=36).view(2, 3, 6))
dilated = dilate(input, 1)
dilated = dilate(input, 2)
dilated = dilate(input, 4)
class Test_Conv1dExtendable:
def test_ncc(self):
module = Conv1dExtendable(in_channels=3,
out_channels=5,
kernel_size=4)
rand = Variable(torch.rand(5, 3, 4))
module._parameters['weight'] = module.weight * module.weight + rand * 1
ncc = module.normalized_cross_correlation()
print(ncc)
class Test_Tensor_Inserting:
def test_insertion(self):
tensor = torch.rand(3, 4, 5)
print(tensor)
slice = torch.zeros(3, 5)
i = insert_slice(tensor=tensor, slice=slice, dim=1, at_index=2)
print(i)
i = insert_slice(tensor=tensor, slice=slice, dim=1, at_index=0)
print(i)
i = insert_slice(tensor=tensor, slice=slice, dim=1, at_index=4)
print(i)
|
11526259
|
import requests
plugin_name = 'malshare'
config = None
def check(query):
API_KEY = config['api']
    url = 'https://malshare.com/api.php?api_key={}&action=details&hash={}'.format(API_KEY, query)
    print(url)
    req = requests.get(url)
    res = {}
    res['found'] = b'Sample not found by hash' not in req.content
res['data'] = req.json() if res['found'] else []
res['name'] = 'malshare'
return res
class Plugin:
def __init__(self, conf):
global config
config = conf
def register(self):
return {plugin_name: {'check': check}}
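# Usage sketch (illustrative; the host framework, API key, and hash value are assumptions):
#   plugin = Plugin({'api': 'YOUR_MALSHARE_API_KEY'})
#   handlers = plugin.register()                     # {'malshare': {'check': <function check>}}
#   result = handlers['malshare']['check']('44d88612fea8a8f36de82e1278abb02f')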
|
11526261
|
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import *
from moviepy.video.io.bindings import mplfig_to_npimage
if __name__ == '__main__':
    # Boolean: whether to include audio in the final product
include_audio = False
# Read in data generated from previous video analysis
scenes = np.loadtxt('MV_scenelist.csv', delimiter=',')
# Break these columns apart to make it a little easier to index later
scene_frames = scenes[:, 0]
scene_times = scenes[:, 1]
scene_durs = scenes[:, 2]
# Calculate the time that passes each frame
msec_per_frame = scene_times[-1] / scene_frames[-1]
# Initialize a list to keep track of edits/sec
rolling_average = []
# Size of the rolling window to average over
window_sec = 5.0
# Keep track of number of scenes to have passed
scene_count = []
for i in range(int(np.max(scene_frames)) + 1):
# First frame, starts with 0 edits per second
if i == 0:
rolling_average.append(0)
scene_count.append(1)
continue
# Find current time in msec
current_time = i * msec_per_frame
# Find all scenes that have happened prior to current frame
in_window_scenes = scene_times[np.where(current_time >= scene_times)]
# Keep track of total number of scenes to have passed
scene_count.append(len(in_window_scenes))
# Then filter that down to scenes that have happened within the rolling
# window prior to current frame
in_window_scenes = len(in_window_scenes[np.where(
current_time - window_sec * 1000. <= in_window_scenes)])
# Find the rate from number of scenes
scenes_per_sec = in_window_scenes / window_sec
# Add this frame's rate to the list
rolling_average.append(scenes_per_sec)
# Calculate the average rate at which edits are made
avg_rate = scene_count[-1] / (scene_times[-1] / 1000.0)
# duration = 30
duration = scene_times[-1] / 1000.0
# Make the first figure that will keep track of the rate of transitions
fig1, ax = plt.subplots(1, figsize=(4, 4), facecolor='white')
ax.set_title("Rate of Scene Transitions \n (%0d sec Rolling Average)" % window_sec)
ax.set_ylim(0, max(rolling_average))
ax.set_xlim(0, duration)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Detected Rate of Transitions (changes/sec)')
line, = ax.plot(0, 0, 'k-')
line2, = ax.plot([0, duration], [avg_rate, avg_rate], 'b-')
plt.tight_layout()
# Initialize lists to keep track of things
times = []
rates = []
def make_frame1(t):
"""Function to make a graph of the rate of scene transitions."""
times.append(t)
# Find all scenes that have happened prior to current frame
in_window_scenes = scene_times[np.where(t * 1000.0 >= scene_times)]
# Then filter that down to scenes that have happened within the rolling
# window prior to current frame
in_window_scenes = len(in_window_scenes[np.where(
t * 1000.0 - window_sec * 1000.0 <= in_window_scenes)])
# Find the rate from number of scenes
scenes_per_sec = in_window_scenes / window_sec
# Add the rate to the list
rates.append(scenes_per_sec)
# Update the graph
line.set_xdata(times)
line.set_ydata(rates)
return mplfig_to_npimage(fig1)
# Use our function to animate a video and save it to file
animation1 = VideoClip(make_frame1, duration=duration).resize(height=540)
# animation1.write_videofile('rate_animation.mp4', fps=23.976)
plt.close()
# Animate plot of total scene transitions detected
fig2, ax = plt.subplots(1, figsize=(4, 4), facecolor='white')
ax.set_title("Number of Scene Transitions")
ax.set_ylim(0, max(scene_count))
ax.set_xlim(0, duration)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Total Number of Detected Scenes')
line, = ax.plot(0, 1, 'k-')
line2, = ax.plot([0, duration], [1, max(scene_count)], 'b-')
plt.tight_layout()
# Initialize lists
times = []
scenes = []
def make_frame2(t):
"""Function to graph the total number of scene transitions over time."""
# Keep track of the time
times.append(t)
# Find all scenes that have happened prior to current frame
in_window_scenes = scene_times[np.where(t * 1000.0 >= scene_times)]
# Keep track of total number of scenes to have passed
scenes.append(len(in_window_scenes) + 1)
# Update the graph
line.set_xdata(times)
line.set_ydata(scenes)
return mplfig_to_npimage(fig2)
# Animate the graph and save it to file
animation2 = VideoClip(make_frame2, duration=duration).resize(height=540)
# animation2.write_videofile('total_animation.mp4', fps=23.976)
# Reload saved videos of graphs as they cannot be composited together
# until they are each rendered and saved independently
animation1 = VideoFileClip('rate_animation.mp4')
animation2 = VideoFileClip('total_animation.mp4')
# Stack the two graphs on top of each other
animation_array = clips_array([[animation1], [animation2]])
# animation_array.write_videofile('stacked_animation.mp4', fps=23.976)
# Load the main video
mv_video = (VideoFileClip('BTS_2017_DNA_Annotated_33_10.mp4').
resize(width=1920 - animation_array.w))
    # Get rid of audio if set
if not include_audio:
mv_video = mv_video.set_audio(None)
# Stick the videos together and save the result
final_array = clips_array([[mv_video, animation_array]])
# final_array.write_videofile('final_composition.mp4', fps=23.976)
# Make a final results screen
# Calculate final stats
sec_per_scene = duration / float(max(scene_count))
result_string1 = "Results: \n"
result_string2 = "%03d scenes in %03.1f seconds " % (max(scene_count), duration)
result_string3 = "Average of %0.2f transitions per second" % avg_rate
result_string4 = "Average of %0.2f seconds per scene " % sec_per_scene
result_text = [result_string1, result_string2,
result_string3, result_string4]
final_result_text = "\n".join(result_text)
result_screen_text = TextClip(final_result_text, fontsize=72,
font="FreeMono-Bold", color='white',
size=(final_array.w, final_array.h)
).set_duration(7.5).set_pos('center')
video_result = concatenate_videoclips([final_array, result_screen_text])
# Tack on the longest scene at the end
# Find the longest scene
scene_idx = np.argmax(scene_durs)
# Figure out the timing of the longest scene
scene_start = scene_times[scene_idx - 1] / 1000.0
scene_end = (scene_times[scene_idx] - msec_per_frame) / 1000.0
scene_duration = scene_end - scene_start
# Make the text for the top of the screen
scene_text = (TextClip("Longest Scene", fontsize=144, font="FreeMono-Bold",
stroke_color='black', stroke_width=3, color='white').
set_duration(scene_duration).set_opacity(0.6))
scene_text = scene_text.set_pos("center").set_pos('top')
# Make the text for the bottom of the screen
dur_text = (TextClip("%0.3f seconds long" % scene_duration, fontsize=144,
font="FreeMono-Bold", stroke_color='black',
color='white', stroke_width=3).
set_duration(scene_duration).set_opacity(0.6))
dur_text = dur_text.set_pos('center').set_pos('bottom')
# Load the longest scene from the previously annotated video
video_file = '/home/walter/Videos/Music Videos/BTS_2017_DNA.mkv'
longest_scene = (VideoFileClip(video_file).
subclip(scene_start, scene_end))
    # Get rid of audio if set
if not include_audio:
longest_scene = longest_scene.set_audio(None)
# Combine the text and the longest scene together
final_scene = CompositeVideoClip([longest_scene, scene_text, dur_text])
# Add the longest scene onto the end of the annotated video
added_scene = concatenate_videoclips([video_result, final_scene])
added_scene.write_videofile('added_scene.mp4', fps=23.976, preset='medium')
|
11526268
|
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('ks')
J.url = 'http://kansas.gov'
|