| id | content |
|---|---|
1638285
|
def rc4(key, content):
key_len = len(key)
    S = list(range(256))
    T = [key[i % key_len] for i in range(256)]
    # key-scheduling algorithm (KSA)
    j = 0
    for i in range(256):
        j = (j + S[i] + T[i]) % 256
        S[i], S[j] = S[j], S[i]
    # pseudo-random generation algorithm (PRGA): XOR the keystream into the content
    i = j = 0
    res = []
for c in content:
i = (i+1) % 256
j = (j+S[i]) % 256
S[i], S[j] = S[j], S[i]
        t = (S[i] + S[j]) % 256
        res.append(c ^ S[t])  # keystream byte is S[(S[i] + S[j]) % 256], not the index t itself
return bytes(res)
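
# Minimal round-trip check (illustrative; assumes Python 3 bytes for key and content):
if __name__ == '__main__':
    key, msg = b'secret-key', b'attack at dawn'
    assert rc4(key, rc4(key, msg)) == msg  # RC4 is symmetric: encrypting twice restores the input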
|
1638308
|
import bayesnewton
import numpy as np
from bayesnewton.utils import solve
from jax.config import config
config.update("jax_enable_x64", True)
import pytest
def wiggly_time_series(x_):
noise_var = 0.15 # true observation noise
    return (np.cos(0.04*x_+0.33*np.pi) * np.sin(0.2*x_) +
            np.sqrt(noise_var) * np.random.normal(0, 1, x_.shape))  # np.math is not public API; np.sqrt is equivalent here
def build_data(N):
# np.random.seed(12345)
x = np.random.permutation(np.linspace(-25.0, 150.0, num=N) + 0.5*np.random.randn(N)) # unevenly spaced
x = np.sort(x) # since MarkovGP sorts the inputs, they must also be sorted for GP
y = wiggly_time_series(x)
# x_test = np.linspace(np.min(x)-15.0, np.max(x)+15.0, num=500)
# y_test = wiggly_time_series(x_test)
# x_plot = np.linspace(np.min(x)-20.0, np.max(x)+20.0, 200)
x = x[:, None]
# y = y[:, None]
# x_plot = x_plot[:, None]
return x, y
def initialise_gp_model(var_f, len_f, var_y, x, y):
kernel = bayesnewton.kernels.Matern52(variance=var_f, lengthscale=len_f)
likelihood = bayesnewton.likelihoods.Gaussian(variance=var_y)
model = bayesnewton.models.VariationalGP(kernel=kernel, likelihood=likelihood, X=x, Y=y)
return model
@pytest.mark.parametrize('var_f', [0.5, 1.5])
@pytest.mark.parametrize('len_f', [0.75, 2.5])
@pytest.mark.parametrize('var_y', [0.1, 0.5])
@pytest.mark.parametrize('N', [30, 60])
def test_marg_lik(var_f, len_f, var_y, N):
"""
    Test whether variational inference with Bayes-Newton's GP and a Gaussian likelihood recovers the exact marginal likelihood.
"""
x, y = build_data(N)
gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)
gp_model.inference(lr=1.) # update variational params
loss_gp = gp_model.energy()
print(loss_gp)
K_X = gp_model.kernel(x, x)
K_Y = K_X + var_y * np.eye(K_X.shape[0])
L_Y = np.linalg.cholesky(K_Y)
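    # closed-form log marginal likelihood of a zero-mean GP with Gaussian noise, log N(y | 0, K_Y),
    # using the Cholesky identity 0.5 * log|K_Y| = sum(log(diag(L_Y)))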
exact_marg_lik = (
-0.5 * y.T @ solve(K_Y, y)
- np.sum(np.log(np.diag(L_Y)))
- 0.5 * y.shape[0] * np.log(2 * np.pi)
)
print(exact_marg_lik)
np.testing.assert_almost_equal(loss_gp, -exact_marg_lik, decimal=4)
|
1638314
|
import nose
from nose.tools import assert_equal
from nose.tools import with_setup
import torch
import numpy as np
from torch.autograd import Variable
from vis.activations import GuidedBackProRelu
def set_up():
print("Test start")
def tear_down():
print("Test done")
@with_setup(set_up, tear_down)
def test_forward():
x = Variable(torch.randn(2,3))
grelu = GuidedBackProRelu()
out = grelu(x)
x_mask = torch.clamp(x, min=0)
res = torch.sum(x_mask - out)
res = res.data.cpu().numpy()
assert_equal(res, 0)
@with_setup(set_up, tear_down)
def test_backward():
x = Variable(torch.randn(2,3), requires_grad=True)
grelu = GuidedBackProRelu()
out = grelu(x)
out = torch.sum(out)
out.backward()
grad = x.grad
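    # with an all-ones upstream gradient, guided backprop passes gradient exactly
    # where the forward input was positive, so grad should equal the 0/1 mask below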
x_mask = torch.gt(x, 0).float()
result = torch.sum(x_mask - grad.float())
result = result.data.numpy()
assert_equal(result, 0)
if __name__=='__main__':
nose.run()
|
1638337
|
import pytest
import requests
import json
from settings import NGINX_API_VERSION
from suite.nginx_api_utils import wait_for_empty_array, wait_for_non_empty_array, get_nginx_generation_value
from suite.resources_utils import scale_deployment
@pytest.mark.vsr
@pytest.mark.skip_for_nginx_oss
@pytest.mark.parametrize('crd_ingress_controller, v_s_route_setup',
[({"type": "complete", "extra_args": ["-enable-custom-resources",
"-nginx-status-allow-cidrs=0.0.0.0/0"]},
{"example": "virtual-server-route-dynamic-configuration"})],
indirect=True)
class TestVSRNginxPlusApi:
def test_dynamic_configuration(self, kube_apis,
ingress_controller_endpoint, crd_ingress_controller,
v_s_route_setup, v_s_route_app_setup):
req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.api_port}"
vsr_s_upstream = f"vs_{v_s_route_setup.namespace}_{v_s_route_setup.vs_name}_" \
f"vsr_{v_s_route_setup.route_s.namespace}_{v_s_route_setup.route_s.name}_backend2"
vsr_m_upstream = f"vs_{v_s_route_setup.namespace}_{v_s_route_setup.vs_name}_" \
f"vsr_{v_s_route_setup.route_m.namespace}_{v_s_route_setup.route_m.name}_backend1"
initial_reloads_count = get_nginx_generation_value(req_url)
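        # the NGINX Plus API reports a "generation" counter that increments on every
        # configuration reload; an unchanged value later proves the change was applied dynamically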
upstream_servers_s_url = f"{req_url}/api/{NGINX_API_VERSION}/http/upstreams/{vsr_s_upstream}/servers"
upstream_servers_m_url = f"{req_url}/api/{NGINX_API_VERSION}/http/upstreams/{vsr_m_upstream}/servers"
print("Scale BE deployment")
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "backend2", v_s_route_setup.route_s.namespace, 0)
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "backend1", v_s_route_setup.route_m.namespace, 0)
wait_for_empty_array(upstream_servers_s_url)
wait_for_empty_array(upstream_servers_m_url)
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "backend2", v_s_route_setup.route_s.namespace, 1)
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "backend1", v_s_route_setup.route_m.namespace, 1)
wait_for_non_empty_array(upstream_servers_s_url)
wait_for_non_empty_array(upstream_servers_m_url)
print("Run checks")
resp_s = json.loads(requests.get(upstream_servers_s_url).text)
resp_m = json.loads(requests.get(upstream_servers_m_url).text)
new_reloads_count = get_nginx_generation_value(req_url)
assert new_reloads_count == initial_reloads_count, "Expected: no new reloads"
for resp in [resp_s, resp_m]:
assert resp[0]['max_conns'] == 32
assert resp[0]['max_fails'] == 25
assert resp[0]['fail_timeout'] == '15s'
assert resp[0]['slow_start'] == '10s'
def test_status_zone_support(self, kube_apis,
ingress_controller_endpoint, crd_ingress_controller,
v_s_route_setup, v_s_route_app_setup):
req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.api_port}"
status_zone_url = f"{req_url}/api/{NGINX_API_VERSION}/http/server_zones"
resp = json.loads(requests.get(status_zone_url).text)
assert resp[f"{v_s_route_setup.vs_host}"]
|
1638374
|
import chemutils.mainutils.mainutils as mainutils
from docopt import docopt, DocoptExit
import pprint
import re
import sys
doc = """chemutils
Usage:
chemutils atomspos <xyzFileOrDir>
[--out-dir=<outdir>]
[--out-filename=<outfname>]
[--file-extension=<filext>]
[--no-file-output]
chemutils convert <moleculeFileOrDir> <convertFrom> <convertTo>
[--conv-options=<convopts>]
[--out-dir=<outdir>]
[--out-filename=<outfname>]
[--file-extension=<filext>]
[--no-file-output]
chemutils xyz2xyz <xyzTargetFile> <xyzRefFile>
[--out-dir=<outdir>]
[--out-filename=<outfname>]
[--file-extension=<filext>]
[--no-file-output]
chemutils xyz2ginp <xyzFileOrDir>
[--charge=<charge>]
[--spin-mult=<spinmult>]
[--job-route=<jobroute>]
[--memory=<mem>]
[--num-cpus=<ncpus>]
[--out-dir=<outdir>]
[--out-filename=<outfname>]
[--file-extension=<filext>]
[--no-file-output]
chemutils genconfs <moleculeFileOrDir>
(--input-format=<inpformat>)
[--ret-num-confs=<retnumconfs>]
[--gen-num-confs=<gennumconfs>]
[--max-iters=<maxiters>]
[--mff-variant=<mffvariant>]
[--out-dir=<outdir>]
[--out-filename=<outfname>]
[--file-extension=<filext>]
[--no-file-output]
Options:
--conv-options=<convopts> OpenBabel conversion options. Defaults to none.
--job-route=<jobroute> Gaussian job route [default: #n B3LYP/6-311+G(d,p) Opt Freq]
--charge=<charge> Molecule's charge in atomic units [default: 0]
--spin-mult=<spinmult> Molecule's spin multiplicity [default: 1]
--memory=<mem> Memory to be used for the gaussian job, in GB [default: 32]
--num-cpus=<ncpus> Number of cpus to be used for the gaussian job [default: 16]
--out-dir=<outdir> Output dir for the generated files. Defaults to:
* input file directory for file input
* input directory for the directory input
--out-filename=<outfname> Base name of the produced output files.
If multiple output files are generated, basename
will be appended by an integer number.
--file-extension=<filext> Input file extension. The following defaults
are used:
* xyz for xyzFileOrDir / xyzTargetFile /
xyzRefFile inputs
* --input-format or convertFrom for
moleculeFileOrDir input
--no-file-output Suppresses the output file(s) generation.
--input-format=<inpformat> Format of the input file.
--ret-num-confs=<retnumconfs> Number of the lowest energy conformers to return. [default: 1]
  --gen-num-confs=<gennumconfs>  Number of conformers to generate. [default: 100]
                                 Larger values increase the chance of finding
                                 the ground-state conformer.
--max-iters=<maxiters> Max number of force field iterations [default: 1000]
--mff-variant=<mffvariant> Force field variant [default: MMFF94]
"""
# chemutils xyz2xyzFlexBond <xyzTargetFileOrStr> <xyzRefFileOrStr>
# <refAtomId1>
# <refAtomId2>
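# Example invocation (illustrative; the file name is an assumption):
#   chemutils xyz2ginp molecule.xyz --charge=0 --spin-mult=1 --memory=16 --num-cpus=8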
def start():
try:
args = docopt(doc)
except DocoptExit:
raise DocoptExit('Error: chemutils called with wrong arguments.')
if args["atomspos"]:
output = mainutils.xyzToAtomsPositionsWrapper(
xyzFileOrDir=args['<xyzFileOrDir>'], \
outDir=args['--out-dir'], \
outFileBaseName=args['--out-filename'], \
fileExt=args['--file-extension'], \
noOutFile=args['--no-file-output'], \
)
elif args["convert"]:
output = mainutils.obConvertWrapper(
inputFileOrDir=args['<moleculeFileOrDir>'],
convertFrom=args['<convertFrom>'], \
convertTo=args['<convertTo>'], \
convOptions=args['--conv-options'],
outDir=args['--out-dir'], \
outFileBaseName=args['--out-filename'], \
fileExt=args['--file-extension'], \
noOutFile=args['--no-file-output'], \
)
elif args["xyz2ginp"]:
output = mainutils.xyzToGaussianInputWrapper(xyzFileOrDir=args['<xyzFileOrDir>'], \
jobRoute= args['--job-route'], \
charge= args['--charge'], \
spinMult = args['--spin-mult'], \
memory= args['--memory'], \
numCpus= args['--num-cpus'], \
outDir= args['--out-dir'], \
outFileBaseName=args['--out-filename'], \
fileExt=args['--file-extension'], \
noOutFile=args['--no-file-output'], \
)
elif args["xyz2xyz"]:
output = mainutils.xyzReorderToxyz(
xyzTargetFile= args['<xyzTargetFile>'], \
xyzRefFile= args['<xyzRefFile>'], \
outDir= args['--out-dir'], \
noOutFile=args['--no-file-output'], \
)
elif args["genconfs"]:
        output = mainutils.getConformersXYZWrapper(
moleculeFileOrDir=args['<moleculeFileOrDir>'], \
inputFormat=args['--input-format'], \
retNumConfs=int(args['--ret-num-confs']), \
genNumConfs=int(args['--gen-num-confs']), \
maxIters=int(args['--max-iters']), \
mmffVariant=args['--mff-variant'], \
outDir=args['--out-dir'], \
outFileBaseName=args['--out-filename'], \
fileExt=args['--file-extension'], \
noOutFile=args['--no-file-output'], \
)
pprint.pprint(output)
#else:
# output = xyzReorderToxyzFlexBond(args['<xyzTargetFileOrStr>'], args['<xyzRefFileOrStr>'], \
# args['<refAtomId1>'], args['<refAtomId2>'])
if __name__ == '__main__':
start()
|
1638385
|
from sklearn.neighbors import LocalOutlierFactor
from pyod.models.iforest import IForest
from pyod.models.hbos import HBOS
from pyod.models.loda import LODA
from pyod.models.copod import COPOD
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
import ast
import eval.evaluation_utils as utils
from sklearn import metrics
from config import eva_root
def evaluation_od_train(x, y, data_name, model_name="iforest", chosen_subspace=None):
"""
using anomaly detector to yield anomaly score for each subspace,
generate two files: the subspaces with the highest anomaly score & lof score for each subspace
:param x: data matrix
:param y: class information
:param data_name: the data set name, using for naming the ground truth file
    :param model_name: anomaly detector name, default: iforest
:param chosen_subspace: use this to only evaluate a subset of the power set of full feature space
:return: df: a ground-truth map using anomaly idx as key and ground truth feature subspace as value.
"""
    dim = x.shape[1]
ano_idx = np.where(y == 1)[0]
n_ano = len(ano_idx)
# get all the possible feature subset or just use given subset list
f_subsets = utils.get_subset_candidate(dim, chosen_subspace)
# score anomalies in each subspace, generate the score matrix
n_subsets = len(f_subsets)
score_matrix = np.zeros([n_ano, n_subsets])
for i in tqdm(range(n_subsets)):
subset = f_subsets[i]
x_subset = x[:, subset]
if model_name == "iforest":
clf = IForest()
clf.fit(x_subset)
od_score = clf.decision_scores_
elif model_name == "copod":
clf = COPOD()
clf.fit(x_subset)
od_score = clf.decision_scores_
elif model_name == "hbos":
clf = HBOS()
clf.fit(x_subset)
od_score = clf.decision_scores_
else:
raise ValueError("unsupported od model")
od_score = utils.min_max_norm(od_score)
score_matrix[:, i] = od_score[ano_idx]
if not os.path.exists(eva_root + "data_od_evaluation/"):
os.makedirs(eva_root + "data_od_evaluation/")
# score matrix to df
anomaly_score_df = pd.DataFrame(data=score_matrix, columns=[str(s) for s in f_subsets])
col_name = anomaly_score_df.columns.tolist()
col_name.insert(0, 'ano_idx')
anomaly_score_df["ano_idx"] = ano_idx
anomaly_score_df = anomaly_score_df.reindex(columns=col_name)
path1 = eva_root + "data_od_evaluation/" + data_name + "_score_" + model_name + ".csv"
anomaly_score_df.to_csv(path1, index=False)
# get the ground truth (one subspace for each anomaly that the anomaly can obtain the highest anomaly score)
g_truth_df = pd.DataFrame(columns=["ano_idx", "exp_subspace"])
exp_subspaces = []
for ii, ano_score in enumerate(score_matrix):
max_score_idx = int(np.argmax(ano_score))
exp_subset = str(f_subsets[max_score_idx])
exp_subspaces.append(exp_subset)
g_truth_df["ano_idx"] = ano_idx
g_truth_df["exp_subspace"] = exp_subspaces
    g_truth_df = g_truth_df.astype({"exp_subspace": "object"})  # astype returns a copy; assign it back
path2 = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
g_truth_df.to_csv(path2, index=False)
return anomaly_score_df, g_truth_df
def evaluation_od(exp_subspace_list, x, y, data_name, model_name):
"""
use outlier detection to evaluate the explanation subspace for each anomaly data object,
to evaluate whether this subspace is a high-contrast subspace to highlight this anomaly
i.e., the anomaly detector can or cannot get a higher score in this space
:param exp_subspace_list: explanation feature subspace for each anomaly, corresponding to ano_idx
:param x: data set
:param y: label
:param data_name: name of dataset
:param model_name: the name of anomaly detector to generate ground truth
    :return: mean precision, recall, and Jaccard similarity over all anomalies
"""
path1 = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
if not os.path.exists(path1):
print("annotation file not found, labeling now...")
_, g_truth_df = evaluation_od_train(x, y, data_name, model_name)
else:
g_truth_df = pd.read_csv(path1)
ano_idx = np.where(y == 1)[0]
precision_list = np.zeros(len(ano_idx))
jaccard_list = np.zeros(len(ano_idx))
recall_list = np.zeros(len(ano_idx))
for ii, ano in enumerate(ano_idx):
exp_subspace = list(exp_subspace_list[ii])
gt_subspace_str = g_truth_df.loc[g_truth_df["ano_idx"] == ano]["exp_subspace"].values[0]
gt_subspace = ast.literal_eval(gt_subspace_str)
overlap = list(set(gt_subspace).intersection(set(exp_subspace)))
union = list(set(gt_subspace).union(set(exp_subspace)))
precision_list[ii] = len(overlap) / len(exp_subspace)
jaccard_list[ii] = len(overlap) / len(union)
recall_list[ii] = len(overlap) / len(gt_subspace)
return precision_list.mean(), recall_list.mean(), jaccard_list.mean()
def evaluation_od_auc(feature_weight, x, y, data_name, model_name="iforest"):
"""
use outlier detection to evaluate the explanation subspace for each anomaly data,
whether this subspace is a high-contrast subspace to highlight this anomaly
    :param feature_weight: per-feature outlying weights for each anomaly, corresponding to ano_idx
:param x: data set
:param y: label
:param data_name: name of dataset
:param model_name: the name of anomaly detector to generate ground truth
    :return: mean AUPR and AUROC over all anomalies
"""
path1 = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
if not os.path.exists(path1):
print("annotation file not found, labeling now...")
_, g_truth_df = evaluation_od_train(x, y, data_name, model_name)
else:
g_truth_df = pd.read_csv(path1)
ano_idx = np.where(y == 1)[0]
dim = x.shape[1]
auroc_list = np.zeros(len(ano_idx))
aupr_list = np.zeros(len(ano_idx))
for ii, ano in enumerate(ano_idx):
score = feature_weight[ii]
# ground_truth metrics
gt_subspace_str = g_truth_df.loc[g_truth_df["ano_idx"] == ano]["exp_subspace"].values[0]
gt_subspace = ast.literal_eval(gt_subspace_str)
gt = np.zeros(dim, dtype=int)
gt[gt_subspace] = 1
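        # if the ground-truth subspace spans every feature there is no negative class,
        # so ROC/PR curves are undefined; score the explanation as perfect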
if len(gt_subspace) == dim:
auroc_list[ii] = 1
aupr_list[ii] = 1
else:
precision, recall, _ = metrics.precision_recall_curve(gt, score)
aupr_list[ii] = metrics.auc(recall, precision)
auroc_list[ii] = metrics.roc_auc_score(gt, score)
return aupr_list.mean(), auroc_list.mean()
|
1638407
|
import pygame
import perlin
from random import randint
from .block import Block
from .tree import Tree
from ...variables import *
p = perlin.Perlin(randint(0, 99999))
class Terrain:
def __init__(self):
self.map = []
self.tile_rects = []
self.placed_blocks = []
self.loaded_chunks = []
def generate_chunk(self, x, y):
if (x, y) not in [block.chunk for block in self.map]:
tree_blocks = []
chunk_loaded = (x, y) in self.loaded_chunks
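            # first pass: choose tree placements for this chunk so the second pass
            # can tell tree blocks apart from ordinary terrain tiles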
for y_pos in range(CHUNK_SIZE):
for x_pos in range(CHUNK_SIZE):
target_x = x * CHUNK_SIZE + x_pos
target_y = y * CHUNK_SIZE + y_pos
height = p.one(target_x)
if target_y == CHUNK_SIZE - 1 - height and randint(0, 6) == 0:
tree = Tree((target_x * TILE_SIZE, target_y * TILE_SIZE))
for tree_block in tree.blocks:
if tree_block.pos not in [i.pos for i in tree_blocks]:
tree_blocks.append(tree_block)
for y_pos in range(CHUNK_SIZE):
for x_pos in range(CHUNK_SIZE):
block_added = False
target_x = x * CHUNK_SIZE + x_pos
target_y = y * CHUNK_SIZE + y_pos
height = p.one(target_x)
if target_y > CHUNK_SIZE + 3 - height:
tile_type = 'stone'
elif target_y > CHUNK_SIZE - height:
tile_type = 'dirt'
elif target_y == CHUNK_SIZE - height:
tile_type = 'grass_block'
elif target_y == CHUNK_SIZE - 1 - height and randint(0, 6) == 0 and not chunk_loaded:
tile_type = 'flower'
elif target_y == CHUNK_SIZE - 1 - height and randint(0, 3) == 0 and not chunk_loaded:
tile_type = 'grass'
else:
tile_type = 'air'
for block in self.placed_blocks:
if block.coords == (target_x, target_y):
self.map.append(block)
block_added = True
if not block_added:
if (target_x * TILE_SIZE, target_y * TILE_SIZE) in [i.pos for i in tree_blocks]:
if not chunk_loaded:
for tree_block in tree_blocks:
if tree_block.pos == (target_x * TILE_SIZE, target_y * TILE_SIZE):
self.map.append(tree_block)
self.placed_blocks.append(tree_block)
else:
self.map.append(Block((target_x * TILE_SIZE, target_y * TILE_SIZE), tile_type))
if tile_type in ['flower', 'grass']:
self.placed_blocks.append(Block((target_x * TILE_SIZE, target_y * TILE_SIZE), tile_type))
if not chunk_loaded:
self.loaded_chunks.append((x, y))
    def unload_chunk(self, chunk_pos):
        # rebuild the list instead of removing while iterating, which skips elements
        self.map = [block for block in self.map if block.chunk != chunk_pos]
def remove_block(self, block_pos):
for i, block in enumerate(self.map):
if block.pos == block_pos:
self.map[i].type = 'air'
self.placed_blocks.append(self.map[i])
def add_block(self, block_pos, block_type):
for i, block in enumerate(self.map):
if block.pos == block_pos:
if block_type not in ['flower', 'grass']:
if block.type == 'air':
self.map[i].type = block_type
self.placed_blocks.append(self.map[i])
return True
else:
for block2 in self.map:
if block2.pos == (block_pos[0], block_pos[1] + TILE_SIZE):
if block2.type != 'air':
self.map[i].type = block_type
self.placed_blocks.append(self.map[i])
return True
else:
return False
def generate_hitbox(self):
self.tile_rects = []
for block in self.map:
if block.type not in ['air', 'grass', 'flower']:
self.tile_rects.append(block.rect)
def draw(self, display):
for block in self.map:
display.blit(block.img, block.get_scrolled_pos(scroll))
def update(self, player):
self.generate_hitbox()
for y in range(RENDER_DISTANCE):
for x in range(RENDER_DISTANCE):
target_x = x + player.current_chunk[0] - RENDER_DISTANCE//2
target_y = y + player.current_chunk[1] - RENDER_DISTANCE//2
self.generate_chunk(target_x, target_y)
for i, block in enumerate(self.map):
if block.type in ['flower', 'grass']:
for block2 in self.map:
if block2.pos == (block.pos[0], block.pos[1] + TILE_SIZE):
if block2.type == 'air':
try:
self.placed_blocks.remove(self.map[i])
except ValueError:
pass
self.map[i].type = 'air'
|
1638415
|
from .__about__ import __version__
from .check import CalicoCheck
__all__ = ['__version__', 'CalicoCheck']
|
1638417
|
from urllib2 import quote, unquote
import bcrypt
from base import BaseHandler
class LoginHandler(BaseHandler):
def get(self):
error = self.get_argument('error', None)
self.render('login.html', error=error, hide_notebooks=True)
def post(self):
username = self.get_argument('username', '')
password = self.get_argument('password', '')
if username == '' or password == '':
self.redirect('/login?error=' + quote(("Username and password are "
"required!")))
elif username != self.settings.username or \
bcrypt.hashpw(password, self.settings.pwdhash) != self.settings.pwdhash:
self.redirect('/login?error=' + quote("Username/password "
"incorrect!"))
else:
self.set_cookie('session', self.settings.session,
expires_days=365)
self.redirect(self.get_argument('next', '/'))
|
1638435
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import os
import json
import scipy
import scipy.stats
jian_file = 'result2'
grid_file = 'result2'
datasets = ['HGBn-ACM', 'HGBn-DBLP', 'HGBn-IMDB', 'HNE-PubMed', 'HGBn-Freebase', 'HGBn-ACM']
xL = [[[0, 1], [0, 1], [0, 1],[0, 1] ,[0, 1],[0, 1],],
[[0.8, 0.95], [0.7, 0.95], [0.5, 0.65], [0.1, 0.6], [0.2, 0.5], [0.2, 0.5]],]
yL = [[[0, 1], [0, 1], [0, 1],[0, 1], [0, 1],[0, 1],],
[[0.6, 1], [0.55, 1], [0.6, 1],[0.6, 1], [0.6, 1],[0.6, 1]]]
# jian_file = 'result2'
# grid_file = 'result2'
# datasets = ['HGBl-ACM', 'HGBl-DBLP', 'HGBl-IMDB', 'HGBl-PubMed', 'HGBl-amazon', 'HGBl-LastFM']
# xL = [[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
# [[0.8, 1], [0.6, 1], [0.5, 1], [0.7, 1], [0.8, 1],[0.8, 1]]]
# yL = [[[0,1], [0,1], [0,1],[0,1], [0,1],[0,1],],
# [[0.6, 1], [0.6, 1], [0.6, 1],[0.6, 1], [0.6, 1],[0.6, 1],]]
score = 'score'
dim = 'subgraph'
num_data = len(datasets)
# Detectron colors
_COLORS = np.array([
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188
]).astype(np.float32).reshape((-1, 3))
# Random number generator seed
_RNG_SEED = 1
# Fix RNG seeds
random.seed(_RNG_SEED)
np.random.seed(_RNG_SEED)
# Directory where sweep summaries are stored
_DATA_DIR = '.'
def load_sweep(sweep_name):
"""Loads a sweep summary."""
summary_path = os.path.join(_DATA_DIR, '{}.csv'.format(sweep_name))
with open(summary_path, 'r') as f:
sweep_summary = pd.read_csv(f, sep=',')
return sweep_summary
# Load ResNet sweep
results1 = load_sweep('{}'.format(jian_file))
results2 = load_sweep('{}'.format(grid_file))
def draw(i, j, ax, has_y=True, has_x=True):
if i == 0:
results = results1
else:
results = results2
dataset = datasets[j]
homo = results[(results[dim] == 'homo') & (results['dataset'] == dataset)]
homo = set(homo[score].values.tolist())
relation = results[(results[dim] == 'relation') & (results['dataset'] == dataset)]
relation = set(relation[score].values.tolist())
mp = results[(results[dim] == 'metapath') & (results['dataset'] == dataset)]
mp = set(mp[score].values.tolist())
mix = results[(results[dim] == 'mixed') & (results['dataset'] == dataset)]
mix = set(mix[score].values.tolist())
# Text experiment, point estimates
random.seed(_RNG_SEED)
num_trials = 5000
N_mp = len(mp)
N_relation = len(relation)
N_homo = len(homo)
N_mix = len(mix)
    random.seed(_RNG_SEED)
    err_homo = sorted(homo)
    err_mp = sorted(mp)
    err_relation = sorted(relation)
    err_mix = sorted(mix)
edf_homo = np.arange(N_homo) / float(N_homo - 1)
edf_relation = np.arange(N_relation) / float(N_relation - 1)
edf_mp = np.arange(N_mp) / float(N_mp - 1)
    edf_mix = np.arange(N_mix) / float(N_mix - 1)  # match the normalization of the other EDFs
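    # empirical distribution functions: sorted scores on x, cumulative probability on y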
ax.plot(
err_homo, edf_homo, color=_COLORS[1], linewidth=2, alpha=0.8,
zorder=1, label='{}=homo'.format(dim)
)
ax.plot(
err_relation, edf_relation, color=_COLORS[0], linewidth=2, alpha=0.8,
zorder=0, label='{}=relation'.format(dim)
)
ax.plot(
err_mp, edf_mp, color=_COLORS[2], linewidth=2, alpha=0.8,
zorder=1, label='{}=metapath'.format(dim)
)
# ax.plot(
# err_mix, edf_mix, color=_COLORS[3], linewidth=2, alpha=0.8,
# zorder=0, label='{}=mixed'.format(dim)
# )
#ax.set_xlim([4.5, 13.5])
ax.set_xlim(xL[i][j])
ax.set_ylim(yL[i][j])
#ax.set_xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
if not has_x:
ax.set_xlabel('', fontsize=20)
else:
ax.set_xlabel('{}'.format(dataset), fontsize=20)
if not has_y:
ax.set_ylabel('', fontsize=20)
else:
ax.set_ylabel('cumulative prob.', fontsize=20)
ax.grid(alpha=0.4)
#ax.legend(loc='upper left', prop={'size': 14})
r, c = 2, num_data
l_w, l_h = 4, 3
r_w, r_h = 4, 3
fig, axes = plt.subplots(
nrows=r, ncols=c,
figsize=(22, 6),
gridspec_kw = {'width_ratios': [2] * num_data}
)
for i in range(2):
for j in range(len(datasets)):
        draw(i, j, axes[i, j], has_x=(i == 1), has_y=(j == 0))
plt.tight_layout()
#plt.subplots_adjust(left=0.1, bottom=0.2, right=0.85, top=0.9, hspace=0.4, wspace=0.5)
plt.subplots_adjust(left=0.05, bottom=0.2, right=0.97, top=0.9, hspace=0.3, wspace=0.25)
lines, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(lines, labels, loc='center right', title_fontsize='large')
path = 'figs/1112'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig('{}/all_{}_node_1112.png'.format(path, dim), dpi=300)
plt.show()
|
1638437
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_cluster(self, BrokerNodeGroupInfo: Dict, ClusterName: str, KafkaVersion: str, NumberOfBrokerNodes: int, EncryptionInfo: Dict = None, EnhancedMonitoring: str = None) -> Dict:
"""
Creates a new MSK cluster.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/CreateCluster>`_
**Request Syntax**
::
response = client.create_cluster(
BrokerNodeGroupInfo={
'BrokerAZDistribution': 'DEFAULT',
'ClientSubnets': [
'string',
],
'InstanceType': 'string',
'SecurityGroups': [
'string',
],
'StorageInfo': {
'EbsStorageInfo': {
'VolumeSize': 123
}
}
},
ClusterName='string',
EncryptionInfo={
'EncryptionAtRest': {
'DataVolumeKMSKeyId': 'string'
}
},
EnhancedMonitoring='DEFAULT'|'PER_BROKER'|'PER_TOPIC_PER_BROKER',
KafkaVersion='string',
NumberOfBrokerNodes=123
)
**Response Syntax**
::
{
'ClusterArn': 'string',
'ClusterName': 'string',
'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED'
}
**Response Structure**
- *(dict) --*
- **ClusterArn** *(string) --*
The Amazon Resource Name (ARN) of the cluster.
- **ClusterName** *(string) --*
The name of the MSK cluster.
- **State** *(string) --*
The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.
:type BrokerNodeGroupInfo: dict
:param BrokerNodeGroupInfo: **[REQUIRED]**
Information about the broker nodes in the cluster.
- **BrokerAZDistribution** *(string) --*
The distribution of broker nodes across Availability Zones.
- **ClientSubnets** *(list) --* **[REQUIRED]**
The list of subnets to connect to in the client virtual private cloud (VPC). AWS creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data. Client subnets can\'t be in Availability Zone us-east-1e.
- *(string) --*
- **InstanceType** *(string) --* **[REQUIRED]**
The type of Amazon EC2 instances to use for Kafka brokers. The following instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge.
- **SecurityGroups** *(list) --*
The AWS security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster.
- *(string) --*
- **StorageInfo** *(dict) --*
Contains information about storage volumes attached to MSK broker nodes.
- **EbsStorageInfo** *(dict) --*
EBS volume information.
- **VolumeSize** *(integer) --*
The size in GiB of the EBS volume for the data drive on each broker node.
:type ClusterName: string
:param ClusterName: **[REQUIRED]**
The name of the cluster.
:type EncryptionInfo: dict
:param EncryptionInfo:
Includes all encryption-related information.
- **EncryptionAtRest** *(dict) --*
The data volume encryption details.
- **DataVolumeKMSKeyId** *(string) --* **[REQUIRED]**
The AWS KMS key used for data encryption.
:type EnhancedMonitoring: string
:param EnhancedMonitoring:
Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER.
:type KafkaVersion: string
:param KafkaVersion: **[REQUIRED]**
The version of Apache Kafka.
:type NumberOfBrokerNodes: integer
:param NumberOfBrokerNodes: **[REQUIRED]**
The number of Kafka broker nodes in the Amazon MSK cluster.
:rtype: dict
:returns:
"""
pass
def delete_cluster(self, ClusterArn: str, CurrentVersion: str = None) -> Dict:
"""
Deletes the MSK cluster specified by the Amazon Resource Name (ARN) in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DeleteCluster>`_
**Request Syntax**
::
response = client.delete_cluster(
ClusterArn='string',
CurrentVersion='string'
)
**Response Syntax**
::
{
'ClusterArn': 'string',
'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED'
}
**Response Structure**
- *(dict) --*
Successful response.
- **ClusterArn** *(string) --*
The Amazon Resource Name (ARN) of the cluster.
- **State** *(string) --*
The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.
:type ClusterArn: string
:param ClusterArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
:type CurrentVersion: string
:param CurrentVersion:
The current version of the MSK cluster.
:rtype: dict
:returns:
"""
pass
def describe_cluster(self, ClusterArn: str) -> Dict:
"""
Returns a description of the MSK cluster whose Amazon Resource Name (ARN) is specified in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeCluster>`_
**Request Syntax**
::
response = client.describe_cluster(
ClusterArn='string'
)
**Response Syntax**
::
{
'ClusterInfo': {
'BrokerNodeGroupInfo': {
'BrokerAZDistribution': 'DEFAULT',
'ClientSubnets': [
'string',
],
'InstanceType': 'string',
'SecurityGroups': [
'string',
],
'StorageInfo': {
'EbsStorageInfo': {
'VolumeSize': 123
}
}
},
'ClusterArn': 'string',
'ClusterName': 'string',
'CreationTime': datetime(2015, 1, 1),
'CurrentBrokerSoftwareInfo': {
'ConfigurationArn': 'string',
'ConfigurationRevision': 'string',
'KafkaVersion': 'string'
},
'CurrentVersion': 'string',
'EncryptionInfo': {
'EncryptionAtRest': {
'DataVolumeKMSKeyId': 'string'
}
},
'EnhancedMonitoring': 'DEFAULT'|'PER_BROKER'|'PER_TOPIC_PER_BROKER',
'NumberOfBrokerNodes': 123,
'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED',
'ZookeeperConnectString': 'string'
}
}
**Response Structure**
- *(dict) --*
Successful response.
- **ClusterInfo** *(dict) --*
The cluster information.
- **BrokerNodeGroupInfo** *(dict) --*
Information about the broker nodes.
- **BrokerAZDistribution** *(string) --*
The distribution of broker nodes across Availability Zones.
- **ClientSubnets** *(list) --*
The list of subnets to connect to in the client virtual private cloud (VPC). AWS creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data. Client subnets can't be in Availability Zone us-east-1e.
- *(string) --*
- **InstanceType** *(string) --*
The type of Amazon EC2 instances to use for Kafka brokers. The following instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge.
- **SecurityGroups** *(list) --*
The AWS security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster.
- *(string) --*
- **StorageInfo** *(dict) --*
Contains information about storage volumes attached to MSK broker nodes.
- **EbsStorageInfo** *(dict) --*
EBS volume information.
- **VolumeSize** *(integer) --*
The size in GiB of the EBS volume for the data drive on each broker node.
- **ClusterArn** *(string) --*
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
- **ClusterName** *(string) --*
The name of the cluster.
- **CreationTime** *(datetime) --*
The time when the cluster was created.
- **CurrentBrokerSoftwareInfo** *(dict) --*
Information about the version of software currently deployed on the Kafka brokers in the cluster.
- **ConfigurationArn** *(string) --*
The Amazon Resource Name (ARN) of the configuration used for the cluster.
- **ConfigurationRevision** *(string) --*
The revision of the configuration to use.
- **KafkaVersion** *(string) --*
The version of Apache Kafka.
- **CurrentVersion** *(string) --*
The current version of the MSK cluster.
- **EncryptionInfo** *(dict) --*
Includes all encryption-related information.
- **EncryptionAtRest** *(dict) --*
The data volume encryption details.
- **DataVolumeKMSKeyId** *(string) --*
The AWS KMS key used for data encryption.
- **EnhancedMonitoring** *(string) --*
Specifies which metrics are gathered for the MSK cluster. This property has three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER.
- **NumberOfBrokerNodes** *(integer) --*
The number of Kafka broker nodes in the cluster.
- **State** *(string) --*
The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.
- **ZookeeperConnectString** *(string) --*
The connection string to use to connect to the Apache ZooKeeper cluster.
:type ClusterArn: string
:param ClusterArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_bootstrap_brokers(self, ClusterArn: str) -> Dict:
"""
A list of brokers that a client application can use to bootstrap.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetBootstrapBrokers>`_
**Request Syntax**
::
response = client.get_bootstrap_brokers(
ClusterArn='string'
)
**Response Syntax**
::
{
'BootstrapBrokerString': 'string'
}
**Response Structure**
- *(dict) --*
Successful response.
- **BootstrapBrokerString** *(string) --*
A string containing one or more hostname:port pairs.
:type ClusterArn: string
:param ClusterArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_clusters(self, ClusterNameFilter: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns a list of clusters in an account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters>`_
**Request Syntax**
::
response = client.list_clusters(
ClusterNameFilter='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'ClusterInfoList': [
{
'BrokerNodeGroupInfo': {
'BrokerAZDistribution': 'DEFAULT',
'ClientSubnets': [
'string',
],
'InstanceType': 'string',
'SecurityGroups': [
'string',
],
'StorageInfo': {
'EbsStorageInfo': {
'VolumeSize': 123
}
}
},
'ClusterArn': 'string',
'ClusterName': 'string',
'CreationTime': datetime(2015, 1, 1),
'CurrentBrokerSoftwareInfo': {
'ConfigurationArn': 'string',
'ConfigurationRevision': 'string',
'KafkaVersion': 'string'
},
'CurrentVersion': 'string',
'EncryptionInfo': {
'EncryptionAtRest': {
'DataVolumeKMSKeyId': 'string'
}
},
'EnhancedMonitoring': 'DEFAULT'|'PER_BROKER'|'PER_TOPIC_PER_BROKER',
'NumberOfBrokerNodes': 123,
'State': 'ACTIVE'|'CREATING'|'DELETING'|'FAILED',
'ZookeeperConnectString': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Successful response.
- **ClusterInfoList** *(list) --*
Information on each of the MSK clusters in the response.
- *(dict) --*
Returns information about a cluster.
- **BrokerNodeGroupInfo** *(dict) --*
Information about the broker nodes.
- **BrokerAZDistribution** *(string) --*
The distribution of broker nodes across Availability Zones.
- **ClientSubnets** *(list) --*
The list of subnets to connect to in the client virtual private cloud (VPC). AWS creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data. Client subnets can't be in Availability Zone us-east-1e.
- *(string) --*
- **InstanceType** *(string) --*
The type of Amazon EC2 instances to use for Kafka brokers. The following instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge.
- **SecurityGroups** *(list) --*
The AWS security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster.
- *(string) --*
- **StorageInfo** *(dict) --*
Contains information about storage volumes attached to MSK broker nodes.
- **EbsStorageInfo** *(dict) --*
EBS volume information.
- **VolumeSize** *(integer) --*
The size in GiB of the EBS volume for the data drive on each broker node.
- **ClusterArn** *(string) --*
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
- **ClusterName** *(string) --*
The name of the cluster.
- **CreationTime** *(datetime) --*
The time when the cluster was created.
- **CurrentBrokerSoftwareInfo** *(dict) --*
Information about the version of software currently deployed on the Kafka brokers in the cluster.
- **ConfigurationArn** *(string) --*
The Amazon Resource Name (ARN) of the configuration used for the cluster.
- **ConfigurationRevision** *(string) --*
The revision of the configuration to use.
- **KafkaVersion** *(string) --*
The version of Apache Kafka.
- **CurrentVersion** *(string) --*
The current version of the MSK cluster.
- **EncryptionInfo** *(dict) --*
Includes all encryption-related information.
- **EncryptionAtRest** *(dict) --*
The data volume encryption details.
- **DataVolumeKMSKeyId** *(string) --*
The AWS KMS key used for data encryption.
- **EnhancedMonitoring** *(string) --*
Specifies which metrics are gathered for the MSK cluster. This property has three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER.
- **NumberOfBrokerNodes** *(integer) --*
The number of Kafka broker nodes in the cluster.
- **State** *(string) --*
The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.
- **ZookeeperConnectString** *(string) --*
The connection string to use to connect to the Apache ZooKeeper cluster.
- **NextToken** *(string) --*
The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. To get another batch of clusters, provide this token in your next request.
:type ClusterNameFilter: string
:param ClusterNameFilter:
Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.
:type MaxResults: integer
:param MaxResults:
The maximum number of clusters to return in the response. If there are more clusters, the response includes a NextToken parameter.
:type NextToken: string
:param NextToken:
The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. To get another batch of clusters, provide this token in your next request.
:rtype: dict
:returns:
"""
pass
def list_nodes(self, ClusterArn: str, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns a list of the broker nodes in the cluster.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes>`_
**Request Syntax**
::
response = client.list_nodes(
ClusterArn='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'NodeInfoList': [
{
'AddedToClusterTime': 'string',
'BrokerNodeInfo': {
'AttachedENIId': 'string',
'BrokerId': 123.0,
'ClientSubnet': 'string',
'ClientVpcIpAddress': 'string',
'CurrentBrokerSoftwareInfo': {
'ConfigurationArn': 'string',
'ConfigurationRevision': 'string',
'KafkaVersion': 'string'
}
},
'InstanceType': 'string',
'NodeARN': 'string',
'NodeType': 'BROKER',
'ZookeeperNodeInfo': {
'AttachedENIId': 'string',
'ClientVpcIpAddress': 'string',
'ZookeeperId': 123.0,
'ZookeeperVersion': 'string'
}
},
]
}
**Response Structure**
- *(dict) --*
Successful response.
- **NextToken** *(string) --*
The paginated results marker. When the result of a ListNodes operation is truncated, the call returns NextToken in the response. To get another batch of nodes, provide this token in your next request.
- **NodeInfoList** *(list) --*
List containing a NodeInfo object.
- *(dict) --*
The node information object.
- **AddedToClusterTime** *(string) --*
The start time.
- **BrokerNodeInfo** *(dict) --*
The broker node info.
- **AttachedENIId** *(string) --*
The attached elastic network interface of the broker.
- **BrokerId** *(float) --*
The ID of the broker.
- **ClientSubnet** *(string) --*
The client subnet to which this broker node belongs.
- **ClientVpcIpAddress** *(string) --*
The virtual private cloud (VPC) of the client.
- **CurrentBrokerSoftwareInfo** *(dict) --*
Information about the version of software currently deployed on the Kafka brokers in the cluster.
- **ConfigurationArn** *(string) --*
The Amazon Resource Name (ARN) of the configuration used for the cluster.
- **ConfigurationRevision** *(string) --*
The revision of the configuration to use.
- **KafkaVersion** *(string) --*
The version of Apache Kafka.
- **InstanceType** *(string) --*
The instance type.
- **NodeARN** *(string) --*
The Amazon Resource Name (ARN) of the node.
- **NodeType** *(string) --*
The node type.
- **ZookeeperNodeInfo** *(dict) --*
The ZookeeperNodeInfo.
- **AttachedENIId** *(string) --*
The attached elastic network interface of the broker.
- **ClientVpcIpAddress** *(string) --*
The virtual private cloud (VPC) IP address of the client.
- **ZookeeperId** *(float) --*
The role-specific ID for Zookeeper.
- **ZookeeperVersion** *(string) --*
The version of Zookeeper.
:type ClusterArn: string
:param ClusterArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
:type MaxResults: integer
:param MaxResults:
The maximum number of clusters to return in the response. If there are more clusters, the response includes a NextToken parameter.
:type NextToken: string
:param NextToken:
The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. To get another batch of clusters, provide this token in your next request.
:rtype: dict
:returns:
"""
pass
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
"""
Returns a list of tags attached to a resource.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource>`_
**Request Syntax**
::
response = client.list_tags_for_resource(
ResourceArn='string'
)
**Response Syntax**
::
{
'Tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
Successful response.
- **Tags** *(dict) --*
The key-value pairs for the resource tags
- *(string) --*
- *(string) --*
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the resource.
:rtype: dict
:returns:
"""
pass
def tag_resource(self, ResourceArn: str, Tags: Dict):
"""
Tag a resource with given tags.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource>`_
**Request Syntax**
::
response = client.tag_resource(
ResourceArn='string',
Tags={
'string': 'string'
}
)
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the resource.
:type Tags: dict
:param Tags: **[REQUIRED]**
The key-value pairs for the resource tags
- *(string) --*
- *(string) --*
:returns: None
"""
pass
def untag_resource(self, ResourceArn: str, TagKeys: List):
"""
Remove tags of a resource by given tag keys.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource>`_
**Request Syntax**
::
response = client.untag_resource(
ResourceArn='string',
TagKeys=[
'string',
]
)
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that uniquely identifies the resource.
:type TagKeys: list
:param TagKeys: **[REQUIRED]**
The list of tag keys.
- *(string) --*
:returns: None
"""
pass
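
# Illustrative usage sketch (assumes AWS credentials are configured; not part of the generated stub):
# import boto3
# client = boto3.client('kafka')
# for cluster in client.list_clusters()['ClusterInfoList']:
#     print(cluster['ClusterName'], cluster['State'])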
|
1638447
|
from pathlib import Path
import yaml
DEF_DT_WIDTH = 25
class AnasymodProjectConfig:
def __init__(self, fpga_sim_ctrl='UART_ZYNQ', custom_zynq_firmware=True):
# validate input
assert fpga_sim_ctrl in {'UART_ZYNQ', 'VIVADO_VIO'}, 'Invalid setting.'
# initialize the config dictionary
self.config = {
'PROJECT': {
'dt': 50e-9,
'board_name': 'ZC702',
'plugins': ['msdsl'],
'emu_clk_freq': 5e6,
'cpu_debug_mode': 0,
'cpu_debug_hierarchies': [],
'flatten_hierarchy': 'rebuilt',
'vivado_stack': 2000,
'dt_width': DEF_DT_WIDTH
},
'FPGA_TARGET': {
'fpga': {
'fpga_sim_ctrl': fpga_sim_ctrl,
'custom_zynq_firmware': custom_zynq_firmware
}
}
}
def set_cpu_debug_mode(self, value):
self.config['PROJECT']['cpu_debug_mode'] = value
def set_vivado_stack(self, value):
self.config['PROJECT']['vivado_stack'] = value
def set_flatten_hierarchy(self, value):
assert value in {'none', 'full', 'rebuilt'}, 'Invalid setting.'
self.config['PROJECT']['flatten_hierarchy'] = value
def set_dt_width(self, value):
self.config['PROJECT']['dt_width'] = value
def add_debug_probe(self, depth, path):
self.config['PROJECT']['cpu_debug_hierarchies'].append([depth, path])
def set_board_name(self, value):
self.config['PROJECT']['board_name'] = value
def set_dt(self, value):
self.config['PROJECT']['dt'] = value
def add_plugin(self, arg):
self.config['PROJECT']['plugins'].append(arg)
def set_emu_clk_freq(self, value):
self.config['PROJECT']['emu_clk_freq'] = value
def set_custom_zynq_firmware(self, value):
self.config['FPGA_TARGET']['fpga']['custom_zynq_firmware'] = value
def write_to_file(self, fname):
with open(fname, 'w') as f:
yaml.dump(self.config, f, sort_keys=False)
class AnasymodSourceConfig:
def __init__(self):
self.sources = {}
def add_generic_sources(self, kind, file_list, fileset=None):
if kind not in self.sources:
self.sources[kind] = {}
for file_ in file_list:
# determine a unique name for this file
key = Path(file_).stem
suffix = Path(file_).suffix
if (suffix is not None) and (len(suffix) > 1) and (suffix[0] == '.'):
key = f'{key}_{suffix[1:]}'
if fileset is not None:
key = f'{key}_{fileset}'
# create entry for this file
if key in self.sources[kind]:
raise Exception(f'Source "{key}" already defined.')
else:
self.sources[kind][key] = {}
self.sources[kind][key]['files'] = str(file_)
if fileset is not None:
self.sources[kind][key]['fileset'] = fileset
def add_edif_files(self, file_list, fileset=None):
self.add_generic_sources('edif_files', file_list, fileset)
def add_verilog_sources(self, file_list, fileset=None):
self.add_generic_sources('verilog_sources', file_list, fileset)
def add_verilog_headers(self, header_list, fileset=None):
self.add_generic_sources('verilog_headers', header_list, fileset)
def add_firmware_files(self, file_list, fileset=None):
self.add_generic_sources('firmware_files', file_list, fileset)
def add_defines(self, defines, fileset=None):
if 'defines' not in self.sources:
self.sources['defines'] = {}
for mname, mval in defines.items():
# determine a unique name for this definition
key = mname
if fileset is not None:
key = f'{key}_{fileset}'
# create entry for this definition
if key in self.sources['defines']:
raise Exception(f'Definition "{key}" already specified.')
else:
self.sources['defines'][key] = {}
self.sources['defines'][key]['name'] = mname
self.sources['defines'][key]['value'] = mval
if fileset is not None:
self.sources['defines'][key]['fileset'] = fileset
def write_to_file(self, fname):
with open(fname, 'w') as f:
yaml.dump(self.sources, f, sort_keys=False)
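
# Minimal usage sketch (illustrative; file names are assumptions):
# cfg = AnasymodProjectConfig()
# cfg.set_dt(10e-9)
# cfg.write_to_file('prj_config.yaml')
# src = AnasymodSourceConfig()
# src.add_verilog_sources(['tb.sv'])
# src.write_to_file('source_config.yaml')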
|
1638460
|
from rest_framework import serializers
from qapi.utils import generate_fields
from apps.utils.file_uri import file_uri
from .models import DeliveryPrinter, DeliveryAccount, Sender, DeliveryRecords
class DeliveryAccountSerializer(serializers.ModelSerializer):
remark_content = serializers.CharField(
label='备注内容',
write_only=True,
required=False,
default='',
)
class Meta:
model = DeliveryAccount
fields = generate_fields(model, add=['remark_content'])
class DeliveryOrderSerializer(serializers.Serializer):
order_id = serializers.CharField(label='订单ID')
openid = serializers.CharField(label='用户openid', required=False, default=None)
delivery_id = serializers.CharField(label='快递公司ID')
waybill_id = serializers.CharField(label='运单ID')
class QuotaSerializer(serializers.Serializer):
delivery_id = serializers.CharField(label='快递公司ID')
biz_id = serializers.CharField(label='快递公司客户编码')
class DeliveryPrinterSerializer(serializers.ModelSerializer):
nickname = serializers.SerializerMethodField()
tag_list = serializers.SerializerMethodField()
tagid_list = serializers.ListField(
label='打印员面单打印权限',
write_only=True,
required=False,
default=[],
)
def get_tag_list(self, instance):
return instance.tags.split(',')
def get_nickname(self, instance):
return instance.user.nickname
class Meta:
model = DeliveryPrinter
fields = generate_fields(
model, add=['tagid_list', 'tag_list', 'nickname'], remove=['tags'])
def validate(self, attrs):
shop = attrs.get('shop')
tagid_list = attrs.pop('tagid_list', [])
if shop and shop.id not in tagid_list:
tagid_list.append(shop.id)
attrs['tags'] = ','.join(list(map(str, tagid_list)))
return attrs
class SenderSerializer(serializers.HyperlinkedModelSerializer):
shop_name = serializers.SerializerMethodField()
def get_shop_name(self, instance):
return instance.shop.name
class Meta:
model = Sender
fields = generate_fields(model, add=['shop_name'])
class AddOrderSerializer(serializers.ModelSerializer):
sender = serializers.PrimaryKeyRelatedField(label='发货人信息', queryset=Sender.objects.all(), required=True, allow_null=False)
service_name = serializers.CharField(label='服务类型', max_length=20, required=True, allow_null=False)
service_type = serializers.IntegerField(label='服务类型id')
biz_id = serializers.CharField(label='客户(现付)编码', max_length=20, required=True, allow_null=False)
count = serializers.IntegerField(label='数量')
space_x = serializers.IntegerField(label='包裹长度')
space_y = serializers.IntegerField(label='包裹宽度')
space_z = serializers.IntegerField(label='包裹高度')
weight = serializers.FloatField(label='总重量')
custom_remark = serializers.CharField(label='备注', style={'base_template': 'textarea.html'}, required=False, allow_null=True)
expect_time = serializers.IntegerField(label='预约收件时间', required=False, allow_null=True)
receiver = serializers.JSONField(label='收件人信息')
delivery_name = serializers.CharField(label='快递公司名称', max_length=50, required=True, allow_null=False)
class Meta:
model = DeliveryRecords
fields = ('items', 'order', 'delivery_id', 'sender', 'service_name', 'service_type', 'count', 'space_x',
'space_y', 'space_z', 'weight', 'custom_remark', 'expect_time', 'biz_id', 'receiver', 'delivery_name')
def validate_receiver(self, data):
if not data.get('address', None) or not data.get('area', None) or not data.get('city', None) or not data.get('province', None):
raise serializers.ValidationError('请完整输入收件人详细地址')
if not data.get('mobile', None):
raise serializers.ValidationError('请输入收件人手机号')
if not data.get('name', None):
raise serializers.ValidationError('请输入收件人姓名')
return data
def validate(self, attrs):
items = attrs['items']
for i in items:
if i.order != attrs['order']:
raise serializers.ValidationError('发货商品不属于同一订单')
if attrs["delivery_id"] == 'SF' and not attrs.get('expect_time', None):
raise serializers.ValidationError('顺丰快递预约取件时间必填')
return attrs
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
def cargo(self, validated_data):
detail_list = [{"count": i.goods_backup.num,
"name": f"{i.goods_backup.goods_name}-{i.goods_backup.gtype_name}*{i.goods_backup.num}"} for i
in validated_data['items']]
return {"count": validated_data["count"],
"weight": validated_data["weight"],
"space_x": validated_data["space_x"],
"space_y": validated_data["space_y"],
"space_z": validated_data["space_z"],
"detail_list": detail_list}
def receiver_info(self, validated_data):
receiver = validated_data['receiver']
receiver.update({"country": "中国"})
return receiver
def sender_info(self, sender):
sender_ = {"name": sender.name,
"company": sender.company,
"post_code": sender.post_code,
"country": sender.country,
"province": sender.province,
"city": sender.city,
"area": sender.area,
"address": sender.address,
}
        if sender.tel:
            sender_.update({"tel": sender.tel})  # landline belongs under "tel"; the original value was overwritten by "mobile"
        if sender.mobile:
            sender_.update({"mobile": sender.mobile})
return sender_
def goods_info(self, items):
goods_info = {
"goods_count": sum([i.goods_backup.num for i in items]),
"goods_name": items[0].goods_backup.goods_name,
"img_url": file_uri(self.context.get('request'), items[0].goods_backup.g_image),
"wxa_path": f"pages/util/index?oid={items[0].order.id}"
}
return goods_info
def save(self, **kwargs):
validated_data = self.validated_data
items = validated_data['items']
order = validated_data['order']
sender = validated_data['sender']
validated_data["wx_order_id"] = f"{order.order_sn}{items[0].id}" # 物流单需要一个唯一的订单号,采用订单加子订单第一个的id组成
data = {
"add_source": 0,
"biz_id": validated_data["biz_id"],
"cargo": self.cargo(validated_data),
"delivery_id": validated_data["delivery_id"],
"insured": {"insured_value": 0, "use_insured": 0},
"openid": order.user.wx_app_openid,
"order_id": validated_data["wx_order_id"],
"receiver": self.receiver_info(validated_data),
"sender": self.sender_info(sender),
"service": {"service_name": validated_data["service_name"], "service_type": validated_data["service_type"]},
"shop": self.goods_info(items),
"custom_remark": validated_data.get("custom_remark", ''),
"tagid": order.shop.id,
}
if validated_data["delivery_id"] == 'SF':
data['expect_time'] = validated_data['expect_time']
return data
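# Usage sketch (a minimal illustration, not from the source: the serializer
# class name `DeliveryRecordsSerializer` and the DRF `request` object are
# assumed from context):
#
#   serializer = DeliveryRecordsSerializer(data=request.data,
#                                          context={'request': request})
#   serializer.is_valid(raise_exception=True)
#   payload = serializer.save()  # returns the WeChat logistics payload dict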
|
1638465
|
import codecs
import os
from setuptools import find_packages
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""Return the contents of the read file.
- Build an absolute path from *parts*
- Return the contents of the resulting file.
- Assume UTF-8 encoding.
Proudly copy-pasted from Hynek's attrs project
(minus the typo).
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
LONG = read("README.rst") + "\n\n" + read("CHANGES.rst")
setup(
name="Flask-Reuploaded",
version="1.1.1.dev0",
url="https://github.com/jugmac00/flask-reuploaded",
project_urls={
"Source": "https://github.com/jugmac00/flask-reuploaded",
"Issue Tracker": "https://github.com/jugmac00/flask-reuploaded/issues",
"Documentation": "https://flask-reuploaded.readthedocs.io/en/latest/",
},
license="MIT",
author='Matthew "LeafStorm" Frazier',
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
description="Flexible and efficient upload handling for Flask",
long_description=LONG,
long_description_content_type="text/x-rst",
packages=find_packages(where="src"),
package_dir={"": "src"},
zip_safe=False,
platforms="any",
include_package_data=True,
install_requires=["Flask>=1.0.4"],
extras_require={
"test": [
"pytest",
"pytest-cov",
],
},
python_requires=">= 3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Flask",
],
)
|
1638487
|
from rest_framework import serializers
from .models import WorkingHour
class WorkingHourSerializer(serializers.ModelSerializer):
class Meta:
model = WorkingHour
fields = ('id', 'hour')
|
1638541
|
import argparse
import os
import shutil
import tfaip.util.logging as logging
from tfaip.data.pipeline.definitions import PipelineMode
from tqdm import tqdm
from calamari_ocr.ocr import CrossFold
from calamari_ocr.ocr.dataset.datareader.file import FileDataParams
from calamari_ocr.utils import split_all_ext, glob_all
logger = logging.logger(__name__)
def main():
parser = argparse.ArgumentParser(description="Write split of folds to separate directories")
parser.add_argument(
"--files",
nargs="+",
help="List all image files that shall be processed. Ground truth fils with the same "
"base name but with '.gt.txt' as extension are required at the same location",
)
parser.add_argument(
"--n_folds",
type=int,
required=True,
help="The number of fold, that is the number of models to train",
)
parser.add_argument("--output_dir", type=str, required=True, help="Where to write the folds")
parser.add_argument(
"--keep_original_filename",
action="store_true",
help="By default the copied new files get a new 8 digit name. Use this flag to keep the "
"original name but be aware, that this might override lines with the same name",
)
args = parser.parse_args()
logger.info("Creating folds")
images = glob_all(args.files)
texts = [split_all_ext(p)[0] + ".gt.txt" for p in images]
data_reader = FileDataParams(images=images, texts=texts, skip_invalid=True)
data_reader.prepare_for_mode(PipelineMode.TRAINING)
cross_fold = CrossFold(
n_folds=args.n_folds,
data_generator_params=data_reader,
output_dir=args.output_dir,
)
logger.info("Copying files")
for fold_id, fold_files in enumerate(cross_fold.folds):
fold_out_dir = os.path.join(args.output_dir, str(fold_id))
if not os.path.exists(fold_out_dir):
os.makedirs(fold_out_dir)
for file_id, file in tqdm(enumerate(fold_files), total=len(fold_files), desc=f"Fold {fold_id}"):
img_file = file
base, ext = split_all_ext(file)
txt_file = base + ".gt.txt"
output_basename = os.path.basename(base) if args.keep_original_filename else f"{fold_id:08d}"
if os.path.exists(img_file) and os.path.exists(txt_file):
output_file = os.path.join(fold_out_dir, f"{output_basename}{ext}")
shutil.copyfile(img_file, output_file)
output_file = os.path.join(fold_out_dir, f"{output_basename}.gt.txt")
shutil.copyfile(txt_file, output_file)
else:
logger.info(f"Warning: Does not exist {img_file} or {txt_file}")
if __name__ == "__main__":
main()
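# Example invocation (a sketch; the script name and file layout are assumed,
# and every image in lines/ must have a matching '.gt.txt' file beside it):
#
#   python cross_fold_split.py --files "lines/*.png" --n_folds 5 --output_dir folds/
#
# This writes folds/0 ... folds/4, each holding copies of the images and
# ground-truth files assigned to that fold.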
|
1638545
|
import os
import textwrap
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from orchestra.forms import UserCreationForm, UserChangeForm
from . import settings
from .models import SystemUser
from .validators import validate_home, validate_paths_exist
class SystemUserFormMixin(object):
MOCK_USERNAME = '<username>'
def __init__(self, *args, **kwargs):
super(SystemUserFormMixin, self).__init__(*args, **kwargs)
duplicate = lambda n: (n, n)
if self.instance.pk:
username = self.instance.username
choices=(
duplicate(self.account.main_systemuser.get_base_home()),
duplicate(self.instance.get_base_home()),
)
else:
username = self.MOCK_USERNAME
choices=(
duplicate(self.account.main_systemuser.get_base_home()),
duplicate(SystemUser(username=username).get_base_home()),
)
self.fields['home'].widget = forms.Select(choices=choices)
if self.instance.pk and (self.instance.is_main or self.instance.has_shell):
            # hide the home option for shell users
self.fields['home'].widget.input_type = 'hidden'
self.fields['directory'].widget.input_type = 'hidden'
elif self.instance.pk and (self.instance.get_base_home() == self.instance.home):
self.fields['directory'].widget = forms.HiddenInput()
else:
self.fields['directory'].widget = forms.TextInput(attrs={'size':'70'})
if not self.instance.pk or not self.instance.is_main:
            # Some javascript to hide the home/directory inputs when convenient
self.fields['shell'].widget.attrs['onChange'] = textwrap.dedent("""\
field = $(".field-home, .field-directory");
input = $("#id_home, #id_directory");
if ($.inArray(this.value, %s) < 0) {
field.addClass("hidden");
} else {
field.removeClass("hidden");
input.removeAttr("type");
};""" % list(settings.SYSTEMUSERS_DISABLED_SHELLS)
)
self.fields['home'].widget.attrs['onChange'] = textwrap.dedent("""\
field = $(".field-box.field-directory");
input = $("#id_directory");
if (this.value.search("%s") > 0) {
field.addClass("hidden");
} else {
field.removeClass("hidden");
input.removeAttr("type");
};""" % username
)
def clean_directory(self):
directory = self.cleaned_data['directory']
return directory.lstrip('/')
def clean(self):
super(SystemUserFormMixin, self).clean()
cleaned_data = self.cleaned_data
home = cleaned_data.get('home')
shell = cleaned_data.get('shell')
if home and self.MOCK_USERNAME in home:
username = cleaned_data.get('username', '')
cleaned_data['home'] = home.replace(self.MOCK_USERNAME, username)
elif home and shell not in settings.SYSTEMUSERS_DISABLED_SHELLS:
cleaned_data['home'] = ''
cleaned_data['directory'] = ''
validate_home(self.instance, cleaned_data, self.account)
return cleaned_data
class SystemUserCreationForm(SystemUserFormMixin, UserCreationForm):
pass
class SystemUserChangeForm(SystemUserFormMixin, UserChangeForm):
pass
class LinkForm(forms.Form):
base_home = forms.ChoiceField(label=_("Target path"), choices=(),
help_text=_("Target link will be under this directory."))
home_extension = forms.CharField(label=_("Home extension"), required=False, initial='',
widget=forms.TextInput(attrs={'size':'70'}),
help_text=_("Relative path to chosen directory."))
link_name = forms.CharField(label=_("Link name"), required=False, initial='',
widget=forms.TextInput(attrs={'size':'70'}))
def __init__(self, *args, **kwargs):
self.instance = args[0]
self.queryset = kwargs.pop('queryset', [])
super_args = []
if len(args) > 1:
super_args.append(args[1])
super(LinkForm, self).__init__(*super_args, **kwargs)
related_users = type(self.instance).objects.filter(account=self.instance.account_id)
self.fields['base_home'].choices = (
(user.get_base_home(), user.get_base_home()) for user in related_users
)
if len(self.queryset) == 1:
user = self.instance
help_text = _("If left blank or relative path: the link will be created in %s home.") % user
else:
help_text = _("If left blank or relative path: the link will be created in each user home.")
self.fields['link_name'].help_text = help_text
def clean_home_extension(self):
home_extension = self.cleaned_data['home_extension']
return home_extension.lstrip('/')
def clean_link_name(self):
link_name = self.cleaned_data['link_name']
if link_name:
if link_name.startswith('/'):
if len(self.queryset) > 1:
raise ValidationError(
_("Link name can not be a full path when multiple users."))
link_names = [os.path.dirname(link_name)]
else:
dir_name = os.path.dirname(link_name)
link_names = [os.path.join(user.home, dir_name) for user in self.queryset]
validate_paths_exist(self.instance, link_names)
return link_name
def clean(self):
cleaned_data = super(LinkForm, self).clean()
path = os.path.join(cleaned_data['base_home'], cleaned_data['home_extension'])
try:
validate_paths_exist(self.instance, [path])
except ValidationError as err:
raise ValidationError({
'home_extension': err,
})
return cleaned_data
class PermissionForm(LinkForm):
set_action = forms.ChoiceField(label=_("Action"), initial='grant',
choices=(
('grant', _("Grant")),
('revoke', _("Revoke"))
))
base_home = forms.ChoiceField(label=_("Set permissions to"), choices=(),
help_text=_("User will be granted/revoked access to this directory."))
home_extension = forms.CharField(label=_("Home extension"), required=False, initial='',
widget=forms.TextInput(attrs={'size':'70'}), help_text=_("Relative to chosen home."))
permissions = forms.ChoiceField(label=_("Permissions"), initial='read-write',
choices=(
('rw', _("Read and write")),
('r', _("Read only")),
('w', _("Write only"))
))
|
1638549
|
from ConnectSignal.Lambda import (
connect_slider_moved_abstract,
connect_slider_released_abstract,
connect_def_str_lineedit_abstract,
connect_name_change_abstract
)
from ConnectSignal.ConnectMacros import (
connect_colour,
connect_fill_pattern,
connect_dash,
connect_o_arrow,
connect_d_arrow
)
def connect_circle(scene):
"""Connect signals in the circle tab."""
scene.ui.circle_def_str.editingFinished.connect(
lambda: connect_def_str_lineedit_abstract(scene, scene.ui.circle_def_str))
scene.ui.circle_name.editingFinished.connect(
lambda: connect_name_change_abstract(scene.ui.circle_name, scene))
scene.ui.circle_line_width_slider.sliderMoved.connect(
lambda x: connect_slider_moved_abstract(x, scene, ['line'], 'line_width', lambda x: x / 10.0,
scene.ui.circle_line_width_spin))
scene.ui.circle_line_width_slider.sliderReleased.connect(
lambda: connect_slider_released_abstract(scene))
scene.ui.circle_double_distance_slider.sliderMoved.connect(
lambda x: connect_slider_moved_abstract(x, scene, ['line', 'double'], 'distance', lambda x: x / 10.0,
scene.ui.circle_double_distance_spin))
scene.ui.circle_double_distance_slider.sliderReleased.connect(
lambda: connect_slider_released_abstract(scene))
connect_fill_pattern(scene, ['fill', 'pattern'],
scene.ui.circle_pattern_type,
scene.ui.circle_pattern_distance_spin,
scene.ui.circle_pattern_distance_slider,
scene.ui.circle_pattern_size_spin,
scene.ui.circle_pattern_size_slider,
scene.ui.circle_pattern_rotation_spin,
scene.ui.circle_pattern_rotation_slider,
scene.ui.circle_pattern_xshift_spin,
scene.ui.circle_pattern_xshift_slider,
scene.ui.circle_pattern_yshift_spin,
scene.ui.circle_pattern_yshift_slider)
connect_colour(scene, ['fill', 'colour'],
scene.ui.circle_marker_colour_name,
scene.ui.circle_marker_colour_mix_name,
scene.ui.circle_marker_colour_mixratio_spin,
scene.ui.circle_marker_colour_mixratio_slider,
scene.ui.circle_marker_colour_strength_spin,
scene.ui.circle_marker_colour_strength_slider)
connect_colour(scene, ['line', 'colour'],
scene.ui.circle_border_colour_name,
scene.ui.circle_border_colour_mix_name,
scene.ui.circle_border_colour_mixratio_spin,
scene.ui.circle_border_colour_mixratio_slider,
scene.ui.circle_border_colour_strength_spin,
scene.ui.circle_border_colour_strength_slider)
connect_colour(scene, ['line', 'double', 'colour'],
scene.ui.circle_double_colour_name,
scene.ui.circle_double_colour_mix_name,
scene.ui.circle_double_colour_mixratio_spin,
scene.ui.circle_double_colour_mixratio_slider,
scene.ui.circle_double_colour_strength_spin,
scene.ui.circle_double_colour_strength_slider)
connect_o_arrow(scene,
scene.ui.circle_o_tip,
scene.ui.circle_o_side,
scene.ui.circle_o_reversed,
scene.ui.circle_o_length_spin,
scene.ui.circle_o_length_slider,
scene.ui.circle_o_width_spin,
scene.ui.circle_o_width_slider)
connect_d_arrow(scene,
scene.ui.circle_d_tip,
scene.ui.circle_d_side,
scene.ui.circle_d_reversed,
scene.ui.circle_d_length_spin,
scene.ui.circle_d_length_slider,
scene.ui.circle_d_width_spin,
scene.ui.circle_d_width_slider)
    connect_dash(scene, ['line', 'dash'], scene.ui.circle_line_stroke, scene.ui.circle_custom_dash)
|
1638568
|
class BackendError(Exception):
"""Base backend error exception."""
class NoTokenError(BackendError):
"""Token given is unknown."""
class UserExistsError(BackendError):
"""A user with this username already exists."""
|
1638607
|
from .lasot import Lasot
from .got10k import Got10k
from .tracking_net import TrackingNet
from .coco import MSCOCO
from .coco_seq import MSCOCOSeq
|
1638612
|
import os
import unittest
from shutil import rmtree
from webvtt import WebVTTSegmenter, Caption
from webvtt.errors import InvalidCaptionsError
from webvtt import WebVTT
BASE_DIR = os.path.dirname(__file__)
SUBTITLES_DIR = os.path.join(BASE_DIR, 'subtitles')
OUTPUT_DIR = os.path.join(BASE_DIR, 'output')
class WebVTTSegmenterTestCase(unittest.TestCase):
def setUp(self):
self.segmenter = WebVTTSegmenter()
def tearDown(self):
if os.path.exists(OUTPUT_DIR):
rmtree(OUTPUT_DIR)
def _parse_captions(self, filename):
self.webvtt = WebVTT().read(os.path.join(SUBTITLES_DIR, filename))
def test_invalid_captions(self):
self.assertRaises(
FileNotFoundError,
self.segmenter.segment,
'text'
)
self.assertRaises(
InvalidCaptionsError,
self.segmenter.segment,
10
)
def test_single_invalid_caption(self):
self.assertRaises(
InvalidCaptionsError,
self.segmenter.segment,
[Caption(), Caption(), 'text', Caption()]
)
def test_total_segments(self):
# segment with default 10 seconds
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR)
self.assertEqual(self.segmenter.total_segments, 7)
# segment with custom 30 seconds
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR, 30)
self.assertEqual(self.segmenter.total_segments, 3)
def test_output_folder_is_created(self):
self.assertFalse(os.path.exists(OUTPUT_DIR))
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR)
self.assertTrue(os.path.exists(OUTPUT_DIR))
def test_segmentation_files_exist(self):
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR)
for i in range(7):
self.assertTrue(
os.path.exists(os.path.join(OUTPUT_DIR, 'fileSequence{}.webvtt'.format(i)))
)
self.assertTrue(os.path.exists(os.path.join(OUTPUT_DIR, 'prog_index.m3u8')))
def test_segmentation(self):
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR)
# segment 1 should have caption 1 and 2
self.assertEqual(len(self.segmenter.segments[0]), 2)
self.assertIn(self.webvtt.captions[0], self.segmenter.segments[0])
self.assertIn(self.webvtt.captions[1], self.segmenter.segments[0])
# segment 2 should have caption 2 again (overlap), 3 and 4
self.assertEqual(len(self.segmenter.segments[1]), 3)
self.assertIn(self.webvtt.captions[2], self.segmenter.segments[1])
self.assertIn(self.webvtt.captions[3], self.segmenter.segments[1])
# segment 3 should have caption 4 again (overlap), 5, 6 and 7
self.assertEqual(len(self.segmenter.segments[2]), 4)
self.assertIn(self.webvtt.captions[3], self.segmenter.segments[2])
self.assertIn(self.webvtt.captions[4], self.segmenter.segments[2])
self.assertIn(self.webvtt.captions[5], self.segmenter.segments[2])
self.assertIn(self.webvtt.captions[6], self.segmenter.segments[2])
# segment 4 should have caption 7 again (overlap), 8, 9 and 10
self.assertEqual(len(self.segmenter.segments[3]), 4)
self.assertIn(self.webvtt.captions[6], self.segmenter.segments[3])
self.assertIn(self.webvtt.captions[7], self.segmenter.segments[3])
self.assertIn(self.webvtt.captions[8], self.segmenter.segments[3])
self.assertIn(self.webvtt.captions[9], self.segmenter.segments[3])
# segment 5 should have caption 10 again (overlap), 11 and 12
self.assertEqual(len(self.segmenter.segments[4]), 3)
self.assertIn(self.webvtt.captions[9], self.segmenter.segments[4])
self.assertIn(self.webvtt.captions[10], self.segmenter.segments[4])
self.assertIn(self.webvtt.captions[11], self.segmenter.segments[4])
# segment 6 should have caption 12 again (overlap), 13, 14 and 15
self.assertEqual(len(self.segmenter.segments[5]), 4)
self.assertIn(self.webvtt.captions[11], self.segmenter.segments[5])
self.assertIn(self.webvtt.captions[12], self.segmenter.segments[5])
self.assertIn(self.webvtt.captions[13], self.segmenter.segments[5])
self.assertIn(self.webvtt.captions[14], self.segmenter.segments[5])
# segment 7 should have caption 15 again (overlap) and 16
self.assertEqual(len(self.segmenter.segments[6]), 2)
self.assertIn(self.webvtt.captions[14], self.segmenter.segments[6])
self.assertIn(self.webvtt.captions[15], self.segmenter.segments[6])
def test_segment_content(self):
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR, 10)
with open(os.path.join(OUTPUT_DIR, 'fileSequence0.webvtt'), 'r', encoding='utf-8') as f:
lines = [line.rstrip() for line in f.readlines()]
expected_lines = [
'WEBVTT',
'X-TIMESTAMP-MAP=MPEGTS:900000,LOCAL:00:00:00.000',
'',
'00:00:00.500 --> 00:00:07.000',
'Caption text #1',
'',
'00:00:07.000 --> 00:00:11.890',
'Caption text #2'
]
self.assertListEqual(lines, expected_lines)
def test_manifest_content(self):
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR, 10)
with open(os.path.join(OUTPUT_DIR, 'prog_index.m3u8'), 'r', encoding='utf-8') as f:
lines = [line.rstrip() for line in f.readlines()]
expected_lines = [
'#EXTM3U',
'#EXT-X-TARGETDURATION:{}'.format(self.segmenter.seconds),
'#EXT-X-VERSION:3',
'#EXT-X-PLAYLIST-TYPE:VOD',
]
for i in range(7):
expected_lines.extend([
'#EXTINF:30.00000',
'fileSequence{}.webvtt'.format(i)
])
expected_lines.append('#EXT-X-ENDLIST')
for index, line in enumerate(expected_lines):
self.assertEqual(lines[index], line)
def test_customize_mpegts(self):
self._parse_captions('sample.vtt')
self.segmenter.segment(self.webvtt, OUTPUT_DIR, mpegts=800000)
with open(os.path.join(OUTPUT_DIR, 'fileSequence0.webvtt'), 'r', encoding='utf-8') as f:
lines = f.readlines()
self.assertIn('MPEGTS:800000', lines[1])
def test_segment_from_file(self):
        self.segmenter.segment(os.path.join(SUBTITLES_DIR, 'sample.vtt'), OUTPUT_DIR)
self.assertEqual(self.segmenter.total_segments, 7)
def test_segment_with_no_captions(self):
        self.segmenter.segment(os.path.join(SUBTITLES_DIR, 'no_captions.vtt'), OUTPUT_DIR)
self.assertEqual(self.segmenter.total_segments, 0)
def test_total_segments_readonly(self):
self.assertRaises(
AttributeError,
setattr,
WebVTTSegmenter(),
'total_segments',
5
)
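# Usage sketch of the segmenter exercised above (paths are illustrative):
#
#   vtt = WebVTT().read('subtitles/sample.vtt')
#   WebVTTSegmenter().segment(vtt, 'output/', 10, mpegts=900000)
#   # -> output/fileSequence0.webvtt ... fileSequenceN.webvtt + prog_index.m3u8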
|
1638623
|
from copy import deepcopy
from uuid import uuid4
from time import strftime, gmtime
from threading import Lock, Timer
from typing import List, Dict, Callable
import atexit
try:
from contextvars import ContextVar
_session_info = ContextVar('bugsnag-session', default={}) # type: ignore
except ImportError:
from bugsnag.utils import ThreadContextVar
# flake8: noqa
_session_info = ThreadContextVar('bugsnag-session', default={}) # type: ignore
from bugsnag.utils import package_version, FilterDict, SanitizingJSONEncoder
from bugsnag.event import Event
__all__ = [] # type: List[str]
class SessionTracker:
MAXIMUM_SESSION_COUNT = 100
SESSION_PAYLOAD_VERSION = "1.0"
"""
Session tracking class for Bugsnag
"""
def __init__(self, configuration):
self.session_counts = {} # type: Dict[str, int]
self.config = configuration
self.mutex = Lock()
self.auto_sessions = False
self.delivery_thread = None
def start_session(self):
if not self.auto_sessions and self.config.auto_capture_sessions:
self.auto_sessions = True
self.__start_delivery()
start_time = strftime('%Y-%m-%dT%H:%M:00', gmtime())
new_session = {
'id': uuid4().hex,
'startedAt': start_time,
'events': {
'handled': 0,
'unhandled': 0
}
}
_session_info.set(new_session)
self.__queue_session(start_time)
def send_sessions(self):
self.mutex.acquire()
try:
sessions = []
for min_time, count in self.session_counts.items():
sessions.append({
'startedAt': min_time,
'sessionsStarted': count
})
self.session_counts = {}
finally:
self.mutex.release()
self.__deliver(sessions)
def __start_delivery(self):
if self.delivery_thread is None:
def deliver():
self.send_sessions()
self.delivery_thread = Timer(30.0, deliver)
self.delivery_thread.daemon = True
self.delivery_thread.start()
self.delivery_thread = Timer(30.0, deliver)
self.delivery_thread.daemon = True
self.delivery_thread.start()
def cleanup():
if self.delivery_thread is not None:
self.delivery_thread.cancel()
self.send_sessions()
atexit.register(cleanup)
def __queue_session(self, start_time: str):
self.mutex.acquire()
try:
if start_time not in self.session_counts:
self.session_counts[start_time] = 0
self.session_counts[start_time] += 1
finally:
self.mutex.release()
def __deliver(self, sessions: List[Dict]):
if not sessions:
self.config.logger.debug("No sessions to deliver")
return
if not self.config.api_key:
self.config.logger.debug(
"Not delivering due to an invalid api_key"
)
return
if not self.config.should_notify():
self.config.logger.debug("Not delivering due to release_stages")
return
notifier_version = package_version('bugsnag') or 'unknown'
payload = {
'notifier': {
'name': Event.NOTIFIER_NAME,
'url': Event.NOTIFIER_URL,
'version': notifier_version
},
'device': FilterDict({
'hostname': self.config.hostname,
'runtimeVersions': self.config.runtime_versions
}),
'app': {
'releaseStage': self.config.release_stage,
'version': self.config.app_version
},
'sessionCounts': sessions
}
try:
encoder = SanitizingJSONEncoder(
self.config.logger,
separators=(',', ':'),
keyword_filters=self.config.params_filters
)
encoded_payload = encoder.encode(payload)
self.config.delivery.deliver_sessions(self.config, encoded_payload)
except Exception as e:
self.config.logger.exception('Sending sessions failed %s', e)
class SessionMiddleware:
"""
Session middleware ensures that a session is appended to the event.
"""
def __init__(self, bugsnag: Callable[[Event], Callable]):
self.bugsnag = bugsnag
def __call__(self, event: Event):
session = _session_info.get()
if session:
if event.unhandled:
session['events']['unhandled'] += 1
else:
session['events']['handled'] += 1
event.session = deepcopy(session)
self.bugsnag(event)
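# Minimal sketch of how these pieces fit together (illustrative; real setups
# wire this up through the bugsnag client configuration):
#
#   tracker = SessionTracker(configuration)
#   tracker.start_session()                  # store a new session in _session_info
#   middleware = SessionMiddleware(deliver)  # `deliver` is the next callable
#   middleware(event)                        # bump handled/unhandled counts and
#                                            # attach a session copy to the event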
|
1638629
|
import random
import pytest
import concurrent.futures
from deepdiff import DeepDiff
from fast_autocomplete.lfucache import LFUCache
class TestLFUcache:
@pytest.mark.parametrize("items, size, expected_results", [
(['a', 'a', 'b', 'a', 'c', 'b', 'd'], 3, [('a', 2), ('b', 1), ('d', 0)]),
(['a', 'a', 'b', 'a', 'c', 'b', 'd', 'e', 'c', 'b'], 3, [('a', 2), ('b', 2), ('c', 0)]),
(['a', 'a', 'b', 'a', 'c', 'b', 'd', 'e', 'c', 'b', 'b', 'c', 'd', 'b'], 3, [('b', 4), ('a', 2), ('d', 0)]),
])
def test_autocomplete(self, items, size, expected_results):
lfucache = LFUCache(size)
for item in items:
lfucache.set(item, f'{item}_cached')
results = lfucache.get_sorted_cache_keys()
diff = DeepDiff(expected_results, results)
assert not diff
def test_get_multithreading(self):
keys = '<KEY>'
lfucache = LFUCache(2)
def _do_set(cache, key):
cache.set(key, f'{key}_cached')
def _do_get(cache, key):
return cache.get(key)
def _key_gen():
i = 0
while i < 30000:
i += 1
yield random.choice(keys)
def _random_func(cache, key):
return random.choice([_do_get, _do_get, _do_set])(cache, key)
with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
futures = (executor.submit(_random_func, lfucache, key) for key in _key_gen())
for future in concurrent.futures.as_completed(futures):
future.result()
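# Usage sketch of the cache under test (illustrative):
#
#   cache = LFUCache(3)
#   cache.set('word', 'word_cached')
#   cache.get('word')               # -> 'word_cached' (and bumps the frequency)
#   cache.get_sorted_cache_keys()   # -> [(key, frequency), ...] as asserted above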
|
1638704
|
import abc
import os
from pkg_resources import resource_filename
from typing import Optional, List, Dict, Callable, Tuple
import base64
import asyncio
import shutil
import copy
from gtmcore.dataset.io import PushResult, PushObject, PullObject, PullResult
from gtmcore.dataset.manifest.manifest import Manifest, StatusResult
from gtmcore.dataset.manifest.eventloop import get_event_loop
class StorageBackend(metaclass=abc.ABCMeta):
"""Parent class for Dataset storage backends"""
def __init__(self):
# Optional configuration data that is in the form of key-value pairs
# No nesting of values is supported
# Configuration is populated from the Dataset at runtime (via a file and in-memory secrets)
self.configuration = dict()
# Attributes used to store the required keys for a backend
self._required_configuration_params = [{'parameter': 'username',
'description': "the Gigantum username for the logged in user",
'type': 'str'
},
{'parameter': 'gigantum_bearer_token',
'description': "Gigantum bearer token for the current session",
'type': 'str'
},
{'parameter': 'gigantum_id_token',
'description': "Gigantum ID token for the current session",
'type': 'str'
}]
if self._required_configuration():
# If additional config required, append
self._required_configuration_params.extend(self._required_configuration())
@property
def storage_type(self) -> str:
"""Return the string identifier for the dataset's storage class"""
return self._backend_metadata()['storage_type']
def _backend_metadata(self) -> dict:
"""Method to specify Storage Backend metadata for each implementation. This is used to render the UI
Simply implement this method in a child class. Note, 'icon' should be the name of the icon file saved in the
thumbnails directory. It should be a 128x128 px PNG image.
return {"storage_type": "a_unique_identifier",
"name": "My Dataset Type",
"description": "Short string",
"readme": "Long string",
"icon": "my_icon.png",
"url": "http://moreinfo.com"
}
Returns:
dict
"""
        raise NotImplementedError
@property
def metadata(self):
"""
Returns:
"""
metadata = self._backend_metadata()
dataset_pkg = resource_filename('gtmcore', 'dataset')
icon_file = os.path.join(dataset_pkg, 'storage', 'thumbnails', metadata['icon'])
metadata['is_managed'] = self.is_managed
with open(icon_file, 'rb') as icf:
metadata['icon'] = base64.b64encode(icf.read()).decode("utf-8")
return metadata
@property
def is_managed(self):
"""Boolean property indicating if this is a managed dataset type"""
return isinstance(self, ManagedStorageBackend)
def set_default_configuration(self, username: str, bearer_token: str, id_token: str) -> None:
"""Method to configure default keys. This should be called from API and other situations where
remote ops are desired and the bearer and ID tokens exist
Args:
username: current logged in username
bearer_token: current session bearer token (gigantum auth service)
id_token: current session id token (gigantum auth service)
Returns:
None
"""
self.configuration['username'] = username
self.configuration['gigantum_bearer_token'] = bearer_token
self.configuration['gigantum_id_token'] = id_token
def _required_configuration(self) -> List[Dict[str, str]]:
"""A private method to return a list of parameters that must be set for a backend to be fully configured
The format is a list of dictionaries, e.g.:
[
{
"parameter": "server",
"description": "URL of the remote server",
"type": "str"
},
{
"parameter": "username",
"description": "The current logged in username",
"type": "str"
}
]
"type" must be either `str` or `bool`
There are 3 parameters that are always automatically populated:
- username: the gigantum username for the logged in user
- gigantum_bearer_token: the gigantum bearer token for the current session
- gigantum_id_token: the gigantum id token for the current session
"""
        raise NotImplementedError
def confirm_configuration(self, dataset) -> Optional[str]:
"""Method to verify a configuration and optionally allow the user to confirm before proceeding
Should return the desired confirmation message if there is one. If no confirmation is required/possible,
return None
"""
        raise NotImplementedError
@property
def is_configured(self) -> bool:
"""Boolean property indicating if a storage backend has all required config items set"""
return len(self.missing_configuration) == 0
@property
def missing_configuration(self) -> List[Dict[str, str]]:
"""Property returning the missing configuration parameters"""
configured_params = list(self.configuration.keys())
missing_params = list()
for param in self._required_configuration_params:
if param['parameter'] not in configured_params:
missing_params.append(param)
return missing_params
@property
def safe_current_configuration(self) -> List[Dict[str, str]]:
"""Property returning the current configuration, excluding the default parameters which include secrets"""
current_params = list()
for param in self._required_configuration_params:
if param['parameter'] in ['username', 'gigantum_bearer_token', 'gigantum_id_token']:
continue
param['value'] = self.configuration.get(param['parameter'])
current_params.append(param)
return current_params
def prepare_pull(self, dataset, objects: List[PullObject]) -> None:
"""Method to prepare a backend for pulling objects locally
Args:
dataset: The dataset instance
objects: A list of PullObjects, indicating which objects to pull
status_update_fn: A callable, accepting a string for logging/providing status to the UI
Returns:
"""
        raise NotImplementedError
def pull_objects(self, dataset, objects: List[PullObject], progress_update_fn: Callable) -> PullResult:
"""Method to pull objects locally
Args:
dataset: The dataset instance
objects: A list of PullObjects, indicating which objects to pull
progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
downloaded in since last called
Returns:
PullResult
"""
        raise NotImplementedError
def finalize_pull(self, dataset) -> None:
"""Method to finalize and cleanup a backend after pulling objects locally
Args:
dataset: The dataset instance
status_update_fn: A callable, accepting a string for logging/providing status to the UI
Returns:
"""
        raise NotImplementedError
def hash_file_key_list(self, dataset, keys):
m = Manifest(dataset, self.configuration.get('username'))
loop = get_event_loop()
hash_task = asyncio.ensure_future(m.hasher.hash(keys))
loop.run_until_complete(asyncio.gather(hash_task))
return hash_task.result()
def verify_contents(self, dataset, status_update_fn: Callable) -> List[str]:
"""Method to verify the hashes of all local files and indicate if they have changed
Args:
dataset: Dataset object
status_update_fn: A callable, accepting a string for logging/providing status to the UI
Returns:
list
"""
if 'username' not in self.configuration:
raise ValueError("Dataset storage backend requires current logged in username to verify contents")
m = Manifest(dataset, self.configuration.get('username'))
keys_to_verify = list()
for item in m.manifest:
if os.path.isfile(os.path.join(m.cache_mgr.cache_root, m.dataset_revision, item)):
# File exists locally
keys_to_verify.append(item)
# re-hash files
status_update_fn(f"Validating contents of {len(keys_to_verify)} files. Please wait.")
updated_hashes = self.hash_file_key_list(dataset, keys_to_verify)
modified_items = list()
for key, new_hash in zip(keys_to_verify, updated_hashes):
item = m.manifest.get(key)
if item:
if new_hash != item.get('h'):
modified_items.append(key)
if modified_items:
status_update_fn(f"Integrity check complete. {len(modified_items)} files have been modified.")
else:
status_update_fn(f"Integrity check complete. No files have been modified.")
return modified_items
class ManagedStorageBackend(StorageBackend):
"""Parent class for Managed Dataset storage backends"""
@property
def client_should_dedup_on_push(self) -> bool:
"""Property to indicate if the client should perform deduplication of objects based on content hashing on push
This effectively removes duplicate objects from the list of PushObjects before calling prepare_push() and
push_objects(). If two different files have the same contents only 1 copy will be pushed.
Returns:
bool
"""
        raise NotImplementedError
def prepare_push(self, dataset, objects: List[PushObject]) -> None:
"""Method to prepare a backend for pushing objects to the remote storage backend
Args:
dataset: The dataset instance
objects: A list of PushObjects, indicating which objects to push
status_update_fn: A callable, accepting a string for logging/providing status to the UI
Returns:
"""
        raise NotImplementedError
def push_objects(self, dataset, objects: List[PushObject], progress_update_fn: Callable) -> PushResult:
"""Method to push objects to the remote storage backend
Args:
dataset: The dataset instance
objects: A list of PushObject, indicating which objects to push
progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
uploaded in since last called
Returns:
"""
        raise NotImplementedError
def finalize_push(self, dataset) -> None:
"""Method to finalize and cleanup a backend after pushing objects to the remote storage backend
Args:
dataset: The dataset instance
status_update_fn: A callable, accepting a string for logging/providing status to the UI
Returns:
"""
        raise NotImplementedError
def delete_contents(self, dataset) -> None:
"""Method to remove the contents of a dataset from the storage backend, should only work if managed
Args:
dataset: Dataset object
Returns:
None
"""
        raise NotImplementedError
class UnmanagedStorageBackend(StorageBackend):
"""Parent class for Unmanaged Dataset storage backends"""
def update_from_local(self, dataset, status_update_fn: Callable,
verify_contents: bool = False,
status_result: Optional[StatusResult] = None) -> None:
"""Method to update the dataset manifest for changed files that exists locally
Args:
dataset: Dataset object
status_update_fn: A callable, accepting a string for logging/providing status to the UI
verify_contents: Boolean indicating if "verify_contents" should be run, and the results added to modified
status_result: Optional StatusResult object to include in the update (typically from update_from_remote())
Returns:
None
"""
if 'username' not in self.configuration:
raise ValueError("Dataset storage backend requires current logged in username to verify contents")
m = Manifest(dataset, self.configuration.get('username'))
status_update_fn("Updating Dataset manifest from local file state.")
if status_result is not None:
if status_result.modified is not None:
modified_keys = copy.deepcopy(status_result.modified)
else:
modified_keys = list()
else:
modified_keys = list()
if verify_contents:
modified_keys.extend(self.verify_contents(dataset, status_update_fn))
# Create StatusResult to force modifications
if status_result:
created_result = copy.deepcopy(status_result.created)
# Check if any directories got created
for key in status_result.created:
if key[-1] != '/':
# a file
if os.path.dirname(key) not in m.manifest:
# Add the directory to the manifest
created_result.append(f"{os.path.dirname(key)}/")
created_result = list(set(created_result))
if '/' in created_result:
created_result.remove('/')
# Combine a previous StatusResult object (typically from "update_from_remote")
status = StatusResult(created=created_result,
modified=modified_keys,
deleted=status_result.deleted)
else:
status = StatusResult(created=[], modified=modified_keys, deleted=[])
# Update the manifest
previous_revision = m.dataset_revision
m.update(status)
m.create_update_activity_record(status)
# Link the revision dir
m.link_revision()
if os.path.isdir(os.path.join(m.cache_mgr.cache_root, previous_revision)):
shutil.rmtree(os.path.join(m.cache_mgr.cache_root, previous_revision))
status_update_fn("Update complete.")
@property
def can_update_from_remote(self) -> bool:
"""Property indicating if this backend can automatically update its contents to the latest on the remote
Returns:
bool
"""
        raise NotImplementedError
def update_from_remote(self, dataset, status_update_fn: Callable) -> None:
"""Optional method that updates the dataset by comparing against the remote. Not all unmanaged dataset backends
will be able to do this.
Args:
dataset: Dataset object
status_update_fn: A callable, accepting a string for logging/providing status to the UI
Returns:
None
"""
        raise NotImplementedError
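# Below is a minimal illustrative subclass (an assumption for documentation,
# not part of the library): it shows the two hooks a concrete unmanaged
# backend must supply; every other operation still raises NotImplementedError
# from the parent class.
class _ExampleBackend(UnmanagedStorageBackend):
    """Sketch of a concrete backend for illustration purposes only."""

    def _backend_metadata(self) -> dict:
        # Placeholder values; 'icon' must name a 128x128 px PNG saved in the
        # storage/thumbnails directory.
        return {"storage_type": "example",
                "name": "Example Dataset Type",
                "description": "An illustrative unmanaged backend",
                "readme": "Longer description rendered in the UI",
                "icon": "example.png",
                "url": "http://example.com"}

    def _required_configuration(self) -> List[Dict[str, str]]:
        # One extra parameter beyond the three auto-populated session defaults.
        return [{"parameter": "server",
                 "description": "URL of the remote server",
                 "type": "str"}]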
|
1638706
|
from etna.analysis.feature_selection.mrmr_selection import AGGREGATION_FN
from etna.analysis.feature_selection.mrmr_selection import AggregationMode
from etna.analysis.feature_selection.mrmr_selection import mrmr
|
1638719
|
import sys
from ephyviewer.myqt import QT_MODE
if QT_MODE == 'PyQt5':
from . import icons_PyQt5 as icons
elif QT_MODE == 'PySide2':
from . import icons_PySide2 as icons
elif QT_MODE == 'PyQt4':
from . import icons_PyQt4 as icons
else:
raise ValueError('Could not load icons for unrecognized QT_MODE: ' + QT_MODE)
|
1638728
|
import maya.cmds as mc
import data
class SetData( data.Data ):
'''
SetData class object.
Contains functions to save, load and rebuild maya sets.
'''
def __init__(self,setNode=None):
'''
SetData class initializer.
'''
# Execute Super Class Initilizer
super(SetData, self).__init__()
# Initialize Default Class Data Members
self._data['name'] = ''
self._data['membership'] = []
self.mode = ['add','replace']
# Build Data
if setNode: self.buildData(setNode)
def verifySet(self,setNode):
'''
Run standard checks on the specified set
@param setNode: Set to verify
@type setNode: str
'''
# Check Set Exists
if not mc.objExists(setNode):
raise Exception('Set "'+setNode+'" does not exists!')
# Check Set Node Type
if mc.objectType(setNode) != 'objectSet':
raise Exception('Object "'+setNode+'" is not a vaild "set" node!')
def buildData(self,setNode):
'''
Build setData class.
@param setNode: Set to initialize data for
@type setNode: str
'''
# ==========
# - Checks -
# ==========
        if not setNode:
            raise Exception('Invalid set node! Unable to build setData...')
        self.verifySet(setNode)
# ==============
# - Build Data -
# ==============
# Start timer
timer = mc.timerX()
# Reset Data
self.reset()
# Get basic set info
self._data['name'] = setNode
self._data['membership'] = mc.sets(setNode,q=True)
# Print timer result
buildTime = mc.timerX(st=timer)
print('SetData: Data build time for set "'+setNode+'": '+str(buildTime))
# =================
# - Return Result -
# =================
return self._data['name']
def rebuild(self,mode='add',forceMembership=True):
'''
Rebuild the set from the stored setData.
@param mode: Membership mode if the specified set already exists. Accepted values are "add" and "replace".
@type mode: str
@param forceMembership: Forces addition of items to the set. If items are in another set which is in the same partition as the given set, the items will be removed from the other set in order to keep the sets in the partition mutually exclusive with respect to membership.
@type forceMembership: bool
'''
# ==========
# - Checks -
# ==========
# Set Name
if not self._data['name']:
raise Exception('SetData has not been initialized!')
# Member Items
memberList = self._data['membership'] or []
        for obj in list(memberList):  # iterate over a copy so removal below is safe
if not mc.objExists(obj):
print('Set member item "'+obj+'" does not exist! Unable to add to set...')
memberList.remove(obj)
# Flatten Membership List
memberList = mc.ls(memberList,fl=True) or []
# Mode
if not mode in self.mode:
raise Exception('Invalid set membership mode "'+mode+'"! Use "add" or "replace"!')
# ===============
# - Rebuild Set -
# ===============
# Start timer
timer = mc.timerX()
# Create Set
setName = self._data['name']
# Delete Set (REPLACE only)
if mc.objExists(setName) and mode == 'replace': mc.delete(setName)
# Create Set
if not mc.objExists(setName): setName = mc.sets(n=setName)
# Add Members
if memberList:
if forceMembership:
for obj in memberList:
                    try: mc.sets(obj,e=True,fe=setName)
                    except Exception as e:
                        print('Error adding item "'+obj+'" to set "'+setName+'"! Skipping')
                        print(str(e))
else:
for obj in memberList:
                    try: mc.sets(obj,e=True,add=setName)
                    except Exception as e:
                        print('Error adding item "'+obj+'" to set "'+setName+'"! Skipping')
                        print(str(e))
# Print Timer Result
buildTime = mc.timerX(st=timer)
print('SetData: Rebuild time for set "'+setName+'": '+str(buildTime))
# =================
# - Return Result -
# =================
self.setName = setName
result = {}
result['set'] = setName
result['membership'] = memberList
return result
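# Usage sketch (requires a running Maya session with an existing object set):
#
#   sd = SetData('mySet')          # capture the set name and membership
#   # ...the set is modified or deleted elsewhere in the scene...
#   sd.rebuild(mode='replace')     # recreate the set with the stored members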
|
1638733
|
import inspect
import logging
import warnings
import numpy as np
import astropy.units as u
from spectral_cube import SpectralCube
from . import scDerivativeRoutines as scdr
warnings.filterwarnings("ignore")
def _nicestr(quantity):
if quantity.value == int(quantity.value):
return(str(int(quantity.value))+' '+str(quantity.unit))
else:
return(str(quantity))
def _func_and_kwargs_for_moment(moment_tag=None):
"""
    Return the function name and default kwargs for a moment tag.
"""
func = None
kwargs = None
if moment_tag is None:
return(func,kwargs)
if moment_tag == 'mom0':
func = scdr.write_moment0
kwargs ={'unit': u.K * u.km / u.s}
elif moment_tag == 'mom1':
func = scdr.write_moment1
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'mom2':
func = scdr.write_moment2
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'ew':
func = scdr.write_ew
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'vquad':
func = scdr.write_vquad
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'vpeak':
func = scdr.write_vmax
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'tpeak':
func = scdr.write_tmax
kwargs = {'unit': u.K}
elif moment_tag == 'mom1wprior':
func = scdr.write_moment1_hybrid
kwargs = {'unit': u.km / u.s}
return(func, kwargs)
def moment_tag_known(moment_tag=None):
"""
    Test whether the program knows about a moment tag.
"""
func, kwargs = _func_and_kwargs_for_moment(moment_tag)
if func is None:
return(False)
return(True)
def moment_generator(
cubein, mask=None, noise=None,
moment=None, momkwargs=None,
outfile=None, errorfile=None,
channel_correlation=None,
context=None, assignkunits=False):
"""
Generate one moment map from input cube, noise, and masks.
"""
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Set up the call
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Get the relevant function and keyword arguments for this moment
func, kwargs = _func_and_kwargs_for_moment(moment)
if func is None:
logging.error("Moment tag not recognized: "+str(moment))
raise NotImplementedError
return(None)
# Add any user-supplied kwargs to the dictionary
if momkwargs is not None:
        if not isinstance(momkwargs, dict):
logging.error("Type of momkwargs should be dictionary.")
raise NotImplementedError
for this_kwarg in momkwargs:
kwargs[this_kwarg] = momkwargs[this_kwarg]
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Read in the data
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Read in the cube (if needed)
if type(cubein) is str:
cube = SpectralCube.read(cubein)
elif type(cubein) is SpectralCube:
cube = cubein
else:
logging.error('Unrecognized input type for cubein')
raise NotImplementedError
cube.allow_huge_operations = True
# Force Kelvin. We will be unit agnostic later.
cube = cube.to(u.K)
# Attach a mask if needed
if mask is not None:
if type(mask) is str:
mask = SpectralCube.read(mask)
elif type(mask) is SpectralCube:
mask = mask
else:
logging.error('Unrecognized input type for mask')
raise NotImplementedError
# Ensure the mask is booleans and attach it to the cube. This
# just assumes a match in astrometry. Could add reprojection
# here or (better) build a masking routine to apply masks with
# arbitrary astrometry.
        mask = np.array(mask.filled_data[:].value, dtype=bool)  # np.bool is removed in newer NumPy
cube = cube.with_mask(mask, inherit_mask=False)
# Read in the noise (if present)
if noise is not None:
if type(noise) is str:
noisecube = SpectralCube.read(noise)
elif type(noise) is SpectralCube:
noisecube = noise
else:
logging.error('Unrecognized input type for noise.')
raise NotImplementedError
noisecube.allow_huge_operations = True
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Call the moment generation
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Probably not needed anymore
theseargs = (inspect.getfullargspec(func)).args
if 'context' in theseargs:
moment_map, error_map = func(
cube, rms=noisecube,
outfile=outfile, errorfile=errorfile,
channel_correlation=channel_correlation,
#context=context,
**kwargs)
else:
moment_map, error_map = func(
cube, rms=noisecube,
outfile=outfile, errorfile=errorfile,
channel_correlation=channel_correlation,
**kwargs)
return(moment_map, error_map)
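# Usage sketch (file names are illustrative; the mask and noise cubes must
# share the science cube's astrometry, as noted above):
#
#   mom0_map, mom0_err = moment_generator(
#       'galaxy_co21.fits',
#       mask='galaxy_co21_mask.fits',
#       noise='galaxy_co21_noise.fits',
#       moment='mom0',
#       outfile='galaxy_co21_mom0.fits',
#       errorfile='galaxy_co21_emom0.fits')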
|
1638734
|
import json
from pathlib import Path
from shutil import Error
from unittest.mock import mock_open, patch
import gdk.CLIParser as CLIParser
import gdk.common.consts as consts
import gdk.common.exceptions.error_messages as error_messages
import gdk.common.parse_args_actions as parse_args_actions
import gdk.common.utils as utils
import pytest
from gdk.commands.component.BuildCommand import BuildCommand
@pytest.fixture()
def supported_build_system(mocker):
builds_file = utils.get_static_file_path(consts.project_build_system_file)
with open(builds_file, "r") as f:
data = json.loads(f.read())
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds", return_value=data
)
return mock_get_supported_component_builds
@pytest.fixture()
def rglob_build_file(mocker):
def search(*args, **kwargs):
if "build.gradle" in args[0] or "pom.xml" in args[0]:
return [Path(utils.current_directory).joinpath("build_file")]
return []
mock_rglob = mocker.patch("pathlib.Path.rglob", side_effect=search)
return mock_rglob
def test_build_command_instantiation(mocker):
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
)
mock_check_if_arguments_conflict = mocker.patch.object(BuildCommand, "check_if_arguments_conflict", return_value=None)
mock_run = mocker.patch.object(BuildCommand, "run", return_value=None)
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value={},
)
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert mock_get_proj_config.call_count == 1
assert mock_get_supported_component_builds.call_count == 1
assert mock_check_if_arguments_conflict.call_count == 1
assert mock_run.call_count == 1
def test_build_command_instantiation_failed_fetching_config(mocker):
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
side_effect=Exception("exception fetching proj values"),
)
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
)
mock_check_if_arguments_conflict = mocker.patch.object(BuildCommand, "check_if_arguments_conflict", return_value=None)
mock_run = mocker.patch.object(BuildCommand, "run", return_value=None)
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert "exception fetching proj values" in e.value.args[0]
assert mock_get_proj_config.call_count == 1
assert mock_get_supported_component_builds.call_count == 0
assert mock_check_if_arguments_conflict.call_count == 1
assert mock_run.call_count == 0
def test_build_command_instantiation_failed_fetching_build_config(mocker):
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds",
side_effect=Exception("exception fetching build"),
)
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value={},
)
mock_check_if_arguments_conflict = mocker.patch.object(BuildCommand, "check_if_arguments_conflict", return_value=None)
mock_run = mocker.patch.object(BuildCommand, "run", return_value=None)
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert "exception fetching build" in e.value.args[0]
assert mock_get_proj_config.call_count == 1
assert mock_get_supported_component_builds.call_count == 1
assert mock_check_if_arguments_conflict.call_count == 1
assert mock_run.call_count == 0
def test_build_command_instantiation_failed_conflicting_args(mocker):
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
)
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
side_effect=Exception("exception fetching proj values"),
)
mock_check_if_arguments_conflict = mocker.patch.object(
BuildCommand,
"check_if_arguments_conflict",
side_effect=Exception("exception due to conflictins args"),
)
mock_run = mocker.patch.object(BuildCommand, "run", return_value=None)
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert "exception due to conflictins args" in e.value.args[0]
assert mock_get_proj_config.call_count == 0
assert mock_get_supported_component_builds.call_count == 0
assert mock_check_if_arguments_conflict.call_count == 1
assert mock_run.call_count == 0
def test_build_run():
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert "Could not build the project due to the following error." in e.value.args[0]
def test_build_run_default_zip_json(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=project_config(),
)
mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
mock_subprocess_run = mocker.patch("subprocess.run")
mock_json_dump = mocker.patch("json.dumps")
pc = mock_get_proj_config.return_value
file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
with patch("builtins.open", mock_open()) as mock_file:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
mock_file.assert_any_call(file_name, "w")
    assert mock_json_dump.call_count == 1
    mock_get_proj_config.assert_called_once()
assert not mock_subprocess_run.called
assert mock_copy_dir.call_count == 1 # copy files to zip-build to create a zip
assert mock_archive_dir.call_count == 1 # archiving directory
assert mock_is_artifact_in_build.call_count == 1 # only one artifact in project_config. Available in build
assert mock_clean_dir.call_count == 2 # clean zip-build, clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
def test_build_run_default_maven_yaml(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
pc = project_config()
pc["component_build_config"] = {"build_system": "maven"}
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_platform = mocker.patch("platform.system", return_value="not-windows")
pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
mock_subprocess_run = mocker.patch("subprocess.run")
pc = mock_get_proj_config.return_value
file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
with patch("builtins.open", mock_open()) as mock_file:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
mock_file.assert_any_call(file_name, "w")
    mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["mvn", "clean", "package"]) # called maven build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
    assert mock_archive_dir.call_count == 0  # Archive never called in maven
assert mock_is_artifact_in_build.call_count == 1 # only one artifact in project_config. Available in build
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
assert mock_platform.call_count == 1
def test_build_run_default_maven_yaml_windows(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
mock_platform = mocker.patch("platform.system", return_value="Windows")
pc = project_config()
pc["component_build_config"] = {"build_system": "maven"}
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
mock_subprocess_run = mocker.patch("subprocess.run", side_effect="error with maven build cmd")
mock_yaml_dump = mocker.patch("yaml.dump")
pc = mock_get_proj_config.return_value
file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
with patch("builtins.open", mock_open()) as mock_file:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
mock_file.assert_any_call(file_name, "w")
    assert mock_yaml_dump.call_count == 1
    mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["mvn.cmd", "clean", "package"]) # called maven build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
    assert mock_archive_dir.call_count == 0  # Archive never called in maven
assert mock_is_artifact_in_build.call_count == 1 # only one artifact in project_config. Available in build
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
assert mock_platform.call_count == 1
def test_build_run_default_maven_yaml_error(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
mock_platform = mocker.patch("platform.system", return_value="Windows")
pc = project_config()
pc["component_build_config"] = {"build_system": "maven"}
pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
mock_subprocess_run = mocker.patch("subprocess.run", side_effect=Exception("error with maven build cmd"))
pc = mock_get_proj_config.return_value
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build", "-d"]))
assert "error with maven build cmd" in e.value.args[0]
    mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["mvn.cmd", "clean", "package"]) # called maven build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
assert mock_archive_dir.call_count == 0 # Archive never called in maven
assert mock_is_artifact_in_build.call_count == 0 # never reached: the maven build command raises first
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
assert mock_platform.called
def test_build_run_default_gradle_yaml_artifact_not_found(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
pc = project_config()
pc["component_build_config"] = {"build_system": "gradle"}
pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_boto3_client = mocker.patch("boto3.client")
mock_subprocess_run = mocker.patch("subprocess.run")
mock_yaml_dump = mocker.patch("yaml.dump")
pc = mock_get_proj_config.return_value
with patch("builtins.open", mock_open()) as mock_file:
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert (
"Could not find artifact with URI"
" 's3://DOC-EXAMPLE-BUCKET/artifacts/com.example.HelloWorld/1.0.0/hello_world.py' on s3 or inside"
" the build folders."
in e.value.args[0]
)
assert not mock_file.called
assert mock_yaml_dump.call_count == 0
mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["gradle", "build"]) # called gradle build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
assert mock_archive_dir.call_count == 0 # Archive never called in gradle
assert mock_boto3_client.call_count == 1
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
def test_build_run_default_exception(mocker, rglob_build_file):
mock_create_gg_build_directories = mocker.patch.object(BuildCommand, "create_gg_build_directories")
mock_default_build_component = mocker.patch.object(
BuildCommand, "default_build_component", side_effect=Exception("error in default_build_component")
)
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=project_config(),
)
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
)
mock_subprocess_run = mocker.patch("subprocess.run")
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert "error in default_build_component" in e.value.args[0]
assert mock_get_proj_config.called
assert mock_get_supported_component_builds.called
mock_create_gg_build_directories.assert_called_once()
mock_default_build_component.assert_called_once()
assert not mock_subprocess_run.called
def test_default_build_component_error_run_build_command(mocker, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_run_build_command = mocker.patch.object(
BuildCommand, "run_build_command", side_effect=Error("err in run_build_command")
)
mock_find_artifacts_and_update_uri = mocker.patch.object(BuildCommand, "find_artifacts_and_update_uri")
mock_create_build_recipe_file = mocker.patch.object(BuildCommand, "create_build_recipe_file")
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=project_config(),
)
mock_get_supported_component_builds = mocker.patch(
"gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
)
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert error_messages.BUILD_FAILED in e.value.args[0]
mock_run_build_command.assert_called_once()
assert not mock_find_artifacts_and_update_uri.called
assert not mock_create_build_recipe_file.called
assert mock_get_supported_component_builds.called
assert mock_clean_dir.call_count == 1
assert mock_create_dir.call_count == 2
assert mock_get_proj_config.call_count == 1
def test_build_run_custom(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
pc = project_config()
pc["component_build_config"] = {"build_system": "custom", "custom_build_command": ["some-command"]}
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=False)
mock_is_artifact_in_s3 = mocker.patch.object(BuildCommand, "is_artifact_in_s3", return_value=True)
mock_boto3_client = mocker.patch("boto3.client")
mock_subprocess_run = mocker.patch("subprocess.run")
mock_yaml_dump = mocker.patch("yaml.dump")
pc = mock_get_proj_config.return_value
with patch("builtins.open", mock_open()) as mock_file:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
assert not mock_file.called
assert mock_yaml_dump.call_count == 0
mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["some-command"]) # called the custom build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
assert mock_is_artifact_in_build.call_count == 0 # only one artifact in project_config. Not available in build
assert mock_is_artifact_in_s3.call_count == 0 # only one artifact in project_config. Not available in s3
assert mock_boto3_client.call_count == 0
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
def test_build_run_default_gradle_yaml_artifact_found_build(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
pc = project_config()
pc["component_build_config"] = {"build_system": "gradle"}
pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_boto3_client = mocker.patch("boto3.client")
mock_subprocess_run = mocker.patch("subprocess.run")
mock_yaml_dump = mocker.patch("yaml.dump")
pc = mock_get_proj_config.return_value
mocker.patch("pathlib.Path.is_file", return_value=True)
mock_copy_file = mocker.patch("shutil.copy", return_value=None)
mock_exists = mocker.patch("pathlib.Path.exists", return_value=True)
file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
with patch("builtins.open", mock_open()) as mock_file:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
mock_file.assert_any_call(file_name, "w")
assert mock_yaml_dump.call_count == 1 # the recipe file is written once
mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["gradle", "build"]) # called gradle build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
assert mock_archive_dir.call_count == 0 # Archive never called in gradle
assert mock_boto3_client.call_count == 0 # artifacts found in s3
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
assert mock_copy_file.call_count == 1
assert mock_exists.called
def test_build_run_default_gradle_yaml_error_creating_recipe(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
pc = project_config()
pc["component_build_config"] = {"build_system": "gradle"}
pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_boto3_client = mocker.patch("boto3.client")
mock_subprocess_run = mocker.patch("subprocess.run")
mock_yaml_dump = mocker.patch("yaml.dump", side_effect=Exception("writing failed"))
pc = mock_get_proj_config.return_value
mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
with patch("builtins.open", mock_open()) as mock_file:
with pytest.raises(Exception) as e:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
mock_file.assert_any_call(file_name, "w")
assert mock_yaml_dump.call_count == 1
assert "Failed to create build recipe file at" in e.value.args[0]
mock_get_proj_config.assert_called_once()
mock_subprocess_run.assert_called_with(["gradle", "build"]) # called gradle build command
assert mock_copy_dir.call_count == 0 # No copying directories
assert supported_build_system.call_count == 1
assert mock_is_artifact_in_build.call_count == 1
assert mock_archive_dir.call_count == 0 # Archive never called in gradle
assert mock_boto3_client.call_count == 0 # artifacts found in s3
assert mock_clean_dir.call_count == 1 # clean greengrass-build
assert mock_create_dir.call_count == 2 # create gg directories
def project_config():
return {
"component_name": "component_name",
"component_build_config": {"build_system": "zip"},
"component_version": "1.0.0",
"component_author": "abc",
"bucket": "default",
"region": "us-east-1",
"gg_build_directory": Path("/src/GDK-CLI-Internal/greengrass-build"),
"gg_build_artifacts_dir": Path("/src/GDK-CLI-Internal/greengrass-build/artifacts"),
"gg_build_recipes_dir": Path("/src/GDK-CLI-Internal/greengrass-build/recipes"),
"gg_build_component_artifacts_dir": Path("/src/GDK-CLI-Internal/greengrass-build/artifacts/component_name/1.0.0"),
"component_recipe_file": Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/valid_component_recipe.json"),
"parsed_component_recipe": {
"RecipeFormatVersion": "2020-01-25",
"ComponentName": "com.example.HelloWorld",
"ComponentVersion": "1.0.0",
"ComponentDescription": "My first Greengrass component.",
"ComponentPublisher": "Amazon",
"ComponentConfiguration": {"DefaultConfiguration": {"Message": "world"}},
"Manifests": [
{
"Platform": {"os": "linux"},
"Lifecycle": {"Run": "python3 -u {artifacts:path}/hello_world.py '{configuration:/Message}'"},
"Artifacts": [{"URI": "s3://DOC-EXAMPLE-BUCKET/artifacts/com.example.HelloWorld/1.0.0/hello_world.py"}],
}
],
},
}
|
1638735
|
from django.db import models
from model_clone import CloneMixin
class CompanyDepot(CloneMixin, models.Model):
name = models.CharField(max_length=255)
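# Hypothetical usage sketch (assumes a configured Django project with django-model-clone
# installed); make_clone is the CloneMixin entry point, the attrs override is illustrative:
# depot = CompanyDepot.objects.create(name="Main depot")
# copy = depot.make_clone(attrs={"name": "Main depot (copy)"})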
|
1638745
|
from django.conf.urls import url
from api.licenses import views
app_name = 'osf'
urlpatterns = [
url(r'^$', views.LicenseList.as_view(), name=views.LicenseList.view_name),
url(r'^(?P<license_id>\w+)/$', views.LicenseDetail.as_view(), name=views.LicenseDetail.view_name),
]
|
1638784
|
import os
import shutil
def rmgeneric(path, __func__):
try:
__func__(path)
#print 'Removed ', path
return 1
except OSError as e:
print('Could not remove {0}, {1}'.format(path,e))
return 0
def rmfiles(path, ext = None, rmcache = True):
if not os.path.isdir(path):
return 0
trem = 0
tall = 0
files = os.listdir(path)
for f in files:
fullpath = os.path.join(path, f)
if os.path.isfile(fullpath):
sf = f.split('.')
if len(sf) == 2:
if ext == None or sf[1] == ext:
tall += 1
trem += rmgeneric(fullpath, os.remove)
elif f == '__pycache__' and rmcache:
shutil.rmtree(fullpath)
tall += 1
elif os.path.isdir(fullpath):
r,ra = rmfiles(fullpath, ext)
trem += r
tall += ra
return trem, tall
if __name__ == '__main__':
path = os.curdir
removed, allfiles = rmfiles(path,'pyc')
print('removed {0} pyc files out of {1}'.format(removed, allfiles))
|
1638786
|
import socket
import sys
import rlcompleter
import readline
def run(host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
print('Connected to %s:%d' % (host, port))
except OSError:
print('Unable to connect to %s:%d' % (host, port))
exit(1)
print('== Welcome to StellarSQL Client! ==')
client = Client()
while client.check_live():
line = input('StellarSQL> ')
message = client.parse(line)
if message is not None:
s.send(message.encode())
else:
continue
data = s.recv(512)
print(data.decode())
s.close()
class Client():
"""
username||database||query||key
"""
def __init__(self):
self._user = ""
self._database = ""
self._query = ""
self._key = ""
self._is_live = True
def _set_user(self, name):
self._user = name
print("user: %s" % self._user)
def _use_database(self, name):
self._database = name
print("database: %s" % self._database)
def _create_user(self, name, key):
self._user = name
self._key = key
return ('{0}||||||{1}\n').format(self._user, self._key)
def _create_database(self, db_name):
self._database = db_name
return ('{0}||||create database {1};\n').format(self._user, self._database)
def _send_query(self, query):
if self._user == "":
print('Please set or create user!')
return None
if self._database == "":
print('Please use or create database!')
return None
self._query = query
return ('{0}||{1}||{2};\n').format(self._user, self._database, self._query)
def check_live(self):
return self._is_live
def parse(self, line):
tokens = line.split()
try:
# create user
# create database
if tokens[0] == 'create':
if tokens[1] == 'user':
return self._create_user(tokens[2], tokens[3])
elif tokens[1] == 'database':
return self._create_database(tokens[2])
else:
return self._send_query(line)
# set user
elif tokens[0] == 'set' and tokens[1] == 'user':
user = tokens[2]
self._set_user(user)
# use database
elif tokens[0] == 'use':
db = tokens[1]
return self._use_database(db)
# quit
elif tokens[0] == 'q' or tokens[0] == 'exit':
self._is_live = False
elif tokens[0] == 'h' or tokens[0] == 'help':
print('create user <username> <key>')
print('set user <username>')
print('create database <db_name>')
print('use <db_name>')
print('<query> (ex: select a1 from t1)')
# use database
else:
return self._send_query(line)
except:
print('Syntax Error! Enter `h` to see commands.')
return None
return None
if __name__ == '__main__':
host = '127.0.0.1'
port = 23333
readline.parse_and_bind("tab: complete")
if len(sys.argv) == 3:
host = sys.argv[1]
port = int(sys.argv[2])
elif len(sys.argv) == 1:
pass
else:
print('run: client.py [host] [port]')
exit(1)
run(host, port)
|
1638809
|
from prettytable import PrettyTable
import pandas as pd
from .column import Column, ColumnSet
from .query_templates import query_templates
class Table(object):
"""
A Table is an in-memory reference to a table in a database. You can use it to get more info
about the columns, schema, etc. of a table and you can also use it to execute queries.
"""
def __init__(self, con, query_templates, schema, name, cols, keys_per_column, foreign_keys=None, ref_keys=None):
self.schema = schema
self.name = name
self._con = con
self._cur = con.cursor()
self._query_templates = query_templates
self.foreign_keys = []
self.ref_keys = []
self.keys_per_column = keys_per_column
self._columns = cols
for col in cols:
attr = col.name
if attr in ("name", "con", "count"):
attr = self.name + "_" + col.name
setattr(self, attr, col)
# ToDo: factor out common logic below
# load foreign keys if not provided
if not isinstance(foreign_keys, list):
self._cur.execute(self._query_templates['system']['foreign_keys_for_table'].format(table=self.name,
table_schema=self.schema))
foreign_keys = self._cur
# build columns from the foreign keys metadata we have
for (column_name, foreign_table_schema, foreign_table, foreign_column) in foreign_keys:
col = getattr(self, column_name)
foreign_key = Column(con, query_templates, foreign_table_schema, foreign_table, foreign_column, col.type, self.keys_per_column)
self.foreign_keys.append(foreign_key)
col.foreign_keys.append(foreign_key)
setattr(self, column_name, col)
# store the foreign keys as a special group of columns
self.foreign_keys = ColumnSet(self.foreign_keys)
# load ref keys if not provided
if not isinstance(ref_keys, list):
self._cur.execute(self._query_templates['system']['ref_keys_for_table'].format(table=self.name,
table_schema=self.schema))
ref_keys = self._cur
# build columns for the ref key metadata we have
for (column_name, ref_schema, ref_table, ref_column) in ref_keys:
col = getattr(self, column_name)
ref_key = Column(con, query_templates, ref_schema, ref_table, ref_column, col.type, self.keys_per_column)
self.ref_keys.append(ref_key)
col.ref_keys.append(ref_key)
setattr(self, column_name, col)
# store ref keys as a special group of columns
self.ref_keys = ColumnSet(self.ref_keys)
def _tablify(self):
tbl = PrettyTable(["Column", "Type", "Foreign Keys", "Reference Keys"])
tbl.align["Column"] = "l"
tbl.align["Type"] = "l"
tbl.align["Foreign Keys"] = "l"
tbl.align["Reference Keys"] = "l"
for col in self._columns:
tbl.add_row([col.name, col.type, col._str_foreign_keys(), col._str_ref_keys()])
return tbl
def __repr__(self):
tbl = str(self._tablify())
r = tbl.split('\n')[0]
brk = "+" + "-" * (len(r) - 2) + "+"
title = "|" + self.name.center(len(r) - 2) + "|"
return brk + "\n" + title + "\n" + tbl
def __str__(self):
return "Table({0})<{1}>".format(self.name, self.__hash__())
def _repr_html_(self):
return self._tablify().get_html_string()
def _format_columns(self, columns):
if len(columns) == 0:
return "*"
if self._query_templates['dbtype'] == "postgres":
columns = ['"%s"' % column for column in columns]
return ", ".join(columns)
def select(self, *args):
"""
Returns DataFrame of table with arguments selected as columns. This is
executing:
SELECT
<name of column 1>
, <name of column 2>
, <name of column 3>
FROM
<name_of_the_table>
Parameters
----------
*args: str
columns to select
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.select("Name")[:1].Name
0 For Those About To Rock (We Salute You)
Name: Name, dtype: object
# select name from the Track table
db.tables.Track.select("Name")
Name
0 For Those About To Rock (We Salute You)
1 Balls to the Wall
2 Fast As a Shark
3 Restless and Wild
4 Princess of the Dawn
5 Put The Finger On You
6 Let's Get It Up
7 Inject The Venom
8 Snowballed
9 Evil Walks
...
# select name & composer from the Track table
>>> df = db.tables.Track.select("Name", "Composer")
"""
q = self._query_templates['table']['select'].format(columns=self._format_columns(args), schema=self.schema,
table=self.name)
return pd.read_sql(q, self._con)
def head(self, n=6):
"""
Returns first n values of your table as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
LIMIT <n>
Parameters
----------
n: int
number of rows to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.count
3503
-= Not in doctest as output is hard to predict
# select name from the Track table
db.tables.Track.head()
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
3 4 Restless and Wild 3 2
4 5 Princess of the Dawn 3 2
5 6 Put The Finger On You 1 1
GenreId Composer Milliseconds \
0 1 <NAME>, <NAME>, <NAME> 343719
1 1 None 342562
2 1 <NAME>, <NAME>, <NAME> & <NAME>... 230619
3 1 <NAME>, <NAME>, <NAME>, U. D... 252051
4 1 Deaffy & R.A. Smith-Diesel 375418
5 1 <NAME>, <NAME>, <NAME> 205662
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
3 4331779 0.99
4 6290521 0.99
5 6713451 0.99
db.tables.Track.head(1)
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
GenreId Composer Milliseconds Bytes \
0 1 <NAME>, <NAME>, <NAME> 343719 11170334
UnitPrice
0 0.99
"""
q = self._query_templates['table']['head'].format(schema=self.schema,
table=self.name, n=n)
return pd.read_sql(q, self._con)
def all(self):
"""
Returns entire table as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> len(db.tables.Track.all())
3503
>>> df = db.tables.Track.all()
"""
q = self._query_templates['table']['all'].format(schema=self.schema,
table=self.name)
return pd.read_sql(q, self._con)
def unique(self, *args):
"""
Returns all unique values as a DataFrame. This is executing:
SELECT
DISTINCT
<name_of_the_column_1>
, <name_of_the_column_2>
, <name_of_the_column_3>
...
FROM
<name_of_the_table>
Parameters
----------
*args: columns as strings
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.unique("GenreId")
GenreId
0 1
1 2
2 3
3 4
4 5
5 6
6 7
7 8
8 9
9 10
10 11
11 12
12 13
13 14
14 15
15 16
16 17
17 18
18 19
19 20
20 21
21 22
22 23
23 24
24 25
>>> len(db.tables.Track.unique("GenreId", "MediaTypeId"))
38
"""
q = self._query_templates['table']['unique'].format(columns=self._format_columns(args), schema=self.schema,
table=self.name)
return pd.read_sql(q, self._con)
def sample(self, n=10):
"""
Returns random sample of n rows as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
ORDER BY
RANDOM()
LIMIT <n>
Parameters
----------
n: int
number of rows to sample
Examples
--------
from db import DemoDB
db = DemoDB()
Not in doctest : can't predict sample
db.tables.Track.sample(10)
TrackId Name AlbumId \
0 274 <NAME> 25
1 1971 Girls, Girls, Girls 162
2 843 Otay 68
3 3498 Concerto for Violin, Strings and Continuo in G... 342
4 3004 Pride (In The Name Of Love) 238
5 2938 Beautiful Day 233
6 2023 O Braco Da Minha Guitarra 165
7 1920 Caxanga 158
8 3037 The Wanderer 240
9 1487 Third Stone From The Sun 120
MediaTypeId GenreId Composer \
0 1 7 None
1 1 3 <NAME>/<NAME>/<NAME>
2 1 2 <NAME>, <NAME>, <NAME> a...
3 4 24 <NAME>
4 1 1 U2
5 1 1 <NAME>, Bono, <NAME>, The Edge
6 1 1 None
7 1 7 <NAME>, <NAME>
8 1 1 U2; Bono
9 1 1 <NAME>
Milliseconds Bytes UnitPrice
0 271856 9095410 0.99
1 270288 8874814 0.99
2 423653 14176083 0.99
3 493573 16454937 0.99
4 230243 7549085 0.99
5 248163 8056723 0.99
6 258351 8469531 0.99
7 245551 8144179 0.99
8 283951 9258717 0.99
9 404453 13186975 0.99
"""
q = self._query_templates['table']['sample'].format(schema=self.schema,
table=self.name, n=n)
return pd.read_sql(q, self._con)
@property
def count(self):
"""Return total of rows from table."""
return len(self.all())
def to_dict(self):
"""Serialize representation of the table for local caching."""
return {'schema': self.schema, 'name': self.name, 'columns': [col.to_dict() for col in self._columns],
'foreign_keys': self.foreign_keys.to_dict(), 'ref_keys': self.ref_keys.to_dict()}
class TableSet(object):
"""
Set of Tables. Used for displaying search results in terminal/ipython notebook.
"""
def __init__(self, tables):
self.pretty_tbl_cols = ["Table", "Columns"]
self.use_schema = False
for tbl in tables:
setattr(self, tbl.name, tbl)
if tbl.schema and not self.use_schema:
self.use_schema = True
self.pretty_tbl_cols.insert(0, "Schema")
self.tables = tables
def __getitem__(self, i):
return self.tables[i]
def _tablify(self):
tbl = PrettyTable(self.pretty_tbl_cols)
for col in self.pretty_tbl_cols:
tbl.align[col] = "l"
for table in self.tables:
column_names = [col.name for col in table._columns]
column_names = ", ".join(column_names)
pretty_column_names = ""
for i in range(0, len(column_names), 80):
pretty_column_names += column_names[i:(i + 80)] + "\n"
pretty_column_names = pretty_column_names.strip()
row_data = [table.name, pretty_column_names]
if self.use_schema:
row_data.insert(0, table.schema)
tbl.add_row(row_data)
return tbl
def __repr__(self):
tbl = str(self._tablify())
return tbl
def _repr_html_(self):
return self._tablify().get_html_string()
def __len__(self):
return len(self.tables)
def to_dict(self):
"""Serialize representation of the tableset for local caching."""
return {'tables': [table.to_dict() for table in self.tables]}
|
1638818
|
import sys
import time
import amanobot
from amanobot.loop import MessageLoop
from amanobot.namedtuple import InlineQueryResultArticle, InputTextMessageContent
def on_inline_query(msg):
def compute():
query_id, from_id, query_string = amanobot.glance(msg, flavor='inline_query')
print('Inline Query:', query_id, from_id, query_string)
articles = [InlineQueryResultArticle(
id='abc',
title=query_string,
input_message_content=InputTextMessageContent(
message_text=query_string
)
)]
return articles
answerer.answer(msg, compute)
def on_chosen_inline_result(msg):
result_id, from_id, query_string = amanobot.glance(msg, flavor='chosen_inline_result')
print('Chosen Inline Result:', result_id, from_id, query_string)
TOKEN = sys.argv[1] # get token from command-line
bot = amanobot.Bot(TOKEN)
answerer = amanobot.helper.Answerer(bot)
MessageLoop(bot, {'inline_query': on_inline_query,
'chosen_inline_result': on_chosen_inline_result}).run_as_thread()
while 1:
time.sleep(10)
|
1638832
|
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support, classification_report
from seqeval.metrics import (accuracy_score as seqeval_accuracy_score,
classification_report as seqeval_classification_report,
f1_score as seqeval_f1_score,
precision_score as seqeval_precision_score,
recall_score as seqeval_recall_score)
def sk_classification_metrics(pred, pred_labs=False):
result = classification_metrics(pred, pred_labs)
labels = pred.label_ids
preds = pred.predictions if pred_labs else pred.predictions.argmax(-1)
result['classification_report'] = classification_report(labels, preds, digits=4)
return result
def classification_metrics(pred, pred_labs=False):
labels = pred.label_ids
preds = pred.predictions if pred_labs else pred.predictions.argmax(-1)
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, average="macro")
precision_micro, recall_micro, f1_micro, _ = precision_recall_fscore_support(labels, preds, average="micro")
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1_micro': f1_micro,
'precision_micro': precision_micro,
'recall_micro': recall_micro,
'f1_macro': f1_macro,
'precision_macro': precision_macro,
'recall_macro': recall_macro,
'nb_samples': len(labels)
}
def seqeval_classification_metrics(pred):
from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
labels = pred.label_ids
preds = pred.predictions
precision_macro = precision_score(labels, preds, average='macro')
recall_macro = recall_score(labels, preds, average='macro')
f1_macro = f1_score(labels, preds, average='macro')
precision_micro = precision_score(labels, preds, average='micro')
recall_micro = recall_score(labels, preds, average='micro')
f1_micro = f1_score(labels, preds, average='micro')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1_micro': f1_micro,
'precision_micro': precision_micro,
'recall_micro': recall_micro,
'f1_macro': f1_macro,
'precision_macro': precision_macro,
'recall_macro': recall_macro,
'nb_samples': len(labels),
'classification_report': classification_report(labels, preds, digits=4)
}
def _compute_best_threshold(targets, probs):
f1s = []
for threshold in range(1,100):
preds = (probs > (threshold / 100)).astype(int)
f1s.append((
threshold/100,
f1_score(targets,
preds,
average='binary')
))
f1s_df = pd.DataFrame(f1s).sort_values(1,ascending=False).reset_index(drop=True)
f1s_df.columns = ['threshold_label','f1_label']
return f1s_df.threshold_label[0], f1s_df.f1_label[0]
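# Worked example of the grid search above, with made-up data: the helper sweeps
# thresholds 0.01..0.99 and returns the (threshold, f1) pair with the best binary F1.
# import numpy as np
# _compute_best_threshold(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8]))
# -> a threshold in [0.10, 0.34] together with its F1 of 0.8 for this toy data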
def _select_best_thresholds(targets, probs, n_labels):
best_thresholds = dict()
for i in range(0, n_labels):
best_thresholds[f'label-{i}'] = _compute_best_threshold(targets[:,i], probs[:,i])
return best_thresholds
def sigmoid(x):
return 1/(1 + np.exp(-x))
def multilabel_classification_metrics(pred, n_labels):
labels = pred.label_ids
logits = pred.predictions
probs = sigmoid(logits)
best_threshold_mapping = _select_best_thresholds(labels, probs, n_labels)
best_thresholds = [ v[0] for k,v in best_threshold_mapping.items() ]
preds = np.array(probs > best_thresholds)
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, average='macro')
precision_micro, recall_micro, f1_micro, _ = precision_recall_fscore_support(labels, preds, average='micro')
acc = accuracy_score(labels, preds)
accuracy_micro = (labels == preds).mean()
return {
'accuracy': acc,
'accuracy_micro': accuracy_micro,
'f1_micro': f1_micro,
'precision_micro': precision_micro,
'recall_micro': recall_micro,
'f1_macro': f1_macro,
'precision_macro': precision_macro,
'recall_macro': recall_macro,
'nb_samples': len(labels)
}
|
1638896
|
from PyInstaller.utils.hooks import collect_submodules
hiddenimports = collect_submodules("brainpy._c")
|
1638937
|
from opentsdb.protocols.http_connect import HttpTSDBConnect
from opentsdb.protocols.telnet_connect import TelnetTSDBConnect
from opentsdb.push_thread import HTTPPushThread, TelnetPushThread
from opentsdb.exceptions import UnknownTSDBConnectProtocol
__all__ = ['HttpTSDBConnect', 'TelnetTSDBConnect', 'TSDBConnectProtocols']
class TSDBConnectProtocols:
HTTP = 'HTTP'
TELNET = 'TELNET'
@classmethod
def get_connect(cls, protocol: str, *args, **kwargs):
if protocol == cls.HTTP:
return HttpTSDBConnect(*args, **kwargs)
elif protocol == cls.TELNET:
return TelnetTSDBConnect(*args, **kwargs)
raise UnknownTSDBConnectProtocol(protocol)
@classmethod
def get_push_thread(cls, protocol, *args, **kwargs):
if protocol == cls.HTTP:
return HTTPPushThread(*args, **kwargs)
elif protocol == cls.TELNET:
return TelnetPushThread(*args, **kwargs)
raise UnknownTSDBConnectProtocol(protocol)
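# Hypothetical usage sketch; the positional connection arguments shown are assumptions,
# since *args/**kwargs are forwarded verbatim to the protocol-specific connect class:
# conn = TSDBConnectProtocols.get_connect(TSDBConnectProtocols.HTTP, 'localhost', 4242)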
|
1638954
|
def pytest_addoption(parser):
parser.addoption("--exe", action="store", default="py.exe",
help="Path to the py.exe program.")
|
1638971
|
import torchvision
from torchexpo.modules import ImageClassificationModule
def resnext50_32x4d():
"""ResNext-50 32x4d Model pre-trained on ImageNet"""
model = torchvision.models.resnext50_32x4d(pretrained=True)
obj = ImageClassificationModule(model, "ResNext-50 32x4d", model_example="default")
return obj
def resnext101_32x8d():
"""ResNext-101 32x8d Model pre-trained on ImageNet"""
model = torchvision.models.resnext101_32x8d(pretrained=True)
obj = ImageClassificationModule(model, "ResNext-101 32x8d", model_example="default")
return obj
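# Hypothetical usage sketch (the first call downloads pre-trained ImageNet weights):
# module = resnext50_32x4d()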
|
1638979
|
import WeiBanAPI
import json
import time # time.sleep for delays
import os # for cross-platform file paths
import random
tenantCode = '61050002' # UESTC tenant ID
def main():
# Display the license
licenseFile = open('.' + os.sep + 'LICENSE', encoding='utf-8')
print(licenseFile.read())
licenseFile.close()
# Log in
# Pad with an empty cookie
cookie = ''
loginResponse = WeiBanAPI.qrLogin()
try:
print('Login succeeded, userName: ' + loginResponse['data']['userName'])
time.sleep(2)
except BaseException:
print('Login failed')
print(loginResponse) # TODO: this loginResponse access does not account for network errors etc.
exit(0)
# Request, parse and print the user info
try:
print('Requesting user info')
stuInfoResponse = WeiBanAPI.getStuInfo(loginResponse['data']['userId'],
tenantCode,
cookie)
print('User info: ' + stuInfoResponse['data']['realName'] + '\n'
+ stuInfoResponse['data']['orgName']
+ stuInfoResponse['data']['specialtyName']
)
time.sleep(2)
except BaseException:
print('Failed to parse user info, trying to continue; watch for runtime errors')
# Request the course completion progress
try:
getProgressResponse = WeiBanAPI.getProgress(loginResponse['data']['preUserProjectId'],
tenantCode,
cookie)
print('Total courses: ' + str(getProgressResponse['data']['requiredNum']) + '\n'
+ 'Finished courses: ' +
str(getProgressResponse['data']['requiredFinishedNum']) + '\n'
+ 'End time: ' + str(getProgressResponse['data']['endTime']) + '\n'
+ 'Days left: ' + str(getProgressResponse['data']['lastDays'])
)
time.sleep(2)
except BaseException:
print('Failed to parse course progress, trying to continue; watch for runtime errors')
# Request the course list
try:
getListCategoryResponse = WeiBanAPI.getListCategory(loginResponse['data']['preUserProjectId'],
'3',
tenantCode,
loginResponse['data']['userId'],
loginResponse['data']['token'])
time.sleep(2)
except BaseException:
print('Failed to request the course category list')
exit(1) # getListCategoryResponse would be unbound below
print('Parsing the course list and sending completion requests')
for Category in getListCategoryResponse['data']:
print('\n---- Chapter code: ' + Category['categoryCode'] +
' Chapter name: ' + Category['categoryName'])
try:
getListCourseResponse = WeiBanAPI.getListCourse(loginResponse['data']['preUserProjectId'],
'3',
Category['categoryCode'],
'',
loginResponse['data']['userId'],
tenantCode,
loginResponse['data']['token'])
time.sleep(2)
except BaseException:
print('Failed to request the course list')
continue # skip this chapter instead of reusing stale data
for j in getListCourseResponse['data']:
print('Course name: ' + j['resourceName'] +
'\nuserCourseId: ' + j['userCourseId'])
if (j['finished'] == 1):
print('Already finished')
else:
print('Sending completion request')
WeiBanAPI.doStudy(
loginResponse['data']['preUserProjectId'], j['resourceId'], tenantCode)
WeiBanAPI.finishCourse(j['userCourseId'], tenantCode, cookie)
delayInt = WeiBanAPI.getRandomTime()
print('\nRandom delay: ' + str(delayInt))
time.sleep(delayInt)
if __name__ == '__main__':
main()
|
1638982
|
from datetime import timedelta
from django.db import models
from timeseries.utils import TimeSeriesModel, TimeSeriesQuerySet
class AdQuerySet(TimeSeriesQuerySet):
def update_rawdata(self, force=False):
return self.update_timeseries(
'rawdata', ad_data_collector, force=force
)
def update_reports(self, force=False):
return self.update_timeseries(
'monthlyreports', report_data_collector, force=force
)
class AdManager(models.Manager.from_queryset(AdQuerySet)):
pass
class Ad(models.Model):
objects = AdManager()
class RawAdData(TimeSeriesModel):
# update daily N.B integers in seconds also work
TIMESERIES_INTERVAL = timedelta(days=1)
NOT_AVAILABLE = -1
ad = models.ForeignKey(Ad, related_name='rawdata')
views = models.BigIntegerField(default=NOT_AVAILABLE)
clicks = models.BigIntegerField(default=NOT_AVAILABLE)
class MonthlyAdReport(TimeSeriesModel):
TIMESERIES_INTERVAL = timedelta(days=28)
ad = models.ForeignKey(Ad, related_name='monthlyreports')
avg_views = models.FloatField()
avg_clicks = models.FloatField()
def fake_data(obj):
return {
'views': obj.id,
'clicks': obj.id,
'ad': obj
}
def fake_report(obj):
return {
'avg_views': obj.id,
'avg_clicks': obj.id,
'ad': obj
}
def ad_data_collector(queryset):
"""
should return an iterable that yields dictionaries of data
needed to successfully create a RawAdData instance
"""
for ad in queryset:
yield fake_data(ad)
def report_data_collector(queryset):
"""
should return an iterable that yields dictionaries of data
needed to successfully create a MonthlyAdReport instance
"""
for ad in queryset:
yield fake_report(ad)
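# Hypothetical usage sketch (assumes migrations for these models exist); update_timeseries
# comes from TimeSeriesQuerySet and is driven by the collector functions defined above:
# Ad.objects.all().update_rawdata()
# Ad.objects.filter(id__lt=10).update_reports(force=True)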
|
1638998
|
import os
import shutil
import sys
import subprocess
from uranium import current_build
from uranium.rules import rule, Once
current_build.packages.install("uranium-plus[vscode]")
import uranium_plus
current_build.config.update(
{
"uranium-plus": {
"module": "transmute_core",
"publish": {"additional_args": ["--release"]},
"test": {"packages": ["mock", "pytest-benchmark", "flask", "tornado"]},
}
}
)
if sys.version_info > (3, 4):
current_build.packages.install("aiohttp")
current_build.packages.install("pytest-aiohttp")
if sys.version_info > (3, 6):
current_build.packages.install("pydantic")
uranium_plus.bootstrap(current_build)
current_build.tasks.prepend("install_swagger_ui", "main")
current_build.tasks.prepend("clean_and_install_swagger_ui", "publish")
def dev_docs(build):
build.packages.install("Babel")
build.packages.install("Sphinx")
build.packages.install("Sphinx-autobuild")
build.packages.install("sphinx_rtd_theme")
build.packages.install("sphinxcontrib-programoutput")
return build.executables.run(
["sphinx-autobuild", "docs", os.path.join("docs", "_build")]
+ build.options.args
)[0]
@rule(Once())
def install_swagger_ui(build):
clean_and_install_swagger_ui(build)
def clean_and_install_swagger_ui(build):
import io
import shutil
import tarfile
version = "3.20.9"
PATH = "https://github.com/swagger-api/swagger-ui/archive/v{0}.tar.gz".format(
version
)
TARGET_PATH = os.path.join(build.root, "transmute_core", "swagger", "static")
EXTRACTED_TOP_LEVEL_DIRNAME = "swagger-ui-{0}".format(version)
build.packages.install("requests")
import requests
r = requests.get(PATH, stream=True)
stream = io.BytesIO()
stream.write(r.content)
stream.seek(0)
tf = tarfile.TarFile.open(fileobj=stream)
if os.path.exists(TARGET_PATH):
shutil.rmtree(TARGET_PATH)
tf.extractall(path=TARGET_PATH)
# move the files under the top level directory.
for name in os.listdir(
os.path.join(TARGET_PATH, EXTRACTED_TOP_LEVEL_DIRNAME, "dist")
):
shutil.move(
os.path.join(TARGET_PATH, EXTRACTED_TOP_LEVEL_DIRNAME, "dist", name),
TARGET_PATH,
)
|
1639043
|
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
(x, y), _ = datasets.fashion_mnist.load_data()
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.take(20000).shuffle(20000).batch(100)
return ds
def compute_loss(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
def compute_accuracy(logits, labels):
predictions = tf.argmax(logits, axis=1)
return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32))
def train_one_step(model, optimizer, x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = compute_loss(logits, y)
# compute gradient
grads = tape.gradient(loss, model.trainable_variables)
# update to weights
optimizer.apply_gradients(zip(grads, model.trainable_variables))
accuracy = compute_accuracy(logits, y)
# loss and accuracy are scalar tensors
return loss, accuracy
def train(epoch, model, optimizer):
train_ds = mnist_dataset()
loss = 0.0
accuracy = 0.0
for step, (x, y) in enumerate(train_ds):
loss, accuracy = train_one_step(model, optimizer, x, y)
if step%500==0:
print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy())
return loss, accuracy
def main():
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # or any {'0', '1', '2'}
train_dataset = mnist_dataset()
model = keras.Sequential([
layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
layers.Dense(200, activation='relu'),
layers.Dense(200, activation='relu'),
layers.Dense(10)])
optimizer = optimizers.Adam()
for epoch in range(20):
loss, accuracy = train(epoch, model, optimizer)
print('Final epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy())
if __name__ == '__main__':
main()
|
1639044
|
import subprocess
# Designed as a final check to make sure your code matches the provided code.
# Note that a randomized test can be rerun manually from the pa4 directory with
# ./trand seed
# and
# ./mytrand seed
max_seed = 1000
success = 0
failure = 0
failures = []
for i in range(max_seed):
print("Running randomized test {i} of {max}...".format(i=i+1, max=max_seed))
r = subprocess.check_output("./trand {i} | sed 1,2d | sed \\$d".format(i=i), shell=True)
myr = subprocess.check_output("./mytrand {i} | sed 1,2d | sed \\$d".format(i=i), shell=True)
if r == myr:
print("Results are the same! test {i} passed!".format(i=i+1))
success += 1
else:
print("Results are different for seed = {i}!".format(i=i+1))
failure += 1
failures.append(1)
print("\nSuccesses: {s}, Failures: {f}".format(s=success, f=failure))
if failure > 0:
print("Failed on seeds: {seeds}".format(seeds=failures))
|
1639048
|
from django.db.utils import IntegrityError
from django.test import TestCase, skipUnlessDBFeature
from ..models import (
Author, Editor, Post,
TestUniqueNullableModel, TestNullableUniqueTogetherModel,
)
@skipUnlessDBFeature('supports_nullable_unique_constraints')
class TestNullableUniqueColumn(TestCase):
def test_multiple_nulls(self):
# Issue #45 (case 1) - after field `x` has had its type changed, the filtered UNIQUE
# INDEX which is implementing the nullable unique constraint should still be correctly
# in place - i.e. allowing multiple NULLs but still enforcing uniqueness of non-NULLs
# Allowed
TestUniqueNullableModel.objects.create(x=None, test_field='randomness')
TestUniqueNullableModel.objects.create(x=None, test_field='doesntmatter')
# Disallowed
TestUniqueNullableModel.objects.create(x="foo", test_field='irrelevant')
with self.assertRaises(IntegrityError):
TestUniqueNullableModel.objects.create(x="foo", test_field='nonsense')
@skipUnlessDBFeature('supports_partially_nullable_unique_constraints')
class TestPartiallyNullableUniqueTogether(TestCase):
def test_partially_nullable(self):
# Check basic behaviour of `unique_together` where at least 1 of the columns is nullable
# It should be possible to have 2 rows both with NULL `alt_editor`
author = Author.objects.create(name="author")
Post.objects.create(title="foo", author=author)
Post.objects.create(title="foo", author=author)
# But `unique_together` is still enforced for non-NULL values
editor = Editor.objects.create(name="editor")
Post.objects.create(title="foo", author=author, alt_editor=editor)
with self.assertRaises(IntegrityError):
Post.objects.create(title="foo", author=author, alt_editor=editor)
def test_after_type_change(self):
# Issue #45 (case 2) - after one of the fields in the `unique_together` has had its
# type changed in a migration, the constraint should still be correctly enforced
# Multiple rows with a=NULL are considered different
TestNullableUniqueTogetherModel.objects.create(a=None, b='bbb', c='ccc')
TestNullableUniqueTogetherModel.objects.create(a=None, b='bbb', c='ccc')
# Uniqueness still enforced for non-NULL values
TestNullableUniqueTogetherModel.objects.create(a='aaa', b='bbb', c='ccc')
with self.assertRaises(IntegrityError):
TestNullableUniqueTogetherModel.objects.create(a='aaa', b='bbb', c='ccc')
|
1639128
|
import unittest
import numpy
from become_yukarin.dataset import dataset
class TestDataset(unittest.TestCase):
def setUp(self):
self.sample_rate = 24000
self.len_time = len_time = 100
self.fft_size = fft_size = 1024
self.order = order = 59
self.dummy_feature = dataset.AcousticFeature(
f0=numpy.arange(len_time).reshape((len_time, -1)),
spectrogram=numpy.arange(len_time * (fft_size // 2 + 1)).reshape((len_time, -1)),
aperiodicity=numpy.arange(len_time * (fft_size // 2 + 1)).reshape((len_time, -1)),
mfcc=numpy.arange(len_time * (order + 1)).reshape((len_time, -1)),
voiced=(numpy.arange(len_time) % 2 == 1).reshape((len_time, -1)),
)
self.feature_sizes = dataset.AcousticFeature.get_sizes(
sampling_rate=self.sample_rate,
order=self.order,
)
def test_encode_decode_feature(self):
encode_feature = dataset.EncodeFeatureProcess(['mfcc'])
decode_feature = dataset.DecodeFeatureProcess(['mfcc'], self.feature_sizes)
e = encode_feature(self.dummy_feature, test=True)
d = decode_feature(e, test=True)
self.assertTrue(numpy.all(self.dummy_feature.mfcc == d.mfcc))
def test_encode_decode_feature2(self):
encode_feature = dataset.EncodeFeatureProcess(['mfcc', 'f0'])
decode_feature = dataset.DecodeFeatureProcess(['mfcc', 'f0'], self.feature_sizes)
e = encode_feature(self.dummy_feature, test=True)
d = decode_feature(e, test=True)
self.assertTrue(numpy.all(self.dummy_feature.mfcc == d.mfcc))
self.assertTrue(numpy.all(self.dummy_feature.f0 == d.f0))
def test_encode_decode_feature3(self):
encode_feature = dataset.EncodeFeatureProcess(['mfcc', 'f0'])
decode_feature = dataset.DecodeFeatureProcess(['mfcc', 'f0'], self.feature_sizes)
e = encode_feature(self.dummy_feature, test=True)
e[0] = numpy.nan
d = decode_feature(e, test=True)
self.assertFalse(numpy.all(self.dummy_feature.mfcc == d.mfcc))
self.assertTrue(numpy.all(self.dummy_feature.f0 == d.f0))
if __name__ == '__main__':
unittest.main()
|
1639146
|
from binaryninja import Architecture
from .x86 import X86Explainer
from .mips import MipsExplainer
from .aarch64 import AArch64Explainer
from .ual import UALExplainer
from .asm6502 import Asm6502Explainer
from .msp430 import MSP430Explainer
from .powerpc import PowerPCExplainer
from .generic import UnavailableExplainer
def explainer_for_architecture(arch: Architecture):
return {
"x86": X86Explainer,
"x86_64": X86Explainer,
"mips": MipsExplainer,
"aarch64": AArch64Explainer,
"arm": UALExplainer,
"thumb2": UALExplainer,
"6502": Asm6502Explainer,
"msp430": MSP430Explainer,
"powerpc": PowerPCExplainer,
}.get(arch.name, UnavailableExplainer)
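# Hypothetical usage sketch (assumes a running Binary Ninja environment):
# explainer_cls = explainer_for_architecture(Architecture['x86_64'])  # -> X86Explainer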
|
1639162
|
import tensorflow as tf
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
def spike(x, threshold=1, bias=0, thresholding=0.5, scaling_factor=1, T=255, ext=0, noneloss=False):
#x = tf.math.floordiv(x, v_thr, name=None)
_T = T + ext
bias = tf.math.multiply(bias, _T, name=None)
x = tf.math.add(x, bias, name=None)
x = tf.nn.relu(x)
_t = scaling_factor*threshold
tre = _t*thresholding * (not noneloss)
x = tf.math.add(x,tre, name=None)
x = tf.math.divide(x, _t, name=None)
@tf.custom_gradient
def custom_floor(x):
def grad_fn(dy):
return dy
return tf.floor(x), grad_fn
#x = tf.floor(x,name="Floor")
pred0 = tf.constant(noneloss, dtype=tf.bool)
x = tf.cond(pred0,lambda: x,lambda: custom_floor(x))
pred1 = tf.constant(ext<0, dtype=tf.bool)
x = tf.cond(pred1,lambda: x,lambda: tf.clip_by_value(x,0,_T))
return x
get_custom_objects().update({'spike': Activation(spike)})
class SpikeActivation(Layer):
def __init__(self, timesteps=255, threshold=1,bias=0, thresholding=0.5, scaling_factor=1,
spike_ext=0,noneloss=False,**kwargs):
super(SpikeActivation, self).__init__(**kwargs)
self.spike_ext = int(spike_ext)
self.timesteps = int(timesteps)
self.bias = K.cast_to_floatx(bias)
self.noneloss = noneloss
self.threshold = K.cast_to_floatx(threshold)
self.thresholding = K.cast_to_floatx(thresholding)
self.scaling_factor = K.cast_to_floatx(scaling_factor)
def call(self, inputs):
return spike(inputs,
T=self.timesteps,
threshold=self.threshold,
bias=self.bias,
thresholding=self.thresholding,
ext=self.spike_ext,
noneloss=self.noneloss,
scaling_factor=self.scaling_factor)
def get_config(self):
config = {'timesteps': int(self.timesteps),
'threshold': self.threshold,
'bias': self.bias,
'spike_ext': int(self.spike_ext),
'thresholding': self.thresholding,
'noneloss': self.noneloss,
'scaling_factor': self.scaling_factor}
base_config = super(SpikeActivation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
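# Hypothetical usage sketch inside a Keras model; the layer sizes are illustrative:
# from tensorflow.keras import Sequential
# from tensorflow.keras.layers import Dense
# model = Sequential([Dense(64), SpikeActivation(timesteps=255, threshold=1.0)])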
|
1639186
|
from django.conf.urls import url
from tickets.views import (
edit_ticket,
index,
view_ticket,
delete_attachment,
delete_comment,
delete_ticket,
ticket_status,
edit_comment,
ticket_comment,
admin_tickets_list,
admin_ticket_view,
new_ticket,
)
app_name = "tickets"
urlpatterns = [
url(r"^$", index, name="index"),
url(
r"ticket/edit/(?P<ticket_id>[a-zA-Z0-9_-]+)/$", edit_ticket, name="edit_ticket"
),
url(
r"ticket/view/(?P<ticket_id>[a-zA-Z0-9_-]+)/$", view_ticket, name="view_ticket"
),
url(
r"attachment/delete/(?P<attachment_id>[a-zA-Z0-9_-]+)/$",
delete_attachment,
name="delete_attachment",
),
url(
r"comment/delete/(?P<comment_id>[a-zA-Z0-9_-]+)/$",
delete_comment,
name="delete_comment",
),
url(
r"ticket/delete/(?P<ticket_id>[a-zA-Z0-9_-]+)/$",
delete_ticket,
name="delete_ticket",
),
url(r"status/(?P<ticket_id>[a-zA-Z0-9_-]+)/$", ticket_status, name="ticket_status"),
url(r"comment/edit/$", edit_comment, name="edit_comment"),
url(
r"comment/(?P<ticket_id>[a-zA-Z0-9_-]+)/$",
ticket_comment,
name="ticket_comment",
),
url(r"ticket/list/$", admin_tickets_list, name="admin_tickets_list"),
url(
r"dashboard/ticket-view/(?P<ticket_id>[a-zA-Z0-9_-]+)/$",
admin_ticket_view,
name="admin_ticket_view",
),
url(r"ticket/new/$", new_ticket, name="new_ticket"),
]
|
1639194
|
import backend_path
import unittest
import datetime
import numpy as np
from backend.models import config
class ConfigTest(unittest.TestCase):
def test_get_agency(self):
muni = config.get_agency('muni')
self.assertEqual(muni.id, 'muni')
self.assertEqual(muni.provider, 'nextbus')
self.assertEqual(muni.timezone_id, 'America/Los_Angeles')
self.assertEqual(muni.tz.zone, 'America/Los_Angeles')
self.assertEqual(muni.nextbus_id, 'sf-muni')
portland_sc = config.get_agency('portland-sc')
self.assertEqual(portland_sc.id, 'portland-sc')
self.assertEqual(portland_sc.gtfs_agency_id, 'PSC')
test = config.get_agency('test')
self.assertEqual(test.id, 'test')
with self.assertRaises(FileNotFoundError):
config.get_agency('invalid')
if __name__ == '__main__':
unittest.main()
|
1639203
|
import os
import os.path
from os import listdir
from os.path import join, isdir
import cv2
import numpy as np
from images_utils import get_images_recursively
def imread(path):
img_bgr = cv2.imread(path)
img_rgb = img_bgr[..., ::-1]
# img_hsl = convert_to_hsl(img_rgb)
return img_rgb.astype(np.float64)
def normalize_rgb(image):
return np.array(image) / 127.5 - 1.
def is_normalized(image):
# every channel value must lie within [-1, 1]
for row in image:
for p in row:
if not (-1 <= p[0] <= 1 and -1 <= p[1] <= 1 and -1 <= p[2] <= 1):
return False
return True
def create_folder_is_not_exists(folder):
if not os.path.exists(folder):
os.mkdir(folder)
def convert_to_files(data_set):
images_paths = get_images_recursively('data/' + data_set)
data_set_rgb = data_set + '-rgb'
# data_set_hsl = data_set + '-hsl'
if not os.path.exists('data/' + data_set_rgb):
os.mkdir('data/' + data_set_rgb)
count = 0
for image_path in images_paths:
print('converting {} to RGB'.format(image_path))
rgb_normalized = normalize_rgb(imread(image_path))
np.save('data/' + data_set_rgb + '/' + str(count) + '.npy', rgb_normalized)
count += 1
# if not os.path.exists('data/' + data_set_hsl):
# os.mkdir('data/' + data_set_hsl)
# count = 0
# for image_path in images_paths:
# print('converting {} to HSL'.format(image_path))
# hsl = convert_to_hsl(imread(image_path))
# np.save('data/' + data_set_hsl + '/' + str(count) + '.npy', hsl)
# count += 1
folders = [f for f in listdir('data') if isdir(join('data', f))]
for data_set in folders:
if not data_set.endswith('-rgb') and not data_set.endswith('-hsl'):
convert_to_files(data_set)
|
1639210
|
import sys
import os
import zipfile
import sysconfig
import shutil
import tempfile
import imp
import warnings
class ModuleImporter(object):
def __init__(self):
if not zipfile.is_zipfile(sys.executable):
self.exe_zip = None
return
self.exe_zip = zipfile.ZipFile(sys.executable, 'r')
self.exe_names = self.exe_zip.namelist()
self.ext_suffix = sysconfig.get_config_var('SO')
def find_spec(self, fullname, path, target=None):
if self.exe_zip is None:
return
from importlib.machinery import ModuleSpec
path = self._get_path_in_zip(fullname)
if path is not None:
return ModuleSpec(fullname, self, origin=path)
def find_module(self, fullname, path=None):
if self.exe_zip is None:
return
path = self._get_path_in_zip(fullname)
return self if path else None
def load_module(self, fullname):
if self.exe_zip is None:
return
if fullname in sys.modules:
return sys.modules[fullname]
if sys.version_info[0] == 3:
spec = self.find_spec(fullname, None)
assert spec is not None
path = spec.origin
else:
path = self._get_path_in_zip(fullname)
assert path is not None
so_file = os.path.basename(path)
tmpdir = tempfile.mkdtemp()
tmp_path = os.path.join(tmpdir, so_file)
try:
self._extract_so_file(path, tmp_path)
self._handle_rpath(path, tmp_path)
if sys.version_info[0] == 2:
name = fullname.split('.')[-1]
mod = imp.load_dynamic(name, tmp_path)
else:
from importlib.machinery import ExtensionFileLoader
loader = ExtensionFileLoader(fullname, tmp_path)
spec.origin = tmp_path
mod = loader.create_module(spec)
sys.modules[fullname] = mod
return mod
finally:
try:
shutil.rmtree(tmpdir)
except OSError:
pass
def _extract_so_file(self, src, dst):
# avoid shadowing the arguments and chmod while the file is still open
with self.exe_zip.open(src) as fsrc:
with open(dst, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
os.fchmod(fdst.fileno(), 0o700)
def _get_path_in_zip(self, fullname):
path = '{}{}'.format(fullname.replace('.', '/'), self.ext_suffix)
return path if path in self.exe_names else None
def _handle_rpath(self, zip_path, solib_path, cur_rpath=None):
if sys.platform not in ('linux', 'linux2'):
return
try:
import _exxo_elf
except ImportError:
# for unit tests
from . import _exxo_elf
dst_dir = os.path.dirname(solib_path)
elf = _exxo_elf.readelf(solib_path)
dyntab = elf['dynamic']
rpath = dyntab.get(_exxo_elf.DT_RPATH, [])
if not rpath:
if cur_rpath is None:
return
# if RPATH was specified as a param, we go with this one,
# even if given solib has no RPATH section - this allows
# handling dependencies recursively
rpath = cur_rpath
else:
rpath = rpath[0].decode()
# replace current RPATH with $ORIGIN and copy referenced
# libraries to the same directory as extension module solib
new_rpath = '$ORIGIN'
# TODO: if RPATH is shorter than $ORIGIN all we do is hope
# RPATH is not needed at all
if len(rpath) < len(new_rpath):
warnings.warn("can't overwrite RPATH {} with {}".format(rpath, new_rpath))
return
with open(solib_path, 'rb+') as fp:
fp.seek(elf['rpath_offset'], os.SEEK_SET)
fp.write(new_rpath.encode() + b'\0')
# extract dependencies from zip, if any. put them in the same
# temporary directory
origin = os.path.dirname(zip_path)
rpath = os.path.normpath(rpath.replace('$ORIGIN', origin))
for lib in dyntab.get(_exxo_elf.DT_NEEDED, []):
lib = lib.decode()
path = os.path.normpath('{}/{}'.format(rpath, lib))
if path in self.exe_names:
dst = os.path.join(dst_dir, lib)
self._extract_so_file(path, dst)
# extract dependencies recursively
self._handle_rpath(path, dst, cur_rpath=rpath)
exxo_importer = ModuleImporter()
|
1639221
|
import ipranges
import logging as log
logger = log.getLogger(__name__)
class AddressGroup:
def __init__(self, addrGroup):
self.content = list()
if not isinstance(addrGroup, dict):
raise Exception("AddressGroup must be a dictionary")
if "file" in addrGroup:
with open(addrGroup["file"], 'r') as f:
for line in f:
line = line.strip('\n')
if not line:
continue
try:
self.content.append(ipranges.from_str(line))
except ValueError as e:
logger.error("IP address ({0}) could not be parsed.".format(line))
continue
elif "list" in addrGroup:
for ip in addrGroup["list"]:
try:
self.content.append(ipranges.from_str(ip))
except ValueError as e:
logger.error("IP address ({0}) could not be parsed.".format(ip))
continue
else:
raise Exception("Only 'file' or 'list' keys are supported.")
self.id = addrGroup["id"]
def __str__(self):
return "ID: '" + self.id + "' IPs: " + str(self.content)
def iplist(self):
return repr([str(i) for i in self.content])
def isPresent(self, ip):
"""
Return True if `ip` is in the addressgroup.
This method may raise ValueError if the `ip` is not a valid IPv4/6 address or subnet.
:param str ip: IPv4 or IPv6 address or subnet
:return: True if `ip` is present, False otherwise.
"""
ip = ipranges.from_str(ip)
return any((ip in rng) for rng in self.content)
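# Minimal usage sketch with made-up addresses (assumes the ipranges module is importable):
# group = AddressGroup({"id": "internal", "list": ["10.0.0.0/8", "192.168.1.1"]})
# group.isPresent("10.1.2.3")  # -> True
# group.isPresent("8.8.8.8")   # -> False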
|
1639238
|
from collections import defaultdict
from typing import Dict, List, Sequence, Tuple
import numpy as np
from ..dataset import MotionPredictionDataset
from ..proto import (ObjectPrediction, Submission, Trajectory, Vector3,
WeightedTrajectory)
from ..utils.map import repeated_points_to_array
from .metrics import (avg_ade, avg_fde, corrected_negative_log_likelihood,
log_likelihood, min_ade, min_fde, top1_ade, top1_fde,
weighted_ade, weighted_fde)
MAX_NUM_MODES = 25
def save_submission_proto(filepath: str, submission: Submission) -> None:
"""Save serialized submission protobuf to file.
Args:
filepath (str): Path to output file.
submission (Submission): Submission proto to save.
"""
with open(filepath, 'wb') as fout:
fout.write(submission.SerializeToString())
def load_submission_proto(filepath: str) -> Submission:
"""Load and deserialized submission proto from file.
Args:
filepath (str): File with serialized protobuf message.
Returns:
Submission: Deserialized message.
"""
with open(filepath, 'rb') as fin:
serialized = fin.read()
submission = Submission()
submission.ParseFromString(serialized)
return submission
def evaluate_submission_with_proto(
submission: Submission,
ground_truth: Submission,
) -> Dict[str, List[float]]:
"""Calculates various motion prediction metrics given
the submission and ground truth protobuf messages.
Args:
submission (Submission): Proto message with predicted trajectories.
ground_truth (Submission): Proto message with ground truth trajectories.
Raises:
ValueError:
Number of objects in submission is not equal to number of objects in ground truth.
ValueError:
Objects order in submission violates objects order in ground truth.
Returns:
Dict[str, list]: Mapping from metric name to the list of its per-object values.
"""
_check_submission_and_ground_truth(submission, ground_truth)
metrics = defaultdict(list)
gt_map = {
(prediction.scene_id, prediction.track_id): prediction
for prediction in ground_truth.predictions
}
for i in range(len(submission.predictions)):
pred = submission.predictions[i]
gt = gt_map[(pred.scene_id, pred.track_id)]
if pred.scene_id != gt.scene_id:
raise ValueError(f'Check scenes order: {pred.scene_id} != {gt.scene_id}')
if pred.track_id != gt.track_id:
raise ValueError(f'Check objects order: {pred.track_id} != {gt.track_id}')
pred_trajectories, weights = get_trajectories_weights_arrays(pred.weighted_trajectories)
pred_trajectories = pred_trajectories[np.argsort(weights)][-MAX_NUM_MODES:]
weights = weights[np.argsort(weights)][-MAX_NUM_MODES:]
gt_trajectory, _ = get_trajectories_weights_arrays(gt.weighted_trajectories)
gt_trajectory = gt_trajectory[0] # Reduce modes dim
metrics['avg_ade'].append(avg_ade(gt_trajectory, pred_trajectories))
metrics['avg_fde'].append(avg_fde(gt_trajectory, pred_trajectories))
metrics['min_ade'].append(min_ade(gt_trajectory, pred_trajectories))
metrics['min_fde'].append(min_fde(gt_trajectory, pred_trajectories))
metrics['top1_ade'].append(top1_ade(gt_trajectory, pred_trajectories, weights))
metrics['top1_fde'].append(top1_fde(gt_trajectory, pred_trajectories, weights))
metrics['weighted_ade'].append(weighted_ade(gt_trajectory, pred_trajectories, weights))
metrics['weighted_fde'].append(weighted_fde(gt_trajectory, pred_trajectories, weights))
metrics['log_likelihood'].append(log_likelihood(gt_trajectory, pred_trajectories, weights))
metrics['corrected_nll'].append(
corrected_negative_log_likelihood(gt_trajectory, pred_trajectories, weights))
metrics['is_ood'].append(gt.is_ood)
return metrics
def get_trajectories_weights_arrays(
trajectories: Sequence[WeightedTrajectory],
) -> Tuple[np.ndarray, np.ndarray]:
"""Return numpy array of trajectories and respective weights
given the sequence of WeightedTrajectory protobuf messages.
Args:
trajectories (Sequence[WeightedTrajectory]): sequence of protobuf messages
to extract array from
Returns:
Tuple[np.ndarray, np.ndarray]: trajectories of shape (n_modes, prediction_horizon, 2) and
respective weights of shape (n_modes,)
"""
n_modes = len(trajectories)
prediction_horizon = get_prediction_horizon(trajectories)
trajectories_array = np.empty((n_modes, prediction_horizon, 2))
weights = np.empty(n_modes)
for i, weighted_trajectory in enumerate(trajectories):
trajectories_array[i] = repeated_points_to_array(weighted_trajectory.trajectory)
weights[i] = weighted_trajectory.weight
return trajectories_array, weights
def ground_truth_from_dataset(dataset: MotionPredictionDataset) -> Submission:
"""Generates a Submission protobuf instance with ground truth trajectories.
Args:
dataset (MotionPredictionDataset): Dataset to get trajectories from.
Returns:
Submission: Resulting protobuf message.
"""
dataset_iter = iter(dataset)
ground_truth = Submission()
for data_item in dataset_iter:
pred = ObjectPrediction()
pred.track_id = data_item['track_id']
pred.scene_id = data_item['scene_id']
pred.weighted_trajectories.append(WeightedTrajectory(
trajectory=trajectory_array_to_proto(data_item['ground_truth_trajectory']),
weight=1.0,
))
ground_truth.predictions.append(pred)
return ground_truth
def trajectory_array_to_proto(trajectory: np.ndarray) -> Trajectory:
"""Transforms a numpy array with 2D trajectory to Trajectory proto message.
Args:
trajectory (np.ndarray): Trajectory array, shape (N, 2)
Returns:
Trajectory: Resulting protobuf message.
"""
assert len(trajectory.shape) == 2
trajectory_proto = Trajectory()
for i in range(trajectory.shape[0]):
trajectory_proto.points.append(Vector3(x=trajectory[i, 0], y=trajectory[i, 1]))
return trajectory_proto
def get_prediction_horizon(trajectories: Sequence[WeightedTrajectory]) -> int:
"""Returns a common number of timestamps for trajectories.
Args:
trajectories (Sequence[WeightedTrajectory]): sequence of weighted trajectories.
Raises:
ValueError: If any trajectory has deviating number of timestamps.
Returns:
int: A number of timestamps.
"""
horizon = len(trajectories[0].trajectory.points)
if not all(len(w.trajectory.points) == horizon for w in trajectories):
raise ValueError('All modes must have the same prediction horizon')
return horizon
def object_prediction_from_model_output(
track_id: int,
scene_id: str,
model_output: Dict[str, np.ndarray],
is_ood: bool,
) -> ObjectPrediction:
"""Generates an instance of ObjectPrediction proto from scene data and model predictions.
Args:
track_id (int): prediction request id
scene_id (str): unique scene id
model_output (Dict[str, np.ndarray]): model predictions stored in dict:
trajectories with associated weights and scene-level prediction confidence.
is_ood (bool): whether the sample is out of domain or not.
Returns:
ObjectPrediction: resulting message instance with fields set.
"""
object_prediction = ObjectPrediction()
object_prediction.track_id = track_id
object_prediction.scene_id = scene_id
object_prediction.is_ood = is_ood
n_trajectories = len(model_output['predictions_list'])
n_weights = len(model_output['plan_confidence_scores_list'])
if n_trajectories != n_weights:
raise ValueError(f'Number of predicted trajectories is not equal to number of weights: '
f'{n_trajectories} != {n_weights}')
for i in range(len(model_output['predictions_list'])):
weighted_trajectory = WeightedTrajectory(
trajectory=trajectory_array_to_proto(model_output['predictions_list'][i]),
weight=model_output['plan_confidence_scores_list'][i],
)
object_prediction.weighted_trajectories.append(weighted_trajectory)
object_prediction.uncertainty_measure = model_output['pred_request_uncertainty_measure']
return object_prediction
def _check_submission_and_ground_truth(
submission: Submission,
ground_truth: Submission,
) -> None:
if len(submission.predictions) != len(ground_truth.predictions):
raise ValueError(f'Check number of submitted predictions: '
f'{len(submission.predictions)} != {len(ground_truth.predictions)}')
submission_keys = {(op.scene_id, op.track_id) for op in submission.predictions}
gt_keys = {(op.scene_id, op.track_id) for op in ground_truth.predictions}
if len(submission_keys) != len(submission.predictions):
raise ValueError('Submission has duplicate keys.')
if len(gt_keys) != len(ground_truth.predictions):
raise ValueError('Ground truth has duplicate keys.')
if submission_keys != gt_keys:
raise ValueError('Submission and ground truth keys are not identical sets.')
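# Hedged usage sketch: end-to-end evaluation from serialized protobufs. The
# file names are placeholders; numpy is already imported above. Each metric
# value is a list with one entry per object, aggregated here by mean.
if __name__ == '__main__':
submission = load_submission_proto('submission.pb')      # hypothetical path
ground_truth = load_submission_proto('ground_truth.pb')  # hypothetical path
per_object = evaluate_submission_with_proto(submission, ground_truth)
for name, values in per_object.items():
print(name, float(np.mean(values)))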
|
1639249
|
import os
import sys
sys.path.insert(0, os.path.realpath(os.path.join(__file__, '../../')))
# split on os.pathsep so this works on both Windows (';') and POSIX (':')
for p in os.environ.get('PYTHONPATH', '').split(os.pathsep):
sys.path.append(p)
from asserts import *
from ckstyle.doCssFix import doFix
import ckstyle.command.args as args
defaultConfig = args.CommandArgs()
def getFixed(css, name):
fixer, msg = doFix(css, '')
ruleSet = fixer.getStyleSheet().getRuleSets()[0]
rule = ruleSet.getRuleByName(name)
return rule.fixedValue
|
1639291
|
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch.distributions import MultivariateNormal
def sample_digits_maf(model, epoch, random_order=False, seed=None, test=False):
model.eval()
n_samples = 80
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
if random_order is True:
np.random.seed(seed)
order = np.random.permutation(784)
else:
order = np.arange(784)
u = torch.zeros(n_samples, 784).normal_(0, 1)
mvn = MultivariateNormal(torch.zeros(28 * 28), torch.eye(28 * 28))
log_prob = mvn.log_prob(u)
samples, log_det = model.backward(u)
# log_det = log_prob - log_det
# log_det = log_det[np.logical_not(np.isnan(log_det.detach().numpy()))]
# idx = np.argsort(log_det.detach().numpy())
# samples = samples[idx].flip(dims=(0,))
# samples = samples[80 : 80 + n_samples]
samples = (torch.sigmoid(samples) - 1e-6) / (1 - 2e-6)
samples = samples.detach().cpu().view(n_samples, 28, 28)
fig, axes = plt.subplots(ncols=10, nrows=8)
ax = axes.ravel()
for i in range(n_samples):
ax[i].imshow(
np.transpose(samples[i], (0, 1)), cmap="gray", interpolation="none"
)
ax[i].axis("off")
ax[i].set_xticklabels([])
ax[i].set_yticklabels([])
ax[i].set_frame_on(False)
if not os.path.exists("gif_results"):
os.makedirs("gif_results")
if test is False:
save_path = "gif_results/samples_gaussian_" + str(epoch) + ".png"
else:
save_path = "figs/samples_gaussian_" + str(epoch) + ".png"
fig.subplots_adjust(wspace=-0.35, hspace=0.065)
plt.gca().set_axis_off()
plt.savefig(
save_path, dpi=300, bbox_inches="tight", pad_inches=0,
)
plt.close()
def plot_losses(epochs, train_losses, val_losses, title=None):
sns.set(style="white")
fig, axes = plt.subplots(
ncols=1, nrows=1, figsize=[10, 5], sharey=True, sharex=True, dpi=400
)
train = pd.Series(train_losses).astype(float)
val = pd.Series(val_losses).astype(float)
train.index += 1
val.index += 1
axes = sns.lineplot(data=train, color="gray", label="Training loss")
axes = sns.lineplot(data=val, color="orange", label="Validation loss")
axes.set_ylabel("Negative log-likelihood")
axes.legend(
frameon=False,
prop={"size": 14},
fancybox=False,
handletextpad=0.5,
handlelength=1,
)
axes.set_ylim(1250, 1600)
axes.set_xlim(0, 50)
axes.set_title(title) if title is not None else axes.set_title(None)
if not os.path.exists("plots"):
os.makedirs("plots")
save_path = "plots/train_plots" + str(epochs[-1]) + ".pdf"
plt.savefig(
save_path, dpi=300, bbox_inches="tight", pad_inches=0,
)
plt.close()
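# Hedged usage sketch for plot_losses: synthetic loss curves chosen to fall
# inside the fixed axis ranges used above (y in [1250, 1600], x in [0, 50]).
if __name__ == "__main__":
epochs = list(range(1, 51))
train_losses = [1600 - 6 * e for e in epochs]
val_losses = [1600 - 5.5 * e for e in epochs]
plot_losses(epochs, train_losses, val_losses, title="MAF on MNIST (toy numbers)")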
|
1639333
|
import pkg_resources
try:
__version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
import logging
logging.addLevelName(5, "TRACE")
logging.TRACE = 5
logging.Logger.trace = lambda self, msg, *args, **kwargs: \
self.log(logging.TRACE, msg, *args, **kwargs)
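# Illustrative sketch: once this module is imported, every Logger gains a
# .trace() method at the custom TRACE level (5, below DEBUG), e.g.
#
#   import logging
#   logging.basicConfig(level=logging.TRACE)
#   logging.getLogger("my.module").trace("very verbose detail")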
|
1639444
|
import yaml
import os
'''
This function is to load the yaml file used for workflows.
How to use this function?
By default getyaml.readyaml() will load data.yaml data.
But if the workflow refers to a different yaml file in the sampledata folder, call the function like
getyaml.readyaml('data_workflowname.yaml')
If the workflow requires more data, add a separate yaml file under the sampledata folder, using the format "data_workflowname.yaml".
'''
def readyaml(filename='data.yaml'):
filename = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'sampledata',filename))
with open(filename, 'r') as ymlfile:
data = yaml.safe_load(ymlfile)  # safe_load avoids arbitrary object construction
return data
|
1639450
|
import re
def camel_to_snake(s: str) -> str:
underscored = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", underscored).lower()
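# Illustrative examples of the two-pass substitution:
#   camel_to_snake("CamelCase")    -> "camel_case"
#   camel_to_snake("HTTPResponse") -> "http_response"
#   camel_to_snake("userID2Name")  -> "user_id2_name"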
|
1639463
|
import os
from IPython.core.magic import register_line_magic
os.system('wget -qO tldr https://github.com/dbrgn/tealdeer/releases/download/v1.3.0/tldr-linux-x86_64-musl')
os.system('chmod +x tldr')
os.system('mv tldr /usr/local/bin')
os.system('tldr --update') # only needed once, to populate the local page cache
@register_line_magic
def tldr(line):
get_ipython().system('tldr '+line)
|
1639530
|
from django.conf.urls import url
from django.urls import path
from . import views
from .forms import LoginForm
app_name = "accounts"
urlpatterns = [url(r'^login/$',
views.LoginView.as_view(success_url='/'),
name='login',
kwargs={'authentication_form': LoginForm}),
url(r'^register/$',
views.RegisterView.as_view(success_url="/"),
name='register'),
url(r'^logout/$',
views.LogoutView.as_view(),
name='logout'),
path(r'account/result.html',
views.account_result,
name='result'),
url(r'^forget_password/$',
views.ForgetPasswordView.as_view(),
name='forget_password'),
url(r'^forget_password_code/$',
views.ForgetPasswordEmailCode.as_view(),
name='forget_password_code'),
]
|
1639534
|
from setuptools import setup, find_packages
version = '0.1.0'
install_requires = [
'pyqt5',
'matplotlib',
]
setup(name='chainer_wing',
version=version,
description='ChainerWing -- GUI Deep Learning IDE.',
keywords='Deep Learning',
author='fukatani',
license="BSD 3-Clause",
packages=find_packages(),
package_data={'chainer_wing': ['resources/*', ], },
install_requires=install_requires,
)
|
1639544
|
import numpy as np
from typing import Callable, Iterable
def custom_scheduler(
max_steps: int,
update_fn: Callable[[int], float]) -> Iterable[float]:
"""
Create a custom generator for an input param
"""
for step in range(max_steps):
yield update_fn(step)
def get_custom_exp(
max_steps: int,
start_val: float,
end_val: float) -> Iterable:
"""
Create a custom exponential scheduler
"""
assert isinstance(max_steps, int) and max_steps >= 2
N0 = start_val
# decay from start_val to end_val: f(0) == start_val, f(max_steps - 1) == end_val
N1 = np.log(end_val / start_val) / (max_steps - 1)
update_fn = lambda x: N0 * np.exp(N1 * x)
return custom_scheduler(max_steps, update_fn)
def get_custom_linear(
max_steps: int,
start_val: float,
end_val: float) -> Iterable:
"""
Create a custom linear scheduler
"""
assert isinstance(max_steps, int) and max_steps >= 2  # need at least two steps to interpolate
N1 = (end_val-start_val)/(max_steps-1)
update_fn = lambda x: N1 * x + start_val
return custom_scheduler(max_steps, update_fn)
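# Hedged usage sketch: both schedulers are generators, so they can be consumed
# lazily or materialised with list(). Values assume the decay fix above
# (f(0) == start_val, f(max_steps - 1) == end_val).
if __name__ == "__main__":
print(list(get_custom_linear(5, 1.0, 0.0)))  # [1.0, 0.75, 0.5, 0.25, 0.0]
print(list(get_custom_exp(3, 1.0, 0.25)))    # approximately [1.0, 0.5, 0.25]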
|
1639582
|
import bpy, bmesh
import socket
from struct import unpack
import numpy
from .common import PROTOCOL_VERSION, send_protobuf, receive_protobuf, receive_buffer, receive_into_numpy_array
from .connection import Connection
from .messages_pb2 import ClientMessage, HelloResult, QueryBoundResult, ServerStateResult
# XXX if this operator gets called during rendering, then what? :)
class OSPRayUpdateMeshBound(bpy.types.Operator):
"""Update bounding geometry with bound provided by plugin"""
bl_idname = "ospray.update_mesh_bound"
bl_label = "Update bounding mesh from server"
bl_options = {'REGISTER'}#, 'UNDO'} # Enable undo for the operator?
def execute(self, context):
obj = context.active_object
assert obj.type == 'MESH'
mesh = obj.data
if obj.mode == 'EDIT':
self.report({'ERROR'}, 'Mesh should be in object mode')
return {'CANCELLED'}
scene = context.scene
ospray = scene.ospray
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.connect((ospray.host, ospray.port))
# Handshake
client_message = ClientMessage()
client_message.type = ClientMessage.HELLO
client_message.uint_value = PROTOCOL_VERSION
send_protobuf(sock, client_message)
result = HelloResult()
receive_protobuf(sock, result)
if not result.success:
print('ERROR: Handshake with server:')
print(result.message)
self.report({'ERROR'}, 'Handshake with server failed: %s' % result.message)
return {'CANCELLED'}
# Volume data (i.e. mesh)
print('Getting extent for mesh %s (ospray volume)' % mesh.name)
# Send request
client_message = ClientMessage()
client_message.type = ClientMessage.QUERY_BOUND
client_message.string_value = mesh.name
send_protobuf(sock, client_message)
# Get result
result = QueryBoundResult()
receive_protobuf(sock, result)
if not result.success:
print('ERROR: extent query failed:')
print(result.message)
self.report({'ERROR'}, 'Query failed: %s' % result.message)
return {'CANCELLED'}
# Receive actual geometry
# Lengths are for the complete vector, not the number of higher
# level elements
vertices_len, edges_len, faces_len, loop_len = unpack('<IIII', receive_buffer(sock, 4*4))
vertices = numpy.empty(vertices_len, dtype=numpy.float32)
edges = numpy.empty(edges_len, dtype=numpy.uint32)
faces = numpy.empty(faces_len, dtype=numpy.uint32)
loop_start = numpy.empty(loop_len, dtype=numpy.uint32)
loop_total = numpy.empty(loop_len, dtype=numpy.uint32)
print('Mesh bound: %d v, %d e, %d f, %d l' % (vertices_len, edges_len, faces_len, loop_len))
receive_into_numpy_array(sock, vertices, vertices_len*4)
receive_into_numpy_array(sock, edges, edges_len*4)
receive_into_numpy_array(sock, faces, faces_len*4)
receive_into_numpy_array(sock, loop_start, loop_len*4)
receive_into_numpy_array(sock, loop_total, loop_len*4)
#print(vertices)
#print(edges)
#print(faces)
#print(loop_start)
#print(loop_total)
# Bye
client_message.type = ClientMessage.BYE
send_protobuf(sock, client_message)
sock.close()
# XXX use new mesh replace from 2.81 when it becomes available
bm = bmesh.new()
verts = []
for x, y, z in vertices.reshape((-1,3)):
verts.append(bm.verts.new((x, y, z)))
for i, j in edges.reshape((-1,2)):
bm.edges.new((verts[i], verts[j]))
for start, total in zip(loop_start, loop_total):
vv = []
for i in range(total):
vi = faces[start+i]
vv.append(verts[vi])
bm.faces.new(vv)
bm.to_mesh(mesh)
mesh.update()
return {'FINISHED'}
class OSPRayGetServerState(bpy.types.Operator):
"""Retrieve server state and store in text editor block"""
bl_idname = "ospray.get_server_state"
bl_label = "Get server state"
bl_options = {'REGISTER'}
def execute(self, context):
scene = context.scene
ospray = scene.ospray
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.connect((ospray.host, ospray.port))
# Handshake
client_message = ClientMessage()
client_message.type = ClientMessage.HELLO
client_message.uint_value = PROTOCOL_VERSION
send_protobuf(sock, client_message)
result = HelloResult()
receive_protobuf(sock, result)
if not result.success:
print('ERROR: Handshake with server:')
print(result.message)
self.report({'ERROR'}, 'Handshake with server failed: %s' % result.message)
return {'CANCELLED'}
# Send request
print('Getting server state')
client_message = ClientMessage()
client_message.type = ClientMessage.GET_SERVER_STATE
send_protobuf(sock, client_message)
# Get result
result = ServerStateResult()
receive_protobuf(sock, result)
# Bye
client_message.type = ClientMessage.BYE
send_protobuf(sock, client_message)
sock.close()
# Set in text
text = bpy.data.texts.new('BLOSPRAY server report')
text.write(result.state)
text.current_line_index = 0
return {'FINISHED'}
classes = (
OSPRayUpdateMeshBound,
OSPRayGetServerState
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
def unregister():
from bpy.utils import unregister_class
for cls in classes:
unregister_class(cls)
|
1639588
|
from auth.reddit_auth import *
from trade_client import *
from store_order import *
from datetime import datetime
import time
import json
import os.path
import nltk
nltk.download('vader_lexicon')  # lexicon used by SentimentIntensityAnalyzer
from nltk.sentiment import SentimentIntensityAnalyzer
reddit = load_creds('auth/auth.yml')
config = load_config('config.yml')
keywords = load_keywords('keywords.yml')
print(f'logged in as {reddit.user.me()}')
def get_post():
"""
Returns relevant posts based the user configuration
"""
posts = {}
for sub in config['SUBREDDITS']:
subreddit = reddit.subreddit(sub)
relevant_posts = getattr(subreddit, config['SORT_BY'])(limit=config['NUMBER_OF_POSTS'])
for post in relevant_posts:
if not post.stickied:
posts[post.id] = {"title": post.title,
"subreddit": sub,
"body": post.selftext,
}
return posts
def store_posts(data):
"""
Stores relevant posts and associated data in a local json file
"""
with open('reddit_posts.json', 'w') as file:
json.dump(data, file)
def load_posts(file):
"""
Loads saved reddit posts
"""
with open(file, 'r') as f:
return json.load(f)
def compare_posts(fetched, stored):
"""
Checks if there are new posts
"""
i=0
for post in fetched:
if not fetched[post] in [stored[item] for item in stored]:
i+=1
return i
def find_keywords(posts, keywords):
"""
Checks if there are any keywords in the posts we pulled.
Bit of a mess but it works.
"""
key_posts = {}
for post in posts:
for key in keywords:
for item in keywords[key]:
if item in posts[post]['title'] or item in posts[post]['body']:
key_posts[post] = posts[post]
key_posts[post]['coin'] = key
return key_posts
def analyse_posts(posts):
"""
analyses the sentiment of each post with a keyword
"""
sia = SentimentIntensityAnalyzer()
sentiment = {}
for post in posts:
if posts[post]['coin'] not in sentiment:
sentiment[posts[post]['coin']] = []
sentiment[posts[post]['coin']].append(sia.polarity_scores(posts[post]['title']))
sentiment[posts[post]['coin']].append(sia.polarity_scores(posts[post]['body']))
return sentiment
def get_avg_sentiment(sentiment):
"""
Compiles and returns the average sentiment
of all titles and bodies of our query
"""
average = {}
for coin in sentiment:
# sum up all compound readings from each title & body associated with the
# coin we detected in keywords
average[coin] = sum([item['compound'] for item in sentiment[coin]])
# get the mean compound sentiment if it's not 0
if average[coin] != 0:
average[coin] = average[coin] / len(sentiment[coin])
return average
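# Illustrative example: get_avg_sentiment averages the compound scores per
# coin, e.g. {'BTC': [{'compound': 0.5}, {'compound': 0.3}]} -> {'BTC': 0.4}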
def get_price(coin, pairing):
return client.get_ticker(symbol=coin+pairing)['lastPrice']
if __name__ == '__main__':
i = 0
while True:
i +=1
print(f'iteration {i}')
# get the posts from reddit
posts = get_post()
# check if the order file exists and load the current orders
if os.path.isfile('order.json'):
order = load_order('order.json')
else:
order = {}
# check if the reddit posts files exist and load them
if os.path.isfile('reddit_posts.json'):
saved_posts = load_posts('reddit_posts.json')
# this will return the number of new posts we found on reddit
# compared to the ones stored
new_posts = compare_posts(posts, saved_posts)
if new_posts > 0 or i == 2:
print("New posts detected, fetching new posts...")
# store the posts if they are new
store_posts(posts)
# find posts with matching keywords
key_posts = find_keywords(posts, keywords)
# determine the sentiment for each post
sentiment = analyse_posts(key_posts)
# return the average compound sentiment, grouped by symbol
analyzed_coins = get_avg_sentiment(sentiment)
print(f'Found matching keywords with the following sentiments: {analyzed_coins}')
for coin in analyzed_coins:
# prepare to buy if the sentiment of each coin is greater than 0
# and the coin hasn't been bought already
if analyzed_coins[coin] > 0 and coin not in order:
print(f'{coin} sentiment is positive: {analyzed_coins[coin]}, preparing to buy...')
price = get_price(coin, config['TRADE_OPTIONS']['PAIRING'])
volume = convert_volume(coin+config['TRADE_OPTIONS']['PAIRING'], config['TRADE_OPTIONS']['QUANTITY'],price)
try:
# Run a test trade if true
if config['TRADE_OPTIONS']['TEST']:
order[coin] = {
'symbol':coin+config['TRADE_OPTIONS']['PAIRING'],
'price':price,
'volume':volume,
'time':datetime.timestamp(datetime.now())
}
print('PLACING TEST ORDER')
else:
order[coin] = create_order(coin+config['TRADE_OPTIONS']['PAIRING'], volume)
except Exception as e:
print(e)
else:
print(f'Order created with {volume} on {coin}')
store_order('order.json', order)
else:
print(f'Sentiment for {coin} is not positive or {coin} is already in the portfolio')
time.sleep(config['TRADE_OPTIONS']['RUN_EVERY']*60)
else:
print("Running first iteration, fetching posts...")
store_posts(posts)
|
1639619
|
import strawberry_django
from .models import User
def test_type_instance():
@strawberry_django.type(User, fields=['id', 'name'])
class UserType:
pass
user = UserType(1, 'user')
assert user.id == 1
assert user.name == 'user'
def test_input_instance():
@strawberry_django.input(User, fields=['id', 'name'])
class InputType:
pass
user = InputType(1, 'user')
assert user.id == 1
assert user.name == 'user'
|
1639649
|
import numpy as np
from scipy.signal import fftconvolve
"""
This script contains dilation and erosion implementations described in the following link.
https://stackoverflow.com/questions/25034259/scipy-ndimage-morphology-operators-saturate-my-computer-memory-ram-8gb
This implementation deals with memory error faced in scipy ones.
"""
def binary_dilation(A, B):
return fftconvolve(A, B,'same')>0.5
def binary_erosion(A, B):
return _erode_v2(A, B)
def _erode_v1(A,B,R):
#R should be the radius of the spherical kernel, i.e. half the width of B
A_inv = np.logical_not(A)
A_inv = np.pad(A_inv, R, 'constant', constant_values=1)
tmp = fftconvolve(A_inv, B, 'same') > 0.5
#now we must un-pad the result, and invert it again
return np.logical_not(tmp[R:-R, R:-R, R:-R])
def _erode_v2(A,B):
thresh = np.count_nonzero(B)-0.5
return fftconvolve(A,B,'same') > thresh
def binary_opening(image, structure=None):
"""Return fast binary morphological opening of an image.
This function returns the same result as greyscale opening but performs
faster for binary images.
The morphological opening on an image is defined as an erosion followed by
a dilation. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks. This tends to "open" up (dark) gaps between (bright)
features.
Parameters
----------
image : ndarray
Binary input image.
structure : ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped structuring element (connectivity=1).
Returns
-------
opening : ndarray of bool
The result of the morphological opening.
"""
eroded = binary_erosion(image, structure)
out = binary_dilation(eroded, structure)
# eroded = erode_v2(image, structure)
# out = dilate(eroded, structure)
return out
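# Hedged usage sketch: a single seed pixel dilated by a 3x3 structuring
# element grows into a 3x3 block; eroding with the same element recovers the
# single pixel. Inputs are passed as floats for fftconvolve.
if __name__ == "__main__":
A = np.zeros((7, 7))
A[3, 3] = 1.0
B = np.ones((3, 3))
dilated = binary_dilation(A, B)                    # 3x3 block of True around (3, 3)
eroded = binary_erosion(dilated.astype(float), B)  # True only at (3, 3)
print(dilated.sum(), eroded.sum())                 # 9 1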
|
1639704
|
from .bleu import BleuCorpusMetric
from .distinct import SingleTurnDistinct, MultiTurnDistinct
from .recorder import SingleTurnResponseRecorder
__all__ = [
'BleuCorpusMetric',
'SingleTurnDistinct', 'MultiTurnDistinct',
'SingleTurnResponseRecorder',
]
|
1639716
|
import os
import time
from django.utils import translation
def screenshot(client, name):
time.sleep(1)
if translation.get_language() != 'en':
p = name.rsplit('.', 1)
p.insert(1, translation.get_language())
name = '.'.join(p)
os.makedirs(os.path.join('screens', os.path.dirname(name)), exist_ok=True)
client.save_screenshot(os.path.join('screens', name))
|
1639763
|
import os
import unittest
import rdflib
import sbol2
MODULE_LOCATION = os.path.dirname(os.path.abspath(__file__))
TEST_LOCATION = os.path.join(MODULE_LOCATION, 'resources', 'crispr_example.xml')
PARTS_LOCATION = os.path.join(MODULE_LOCATION, 'resources', 'tutorial', 'parts.xml')
class TestReferencedObjects(unittest.TestCase):
def test_participant_type(self):
doc = sbol2.Document()
doc.read(TEST_LOCATION)
md_uri = 'http://sbols.org/CRISPR_Example/CRISPR_Template/1.0.0'
md = doc.moduleDefinitions[md_uri]
# Work with the first interaction
i = md.interactions[0]
# participant should be a str
self.assertEqual(type(i.participations[0].participant), str)
def test_fc_definition(self):
doc = sbol2.Document()
doc.read(TEST_LOCATION)
md_uri = 'http://sbols.org/CRISPR_Example/CRISPR_Template/1.0.0'
md = doc.moduleDefinitions[md_uri]
fc_uri = ('http://sbols.org/CRISPR_Example/CRISPR_Template' +
'/cas9_gRNA_complex/1.0.0')
fc = md.functionalComponents[fc_uri]
# definition should be a str
self.assertEqual(type(fc.definition), str)
def test_cd_sequences(self):
# Test a referenced object storing a list instead of a singleton
doc = sbol2.Document()
doc.read(PARTS_LOCATION)
cd_uri = 'http://examples.org/ComponentDefinition/AmeR/1'
cd = doc.componentDefinitions[cd_uri]
s1_uri = 'http://examples.org/Sequence/AmeR_sequence/1'
s2_uri = 'http://examples.org/Sequence/ECK120010818_sequence/1'
# Ensure the URI is present, and as a string
self.assertTrue(s1_uri in cd.sequences)
# Cannot append sequences - it has no effect on the cd
#
# The CD returns a copy of the list of sequences, not its
# internal representation.
cd.sequences.append(s2_uri)
self.assertTrue(len(cd.sequences) == 1)
cd.sequences = [s1_uri, s2_uri]
self.assertTrue(len(cd.sequences) == 2)
# Verify that all of the elements are instances of str
self.assertTrue(all([isinstance(uri, str) for uri in cd.sequences]))
# Verify that the attribute is still a ReferencedObject and
# was not overwritten with the list.
if 'sequences' in cd.__dict__:
self.assertIsInstance(cd.__dict__['sequences'], sbol2.ReferencedObject)
def test_dunder_uri(self):
# See issue #319
# The __uri__ method to convert an object to a URI
# was returning a str, not a URIRef
sa = sbol2.SequenceAnnotation('test_sa')
comp = sbol2.Component('test_comp')
sa.component = comp
expected = rdflib.URIRef(comp.identity)
self.assertEqual(expected, sa.properties[sbol2.SBOL_COMPONENT_PROPERTY][0])
if __name__ == '__main__':
unittest.main()
|
1639785
|
from .eight_plus_sixteen import * # noqa
from .five_plus_ten import * # noqa
from .four_plus_eight import * # noqa
from .six_plus_twelve import * # noqa
from .ten_plus_twenty import * # noqa
from .three_plus_six import * # noqa
from .two_plus_four import * # noqa
|
1639817
|
from .document import DocumentArray
from .storage.sqlite import StorageMixins, SqliteConfig
__all__ = ['SqliteConfig', 'DocumentArraySqlite']
class DocumentArraySqlite(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
1639819
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.simplefilter('ignore')
from scipy import stats
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD, evaluate
from collections import defaultdict
class hybrid(object):
def __init__ (self,user_id,ratings):
self.user_id = user_id
self.md = pd.read_csv('CustomData/FinalData.csv')
self.ratings = ratings
print(ratings[(ratings['user_id'] == user_id)][['user_id','book_id', 'rating']])
self.popularity_rating = self.popularity(self.md)
self.collaborative_rating = self.collaborative(self.ratings, self.user_id)
self.content_rating = self.content_based(self.md,self.ratings,self.user_id)
self.final_hybrid(self.md, self.popularity_rating , self.collaborative_rating, self.content_rating, self.user_id)
#Popularity#
def popularity(self,md):
fd = pd.read_csv('CustomData/AverageRatings.csv')
fd1 = pd.read_csv('CustomData/RatingsCount.csv')
# use .loc: chained indexing assignment operates on a copy and has no effect
fd.loc[fd['rating'].notnull(), 'rating'] = fd.loc[fd['rating'].notnull(), 'rating'].astype('float')
vote_averages = fd[fd['rating'].notnull()]['rating']
C = vote_averages.mean()
fd1.loc[fd1['rating'].notnull(), 'rating'] = fd1.loc[fd1['rating'].notnull(), 'rating'].astype('float')
vote_counts = fd1[fd1['rating'].notnull()]['rating']
m = len(vote_counts)
md['ratings_count'] = fd1['rating']
md['average_rating'] = fd['rating']
qualified = md[(md['ratings_count'].notnull())][['book_id','title', 'authors', 'ratings_count', 'average_rating']]
qualified['ratings_count'] = qualified['ratings_count'].astype('float')
qualified['average_rating'] = qualified['average_rating'].astype('float')
qualified.shape
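# IMDB-style Bayesian weighted rating: blends a book's own average rating R
# (weighted by its vote count v) with the global mean C (weighted by the
# prior m), so sparsely rated books are pulled toward the global mean.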
def weighted_rating(x):
v = x['ratings_count']
R = x['average_rating']
return (v/(v+m) * R) + (m/(m+v) * C)
qualified['popularity_rating'] = qualified.apply(weighted_rating, axis=1)
pop = qualified[['book_id','popularity_rating']]
print(qualified.shape)
print(pop.shape)
return pop
### Collaborative ##
def collaborative(self,ratings,user_id):
reader = Reader()
#ratings.head()
temp_ratings = ratings
data = Dataset.load_from_df(temp_ratings[['user_id', 'book_id', 'rating']], reader)
data.split(n_folds=2)
## Training the data ##
svd = SVD()
evaluate(svd, data, measures=['RMSE', 'MAE'])
trainset = data.build_full_trainset()
algo = SVD()
algo.fit(trainset)
#svd.train(trainset)
## Testing the data ##
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
count = 0
for uid, iid, true_r, est, _ in predictions:
if uid == user_id:
count = count+1
temp_ratings.loc[len(temp_ratings)+1]= [uid,iid,est]
cb = temp_ratings[(temp_ratings['user_id'] == user_id)][['book_id', 'rating']]
return(cb)
##### CONTENT ######
def content_based(self,md,ratings,user_id):
md['book_id'] = md['book_id'].astype('int')
ratings['book_id'] = ratings['book_id'].astype('int')
ratings['user_id'] = ratings['user_id'].astype('int')
ratings['rating'] = ratings['rating'].astype('int')
md['authors'] = md['authors'].str.replace(' ','')
md['authors'] = md['authors'].str.lower()
md['authors'] = md['authors'].str.replace(',',' ')
#print(md.head())
md['authors'] = md['authors'].apply(lambda x: [x,x])
#print(md['authors'])
md['Genres']=md['Genres'].str.split(';')
#print(md['Genres'])
md['soup'] = md['authors'] + md['Genres']
#print(md['soup'])
md['soup'] = md['soup'].str.join(' ')
count = CountVectorizer(analyzer='word',ngram_range=(1,1),min_df=0, stop_words='english')
count_matrix = count.fit_transform(md['soup'])
print (count_matrix.shape)
cosine_sim = cosine_similarity(count_matrix, count_matrix)
def build_user_profiles():
user_profiles=np.zeros((60001,999))
#taking only the first 100000 ratings to build user_profile
for i in range(0,100000):
u=ratings.iloc[i]['user_id']
b=ratings.iloc[i]['book_id']
user_profiles[u][b-1]=ratings.iloc[i]['rating']
return user_profiles
user_profiles=build_user_profiles()
def _get_similar_items_to_user_profile(person_id):
#Computes the cosine similarity between the user profile and all item profiles
user_ratings = np.empty((999,1))
cnt=0
for i in range(0,998):
book_sim=cosine_sim[i]
user_sim=user_profiles[person_id]
user_ratings[i]=(book_sim.dot(user_sim))/sum(cosine_sim[i])
maxval = max(user_ratings)
print(maxval)
for i in range(0,998):
user_ratings[i]=((user_ratings[i]*5.0)/(maxval))
if(user_ratings[i]>3):
cnt+=1
return user_ratings
content_ratings = _get_similar_items_to_user_profile(user_id)
num = md[['book_id']]
num1 = pd.DataFrame(data=content_ratings[0:,0:])
frames = [num, num1]
content_rating = pd.concat(frames, axis =1,join_axes=[num.index])
content_rating.columns=['book_id', 'content_rating']
return(content_rating)
def final_hybrid(self,md, popularity_rating , collaborative_rating, content_rating, user_id):
hyb = md[['book_id']]
title = md[['book_id','title', 'Genres']]
hyb = hyb.merge(title,on = 'book_id')
hyb = hyb.merge(self.collaborative_rating,on = 'book_id')
hyb = hyb.merge(self.popularity_rating, on='book_id')
hyb = hyb.merge(self.content_rating, on='book_id')
def weighted_rating(x):
v = x['rating']
R = x['popularity_rating']
c = x['content_rating']
return 0.4*v + 0.2*R + 0.4 * c
hyb['hyb_rating'] = hyb.apply(weighted_rating, axis=1)
hyb = hyb.sort_values('hyb_rating', ascending=False).head(999)
hyb.columns = ['Book ID' , 'Title', 'Genres', 'Collaborative Rating', 'Popularity Rating' , 'Content Rating', 'Hybrid Rating']
print(len(hyb['Hybrid Rating']))
print(hyb)
def newUser():
print('\n Rate from books\n')
print('ID Author Title Genre\n')
print('2. <NAME>, Mary <NAME> and the Sorcerer\'s Stone (Harry Potter, #1) Fantasy;Young-Age')
print('127. <NAME> The Tipping Point: How Little Things Can Make a Big Difference Self-Help')
print('239. <NAME> World War Z: An Oral History of the Zombie War Horror;Fiction')
print('26 <NAME> The Da Vinci Code Thriller;Drama')
print('84 <NAME> Jurassic Park (Jurassic Park, #1) SciFi;Thriller;Fantasy')
print('86 <NAME> A Time to Kill Thriller')
print('966 <NAME> Presumed Innocent Thriller;Crime')
print('42 <NAME> Little Women (Little Women, #1) Young-Age;Romance;Drama')
print('44 <NAME> The Notebook (The Notebook, #1) Romance;Drama')
print('54 <NAME> The Hitchhiker\'s Guide to the Galaxy Fantasy;Fiction')
print('134 <NAME> City of Glass (The Mortal Instruments, #3) Kids;Fantasy;Fiction')
print('399 <NAME> The Tales of Beedle the Bard Kids;Fantasy;Fiction')
print('38 <NAME> The Time Traveler\'s Wife Romance;SciFi;Fantasy;Domestic')
print('729 <NAME> Hyperion (Hyperion Cantos, #1) SciFi')
print('807 <NAME> The Circle SciFi')
print('690 <NAME> The Audacity of Hope: Thoughts on Reclaiming the American Dream Biography')
print('617 <NAME> Orange Is the New Black Biography')
print('495 <NAME> A Heartbreaking Work of Staggering Genius Biography')
print('770 <NAME>,<NAME> <NAME> History;Classic')
print('773 <NAME> The Taming of the Shrew Comedy;Classic')
print('829 <NAME> A Room with a View Classic')
print('971 <NAME>, <NAME> The Rainbow Fish Kids')
print('976 <NAME>, Dr. Seuss Dr. Seuss\'s Green Eggs and Ham: For Soprano, Boy Soprano, and Orchestra Kids')
print('627 <NAME>, <NAME> The True Story of the 3 Little Pigs Kids;Fiction')
print('121 <NAME>, <NAME> Lolita Biography;Romance;Comedy')
print('196 <NAME> Fight Club Comedy;Drama')
print('444 <NAME>, <NAME> Winnie-the-Pooh (Winnie-the-Pooh, #1) Kids;Comedy')
print('745 Jenny Lawson Lets Pretend This Never Happened: A Mostly True Memoir Biography;Comedy')
ratings = pd.read_csv('CustomData/FinalRatings.csv')
#taking only the first 100000 ratings
ratings=ratings[1:100000]
user_id = 60000
rating_count = len(ratings['user_id'])+1
print(user_id)
print('\n----------------Welcome User '+str(user_id)+'-------------------')
print('\nPlease Rate 5 books from the above list.')
for x in range(0,5):
print("\n")
bookId=input("BookId:")
rating=input("Rating:")
ratings.loc[rating_count]= [user_id,bookId,rating]
rating_count =rating_count+1
h = hybrid(user_id,ratings)
print("------------------------------Welcome to the Book Recommendation Engine---------------------------\n")
user=input("1. Book Recommendation for New User. \n2. Book Recommendation for Existing User.\n")
if user=='1':
newUser()
elif user=='2':
ratings = pd.read_csv('CustomData/FinalRatings.csv')
ratings=ratings[1:100000]
#taking only the first 100000 ratings
userId=int(input("\nPlease Enter User Id: "))
print('\n----------------Welcome User'+str(userId)+'-------------------')
h = hybrid(userId,ratings)
else:
print("Invalid option\n ")
|
1639848
|
from .version import *
import ctypes
def get_file_version_info(filename):
# Get the file version info structure.
pBlock = GetFileVersionInfoW(filename)
pBuffer, dwLen = VerQueryValueW(pBlock.raw, "\\")
if dwLen != ctypes.sizeof(VS_FIXEDFILEINFO):
raise ctypes.WinError(ERROR_BAD_LENGTH)
pVersionInfo = ctypes.cast(pBuffer,
ctypes.POINTER(VS_FIXEDFILEINFO))
VersionInfo = pVersionInfo.contents
if VersionInfo.dwSignature != 0xFEEF04BD:
raise ctypes.WinError(ERROR_BAD_ARGUMENTS)
FileDate = (VersionInfo.dwFileDateMS << 32) + VersionInfo.dwFileDateLS
return FileDate
|
1639880
|
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple, Union
from pyscreener.docking.metadata import CalculationMetadata
from pyscreener.docking.dock.utils import SphereMode
@dataclass(repr=True, eq=False)
class DOCKMetadata(CalculationMetadata):
probe_radius: float = 1.4
steric_clash_dist: float = 0.0
min_radius: float = 1.4
max_radius: float = 4.0
sphere_mode: Union[SphereMode, str] = SphereMode.BOX
docked_ligand_file: Optional[str] = None
enclose_spheres: bool = True
buffer: float = 10.
prepared_ligand: Optional[Union[str, Path]] = None
prepared_receptor: Optional[Tuple[str, str]] = None
def __post_init__(self):
if isinstance(self.sphere_mode, str):
self.sphere_mode = SphereMode.from_str(self.sphere_mode)
|
1639889
|
import remoto
import json
import ceph_medic
from ceph_medic import terminal
def get_mon_report(conn):
command = [
'ceph',
'--cluster=%s' % ceph_medic.metadata['cluster_name'],
'report'
]
out, err, code = remoto.process.check(
conn,
command
)
if code > 0:
terminal.error('failed to connect to the cluster to fetch a report from the monitor')
terminal.error('command: %s' % ' '.join(command))
for line in err:
terminal.error(line)
raise RuntimeError()
try:
return json.loads(b''.join(out).decode('utf-8'))
except ValueError:
return {}
def get_cluster_nodes(conn):
"""
Ask a monitor (with a pre-made connection) about all the nodes in
a cluster. This will be able to get us all known MONs and OSDs.
It returns a dictionary with a mapping that looks like::
{
'mons': [
{
'host': 'node1',
'public_ip': '192.168.1.100',
},
],
'osds': [
{
'host': 'node2',
'public_ip': '192.168.1.101',
},
{
'host': 'node3',
'public_ip': '192.168.1.102',
},
]
}
"""
report = get_mon_report(conn)
nodes = {'mons': [], 'osds': []}
try:
# XXX Is this really needed? in what case we wouldn't have a monmap
# with mons?
mons = report['monmap']['mons']
except KeyError:
raise SystemExit(report)
for i in mons:
nodes['mons'].append({
'host': i['name'],
'public_ip': _extract_ip_address(i['public_addr'])
})
osds = report['osd_metadata']
for i in osds:
nodes['osds'].append({
'host': i['hostname'],
'public_ip': _extract_ip_address(i['front_addr'])
})
return nodes
# XXX does not support IPV6
def _extract_ip_address(string):
"""
Addresses from Ceph reports can come up with subnets and ports using ':'
and '/' to identify them properly. Parse those types of strings to extract
just the IP.
"""
port_removed = string.split(':')[0]
return port_removed.split('/')[0]
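# Illustrative example:
#   _extract_ip_address('192.168.1.100:6789/0') -> '192.168.1.100'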
|
1639891
|
import numpy as np
from a_nice_mc.objectives.bayes_logistic_regression import BayesianLogisticRegression
class Heart(BayesianLogisticRegression):
def __init__(self, name='heart', batch_size=32):
data = np.load('data/heart/data.npy')
labels = np.load('data/heart/labels.npy')
# Normalize the data (zero mean, unit variance).
dm = np.mean(data, axis=0)
ds = np.std(data, axis=0)
data = (data - dm) / ds
super(Heart, self).__init__(data, labels, batch_size=batch_size)
self.name = name
@staticmethod
def mean():
return np.array([
-0.13996868, 0.71390106, 0.69571619, 0.43944853, 0.36997702, -0.27319424,
0.31730518, -0.49617367, 0.40516419, 0.4312388, 0.26531786, 1.10337417,
0.70054367, -0.25684964
])
@staticmethod
def std():
return np.array([
0.22915648, 0.24545612, 0.20457998, 0.20270157, 0.21040644, 0.20094482,
0.19749419, 0.24134014, 0.20230987, 0.25595334, 0.23709087, 0.24735325,
0.20701178, 0.19771984
])
|
1639957
|
import sys
sys.path.append("./src")
from github_stats import *
def test_github_user_stats():
resp = get_user_stats("Shravan-1908")
assert resp is not None
invalid_user = get_user_stats("Shravan-1908/hydra")
assert invalid_user is None
def test_github_repo_stats():
resp = get_repo_stats("Shravan-1908/hydra")
assert resp is not None
invalid_repo = get_repo_stats("Shravan-1908")
assert invalid_repo is None
|
1639988
|
from django.apps import AppConfig
class PlacesConfig(AppConfig):
name = 'places'
verbose_name = "Places"
|
1639994
|
import argparse
import logging
import os
import tensorflow as tf
from distutils.util import strtobool
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from generic.data_provider.iterator import Iterator
from generic.tf_utils.evaluator import Evaluator, MultiGPUEvaluator
from generic.tf_utils.optimizer import create_multi_gpu_optimizer
from generic.tf_utils.ckpt_loader import load_checkpoint, create_resnet_saver
from generic.utils.config import load_config
from generic.utils.file_handlers import pickle_dump
from generic.data_provider.image_loader import get_img_builder
from generic.data_provider.nlp_utils import GloveEmbeddings
from generic.data_provider.dataset import DatasetMerger
from vqa.data_provider.vqa_tokenizer import VQATokenizer
from vqa.data_provider.vqa_dataset import VQADataset
from vqa.data_provider.vqa_batchifier import VQABatchifier
from vqa.models.vqa_network import VQANetwork
from vqa.train.evaluator_listener import VQADumperListener, VQAEvaluator
###############################
# LOAD CONFIG
#############################
parser = argparse.ArgumentParser('VQA network baseline!')
parser.add_argument("-data_dir", type=str, help="Directory with data")
parser.add_argument("-img_dir", type=str, help="Directory with image")
parser.add_argument("-img_buf", type=lambda x:bool(strtobool(x)), default="False", help="Store image in memory (faster but require a lot of RAM)")
parser.add_argument("-year", type=str, help="VQA release year (either 2014 or 2017)")
parser.add_argument("-test_set", type=str, default="test-dev", help="VQA release year (either 2014 or 2017)")
parser.add_argument("-exp_dir", type=str, help="Directory in which experiments are stored")
parser.add_argument("-config", type=str, help='Config file')
parser.add_argument("-load_checkpoint", type=str, help="Load model parameters from specified checkpoint")
parser.add_argument("-continue_exp", type=lambda x:bool(strtobool(x)), default="False", help="Continue previously started experiment?")
parser.add_argument("-no_thread", type=int, default=1, help="No thread to load batch")
parser.add_argument("-no_gpu", type=int, default=1, help="How many gpus?")
parser.add_argument("-gpu_ratio", type=float, default=0.95, help="How many GPU ram is required? (ratio)")
args = parser.parse_args()
config, exp_identifier, save_path = load_config(args.config, args.exp_dir)
logger = logging.getLogger()
# Load config
resnet_version = config['model']["image"].get('resnet_version', 50)
finetune = config["model"]["image"].get('finetune', list())
use_glove = config["model"]["glove"]
batch_size = config['optimizer']['batch_size']
no_epoch = config["optimizer"]["no_epoch"]
merge_dataset = config.get("merge_dataset", False)
# Load images
logger.info('Loading images..')
image_builder = get_img_builder(config['model']['image'], args.img_dir)
use_resnet = image_builder.is_raw_image()
require_multiprocess = image_builder.require_multiprocess()
# Load dictionary
logger.info('Loading dictionary..')
tokenizer = VQATokenizer(os.path.join(args.data_dir, config["dico_name"]))
# Load data
logger.info('Loading data..')
trainset = VQADataset(args.data_dir, year=args.year, which_set="train", image_builder=image_builder, preprocess_answers=tokenizer.preprocess_answers)
validset = VQADataset(args.data_dir, year=args.year, which_set="val", image_builder=image_builder, preprocess_answers=tokenizer.preprocess_answers)
testset = VQADataset(args.data_dir, year=args.year, which_set=args.test_set, image_builder=image_builder)
if merge_dataset:
trainset = DatasetMerger([trainset, validset])
# Load glove
glove = None
if use_glove:
logger.info('Loading glove..')
glove = GloveEmbeddings(os.path.join(args.data_dir, config["glove_name"]))
# Build Network
logger.info('Building multi_gpu network..')
networks = []
for i in range(args.no_gpu):
logging.info('Building network ({})'.format(i))
with tf.device('gpu:{}'.format(i)):
with tf.name_scope('tower_{}'.format(i)) as tower_scope:
network = VQANetwork(
config=config["model"],
no_words=tokenizer.no_words,
no_answers=tokenizer.no_answers,
reuse=(i > 0), device=i)
networks.append(network)
assert len(networks) > 0, "you need to set no_gpu > 0 even if you are using CPU"
# Build Optimizer
logger.info('Building optimizer..')
optimizer, outputs = create_multi_gpu_optimizer(networks, config, finetune=finetune)
#optimizer, outputs = create_optimizer(networks[0], config, finetune=finetune)
###############################
# START TRAINING
#############################
# create a saver to store/load checkpoint
saver = tf.train.Saver()
resnet_saver = None
# Retrieve only the resnet variables
if use_resnet:
resnet_saver = create_resnet_saver(networks)
# CPU/GPU option
cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
# retrieve incoming sources
sources = networks[0].get_sources(sess)
scope_names = ['tower_{}/{}'.format(i, network.scope_name) for i, network in enumerate(networks)]
logger.info("Sources: " + ', '.join(sources))
# Create evaluation tools
train_evaluator = MultiGPUEvaluator(sources, scope_names, networks=networks, tokenizer=tokenizer)
#train_evaluator = Evaluator(sources, scope_names[0], network=networks[0], tokenizer=tokenizer)
eval_evaluator = Evaluator(sources, scope_names[0], network=networks[0], tokenizer=tokenizer)
# Load checkpoints or pre-trained networks
sess.run(tf.global_variables_initializer())
start_epoch = load_checkpoint(sess, saver, args, save_path)
if use_resnet:
resnet_saver.restore(sess, os.path.join(args.data_dir,'resnet_v1_{}.ckpt'.format(resnet_version)))
train_batchifier = VQABatchifier(tokenizer, sources, glove, remove_unknown=True)
eval_batchifier = VQABatchifier(tokenizer, sources, glove, remove_unknown=False)
# Create listener to use VQA evaluation code
dump_file = save_path.format('tmp.json')
ques_file = os.path.join(args.data_dir, 'OpenEnded_mscoco_val{}_questions.json'.format(args.year))
ann_file = os.path.join(args.data_dir, 'mscoco_val{}_annotations.json'.format(args.year))
vqa_eval_listener = VQAEvaluator(tokenizer, dump_file, ann_file, ques_file, require=networks[0].prediction)
# start actual training
best_val_acc, best_train_acc = 0, 0
for t in range(start_epoch, no_epoch):
# CPU/GPU option
# h5 requires a ThreadPool while raw images are more efficient with processes
if require_multiprocess:
cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
else:
cpu_pool = ThreadPool(args.no_thread)
cpu_pool._maxtasksperchild = 1000
logger.info('Epoch {}/{}..'.format(t + 1,no_epoch))
train_iterator = Iterator(trainset,
batch_size=batch_size,
batchifier=train_batchifier,
shuffle=True,
pool=cpu_pool)
[train_loss, train_accuracy] = train_evaluator.process(sess, train_iterator, outputs=outputs + [optimizer])
valid_loss, valid_accuracy = 0,0
if not merge_dataset:
valid_iterator = Iterator(validset,
batch_size=batch_size*2,
batchifier=eval_batchifier,
shuffle=False,
pool=cpu_pool)
# Note : As we need to dump and compute VQA accuracy, we can only use a single-gpu evaluator
[valid_loss, valid_accuracy] = eval_evaluator.process(sess, valid_iterator,
outputs=[networks[0].loss, networks[0].accuracy],
listener=vqa_eval_listener)
logger.info("Training loss: {}".format(train_loss))
logger.info("Training accuracy: {}".format(train_accuracy))
logger.info("Validation loss: {}".format(valid_loss))
logger.info("Validation accuracy: {}".format(valid_accuracy))
logger.info(vqa_eval_listener.get_accuracy())
if valid_accuracy >= best_val_acc:
best_train_acc = train_accuracy
best_val_acc = valid_accuracy
saver.save(sess, save_path.format('params.ckpt'))
logger.info("checkpoint saved...")
pickle_dump({'epoch': t}, save_path.format('status.pkl'))
# Dump test file to upload on VQA website
logger.info("Compute final {} results...".format(args.test_set))
vqa_file_name = "vqa_OpenEnded_mscoco_{}{}_cbn_results.json".format(args.test_set, args.year, config["model"]["name"])
dumper_eval_listener = VQADumperListener(tokenizer, os.path.join(args.exp_dir, save_path.format(vqa_file_name)),
require=networks[0].prediction)
saver.restore(sess, save_path.format('params.ckpt'))
test_iterator = Iterator(testset,
batch_size=batch_size*2,
batchifier=eval_batchifier,
shuffle=False,
pool=cpu_pool)
eval_evaluator.process(sess, test_iterator, outputs=[], listener=dumper_eval_listener)
logger.info("File dump at {}".format(dumper_eval_listener.out_path))
|
1640003
|
import copy
import requests
from . import responses
from .exceptions import ResponseError, ShipwireError, TimeoutError
""" Add or remove methods and API calls here. """
METHODS = {
'order': {
'create': ['POST', 'orders'],
'get': ['GET', 'orders', ''],
'modify': ['PUT', 'orders', ''],
'cancel': ['POST', 'orders', '/cancel'],
'holds': ['GET', 'orders', '/holds'],
'clear_holds': ['POST', 'orders', '/holds/clear'],
'items': ['GET', 'orders', '/items'],
'returns': ['GET', 'orders', '/returns'],
'trackings': ['GET', 'orders', '/trackings'],
'list': ['GET', 'orders'],
'split_orders': ['GET', 'orders', '/splitOrders'],
},
'orders': {
'list': ['GET', 'orders']
},
'stock': {
'products': ['GET', 'stock']
},
'rate': {
'quote': ['POST', 'rate']
},
'receiving': {
'list': ['GET', 'receivings'],
'create': ['POST', 'receivings'],
'get': ['GET', 'receivings', ''],
'modify': ['PUT', 'receivings', ''],
'cancel': ['POST', 'receivings', '/cancel'],
'cancel_labels': ['POST', 'receivings', '/labels/cancel'],
'holds': ['GET', 'receivings', '/holds'],
'instructions_recipients': ['GET', 'receivings',
'/instructionsRecipients'],
'items': ['GET', 'receivings', '/items'],
'shipments': ['GET', 'receivings', '/shipments'],
'trackings': ['GET', 'receivings', '/trackings'],
'labels': ['GET', 'receivings', '/labels'],
},
'returns': {
'list': ['GET', 'returns'],
'create': ['POST', 'returns'],
'get': ['GET', 'returns', ''],
'cancel': ['POST', 'returns', '/cancel'],
'holds': ['GET', 'returns', '/holds'],
'items': ['GET', 'returns', '/items'],
'trackings': ['GET', 'returns', '/trackings'],
'labels': ['GET', 'returns', '/labels'],
},
'webhooks': {
'list': ['GET', 'webhooks'],
'create': ['POST', 'webhooks'],
'get': ['GET', 'webhooks', ''],
'modify': ['PUT', 'webhooks', ''],
'delete': ['DELETE', 'webhooks', '']
},
'secrets': {
'list': ['GET', 'secret'],
'create': ['POST', 'secret'],
'get': ['GET', 'secret', ''],
'delete': ['DELETE', 'secret', '']
}
}
class Shipwire():
""" Shipwire API class."""
def __init__(self, username='<EMAIL>', password='<PASSWORD>',
host='api.shipwire.com', api_version=3, secure=True,
raise_on_errors=False, timeout=None, **kwargs):
self.host = host
self.api_version = api_version
self.auth = requests.auth.HTTPBasicAuth(username, password)
self.secure = secure
self.resource = False
self.method = False
self.call_params = False
self.json = ''
self.uri = ''
self.raise_on_errors = raise_on_errors
self.timeout = timeout
def __getattr__(self, name):
if name.startswith('__') or self.method:
""" can't chain another attribute after the method and
when __ the copying causes recurssion. """
raise AttributeError(name)
elif self.resource:
if name in list(METHODS[self.resource].keys()):
self.method = name
else:
raise ShipwireError('The \'%s\' attribute is not currently defined.'
% name)
else:  # since self.resource and self.method are empty, this must be a resource.
if name in METHODS:
self.resource = name
else:
raise ShipwireError('The \'%s\' resource is not currently defined.'
% name)
new_instance = copy.copy(self)
self.resource = self.method = False
return new_instance
def __call__(self, *args, **kwargs):
if self.method is False: # only run calls on methods, not resources.
raise ShipwireError('Parameters can only be passed to specific methods.')
if 'json' in kwargs:
self.json = kwargs.pop('json')
self.call_params = kwargs
return self._call_api()
def _call_api(self):
self.uri = uri = self._make_uri()
endpoint = METHODS[self.resource][self.method]
http_method = endpoint[0]
try:
res = requests.request(http_method, uri, auth=self.auth,
params=self.call_params,
json=self.json, timeout=self.timeout)
except requests.exceptions.Timeout as exc:
raise TimeoutError(exc)
if res.status_code >= 400 and self.raise_on_errors:
raise ResponseError(res)
# wrap the response in a response class.
return getattr(responses, self._class_name())(res, self)
def _class_name(self):
return '%sResponse' % self.method.capitalize()
def _make_uri(self):
endpoint = METHODS[self.resource][self.method]
number_words = len(endpoint)
protocol = 'https' if self.secure else 'http'
resource = endpoint[1]
base = "%s://%s/api/v%s" % (protocol, self.host,
self.api_version)
if number_words == 2: #ex: ['GET', 'orders']
uri = "%s/%s" % (base, resource)
elif number_words == 3: #ex: ['GET', 'orders', '/returns']
if 'id' not in self.call_params:
raise ShipwireError('An \'id\' is required for this api call.')
method = endpoint[2]
uri = "%s/%s/%s%s" % (base, resource,
self.call_params.get('id'),
method)
return uri
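# Hedged usage sketch; credentials and the order id below are placeholders:
#   sw = Shipwire(username='user@example.com', password='secret')
#   resp = sw.order.get(id=41949255)  # GET https://api.shipwire.com/api/v3/orders/41949255
#   listing = sw.stock.products()     # GET https://api.shipwire.com/api/v3/stock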
|
1640007
|
import web
import tempfile
import os
import subprocess
import xmltodict
import json
import cgi
import shutil
from extractor.extractionWrapper import Extraction
from extractor.utilities import Util
urls = (
'/', 'Index',
'/extractor', 'FileHandler', # For uploading a file
'/extractor/file', 'PDFStreamHandler', # For uploading any binary data stream
'/extractor/(.+)/(header|citations|text|file)', 'Extractor', # For retrieving file information
'/extractor/(.+)', 'FileHandler', # For deleting a file
'/hello', 'HelloWorld', #For testing
)
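# Hedged usage sketch (host/port depend on how the web.py app is started):
#   curl -F myfile=@paper.pdf http://localhost:8080/extractor
#   curl 'http://localhost:8080/extractor/<fileid>/header?output=json'
#   curl -X DELETE http://localhost:8080/extractor/<fileid>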
ROOT_FOLDER="../" # there must be a trailing /
TMP_FOLDER=tempfile.gettempdir()+"/citeseerextractor/" #Specifies temp folder - useful for cleaning up afterwards
cgi.maxlen = 5 * 1024 * 1024 # 5MB file size limit for uploads
global utilities
utilities = Util()
class HelloWorld:
def GET(self):
return 'Hello World!\n'
class Index:
"""Loads the index page from the static dir"""
def GET(self):
web.header('Content-Type','text/html; charset=utf-8')
raise web.seeother('/static/index.html')
class Extractor:
def GET(self, datafile, method):
params = web.input(output="xml")
"""Returns some extracted information from a file"""
extractor = Extraction(utilities)
data = ''
pdffile = TMP_FOLDER + datafile
"""Check if the file exists, if not return a 404"""
if not os.path.exists(pdffile):
return web.notfound()
try:
if method == 'file':
typeFilterStatus = utilities.typeFilter(pdffile)
web.header('Content-Type', typeFilterStatus) # Set the Header
return open(pdffile,"rb").read()
else:
if method == 'header':
data = data + extractor.extractHeaders(pdffile)
elif method == 'text':
web.header('Content-Type', 'text/text') # Set the Header
web.debug("in the if statement")
data = data + extractor.extractText(pdffile)
return data
elif method == 'citations':
data = data + extractor.extractCitations(pdffile)
#Print XML or JSON
if params.output == 'xml' or params.output == '':
web.header('Content-Type','text/xml; charset=utf-8')
return utilities.printXML(data)
elif params.output == 'json':
jsondata = xmltodict.parse(data)
web.header('Content-Type','text/json; charset=utf-8')
return json.dumps(jsondata)
else:
web.ctx.status = '400'
return 'Unsupported output format. Options are: "xml" (default) and "json"'
except (IOError, OSError) as er: #Internal error, i.e. during extraction
web.debug(er)
return web.internalerror()
class Handler(object): # Super-class for the two handlers
def fileCheck(self, pdfpath):
try:
# After we handle the file upload, we do the following:
# I. Check the uploaded file's type -> proceed to next step
# II. Extract the full text from the document, where if the file type is:
# PDF -> extract text using pdf2text -> proceed to next step
# PostScript -> extract text using ps2text -> proceed to next step
# Text File -> skip full text extraction, proceed to next step
# Type NOT from the above -> Value error & Display error message
# III. Check if the document is an academic document and returns:
# "1" - Document is academic -> Proceed to next step
# "0" - Document is not academic -> Value error & Display error message
# "-1" - OS error
# IV. Form and return XML response
#new: checks file type and returns true if it is a PDF. Raises errors otherwise
typeFilterStatus = utilities.typeFilter(pdfpath)
web.debug(typeFilterStatus)
if typeFilterStatus == "application/pdf":
web.debug(pdfpath)
else:
typeFilterStatus = "falsetype"
raise ValueError
except OSError as ex:
web.debug(ex)
return web.internalerror()
except ValueError as ex:
web.debug(ex)
if typeFilterStatus == "falsetype":
return False, "Your document failed our academic document filter due to invalid file type. Supported types are PDF."
elif acaFilterStatus == "0":
return False, "Your document failed our academic document filter."
return True, typeFilterStatus
def printLocations(self, fileid):
location = web.ctx.homedomain + '/extraction/extractor/pdf/' + fileid
web.ctx.status = '201 CREATED'
web.header('Location', location)
web.header('Content-Type','text/xml; charset=utf-8')
web.header('Access-Control-Allow-Origin', '*')
response = utilities.printXMLLocations(fileid)
return response
class FileHandler(Handler):
def GET(self):
"""A form for submitting a pdf"""
return """<html><head></head><body>
<form method="POST" enctype="multipart/form-data" action="">
<input type="file" name="myfile" />
<br/>
<input type="submit" />
</form>
</body></html>"""
def POST(self):
"""Actually submits the file"""
try:
pdffile = web.input(myfile={})
pdfpath = utilities.handleUpload(pdffile)
passed, message = super(FileHandler, self).fileCheck(pdfpath)
if passed is False:
web.ctx.status = '400'
return message
else:
fileid = os.path.basename(pdfpath)
return super(FileHandler, self).printLocations(fileid)
except (IOError, OSError) as ex:
web.debug(ex)
web.ctx.status = '500'
return web.internalerror()
except ValueError as ex:
web.debug(ex)
web.ctx.status = '400'
return "File too large. Limit is ", cgi.maxlen
def DELETE(self,fileid):
""" 404 when txt file doesn't exist """
if not os.path.exists(TMP_FOLDER + fileid):
return web.notfound()
try:
os.unlink(TMP_FOLDER + fileid)
#os.unlink(TMP_FOLDER + fileid + '.txt')
return 'DELETED ' + fileid
except (IOError, OSError) as ex:
web.debug(ex)
return web.internalerror()
class PDFStreamHandler(Handler):
def POST(self):
"""Posts a PDF bytestream"""
content_size = -1
# Check for Content-Length header
try:
content_size = int(web.ctx.env.get('CONTENT_LENGTH'))
except (TypeError, ValueError):
content_size = 0
        if content_size > cgi.maxlen: # max file size exceeded
            web.ctx.status = '400'
            return "File too large. Limit is %d bytes" % cgi.maxlen
        if content_size == 0: # no Content-Length header
            web.ctx.status = '400'
            return "Please set Content-Length header for bytestream upload"
try:
data = web.data()
with tempfile.NamedTemporaryFile('wb',dir=TMP_FOLDER,delete=False) as f:
f.write(data)
pdfpath = os.path.abspath(f.name)
web.debug(pdfpath)
passed, message = super(PDFStreamHandler, self).fileCheck(pdfpath)
if passed is False:
web.ctx.status = '400'
return message
else:
fileid = os.path.basename(pdfpath)
return super(PDFStreamHandler, self).printLocations(fileid)
except (IOError, OSError) as ex:
web.debug(ex)
web.ctx.status = '500'
return web.internalerror()
if os.path.isdir(TMP_FOLDER): # wipe any stale temp folder, then recreate it
shutil.rmtree(TMP_FOLDER)
os.mkdir(TMP_FOLDER, 0o700)
application = web.application(urls, globals()).wsgifunc()
#app.run()
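# Example interactions with this service (illustrative; assumes web.py's dev
# server on localhost:8080 and a <fileid> returned by a previous upload):
#
#   curl -F myfile=@paper.pdf http://localhost:8080/extractor
#   curl "http://localhost:8080/extractor/<fileid>/header?output=json"
#   curl -X DELETE http://localhost:8080/extractor/<fileid>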
|
1640050
|
import json
import pulp
import unittest
from pyspatialopt.models import covering, utilities
class GUROBISolverTest(unittest.TestCase):
def setUp(self):
# Read the coverages
with open("valid_coverages/partial_coverage1.json", "r") as f:
self.partial_coverage = json.load(f)
with open("valid_coverages/binary_coverage_polygon1.json", "r") as f:
self.binary_coverage_polygon = json.load(f)
with open("valid_coverages/binary_coverage_point1.json", "r") as f:
self.binary_coverage_point = json.load(f)
with open("valid_coverages/partial_coverage2.json", "r") as f:
self.partial_coverage2 = json.load(f)
with open("valid_coverages/binary_coverage_polygon2.json", "r") as f:
self.binary_coverage_polygon2 = json.load(f)
with open("valid_coverages/binary_coverage_point2.json", "r") as f:
self.binary_coverage_point2 = json.load(f)
with open("valid_coverages/serviceable_demand_polygon.json", "r") as f:
self.serviceable_demand_polygon = json.load(f)
with open("valid_coverages/serviceable_demand_point.json", "r") as f:
self.serviceable_demand_point = json.load(f)
with open("valid_coverages/traumah_coverage.json", "r") as f:
self.traumah_coverage = json.load(f)
def test_mclp(self):
mclp = covering.create_mclp_model(self.binary_coverage_polygon, {"total": 5})
mclp.solve(pulp.GUROBI())
ids = utilities.get_ids(mclp, "facility_service_areas")
self.assertEqual(['1', '4', '5', '6', '7'], ids)
def test_mclpcc(self):
mclpcc = covering.create_mclp_cc_model(self.partial_coverage, {"total": 5})
mclpcc.solve(pulp.GUROBI())
ids = utilities.get_ids(mclpcc, "facility_service_areas")
self.assertEqual(['1', '4', '5', '6', '7'], ids)
def test_threshold(self):
threshold = covering.create_threshold_model(self.binary_coverage_point2, 30)
threshold_i = covering.create_threshold_model(self.binary_coverage_point2, 100)
threshold.solve(pulp.GUROBI())
threshold_i.solve(pulp.GUROBI())
ids = utilities.get_ids(threshold, "facility2_service_areas")
self.assertEqual(['10', '20', '4'], ids)
self.assertEqual(threshold_i.status, pulp.constants.LpStatusInfeasible)
def test_cc_threshold(self):
ccthreshold = covering.create_cc_threshold_model(self.partial_coverage2, 80)
ccthreshold_i = covering.create_cc_threshold_model(self.partial_coverage2, 100)
ccthreshold.solve(pulp.GUROBI())
ccthreshold_i.solve(pulp.GUROBI())
ids = utilities.get_ids(ccthreshold, "facility2_service_areas")
self.assertEqual(['1', '10', '11', '13', '15', '17', '19', '20', '21', '22', '3', '4', '7', '9'], ids)
self.assertEqual(ccthreshold_i.status, pulp.constants.LpStatusInfeasible)
def test_backup(self):
merged_dict = covering.merge_coverages([self.binary_coverage_point, self.binary_coverage_point2])
merged_dict = covering.update_serviceable_demand(merged_dict, self.serviceable_demand_point)
bclp = covering.create_backup_model(merged_dict, {"total": 30})
bclp.solve(pulp.GUROBI())
ids = utilities.get_ids(bclp, "facility_service_areas")
ids2 = utilities.get_ids(bclp, "facility2_service_areas")
self.assertEqual(['1', '3', '4', '5', '6', '7'], ids)
self.assertEqual(
['0', '1', '10', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '22', '3', '4', '5', '6', '8',
'9'], ids2)
def test_lscp(self):
merged_dict = covering.merge_coverages([self.binary_coverage_point, self.binary_coverage_point2])
merged_dict = covering.update_serviceable_demand(merged_dict, self.serviceable_demand_point)
lscp = covering.create_lscp_model(merged_dict)
lscp_i = covering.create_lscp_model(self.binary_coverage_point)
lscp.solve(pulp.GUROBI())
lscp_i.solve(pulp.GUROBI())
ids = utilities.get_ids(lscp, "facility_service_areas")
ids2 = utilities.get_ids(lscp, "facility2_service_areas")
self.assertEqual(['1', '3', '4', '5', '6', '7'], ids)
self.assertEqual(
['0', '1', '11', '12', '13', '14', '15', '16', '17', '19', '2', '20', '22', '4', '5', '6', '7', '9'], ids2)
self.assertEqual(lscp_i.status, pulp.constants.LpStatusInfeasible)
def test_traumah(self):
traumah = covering.create_traumah_model(self.traumah_coverage, 5, 10)
traumah_i = covering.create_traumah_model(self.traumah_coverage, 100, 100)
traumah.solve(pulp.GUROBI())
traumah_i.solve(pulp.GUROBI())
ad_ids = utilities.get_ids(traumah, "AirDepot")
tc_ids = utilities.get_ids(traumah, "TraumaCenter")
self.assertEqual(['0', '1', '2', '4', '5'], ad_ids)
self.assertEqual(['10', '12', '15', '16', '18', '19', '21', '22', '7', '9'], tc_ids)
self.assertEqual(traumah_i.status, pulp.constants.LpStatusInfeasible)
def test_bclpcc(self):
merged_dict = covering.merge_coverages([self.partial_coverage, self.partial_coverage2])
merged_dict = covering.update_serviceable_demand(merged_dict, self.serviceable_demand_polygon)
bclpcc = covering.create_bclpcc_model(merged_dict, {"total": 3}, 0.2)
bclpcc.solve(pulp.GUROBI())
ids = utilities.get_ids(bclpcc, "facility_service_areas")
ids2 = utilities.get_ids(bclpcc, "facility2_service_areas")
self.assertEqual(['4'], ids)
self.assertEqual(['10'], ids2)
if __name__ == '__main__':
unittest.main()
|
1640052
|
from typing import List, Tuple
from scipy.sparse import lil_matrix, eye
from scipy.sparse.csgraph import structural_rank, breadth_first_order
from scipy.sparse.linalg import inv
from sksparse.cholmod import cholesky
from torch_sparse import SparseTensor
from thgsp.alg import dsatur
from .utils import laplace, bipartite_mask, np
def amfs(A: SparseTensor, Sigma=None, level=None, delta=0.1, thresh_kld=1e-6, priority=True, verbose=False) \
-> Tuple[List[lil_matrix], np.ndarray]:
r"""
AMFS bipartite approximation for graph wavelet signal processing [3]_.
Parameters
----------
A: SparseTensor
The adjacency matrix.
Sigma: scipy.spmatrix, optional
The covariance matrix specified by the Laplacian matrix L. If None, :math:`\Sigma^{-1}=L+\delta I`
level: int, optional
The number of bipartite subgraphs, i.e., the decomposition level. If None,
:math:`level=\lceil log_2( \mathcal{X}) \rceil`, where :math:`\mathcal{X}` is the chromatic number of :obj:`A`.
    delta: float, optional
        :math:`1/\delta` is interpreted as the variance of the DC component. Refer to [4]_ for more details.
thresh_kld: float, optional
Threshold of Kullback-Leibler divergence to perform `AMFS` decomposition.
priority: bool,optional
If True, KLD holds priority.
verbose: bool,optional
Returns
-------
    bptG: List[lil_matrix]
        The bipartite subgraphs.
    beta: np.ndarray of shape (N, level)
        The boolean indicator of the bipartite vertex sets.
References
----------
    .. [3] <NAME>, et al, "Bipartite Subgraph Decomposition for Critically Sampled Wavelet Filterbanks on Arbitrary
       Graphs," IEEE Transactions on Signal Processing, 2016.
    .. [4] <NAME>, et al, "A probabilistic interpretation of sampling theory of graph signals," ICASSP, 2015.
"""
N = A.size(-1)
# compute_sigma consists of laplace matrix which prefers "coo"
A = A.to_scipy(layout='coo').astype("d")
if Sigma is None:
Sigma = compute_sigma(A, delta)
else:
assert Sigma.shape == (N, N)
if level is None:
chromatic = dsatur(A).n_color
        level = int(np.ceil(np.log2(chromatic)))  # range() below needs an int
A = A.tolil()
beta = np.zeros((N, level), dtype=bool)
bptG = [lil_matrix((N, N), dtype=A.dtype) for _ in range(level)]
for i in range(level):
if verbose:
print(
"\n|----------------------decomposition in level: {:4d} ------------------------|".format(i))
s1, s2 = amfs1level(A, Sigma, delta, thresh_kld, priority, verbose)
bt = beta[:, i]
bt[s1] = 1 # set s1 True
mask = bipartite_mask(bt)
bptG[i][mask] = A[mask]
A[mask] = 0
return bptG, beta
def amfs1level(W: lil_matrix, Sigma: lil_matrix = None, delta=0.1, thresh_kld=1e-6, priority=True, verbose=True):
if Sigma is None:
Sigma = compute_sigma(W, delta)
N = W.shape[-1]
not_arrived = np.arange(N)
nodes = breadth_first_order(W, i_start=0, return_predecessors=False)
not_arrived = np.setdiff1d(not_arrived, nodes)
s1 = [0]
s2 = []
nodes = nodes[1:]
while len(not_arrived) > 0:
new_root = not_arrived[0]
other_nodes = breadth_first_order(
W, i_start=new_root, return_predecessors=False)
not_arrived = np.setdiff1d(not_arrived, other_nodes)
s1.append(new_root)
nodes = np.append(nodes, other_nodes[1:])
balance_flag = True
for i, v in enumerate(nodes):
if verbose:
print("handling {:5d}-th node: {:5d}, ".format(i, v), end='')
N1 = len(s1)
s = [*s1, v, *s2]
W_local = W[np.ix_(s, s)]
Wb1 = W_local.copy()
Wb2 = W_local.copy()
Wb2[:N1, :N1] = 0
Wb2[N1:, N1:] = 0
Wb1[:N1 + 1, :N1 + 1] = 0
Wb1[N1 + 1:, N1 + 1:] = 0
if priority: # KLD holds priority
S_local = Sigma[np.ix_(s, s)]
DK1 = dkl(Wb1, S_local, delta)
DK2 = dkl(Wb2, S_local, delta)
diff = DK1 - DK2
if verbose:
print("DK1-DK2: {:5f}".format(diff))
if abs(diff) > thresh_kld:
if diff > 0:
s2.append(v)
else:
s1.append(v)
else:
rank1 = structural_rank(Wb1.tocsr())
rank2 = structural_rank(Wb2.tocsr())
if rank1 > rank2:
s1.append(v)
elif rank1 < rank2:
s2.append(v)
else:
if balance_flag:
s1.append(v)
else:
s2.append(v)
balance_flag = not balance_flag
else:
rank1 = structural_rank(Wb1)
rank2 = structural_rank(Wb2)
if rank1 > rank2:
s1.append(v)
elif rank1 < rank2:
s2.append(v)
else:
S_local = Sigma[np.ix_(s, s)]
DK1 = dkl(Wb1, S_local, delta)
DK2 = dkl(Wb2, S_local, delta)
if DK1 < DK2:
s1.append(v)
elif DK1 > DK2:
s2.append(v)
else:
if balance_flag:
s1.append(v)
else:
s2.append(v)
balance_flag = not balance_flag
return s1, s2
def dkl(Wb: lil_matrix, Sigma, delta: float):
N = Wb.shape[-1]
Lb = laplace(Wb, lap_type="comb").tocsc() # coo -> csc
temp = Lb + delta * eye(N, dtype=Lb.dtype, format='csc')
    # cholesky prefers `csc`
    dk = (Lb @ Sigma).diagonal().sum() - cholesky(temp).logdet()
    return dk
def compute_sigma(A, delta, precision_mat=False) -> lil_matrix:
Sigma_inv = laplace(A, lap_type="comb").tocsc() + \
delta * eye(A.shape[-1], dtype=A.dtype, format='csc')
if precision_mat:
return Sigma_inv
Sigma = inv(Sigma_inv) # csc more efficient
Sigma = Sigma + Sigma.T
Sigma.data *= 0.5
return Sigma.tolil()
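# Minimal usage sketch (illustrative; assumes a small undirected graph built
# as a torch_sparse SparseTensor):
#
#   import torch
#   from torch_sparse import SparseTensor
#   row = torch.tensor([0, 1, 1, 2]); col = torch.tensor([1, 0, 2, 1])
#   A = SparseTensor(row=row, col=col, value=torch.ones(4))
#   bptG, beta = amfs(A, level=1)
#   # bptG[0] is a bipartite lil_matrix; beta[:, 0] marks the two vertex sets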
|
1640054
|
import json
import urllib.request
import urllib.error
import getpass
import sys
loginpayload = {
"variables": {
"identifier": "",
"email": "",
"password": ""
},
"query": """
mutation ($identifier: String, $email: String, $password: String, $anonymousId: String) {
    login(identifier: $identifier, email: $email, password: $password, anonymousId: $anonymousId) {
accessToken
}
}
"""
}
loginpayload['variables']['identifier'] = loginpayload['variables']['email'] = input('Your username or e-mail: ')
loginpayload['variables']['password'] = getpass.getpass('Your password: ')
try:
req = urllib.request.Request('https://api.aidungeon.io/graphql', headers={'content-type': 'application/json'})
res = urllib.request.urlopen(req, data=json.dumps(loginpayload).encode('utf-8'))
payload = json.load(res)
if 'errors' in payload:
print('Couldn\'t log in.')
for error in payload['errors']:
print(error['message'])
sys.exit(1)
elif 'data' in payload:
print('Your access token is %s' % payload['data']['login']['accessToken'])
else:
print('no data?!')
sys.exit(1)
except urllib.error.HTTPError as e:
print(e)
print(e.read())
sys.exit(1)
|
1640057
|
import logging
from typing import Iterator
from typing import Tuple
from typeguard import check_argument_types
from espnet2.fileio.read_text import read_2column_text
from espnet2.samplers.abs_sampler import AbsSampler
class UnsortedBatchSampler(AbsSampler):
"""BatchSampler with constant batch-size.
Any sorting is not done in this class,
so no length information is required,
This class is convenient for decoding mode,
or not seq2seq learning e.g. classification.
Args:
batch_size:
key_file:
"""
def __init__(
self,
batch_size: int,
key_file: str,
drop_last: bool = False,
utt2category_file: str = None,
):
assert check_argument_types()
assert batch_size > 0
self.batch_size = batch_size
self.key_file = key_file
self.drop_last = drop_last
# utt2shape:
# uttA <anything is o.k>
# uttB <anything is o.k>
utt2any = read_2column_text(key_file)
if len(utt2any) == 0:
logging.warning(f"{key_file} is empty")
        # In this case, only the first column is used
keys = list(utt2any)
if len(keys) == 0:
raise RuntimeError(f"0 lines found: {key_file}")
category2utt = {}
if utt2category_file is not None:
utt2category = read_2column_text(utt2category_file)
if set(utt2category) != set(keys):
raise RuntimeError(
f"keys are mismatched between {utt2category_file} != {key_file}"
)
for k, v in utt2category.items():
category2utt.setdefault(v, []).append(k)
else:
category2utt["default_category"] = keys
self.batch_list = []
for d, v in category2utt.items():
category_keys = v
# Apply max(, 1) to avoid 0-batches
N = max(len(category_keys) // batch_size, 1)
if not self.drop_last:
                # Split keys as evenly as possible. Note that if N != 1,
                # these batches always contain at least batch_size keys.
cur_batch_list = [
                    category_keys[i * len(category_keys) // N : (i + 1) * len(category_keys) // N]
for i in range(N)
]
else:
cur_batch_list = [
tuple(category_keys[i * batch_size : (i + 1) * batch_size])
for i in range(N)
]
self.batch_list.extend(cur_batch_list)
def __repr__(self):
return (
f"{self.__class__.__name__}("
f"N-batch={len(self)}, "
f"batch_size={self.batch_size}, "
f"key_file={self.key_file}, "
)
def __len__(self):
return len(self.batch_list)
def __iter__(self) -> Iterator[Tuple[str, ...]]:
return iter(self.batch_list)
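# Minimal usage sketch (illustrative; assumes a Kaldi-style scp file whose
# lines look like "<utt-id> <anything>"):
#
#   sampler = UnsortedBatchSampler(batch_size=4, key_file="dump/test/wav.scp")
#   for batch in sampler:
#       print(batch)  # a sequence of utterance ids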
|
1640060
|
from pyids.model_selection import CoordinateAscent
from pyids.algorithms.ids import IDS
from pyids.algorithms import mine_CARs, mine_IDS_ruleset
from pyarc.qcba.data_structures import QuantitativeDataFrame
import pandas as pd
import numpy as np
df_iris = pd.read_csv("../../../data/iris0.csv")
quant_df = QuantitativeDataFrame(df_iris)
cars = mine_CARs(df_iris, 20)
def is_solution_interpretable(metrics):
print(metrics)
return (
metrics["fraction_overlap"] <= 0.5 and
metrics["fraction_classes"] > 1.0 and
metrics["fraction_uncovered"] <= 0.5 and
metrics["average_rule_width"] < 8 and
metrics["ruleset_length"] <= 10
)
def solution_interpretability_distance(metrics):
distance_vector = np.array([
max(metrics["fraction_overlap"] - 0.5, 0),
max(1 - metrics["fraction_classes"], 0),
max(metrics["fraction_uncovered"] - 0.5, 0),
max(metrics["average_rule_width"] - 8, 0),
max(metrics["ruleset_length"] - 10, 0)
])
    # L1 penalty over the violated constraints; np.linalg.norm(distance_vector)
    # would be the L2 alternative
    return np.sum(distance_vector)
def fmax(lambda_dict):
print(lambda_dict)
ids = IDS(algorithm="SLS")
ids.fit(class_association_rules=cars, quant_dataframe=quant_df, lambda_array=list(lambda_dict.values()))
metrics = ids.score_interpretability_metrics(quant_df)
"""
if not is_solution_interpretable(metrics):
distance = -solution_interpretability_distance(metrics)
print(distance)
return -distance
"""
auc = ids.score_auc(quant_df)
print(auc)
return auc
coord_asc = CoordinateAscent(
func=fmax,
func_args_ranges=dict(
l1=(1, 1000),
l2=(1, 1000),
l3=(1, 1000),
l4=(1, 1000),
l5=(1, 1000),
l6=(1, 1000),
l7=(1, 1000)
),
ternary_search_precision=50,
max_iterations=3
)
coord_asc.fit()
df = pd.DataFrame(coord_asc.procedure_data)
df.to_csv("output_data/coordinate_ascent_run_AUConly.csv")
|
1640070
|
rectangle_side_a = 10
rectangle_side_b = 5
rectangle_area = rectangle_side_a * rectangle_side_b
rectangle_perimeter = 2 * (rectangle_side_a + rectangle_side_b)
print "Let there be a rectangle with the sides of lengths:"
print rectangle_side_a, "and", rectangle_side_b, "cm."
print "Then the area of the rectangle is", rectangle_area, "cm squared."
print "The perimeter of the rectangle is", rectangle_perimeter, "cm."
|
1640077
|
from common import get_patch_version, inc_patch_version, GRADLE_PROPERTIES
from git import git_command
if __name__ == '__main__':
patch_version = get_patch_version()
release_branch = f"release-{patch_version}"
git_command("branch", release_branch)
git_command("push", "origin", release_branch)
inc_patch_version()
git_command("add", GRADLE_PROPERTIES)
git_command("commit", "-m", ":arrow_up: patch version")
git_command("push", "origin", "master")
|
1640103
|
import scripts
from deepdiff import DeepDiff
state = {}
def apply(newState):
    global state  # rebind the module-level state, not a local shadow
    # Step 1: create new nodes
    state = newState
connections = []
for key,value in state.items():
addr = "/project1/lambda/" + key
newOp = createOp(addr, value['ty'])
        if 'connections' in value:
            # list.append() returns None, so build new lists; store each spec
            # with its input-connector index (assumes spec = [src_suffix],
            # which becomes [src_suffix, this op's address])
            for idx, c in enumerate(value['connections']):
                connections.append((idx, c + [addr]))
if 'parameters' in value:
            for k, v in value['parameters'].items():
addParameter(newOp, k, v)
if 'commands' in value:
for comm in value['commands']:
runCommand(newOp, comm['command'], comm['args'])
if 'text' in value:
newOp.text = value['text']
for key,conn in connections:
op("/project1/lambda" + conn[0]).outputConnectors[0].connect(op(conn[1]).inputConnectors[key])
def createOp(addr, ty):
clazz = scripts.getClass(ty, 'none')
if clazz == "none":
print("Couldn't find " + ty)
return
name = addr[(addr.rfind('/') + 1):]
par = addr[:(addr.rfind('/'))]
if op(addr) != None:
op(addr).destroy()
# Special case things that can't have duplicates
if clazz[1] == 'audiodevin' or clazz[1] == 'videodevin':
if op(clazz[1]) == None:
parent().create(clazz[0], clazz[1])
if clazz[2] == "CHOP":
selOp = selectCHOP
selPar = 'chop'
elif clazz[2] == "TOP":
selOp = selectTOP
selPar = 'top'
op(par).create(selOp, name)
op(addr).pars(selPar)[0].val = '/project1/' + clazz[1]
else:
op(par).create(clazz[0], name)
newOp = op(addr)
# TODO: Figure out a clean way to not special case these
if clazz[1] == 'out' and clazz[2] == 'SOP':
newOp.render = True
newOp.display = True
if clazz[1] == 'geo':
op(addr + "/torus1").destroy()
return newOp
def addParameter(newOp, name, value):
pars = newOp.pars(name)
if len(pars) > 0:
par = pars[0]
if isfloat(value):
if par.isMenu:
par.menuIndex = value
else:
par.val = float(value)
else:
par.expr = value
    # Special case: load the external tox as soon as we know its source
    if name == "externaltox":
        newOp.par.reinitnet.pulse()
def runCommand(newOp, command, args):
if command == "pulse":
        pars = newOp.pars(args[0])
if len(pars) > 0:
if isfloat(args[1]):
pars[0].pulse(float(args[1]), frames=float(args[2]))
else:
pars[0].pulse(args[1])
elif command == "store":
        newOp.store(args[0], args[1])
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
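# Illustrative shape of the `newState` dict this script consumes (operator
# type, parameter names, and paths below are hypothetical):
#
#   apply({
#       "noise1": {
#           "ty": "noiseTOP",
#           "parameters": {"resolutionw": "1280"},
#           "connections": [["/out1"]],   # source suffix; list index = input connector
#           "commands": [{"command": "pulse", "args": ["resetpulse", "1", "1"]}],
#       },
#   })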
|
1640123
|
import torch
import torch.nn as nn
import numpy as np
np.random.seed(0)
from model.generate_anchor import generate_anchors
from model.bbox_transform import clip_boxes
from model.ellipse_transform import ellipse_transform_inv, ellipse2box
from nms.cpu_nms import cpu_nms
from nms.gpu_nms import gpu_nms
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = ((ws >= min_size) & (hs >= min_size)).nonzero().view(-1)
return keep
class EllipseProposalLayer(nn.Module):
def __init__(self, cfg):
super(EllipseProposalLayer, self).__init__()
self._cfg = dict(cfg)
self._preprocess()
def _preprocess(self):
# pre-computing stuff for making anchor later
self._im_info = (self._cfg['MAX_SIZE'], self._cfg['MAX_SIZE'])
base_anchors = generate_anchors(
base_size=self._cfg['RPN_FEAT_STRIDE'],
ratios=[1],
scales=np.array(self._cfg['ANCHOR_SCALES'], dtype=np.float32))
num_anchors = base_anchors.shape[0]
feat_stride = self._cfg['RPN_FEAT_STRIDE']
feat_width = self._cfg['MAX_SIZE'] // self._cfg['RPN_FEAT_STRIDE']
feat_height = feat_width
shift_x = np.arange(0, feat_width) * feat_stride
shift_y = np.arange(0, feat_height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
anchors = base_anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
self._feat_height = feat_height
self._feat_width = feat_width
self._anchors = torch.from_numpy(anchors).float()
def cuda(self, device=None):
self._anchors = self._anchors.cuda(device)
return self._apply(lambda t: t.cuda(device))
def forward(self, out_cls, out_ellipse):
"""
out_cls: (feat_height, feat_width, anchors, 2) FloatVariable
out_ellipse: (feat_height, feat_width, anchors, 5) FloatVariable
"""
scores = nn.functional.softmax(
out_cls, dim=3)[..., 1].contiguous().data.view(-1, 1)
ellipse_deltas = out_ellipse.data.view(-1, 5)
# 1. Generate proposals from ellipse deltas and shifted anchors
# Convert anchors into proposals via ellipse transformations
# Convert ellipse into bbox proposals
ellipses = ellipse_transform_inv(self._anchors, ellipse_deltas)
boxes = ellipse2box(ellipses, self._cfg['ELLIPSE_PAD'])
# 2. clip predicted boxes to image
boxes = clip_boxes(boxes, self._im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTICE: convert min_size to input image scale stored in im_info[2])
keep = _filter_boxes(boxes, self._cfg['TEST.RPN_MIN_SIZE'])
boxes = boxes[keep, :]
ellipses = ellipses[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
_, order = torch.sort(scores.view(-1), dim=0, descending=True)
if self._cfg['TEST.RPN_PRE_NMS_TOP_N'] > 0:
order = order[:self._cfg['TEST.RPN_PRE_NMS_TOP_N']]
boxes = boxes[order, :]
ellipses = ellipses[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if self._cfg['USE_GPU_NMS']:
nms = gpu_nms
else:
nms = cpu_nms
dets = np.hstack((boxes.cpu().numpy(), scores.cpu().numpy()))
keep = nms(dets, self._cfg['TEST.RPN_NMS_THRESH'])
keep = torch.from_numpy(np.array(keep)).type_as(scores).long()
if self._cfg['TEST.RPN_POST_NMS_TOP_N'] > 0:
keep = keep[:self._cfg['TEST.RPN_POST_NMS_TOP_N']]
boxes = boxes[keep, :]
ellipses = ellipses[keep, :]
scores = scores[keep].view(-1)
return (boxes, ellipses, scores)
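# Illustrative cfg for constructing this layer (every key below is read in the
# code above; the values are placeholders):
#
#   cfg = {'MAX_SIZE': 512, 'RPN_FEAT_STRIDE': 16, 'ANCHOR_SCALES': [8, 16, 32],
#          'ELLIPSE_PAD': 0.1, 'USE_GPU_NMS': False, 'TEST.RPN_MIN_SIZE': 16,
#          'TEST.RPN_PRE_NMS_TOP_N': 6000, 'TEST.RPN_NMS_THRESH': 0.7,
#          'TEST.RPN_POST_NMS_TOP_N': 300}
#   layer = EllipseProposalLayer(cfg)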
|
1640141
|
from sofi.ui import Column
def test_basic():
assert(str(Column()) == "<div class=\"col-md-4\"></div>")
def test_size_count():
assert(str(Column('lg', 2)) == "<div class=\"col-lg-2\"></div>")
def test_offset():
assert(str(Column('lg', 2, 3)) == "<div class=\"col-lg-2 col-lg-offset-3\"></div>")
def test_custom_class_ident_style_and_attrs():
assert(str(Column(cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<div id=\"123\" class=\"col-md-4 abclass\" style=\"font-size:0.9em;\" data-test=\"abc\"></div>")
|
1640189
|
import numpy as np
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import ignore_warnings
from imodels.rule_set.rule_fit import RuleFitRegressor
from imodels.util.transforms import FriedScale
## Testing FriedScale():
def test_fried_scale():
x_scale_test = np.zeros([100, 2])
x_scale_test[0:5, 0] = -100
x_scale_test[5:10, 0] = 100
x_scale_test[10:55, 0] = 1
    # winsorised version of the first column at trim=0.1; note: it will not be
    # scaled because it is already an indicator function, as per FP004
    x_scale_test[5:55, 1] = 1
fs = FriedScale() # trim_quantile=0.1)
fs.train(x_scale_test)
'''
np.testing.assert_array_equal(fs.scale(x_scale_test),
np.hstack([x_scale_test[:, 1].reshape([-1, 1]) * 0.4 / np.std(x_scale_test[:, 1]),
x_scale_test[:, 1].reshape([-1, 1])]))
'''
@ignore_warnings(category=ConvergenceWarning)
def test_integration():
X = np.array([[1, 99, 43, 34],
[1, 76, 22, 10],
[0, 83, 11, 0],
[0, 99, 74, 33],
[0, 53, 40, 34]])
y = np.array([1, 0, 1, 1, 0])
rfr = RuleFitRegressor(exp_rand_tree_size=False, n_estimators=500, random_state=1, include_linear=False,
max_rules=None, alpha=0.1)
rfr.fit(X, y)
print(len(rfr._get_rules()))
expected = np.array([0.83333333, 0.25, 0.83333333, 0.83333333, 0.25])
assert np.allclose(rfr.predict(X), expected, atol=1.0e-04)
rfr = RuleFitRegressor(exp_rand_tree_size=False, n_estimators=5, random_state=0, max_rules=None, alpha=0.01)
rfr.fit(X, y)
expected = np.array([0.89630491, 0.15375469, 0.89624531, 1.05000033, 0.00369476])
assert np.allclose(rfr.predict(X), expected)
rfr = RuleFitRegressor(exp_rand_tree_size=False, n_estimators=5, random_state=0,
max_rules=None, alpha=0.01, tree_generator=RandomForestClassifier())
rfr.fit(X, y)
# expected = np.array([0.89630491, 0.15375469, 0.89624531, 1.05000033, 0.00369476])
# assert np.allclose(rfr.predict(X), expected)
|
1640204
|
class Solution:
def areAlmostEqual(self, s1: str, s2: str) -> bool:
d1, d2 = "", ""
for a, b in zip(s1, s2):
if a != b:
d1, d2 = d1 + a, d2 + b
return len(d1) in [0, 2] and sorted(d1) == sorted(d2)
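# Quick sanity checks of the swap logic: the strings must differ in exactly
# zero or two positions, and the two mismatched pairs must mirror each other.
if __name__ == "__main__":
    s = Solution()
    assert s.areAlmostEqual("bank", "kanb")        # one swap repairs it
    assert not s.areAlmostEqual("attack", "defend")
    assert s.areAlmostEqual("kelb", "kelb")        # zero differences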
|
1640231
|
import pytest
from markupsafe import Markup
def test_adding(escape):
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup("<em>username</em>")
assert unsafe + safe == str(escape(unsafe)) + str(safe)
@pytest.mark.parametrize(
("template", "data", "expect"),
(
("<em>%s</em>", "<bad user>", "<em><bad user></em>"),
(
"<em>%(username)s</em>",
{"username": "<bad user>"},
"<em><bad user></em>",
),
("%i", 3.14, "3"),
("%.2f", 3.14, "3.14"),
),
)
def test_string_interpolation(template, data, expect):
assert Markup(template) % data == expect
def test_type_behavior():
assert type(Markup("foo") + "bar") is Markup
x = Markup("foo")
assert x.__html__() is x
def test_html_interop():
class Foo:
def __html__(self):
return "<em>awesome</em>"
def __str__(self):
return "awesome"
assert Markup(Foo()) == "<em>awesome</em>"
result = Markup("<strong>%s</strong>") % Foo()
assert result == "<strong><em>awesome</em></strong>"
@pytest.mark.parametrize("args", ["foo", 42, ("foo", 42)])
def test_missing_interpol(args):
with pytest.raises(TypeError):
Markup("<em></em>") % args
def test_tuple_interpol():
result = Markup("<em>%s:%s</em>") % ("<foo>", "<bar>")
expect = Markup("<em><foo>:<bar></em>")
assert result == expect
def test_dict_interpol():
result = Markup("<em>%(foo)s</em>") % {"foo": "<foo>"}
expect = Markup("<em><foo></em>")
assert result == expect
result = Markup("<em>%(foo)s:%(bar)s</em>") % {"foo": "<foo>", "bar": "<bar>"}
expect = Markup("<em><foo>:<bar></em>")
assert result == expect
def test_escaping(escape):
assert escape("\"<>&'") == ""<>&'"
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
def test_unescape():
assert Markup("<test>").unescape() == "<test>"
result = Markup("jack & tavi are cooler than mike & russ").unescape()
expect = "jack & tavi are cooler than mike & russ"
assert result == expect
original = "&foo;"
once = Markup(original).unescape()
twice = Markup(once).unescape()
expect = "&foo;"
assert once == expect
assert twice == expect
def test_format():
result = Markup("<em>{awesome}</em>").format(awesome="<awesome>")
assert result == "<em><awesome></em>"
result = Markup("{0[1][bar]}").format([0, {"bar": "<bar/>"}])
assert result == "<bar/>"
result = Markup("{0[1][bar]}").format([0, {"bar": Markup("<bar/>")}])
assert result == "<bar/>"
def test_formatting_empty():
formatted = Markup("{}").format(0)
assert formatted == Markup("0")
def test_custom_formatting():
class HasHTMLOnly:
def __html__(self):
return Markup("<foo>")
class HasHTMLAndFormat:
def __html__(self):
return Markup("<foo>")
def __html_format__(self, spec):
return Markup("<FORMAT>")
assert Markup("{0}").format(HasHTMLOnly()) == Markup("<foo>")
assert Markup("{0}").format(HasHTMLAndFormat()) == Markup("<FORMAT>")
def test_complex_custom_formatting():
class User:
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == "link":
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id, self.__html__()
)
elif format_spec:
raise ValueError("Invalid format spec")
return self.__html__()
def __html__(self):
return Markup("<span class=user>{0}</span>").format(self.username)
user = User(1, "foo")
result = Markup("<p>User: {0:link}").format(user)
expect = Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')
assert result == expect
def test_formatting_with_objects():
class Stringable:
def __str__(self):
return "строка"
assert Markup("{s}").format(s=Stringable()) == Markup("строка")
def test_escape_silent(escape, escape_silent):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
assert escape_silent("<foo>") == Markup("<foo>")
def test_splitting():
expect = [Markup("a"), Markup("b")]
assert Markup("a b").split() == expect
assert Markup("a b").rsplit() == expect
assert Markup("a\nb").splitlines() == expect
def test_mul():
assert Markup("a") * 3 == Markup("aaa")
def test_escape_return_type(escape):
assert isinstance(escape("a"), Markup)
assert isinstance(escape(Markup("a")), Markup)
class Foo:
def __html__(self):
return "<strong>Foo</strong>"
assert isinstance(escape(Foo()), Markup)
def test_soft_str(soft_str):
assert type(soft_str("")) is str
assert type(soft_str(Markup())) is Markup
assert type(soft_str(15)) is str
def test_soft_unicode_deprecated(soft_unicode):
with pytest.warns(DeprecationWarning):
assert type(soft_unicode(Markup())) is Markup
|
1640248
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import functools
from collections import OrderedDict
import random
import os
import math
import pickle
def load_state_dict(model, fname):
"""
Set parameters converted from Caffe models authors of VGGFace2 provide.
See https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/.
Arguments:
model: model
fname: file name of parameters converted from a Caffe model, assuming the file format is Pickle.
"""
with open(fname, 'rb') as f:
weights = pickle.load(f, encoding='latin1')
own_state = model.state_dict()
for name, param in weights.items():
if name in own_state:
try:
own_state[name].copy_(torch.from_numpy(param))
except Exception:
raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose '\
'dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
else:
#raise KeyError('unexpected key "{}" in state_dict'.format(name))
print('unexpected key "{}" in state_dict'.format(name))
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, bias=True):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,bias=bias )
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=-1, include_top=True):
self.inplanes = 64
super(ResNet, self).__init__()
self.include_top = include_top
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(512 * block.expansion, num_classes)
# CHJ_ADD task use
self.fc_dims={
"id": 80,
"ex": 64,
"tex": 80,
"angles":3,
"gamma":27,
"XY":2,
"Z":1}
#self.fc_dims_arr=[0] * (1+len(self.fc_dims))
#for i, (k, v) in enumerate(self.fc_dims.items()):
# self.fc_dims_arr[i+1] = v + self.fc_dims_arr[i]
_outdim = 512 * block.expansion
'''
self.fcid = nn.Linear(_outdim, 80)
self.fcex = nn.Linear(_outdim, 64)
self.fctex = nn.Linear(_outdim, 80)
self.fcangles = nn.Linear(_outdim, 3)
self.fcgamma = nn.Linear(_outdim, 27)
self.fcXY = nn.Linear(_outdim, 2)
self.fcZ = nn.Linear(_outdim, 1)
'''
self.fcid = conv1x1(_outdim, 80)
self.fcex = conv1x1(_outdim, 64)
self.fctex = conv1x1(_outdim, 80)
self.fcangles = conv1x1(_outdim, 3)
self.fcgamma = conv1x1(_outdim, 27)
self.fcXY = conv1x1(_outdim, 2)
self.fcZ = conv1x1(_outdim, 1)
self.arr_fc = [self.fcid, self.fcex, self.fctex,
self.fcangles, self.fcgamma, self.fcXY, self.fcZ]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, fm=False):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x1 = self.layer2(x)
x2 = self.layer3(x1)
x3 = self.layer4(x2)
x = self.avgpool(x3)
n_b = x.size(0)
outs=[]
for fc in self.arr_fc:
outs.append( fc(x).reshape(n_b, -1) )
if fm:
return [x1, x2, x3, x, torch.cat(outs,1)]
return outs
def resnet50_use():
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3])
#load_state_dict(model, fweight_file)
return model
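# Typical usage (sketch; the weight-file name is illustrative and should be a
# pickle converted from the VGGFace2 Caffe release, as load_state_dict expects):
#
#   model = resnet50_use()
#   load_state_dict(model, 'resnet50_ft_weight.pkl')
#   outs = model(torch.randn(1, 3, 224, 224))  # 7 task outputs: id, ex, tex, ...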
|
1640273
|
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.phototourism import build_tourism
from models.nerf import build_nerf
from models.rendering import get_rays_tourism, sample_points, volume_render
from utils.tour_video import create_interpolation_video
def inner_loop(model, optim, img, rays_o, rays_d, bound, num_samples, raybatch_size, inner_steps):
"""
train the inner model for a specified number of iterations
"""
pixels = img.reshape(-1, 3)
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
num_rays = rays_d.shape[0]
for step in range(inner_steps):
indices = torch.randint(num_rays, size=[raybatch_size])
raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]
pixelbatch = pixels[indices]
t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],
num_samples, perturb=True)
optim.zero_grad()
rgbs, sigmas = model(xyz)
colors = volume_render(rgbs, sigmas, t_vals)
loss = F.mse_loss(colors, pixelbatch)
loss.backward()
optim.step()
def report_result(model, img, rays_o, rays_d, bound, num_samples, raybatch_size):
"""
report synthesis result on heldout view
"""
pixels = img.reshape(-1, 3)
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],
num_samples, perturb=False)
synth = []
num_rays = rays_d.shape[0]
with torch.no_grad():
for i in range(0, num_rays, raybatch_size):
rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])
color_batch = volume_render(rgbs_batch, sigmas_batch, t_vals[i:i+raybatch_size])
synth.append(color_batch)
synth = torch.cat(synth, dim=0)
error = F.mse_loss(synth, pixels)
psnr = -10*torch.log10(error)
return psnr
def test():
parser = argparse.ArgumentParser(description='phototourism with meta-learning')
parser.add_argument('--config', type=str, required=True,
help='config file for the scene')
parser.add_argument('--weight-path', type=str, required=True,
help='path to the meta-trained weight file')
args = parser.parse_args()
with open(args.config) as config:
info = json.load(config)
for key, value in info.items():
args.__dict__[key] = value
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
test_set = build_tourism(image_set="test", args=args)
test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
model = build_nerf(args)
model.to(device)
checkpoint = torch.load(args.weight_path, map_location=device)
meta_state_dict = checkpoint['meta_model_state_dict']
test_psnrs = []
for idx, (img, pose, kinv, bound) in enumerate(test_loader):
img, pose, kinv, bound = img.to(device), pose.to(device), kinv.to(device), bound.to(device)
img, pose, kinv, bound = img.squeeze(), pose.squeeze(), kinv.squeeze(), bound.squeeze()
rays_o, rays_d = get_rays_tourism(img.shape[0], img.shape[1], kinv, pose)
# optimize on the left half, test on the right half
left_width = img.shape[1]//2
right_width = img.shape[1] - left_width
tto_img, test_img = torch.split(img, [left_width, right_width], dim=1)
tto_rays_o, test_rays_o = torch.split(rays_o, [left_width, right_width], dim=1)
tto_rays_d, test_rays_d = torch.split(rays_d, [left_width, right_width], dim=1)
model.load_state_dict(meta_state_dict)
optim = torch.optim.SGD(model.parameters(), args.tto_lr)
inner_loop(model, optim, tto_img, tto_rays_o, tto_rays_d,
bound, args.num_samples, args.tto_batchsize, args.tto_steps)
psnr = report_result(model, test_img, test_rays_o, test_rays_d, bound,
args.num_samples, args.test_batchsize)
print(f"test view {idx+1}, psnr:{psnr:.3f}")
test_psnrs.append(psnr)
test_psnrs = torch.stack(test_psnrs)
print("----------------------------------")
print(f"test dataset mean psnr: {test_psnrs.mean():.3f}")
print("\ncreating interpolation video ...\n")
create_interpolation_video(args, model, meta_state_dict, test_set, device)
print("\ninterpolation video created!")
if __name__ == '__main__':
test()
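# Illustrative config JSON consumed via --config (keys mirror the attribute
# accesses above; the values are placeholders):
#
#   {"tto_lr": 0.001, "tto_steps": 64, "tto_batchsize": 512,
#    "test_batchsize": 2048, "num_samples": 64}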
|
1640298
|
import hashlib
from colorama import Fore, Style
from Crypto.Cipher import AES
from elftools.elf.elffile import ELFFile
from .compression import lz77_decompress, lzma_compress
from .exception import (
InvalidStockRomError,
MissingSymbolError,
NotEnoughSpaceError,
ParsingError,
)
from .patch import FirmwarePatchMixin
from .utils import round_down_word, round_up_word
def _val_to_color(val):
if 0x9010_0000 > val >= 0x9000_0000:
return Fore.YELLOW
elif 0x0804_0000 > val >= 0x0800_0000:
return Fore.MAGENTA
else:
return ""
class Lookup(dict):
def __repr__(self):
substrs = []
substrs.append("{")
for k, v in sorted(self.items()):
k_color = _val_to_color(k)
v_color = _val_to_color(v)
substrs.append(
f" {k_color}0x{k:08X}{Style.RESET_ALL}: "
f"{v_color}0x{v:08X}{Style.RESET_ALL},"
)
substrs.append("}")
return "\n".join(substrs)
class Firmware(FirmwarePatchMixin, bytearray):
RAM_BASE = 0x02000000
RAM_LEN = 0x00020000
FLASH_BASE = 0x0000_0000
FLASH_LEN = 0
def __init__(self, firmware=None):
if firmware:
with open(firmware, "rb") as f:
firmware_data = f.read()
super().__init__(firmware_data)
else:
super().__init__(self.FLASH_LEN)
self._lookup = Lookup()
self._verify()
def _verify(self):
pass
def __getitem__(self, key):
"""Properly raises index error if trying to access oob regions."""
if isinstance(key, slice):
if key.start is not None:
try:
self[key.start]
except IndexError:
raise IndexError(
f"Index {key.start} ({hex(key.start)}) out of range"
) from None
if key.stop is not None:
try:
self[key.stop - 1]
except IndexError:
raise IndexError(
f"Index {key.stop - 1} ({hex(key.stop - 1)}) out of range"
) from None
return super().__getitem__(key)
def __setitem__(self, key, new_val):
"""Properly raises index error if trying to access oob regions."""
if isinstance(key, slice):
if key.start is not None:
try:
self[key.start]
except IndexError:
raise NotEnoughSpaceError(
f"Starting index {key.start} ({hex(key.start)}) exceeds "
f"firmware length {len(self)} ({hex(len(self))})"
) from None
if key.stop is not None:
try:
self[key.stop - 1]
except IndexError:
raise NotEnoughSpaceError(
f"Ending index {key.stop - 1} ({hex(key.stop - 1)}) exceeds "
f"firmware length {len(self)} ({hex(len(self))})"
) from None
return super().__setitem__(key, new_val)
def __str__(self):
return self.__name__
@staticmethod
def hash(data):
return hashlib.sha1(data).hexdigest()
def int(self, offset: int, size=4):
return int.from_bytes(self[offset : offset + size], "little")
def set_range(self, start: int, end: int, val: bytes):
self[start:end] = val * (end - start)
return end - start
def clear_range(self, start: int, end: int):
return self.set_range(start, end, val=b"\x00")
def show(self, wrap=1024, show=True):
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
def to_hex(x, pos):
return f"0x{int(x):06X}"
def to_hex_wrap(x, pos):
return f"0x{int(x)*wrap:06X}"
n_bytes = len(self)
rows = int(np.ceil(n_bytes / wrap))
occupied = np.array(self) != 0
plt.imshow(occupied.reshape(rows, wrap))
plt.title(str(self))
axes = plt.gca()
axes.get_xaxis().set_major_locator(ticker.MultipleLocator(128))
axes.get_xaxis().set_major_formatter(ticker.FuncFormatter(to_hex))
axes.get_yaxis().set_major_locator(ticker.MultipleLocator(32))
axes.get_yaxis().set_major_formatter(ticker.FuncFormatter(to_hex_wrap))
if show:
plt.show()
class RWData:
"""
Assumptions:
1. Only compressed rwdata is after this table
2. We are only modifying the lz_decompress stuff.
"""
# THIS HAS TO AGREE WITH THE LINKER
MAX_TABLE_ELEMENTS = 5
def __init__(self, firmware, table_start, table_len):
# We want to be able to extend the table.
self.firmware = firmware
self.table_start = table_start
self.__compressed_len_memo = {}
self.datas, self.dsts = [], []
for i in range(table_start, table_start + table_len - 4, 16):
# First thing is pointer to executable, need to always replace this
# to our lzma
rel_offset_to_fn = firmware.int(i)
if rel_offset_to_fn > 0x8000_0000:
rel_offset_to_fn -= 0x1_0000_0000
# fn_addr = i + rel_offset_to_fn
# assert fn_addr == 0x18005 # lz_decompress function
i += 4
data_addr = i + firmware.int(i)
i += 4
data_len = firmware.int(i) >> 1
i += 4
data_dst = firmware.int(i)
i += 4
data = lz77_decompress(firmware[data_addr : data_addr + data_len])
print(f" lz77 decompressed data {data_len} -> {len(data)}")
firmware.clear_range(data_addr, data_addr + data_len)
self.append(data, data_dst)
last_element_offset = table_start + table_len - 4
self.last_fn = firmware.int(last_element_offset)
if self.last_fn > 0x8000_0000:
self.last_fn -= 0x1_0000_0000
self.last_fn += last_element_offset
# Mark this area as reserved; there's nothing special about 0x77, its
# just not 0x00
firmware.set_range(
table_start, table_start + 16 * self.MAX_TABLE_ELEMENTS + 4, b"\x77"
)
def __getitem__(self, k):
return self.datas[k]
@property
def table_end(self):
return self.table_start + 4 * 4 * len(self.datas) + 4 + 4
def append(self, data, dst):
"""Add a new element to the table"""
if len(self.datas) >= self.MAX_TABLE_ELEMENTS:
raise NotEnoughSpaceError(
f"MAX_TABLE_ELEMENTS value {self.MAX_TABLE_ELEMENTS} exceeded"
)
self.datas.append(data)
self.dsts.append(dst)
assert len(self.datas) == len(self.dsts)
@property
def compressed_len(self):
compressed_len = 0
for data in self.datas:
data = bytes(data)
if data not in self.__compressed_len_memo:
compressed_data = lzma_compress(bytes(data))
self.__compressed_len_memo[data] = len(compressed_data)
compressed_len += self.__compressed_len_memo[data]
return compressed_len
def write_table_and_data(self, end_of_table_reference, data_offset=None):
"""
Parameters
----------
data_offset : int
Where to write the compressed data
"""
# Write Compressed Data
data_addrs, data_lens = [], []
if data_offset is None:
index = self.table_end
else:
index = data_offset
total_len = 0
for data in self.datas:
compressed_data = lzma_compress(bytes(data))
print(
f" compressed {len(data)}->{len(compressed_data)} bytes "
f"(saves {len(data)-len(compressed_data)}). "
f"Writing to 0x{index:05X}"
)
self.firmware[index : index + len(compressed_data)] = compressed_data
data_addrs.append(index)
data_lens.append(len(compressed_data))
index += len(compressed_data)
total_len += len(compressed_data)
# Write Table
index = self.table_start
assert len(data_addrs) == len(data_lens) == len(self.dsts)
for data_addr, data_len, data_dst in zip(data_addrs, data_lens, self.dsts):
self.firmware.relative(index, "rwdata_inflate")
index += 4
# Assumes that the data will be after the table.
rel_addr = data_addr - index
if rel_addr < 0:
rel_addr += 0x1_0000_0000
self.firmware.replace(index, rel_addr, size=4)
index += 4
self.firmware.replace(index, data_len, size=4)
index += 4
self.firmware.replace(index, data_dst, size=4)
index += 4
self.firmware.relative(index, "bss_rwdata_init")
index += 4
self.firmware.relative(index, self.last_fn, size=4)
index += 4
assert index == self.table_end
# Update the pointer to the end of table in the loader
self.firmware.relative(end_of_table_reference, index, size=4)
print(self)
return total_len
def __str__(self):
"""Returns the **written** table.
Doesn't show unstaged changes.
"""
substrs = []
substrs.append("")
substrs.append("RWData Table")
substrs.append("------------")
for addr in range(self.table_start, self.table_end - 4 - 4, 16):
substrs.append(
f"0x{addr:08X}: "
f"0x{self.firmware.int(addr + 0):08X} "
f"0x{self.firmware.int(addr + 4):08X} "
f"0x{self.firmware.int(addr + 8):08X} "
f"0x{self.firmware.int(addr + 12):08X} "
)
addr = self.table_end - 8
substrs.append(f"0x{addr:08X}: 0x{self.firmware.int(addr + 0):08X}")
addr = self.table_end - 4
substrs.append(f"0x{addr:08X}: 0x{self.firmware.int(addr + 0):08X}")
substrs.append("")
return "\n".join(substrs)
class IntFirmware(Firmware):
FLASH_BASE = 0x08000000
FLASH_LEN = 0x00020000
RWDATA_OFFSET = None
RWDATA_LEN = 0
RWDATA_ITCM_IDX = None
RWDATA_DTCM_IDX = None
def __init__(self, firmware, elf):
super().__init__(firmware)
self._elf_f = open(elf, "rb")
self.elf = ELFFile(self._elf_f)
self.symtab = self.elf.get_section_by_name(".symtab")
if self.RWDATA_OFFSET is None:
self.rwdata = None
else:
self.rwdata = RWData(self, self.RWDATA_OFFSET, self.RWDATA_LEN)
def _verify(self):
h = hashlib.sha1(self).hexdigest()
if h != self.STOCK_ROM_SHA1_HASH:
raise InvalidStockRomError
def address(self, symbol_name, sub_base=False):
symbols = self.symtab.get_symbol_by_name(symbol_name)
if not symbols:
raise MissingSymbolError(f'Cannot find symbol "{symbol_name}"')
address = symbols[0]["st_value"]
if address == 0:
raise MissingSymbolError(f"{symbol_name} has address 0x0")
print(f" found {symbol_name} at 0x{address:08X}")
if sub_base:
address -= self.FLASH_BASE
return address
@property
def empty_offset(self):
"""Detect a series of 0x00 to figure out the end of the internal firmware.
Returns
-------
int
Offset into firmware where empty region begins.
"""
if self.rwdata is None:
search_start = self.STOCK_ROM_END
else:
search_start = self.rwdata.table_end
for addr in range(search_start, self.FLASH_LEN, 0x10):
if self[addr : addr + 256] == b"\x00" * 256:
int_pos_start = addr
break
else:
raise ParsingError("Couldn't find end of internal code.")
return int_pos_start
@property
def key(self):
return self[self.KEY_OFFSET : self.KEY_OFFSET + 16]
@property
def nonce(self):
return self[self.NONCE_OFFSET : self.NONCE_OFFSET + 8]
def _nonce_to_iv(nonce):
    # convert the 8-byte nonce into a 16-byte AES-CTR counter block
assert len(nonce) == 8
nonce = nonce[::-1]
# The lower 28bits (counter) will be updated in `crypt` method
return nonce + b"\x00\x00" + b"\x71\x23" + b"\x20\x00" + b"\x00\x00"
class ExtFirmware(Firmware):
FLASH_BASE = 0x9000_0000
FLASH_LEN = 0x0010_0000
ENC_START = 0
ENC_END = 0
def crypt(self, key, nonce):
"""Decrypts if encrypted; encrypts if in plain text."""
key = bytes(key[::-1])
iv = bytearray(_nonce_to_iv(nonce))
aes = AES.new(key, AES.MODE_ECB)
for offset in range(self.ENC_START, self.ENC_END, 128 // 8):
counter_block = iv.copy()
counter = (self.FLASH_BASE + offset) >> 4
counter_block[12] = ((counter >> 24) & 0x0F) | (counter_block[12] & 0xF0)
counter_block[13] = (counter >> 16) & 0xFF
counter_block[14] = (counter >> 8) & 0xFF
counter_block[15] = (counter >> 0) & 0xFF
cipher_block = aes.encrypt(bytes(counter_block))
for i, cipher_byte in enumerate(reversed(cipher_block)):
self[offset + i] ^= cipher_byte
class Device:
registry = {}
def __init_subclass__(cls, name, **kwargs):
super().__init_subclass__(**kwargs)
cls.name = name
cls.registry[name] = cls
def __init__(self, internal_bin, internal_elf, external_bin):
self.internal = self.Int(internal_bin, internal_elf)
self.external = self.Ext(external_bin)
self.compressed_memory = self.FreeMemory()
# Link all lookup tables to a single device instance
self.lookup = Lookup()
self.internal._lookup = self.lookup
self.external._lookup = self.lookup
self.compressed_memory._lookup = self.lookup
self.ext_offset = 0
self.int_pos = 0
self.compressed_memory_pos = 0
def _move_copy(
self, dst, dst_offset: int, src, src_offset: int, size: int, delete: bool
) -> int:
dst[dst_offset : dst_offset + size] = src[src_offset : src_offset + size]
if delete:
src.clear_range(src_offset, src_offset + size)
for i in range(size):
self.lookup[src.FLASH_BASE + src_offset + i] = (
dst.FLASH_BASE + dst_offset + i
)
return size
def _move(self, dst, dst_offset: int, src, src_offset: int, size: int) -> int:
return self._move_copy(dst, dst_offset, src, src_offset, size, True)
def _copy(self, dst, dst_offset: int, src, src_offset: int, size: int) -> int:
return self._move_copy(dst, dst_offset, src, src_offset, size, False)
# Convenience methods for move and copy
def _move_ext_to_int(self, ext_offset: int, int_offset: int, size: int) -> int:
return self._move(self.internal, int_offset, self.external, ext_offset, size)
def _copy_ext_to_int(self, ext_offset: int, int_offset: int, size: int) -> int:
return self._copy(self.internal, int_offset, self.external, ext_offset, size)
def _move_to_compressed_memory(
self, ext_offset: int, compressed_memory_offset: int, size: int
) -> int:
return self._move(
self.compressed_memory,
compressed_memory_offset,
self.external,
ext_offset,
size,
)
def crypt(self):
self.external.crypt(self.internal.key, self.internal.nonce)
def show(self, show=True):
import matplotlib.pyplot as plt
if len(self.external):
plt.subplot(2, 1, 1)
self.internal.show(show=False)
plt.subplot(2, 1, 2)
self.external.show(show=False)
else:
self.internal.show(show=False)
if show:
plt.show()
def compressed_memory_compressed_len(self, add_index=0):
index = self.compressed_memory_pos + add_index
if not index:
return 0
data = bytes(self.compressed_memory[:index])
if data in self.compressed_memory_compressed_len.memo:
return self.compressed_memory_compressed_len.memo[data]
compressed_data = lzma_compress(data)
self.compressed_memory_compressed_len.memo[data] = len(compressed_data)
return len(compressed_data)
compressed_memory_compressed_len.memo = {}
@property
def compressed_memory_free_space(self):
return len(self.compressed_memory) - self.compressed_memory_pos
@property
def int_free_space(self):
out = (
len(self.internal) - self.int_pos - self.compressed_memory_compressed_len()
)
if self.internal.rwdata is not None:
out -= self.internal.rwdata.compressed_len
return out
def rwdata_lookup(self, lower, size):
lower += self.external.FLASH_BASE
upper = lower + size
for i in range(0, len(self.internal.rwdata[self.internal.RWDATA_DTCM_IDX]), 4):
val = int.from_bytes(
self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][i : i + 4], "little"
)
if lower <= val < upper:
new_val = self.lookup[val]
print(f" updating rwdata 0x{val:08X} -> 0x{new_val:08X}")
self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][
i : i + 4
] = new_val.to_bytes(4, "little")
def rwdata_erase(self, lower, size):
"""
Erasing no longer used references makes it compress better.
"""
lower += 0x9000_0000
upper = lower + size
for i in range(0, len(self.internal.rwdata[self.internal.RWDATA_DTCM_IDX]), 4):
val = int.from_bytes(
self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][i : i + 4], "little"
)
if lower <= val < upper:
self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][
i : i + 4
] = b"\x00\x00\x00\x00"
def move_to_int(self, ext, size, reference):
if self.int_free_space < size:
raise NotEnoughSpaceError
new_loc = self.int_pos
if isinstance(ext, (bytes, bytearray)):
self.internal[self.int_pos : self.int_pos + size] = ext
else:
self._move_ext_to_int(ext, self.int_pos, size=size)
print(f" move_ext_to_int {hex(ext)} -> {hex(self.int_pos)}")
self.int_pos += round_up_word(size)
if reference is not None:
self.internal.lookup(reference)
return new_loc
def move_ext_external(self, ext, size, reference):
"""Explicitly just moves ext->ext data"""
        if isinstance(ext, (bytes, bytearray)):
            self.external[self.ext_offset : self.ext_offset + size] = ext
            # Assumed fix: raw bytes land at the current offset; the original
            # `ext + self.ext_offset` would raise TypeError for bytes input.
            new_loc = self.ext_offset
        else:
            self.external.move(ext, self.ext_offset, size=size)
            new_loc = ext + self.ext_offset
        if reference is not None:
            self.internal.lookup(reference)
        return new_loc
def move_ext(self, ext, size, reference):
"""Attempt to relocate in priority order:
1. Internal
2. External
This is the primary moving function for data that is already compressed
or is incompressible.
"""
try:
new_loc = self.move_to_int(ext, size, reference)
if isinstance(ext, int):
self.ext_offset -= round_down_word(size)
return new_loc
except NotEnoughSpaceError:
print(
f" {Fore.RED}Not Enough Internal space. Using external flash{Style.RESET_ALL}"
)
return self.move_ext_external(ext, size, reference)
def move_to_compressed_memory(self, ext, size, reference):
"""Attempt to relocate in priority order:
1. compressed_memory
2. Internal
3. External
This is the primary moving method for any compressible data.
"""
current_len = self.compressed_memory_compressed_len()
try:
self.compressed_memory[
self.compressed_memory_pos : self.compressed_memory_pos + size
] = self.external[ext : ext + size]
except NotEnoughSpaceError:
print(
f" {Fore.RED}compressed_memory full. Attempting to put in internal{Style.RESET_ALL}"
)
return self.move_ext(ext, size, reference)
        new_len = self.compressed_memory_compressed_len(size)
        diff = new_len - current_len
        # Guard: diff <= 0 means the data added nothing to the compressed
        # size; treat that as effectively infinite compression instead of
        # dividing by zero.
        compression_ratio = size / diff if diff > 0 else float("inf")
print(
f" {Fore.YELLOW}compression_ratio: {compression_ratio}{Style.RESET_ALL}"
)
if diff > self.int_free_space:
            print(
                f"    {Fore.RED}not putting into free memory due to not enough free "
                f"internal storage for compressed data.{Style.RESET_ALL}"
            )
self.compressed_memory.clear_range(
self.compressed_memory_pos, self.compressed_memory_pos + size
)
return self.move_ext_external(ext, size, reference)
elif compression_ratio < self.args.compression_ratio:
# Revert putting this data into compressed_memory due to poor space_savings
print(
f" {Fore.RED}not putting in free memory due to poor compression.{Style.RESET_ALL}"
)
self.compressed_memory.clear_range(
self.compressed_memory_pos, self.compressed_memory_pos + size
)
return self.move_ext(ext, size, reference)
# Even though the data is already moved, this builds the reference lookup
self._move_to_compressed_memory(ext, self.compressed_memory_pos, size=size)
print(
f" move_to_compressed_memory {hex(ext)} -> {hex(self.compressed_memory_pos)}"
)
if reference is not None:
self.internal.lookup(reference)
new_loc = self.compressed_memory_pos
self.compressed_memory_pos += round_up_word(size)
self.ext_offset -= round_down_word(size)
return new_loc
def __call__(self):
self.int_pos = self.internal.empty_offset
return self.patch()
def patch(self):
"""Device specific argument parsing and patching routine.
Called from __call__; not to be called otherwise.
"""
raise NotImplementedError
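# Illustrative sketch (not from the original source): how a concrete device
# class is expected to hook into the machinery above. The base-class name
# "Device", the constructor arguments, and the offsets are all hypothetical.
#
#     class MyDevice(Device):
#         def patch(self):
#             # Relocate a compressible 0x1000-byte blob from external flash
#             # offset 0x4_0000, fixing up the pointer stored at `reference`.
#             self.move_to_compressed_memory(0x4_0000, 0x1000, reference=0x0800_1234)
#
#     device = MyDevice(internal_bin, external_bin)  # assumed signature
#     device()  # __call__ seeds int_pos from internal.empty_offset, then runs patch()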
1640313
import os
import pytest
from tests import resources
from audiomate.corpus.conversion import base
class DummyAudioFileConverter(base.AudioFileConverter):
    """Test double: a_ret maps utterance id -> whether it already matches the
    target format; a_log records which utterances were format-checked;
    b_log records the batches of files handed to conversion."""
def __init__(self, a_ret, sampling_rate=16000, separate_file_per_utterance=False,
force_conversion=False):
super(DummyAudioFileConverter, self).__init__(
sampling_rate,
separate_file_per_utterance,
force_conversion
)
self.a_ret = a_ret
self.a_log = []
self.b_log = []
def _file_extension(self):
return 'wav'
def _does_utt_match_target_format(self, utterance):
self.a_log.append(utterance.idx)
return self.a_ret[utterance.idx]
def _convert_files(self, files):
self.b_log.append(files)
@pytest.fixture
def ds():
ds = resources.create_dataset()
file_1_path = resources.sample_wav_file('wav_1.wav')
file_2_path = resources.get_resource_path(('audio_formats', 'mp3_2_44_1k_16b.mp3'))
file_3_path = resources.get_resource_path(('audio_formats', 'flac_1_16k_16b.flac'))
file_4_path = resources.sample_wav_file('wav_4.wav')
ds.tracks['wav-1'].path = file_1_path
ds.tracks['wav_2'].path = file_2_path
ds.tracks['wav_3'].path = file_3_path
ds.tracks['wav_4'].path = file_4_path
return ds
class TestAudioFileConverter:
def test_convert(self, tmp_path, ds):
c = DummyAudioFileConverter({
'utt-1': True, 'utt-2': False, 'utt-3': False, 'utt-4': False, 'utt-5': True
})
res = c.convert(ds, str(tmp_path))
assert sorted(c.a_log) == sorted(ds.utterances.keys())
assert len(c.b_log) == 1
assert sorted(c.b_log[0]) == sorted([
(
ds.tracks['wav_2'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'wav_2.wav')
),
(
ds.tracks['wav_3'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'wav_3.wav')
)
])
assert set(res.utterances.keys()) == set(ds.utterances.keys())
assert set(res.tracks.keys()) == set(ds.tracks.keys())
assert set(res.subviews.keys()) == set(ds.subviews.keys())
assert res.utterances['utt-2'].track.path == os.path.join(str(tmp_path), 'wav_2.wav')
def test_convert_separate_file_per_utterance(self, tmp_path, ds):
c = DummyAudioFileConverter({
'utt-1': True, 'utt-2': False, 'utt-3': False, 'utt-4': False, 'utt-5': True
}, separate_file_per_utterance=True)
res = c.convert(ds, str(tmp_path))
assert sorted(c.a_log) == ['utt-1', 'utt-2', 'utt-5']
assert len(c.b_log) == 1
assert sorted(c.b_log[0]) == sorted([
(
ds.tracks['wav_2'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'utt-2.wav')
),
(
ds.tracks['wav_3'].path,
0.0,
1.5,
os.path.join(str(tmp_path), 'utt-3.wav')
),
(
ds.tracks['wav_3'].path,
1.5,
2.5,
os.path.join(str(tmp_path), 'utt-4.wav')
),
])
assert set(res.utterances.keys()) == set(ds.utterances.keys())
assert set(res.tracks.keys()) == {'wav-1', 'utt-2', 'utt-3', 'utt-4', 'wav_4'}
assert set(res.subviews.keys()) == set(ds.subviews.keys())
assert res.utterances['utt-2'].track.path == os.path.join(str(tmp_path), 'utt-2.wav')
def test_convert_separate_file_per_utterance_and_force(self, tmp_path, ds):
c = DummyAudioFileConverter({
'utt-1': True, 'utt-2': False, 'utt-3': False, 'utt-4': False, 'utt-5': True
}, separate_file_per_utterance=True, force_conversion=True)
res = c.convert(ds, str(tmp_path))
assert sorted(c.a_log) == []
assert len(c.b_log) == 1
assert sorted(c.b_log[0]) == sorted([
(
ds.tracks['wav-1'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'utt-1.wav')
),
(
ds.tracks['wav_2'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'utt-2.wav')
),
(
ds.tracks['wav_3'].path,
0.0,
1.5,
os.path.join(str(tmp_path), 'utt-3.wav')
),
(
ds.tracks['wav_3'].path,
1.5,
2.5,
os.path.join(str(tmp_path), 'utt-4.wav')
),
(
ds.tracks['wav_4'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'utt-5.wav')
),
])
assert set(res.utterances.keys()) == set(ds.utterances.keys())
assert set(res.tracks.keys()) == {'utt-1', 'utt-2', 'utt-3', 'utt-4', 'utt-5'}
assert set(res.subviews.keys()) == set(ds.subviews.keys())
assert res.utterances['utt-2'].track.path == os.path.join(str(tmp_path), 'utt-2.wav')
def test_convert_force(self, tmp_path, ds):
c = DummyAudioFileConverter({
'utt-1': True, 'utt-2': False, 'utt-3': False, 'utt-4': False, 'utt-5': True
}, force_conversion=True)
res = c.convert(ds, str(tmp_path))
assert sorted(c.a_log) == []
assert len(c.b_log) == 1
assert sorted(c.b_log[0]) == sorted([
(
ds.tracks['wav-1'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'wav-1.wav')
),
(
ds.tracks['wav_2'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'wav_2.wav')
),
(
ds.tracks['wav_3'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'wav_3.wav')
),
(
ds.tracks['wav_4'].path,
0,
float('inf'),
os.path.join(str(tmp_path), 'wav_4.wav')
),
])
assert set(res.utterances.keys()) == set(ds.utterances.keys())
assert set(res.tracks.keys()) == set(ds.tracks.keys())
assert set(res.subviews.keys()) == set(ds.subviews.keys())
assert res.utterances['utt-2'].track.path == os.path.join(str(tmp_path), 'wav_2.wav')