| id (string, length 3–8) | content (string, length 100–981k) |
|---|---|
11597556
|
import time

LPWAN_MAC_PATH = '/flash/sys/lpwan.mac'

def MAC_to_bytearray(mac):
    import struct
    # Strip separators and pack the 48-bit address into 8 big-endian bytes
    mac = mac.replace("-", "").replace(":", "")
    mac = int(mac, 16)
    return struct.pack('>Q', mac)

def write_mac(mac):
    with open(LPWAN_MAC_PATH, 'wb') as output:
        output.write(mac)
    time.sleep(0.5)

try:
    import machine
    new_mac = MAC_to_bytearray("{MAC_ADDRESS}")
    write_mac(new_mac)
    machine.reset()
except Exception:
    print("LPWAN MAC write failure")
|
11597579
|
from app import create_app, db
from app.models import (
User,
ScopeItem,
ConfigItem,
NatlasServices,
AgentConfig,
RescanTask,
Tag,
Agent,
AgentScript,
ScopeLog,
UserInvitation,
)
from app.instrumentation import initialize_sentryio
from sentry_sdk import capture_exception
from config import Config
config = Config()
initialize_sentryio(config)
try:
app = create_app(config)
except Exception as e:
capture_exception(e)
raise
@app.shell_context_processor
def make_shell_context():
return {
"db": db,
"User": User,
"ScopeItem": ScopeItem,
"ConfigItem": ConfigItem,
"NatlasServices": NatlasServices,
"AgentConfig": AgentConfig,
"RescanTask": RescanTask,
"Tag": Tag,
"Agent": Agent,
"AgentScript": AgentScript,
"UserInvitation": UserInvitation,
"ScopeLog": ScopeLog,
}
|
11597610
|
import os
import shutil
import time
import numpy as np
from baselines import logger
from collections import deque
import tensorflow as tf
from baselines.common import explained_variance, set_global_seeds
from ppo_iter.policies import build_policy
from baselines.common.tf_util import get_session
from baselines.common.mpi_util import sync_from_root
from ppo_iter.utils import get_docs, get_file_id, save_file_from_db, constfn, get_alpha
from ppo_iter.utils import scheduling, get_lr_fn, save_model, switch_training_model, get_all_burnin_data_dict
from ppo_iter.utils import safemean, save_data, load_batch
from ppo_iter.utils import db_uri, db_name
from ppo_iter.model import Model
try:
from mpi4py import MPI
except ImportError:
MPI = None
from ppo_iter.runner import Runner
def learn(*, network, env, total_timesteps, iter_loss, arch, _run,
seed=None, nsteps=2048, ent_coef=0.0, learning_rate=3e-4, lr_schedule=None,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
load_path=None, mpi_rank_weight=1, comm=None,
eval=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network:
The network model. Will only work with the one in this repo because of IBAC
env: baselines.common.vec_env.VecEnv
total_timesteps: int
number of timesteps (i.e. number of actions taken in the environment)
iter_loss: dict
the config dict as specified in default.yaml and/or overwritten by command line arguments
see sacred for further documentation
arch: dict
config dict similar to iter_loss
eval: dict
config dict similar to iter_loss
_run:
sacred Experiment._run object. Used for logging
ent_coef: float
policy entropy coefficient in the optimization objective
seed: float
random seed
nsteps: int
number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
learning_rate: float
learning rate
lr_schedule: None or str
If None, use a const. learning rate. If string, only "linear" is implemented at the moment
vf_coef: float
Coefficient for vf optimisation
max_grad_norm: float
Max gradient norm before it's clipped
gamma: float
Discount factor
lam: float
For GAE
log_interval: int
number of timesteps between logging events
nminibatches: int
number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int
number of training epochs per update
cliprange: float or function
clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int
number of timesteps between saving events
load_path: str
path to load the model from
**network_kwargs:
keyword arguments to the policy / network builder.
See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
# Set learning rate schedule
lr = get_lr_fn(lr_schedule, start_learning_rate=learning_rate)
set_global_seeds(seed)
session = get_session()
# if isinstance(lr, float): lr = constfn(lr)
# else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
model_fn = Model
policy = build_policy(env, network, arch, **network_kwargs)
# Instantiate the model object (that creates act_model and train_model)
def create_model(scope_name, **kwargs):
return model_fn(scope_name=scope_name, policy=policy, ob_space=ob_space, ac_space=ac_space,
nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight,
iter_loss=iter_loss, arch=arch, **kwargs)
# model_train is the teacher and always executed
# model_burnin is trained. If teacher and student are swapped, the parameters from burnin are
# copied into the teacher and burnin is re-initialized
model_train = create_model("ppo_iter_train")
model_burnin = create_model("ppo_iter_burnin",
target_vf=model_train.train_model.vf_run,
target_dist_param=model_train.train_model.pi_run)
get_session().run(tf.variables_initializer(tf.global_variables()))
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(session, global_variables, comm=comm) # pylint: disable=E1101
if load_path is not None:
print("Load model...")
if eval["load_id"]:
# Only works with mongodb as backend, not with tinydb
raise NotImplementedError("Requires MongoDB backend to work")
docs = get_docs(db_uri, db_name, "runs")
projection = {'config': True}
projection.update({'artifacts': True})
doc = docs.find_one({'_id': eval["load_id"]}, projection)
print("Loading model from db to disc")
file_id = get_file_id(doc, eval["file_name"])
load_path = os.path.join(logger.get_dir(), "loadmodel_{}".format(_run._id))
save_file_from_db(file_id, load_path , db_uri, db_name)
model_train.load(load_path)
if eval["switch_after_load"]:
switch_training_model(0, is_mpi_root, model_train, _run, iter_loss, session, comm,
save=False)
# Instantiate the runner object
runner = Runner(env=env, model=model_train, model_burnin=model_burnin, nsteps=nsteps, gamma=gamma, lam=lam,
iter_loss=iter_loss, eval=eval)
epinfobuf = deque(maxlen=100)
burnin_data_idx = 0
all_burnin_data = None
assert iter_loss["timesteps_anneal"] > iter_loss["v2_buffer_size"] * env.num_envs * nsteps, \
"{}, {}".format(iter_loss["timesteps_anneal"], iter_loss["v2_buffer_size"] * env.num_envs * nsteps)
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
current_cycle_count = 0
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
num_timesteps = update * nbatch
# Start timer
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# 'Burnin_phase' tells us whether we need regularization
cycle_count, alpha_reg, burnin_phase = scheduling(num_timesteps, iter_loss, "alpha_reg")
if cycle_count != current_cycle_count:
current_cycle_count = cycle_count
if iter_loss["v2"]:
logger.info("Training student")
train_student(
teacher=model_train,
student=model_burnin,
data=all_burnin_data,
iter_loss=iter_loss,
lr=lrnow,
cliprange=cliprangenow,
nminibatches=nminibatches,
session=session,
max_idx=burnin_data_idx,
nenvs=env.num_envs,
nsteps=nsteps,
id=_run._id,
)
switch_training_model(update, is_mpi_root, model_train, _run, iter_loss, session, comm)
# Resetting
all_burnin_data = None
burnin_data_idx = 0
logger.info("Switched training model")
tstart = time.perf_counter()
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, b_returns, masks, actions, values, b_values, neglogpacs, states, b_states, epinfos, burnin_data= \
runner.run(burnin_phase) #pylint: disable=E0632
if burnin_phase and (iter_loss["v2"] or eval["save_latent"]):
print("Saving data")
if iter_loss["v2_use_files"] or eval["save_latent"]:
# Burnin_data_idx is incremented by nsteps, which is nr. of files
save_data(burnin_data, burnin_data_idx, _run._id, nsteps)
else:
if all_burnin_data is None:
all_burnin_data = get_all_burnin_data_dict(
env, iter_loss, nsteps, comm)
for key, value in burnin_data.items():
all_burnin_data[key][burnin_data_idx:burnin_data_idx + nsteps] = value
burnin_data_idx += nsteps
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
mblossvals_burnin = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices_train = (arr[mbinds] for arr in (obs, returns, actions, values, neglogpacs))
slices_burnin = (arr[mbinds] for arr in (obs, b_returns, actions, b_values, neglogpacs))
stats_train, train_op_train, feed = model_train.train(
lrnow, cliprangenow, *slices_train,
)
stats_burnin, train_op_burnin, feed_burnin = model_burnin.train(
lrnow, cliprangenow, *slices_burnin, alpha=alpha_reg,
)
feed.update(feed_burnin) # Needs both!
fetches = {}
if eval["eval_only"]:
pass
session_outputs = {}
elif not burnin_phase or iter_loss["v2"]:
# For v2, normal PPO training is only the old policy,
# The student policy is trained differently
fetches.update({"stats_train": stats_train,})
fetches.update({"train_op": train_op_train})
session_outputs = session.run(fetches, feed)
elif (iter_loss["update_old_policy"] or
(iter_loss["update_old_policy_in_initial"] and cycle_count==0)):
fetches.update({"stats_burnin": stats_burnin})
fetches.update({"train_op": train_op_burnin})
session_outputs_burnin = session.run(fetches, feed)
fetches.update({"stats_train": stats_train,})
fetches.update({"train_op": train_op_train})
session_outputs = session.run(fetches, feed)
session_outputs.update(session_outputs_burnin)
else:
fetches.update({"stats_burnin": stats_burnin})
fetches.update({"train_op": train_op_burnin})
session_outputs = session.run(fetches, feed)
if "stats_train" in session_outputs.keys():
mblossvals.append(session_outputs["stats_train"])
else:
mblossvals.append(
[0 for loss in model_train.loss_names]
)
if "stats_burnin" in session_outputs.keys():
mblossvals_burnin.append(session_outputs["stats_burnin"])
else:
mblossvals_burnin.append(
[0 for loss in model_burnin.loss_names]
)
else: # recurrent version
raise NotImplementedError("Recurrent version not implemented")
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
lossvals_burnin = np.mean(mblossvals_burnin, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
# Calculates whether the value function is a good predictor of the returns (ev close to 1)
# or whether it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model_train.loss_names):
logger.logkv('loss/' + lossname, lossval)
for (lossval, lossname) in zip(lossvals_burnin, model_burnin.loss_names):
logger.logkv('loss_burnin/' + lossname, lossval)
logger.logkv("schedule/alpha_reg", alpha_reg)
logger.logkv("schedule/current_cycle_count", current_cycle_count)
logger.logkv("schedule/burnin_phase", burnin_phase)
logger.dumpkvs()
if is_mpi_root:
save_model(model_train, "model", update, _run)
return model_train
def train_student(teacher, student, data, iter_loss, lr, cliprange,
nminibatches, session, max_idx, nenvs, nsteps, id):
"""Train student for sequential ITER (i.e. v2=True).
Args:
teacher: teacher model
student: student model
data: either a np array or None if we use files to store the data
iter_loss: config dict
lr: learning rate
cliprange: cliprange used for gradients
nminibatches: How many minibatches are used in PPO?
session: TF session
max_idx: How many frames have been stored? Need to know when things are stored in files
nenvs: How many parallel envs are being executed
nsteps: How many steps per batch are executed
id: Run id, needed to find the folder with the files
Doesn't return anything, but updates the student
"""
use_data = data is not None
# In unit of steps
num_processed_parallel = int(max(nsteps // nminibatches, 1))
num_batches = int(max_idx // num_processed_parallel)
max_idx = num_batches * num_processed_parallel
if use_data:
obs = data["obs"][:max_idx]
actions = data["actions"][:max_idx]
returns = data["returns"][:max_idx]
neglogpacs = data["neglogpacs"][:max_idx]
values = data["values"][:max_idx]
# Get example so I know dimensionality of pi
test_obs = obs[0:num_processed_parallel]
sa = test_obs.shape
v, pi = teacher.train_model.value_and_pi(
test_obs.reshape(-1, *sa[2:]))
teacher_values = np.empty_like(values)
teacher_pis = np.empty(
shape=(nsteps * iter_loss["v2_buffer_size"], nenvs, pi.shape[-1]),
dtype=pi.dtype)
print("Re-evaluating")
for batch_nr in range(num_batches): # This leaves out the last (too small) batch
current_idx = batch_nr * num_processed_parallel
current_slice = slice(current_idx, current_idx + num_processed_parallel)
batch_idxs = list(range(current_idx, current_idx + num_processed_parallel))
batch_obs = (obs[current_slice]
if use_data
else load_batch(batch_idxs, id, nenvs)['obs'])
sa = batch_obs.shape
v, pi = teacher.train_model.value_and_pi(
batch_obs.reshape(sa[0] * sa[1], *sa[2:]),
)
pi_size = pi.shape[1]
v = v.reshape(*sa[:2])
pi = pi.reshape(*sa[:2], pi_size)
if use_data:
teacher_values[current_slice] = v
teacher_pis[current_slice] = pi
else:
save_data(
{"teacher_values": v, "teacher_pis": pi},
current_idx, id, num_processed_parallel, prefix="teacher_")
if use_data:
teacher_values = teacher_values.reshape(-1)
teacher_pis = teacher_pis.reshape(-1, pi_size)
obs = obs.reshape(-1, *sa[2:])
actions = actions.reshape(-1)
returns = returns.reshape(-1)
neglogpacs = neglogpacs.reshape(-1)
values = values.reshape(-1)
# Each file contains nenvs datapoints that are loaded together for speed
# On the other hand, when reading from memory, I can pick each datapoint individually
inds = (np.arange(max_idx * nenvs, dtype=np.int)
if use_data
else np.arange(max_idx, dtype=np.int))
# Similarly, adapt `num_processed_parallel` if we're reading from memory
if use_data: num_processed_parallel *= nenvs
# Calculate the fps (frame per second)
print("Training", flush=True)
total_steps = iter_loss["v2_number_epochs"] * num_batches
for iteration in range(iter_loss["v2_number_epochs"]):
tstart = time.perf_counter()
np.random.shuffle(inds)
# losses = defaultdict(list)
mblossvals_burnin = []
time_data = 0
time_train = 0
for batch_nr in range(0, num_batches):
current_idx = batch_nr * num_processed_parallel
mbinds = inds[current_idx:current_idx + num_processed_parallel]
current_step = iteration * num_batches + batch_nr
alpha_reg = get_alpha(current_step / total_steps, iter_loss["alpha_reg"])
it_start = time.perf_counter()
if use_data:
curr_obs = obs[mbinds]
curr_returns = returns[mbinds]
curr_actions = actions[mbinds]
curr_values = values[mbinds]
curr_neglogpacs = neglogpacs[mbinds]
curr_teacher_values = teacher_values[mbinds]
curr_teacher_pis = teacher_pis[mbinds]
else:
dd = load_batch(mbinds, id, nenvs)
dd.update(
load_batch(mbinds, id, nenvs, prefix="teacher_", pi_size=pi_size)
)
curr_obs = dd['obs'].reshape(-1, *sa[2:])
curr_returns = dd['returns'].reshape(-1)
curr_actions = dd['actions'].reshape(-1)
curr_values = dd['values'].reshape(-1)
curr_neglogpacs = dd['neglogpacs'].reshape(-1)
curr_teacher_values = dd['teacher_values'].reshape(-1)
curr_teacher_pis = dd['teacher_pis'].reshape(-1, pi_size)
it_mid = time.perf_counter()
stats, op, feed = student.train(
lr=lr,
cliprange=cliprange,
obs=curr_obs,
returns=curr_returns,
actions=curr_actions,
values=curr_values,
neglogpacs=curr_neglogpacs,
teacher_values=curr_teacher_values,
teacher_pis=curr_teacher_pis,
alpha=alpha_reg,
)
fetches = {
"stats_burnin": stats,
"train_op": op
}
session_outputs = session.run(fetches, feed)
mblossvals_burnin.append(session_outputs["stats_burnin"])
it_end = time.perf_counter()
time_data += it_mid - it_start
time_train += it_end - it_mid
lossvals_burnin = np.mean(mblossvals_burnin, axis=0)
tnow = time.perf_counter()
fps = int(len(inds) / (tnow - tstart))
if not use_data: fps *= nenvs
logger.logkv("distill_v2/iteration", iteration)
logger.logkv("distill_v2/fps", fps)
logger.logkv("distill_v2/data_loading_time", time_data / (time_data + time_train))
logger.logkv("distill_v2/alpha_reg", alpha_reg)
for (lossval, lossname) in zip(lossvals_burnin, student.loss_names):
logger.logkv('distill_v2/' + lossname, lossval)
logger.dumpkvs()
logger.info("Trained v2 student")
if iter_loss["v2_use_files"]:
shutil.rmtree(f"{id}_data")
logger.info(f"Removed data folder {id}_data")
|
11597615
|
import networkx as nx
import netomaton as ntm
if __name__ == '__main__':
"""
In Smith's Restricted NA Game of Life example, an underlying network is defined that restricts which links can be
formed. Alternatively, one can begin with a lattice network and change the link weights, or add additional links
between the already connected nodes.
Nevertheless, here, the Restricted NA Game of Life example is implemented using an underlying lattice, as
described in the paper: Smith, <NAME>, et al. "Network automata: Coupling structure and function in
dynamic networks." Advances in Complex Systems 14.03 (2011): 317-339.
Figure 1.
"""
underlying_network = ntm.topology.lattice(dim=(1, 6, 6), periodic=True)
initial_network = ntm.Network(n=36)
# spaceship
initial_network.add_edge(9, 10)
initial_network.add_edge(10, 9)
initial_network.add_edge(3, 9)
initial_network.add_edge(9, 3)
initial_network.add_edge(15, 9)
initial_network.add_edge(9, 15)
initial_network.add_edge(15, 14)
initial_network.add_edge(14, 15)
initial_network.add_edge(8, 14)
initial_network.add_edge(14, 8)
initial_network.add_edge(2, 8)
initial_network.add_edge(8, 2)
initial_network.add_edge(7, 8)
initial_network.add_edge(8, 7)
def topology_rule(ctx):
curr_network = ctx.network
new_network = ctx.network.copy()
for i in underlying_network.nodes:
in_degree_i = curr_network.in_degree(i)
for j in underlying_network.nodes:
if i == j:
continue
in_degree_j = curr_network.in_degree(j)
combined_in_degrees = in_degree_i + in_degree_j
# a non-existent link will be “born” if the combined degrees of the
# two nodes between which it might exist is 2
if combined_in_degrees == 2 and not curr_network.has_edge(j, i) and underlying_network.has_edge(j, i):
new_network.add_edge(j, i)
# a link will survive if the combined degree of the two nodes it connects is 3
elif combined_in_degrees == 3 and curr_network.has_edge(j, i):
pass
# a link dies if it exists
elif curr_network.has_edge(j, i):
new_network.remove_edge(j, i)
return new_network
trajectory = ntm.evolve(network=initial_network, topology_rule=topology_rule, timesteps=6)
pos = nx.spring_layout(ntm.topology.lattice(dim=(1, 6, 6), periodic=False).to_networkx())
ntm.animate_network(trajectory, layout=pos, interval=500)
|
11597622
|
class Solution:
    def canCompleteCircuit(self, gas, cost):
        # tg/tc: running totals of gas and cost; d: running surplus;
        # md: most negative surplus seen so far; s: index where that minimum occurred
        tg = tc = md = d = s = 0
        for i, (g, c) in enumerate(zip(gas, cost)):
            tg += g
            tc += c
            d = tg - tc
            if d < md:
                md, s = d, i
        # No solution if total cost exceeds total gas; otherwise start just
        # after the point where the running surplus was at its minimum
        return -1 if tc > tg else (s + 1) % len(gas)
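# Sketch of a usage example (inputs taken from the classic gas-station problem,
# not from the original snippet): starting at index 3 the tank never runs dry.
#   Solution().canCompleteCircuit([1, 2, 3, 4, 5], [3, 4, 5, 1, 2])  # -> 3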
|
11597630
|
from models import User, WaitingListUser, ActiveChatsUser, db
from campaign import send_campaign
from utilities import send_message
from templates import TextTemplate
import os
import config
APP_URL = os.environ.get('APP_URL', config.APP_URL)
def log_waitlisted_users():
waitlist = WaitingListUser.query.all()
i=0
print("WAITLIST IS BELOW")
try:
for user in waitlist:
id = user.id
u = User.query.get(id)
print(i, u.name, user.gender, user.interest)
i = i+1
except Exception as e:
print("LOG WAITLIST ERROR", e)
def update_users():
for u in User.query.all():
u.status = False
u.messages = ""
db.session.commit()
def send_emoticon(id):
happy = u'\u2B50'
print("EMOTICON")
message = TextTemplate(text="Hi "+happy)
send_message(message.get_message(), id=id)
print("EMOTICON 1")
def handle_debug(text, id):
if text[3:] == "waitlist":
log_waitlisted_users()
elif text[3:] == "update":
update_users()
elif text[3:] == "campaign":
send_campaign()
elif text[3:] == "emoticon":
send_emoticon(id)
elif text[3:] == "webview":
message = {
"attachment":{
"type":"template",
"payload":{
"template_type":"button",
"text":"Test Webview?",
"buttons":[
{
"type":"web_url",
"url":APP_URL+"webview/",
"title":"Show page",
"webview_height_ratio": "compact"
}
]
}
}
}
send_message(message=message, id=id)
|
11597692
|
from __future__ import absolute_import
from chart_studio.api.v2 import files
from chart_studio.tests.test_plot_ly.test_api import PlotlyApiTestCase
class FilesTest(PlotlyApiTestCase):
def setUp(self):
super(FilesTest, self).setUp()
# Mock the actual api call, we don't want to do network tests here.
self.request_mock = self.mock("chart_studio.api.v2.utils.requests.request")
self.request_mock.return_value = self.get_response()
# Mock the validation function since we can test that elsewhere.
self.mock("chart_studio.api.v2.utils.validate_response")
def test_retrieve(self):
files.retrieve("hodor:88")
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, "get")
self.assertEqual(url, "{}/v2/files/hodor:88".format(self.plotly_api_domain))
self.assertEqual(kwargs["params"], {})
def test_retrieve_share_key(self):
files.retrieve("hodor:88", share_key="foobar")
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, "get")
self.assertEqual(url, "{}/v2/files/hodor:88".format(self.plotly_api_domain))
self.assertEqual(kwargs["params"], {"share_key": "foobar"})
def test_update(self):
new_filename = "..zzZ ..zzZ"
files.update("hodor:88", body={"filename": new_filename})
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, "put")
self.assertEqual(url, "{}/v2/files/hodor:88".format(self.plotly_api_domain))
self.assertEqual(kwargs["data"], '{{"filename": "{}"}}'.format(new_filename))
def test_trash(self):
files.trash("hodor:88")
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, "post")
self.assertEqual(
url, "{}/v2/files/hodor:88/trash".format(self.plotly_api_domain)
)
def test_restore(self):
files.restore("hodor:88")
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, "post")
self.assertEqual(
url, "{}/v2/files/hodor:88/restore".format(self.plotly_api_domain)
)
def test_permanent_delete(self):
files.permanent_delete("hodor:88")
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
self.assertEqual(method, "delete")
self.assertEqual(
url, "{}/v2/files/hodor:88/permanent_delete".format(self.plotly_api_domain)
)
def test_lookup(self):
# requests does urlencode, so don't worry about the `' '` character!
path = "/mah plot"
parent = 43
user = "someone"
exists = True
files.lookup(path=path, parent=parent, user=user, exists=exists)
assert self.request_mock.call_count == 1
args, kwargs = self.request_mock.call_args
method, url = args
expected_params = {
"path": path,
"parent": parent,
"exists": "true",
"user": user,
}
self.assertEqual(method, "get")
self.assertEqual(url, "{}/v2/files/lookup".format(self.plotly_api_domain))
self.assertEqual(kwargs["params"], expected_params)
|
11597725
|
import matplotlib.patches as patches
import matplotlib.pyplot as plt
def cal_IOU(bb1, bb2):
i_x1 = max(bb1[0], bb2[0])
i_y1 = max(bb1[1], bb2[1])
i_x2 = min(bb1[2], bb2[2])
i_y2 = min(bb1[3], bb2[3])
i_h = i_y2 - i_y1
i_w = i_x2 - i_x1
if i_h > 0 and i_w > 0:
i = i_h * i_w
u = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1]) + (bb2[2] - bb2[0]) * (bb2[3] - bb2[1]) - i
return i / u
else:
return 0
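# Small worked example (hypothetical boxes): two overlapping 2x2 squares share
# an intersection of 1 and a union of 4 + 4 - 1 = 7, so
#   cal_IOU([0, 0, 2, 2], [1, 1, 3, 3])  # -> 1/7 ~= 0.1429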
def take_probability(elem):
return elem[4]
# list_bbox_p = [[x1, y1, x2, y2, p],[...]]
def nonmax_supression(list_bbox_p, threshold=0.1):
list_bbox_p.sort(key=take_probability, reverse=True)
# print(list_bbox_p)
result = []
while len(list_bbox_p) > 0:
temp = list_bbox_p[0]
list_bbox_p.remove(list_bbox_p[0])
# print('pop', temp)
for bb in list_bbox_p:
iou = cal_IOU(temp[:-1], bb[:-1])
# print(temp, bb, iou)
if iou > threshold:
list_bbox_p.remove(bb)
# print('removed', bb)
result.append(temp)
return result
if __name__ == '__main__':
img = plt.imread('image.png')
h, w = img.shape[:-1]
bboxs = [[10, 10, 500, 500, 0.4], [100, 100, 400, 400, 0.7], [200, 200, 600, 600, 0.6]
, [30, 30, 550, 550, 0.61], [90, 90, 390, 390, 0.62]]
f, ax = plt.subplots(1)
plt.imshow(img)
for i in range(len(bboxs)):
rect = patches.Rectangle((bboxs[i][0], bboxs[i][1]), bboxs[i][2] - bboxs[i][0], bboxs[i][3] - bboxs[i][1],
linewidth=2, edgecolor='r', facecolor='none')
ax.add_patch(rect)
plt.text(bboxs[i][0], bboxs[i][1], bboxs[i][4])
supressed_bboxs = nonmax_supression(bboxs)
print(supressed_bboxs)
for i in range(len(supressed_bboxs)):
rect = patches.Rectangle((supressed_bboxs[i][0], supressed_bboxs[i][1]),
supressed_bboxs[i][2] - supressed_bboxs[i][0],
supressed_bboxs[i][3] - supressed_bboxs[i][1],
linewidth=4, edgecolor='g', facecolor='none')
ax.add_patch(rect)
plt.show()
|
11597726
|
from .resnet18 import ResNet18
from .wrn import WideResNet
from .densenet import DenseNet3
import torch
def get_network(
name: str, num_classes: int, num_clusters: int = 0, checkpoint: str = None
):
if name == "res18":
net = ResNet18(num_classes=num_classes, dim_aux=num_clusters)
elif name == "wrn":
net = WideResNet(
depth=28,
widen_factor=10,
dropRate=0.0,
num_classes=num_classes,
dim_aux=num_clusters,
)
elif name == "densenet":
net = DenseNet3(
depth=100,
growth_rate=12,
reduction=0.5,
bottleneck=True,
dropRate=0.0,
num_classes=num_classes,
dim_aux=num_clusters,
)
else:
raise Exception("Unexpected Network Architecture!")
if checkpoint:
net.load_state_dict(torch.load(checkpoint), strict=False)
print("Model Loading Completed!")
return net
|
11597770
|
from elasticsearch_dsl import Document, Keyword, Text
class LetterDocument(Document):
title = Text(
analyzer="snowball", required=True, fields={"raw": Keyword()}, term_vector="yes"
)
body = Text(analyzer="snowball", required=True, term_vector="yes")
content = Text(analyzer="snowball", multi=True, term_vector="yes")
letter_id = Text(multi=False)
class Index:
name = "letter"
settings = {
"number_of_shards": 1,
"number_of_replicas": 0,
}
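# Minimal usage sketch (assumes a reachable Elasticsearch cluster; the host and
# field values below are illustrative and not part of the original module):
#
#   from elasticsearch_dsl import connections
#   connections.create_connection(hosts=["localhost:9200"])
#   LetterDocument.init()                  # create the "letter" index with the settings above
#   doc = LetterDocument(title="Hello", body="...", letter_id="1")
#   doc.save()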
|
11597823
|
import datetime
import time
import os

def log(fileName, line):
    # Append a timestamped line to the log file
    with open(fileName, "a", errors='ignore') as f:
        f.write(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + " " + line + "\n")

def readFileContents(fileName):
    # Return the file contents, or an empty string if the file is missing or unreadable
    if os.path.exists(fileName):
        try:
            with open(fileName, "r+", errors='ignore') as f:
                return f.read()
        except IOError:
            return ""
    return ""
|
11597832
|
from __future__ import print_function
from __future__ import unicode_literals
import sys
from Registry import *
def rec(key):
for value in key.values():
print("%s : %s : %s" % (key.path(), value.name(), value.value_type_str()))
for subkey in key.subkeys():
rec(subkey)
reg = Registry.Registry(sys.argv[1])
rec(reg.root())
|
11597833
|
from django.db import models
from django.db.models import Count, Max, Q, Sum, Case, When, IntegerField, Value
from django.urls import reverse # TODO ideally this shouldn't be in the model
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from .base import LifeTimeTrackingModel
import json
KNOWN_COURSE_PLATFORMS = {
"www.edx.org/": "edX",
"www.futurelearn.com/": "FutureLearn",
"ocw.mit.edu/": "MIT OpenCourseWare",
"www.coursera.org/": "Coursera",
"www.khanacademy.org/": "Khan Academy",
"www.lynda.com/": "Lynda",
"oli.cmu.edu/": "Open Learning Initiative",
"www.udemy.com/": "Udemy",
"www.udacity.com/": "Udacity",
"course.oeru.org/": "OERu",
"www.open.edu/openlearn/": "OpenLearn",
"www.codecademy.com/": "CodeAcademy",
}
def course_platform_from_url(url):
platform = ""
for domain in KNOWN_COURSE_PLATFORMS.keys():
if domain in url:
platform = KNOWN_COURSE_PLATFORMS[domain]
return platform
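# Example (illustrative URLs): any link containing a known domain maps to its
# platform name; everything else falls through to an empty string.
#   course_platform_from_url("https://www.edx.org/course/cs50")  # -> "edX"
#   course_platform_from_url("https://example.org/course")       # -> ""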
class Course(LifeTimeTrackingModel):
OER_LICENSES = ['CC-BY', 'CC-BY-SA', 'CC-BY-NC', 'CC-BY-NC-SA', 'Public Domain']
title = models.CharField(max_length=128)
provider = models.CharField(max_length=256)
link = models.URLField()
caption = models.CharField(max_length=500)
on_demand = models.BooleanField()
topics = models.CharField(max_length=500)
language = models.CharField(max_length=6) # ISO language code
created_by = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)
unlisted = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
license = models.CharField(max_length=128, blank=True)
platform = models.CharField(max_length=256, blank=True)
overall_rating = models.FloatField(default=0)
total_ratings = models.SmallIntegerField(default=0)
rating_step_counts = models.TextField(default="{}") # JSON value
discourse_topic_url = models.URLField(blank=True)
def __str__(self):
return self.title
def topic_list(self):
return self.topics.split(',')
def rating_step_counts_json(self):
return json.loads(self.rating_step_counts)
def star_max(self):
""" return the number of ratings attributed to the most popular rating """
steps = self.rating_step_counts_json()
return max(steps.values())
def similar_courses(self):
topics = self.topics.split(',')
query = Q(topics__icontains=topics[0])
for topic in topics[1:]:
query = Q(topics__icontains=topic) | query
courses = Course.objects.filter(unlisted=False, deleted_at__isnull=True).filter(query).exclude(id=self.id).annotate(
num_learning_circles=Sum(
Case(
When(
studygroup__deleted_at__isnull=True, then=Value(1),
studygroup__course__id=F('id')
),
default=Value(0), output_field=models.IntegerField()
)
)
)[:3]
return courses
def detect_platform_from_link(self):
platform = course_platform_from_url(self.link)
self.platform = platform
self.save()
def discourse_topic_default_body(self):
return _("<p>What recommendations do you have for other facilitators who are using \"{}\"? Consider sharing additional resources you found helpful, activities that worked particularly well, and some reflections on who this course is best suited for. For more information, see this course on <a href='https://learningcircles.p2pu.org{}'>P2PU’s course page</a>.</p>".format(self.title, reverse('studygroups_course_page', args=(self.id,))))
def get_course_reviews(self):
from studygroups.models import StudyGroup
from surveys.models import LearnerSurveyResponse
from surveys.models import FacilitatorSurveyResponse
from surveys.models import learner_survey_summary
from surveys.models import facilitator_survey_summary
studygroup_ids = StudyGroup.objects.filter(course=self.id).distinct().values_list("id", flat=True)
learner_surveys = LearnerSurveyResponse.objects.filter(study_group__in=studygroup_ids)
facilitator_surveys = FacilitatorSurveyResponse.objects.filter(study_group__in=studygroup_ids)
all_surveys = list(map(learner_survey_summary, learner_surveys))
all_surveys += list(map(facilitator_survey_summary, facilitator_surveys))
return all_surveys
|
11597841
|
from django.db import models
from django_serializable_model import SerializableModel
class User(SerializableModel):
email = models.CharField(max_length=765, blank=True)
name = models.CharField(max_length=100)
# whitelisted fields that are allowed to be seen
WHITELISTED_FIELDS = set([
'name',
])
def serialize(self, *args, **kwargs):
"""Override serialize method to only serialize whitelisted fields"""
fields = kwargs.pop('fields', self.WHITELISTED_FIELDS)
return super(User, self).serialize(*args, fields=fields)
class Settings(SerializableModel):
user = models.OneToOneField(User, primary_key=True,
on_delete=models.CASCADE)
email_notifications = models.BooleanField(default=False)
def serialize(self, *args):
"""Override serialize method to not serialize the user field"""
return super(Settings, self).serialize(*args, exclude=['user'])
class Post(SerializableModel):
user = models.ForeignKey(User, on_delete=models.CASCADE)
text = models.TextField()
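# Usage sketch (behaviour inferred from the overrides above, assuming
# django_serializable_model's serialize() returns a dict of field values):
#   User(name="Ada", email="ada@example.com").serialize()  # only 'name' is included
#   Settings(user=some_user).serialize()                   # 'user' is excluded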
|
11597845
|
import abc
import numpy as np
class BaseSampler(abc.ABC):
@abc.abstractmethod
def __iter__(self):
pass
def __len__(self):
raise NotImplementedError
class BatchSampler(BaseSampler):
def __init__(self, dataset, batch_size, shuffle=True):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
def _iterator(self):
indices = np.arange(self.dataset.size, dtype=np.int32)
if self.shuffle:
np.random.shuffle(indices)
index = 0
while index < self.dataset.size:
end = min(index + self.batch_size, self.dataset.size)
yield self.dataset[indices[index:end]]
index = end
def __iter__(self):
return self._iterator()
def __len__(self):
return self.dataset.size // self.batch_size
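# Minimal usage sketch (the ArrayDataset class below is hypothetical; BatchSampler only
# needs a `.size` attribute and numpy fancy indexing via `__getitem__`):
if __name__ == '__main__':
    class ArrayDataset:
        def __init__(self, data):
            self.data = np.asarray(data)
            self.size = len(self.data)

        def __getitem__(self, indices):
            return self.data[indices]

    sampler = BatchSampler(ArrayDataset(np.arange(10)), batch_size=4, shuffle=False)
    for batch in sampler:
        print(batch)  # arrays of length 4, 4 and 2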
|
11597855
|
from copy import deepcopy
from agent import pipeline, source, di
from agent.modules import logger
logger_ = logger.get_logger('scripts.upgrade.3.13.0', stdout=True)
di.init()
for pipeline_ in pipeline.repository.get_by_type(source.TYPE_INFLUX):
logger_.info(f'Updating `{pipeline_.name}` pipeline')
values = {}
config = deepcopy(pipeline_.config)
for value in config['value']['values']:
values[value] = config.get("target_type", "gauge")
config['values'] = values
del config['value']
pipeline_.set_config(config)
pipeline.manager.update(pipeline_)
pipeline.repository.save(pipeline_)
logger_.info('Done')
logger_.info('Finished influx pipelines update')
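# Illustration of the transformation this script applies (hypothetical config; the
# real structure comes from pipeline_.config): a pipeline whose config contained
#   {'value': {'values': ['cpu', 'mem']}, 'target_type': 'counter'}
# ends up with
#   {'values': {'cpu': 'counter', 'mem': 'counter'}, 'target_type': 'counter'}
# with the old 'value' key removed; 'gauge' is used when 'target_type' is absent.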
|
11597858
|
for row in range(7):
    for col in range(7):
        if (col == 0) or (col == 6 and (row != 0 and row != 3 and row != 6)) or ((row == 0 or row == 3 or row == 6) and (col > 0 and col < 6)):
            print("*", end="")
        else:
            print(end=" ")
    print()
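# Expected output (each horizontal-bar row ends with a trailing space):
#   ******
#   *     *
#   *     *
#   ******
#   *     *
#   *     *
#   ******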
|
11597934
|
import logging
import os
from piplapis.data import Person, Email, Name, URL, Username, UserID, Image, Phone, Address, OriginCountry, Language, \
DOB, Gender
from piplapis.data.containers import Relationship
from piplapis.search import SearchAPIRequest, SearchAPIResponse
from unittest import TestCase
# Tests for the pipl API using the python client library
# These tests expect two environment variables to be set:
# TESTING_KEY: the API key to use
# API_TESTS_BASE_URL: the base URL on which to execute requests
handler = logging.StreamHandler()
logging.getLogger('piplapis').addHandler(handler)
logger = logging.getLogger('piplapis')
logger.warning("The api_tests module API in piplapis.tests is deprecated & does not receive updates.")
class APITests(TestCase):
def setUp(self):
SearchAPIRequest.default_api_key = os.getenv("TESTING_KEY")
SearchAPIRequest.BASE_URL = os.getenv("API_TESTS_BASE_URL") + "?developer_class=business_premium"
def get_broad_search_request(self):
return SearchAPIRequest(first_name="brian", last_name="perks")
def get_narrow_search_request(self):
return SearchAPIRequest(email="<EMAIL>")
def get_narrow_md5_search_request(self):
return SearchAPIRequest(person=Person(fields=[
Email(address_md5="e34996fda036d60aa2a595ca86ed8fef")]))
def test_basic_request(self):
response = self.get_broad_search_request().send()
self.assertEquals(response.http_status_code, 200)
def test_search_makes_a_match_request(self):
response = self.get_narrow_search_request().send()
self.assertEquals(response.http_status_code, 200)
self.assertIsNotNone(response.person)
def test_recursive_request(self):
response = self.get_broad_search_request().send()
self.assertGreater(len(response.possible_persons), 0)
second_response = SearchAPIRequest(search_pointer=response.possible_persons[0].search_pointer).send()
self.assertIsNotNone(second_response.person)
def test_make_sure_hide_sponsored_works(self):
request = self.get_narrow_search_request()
request.hide_sponsored = True
response = request.send()
sponsored_links = [x for x in response.person.urls if x.sponsored]
self.assertEquals(len(sponsored_links), 0)
def test_make_sure_we_can_hide_inferred(self):
request = self.get_narrow_search_request()
request.minimum_probability = 1.
response = request.send()
inferred_data = [x for x in response.person.all_fields if x.inferred]
self.assertEquals(len(inferred_data), 0)
def test_make_sure_we_get_inferred(self):
request = self.get_narrow_search_request()
request.minimum_probability = .5
response = request.send()
inferred_data = [x for x in response.person.all_fields if x.inferred]
self.assertGreater(len(inferred_data), 0)
def test_make_sure_show_sources_matching_works(self):
request = self.get_narrow_search_request()
request.show_sources = "matching"
response = request.send()
self.assertGreater(len(response.sources), 0)
non_matching_sources = [x for x in response.sources if x.person_id != response.person.person_id]
self.assertEquals(len(non_matching_sources), 0)
def test_make_sure_show_sources_all_works(self):
request = self.get_narrow_search_request()
request.show_sources = "all"
response = request.send()
non_matching_sources = [x for x in response.sources if x.person_id != response.person.person_id]
self.assertGreater(len(non_matching_sources), 0)
def test_make_sure_minimum_match_works(self):
request = self.get_broad_search_request()
request.minimum_match = 0.7
response = request.send()
persons_below_match = [x for x in response.possible_persons if x.match < 0.7]
self.assertEquals(len(persons_below_match), 0)
def test_make_sure_deserialization_works(self):
response = SearchAPIRequest(email="<EMAIL>").send()
self.assertEquals(response.person.names[0].display, "<NAME>")
self.assertEquals(response.person.emails[1].address_md5, "999e509752141a0ee42ff455529c10fc")
self.assertEquals(response.person.usernames[0].content, "superman@facebook")
self.assertEquals(response.person.addresses[1].display, "1000-355 Broadway, Metropolis, Kansas")
self.assertEquals(response.person.jobs[0].display, "Field Reporter at The Daily Planet (2000-2012)")
self.assertEquals(response.person.educations[0].degree, "B.Sc Advanced Science")
def test_make_sure_md5_search_works(self):
self.assertIsNotNone(self.get_narrow_md5_search_request().send().person)
def test_contact_datatypes_are_as_expected(self):
SearchAPIRequest.BASE_URL = os.getenv("API_TESTS_BASE_URL") + "?developer_class=contact"
response = self.get_narrow_search_request().send()
available_data_types = {Name, Gender, DOB, URL, Language, OriginCountry, Address, Phone}
for field in response.person.all_fields:
if type(field) == Email:
self.assertEqual(field.address, '<EMAIL>')
else:
self.assertIn(type(field), available_data_types)
def test_social_datatypes_are_as_expected(self):
SearchAPIRequest.BASE_URL = os.getenv("API_TESTS_BASE_URL") + "?developer_class=social"
response = self.get_narrow_search_request().send()
available_data_types = {Name, Gender, DOB, Language, OriginCountry, Address, Phone, Username, UserID, Image,
Relationship, URL}
for field in response.person.all_fields:
if type(field) == Email:
self.assertEqual(field.address, '<EMAIL>')
else:
self.assertIn(type(field), available_data_types)
def test_forward_compatibility(self):
SearchAPIRequest.BASE_URL += "&show_unknown_fields=1"
request = SearchAPIRequest(email="<EMAIL>")
response = request.send()
self.assertIsNotNone(response.person)
def test_make_sure_insufficient_search_isnt_sent(self):
request = SearchAPIRequest(first_name="brian")
try:
request.send()
failed = False
except Exception as e:
failed = True
self.assertTrue(failed)
def test_make_sure_field_count_is_correct_on_premium(self):
res = self.get_narrow_search_request().send()
self.assertEqual(res.available_data.premium.relationships, 8)
self.assertEqual(res.available_data.premium.usernames, 2)
self.assertEqual(res.available_data.premium.jobs, 13)
self.assertEqual(res.available_data.premium.addresses, 9)
self.assertEqual(res.available_data.premium.phones, 4)
self.assertEqual(res.available_data.premium.emails, 4)
self.assertEqual(res.available_data.premium.languages, 1)
self.assertEqual(res.available_data.premium.names, 1)
self.assertEqual(res.available_data.premium.dobs, 1)
self.assertEqual(res.available_data.premium.images, 2)
self.assertEqual(res.available_data.premium.genders, 1)
self.assertEqual(res.available_data.premium.educations, 2)
self.assertEqual(res.available_data.premium.social_profiles, 3)
def test_make_sure_field_count_is_correct_on_basic(self):
SearchAPIRequest.BASE_URL = os.getenv("API_TESTS_BASE_URL") + "?developer_class=social"
res = self.get_narrow_search_request().send()
self.assertEqual(res.available_data.basic.relationships, 7)
self.assertEqual(res.available_data.basic.usernames, 2)
self.assertEqual(res.available_data.basic.jobs, 12)
self.assertEqual(res.available_data.basic.addresses, 6)
self.assertEqual(res.available_data.basic.phones, 1)
self.assertEqual(res.available_data.basic.emails, 3)
self.assertEqual(res.available_data.basic.user_ids, 4)
self.assertEqual(res.available_data.basic.languages, 1)
self.assertEqual(res.available_data.basic.names, 1)
self.assertEqual(res.available_data.basic.dobs, 1)
self.assertEqual(res.available_data.basic.images, 2)
self.assertEqual(res.available_data.basic.genders, 1)
self.assertEqual(res.available_data.basic.educations, 2)
self.assertEqual(res.available_data.basic.social_profiles, 3)
def test_response_class_default(self):
request = SearchAPIRequest(email="<EMAIL>")
response = request.send()
self.assertIsInstance(response, SearchAPIResponse)
def test_response_class_custom(self):
custom_response_class = type('CustomResponseClass', (SearchAPIResponse,), {})
request = SearchAPIRequest(email="<EMAIL>", response_class=custom_response_class)
response = request.send()
self.assertIsInstance(response, custom_response_class)
|
11597948
|
from excel4lib.utils import *
from excel4lib.sheet.cell import *
from excel4lib.lang import *
from .excel4_name import *
class Excel4Instruction(Cell):
'''
Represents an Excel4 instruction (a formula, an empty cell, a variable, or an obfuscated formula) which should be placed
at a specified address.
Excel4Instruction is responsible for:
- returning the cell address;
- translating cell address;
- generating tag;
- storing information about language.
'''
def __init__(self, x, y):
Cell.__init__(self, x, y)
# Characters used to indicate row and column
self.row_character = ""
self.col_character = ""
# Save current language
self.language = Excel4Translator.language
# Generate random tag
self.tag = random_tag(random_string(5))
self.start_cell = None
# Change reference style to A1
if not Excel4Config.rc_reference_style:
self.reference_style = CellReferenceStyle.A1_STYLE
self.translate_address()
# If False then obfuscation of this object is disabled
# This flag has the highest priority.
# The object will not be obfuscated even if obfuscation is enabled in the options
self._obfuscate = True
self._obfuscate_formula = True
self._spread = True
# def get_start_address(self):
# if not self.start_cell:
# return self.get_address()
# return self.start_cell.get_address()
def set_language(self, lang):
'''
Sets `language` to `lang` and translates.
:param lang: name of the language
'''
self.language = lang
self.translate_address()
def get_reference(self, lang=None):
'''
Returns instruction address.
Address is translated to language passed in `lang`. If `lang` is None then language from `language` property is taken.
:param lang: language to which instruction should be translated
:return: string representing address
'''
t_r = self.row_character
t_c = self.col_character
if lang and (lang != self.language):
t_r = Excel4Translator.get_row_character(lang)
t_c = Excel4Translator.get_col_character(lang)
r = self.get_cell_address(t_r, t_c)
return r
def translate_address(self, lang=None):
'''
Translates characters indicating row and column.
:param lang: language in which address should be returned. If None then lang is equal to self.language
'''
if not lang:
lang = self.language
self.row_character = Excel4Translator.get_row_character(lang)
self.col_character = Excel4Translator.get_col_character(lang)
def revert_address_translation(self):
'''
Reverts translation of characters indicating row and column to native language
'''
self.translate_address(Excel4Translator.native_language)
def get_str(self, lang=None):
'''
Returns Excel4 instruction as string
:param lang: language to which instruction should be translated
:return: string representing address
'''
return self.get_reference(lang)
def __str__(self):
'''
Returns Excel4 instruction address
:return:string representing address
'''
return self.get_str(self.language)
|
11598014
|
class Motor:
    # create a private attribute
    __kecepatan = 0

    def __init__(self):
        # set the value of the speed attribute
        self.__kecepatan = 120

    # method that prints the (private) speed attribute
    def jalan(self):
        print("jalan dengan kecepatan {} km".format(self.__kecepatan))

# create an object of the Motor class
matic = Motor()
matic.jalan()

# the attribute does not change because it is private: outside the class this
# assignment creates a new `__kecepatan` attribute on the instance instead of
# overwriting the name-mangled `_Motor__kecepatan` used inside the class
matic.__kecepatan = 300
matic.jalan()
|
11598078
|
import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.inclusiveSecondaryVertexFinderFilteredTagInfos_cfi import *
inclusiveSecondaryVertexFinderFilteredNegativeTagInfos = inclusiveSecondaryVertexFinderFilteredTagInfos.clone(
extSVDeltaRToJet = -0.4,
vertexCuts = dict(distVal2dMin = -2.5,
distVal2dMax = -0.01,
distSig2dMin = -99999.9,
distSig2dMax = -2.0,
maxDeltaRToJetAxis = -0.5)
)
|
11598103
|
import os
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
path = 'data/external/nltk_download_SUCCESS'
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
f.write('Downloaded nltk: stopwords, punkt, wordnet')
|
11598212
|
import wave
import sys
import struct
import time
import subprocess
import threading
import traceback
import shlex
import os
import string
import random
import datetime as dt
import numpy as np
import scipy as sp
import scipy.special
from contextlib import closing
from argparse import ArgumentParser
from pyoperant import Error
try:
import simplejson as json
except ImportError:
import json
class NumpyAwareJSONEncoder(json.JSONEncoder):
""" this json encoder converts numpy arrays to lists so that json can write them.
example usage:
>>> import numpy as np
>>> dict_to_save = {'array': np.zeros((5,))}
>>> json.dumps(dict_to_save,
cls=NumpyAwareJSONEncoder
)
'{"array": [0.0, 0.0, 0.0, 0.0, 0.0]}'
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# consider importing this from python-neo
class Event(object):
"""docstring for Event"""
def __init__(self, time=None, duration=None, label='', name=None, description=None, file_origin=None, *args, **kwargs):
super(Event, self).__init__()
self.time = time
self.duration = duration
self.label = label
self.name = name
self.description = description
self.file_origin = file_origin
self.annotations = {}
self.annotate(**kwargs)
def annotate(self,**kwargs):
self.annotations.update(kwargs)
class Stimulus(Event):
"""docstring for Stimulus"""
def __init__(self, *args, **kwargs):
super(Stimulus, self).__init__(*args, **kwargs)
if self.label=='':
self.label = 'stimulus'
class AuditoryStimulus(Stimulus):
"""docstring for AuditoryStimulus"""
def __init__(self, *args, **kwargs):
super(AuditoryStimulus, self).__init__(*args, **kwargs)
if self.label=='':
self.label = 'auditory_stimulus'
def run_state_machine(start_in='pre', error_state=None, error_callback=None, **state_functions):
"""runs a state machine defined by the keyword arguments
>>> def run_start():
>>> print "in 'run_start'"
>>> return 'next'
>>> def run_next():
>>> print "in 'run_next'"
>>> return None
>>> run_state_machine(start_in='start',
>>> start=run_start,
>>> next=run_next)
in 'run_start'
in 'run_next'
None
"""
# make sure the start state has a function to run
assert (start_in in state_functions.keys())
# make sure all of the arguments passed in are callable
for func in state_functions.values():
assert hasattr(func, '__call__')
state = start_in
while state is not None:
try:
state = state_functions[state]()
except Exception, e:
if error_callback:
error_callback(e)
raise
else:
raise
state = error_state
class Trial(Event):
"""docstring for Trial"""
def __init__(self,
index=None,
type_='normal',
class_=None,
*args, **kwargs):
super(Trial, self).__init__(*args, **kwargs)
self.label = 'trial'
self.session = None
self.index = index
self.type_ = type_
self.stimulus = None
self.class_ = class_
self.response = None
self.correct = None
self.rt = None
self.reward = False
self.punish = False
self.events = []
self.stim_event = None
class Command(object):
"""
Enables running subprocess commands in a different thread with a TIMEOUT option.
via https://gist.github.com/kirpit/1306188
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, basestring):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
def parse_commandline(arg_str=sys.argv[1:]):
""" parse command line arguments
note: optparse is deprecated w/ v2.7 in favor of argparse
"""
parser=ArgumentParser()
parser.add_argument('-B', '--box',
action='store', type=int, dest='box', required=False,
help='(int) box identifier')
parser.add_argument('-S', '--subject',
action='store', type=str, dest='subj', required=False,
help='subject ID and folder name')
parser.add_argument('-c','--config',
action='store', type=str, dest='config_file', default='config.json', required=True,
help='configuration file [default: %(default)s]')
args = parser.parse_args(arg_str)
return vars(args)
def check_cmdline_params(parameters, cmd_line):
# if someone is using red bands they should amend the checks I perform here
allchars=string.maketrans('','')
nodigs=allchars.translate(allchars, string.digits)
if not ('box' not in cmd_line or cmd_line['box'] == int(parameters['panel_name'].encode('ascii','ignore').translate(allchars, nodigs))):
print "box number doesn't match config and command line"
return False
if not ('subj' not in cmd_line or int(cmd_line['subj'].encode('ascii','ignore').translate(allchars, nodigs)) == int(parameters['subject'].encode('ascii','ignore').translate(allchars, nodigs))):
print "subject number doesn't match config and command line"
return False
return True
def time_in_range(start, end, x):
"""Return true if x is in the range [start, end]"""
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
def is_day(latitude = '32.82', longitude = '-117.14'):
"""Is it daytime?
(lat,long) -- latitude and longitude of location to check (default is San Diego)
Returns True if it is daytime
"""
import ephem
obs = ephem.Observer()
obs.lat = latitude # San Diego, CA
obs.long = longitude
sun = ephem.Sun()
sun.compute()
next_sunrise = ephem.localtime(obs.next_rising(sun))
next_sunset = ephem.localtime(obs.next_setting(sun))
return next_sunset < next_sunrise
def check_time(schedule,fmt="%H:%M"):
""" determine whether trials should be done given the current time and the light schedule
returns Boolean if current time meets schedule
schedule='sun' will change lights according to local sunrise and sunset
schedule=[('07:00','17:00')] will have lights on between 7am and 5pm
schedule=[('06:00','12:00'),('18:00','24:00')] will have lights on between 6am and noon and between 6pm and midnight
"""
if schedule == 'sun':
if is_day():
return True
else:
for epoch in schedule:
assert len(epoch) == 2
now = dt.datetime.time(dt.datetime.now())
start = dt.datetime.time(dt.datetime.strptime(epoch[0],fmt))
end = dt.datetime.time(dt.datetime.strptime(epoch[1],fmt))
if time_in_range(start,end,now):
return True
return False
def wait(secs=1.0, final_countdown=0.0,waitfunc=None):
"""Smartly wait for a given time period.
secs -- total time to wait in seconds
final_countdown -- time at end of secs to wait and constantly poll the clock
waitfunc -- optional function to run in a loop during hogCPUperiod
If secs=1.0 and final_countdown=0.2 then for 0.8s python's time.sleep function will be used,
which is not especially precise, but allows the cpu to perform housekeeping. In
the final hogCPUsecs the more precise method of constantly polling the clock
is used for greater precision.
"""
#initial relaxed period, using sleep (better for system resources etc)
if secs > final_countdown:
time.sleep(secs-final_countdown)
secs = final_countdown # only this much is now left
#It's the Final Countdown!!
#hog the cpu, checking time
t0 = time.time()
while (time.time()-t0) < secs:
#let's see if any events were collected in meantime
try:
waitfunc()
except:
pass
def auditory_stim_from_wav(wav):
with closing(wave.open(wav,'rb')) as wf:
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wf.getparams()
duration = float(nframes)/sampwidth
duration = duration * 2.0 / framerate
stim = AuditoryStimulus(time=0.0,
duration=duration,
name=wav,
label='wav',
description='',
file_origin=wav,
annotations={'nchannels': nchannels,
'sampwidth': sampwidth,
'framerate': framerate,
'nframes': nframes,
'comptype': comptype,
'compname': compname,
}
)
return stim
def concat_wav(input_file_list, output_filename='concat.wav'):
""" concat a set of wav files into a single wav file and return the output filename
takes in a tuple list of files and duration of pause after the file
input_file_list = [
('a.wav', 0.1),
('b.wav', 0.09),
('c.wav', 0.0),
]
returns a list of AuditoryStimulus objects
TODO: add checks for sampling rate, number of channels, etc.
"""
cursor = 0
epochs = [] # list of file epochs
audio_data = ''
with closing(wave.open(output_filename, 'wb')) as output:
for input_filename, isi in input_file_list:
# read in the wav file
with closing(wave.open(input_filename,'rb')) as wav_part:
try:
params = wav_part.getparams()
output.setparams(params)
fs = output.getframerate()
except: # TODO: what was I trying to except here? be more specific
pass
audio_frames = wav_part.readframes(wav_part.getnframes())
# append the audio data
audio_data += audio_frames
part_start = cursor
part_dur = len(audio_frames)/params[1]
epochs.append(AuditoryStimulus(time=float(part_start)/fs,
duration=float(part_dur)/fs,
name=input_filename,
file_origin=input_filename,
annotations=params,
label='motif'
))
cursor += part_dur # move cursor length of the duration
# add isi
if isi > 0.0:
isi_frames = ''.join([struct.pack('h', fr) for fr in [0]*int(fs*isi)])
audio_data += isi_frames
cursor += len(isi_frames)/params[1]
# concat all of the audio together and write to file
output.writeframes(audio_data)
description = 'concatenated on-the-fly'
concat_wav = AuditoryStimulus(time=0.0,
duration=epochs[-1].time+epochs[-1].duration,
name=output_filename,
label='wav',
description=description,
file_origin=output_filename,
annotations=output.getparams(),
)
return (concat_wav,epochs)
def get_num_open_fds():
'''
return the number of open file descriptors for current process
.. warning: will only work on UNIX-like os-es.
'''
pid = os.getpid()
    procs = subprocess.check_output(
        ["lsof", '-w', '-Ff', "-p", str(pid)]).decode()
    nprocs = len(
        [s for s in procs.split('\n')
         if s and s[0] == 'f' and s[1:].isdigit()]
    )
    return nprocs
def rand_from_log_shape_dist(alpha=10):
"""
randomly samples from a distribution between 0 and 1 with pdf shaped like the log function
low probability of getting close to zero, increasing probability going towards 1
alpha determines how sharp the curve is, higher alpha, sharper curve.
"""
beta = (alpha + 1) * np.log(alpha + 1) - alpha
t = random.random()
ret = ((beta * t-1)/(sp.special.lambertw((beta*t-1)/np.e)) - 1) / alpha
return max(min(np.real(ret), 1), 0)
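# Illustrative check (not part of the original module): samples stay in [0, 1]
# and skew towards 1, more sharply for larger alpha.
# samples = [rand_from_log_shape_dist(alpha=10) for _ in range(1000)]
# print(min(samples), max(samples), sum(samples) / len(samples))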
|
11598214
|
import os
import json
import logging
from fnmatch import fnmatch
from pathlib import Path
from version import VERSION
from typing import List
import mypy.stubgen as stubgen
import sys
log = logging.getLogger(__name__)
STUB_FOLDER = "./all-stubs"
def clean_version(version: str, build: bool = False):
"omit the commit hash from the git tag"
# 'v1.13-103-gb137d064e' --> 'v1.13-103'
nibbles = version.split("-")
if len(nibbles) == 1:
return version
elif build and build != "dirty":
return "-".join(version.split("-")[0:-1])
else:
return "-".join((version.split("-")[0], "N"))
def stubfolder(path: str) -> str:
"return path in the stub folder"
return "{}/{}".format(STUB_FOLDER, path)
def flat_version(version: str):
"Turn version from 'v1.2.3' into '1_2_3' to be used in filename"
return version.replace("v", "").replace(".", "_")
def cleanup(modules_folder: Path):
"Q&D cleanup"
# for some reason (?) the umqtt simple.pyi and robust.pyi are created twice
# - modules_root folder ( simple.pyi and robust.pyi) - NOT OK
# - umqtt folder (simple.py & pyi and robust.py & pyi) OK
# similar for mpy 1.9x - 1.11
# - core.pyi - uasyncio\core.py'
# - urequests.pyi - urllib\urequest.py'
# Mpy 1.13+
# - uasyncio.pyi -uasyncio\__init__.py
# todo - Add check for source folder
for file_name in (
"simple.pyi",
"robust.pyi",
"core.pyi",
"urequest.pyi",
"uasyncio.pyi",
):
f = Path.joinpath(modules_folder, file_name)
if f.exists():
try:
print(" - removing {}".format(f))
f.unlink()
except OSError:
log.error(" * Unable to remove extranous stub {}".format(f))
pass
def generate_pyi_from_file(file: Path) -> bool:
"""Generate a .pyi stubfile from a single .py module using mypy/stubgen"""
sg_opt = stubgen.Options(
pyversion=(3, 5),
no_import=False,
include_private=True,
doc_dir="",
search_path=[],
interpreter=sys.executable,
parse_only=False,
ignore_errors=True,
modules=[],
packages=[],
files=[],
output_dir="",
verbose=True,
quiet=False,
export_less=False,
)
sg_opt.files = [str(file)]
sg_opt.output_dir = str(file.parent)
try:
stubgen.generate_stubs(sg_opt)
return True
except BaseException as e:
print(e)
return False
def generate_pyi_files(modules_folder: Path) -> bool:
"""generate typeshed files for all scripts in a folder using mypy/stubgen"""
# stubgen cannot process folders with duplicate modules ( ie v1.14 and v1.15 )
modlist = list(modules_folder.glob("**/modules.json"))
if len(modlist) > 1:
        # try to process each module separately
r = True
for mod_manifest in modlist:
            ## generate pyi files for folder
r = r and generate_pyi_files(mod_manifest.parent)
return r
else: # one or less module manifests
        ## generate pyi files for folder
# clean before to clean any old stuff
cleanup(modules_folder)
print("running stubgen on {0}".format(modules_folder))
cmd = "stubgen {0} --output {0} --include-private --ignore-errors".format(modules_folder)
result = os.system(cmd)
# Check on error
if result != 0:
# in case of failure ( duplicate module in subfolder) then Plan B
# - run stubgen on each *.py
print("Failure on folder, attempt to stub per file.py")
py_files = modules_folder.glob("**/*.py")
for py in py_files:
generate_pyi_from_file(py)
# todo: report failures by adding to module manifest
# for py missing pyi:
py_files = list(modules_folder.rglob("*.py"))
pyi_files = list(modules_folder.rglob("*.pyi"))
for pyi in pyi_files:
# remove all py files that have been stubbed successfully from the list
try:
py_files.remove(pyi.with_suffix(".py"))
except ValueError:
pass
# now stub the rest
# note in some cases this will try a file twice
for py in py_files:
generate_pyi_from_file(py)
# todo: report failures by adding to module manifest
# and clean after to only check-in good stuff
cleanup(modules_folder)
return True
def manifest(
family=None,
machine=None,
port=None,
platform=None,
sysname=None,
nodename=None,
version=None,
release=None,
firmware=None,
) -> dict:
"create a new empty manifest dict"
    if family is None:
        family = "micropython"
    if machine is None:
        machine = family  # default the machine to the family name
    if port is None:
        port = "common"
    if platform is None:
        platform = port  # default the platform to the port
if version is None:
version = "0.0.0"
if nodename is None:
nodename = sysname
if release is None:
release = version
if firmware is None:
firmware = "{}-{}-{}".format(family, port, flat_version(version))
mod_manifest = {
"firmware": {
"family": family,
"port": port,
"platform": platform,
"machine": machine,
"firmware": firmware,
"nodename": nodename,
"version": version,
"release": release,
"sysname": sysname,
},
"stubber": {"version": VERSION},
"modules": [],
}
return mod_manifest
def make_manifest(folder: Path, family: str, port: str, version: str) -> bool:
"""Create a `module.json` manifest listing all files/stubs in this folder and subfolders."""
mod_manifest = manifest(family=family, port=port, sysname=family, version=version)
try:
# list all *.py files, not strictly modules but decent enough for documentation
for file in folder.glob("**/*.py"):
mod_manifest["modules"].append({"file": str(file.relative_to(folder)), "module": file.stem})
        # write the module manifest
with open(os.path.join(folder, "modules.json"), "w") as outfile:
json.dump(mod_manifest, outfile, indent=4, sort_keys=True)
return True
except OSError:
return False
def generate_all_stubs():
"just create typeshed stubs"
# now generate typeshed files for all scripts
print("Generate type hint files (pyi) in folder: {}".format(STUB_FOLDER))
generate_pyi_files(Path(STUB_FOLDER))
def read_exclusion_file(path: Path = None) -> List[str]:
"""Read a .exclusion file to determine which files should not be automatically re-generated
in .GitIgnore format
"""
    if path is None:
path = Path(".")
try:
with open(path.joinpath(".exclusions")) as f:
content = f.readlines()
return [line.rstrip() for line in content if line[0] != "#" and len(line.strip()) != 0]
except OSError:
return []
# exclusions = read_exclusion_file()
def should_ignore(file: str, exclusions: List[str]) -> bool:
"""Check if a file matches a line in the exclusion list."""
for excl in exclusions:
if fnmatch(file, excl):
return True
return False
# for file in Path(".").glob("**/*.py*"):
# if should_ignore(str(file), exclusions):
# print(file)
|
11598274
|
import os
import stat
import sys
_PATH = os.environ.get('PATH', '').split(os.pathsep)
_IS_WINDOWS = sys.platform.lower().startswith('win')
def is_executable(p, *path):
path = os.path.join(p, *path)
return os.path.exists(path) and \
not os.path.isdir(path) and \
os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
def which(binary):
if os.path.isabs(binary) and is_executable(binary):
return binary
for dir_ in _PATH:
exe = os.path.join(dir_, binary)
if is_executable(exe):
return exe
if _IS_WINDOWS and not binary.lower().endswith('.exe'):
return which(binary + '.exe')
return None
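# Illustrative usage (a sketch, not part of the original module):
#   which('ls')       -> e.g. '/bin/ls' on a typical Linux PATH, or None if not found
#   which('/bin/ls')  -> '/bin/ls' if it exists and is executable
#   On Windows, which('python') retries as which('python.exe') automatically.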
|
11598304
|
r"""
.. codeauthor:: <NAME> <<EMAIL>>
This module handles the boundaries of a single axis of a grid. There are
generally only two options, depending on whether the axis of the underlying
grid is defined as periodic or not. If it is periodic, the class
:class:`~pde.grids.boundaries.axis.BoundaryPeriodic` should be used, while
non-periodic axes have more options, which are represented by
:class:`~pde.grids.boundaries.axis.BoundaryPair`.
"""
from __future__ import annotations
from typing import Callable, Dict, Tuple, Union
import numpy as np
from numba.extending import register_jitable
from ...tools.typing import GhostCellSetter, NumberOrArray, VirtualPointEvaluator
from ..base import DomainError, GridBase, PeriodicityError
from .local import BCBase, BCDataError, BoundaryData, _make_get_arr_1d, _PeriodicBC
BoundaryPairData = Union[
Dict[str, BoundaryData], BoundaryData, Tuple[BoundaryData, BoundaryData]
]
class BoundaryAxisBase:
"""base class for defining boundaries of a single axis in a grid"""
low: BCBase
""":class:`~pde.grids.boundaries.local.BCBase`: Boundary condition at lower end """
high: BCBase
""":class:`~pde.grids.boundaries.local.BCBase`: Boundary condition at upper end """
def __init__(self, low: BCBase, high: BCBase):
"""
Args:
low (:class:`~pde.grids.boundaries.local.BCBase`):
Instance describing the lower boundary
high (:class:`~pde.grids.boundaries.local.BCBase`):
Instance describing the upper boundary
"""
# check data consistency
assert low.grid == high.grid
assert low.axis == high.axis
assert low.rank == high.rank
assert high.upper and not low.upper
self.low = low
self.high = high
# check grid consistency
periodic_low = isinstance(low, _PeriodicBC)
periodic_high = isinstance(high, _PeriodicBC)
if periodic_low != periodic_high:
raise PeriodicityError("Both boundaries must have same periodicity")
if periodic_low and not self.periodic:
raise PeriodicityError(
"Cannot impose periodic boundary condition on non-periodic axis"
)
if not periodic_low and self.periodic:
raise PeriodicityError(
"Cannot impose non-periodic boundary condition on periodic axis"
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.__class__ == other.__class__
and self.low == other.low
and self.high == other.high
)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.__class__ != other.__class__
or self.low != other.low
or self.high != other.high
)
def __iter__(self):
yield self.low
yield self.high
def __getitem__(self, index: Union[int, bool]) -> BCBase:
"""returns one of the sides"""
if index == 0 or index is False:
return self.low
elif index == 1 or index is True:
return self.high
else:
raise IndexError("Index can be either 0/False or 1/True")
@property
def grid(self) -> GridBase:
""":class:`~pde.grids.base.GridBase`: Underlying grid"""
return self.low.grid
@property
def axis(self) -> int:
"""int: The axis along which the boundaries are defined"""
return self.low.axis
@property
def periodic(self) -> bool:
"""bool: whether the axis is periodic"""
return self.grid.periodic[self.axis]
def get_data(self, idx: Tuple[int, ...]) -> Tuple[float, Dict[int, float]]:
"""sets the elements of the sparse representation of this condition
Args:
idx (tuple):
The index of the point that must lie on the boundary condition
Returns:
float, dict: A constant value and a dictionary with indices and
factors that can be used to calculate this virtual point
"""
axis_coord = idx[self.axis]
if axis_coord == -1:
# the virtual point on the lower side
return self.low.get_data(idx)
elif axis_coord == self.grid.shape[self.axis]:
# the virtual point on the upper side
return self.high.get_data(idx)
else:
# the normal case of an interior point
return 0, {axis_coord: 1}
def get_point_evaluator(
self, fill: np.ndarray = None
) -> Callable[[np.ndarray, Tuple[int, ...]], NumberOrArray]:
"""return a function to evaluate values at a given point
The point can either be a point inside the domain or a virtual point
right outside the domain
Args:
fill (:class:`~numpy.ndarray`, optional):
Determines how values out of bounds are handled. If `None`, a
`DomainError` is raised when out-of-bounds points are requested.
Otherwise, the given value is returned.
Returns:
function: A function taking a 1d array and an index as an argument,
returning the value of the array at this index.
"""
size = self.low.grid.shape[self.low.axis]
get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)
eval_low = self.low.make_virtual_point_evaluator()
eval_high = self.high.make_virtual_point_evaluator()
@register_jitable
def evaluate(arr: np.ndarray, idx: Tuple[int, ...]) -> NumberOrArray:
"""evaluate values of the 1d array `arr_1d` at an index `i`"""
arr_1d, i, _ = get_arr_1d(arr, idx)
if i == -1:
# virtual point on the lower side of the axis
return eval_low(arr, idx)
elif i == size:
# virtual point on the upper side of the axis
return eval_high(arr, idx)
elif 0 <= i < size:
# inner point of the axis
return arr_1d[..., i] # type: ignore
elif fill is None:
# point is outside the domain and no fill value is specified
raise DomainError("Point index lies outside bounds")
else:
# Point is outside the domain, but fill value is specified. Note
# that fill value needs to be given with the correct shape.
return fill
return evaluate # type: ignore
def make_virtual_point_evaluators(
self,
) -> Tuple[VirtualPointEvaluator, VirtualPointEvaluator]:
"""returns two functions evaluating the value at virtual support points
Returns:
tuple: Two functions that each take a 1d array as an argument and
return the associated value at the virtual support point outside the
lower and upper boundary, respectively.
"""
eval_low = self.low.make_virtual_point_evaluator()
eval_high = self.high.make_virtual_point_evaluator()
return (eval_low, eval_high)
def make_region_evaluator(
self,
) -> Callable[
[np.ndarray, Tuple[int, ...]],
Tuple[NumberOrArray, NumberOrArray, NumberOrArray],
]:
"""return a function to evaluate values in a neighborhood of a point
Returns:
function: A function that can be called with the data array and a
tuple indicating around what point the region is evaluated. The
function returns the data values left of the point, at the point,
and right of the point along the axis associated with this boundary
condition. The function takes boundary conditions into account if
the point lies on the boundary.
"""
get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)
ap_low = self.low.make_adjacent_evaluator()
ap_high = self.high.make_adjacent_evaluator()
@register_jitable
def region_evaluator(
arr: np.ndarray, idx: Tuple[int, ...]
) -> Tuple[NumberOrArray, NumberOrArray, NumberOrArray]:
"""compiled function return the values in the region"""
# extract the 1d array along axis
arr_1d, i_point, bc_idx = get_arr_1d(arr, idx)
return (
ap_low(arr_1d, i_point, bc_idx),
arr_1d[..., i_point],
ap_high(arr_1d, i_point, bc_idx),
)
return region_evaluator # type: ignore
def make_derivative_evaluator(
self, order: int = 1
) -> Callable[[np.ndarray, Tuple[int, ...]], NumberOrArray]:
"""return a function to evaluate the derivative at a point
Args:
order (int): The order of the derivative
Returns:
function: A function that can be called with the data array and a tuple
indicating around what point the derivative is evaluated. The function
returns the central finite difference at the point. The function takes
boundary conditions into account if the point lies on the boundary.
"""
get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)
ap_low = self.low.make_adjacent_evaluator()
ap_high = self.high.make_adjacent_evaluator()
if order == 1:
# first derivative
scale = 1 / (2 * self.grid.discretization[self.axis])
@register_jitable
def deriv_evaluator(arr: np.ndarray, idx: Tuple[int, ...]) -> NumberOrArray:
"""compiled function return the derivative at the pint"""
# extract the 1d array along axis
arr_1d, i_point, bc_idx = get_arr_1d(arr, idx)
val_low = ap_low(arr_1d, i_point, bc_idx)
val_high = ap_high(arr_1d, i_point, bc_idx)
# return the central derivative
return (val_high - val_low) * scale # type: ignore
elif order == 2:
# second derivative
scale = 1 / self.grid.discretization[self.axis] ** 2
@register_jitable
def deriv_evaluator(arr: np.ndarray, idx: Tuple[int, ...]) -> NumberOrArray:
"""compiled function return the derivative at the pint"""
# extract the 1d array along axis
arr_1d, i_point, bc_idx = get_arr_1d(arr, idx)
val_low = ap_low(arr_1d, i_point, bc_idx)
val_high = ap_high(arr_1d, i_point, bc_idx)
# return the central derivative
return (val_low - 2 * arr_1d[..., i_point] + val_high) * scale # type: ignore
else:
raise NotImplementedError(f"Derivative of oder {order} not implemented")
return deriv_evaluator # type: ignore
def set_ghost_cells(self, data_full: np.ndarray, *, args=None) -> None:
"""set the ghost cell values for all boundaries
Args:
data_full (:class:`~numpy.ndarray`):
The full field data including ghost points
args:
Additional arguments that might be supported by special boundary
conditions.
"""
self.low.set_ghost_cells(data_full, args=args)
self.high.set_ghost_cells(data_full, args=args)
def make_ghost_cell_setter(self) -> GhostCellSetter:
"""return function that sets the ghost cells for this axis on a full array"""
ghost_cell_setter_low = self.low.make_ghost_cell_setter()
ghost_cell_setter_high = self.high.make_ghost_cell_setter()
@register_jitable
def ghost_cell_setter(data_full: np.ndarray, args=None) -> None:
"""helper function setting the conditions on all axes"""
ghost_cell_setter_low(data_full, args=args)
ghost_cell_setter_high(data_full, args=args)
return ghost_cell_setter # type: ignore
class BoundaryPair(BoundaryAxisBase):
"""represents the two boundaries of an axis along a single dimension"""
def __repr__(self):
return f"{self.__class__.__name__}({self.low!r}, {self.high!r})"
def __str__(self):
if self.low == self.high:
return str(self.low)
else:
return f"({self.low}, {self.high})"
def _cache_hash(self) -> int:
"""returns a value to determine when a cache needs to be updated"""
return hash((self.low._cache_hash(), self.high._cache_hash()))
def copy(self) -> BoundaryPair:
"""return a copy of itself, but with a reference to the same grid"""
return self.__class__(self.low.copy(), self.high.copy())
@classmethod
def get_help(cls) -> str:
"""Return information on how boundary conditions can be set"""
return (
"Boundary conditions for each side can be set using a tuple: "
f"(lower_bc, upper_bc). {BCBase.get_help()}"
)
@classmethod
def from_data(cls, grid: GridBase, axis: int, data, rank: int = 0) -> BoundaryPair:
"""create boundary pair from some data
Args:
grid (:class:`~pde.grids.base.GridBase`):
The grid for which the boundary conditions are defined
axis (int):
The axis to which this boundary condition is associated
data (str or dict):
Data that describes the boundary pair
rank (int):
The tensorial rank of the field for this boundary condition
Returns:
:class:`~pde.grids.boundaries.axis.BoundaryPair`:
the instance created from the data
Throws:
ValueError if `data` cannot be interpreted as a boundary pair
"""
# handle the simple cases
if isinstance(data, dict):
if "low" in data or "high" in data:
# separate conditions for low and high
data_copy = data.copy()
low = BCBase.from_data(
grid, axis, upper=False, data=data_copy.pop("low"), rank=rank
)
high = BCBase.from_data(
grid, axis, upper=True, data=data_copy.pop("high"), rank=rank
)
if data_copy:
raise BCDataError(f"Data items {data_copy.keys()} were not used.")
else:
# one condition for both sides
low = BCBase.from_data(grid, axis, upper=False, data=data, rank=rank)
high = BCBase.from_data(grid, axis, upper=True, data=data, rank=rank)
elif isinstance(data, (str, BCBase)):
# a type for both boundaries
low = BCBase.from_data(grid, axis, upper=False, data=data, rank=rank)
high = BCBase.from_data(grid, axis, upper=True, data=data, rank=rank)
else:
# the only remaining valid format is a list of conditions for the
# lower and upper boundary
try:
# try obtaining the length
data_len = len(data)
except TypeError:
# if len is not supported, the format must be wrong
raise BCDataError(
f"Unsupported boundary format: `{data}`. " + cls.get_help()
)
else:
if data_len == 2:
# assume that data is given for each boundary
low = BCBase.from_data(
grid, axis, upper=False, data=data[0], rank=rank
)
high = BCBase.from_data(
grid, axis, upper=True, data=data[1], rank=rank
)
else:
# if the length is strange, the format must be wrong
raise BCDataError(
"Expected two conditions for the two sides of the axis, but "
f"got `{data}`. " + cls.get_help()
)
return cls(low, high)
def extract_component(self, *indices) -> BoundaryPair:
"""extracts the boundary pair of the given index.
Args:
*indices:
One or two indices for vector or tensor fields, respectively
"""
bc_sub_low = self.low.extract_component(*indices)
bc_sub_high = self.high.extract_component(*indices)
return self.__class__(bc_sub_low, bc_sub_high)
def check_value_rank(self, rank: int) -> None:
"""check whether the values at the boundaries have the correct rank
Args:
rank (int):
The tensorial rank of the field for this boundary condition
Throws:
RuntimeError: if the value does not have rank `rank`
"""
self.low.check_value_rank(rank)
self.high.check_value_rank(rank)
class BoundaryPeriodic(BoundaryPair):
"""represent a periodic axis"""
def __init__(self, grid: GridBase, axis: int):
"""
Args:
grid (:class:`~pde.grids.base.GridBase`):
The grid for which the boundary conditions are defined
axis (int):
The axis to which this boundary condition is associated
"""
low = _PeriodicBC(grid=grid, axis=axis, upper=False)
high = _PeriodicBC(grid=grid, axis=axis, upper=True)
super().__init__(low, high)
def __repr__(self):
return f"{self.__class__.__name__}(grid={self.grid}, axis={self.axis})"
def __str__(self):
return '"periodic"'
def _cache_hash(self) -> int:
"""returns a value to determine when a cache needs to be updated"""
return hash((self.grid._cache_hash(), self.axis))
def copy(self) -> BoundaryPeriodic:
"""return a copy of itself, but with a reference to the same grid"""
return self.__class__(grid=self.grid, axis=self.axis)
def extract_component(self, *indices) -> BoundaryPeriodic:
"""extracts the boundary pair of the given extract_component.
Args:
*indices:
One or two indices for vector or tensor fields, respectively
"""
return self
def check_value_rank(self, rank: int) -> None:
"""check whether the values at the boundaries have the correct rank
Args:
rank (int):
The tensorial rank of the field for this boundary condition
"""
def get_boundary_axis(
grid: GridBase, axis: int, data, rank: int = 0
) -> BoundaryAxisBase:
"""return object representing the boundary condition for a single axis
Args:
grid (:class:`~pde.grids.base.GridBase`):
The grid for which the boundary conditions are defined
axis (int):
The axis to which this boundary condition is associated
data (str or tuple or dict):
Data describing the boundary conditions for this axis
rank (int):
The tensorial rank of the field for this boundary condition
Returns:
:class:`~pde.grids.boundaries.axis.BoundaryAxisBase`:
Appropriate boundary condition for the axis
"""
# handle special constructs that describe boundary conditions
if data == "natural" or data == "auto_periodic_neumann":
# automatic choice between periodic and Neumann condition
data = "periodic" if grid.periodic[axis] else "derivative"
elif data == "auto_periodic_dirichlet":
# automatic choice between periodic and Dirichlet condition
data = "periodic" if grid.periodic[axis] else "value"
# handle different types of data that specify boundary conditions
if isinstance(data, BoundaryAxisBase):
        # boundary is already in the correct format
return data
elif data == "periodic" or data == ("periodic", "periodic"):
# initialize a periodic boundary condition
return BoundaryPeriodic(grid, axis)
elif isinstance(data, dict) and data.get("type") == "periodic":
# initialize a periodic boundary condition
return BoundaryPeriodic(grid, axis)
else:
# initialize independent boundary conditions for the two sides
return BoundaryPair.from_data(grid, axis, data, rank=rank)
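# Illustrative usage (a minimal sketch assuming the usual py-pde grid classes;
# not part of the original module):
#   from pde import UnitGrid
#   grid = UnitGrid([8])
#   bc = get_boundary_axis(grid, axis=0,
#                          data={"low": {"value": 1}, "high": {"derivative": 0}})
#   print(bc)  # -> the BoundaryPair built from the dict above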
|
11598310
|
import argparse
import re
import sys
import logging
from datetime import datetime
import hashlib
import gzip
import requests
import os
import json
import time
from splunk_http_event_collector import http_event_collector
from helpers.certparser import process_cert
from helpers.hostparser import proccess_host
def process_hosts_file(gzfilename, key, logger, host='localhost', batchsize=16384, index='hosts', sourcetype='sonar-host', useesid=False):
logger.warning("Loading file {f} at {d}".format(f=gzfilename, d=datetime.now()))
hec = http_event_collector(key, host)
with gzip.open(gzfilename, 'rb') as resultsfile:
        m = re.search(r'.*\/(\d{8})', gzfilename)
filedate = m.group(1)
filedate_struct = time.strptime(filedate, "%Y%m%d")
filedate_epoch = time.mktime(filedate_struct)
batchcount = 0
for line in resultsfile:
cleanline = line.strip('\n')
(host, certhash) = cleanline.split(',', 1)
newhost = {}
newhost['host'] = host
newhost['hash'] = certhash
newhost['seen'] = filedate
newhost['seen_epoch'] = filedate_epoch
if useesid:
cert_hash = hashlib.sha1(newhost['host']+newhost['hash']+'sonar')
newhost['id'] = cert_hash.hexdigest()
newhost = proccess_host(newhost, logger)
payload = {}
payload.update({"index":index})
payload.update({"host":host})
payload.update({"sourcetype":sourcetype})
payload.update({"source":gzfilename})
payload.update({"event":newhost})
hec.batchEvent(payload)
batchcount = batchcount + 1
if batchcount == batchsize:
hec.flushBatch()
batchcount = 0
if batchcount > 0:
hec.flushBatch()
def process_certs_file(gzfilename, key, logger, host='localhost', batchsize=16384, index='certs', sourcetype='sonar-cert'):
logger.warning("Loading file {f} at {d}".format(f=gzfilename, d=datetime.now()))
hec = http_event_collector(key, host)
with gzip.open(gzfilename, 'rb') as resultsfile:
        m = re.search(r'.*\/(\d{8})', gzfilename)
filedate = m.group(1)
filedate_struct = time.strptime(filedate, "%Y%m%d")
filedate_epoch = time.mktime(filedate_struct)
batchcount = 0
for line in resultsfile:
cleanline = line.strip('\n')
(hash_string, cert_b64) = cleanline.split(',', 1)
newcert = process_cert(cert_b64, logger)
newcert_dict = json.dumps(newcert)
payload = {}
payload.update({"index":index})
payload.update({"sourcetype":sourcetype})
payload.update({"source":gzfilename})
payload.update({"event":newcert_dict})
hec.batchEvent(payload)
batchcount = batchcount + 1
if batchcount == batchsize:
hec.flushBatch()
batchcount = 0
if batchcount > 0:
hec.flushBatch()
'''
def main(argv):
logger = logging.getLogger('SSLImporter')
logger_format = logging.Formatter('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():%(lineno)d %(asctime)s\033[0m| '
'%(message)s')
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logger_format)
logger.addHandler(stream_handler)
parser = argparse.ArgumentParser()
#Cert load test
gzfilename = '/ssl/sonar/20150615_certs.gz'
http_event_collector_key = "f81e6e7e-1342-4fd7-8445-652ab368618b"
process_certs_file(gzfilename, http_event_collector_key, logger)
# Host load
gzfilename = '/ssl/sonar/20131030_hosts.gz'
http_event_collector_key = "5c95a3ec-5521-4ad6-ad93-e926bc09747c"
process_hosts_file(gzfilename, http_event_collector_key, logger)
if __name__ == '__main__':
main(sys.argv)
'''
|
11598318
|
from tick.plot import plot_hawkes_kernels
from tick.hawkes import SimuHawkesExpKernels, SimuHawkesMulti, HawkesExpKern
import matplotlib.pyplot as plt
end_time = 1000
n_realizations = 10
decays = [[4., 1.], [2., 2.]]
baseline = [0.12, 0.07]
adjacency = [[.3, 0.], [.6, .21]]
hawkes_exp_kernels = SimuHawkesExpKernels(
adjacency=adjacency, decays=decays, baseline=baseline,
end_time=end_time, verbose=False, seed=1039)
multi = SimuHawkesMulti(hawkes_exp_kernels, n_simulations=n_realizations)
multi.end_time = [(i + 1) / 10 * end_time for i in range(n_realizations)]
multi.simulate()
learner = HawkesExpKern(decays, penalty='l1', C=10)
learner.fit(multi.timestamps)
plot_hawkes_kernels(learner, hawkes=hawkes_exp_kernels)
|
11598347
|
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import confusion_matrix
import os
import tensorflow as tf
def metric_variable(shape, dtype, validate_shape=True, name=None):
"""Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES`) collections."""
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype),
trainable=False,
collections=[
ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
],
validate_shape=validate_shape,
name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
"""Calculate a streaming confusion matrix.
Calculates a confusion matrix. For estimation over a stream of data,
the function creates an `update_op` operation.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
Returns:
total_cm: A `Tensor` representing the confusion matrix.
update_op: An operation that increments the confusion matrix.
"""
# Local variable to accumulate the predictions in the confusion matrix.
total_cm = metric_variable(
[num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if (weights is not None) and (weights.get_shape().ndims > 1):
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = tf.confusion_matrix(
labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
update_op = state_ops.assign_add(total_cm, current_cm)
return total_cm, update_op
def _safe_div(numerator, denominator, name):
"""Divides two tensors element-wise, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
t = math_ops.truediv(numerator, denominator)
zero = array_ops.zeros_like(t, dtype=denominator.dtype)
condition = math_ops.greater(denominator, zero)
zero = math_ops.cast(zero, t.dtype)
return array_ops.where(condition, t, zero, name=name)
def iou(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_iou is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'iou',
(predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
num_classes, weights)
def compute_iou(name):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
#
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
#
denominator = sum_over_row + sum_over_col - cm_diag
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(
math_ops.not_equal(denominator, 0), dtype=dtypes.float32))
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0), denominator,
array_ops.ones_like(denominator))
#
iou = math_ops.div(cm_diag, denominator)
return iou
iou_v = compute_iou('iou')
if metrics_collections:
ops.add_to_collections(metrics_collections, iou_v)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return iou_v, update_op
|
11598391
|
import jsonlines
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--corpus", type=str, default="../scifact/data/corpus.jsonl")
parser.add_argument("--claims", type=str, default="../scifact/data/claims_train.jsonl")
parser.add_argument("--t5_input", type=str, required=True)
parser.add_argument("--title", action="store_true")
parser.add_argument("--balanced", action='store_true')
args = parser.parse_args()
true_counter = 0
false_counter = 0
t5_input = open(args.t5_input, "w")
corpus = {doc['doc_id']: doc for doc in jsonlines.open(args.corpus)}
for claim in tqdm(jsonlines.open(args.claims)):
for doc_id, evidence in claim['evidence'].items():
doc = corpus[int(doc_id)]
evidence_sentence_idx = {s for es in evidence for s in es['sentences']}
title = doc['title'][:-1] if doc['title'][-1] == '.' else doc['title']
for i, sentence in enumerate(doc['abstract']):
qtext = claim['claim']
dtext = sentence.rstrip().replace('\n', ' ')
evidence = str(i in evidence_sentence_idx).lower()
if evidence == "true":
true_counter += 1
else:
false_counter += 1
if args.title:
if args.balanced:
if evidence == "true":
t5_input.write(f'Query: {qtext} Document: {title}. {dtext} Relevant:\t{evidence}\n')
t5_input.write(f'Query: {qtext} Document: {title}. {dtext} Relevant:\t{evidence}\n')
t5_input.write(f'Query: {qtext} Document: {title}. {dtext} Relevant:\t{evidence}\n')
t5_input.write(f'Query: {qtext} Document: {title}. {dtext} Relevant:\t{evidence}\n')
else:
if args.balanced:
if evidence == "true":
t5_input.write(f'Query: {qtext} Document: {dtext} Relevant:\t{evidence}\n')
t5_input.write(f'Query: {qtext} Document: {dtext} Relevant:\t{evidence}\n')
t5_input.write(f'Query: {qtext} Document: {dtext} Relevant:\t{evidence}\n')
t5_input.write(f'Query: {qtext} Document: {dtext} Relevant:\t{evidence}\n')
print(f'True: {true_counter}')
print(f'False: {false_counter}')
t5_input.close()
|
11598392
|
from typing import Tuple
import plotly.express as px
def hex_to_rgb(hex_string: str) -> Tuple[int, int, int]:
"""
Converts a hex_string to a tuple of rgb values.
Requires format including #, e.g.:
#000000
#ff00ff
"""
if len(hex_string) != 7:
raise ValueError(f"Invalid length for #{hex_string}")
if any(c not in "0123456789ABCDEF" for c in hex_string.lstrip("#").upper()):
raise ValueError(f"Invalid character in #{hex_string}")
r_hex = hex_string[1:3]
g_hex = hex_string[3:5]
b_hex = hex_string[5:7]
return int(r_hex, 16), int(g_hex, 16), int(b_hex, 16)
def get_color(id_: int, alpha: float = 1) -> str:
"""
Currently (Plotly version 5.3.1) there are 10 possible colors.
"""
color = px.colors.qualitative.Plotly[id_]
r, g, b = hex_to_rgb(color)
return f"rgba({r}, {g}, {b}, {alpha})"
|
11598425
|
import requests
# from: http://www.omdbapi.com/
title_text = input("Enter a title search string: ")
url = 'http://www.omdbapi.com/?y=&plot=short&r=json&s={}'.format(title_text)
# process json-> Search -> Title
resp = requests.get(url)
if resp.status_code != 200:
print("Whoa, status code unexpected! {}".format(resp.status_code))
else:
data = resp.json()
search = data['Search']
for m in search:
print("* {}".format(m['Title']))
|
11598429
|
import ctypes
import numpy as np
import os
import subprocess
import tempfile
import tvm
from tvm import relay, get_global_func, target, register_func
from tvm.relay.function import Function
from tvm.relay.expr import Expr, Let, GlobalVar
from tvm.relay.adt import Constructor
from tvm.relay.expr_functor import ExprFunctor, ExprVisitor
from tvm.relay.backend import compile_engine
from .little_cpp import PackedCall, CPPFunction, Invoke, Decl, CPPIf, CPPTuple, CPPMatch, CPPConstructor, CPPTupleGetItem
from .little_cpp import CPPRefCreate, CPPRefRead, CPPRefWrite
from . import to_source
from .convert import convert
TVM_PATH = os.environ['TVM_HOME']
def must_run_process(args):
proc = subprocess.run(args)
assert proc.returncode == 0
def compile_cpp(source, lib_name, flags=None, lib_path=None):
if flags is None:
flags = []
if lib_path is None:
lib_path = os.curdir
debug_source_path = os.path.join(lib_path, 'source.cc')
# Write out the file for debugging.
with open(debug_source_path, 'w') as source_file:
source_file.write(source)
    # with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = tempfile.mkdtemp(prefix="relay_aot_compiler")
lib_path = os.path.join(tmpdir, lib_name)
source_path = os.path.join(tmpdir, 'source.cc')
with open(source_path, 'w') as source_file:
source_file.write(source)
must_run_process(["clang-format", "-i", debug_source_path])
system = os.uname()[0]
if system == 'Darwin':
command = [
"clang",
"-std=c++14",
"-shared",
"-undefined",
"dynamic_lookup",
"-o",
lib_path,
source_path,
f"-I{TVM_PATH}/3rdparty/dmlc-core/include",
f"-I{TVM_PATH}/3rdparty/dlpack/include",
f"-I{TVM_PATH}/3rdparty/HalideIR/src",
f"-I{TVM_PATH}/include",
f"-L{TVM_PATH}/build",
"-ltvm"
] + flags
else:
command = [
"clang",
"-std=c++14",
"-shared",
"-fPIC",
"-o",
lib_path,
source_path,
f"-I{TVM_PATH}/3rdparty/dmlc-core/include",
f"-I{TVM_PATH}/3rdparty/dlpack/include",
f"-I{TVM_PATH}/3rdparty/HalideIR/src",
f"-I{TVM_PATH}/include",
f"-L{TVM_PATH}/build",
"-ltvm"
] + flags
must_run_process(command)
return lib_path
def load_lib(name):
return ctypes.CDLL(name, ctypes.RTLD_GLOBAL)
def is_primitive(e: relay.Expr):
return isinstance(e, relay.Function) and e.attrs and e.attrs.Primitive.value == 1
class AoTCompiler(ExprFunctor):
def __init__(self, mod, tgt) -> None:
super().__init__()
self.mod = mod
self.tgt = tgt
self.engine = compile_engine.get()
self.bindings = [[]]
self.gv_map = {}
def add_binding(self, var, value):
self.bindings[-1].append((var, value))
def optimize(self, expr: Function) -> Function:
opts = tvm.transform.Sequential([relay.transform.FuseOps(),
relay.transform.ToANormalForm()])
self.mod['main'] = expr
self.mod = opts(self.mod)
ret = self.mod['main']
return ret
def mk_primitive_op(self, func: Expr, args, output_type) -> Expr:
cc_key = compile_engine.CCacheKey(func, self.tgt)
hash = tvm.ir.structural_hash(func)
name = f"op_{hash}"
if not get_global_func(name, allow_missing=True):
jit_func = self.engine.jit(cc_key, self.tgt)
register_func(name, jit_func)
return PackedCall(name, args, [x.checked_type for x in args], output_type)
def visit_call(self, call: Expr) -> Expr:
if is_primitive(call.op):
return self.mk_primitive_op(call.op, call.args, call.checked_type)
elif isinstance(call.op, Constructor):
return CPPConstructor(call.op.tag, [self.visit(arg) for arg in call.args])
else:
            assert call.attrs is None
args = [self.visit(arg) for arg in call.args]
fn = self.visit(call.op)
return Invoke(fn, args)
def visit_let(self, let: Expr) -> Expr:
self.bindings.append([])
while isinstance(let, Let):
cpp_value = self.visit(let.value)
self.add_binding(let.var, cpp_value)
let = let.body
bindings = self.bindings.pop()
body = self.visit(let)
return Decl(bindings, body)
def visit_var(self, var):
return var
def visit_global_var(self, gv):
if gv not in self.gv_map:
self.gv_map[gv] = "to be updated"
self.gv_map[gv] = self.visit(self.mod[gv])
return gv
def visit_function(self, func):
if is_primitive(func):
body = self.mk_primitive_op(func, func.params, func.ret_type)
return CPPFunction(func.params, body, func.checked_type.ret_type)
else:
return CPPFunction(func.params, self.visit(func.body), func.checked_type.ret_type)
def visit_constant(self, const):
return const
def visit_if(self, i):
return CPPIf(self.visit(i.cond),
self.visit(i.true_branch),
self.visit(i.false_branch),
i.checked_type)
def visit_tuple(self, t):
return CPPTuple([self.visit(f) for f in t.fields], t.checked_type)
def visit_match(self, m):
return CPPMatch(self.visit(m.data),
[(c.lhs, self.visit(c.rhs)) for c in m.clauses],
m.checked_type)
def visit_op(self, op):
raise Exception(f'op outside of primitive: {op}')
def visit_tuple_getitem(self, t):
return CPPTupleGetItem(self.visit(t.tuple_value), t.index, t.checked_type)
def visit_ref_create(self, r):
return CPPRefCreate(self.visit(r.value), r.checked_type)
def visit_ref_read(self, r):
return CPPRefRead(self.visit(r.ref), r.checked_type)
def visit_ref_write(self, r):
return CPPRefWrite(self.visit(r.ref), self.visit(r.value))
_LIB_COUNTER = 1
_LIB = []
def lib_and_func_name(name):
global _LIB_COUNTER
packed_name = f'relay.aot.{name}.{_LIB_COUNTER}'
lib_name = f"librelay_aot_{_LIB_COUNTER}.so"
_LIB_COUNTER += 1
return lib_name, packed_name
import time
def _mk_wrapper(fn, ctx, constants, record_time):
def _wrapper(*args):
new_constants = [convert(a, ctx) for a in constants]
new_args = [convert(a, ctx) for a in args]
begin = time.perf_counter()
res = fn(*new_constants, *new_args)
end = time.perf_counter()
return res if not record_time else (res, end - begin)
return _wrapper
import sys
sys.setrecursionlimit(10000)
def compile(func, mod, ctx, tgt, name='default', record_time=False):
"""Compile a relay function into a native library function.
Parameters
----------
func: Expr
The function.
mod: Module
The Module.
ctx: Context
The Context.
tgt: Target
The target
name: String
The name of the target binary library.
    record_time: Bool
        If True, also return the wall-clock time taken by each call.
Returns
-------
result: Function
        A function that, when passed some values,
        will convert them to the right format and call the compiled function.
"""
global _LIB
if isinstance(func, GlobalVar):
func = mod[func]
assert isinstance(func, Function)
compiler = AoTCompiler(mod, tgt)
func = compiler.optimize(func)
func = compiler.visit(func)
lib_name, packed_name = lib_and_func_name(name)
constants, source_code = to_source.to_source(mod, func, compiler.gv_map, ctx, packed_name)
lib_name = f"librelay_aot_{_LIB_COUNTER}.so"
library_path = compile_cpp(source_code, lib_name, flags=["-O3"])
_LIB.append(load_lib(library_path))
fn = get_global_func(packed_name)
return _mk_wrapper(fn, ctx, constants, record_time)
|
11598446
|
from enum import Enum
from typing import List, Optional, Tuple
import faiss
import numpy as np
import torch
import typer
from transformers import AutoModel, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer
from semantic_search.schemas import Document
from semantic_search.ncbi import uids_to_docs
UID = str
class Emoji(Enum):
# Emoji's used in typer.secho calls
# See: https://github.com/carpedm20/emoji/blob/master/emoji/unicode_codes.py
SUCCESS = "\U00002705"
WARNING = "\U000026A0"
FAST = "\U0001F3C3"
def get_device(cuda_device: int = -1) -> torch.device:
"""Return a `torch.cuda` device if `torch.cuda.is_available()` and `cuda_device>=0`.
Otherwise returns a `torch.cpu` device.
"""
if cuda_device != -1 and torch.cuda.is_available():
device = torch.device("cuda")
typer.secho(
f"{Emoji.FAST.value} Using CUDA device {torch.cuda.get_device_name()} with index"
f" {torch.cuda.current_device()}.",
fg=typer.colors.GREEN,
bold=True,
)
else:
device = torch.device("cpu")
typer.secho(
f"{Emoji.WARNING.value} Using CPU. Note that this will be many times slower than a GPU.",
fg=typer.colors.YELLOW,
bold=True,
)
return device
def setup_model_and_tokenizer(
pretrained_model_name_or_path: str, cuda_device: int = -1
) -> Tuple[PreTrainedTokenizer, PreTrainedModel]:
"""Given a HuggingFace Transformer `pretrained_model_name_or_path`, return the corresponding
model and tokenizer. Optionally, places the model on `cuda_device`, if available.
"""
device = get_device(cuda_device)
# Load the Transformers tokenizer
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
typer.secho(
(
f'{Emoji.SUCCESS.value} Tokenizer "{pretrained_model_name_or_path}" from Transformers'
" loaded successfully."
),
fg=typer.colors.GREEN,
bold=True,
)
# Load the Transformers model
model = AutoModel.from_pretrained(pretrained_model_name_or_path)
model = model.to(device)
model.eval()
typer.secho(
(
f'{Emoji.SUCCESS.value} Model "{pretrained_model_name_or_path}" from Transformers'
" loaded successfully."
),
fg=typer.colors.GREEN,
bold=True,
)
return tokenizer, model
@torch.no_grad()
def encode_with_transformer(
text: List[str],
tokenizer: PreTrainedTokenizer,
model: PreTrainedModel,
max_length: Optional[int] = None,
mean_pool: bool = True,
) -> torch.Tensor:
inputs = tokenizer(
text, padding=True, truncation=True, max_length=max_length, return_tensors="pt"
)
for name, tensor in inputs.items():
inputs[name] = tensor.to(model.device)
attention_mask = inputs["attention_mask"]
output = model(**inputs).last_hidden_state
if mean_pool:
embedding = torch.sum(output * attention_mask.unsqueeze(-1), dim=1) / torch.clamp(
            torch.sum(attention_mask, dim=1, keepdim=True), min=1e-9
)
else:
embedding = output[:, 0, :]
return embedding
def setup_faiss_index(embedding_dim: int) -> faiss.Index:
"""Returns a simple `IndexFlatIP` FAISS index with a vector dimension size of `embedding_dim`
and an ID map for cosine similarity searching.
"""
index = faiss.IndexFlatIP(embedding_dim)
index = faiss.IndexPreTransform(faiss.NormalizationTransform(embedding_dim), index)
index = faiss.IndexIDMap(index)
return index
def add_to_faiss_index(ids: List[int], embeddings: np.ndarray, index: faiss.Index) -> None:
"""Adds the vectors `embeddings` to the `index` using the keys `ids`."""
ids = np.asarray(ids).astype("int64")
embeddings = embeddings.astype("float32")
index.add_with_ids(embeddings, ids)
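# Illustrative usage (a minimal sketch, not part of the original module):
#   index = setup_faiss_index(embedding_dim=768)
#   embeddings = np.random.rand(2, 768).astype("float32")
#   add_to_faiss_index([101, 102], embeddings, index)
#   scores, ids = index.search(embeddings[:1], 2)  # cosine-style search via the normalising transform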
def normalize_documents(pmids: List[str]) -> str:
normalized_docs = []
for doc in pmids:
normalized_docs.append(Document(**list(uids_to_docs([doc]))[0][0]))
return normalized_docs[0].text # type: ignore
|
11598511
|
import os
import uuid
import time
import json
import sys
import logging
import pymysql
import boto3
import base64
from botocore.exceptions import ClientError
from typing import Optional
from fastapi import FastAPI
_log_level = logging.INFO
logger = logging.getLogger()
logger.setLevel(_log_level)
log_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(log_handler)
app = FastAPI()
conn = None
def get_secret(secret_name: str):
try:
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager'
)
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
print('get_secret---exception', e)
if e.response['Error']['Code'] == 'DecryptionFailureException':
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
raise e
else:
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return secret
def connect_database(rds_host, rds_port, db_name, secret_value):
user_info = json.loads(secret_value)
user_name = user_info['username']
user_pw = user_info['password']
try:
        conn = pymysql.connect(host=rds_host, port=rds_port, user=user_name, passwd=user_pw, db=db_name, connect_timeout=5)
except pymysql.MySQLError as e:
logger.error("ERROR: Unexpected error: Could not connect to MySQL instance.")
logger.error(e)
sys.exit()
else:
return conn
def load_database():
secret_arn = os.environ.get('SECRET_ARN', 'no-arn')
print('SECRET_ARN', secret_arn)
secret_value = get_secret(secret_arn)
host_name = os.environ.get('HOST_NAME', 'no-host')
port_number = int(os.environ.get('PORT_NUMBER', '3306'))
database_name = os.environ.get('DATABASE_NAME', 'no-database')
global conn
conn = connect_database(host_name, port_number, database_name, secret_value)
@app.get("/")
def read_root():
logger.info('read_root')
return {"Health": "Good"}
@app.get("/items")
def read_item():
logger.info('get_items')
    if conn is None:
load_database()
items = []
with conn.cursor() as cur:
cur.execute("select * from Items")
for row in cur:
logger.info(row)
items.append(row)
conn.commit()
return {'items': items}
|
11598515
|
import pandas as pd
data = [
['The', 'Business', 'Centre', '15', 'Stevenson', 'Lane'],
['6', 'Mossvale', 'Road'],
['Studio', '7', 'Tottenham', 'Court', 'Road']
]
class Address(object):
def __init__(self, *address):
if not address:
self.address = None
print('No address given')
else:
self.address = ' '.join(str(x) for x in address)
class ModelHyperparameters(object):
def __init__(self, **hyperparams):
if not hyperparams:
self.hyperparams = None
else:
self.hyperparams = hyperparams
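# Illustrative usage (not part of the original module), unpacking the sample
# rows in `data` above and arbitrary keyword hyperparameters:
#   addr = Address(*data[0])   # -> 'The Business Centre 15 Stevenson Lane'
#   params = ModelHyperparameters(learning_rate=0.01, epochs=10)
#   print(addr.address, params.hyperparams)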
|
11598543
|
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from newsSpiders.items import ArticleItem, ArticleSnapshotItem
from newsSpiders.helpers import generate_next_snapshot_time
import zlib
import time
class BasicDiscoverSpider(CrawlSpider):
name = "basic_discover"
def __init__(
self,
site_id="",
site_url="",
article_url_patterns="",
following_url_patterns="",
article_url_excludes=None,
selenium=False,
*args,
**kwargs,
):
super(BasicDiscoverSpider, self).__init__(*args, **kwargs)
self.site_id = site_id
self.site_url = site_url
self.start_urls = [site_url]
self.selenium = selenium
self.article_url_excludes = (
article_url_excludes if article_url_excludes is not None else []
)
article_url_patterns = article_url_patterns.split("; ")
following_url_patterns = following_url_patterns.split("; ")
social_media_links = [
"facebook.com",
"twitter.com",
"linkedin.com",
"plurk.com",
"line.me",
"line.naver.jp",
"plus.google.com",
]
BasicDiscoverSpider.rules = [
Rule(
LinkExtractor(allow=article_url_patterns, deny=social_media_links),
process_links=self.dedup_article_links,
callback="parse_articles",
)
]
if following_url_patterns:
BasicDiscoverSpider.rules.append(
Rule(LinkExtractor(allow=following_url_patterns), follow=True)
)
super(BasicDiscoverSpider, self)._compile_rules()
if site_id:
self.name = f"{self.name}:{site_id}"
def is_duplicate_url(self, link):
if link.url in self.article_url_excludes:
self.logger.debug(f"Found duplicated article url {link.url}")
return True
else:
return False
def dedup_article_links(self, article_links):
if len(self.article_url_excludes) == 0:
return article_links
return [link for link in article_links if not self.is_duplicate_url(link)]
def assign_article_type(self):
if "ptt.cc" in self.site_url:
article_type = "PTT"
else:
article_type = "Article"
return article_type
def parse_articles(self, response):
# init
article = ArticleItem()
article_snapshot = ArticleSnapshotItem()
# get current time
now = int(time.time())
# populate article item
article["site_id"] = self.site_id
article["url"] = response.url
article["article_type"] = self.assign_article_type()
article["first_snapshot_at"] = now
article["last_snapshot_at"] = now
article["snapshot_count"] = 1
article["next_snapshot_at"] = generate_next_snapshot_time(
self.site_url, article["snapshot_count"], now
)
if "redirect_urls" in response.meta.keys():
article["url"] = response.request.meta["redirect_urls"][0]
article["redirect_to"] = response.url
else:
article["redirect_to"] = None
article["url_hash"] = zlib.crc32(article["url"].encode())
# populate article_snapshot item
article_snapshot["raw_data"] = response.text
article_snapshot["snapshot_at"] = now
yield {"article": article, "article_snapshot": article_snapshot}
|
11598573
|
from copy import deepcopy
import numpy as np
import torch
from numpy.fft import *
def bandpass_filter(im: torch.Tensor, band_center=0.3, band_width_lower=0.1, band_width_upper=0.1):
'''Bandpass filter the image (assumes the image is square)
Returns
-------
im_bandpass: torch.Tensor
B, C, H, W
'''
freq_arr = fftshift(fftfreq(n=im.shape[-1]))
freq_arr /= np.max(np.abs(freq_arr))
im_c = torch.stack((im, torch.zeros_like(im)), dim=4)
im_f = batch_fftshift2d(torch.fft(im_c, 2))
mask_bandpass = torch.zeros(im_f.shape)
for r in range(im_f.shape[2]):
for c in range(im_f.shape[3]):
dist = np.sqrt(freq_arr[r] ** 2 + freq_arr[c] ** 2)
if dist >= band_center - band_width_lower and dist < band_center + band_width_upper:
mask_bandpass[:, :, r, c, :] = 1
if im.is_cuda:
mask_bandpass = mask_bandpass.to("cuda")
im_f_masked = torch.mul(im_f, mask_bandpass)
im_bandpass = torch.ifft(batch_ifftshift2d(im_f_masked), 2)[..., 0]
return im_bandpass
def transform_bandpass(im: torch.Tensor, band_center=0.3, band_width_lower=0.1, band_width_upper=0.1):
return im - bandpass_filter(im, band_center, band_width_lower, band_width_upper)
def tensor_t_augment(im: torch.Tensor, t):
'''
Returns
-------
im: torch.Tensor
2*B, C, H, W
'''
im_copy = deepcopy(im)
im_p = t(im)
return torch.cat((im_copy, im_p), dim=0)
def wavelet_filter(im: torch.Tensor, t, transform_i, idx=2, p=0.5):
'''Filter center of highpass wavelet coeffs
Params
------
im : torch.Tensor
idx : detail coefficients ('LH':0, 'HL':1, 'HH':2)
p : prop to perturb coeffs
'''
im_t = t(im)
# mask = torch.bernoulli((1-p) * torch.ones(im.shape[0], 5, 5))
# im_t[1][0][:,0,idx,6:11,6:11] = im_t[1][0][:,0,idx,6:11,6:11] * mask
im_t[1][0][:, 0, idx, 6:11, 6:11] = 0
return transform_i(im_t)
'''This code from https://github.com/tomrunia/PyTorchSteerablePyramid
'''
def roll_n(X, axis, n):
f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def batch_fftshift2d(x):
real, imag = torch.unbind(x, -1)
for dim in range(1, len(real.size())):
n_shift = real.size(dim) // 2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = roll_n(real, axis=dim, n=n_shift)
imag = roll_n(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def batch_ifftshift2d(x):
real, imag = torch.unbind(x, -1)
for dim in range(len(real.size()) - 1, 0, -1):
real = roll_n(real, axis=dim, n=real.size(dim) // 2)
imag = roll_n(imag, axis=dim, n=imag.size(dim) // 2)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
|
11598609
|
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Concatenate, Activation
from keras.optimizers import Adam
from utils import *
#------------------------------------------------------------------------------
def siamese_model():
input1 = (image_size_h_p,image_size_w_p,nchannels)
input2 = (image_size_h_c,image_size_w_c,nchannels)
left_input_P = Input(input1)
right_input_P = Input(input1)
left_input_C = Input(input2)
right_input_C = Input(input2)
convnet_plate = small_vgg(input1)
convnet_car = small_vgg(input2)
encoded_l_P = convnet_plate(left_input_P)
encoded_r_P = convnet_plate(right_input_P)
encoded_l_C = convnet_car(left_input_C)
encoded_r_C = convnet_car(right_input_C)
# Add the distance function to the network
L1_distanceP = L1_layer([encoded_l_P, encoded_r_P])
L1_distanceC = L1_layer([encoded_l_C, encoded_r_C])
concatL1 = Concatenate()([L1_distanceP, L1_distanceC])
x = Dense(1024)(concatL1)
x = Dropout(0.2)(x)
x = Dense(512)(x)
x = Dropout(0.2)(x)
x = Dense(256)(x)
x = Dropout(0.2)(x)
x = Activation('relu')(x)
predictionF2 = Dense(2,activation='softmax', name="fusion2_output")(x)
optimizer = Adam(0.001, decay=2.5e-4)
model = Model(inputs=[left_input_P, right_input_P, left_input_C, right_input_C], outputs=predictionF2)
model.compile(loss="binary_crossentropy",optimizer=optimizer,metrics=['accuracy'])
return model
#------------------------------------------------------------------------------
if __name__ == '__main__':
run(siamese_model(), None)
|
11598644
|
from collections import namedtuple
from typing import NamedTuple, Any
class LogOptions(NamedTuple):
overwrite: bool = None
write_mode: str = None
class LogEntry(NamedTuple):
key: str
data: Any
type: str
options: LogOptions = None
class LoadEntry(NamedTuple):
key: str
type: str
start: Any = None
stop: Any = None
RemoveEntry = namedtuple("RemoveEntry", ['key'])
class GlobEntry(NamedTuple):
query: str
wd: Any = None
recursive: bool = True
start: Any = None
stop: Any = None
class MoveEntry(NamedTuple):
source: str
to: str
class CopyEntry(NamedTuple):
source: str
to: str
# for files
exists_ok: bool
follow_symlink: bool
# for directories
symlinks: bool
class PingData(NamedTuple):
exp_key: str
status: Any
burn: bool = False
Signal = namedtuple("Signal", ['exp_key', 'signal'])
import numpy as np
ALLOWED_TYPES = (np.uint8,) # ONLY uint8 is supported.
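# Added usage sketch: the entries above are plain NamedTuples, built positionally
# or by keyword and read via attribute access, e.g.:
#     entry = LogEntry(key="loss", data=0.1, type="scalar",
#                      options=LogOptions(overwrite=True, write_mode="w"))
#     entry.options.write_mode  # -> "w"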
|
11598646
|
from django.apps import AppConfig
from lims.plugins.mounts import load_plugins
class PluginsConfig(AppConfig):
name = 'lims.plugins'
verbose_name = 'Plugins'
def ready(self):
load_plugins()
|
11598648
|
import unittest
from passlib.hash import bcrypt
import fabfile
import models
import settings
import utils
class TestUserModel(unittest.TestCase):
user_dict = None
user = None
def setUp(self):
users = fabfile.load_users()
self.user_dict = users[0]
self.user = models.User(self.user_dict)
self.user.save(test=True)
def test_create_user_with_args(self):
self.assertEqual(self.user.name, "<NAME>")
def test_create_user_with_kwargs(self):
self.user = models.User(**self.user_dict)
self.user.save(test=True)
self.assertEqual(self.user.name, "<NAME>")
def test_repr(self):
self.assertEqual(self.user.__repr__(), "<NAME>")
def test_create_id(self):
self.assertIsNotNone(self.user._id)
def test_create_login_hash(self):
self.assertIsNotNone(self.user.login_hash)
def test_removes_password(self):
self.assertIsNone(self.user.password)
    def test_login_hash_is_bcrypt(self):
        self.assertEqual(self.user.login_hash[:4], "$2b$")
def test_decrypted_password(self):
self.assertTrue(bcrypt.verify(self.user_dict['password'], self.user.login_hash))
def tearDown(self):
pass
class TestSessionModel(unittest.TestCase):
user_dict = None
user = None
session_dict = None
session = None
def setUp(self):
users = fabfile.load_users()
self.user_dict = users[0]
self.user = models.User(self.user_dict)
self.user.save(test=True)
sessions = fabfile.load_sessions()
self.session_dict = sessions[0]
self.session = models.Session(self.session_dict)
self.session.save(test=True)
def test_create_user_with_args(self):
self.assertEqual(self.session.title, "Having The Time Of Your Life")
def test_create_user_with_kwargs(self):
self.session = models.Session(**self.session_dict)
self.session.save(test=True)
self.assertEqual(self.session.title, "Having The Time Of Your Life")
if __name__ == '__main__':
unittest.main()
|
11598674
|
from html import escape
from decimal import Decimal
def html_escape(arg):
return escape(str(arg))
def html_int(a):
return f'{a}(<i>{str(hex(a))}</i>)'
def html_real(a):
return f'{round(a, 2):.2f}'
def html_str(s):
    return html_escape(s).replace('\n', '<br/>\n')
def html_list(l):
items = (f'<li>{html_escape(item)}</li>'
for item in l)
    return '<ul>\n' + '\n'.join(items) + '\n</ul>'
def html_dict(d):
items = (f'<li>{html_escape(k)}={html_escape(v)}</li>'
for k, v in d.items())
return '<ul>\n' + '\n'.join(items) + '\n</ul>'
# print(html_str("""this is
# a multi line string
# with special characters: 10 < 100"""))
# this is
# a multi line string
# with special characters: 10 < 100
def htmlize(arg):
# INT
if isinstance(arg, int):
return html_int(arg)
# FLOAT AND DECIMALS
elif isinstance(arg, float) or isinstance(arg, Decimal):
return html_real(arg)
# STR
elif isinstance(arg, str):
return html_str(arg)
# LIST or TUPLE (Sequence)
elif isinstance(arg, list) or isinstance(arg, tuple):
return html_list(arg)
# DICT
elif isinstance(arg, dict):
return html_dict(arg)
else: # default behavior - just html escape string representation
return html_escape(str(arg))
print(htmlize([1, 2, 3]))
print(htmlize(dict(key1=1, key2=2)))
print(htmlize(255))
print(htmlize(["""first element is
a multi-line string""", (1, 2, 3)]))
############################################
# Second Version
def htmlize(arg):
if isinstance(arg, int):
return html_int(arg)
elif isinstance(arg, float) or isinstance(arg, Decimal):
return html_real(arg)
elif isinstance(arg, str):
return html_str(arg)
elif isinstance(arg, list) or isinstance(arg, tuple) or isinstance(arg, set):
return html_list(arg)
elif isinstance(arg, dict):
return html_dict(arg)
else:
# default behavior - just html escape string representation
return html_escape(str(arg))
def html_escape(arg):
return escape(str(arg))
def html_int(a):
    return '{0}(<i>{1}</i>)'.format(a, hex(a))
def html_real(a):
return '{0:.2f}'.format(round(a, 2))
def html_str(s):
return html_escape(s).replace('\n', '<br/>\n')
def html_list(l):
items = ['<li>{0}</li>'.format(htmlize(item)) for item in l]
return '<ul>\n' + '\n'.join(items) + '\n</ul>'
def html_dict(d):
items = ['<li>{0}={1}</li>'.format(html_escape(k), htmlize(v)) for k, v in d.items()]
return '<ul>\n' + '\n'.join(items) + '\n</ul>'
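# Added demo (not in the original): unlike the first version above, html_list and
# html_dict now recurse through htmlize, so nested containers render as nested
# <ul> blocks instead of escaped reprs.
print(htmlize(["""first element is
a multi-line string""", (1, 2, 3)]))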
|
11598744
|
import os
def relative_path(base, file):
# https://stackoverflow.com/questions/4060221 for more options
return os.path.join(os.path.dirname(base), file)
class ResourceLoader():
'''
Mixin which provides JS and CSS for apps.
'''
def load_resources(self):
self._css_urls = [
'https://maxcdn.bootstrapcdn.com/'
'bootstrap/3.3.7/css/bootstrap.min.css',
'static/extra.css'
]
self._js_urls = [
'https://code.jquery.com/'
'jquery-3.1.1.slim.min.js',
'https://maxcdn.bootstrapcdn.com/'
'bootstrap/3.3.7/js/bootstrap.min.js',
'static/extra.js'
]
for url in self._css_urls:
self.app.css.append_css({'external_url': url})
for url in self._js_urls:
self.app.scripts.append_script({'external_url': url})
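# Added usage sketch (assumes a Dash-style `self.app` exposing the legacy
# css.append_css / scripts.append_script API, as load_resources() implies):
#     class MyApp(ResourceLoader):
#         def __init__(self, app):
#             self.app = app
#             self.load_resources()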
|
11598754
|
from canvas.util import base36decode, Base36DecodeException
from services import Services
from apps.share_tracking.models import ShareTrackingUrl
class TrackShareViewsMiddleware(object):
def process_request(self, request):
share_b36 = request.GET.get('s')
if not share_b36:
return
try:
share_id = base36decode(share_b36)
except Base36DecodeException:
return
stu = ShareTrackingUrl.objects.get_or_none(id=share_id)
if not stu:
return
stu.record_view(request)
class TrackClickthroughMiddleware(object):
def process_request(self, request):
clickthrough_type = request.GET.get('ct')
if not clickthrough_type:
return
metric = getattr(Services.metrics, clickthrough_type + "_clickthrough", None)
if not metric:
return
meta = dict((k,v) for (k,v) in request.GET.items() if k.startswith('ct_'))
metric.record(request, **meta)
|
11598767
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from cuda import try_cuda
from attentions import WhSoftDotAttention
from context_encoder import ContextEncoder
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout, max_len):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the PE once
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(
torch.arange(0, d_model, 2).float() / d_model * (-math.log(10000.0))
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
return self.dropout(x)
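# Added sanity-check sketch (not part of the original file): the encoding is
# additive, so the output shape matches the input and only the first seq_len
# rows of the precomputed table are used.
#     pe = PositionalEncoding(d_model=16, dropout=0.0, max_len=100)
#     x = torch.zeros(2, 10, 16)   # batch of 2, sequence length 10
#     pe(x).shape                  # -> torch.Size([2, 10, 16])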
class CogroundDecoderLSTM(nn.Module):
def __init__(self, embedding_size, hidden_size, dropout_ratio, feature_size,
max_len, history=False, visual_hidden_size=1024):
super(CogroundDecoderLSTM, self).__init__()
self.embedding_size = embedding_size
self.feature_size = feature_size
self.hidden_size = hidden_size
self.u_begin = try_cuda(Variable(torch.zeros(embedding_size),
requires_grad=False))
self.drop = nn.Dropout(p=dropout_ratio)
self.lstm = nn.LSTMCell(2 * embedding_size + hidden_size, hidden_size)
self.text_attention_layer = WhSoftDotAttention(hidden_size, hidden_size)
self.positional_encoding = PositionalEncoding(hidden_size,
dropout=0, max_len=max_len)
self.visual_attention_layer = WhSoftDotAttention(hidden_size,
visual_hidden_size)
self.visual_mlp = nn.Sequential(
nn.BatchNorm1d(feature_size),
nn.Linear(feature_size, visual_hidden_size),
nn.BatchNorm1d(visual_hidden_size),
nn.Dropout(dropout_ratio),
nn.ReLU()
)
self.action_attention_layer = WhSoftDotAttention(hidden_size * 2,
visual_hidden_size)
self.sm = nn.Softmax(dim=1)
if history:
self.linear_context_out = nn.Linear(hidden_size, hidden_size, bias=True)
self.linear_text_out = nn.Linear(hidden_size, hidden_size, bias=True)
self.context_encoder = ContextEncoder(feature_size, hidden_size,
dropout_ratio)
self.linear_combine = nn.Sequential(
nn.Linear(hidden_size * 4, hidden_size * 2, bias=True),
nn.ReLU(),
nn.Linear(hidden_size * 2, hidden_size * 2, bias=True)
)
def load_my_state_dict(self, state_dict):
own_state = self.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
if isinstance(param, nn.Parameter):
param = param.data
own_state[name].copy_(param)
def forward(self, *args, **kwargs):
if 'context' in kwargs and kwargs['context'] == True:
return self.forward_context(*args)
else:
return self.forward_current(*args, ctx_mask=kwargs['ctx_mask'],
history_context=kwargs['history_context'])
def forward_current(self, u_t_prev, all_u_t, visual_context, h_0, c_0, ctx,
ctx_mask=None, history_context=None):
'''
u_t_prev: batch x embedding_size
all_u_t: batch x a_num x embedding_size
visual_context: batch x v_num x feature_size => panoramic view, DEP
h_0: batch x hidden_size
c_0: batch x hidden_size
ctx: batch x seq_len x dim
ctx_mask: batch x seq_len - indices to be masked
'''
ctx_pos = self.positional_encoding(ctx)
attn_text, _alpha_text = \
self.text_attention_layer(h_0, ctx_pos, v=ctx, mask=ctx_mask)
alpha_text = self.sm(_alpha_text)
batch_size, a_size, _ = all_u_t.size()
g_v = all_u_t.view(-1, self.feature_size)
g_v = self.visual_mlp(g_v).view(batch_size, a_size, -1)
attn_vision, _alpha_vision = \
self.visual_attention_layer(h_0, g_v, v=all_u_t)
alpha_vision = self.sm(_alpha_vision)
concat_input = torch.cat((attn_text, attn_vision, u_t_prev), 1)
drop = concat_input
h_1, c_1 = self.lstm(drop, (h_0, c_0))
if history_context is not None:
context = self.linear_context_out(history_context[0])
text = self.linear_text_out(history_context[1])
action_selector = self.linear_combine(
torch.cat((attn_text, h_1, context, text), 1))
else:
action_selector = torch.cat((attn_text, h_1), 1)
_, alpha_action = self.action_attention_layer(action_selector, g_v)
return h_1, c_1, alpha_text, alpha_action, alpha_vision
def forward_context(self, *args):
return self.context_encoder(*args)
|
11598770
|
from app.models.apply import GoingoutApplyModel
from tests.v2.views import TCBase
class TestGoingoutApplyInquire(TCBase):
"""
    Tests retrieving going-out application information.
"""
def __init__(self, *args, **kwargs):
super(TestGoingoutApplyInquire, self).__init__(*args, **kwargs)
self.method = self.client.get
self.target_uri = '/student/apply/goingout'
def setUp(self):
super(TestGoingoutApplyInquire, self).setUp()
# ---
self._request = lambda *, token=self.student_access_token: self.request(
self.method,
self.target_uri,
token
)
def testInquireWithoutAnyAppliment(self):
        # (1) Inquire with no existing application info
resp = self._request()
# (2) status code 200
self.assertEqual(resp.status_code, 200)
# (3) response data
self.assertDictEqual(resp.json, {
'sat': False,
'sun': False
})
def testInquireWithAppliment(self):
apply = GoingoutApplyModel(
student=self.student,
on_saturday=True,
on_sunday=False
).save()
        # (1) Inquire while application info exists
resp = self._request()
# (2) status code 200
self.assertEqual(resp.status_code, 200)
# (3) response data
self.assertDictEqual(resp.json, {
'sat': apply.on_saturday,
'sun': apply.on_sunday
})
def testForbidden(self):
self.assertEqual(self._request(token=self.admin_access_token).status_code, 403)
class TestGoingoutApply(TCBase):
"""
    Tests applying for going out.
"""
def __init__(self, *args, **kwargs):
super(TestGoingoutApply, self).__init__(*args, **kwargs)
self.method = self.client.post
self.target_uri = '/student/apply/goingout'
self.sat = True
self.sun = True
def setUp(self):
super(TestGoingoutApply, self).setUp()
# ---
self._request = lambda *, token=self.student_access_token, sat=self.sat, sun=self.sun: self.request(
self.method,
self.target_uri,
token,
json={
'sat': sat,
'sun': sun
}
)
def testApplySuccess(self):
        # (1) Apply for going out
resp = self._request()
# (2) status code 201
self.assertEqual(resp.status_code, 201)
        # (3) Check the database
self.assertTrue(GoingoutApplyModel.objects(student=self.student, on_saturday=self.sat, on_sunday=self.sun))
def testForbidden(self):
self.assertEqual(self._request(token=self.admin_access_token).status_code, 403)
    # TODO: more exceptional cases should probably be added..
|
11598791
|
import __init__
import redis
from Kite import config
from Killua.denull import DeNull
from Killua.deduplication import Deduplication
from Gon.nbdspyder import NbdSpyder
redis_client = redis.StrictRedis(config.REDIS_IP,
port=config.REDIS_PORT,
db=config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_DB_ID)
redis_client.lpush(config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_VAR, "realtime_starter_nbd.py")
# If there is no historical data, crawl from scratch; if historical data has already been crawled, start from the most recent timestamp
# e.g. if the latest news time in the history is "2020-12-09 20:37:10", crawling resumes from that time
nbd_spyder = NbdSpyder(config.DATABASE_NAME, config.COLLECTION_NAME_NBD)
nbd_spyder.get_historical_news()
# Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()
# DeNull(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()
nbd_spyder.get_realtime_news()
|
11598809
|
# Collect the integers in [1, 2*3*5*7*11] that are coprime to all of the primes below.
p = [2, 3, 5, 7, 11]
l = []
up = 1
for x in p:
    up *= x
for i in range(1, up + 1):
    flag = True
    for j in p:
        if i % j == 0:
            flag = False
            break
    if flag:
        l.append(i)
print(l)
print(len(l))
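# Added cross-check (standard library only): the count above must equal Euler's
# totient of the primorial, prod(p_i - 1) = 1 * 2 * 4 * 6 * 10 = 480.
import math
assert len(l) == math.prod(x - 1 for x in p)
assert all(math.gcd(i, up) == 1 for i in l)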
|
11598820
|
import optparse
parser = optparse.OptionParser(
usage='%prog COMMAND [OPTIONS]',
version="x.x.x",
add_help_option=False)
parser.add_option(
'-h', '--help',
dest='help',
action='store_true',
help='Show help')
parser.disable_interspersed_args()
|
11598845
|
import pipert.core.shared_memory_generator as sm
from pipert.core.shared_memory_generator import get_shared_memory_object
class DummySharedMemoryGenerator(sm.SharedMemoryGenerator):
def __init__(self):
super().__init__("dummy_component", max_count=5)
self.create_memories()
def test_cleanup():
generator = DummySharedMemoryGenerator()
generator.cleanup()
assert generator.shared_memories == {}
def test_get_next_shared_memory():
generator = DummySharedMemoryGenerator()
first_memory = generator.get_next_shared_memory_name()
second_memory = generator.get_next_shared_memory_name()
assert first_memory == "dummy_component_0"
assert second_memory == "dummy_component_1"
generator.cleanup()
def test_max_count():
generator = DummySharedMemoryGenerator()
first_memory = generator.get_next_shared_memory_name()
for _ in range(generator.max_count - 1):
generator.get_next_shared_memory_name()
assert first_memory == generator.get_next_shared_memory_name()
generator.cleanup()
def test_write_and_read_from_memory():
generator = DummySharedMemoryGenerator()
memory_name = generator.get_next_shared_memory_name()
memory = get_shared_memory_object(memory_name)
memory.acquire_semaphore()
memory.write_to_memory(b"AAA")
message_size = len(b"AAA")
memory.release_semaphore()
memory.acquire_semaphore()
data = memory.read_from_memory(message_size)
memory.release_semaphore()
assert data == b"AAA"
generator.cleanup()
|
11598893
|
from neukivy.app import NeuApp
from kivy.lang import Builder
from kivy.animation import Animation
kv_string = """
Screen:
canvas:
Color:
rgba:app.theme_manager._bg_color_alp
Rectangle:
size:self.size
pos:self.pos
NeuCircularIconButton:
id:button
pos_hint:{'center_x':.7,'center_y':.5}
size:100,100
icon:'account-alert'
font_size:'40sp'
NeuCircularButton:
pos_hint:{'center_x':.3,'center_y':.5}
text:'NeuKivy'
radius:200
down_elevation:1
up_elevation:3
font_size:'20sp'
"""
class MainApp(NeuApp):
def build(self):
kv = Builder.load_string(kv_string)
# Set the app colors at start.
        # The bg_color property should not include an alpha value; it is computed automatically
self.theme_manager.bg_color = (0.2, 0.2, 0.2)
# Set this to a lighter shade of your bg_color
self.theme_manager.light_color = (0.3, 0.3, 0.3, 1)
# Set this to a darker shade of your bg_color
self.theme_manager.dark_color = (0.07, 0.07, 0.07, 1)
# The text color of your app
self.theme_manager.text_color = (0.5, 0.4, 0.2, 1)
# Disabled text color of your app
self.theme_manager.disabled_text_color = (0.1, 0.1, 0.1, 1)
return kv
if __name__ == "__main__":
MainApp().run()
|
11598902
|
from functools import partial
import pickle
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
import traceback
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.linear_model import Lasso
from mercari.config import \
DUMP_DATASET, USE_CACHED_DATASET, DEBUG_N, HANDLE_TEST, logger, MIN_PRICE_PRED, MAX_PRICE_PRED
from mercari.mercari_io import load_train_validation, load_test_iter
from mercari.utils import Timer, rmsle
def fit_one(est, X, y):
print("fitting y min={} max={}".format(y.min(), y.max()))
return est.fit(X, y)
def predict_one(est, X):
yhat = est.predict(X)
print("predicting y min={} max={}".format(yhat.min(), yhat.max()))
return yhat
def predict_models(X, fitted_models, vectorizer=None, parallel='thread'):
if vectorizer:
# TODO: parallelize this
with Timer('Transforming data'):
X = vectorizer.transform(X)
predict_one_ = partial(predict_one, X=X)
preds = map_parallel(predict_one_, fitted_models, parallel)
return np.expm1(np.vstack(preds).T)
def fit_models(X_tr, y_tr, models, parallel='thread'):
y_tr = np.log1p(y_tr)
fit_one_ = partial(fit_one, X=X_tr, y=y_tr)
return map_parallel(fit_one_, models, parallel)
def map_parallel(fn, lst, parallel, max_processes=4):
if parallel == 'thread':
with ThreadPool(processes=max_processes) as pool:
return pool.map(fn, lst)
elif parallel == 'mp':
ctx = mp.get_context('spawn')
with ctx.Pool(processes=max_processes) as pool:
return pool.map(fn, lst)
elif parallel is None:
return list(map(fn, lst))
else:
raise ValueError(f'unexpected parallel value: {parallel}')
def predict_models_test_batches(models, vectorizer, parallel='thread'):
chunk_preds = []
test_idx = []
for df in load_test_iter():
test_idx.append(df.test_id.values)
print("Predicting batch {} {}".format(df.test_id.min(), df.test_id.max()))
chunk_preds.append(predict_models(df, models, vectorizer=vectorizer, parallel=parallel))
predictions = np.vstack(chunk_preds)
test_idx = np.concatenate(test_idx)
return test_idx, predictions
def make_submission(te_idx, preds, save_as):
submission = pd.DataFrame({
"test_id": te_idx,
"price": preds
}, columns=['test_id', 'price'])
submission.to_csv(save_as, index=False)
def fit_transform_vectorizer(vectorizer):
df_tr, df_va = load_train_validation()
y_tr = df_tr.price.values
y_va = df_va.price.values
X_tr = vectorizer.fit_transform(df_tr, y_tr)
X_va = vectorizer.transform(df_va)
return X_tr, y_tr, X_va, y_va, vectorizer
def fit_validate(models, vectorizer, name=None,
fit_parallel='thread', predict_parallel='thread'):
cached_path = 'data_{}.pkl'.format(name)
if USE_CACHED_DATASET:
assert name is not None
with open(cached_path, 'rb') as f:
X_tr, y_tr, X_va, y_va, fitted_vectorizer = pickle.load(f)
if DEBUG_N:
X_tr, y_tr = X_tr[:DEBUG_N], y_tr[:DEBUG_N]
else:
X_tr, y_tr, X_va, y_va, fitted_vectorizer = fit_transform_vectorizer(vectorizer)
if DUMP_DATASET:
assert name is not None
with open(cached_path, 'wb') as f:
pickle.dump((X_tr, y_tr, X_va, y_va, fitted_vectorizer), f)
fitted_models = fit_models(X_tr, y_tr, models, parallel=fit_parallel)
y_va_preds = predict_models(X_va, fitted_models, parallel=predict_parallel)
return fitted_vectorizer, fitted_models, y_va, y_va_preds
def merge_predictions(X_tr, y_tr, X_te=None, est=None, verbose=True):
if est is None:
est = Lasso(alpha=0.0001, precompute=True, max_iter=1000,
positive=True, random_state=9999, selection='random')
est.fit(np.log1p(X_tr), np.log1p(y_tr))
if hasattr(est, 'intercept_') and verbose:
logger.info('merge_predictions = \n{:+.4f}\n{}'.format(
est.intercept_,
'\n'.join('{:+.4f} * {}'.format(coef, i) for i, coef in
zip(range(X_tr.shape[0]), est.coef_))))
return (np.expm1(est.predict(np.log1p(X_tr))),
np.expm1(est.predict(np.log1p(X_te))) if X_te is not None else None)
def main(name, action, arg_map, fit_parallel='thread', predict_parallel='thread'):
prefix = lambda r: '{}_{}s'.format(name, r)
if action in ("1", "2", "3"):
model_round = int(action)
models, vectorizer = arg_map[model_round]
vectorizer, fitted_models, y_va, y_va_preds = fit_validate(
models, vectorizer, name=model_round,
fit_parallel=fit_parallel, predict_parallel=predict_parallel)
joblib.dump(y_va_preds, "{}_va_preds.pkl".format(prefix(model_round)), compress=3)
if HANDLE_TEST:
test_idx, y_te_preds = predict_models_test_batches(
fitted_models, vectorizer, parallel=predict_parallel)
joblib.dump(y_te_preds, "{}_te_preds.pkl".format(prefix(model_round)), compress=3)
joblib.dump(test_idx, "test_idx.pkl", compress=3)
joblib.dump(y_va, "y_va.pkl", compress=3)
for i in range(y_va_preds.shape[1]):
print("Model {} rmsle {:.4f}".format(i, rmsle(y_va_preds[:, i], y_va)))
print("Model mean rmsle {:.4f}".format(rmsle(y_va_preds.mean(axis=1), y_va)))
elif action == "merge":
va_preds = []
te_preds = []
for model_round in ("1", "2", "3"):
try:
va_preds.append(joblib.load("{}_va_preds.pkl".format(prefix(model_round))))
if HANDLE_TEST:
te_preds.append(joblib.load("{}_te_preds.pkl".format(prefix(model_round))))
except Exception as e:
print(f'Warning: error loading round {model_round}: {e}')
traceback.print_exc()
va_preds = np.hstack(va_preds).clip(MIN_PRICE_PRED, MAX_PRICE_PRED)
if HANDLE_TEST:
te_preds = np.hstack(te_preds).clip(MIN_PRICE_PRED, MAX_PRICE_PRED)
else:
te_preds = None
y_va = joblib.load("y_va.pkl")
va_preds_merged, te_preds_merged = merge_predictions(X_tr=va_preds, y_tr=y_va, X_te=te_preds)
print("Stacking rmsle", rmsle(y_va, va_preds_merged))
if HANDLE_TEST:
test_idx = joblib.load("test_idx.pkl")
make_submission(test_idx, te_preds_merged, 'submission_merged.csv')
elif action == "merge_describe":
va_preds = []
te_preds = []
for model_round in ("1", "2", "3"):
va_preds.append(joblib.load("{}_va_preds.pkl".format(prefix(model_round))))
te_preds.append(joblib.load("{}_te_preds.pkl".format(prefix(model_round))))
va_preds = np.hstack(va_preds)
te_preds = np.hstack(te_preds)
_, df_va = load_train_validation()
y_va = joblib.load("y_va.pkl")
va_preds_merged, te_preds_merged = merge_predictions(X_tr=va_preds, y_tr=y_va, X_te=te_preds)
print("Stacking rmsle", rmsle(y_va, va_preds_merged))
df_va['preds'] = va_preds_merged
df_va['err'] = (np.log1p(df_va['preds']) - np.log1p(df_va['price'])) ** 2
df_va.sort_values('err', ascending=False).to_csv('validation_preds.csv', index=False)
|
11598929
|
import torch
from torch import nn, Tensor, cos
from ..utils import to_onehotv2
__all__ = ['ZeroCenterRelu', 'LWTA', 'Snake']
class ZeroCenterRelu(nn.ReLU):
"""
As described by Jeremy of FastAI
"""
def __init__(self, inplace: bool=False):
super(ZeroCenterRelu, self).__init__(inplace)
def forward(self, input: Tensor) -> Tensor:
return super().forward(input) - 0.5
class LWTA(nn.Module):
"""
Local Winner-Take-All Layer
For every k consecutive units, keep only the one with highest activations and zero-out the rest.
References:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"Compete to Compute." https://papers.nips.cc/paper/5059-compete-to-compute.pdf
"""
def __init__(self, block_size):
super().__init__()
self.block_size = block_size
def forward(self, input: Tensor) -> Tensor:
assert input.shape[1] % self.block_size == 0
input = input.view(-1, input.shape[1] // self.block_size, self.block_size)
mask = to_onehotv2(torch.max(input, -1)[1], self.block_size).to(input.dtype).to(input.device)
return (input * mask).view(-1, input.shape[1] * input.shape[2])
class Snake(nn.Module):
"""
Snake activation function for learning periodic function:
snake_a(x) = x + 1 / freq * sin^2(freq * x) = x + (1 - cos(2 * freq * x)) / (2 * freq)
References:
<NAME>, <NAME>, <NAME>. "Neural Networks Fail to Learn Periodic Functions and How to Fix It."
https://arxiv.org/pdf/2006.08195.pdf
"""
def __init__(self, freq: float=1.0, freq_trainable: bool=False):
super().__init__()
assert freq > 0.0
self.freq = nn.parameter.Parameter(torch.zeros((1)) + freq, requires_grad=freq_trainable)
def forward(self, input: Tensor) -> Tensor:
return input + (1.0 - cos(2 * self.freq * input)) / (2 * self.freq)
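# Added demo (not part of the original module): with freq=1, Snake computes
# x + sin^2(x), since (1 - cos(2x)) / 2 == sin^2(x), and ZeroCenterRelu is just
# ReLU shifted down by 0.5. LWTA is omitted because it relies on the
# package-local to_onehotv2 helper.
#     x = torch.linspace(-3, 3, 7)
#     ZeroCenterRelu()(x)    # relu(x) - 0.5
#     Snake(freq=1.0)(x)     # x + sin(x) ** 2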
|
11598942
|
import requests
from requests.auth import HTTPBasicAuth
from insightconnect_plugin_runtime.exceptions import PluginException
from json import JSONDecodeError
class ProofpointTapApi:
def __init__(self, service_principal: dict, secret: dict):
self.base_url = "https://tap-api-v2.proofpoint.com/v2/"
if service_principal and secret:
self.service_principal = service_principal.get("secretKey")
self.secret = secret.get("secretKey")
self.authorized = True
else:
self.authorized = False
def check_authorization(self):
if not self.authorized:
raise PluginException(
cause="Proofpoint Tap authorization is required for this action.",
assistance="Please check that credentials are correct and try again.",
)
return True
def _call_api(self, method: str, endpoint: str, params: dict = None, json_data: dict = None):
try:
response = requests.request(
url=self.base_url + endpoint,
method=method,
params=params,
json=json_data,
auth=HTTPBasicAuth(self.service_principal, self.secret) if self.authorized else None,
)
if response.status_code == 401:
raise PluginException(
cause="Invalid service principal or secret provided.",
assistance="Verify your service principal and secret are correct.",
)
elif response.status_code == 403:
raise PluginException(preset=PluginException.Preset.UNAUTHORIZED)
elif response.status_code == 404:
raise PluginException(
cause="No results found.",
assistance="Please provide valid inputs and try again.",
)
elif 400 <= response.status_code < 500:
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
elif response.status_code >= 500:
raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)
elif 200 <= response.status_code < 300:
if not response.text:
return {}
return response.json()
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
except JSONDecodeError as e:
raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=e)
except requests.exceptions.HTTPError as e:
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
def siem_action(self, endpoint: str, query_params: dict) -> dict:
return self._call_api("GET", endpoint, params=query_params)
def get_top_clickers(self, query_params: dict) -> dict:
users = []
total_clickers = 0
query_params["page"] = 1
response = self._call_api("GET", "people/top-clickers", params=query_params)
while response.get("users"):
for i in response.get("users"):
users.append(i)
total_clickers += response.get("totalTopClickers", 0)
query_params["page"] += 1
response = self._call_api("GET", "people/top-clickers", params=query_params)
return {"users": users, "totalTopClickers": total_clickers, "interval": response.get("interval")}
def get_decoded_url(self, payload: dict):
return self._call_api("POST", "url/decode", json_data=payload)
def get_forensics(self, payload: dict):
return self._call_api("GET", "forensics", params=payload)
class Endpoint:
@staticmethod
def get_blocked_clicks() -> str:
return "siem/clicks/blocked"
@staticmethod
def get_permitted_clicks() -> str:
return "siem/clicks/permitted"
@staticmethod
def get_blocked_messages() -> str:
return "siem/messages/blocked"
@staticmethod
def get_delivered_threats() -> str:
return "siem/messages/delivered"
@staticmethod
def get_all_threats() -> str:
return "siem/all"
|
11598943
|
from .signed_object import SignedObject
from .experiments_path_controller import ExperimentsPathController
from .signature_scraper import SignatureScraper
|
11598952
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, FormField, SelectField
from wtforms.validators import DataRequired, EqualTo, length, regexp, optional, ValidationError
import re
isbn_regex = '^[0-9]{13}$'
book_id_regex = '^[0-9]{16}$'
user_id_regex = '^[0-9]{6}$'
def book_insert_num(form, field):
    message = 'The quantity added at one time must be a positive integer less than 1000'
if field.data:
try:
data = int(field.data)
        except ValueError:
raise ValidationError(message)
else:
            if data < 1 or data > 999:
raise ValidationError(message)
def return_book_id(form, field):
    message = 'Book id must be a 16-digit number'
if field.data:
x = re.findall(book_id_regex, field.data)
if not x or x[0] != field.data:
raise ValidationError(message)
def return_book_user_id(form, field):
    message = 'User id must be a 6-digit number'
if field.data:
x = re.findall(user_id_regex, field.data)
if not x or x[0] != field.data:
raise ValidationError(message)
def isbn_check(form, field):
    message = 'Book ISBN must be a 13-digit number'
if field.data:
x = re.findall(isbn_regex, field.data)
if not x or x[0] != field.data:
raise ValidationError(message)
class Insert_book_form(FlaskForm):
category = SelectField(
        'Operation type',
choices=[
            ('delete', 'Remove books'),
            ('insert', 'Add books')
],
validators=[DataRequired()]
)
isbn = StringField(
        'Book ISBN',
validators=[
isbn_check
]
)
book_id = StringField(
        'Book id',
validators=[
return_book_id
]
)
name = StringField(
        'Book title',
validators=[
length(0, 100)
]
)
author = StringField(
        'Book author',
validators=[
length(0, 20)
]
)
update_num = StringField(
        'Number of books to add',
validators=[
book_insert_num
]
)
    submit = SubmitField('Submit changes')
class Return_book_form(FlaskForm):
book_id = StringField(
        'Returned book id',
        render_kw={'placeholder': '16-digit book id'},
validators=[
return_book_id
]
)
user_id = StringField(
        'User id',
        render_kw={'placeholder': '6-digit user id'},
validators=[
return_book_user_id
]
)
    submit = SubmitField('Submit query')
class Lend_book_form(FlaskForm):
lend_book_id = StringField(
        'Borrowed book id',
validators=[
DataRequired(),
return_book_id
]
)
lend_user_id = StringField(
        'Borrower user id',
validators=[
DataRequired(),
return_book_user_id
]
)
    submit = SubmitField('Process loan')
|
11598953
|
import sys
if sys.version_info >= (3, 10): # pragma: no cover
from importlib import metadata
else: # pragma: no cover
import importlib_metadata as metadata
import pytest
from .sensor.test_sensor import GoodData
sem_ver: str = metadata.version("PyPMS")
version = tuple(int(v) for v in sem_ver.split("."))
@pytest.mark.skipif(version >= (1, 0), reason="deprecated module should be removed on 1.0 release")
def test_deprecated_module():
with pytest.deprecated_call():
import pms.sensor
@pytest.mark.skipif(version >= (1, 0), reason="deprecated module should be removed on 1.0 release")
@pytest.mark.parametrize("obs", GoodData.test_obs())
def test_deprecated_method(obs):
with pytest.deprecated_call():
obs.subset()
|
11598971
|
import requests
from bs4 import BeautifulSoup
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from pyoembed.exceptions import ProviderException
from pyoembed.providers import BaseProvider
class AutoDiscoverProvider(BaseProvider):
priority = 99999999 # should be the last, always.
# following properties are not used because we are overriding all the
# methods that used them.
oembed_endpoint = None
oembed_schemas = None
def url_supported(self, url):
return True # autodiscover supports anything :)
def oembed_url(self, url):
response = requests.get(url)
if not response.ok:
raise ProviderException('Failed to auto-discover oEmbed provider '
'for url: %s' % url)
bs = BeautifulSoup(response.text, 'lxml')
# we prefer json over xml, so let's try it first :)
oembed_url = bs.find('link', type='application/json+oembed', href=True)
# if json isn't available, try xml
if oembed_url is None:
oembed_url = bs.find('link', type='text/xml+oembed', href=True)
if oembed_url is None:
raise ProviderException('No oEmbed url found: %s' % url)
return urljoin(url, oembed_url['href'])
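# Added usage sketch (requires network access; the URL is illustrative):
#     provider = AutoDiscoverProvider()
#     provider.url_supported('https://example.com/some/page')  # always True
#     provider.oembed_url('https://example.com/some/page')     # resolved oEmbed
#     # endpoint URL, or raises ProviderException if the page advertises none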
|
11599034
|
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
experiment_base_folder = '/itet-stor/baumgach/net_scratch/logs/phiseg/lidc/'
experiment_list = ['probunet',
'phiseg_7_1',
'phiseg_7_5',
'probunet_1annot',
'phiseg_7_1_1annot',
'phiseg_7_5_1annot']
experiment_names = ['probunet','phiseg_7_1', 'phiseg_7_5', 'probunet_1annot', 'phiseg_7_1_1annot', 'phiseg_7_5_1annot']
file_list = ['ncc100_best_loss.npz']*len(experiment_list)
ged_list = []
for folder, exp_name, file in zip(experiment_list, experiment_names, file_list):
experiment_path = os.path.join(experiment_base_folder, folder, file)
ged_arr = np.squeeze(np.load(experiment_path)['arr_0'])
ged_list.append(ged_arr)
ged_tot_arr = np.asarray(ged_list).T
print('significance')
print('REMINDER: are you checking the right methods?')
print(stats.ttest_rel(ged_list[2], ged_list[3]))
print('Results summary')
means = ged_tot_arr.mean(axis=0)
stds = ged_tot_arr.std(axis=0)
print(ged_tot_arr.shape)
for i in range(means.shape[0]):
print('Exp. name: %s \t %.4f +- %.4f' % (experiment_names[i], means[i], stds[i]))
df = pd.DataFrame(ged_tot_arr, columns=experiment_names)
df = df.melt(var_name='experiments', value_name='vals')
sns.boxplot(x='experiments', y='vals', data=df)
plt.show()
|
11599107
|
import streamlit.util as util
def test_repr_simple_class():
class Foo:
def __init__(self, foo, bar=5):
self.foo = foo
self.bar = bar
def __repr__(self):
return util.repr_(self)
foo = Foo("words")
assert repr(foo) == "Foo(foo='words', bar=5)"
def test_repr_dict_class():
class Foo:
def __repr__(self):
return util.repr_(self)
foo = Foo()
foo.bar = "bar"
assert repr(foo) == "Foo(bar='bar')"
def test_repr_thread_class():
import threading
thread = threading.current_thread()
    # This should return a non-empty string and not raise an exception.
    assert str(thread)
|
11599138
|
from insights.tests import context_wrap
from insights.parsers.greenboot_status import GreenbootStatus
GREEN = """
Boot Status is GREEN - Health Check SUCCESS
"""
RED = """
Mar 04 15:47:12 example greenboot[768]: Script 'check-dns.sh' SUCCESS
Mar 04 15:47:12 example required-services.sh[999]: active
Mar 04 15:47:12 example required-services.sh[999]: active
Mar 04 15:47:12 example required-services.sh[999]: inactive
Mar 04 15:47:10 example NetworkManager[886]: <info> [1614872830.0295] manager: NetworkManager state is now CONNECTED_GLOBAL
Mar 04 15:47:12 example check-dns.sh[801]: PING 192.168.81.1 (192.168.81.1) 56(84) bytes of data.
Mar 04 15:47:12 example check-dns.sh[801]: 64 bytes from 192.168.81.1: icmp_seq=1 ttl=64 time=0.253 ms
Mar 04 15:47:12 example check-dns.sh[801]: --- 192.168.81.1 ping statistics ---
Mar 04 15:47:12 example check-dns.sh[801]: 1 packets transmitted, 1 received, 0% packet loss, time 0ms
Mar 04 15:47:12 example check-dns.sh[801]: rtt min/avg/max/mdev = 0.253/0.253/0.253/0.000 ms
Mar 04 15:47:12 example greenboot[768]: Script 'check-dns.sh' SUCCESS
Mar 04 15:47:12 example required-services.sh[999]: active
Mar 04 15:47:12 example required-services.sh[999]: active
Mar 04 15:47:12 example required-services.sh[999]: inactive
Mar 04 15:47:12 example greenboot[768]: Script 'required-services.sh' FAILURE (exit code '3')
Mar 04 15:47:12 example systemd[1]: greenboot-healthcheck.service: Main process exited, code=exited, status=3/NOTIMPLEMENTED
Mar 04 15:47:12 example systemd[1]: greenboot-healthcheck.service: Failed with result 'exit-code'.
Mar 04 15:47:12 example systemd[1]: Failed to start greenboot Health Checks Runner.
Mar 04 15:47:12 example systemd[1]: Dependency failed for Boot Completion Check.
Mar 04 15:47:12 example systemd[1]: Dependency failed for Mark boot as successful in grubenv.
Mar 04 15:47:12 example systemd[1]: Dependency failed for Multi-User System.
Mar 04 15:47:12 example systemd[1]: multi-user.target: Job multi-user.target/start failed with result 'dependency'.
Mar 04 15:47:12 example systemd[1]: greenboot-grub2-set-success.service: Job greenboot-grub2-set-success.service/start failed with result 'dependency'.
Mar 04 15:47:12 example systemd[1]: Dependency failed for greenboot Success Scripts Runner.
Mar 04 15:47:12 example systemd[1]: greenboot-task-runner.service: Job greenboot-task-runner.service/start failed with result 'dependency'.
Mar 04 15:47:12 example systemd[1]: boot-complete.target: Job boot-complete.target/start failed with result 'dependency'.
Mar 04 15:47:12 example systemd[1]: greenboot-healthcheck.service: Triggering OnFailure= dependencies.
Mar 04 15:47:12 example systemd[1]: Starting greenboot Failure Scripts Runner...
Mar 04 15:47:12 example systemd[1]: Starting Update UTMP about System Runlevel Changes...
Mar 04 15:47:12 example greenboot[1004]: Boot Status is RED - Health Check FAILURE!
Mar 04 15:47:12 example greenboot[1004]: Running Red Scripts...
Mar 04 15:47:12 example systemd[1]: Started greenboot Failure Scripts Runner.
Mar 04 15:47:12 example systemd[1]: Starting Reboot on red boot status...
Mar 04 15:47:12 example systemd[1]: Starting greenboot MotD Generator...
Mar 04 15:47:12 example systemd[1]: Reached target Generic red boot target.
Mar 04 15:47:12 example redboot-auto-reboot[1009]: SYSTEM is UNHEALTHY, but boot_counter is unset in grubenv. Manual intervention necessary.
Mar 04 15:47:12 example systemd[1]: systemd-update-utmp-runlevel.service: Succeeded.
Mar 04 15:47:12 example systemd[1]: Started Update UTMP about System Runlevel Changes.
Mar 04 15:47:12 example systemd[1]: redboot-auto-reboot.service: Main process exited, code=exited, status=1/FAILURE
Mar 04 15:47:12 example systemd[1]: redboot-auto-reboot.service: Failed with result 'exit-code'.
Mar 04 15:47:12 example systemd[1]: Failed to start Reboot on red boot status.
Mar 04 15:47:12 example greenboot-status[1010]: Script 'required-services.sh' FAILURE (exit code '3')
Mar 04 15:47:12 example greenboot-status[1010]: Boot Status is RED - Health Check FAILURE!
Mar 04 15:47:12 example greenboot-status[1010]: SYSTEM is UNHEALTHY, but boot_counter is unset in grubenv. Manual intervention necessary.
Mar 04 15:47:12 example systemd[1]: Started greenboot MotD Generator.
"""
FALLBACK = """
Feb 22 22:50:26 example systemd[1]: Starting greenboot MotD Generator...
Feb 22 22:50:26 example greenboot-status[905]: Boot Status is GREEN - Health Check SUCCESS
Feb 22 22:50:26 example greenboot-status[905]: FALLBACK BOOT DETECTED! Default rpm-ostree deployment has been rolled back.
Feb 22 22:50:26 example systemd[1]: Started greenboot MotD Generator.
"""
def test_greenboot_status_green():
green = context_wrap(GREEN)
p = GreenbootStatus(green)
assert p.green
assert not p.red
def test_greenboot_status_red():
red = context_wrap(RED)
p = GreenbootStatus(red)
assert p.red
assert not p.green
def test_greenboot_status_fallback():
fb = context_wrap(FALLBACK)
p = GreenbootStatus(fb)
assert p.green
assert p.fallback
|
11599177
|
import numpy as np
import numba
"""
lineshapes.py
Module for defining line shape functions. The two that
are explicitly coded are Gaussian and Lorentzian functions,
and derivatives of these are calculated analytically
using SymPy.
When available, use the lmfit built-in functions for
lineshapes.
"""
@numba.jit(fastmath=True, nopython=True)
def gaussian(x: np.ndarray, A=1., x0=0., w=1.):
"""
Vectorized implementation of a single Gaussian lineshape. For a given
array of `x` values, we compute the corresponding amplitude of a
Gaussian for a set of amplitude, center, and widths.
Uses JIT compilation with Numba, and should be reasonably fast.
Parameters
----------
x : np.ndarray
NumPy 1D array containing x values to evaluate over.
A : float
        Amplitude (peak height) of the Gaussian.
x0 : float
Center of the Gaussian
w : float
Width of the Gaussian.
Returns
-------
NumPy 1D array
Array of amplitude values of the specified Gaussian
"""
return A * np.exp(-(x - x0) ** 2.0 / (2.0 * w ** 2.0))
@numba.jit(fastmath=True, nopython=True)
def pair_gaussian(x: np.ndarray, A1: float, A2: float, x0: float, w: float, xsep: float):
"""
Paired Gaussian lineshape. The function allows for the amplitudes to be
floated, however the two Gaussians are locked in width and center
frequency.
Parameters
----------
x : np.ndarray
        NumPy 1D array containing x values to evaluate over.
A1, A2 : float
Amplitude of the two Gaussians
x0 : float
Centroid of the two Gaussians
w : float
Width of the two Gaussians
xsep : float
Distance from the centroid and a Gaussian center
Returns
-------
NumPy 1D array
Array of amplitude values of the pair of Gaussians
"""
return gaussian(x, A1, x0 - xsep, w) + gaussian(x, A2, x0 + xsep, w)
@numba.jit(fastmath=True, nopython=True)
def lorentzian(x, x0, gamma, I):
"""
Function to evaluate a Lorentzian lineshape function.
Parameters
----------
x : Numpy 1D array
Array of floats corresponding to the x values to evaluate on
x0 : float
Center for the distribution
gamma : float
Width of the distribution
I : float
Height of the distribution
Returns
-------
Numpy 1D array
Values of the Lorentzian distribution
"""
return I * (gamma ** 2.0 / ((x - x0) ** 2.0 + gamma ** 2.0))
@numba.jit(fastmath=True, nopython=True)
def first_deriv_lorentzian(x, x0, gamma, I):
"""
Function to evaluate the first derivative of a Lorentzian lineshape function.
Parameters
----------
x : Numpy 1D array
Array of floats corresponding to the x values to evaluate on
x0 : float
Center for the distribution
gamma : float
Width of the distribution
I : float
Height of the distribution
Returns
-------
Numpy 1D array
        Values of the first derivative of the Lorentzian distribution
"""
return (
-2.0
* I
* gamma ** 2.0
* (x - x0) ** 1.0
/ (gamma ** 2.0 + (x - x0) ** 2.0) ** 2
)
@numba.jit(fastmath=True, nopython=True)
def sec_deriv_lorentzian(x, x0, gamma, I):
"""
Function to evaluate the second derivative of a Lorentzian lineshape function.
This was evaluated analytically with SymPy by differentiation of the
Lorentzian expression used for the `lorentzian` function in this module.
Parameters
----------
x : Numpy 1D array
Array of floats corresponding to the x values to evaluate on
x0 : float
Center for the distribution
gamma : float
Width of the distribution
I : float
Height of the distribution
Returns
-------
Numpy 1D array
        Values of the second derivative of the Lorentzian distribution
"""
return (
-I
* gamma ** 2.0
* (2.0 - 8.0 * (x - x0) ** 2.0 / (gamma ** 2.0 + (x - x0) ** 2.0))
/ (gamma ** 2.0 + (x - x0) ** 2.0) ** 2
)
@numba.jit(fastmath=True, nopython=True, parallel=True, nogil=True)
def fast_multi_gaussian(x: np.ndarray, A: np.ndarray, x0: np.ndarray, w: np.ndarray):
"""
Fast, parallel implementation of a mixture of Gaussian lineshapes.
Uses the `gaussian` function defined in this module, which itself is
also JIT'd, and uses parallel loops to evaluate multiple Gaussians.
The result is a NumPy 2D array of shape N x D, where N is the length
of the frequency array, and D is the number of Gaussians.
The inputs are expected to all be NumPy arrays, where A, x0, and w
all equal in length and contain parameters for each respective Gaussian.
With a 200,000 length frequency array and about 70 Gaussians, this code
takes ~70 ms to compute; about four times faster than the unJIT'd version.
Parameters
----------
    x : np.ndarray
        NumPy 1D array containing x values to evaluate over.
    A : np.ndarray
        Amplitudes of the Gaussians.
    x0 : np.ndarray
        Centers of the Gaussians.
    w : np.ndarray
        Widths of the Gaussians.
    Returns
    -------
    np.ndarray
        N x D array of amplitudes, one column per Gaussian.
"""
assert A.size == x0.size == w.size
ngaussians = len(A)
y = np.zeros((x.size, ngaussians))
# parallelize the loop over number of Gaussians
for i in numba.prange(ngaussians):
y[:,i] = gaussian(x, A[i], x0[i], w[i])
return y
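# Added demo (not in the original): build a spectrum from a mixture of Gaussians
# and check it against summing single-Gaussian evaluations.
if __name__ == "__main__":
    x = np.linspace(-5.0, 5.0, 1001)
    A = np.array([1.0, 0.5])
    x0 = np.array([-1.0, 2.0])
    w = np.array([0.3, 0.8])
    mixture = fast_multi_gaussian(x, A, x0, w).sum(axis=1)
    reference = sum(gaussian(x, a, c, s) for a, c, s in zip(A, x0, w))
    assert np.allclose(mixture, reference)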
|
11599180
|
from typing import Dict, Union
from collections import Counter
from spacy.tokens import Doc
from .constants import BASIC_STATS_DESC, COMPLEX_SYL_FACTOR, PUNCTUATIONS, RU_LETTERS, SPACES
from .extractors import SentsExtractor, WordsExtractor
from .utils import count_syllables
class BasicStats(object):
"""
    Class for computing basic text statistics
    Usage example:
    >>> from ruts import BasicStats
    >>> text = "Существуют три вида лжи: ложь, наглая ложь и статистика"
    >>> bs = BasicStats(text)
    >>> bs.get_stats()
    {'c_letters': {1: 1, 3: 2, 4: 3, 6: 1, 10: 2},
    'c_syllables': {1: 5, 2: 1, 3: 1, 4: 2},
    'n_chars': 55,
    'n_complex_words': 2,
    'n_letters': 45,
    'n_long_words': 3,
    'n_monosyllable_words': 5,
    'n_polysyllable_words': 4,
    'n_punctuations': 2,
    'n_sents': 1,
    'n_simple_words': 7,
    'n_spaces': 8,
    'n_syllables': 18,
    'n_unique_words': 8,
    'n_words': 9}
    Arguments:
        source (str|Doc): Data source (a string or a Doc object)
        sents_extractor (SentsExtractor): Tool for extracting sentences
        words_extractor (WordsExtractor): Tool for extracting words
        normalize (bool): Whether to compute normalized statistics
    Attributes:
        c_letters (dict[int, int]): Distribution of words by letter count
        c_syllables (dict[int, int]): Distribution of words by syllable count
        n_sents (int): Number of sentences
        n_words (int): Number of words
        n_unique_words (int): Number of unique words
        n_long_words (int): Number of long words
        n_complex_words (int): Number of complex words
        n_simple_words (int): Number of simple words
        n_monosyllable_words (int): Number of monosyllabic words
        n_polysyllable_words (int): Number of polysyllabic words
        n_chars (int): Number of characters
        n_letters (int): Number of letters
        n_spaces (int): Number of spaces
        n_syllables (int): Number of syllables
        n_punctuations (int): Number of punctuation marks
        p_unique_words (float): Proportion of unique words
        p_long_words (float): Proportion of long words
        p_complex_words (float): Proportion of complex words
        p_simple_words (float): Proportion of simple words
        p_monosyllable_words (float): Proportion of monosyllabic words
        p_polysyllable_words (float): Proportion of polysyllabic words
        p_letters (float): Proportion of letters
        p_spaces (float): Proportion of spaces
        p_punctuations (float): Proportion of punctuation marks
    Methods:
        get_stats: Get the computed text statistics
        print_stats: Print the computed text statistics with their descriptions
    Raises:
        TypeError: If the passed value is not a string or a Doc object
        ValueError: If the data source contains no words
"""
def __init__(
self,
source: Union[str, Doc],
sents_extractor: SentsExtractor = None,
words_extractor: WordsExtractor = None,
normalize: bool = False,
):
if isinstance(source, Doc):
text = source.text
sents = source.sents
words = tuple(word.text for word in source)
elif isinstance(source, str):
text = source
if not sents_extractor:
sents_extractor = SentsExtractor()
sents = sents_extractor.extract(text)
if not words_extractor:
words_extractor = WordsExtractor()
words = words_extractor.extract(text)
else:
raise TypeError("Некорректный источник данных")
if not words:
raise ValueError("В источнике данных отсутствуют слова")
letters_per_word = tuple(len(word) for word in words)
syllables_per_word = tuple(count_syllables(word) for word in words)
self.c_letters = dict(sorted(Counter(letters_per_word).items()))
self.c_syllables = dict(sorted(Counter(syllables_per_word).items()))
self.n_sents = sum(1 for sent in sents)
self.n_words = len(words)
self.n_unique_words = len({word.lower() for word in words})
self.n_long_words = sum(1 for cpw in letters_per_word if cpw >= 6)
self.n_complex_words = sum(1 for spw in syllables_per_word if spw >= COMPLEX_SYL_FACTOR)
self.n_simple_words = sum(1 for spw in syllables_per_word if COMPLEX_SYL_FACTOR > spw > 0)
self.n_monosyllable_words = self.c_syllables.get(1, 0)
self.n_polysyllable_words = (
self.n_words - self.c_syllables.get(1, 0) - self.c_syllables.get(0, 0)
)
self.n_chars = len(text.replace("\n", ""))
self.n_letters = sum((1 for char in text if char in RU_LETTERS))
self.n_spaces = sum((1 for char in text if char in SPACES))
self.n_syllables = sum(syllables_per_word)
self.n_punctuations = sum((1 for char in text if char in PUNCTUATIONS))
if normalize:
self.p_unique_words = self.n_unique_words / self.n_words
self.p_long_words = self.n_long_words / self.n_words
self.p_complex_words = self.n_complex_words / self.n_words
self.p_simple_words = self.n_simple_words / self.n_words
self.p_monosyllable_words = self.n_monosyllable_words / self.n_words
self.p_polysyllable_words = self.n_polysyllable_words / self.n_words
self.p_letters = self.n_letters / self.n_chars
self.p_spaces = self.n_spaces / self.n_chars
self.p_punctuations = self.n_punctuations / self.n_chars
def get_stats(self) -> Dict[str, int]:
"""
        Get the computed text statistics
        Returns:
            dict[str, int]: Dictionary of the computed text statistics
"""
return vars(self)
def print_stats(self):
"""Отображение вычисленных статистик текста с описанием на экран"""
print(f"{'Статистика':^20}|{'Значение':^10}")
print("-" * 30)
for stat, value in BASIC_STATS_DESC.items():
print(f"{value:20}|{self.get_stats().get(stat):^10}")
|
11599218
|
from typing import Callable, Any
from .core_functions import quick_fn, to_callable
from .fn import fn
from .._toolz import compose as _compose, juxt as _juxt
from ..typing import Func, TYPE_CHECKING
if TYPE_CHECKING:
from .. import api as sk # noqa: F401
from ..api import X, op # noqa: F401
@fn
def compose(*funcs: Func) -> fn:
"""
Create function that apply argument from right to left.
compose(f, g, h, ...) ==> f << g << h << ...
Example:
>>> f = sk.compose((X + 1), (X * 2))
>>> f(2) # double than increment
5
See Also:
:func:`pipe`
:func:`pipeline`
"""
return quick_fn(_compose(*map(to_callable, funcs)).__call__)
@fn
def pipeline(*funcs: Func) -> fn:
"""
Similar to compose, but order of application is reversed.
pipeline(f, g, h, ...) ==> f >> g >> h >> ...
Example:
>>> f = sk.pipeline((X + 1), (X * 2))
>>> f(2) # increment and double
6
See Also:
:func:`pipe`
:func:`compose`
"""
return quick_fn(_compose(*map(to_callable, reversed(funcs))).__call__)
@fn
def pipe(data: Any, *funcs: Callable) -> Any:
"""
Pipe a value through a sequence of functions.
I.e. ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))`` or
to ``data | f | g | h``, if ``f, g, h`` are fn objects.
Examples:
>>> from math import sqrt
>>> sk.pipe(-4, abs, sqrt)
2.0
See Also:
:func:`pipeline`
:func:`compose`
:func:`thread`
:func:`rthread`
"""
if funcs:
for func in funcs:
data = func(data)
return data
else:
return lambda *args: pipe(data, *args)
@fn
def thread(data, *forms):
"""
    Similar to pipe, but accepts extra arguments to each function in the
pipeline.
Arguments are passed as tuples and the value is passed as the
first argument.
Examples:
>>> sk.thread(20, (op.div, 2), (op.mul, 4), (op.add, 2))
42.0
See Also:
:func:`pipe`
:func:`rthread`
"""
for form in forms:
if isinstance(form, tuple):
func, *args = form
else:
func = form
args = ()
data = func(data, *args)
return data
@fn
def rthread(data, *forms):
"""
Like thread, but data is passed as last argument to functions,
instead of first.
Examples:
>>> sk.rthread(2, (op.div, 20), (op.mul, 4), (op.add, 2))
42.0
See Also:
:func:`pipe`
:func:`thread`
"""
for form in forms:
if isinstance(form, tuple):
func, *args = form
else:
func = form
args = ()
data = func(*args, data)
return data
@fn
def thread_if(data, *forms):
"""
    Similar to thread, but each form must be a tuple of (test, fn, ...args),
    and the argument is only passed to fn if the boolean test is True.
    If test is callable, the current value is passed to the callable to decide
    whether fn must be executed or not.
    Like thread, arguments are passed as tuples and the value is passed as the
    first argument.
Examples:
>>> sk.thread_if(20, (True, op.div, 2), (False, op.mul, 4), (sk.is_even, op.add, 2))
12.0
See Also:
:func:`thread`
:func:`rthread_if`
"""
    for form in forms:
do_it, func, *args = form
if callable(do_it):
do_it = do_it(data)
if do_it:
try:
data = func(data, *args)
except Exception as ex:
raise _thread_error(ex, func, (data, *args)) from ex
return data
@fn
def rthread_if(data, *forms):
"""
    Similar to rthread, but each form must be a tuple of (test, fn, ...args),
    and the argument is only passed to fn if the boolean test is True.
    If test is callable, the current value is passed to the callable to decide
    whether fn must be executed or not.
    Like rthread, arguments are passed as tuples and the value is passed as the
    last argument.
Examples:
>>> sk.rthread_if(20, (True, op.div, 2), (False, op.mul, 4), (sk.is_even, op.add, 2))
0.1
See Also:
:func:`thread`
:func:`rthread_if`
"""
for form in forms:
do_it, func, *args = form
if callable(do_it):
do_it = do_it(data)
if do_it:
try:
data = func(*args, data)
except Exception as ex:
raise _thread_error(ex, func, (*args, data)) from ex
return data
@fn
def juxt(*funcs: Callable, first=None, last=None) -> fn:
"""
Juxtapose several functions.
Creates a function that calls several functions with the same arguments and
    returns a tuple with the results of calling each function.
If last=True or first=True, return the result of the last/first call instead
of a tuple with all the elements.
Examples:
We can create an argument logger using either first/last=True
>>> sqr_log = sk.juxt(print, (X * X), last=True)
>>> sqr_log(4)
4
16
Consume a sequence
>>> pairs = sk.juxt(next, next)
>>> nums = iter(range(10))
>>> pairs(nums), pairs(nums)
((0, 1), (2, 3))
"""
funcs = (to_callable(f) for f in funcs)
if first is True:
result_func, *funcs = funcs
if not funcs:
return fn(result_func)
funcs = tuple(funcs)
def juxt_first(*args, **kwargs):
result = result_func(*args, **kwargs)
for func in funcs:
func(*args, **kwargs)
return result
return fn(juxt_first)
if last is True:
*funcs, result_func = funcs
if not funcs:
return fn(result_func)
funcs = tuple(funcs)
def juxt_last(*args, **kwargs):
for func in funcs:
func(*args, **kwargs)
return result_func(*args, **kwargs)
return fn(juxt_last)
return fn(_juxt(*funcs))
def _thread_error(ex, func, args):
    args = ", ".join(map(repr, args))
    name = getattr(func, "__name__", repr(func))
    msg = f"raised at {name}({args}): " f"{type(ex).__name__}: {ex}"
return ValueError(msg)
|
11599236
|
import pickle
from itertools import combinations_with_replacement as comb_w_r
import numpy as np
import tensorflow as tf
class TensorMinMax:
"""Copy of sklearn's MinMaxScaler implemented to work with tensorflow.
When used, tensorflow is able to take gradients on the transformation as
well as on the network itself, allowing for gradient-based optimization in
inverse design problems.
Parameters
----------
feature_range : 2-tuple, optional
Desired range of transformed data. Defaults to (0, 1)
copy : bool, optional
Set to false to perform inplace operations. Defaults to True.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
self.min_ = None
self.scale_ = None
self.data_min = None
self.data_max = None
def fit(self, X):
"""Fits the transfomer to the data.
Essentially finds original min and max of data to be able to shift the data.
Parameters
----------
X : tensor or ndarray
Data to fit
"""
self.data_min = np.amin(X, axis=0)
self.data_max = np.amax(X, axis=0)
self.scale_ = (self.feature_range[1] - self.feature_range[0]) / (
self.data_max - self.data_min
)
self.min_ = self.feature_range[0] - self.data_min * self.scale_
def transform(self, X, mode="numpy"):
"""Actually does the transorm.
Parameters
----------
X : tensor or ndarray
Data to transform
mode : {'numpy' or 'tensor'}, optional
Whether to use numpy or tensorflow operations.
Returns
-------
X : tensor or ndarray
Transformed data
"""
if mode == "numpy":
X *= self.scale_
X += self.min_
elif mode == "tensor":
X = X * tf.constant(self.scale_, tf.float32) + tf.constant(
self.min_, tf.float32
)
return X
def inverse_transform(self, X, mode="numpy"):
"""Undo the transorm.
Parameters
----------
X : tensor or ndarray
Data to inverse transform
mode : {'numpy' or 'tensor'}, optional
Whether to use numpy or tensorflow operations.
Returns
-------
X : tensor or ndarray
Inverse transformed data
"""
if mode == "numpy":
X -= self.min_
X /= self.scale_
elif mode == "tensor":
X = (X - tf.constant(self.min_, tf.float32)) / tf.constant(
self.scale_, tf.float32
)
return X
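# Added round-trip sketch (numpy mode only, so no TF session is needed):
#     scaler = TensorMinMax(feature_range=(0, 1))
#     X = np.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]])
#     scaler.fit(X)
#     Xt = scaler.transform(X.copy())
#     np.allclose(scaler.inverse_transform(Xt), X)  # -> True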
class ImportNN:
"""Class to import trained NN.
    This is the way we've been saving and using our neural networks. After saving them
we can simply import them using this class and it keeps them open for as many
operations as we desire.
Attributes
----------
normX : TensorMinMax
Norm of the inputs
    normY : TensorMinMax
Norm of the outputs
s_data : 2-tuple
Dimensions (size) of input and outputs
Parameters
----------
directory : str
The directory where the model has been stored
"""
def __init__(self, directory):
# import all graph info
with open(directory + "/Import.pkl", "rb") as file:
dict_ = pickle.load(file)
self.normX = dict_["normX"]
self.normY = dict_["normY"]
self.s_data = dict_["s_data"]
self.graph = tf.Graph()
self.sess = tf.compat.v1.Session(graph=self.graph)
with self.graph.as_default():
# Import graph
imported_meta = tf.compat.v1.train.import_meta_graph(
directory + "/model.meta"
)
imported_meta.restore(self.sess, directory + "/model")
# get all tensor names
self.output_tf = self.graph.get_tensor_by_name("OUTPUT:0")
self.input_tf = self.graph.get_tensor_by_name("INPUT:0")
self.input_tf_parts = []
for i in range(self.s_data[0]):
self.input_tf_parts.append(
self.graph.get_tensor_by_name("INPUT_{}:0".format(i))
)
self.keep_prob = self.graph.get_tensor_by_name("KEEP_PROB:0")
tf.compat.v1.disable_eager_execution()
def validate_input(self, input):
"""Used to check for valid input.
If it is only a single data point, expands the dimensions so it fits properly
Parameters
-----------
input : ndarray
Numpy array with width s_data[0] (hopefully)
Returns
--------
input : ndarray
Numpy array with width s_data[0] (hopefully) and height 1
"""
# validate and prepare data
input = np.array(input)
# make sure it's 2-dimensional
if len(input.shape) == 1:
input = np.expand_dims(input, axis=1).T
# make sure it's the right size
if input.shape[1] != self.s_data[0]:
raise ValueError("Data is the wrong size")
return input
def output(self, input, kp=1):
"""Runs input through neural network.
Parameters
----------
input : ndarray
Numpy array with width s_data[0]
kp : int, optional
Value from 0 to 1, 1 refers to not performing any dropout on nodes, 0 drops all of them. Defaults to 1.
Returns
----------
output: ndarray
numpy array with width s_data[1]
"""
# validate data
input = self.validate_input(input)
# return the outputs
return self.sess.run(
self.normY.inverse_transform(self.output_tf),
feed_dict={self.input_tf: input, self.keep_prob: kp},
)
def differentiate(self, input, d, kp=1):
"""Returns partial derivative of neural network.
Parameters
----------
input : ndarray
numpy array with width s_data[0]
d : 3-tuple of ints
Refers to partial of first element wrt second element to the order of third element
kp : int, optional
Value from 0 to 1, 1 refers to not performing any dropout on nodes, 0 drops all of them. Defaults to 1.
Returns
----------
output : ndarray
numpy array with width s_data[1]
"""
# validate data
input = self.validate_input(input)
# make feed dict
fd = {self.keep_prob: kp}
for i in range(self.s_data[0]):
fd[self.input_tf_parts[i]] = input[:, i : i + 1]
# take first derivatives, then the rest
deriv = tf.gradients(
self.normY.inverse_transform(self.output_tf)[:, d[0] : d[0] + 1],
self.input_tf_parts[d[1]],
)[0]
for i in range(1, d[2]):
deriv = tf.gradients(deriv, self.input_tf_parts[d[1]])[0]
return self.sess.run(deriv, feed_dict=fd)
def rel_error(self, input, output, kp=1):
"""Returns relative error of network.
Parameters
----------
input : ndarray
Numpy array with width s_data[0]
output : ndarray
Numpy array with width s_data[1]
kp : int, optional
Value from 0 to 1, 1 refers to not performing any dropout on nodes, 0 drops all of them. Defaults to 1.
Returns
----------
relative error : scalar
The relative error of inputs/outputs
"""
# validate data
input = self.validate_input(input)
# get output
output_nn = self.output(input, kp)
        # get rid of any possible divide-by-0s
mask = ~np.isin(output, 0)
# make relative error
re = np.abs((output[mask] - output_nn[mask]) / output[mask])
return re.mean()
class ImportLR:
"""Class to import trained Linear Regression.
    To remove the dependence on sklearn and its updates, we manually implement an sklearn
Pipeline that includes (PolynomialFeatures, LinearRegression). We use the actual sklearn
implementation to train, save the coefficients, and then proceed to implement it here.
To see how to save a pipeline like above to be used here see SiPANN/LR/regress.py
Attributes
-----------
coef_ : ndarray
Linear Regression Coefficients
degree_ : float
Degree to be used in PolynomialFeatures.
s_data : 2-tuple
Dimensions of inputs and outputs
Parameters
----------
directory : str
The directory where the model has been stored
"""
def __init__(self, directory):
# import all graph info
with open(directory, "rb") as file:
dict_ = pickle.load(file)
self.coef_ = dict_["coef_"]
self.degree_ = dict_["degree_"]
self.s_data = dict_["s_data"]
def make_combos(self, X):
"""Duplicates Polynomial Features.
Takes in an input X, and makes all possibly combinations of it using
polynomials of specified degree.
Parameters
-----------
X : ndarray
Numpy array of size (N, s_data[0])
Returns
--------
polyCombos : ndarray
            Numpy array of size (N, number of polynomial feature combinations)
"""
combos = []
for i in range(self.degree_ + 1):
combos += [k for k in comb_w_r(range(self.s_data[0]), i)]
# make matrix of all combinations
n = len(X)
polyCombos = np.ones((n, len(combos)))
for j, c in enumerate(combos):
if c == ():
polyCombos[:, j] = 1
else:
for k in c:
polyCombos[:, j] *= X[:, k]
return polyCombos
def validate_input(self, input):
"""Used to check for valid input.
If it is only a single data point, expands the dimensions so it fits properly
Parameters
-----------
input : ndarray
Numpy array with width s_data[0] (hopefully)
Returns
--------
input : ndarray
Numpy array with width s_data[0] (hopefully) and height 1
"""
# validate and prepare data
input = np.array(input)
# make sure it's 2-dimensional
if len(input.shape) == 1:
input = np.expand_dims(input, axis=1).T
# make sure it's the right size
if input.shape[1] != self.s_data[0]:
raise ValueError("Data is the wrong size")
return input
def predict(self, X):
"""Predict values.
Runs X through Pipeline to make prediction
Parameters
-----------
X : ndarray
Numpy array of size (N, s_data[0])
Returns
--------
        predictions : ndarray
            Numpy array of size (N, s_data[1])
"""
X = self.validate_input(X)
Xcombo = self.make_combos(X)
return Xcombo @ (self.coef_.T)
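if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): write a toy
    # degree-1 "pipeline" pickle in the layout ImportLR.__init__ expects and
    # check that it reproduces y = 1 + 2*x.
    import os
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as f:
        pickle.dump(
            {"coef_": np.array([[1.0, 2.0]]), "degree_": 1, "s_data": (1, 1)}, f
        )
        path = f.name
    lr = ImportLR(path)
    print(lr.predict(np.array([[3.0]])))  # expected: [[7.]]
    os.remove(path)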
|
11599247
|
from django.apps import apps
from django.utils.html import mark_safe
from mako.exceptions import RichTraceback
from mako.template import Template as MakoTemplate
from mako.runtime import Context as MakoContext, _populate_self_namespace
import io
import os, os.path
def template_inheritance(obj):
'''
Generator that iterates the template and its ancestors.
The order is from most specialized (furthest descendant) to
most general (furthest ancestor).
obj can be either:
1. Mako Template object
2. Mako `self` object (available within a rendering template)
'''
if isinstance(obj, MakoTemplate):
obj = create_mako_context(obj)['self']
elif isinstance(obj, MakoContext):
obj = obj['self']
while obj is not None:
yield obj.template
obj = obj.inherits
def create_mako_context(template_obj, **kwargs):
# I'm hacking into private Mako methods here, but I can't see another
# way to do this. Hopefully this can be rectified at some point.
kwargs.pop('self', None) # some contexts have self in them, and it messes up render_unicode below because we get two selfs
runtime_context = MakoContext(io.StringIO(), **kwargs)
runtime_context._set_with_template(template_obj)
_, mako_context = _populate_self_namespace(runtime_context, template_obj)
return mako_context
def get_template_debug(template_name, error):
'''
This structure is what Django wants when errors occur in templates.
It gives the user a nice stack trace in the error page during debug.
'''
# This is taken from mako.exceptions.html_error_template(), which has an issue
# in Py3 where files get loaded as bytes but `lines = src.split('\n')` below
# splits with a string. Not sure if this is a bug or if I'm missing something,
# but doing a custom debugging template allows a workaround as well as a custom
# DMP look.
# I used to have a file in the templates directory for this, but too many users
# reported TemplateNotFound errors. This function is a bit of a hack, but it only
# happens during development (and mako.exceptions does this same thing).
# /justification
stacktrace_template = MakoTemplate(r"""
<%! from mako.exceptions import syntax_highlight, pygments_html_formatter %>
<style>
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px;
font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
.highlight { white-space:pre; }
.sampleline { white-space:pre; }
% if pygments_html_formatter:
${pygments_html_formatter.get_style_defs() | n}
.linenos { min-width: 2.5em; text-align: right; }
pre { margin: 0; }
.syntax-highlighted { padding: 0 10px; }
.syntax-highlightedtable { border-spacing: 1px; }
.nonhighlight { border-top: 1px solid #DFDFDF;
border-bottom: 1px solid #DFDFDF; }
.stacktrace .nonhighlight { margin: 5px 15px 10px; }
.sourceline { margin: 0 0; font-family:monospace; }
.code { background-color: #F8F8F8; width: 100%; }
.error .code { background-color: #FFBDBD; }
.error .syntax-highlighted { background-color: #FFBDBD; }
% endif
## adjustments to Django css
table.source {
background-color: #fdfdfd;
}
table.source > tbody > tr > th {
width: auto;
}
table.source > tbody > tr > td {
font-family: inherit;
white-space: normal;
padding: 15px;
}
#template {
background-color: #b3daff;
}
</style>
<%
src = tback.source
line = tback.lineno
if isinstance(src, bytes):
src = src.decode()
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${tback.errorname}: ${tback.message}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = index + 1
%>
% if index + 1 == line:
<%
if pygments_html_formatter:
old_cssclass = pygments_html_formatter.cssclass
pygments_html_formatter.cssclass = 'error ' + old_cssclass
%>
${lines[index] | n,syntax_highlight(language='mako')}
<%
if pygments_html_formatter:
pygments_html_formatter.cssclass = old_cssclass
%>
% else:
${lines[index] | n,syntax_highlight(language='mako')}
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="nonhighlight">
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = lineno
%>
<div class="sourceline">${line | n,syntax_highlight(filename)}</div>
</div>
% endfor
</div>
""")
tback = RichTraceback(error, error.__traceback__)
lines = stacktrace_template.render_unicode(tback=tback)
return {
'message': '',
'source_lines': [
( '', mark_safe(lines) ),
],
'before': '',
'during': '',
'after': '',
'top': 0,
'bottom': 0,
'total': 0,
'line': tback.lineno or 0,
'name': template_name,
'start': 0,
'end': 0,
}
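if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module; assumes Mako is
    # installed): walk a template's inheritance chain from the most
    # specialized template to the most general one.
    from mako.lookup import TemplateLookup
    lookup = TemplateLookup()
    lookup.put_string("base.html", "base ${self.body()}")
    lookup.put_string("child.html", '<%inherit file="base.html"/>child')
    for tmpl in template_inheritance(lookup.get_template("child.html")):
        print(tmpl.uri)  # expected: child.html, then base.html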
|
11599283
|
import pytest
from virtool.users.utils import PERMISSIONS
@pytest.fixture
def bob(no_permissions, static_time):
return {
"_id": "abc123",
"handle": "bob",
"administrator": False,
"force_reset": False,
"groups": ["peasants"],
"last_password_change": static_time.datetime,
"invalidate_sessions": False,
"password": "<PASSWORD>",
"permissions": no_permissions,
"primary_group": "",
"settings": {
"skip_quick_analyze_dialog": True,
"show_ids": True,
"show_versions": True,
"quick_analyze_workflow": "pathoscope_bowtie",
},
}
@pytest.fixture
def create_user(static_time):
def func(
user_id="test", handle="bob", administrator=False, groups=None, permissions=None
):
permissions = permissions or list()
return {
"_id": user_id,
"handle": handle,
"administrator": administrator,
"permissions": {perm: perm in permissions for perm in PERMISSIONS},
"groups": groups or list(),
"invalidate_sessions": False,
"last_password_change": static_time.datetime,
"primary_group": "technician",
"api_keys": [],
"settings": {
"skip_quick_analyze_dialog": True,
"show_ids": True,
"show_versions": True,
"quick_analyze_workflow": "pathoscope_bowtie",
},
"force_reset": False,
"password": <PASSWORD>(),
}
return func
@pytest.fixture
def all_permissions():
return {permission: True for permission in PERMISSIONS}
@pytest.fixture
def no_permissions():
return {permission: False for permission in PERMISSIONS}
|
11599288
|
import numpy as np
import os
import pandas as pd
from snorkel.labeling import PandasLFApplier
from synthesizer.parser import nlp
from config import DATASETS_PATH
from config import DEFAULT_MAX_TRAINING_SIZE
from config import MIN_LABELLED_SIZE
from config import PROCESSED_FILE_NAME
ALLOWED_EXTENSIONS = {'csv'}
def allowed_file(filename: str):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
class Dataset:
def __init__(self, dataframe):
assert 'text' in dataframe.columns
if not "seen" in dataframe.columns:
dataframe.loc[:,"seen"] = 0
self.df = dataframe
def __len__(self):
return len(self.df)
def __getitem__(self, x):
return self.df[x]
def __contains__(self, x):
return x in self.df
@staticmethod
def load(path, force_prep = False):
df = pd.read_csv(path)
df['seen'] = 0
return Dataset(df)
def save(self, path, y=None):
"""Save the dataset to a file, with versioning.
Args:
path (string): path to file
y (matrix, optional): Model predictions. If passed, they will be saved with the data.
"""
if y is not None:
for i in range(y.shape[1]):
self.df["pred_{}".format(i)] = y[:,i]
self.df.to_csv(path)
def apply_lfs(self, lfs: list):
if len(lfs) == 0:
raise ValueError("Apply_lfs was called with no lfs (api/dataset.py")
# uuid should change anytime the LF changes
lf_ids = [lf.uuid for lf in lfs]
        to_apply = [lf for lf in lfs if lf.uuid not in self.df.columns]
if len(to_apply) > 0:
applier = PandasLFApplier(lfs=to_apply)
f_outputs = applier.apply(df=self.df)
for i, lf in enumerate(to_apply):
lf_id = lf.uuid
                self.df.loc[:, lf_id] = f_outputs[:, i]
return self.df[lf_ids].values
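# --- Hedged usage sketch (the helper below is ours, not part of the project):
# Dataset.save writes the frame to CSV, appending model predictions as
# pred_0, pred_1, ... columns when y is passed.
def _demo_dataset_save(path="demo_dataset.csv"):
    df = pd.DataFrame({"text": ["good film", "bad film"]})
    ds = Dataset(df)
    preds = np.array([[0.1, 0.9], [0.8, 0.2]])
    ds.save(path, y=preds)  # columns written: text, seen, pred_0, pred_1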
#### Data preparation utils
class DataPreparer:
"""Converts the data into the correct format, precomputes values, and logs progress"""
def __init__(self):
self.launch_progress = 0
def rename_column(self, old_col_name, new_col_name, df, print_debug=False):
df = df.rename(columns={old_col_name: new_col_name})
if print_debug:
print("Using column \"{}\" for \"{}\"".format(old_col_name, new_col_name))
print("Example: ")
print(df[new_col_name].head())
return df
def progress(self):
"""Returns value between 0 and 1 describing how much of the data has been prepared"""
return self.launch_progress
def update(self, steps):
"""Update progress to approximate percentage of process completed"""
self.launch_progress += (steps)/self.total
def set_status(self, status):
"""Record what step of process we're on"""
self.status = status
print(status)
def prepare(self, dataset_uuid, force_prep=False, test_split=True):
processed_file_path = os.path.join(DATASETS_PATH, dataset_uuid, PROCESSED_FILE_NAME)
if os.path.exists(processed_file_path) and not force_prep:
df = pd.read_csv(processed_file_path)
# Let's say loading the file is ~half the launch time
# (if the file already exists)
self.total = 2
self.update(1)
else:
try:
datafiles = [os.path.join(DATASETS_PATH, dataset_uuid, d) \
for d in os.listdir(os.path.join(DATASETS_PATH, dataset_uuid))]
except NotADirectoryError:
datafiles = [os.path.join(DATASETS_PATH, dataset_uuid)]
df = self.process_files(datafiles, test_split=test_split)
print("Saving processed files at {}".format(os.path.join(DATASETS_PATH, PROCESSED_FILE_NAME)))
df.to_csv(processed_file_path)
return self.split(df)
def mask_labelled(self, label_col_series):
return label_col_series.notnull()
def process_files(self, files: list, delimiter=None, max_size=DEFAULT_MAX_TRAINING_SIZE, test_split=True):
dfs = []
for i, filename in enumerate(files):
if allowed_file(filename):
print("--- filename: " + filename)
df = pd.read_csv(filename, header=0)
# Add field indicating source file
df["file"] = filename
dfs.append(df)
df_full = pd.concat(dfs)
self.total = len(df_full)*(1.1)
df_full = self.set_headers(df_full)
# Remove delimiter chars
if delimiter is not None:
            df_full['text'].replace(regex=True, inplace=True, to_replace=delimiter, value=r'')
df_full = df_full[df_full['text'].notna()]
self.total = len(df_full)*(1.1)
        df_split = self.make_splits(df_full, test_split=test_split, max_size=max_size)
df_final = self.precompute_values(df_split)
self.launch_progress = 1.0
#transform to Datasets
return df_final
def split(self, df):
return {
data_split: Dataset(df[df['split']==data_split]) \
for data_split in list(df['split'].value_counts().index)
}
def make_splits(self, df, test_split=True, max_size=DEFAULT_MAX_TRAINING_SIZE):
if len(df) < 10:
test_split = False
# shuffle the order
df = df.sample(frac=1, random_state=123)
# split the data into labelled and training (unlabelled)
try:
mask = self.mask_labelled(df.label)
except KeyError:
# no labels available
# all the data is (unlabelled) training data
df['split'] = 'train'
return df
labelled = df[mask]
training = df[~mask]
# if all the data provided is labelled,
# set some aside to use for interaction examples (training set)
if len(training) == 0:
np.random.seed(123)
msk = np.random.rand(len(df)) < 0.5
training = df[msk]
labelled = df[~msk]
# Make sure we have enough labelled data
if len(labelled) <= MIN_LABELLED_SIZE:
print("WARNING (dataset.py) Not enough labelled data. \
(Only {} examples detected)".format(len(labelled)))
labelled = labelled[:min(max_size, len(labelled))].reset_index(drop=True)
if test_split:
fifth = int(len(labelled)/5)
            labelled.loc[:fifth*2, 'split'] = 'dev'
            labelled.loc[fifth*2:fifth*3, 'split'] = 'valid'
            labelled.loc[fifth*3:, 'split'] = 'test'
else:
labelled['split'] = 'dev'
training = training[:min(max_size, len(training))]
training['split'] = 'train'
# reset index
df_split = pd.concat([labelled, training])
df_split = df_split.reset_index(drop=True)
# make sure we have train and dev splits
available_splits = list(df_split['split'].value_counts().index)
for split_name in 'train', 'dev':
assert split_name in available_splits
return df_split
def precompute_values(self, df):
"""Precompute values that labelling functions will need to use
Currently only named entities are precomputed"""
self.total = len(df)*(1.1)
with nlp.disable_pipes("tagger", "parser"):
def ner_tags(row):
self.update(1)
doc = nlp(row.text)
for ent in doc.ents:
                    # character offsets of the entity span
                    row[ent.label_] = (ent.start_char, ent.end_char)
return row
POSSIBLE_NER = ['CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE',
'LANGUAGE', 'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL',
'ORG', 'PERCENT', 'PERSON', 'PRODUCT', 'QUANTITY',
'TIME', 'WORK_OF_ART']
for NE in POSSIBLE_NER:
df[NE] = False
return df.apply(ner_tags, axis=1)
def set_headers(self, df):
        if 'text' not in df.columns:
obj_columns = df.select_dtypes(include=["object"], exclude=["number"])
longest_avg_string_col = max(obj_columns, key=lambda x: df[x].apply(lambda x: len(str(x))).mean())
df = self.rename_column(longest_avg_string_col, 'text', df)
print(df)
if 'label' in df.columns:
return df
for col in df.columns:
if str(col).lower() in ['class', 'label', 'target']:
df = self.rename_column(col, 'label', df)
return df
integer_columns = df.select_dtypes(include=['int64'])
for col in integer_columns:
if len(df[col].value_counts()) < min(20, len(df)):
df = self.rename_column(col, 'label', df)
return df
print("WARNING (dataset.py): No label column found. Try renaming your label column to 'label', if you have one.")
df['label'] = None
return df
if __name__=='__main__':
dp = DataPreparer()
datasets = dp.prepare('Amazon Reviews')
|
11599305
|
from unittest.mock import patch, MagicMock
from rest_framework.reverse import reverse
from rest_framework.status import (
HTTP_200_OK,
HTTP_204_NO_CONTENT,
HTTP_206_PARTIAL_CONTENT,
HTTP_400_BAD_REQUEST,
HTTP_401_UNAUTHORIZED,
)
from authy.api.resources import User
from rest_framework.test import APITestCase
from .models import CustomUser
class BaseTestCase(APITestCase):
def setUp(self):
self.username = "mc_hammer"
self.password = "<PASSWORD>"
self.user = CustomUser.objects.create(username=self.username)
self.user.set_password(self.password)
self.user.save()
        # authy api mocks go here
mock_response = MagicMock()
mock_resource = MagicMock()
self.mock_user = MagicMock(User(mock_resource, mock_response))
self.mock_user.content = {
"user": {"id": 98765432},
"success": True,
"message": "Message from authy api.",
}
self.mock_user.errors = MagicMock(return_value={})
self.mock_user.ok = MagicMock(return_value=True)
self.mock_user.id = 98765432
class AuthenticationApiTest(BaseTestCase):
def obtain_jwt(self):
payload = {"username": self.username, "password": self.password}
response = self.client.post(
reverse("token_obtain_pair"), payload, format="json"
)
return response
def test_obtain_jwt(self):
        # obtain jwt
jwt_response = self.obtain_jwt()
self.assertEqual(jwt_response.status_code, HTTP_200_OK)
self.assertTrue("refresh" in jwt_response.data)
self.assertTrue("access" in jwt_response.data)
self.client.logout()
# try to login with incorrect jwt
self.client.credentials(
HTTP_AUTHORIZATION="Bearer {}".format("stuff and nonsense")
)
get_user_response = self.client.get(
reverse("custom_auth:customuser-list"), data={"format": "json"}
)
self.assertEqual(get_user_response.status_code, HTTP_401_UNAUTHORIZED)
# login with jwt
self.client.credentials(
HTTP_AUTHORIZATION="Bearer {}".format(jwt_response.data["access"])
)
get_user_response = self.client.get(
reverse("custom_auth:customuser-list"), data={"format": "json"}
)
self.assertEqual(get_user_response.status_code, HTTP_200_OK)
@patch("auth.serializers.authy_api", autospec=True)
def test_verify_phone_number_for_user(self, mock_authy_api):
jwt_response = self.obtain_jwt()
self.client.credentials(
HTTP_AUTHORIZATION="Bearer {}".format(jwt_response.data["access"])
)
payload = {"authy_phone": "+48123456789"}
response = self.client.post(reverse("2fa_phone_verify"), payload, format="json")
self.assertEqual(response.status_code, HTTP_204_NO_CONTENT)
@patch("auth.views.authy_api", autospec=True)
@patch("auth.serializers.authy_api", autospec=True)
def test_register_phone_number_for_user(
self, mock_authy_api_serializers, mock_authy_api_views
):
jwt_response = self.obtain_jwt()
self.client.credentials(
HTTP_AUTHORIZATION="Bearer {}".format(jwt_response.data["access"])
)
# mock twilio api response
mock_authy_api_views.users.create = MagicMock(return_value=self.mock_user)
        # register phone for two factor authentication
        payload = {"authy_phone": "+48123456789", "token": "<PASSWORD>"}
response = self.client.post(
reverse("2fa_register_phone"), payload, format="json"
)
self.assertEqual(response.status_code, HTTP_204_NO_CONTENT)
@patch("auth.views.authy_api", autospec=True)
@patch("auth.serializers.authy_api", autospec=True)
def test_obtain_jwt_with_twofa(
self, mock_authy_api_serializers, mock_authy_api_views
):
jwt_response = self.obtain_jwt()
self.client.credentials(
HTTP_AUTHORIZATION="Bearer {}".format(jwt_response.data["access"])
)
# mock twilio api response
mock_authy_api_views.users.create = MagicMock(return_value=self.mock_user)
# register phone with twilio api
payload = {"authy_phone": "+48123456789", "token": <PASSWORD>}
self.client.post(reverse("2fa_register_phone"), payload, format="json")
jwt_response = self.obtain_jwt()
self.assertEqual(jwt_response.status_code, HTTP_206_PARTIAL_CONTENT)
self.assertEqual(
jwt_response.json(),
{"message": "SMS request successful. 2FA token verification expected."},
)
# obtain jwt with correct two factor authentication token
payload = {
"username": self.username,
"password": <PASSWORD>,
"token": <PASSWORD>,
}
token_response = self.client.post(
reverse("2fa_token_verify"), payload, format="json"
)
self.assertEqual(token_response.status_code, HTTP_200_OK)
self.assertTrue("refresh" in token_response.data)
self.assertTrue("access" in token_response.data)
# obtain jwt with incorrect two factor token
payload = {
"username": self.username,
"password": <PASSWORD>,
"token": "<PASSWORD>",
}
token_response = self.client.post(
reverse("2fa_token_verify"), payload, format="json"
)
self.assertEqual(
token_response.json(),
{"token": ["Ensure this field has at least 7 characters."]},
)
self.assertEqual(token_response.status_code, HTTP_400_BAD_REQUEST)
# obtain jwt with two factor token for not registered user
payload = {
"username": "wrong_username",
"password": "<PASSWORD>",
"token": <PASSWORD>,
}
token_response = self.client.post(
reverse("2fa_token_verify"), payload, format="json"
)
self.assertEqual(
token_response.json(),
{"detail": "No active account found with the given credentials"},
)
self.assertEqual(token_response.status_code, HTTP_401_UNAUTHORIZED)
|
11599320
|
from typing import List, Optional, Tuple
from PyQt5.QtGui import QColor
from brainframe.api.bf_codecs import Detection
from brainframe_qt.ui.resources.config import RenderSettings
from brainframe_qt.ui.resources.video_items.base import LabelItem, VideoItem
class DetectionLabelItem(LabelItem):
MIN_WIDTH = 150
def __init__(self, detection: Detection, color: QColor,
*, render_config: RenderSettings, parent: VideoItem):
self.detection = detection
self.render_config = render_config
super().__init__(self.text, self._detection_pos,
color=color, max_width=self._max_label_width,
parent=parent)
# TODO: background opacity
@property
def _detection_pos(self) -> Tuple[int, int]:
# Naive. Maybe refine for non-rectangular detections in future?
top_left = self.detection.coords[0]
# noinspection PyTypeChecker
return tuple(top_left)
@property
def text(self):
text_items = []
if self.render_config.show_detection_labels:
text_items.append(self._detection_name_text)
if self.render_config.show_recognition_labels:
text_items.append(self._recognition_text)
if self.render_config.show_attributes:
text_items.append(self._attributes_text)
if self.render_config.show_extra_data:
text_items.append(self._extra_data_text)
text = "\n".join(filter(None.__ne__, text_items))
return text
@property
def _detection_name_text(self):
return self.detection.class_name
@property
def _recognition_text(self) -> Optional[str]:
identity = self.detection.with_identity
if identity is None:
return None
nickname = identity.nickname
unique_name = identity.unique_name
name = unique_name if nickname is None else nickname
        distance = self.detection.extra_data['encoding_distance']
        return f"{name} ({round(distance, 2)})"
@property
def _attributes_text(self) -> Optional[str]:
attributes = self.detection.attributes
attribute_strings = sorted(f"{key}: {val}"
for key, val in attributes.items())
attribute_text = "\n".join(attribute_strings)
return attribute_text or None
@property
def _extra_data_text(self) -> Optional[str]:
extra_data = self.detection.extra_data
extra_data_strings: List[str] = []
for key, val in extra_data.items():
if isinstance(val, float):
val = round(val, 3)
extra_data_strings.append(f"{key}: {val}")
extra_data_text = "\n".join(extra_data_strings)
return extra_data_text or None
@property
def _max_label_width(self) -> int:
# Naive. Maybe refine for non-rectangular detections in future?
detection_width = self.detection.bbox[1][0] - self.detection.bbox[0][0]
return max(self.MIN_WIDTH, detection_width)
|
11599357
|
import random
from collections.abc import Iterable
from uninas.optimization.hpo.uninas.candidate import Candidate
from uninas.optimization.hpo.uninas.values import ValueSpace
class Crossover:
def __init__(self, value_space: ValueSpace, fixed_num_crossover: int = None):
self.value_space_size = value_space.num_choices()
self.fixed_num_crossover = fixed_num_crossover
def _num_crossover(self) -> int:
if self.fixed_num_crossover is not None:
return self.fixed_num_crossover
return random.randint(1, self.value_space_size - 1)
def yield_genes(self, c0: Candidate, c1: Candidate) -> Iterable:
""" yield lists (genes) """
raise NotImplementedError
class MixedCrossover(Crossover):
""" take genes randomly from either candidate """
def yield_genes(self, c0: Candidate, c1: Candidate) -> Iterable:
mask = random.sample(range(self.value_space_size), k=self._num_crossover())
new_gene0, new_gene1 = [], []
for j, (gene0, gene1) in enumerate(zip(c0.values, c1.values)):
g0_, g1_ = (gene0, gene1) if j in mask else (gene1, gene0)
new_gene0.append(g0_)
new_gene1.append(g1_)
yield new_gene0
yield new_gene1
class SinglePointCrossover(Crossover):
""" take the first n genes from the first candidate, the rest from the second """
def yield_genes(self, c0: Candidate, c1: Candidate) -> Iterable:
n = self._num_crossover()
yield list(c0.values[:n] + c1.values[n:])
yield list(c1.values[:n] + c0.values[n:])
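if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the crossover
    # classes only need num_choices() and .values, so tiny stand-ins for
    # ValueSpace/Candidate suffice to show the gene mixing.
    class _Space:
        def num_choices(self):
            return 4

    class _Cand:
        def __init__(self, values):
            self.values = values

    c0, c1 = _Cand([0, 0, 0, 0]), _Cand([1, 1, 1, 1])
    xo = SinglePointCrossover(_Space(), fixed_num_crossover=2)
    for gene in xo.yield_genes(c0, c1):
        print(gene)  # expected: [0, 0, 1, 1] then [1, 1, 0, 0]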
|
11599377
|
import copy
import logging
import numpy as np
from matminer.datasets import get_all_dataset_info
from matminer.featurizers.conversions import (
StrToComposition,
StructureToComposition,
)
from monty.json import MSONable
from matbench.constants import (
CLF_KEY,
CLF_METRICS,
FOLD_DIST_METRICS,
MBV01_KEY,
REG_KEY,
REG_METRICS,
)
from matbench.data_ops import load, score_array
from matbench.metadata import mbv01_metadata, mbv01_validation
from matbench.util import MSONable2File, RecursiveDotDict, immutify_dictionary
logger = logging.getLogger(__name__)
class MatbenchTask(MSONable, MSONable2File):
"""The core interface for running a Matbench task and recording its results.
MatbenchTask handles creating training/validation and testing sets, as
well as recording and managing all data in a consistent fashion.
    MatbenchTask also validates data according to the specifications in the
validation file.
MatbenchTasks have a few core methods:
- MatbenchTask.get_train_and_val_data: Get nested cross validation data to
be used for all training and validation.
- MatbenchTask.get_test_data: Get test data for nested cross validation.
- MatbenchTask.record: Record your predicted results for the test data.
- MatbenchTask.validate: Check to make sure the data you recorded for this
task is valid.
You can iterate through the folds of a matbench task using .folds and
the .get_*_data methods.
You can load the results of a task without having to load large
datasets themselves. However, to get training and testing data,
you must load the datasets. Tasks loaded from files do not
automatically load the dataset into memory; to load a dataset into memory,
use MatbenchTask.load().
See the full documentation online for more info and tutorials on
using MatbenchTask.
Attributes:
benchmark_name (str): The name of the benchmark this task belongs to.
df (pd.DataFrame): the dataframe of the dataset for this task
info (str): Info about this dataset
metadata (RecursiveDotDict): all metadata about this dataset
validation (RecursiveDotDict): The validation specification for this
task, including the training and testing splits for each fold.
folds_keys ([str]): Keys of folds, fold_i for the ith fold.
folds_nums ([int]): Values of folds, i for the ith fold.
folds_map ({int: str}): Mapping of folds_nums to folds_keys
folds ([int]): Alias for folds_nums
results (RecursiveDotDict): all raw results in dict-like form.
"""
_RESULTS_KEY = "results"
_BENCHMARK_KEY = "benchmark_name"
_DATASET_KEY = "dataset_name"
_DATA_KEY = "data"
_PARAMS_KEY = "parameters"
_SCORES_KEY = "scores"
def __init__(self, dataset_name, autoload=True, benchmark=MBV01_KEY):
"""
Args:
dataset_name (str): Name of the task. Must belong to the benchmark
given in the 'benchmark' argument.
autoload (bool): If True, will load the benchmark's raw data. This
includes deserializing many large structures for some datasets,
                so loading may take some time. If False, you will need to
run .load() before running .get_*_data() methods.
benchmark (str): Name of the benchmark this task belongs to.
"""
self.dataset_name = dataset_name
self.df = load(self.dataset_name) if autoload else None
self.info = get_all_dataset_info(dataset_name)
# define all static data needed for this task
# including citations, data size, as well as specific validation splits
if benchmark == MBV01_KEY:
self.benchmark_name = MBV01_KEY
self.metadata = mbv01_metadata[dataset_name]
self.validation = mbv01_validation.splits[dataset_name]
else:
raise ValueError(
f"Only {MBV01_KEY} available. No other benchmarks defined!"
)
# keeping track of folds
self.folds_keys = list(self.validation.keys())
self.folds_nums = list(range(len(self.folds_keys)))
self.folds_map = dict(zip(self.folds_nums, self.folds_keys))
# Alias for ease of use
self.folds = self.folds_nums
self.results = RecursiveDotDict({})
def _get_data_from_df(self, ids, as_type):
"""Private function to get fold data from the task dataframe.
Args:
ids (list-like): List of string indices to grab from the df.
as_type (str): either "df" or "tuple". If "df", returns the
data as a subset of the task df. If "tuple", returns
list-likes of the inputs and outputs as a 2-tuple.
Returns:
(pd.DataFrame or (list-like, list-like))
"""
relevant_df = self.df.loc[ids]
if as_type == "df":
return relevant_df
elif as_type == "tuple":
# inputs, outputs
return (
relevant_df[self.metadata.input_type],
relevant_df[self.metadata.target],
)
def _check_is_loaded(self):
"""Private method to check if the dataset is loaded.
Throws error if the dataset is not loaded.
Returns:
None
"""
if self.df is None:
raise ValueError(
"Task dataset is not loaded! Run MatbenchTask.load() to "
"load the dataset into memory."
)
def _check_all_folds_recorded(self, msg):
"""Private method to check if all folds have been recorded.
Throws error if all folds have not been recorded.
Args:
msg (str): Error message to be displayed.
Returns:
None
"""
if not self.all_folds_recorded:
raise ValueError(
f"{msg}; folds "
f"{[f for f in self.is_recorded if not self.is_recorded[f]]} "
f"not recorded!"
)
@classmethod
def from_dict(cls, d):
"""Create a MatbenchTask from a dictionary input.
Required method from MSONable.
Args:
d (dict):
Returns:
(MatbenchTask)
"""
req_base_keys = [
"@module",
"@class",
cls._DATASET_KEY,
cls._RESULTS_KEY,
cls._BENCHMARK_KEY,
]
for k in req_base_keys:
if k not in d:
raise KeyError(f"Required key '{k}' not found.")
extra_base_keys = [k for k in d.keys() if k not in req_base_keys]
if extra_base_keys:
raise KeyError(f"Extra keys {extra_base_keys} not allowed.")
return cls._from_args(
dataset_name=d[cls._DATASET_KEY],
benchmark_name=d[cls._BENCHMARK_KEY],
results_dict=d[cls._RESULTS_KEY],
)
@classmethod
def _from_args(cls, dataset_name, benchmark_name, results_dict):
"""Instantiate a MatbenchTask from a arguments
Args:
dataset_name (str): The name of the dataset/task
benchmark_name (str): The name of the corresponding benchmark
results_dict (dict): A formatted dictionary of raw results.
Returns:
(MatbenchTask)
"""
obj = cls(dataset_name, autoload=False, benchmark=benchmark_name)
obj.results = RecursiveDotDict(results_dict)
obj.validate()
return obj
def load(self):
"""Load the dataset for this task into memory.
Returns:
None
"""
if self.df is None:
logger.info(f"Loading dataset '{self.dataset_name}'...")
self.df = load(self.dataset_name)
logger.info(f"Dataset '{self.dataset_name} loaded.")
else:
logger.info(
f"Dataset {self.dataset_name} already loaded; "
f"not reloading dataset."
)
def get_info(self):
logger.info(self.info)
def get_train_and_val_data(self, fold_number, as_type="tuple"):
"""
The training + validation data. All model tuning and
hyperparameter selection must be done on this data, NOT test data.
        Args:
            fold_number (int): The fold number.
            as_type (str): Either "df" or "tuple"; see _get_data_from_df.

        Returns:
            (pd.DataFrame or (list-like, list-like)): The inputs and outputs
                for training and validation on this fold.
"""
self._check_is_loaded()
fold_key = self.folds_map[fold_number]
ids = self.validation[fold_key].train
return self._get_data_from_df(ids, as_type)
def get_test_data(self, fold_number, as_type="tuple", include_target=False):
"""
The test data used for recording benchmarks.
        Args:
            fold_number (int): The fold number.
            as_type (str): Either "df" or "tuple"; see _get_data_from_df.
            include_target (bool): If True, also return the target column.

        Returns:
            (pd.DataFrame or (list-like, list-like)): The test inputs, plus
                the test targets if include_target is True.
"""
self._check_is_loaded()
fold_key = self.folds_map[fold_number]
ids = self.validation[fold_key].test
if include_target:
return self._get_data_from_df(ids, as_type)
else:
if as_type == "tuple":
return self._get_data_from_df(ids, as_type)[0]
elif as_type == "df":
return self._get_data_from_df(ids, as_type)[
[self.metadata.input_type]
]
def record(self, fold_number, predictions, params=None):
"""Record the test data as well as parameters about the model
trained on this fold.
Args:
fold_number (int): The fold number.
predictions ([float] or [bool] or np.ndarray): A list
of predictions for fold number {fold_number}
params (dict): Any free-form parameters for information
about the algorithm on this fold. For example,
hyperparameters determined during validation. Parameters
must be a dictionary; dictionary types must adhere to
the same requirements as in the MatbenchBenchmark.add_metadata
docstring.
Returns:
None
"""
if self.is_recorded[fold_number]:
logger.error(
f"Fold number {fold_number} already recorded! Aborting record..."
)
else:
# avoid problems with json serialization
if isinstance(predictions, np.ndarray):
predictions = predictions.tolist()
fold_key = self.folds_map[fold_number]
# create map of original df index to prediction, e.g.,
# {ix_of_original_df1: prediction1, ... etc.}
split_ids = self.validation[fold_key].test
if len(predictions) != len(split_ids):
raise ValueError(
f"Prediction outputs must be the same length as the "
f"inputs! {len(predictions)} != {len(split_ids)}"
)
ids_to_predictions = {split_ids[i]: p for i, p in enumerate(predictions)}
self.results[fold_key][self._DATA_KEY] = ids_to_predictions
if not isinstance(params, (dict, type(None))):
raise TypeError(
f"Parameters must be stored as a dictionary, not {type(params)}!"
)
params = immutify_dictionary(params) if params else params
self.results[fold_key][self._PARAMS_KEY] = params if params else {}
self.is_recorded[fold_number] = True
logger.info(
f"Recorded fold " f"{self.dataset_name}-{fold_number} successfully."
)
truth = self._get_data_from_df(split_ids, as_type="tuple")[1]
self.results[fold_key][self._SCORES_KEY] = score_array(
truth, predictions, self.metadata.task_type
)
            logger.debug(
                f"Scored fold '{self.dataset_name}-{fold_key}' successfully."
            )
def as_dict(self):
"""Return a MatbenchTask object as a dictionary.
Required method from MSONAble.
Returns:
(dict)
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
self._BENCHMARK_KEY: self.benchmark_name,
self._DATASET_KEY: self.dataset_name,
self._RESULTS_KEY: dict(self.results),
}
def validate(self):
"""Validate a task after all folds have been recorded.
There are a few requirements for a task to be validated:
- Data types of each predicted sample must match those
specified by the validation procedure
- All folds must be recorded
- There must be no extra or missing required keys from
the data, including indices. Every index specified in
the validation procedure must be present in its
correct fold, and no extras may be present.
Returns:
"""
self._check_all_folds_recorded(
f"Cannot validate task {self.dataset_name} "
f"unless all folds recorded!"
)
task_type = self.metadata.task_type
# Check for extra fold keys
extra_fold_keys = [k for k in self.results if k not in self.folds_keys]
if extra_fold_keys:
raise KeyError(
f"Extra fold keys {extra_fold_keys} for task "
f"{self.dataset_name} not allowed."
)
for fold_key in self.folds_keys:
if fold_key not in self.results:
raise KeyError(
f"Required fold data for fold '{fold_key}' "
f"for task {self.dataset_name} not found."
)
# Check for extra or missing keys inside each fold:
# need params, scores, and data.
req_subfold_keys = [self._SCORES_KEY, self._DATA_KEY, self._PARAMS_KEY]
extra_subfold_keys = [
k for k in self.results[fold_key] if k not in req_subfold_keys
]
if extra_subfold_keys:
raise KeyError(
f"Extra keys {extra_subfold_keys} for fold results of "
f"'{fold_key}' for task {self.dataset_name} not allowed."
)
for subkey in req_subfold_keys:
fold_results = self.results[fold_key]
if subkey not in fold_results:
raise KeyError(
f"Required key '{subkey}' for task {self.dataset_name} "
f"not found for fold '{fold_key}'."
)
if subkey == self._SCORES_KEY:
scores = self.results[fold_key][subkey]
metrics = REG_METRICS if task_type == REG_KEY else CLF_METRICS
for m in metrics:
if m not in scores:
raise KeyError(
f"Required score '{m}' for task "
f"{self.dataset_name} "
f"not found for '{fold_key}'."
)
elif not isinstance(scores[m], float):
raise TypeError(
f"Required score '{m}' for task "
f"{self.dataset_name} "
f"is not float-type for '{fold_key}'!"
)
extra_metrics = [k for k in scores if k not in metrics]
if extra_metrics:
raise KeyError(
f"Extra keys {extra_metrics} for fold scores of "
f"'{fold_key}' for task {self.dataset_name} "
f"not allowed."
)
# results data indices are cast by json to be strings,
# so must be converted to int
elif subkey == self._DATA_KEY:
fold_data = self.results[fold_key].data
# Ensure all the indices are present with no
# extras for each fold
req_indices = set(self.validation[fold_key].test)
remaining_indices = copy.deepcopy(req_indices)
extra_indices = {}
if self.metadata.task_type == REG_KEY:
allowed_types = (float,)
else:
allowed_types = (bool, float)
for ix, datum in fold_data.items():
if ix not in req_indices:
extra_indices[ix] = datum
else:
if not isinstance(datum, allowed_types):
raise TypeError(
f"Data point '{ix}: {datum}' has data type "
f"{type(datum)} while required type is "
f"{allowed_types} for task "
f"{self.dataset_name} !"
)
if self.metadata.task_type == CLF_KEY:
if isinstance(datum, float):
if datum < 0 or datum > 1:
raise ValueError(
f"Probability estimate '{ix}': {datum}"
f"for task {self.dataset_name} outside "
f"of range [0, 1]."
)
remaining_indices.remove(ix)
if extra_indices and not remaining_indices:
raise ValueError(
f"{len(extra_indices)} extra indices for problem "
f"{self.dataset_name} are not allowed (found in "
f"{fold_key}: {remaining_indices}"
)
elif not extra_indices and remaining_indices:
raise ValueError(
f"{len(remaining_indices)} required indices "
f"for problem {self.dataset_name} not "
f"found for {fold_key}: {remaining_indices}"
)
elif extra_indices and remaining_indices:
raise ValueError(
f"{len(remaining_indices)} required indices "
f"for problem {self.dataset_name} not "
f"found and {len(extra_indices)} not "
f"allowed indices found for {fold_key}!"
)
else:
pass
# Params key has no required form;
# it is up to the model to determine it.
logger.debug(f"Data for {self.dataset_name} successfully validated.")
@property
def scores(self):
"""Comprehensive score metrics for this task.
Gets means, maxes, mins, and more distribution stats (across folds)
for all scoring metrics defined for this task.
There will be different scores for classification problems and
regression problems.
Returns:
            (dict): A dictionary of all the scores for this task.
"""
metric_keys = (
REG_METRICS if self.metadata.task_type == REG_KEY else CLF_METRICS
)
scores = {}
self._check_all_folds_recorded("Cannot score unless all folds are recorded!")
for mk in metric_keys:
metric = {}
# scores for a metric among all folds
raw_metrics_on_folds = [
self.results[fk][self._SCORES_KEY][mk]
for fk in self.folds_map.values()
]
for op in FOLD_DIST_METRICS:
metric[op] = getattr(np, op)(raw_metrics_on_folds)
scores[mk] = metric
return RecursiveDotDict(scores)
@property
def is_recorded(self):
"""Determine what folds in the task are recorded.
Returns:
({int: bool}): Keys are fold numbers, values are whether the
fold is recorded or not.
"""
is_recorded = {}
for fnum, fkey in self.folds_map.items():
if self.results[fkey][self._DATA_KEY]:
is_recorded[fnum] = True
else:
is_recorded[fnum] = False
return is_recorded
@property
def all_folds_recorded(self):
"""Determine if all folds are recorded.
Returns:
(bool): True if all folds are recorded, False otherwise.
"""
return all([v for v in self.is_recorded.values()])
@property
def has_polymorphs(self):
"""Determine if a task's raw data contains polymorphs.
Returns:
(bool) If true, contains polymorphs.
"""
checker_key = "pmg_composition"
self._check_is_loaded()
if self.metadata.input_type == "composition":
stc = StrToComposition(target_col_id=checker_key, reduce=True)
comps = stc.featurize_dataframe(self.df, "composition")[
checker_key
].values
elif self.metadata.input_type == "structure":
stc = StructureToComposition(target_col_id=checker_key, reduce=True)
comps = stc.featurize_dataframe(self.df, "structure")[checker_key].values
else:
raise ValueError(
"Cannot check for polymorphs without input type in "
"(structure, composition)!"
)
unique_comps = set(comps)
if len(unique_comps) != len(comps):
return True
else:
return False
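if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the standard
    # nested-CV fold loop with a trivial mean predictor. "matbench_dielectric"
    # is one of the mbv01 regression tasks; running this downloads its dataset.
    task = MatbenchTask("matbench_dielectric")
    for fold in task.folds:
        train_inputs, train_outputs = task.get_train_and_val_data(fold)
        test_inputs = task.get_test_data(fold)
        predictions = [float(train_outputs.mean())] * len(test_inputs)
        task.record(fold, predictions)
    task.validate()
    print(task.scores)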
|
11599402
|
from __future__ import annotations
import itertools
import re
from collections import defaultdict
from configparser import ConfigParser
from typing import Callable, Mapping
from .requires import requires
from .util import fix_and_reorder, is_substitute, to_boolean
def format_test_env(parser: ConfigParser, name: str) -> None:
tox_section_cfg: Mapping[str, Callable[[str], str]] = {
"description": str,
"passenv": to_pass_env,
"setenv": to_set_env,
"basepython": str,
"skip_install": to_boolean,
"usedevelop": to_boolean,
"deps": to_deps,
"extras": to_extras,
"parallel_show_output": to_boolean,
"changedir": str,
"commands": to_commands,
}
fix_and_reorder(parser, name, tox_section_cfg)
CONDITIONAL_MARKER = re.compile(r"(?P<envs>[a-zA-Z0-9,]+):(?P<value>.*)")
def to_deps(value: str) -> str:
raw_deps, substitute = collect_multi_line(value, line_split=None)
groups = defaultdict(list)
for dep in raw_deps:
if dep.startswith("-r"):
groups["-r"].append(dep)
else:
match = CONDITIONAL_MARKER.match(dep)
if match:
elements = match.groupdict()
groups[",".join(sorted(elements["envs"].split(",")))].append(elements["value"].strip())
else:
groups[""].append(dep)
groups_requires = {key: requires(value) for key, value in groups.items()}
deps = list(
itertools.chain.from_iterable(
(f"{k}: {d}" if k not in ("", "-r") else d for d in v) for k, v in sorted(groups_requires.items())
)
)
return fmt_list(deps, substitute)
def collect_multi_line(value: str, line_split: str | None = r",| |\t") -> tuple[list[str], list[str]]:
lines = value.strip().splitlines()
substitute, elements = [], []
for line in lines:
for part in re.split(line_split, line.strip()) if line_split else [line.strip()]:
if part: # remove empty lines
if is_substitute(part):
substitute.append(part)
else:
if part not in elements: # remove duplicates
elements.append(part)
return elements, substitute
def fmt_list(values: list[str], substitute: list[str]) -> str:
return "\n".join([""] + substitute + values)
def to_extras(value: str) -> str:
"""Must be a line separated list - fix comma separated format"""
extras, substitute = collect_multi_line(value)
return fmt_list(sorted(extras), substitute)
def to_pass_env(value: str) -> str:
pass_env, substitute = collect_multi_line(value)
return fmt_list(sorted(pass_env), substitute)
def to_set_env(value: str) -> str:
raw_set_env, substitute = collect_multi_line(value, line_split=None)
set_env: list[str] = []
for env in raw_set_env:
at = env.find("=")
if at == -1:
raise RuntimeError(f"invalid line {env} in setenv")
set_env.append(f"{env[:at].strip()} = {env[at+1:].strip()}")
return fmt_list(sorted(set_env), substitute)
_CMD_SEP = "\\"
def to_commands(value: str) -> str:
result: list[str] = []
ends_with_sep = False
for val in value.splitlines():
val = val.strip()
cur_ends_with_sep = val.endswith(_CMD_SEP)
if cur_ends_with_sep:
val = val[:-1].strip()
if val and val != _CMD_SEP:
ending = f" {_CMD_SEP}" if cur_ends_with_sep else ""
prepend = " " if ends_with_sep else ""
result.append(f"{prepend}{val}{ending}")
ends_with_sep = cur_ends_with_sep
return fmt_list(result, [])
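if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): normalize a
    # setenv block into sorted "KEY = value" lines. Because this module uses
    # relative imports, run it as `python -m <package>.<module>`.
    print(to_set_env("B=2\nA = 1"))  # expected: "\nA = 1\nB = 2"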
|
11599449
|
from .inference import *
from .model_selection import *
from .objective_functions import ObjectiveFunction
from .objective_functions import TraditionalUnnormalizedLogLikelyhood
from .objective_functions import TraditionalMicrocanonicalEntropy
from .objective_functions import DegreeCorrectedUnnormalizedLogLikelyhood
from .objective_functions import DegreeCorrectedMicrocanonicalEntropy
from .peixotos_flat_sbm import LogLikelihoodOfFlatMicrocanonicalDegreeCorrectedSbmWrapper
from .peixotos_flat_sbm import LogLikelihoodOfFlatMicrocanonicalNonDegreeCorrected
from .peixotos_flat_sbm import LogLikelihoodOfFlatMicrocanonicalDegreeCorrectedUniform
from .peixotos_flat_sbm import LogLikelihoodOfFlatMicrocanonicalDegreeCorrectedUniformHyperprior
from .peixotos_hierarchical_sbm import LogLikelihoodOfHierarchicalMicrocanonicalDegreeCorrectedSbmWrapper
from .peixotos_hierarchical_sbm import LogLikelihoodOfHierarchicalMicrocanonicalNonDegreeCorrected
from .peixotos_hierarchical_sbm import LogLikelihoodOfHierarchicalMicrocanonicalDegreeCorrectedUniform
from .peixotos_hierarchical_sbm import LogLikelihoodOfHierarchicalMicrocanonicalDegreeCorrectedUniformHyperprior
from .objective_function_iclex import IntegratedCompleteLikelihoodExact
from .objective_function_iclex import IntegratedCompleteLikelihoodExactJeffrey
from .objective_function_iclex import IntegratedCompleteLikelihoodExactUniform
from .objective_function_newman_group_size import NewmanReinertNonDegreeCorrected
from .objective_function_newman_group_size import NewmanReinertDegreeCorrected
from .partition import *
from .exceptions import NoFreeNodeException
from .hierarchical_inference import HierarchicalInference
from .hierarchical_inference import PeixotoHierarchicalInference
from .nxpartitiongraphbased import NxPartitionGraphBased
from .nxpartitiongraphbased import NxPartitionGraphBasedWithMoveCounter
from .nxpartitiongraphbased import NxHierarchicalPartition
|
11599487
|
import numpy as np
import scipy.sparse as sp
import sklearn
import sklearn.metrics
import torch
import pandas as pd
import random
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
return s == 'True'
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def loadsparse(filename):
df = pd.read_csv(filename, header=None, delimiter=",")
    a = df.to_numpy()
a = sp.csr_matrix(a)
return a
def loadsparse2(fname):
df = pd.read_csv(fname, header=None, delimiter=",")
    a = df.to_numpy()
row = np.max(a[:, 0])
column = np.max(a[:, 1])
s = sp.csr_matrix((a[:, 2], (a[:, 0],a[:, 1])), shape=(row.astype('int64') + 1, column.astype('int64') + 1))
return s
def loaddata(filename):
df = pd.read_csv(filename, header=None, delimiter=",")
    a = df.to_numpy()
return a
def load_raw_ts(path, dataset, tensor_format=True):
path = path + dataset + "/"
x_train = np.load(path + 'X_train.npy')
y_train = np.load(path + 'y_train.npy')
x_test = np.load(path + 'X_test.npy')
y_test = np.load(path + 'y_test.npy')
ts = np.concatenate((x_train, x_test), axis=0)
ts = np.transpose(ts, axes=(0, 2, 1))
labels = np.concatenate((y_train, y_test), axis=0)
nclass = int(np.amax(labels)) + 1
# total data size: 934
train_size = y_train.shape[0]
# train_size = 10
total_size = labels.shape[0]
idx_train = range(train_size)
idx_val = range(train_size, total_size)
idx_test = range(train_size, total_size)
if tensor_format:
# features = torch.FloatTensor(np.array(features))
ts = torch.FloatTensor(np.array(ts))
labels = torch.LongTensor(labels)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return ts, labels, idx_train, idx_val, idx_test, nclass
def load_muse(data_path="./data/", dataset="ECG", sparse=False, tensor_format=True, shuffle=False):
if sparse:
path = data_path + "muse_sparse/" + dataset + "/"
else:
path = data_path + "muse/" + dataset + "/"
file_header = dataset + "_"
# load feature
if sparse:
train_features = loadsparse2(path + file_header + "train.csv")
test_features = loadsparse2(path + file_header + "test.csv")
else:
train_features = loadsparse(path + file_header + "train.csv")
test_features = loadsparse(path + file_header + "test.csv")
# crop the features
mf = np.min((test_features.shape[1], train_features.shape[1]))
train_features = train_features[:, 0: mf]
test_features = test_features[:, 0: mf]
print("Train Set:", train_features.shape, ",", "Test Set:", test_features.shape)
if shuffle:
        # shuffle train features
non_test_size = train_features.shape[0]
idx_non_test = random.sample(range(non_test_size), non_test_size)
train_features = train_features[idx_non_test, ]
features = sp.vstack([train_features, test_features])
features = normalize(features)
train_labels = loaddata(path + file_header + "train_label.csv")
if shuffle:
train_labels = train_labels[idx_non_test, ] # shuffle labels
test_labels = loaddata(path + file_header + "test_label.csv")
labels = np.concatenate((train_labels, test_labels), axis=0)
nclass = np.amax(labels) + 1
non_test_size = train_labels.shape[0]
# val_size = int(non_test_size * val_ratio)
# train_size = non_test_size - val_size
total_size = features.shape[0]
idx_train = range(non_test_size)
idx_val = range(non_test_size, total_size)
idx_test = range(non_test_size, total_size)
if tensor_format:
features = torch.FloatTensor(np.array(features.toarray()))
labels = torch.LongTensor(labels)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return features, labels, idx_train, idx_val, idx_test, nclass
def normalize(mx):
"""Row-normalize sparse matrix"""
# rowsum = np.array(mx.sum(1))
# r_inv = np.power(rowsum, -1).flatten()
# r_inv[np.isinf(r_inv)] = 0.
# r_mat_inv = sp.diags(r_inv)
# mx = r_mat_inv.dot(mx)
row_sums = mx.sum(axis=1)
mx = mx.astype('float32')
row_sums_inverse = 1 / row_sums
f = mx.multiply(row_sums_inverse)
return sp.csr_matrix(f).astype('float32')
def convert2sparse(features):
aaa = sp.coo_matrix(features)
value = aaa.data
column_index = aaa.col
row_pointers = aaa.row
a = np.array(column_index)
b = np.array(row_pointers)
a = np.reshape(a, (a.shape[0],1))
b = np.reshape(b, (b.shape[0],1))
s = np.concatenate((a, b), axis=1)
t = torch.sparse.FloatTensor(torch.LongTensor(s.T), torch.FloatTensor(value))
return t
def accuracy(output, labels):
preds = output.max(1)[1].cpu().numpy()
labels = labels.cpu().numpy()
accuracy_score = (sklearn.metrics.accuracy_score(labels, preds))
return accuracy_score
def random_hash(features, K):
    """Randomly select K columns of `features` (a cheap random projection)."""
    idx = np.arange(features.shape[1])
    np.random.shuffle(idx)
    return torch.FloatTensor(features[:, idx[:K]].toarray())
def to_sparse(x):
""" converts dense tensor x to sparse format """
x_typename = torch.typename(x).split('.')[-1]
sparse_tensortype = getattr(torch.sparse, x_typename)
indices = torch.nonzero(x)
if len(indices.shape) == 0: # if all elements are zeros
return sparse_tensortype(*x.shape)
indices = indices.t()
values = x[tuple(indices[i] for i in range(indices.shape[0]))]
return sparse_tensortype(indices, values, x.size())
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def euclidean_dist(x, y):
# x: N x D
# y: M x D
n = x.size(0)
m = y.size(0)
d = x.size(1)
assert d == y.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
return torch.pow(x - y, 2).sum(2)
def output_conv_size(in_size, kernel_size, stride, padding):
output = int((in_size - kernel_size + 2 * padding) / stride) + 1
return output
def dump_embedding(proto_embed, sample_embed, labels, dump_file='./plot/embeddings.txt'):
proto_embed = proto_embed.cpu().detach().numpy()
sample_embed = sample_embed.cpu().detach().numpy()
embed = np.concatenate((proto_embed, sample_embed), axis=0)
nclass = proto_embed.shape[0]
labels = np.concatenate((np.asarray([i for i in range(nclass)]),
labels.squeeze().cpu().detach().numpy()), axis=0)
with open(dump_file, 'w') as f:
for i in range(len(embed)):
label = str(labels[i])
line = label + "," + ",".join(["%.4f" % j for j in embed[i].tolist()])
f.write(line + '\n')
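if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): pairwise squared
    # Euclidean distances between two row sets, and a conv output-size check.
    a = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    b = torch.tensor([[2.0, 0.0]])
    print(euclidean_dist(a, b))  # expected: [[4.], [2.]]
    print(output_conv_size(32, kernel_size=3, stride=1, padding=1))  # 32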
|
11599490
|
class EvaluateConfig:
def __init__(self):
self.game_num = 400
self.replace_rate = 0.55
self.play_config = PlayConfig()
self.play_config.simulation_num_per_move = 200
self.play_config.thinking_loop = 1
self.play_config.c_puct = 1
self.play_config.change_tau_turn = 0
self.play_config.noise_eps = 0
self.evaluate_latest_first = True
class PlayDataConfig:
def __init__(self):
self.sl_nb_game_in_file = 100
self.nb_game_in_file = 100
self.max_file_num = 200
class PlayConfig:
def __init__(self):
self.simulation_num_per_move = 200
self.thinking_loop = 1
self.logging_thinking = False
self.c_puct = 1.5
self.noise_eps = 0.25
self.dirichlet_alpha = 0.3
self.change_tau_turn = 10
self.virtual_loss = 3
self.prediction_queue_size = 16
self.search_threads = 16
self.prediction_worker_sleep_sec = 0.00001
self.wait_for_expanding_sleep_sec = 0.000001
self.resign_threshold = -0.8
self.min_resign_turn = 5
self.average_chess_movements = 50
class TrainerConfig:
def __init__(self):
self.batch_size = 2048
self.epoch_to_checkpoint = 1
self.start_total_steps = 0
self.save_model_steps = 2000
self.load_data_steps = 1000
self.loss_weights = [1.0, 1.0] # prevent value overfit in SL
class ModelConfig:
cnn_filter_num = 256
cnn_filter_size = 3
res_layer_num = 7
l2_reg = 1e-4
value_fc_size = 256
distributed = False
|
11599497
|
import collections
from typing import List
class FirstUnique:
def __init__(self, nums: List[int]):
self.table = collections.Counter(nums)
self.Q = collections.deque(nums)
def showFirstUnique(self) -> int:
while self.Q and self.table[self.Q[0]] > 1:
self.Q.popleft()
return self.Q[0] if self.Q else -1
def add(self, value: int) -> None:
self.Q.append(value)
self.table[value] += 1
# Your FirstUnique object will be instantiated and called as such:
# obj = FirstUnique(nums)
# param_1 = obj.showFirstUnique()
# obj.add(value)
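if __name__ == "__main__":
    # Hedged usage sketch (not part of the original snippet); mirrors the
    # LeetCode 1429 example.
    obj = FirstUnique([2, 3, 5])
    print(obj.showFirstUnique())  # 2
    obj.add(5)                    # 5 is now a duplicate
    print(obj.showFirstUnique())  # 2
    obj.add(2)                    # 2 is now a duplicate
    print(obj.showFirstUnique())  # 3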
|
11599503
|
from ninja import Body, Form, NinjaAPI
from ninja.testing import TestClient
api = NinjaAPI()
# testing Body marker:
@api.post("/task")
def create_task(request, start: int = Body(...), end: int = Body(...)):
return [start, end]
@api.post("/task2")
def create_task2(request, start: int = Body(2), end: int = Form(1)):
return [start, end]
def test_body():
client = TestClient(api)
assert client.post("/task", json={"start": 1, "end": 2}).json() == [1, 2]
def test_body_form():
client = TestClient(api)
assert client.post("/task2", POST={"start": "1", "end": "2"}).json() == [1, 2]
assert client.post("/task2").json() == [2, 1]
|
11599516
|
import os
import json
"""
Details of models for the simulated eval.
"""
LOG_PATH = 'logs'
MANIFEST_PATH = LOG_PATH + '/manifest.json'
def check_args(cfg, **kwargs):
good = True
for val in cfg.values():
if val is None:
good = False
break
good = good and os.path.isfile(cfg['log'])
if not good:
raise ValueError('Invalid model parameters for {}: {}'.format(
cfg['name'], kwargs
))
MODELS = {}
print('loading manifest {}...'.format(MANIFEST_PATH))
with open(MANIFEST_PATH, 'r') as _manifest_file:
    MANIFEST = json.load(_manifest_file)
MANIFEST = {model['name']: model for model in MANIFEST['models']}
INVALID_MODELS = []
for model_name, model in MANIFEST.items():
try:
check_args(model)
except ValueError:
        print('ignoring invalid model "{}" from manifest {}'.format(model_name, MANIFEST_PATH))
INVALID_MODELS.append(model_name)
for m in INVALID_MODELS:
MANIFEST.pop(m)
print('found models: {}'.format(list(MANIFEST.keys())))
|
11599591
|
import collections
import functools
import operator
from typing import Any, Callable, Tuple
from river import utils
__all__ = ["NearestNeighbors", "MinkowskiNeighbors"]
DistanceFunc = Callable[[Any, Any], float]
class NearestNeighbors:
"""A basic data structure to hold nearest neighbors.
Parameters
----------
n_neighbors
Number of neighbors to use.
window_size
Size of the sliding window use to search neighbors with.
min_distance_keep
        The minimum distance (similarity) required to add a point to the window.
        E.g., a value of 0.0 (the default) will add every point, even exact
        duplicates; larger values keep only points that are sufficiently dissimilar.
distance_func
        A required distance function that accepts two input items to compare,
        plus optional parameters. It's recommended to use functools.partial.
Notes
-----
Updates are by default stored by the FIFO (first in first out) method,
which means that when the size limit is reached, old samples are dumped to
give room for new samples. This is circular, meaning that older points
are dumped first. This also gives the implementation a temporal aspect,
because older samples are replaced with newer ones.
    The parameter `min_distance_keep` controls the addition of new items to the
window - items that are far enough away (> min_distance_keep) are added to
the window. Thus a value of 0 indicates that we add all points, and
increasing from 0 makes it less likely we will keep a new item.
"""
def __init__(
self,
n_neighbors: int = 5,
window_size: int = 1000,
min_distance_keep: float = 0.0,
distance_func: DistanceFunc = None,
):
self.n_neighbors = n_neighbors
self.window_size = window_size
# A minimum distance (similarity) to determine adding to window
# The model will perform better with a more diverse window
# Since the distance function can be anything, it could be < 0
self.min_distance_keep = min_distance_keep
self.distance_func = distance_func
self.reset()
def append(self, item: Any, extra: [Tuple, list] = None):
"""Add a point to the window, optionally with extra metadata.
Parameters
----------
item
The data intended to be provided to the distance function. It is always
the first item in the window, and typically this will be a tuple
(x,y) with features `x` and class or value `y`.
extra:
An extra set of metadata to add to the window that is not passed to
the distance function, and allows easy customization without needing
to always write a custom distance function.
"""
self.window.append((item, *(extra or [])))
def update(self, item: Any, n_neighbors=1, extra: [Tuple, list] = None):
"""Update the window with a new point, only added if > min distance.
If min distance is 0, we do not need to do the calculation. The item
(and extra metadata) will not be added to the window if it is too close
to an existing point.
Parameters
----------
item
The data intended to be provided to the distance function. For a
standard case, it is expected to be a tuple with x first and y
second.
extra
Metadata that is separate from the item that should also be added
to the window, but is not included to be passed to the distance
function.
Returns
-------
A boolean (true/false) to indicate if the point was added.
"""
# If min distance is 0, we add all points
if self.min_distance_keep == 0:
self.append(item, extra=extra)
return True
# Don't add VERY similar points to window
nearest = self.find_nearest(item, n_neighbors)
# Distance always the last index, (x,y <extra> distance)
if not nearest or nearest[0][-1] < self.min_distance_keep:
self.append(item, extra=extra)
return True
return False
def find_nearest(self, item: Any, n_neighbors=1):
"""Find the `n_neighbors` closest points to `x`, along with their distances.
This function assumes the x is a tuple or list with x[0] having relevant
data for the distance calculation.
"""
# Compute the distances to each point in the window
# Item is JUST the (x,y) however the window is (item, <extra>, distance)
points = ((*p, self.distance_func(item, p[0])) for p in self.window)
# Return the k closest points (last index is distance)
return sorted(points, key=operator.itemgetter(-1))[:n_neighbors]
    def reset(self) -> "NearestNeighbors":
        """Reset the window."""
        self.window = collections.deque(maxlen=self.window_size)
        return self
def custom_minkowski(a, b, p):
"""Custom minkoski function. Must be global to be pickle-able."""
return utils.math.minkowski_distance(a[0], b[0], p=p)
class MinkowskiNeighbors(NearestNeighbors):
"""NearestNeighbors using the Minkowski metric as the distance with p=2.
You can still overwrite the distance_func here, however the default is
provided for the nearest neighbors classifiers to use, expecting that a
typical user will not provide a custom function.
Parameters
----------
n_neighbors
Number of neighbors to use.
window_size
Size of the sliding window use to search neighbors with.
min_distance_keep
        The minimum distance (similarity) required to add a point to the window.
        E.g., a value of 0.0 (the default) will add every point, even exact
        duplicates; larger values keep only points that are sufficiently dissimilar.
distance_func
        An optional distance function that should accept a=, b=, and any
custom set of kwargs (defined in distance_func_kwargs). If not defined,
the default Minkowski distance is used.
p
p-norm value for the Minkowski metric. When `p=1`, this corresponds to the
Manhattan distance, while `p=2` corresponds to the Euclidean distance.
Valid values are in the interval $[1, +\\infty)$
"""
def __init__(
self,
n_neighbors: int = 5,
window_size: int = 1000,
min_distance_keep: float = 0.0,
distance_func: DistanceFunc = None,
p: float = 2.0,
):
self.p = p
super().__init__(
n_neighbors=n_neighbors,
window_size=window_size,
distance_func=distance_func
or functools.partial(custom_minkowski, p=self.p),
min_distance_keep=min_distance_keep,
)
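if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): items are (x, y)
    # tuples of a feature dict and a label, scored with the default Minkowski
    # (p=2) distance. With min_distance_keep=0.0 every point is kept.
    nn = MinkowskiNeighbors(n_neighbors=2, window_size=10)
    nn.update(({"a": 0.0}, "low"))
    nn.update(({"a": 1.0}, "high"))
    nn.update(({"a": 0.9}, "high"))
    # Each returned entry is (stored_item, distance); distance is the last element.
    print(nn.find_nearest(({"a": 0.95}, None), n_neighbors=2))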
|
11599604
|
import pygame
import random
import time
import math
import pygame.gfxdraw
# knob1 = x pos ; knob2 = y pos ; knob3 = length ; knob4 = color select
def setup(screen, etc):
pass
def draw(screen, etc):
etc.color_picker_bg(etc.knob5)
yr = etc.yres
xr = etc.xres
sel = etc.knob4*5
i = ((180*yr)/720)
if 0 <= sel < 1 : #grayscale
for i in range(i):
push = abs(int(etc.knob3*etc.audio_in[i%24]/360))
boing = int(etc.knob3*i)+etc.audio_in[1]/500
i = boing
color = (int(128 + 127 * math.sin(i * .02 + time.time())),
int(127 + 127 * math.sin(i * .02 + time.time())),
int(127 + 127 * math.sin(i * .02 + time.time())))
radius = int(10+push + 10 * math.sin(i * .05 + time.time()))
xpos = int(((1080*etc.knob1 + 100*math.sin(i * .0006 + time.time()))+100)*xr)/1280
ypos = int(((((2*etc.knob2-1)/2*720+360))-int(i*etc.knob2))*yr)/720-4*i
pygame.gfxdraw.filled_circle(screen, xpos, ypos-boing, radius+1, color)
if 1 <= sel < 2 : #red
for i in range(i):
push = abs(int(etc.knob3*etc.audio_in[i%24]/300))
boing = int(etc.knob3*i)+(etc.audio_in[1]/500)
i = boing
color = (int(128 + 127 * math.sin(i * .02 + time.time())),0,0,)
radius = int(10+push + 10 * math.sin(i * .05 + time.time()))
xpos = int(((1080*etc.knob1 + 100*math.sin(i * .0006 + time.time()))+100)*xr)/1280
ypos = int(((((2*etc.knob2-1)/2*720+360))-int(i*etc.knob2))*yr)/720-4*i
pygame.gfxdraw.filled_circle(screen, xpos, ypos-boing, radius+1, color)
if 2 <= sel < 3 : #green
for i in range(i):
push = abs(int(etc.knob3*etc.audio_in[i%24]/300))
boing = int(etc.knob3*i)+etc.audio_in[1]/500
i = boing
color = (0, int(127 + 127 * math.sin(i * .012 + time.time())),0)
radius = int(10+push + 10 * math.sin(i * .05 + time.time()))
xpos = int(((1080*etc.knob1 + 100*math.sin(i * .0006 + time.time()))+100)*xr)/1280
ypos = int(((((2*etc.knob2-1)/2*720+360))-int(i*etc.knob2))*yr)/720-4*i
pygame.gfxdraw.filled_circle(screen, xpos, ypos-boing, radius+1, color)
if 3 <= sel < 4 : #blue
for i in range(i):
push = abs(int(etc.knob3*etc.audio_in[i%24]/300))
boing = int(etc.knob3*i)+etc.audio_in[1]/500
i = boing
color = (0, 0,int(127 + 127 * math.sin(i * .012 + time.time())))
radius = int(10+push + 10 * math.sin(i * .05 + time.time()))
xpos = int(((1080*etc.knob1 + 100*math.sin(i * .0006 + time.time()))+100)*xr)/1280
ypos = int(((((2*etc.knob2-1)/2*720+360))-int(i*etc.knob2))*yr)/720-4*i
pygame.gfxdraw.filled_circle(screen, xpos, ypos-boing, radius+1, color)
if 4 <= sel : #all colors
for i in range(i):
push = abs(int(etc.knob3*etc.audio_in[i%24]/300))
boing = int(etc.knob3*i)+(etc.audio_in[1]/500)
i = boing
color = (int(127 + 127 * math.sin(i*4 * .05 + time.time())),
int(127 + 127 * math.sin(i*4 * .018 + time.time())),
int(127 + 127 * math.sin(i*4 * .012 + time.time())))
radius = int(10+push + 10 * math.sin(i * .05 + time.time()))
xpos = int(((1080*etc.knob1 + 100*math.sin(i * .0006 + time.time()))+100)*xr)/1280
ypos = int(((((2*etc.knob2-1)/2*720+360))-int(i*etc.knob2))*yr)/720-4*i
pygame.gfxdraw.filled_circle(screen, xpos, ypos-boing, radius+1, color)
|
11599652
|
import config.config as config
import termcolor
import sys
import os
output = config.output_dir
try:
sys.argv[1]
if sys.argv[1] == "--new":
try:
sys.argv[2]
try:
sys.argv[3]
if sys.argv[3] == "--fa":
os.system(f"mkdir {output}/posts/{sys.argv[2]}")
os.system(f"cp -r files/fa.html {output}/posts/{sys.argv[2]}")
os.system(f"mv {output}/posts/{sys.argv[2]}/fa.html {output}/posts/{sys.argv[2]}/index.html")
print(termcolor.colored("Post created", "green"))
elif sys.argv[3] == "--en":
os.system(f"mkdir {output}/posts/{sys.argv[2]}")
os.system(f"cp -r files/en.html {output}/posts/{sys.argv[2]}")
os.system(f"mv {output}/posts/{sys.argv[2]}/en.html {output}/posts/{sys.argv[2]}/index.html")
print(termcolor.colored("Post created", "green"))
else:
print(termcolor.colored("Language didn't found", "red"))
except:
print(termcolor.colored("Enter the language", "red"))
        except IndexError:
print(termcolor.colored("Enter a post name", "red"))
else:
print(termcolor.colored("No match cases", "red"))
print(termcolor.colored("Try --help or --h to get more information", "yellow"))
except IndexError:
    print(termcolor.colored("Enter at least one argument", "red"))
|
11599675
|
def minimumSwaps(arr):
    count = 0
    for i in range(len(arr)):
        while arr[i] != i+1:
            temp = arr[i]
            arr[i] = arr[temp-1]
            arr[temp-1] = temp
            count += 1
    return count
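# Worked example (illustrative): arr = [4, 3, 1, 2]
#   move 4 to index 3 -> [2, 3, 1, 4]
#   move 2 to index 1 -> [3, 2, 1, 4]
#   move 3 to index 2 -> [1, 2, 3, 4]   => 3 swaps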
n=int(input())
a=list(map(int,input().split()))
print(minimumSwaps(a))
|
11599699
|
import sys
import os, errno
import logging
#-----------------------------------------------------------------------------------------------------------#
def set_logger(out_dir=None):
console_format = BColors.OKBLUE + '[%(levelname)s]' + BColors.ENDC + ' (%(name)s) %(message)s'
#datefmt='%Y-%m-%d %Hh-%Mm-%Ss'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(console_format))
logger.addHandler(console)
if out_dir:
file_format = '[%(levelname)s] (%(name)s) %(message)s'
log_file = logging.FileHandler(out_dir + '/log.txt', mode='w')
log_file.setLevel(logging.DEBUG)
log_file.setFormatter(logging.Formatter(file_format))
logger.addHandler(log_file)
#-----------------------------------------------------------------------------------------------------------#
def mkdir_p(path):
if path == '':
return
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def get_root_dir():
return os.path.dirname(sys.argv[0])
def bincounts(array):
num_rows = array.shape[0]
if array.ndim > 1:
num_cols = array.shape[1]
else:
num_cols = 1
array = array[:, None]
counters = []
mfe_list = []
for col in range(num_cols):
counter = {}
for row in range(num_rows):
element = array[row,col]
if element in counter:
counter[element] += 1
else:
counter[element] = 1
max_count = 0
for element in counter:
if counter[element] > max_count:
max_count = counter[element]
mfe = element
counters.append(counter)
mfe_list.append(mfe)
return counters, mfe_list
# Convert all arguments to strings
def ltos(*args):
outputs = []
for arg in args:
if type(arg) == list:
out = ' '.join(['%.3f' % e for e in arg])
if len(arg) == 1:
outputs.append(out)
else:
outputs.append('[' + out + ']')
else:
outputs.append(str(arg))
return tuple(outputs)
#-----------------------------------------------------------------------------------------------------------#
import re
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
WHITE = '\033[37m'
YELLOW = '\033[33m'
GREEN = '\033[32m'
BLUE = '\033[34m'
CYAN = '\033[36m'
RED = '\033[31m'
MAGENTA = '\033[35m'
BLACK = '\033[30m'
BHEADER = BOLD + '\033[95m'
BOKBLUE = BOLD + '\033[94m'
BOKGREEN = BOLD + '\033[92m'
BWARNING = BOLD + '\033[93m'
BFAIL = BOLD + '\033[91m'
BUNDERLINE = BOLD + '\033[4m'
BWHITE = BOLD + '\033[37m'
BYELLOW = BOLD + '\033[33m'
BGREEN = BOLD + '\033[32m'
BBLUE = BOLD + '\033[34m'
BCYAN = BOLD + '\033[36m'
BRED = BOLD + '\033[31m'
BMAGENTA = BOLD + '\033[35m'
BBLACK = BOLD + '\033[30m'
@staticmethod
def cleared(s):
return re.sub("\033\[[0-9][0-9]?m", "", s)
def red(message):
return BColors.RED + str(message) + BColors.ENDC
def b_red(message):
return BColors.BRED + str(message) + BColors.ENDC
def blue(message):
return BColors.BLUE + str(message) + BColors.ENDC
def b_yellow(message):
return BColors.BYELLOW + str(message) + BColors.ENDC
def green(message):
return BColors.GREEN + str(message) + BColors.ENDC
def b_green(message):
return BColors.BGREEN + str(message) + BColors.ENDC
#-----------------------------------------------------------------------------------------------------------#
def print_args(args, path=None):
if path:
output_file = open(path, 'w')
logger = logging.getLogger(__name__)
logger.info("Arguments:")
args.command = ' '.join(sys.argv)
items = vars(args)
for key in sorted(items.keys(), key=lambda s: s.lower()):
value = items[key]
if not value:
value = "None"
logger.info(" " + key + ": " + str(items[key]))
if path is not None:
output_file.write(" " + key + ": " + str(items[key]) + "\n")
if path:
output_file.close()
del args.command
def get_args(args):
items = vars(args)
output_string = ''
for key in sorted(items.keys(), key=lambda s: s.lower()):
value = items[key]
if not value:
value = "None"
        output_string += " " + key + ": " + str(items[key]) + "\n"
return output_string
|
11599717
|
from django.test import TestCase
from pydis_site.apps.resources.templatetags.to_kebabcase import _to_kebabcase
class TestToKebabcase(TestCase):
"""Tests for the `as_css_class` template tag."""
def test_to_kebabcase(self):
"""Test the to_kebabcase utility and template tag."""
weird_input = (
"_-_--_A_LEm0n?in&¤'the##trEE£$@€@€@@£is-NOT----QUITE//"
"as#good! as one __IN-YOUR|||HaND"
)
self.assertEqual(
_to_kebabcase(weird_input),
"a-lem0n-in-the-tree-is-not-quite-as-good-as-one-in-your-hand",
)
|
11599737
|
import pysam
import pathlib
import tempfile
import shutil
def get_read_zmw_counts(file):
bf = pysam.Samfile(file, 'rb', check_sq=False)
zmw_counts = {}
for read in bf:
zmw = read.get_tag("zm")
zmw_counts[zmw] = zmw_counts.get(zmw, 0) + 1
bf.close()
return zmw_counts
def test_shard_bam(script_runner):
bam = "test/test_data/for_scripts/shard_bam_test_file.bam"
testdir = tempfile.mkdtemp()
prefix = f"{testdir}/shard"
num_shards = 2
pathlib.Path(testdir).mkdir(parents=True, exist_ok=True)
ret = script_runner.run("docker/lr-pb/shard_bam.py", "-p", prefix, "-n", str(num_shards), bam)
files_with_zmws = {}
zmw_counts_orig = get_read_zmw_counts(bam)
for i in range(0, num_shards):
zmw_counts_shard = get_read_zmw_counts(f'{prefix}{i}.bam')
for zmw in zmw_counts_shard:
zmw_counts_orig[zmw] = zmw_counts_orig.get(zmw, 0) - zmw_counts_shard.get(zmw, 0)
if zmw not in files_with_zmws:
files_with_zmws[zmw] = set()
files_with_zmws[zmw].add(i)
shutil.rmtree(testdir)
assert ret.success
for zmw in files_with_zmws:
# Verify that ZMWs only ever appear in one shard.
assert len(files_with_zmws[zmw]) == 1
# Verify that every instance of a ZMW seen in the original file are accounted for in the shards.
assert zmw_counts_orig[zmw] == 0
|
11599757
|
from shexer.utils.factories.triple_yielders_factory import get_triple_yielder
from shexer.core.class_profiler import ClassProfiler
def get_class_profiler(target_classes_dict, source_file, list_of_source_files, input_format,
instantiation_property_str,
namespaces_to_ignore=None,
infer_numeric_types_for_untyped_literals=False,
raw_graph=None,
namespaces_dict=None,
url_input=None,
list_of_url_input=None,
rdflib_graph=None,
shape_map_file=None,
shape_map_raw=None,
track_classes_for_entities_at_last_depth_level=True,
depth_for_building_subgraph=1,
url_endpoint=None,
strict_syntax_with_corners=False,
target_classes=None,
file_target_classes=None,
built_remote_graph=None,
built_shape_map=None,
remove_empty_shapes=True,
limit_remote_instances=-1):
yielder = get_triple_yielder(source_file=source_file,
list_of_source_files=list_of_source_files,
input_format=input_format,
namespaces_to_ignore=namespaces_to_ignore,
raw_graph=raw_graph,
allow_untyped_numbers=infer_numeric_types_for_untyped_literals,
namespaces_dict=namespaces_dict,
url_input=url_input,
list_of_url_input=list_of_url_input,
rdflib_graph=rdflib_graph,
shape_map_file=shape_map_file,
shape_map_raw=shape_map_raw,
track_classes_for_entities_at_last_depth_level=track_classes_for_entities_at_last_depth_level,
depth_for_building_subgraph=depth_for_building_subgraph,
url_endpoint=url_endpoint,
instantiation_property=instantiation_property_str,
strict_syntax_with_corners=strict_syntax_with_corners,
target_classes=target_classes,
file_target_classes=file_target_classes,
built_remote_graph=built_remote_graph,
built_shape_map=built_shape_map,
limit_remote_instances=limit_remote_instances)
return ClassProfiler(triples_yielder=yielder,
target_classes_dict=target_classes_dict,
instantiation_property_str=instantiation_property_str,
original_target_classes=target_classes,
original_shape_map=built_shape_map,
remove_empty_shapes=remove_empty_shapes)
|
11599782
|
import math
import keras
from keras.optimizers import SGD, adadelta, rmsprop, adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.metrics import matthews_correlation, precision, recall
import keras.backend as K
import cPickle
import numpy as np
import getpass
username = getpass.getuser()
from foo_three import foo
def contingency(y_true, y_pred):
tp = 0
tn = 0
fp = 0
fn = 0
for i in xrange(len(y_true)):
if y_true[i] == 0: #if true label is negative:
if y_pred[i] == y_true[i]: #if pred label = true label = neg:
tn += 1 #its truly negative
elif y_pred[i] == 1: # if pred label is pos, but real is neg:
fp += 1 #its a false positive
elif y_true[i] == 1: #if true label is possy:
if y_pred[i] == y_true[i]: # if pred label = true label = possy:
tp += 1
elif y_pred[i] == 0: # if pred label is neg but real is possy:
fn += 1
return tp, tn, fp, fn
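# NOTE: model.compile() below references `sens` and `spec`, which are not defined
# anywhere in this snippet. The stand-in definitions that follow are assumed
# (plain Keras-backend sensitivity and specificity); the original project may
# implement them differently.
def sens(y_true, y_pred):
    # Sensitivity (recall): TP / (TP + FN), computed on rounded predictions.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
def spec(y_true, y_pred):
    # Specificity: TN / (TN + FP), computed on rounded predictions.
    true_negatives = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    possible_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_negatives / (possible_negatives + K.epsilon())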
def get_weights(i, name):
weights='best_weights_labcrossval_{0}_train_1_{1}.h5'.format(i, username)
model = foo()
model.load_weights(weights)
print ('weights loaded')
return model
def get_data(n_dataset, name):
f = file('MODS_224_224_{0}_{1}.pkl'.format(n_dataset, name),'rb')
data = cPickle.load(f)
f.close()
training_data = data[0]
validation_data = data[1]
t_data = training_data[0]
t_label = training_data[1]
test_data = validation_data[0]
test_label = validation_data[1]
t_data = np.array(t_data)
t_label = np.array(t_label)
test_data = np.array(test_data)
test_label = np.array(test_label)
t_data = t_data.reshape(t_data.shape[0], 1, 224, 224)
test_data = test_data.reshape(test_data.shape[0], 1, 224, 224)
t_data = t_data.astype('float32')
test_data = test_data.astype('float32')
return (t_data, t_label), (test_data, test_label)
def test_net(i, name):
model = get_weights(i, name)
print 'using weights from net trained on dataset {0} for {1}'. format(i, name)
history = LossAccHistory()
(X_train, y_train), (X_test, y_test) = get_data(i, name)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_test /= 255
print(X_test.shape[0], 'test samples')
model.compile(loss='binary_crossentropy',
optimizer= rmsprop(lr=0.001), #adadelta
metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])
ypred = model.predict_classes(X_test, verbose=1)
ytrue = Y_test
tp, tn, fp, fn = contingency(y_test, ypred)
print ' | true label\n---------------------------------'
print 'pred label | positive | negative'
print 'positive | ', tp, ' | ', fp
print 'negative | ', fn, ' | ', tn
prec = float(tp)/(tp+fp)
se = float(tp) / (tp + fn)
sp = float(tn) / (fp + tn)
    mcc = float(tp*tn - fp*fn)/(math.sqrt((tp + fp)*(tp+fn)*(tn+fp)*(tn+fn)))
f1 = (2*prec*se)/(prec+se)
acc = float(tp+tn)/(tp+tn+fp+fn)
print ' sens | spec | mcc | f1 | prec | acc '
print se, sp, mcc, f1, prec, acc
model.reset_states()
return [se, sp, mcc, f1, prec, acc]
def cv_calc(cvscores):
#calculate mean and stdev for each metric, and append them to test_metrics file
test_metrics.append(cvscores[0])
other_counter = 0
for metric in cvscores[1:]:
v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][other_counter], np.mean(metric), np.std(metric))
print v
test_metrics.append(v)
other_counter +=1
if other_counter == 6:
other_counter=0
return cvscores, test_metrics
class LossAccHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.accu = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.accu.append(logs.get('acc'))
nb_classes = 2
nb_epoch = 100
n_dataset = 5
dropout = 0.5
batch_size = 72
optimizer = 'rmsprop'
test_metrics = []
cvscores = [[],[],[],[],[],[], [], []]
for name in ['test_1', 'test_2', 'test_3']:
manualcalc = [['sens', 'spec', 'mcc', 'f1', 'prec', 'acc'], [], [], [], [], [], []]
#manualcalc = [[metrics], [sens], [spec], [mcc], [f1], [prec], [acc]]
for i in xrange(n_dataset): # 5 datasets
scorez = test_net(i, name)
for i in xrange(len(manualcalc[1:])):
#print i
manualcalc[i+1].append(scorez[i])
print manualcalc
cvscores, test_metrics = cv_calc(manualcalc)
print 'test set ', name
print cvscores, test_metrics
|
11599791
|
import math
import collections
class Solution:
"""
@param ring: a string
@param key: a string
@return: return a integer
"""
def findRotateSteps(self, ring, key):
# write your code here
table = collections.defaultdict(list)
for i, c in enumerate(ring):
table[c].append(i)
dp = [[math.inf]*len(ring) for _ in range(len(key))]
for j in range(len(ring)):
if key[0] == ring[j]:
dp[0][j] = min(j, len(ring) - j) + 1
for i in range(1, len(key)):
for j in table[key[i]]:
for k in table[key[i - 1]]:
dp[i][j] = min(dp[i][j], dp[i - 1][k] + min(abs(j - k), len(ring) - abs(j - k)) + 1)
return min(dp[-1][j] for j in table[key[-1]])
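if __name__ == "__main__":
    # Hedged sanity check (not part of the original snippet): with
    # ring = "godding" and key = "gd", press 'g' (1 step), rotate 2 steps to the
    # nearest 'd' and press (1 step), for 4 steps in total.
    print(Solution().findRotateSteps("godding", "gd"))  # expected: 4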
|
11599799
|
import os
import requests
import unittest
from random import choice
from string import ascii_letters
from pyupload.uploader import *
def generate_random_file_content():
result = ''
for _ in range(30):
result += choice(ascii_letters)
return result
class TestUploadMethods(unittest.TestCase):
def setUp(self):
self.content = generate_random_file_content()
self.filename = 'testfile'
with open(self.filename, 'w') as f:
f.write(self.content)
def tearDown(self):
os.remove(self.filename)
def compare(self, url):
r = requests.get(url)
self.assertEqual(r.text, self.content)
def test_catbox(self):
uploader = CatboxUploader(self.filename)
result = uploader.execute()
self.compare(result)
def test_uguu(self):
uploader = UguuUploader(self.filename)
result = uploader.execute()
self.compare(result)
def test_fileio(self):
uploader = FileioUploader(self.filename)
result = uploader.execute()
self.compare(result)
def test_mixtape(self):
uploader = MixTapeUploader(self.filename)
result = uploader.execute()
self.compare(result)
if __name__ == "__main__":
unittest.main()
|
11599829
|
from __future__ import print_function
from awesome_thirdparty_library import AwesomeClass
import Pyro4
# expose the class from the library using @expose as wrapper function:
ExposedClass = Pyro4.expose(AwesomeClass)
with Pyro4.Daemon() as daemon:
# register the wrapped class instead of the library class itself:
uri = daemon.register(ExposedClass, "example.thirdpartylib")
print("wrapped class registered, uri: ", uri)
daemon.requestLoop()
|
11599879
|
import numpy as np
import glob
import multiprocessing
import os
import tensorflow as tf
from tf_model import load_one_file
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--target", type=str, choices=["cand", "gen"], help="Regress to PFCandidates or GenParticles", default="cand")
parser.add_argument("--datapath", type=str, required=True, help="Input data path")
parser.add_argument("--num-files-per-tfr", type=int, default=100, help="Number of pickle files to merge to one TFRecord file")
args = parser.parse_args()
return args
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
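# For example (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]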
#https://stackoverflow.com/questions/47861084/how-to-store-numpy-arrays-as-tfrecord
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
    if isinstance(value, type(tf.constant(0))): # if value is a tensor
value = value.numpy() # get value of tensor
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _parse_tfr_element(element):
parse_dic = {
'X': tf.io.FixedLenFeature([], tf.string),
'y': tf.io.FixedLenFeature([], tf.string),
'w': tf.io.FixedLenFeature([], tf.string),
}
example_message = tf.io.parse_single_example(element, parse_dic)
X = example_message['X']
arr_X = tf.io.parse_tensor(X, out_type=tf.float32)
y = example_message['y']
arr_y = tf.io.parse_tensor(y, out_type=tf.float32)
w = example_message['w']
arr_w = tf.io.parse_tensor(w, out_type=tf.float32)
#https://github.com/tensorflow/tensorflow/issues/24520#issuecomment-577325475
arr_X.set_shape(tf.TensorShape((None, 15)))
arr_y.set_shape(tf.TensorShape((None, 5)))
arr_w.set_shape(tf.TensorShape((None, )))
#inds = tf.stack([arr_dm_row, arr_dm_col], axis=-1)
#dm_sparse = tf.SparseTensor(values=arr_dm_data, indices=inds, dense_shape=[tf.shape(arr_X)[0], tf.shape(arr_X)[0]])
return arr_X, arr_y, arr_w
def serialize_X_y_w(writer, X, y, w):
feature = {
'X': _bytes_feature(tf.io.serialize_tensor(X)),
'y': _bytes_feature(tf.io.serialize_tensor(y)),
'w': _bytes_feature(tf.io.serialize_tensor(w)),
}
sample = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(sample.SerializeToString())
def serialize_chunk(args):
path, files, ichunk, target = args
out_filename = os.path.join(path, "chunk_{}.tfrecords".format(ichunk))
writer = tf.io.TFRecordWriter(out_filename)
Xs = []
ys = []
ws = []
dms = []
for fi in files:
X, y, ycand = load_one_file(fi)
Xs += X
if target == "cand":
ys += ycand
elif target == "gen":
ys += y
else:
raise Exception("Unknown target")
#set weights for each sample to be equal to the number of samples of this type
#in the training script, this can be used to compute either inverse or class-balanced weights
uniq_vals, uniq_counts = np.unique(np.concatenate([y[:, 0] for y in ys]), return_counts=True)
for i in range(len(ys)):
w = np.ones(len(ys[i]), dtype=np.float32)
for uv, uc in zip(uniq_vals, uniq_counts):
w[ys[i][:, 0]==uv] = uc
ws += [w]
for X, y, w in zip(Xs, ys, ws):
serialize_X_y_w(writer, X, y, w)
writer.close()
if __name__ == "__main__":
args = parse_args()
tf.config.experimental_run_functions_eagerly(True)
datapath = args.datapath
filelist = sorted(glob.glob("{}/raw/*.pkl".format(datapath)))
print("found {} files".format(len(filelist)))
#means, stds = extract_means_stds(filelist)
outpath = "{}/tfr/{}".format(datapath, args.target)
if not os.path.isdir(outpath):
os.makedirs(outpath)
pars = []
for ichunk, files in enumerate(chunks(filelist, args.num_files_per_tfr)):
pars += [(outpath, files, ichunk, args.target)]
#serialize_chunk(pars[0])
pool = multiprocessing.Pool(20)
pool.map(serialize_chunk, pars)
#Load and test the dataset
tfr_dataset = tf.data.TFRecordDataset(glob.glob(outpath + "/*.tfrecords"))
dataset = tfr_dataset.map(_parse_tfr_element)
num_ev = 0
num_particles = 0
for X, y, w in dataset:
num_ev += 1
num_particles += len(X)
print("Created TFRecords dataset in {} with {} events, {} particles".format(
datapath, num_ev, num_particles))
|
11599895
|
import json
import logging
import unicodedata
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
import requests
from bs4 import BeautifulSoup
from utils.constants import HEADERS, LOGFILE_PATH
from utils.reception import Reception
def normalize(text):
"""Normalizes the provided text. This is needed to get rid of weird entries like \xa0."""
return unicodedata.normalize("NFKD", text)
def get_website_content(url, headers=HEADERS):
"""Gets the website content with BS4."""
website = requests.get(url, headers=headers)
return BeautifulSoup(website.content, "html.parser")
def get_reception_points(
kml: dict,
folder_name_whitelist=None,
style_urls_blacklist=None,
):
if style_urls_blacklist is None:
style_urls_blacklist = []
reception_points: list[Reception] = []
folders = kml["kml"]["Document"]["Folder"]
for folder in folders:
if folder_name_whitelist is None or any(
value in normalize(folder["name"]) for value in folder_name_whitelist
):
if "Placemark" in folder.keys():
for placemark in folder["Placemark"]:
if placemark["styleUrl"] not in style_urls_blacklist:
r = Reception()
r.name = normalize(placemark["name"])
r.address = r.name # TEMPORARY
coord = placemark["Point"]["coordinates"].split(",")
r.lon = coord[0].strip()
r.lat = coord[1].strip()
reception_points.append(r)
return reception_points
def gmaps_url_to_lat_lon(url):
"""Converts a Google maps URL string into latitude and longitude."""
if "!3d" in url:
return url.split("!3d")[1].split("!4d")
else:
return url.split("/")[6].split(",")
def write_to_json(filename, text_arr, reception_arr, source):
reception = []
for rec in reception_arr:
reception.append(
{
"name": rec.name,
"lat": rec.lat,
"lon": rec.lon,
"address": rec.address,
"qr": rec.qr,
}
)
data = {"general": text_arr, "reception": reception, "source": source}
with open(filename, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2, ensure_ascii=False)
class LogLevelEnum(Enum):
INFO = 562741
DEBUG = 10227
WARN = 16240465
ERROR = 15942656
@dataclass()
class DiscordLogData:
title: str
description: str
log_level: LogLevelEnum
def log_to_discord(logs: list[DiscordLogData]):
try:
with open(Path(__file__).parent / ".discord-webhook", "r") as file:
url = file.read().strip()
if len(logs) > 10:
raise ValueError("Discord supports a maximum of 10 embeds per message")
content = {
"content": None,
"embeds": [
{
"title": log.title,
"description": log.description,
"color": log.log_level.value,
}
for log in logs
],
}
requests.post(url, json=content)
except Exception as exception:
# Don't fail execution if logging fails
logging.exception("Failed to send Discord notification.")
|
11599935
|
import logging
import tempfile
import os
import torch
from collections import OrderedDict
from tqdm import tqdm
from lvis import LVIS, LVISResults, LVISEval, LVISEvalPerCat
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def do_lvis_evaluation(
dataset,
gt_path,
predictions,
output_folder,
iou_types,
iteration,
):
logger = logging.getLogger("maskrcnn_benchmark.inference")
logger.info("Preparing results for LVIS format")
lvis_results = prepare_for_lvis_evaluation(predictions, dataset, iou_types)
if len(lvis_results) == 0:
return {}
dt_path = os.path.join(output_folder, "lvis_dt.json")
import json
with open(dt_path, "w") as f:
json.dump(lvis_results, f)
logger.info("Evaluating predictions")
lvis_eval_info = {}
for iou_type in iou_types:
lvis_eval = LVISEval(
gt_path, dt_path, iou_type
)
lvis_eval.run()
print(iou_type)
lvis_eval.print_results()
keys = lvis_eval.get_results().keys()
for k in keys:
lvis_eval_info[iou_type + k] = lvis_eval.get_results()[k]
save_path = os.path.join(output_folder, str(iteration))
mkdir(save_path)
lvis_eval_percat = LVISEvalPerCat(
gt_path, dt_path, iou_type, save_path)
lvis_eval_percat.run()
lvis_eval_percat.print_results()
return lvis_eval_info
def prepare_for_lvis_evaluation(predictions, dataset, iou_types):
import pycocotools.mask as mask_util
import numpy as np
if 'segm' in iou_types:
masker = Masker(threshold=0.5, padding=1)
# assert isinstance(dataset, COCODataset)
lvis_results = []
for image_id, prediction in tqdm(enumerate(predictions)):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
masks = prediction.get_field("mask")
# t = time.time()
# Masker is necessary only if masks haven't been already resized.
if list(masks.shape[-2:]) != [image_height, image_width]:
masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
masks = masks[0]
# logger.info('Time mask: {}'.format(time.time() - t))
prediction = prediction.convert('xywh')
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
# rles = prediction.get_field('mask')
rles = [
mask_util.encode(
np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
# mapped_labels = [int(i) for i in labels]
mapped_labels = [dataset.sorted_id_to_category_id[i]
for i in labels]
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"segmentation": rle,
"score": scores[k],
}
for k, (rle, box) in enumerate(zip(rles, boxes))
]
)
return lvis_results
else:
lvis_results = []
for image_id, prediction in tqdm(enumerate(predictions)):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
# logger.info('Time mask: {}'.format(time.time() - t))
prediction = prediction.convert('xywh')
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
# mapped_labels = [int(i) for i in labels]
mapped_labels = [dataset.sorted_id_to_category_id[i]
for i in labels]
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return lvis_results
|
11599959
|
from typing import Sequence
from ..simai import (
SimaiChart,
pattern_from_int,
)
from ..maima2 import MaiMa2, TapNote, HoldNote, SlideNote, TouchTapNote, TouchHoldNote
from ..event import MaiNote, NoteType
def ma2_to_simai(ma2: MaiMa2) -> SimaiChart:
simai_chart = SimaiChart()
for bpm in ma2.bpms:
simai_chart.set_bpm(bpm.measure, bpm.bpm)
convert_notes(simai_chart, ma2.notes)
return simai_chart
def convert_notes(simai_chart: SimaiChart, ma2_notes: Sequence[MaiNote]) -> None:
for ma2_note in ma2_notes:
note_type = ma2_note.note_type
if isinstance(ma2_note, TapNote):
is_break = note_type in [NoteType.break_tap, NoteType.break_star]
is_ex = note_type in [NoteType.ex_tap, NoteType.ex_star]
is_star = note_type in [
NoteType.star,
NoteType.break_star,
NoteType.ex_star,
]
simai_chart.add_tap(
measure=ma2_note.measure,
position=ma2_note.position,
is_break=is_break,
is_star=is_star,
is_ex=is_ex,
)
elif isinstance(ma2_note, HoldNote):
is_ex = note_type == NoteType.ex_hold
simai_chart.add_hold(
measure=ma2_note.measure,
position=ma2_note.position,
duration=ma2_note.duration,
is_ex=is_ex,
)
elif isinstance(ma2_note, SlideNote):
# Ma2 slide durations does not include the delay
# like in simai
pattern = pattern_from_int(
ma2_note.pattern, ma2_note.position, ma2_note.end_position
)
simai_chart.add_slide(
measure=ma2_note.measure,
start_position=ma2_note.position,
end_position=ma2_note.end_position,
duration=ma2_note.duration,
pattern=pattern[0],
delay=ma2_note.delay,
reflect_position=pattern[1],
)
elif isinstance(ma2_note, TouchTapNote):
simai_chart.add_touch_tap(
measure=ma2_note.measure,
position=ma2_note.position,
region=ma2_note.region,
is_firework=ma2_note.is_firework,
)
elif isinstance(ma2_note, TouchHoldNote):
simai_chart.add_touch_hold(
measure=ma2_note.measure,
position=ma2_note.position,
region=ma2_note.region,
duration=ma2_note.duration,
is_firework=ma2_note.is_firework,
)
else:
print("Warning: Unknown note type {}".format(note_type))
|
11599973
|
from rest_framework.routers import Route
from rest_framework_nested.routers import NestedDefaultRouter
class NestedStorageRouter(NestedDefaultRouter):
routes = [
# Detail route without identifier. Viewset must override get_object to work correctly.
Route(
url=r'^{prefix}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
},
name='{basename}-detail',
detail=True,
initkwargs={'suffix': 'Instance'}
),
]
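# Illustrative registration sketch (assumed names, not from the original module):
# a parent router exposing /vms/{pk}/ can mount a single nested "storage"
# resource at /vms/{vm_pk}/storage/ with no extra identifier in the URL, which is
# why the viewset must override get_object().
#
#   from rest_framework.routers import DefaultRouter
#
#   parent_router = DefaultRouter()
#   parent_router.register(r'vms', VmViewSet, basename='vm')
#   storage_router = NestedStorageRouter(parent_router, r'vms', lookup='vm')
#   storage_router.register(r'storage', StorageViewSet, basename='vm-storage')
#   urlpatterns = parent_router.urls + storage_router.urls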
|
11599986
|
import pandas as pd
from PIL import Image
import numpy as np
import keras
from math import floor
import random
from random import shuffle
from sklearn.model_selection import train_test_split
from keras import regularizers
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.models import Model, load_model
from keras.layers import Input, Dense, Dropout, Flatten, Activation, Reshape
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.optimizers import SGD, Adam, Adadelta
from keras.preprocessing.image import ImageDataGenerator
import csv
from keras.utils import np_utils
import matplotlib.pyplot as plt
import time
import os
from keras.callbacks import EarlyStopping
os.system('echo $CUDA_VISIBLE_DEVICES')
#PATIENCE = 5 # The parameter is used for early stopping
def load(readnpy=True):
if readnpy:
y = np.load('./feature/DNNlabel.npy')
X = np.load('./feature/DNNfeature.npy')
X_test = np.load('./feature/DNNX_test.npy')
X_train = np.load('./feature/DNNX_train.npy')
X_valid = np.load('./feature/DNNX_valid.npy')
y_train = np.load('./feature/DNNy_train.npy')
y_valid = np.load('./feature/DNNy_valid.npy')
else :
df_train = pd.read_csv('./feature/train.csv')
'''train,valid data'''
y = df_train['label'].as_matrix()
print(df_train.groupby('label').count())
y = y.reshape(len(y),1)
X = df_train['feature'].as_matrix()
X = np.array([np.array([*map(int, x.split())]) for x in X])
np.save('./feature/DNNlabel.npy', y)
np.save('./feature/DNNfeature.npy',X)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.1, random_state=0)
np.save('./feature/DNNX_train.npy',X_train)
np.save('./feature/DNNX_valid.npy',X_valid)
np.save('./feature/DNNy_train.npy',y_train)
np.save('./feature/DNNy_valid.npy',y_valid)
'''testing data'''
df_test = pd.read_csv('./feature/test.csv')
X_test = df_test['feature'].as_matrix()
X_test = np.array([np.array([*map(int, x.split())]) for x in X_test])
np.save('./feature/DNNX_test.npy',X_test)
return X,y,X_test,X_train,X_valid,y_train,y_valid
label_dict={0:"pissed off",1:"disgust",2:"fear",3:"happy",4:"sad",
5:"surprised",6:"neutral"}
def plot_images_labels_prediction(images,labels,prediction,
idx,num=20):
fig = plt.gcf()
fig.set_size_inches(12, 14)
if num>25: num=25
for i in range(0, num):
ax=plt.subplot(5,5, 1+i)
ax.imshow(images[idx],cmap='binary')
title=str(i)+','+label_dict[labels[i][0]]
if len(prediction)>0:
title+='=>'+label_dict[prediction[i]]
ax.set_title(title,fontsize=10)
ax.set_xticks([]);ax.set_yticks([])
idx+=1
plt.show()
def normalize(X):
X = X.astype('float32')
X /=255
return X
def OneHotEncode(y):
    # Convert labels to one-hot encoding
y = np_utils.to_categorical(y)
#y = pd.get_dummies(y).values
return y
def split_valid_set(X_all, Y_all, percentage):
all_data_size = len(X_all)
valid_data_size = int(floor(all_data_size * percentage))
X_all, Y_all = _shuffle(X_all, Y_all)
X_valid, Y_valid = X_all[0:valid_data_size], Y_all[0:valid_data_size]
X_train, Y_train = X_all[valid_data_size:], Y_all[valid_data_size:]
return X_train, Y_train, X_valid, Y_valid
def valid(X_all, Y_all):
# Split a 10%-validation set from the training set
valid_set_percentage = 0.1
X_train, Y_train, X_valid, Y_valid = split_valid_set(X_all, Y_all, valid_set_percentage)
return X_train, Y_train, X_valid, Y_valid
def _shuffle(X, Y):
randomize = np.arange(len(X))
np.random.shuffle(randomize)
return (X[randomize], Y[randomize])
def buildmodel(X, Y):
#X = X.reshape(len(X),48,48,1)
model = Sequential()
model.add(Dense(4096,input_dim=X.shape[1],
kernel_regularizer=regularizers.l2(0.001),
activation='relu'))
model.add(Dense(4096,input_dim=X.shape[1],
kernel_regularizer=regularizers.l2(0.001),
activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048,kernel_regularizer=regularizers.l2(0.001),
activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
# opt = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
opt = Adam(lr=1e-4)
# opt = Adadelta(lr=0.1, rho=0.95, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
return model
def show_train_history(train_history,train,validation):
plt.plot(train_history.history[train])
plt.plot(train_history.history[validation])
plt.title('Train History')
plt.ylabel(train)
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
def prediction(model,X_test):
prediction=model.predict_classes(X_test)
print(prediction[:10])
def plot_images_labels_prediction(images,labels,prediction,
idx,num=10):
fig = plt.gcf()
fig.set_size_inches(12, 14)
if num>25: num=25
for i in range(0, num):
ax=plt.subplot(5,5, 1+i)
ax.imshow(images[idx],cmap='binary')
title=str(i)+','+label_dict[labels[i][0]]
if len(prediction)>0:
title+='=>'+label_dict[prediction[i]]
ax.set_title(title,fontsize=10)
ax.set_xticks([]);ax.set_yticks([])
idx+=1
plt.show()
def show_Predicted_Probability(y,prediction,
x_img,Predicted_Probability,i):
print('label:',label_dict[y[i][0]],
'predict:',label_dict[prediction[i]])
plt.figure(figsize=(2,2))
    plt.imshow(np.reshape(x_img[i],(32, 32,3)))
    plt.show()
    for j in range(len(label_dict)):
        print(label_dict[j]+
              ' Probability:%1.9f'%(Predicted_Probability[i][j]))
#confusion matrix
def confusionmatrix():
print(label_dict)
pd.crosstab(y_test.reshape(-1),prediction,
rownames=['label'],colnames=['predict'])
def savemodel(model,json=True,yaml=True,h5=True):
if json:
model_json = model.to_json()
with open("SaveModel/cifarCnnModelDNN.json", "w") as json_file:
json_file.write(model_json)
print("save model as json file!")
if yaml:
model_yaml = model.to_yaml()
with open("SaveModel/cifarCnnModelDNN.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)
print("save model as yaml file!")
if h5:
model.save_weights("SaveModel/cifarCnnModelDNN.h5")
model.save('SaveModel/hw3-modelDNN.h5')
print("save weight!")
def main():
X_all,Y_all,X_test,X_train,X_valid,Y_train,Y_valid = load(True)
#plot_images_labels_prediction(X_all,Y_all,[],0)
X_all = normalize(X_all)
X_test = normalize(X_test)
Y_all = OneHotEncode(Y_all)
X_train = normalize(X_train)
X_valid = normalize(X_valid)
Y_train = OneHotEncode(Y_train)
Y_valid = OneHotEncode(Y_valid)
batchsize = 64
num_epoch = 100
print(X_all.shape)
print(Y_all.shape)
print(X_test.shape)
print(X_train.shape)
print(X_valid.shape)
print(Y_train.shape)
print(Y_valid.shape)
model = buildmodel(X_train,Y_train)
print(model.summary())
savemodel(model)
ACCearlyStopping=EarlyStopping(monitor='val_acc',patience=50, verbose=0, mode='auto')
filepath="weightDNN/weights-improvement-{epoch:02d}-{val_acc:.3f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
train_history = model.fit(X_train, Y_train,validation_split=0.1,epochs=num_epoch, batch_size=batchsize,
verbose=1,callbacks=[checkpoint,ACCearlyStopping])
show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')
    # Evaluate model accuracy on the validation set
scores = model.evaluate(X_valid, Y_valid, verbose=1)
print(scores[1])
proba=model.predict(X_test)
prediction = proba.argmax(axis=-1)
answer = []
for i in range(len(prediction)):
answer.append([str(i)])
answer[i].append(prediction[i])
#filename = args[0][6]
filename = "result/predictDNN.csv"
text = open(filename, "w+")
s = csv.writer(text,delimiter=',',lineterminator='\n')
s.writerow(["id","label"])
for i in range(len(answer)):
s.writerow(answer[i])
text.close()
'''
#plot_images_labels_prediction(X_test,y_test,prediction,0,10)
#Predicted_Probability=model.predict(x_img_test_normalize)
#show_Predicted_Probability(y_label_test,prediction,x_img_test,Predicted_Probability,0)
#show_Predicted_Probability(y_label_test,prediction,x_img_test,Predicted_Probability,3)
'''
if __name__ == '__main__':
main()
|
11599999
|
import os
import pytest
import unittest
from sacrerouge.commands.correlate import aggregate_metrics
from sacrerouge.data import Metrics
from sacrerouge.io import JsonlReader
_metrics_A_file_path = 'datasets/duc-tac/tac2010/v1.0/task1.A.metrics.jsonl'
_metrics_B_file_path = 'datasets/duc-tac/tac2010/v1.0/task1.B.metrics.jsonl'
class TestTAC2010SystemLevel(unittest.TestCase):
@pytest.mark.skipif(not os.path.exists(_metrics_A_file_path), reason='TAC 2010-A metrics file does not exist')
def test_system_level_A(self):
summary_level_metrics = JsonlReader(_metrics_A_file_path, Metrics).read()
system_level_metrics = aggregate_metrics(summary_level_metrics)
# Check a few metrics to make sure they are equal to what's in the NIST files
# ROUGE/rouge2_A.m.avg
assert system_level_metrics['22']['rouge-2']['recall'] == pytest.approx(9.574, 1e-2)
assert system_level_metrics['18']['rouge-2']['recall'] == pytest.approx(9.418, 1e-2)
assert system_level_metrics['23']['rouge-2']['recall'] == pytest.approx(9.404, 1e-2)
assert system_level_metrics['24']['rouge-2']['recall'] == pytest.approx(9.196, 1e-2)
assert system_level_metrics['36']['rouge-2']['recall'] == pytest.approx(9.194, 1e-2)
# ROUGE/rouge2_A.jk.m.avg
assert system_level_metrics['D']['rouge-2_jk']['recall'] == pytest.approx(12.862, 1e-2)
assert system_level_metrics['H']['rouge-2_jk']['recall'] == pytest.approx(12.841, 1e-1)
assert system_level_metrics['F']['rouge-2_jk']['recall'] == pytest.approx(12.556, 1e-2)
assert system_level_metrics['22']['rouge-2_jk']['recall'] == pytest.approx(9.620, 1e-2)
assert system_level_metrics['18']['rouge-2_jk']['recall'] == pytest.approx(9.451, 1e-2)
# ROUGE/rougeSU4_A.m.avg
assert system_level_metrics['22']['rouge-su4']['recall'] == pytest.approx(13.014, 1e-2)
assert system_level_metrics['23']['rouge-su4']['recall'] == pytest.approx(12.963, 1e-2)
assert system_level_metrics['24']['rouge-su4']['recall'] == pytest.approx(12.829, 1e-2)
assert system_level_metrics['18']['rouge-su4']['recall'] == pytest.approx(12.407, 1e-2)
assert system_level_metrics['34']['rouge-su4']['recall'] == pytest.approx(12.283, 1e-2)
# ROUGE/rougeSU4_A.jk.m.avg
assert system_level_metrics['H']['rouge-su4_jk']['recall'] == pytest.approx(16.294, 1e-2)
assert system_level_metrics['F']['rouge-su4_jk']['recall'] == pytest.approx(16.212, 1e-2)
assert system_level_metrics['D']['rouge-su4_jk']['recall'] == pytest.approx(16.200, 1e-2)
assert system_level_metrics['22']['rouge-su4_jk']['recall'] == pytest.approx(13.049, 1e-2)
assert system_level_metrics['23']['rouge-su4_jk']['recall'] == pytest.approx(12.978, 1e-2)
# manual/manual.model.A.avg
assert system_level_metrics['A']['num_scus_jk'] == pytest.approx(10.870, 1e-2)
assert system_level_metrics['B']['num_scus_jk'] == pytest.approx(11.087, 1e-2)
assert system_level_metrics['C']['num_scus_jk'] == pytest.approx(9.826, 1e-2)
assert system_level_metrics['A']['modified_pyramid_score_jk'] == pytest.approx(0.779, 1e-2)
assert system_level_metrics['B']['modified_pyramid_score_jk'] == pytest.approx(0.747, 1e-2)
assert system_level_metrics['C']['modified_pyramid_score_jk'] == pytest.approx(0.661, 1e-2)
assert system_level_metrics['A']['linguistic_quality'] == pytest.approx(4.913, 1e-2)
assert system_level_metrics['B']['linguistic_quality'] == pytest.approx(4.870, 1e-2)
assert system_level_metrics['C']['linguistic_quality'] == pytest.approx(4.826, 1e-2)
assert system_level_metrics['A']['overall_responsiveness'] == pytest.approx(4.783, 1e-2)
assert system_level_metrics['B']['overall_responsiveness'] == pytest.approx(4.696, 1e-2)
assert system_level_metrics['C']['overall_responsiveness'] == pytest.approx(4.565, 1e-2)
# manual/manual.peer.A.avg
assert system_level_metrics['1']['modified_pyramid_score'] == pytest.approx(0.233, 1e-2)
assert system_level_metrics['2']['modified_pyramid_score'] == pytest.approx(0.296, 1e-2)
assert system_level_metrics['3']['modified_pyramid_score'] == pytest.approx(0.399, 1e-2)
assert system_level_metrics['1']['num_scus'] == pytest.approx(3.304, 1e-2)
assert system_level_metrics['2']['num_scus'] == pytest.approx(4.217, 1e-2)
assert system_level_metrics['3']['num_scus'] == pytest.approx(5.500, 1e-2)
assert system_level_metrics['1']['num_repetitions'] == pytest.approx(0.522, 1e-2)
assert system_level_metrics['2']['num_repetitions'] == pytest.approx(1.217, 1e-2)
assert system_level_metrics['3']['num_repetitions'] == pytest.approx(1.413, 1e-2)
assert system_level_metrics['1']['modified_pyramid_score_jk'] == pytest.approx(0.229, 1e-2)
assert system_level_metrics['2']['modified_pyramid_score_jk'] == pytest.approx(0.291, 1e-2)
assert system_level_metrics['3']['modified_pyramid_score_jk'] == pytest.approx(0.393, 1e-2)
assert system_level_metrics['1']['linguistic_quality'] == pytest.approx(3.652, 1e-2)
assert system_level_metrics['2']['linguistic_quality'] == pytest.approx(2.717, 1e-2)
assert system_level_metrics['3']['linguistic_quality'] == pytest.approx(3.043, 1e-2)
assert system_level_metrics['1']['overall_responsiveness'] == pytest.approx(2.174, 1e-2)
assert system_level_metrics['2']['overall_responsiveness'] == pytest.approx(2.500, 1e-2)
assert system_level_metrics['3']['overall_responsiveness'] == pytest.approx(2.978, 1e-2)
# BE/simple_A.m.hm.avg
assert system_level_metrics['22']['rouge-be-hm']['recall'] == pytest.approx(5.937, 1e-2)
assert system_level_metrics['23']['rouge-be-hm']['recall'] == pytest.approx(5.809, 1e-2)
assert system_level_metrics['18']['rouge-be-hm']['recall'] == pytest.approx(5.749, 1e-2)
assert system_level_metrics['13']['rouge-be-hm']['recall'] == pytest.approx(5.553, 1e-2)
assert system_level_metrics['16']['rouge-be-hm']['recall'] == pytest.approx(5.497, 1e-2)
# BE/simplejk_A.m.hm.avg
assert system_level_metrics['F']['rouge-be-hm_jk']['recall'] == pytest.approx(9.114, 1e-2)
assert system_level_metrics['H']['rouge-be-hm_jk']['recall'] == pytest.approx(8.690, 1e-1)
assert system_level_metrics['D']['rouge-be-hm_jk']['recall'] == pytest.approx(8.449, 1e-1)
assert system_level_metrics['22']['rouge-be-hm_jk']['recall'] == pytest.approx(5.973, 1e-2)
assert system_level_metrics['23']['rouge-be-hm_jk']['recall'] == pytest.approx(5.828, 1e-2)
# aesop_allpeers_A
assert system_level_metrics['A']['aesop']['1'] == pytest.approx(0.09517478261, 1e-2)
assert system_level_metrics['C']['aesop']['8'] == pytest.approx(0.0, 1e-2)
assert system_level_metrics['4']['aesop']['13'] == pytest.approx(0.6150630435, 1e-2)
assert system_level_metrics['8']['aesop']['22'] == pytest.approx(0.3684913043, 1e-2)
assert system_level_metrics['16']['aesop']['27'] == pytest.approx(11.80434783, 1e-2)
@pytest.mark.skipif(not os.path.exists(_metrics_B_file_path), reason='TAC 2010-B metrics file does not exist')
def test_system_level_B(self):
summary_level_metrics = JsonlReader(_metrics_B_file_path, Metrics).read()
system_level_metrics = aggregate_metrics(summary_level_metrics)
# Check a few metrics to make sure they are equal to what's in the NIST files
# ROUGE/rouge2_B.m.avg
assert system_level_metrics['16']['rouge-2']['recall'] == pytest.approx(8.024, 1e-2)
assert system_level_metrics['13']['rouge-2']['recall'] == pytest.approx(7.913, 1e-2)
assert system_level_metrics['36']['rouge-2']['recall'] == pytest.approx(7.311, 1e-2)
assert system_level_metrics['8']['rouge-2']['recall'] == pytest.approx(7.251, 1e-2)
assert system_level_metrics['4']['rouge-2']['recall'] == pytest.approx(7.058, 1e-2)
# ROUGE/rouge2_B.jk.m.avg
assert system_level_metrics['D']['rouge-2_jk']['recall'] == pytest.approx(13.021, 1e-2)
assert system_level_metrics['E']['rouge-2_jk']['recall'] == pytest.approx(10.196, 1e-1)
assert system_level_metrics['F']['rouge-2_jk']['recall'] == pytest.approx(9.777, 1e-2)
assert system_level_metrics['16']['rouge-2_jk']['recall'] == pytest.approx(7.993, 1e-2)
assert system_level_metrics['13']['rouge-2_jk']['recall'] == pytest.approx(7.902, 1e-2)
# ROUGE/rougeSU4_B.m.avg
assert system_level_metrics['16']['rouge-su4']['recall'] == pytest.approx(12.006, 1e-2)
assert system_level_metrics['13']['rouge-su4']['recall'] == pytest.approx(11.878, 1e-2)
assert system_level_metrics['6']['rouge-su4']['recall'] == pytest.approx(11.198, 1e-2)
assert system_level_metrics['22']['rouge-su4']['recall'] == pytest.approx(11.107, 1e-2)
assert system_level_metrics['8']['rouge-su4']['recall'] == pytest.approx(11.039, 1e-2)
# ROUGE/rougeSU4_B.jk.m.avg
assert system_level_metrics['D']['rouge-su4_jk']['recall'] == pytest.approx(16.193, 1e-2)
assert system_level_metrics['E']['rouge-su4_jk']['recall'] == pytest.approx(13.978, 1e-2)
assert system_level_metrics['G']['rouge-su4_jk']['recall'] == pytest.approx(13.573, 1e-2)
assert system_level_metrics['16']['rouge-su4_jk']['recall'] == pytest.approx(11.979, 1e-2)
assert system_level_metrics['13']['rouge-su4_jk']['recall'] == pytest.approx(11.869, 1e-2)
# manual/manual.model.B.avg
assert system_level_metrics['A']['num_scus_jk'] == pytest.approx(6.609, 1e-2)
assert system_level_metrics['B']['num_scus_jk'] == pytest.approx(7.696, 1e-2)
assert system_level_metrics['C']['num_scus_jk'] == pytest.approx(5.913, 1e-2)
assert system_level_metrics['A']['modified_pyramid_score_jk'] == pytest.approx(0.629, 1e-2)
assert system_level_metrics['B']['modified_pyramid_score_jk'] == pytest.approx(0.729, 1e-2)
assert system_level_metrics['C']['modified_pyramid_score_jk'] == pytest.approx(0.551, 1e-2)
assert system_level_metrics['A']['linguistic_quality'] == pytest.approx(4.913, 1e-2)
assert system_level_metrics['B']['linguistic_quality'] == pytest.approx(4.826, 1e-2)
assert system_level_metrics['C']['linguistic_quality'] == pytest.approx(4.870, 1e-2)
assert system_level_metrics['A']['overall_responsiveness'] == pytest.approx(4.783, 1e-2)
assert system_level_metrics['B']['overall_responsiveness'] == pytest.approx(4.783, 1e-2)
assert system_level_metrics['C']['overall_responsiveness'] == pytest.approx(4.826, 1e-2)
# manual/manual.peer.B.avg
assert system_level_metrics['1']['modified_pyramid_score'] == pytest.approx(0.187, 1e-2)
assert system_level_metrics['2']['modified_pyramid_score'] == pytest.approx(0.262, 1e-2)
assert system_level_metrics['3']['modified_pyramid_score'] == pytest.approx(0.235, 1e-2)
assert system_level_metrics['1']['num_scus'] == pytest.approx(2.065, 1e-2)
assert system_level_metrics['2']['num_scus'] == pytest.approx(2.804, 1e-2)
assert system_level_metrics['3']['num_scus'] == pytest.approx(2.609, 1e-2)
assert system_level_metrics['1']['num_repetitions'] == pytest.approx(0.348, 1e-2)
assert system_level_metrics['2']['num_repetitions'] == pytest.approx(0.522, 1e-2)
assert system_level_metrics['3']['num_repetitions'] == pytest.approx(0.348, 1e-2)
assert system_level_metrics['1']['modified_pyramid_score_jk'] == pytest.approx(0.184, 1e-2)
assert system_level_metrics['2']['modified_pyramid_score_jk'] == pytest.approx(0.256, 1e-2)
assert system_level_metrics['3']['modified_pyramid_score_jk'] == pytest.approx(0.228, 1e-2)
assert system_level_metrics['1']['linguistic_quality'] == pytest.approx(3.739, 1e-2)
assert system_level_metrics['2']['linguistic_quality'] == pytest.approx(2.696, 1e-2)
assert system_level_metrics['3']['linguistic_quality'] == pytest.approx(2.957, 1e-2)
assert system_level_metrics['1']['overall_responsiveness'] == pytest.approx(2.022, 1e-2)
assert system_level_metrics['2']['overall_responsiveness'] == pytest.approx(2.478, 1e-2)
assert system_level_metrics['3']['overall_responsiveness'] == pytest.approx(2.217, 1e-2)
# BE/simple_B.m.hm.avg
assert system_level_metrics['16']['rouge-be-hm']['recall'] == pytest.approx(4.445, 1e-2)
assert system_level_metrics['13']['rouge-be-hm']['recall'] == pytest.approx(4.417, 1e-2)
assert system_level_metrics['8']['rouge-be-hm']['recall'] == pytest.approx(4.350, 1e-1)
assert system_level_metrics['4']['rouge-be-hm']['recall'] == pytest.approx(4.115, 1e-2)
assert system_level_metrics['22']['rouge-be-hm']['recall'] == pytest.approx(4.050, 1e-2)
# BE/simplejk_B.m.hm.avg
assert system_level_metrics['D']['rouge-be-hm_jk']['recall'] == pytest.approx(8.842, 1e-2)
assert system_level_metrics['F']['rouge-be-hm_jk']['recall'] == pytest.approx(7.842, 1e-1)
assert system_level_metrics['B']['rouge-be-hm_jk']['recall'] == pytest.approx(7.081, 1e-1)
assert system_level_metrics['16']['rouge-be-hm_jk']['recall'] == pytest.approx(4.411, 1e-2)
assert system_level_metrics['13']['rouge-be-hm_jk']['recall'] == pytest.approx(4.402, 1e-2)
# aesop_allpeers_B
assert system_level_metrics['B']['aesop']['2'] == pytest.approx(0.1358091304, 1e-2)
assert system_level_metrics['E']['aesop']['4'] == pytest.approx(0.1376682609, 1e-2)
assert system_level_metrics['6']['aesop']['7'] == pytest.approx(0.2641304348, 1e-2)
assert system_level_metrics['9']['aesop']['20'] == pytest.approx(0.09438347826, 1e-2)
assert system_level_metrics['14']['aesop']['22'] == pytest.approx(0.3394478261, 1e-2)
|
400010
|
import pytest
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.util import set_rng_seed
from tests.common import assert_equal
EXAMPLE_MODELS = []
EXAMPLE_MODEL_IDS = []
class ExampleModel(object):
def __init__(self, fn, poutine_kwargs):
self.fn = fn
self.poutine_kwargs = poutine_kwargs
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def bind_poutine(self, poutine_name):
"""
Bind model-specific kwargs to the poutine.
"""
p = getattr(poutine, poutine_name)
kwargs = self.poutine_kwargs.get(poutine_name, {})
return lambda fn: p(fn, **kwargs)
def register_model(**poutine_kwargs):
"""
Decorator to register a model as an example model for testing.
"""
def register_fn(fn):
model = ExampleModel(fn, poutine_kwargs)
EXAMPLE_MODELS.append(model)
EXAMPLE_MODEL_IDS.append(model.fn.__name__)
return model
return register_fn
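# The decorated models below are appended to EXAMPLE_MODELS / EXAMPLE_MODEL_IDS,
# which parametrize the tests at the end of this file.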
@register_model(replay={'trace': poutine.Trace()},
block={},
condition={'data': {}},
do={'data': {}})
def trivial_model():
return []
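# Hand-built traces: each one pre-records the model's latent sample site so it
# can be supplied as the 'trace' argument to poutine.replay for that model.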
tr_normal = poutine.Trace()
tr_normal.add_node("normal_0", type="sample", is_observed=False, value=torch.zeros(1), infer={})
@register_model(replay={'trace': tr_normal},
block={'hide': ['normal_0']},
condition={'data': {'normal_0': torch.zeros(1)}},
do={'data': {'normal_0': torch.zeros(1)}})
def normal_model():
normal_0 = pyro.sample('normal_0', dist.Normal(torch.zeros(1), torch.ones(1)))
return [normal_0]
tr_normal_normal = poutine.Trace()
tr_normal_normal.add_node("normal_0", type="sample", is_observed=False, value=torch.zeros(1), infer={})
@register_model(replay={'trace': tr_normal_normal},
block={'hide': ['normal_0']},
condition={'data': {'normal_0': torch.zeros(1)}},
do={'data': {'normal_0': torch.zeros(1)}})
def normal_normal_model():
normal_0 = pyro.sample('normal_0', dist.Normal(torch.zeros(1), torch.ones(1)))
normal_1 = torch.ones(1)
pyro.sample('normal_1', dist.Normal(normal_0, torch.ones(1)),
obs=normal_1)
return [normal_0, normal_1]
tr_bernoulli_normal = poutine.Trace()
tr_bernoulli_normal.add_node("bern_0", type="sample", is_observed=False, value=torch.ones(1), infer={})
@register_model(replay={'trace': tr_bernoulli_normal},
block={'hide': ['bern_0']},
condition={'data': {'bern_0': torch.ones(1)}},
do={'data': {'bern_0': torch.ones(1)}})
def bernoulli_normal_model():
bern_0 = pyro.sample('bern_0', dist.Bernoulli(torch.zeros(1) * 1e-2))
loc = torch.ones(1) if bern_0.item() else -torch.ones(1)
normal_0 = torch.ones(1)
pyro.sample('normal_0', dist.Normal(loc, torch.ones(1) * 1e-2),
obs=normal_0)
return [bern_0, normal_0]
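# In normal_normal_model and bernoulli_normal_model only the first sample site is
# latent; the second site is observed via obs=, which is why the block/condition/do
# kwargs target just that first site.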
def get_trace(fn, *args, **kwargs):
set_rng_seed(123)
return poutine.trace(fn).get_trace(*args, **kwargs)
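# The tests below trace each wrapped model with a fixed RNG seed (see get_trace),
# so stochastic sample sites take identical values across calls and whole traces
# can be compared exactly with assert_equal(..., prec=0).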
@pytest.mark.parametrize('model', EXAMPLE_MODELS, ids=EXAMPLE_MODEL_IDS)
@pytest.mark.parametrize('poutine_name', [
'block',
'do',
'replay',
'trace',
])
def test_idempotent(poutine_name, model):
p = model.bind_poutine(poutine_name)
expected_trace = get_trace(p(model))
actual_trace = get_trace(p(p(model)))
assert_equal(actual_trace, expected_trace, prec=0)
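# Commutativity: applying trace together with condition, do, or replay in either
# order should record identical traces.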
@pytest.mark.parametrize('model', EXAMPLE_MODELS, ids=EXAMPLE_MODEL_IDS)
@pytest.mark.parametrize('p1_name,p2_name', [
('trace', 'condition'),
('trace', 'do'),
('trace', 'replay'),
])
def test_commutes(p1_name, p2_name, model):
p1 = model.bind_poutine(p1_name)
p2 = model.bind_poutine(p2_name)
expected_trace = get_trace(p1(p2(model)))
actual_trace = get_trace(p2(p1(model)))
assert_equal(actual_trace, expected_trace, prec=0)
|