hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52f0f18f17af4d2c54c0498397db375726154423 | 3,485 | py | Python | mexmi/subset_selection_strategy/adversarial_deepfool_sss.py | mexmi2021/mexmi-project | ef735cb290d33b326f592a70fa9b7f7dc6b6281b | [
"MIT"
] | null | null | null | mexmi/subset_selection_strategy/adversarial_deepfool_sss.py | mexmi2021/mexmi-project | ef735cb290d33b326f592a70fa9b7f7dc6b6281b | [
"MIT"
] | null | null | null | mexmi/subset_selection_strategy/adversarial_deepfool_sss.py | mexmi2021/mexmi-project | ef735cb290d33b326f592a70fa9b7f7dc6b6281b | [
"MIT"
] | null | null | null | """
"""
from torch.autograd import Variable
from tqdm import tqdm
from base_sss import SubsetSelectionStrategy
import base_sss
import random
import numpy as np
import torch
import torch.nn.functional as F
class AdversarialDeepFoolStrategy(SubsetSelectionStrategy):
    """Active subset selection based on DeepFool adversarial distance.

    Samples with the smallest estimated distance to the substitute model's
    decision boundary (computed with the DeepFool algorithm) are considered
    the most informative and are selected first.
    """
    def __init__(self, size, X, Y_vec, copy_model, max_iter=50, previous_s=None):
        """
        Parameters
        ----------
        size : int
            Number of samples to select.
        X : sequence
            Candidate inputs.
        Y_vec : sequence
            Model outputs/labels parallel to ``X``.
        copy_model : object
            Wrapper exposing ``get_model()`` returning the substitute model.
        max_iter : int
            Maximum DeepFool iterations per sample.
        previous_s : sequence of int, optional
            Candidate indices from a previous round; when given, selection is
            restricted to, and expressed in terms of, these indices.
        """
        self.max_iter = max_iter
        self.X = X
        self.copy_model = copy_model.get_model()  # this means the copy model
        self.previous_s = previous_s
        super(AdversarialDeepFoolStrategy, self).__init__(size, Y_vec)
    def cal_dis(self, x, y):
        """Return the squared L2 norm of the DeepFool perturbation for ``x``.

        ``y`` is accepted for interface compatibility but unused; the attacked
        class is the substitute model's own prediction.
        """
        x = torch.tensor(x)
        nx = torch.unsqueeze(x, 0).to('cuda')
        nx.requires_grad_()
        eta = torch.zeros(nx.shape).to('cuda')  # accumulated perturbation
        out = self.copy_model(nx + eta)
        if isinstance(out, tuple):
            out = out[0]
        out = F.softmax(out, dim=1)
        # NOTE(review): class count is hard-coded; presumably out.shape[1] --
        # confirm before using with models that do not have 10 outputs.
        n_class = 10
        ny = py = np.argmax(out[0].cpu().detach().numpy())
        i_iter = 0
        # Iterate until the perturbed prediction flips or max_iter is reached.
        while py == ny and i_iter < self.max_iter:
            out[0, py].backward(retain_graph=True)
            grad_np = nx.grad.data.clone()
            value_l = np.inf
            ri = None
            for i in range(n_class):
                if i == py:
                    continue
                nx.grad.data.zero_()
                out[0, i].backward(retain_graph=True)
                grad_i = nx.grad.data.clone()
                wi = grad_i - grad_np
                fi = out[0, i] - out[0, py]
                value_i = np.abs(fi.cpu().item()) / np.linalg.norm(wi.cpu().numpy().flatten())
                if value_i <= value_l:
                    # BUG FIX: keep the running minimum.  ``value_l`` was never
                    # updated before, so ``ri`` always came from the *last*
                    # class instead of the nearest decision boundary, which is
                    # what DeepFool requires.
                    value_l = value_i
                    ri = value_i/np.linalg.norm(wi.cpu().numpy().flatten()) * wi
            eta += ri.clone()
            nx.grad.data.zero_()
            query_inp = (nx+eta).to('cuda')
            out = self.copy_model(query_inp)
            if isinstance(out, tuple):
                out = out[0]
            out = F.softmax(out, dim=1)
            out.squeeze()
            py = np.argmax(out[0].cpu().detach().numpy())
            i_iter += 1
        return (eta*eta).sum()
    def get_subset(self):
        """Return indices of the ``size`` candidates with the smallest DeepFool
        distance; indices refer to ``previous_s`` entries when it was given.
        """
        if self.previous_s is not None:
            # Restrict candidates to the previously selected indices.
            Y_e = [self.Y_vec[int(ie)] for ie in self.previous_s]
            X = [self.X[int(ie)] for ie in self.previous_s]
            self.X = X
        else:
            Y_e = self.Y_vec
        dis = np.zeros(len(Y_e))  # deep fool distances, one per candidate
        with tqdm(total=len(self.X)) as bar:
            for i in range(len(self.X)):
                x = self.X[i]
                # BUG FIX: index the filtered label list; ``self.Y_vec[i]`` was
                # misaligned with ``self.X`` when previous_s is given (harmless
                # so far only because cal_dis ignores ``y``).
                y = Y_e[i]
                dis[i] = self.cal_dis(x, y)
                bar.update(1)
        # Smallest adversarial distance == closest to the decision boundary.
        s = dis.argsort()[:self.size]
        if self.previous_s is not None:
            s_final = [self.previous_s[int(si)] for si in s]  # original index
        else:
            s_final = s  # index
        return s_final  # index
ac2fe4ced216d52fe0160e5b27409be90a2d553f | 808 | py | Python | py/py_0643_2-friendly.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0643_2-friendly.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0643_2-friendly.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 643: 2-Friendly
# https://projecteuler.net/problem=643
#
# Two positive integers $a$ and $b$ are 2-friendly when $\gcd(a,b) = 2^t,
# t>0$. For example, 24 and 40 are 2-friendly because $\gcd(24,40) = 8 = 2^3$
# while 24 and 36 are not because $\gcd(24,36) = 12 = 2^2\cdot 3$ not a power
# of 2. Let $f(n)$ be the number of pairs, $(p,q)$, of positive integers with
# $1\le p\lt q\le n$ such that $p$ and $q$ are 2-friendly. You are given
# $f(10^2) = 1031$ and $f(10^6) = 321418433$ modulo $1\,000\,000\,007$. Find
# $f(10^{11})$ modulo $1\,000\,000\,007$.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver; the actual solution for this problem is not implemented."""
    pass
if __name__ == '__main__':
    # Benchmark parameters fed to the timing harness: problem size, timeit
    # iteration count, and the Project Euler problem id.
    n = 1000
    i = 10000
    prob_id = 643
| 31.076923 | 78 | 0.623762 |
ab6eb5cdf71a138c66334e60c9a87a321d0b738f | 8,584 | py | Python | examples/torch/hiera_rl/meta_kant_half_cheetach_vel.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | 1 | 2021-03-02T08:43:20.000Z | 2021-03-02T08:43:20.000Z | examples/torch/hiera_rl/meta_kant_half_cheetach_vel.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | null | null | null | examples/torch/hiera_rl/meta_kant_half_cheetach_vel.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | null | null | null | import faulthandler
import os
faulthandler.enable()
import click
import joblib
import numpy as np
from torch.nn import functional
import garage.torch.utils as tu
from garage import wrap_experiment
from garage.envs import GarageEnv, DiaynEnvWrapper
from garage.envs import normalize
from garage.envs.mujoco import HalfCheetahVelEnv
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import EnvPoolSampler, SetTaskSampler
from garage.sampler.local_skill_sampler import LocalSkillSampler
from garage.torch.algos.meta_kant import MetaKant, KantWorker
from garage.torch.modules.categorical_mlp import CategoricalMLPPolicy
from garage.torch.policies.context_conditioned_controller_policy import \
OpenContextConditionedControllerPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
# Random experiment seed, drawn fresh each run (record it for reproducibility).
seed = np.random.randint(0, 1000)
skills_num = 10
# Location of the pre-trained DIAYN checkpoint used to warm-start the
# skill actor and task proposer below.  Loading happens at import time.
load_dir = os.path.join(os.getcwd(), 'data/local/experiment/diayn_half_cheetah_vel_batch_for_pearl_3')
itr = 900
load_from_file = os.path.join(load_dir, 'itr_{}.pkl'.format(itr))
file = open(load_from_file, 'rb')
saved = joblib.load(file)
file.close()
skill_env = saved['env']
diayn = saved['algo']
skill_actor = diayn.networks[0]  # _policy
task_proposer = diayn.networks[1]  # _discriminator
# Hyper-parameters used both as click option defaults and directly below.
param_num_epoches = 500
param_train_tasks_num = skills_num  # 100
param_test_tasks_num = 5  # skills_num / 2 # 30
param_encoder_hidden_size = 200
param_net_size = 300
param_num_steps_per_epoch = 1000
param_num_initial_steps = 1000
param_num_skills_reason_steps = 1000
param_num_steps_prior = 400
param_num_extra_rl_steps_posterior = 600
param_num_skills_sample = 10
param_batch_size = 256
param_embedding_batch_size = 100
param_embedding_mini_batch_size = 100
param_max_path_length = 300
param_latent_size = 5
param_num_tasks_sample = 5
param_meta_batch_size = 16
param_skills_reason_reward_scale = 1
param_tasks_adapt_reward_scale = 1.2
param_use_gpu = True
@click.command()
@click.option('--num_epochs', default=param_num_epoches)
@click.option('--num_train_tasks', default=param_train_tasks_num)
@click.option('--num_test_tasks', default=param_test_tasks_num)
@click.option('--encoder_hidden_size', default=param_encoder_hidden_size)
@click.option('--net_size', default=param_net_size)
@click.option('--num_steps_per_epoch', default=param_num_steps_per_epoch)
@click.option('--num_initial_steps', default=param_num_initial_steps)
@click.option('--num_steps_prior', default=param_num_steps_prior)
@click.option('--num_extra_rl_steps_posterior',
              default=param_num_extra_rl_steps_posterior)
@click.option('--num_skills_sample', default=param_num_skills_sample)
@click.option('--num_skills_reason_steps',
              default=param_num_skills_reason_steps)
@click.option('--batch_size', default=param_batch_size)
@click.option('--embedding_batch_size', default=param_embedding_batch_size)
@click.option('--embedding_mini_batch_size',
              default=param_embedding_mini_batch_size)
@click.option('--max_path_length', default=param_max_path_length)
@wrap_experiment(snapshot_mode='gap and last', snapshot_gap=100)
def meta_kant_cheetah_vel(ctxt=None,
                          seed=seed,
                          num_skills=skills_num,
                          num_epochs=param_num_epoches,
                          num_train_tasks=param_train_tasks_num,
                          num_test_tasks=param_test_tasks_num,
                          is_encoder_recurrent=False,
                          latent_size=param_latent_size,
                          encoder_hidden_size=param_encoder_hidden_size,
                          net_size=param_net_size,
                          meta_batch_size=param_meta_batch_size,
                          num_steps_per_epoch=param_num_steps_per_epoch,
                          num_initial_steps=param_num_initial_steps,
                          num_tasks_sample=param_num_tasks_sample,
                          num_steps_prior=param_num_steps_prior,
                          num_extra_rl_steps_posterior=param_num_extra_rl_steps_posterior,
                          num_skills_sample=param_num_skills_sample,
                          num_skills_reason_steps=param_num_skills_reason_steps,
                          batch_size=param_batch_size,
                          embedding_batch_size=param_embedding_batch_size,
                          embedding_mini_batch_size=param_embedding_mini_batch_size,
                          max_path_length=param_max_path_length,
                          skills_reason_reward_scale=param_skills_reason_reward_scale,
                          tasks_adapt_reward_scale=param_tasks_adapt_reward_scale,
                          use_gpu=param_use_gpu):
    """Train MetaKant on HalfCheetah-Vel, warm-started from a DIAYN checkpoint.

    Builds one wrapped environment per DIAYN skill, constructs the Q/V
    functions and the skill controller policy, then runs training and returns
    the per-epoch average returns.
    """
    # BUG FIX: compare values with ``==``; the previous ``is`` identity check
    # only worked because CPython caches small integers.
    assert num_train_tasks == skills_num
    set_seed(seed)
    encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
                            encoder_hidden_size)
    # One training environment per DIAYN skill, each rewarding the discriminator's
    # view of that skill.
    ML_train_envs = [DiaynEnvWrapper(task_proposer, skills_num, task_name,
                                     normalize(HalfCheetahVelEnv()))
                     for task_name in range(skills_num)]
    env_sampler = EnvPoolSampler(ML_train_envs)
    env = env_sampler.sample(num_train_tasks)
    test_env_sampler = SetTaskSampler(lambda: GarageEnv(normalize(
        HalfCheetahVelEnv())))
    runner = LocalRunner(ctxt)
    # Q-function, value function and controller policy each get their own
    # augmented env spec (observation extended with latent/skill info).
    qf_env = MetaKant.get_env_spec(env[0](), latent_size, num_skills, "qf")
    qf = ContinuousMLPQFunction(env_spec=qf_env,
                                hidden_sizes=[net_size, net_size, net_size])
    vf_env = MetaKant.get_env_spec(env[0](), latent_size, num_skills, 'vf')
    vf = ContinuousMLPQFunction(env_spec=vf_env,
                                hidden_sizes=[net_size, net_size, net_size])
    controller_policy_env = MetaKant.get_env_spec(env[0](), latent_size,
                                                  module="controller_policy",
                                                  num_skills=num_skills)
    controller_policy = CategoricalMLPPolicy(env_spec=controller_policy_env,
                                             hidden_sizes=[net_size, net_size],
                                             hidden_nonlinearity=functional.relu)
    metakant = MetaKant(
        env=env,
        skill_env=skill_env,
        controller_policy=controller_policy,
        skill_actor=skill_actor,
        qf=qf,
        vf=vf,
        num_skills=num_skills,
        num_train_tasks=num_train_tasks,
        num_test_tasks=num_test_tasks,
        sampler_class=LocalSkillSampler,
        is_encoder_recurrent=is_encoder_recurrent,
        latent_dim=latent_size,
        encoder_hidden_sizes=encoder_hidden_sizes,
        test_env_sampler=test_env_sampler,
        meta_batch_size=meta_batch_size,
        num_initial_steps=num_initial_steps,
        num_tasks_sample=num_tasks_sample,
        num_steps_per_epoch=num_steps_per_epoch,
        num_steps_prior=num_steps_prior,  # num_steps_posterior
        num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
        num_skills_reason_steps=num_skills_reason_steps,
        num_skills_sample=num_skills_sample,
        batch_size=batch_size,
        embedding_batch_size=embedding_batch_size,
        embedding_mini_batch_size=embedding_mini_batch_size,
        max_path_length=max_path_length,
        skills_reason_reward_scale=skills_reason_reward_scale,
        tasks_adapt_reward_scale=tasks_adapt_reward_scale
    )
    tu.set_gpu_mode(use_gpu, gpu_id=0)
    if use_gpu:
        metakant.to()
    worker_args = dict(num_skills=num_skills,
                       skill_actor_class=type(skill_actor),
                       controller_class=OpenContextConditionedControllerPolicy,
                       deterministic=False, accum_context=True)
    runner.setup(algo=metakant,
                 env=env[0](),
                 sampler_cls=LocalSkillSampler,
                 sampler_args=dict(max_path_length=max_path_length),
                 n_workers=1,
                 worker_class=KantWorker,
                 worker_args=worker_args
                 )
    average_returns = runner.train(n_epochs=num_epochs, batch_size=batch_size)
    # Persist the final snapshot explicitly (epochs are 0-indexed).
    runner.save(num_epochs - 1)
    return average_returns
# Run the experiment at import/script time; wrap_experiment supplies the
# experiment context (ctxt) and returns the per-epoch average returns.
metakant_returns = meta_kant_cheetah_vel()
def save_list_to_file(x, filename):
    """Write each element of ``x`` to ``filename``, one per line."""
    with open(filename, 'w') as out:
        out.writelines("%s\n" % element for element in x)
# Persist the learning curve for later analysis.
if not os.path.exists('tmp'):
    os.makedirs('tmp')
save_list_to_file(metakant_returns, "tmp/metakant_half_cheetah_returns.txt")
| 40.682464 | 102 | 0.695946 |
1397cc247308490d4eab9239de0169c3a567681e | 11,053 | py | Python | src/pyams_scheduler/process.py | Py-AMS/pyams-scheduler | 58333798a43ca2f8af33cae4ec8f80c6c9a7ce9f | [
"ZPL-2.1"
] | null | null | null | src/pyams_scheduler/process.py | Py-AMS/pyams-scheduler | 58333798a43ca2f8af33cae4ec8f80c6c9a7ce9f | [
"ZPL-2.1"
] | null | null | null | src/pyams_scheduler/process.py | Py-AMS/pyams-scheduler | 58333798a43ca2f8af33cae4ec8f80c6c9a7ce9f | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2015-2021 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_scheduler.process module
This module defines the main scheduler base classes, to handle scheduler process and main
tasks management threads.
"""
__docformat__ = 'restructuredtext'
import logging
from datetime import datetime
from threading import Thread
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from pyramid.config import Configurator
from pyramid.threadlocal import manager
from zope.interface import implementer
from pyams_scheduler.interfaces import ISchedulerProcess, ITask, SCHEDULER_NAME
from pyams_scheduler.trigger import ImmediateTaskTrigger
from pyams_site.interfaces import PYAMS_APPLICATION_DEFAULT_NAME, PYAMS_APPLICATION_SETTINGS_KEY
from pyams_utils.zodb import ZODBConnection
from pyams_zmq.handler import ZMQMessageHandler
from pyams_zmq.process import ZMQProcess
LOGGER = logging.getLogger('PyAMS (scheduler)')
class BaseTaskThread(Thread):
    """Base task management thread class.

    Normalizes the incoming *settings* (either an ITask instance or a plain
    mapping) into a dict with ``zodb_name``, ``task_name`` and ``job_id`` keys,
    and provides ZODB access plus ZCA registry setup for subclasses.
    """

    def __init__(self, process, settings):
        """
        :param process: the owning scheduler process (provides ``registry``
            and ``scheduler``)
        :param settings: an ITask instance or a mapping with ``zodb_name``,
            ``task_name`` and ``job_id`` keys
        """
        Thread.__init__(self)
        self.process = process
        if ITask.providedBy(settings):
            # Flatten an ITask into the plain dict form used by subclasses.
            scheduler = settings.__parent__
            self.settings = {
                'zodb_name': scheduler.zodb_name,
                'task_name': settings.__name__,
                'job_id': settings.internal_id
            }
        else:
            self.settings = settings

    def run(self):
        """Thread start method

        Subclasses may override this method, but must call this super()
        method to correctly initialize ZCA hook in the thread.
        """
        registry = self.process.registry
        # Push the registry onto Pyramid's thread-local stack, then hook the
        # Zope Component Architecture so global lookups use this registry.
        manager.push({'registry': registry, 'request': None})
        config = Configurator(registry=registry)
        config.hook_zca()

    def _get_connection(self):
        """ZODB connection getter (context manager yielding the DB root)."""
        return ZODBConnection(name=self.settings.get('zodb_name'))
class TaskResettingThread(BaseTaskThread):
    """Task resetting thread

    Task reset is run in another thread, so that:
    - other transactions applied on updated tasks are visible
    - ØMQ request returns immediately to calling process
    """

    def run(self):
        """Remove the task's current job and re-schedule it from its trigger."""
        super().run()
        LOGGER.debug("Starting task resetting thread...")
        settings = self.settings
        job_id = settings.get('job_id')
        if job_id is None:
            # Nothing to reset without a job identifier.
            return
        job_id = str(job_id)
        LOGGER.debug("Loading ZODB connection...")
        with self._get_connection() as root:
            LOGGER.debug("Loaded ZODB root {0!r}".format(root))
            try:
                # Walk from the DB root to the scheduler utility.
                registry = self.process.registry
                application_name = registry.settings.get(PYAMS_APPLICATION_SETTINGS_KEY,
                                                         PYAMS_APPLICATION_DEFAULT_NAME)
                application = root.get(application_name)
                LOGGER.debug("Loaded application {0!r}".format(application))
                sm = application.getSiteManager()  # pylint: disable=invalid-name
                scheduler_util = sm.get(SCHEDULER_NAME)
                LOGGER.debug("Loaded scheduler utility {0!r}".format(scheduler_util))
                # Drop the currently scheduled job, if any.
                scheduler = self.process.scheduler
                LOGGER.debug("Removing job '{0}'".format(job_id))
                job = scheduler.get_job(job_id)
                if job is not None:
                    LOGGER.debug("Loaded job {0!r} ({0.id!r})".format(job))
                    scheduler.remove_job(job.id)
                # Re-schedule from the persistent task definition, when runnable.
                # NOTE(review): task lookup lowercases the name -- presumably
                # container keys are stored lowercased; confirm.
                LOGGER.debug("Loading scheduler task '{0}'".format(
                    settings.get('task_name').lower()))
                task = scheduler_util.get(settings.get('task_name').lower())
                LOGGER.debug("Loaded scheduler task {0!r}".format(task))
                if (task is not None) and task.is_runnable():
                    trigger = task.get_trigger()
                    LOGGER.debug("Getting task trigger {0!r}".format(trigger))
                    LOGGER.debug("Adding new job to scheduler {0!r}".format(scheduler))
                    scheduler.add_job(task, trigger,
                                      id=str(task.internal_id),
                                      name=task.name,
                                      kwargs={
                                          'zodb_name': scheduler_util.zodb_name,
                                          'registry': registry
                                      })
                    LOGGER.debug("Added job")
            except:  # pylint: disable=bare-except
                # Deliberate best-effort: a failed reset must not kill the
                # scheduler process; the error is logged with traceback.
                LOGGER.exception("An exception occurred:")
class TaskRemoverThread(BaseTaskThread):
    """Task remover thread: removes a task's scheduled job, if present."""

    def run(self):
        """Look up the job by id and remove it from the live scheduler."""
        super().run()
        LOGGER.debug("Starting task remover thread...")
        settings = self.settings
        job_id = settings.get('job_id')
        if job_id is None:
            # Nothing to remove without a job identifier.
            return
        job_id = str(job_id)
        LOGGER.debug("Loading ZODB connection...")
        with self._get_connection() as root:
            LOGGER.debug("Loaded ZODB root {0!r}".format(root))
            try:
                # Walk from the DB root to the scheduler utility; only the
                # live scheduler is actually modified below.
                registry = self.process.registry
                application_name = registry.settings.get(PYAMS_APPLICATION_SETTINGS_KEY,
                                                         PYAMS_APPLICATION_DEFAULT_NAME)
                application = root.get(application_name)
                LOGGER.debug("Loaded application {0!r}".format(application))
                sm = application.getSiteManager()  # pylint: disable=invalid-name
                scheduler_util = sm.get(SCHEDULER_NAME)
                LOGGER.debug("Loaded scheduler utility {0!r}".format(scheduler_util))
                scheduler = self.process.scheduler
                LOGGER.debug("Removing job '{0}'".format(job_id))
                job = scheduler.get_job(job_id)
                if job is not None:
                    LOGGER.debug("Loaded job {0!r} ({0.id!r})".format(job))
                    scheduler.remove_job(job.id)
                    LOGGER.debug("Removed job")
            except:  # pylint: disable=bare-except
                # Best-effort: log and continue, never crash the process.
                LOGGER.exception("An exception occurred:")
class TaskRunnerThread(BaseTaskThread):
    """Task immediate runner thread: schedules a one-shot run of a task."""

    def run(self):
        """Add the task to the scheduler with an immediate trigger."""
        super().run()
        LOGGER.debug("Starting task runner thread...")
        settings = self.settings
        job_id = settings.get('job_id')
        if job_id is None:
            # A job id is required even though the run is immediate.
            return
        LOGGER.debug("Loading ZODB connection...")
        with self._get_connection() as root:
            LOGGER.debug("Loaded ZODB root {0!r}".format(root))
            try:
                # Walk from the DB root to the scheduler utility.
                registry = self.process.registry
                application_name = registry.settings.get(PYAMS_APPLICATION_SETTINGS_KEY,
                                                         PYAMS_APPLICATION_DEFAULT_NAME)
                application = root.get(application_name)
                LOGGER.debug("Loaded application {0!r}".format(application))
                sm = application.getSiteManager()  # pylint: disable=invalid-name
                scheduler_util = sm.get(SCHEDULER_NAME)
                LOGGER.debug("Loaded scheduler utility {0!r}".format(scheduler_util))
                scheduler = self.process.scheduler
                LOGGER.debug("Loading scheduler task '{0}'".format(
                    settings.get('task_name').lower()))
                task = scheduler_util.get(settings.get('task_name').lower())
                LOGGER.debug("Loaded scheduler task {0!r}".format(task))
                if task is not None:
                    # One-shot trigger; the job id is suffixed with a timestamp
                    # so it never collides with the task's regular job.
                    trigger = ImmediateTaskTrigger()
                    LOGGER.debug("Getting task trigger {0!r}".format(trigger))
                    LOGGER.debug("Adding new job to scheduler {0!r}".format(scheduler))
                    scheduler.add_job(task, trigger,
                                      id='{0.internal_id}::{1}'.format(
                                          task, datetime.utcnow().isoformat()),
                                      name=task.name,
                                      kwargs={
                                          'zodb_name': scheduler_util.zodb_name,
                                          'registry': self.process.registry,
                                          'run_immediate': True
                                      })
                    LOGGER.debug("Added job")
            except:  # pylint: disable=bare-except
                # Best-effort: log and continue, never crash the process.
                LOGGER.exception("An exception occurred:")
class SchedulerHandler:
    """Scheduler message handler.

    Each public method implements one ØMQ command and returns a
    ``[status_code, payload]`` pair; the ``process`` attribute is bound by
    the enclosing ZMQ process.
    """

    process = None

    def test(self, settings):  # pylint: disable=unused-argument
        """Health-check command: report readiness and job count."""
        job_count = len(self.process.scheduler.get_jobs())
        report = (
            'OK - Tasks scheduler ready to handle requests.',
            '{0} currently running jobs'.format(job_count),
        )
        return [200, '\n'.join(report)]

    def get_jobs(self, settings):  # pylint: disable=unused-argument
        """Return a summary of every currently scheduled job."""
        summaries = []
        for job in self.process.scheduler.get_jobs():
            summaries.append({
                'id': job.id,
                'name': job.name,
                'trigger': '{0!s}'.format(job.trigger),
                'next_run': job.next_run_time.timestamp()
            })
        return [200, summaries]

    def reset_task(self, settings):
        """Reset task with given properties (asynchronously)."""
        TaskResettingThread(self.process, settings).start()
        return [200, 'OK']

    def remove_task(self, settings):
        """Remove task with given properties (asynchronously)."""
        TaskRemoverThread(self.process, settings).start()
        return [200, 'OK']

    def run_task(self, settings):
        """Run task with given properties immediately (asynchronously)."""
        TaskRunnerThread(self.process, settings).start()
        return [200, 'OK']
class SchedulerMessageHandler(ZMQMessageHandler):
    """ØMQ scheduler messages handler."""

    # Command dispatch is delegated to SchedulerHandler methods.
    handler = SchedulerHandler
@implementer(ISchedulerProcess)
class SchedulerProcess(ZMQProcess):
    """ØMQ tasks scheduler process.

    Wraps an APScheduler background scheduler behind a ZMQ message loop.
    """

    def __init__(self, zmq_address, handler, auth, clients, registry):
        # pylint: disable=too-many-arguments
        super().__init__(zmq_address, handler, auth, clients, registry)
        # In-memory job store: jobs are not persisted here; they are expected
        # to be rebuilt from the ZODB-stored task definitions.
        self.scheduler = BackgroundScheduler()
        self.jobstore = MemoryJobStore()

    def run(self):
        """Start the APScheduler loop, then enter the ZMQ message loop."""
        if self.scheduler is not None:
            self.scheduler.add_jobstore(self.jobstore, 'default')
            self.scheduler.start()
        ZMQProcess.run(self)
| 41.397004 | 96 | 0.589252 |
e432d15a93139a0c4ad10049c8e110ad32813780 | 4,220 | py | Python | openpeerpower/components/isy994/fan.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | openpeerpower/components/isy994/fan.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | openpeerpower/components/isy994/fan.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """Support for ISY994 fans."""
from __future__ import annotations
import math
from pyisy.constants import ISY_VALUE_UNKNOWN, PROTO_INSTEON
from openpeerpower.components.fan import DOMAIN as FAN, SUPPORT_SET_SPEED, FanEntity
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.core import OpenPeerPower
from openpeerpower.helpers.entity_platform import AddEntitiesCallback
from openpeerpower.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .const import _LOGGER, DOMAIN as ISY994_DOMAIN, ISY994_NODES, ISY994_PROGRAMS
from .entity import ISYNodeEntity, ISYProgramEntity
from .helpers import migrate_old_unique_ids
SPEED_RANGE = (1, 255) # off is not included
async def async_setup_entry(
    opp: OpenPeerPower,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> bool:
    """Set up the ISY994 fan platform."""
    isy_data = opp.data[ISY994_DOMAIN][entry.entry_id]
    # One entity per ISY fan node, plus one per fan program triple.
    entities = [ISYFanEntity(node) for node in isy_data[ISY994_NODES][FAN]]
    entities.extend(
        ISYFanProgramEntity(name, status, actions)
        for name, status, actions in isy_data[ISY994_PROGRAMS][FAN]
    )
    await migrate_old_unique_ids(opp, FAN, entities)
    async_add_entities(entities)
class ISYFanEntity(ISYNodeEntity, FanEntity):
    """Representation of an ISY994 fan device."""

    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage, or None when status is unknown."""
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        return ranged_value_to_percentage(SPEED_RANGE, self._node.status)

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        # Insteon fans expose exactly three discrete speeds (low/med/high).
        if self._node.protocol == PROTO_INSTEON:
            return 3
        return int_states_in_range(SPEED_RANGE)

    @property
    def is_on(self) -> bool | None:
        """Get if the fan is on (None when the node status is unknown)."""
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        return self._node.status != 0

    async def async_set_percentage(self, percentage: int) -> None:
        """Set node to speed percentage for the ISY994 fan device."""
        if percentage == 0:
            await self._node.turn_off()
            return
        # Map the 1-100 percentage onto the device's 1-255 range, rounding up
        # so any non-zero percentage yields a non-zero device value.
        isy_speed = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
        await self._node.turn_on(val=isy_speed)

    async def async_turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Send the turn on command to the ISY994 fan device."""
        # NOTE(review): a None percentage is forwarded unchanged and would
        # reach percentage_to_ranged_value -- confirm callers always supply
        # a percentage, or a default should be applied here.
        await self.async_set_percentage(percentage)

    async def async_turn_off(self, **kwargs) -> None:
        """Send the turn off command to the ISY994 fan device."""
        await self._node.turn_off()

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_SET_SPEED
class ISYFanProgramEntity(ISYProgramEntity, FanEntity):
    """Representation of an ISY994 fan program."""

    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage, or None when status is unknown."""
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        return ranged_value_to_percentage(SPEED_RANGE, self._node.status)

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return int_states_in_range(SPEED_RANGE)

    @property
    def is_on(self) -> bool:
        """Get if the fan is on."""
        return self._node.status != 0

    async def async_turn_off(self, **kwargs) -> None:
        """Send the turn off command to ISY994 fan program."""
        # ISY program convention: the "then" branch implements turn-off here.
        if not await self._actions.run_then():
            _LOGGER.error("Unable to turn off the fan")

    async def async_turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Send the turn on command to ISY994 fan program."""
        # ISY program convention: the "else" branch implements turn-on here.
        if not await self._actions.run_else():
            _LOGGER.error("Unable to turn on the fan")
d033befd38017816c3af474ab2709b10602c4b44 | 8,059 | py | Python | skfuzzy/control/fuzzyvariable.py | adaj/scikit-fuzzy | ae70db3a381a2b75d0c8e765736f83cf5e5c3ec5 | [
"BSD-3-Clause"
] | null | null | null | skfuzzy/control/fuzzyvariable.py | adaj/scikit-fuzzy | ae70db3a381a2b75d0c8e765736f83cf5e5c3ec5 | [
"BSD-3-Clause"
] | null | null | null | skfuzzy/control/fuzzyvariable.py | adaj/scikit-fuzzy | ae70db3a381a2b75d0c8e765736f83cf5e5c3ec5 | [
"BSD-3-Clause"
] | null | null | null | """
fuzzyvariable.py : Contains the base fuzzy variable class, FuzzyVariable.
"""
from collections import OrderedDict
import numpy as np
from .term import Term
from .visualization import FuzzyVariableVisualizer
from ..membership import trimf, smf, zmf
class FuzzyVariable(object):
    """
    Base class containing universe variable & associated membership functions.

    Parameters
    ----------
    universe : array-like
        Universe variable. Must be 1-dimensional and convertible to a NumPy
        array. Required.
    label : string
        Name of the universe variable. Optional.
    defuzzify_method : string
        name of method used for defuzzification, defaults to 'centroid'

    Notes
    -----
    This class is designed as the base class underlying the Antecedent and
    Consequent classes, not for individual use.
    """

    def __init__(self, universe, label, defuzzify_method='centroid'):
        """
        Initialization of fuzzy variable

        Parameters
        ----------
        universe : array-like
            Universe variable. Must be 1-dimensional and convertible to a NumPy
            array.
        label : string
            Unique name of the universe variable, e.g., 'food' or 'velocity'.
        defuzzify_method : string
            Name of the defuzzification method; defaults to 'centroid'.
        """
        self.universe = np.asarray(universe)
        self.label = label
        self.defuzzify_method = defuzzify_method
        # Term label -> Term mapping; insertion order is preserved.
        self.terms = OrderedDict()
        self._id = id(self)

    def __repr__(self):
        # NOTE(review): relies on a ``__name__`` attribute supplied by
        # subclasses (e.g. Antecedent/Consequent) -- confirm before calling
        # repr() on a bare FuzzyVariable.
        return "{0}: {1}".format(self.__name__, self.label)

    def __len__(self):
        return self.universe.size

    def __getitem__(self, key):
        """
        Calling `variable['label']` will return the 'label' term
        """
        if key in self.terms.keys():
            return self.terms[key]
        else:
            # Build a pretty list of available mf labels and raise an
            # informative error message
            options = ''
            i0 = len(self.terms) - 1
            i1 = len(self.terms) - 2
            for i, available_key in enumerate(self.terms.keys()):
                if i == i1:
                    options += "'" + str(available_key) + "', or "
                elif i == i0:
                    options += "'" + str(available_key) + "'."
                else:
                    options += "'" + str(available_key) + "'; "
            raise ValueError("Membership function '{0}' does not exist for "
                             "{1} {2}.\n"
                             "Available options: {3}".format(
                                 key, self.__name__, self.label, options))

    def __setitem__(self, key, item):
        """
        Enable terms to be added with the syntax::

          variable['new_label'] = new_mf
        """
        if isinstance(item, Term):
            if item.label != key:
                raise ValueError("Term's label must match new key")
            if item.parent is not None:
                raise ValueError("Term must not already have a parent")
        else:
            # Try to create a term from item, assuming it is a membership
            # function
            item = Term(key, np.asarray(item))

        mf = item.mf

        # The membership function must be defined over the whole universe...
        if mf.size != self.universe.size:
            raise ValueError("New membership function {0} must be equivalent "
                             "in length to the universe variable.\n"
                             "Expected {1}, got {2}.".format(
                                 key, self.universe.size, mf.size))
        # ...and be a valid membership degree everywhere (small float slack).
        if (mf.max() > 1. + 1e-6) or (mf.min() < 0 - 1e-6):
            raise ValueError("Membership function {0} contains values out of "
                             "range. Allowed range is [0, 1].".format(key))

        # If above pass, add the new membership function
        item.parent = self
        self.terms[key] = item

    def __contains__(self, item):
        return item in self.terms

    def __iter__(self):
        return iter(self.terms)

    def view(self, ax=None, *args, **kwargs):
        """""" + FuzzyVariableVisualizer.view.__doc__
        # The visualizer draws onto ``ax`` (or a fresh figure); nothing is
        # returned to the caller.
        FuzzyVariableVisualizer(self, ax).view(*args, **kwargs)

    def automf(self, number=5, variable_type='quality', names=None,
               invert=False, zs=True):
        """
        Automatically populate the universe with membership functions.

        Parameters
        ----------
        number : integer or list of names
            Number of membership functions to create. For fully automated use,
            supply 3, 5, or 7. Any number may be generated, if you provide
            an appropriately sized list of `names`. If names are provided,
            they are used in lieu of the default names below.
        variable_type : string
            Type of variable this is. Accepted arguments are
            * 'quality' : Continuous variable, higher values are better.
            * 'quant' : Quantitative variable, no value judgements.
        names : list
            List of names to use when creating membership functions if you wish
            to override the default. Naming proceeds from lowest to highest,
            unless invert is True.
        invert : bool
            Reverses the naming order if True. Membership function peaks still
            march from lowest to highest.
        zs : bool
            If True (default), the first and last terms use open-ended Z- and
            S-shaped membership functions instead of triangles, so extreme
            universe values keep full membership in the outermost terms.

        Notes
        -----
        This convenience function allows quick construction of fuzzy variables
        with overlapping, triangular membership functions.

        It uses a standard naming convention defined for ``'quality'`` as::

        * dismal
        * poor
        * mediocre
        * average (always middle)
        * decent
        * good
        * excellent

        and for ``'quant'`` as::

        * lowest
        * lower
        * low
        * average (always middle)
        * high
        * higher
        * highest

        where the names on either side of ``'average'`` are used as needed to
        create 3, 5, or 7 membership functions.
        """
        if names is not None:
            # set number based on names passed
            number = len(names)
        else:
            if number not in [3, 5, 7]:
                raise ValueError("If number is not 3, 5, or 7, "
                                 "you must pass a list of names "
                                 "equal in length to number.")
            if variable_type.lower() == 'quality':
                names = ['dismal',
                         'poor',
                         'mediocre',
                         'average',
                         'decent',
                         'good',
                         'excellent']
            else:
                names = ['lowest',
                         'lower',
                         'low',
                         'average',
                         'high',
                         'higher',
                         'highest']
            if number == 3:
                if variable_type.lower() == 'quality':
                    names = names[1:6:2]
                else:
                    names = names[2:5]
            if number == 5:
                names = names[1:6]
        if invert is True:
            names = names[::-1]

        # Evenly spaced, half-overlapping triangles spanning the universe:
        # each term is parameterized as [left foot, peak, right foot].
        limits = [self.universe.min(), self.universe.max()]
        universe_range = limits[1] - limits[0]
        widths = [universe_range / ((number - 1) / 2.)] * int(number)
        centers = np.linspace(limits[0], limits[1], number)
        abcs = [[c - w / 2, c, c + w / 2] for c, w in zip(centers, widths)]

        # Clear existing adjectives, if any
        self.terms = OrderedDict()

        # Repopulate.  (Leftover debug print statements removed.)
        for name, abc in zip(names, abcs):
            if zs and name == names[0]:
                # Open-ended lowest term: full membership below the peak.
                self[name] = zmf(self.universe, abc[1], abc[2])
            elif zs and name == names[-1]:
                # Open-ended highest term: full membership above the peak.
                self[name] = smf(self.universe, abc[0], abc[1])
            else:
                self[name] = trimf(self.universe, abc)
4fbf0863167e1d4c13e961b6ac2b4931240f928b | 20,098 | py | Python | src/python/pants/jvm/resolve/coursier_fetch.py | xlevus/pants | 2e4301f90b4cba7b3dd71850ae34ca1b59832591 | [
"Apache-2.0"
] | null | null | null | src/python/pants/jvm/resolve/coursier_fetch.py | xlevus/pants | 2e4301f90b4cba7b3dd71850ae34ca1b59832591 | [
"Apache-2.0"
] | null | null | null | src/python/pants/jvm/resolve/coursier_fetch.py | xlevus/pants | 2e4301f90b4cba7b3dd71850ae34ca1b59832591 | [
"Apache-2.0"
] | 4 | 2021-06-18T09:11:27.000Z | 2021-09-30T08:38:43.000Z | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Any, Iterable, Optional, Tuple
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.collection import Collection, DeduplicatedCollection
from pants.engine.fs import (
AddPrefix,
Digest,
DigestContents,
DigestSubset,
FileDigest,
MergeDigests,
PathGlobs,
RemovePrefix,
)
from pants.engine.process import BashBinary, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Targets, TransitiveTargets, TransitiveTargetsRequest
from pants.jvm.resolve.coursier_setup import Coursier
from pants.jvm.target_types import JvmLockfileSources, MavenRequirementsField
from pants.jvm.util_rules import ExtractFileDigest
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
class CoursierError(Exception):
"""An exception relating to invoking Coursier or processing its output."""
class MavenRequirements(DeduplicatedCollection[str]):
@classmethod
def create_from_maven_coordinates_fields(
cls,
fields: Iterable[MavenRequirementsField],
*,
additional_requirements: Iterable[str] = (),
) -> MavenRequirements:
field_requirements = (
str(maven_coord) for field in fields for maven_coord in (field.value or ())
)
return MavenRequirements((*field_requirements, *additional_requirements))
@dataclass(frozen=True)
class MavenCoord:
"""A single Maven-style coordinate for a JVM dependency."""
# TODO: parse and validate the input into individual coordinate
# components, then re-expose the string coordinate as a property
# or __str__.
coord: str
class MavenCoordinates(DeduplicatedCollection[MavenCoord]):
"""An ordered list of MavenCoord."""
@dataclass(frozen=True)
class CoursierLockfileEntry:
"""A single artifact entry from a Coursier-resolved lockfile.
These fields are nearly identical to the JSON objects from the
"dependencies" entries in Coursier's --json-output-file format.
But unlike Coursier's JSON report, a CoursierLockfileEntry
includes the content-address of the artifact fetched by Coursier
and ingested by Pants.
For example, a Coursier JSON report dependency entry might look like this:
```
{
"coord": "com.chuusai:shapeless_2.13:2.3.3",
"file": "/home/USER/.cache/coursier/v1/https/repo1.maven.org/maven2/com/chuusai/shapeless_2.13/2.3.3/shapeless_2.13-2.3.3.jar",
"directDependencies": [
"org.scala-lang:scala-library:2.13.0"
],
"dependencies": [
"org.scala-lang:scala-library:2.13.0"
]
}
```
The equivalent CoursierLockfileEntry would look like this:
```
CoursierLockfileEntry(
coord="com.chuusai:shapeless_2.13:2.3.3", # identical
file_name="shapeless_2.13-2.3.3.jar" # PurePath(entry["file"].name)
direct_dependencies=(MavenCoord("org.scala-lang:scala-library:2.13.0"),),
dependencies=(MavenCoord("org.scala-lang:scala-library:2.13.0"),),
file_digest=FileDigest(fingerprint=<sha256 of the jar>, ...),
)
```
"""
coord: MavenCoord
file_name: str
direct_dependencies: MavenCoordinates
dependencies: MavenCoordinates
file_digest: FileDigest
@classmethod
def from_json_dict(cls, entry) -> CoursierLockfileEntry:
"""Construct a CoursierLockfileEntry from its JSON dictionary representation."""
return cls(
coord=MavenCoord(coord=entry["coord"]),
file_name=entry["file_name"],
direct_dependencies=MavenCoordinates(
MavenCoord(coord=d) for d in entry["directDependencies"]
),
dependencies=MavenCoordinates(MavenCoord(coord=d) for d in entry["dependencies"]),
file_digest=FileDigest(
fingerprint=entry["file_digest"]["fingerprint"],
serialized_bytes_length=entry["file_digest"]["serialized_bytes_length"],
),
)
def to_json_dict(self) -> dict[str, Any]:
"""Export this CoursierLockfileEntry to a JSON object."""
return dict(
coord=self.coord.coord,
directDependencies=[coord.coord for coord in self.direct_dependencies],
dependencies=[coord.coord for coord in self.dependencies],
file_name=self.file_name,
file_digest=dict(
fingerprint=self.file_digest.fingerprint,
serialized_bytes_length=self.file_digest.serialized_bytes_length,
),
)
@dataclass(frozen=True)
class CoursierResolvedLockfile:
"""An in-memory representation of Pants' Coursier lockfile format."""
entries: Tuple[CoursierLockfileEntry, ...]
@classmethod
def from_json_dict(cls, lockfile) -> CoursierResolvedLockfile:
"""Construct a CoursierResolvedLockfile from its JSON dictionary representation."""
return CoursierResolvedLockfile(
entries=tuple(CoursierLockfileEntry.from_json_dict(dep) for dep in lockfile)
)
def to_json(self) -> bytes:
"""Export this CoursierResolvedLockfile to human-readable JSON.
This JSON is intended to be checked in to the user's repo as a hermetic snapshot of a
Coursier resolved JVM classpath.
"""
return json.dumps([entry.to_json_dict() for entry in self.entries], indent=4).encode(
"utf-8"
)
@rule(level=LogLevel.DEBUG)
async def coursier_resolve_lockfile(
bash: BashBinary,
coursier: Coursier,
maven_requirements: MavenRequirements,
) -> CoursierResolvedLockfile:
"""Run `coursier fetch ...` against a list of Maven coordinates and capture the result.
This rule does two things in a single Process invocation:
* Runs `coursier fetch` to let Coursier do the heavy lifting of resolving
dependencies and downloading resolved artifacts (jars, etc).
* Copies the resolved artifacts into the Process output directory, capturing
the artifacts as content-addressed `Digest`s.
It's important that this happens in the same process, since the process isn't
guaranteed to run on the same machine as the rule, nor is a subsequent process
invocation. This guarantees that whatever Coursier resolved, it was fully
captured into Pants' content addressed artifact storage.
Note however that we still get the benefit of Coursier's "global" cache if it
had already been run on the machine where the `coursier fetch` runs, so rerunning
`coursier fetch` tends to be fast in practice.
Finally, this rule bundles up the result into a `CoursierResolvedLockfile`. This
data structure encapsulates everything necessary to either materialize the
resolved dependencies to a classpath for Java invocations, or to write the
lockfile out to the workspace to hermetically freeze the result of the resolve.
"""
if len(maven_requirements) == 0:
return CoursierResolvedLockfile(entries=())
coursier_report_file_name = "coursier_report.json"
process_result = await Get(
ProcessResult,
Process(
argv=[
bash.path,
coursier.wrapper_script,
coursier.coursier.exe,
coursier_report_file_name,
*list(maven_requirements),
],
input_digest=coursier.digest,
output_directories=("classpath",),
output_files=(coursier_report_file_name,),
description=(
"Running `coursier fetch` against "
f"{pluralize(len(maven_requirements), 'requirement')}: "
f"{', '.join(maven_requirements)}"
),
level=LogLevel.DEBUG,
),
)
report_digest = await Get(
Digest, DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name]))
)
report_contents = await Get(DigestContents, Digest, report_digest)
report = json.loads(report_contents[0].content)
artifact_file_names = tuple(PurePath(dep["file"]).name for dep in report["dependencies"])
artifact_output_paths = tuple(f"classpath/{file_name}" for file_name in artifact_file_names)
artifact_digests = await MultiGet(
Get(Digest, DigestSubset(process_result.output_digest, PathGlobs([output_path])))
for output_path in artifact_output_paths
)
stripped_artifact_digests = await MultiGet(
Get(Digest, RemovePrefix(artifact_digest, "classpath"))
for artifact_digest in artifact_digests
)
artifact_file_digests = await MultiGet(
Get(FileDigest, ExtractFileDigest(stripped_artifact_digest, file_name))
for stripped_artifact_digest, file_name in zip(
stripped_artifact_digests, artifact_file_names
)
)
return CoursierResolvedLockfile(
entries=tuple(
CoursierLockfileEntry(
coord=MavenCoord(dep["coord"]),
direct_dependencies=MavenCoordinates(
MavenCoord(dd) for dd in dep["directDependencies"]
),
dependencies=MavenCoordinates(MavenCoord(d) for d in dep["dependencies"]),
file_name=file_name,
file_digest=artifact_file_digest,
)
for dep, file_name, artifact_file_digest in zip(
report["dependencies"], artifact_file_names, artifact_file_digests
)
)
)
@dataclass(frozen=True)
class FetchOneCoordRequest:
coord: MavenCoord
expected_digest: FileDigest
@dataclass(frozen=True)
class ResolvedClasspathEntry:
"""A single classpath entry from a resolver (e.g. Coursier), typically a jar."""
coord: MavenCoord
file_name: str
digest: Digest
class ResolvedClasspathEntries(Collection[ResolvedClasspathEntry]):
"""A collection of resolved classpath entries."""
@rule(level=LogLevel.DEBUG)
async def coursier_fetch_one_coord(
bash: BashBinary,
coursier: Coursier,
request: CoursierLockfileEntry,
) -> ResolvedClasspathEntry:
"""Run `coursier fetch --intrasitive` to fetch a single artifact.
This rule exists to permit efficient subsetting of a "global" classpath
in the form of a lockfile. Callers can determine what subset of dependencies
from the lockfile are needed for a given target, then request those
lockfile entries individually.
By fetching only one entry at a time, we maximize our cache efficiency. If instead
we fetched the entire subset that the caller wanted, there would be a different cache
key for every possible subset.
This rule also guarantees exact reproducibility. If all caches have been
removed, `coursier fetch` will re-download the artifact, and this rule will
confirm that what was downloaded matches exactly (by content digest) what
was specified in the lockfile (what Coursier originally downloaded).
"""
coursier_report_file_name = "coursier_report.json"
process_result = await Get(
ProcessResult,
Process(
argv=[
bash.path,
coursier.wrapper_script,
coursier.coursier.exe,
coursier_report_file_name,
"--intransitive",
request.coord.coord,
],
input_digest=coursier.digest,
output_directories=("classpath",),
output_files=(coursier_report_file_name,),
description="Run coursier resolve",
level=LogLevel.DEBUG,
),
)
report_digest = await Get(
Digest, DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name]))
)
report_contents = await Get(DigestContents, Digest, report_digest)
report = json.loads(report_contents[0].content)
report_deps = report["dependencies"]
if len(report_deps) == 0:
raise CoursierError("Coursier fetch report has no dependencies (i.e. nothing was fetched).")
elif len(report_deps) > 1:
raise CoursierError(
"Coursier fetch report has multiple dependencies, but exactly 1 was expected."
)
dep = report_deps[0]
resolved_coord = dep["coord"]
if resolved_coord != request.coord.coord:
raise CoursierError(
f'Coursier resolved coord ("{resolved_coord}") does not match requested coord ("{request.coord.coord}").'
)
file_path = PurePath(dep["file"])
classpath_dest = f"classpath/{file_path.name}"
resolved_file_digest = await Get(
Digest, DigestSubset(process_result.output_digest, PathGlobs([classpath_dest]))
)
stripped_digest = await Get(Digest, RemovePrefix(resolved_file_digest, "classpath"))
file_digest = await Get(
FileDigest,
ExtractFileDigest(stripped_digest, file_path.name),
)
if file_digest != request.file_digest:
raise CoursierError(
f"Coursier fetch for '{resolved_coord}' succeeded, but fetched artifact {file_digest} did not match the expected artifact: {request.file_digest}."
)
return ResolvedClasspathEntry(
coord=request.coord, file_name=file_path.name, digest=stripped_digest
)
@rule(level=LogLevel.DEBUG)
async def coursier_fetch_lockfile(lockfile: CoursierResolvedLockfile) -> ResolvedClasspathEntries:
"""Fetch every artifact in a lockfile."""
classpath_entries = await MultiGet(
Get(ResolvedClasspathEntry, CoursierLockfileEntry, entry) for entry in lockfile.entries
)
return ResolvedClasspathEntries(classpath_entries)
@rule(level=LogLevel.DEBUG)
async def load_coursier_lockfile_from_source(
lockfile_field: JvmLockfileSources,
) -> CoursierResolvedLockfile:
lockfile_sources = await Get(
SourceFiles,
SourceFilesRequest(
[lockfile_field],
for_sources_types=[JvmLockfileSources],
enable_codegen=False,
),
)
if len(lockfile_sources.files) != 1:
raise CoursierError("JvmLockfileSources must have exactly 1 source file")
source_lockfile_digest_contents = await Get(
DigestContents, Digest, lockfile_sources.snapshot.digest
)
source_lockfile_content = source_lockfile_digest_contents[0]
return CoursierResolvedLockfile.from_json_dict(json.loads(source_lockfile_content.content))
@dataclass(frozen=True)
class CoursierLockfileForTargetRequest:
targets: Targets
@rule
async def get_coursier_lockfile_for_target(
request: CoursierLockfileForTargetRequest,
) -> CoursierResolvedLockfile:
"""Determine the lockfile that applies to a given JVM target.
Presently this just walks the target's transitive dependencies to find a dependency
that provides `JvmLockfileSources`.
This rule asserts that at most 1 such dependency may be in the JVM target's transitive
dependencies.
"""
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest(target.address for target in request.targets)
)
transitive_jvm_lockfile_sources = [
target[JvmLockfileSources]
for target in transitive_targets.closure
if target.has_field(JvmLockfileSources)
]
if len(transitive_jvm_lockfile_sources) == 0:
raise CoursierError(
"Exactly 1 target with a coursier_lockfile should appear in the transitive closure"
" of a JVM target, but none were found."
)
if len(transitive_jvm_lockfile_sources) > 1:
raise CoursierError(
f"Exactly 1 target with a coursier_lockfile should appear in the transitive closure"
f" of a JVM library, but {len(transitive_jvm_lockfile_sources)} were found."
)
jvm_lockfile_sources = transitive_jvm_lockfile_sources[0]
lockfile_sources = await Get(
SourceFiles,
SourceFilesRequest(
[jvm_lockfile_sources],
for_sources_types=[JvmLockfileSources],
enable_codegen=False,
),
)
if len(lockfile_sources.files) != 1:
raise CoursierError(
f"JvmLockfileSources must have exactly 1 source file, but {jvm_lockfile_sources}"
f" has {len(lockfile_sources.files)}"
)
source_lockfile_digest_contents = await Get(
DigestContents, Digest, lockfile_sources.snapshot.digest
)
source_lockfile_content = source_lockfile_digest_contents[0]
return CoursierResolvedLockfile.from_json_dict(json.loads(source_lockfile_content.content))
@dataclass(frozen=True)
class MaterializedClasspathRequest:
"""A helper to merge various classpath elements.
:param prefix: if set, should be a relative directory that will
be prepended to every classpath element. This is useful for
keeping all classpath elements isolated under a single directory
in a process invocation, where other inputs on the process's
root directory might interfere with un-prefixed classpath
entries (or vice versa).
"""
prefix: Optional[str] = None
lockfiles: Tuple[CoursierResolvedLockfile, ...] = ()
resolved_classpaths: Tuple[ResolvedClasspathEntries, ...] = ()
maven_requirements: Tuple[MavenRequirements, ...] = ()
@dataclass(frozen=True)
class MaterializedClasspath:
"""A fully fetched and merged classpath, ready to hand to a JVM process invocation."""
digest: Digest
file_names: Tuple[str, ...]
prefix: Optional[str]
def classpath_arg(self, root: Optional[str] = None) -> str:
"""Construct the argument to be passed to `-classpath`.
:param root: if set, will be prepended to all entries. This is useful
if the process working directory is not the same as the root
directory for the process input `Digest`.
"""
def maybe_add_prefix(file_name: str) -> str:
if self.prefix is not None:
file_name = os.path.join(self.prefix, file_name)
if root is not None:
file_name = os.path.join(root, file_name)
return file_name
return ":".join(maybe_add_prefix(file_name) for file_name in self.file_names)
@rule(level=LogLevel.DEBUG)
async def materialize_classpath(request: MaterializedClasspathRequest) -> MaterializedClasspath:
"""Resolve, fetch, and merge various classpath types to a single `Digest` and metadata."""
maven_requirements_lockfiles = await MultiGet(
Get(
CoursierResolvedLockfile,
MavenRequirements,
maven_requirements,
)
for maven_requirements in request.maven_requirements
)
lockfile_and_requirements_classpath_entries = await MultiGet(
Get(
ResolvedClasspathEntries,
CoursierResolvedLockfile,
lockfile,
)
for lockfile in (*request.lockfiles, *maven_requirements_lockfiles)
)
all_classpath_entries = (
*lockfile_and_requirements_classpath_entries,
*request.resolved_classpaths,
)
merged_digest = await Get(
Digest,
MergeDigests(
classpath_entry.digest
for classpath_entries in all_classpath_entries
for classpath_entry in classpath_entries
),
)
if request.prefix is not None:
merged_digest = await Get(Digest, AddPrefix(merged_digest, request.prefix))
file_names = tuple(
classpath_entry.file_name
for classpath_entries in all_classpath_entries
for classpath_entry in classpath_entries
)
return MaterializedClasspath(prefix=request.prefix, digest=merged_digest, file_names=file_names)
def rules():
return [*collect_rules()]
| 36.809524 | 158 | 0.690218 |
07262952208c028ef1a8ba0c5cd9a7b9341c5811 | 9,789 | py | Python | atomate/vasp/workflows/tests/test_bulk_modulus_workflow.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/vasp/workflows/tests/test_bulk_modulus_workflow.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/vasp/workflows/tests/test_bulk_modulus_workflow.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import json
import os
import unittest
import numpy as np
from fireworks import FWorker
from fireworks.core.rocket_launcher import rapidfire
from monty.json import MontyEncoder
from pymatgen.core import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
from atomate.utils.testing import AtomateTest
from atomate.vasp.powerups import use_fake_vasp, use_no_vasp
from atomate.vasp.workflows.presets.core import wf_bulk_modulus
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(module_dir, "..", "..", "..", "common", "test_files")
reference_dir = os.path.join(module_dir, "..", "..", "test_files", "bulk_modulus_wf")
DEBUG_MODE = (
False # If True, retains the database and output dirs at the end of the test
)
VASP_CMD = (
None # If None, runs a "fake" VASP. Otherwise, runs VASP with this command...
)
_write_task_docs = (
False # Test developer option: defaults to False, need to be True only once
)
class TestBulkModulusWorkflow(AtomateTest):
"""
This test will either actually run VASP (if VASP_CMD is set) or artificially pass on outputs
(if not VASP_CMD) and test the whole bulk modulus workflow and its implementation and outputs
for an example calculation for silicon.
note for the developer of the test:
This tests can be run in two modes if not VASP_CMD:
1. first all inputs and outputs of all deformations are present in which case
in each folder a trimmed version of task.json will be generated so that
2. once task.json is present, VaspRun can be skipped where task.json is
available and their "inputs" and "outputs" folders can be removed
"""
def setUp(self):
super().setUp()
self.struct_si = PymatgenTest.get_structure("Si")
self.ndeformations = 6
self.deformations = [
(np.identity(3) * (1 + x)).tolist()
for x in np.linspace(-0.05, 0.05, self.ndeformations)
]
self.wf_config = {"VASP_CMD": ">>vasp_cmd<<", "DB_FILE": ">>db_file<<"}
self.wf = wf_bulk_modulus(self.struct_si, self.wf_config)
def _simulate_vasprun(self, wf):
no_vasp_ref_dirs = {}
fake_vasp_ref_dirs = {}
for i in range(2, self.ndeformations + 2):
if os.path.exists(os.path.join(reference_dir, str(i), "inputs")):
if not VASP_CMD:
fake_vasp_ref_dirs[
f"bulk_modulus deformation {i - 2}"
] = os.path.join(reference_dir, str(i))
else:
no_vasp_ref_dirs[f"bulk_modulus deformation {i - 2}"] = os.path.join(
self.scratch_dir, str(i)
)
fake_vasp_ref_dirs["structure optimization"] = os.path.join(reference_dir, "1")
new_wf = use_no_vasp(wf, no_vasp_ref_dirs)
return use_fake_vasp(new_wf, fake_vasp_ref_dirs, params_to_check=["ENCUT"])
def _check_run(self, d, mode):
if mode not in [
"structure optimization",
"bulk_modulus deformation 0",
"bulk_modulus deformation 4",
"fit equation of state",
]:
raise ValueError("Invalid mode!")
if mode not in ["fit equation of state"]:
self.assertEqual(d["formula_pretty"], "Si")
self.assertEqual(d["formula_anonymous"], "A")
self.assertEqual(d["nelements"], 1)
self.assertEqual(d["state"], "successful")
if mode in ["structure optimization"]:
self.assertAlmostEqual(
d["calcs_reversed"][0]["output"]["structure"]["lattice"]["a"], 3.866, 3
)
self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.432, 3)
self.relaxed_struct_si = d["calcs_reversed"][0]["output"]["structure"]
elif mode in ["bulk_modulus deformation 0"]:
for i, l in enumerate(["a", "b", "c"]):
self.assertAlmostEqual(
d["input"]["structure"]["lattice"][l],
self.relaxed_struct_si["lattice"][l] * (self.deformations[0][i][i]),
2,
)
stress = d["calcs_reversed"][0]["output"]["ionic_steps"][-1]["stress"]
np.testing.assert_allclose(
stress, np.diag([189.19, 189.19, 189.19]), atol=1e-2
)
elif mode in ["bulk_modulus deformation 4"]:
for i, l in enumerate(["a", "b", "c"]):
self.assertAlmostEqual(
d["input"]["structure"]["lattice"][l],
self.relaxed_struct_si["lattice"][l] * (self.deformations[4][i][i]),
2,
)
stress = d["calcs_reversed"][0]["output"]["ionic_steps"][-1]["stress"]
np.testing.assert_allclose(
stress, np.diag([-65.56, -65.56, -65.56]), atol=1e-2
)
elif mode in ["fit equation of state"]:
self.assertAlmostEqual(d["bulk_modulus"], 88.90, places=2)
self.assertEqual(len(d["all_task_ids"]), 7)
self.assertEqual(len(d["energies"]), self.ndeformations)
self.assertEqual(len(d["volumes"]), self.ndeformations)
s = SpacegroupAnalyzer(
Structure.from_dict(d["structure"])
).get_conventional_standard_structure()
self.assertAlmostEqual(s.lattice.c, 5.468, places=3)
def setup_task_docs(self):
self.task_file = "task.json"
for i in range(2, self.ndeformations + 2):
if os.path.exists(os.path.join(reference_dir, str(i), self.task_file)):
with open(os.path.join(reference_dir, str(i), self.task_file)) as fp:
d = json.load(fp)
new_fw = self.lp.fireworks.find_one(
{"name": {"$regex": f"bulk_modulus deformation {i - 2}"}}
)
# the fw tag (inluded in "name") is important in pulling tasks in the last FW
# in wf_bulk_modulus
d["task_label"] = new_fw["name"]
d["task_id"] += i + 1000000 # to avoid duplicate task_id
os.makedirs(os.path.join(self.scratch_dir, str(i)))
with open(
os.path.join(self.scratch_dir, str(i), "task.json"), "w"
) as fp:
json.dump(
d,
fp,
sort_keys=True,
indent=4,
ensure_ascii=False,
cls=MontyEncoder,
)
elif not os.path.exists(os.path.join(reference_dir, str(i), "inputs")):
raise OSError(
"neither {} nor {} are present in {}".format(
"inputs", self.task_file, os.path.join(reference_dir, str(i))
)
)
def write_task_docs(self):
# this step needs to be run once: once task.json is present, remove the inputs/outputs folders
for i in range(2, self.ndeformations + 2):
# not to unnecessarily override available task.json
if not os.path.exists(os.path.join(reference_dir, str(i), "task.json")):
d = self.get_task_collection().find_one(
{"task_label": {"$regex": f"bulk_modulus deformation {i - 2}"}}
)
rm_props = ["bandstructure", "input"]
for icalc in range(len(d["calcs_reversed"])):
for prop in rm_props:
try:
del d["calcs_reversed"][icalc][prop]
except Exception:
pass
with open(os.path.join(reference_dir, str(i), "task.json"), "w") as fp:
json.dump(
d,
fp,
sort_keys=True,
indent=4,
ensure_ascii=False,
cls=MontyEncoder,
)
def test_wf(self):
self.wf = self._simulate_vasprun(self.wf)
self.assertEqual(len(self.wf.fws), self.ndeformations + 2)
defo_vis = [
fw.tasks[2]["vasp_input_set"] for fw in self.wf.fws if "deform" in fw.name
]
assert all([vis.user_incar_settings["NSW"] == 99 for vis in defo_vis])
assert all([vis.user_incar_settings["IBRION"] == 2 for vis in defo_vis])
self.lp.add_wf(self.wf)
# this is specific to bulk_modulus_wf "fit equation of state" that uses FW tag
self.setup_task_docs()
rapidfire(
self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")})
)
if _write_task_docs:
self.write_task_docs()
# check relaxation
d = self.get_task_collection().find_one(
{"task_label": {"$regex": "structure optimization"}}
)
self._check_run(d, mode="structure optimization")
# check two of the deformation calculations
d = self.get_task_collection().find_one(
{"task_label": {"$regex": "bulk_modulus deformation 0"}}
)
self._check_run(d, mode="bulk_modulus deformation 0")
d = self.get_task_collection().find_one(
{"task_label": {"$regex": "bulk_modulus deformation 4"}}
)
self._check_run(d, mode="bulk_modulus deformation 4")
# check the final results
d = self.get_task_collection(coll_name="eos").find_one()
self._check_run(d, mode="fit equation of state")
if __name__ == "__main__":
unittest.main()
| 41.478814 | 102 | 0.561242 |
b878622023c0904ad2c91b1cb3165f4a68fcf584 | 13,790 | py | Python | tests/test_basics.py | k-dominik/pytest-qt | d15703fbcdfce4e74b79b2dff7f5a7e883ed2961 | [
"MIT"
] | null | null | null | tests/test_basics.py | k-dominik/pytest-qt | d15703fbcdfce4e74b79b2dff7f5a7e883ed2961 | [
"MIT"
] | null | null | null | tests/test_basics.py | k-dominik/pytest-qt | d15703fbcdfce4e74b79b2dff7f5a7e883ed2961 | [
"MIT"
] | null | null | null | import weakref
import pytest
from pytestqt import qt_compat
from pytestqt.qt_compat import qt_api
def test_basics(qtbot):
"""
Basic test that works more like a sanity check to ensure we are setting up a QApplication
properly and are able to display a simple event_recorder.
"""
assert qt_api.QApplication.instance() is not None
widget = qt_api.QWidget()
qtbot.addWidget(widget)
widget.setWindowTitle("W1")
widget.show()
assert widget.isVisible()
assert widget.windowTitle() == "W1"
def test_key_events(qtbot, event_recorder):
"""
Basic key events test.
"""
def extract(key_event):
return (key_event.type(), key_event.key(), key_event.text())
event_recorder.registerEvent(qt_api.QtGui.QKeyEvent, extract)
qtbot.keyPress(event_recorder, "a")
assert event_recorder.event_data == (
qt_api.QEvent.KeyPress,
int(qt_api.Qt.Key_A),
"a",
)
qtbot.keyRelease(event_recorder, "a")
assert event_recorder.event_data == (
qt_api.QEvent.KeyRelease,
int(qt_api.Qt.Key_A),
"a",
)
def test_mouse_events(qtbot, event_recorder):
"""
Basic mouse events test.
"""
def extract(mouse_event):
return (mouse_event.type(), mouse_event.button(), mouse_event.modifiers())
event_recorder.registerEvent(qt_api.QtGui.QMouseEvent, extract)
qtbot.mousePress(event_recorder, qt_api.Qt.LeftButton)
assert event_recorder.event_data == (
qt_api.QEvent.MouseButtonPress,
qt_api.Qt.LeftButton,
qt_api.Qt.NoModifier,
)
qtbot.mousePress(event_recorder, qt_api.Qt.RightButton, qt_api.Qt.AltModifier)
assert event_recorder.event_data == (
qt_api.QEvent.MouseButtonPress,
qt_api.Qt.RightButton,
qt_api.Qt.AltModifier,
)
def test_stop_for_interaction(qtbot, timer):
"""
Test qtbot.stopForInteraction()
"""
widget = qt_api.QWidget()
qtbot.addWidget(widget)
qtbot.waitForWindowShown(widget)
timer.single_shot_callback(widget.close, 0)
qtbot.stopForInteraction()
@pytest.mark.parametrize("show", [True, False])
@pytest.mark.parametrize("method_name", ["waitExposed", "waitActive"])
def test_wait_window(show, method_name, qtbot):
"""
Using one of the wait-widget methods should not raise anything if the widget
is properly displayed, otherwise should raise a TimeoutError.
"""
method = getattr(qtbot, method_name)
if qt_api.pytest_qt_api != "pyqt5":
with pytest.raises(RuntimeError) as exc_info:
with method(None, None):
pass
assert str(exc_info.value) == "Available in PyQt5 only"
else:
widget = qt_api.QWidget()
qtbot.add_widget(widget)
if show:
with method(widget, timeout=1000):
widget.show()
else:
with pytest.raises(qtbot.TimeoutError):
with method(widget, timeout=100):
pass
@pytest.mark.parametrize("method_name", ["waitExposed", "waitActive"])
def test_wait_window_propagates_other_exception(method_name, qtbot):
"""
Exceptions raised inside the with-statement of wait-widget methods should
propagate properly.
"""
if qt_api.pytest_qt_api != "pyqt5":
pytest.skip("Available in PyQt5 only")
method = getattr(qtbot, method_name)
widget = qt_api.QWidget()
qtbot.add_widget(widget)
with pytest.raises(ValueError) as exc_info:
with method(widget, timeout=100):
widget.show()
raise ValueError("some other error")
assert str(exc_info.value) == "some other error"
def test_widget_kept_as_weakref(qtbot):
"""
Test if the widget is kept as a weak reference in QtBot
"""
widget = qt_api.QWidget()
qtbot.add_widget(widget)
widget = weakref.ref(widget)
assert widget() is None
def test_event_processing_before_and_after_teardown(testdir):
"""
Make sure events are processed before and after fixtures are torn down.
The test works by creating a session object which pops() one of its events
whenever a processEvents() occurs. Fixture and tests append values
to the event list but expect the list to have been processed (by the pop())
at each point of interest.
https://github.com/pytest-dev/pytest-qt/issues/67
"""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
import pytest
@pytest.fixture(scope='session')
def events_queue(qapp):
class EventsQueue(qt_api.QtCore.QObject):
def __init__(self):
qt_api.QtCore.QObject.__init__(self)
self.events = []
def pop_later(self):
qapp.postEvent(self, qt_api.QEvent(qt_api.QEvent.User))
def event(self, ev):
if ev.type() == qt_api.QEvent.User:
self.events.pop(-1)
return qt_api.QtCore.QObject.event(self, ev)
return EventsQueue()
@pytest.fixture
def fix(events_queue, qapp):
assert events_queue.events == []
yield
assert events_queue.events == []
events_queue.events.append('fixture teardown')
events_queue.pop_later()
@pytest.mark.parametrize('i', range(3))
def test_events(events_queue, fix, i):
assert events_queue.events == []
events_queue.events.append('test event')
events_queue.pop_later()
"""
)
res = testdir.runpytest()
res.stdout.fnmatch_lines(["*3 passed in*"])
def test_header(testdir):
testdir.makeconftest(
"""
from pytestqt import qt_compat
from pytestqt.qt_compat import qt_api
def mock_get_versions():
return qt_compat.VersionTuple('PyQtAPI', '1.0', '2.5', '3.5')
assert hasattr(qt_api, 'get_versions')
qt_api.get_versions = mock_get_versions
"""
)
res = testdir.runpytest()
res.stdout.fnmatch_lines(
["*test session starts*", "PyQtAPI 1.0 -- Qt runtime 2.5 -- Qt compiled 3.5"]
)
def test_public_api_backward_compatibility():
"""
Test backward compatibility for version 1.6.0: since then symbols that were available from
pytestqt.plugin have been moved to other modules to enhance navigation and maintainability,
this test ensures the same symbols are still available from the same imports. (#90)
"""
import pytestqt.plugin
assert pytestqt.plugin.QtBot
assert pytestqt.plugin.SignalBlocker
assert pytestqt.plugin.MultiSignalBlocker
assert pytestqt.plugin.SignalTimeoutError
assert pytestqt.plugin.format_captured_exceptions
assert pytestqt.plugin.capture_exceptions
assert pytestqt.plugin.QtLoggingPlugin
assert pytestqt.plugin.Record
def test_qvariant(tmpdir):
"""Test that make_variant and extract_from_variant work in the same way
across all supported Qt bindings.
"""
settings = qt_api.QtCore.QSettings(
str(tmpdir / "foo.ini"), qt_api.QtCore.QSettings.IniFormat
)
settings.setValue("int", qt_api.make_variant(42))
settings.setValue("str", qt_api.make_variant("Hello"))
settings.setValue("empty", qt_api.make_variant())
assert qt_api.extract_from_variant(settings.value("int")) == 42
assert qt_api.extract_from_variant(settings.value("str")) == "Hello"
assert qt_api.extract_from_variant(settings.value("empty")) is None
def test_widgets_closed_before_fixtures(testdir):
    """
    Ensure widgets added by "qtbot.add_widget" are closed before all other
    fixtures are teardown. (#106).
    """
    # The generated fixture asserts w.closed during its own teardown, which
    # only passes if qtbot closed the widget first.
    testdir.makepyfile(
        """
        import pytest
        from pytestqt.qt_compat import qt_api
        class Widget(qt_api.QWidget):
            closed = False
            def closeEvent(self, e):
                e.accept()
                self.closed = True
        @pytest.fixture
        def widget(qtbot):
            w = Widget()
            qtbot.add_widget(w)
            yield w
            assert w.closed
        def test_foo(widget):
            pass
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*= 1 passed in *"])
def test_qtbot_wait(qtbot, stop_watch):
    """qtbot.wait(ms) must block for roughly the requested amount of time."""
    stop_watch.start()
    qtbot.wait(250)
    stop_watch.stop()
    # 220 rather than 250: allow some slack for timer inaccuracy.
    assert stop_watch.elapsed >= 220
@pytest.fixture
def event_recorder(qtbot):
    """Fixture providing a widget that captures data from registered events."""
    class EventRecorder(qt_api.QWidget):
        """
        Widget that records some kind of events sent to it.
        When this event_recorder receives a registered event (by calling `registerEvent`), it will call
        the associated *extract* function and hold the return value from the function in the
        `event_data` member.
        """
        def __init__(self):
            qt_api.QWidget.__init__(self)
            self._event_types = {}
            self.event_data = None
        def registerEvent(self, event_type, extract_func):
            self._event_types[event_type] = extract_func
        def event(self, ev):
            # Accept (and record) only events whose type was registered.
            for event_type, extract_func in self._event_types.items():
                if isinstance(ev, event_type):
                    self.event_data = extract_func(ev)
                    return True
            return False
    widget = EventRecorder()
    qtbot.addWidget(widget)
    return widget
@pytest.mark.parametrize(
    "value, expected",
    [
        (True, True),
        (False, False),
        ("True", True),
        ("False", False),
        ("true", True),
        ("false", False),
    ],
)
def test_parse_ini_boolean_valid(value, expected):
    """_parse_ini_boolean accepts real bools and their case-variant strings."""
    import pytestqt.qtbot
    assert pytestqt.qtbot._parse_ini_boolean(value) == expected
def test_parse_ini_boolean_invalid():
    """Any other string must raise ValueError."""
    import pytestqt.qtbot
    with pytest.raises(ValueError):
        pytestqt.qtbot._parse_ini_boolean("foo")
@pytest.mark.parametrize("option_api", ["pyqt4", "pyqt5", "pyside", "pyside2"])
def test_qt_api_ini_config(testdir, monkeypatch, option_api):
    """
    Test qt_api ini option handling.
    Only one binding is installed in any given environment, so selecting the
    installed one must pass while any other must fail at import time.
    """
    from pytestqt.qt_compat import qt_api
    # Make sure the environment variable does not override the ini option.
    monkeypatch.delenv("PYTEST_QT_API", raising=False)
    testdir.makeini(
        """
        [pytest]
        qt_api={option_api}
        """.format(
            option_api=option_api
        )
    )
    testdir.makepyfile(
        """
        import pytest
        def test_foo(qtbot):
            pass
        """
    )
    result = testdir.runpytest_subprocess()
    if qt_api.pytest_qt_api.replace("v2", "") == option_api:  # handle pyqt4v2
        result.stdout.fnmatch_lines(["* 1 passed in *"])
    else:
        # Probe whether ModuleNotFoundError exists to pick the expected error
        # name for this interpreter version.
        try:
            ModuleNotFoundError
        except NameError:
            # Python < 3.6
            result.stderr.fnmatch_lines(["*ImportError:*"])
        else:
            # Python >= 3.6
            result.stderr.fnmatch_lines(["*ModuleNotFoundError:*"])
@pytest.mark.parametrize("envvar", ["pyqt4", "pyqt5", "pyside", "pyside2"])
def test_qt_api_ini_config_with_envvar(testdir, monkeypatch, envvar):
    """ensure environment variable wins over config value if both are present
    """
    # The ini value is deliberately bogus; PYTEST_QT_API must take precedence.
    testdir.makeini(
        """
        [pytest]
        qt_api={option_api}
        """.format(
            option_api="piecute"
        )
    )
    monkeypatch.setenv("PYTEST_QT_API", envvar)
    testdir.makepyfile(
        """
        import pytest
        def test_foo(qtbot):
            pass
        """
    )
    result = testdir.runpytest_subprocess()
    if qt_api.pytest_qt_api.replace("v2", "") == envvar:
        result.stdout.fnmatch_lines(["* 1 passed in *"])
    else:
        # Pick the expected import-error name for this interpreter version.
        try:
            ModuleNotFoundError
        except NameError:
            # Python < 3.6
            result.stderr.fnmatch_lines(["*ImportError:*"])
        else:
            # Python >= 3.6
            result.stderr.fnmatch_lines(["*ModuleNotFoundError:*"])
def test_invalid_qt_api_envvar(testdir, monkeypatch):
    """
    Make sure the error message with an invalid PYTEST_QT_API is correct.
    """
    testdir.makepyfile(
        """
        import pytest
        def test_foo(qtbot):
            pass
        """
    )
    monkeypatch.setenv("PYTEST_QT_API", "piecute")
    result = testdir.runpytest_subprocess()
    result.stderr.fnmatch_lines(["* Invalid value for $PYTEST_QT_API: piecute"])
@pytest.mark.skipif(
    qt_api.pytest_qt_api in ["pyqt4", "pyqt4v2", "pyside"],
    reason="QApplication.arguments() doesn't return custom arguments with Qt4 and Windows",
)
def test_qapp_args(testdir):
    """
    Test customizing of QApplication arguments.
    A session-scoped ``qapp_args`` fixture in conftest should feed extra
    argv entries into the QApplication constructed by pytest-qt.
    """
    testdir.makeconftest(
        """
        import pytest
        @pytest.fixture(scope='session')
        def qapp_args():
            return ['--test-arg']
        """
    )
    testdir.makepyfile(
        """
        def test_args(qapp):
            assert '--test-arg' in list(qapp.arguments())
        """
    )
    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines(["*= 1 passed in *"])
def test_importerror(monkeypatch):
    """When no Qt binding can be imported, set_qt_api must raise RuntimeError
    listing the failure for each candidate binding."""
    def _fake_import(name, *args):
        # Force every binding import to fail with a recognizable message.
        raise ImportError("Failed to import {}".format(name))
    monkeypatch.delenv("PYTEST_QT_API", raising=False)
    monkeypatch.setattr(qt_compat, "_import", _fake_import)
    expected = (
        "pytest-qt requires either PySide, PySide2, PyQt4 or PyQt5 to be installed\n"
        " PyQt4.QtCore: Failed to import PyQt4.QtCore\n"
        " PyQt5.QtCore: Failed to import PyQt5.QtCore\n"
        " PySide.QtCore: Failed to import PySide.QtCore\n"
        " PySide2.QtCore: Failed to import PySide2.QtCore"
    )
    with pytest.raises(RuntimeError, match=expected):
        qt_api.set_qt_api(api=None)
| 28.609959 | 103 | 0.631762 |
23dc64a84527eec3e58d9f5bcb91bf9ec4952486 | 4,415 | py | Python | b_down_file/b_more_part.py | learnman1994/blibli_videodown | cde6018d3e3ee69d0638a0acb8d6024580d3300d | [
"Apache-2.0"
] | 1 | 2022-03-08T02:29:08.000Z | 2022-03-08T02:29:08.000Z | b_down_file/b_more_part.py | cattypapa/blibli_videodown | 2ab1f6df92b1b70df75ef1e8b63c4db2f5b2e1a6 | [
"Apache-2.0"
] | null | null | null | b_down_file/b_more_part.py | cattypapa/blibli_videodown | 2ab1f6df92b1b70df75ef1e8b63c4db2f5b2e1a6 | [
"Apache-2.0"
] | 1 | 2022-01-23T02:12:59.000Z | 2022-01-23T02:12:59.000Z | import requests
import re
import time
try:
import ffmpy
except Exception:
print('请先在终端执行 pip3 install ffmpy')
import os
import random
try:
from lxml import etree
except Exception:
print('请先在终端执行 pip3 install lxml')
from . import get_proxy
from . import mk_folder
def get_names(bv, proxy_list):
    """Fetch the bilibili video page for *bv* and collect the per-part titles.

    Prompts the user for the first/last part (P) to download and delegates
    the actual downloading to :func:`down_vd`.

    :param bv: bilibili BV/AV identifier of the video.
    :param proxy_list: list of proxy mappings as produced by get_proxy.get_proxy().
    """
    url = 'http://www.bilibili.com/video/' + bv
    headers = {
        'referer': url,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/84.0.4147.135 Safari/537.36 '
    }
    proxy = random.choice(proxy_list)
    print('正在发起请求,获取视频集数...')
    # Bug fix: the randomly chosen proxy was previously ignored for this
    # request, even though down_vd() routes all of its requests through it.
    response = requests.get(url=url, headers=headers, proxies=proxy)
    # Part titles appear as "part":"..." entries in the embedded page JSON.
    pattern = re.compile('"part":"(.*?)"', re.S)
    name_list = pattern.findall(response.text)
    # print(name_list)
    print('一共有%dP视频' % len(name_list))
    p_start = input('从第几P开始下载:')
    p_end = input('从第几P结束下载:')
    down_vd(name_list, url, p_start, p_end, proxy)
def down_vd(name_list, url, p_start, p_end, proxy):
    """Download parts p_start..p_end (inclusive, 1-based) of a bilibili video.

    For each part: fetch the page, extract the DASH video/audio base URLs,
    stream both to temporary files with a console progress bar, then merge
    them into one mp4 via ffmpeg and remove the temporaries.

    :param name_list: list of part titles, indexed 0-based (part P maps to name_list[P-1]).
    :param url: base video page URL (without the ?p= query).
    :param p_start: first part number to download (string or int).
    :param p_end: last part number to download (string or int).
    :param proxy: proxies mapping passed to every requests call.
    """
    d_path = mk_folder.mk_folder()
    for i in range(int(p_start), int(p_end)+1):
        # page = str(i + 1)
        page = str(i)
        url1 = url + '?p=' + page
        headers = {
            'referer': url1,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/84.0.4147.135 Safari/537.36 '
        }
        # Sanitize the part title (drop non-word chars) and build "P<i>_<title>.mp4".
        name = ('P' + page + '_' + re.sub(r'\W', '', name_list[i-1]), 'mp4')
        suf = '.'
        vd_name = suf.join(name)
        # file_name = d_path + '\\' + vd_name + '.mp4'
        file_name = os.path.join(d_path, vd_name).replace('\\', '//')
        response = requests.get(url=url1, headers=headers, proxies=proxy)
        content = response.text
        print('正在分析下载地址...')
        # Video stream URL follows "min_buffer_time"; audio follows "audio".
        vd_pattern = re.compile('"min_buffer_time".*?"baseUrl":"(.*?)"')
        vd_url = vd_pattern.findall(content)[0]
        ad_pattern = re.compile('"audio".*"base_url":"(.*?)"')
        ad_url = ad_pattern.findall(content)[0]
        # Temporary files for the raw video and audio tracks before merging.
        vd_file_name = os.path.join(d_path, 'test.mp4').replace('\\', '//')
        ad_file_name = os.path.join(d_path, 'test.aac').replace('\\', '//')
        # NOTE(review): file_name already includes d_path; joining again relies
        # on os.path.join discarding d_path when file_name is absolute — verify
        # behavior when the download directory is a relative path.
        vd_zh_name = os.path.join(d_path, file_name).replace('\\', '//')
        vd_response = requests.get(url=vd_url, headers=headers, proxies=proxy)
        size = 0
        chunk_size = 1024
        content_size = int(vd_response.headers['Content-Length'])
        print('开始下载视频,[视频大小]:{size:.2f} MB'.format(size=content_size / chunk_size / 1024))
        with open(vd_file_name, 'wb') as f:
            for data in vd_response.iter_content(chunk_size):
                f.write(data)
                size += len(data)
                print(
                    '\r' + '[下载进度]:%s%.2f%%' % ('>' * int(size * 50 / content_size), float(size / content_size * 100)),
                    end='')
        print('\n')
        ad_response = requests.get(url=ad_url, headers=headers, proxies=proxy)
        size = 0
        content_size = int(ad_response.headers['Content-Length'])
        print('开始下载音频,[音频大小]:{size:.2f} MB'.format(size=content_size / chunk_size / 1024))
        with open(ad_file_name, 'wb') as f:
            for data in ad_response.iter_content(chunk_size):
                f.write(data)
                size += len(data)
                print(
                    '\r' + '[下载进度]:%s%.2f%%' % ('>' * int(size * 50 / content_size), float(size / content_size * 100)),
                    end='')
        print('\n')
        print('准备合并视频...')
        # time.sleep(0.5)
        # Mux audio + video into the final file without re-encoding.
        ff = ffmpy.FFmpeg(inputs={vd_file_name: None,
                                  ad_file_name: None},
                          outputs={
                              vd_zh_name: '-vcodec copy -acodec copy -loglevel quiet'
                          })
        ff.run()
        file_list = os.listdir(d_path)
        # print(file_list)
        # Clean up the temporary raw streams.
        for f in file_list:
            if f == 'test.mp4':
                os.remove(vd_file_name)
            if f == 'test.aac':
                os.remove(ad_file_name)
        print('视频合并成功~')
        print('*' * 20)
        time.sleep(2)
def main():
    """Entry point: ask for a BV/AV id, fetch proxies, download and time it."""
    bv = input('请输入bv/av号:')
    proxy_list = get_proxy.get_proxy()
    start = time.time()
    get_names(bv, proxy_list)
    end = time.time()
    print('全部下载完成了哦')
    print('下载共耗时%d秒' % (end-start))
if __name__ == '__main__':
    main()
| 36.791667 | 119 | 0.543828 |
701bcebde5ebb6c8973c615a20b88055b57ca284 | 6,160 | py | Python | customtkinter/customtkinter_progressbar.py | o0morgan0o/CustomTkinter | f7d4f902a91d87c7f078963d6e8b757d84abd743 | [
"CC0-1.0"
] | null | null | null | customtkinter/customtkinter_progressbar.py | o0morgan0o/CustomTkinter | f7d4f902a91d87c7f078963d6e8b757d84abd743 | [
"CC0-1.0"
] | null | null | null | customtkinter/customtkinter_progressbar.py | o0morgan0o/CustomTkinter | f7d4f902a91d87c7f078963d6e8b757d84abd743 | [
"CC0-1.0"
] | null | null | null | import tkinter
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
class CTkProgressBar(tkinter.Frame):
    """ tkinter custom progressbar, always horizontal, values are from 0 to 1 """
    def __init__(self,
                 bg_color=None,
                 border_color=CTkColorManager.PROGRESS_BG,
                 fg_color=CTkColorManager.PROGRESS_BG,
                 progress_color=CTkColorManager.MAIN,
                 width=160,
                 height=10,
                 border_width=0,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-draw whenever the global appearance mode (light/dark) changes.
        AppearanceModeTracker.add(self.change_appearance_mode)
        # Inherit the background from a CTkFrame parent, else from the Tk master.
        if bg_color is None:
            if isinstance(self.master, CTkFrame):
                self.bg_color = self.master.fg_color
            else:
                self.bg_color = self.master.cget("bg")
        else:
            self.bg_color = bg_color
        self.border_color = border_color
        self.fg_color = fg_color
        self.progress_color = CTkColorManager.MAIN if progress_color is None else progress_color
        self.appearance_mode = AppearanceModeTracker.get_mode()  # 0: "Light" 1: "Dark"
        self.width = width
        self.height = height
        self.border_width = border_width
        # Current progress in [0, 1]; starts half full.
        self.value = 0.5
        self.configure(width=self.width, height=self.height)
        # NOTE(review): 'highlightthicknes' is misspelled; it appears to rely on
        # Tk's option-name abbreviation matching to resolve to
        # 'highlightthickness' — confirm on all supported Tk versions.
        self.canvas = tkinter.Canvas(master=self,
                                     highlightthicknes=0,
                                     width=self.width,
                                     height=self.height)
        self.canvas.place(x=0, y=0)
        # Canvas item ids for the three layers (border, background, progress).
        self.border_parts = []
        self.fg_parts = []
        self.progress_parts = []
        self.draw()
    def draw(self):
        """Rebuild the border and background canvas items, then repaint progress.

        The rounded-rectangle shape is composed of two ovals (the rounded
        ends) plus a rectangle in between, per layer.
        """
        self.canvas.delete("all")
        self.border_parts = []
        self.fg_parts = []
        self.progress_parts = []
        # frame_border
        self.border_parts.append(self.canvas.create_oval(0, 0,
                                                         self.height, self.height))
        self.border_parts.append(self.canvas.create_rectangle(self.height/2, 0,
                                                              self.width-(self.height/2), self.height))
        self.border_parts.append(self.canvas.create_oval(self.width-self.height, 0,
                                                         self.width, self.height))
        # foreground (inset by border_width so the border stays visible)
        self.fg_parts.append(self.canvas.create_oval(self.border_width, self.border_width,
                                                     self.height-self.border_width, self.height-self.border_width))
        self.fg_parts.append(self.canvas.create_rectangle(self.height/2, self.border_width,
                                                          self.width-(self.height/2), self.height-self.border_width))
        self.fg_parts.append(self.canvas.create_oval(self.width-self.height+self.border_width, self.border_width,
                                                     self.width-self.border_width, self.height-self.border_width))
        # Colors may be (light, dark) tuples indexed by appearance_mode.
        if type(self.bg_color) == tuple:
            self.canvas.configure(bg=self.bg_color[self.appearance_mode])
        else:
            self.canvas.configure(bg=self.bg_color)
        for part in self.border_parts:
            if type(self.border_color) == tuple:
                self.canvas.itemconfig(part, fill=self.border_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.border_color, width=0)
        for part in self.fg_parts:
            if type(self.fg_color) == tuple:
                self.canvas.itemconfig(part, fill=self.fg_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.fg_color, width=0)
        self.set(self.value)
    def set(self, value):
        """Set the displayed progress, clamping *value* into [0, 1]."""
        self.value = value
        if self.value > 1:
            self.value = 1
        elif self.value < 0:
            self.value = 0
        # Remove the previous progress items before drawing the new extent.
        for part in self.progress_parts:
            self.canvas.delete(part)
        # progress: rounded bar whose right edge scales with self.value
        self.progress_parts.append(self.canvas.create_oval(self.border_width,
                                                           self.border_width,
                                                           self.height - self.border_width,
                                                           self.height - self.border_width))
        self.progress_parts.append(self.canvas.create_rectangle(self.height / 2,
                                                                self.border_width,
                                                                self.height / 2 + (self.width - self.height) * self.value,
                                                                self.height - self.border_width))
        self.progress_parts.append(self.canvas.create_oval(self.height / 2 + (self.width - self.height) * self.value - (self.height) / 2 + self.border_width,
                                                           self.border_width,
                                                           self.height / 2 + (self.width - self.height) * self.value + (self.height) / 2 - self.border_width,
                                                           self.height - self.border_width))
        for part in self.progress_parts:
            if type(self.progress_color) == tuple:
                self.canvas.itemconfig(part, fill=self.progress_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.progress_color, width=0)
        self.canvas.update()
        self.canvas.update_idletasks()
    def change_appearance_mode(self, mode_string):
        """Callback from AppearanceModeTracker: switch light/dark and repaint."""
        if mode_string.lower() == "dark":
            self.appearance_mode = 1
        elif mode_string.lower() == "light":
            self.appearance_mode = 0
        # The parent's background may differ per mode, so refresh it too.
        if isinstance(self.master, CTkFrame):
            self.bg_color = self.master.fg_color
        else:
            self.bg_color = self.master.cget("bg")
        self.draw()
| 42.191781 | 157 | 0.538961 |
61f000cf5fac9fd9bc4e516c916e3ca335056d9c | 2,485 | py | Python | backend/web_exceptions.py | research-software-company/t2wml | 82faa2c42ede29fb4e1d50340d7c885741950b4f | [
"MIT"
] | null | null | null | backend/web_exceptions.py | research-software-company/t2wml | 82faa2c42ede29fb4e1d50340d7c885741950b4f | [
"MIT"
] | null | null | null | backend/web_exceptions.py | research-software-company/t2wml | 82faa2c42ede29fb4e1d50340d7c885741950b4f | [
"MIT"
] | null | null | null | def make_frontend_err_dict(error):
'''
convenience function to convert all errors to frontend readable ones
'''
return {
"errorCode": 500,
"errorTitle": "Undefined Backend Error",
"errorDescription": str(error)
}
class WebException(Exception):
    """Base class for errors that are reported to the web client as JSON."""
    # Subclasses override these to customize the HTTP status and title.
    errorTitle = "Undefined web exception"
    code = 400
    def __init__(self, message=""):
        super().__init__(message)
        # Keep the human-readable detail for the JSON payload.
        self.detail_message = message
    @property
    def error_dict(self):
        """Return the JSON-serializable payload describing this error."""
        payload = {"errorCode": self.code}
        payload["errorTitle"] = self.errorTitle
        payload["errorDescription"] = self.detail_message
        return payload
# Concrete WebException subclasses. Each customizes ``errorTitle`` (and, for
# ProjectNotFoundException, the HTTP ``code``); the response body shape is
# inherited from WebException.error_dict.
class ProjectNotFoundException(WebException):
    code = 404
    errorTitle = "Project not found"
class ProjectAlreadyExistsException(WebException):
    errorTitle = "Cannot create new project in folder with existing project"
class NoFilePartException(WebException):
    errorTitle = "Missing file parameter in the upload request"
class BlankFileNameException(WebException):
    errorTitle = "Resource not selected for uploading"
class AuthenticationFailureException(WebException):
    errorTitle = "Authentication failed"
class InvalidRequestException(WebException):
    errorTitle = "Resource requested without appropriate arguments"
class YAMLEvaluatedWithoutDataFileException(WebException):
    errorTitle = "Cannot evaluate YAML file without the data file"
class MissingYAMLFileException(WebException):
    errorTitle = "YAML file not found"
class CellResolutionWithoutYAMLFileException(WebException):
    errorTitle = "Cannot resolve cell without the YAML file"
class WikifyWithoutDataFileException(WebException):
    errorTitle = "Wikification cannot be done without the data file"
class FileTypeNotSupportedException(WebException):
    errorTitle = "This file type is currently not supported"
class InvalidYAMLFileException(WebException):
    errorTitle = "YAML file is either empty or not valid"
class NoSuchDatasetIDException(WebException):
    """Raised when a Datamart dataset id does not exist.

    The detail message embeds spreadsheet-cell instructions telling the user
    how to create the dataset (name in C1, description in D1, url in E1).
    """
    code = 404
    errorTitle = "Datamart Integration Error"
    def __init__(self, dataset_id):
        errorDescription = """Dataset: \"{dataset_id}\" does not exist. To create this dataset,
                    please provide dataset name in cell C1 \n
                    dataset description in cell D1 \n
                    and url in cell E1\n\n""".format(dataset_id=dataset_id)
        super().__init__(errorDescription)
1a2064b04cb8657e18b7220b890694f4a69578a0 | 7,373 | py | Python | common/base.py | nuannuanhcc/3DMPPE_POSENET_RELEASE | e5bb4899137b5a0c8fb26a2ce7beea480bc37974 | [
"MIT"
] | null | null | null | common/base.py | nuannuanhcc/3DMPPE_POSENET_RELEASE | e5bb4899137b5a0c8fb26a2ce7beea480bc37974 | [
"MIT"
] | null | null | null | common/base.py | nuannuanhcc/3DMPPE_POSENET_RELEASE | e5bb4899137b5a0c8fb26a2ce7beea480bc37974 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import math
import time
import glob
import abc
from torch.utils.data import DataLoader
import torch.optim
import torchvision.transforms as transforms
from config import cfg
from dataset import DatasetLoader
from timer import Timer
from logger import colorlogger
from torch.nn.parallel.data_parallel import DataParallel
from model import get_pose_net
# Dynamic dataset import: each dataset module exports a class of the same name
# (``from X import X``), and which datasets to use is only known at runtime
# via cfg, hence the exec-based imports.
for i in range(len(cfg.trainset)):
    exec('from ' + cfg.trainset[i] + ' import ' + cfg.trainset[i])
exec('from ' + cfg.testset + ' import ' + cfg.testset)
class Base(metaclass=abc.ABCMeta):
    """Common scaffolding for Trainer/Tester: timers, logging, checkpoint I/O.

    Bug fix: the original declared ``__metaclass__ = abc.ABCMeta``, which is
    Python 2 syntax and is silently ignored by Python 3, so the
    ``@abc.abstractmethod`` markers were never enforced. Declaring the
    metaclass in the class header restores the intended ABC behavior; both
    subclasses implement the abstract methods, so nothing else changes.
    """
    def __init__(self, log_name='logs.txt'):
        self.cur_epoch = 0
        # timer
        self.tot_timer = Timer()
        self.gpu_timer = Timer()
        self.read_timer = Timer()
        # logger writing into cfg.log_dir
        self.logger = colorlogger(cfg.log_dir, log_name=log_name)
    @abc.abstractmethod
    def _make_batch_generator(self):
        """Build the data loader(s); implemented by subclasses."""
        return
    @abc.abstractmethod
    def _make_model(self):
        """Build (and optionally restore) the network; implemented by subclasses."""
        return
    def save_model(self, state, epoch):
        """Serialize *state* to cfg.model_dir/snapshot_<epoch>.pth.tar."""
        file_path = osp.join(cfg.model_dir,'snapshot_{}.pth.tar'.format(str(epoch)))
        torch.save(state, file_path)
        self.logger.info("Write snapshot into {}".format(file_path))
    def load_model(self, model, optimizer):
        """Restore the newest snapshot from cfg.model_dir.

        Returns ``(start_epoch, model, optimizer)``. ``fix_pose`` and
        ``pre_train`` are hard-coded development switches retained from the
        original code; both are normally False.
        """
        model_file_list = glob.glob(osp.join(cfg.model_dir,'*.pth.tar'))
        # Pick the snapshot with the highest epoch number embedded in its filename.
        cur_epoch = max([int(file_name[file_name.find('snapshot_') + 9 : file_name.find('.pth.tar')]) for file_name in model_file_list])
        ckpt = torch.load(osp.join(cfg.model_dir, 'snapshot_' + str(cur_epoch) + '.pth.tar'))
        fix_pose = False
        if fix_pose:
            # Load weights loosely, freeze backbone+head, re-create the optimizer
            # over the remaining trainable parameters, restart at epoch 1.
            print('loading checkpoint from...', model_file_list)
            model.load_state_dict(ckpt['network'], strict=False)
            for param in model.module.backbone.parameters():
                param.requires_grad = False
            for param in model.module.head.parameters():
                param.requires_grad = False
            optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.module.parameters()), lr=cfg.lr)
            return 1, model, optimizer
        pre_train = False
        if pre_train:
            # Use the checkpoint only as initialization; restart epoch counting.
            print('loading checkpoint from...', model_file_list)
            model.load_state_dict(ckpt['network'], strict=False)
            return 1, model, optimizer
        start_epoch = ckpt['epoch'] + 1
        model.load_state_dict(ckpt['network'])
        optimizer.load_state_dict(ckpt['optimizer'])
        return start_epoch, model, optimizer
class Trainer(Base):
    """Training-side concretization of Base: datasets, optimizer, LR schedule."""
    def __init__(self):
        super(Trainer, self).__init__(log_name = 'train_logs.txt')
    def get_optimizer(self, model):
        """Create the Adam optimizer over all model parameters at cfg.lr."""
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
        return optimizer
    def set_lr(self, epoch):
        """Step-decay the learning rate by cfg.lr_dec_factor at each epoch in
        cfg.lr_dec_epoch (the decay count equals the number of boundaries passed)."""
        # Find the first decay boundary not yet reached.
        for e in cfg.lr_dec_epoch:
            if epoch < e:
                break
        if epoch < cfg.lr_dec_epoch[-1]:
            idx = cfg.lr_dec_epoch.index(e)
            for g in self.optimizer.param_groups:
                g['lr'] = cfg.lr / (cfg.lr_dec_factor ** idx)
        else:
            # Past the last boundary: apply the maximum decay.
            for g in self.optimizer.param_groups:
                g['lr'] = cfg.lr / (cfg.lr_dec_factor ** len(cfg.lr_dec_epoch))
    def get_lr(self):
        """Return the current learning rate (of the last param group; all
        groups are set to the same value by set_lr)."""
        for g in self.optimizer.param_groups:
            cur_lr = g['lr']
        return cur_lr
    def _make_batch_generator(self):
        # data load and construct batch generator
        self.logger.info("Creating dataset...")
        trainset_loader = []
        batch_generator = []
        iterator = []
        for i in range(len(cfg.trainset)):
            if i > 0:
                # When training on several datasets, reuse the first dataset's
                # joint naming. (Original note questioned whether only the
                # first dataset's joints should be used — confirm.)
                ref_joints_name = trainset_loader[0].joints_name
            else:
                ref_joints_name = None
            trainset_loader.append(DatasetLoader(eval(cfg.trainset[i])("train"), ref_joints_name, True, transforms.Compose([\
                                                                                        transforms.ToTensor(),
                                                                                        transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)]\
                                                                                        )))
            # Split the global batch size evenly across the configured datasets.
            batch_generator.append(DataLoader(dataset=trainset_loader[-1], batch_size=cfg.num_gpus*cfg.batch_size//len(cfg.trainset), shuffle=True, num_workers=cfg.num_thread, pin_memory=True))
            iterator.append(iter(batch_generator[-1]))
        self.joint_num = trainset_loader[0].joint_num
        self.itr_per_epoch = math.ceil(trainset_loader[0].__len__() / cfg.num_gpus / (cfg.batch_size // len(cfg.trainset)))
        self.batch_generator = batch_generator
        self.iterator = iterator
    def _make_model(self):
        # prepare network (optionally resuming from the latest snapshot)
        self.logger.info("Creating graph and optimizer...")
        model = get_pose_net(cfg, True, self.joint_num)
        model = DataParallel(model).cuda()
        optimizer = self.get_optimizer(model)
        if cfg.continue_train:
            start_epoch, model, optimizer = self.load_model(model, optimizer)
        else:
            start_epoch = 0
        model.train()
        self.start_epoch = start_epoch
        self.model = model
        self.optimizer = optimizer
class Tester(Base):
    """Evaluation-side concretization of Base: loads a fixed snapshot and
    evaluates predictions on the configured test set."""
    def __init__(self, test_epoch):
        # Epoch number selecting which snapshot file to load.
        self.test_epoch = int(test_epoch)
        super(Tester, self).__init__(log_name = 'test_logs.txt')
    def _make_batch_generator(self):
        # data load and construct batch generator
        self.logger.info("Creating dataset...")
        testset = eval(cfg.testset)("test")
        testset_loader = DatasetLoader(testset, None, False, transforms.Compose([\
            transforms.ToTensor(),
            transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)]\
            ))
        batch_generator = DataLoader(dataset=testset_loader, batch_size=cfg.num_gpus*cfg.test_batch_size, shuffle=False, num_workers=cfg.num_thread, pin_memory=True)
        self.testset = testset
        self.joint_num = testset_loader.joint_num
        self.skeleton = testset_loader.skeleton
        self.flip_pairs = testset.flip_pairs
        self.batch_generator = batch_generator
    def _make_model(self):
        """Load snapshot_<test_epoch> and prepare the model in eval mode."""
        model_path = os.path.join(cfg.model_dir, 'snapshot_%d.pth.tar' % self.test_epoch)
        assert os.path.exists(model_path), 'Cannot find model at ' + model_path
        self.logger.info('Load checkpoint from {}'.format(model_path))
        # prepare network
        self.logger.info("Creating graph...")
        model = get_pose_net(cfg, False, self.joint_num)
        model = DataParallel(model).cuda()
        ckpt = torch.load(model_path)
        model.load_state_dict(ckpt['network'])
        model.eval()
        self.model = model
    def _evaluate(self, preds, result_save_path, global_steps=None):
        """Delegate metric computation to the dataset's own evaluate()."""
        self.testset.evaluate(preds, result_save_path, global_steps)
| 40.289617 | 193 | 0.590397 |
c827953dbb443e2ee3dfdec1340c8ccacdff3b73 | 4,052 | py | Python | agagd/agagd_core/views/beta.py | vash3g/agagd | 67d143909c2f2ab4aeb9e9700967610704f5c0c0 | [
"MIT"
] | null | null | null | agagd/agagd_core/views/beta.py | vash3g/agagd | 67d143909c2f2ab4aeb9e9700967610704f5c0c0 | [
"MIT"
] | null | null | null | agagd/agagd_core/views/beta.py | vash3g/agagd | 67d143909c2f2ab4aeb9e9700967610704f5c0c0 | [
"MIT"
] | null | null | null | # Datetime Imports
from datetime import date, datetime, timedelta
# AGAGD Models Import
import agagd_core.models as agagd_models
# AGAGD Tables Import
import agagd_core.tables.beta as agagd_tables
# Django Imports
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models import F, Q
from django.shortcuts import get_object_or_404, redirect, render
# Django Table Imports
from django_tables2 import RequestConfig
def agagd_paginator_helper(
    request, query_list_object, max_rows_per_page=50, page_request_get_value="pg"
):
    """Paginate *query_list_object* using the page number from the request.

    Falls back to the first page when the requested page is not an integer,
    and to the last page when it is out of range.
    """
    pager = Paginator(query_list_object, max_rows_per_page)
    requested_page = request.GET.get(page_request_get_value, 1)
    try:
        page = pager.page(requested_page)
    except PageNotAnInteger:
        page = pager.page(1)
    except EmptyPage:
        page = pager.page(pager.num_pages)
    return page
def index(request):
    """Render the beta home page: recent games, top-rated players, most active
    players, and a paginated tournament list."""
    # Games from the last ~half year, newest first.
    game_list = agagd_models.Game.objects.filter(
        game_date__gte=datetime.now() - timedelta(days=180)
    ).order_by("-game_date")
    table = agagd_tables.GameTable(game_list, prefix="games")
    topDanList = agagd_models.TopDan.objects.values()
    topDanTable = agagd_tables.TopDanTable(topDanList)
    topKyuList = agagd_models.TopKyu.objects.values()
    topKyuTable = agagd_tables.TopKyuTable(topKyuList)
    mostRatedGamesPastYearList = agagd_models.MostRatedGamesPastYear.objects.values()
    mostRatedGamesTable = agagd_tables.MostRatedGamesPastYearTable(
        mostRatedGamesPastYearList
    )
    mostTournamentsPastYearList = agagd_models.MostTournamentsPastYear.objects.values()
    mostTournamentsPastYearTable = agagd_tables.MostTournamentsPastYearTable(
        mostTournamentsPastYearList
    )
    # Hook table sorting/pagination into the request.
    RequestConfig(request).configure(table)
    tourneys = agagd_models.Tournament.objects.all().order_by("-tournament_date")
    t_table = agagd_tables.TournamentTable(tourneys, prefix="tourneys")
    RequestConfig(request, paginate={"per_page": 10}).configure(t_table)
    return render(
        request,
        "agagd_core/index.beta.html",
        {
            "table": table,
            "top_dan_table": topDanTable,
            "top_kyu_table": topKyuTable,
            "most_rated_games_table": mostRatedGamesTable,
            "most_tournaments_table": mostTournamentsPastYearTable,
            "tournaments": t_table,
        },
    )
def list_all_players(request):
    """Render the paginated member-ratings listing, highest rating first.

    Excludes non-person member types (e-journal, chapter, library,
    institution) and members without a rating.
    """
    list_all_players_query = (
        agagd_models.Member.objects.select_related("chapter_id")
        .filter(status="accepted")
        .filter(players__rating__isnull=False)
        .exclude(type__iexact="e-journal")
        .exclude(type__iexact="chapter")
        .exclude(type__iexact="library")
        .exclude(type__iexact="institution")
        .values(
            "chapter_id",
            "member_id",
            "chapter_id__name",
            "full_name",
            "type",
            "players__rating",
            "state",
            "players__sigma",
        )
        .order_by("-players__rating")
    )
    # Bootstrap classes hiding less important columns on small screens.
    mobile_column_attrs = "d-none d-lg-table-cell d-xl-table-cell"
    list_all_players_columns = (
        {"name": "Name", "attrs": None},
        {"name": "Chapter", "attrs": None},
        {"name": "State", "attrs": mobile_column_attrs},
        {"name": "Type", "attrs": mobile_column_attrs},
        {"name": "Rating", "attrs": None},
        {"name": "Sigma", "attrs": mobile_column_attrs},
    )
    list_all_players_with_pagination = agagd_paginator_helper(
        request, list_all_players_query
    )
    return render(
        request,
        "agagd_core/players_list.html",
        {
            "mobile_column_attrs": mobile_column_attrs,
            "list_all_players_columns": list_all_players_columns,
            "list_all_players_data": list_all_players_with_pagination,
            "page_title": "Members Ratings",
        },
    )
96f90a6df33398a2fd9f6e2219b52c3ab236c000 | 17,148 | py | Python | numba/core/extending.py | crusaderky/numba | ce02012f036e9e5717448176f8dd610f096620bd | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | numba/core/extending.py | crusaderky/numba | ce02012f036e9e5717448176f8dd610f096620bd | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | numba/core/extending.py | crusaderky/numba | ce02012f036e9e5717448176f8dd610f096620bd | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import os
import uuid
import weakref
import collections
import numba
from numba.core import types, errors, utils, config
# Exported symbols
from numba.core.typing.typeof import typeof_impl # noqa: F401
from numba.core.typing.templates import infer, infer_getattr # noqa: F401
from numba.core.imputils import ( # noqa: F401
lower_builtin, lower_getattr, lower_getattr_generic, # noqa: F401
lower_setattr, lower_setattr_generic, lower_cast) # noqa: F401
from numba.core.datamodel import models # noqa: F401
from numba.core.datamodel import register_default as register_model # noqa: F401, E501
from numba.core.pythonapi import box, unbox, reflect, NativeValue # noqa: F401
from numba._helperlib import _import_cython_function # noqa: F401
from numba.core.serialize import ReduceMixin
def type_callable(func):
    """
    Decorate a function as implementing typing for the callable *func*.
    *func* can be a callable object (probably a global) or a string
    denoting a built-in operation (such 'getitem' or '__array_wrap__')
    """
    from numba.core.typing.templates import (CallableTemplate, infer,
                                             infer_global)
    if not callable(func) and not isinstance(func, str):
        raise TypeError("`func` should be a function or string")
    # Use the callable's name when available, else its string form, to build
    # a readable template class name.
    try:
        func_name = func.__name__
    except AttributeError:
        func_name = str(func)
    def decorate(typing_func):
        def generic(self):
            return typing_func(self.context)
        # Synthesize a CallableTemplate subclass keyed on *func* whose
        # generic() delegates to the user-provided typing function.
        name = "%s_CallableTemplate" % (func_name,)
        bases = (CallableTemplate,)
        class_dict = dict(key=func, generic=generic)
        template = type(name, bases, class_dict)
        infer(template)
        # String keys (built-in operations) are registered by infer() alone;
        # real callables are additionally registered as global functions.
        if callable(func):
            infer_global(func, types.Function(template))
        return typing_func
    return decorate
# By default, an *overload* does not have a cpython wrapper because it is not
# callable from python (only from jit-compiled code), so the wrapper would be
# unused.
_overload_default_jit_options = {'no_cpython_wrapper': True}
def overload(func, jit_options={}, strict=True, inline='never'):
    """
    A decorator marking the decorated function as typing and implementing
    *func* in nopython mode.
    The decorated function will have the same formal parameters as *func*
    and be passed the Numba types of those parameters. It should return
    a function implementing *func* for the given types.
    Here is an example implementing len() for tuple types::
        @overload(len)
        def tuple_len(seq):
            if isinstance(seq, types.BaseTuple):
                n = len(seq)
                def len_impl(seq):
                    return n
                return len_impl
    Compiler options can be passed as an dictionary using the **jit_options**
    argument.
    Overloading strictness (that the typing and implementing signatures match)
    is enforced by the **strict** keyword argument, it is recommended that this
    is set to True (default).
    To handle a function that accepts imprecise types, an overload
    definition can return 2-tuple of ``(signature, impl_function)``, where
    the ``signature`` is a ``typing.Signature`` specifying the precise
    signature to be used; and ``impl_function`` is the same implementation
    function as in the simple case.
    If the kwarg inline determines whether the overload is inlined in the
    calling function and can be one of three values:
    * 'never' (default) - the overload is never inlined.
    * 'always' - the overload is always inlined.
    * a function that takes two arguments, both of which are instances of a
      namedtuple with fields:
        * func_ir
        * typemap
        * calltypes
        * signature
      The first argument holds the information from the caller, the second
      holds the information from the callee. The function should return Truthy
      to determine whether to inline, this essentially permitting custom
      inlining rules (typical use might be cost models).
    """
    from numba.core.typing.templates import make_overload_template, infer_global
    # set default options, then let user-supplied options override them
    # (jit_options is only read, never mutated)
    opts = _overload_default_jit_options.copy()
    opts.update(jit_options)  # let user options override
    def decorate(overload_func):
        template = make_overload_template(func, overload_func, opts, strict,
                                          inline)
        infer(template)
        # As in type_callable: string keys only go through infer(); callables
        # are also registered as typed globals.
        if callable(func):
            infer_global(func, types.Function(template))
        return overload_func
    return decorate
def register_jitable(*args, **kwargs):
    """
    Register a regular python function that can be executed by the python
    interpreter and can be compiled into a nopython function when referenced
    by other jit'ed functions. Can be used as::
        @register_jitable
        def foo(x, y):
            return x + y
    Or, with compiler options::
        @register_jitable(_nrt=False) # disable runtime allocation
        def foo(x, y):
            return x + y
    """
    def wrap(fn):
        # It is just a wrapper for @overload: the function overloads itself,
        # so the original remains callable from the interpreter.
        inline = kwargs.pop('inline', 'never')
        @overload(fn, jit_options=kwargs, inline=inline, strict=False)
        def ov_wrap(*args, **kwargs):
            return fn
        return fn
    # Called with options -> return the decorator; called bare -> decorate now.
    if kwargs:
        return wrap
    else:
        return wrap(*args)
def overload_attribute(typ, attr, **kwargs):
    """
    A decorator marking the decorated function as typing and implementing
    attribute *attr* for the given Numba type in nopython mode.
    *kwargs* are passed to the underlying `@overload` call.
    Here is an example implementing .nbytes for array types::
        @overload_attribute(types.Array, 'nbytes')
        def array_nbytes(arr):
            def get(arr):
                return arr.size * arr.itemsize
            return get
    """
    # TODO implement setters
    from numba.core.typing.templates import make_overload_attribute_template
    def decorate(overload_func):
        template = make_overload_attribute_template(
            typ, attr, overload_func,
            inline=kwargs.get('inline', 'never'),
        )
        # Register for attribute typing, and also as a plain overload so the
        # implementation gets compiled.
        infer_getattr(template)
        overload(overload_func, **kwargs)(overload_func)
        return overload_func
    return decorate
def overload_method(typ, attr, **kwargs):
    """
    A decorator marking the decorated function as typing and implementing
    method *attr* for the given Numba type in nopython mode.
    *kwargs* are passed to the underlying `@overload` call.
    Here is an example implementing .take() for array types::
        @overload_method(types.Array, 'take')
        def array_take(arr, indices):
            if isinstance(indices, types.Array):
                def take_impl(arr, indices):
                    n = indices.shape[0]
                    res = np.empty(n, arr.dtype)
                    for i in range(n):
                        res[i] = arr[indices[i]]
                    return res
                return take_impl
    """
    from numba.core.typing.templates import make_overload_method_template
    def decorate(overload_func):
        # Build and register the typing template for ``typ.attr(...)`` ...
        template = make_overload_method_template(
            typ, attr, overload_func,
            inline=kwargs.get('inline', 'never'),
        )
        infer_getattr(template)
        # ... then register the implementation through the regular @overload
        # machinery (the same function serves as both typing and impl).
        overload(overload_func, **kwargs)(overload_func)
        return overload_func
    return decorate
def make_attribute_wrapper(typeclass, struct_attr, python_attr):
    """
    Make an automatic attribute wrapper exposing member named *struct_attr*
    as a read-only attribute named *python_attr*.
    The given *typeclass*'s model must be a StructModel subclass.
    """
    # Local imports: these pull in typing/lowering machinery only when the
    # wrapper is actually created.
    from numba.core.typing.templates import AttributeTemplate
    from numba.core.datamodel import default_manager
    from numba.core.datamodel.models import StructModel
    from numba.core.imputils import impl_ret_borrowed
    from numba.core import cgutils
    if not isinstance(typeclass, type) or not issubclass(typeclass, types.Type):
        raise TypeError("typeclass should be a Type subclass, got %s"
                        % (typeclass,))
    def get_attr_fe_type(typ):
        """
        Get the Numba type of member *struct_attr* in *typ*.
        """
        model = default_manager.lookup(typ)
        if not isinstance(model, StructModel):
            raise TypeError("make_struct_attribute_wrapper() needs a type "
                            "with a StructModel, but got %s" % (model,))
        return model.get_member_fe_type(struct_attr)
    # Typing side: resolve ``obj.python_attr`` to the member's frontend type.
    @infer_getattr
    class StructAttribute(AttributeTemplate):
        key = typeclass
        def generic_resolve(self, typ, attr):
            if attr == python_attr:
                return get_attr_fe_type(typ)
    # Lowering side: proxy the struct value and borrow-return the member.
    @lower_getattr(typeclass, python_attr)
    def struct_getattr_impl(context, builder, typ, val):
        val = cgutils.create_struct_proxy(typ)(context, builder, value=val)
        attrty = get_attr_fe_type(typ)
        attrval = getattr(val, struct_attr)
        return impl_ret_borrowed(context, builder, attrty, attrval)
class _Intrinsic(ReduceMixin):
    """
    Dummy callable for intrinsic.
    Wraps an intrinsic definition function so it can be registered with the
    typing machinery, pickled via the ReduceMixin protocol, and de-duplicated
    across deserializations by an instance-specific UUID.
    """
    # uuid -> instance; weak values so entries vanish once unreferenced.
    _memo = weakref.WeakValueDictionary()
    # hold refs to last N functions deserialized, retaining them in _memo
    # regardless of whether there is another reference
    _recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)
    # Name-mangled (accessed as _Intrinsic__uuid); lazily filled by _uuid.
    __uuid = None
    def __init__(self, name, defn):
        self._name = name
        self._defn = defn
    @property
    def _uuid(self):
        """
        An instance-specific UUID, to avoid multiple deserializations of
        a given instance.
        Note this is lazily-generated, for performance reasons.
        """
        u = self.__uuid
        if u is None:
            u = str(uuid.uuid1())
            self._set_uuid(u)
        return u
    def _set_uuid(self, u):
        # Only callable once per instance; also records the instance in the
        # class-level memo/recent caches.
        assert self.__uuid is None
        self.__uuid = u
        self._memo[u] = self
        self._recent.append(self)
    def _register(self):
        # Register both the typing template and the global function type so
        # jit'ed code can call this intrinsic.
        from numba.core.typing.templates import (make_intrinsic_template,
                                                 infer_global)
        template = make_intrinsic_template(self, self._defn, self._name)
        infer(template)
        infer_global(self, types.Function(template))
    def __call__(self, *args, **kwargs):
        """
        This is only defined to pretend to be a callable from CPython.
        """
        msg = '{0} is not usable in pure-python'.format(self)
        raise NotImplementedError(msg)
    def __repr__(self):
        return "<intrinsic {0}>".format(self._name)
    def __deepcopy__(self, memo):
        # NOTE: Intrinsic are immutable and we don't need to copy.
        # This is triggered from deepcopy of statements.
        return self
    def _reduce_states(self):
        """
        NOTE: part of ReduceMixin protocol
        """
        return dict(uuid=self._uuid, name=self._name, defn=self._defn)
    @classmethod
    def _rebuild(cls, uuid, name, defn):
        """
        NOTE: part of ReduceMixin protocol
        """
        # Reuse the memoized instance when this uuid was seen before;
        # otherwise reconstruct and re-register under the same uuid.
        try:
            return cls._memo[uuid]
        except KeyError:
            llc = cls(name=name, defn=defn)
            llc._register()
            llc._set_uuid(uuid)
            return llc
def intrinsic(*args, **kwargs):
    """
    A decorator marking the decorated function as typing and implementing
    *func* in nopython mode using the llvmlite IRBuilder API. This is an escape
    hatch for expert users to build custom LLVM IR that will be inlined to
    the caller.
    The first argument to *func* is the typing context. The rest of the
    arguments corresponds to the type of arguments of the decorated function.
    These arguments are also used as the formal argument of the decorated
    function. If *func* has the signature ``foo(typing_context, arg0, arg1)``,
    the decorated function will have the signature ``foo(arg0, arg1)``.
    The return values of *func* should be a 2-tuple of expected type signature,
    and a code-generation function that will passed to ``lower_builtin``.
    For unsupported operation, return None.
    Here is an example implementing a ``cast_int_to_byte_ptr`` that cast
    any integer to a byte pointer::
        @intrinsic
        def cast_int_to_byte_ptr(typingctx, src):
            # check for accepted types
            if isinstance(src, types.Integer):
                # create the expected type signature
                result_type = types.CPointer(types.uint8)
                sig = result_type(types.uintp)
                # defines the custom code generation
                def codegen(context, builder, signature, args):
                    # llvm IRBuilder code here
                    [src] = args
                    rtype = signature.return_type
                    llrtype = context.get_value_type(rtype)
                    return builder.inttoptr(src, llrtype)
                return sig, codegen
    """
    # Make inner function for the actual work
    def _intrinsic(func):
        name = getattr(func, '__name__', str(func))
        # NOTE(review): **kwargs is forwarded to _Intrinsic, whose visible
        # __init__ takes only (name, defn) — confirm that option-bearing
        # usage (@intrinsic(...)) is actually supported by this version.
        llc = _Intrinsic(name, func, **kwargs)
        llc._register()
        return llc
    if not kwargs:
        # No option is given
        return _intrinsic(*args)
    else:
        # options are given, create a new callable to recv the
        # definition function
        def wrapper(func):
            return _intrinsic(func)
        return wrapper
def get_cython_function_address(module_name, function_name):
    """
    Get the address of a Cython function.
    Args
    ----
    module_name:
        Name of the Cython module
    function_name:
        Name of the Cython function
    Returns
    -------
    A Python int containing the address of the function
    """
    # Delegates to _import_cython_function, imported elsewhere in this module
    # (presumably from numba's C helper library — confirm at file top).
    return _import_cython_function(module_name, function_name)
def include_path():
    """Return the absolute path of the C include directory."""
    # The include directory sits one level above the installed numba package.
    numba_pkg_dir = os.path.dirname(numba.__file__)
    include_dir = os.path.dirname(numba_pkg_dir)
    return os.path.abspath(include_dir)
def sentry_literal_args(pysig, literal_args, args, kwargs):
    """Ensure every argument named in *literal_args* was typed as a literal.

    *args*/*kwargs* are bound against the python signature *pysig*; if any
    requested argument is not a ``types.Literal``, a ``ForceLiteralArg``
    error carrying the offending argument positions is raised.

    Equivalent to::

        SentryLiteralArgs(literal_args).for_pysig(pysig).bind(*args, **kwargs)
    """
    bound = pysig.bind(*args, **kwargs)
    # Positions of all requested-literal arguments, plus whether any of them
    # is not yet a Literal type.
    requested_positions = set()
    any_non_literal = False
    for position, (name, value) in enumerate(bound.arguments.items()):
        if name not in literal_args:
            continue
        requested_positions.add(position)
        if not isinstance(value, types.Literal):
            any_non_literal = True
    if not any_non_literal:
        return
    err = errors.ForceLiteralArg(requested_positions)

    def folded(args, kwargs):
        # Fold the arguments back into positional order for recompilation.
        return tuple(pysig.bind(*args, **kwargs).arguments.values())

    raise err.bind_fold_arguments(folded)
class SentryLiteralArgs(collections.namedtuple(
        '_SentryLiteralArgs', ['literal_args'])):
    """
    Parameters
    ----------
    literal_args : Sequence[str]
        A sequence of names for literal arguments
    Examples
    --------
    The following line:
    >>> SentryLiteralArgs(literal_args).for_pysig(pysig).bind(*args, **kwargs)
    is equivalent to:
    >>> sentry_literal_args(pysig, literal_args, args, kwargs)
    """
    # namedtuple base: instances are immutable, lightweight holders of the
    # literal argument names until a signature is attached.
    def for_function(self, func):
        """Bind the sentry to the signature of *func*.
        Parameters
        ----------
        func : Function
            A python function.
        Returns
        -------
        obj : BoundLiteralArgs
        """
        return self.for_pysig(utils.pysignature(func))
    def for_pysig(self, pysig):
        """Bind the sentry to the given signature *pysig*.
        Parameters
        ----------
        pysig : inspect.Signature
        Returns
        -------
        obj : BoundLiteralArgs
        """
        return BoundLiteralArgs(
            pysig=pysig,
            literal_args=self.literal_args,
        )
class BoundLiteralArgs(collections.namedtuple(
        'BoundLiteralArgs', ['pysig', 'literal_args'])):
    """Pairs a python signature with the names of required-literal arguments.

    Instances are normally produced by ``SentryLiteralArgs.for_pysig``.
    """
    def bind(self, *args, **kwargs):
        """Check the given argument types; see ``sentry_literal_args``."""
        return sentry_literal_args(self.pysig, self.literal_args, args, kwargs)
def is_jitted(function):
    """Return True when *function* is already wrapped by one of the Numba
    @jit family of decorators (e.g. numba.jit, numba.njit).

    Intended as a guard against accidentally JIT-decorating twice.
    """
    # Local import: Dispatcher is deliberately not re-exported from here.
    from numba.core.dispatcher import Dispatcher
    return isinstance(function, Dispatcher)
| 32.477273 | 87 | 0.639317 |
e077efe3e4fbd0b62fb14039d4cc056aa537a3ab | 591 | py | Python | mnist_model/mnist_ml.py | alexshpunt/ml_practise | 41845288f8572bde3a9c55c1e2f65d349c515d84 | [
"MIT"
] | null | null | null | mnist_model/mnist_ml.py | alexshpunt/ml_practise | 41845288f8572bde3a9c55c1e2f65d349c515d84 | [
"MIT"
] | null | null | null | mnist_model/mnist_ml.py | alexshpunt/ml_practise | 41845288f8572bde3a9c55c1e2f65d349c515d84 | [
"MIT"
] | null | null | null | from mnist_model.mnist_data import *
from sklearn.neighbors import KNeighborsClassifier
from shared.data_utility import *
import pandas as pd
import numpy as np
data = pd.read_csv("data/mnist_small.csv")
def prepare_data(data):
return data
dataTrain, dataTest = build_train_test_data(data)
featuresTrain, featuresTest = build_train_test_features(dataTrain, dataTest, prepare_data)
knn = KNeighborsClassifier(n_neighbors=10)
knn.fit(dataTrain.drop('label', axis=1), dataTrain['label'])
#TODO: Try it with a real image
knn.score(dataTest.drop('label', axis=1), dataTest['label'])
| 29.55 | 90 | 0.783418 |
6a7a472f0fdf864d1d4a56c98f9e3163db9e3aea | 962 | py | Python | test/test_fuzzymatch.py | KBoehme/hops-pipeline | cc1f87f0861b933de29b0ee38a1089721565b9bd | [
"MIT"
] | 5 | 2015-07-04T07:23:51.000Z | 2021-12-20T16:37:30.000Z | test/test_fuzzymatch.py | KBoehme/hops-pipeline | cc1f87f0861b933de29b0ee38a1089721565b9bd | [
"MIT"
] | 3 | 2018-01-10T16:09:47.000Z | 2022-01-30T21:29:23.000Z | test/test_fuzzymatch.py | KBoehme/hops-pipeline | cc1f87f0861b933de29b0ee38a1089721565b9bd | [
"MIT"
] | 1 | 2020-06-23T23:03:07.000Z | 2020-06-23T23:03:07.000Z | minbaseoffset = 4
# Largest genome offset (inclusive) scanned by fuzzy_match_beginning;
# its companion `minbaseoffset = 4` is defined just above.
maxbaseoffset = 6
def fuzzy_match_beginning(pattern, genome, mismatches, min_offset=None, max_offset=None):
    """Look for *pattern* near the start of *genome*.

    Every offset in ``[min_offset, max_offset]`` (inclusive) is tried; a
    window matches when its Hamming distance to *pattern* is at most
    *mismatches*. Offsets default to the module-level ``minbaseoffset`` /
    ``maxbaseoffset``.

    Returns the index just past the matched region, or -1 when no offset
    yields an acceptable match.

    BUG FIX: the original compared ``chunk == pattern`` only, silently
    ignoring the ``mismatches`` argument; with ``mismatches=0`` behavior is
    unchanged.
    """
    start = minbaseoffset if min_offset is None else min_offset
    stop = maxbaseoffset if max_offset is None else max_offset
    for i in range(start, stop + 1):
        chunk = genome[i: i + len(pattern)]
        # A window shorter than the pattern (end of genome) can never match.
        if len(chunk) != len(pattern):
            continue
        # Hamming distance between the window and the pattern.
        differences = sum(1 for a, b in zip(chunk, pattern) if a != b)
        if differences <= mismatches:
            return i + len(pattern)
    return -1
def test_fuzzy_match_beginning():
    # Reads FASTQ records (4 lines each: @name, sequence, '+', quality) and
    # asserts that the adapter/primer below is found in every sequence.
    # Relative path: assumes the test is run from the test/ directory.
    with open('../example_data/base_offset/extended_offset.fastq') as f:
        while True:
            name = f.readline().strip()[1:] # Strip the @ sign infront of fastq names.
            seq = f.readline().strip().upper()
            plus = f.readline().strip()
            score = f.readline().strip()
            if not name or not seq or not plus or not score:
                break # We are done, lets break out of the loop.
            # Exact match required (0 mismatches allowed).
            match = fuzzy_match_beginning('TCGAGATGTGTATAAGAGACAG', seq, 0)
            assert match != -1, "Didn't find a match!"
| 38.48 | 87 | 0.611227 |
7a3e3e15043fe138210398bd8c3d5de1d5171de5 | 1,635 | py | Python | tasks/copy_memory/main.py | abhitoronto/keras-tcn | 3b6880f54bc50fbf2c0dfd2b7cd4b9bc13ac58fa | [
"MIT"
] | null | null | null | tasks/copy_memory/main.py | abhitoronto/keras-tcn | 3b6880f54bc50fbf2c0dfd2b7cd4b9bc13ac58fa | [
"MIT"
] | null | null | null | tasks/copy_memory/main.py | abhitoronto/keras-tcn | 3b6880f54bc50fbf2c0dfd2b7cd4b9bc13ac58fa | [
"MIT"
] | null | null | null | from uuid import uuid4
import tensorflow.keras as keras
import numpy as np
from tcn import compiled_tcn
from utils import data_generator
# Synthetic copy-memory datasets: 30k training / 6k test samples.
# The meaning of (601, 10) is defined by utils.data_generator — presumably
# sequence length and memory size; TODO confirm against utils.
x_train, y_train = data_generator(601, 10, 30000)
x_test, y_test = data_generator(601, 10, 6000)
class PrintSomeValues(keras.callbacks.Callback):
    """Keras callback: before each epoch, print the target digits of the
    first five test sequences next to the model's current predictions."""
    # NOTE(review): mutable default ``logs={}`` — harmless here since it is
    # never mutated, but the Keras convention is ``logs=None``.
    def on_epoch_begin(self, epoch, logs={}):
        print('y_true')
        print(np.array(y_test[:5, -10:].squeeze(), dtype=int))
        print('y_pred')
        # argmax over the class axis gives the predicted digit per step.
        print(self.model.predict(x_test[:5])[:, -10:].argmax(axis=-1))
def run_task():
    """Build a TCN for the copy-memory task, train it for 100 epochs, then
    write the final test accuracy to a uniquely named text file."""
    model = compiled_tcn(num_feat=1,
                         output_layers=[10],
                         nb_filters=10,
                         kernel_size=8,
                         dilations=[2 ** i for i in range(9)],
                         nb_stacks=1,
                         max_len=x_train[0:1].shape[1],
                         use_skip_connections=True,
                         opt='rmsprop',
                         lr=5e-4,
                         return_sequences=True)
    # print(f'x_train.shape = {x_train.shape}')
    # print(f'y_train.shape = {y_train.shape}')
    psv = PrintSomeValues()
    # Using sparse softmax.
    # http://chappers.github.io/web%20micro%20log/2017/01/26/quick-models-in-keras/
    model.summary()
    model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100,
              callbacks=[psv], batch_size=256)
    # evaluate() returns [loss, accuracy]; keep only the accuracy.
    test_acc = model.evaluate(x=x_test, y=y_test)[1]  # accuracy.
    # Short random suffix so repeated runs do not overwrite each other.
    with open("copy_memory_{}.txt".format(str(uuid4())[0:5]), 'w') as w:
        w.write(str(test_acc) + '\n')
# Script entry point.
if __name__ == '__main__':
    run_task()
| 30.277778 | 83 | 0.570642 |
17bad783e86471bcf83c80fb7d3e5b380138d39d | 806 | py | Python | app/core/admin.py | StephPoleo/recipe-app-api | 63269eb720023f15b16c009b46a161ba7621496f | [
"MIT"
] | null | null | null | app/core/admin.py | StephPoleo/recipe-app-api | 63269eb720023f15b16c009b46a161ba7621496f | [
"MIT"
] | null | null | null | app/core/admin.py | StephPoleo/recipe-app-api | 63269eb720023f15b16c009b46a161ba7621496f | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""
    ordering = ["id"]
    list_display = ["email", "name"]
    # Field layout of the change (edit) page; section titles are translatable.
    fieldsets = (
        (None, {"fields": ("email", "password")}),
        (_("Personal Info"), {"fields": ("name",)}),
        (_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser")}),
        (_("Important dates"), {"fields": ("last_login",)}),
    )
    # Field layout of the add page; password1/password2 feed BaseUserAdmin's
    # password-confirmation form.
    add_fieldsets = (
        (None, {"classes": ("wide",), "fields": ("email", "password1", "password2")}),
    )
# Register the custom user admin plus the plain (default) model admins.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredients)
admin.site.register(models.Recipe)
| 31 | 86 | 0.643921 |
fa52cbfcbf0cd83be314cf169c300bc1bdc619ff | 5,062 | py | Python | tletools/pandas.py | kerel-fs/tletools | 6ccf12aea705044fb4c780382739c1cf0ea45e6e | [
"MIT"
] | null | null | null | tletools/pandas.py | kerel-fs/tletools | 6ccf12aea705044fb4c780382739c1cf0ea45e6e | [
"MIT"
] | null | null | null | tletools/pandas.py | kerel-fs/tletools | 6ccf12aea705044fb4c780382739c1cf0ea45e6e | [
"MIT"
] | 1 | 2020-05-12T15:37:26.000Z | 2020-05-12T15:37:26.000Z | """
The module :mod:`tletools.pandas` provides convenience functions to load
two-line element set files into :class:`pandas.DataFrame`'s.'
Given a file ``oneweb.txt`` with the following contents::
ONEWEB-0012
1 44057U 19010A 19290.71624163 .00000233 00000-0 58803-3 0 9997
2 44057 87.9055 22.9851 0002022 94.9226 265.2135 13.15296315 30734
ONEWEB-0010
1 44058U 19010B 19290.71785289 .00000190 00000-0 47250-3 0 9991
2 44058 87.9054 22.9846 0002035 97.1333 263.0028 13.15294565 30783
ONEWEB-0008
1 44059U 19010C 19290.86676214 -.00000034 00000-0 -12353-3 0 9990
2 44059 87.9055 22.9563 0001967 95.9628 264.1726 13.15300216 30897
ONEWEB-0007
1 44060U 19010D 19290.87154896 .00000182 00000-0 45173-3 0 9998
2 44060 87.9067 22.9618 0001714 97.9802 262.1523 13.15299021 30927
ONEWEB-0006
1 44061U 19010E 19290.72095254 .00000179 00000-0 44426-3 0 9991
2 44061 87.9066 22.9905 0001931 95.0539 265.0811 13.15294588 30940
ONEWEB-0011
1 44062U 19010F 19291.17894923 .00000202 00000-0 50450-3 0 9993
2 44062 87.9056 22.8943 0002147 94.8298 265.3077 13.15300820 31002
you can load the TLEs into a :class:`pandas.DataFrame` by using
>>> load_dataframe("oneweb.txt") # doctest: +SKIP
name norad classification int_desig epoch_year epoch_day dn_o2 ddn_o6 bstar set_num inc raan ecc argp M n rev_num epoch
0 ONEWEB-0012 44057 U 19010A 2019 290.716242 2.330000e-06 0.0 0.000588 999 87.9055 22.9851 0.000202 94.9226 265.2135 13.152963 3073 2019-10-17 17:11:23.276832
1 ONEWEB-0010 44058 U 19010B 2019 290.717853 1.900000e-06 0.0 0.000472 999 87.9054 22.9846 0.000204 97.1333 263.0028 13.152946 3078 2019-10-17 17:13:42.489696
2 ONEWEB-0008 44059 U 19010C 2019 290.866762 -3.400000e-07 0.0 -0.000124 999 87.9055 22.9563 0.000197 95.9628 264.1726 13.153002 3089 2019-10-17 20:48:08.248896
3 ONEWEB-0007 44060 U 19010D 2019 290.871549 1.820000e-06 0.0 0.000452 999 87.9067 22.9618 0.000171 97.9802 262.1523 13.152990 3092 2019-10-17 20:55:01.830144
4 ONEWEB-0006 44061 U 19010E 2019 290.720953 1.790000e-06 0.0 0.000444 999 87.9066 22.9905 0.000193 95.0539 265.0811 13.152946 3094 2019-10-17 17:18:10.299456
5 ONEWEB-0011 44062 U 19010F 2019 291.178949 2.020000e-06 0.0 0.000504 999 87.9056 22.8943 0.000215 94.8298 265.3077 13.153008 3100 2019-10-18 04:17:41.213472
You can also load multiple files into a single :class:`pandas.DataFrame` with
>>> from glob import glob
>>> load_dataframe(glob("*.txt")) # doctest: +SKIP
"""
import pandas as pd
from .tle import TLE
from .utils import partition, dt_dt64_Y, dt_td64_us
def load_dataframe(filename, *, computed=False, epoch=True):
    """Load multiple TLEs from one or more files and return a :class:`pandas.DataFrame`.

    :param filename: A single filename (:class:`str`) or an iterable producing filenames.
    :type filename: str or iterable
    :param bool computed: Include computed properties (forwarded to ``TLE.asdict``).
    :param bool epoch: Add a combined ``'epoch'`` datetime column (see :func:`add_epoch`).
    :returns: A :class:`pandas.DataFrame` with all the loaded TLEs.

    **Examples**

    >>> load_dataframe("oneweb.txt")  # doctest: +SKIP

    >>> load_dataframe(["oneweb.txt", "starlink.txt"])  # doctest: +SKIP

    >>> from glob import glob
    >>> load_dataframe(glob("*.txt"))  # doctest: +SKIP
    """
    if isinstance(filename, str):
        # Single file: every TLE is a 3-line record (name + two element lines).
        with open(filename) as fp:
            df = pd.DataFrame(TLE.from_lines(*l012).asdict(computed=computed)
                              for l012 in partition(fp, 3))
        if epoch:
            add_epoch(df)
        return df
    else:
        # Multiple files: recurse per file with epoch=False so the epoch
        # column is computed only once, after concatenation/deduplication.
        # BUG FIX: the recursive call previously went through
        # ``TLE.load_dataframe``; this loader is a module-level function,
        # not an attribute of TLE, so recurse directly.
        df = pd.concat(
            [load_dataframe(fn, computed=computed, epoch=False) for fn in filename],
            ignore_index=True, join='inner', copy=False)
        df.drop_duplicates(inplace=True)
        df.reset_index(drop=True, inplace=True)
        add_epoch(df)
        return df
def add_epoch(df):
    """Add a column ``'epoch'`` to a dataframe.
    `df` must have columns ``'epoch_year'`` and ``'epoch_day'``, from which the
    column ``'epoch'`` is computed.
    :param pandas.DataFrame df: :class:`pandas.DataFrame` instance to modify.
    **Example**
    >>> from pandas import DataFrame
    >>> df = DataFrame([[2018, 31.2931], [2019, 279.3781]],
    ...                columns=['epoch_year', 'epoch_day'])
    >>> add_epoch(df)
    >>> df
       epoch_year  epoch_day                   epoch
    0        2018    31.2931 2018-01-31 07:02:03.840
    1        2019   279.3781 2019-10-06 09:04:27.840
    """
    # Whole years since 1970, then the (1-based) fractional day-of-year
    # converted to microseconds; their sum is the full datetime64 epoch.
    # NOTE(review): assumes dt_dt64_Y / dt_td64_us from .utils are the numpy
    # 'datetime64[Y]' / 'timedelta64[us]' dtypes — consistent with the
    # doctest output above, but confirm against utils.
    years = (df.epoch_year.values - 1970).astype(dt_dt64_Y)
    days = ((df.epoch_day.values - 1) * 86400 * 1000000).astype(dt_td64_us)
    df['epoch'] = years + days
| 49.145631 | 205 | 0.615765 |
9fe50b141ef0acbdd88fee17a1a9691a197dac21 | 3,575 | py | Python | cogs/crypto.py | bill1on/brush | f3f7d8968ffaa4c9b052a565a7a76f58e7c3273d | [
"MIT"
] | 19 | 2021-05-29T19:02:32.000Z | 2021-05-29T19:02:40.000Z | cogs/crypto.py | bill1on/brush | f3f7d8968ffaa4c9b052a565a7a76f58e7c3273d | [
"MIT"
] | null | null | null | cogs/crypto.py | bill1on/brush | f3f7d8968ffaa4c9b052a565a7a76f58e7c3273d | [
"MIT"
] | null | null | null | from re import X
import discord
from discord.ext import commands, tasks
import aiohttp
from datetime import datetime
import time
from utilsdb import sqlt
# Feature flag: when False the API key is never read and the cog's polling
# loop is effectively disabled.
ENABLE_CRYPTO = False
# Minimum transaction value (USD) requested from the Whale Alert API.
MIN_VALUE = 5000000
if ENABLE_CRYPTO:
    # Read at import time; API_KEY.txt must sit in the working directory.
    # NOTE(review): f.read() keeps any trailing newline — .strip() may be
    # intended, since the key is interpolated into a URL below.
    with open('API_KEY.txt', 'r') as f:
        API_KEY = f.read()
@tasks.loop(minutes = 1)
async def whaletrans(channel):
    """Poll the Whale Alert API once a minute and post every new transaction
    (above MIN_VALUE USD) to *channel* as an embed.

    The time of the last successful poll is persisted through ``sqlt`` so
    only transactions newer than the checkpoint are requested.
    """
    embed = discord.Embed(description = 'Gets data from the [whale alert api](https://whale-alert.io/) and posts it every minute.')
    embed.set_author(name = 'Whale alert', icon_url = 'https://pbs.twimg.com/profile_images/1132579647374417921/9ifIGXEQ_400x400.png')
    embed.set_thumbnail(url = 'https://pbs.twimg.com/profile_images/1132579647374417921/9ifIGXEQ_400x400.png')
    t = int(time.time())
    trn = await sqlt.checktime(channel)
    # First run (no stored time) or a stale checkpoint (>= 1 hour old):
    # (re)initialise the checkpoint and skip this cycle rather than request
    # a large backlog from the API.
    # BUG FIX: the original executed ``print(t - trn)`` before this guard,
    # which raised TypeError on the first run (``trn`` is None); the debug
    # prints and a no-op ``dat['count']`` statement were removed.
    if trn is None or int(t - trn) >= 3600:
        await sqlt.updatetime(channel, t)
        return
    async with aiohttp.ClientSession() as session:
        async with session.get(f'https://api.whale-alert.io/v1/transactions?api_key={API_KEY}&min_value={MIN_VALUE}&start={int(trn)}') as response:
            dat = await response.json()
    if dat['count'] == 0:
        # Nothing new; just advance the checkpoint.
        await sqlt.updatetime(channel, t)
        return
    for trans in dat['transactions']:
        embed.clear_fields()
        embed.add_field(name = 'Blockchain', value = f'{trans["blockchain"]}', inline = True)
        embed.add_field(name = 'Coin:', value = f"{trans['symbol']}", inline = True)
        embed.add_field(name = 'Transaction type:', value = f"{trans['transaction_type']}", inline = True)
        embed.add_field(name = 'Transaction hash:', value = f"{trans['hash']}", inline = False)
        embed.add_field(name = f"From (owner type: {trans['from']['owner_type']}):", value = f"{trans['from']['address']}", inline = False)
        embed.add_field(name = f"To (owner type: {trans['to']['owner_type']}):", value = f"{trans['to']['address']}", inline = False)
        embed.add_field(name = 'Amount:', value = f"{'{:,}'.format(trans['amount'])} **{trans['symbol']}** / {'{:,}'.format(trans['amount_usd'])} **USD**", inline = False)
        # NOTE(review): the footer shows the *previous poll* time (trn), not
        # the transaction's own timestamp — confirm whether that is intended.
        embed.set_footer(text = f'{datetime.utcfromtimestamp(int(trn)).strftime("%m-%d | %H:%M")}', icon_url = 'https://cdn.discordapp.com/avatars/695608937503916032/811f272fbeb62b75cc420149edc03018.png')
        await channel.send(embed = embed)
    await sqlt.updatetime(channel, t)
class Crypto(commands.Cog):
    """Cog exposing commands to start/stop whale-alert updates per channel."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def csetup(self, ctx, channel:discord.TextChannel):
        # NOTE(review): every other sqlt call here is awaited, but
        # sqlt.checkcrypto is not — confirm whether it is a sync helper or a
        # missing ``await`` (a bare coroutine object would always be truthy).
        if sqlt.checkcrypto(channel):
            await ctx.send("You can only have 1 channel sending updates.")
        else:
            await sqlt.createcchannel(channel)
            # Start the per-channel background polling loop defined above.
            whaletrans.start(channel)
            await ctx.send("Success!")
    @commands.command()
    async def cstop(self, ctx, channel:discord.TextChannel):
        if not await sqlt.checktime(channel):
            await ctx.send("This channel is not sending updates.")
        else:
            # NOTE(review): only the DB row is removed; the whaletrans loop
            # itself is not cancelled here — confirm it stops on its own.
            # ("Sucessfully" typo is runtime text; left unchanged on purpose.)
            await sqlt.removecrypto(channel)
            await ctx.send("Sucessfully stop sending updates.")
# discord.py extension entry point: attach the Crypto cog to the bot.
def setup(bot):
    bot.add_cog(Crypto(bot))
| 48.310811 | 220 | 0.596084 |
d289c6fa3858b8ce463b0f4dfe61f17b5ef98e44 | 9,188 | py | Python | handlers/merge_commands.py | DurbeKK/tg_pdf_bot | 75a180cc7c876df84a4cac825939898892a43aa9 | [
"MIT"
] | 3 | 2021-12-03T09:27:54.000Z | 2021-12-09T03:19:42.000Z | handlers/merge_commands.py | DurbeKK/tg-pdf-bot | 75a180cc7c876df84a4cac825939898892a43aa9 | [
"MIT"
] | null | null | null | handlers/merge_commands.py | DurbeKK/tg-pdf-bot | 75a180cc7c876df84a4cac825939898892a43aa9 | [
"MIT"
] | null | null | null | """
The part that deals with merging PDF files into one.
(message handlers)
"""
import logging
from os import listdir
from typing import List
from aiogram import types
from aiogram.dispatcher import FSMContext
from loader import bot, dp, input_path, output_path
from PyPDF2 import PdfFileMerger
from states.all_states import MergingStates
from utils.clean_up import reset
@dp.message_handler(commands="done", state=MergingStates.waiting_for_files_to_merge)
async def get_confirmation(message: types.Message, state: FSMContext):
    """
    This handler will be called when user sends `/done` command.
    Gets confirmation on the files that need to be merged.
    """
    # Leave the merging FSM state; the user's next choice arrives through the
    # inline keyboard callbacks ("ask_for_name"/"modify_files"), presumably
    # handled in a callback-query handler elsewhere.
    await state.finish()
    # sorted is called since the file names have corresponding file counts
    # this is done to maintain the order of the files
    # (the files will be merged in the order that the user sends the files in)
    files = sorted(listdir(f"{input_path}/{message.chat.id}"))
    if not files:
        await message.reply("You didn't send any PDF files.")
    elif len(files) == 1:
        await message.reply(
            "You sent only one file. What am I supposed to merge it with?"
        )
    else:
        # since file names are in this format: number_name ("01_cool.pdf")
        # to provide a list of pdfs for the user, we make the list with a
        # list comprehension, not displaing the number part of the file
        # ("01_" in case of "01_cool.pdf")
        file_list = [
            f"{index}. {value[3:]}" for index, value in enumerate(files, start=1)
        ]
        file_list = "\n".join(file_list)
        keyboard = types.InlineKeyboardMarkup()
        buttons = [
            types.InlineKeyboardButton(text="Yes", callback_data="ask_for_name"),
            types.InlineKeyboardButton(text="No", callback_data="modify_files"),
        ]
        keyboard.add(*buttons)
        await message.reply(
            (
                "<b><u>Are these the files that you want to merge?</u></b>\n\n"
                + file_list
            ),
            reply_markup=keyboard,
        )
@dp.message_handler(
    is_media_group=True,
    content_types=types.ContentType.DOCUMENT,
    state=MergingStates.waiting_for_files_to_merge,
)
async def handle_albums(message: types.Message, album: List[types.Message]):
    """
    This handler will be called when user sends a group of files
    as an album for merging. Checks if the files are PDF files and asks
    if there are any more files that need to be merged.
    """
    await message.answer("Downloading files, please wait")
    for obj in album:
        name = obj.document.file_name
        # replacing empty spaces in the file name with underscores
        # if there are spaces in the file name, some of the code does not work
        # there definitely should be a better way of doing this, but i'm dumb
        if " " in name:
            name = name.replace(" ", "_")
        # NOTE(review): returning mid-album aborts the remaining items but
        # keeps any files already downloaded in this loop.
        if not name.lower().endswith(".pdf"):
            return await message.answer("That's not a PDF file.")
        # initially there should be no files in this directory,
        # so to start with "1" for the first file, we add 1
        # the whole reason why we have the file count is so that the order
        # of files is maintained and can be changed around later.
        file_count = len(listdir(f"{input_path}/{message.chat.id}")) + 1
        # to have file counts like "01", "02", etc so that the order is still
        # maintained if the user sends more than 9 files
        # (two-digit padding; lexicographic order would break past 99 files)
        if file_count < 10:
            file_count = "0" + str(file_count)
        await bot.download_file_by_id(
            obj.document.file_id,
            destination=f"{input_path}/{message.chat.id}/{file_count}_{name}",
        )
        logging.info("File downloaded.")
    await message.answer(
        "Great, if you have any more PDF files you want to merge, "
        "send them now. Once you are done, send /done"
    )
@dp.message_handler(
    is_media_group=False,
    content_types=types.message.ContentType.DOCUMENT,
    state=MergingStates.waiting_for_files_to_merge,
)
async def merge_file_received(message: types.Message):
    """
    This handler will be called when user provides a file for merging.
    Checks if the file is a PDF and asks if there are any more files
    that need to be merged.
    """
    name = message.document.file_name
    # Case-insensitive extension check, consistent with the album handler
    # (previously `name.endswith(".pdf")`, which rejected e.g. "FILE.PDF").
    if name.lower().endswith(".pdf"):
        # Spaces in file names break later path handling, so replace them.
        if " " in name:
            name = name.replace(" ", "_")
        # Files are stored as "<NN>_<name>" so that sorting the directory
        # reproduces the order in which the user sent them; the directory is
        # empty at the start, hence the +1 to begin counting at 1.
        file_count = len(listdir(f"{input_path}/{message.chat.id}")) + 1
        # Zero-pad single digits ("01", "02", ...) so ordering survives >9 files.
        if file_count < 10:
            file_count = "0" + str(file_count)
        await message.answer("Downloading the file, please wait")
        await bot.download_file_by_id(
            message.document.file_id,
            destination=f"{input_path}/{message.chat.id}/{file_count}_{name}",
        )
        logging.info("File downloaded")
        await message.reply(
            "Great, if you have any more PDF files you want to merge, "
            "send them now. Once you are done, send /done"
        )
    else:
        await message.reply(
            "That's not a PDF file.",
        )
@dp.message_handler(
    is_media_group=False,
    content_types=types.message.ContentType.DOCUMENT,
    state=MergingStates.waiting_for_specific_file,
)
async def specific_file_received(message: types.Message, state: FSMContext):
    """
    This handler will be called when user sends a file of type `Document`
    that has to be added to a certain position in the list of files (Merging).
    Checks if the file is a PDF and adds it to the desired position in the
    list of files. This is done by naming the file with the appropriate
    file count number.
    After the file is added, triggers the get confirmation function to
    confirm the new list of files.
    """
    name = message.document.file_name
    # Case-insensitive extension check, consistent with the album handler
    # (previously `name.endswith(".pdf")`, which rejected e.g. "FILE.PDF").
    if name.lower().endswith(".pdf"):
        logging.info("Adding a file")
        # Spaces in file names break later path handling, so replace them.
        if " " in name:
            name = name.replace(" ", "_")
        # The desired position of the file was stashed in the FSM state data.
        file_count = await state.get_data()
        file_count = file_count["num"]
        # Zero-pad single digits ("01", "02", ...) so ordering survives >9 files.
        if file_count < 10:
            file_count = "0" + str(file_count)
        await message.answer("Downloading the file, please wait")
        await bot.download_file_by_id(
            message.document.file_id,
            destination=f"{input_path}/{message.chat.id}/{file_count}_{name}",
        )
        logging.info("File downloaded")
        await state.finish()
        # getting confirmation on the new list of files
        await get_confirmation(message, state)
    else:
        await message.reply(
            "That's not a PDF file.",
        )
@dp.message_handler(state=MergingStates.waiting_for_a_name)
async def merge_files(message: types.Message, state: FSMContext):
    """
    This handler will be called when user provides a name for the merged PDF.
    Merges all the input files into one output PDF and sends it to the user.
    """
    await message.answer("Working on it")
    # sorted is called since the file names have corresponding file counts
    # this is done to maintain the order of the files
    # (the files will be merged in the order that the user sends the files in)
    files = sorted(listdir(f"{input_path}/{message.chat.id}"))
    logging.info("Merging started")
    # strict=False: tolerate slightly malformed PDFs instead of raising.
    merger = PdfFileMerger(strict=False)
    for file in files:
        merger.append(f"{input_path}/{message.chat.id}/{file}")
    # replace the white space with underscores if there are spaces
    # otherwise some stuff doesn't work, im too dumb to figure out why for now
    merged_pdf_name = message.text.replace(" ", "_")
    if not message.text.lower().endswith(".pdf"):
        merged_pdf_name = merged_pdf_name + ".pdf"
    # SECURITY NOTE(review): merged_pdf_name comes straight from user text;
    # a name containing "/" or ".." could escape the per-chat output
    # directory — consider sanitising before writing.
    output = f"{output_path}/{message.chat.id}/{merged_pdf_name}"
    merger.write(output)
    # NOTE(review): close() is not in a finally block; an exception in
    # append()/write() would leak the merger's open file handles.
    merger.close()
    with open(output, "rb") as result:
        await message.answer_chat_action(action="upload_document")
        await message.reply_document(result, caption="Here you go")
    logging.info("Sent the document")
    await reset(message, state)
| 36.899598 | 84 | 0.65357 |
0dcfe7f39f438467f28a23d806b6b4c139a672e4 | 7,034 | py | Python | core/model/finetuning/skd_model.py | cjy97/LibFewShot | cffd0f6d9cb9a13cb4eaf0fb69c13f317508591f | [
"MIT"
] | 471 | 2021-09-13T11:28:34.000Z | 2022-03-30T07:26:54.000Z | core/model/finetuning/skd_model.py | cjy97/LibFewShot | cffd0f6d9cb9a13cb4eaf0fb69c13f317508591f | [
"MIT"
] | 24 | 2021-09-22T02:34:05.000Z | 2022-02-19T07:26:39.000Z | core/model/finetuning/skd_model.py | cjy97/LibFewShot | cffd0f6d9cb9a13cb4eaf0fb69c13f317508591f | [
"MIT"
] | 82 | 2021-09-16T12:48:01.000Z | 2022-03-28T06:57:47.000Z | # -*- coding: utf-8 -*-
"""
@article{DBLP:journals/corr/abs-2006-09785,
author = {Jathushan Rajasegaran and
Salman Khan and
Munawar Hayat and
Fahad Shahbaz Khan and
Mubarak Shah},
title = {Self-supervised Knowledge Distillation for Few-shot Learning},
journal = {CoRR},
volume = {abs/2006.09785},
year = {2020},
url = {https://arxiv.org/abs/2006.09785},
archivePrefix = {arXiv},
eprint = {2006.09785}
}
https://arxiv.org/abs/2006.09785
Adapted from https://github.com/brjathu/SKD.
"""
import copy
import numpy as np
import torch
# Optionally accelerate scikit-learn with Intel's sklearnex patch.  The
# original ``try/finally`` still re-raised the ImportError after the finally
# block ran, so importing this module failed whenever sklearnex was absent;
# the accelerator is optional, so fall back to stock sklearn instead.
try:
    from sklearnex import patch_sklearn
    patch_sklearn(verbose=False)  # BUG: sklearnex logs INFO level log, still not fixed this bug
except ImportError:
    pass
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from torch import nn
from torch.nn import functional as F
from core.utils import accuracy
from .finetuning_model import FinetuningModel
from .. import DistillKLLoss
from core.model.loss import L2DistLoss
# FIXME: Add multi-GPU support
class DistillLayer(nn.Module):
    """Frozen snapshot of a teacher network (embedding + classifier).

    When distillation is disabled, or no checkpoint path is supplied, the
    sub-modules stay ``None`` and :meth:`forward` returns ``None``.
    """

    def __init__(
        self,
        emb_func,
        cls_classifier,
        is_distill,
        emb_func_path=None,
        cls_classifier_path=None,
    ):
        super(DistillLayer, self).__init__()
        self.emb_func = self._load_state_dict(emb_func, emb_func_path, is_distill)
        self.cls_classifier = self._load_state_dict(cls_classifier, cls_classifier_path, is_distill)

    def _load_state_dict(self, model, state_dict_path, is_distill):
        """Return a frozen deep copy of ``model`` loaded from disk, else None."""
        if not (is_distill and state_dict_path is not None):
            return None
        checkpoint = torch.load(state_dict_path, map_location="cpu")
        model.load_state_dict(checkpoint)
        return copy.deepcopy(model)

    @torch.no_grad()
    def forward(self, x):
        # Teacher is inactive unless both sub-modules were loaded.
        if self.emb_func is None or self.cls_classifier is None:
            return None
        features = self.emb_func(x)
        return self.cls_classifier(features)
class SKDModel(FinetuningModel):
    """Self-supervised Knowledge Distillation (SKD) few-shot model.

    Generation zero trains a ``num_class``-way linear classifier plus a 4-way
    rotation head (self-supervision).  In the distillation generation
    (``is_distill=True``) the model instead matches a frozen teacher
    (``DistillLayer``) via a KL term and ties the logits of rotated copies to
    the originals with an L2 term.  See https://arxiv.org/abs/2006.09785.
    """
    def __init__(
        self,
        feat_dim,
        num_class,
        gamma=1,
        alpha=1,
        is_distill=False,
        kd_T=4,
        emb_func_path=None,
        cls_classifier_path=None,
        **kwargs
    ):
        super(SKDModel, self).__init__(**kwargs)
        self.feat_dim = feat_dim
        self.num_class = num_class
        self.gamma = gamma  # weight of the classification / KD loss term
        self.alpha = alpha  # weight of the rotation / L2 loss term
        self.is_distill = is_distill
        self.cls_classifier = nn.Linear(self.feat_dim, self.num_class)
        # 4-way head predicting the applied rotation (0/90/180/270 degrees).
        self.rot_classifier = nn.Linear(self.num_class, 4)
        self.ce_loss_func = nn.CrossEntropyLoss()
        self.l2_loss_func = L2DistLoss()
        self.kl_loss_func = DistillKLLoss(T=kd_T)
        # Frozen teacher; only populated when is_distill and paths are given.
        self.distill_layer = DistillLayer(
            self.emb_func,
            self.cls_classifier,
            self.is_distill,
            emb_func_path,
            cls_classifier_path,
        )
    def set_forward(self, batch):
        """Evaluate one batch of episodes.

        Embeds the images without gradients, then per episode fits a logistic
        regression on the support embeddings and classifies the queries.

        :param batch: (images, global targets) tuple.
        :return: (stacked query predictions, mean accuracy in percent).
        """
        image, global_target = batch
        image = image.to(self.device)
        with torch.no_grad():
            feat = self.emb_func(image)
        support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=1)
        episode_size = support_feat.size(0)
        output_list = []
        acc_list = []
        for idx in range(episode_size):
            SF = support_feat[idx]
            QF = query_feat[idx]
            ST = support_target[idx].reshape(-1)
            QT = query_target[idx].reshape(-1)
            classifier = self.set_forward_adaptation(SF, ST)
            # Queries are L2-normalized to match the classifier's inputs.
            QF = F.normalize(QF, p=2, dim=1).detach().cpu().numpy()
            QT = QT.detach().cpu().numpy()
            output = classifier.predict(QF)
            acc = metrics.accuracy_score(QT, output) * 100
            output_list.append(output)
            acc_list.append(acc)
        output = np.stack(output_list, axis=0)
        acc = sum(acc_list) / episode_size
        return output, acc
    def set_forward_loss(self, batch):
        """Training forward pass over one batch.

        Builds rotated copies of the batch and combines the classification /
        distillation loss (weighted by ``gamma``) with the rotation / L2 loss
        (weighted by ``alpha``).

        :param batch: (images, targets) tuple.
        :return: (logits, accuracy, total loss).
        """
        image, target = batch
        image = image.to(self.device)
        target = target.to(self.device)
        batch_size = image.size(0)
        generated_image, generated_target, rot_target = self.rot_image_generation(image, target)
        feat = self.emb_func(generated_image)
        output = self.cls_classifier(feat)
        distill_output = self.distill_layer(image)
        if self.is_distill:
            # First batch_size rows are the un-rotated originals.
            gamma_loss = self.kl_loss_func(output[:batch_size], distill_output)
            alpha_loss = self.l2_loss_func(output[batch_size:], output[:batch_size]) / 3
        else:
            rot_output = self.rot_classifier(output)
            gamma_loss = self.ce_loss_func(output, generated_target)
            alpha_loss = torch.sum(F.binary_cross_entropy_with_logits(rot_output, rot_target))
        loss = gamma_loss * self.gamma + alpha_loss * self.alpha
        acc = accuracy(output, generated_target)
        return output, acc, loss
    def set_forward_adaptation(self, support_feat, support_target):
        """Fit an L2-regularized logistic regression on normalized support features."""
        classifier = LogisticRegression(
            random_state=0,
            solver="lbfgs",
            max_iter=1000,
            penalty="l2",
            multi_class="multinomial",
        )
        support_feat = F.normalize(support_feat, p=2, dim=1).detach().cpu().numpy()
        support_target = support_target.detach().cpu().numpy()
        classifier.fit(support_feat, support_target)
        return classifier
    def rot_image_generation(self, image, target):
        """Create rotated copies of ``image`` plus matching rotation labels.

        Distillation mode uses only 0/180 degrees (hard labels); otherwise all
        four rotations are generated with one-hot rotation targets.
        """
        batch_size = image.size(0)
        images_90 = image.transpose(2, 3).flip(2)
        images_180 = image.flip(2).flip(3)
        images_270 = image.flip(2).transpose(2, 3)
        if self.is_distill:
            generated_image = torch.cat((image, images_180), dim=0)
            generated_target = target.repeat(2)
            rot_target = torch.zeros(batch_size * 4)
            rot_target[batch_size:] += 1
            rot_target = rot_target.long().to(self.device)
        else:
            generated_image = torch.cat([image, images_90, images_180, images_270], dim=0)
            generated_target = target.repeat(4)
            # Labels 0..3 encode the rotation index of each batch slice.
            rot_target = torch.zeros(batch_size * 4)
            rot_target[batch_size:] += 1
            rot_target[batch_size * 2 :] += 1
            rot_target[batch_size * 3 :] += 1
            rot_target = F.one_hot(rot_target.to(torch.int64), 4).float().to(self.device)
        return generated_image, generated_target, rot_target
    def train(self, mode=True):
        """Switch trainable parts; the distillation teacher always stays in eval."""
        self.emb_func.train(mode)
        self.rot_classifier.train(mode)
        self.cls_classifier.train(mode)
        self.distill_layer.train(False)
    def eval(self):
        super(SKDModel, self).eval()
| 30.450216 | 100 | 0.619136 |
f92bcc4fab17fb1dcfd9d5df2b7660261f6400dc | 1,200 | py | Python | day20a.py | Jamibaraki/adventOfCode2017 | dd1518d12595dd2c2554af549f80a0be84a6ba8a | [
"MIT"
] | 2 | 2018-02-26T11:41:55.000Z | 2020-04-12T09:55:03.000Z | day20a.py | Jamibaraki/adventOfCode2017 | dd1518d12595dd2c2554af549f80a0be84a6ba8a | [
"MIT"
] | null | null | null | day20a.py | Jamibaraki/adventOfCode2017 | dd1518d12595dd2c2554af549f80a0be84a6ba8a | [
"MIT"
] | null | null | null | #Advent of Code 20a
# A Particle Swarm
import re
print('Particle Swarm')
#file = open('inputTest.txt','r')
file = open('input20.txt','r')
input = file.read().split('\n')
file.close()
#hypothesis - we just need to figure out the lowest acceleration..
#in the long term that will always be closer
#there may be a tie for this..
lowestTotal = 100000000
closest = []
#parse the particles
#for particle in input:
for x in range( 0, len(input) ):
    # Each line holds three <..> vectors; group[2] is presumably the
    # acceleration vector (p, v, a order) — per the hypothesis above.
    group = re.findall('\<.*?\>',input[x])
    print( str( len( group ) ) )
    print( group[2] )
    nums = group[2].split(',')
    # Manhattan magnitude of the acceleration vector.
    total = 0
    for num in nums:
        num = int( num.strip("<>") )
        print( str(num) )
        total += abs(num)
    # Keep every particle index tied for the smallest acceleration.
    if total < lowestTotal:
        lowestTotal = total
        closest = []
        closest.append(x)
    elif total == lowestTotal:
        closest.append(x)
#for item in group:
#    print( group )
print( "Lowest: " + str( lowestTotal ) )
print( closest )
#copy the closest to candidates list
candidates = []
for item in closest:
    candidates.append( input[item] )
    print( input[item] )
#print( input[119] )
#print( input[299] )
#print( input[516] )
print( str(len(candidates)))
| 22.641509 | 66 | 0.611667 |
d1e1152f46a927d7f7579d5c5ea2023965f2c0fc | 3,182 | py | Python | setup.py | esayyari/q2-feature-engineering | d3419dabd4722818aafd7b13da957222ee4d3cf8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | esayyari/q2-feature-engineering | d3419dabd4722818aafd7b13da957222ee4d3cf8 | [
"BSD-3-Clause"
] | 2 | 2020-10-12T18:48:47.000Z | 2020-10-14T00:19:40.000Z | setup.py | esayyari/q2-feature-engineering | d3419dabd4722818aafd7b13da957222ee4d3cf8 | [
"BSD-3-Clause"
] | 1 | 2020-09-01T23:34:30.000Z | 2020-09-01T23:34:30.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
#!/usr/bin/env python
from distutils.core import setup
from setuptools import find_packages
import sys
import re
import ast
from glob import glob
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
# python version control from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
# This check and everything above must remain compatible with Python 2.7.
# NOTE: REQUIRED_PYTHON is a *minimum* (python_requires='>=3.6' below); the
# previous "> or <" comparison rejected every interpreter except exactly 3.6.
if CURRENT_PYTHON < REQUIRED_PYTHON:
    sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of TADA requires Python {}.{}, but you're trying to
install it on Python {}.{}.
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
    sys.exit(1)
# Parse __version__ out of the package __init__ without importing it.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('q2_feature_engineering/__init__.py', 'rb') as f:
    hit = _version_re.search(f.read().decode('utf-8')).group(1)
    __version__ = str(ast.literal_eval(hit))
long_description = ("TADA: phylogenetic augmentation of microbiome samples enhances phenotype classification")
setup(name='q2-feature-engineering',
      version=__version__,
      long_description=long_description,
      license='BSD-3-Clause',
      description='Qiime2 plugin to facilitate feature extraction for metagenomic analyses.',
      python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
      author = 'Erfan Sayyari, Ban Kawas, Siavash Mirarab',
      author_email = 'smirarabbaygi@eng.ucsd.edu',
      url='https://github.com/tada-alg/TADA/',
      packages=find_packages(),
      install_requires=["dendropy>=4.0.0", "numpy>=1.14.0", "biom-format>=2.1.5","imbalanced-learn>=0.4.3",
                        "scikit-learn>=0.19.1",
                        "scipy>=1.0.0","pandas>=0.22.0", "treeswift", "niemads"],
      scripts=glob("q2_feature_engineering/scripts/*"),
      package_data={'q2_feature_engineering': ['citations.bib'],
                    '': ['data']},
      entry_points={
          'qiime2.plugins':
          ['q2-feature-engineering=q2_feature_engineering.plugin_setup:plugin']
      },
      classifiers=["Environment :: Console",
                   "Intended Audience :: Developers",
                   "Intended Audience :: Science/Research",
                   ("License :: OSI approved:: Berkeley Source Distribution"
                    "License (BSD)"),
                   "Natural Language :: English",
                   "Operating System :: POSIX :: Linux",
                   "Operating System :: MacOS :: MacOS X",
                   "Programming Language :: Python",
                   "Programming Language :: Python :: 3.6",
                   "Topic :: Scientific/Engineering :: Bio-Informatics"])
| 40.794872 | 110 | 0.604023 |
64afa2549204ca8a4f48fbb9b9da5e50531ff765 | 696 | py | Python | pyk/util.py | kubernauts/pyk | 88531b1f09f23c049b3ad7aa9caebfc02a4a420d | [
"Apache-2.0"
] | 6 | 2015-12-29T15:19:52.000Z | 2017-12-15T03:30:15.000Z | pyk/util.py | mhausenblas/pyk | 88531b1f09f23c049b3ad7aa9caebfc02a4a420d | [
"Apache-2.0"
] | null | null | null | pyk/util.py | mhausenblas/pyk | 88531b1f09f23c049b3ad7aa9caebfc02a4a420d | [
"Apache-2.0"
] | 3 | 2016-05-27T18:21:30.000Z | 2019-03-23T20:34:07.000Z | """
Utility functions for the pyk toolkit.
@author: Michael Hausenblas, http://mhausenblas.info/#i
@since: 2015-11-27
@status: init
"""
import yaml
import json
def load_yaml(filename):
    """Read a YAML file and return ``(parsed_doc, json_string)``."""
    with open(filename) as source:
        raw = source.read()
    parsed = yaml.safe_load(raw)
    return parsed, serialize_tojson(parsed)
def serialize_yaml_tofile(filename, resource):
    """
    Serializes a K8S resource to YAML-formatted file.

    Uses ``open`` in a context manager: the original Python-2 ``file(...)``
    builtin no longer exists in Python 3 and also leaked the file handle.
    """
    with open(filename, "w") as stream:
        yaml.dump(resource, stream, default_flow_style=False)
def serialize_tojson(resource):
    """Serialize a K8S resource object to a JSON-formatted string."""
    return json.dumps(resource)
| 21.75 | 57 | 0.675287 |
2ad1e8a36cc22c21268660b7d2b950dc6c3ccf6a | 9,281 | py | Python | planet/control/simulate.py | JingbinLiu/planet_A | 1b072c3bd417d2ecff95653e53079c0cd0bb38bb | [
"Apache-2.0"
] | 5 | 2019-03-29T10:13:30.000Z | 2021-05-02T10:26:15.000Z | planet/control/simulate.py | JingbinLiu/planet_A | 1b072c3bd417d2ecff95653e53079c0cd0bb38bb | [
"Apache-2.0"
] | null | null | null | planet/control/simulate.py | JingbinLiu/planet_A | 1b072c3bd417d2ecff95653e53079c0cd0bb38bb | [
"Apache-2.0"
] | 4 | 2019-03-15T16:25:51.000Z | 2019-05-09T05:31:39.000Z | # Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In-graph simulation step of a vectorized algorithm with environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from planet import tools
from planet.control import batch_env
from planet.control import in_graph_batch_env
from planet.control import mpc_agent
from planet.control import wrappers
from planet.tools import streaming_mean
import numpy as np
def simulate(
    step, env_ctor, duration, num_agents, agent_config,
    env_processes=False, name='simulate'):
  """Build the in-graph rollout-collection op and its merged summary.

  Args:
    step: Global step tensor forwarded to the agent.
    env_ctor: Callable creating one environment instance.
    duration: Number of time steps per rollout.
    num_agents: Number of environments simulated in parallel.
    agent_config: Configuration object forwarded to the MPC agent.
    env_processes: Whether to run each environment in its own process.
    name: Variable scope wrapping the simulation subgraph.

  Returns:
    Tuple of (merged summary tensor, mean episode return tensor).
  """
  summaries = []
  with tf.variable_scope(name):
    return_, image, action, reward = collect_rollouts(
        step=step,
        env_ctor=env_ctor,
        duration=duration,
        num_agents=num_agents,
        agent_config=agent_config,
        env_processes=env_processes)
    return_mean = tf.reduce_mean(return_)
    # Histograms of returns/rewards/actions plus an image strip of frames.
    summaries.append(tf.summary.histogram('return_hist', return_))
    summaries.append(tf.summary.scalar('return', return_mean))
    summaries.append(tf.summary.histogram('reward_hist', reward))
    summaries.append(tf.summary.histogram('action_hist', action))
    summaries.append(tools.image_strip_summary(
        'image', image, max_length=duration))
    summary = tf.summary.merge(summaries)
  return summary, return_mean
def collect_rollouts(
    step, env_ctor, duration, num_agents, agent_config, env_processes):
  """Unroll the batch environment for `duration` steps under the MPC agent.

  Returns:
    Tuple of (scores at episode-end steps, images, actions, rewards); the
    per-step tensors are transposed to put the batch dimension first.
  """
  batch_env = define_batch_env(env_ctor, num_agents, env_processes)
  agent = mpc_agent.MPCAgent(batch_env, step, False, False, agent_config)
  def simulate_fn(unused_last, step):
    # One environment step; reset all agents on the first iteration.
    done, score, unused_summary = simulate_step(
        batch_env, agent,
        log=False,
        reset=tf.equal(step, 0))
    with tf.control_dependencies([done, score]):
      image = batch_env.observ
      batch_action = batch_env.action
      batch_reward = batch_env.reward
    return done, score, image, batch_action, batch_reward
  initializer = (
      tf.zeros([num_agents], tf.bool),
      tf.zeros([num_agents], tf.float32),
      0 * batch_env.observ,
      0 * batch_env.action,
      tf.zeros([num_agents], tf.float32))
  done, score, image, action, reward = tf.scan(
      simulate_fn, tf.range(duration),
      initializer, parallel_iterations=1)
  # Only keep scores of steps where an episode actually terminated.
  score = tf.boolean_mask(score, done)
  image = tf.transpose(image, [1, 0, 2, 3, 4])
  action = tf.transpose(action, [1, 0, 2])
  reward = tf.transpose(reward)
  return score, image, action, reward
def define_batch_env(env_ctor, num_agents, env_processes):
  """Create `num_agents` environments and wrap them for in-graph access.

  When `env_processes` is set each environment runs in an external process
  (non-blocking stepping); otherwise they run in-process.
  """
  with tf.variable_scope('environments'):
    if env_processes:
      envs = [
          wrappers.ExternalProcess(env_ctor)
          for _ in range(num_agents)]
    else:
      envs = [env_ctor() for _ in range(num_agents)]
    env = batch_env.BatchEnv(envs, blocking=not env_processes)
    # # For testing the wrapper class BatchEnv:
    # env.reset()
    # env.step(np.array([[0.5,0.5]]))
    env = in_graph_batch_env.InGraphBatchEnv(env)
    # # For testing the wrapper class BatchEnv:
    # env.reset()
    # env.step(np.array([[0.5,0.5]]))
    return env
def simulate_step(batch_env, algo, log=True, reset=False):
  """Simulation step of a vectorized algorithm with in-graph environments.
  Integrates the operations implemented by the algorithm and the environments
  into a combined operation.
  Args:
    batch_env: In-graph batch environment.
    algo: Algorithm instance implementing required operations.
    log: Tensor indicating whether to compute and return summaries.
    reset: Tensor causing all environments to reset.
  Returns:
    Tuple of tensors containing done flags for the current episodes, possibly
    intermediate scores for the episodes, and a summary tensor.
  """
  def _define_begin_episode(agent_indices):
    """Reset environments, intermediate scores and durations for new episodes.
    Args:
      agent_indices: Tensor containing batch indices starting an episode.
    Returns:
      Summary tensor, new score tensor, and new length tensor.
    """
    assert agent_indices.shape.ndims == 1
    zero_scores = tf.zeros_like(agent_indices, tf.float32)
    zero_durations = tf.zeros_like(agent_indices)
    update_score = tf.scatter_update(score_var, agent_indices, zero_scores)
    update_length = tf.scatter_update(length_var, agent_indices, zero_durations)
    reset_ops = [
        batch_env.reset(agent_indices), update_score, update_length]
    with tf.control_dependencies(reset_ops):
      return algo.begin_episode(agent_indices), update_score, update_length
  def _define_step():
    """Request actions from the algorithm and apply them to the environments.
    Increments the lengths of all episodes and increases their scores by the
    current reward. After stepping the environments, provides the full
    transition tuple to the algorithm.
    Returns:
      Summary tensor, new score tensor, and new length tensor.
    """
    prevob = batch_env.observ + 0  # Ensure a copy of the variable value.
    agent_indices = tf.range(len(batch_env))
    action, step_summary = algo.perform(agent_indices, prevob)  # get action from the planner.
    action.set_shape(batch_env.action.shape)
    with tf.control_dependencies([batch_env.step(action)]):  # interact with the env.
      add_score = score_var.assign_add(batch_env.reward)
      inc_length = length_var.assign_add(tf.ones(len(batch_env), tf.int32))
    with tf.control_dependencies([add_score, inc_length]):
      agent_indices = tf.range(len(batch_env))
      experience_summary = algo.experience(
          agent_indices, prevob,
          batch_env.action,
          batch_env.reward,
          batch_env.done,
          batch_env.observ)
      summary = tf.summary.merge([step_summary, experience_summary])
    return summary, add_score, inc_length
  def _define_end_episode(agent_indices):
    """Notify the algorithm of ending episodes.
    Also updates the mean score and length counters used for summaries.
    Args:
      agent_indices: Tensor holding batch indices that end their episodes.
    Returns:
      Summary tensor.
    """
    assert agent_indices.shape.ndims == 1
    submit_score = mean_score.submit(tf.gather(score, agent_indices))
    submit_length = mean_length.submit(
        tf.cast(tf.gather(length, agent_indices), tf.float32))
    with tf.control_dependencies([submit_score, submit_length]):
      return algo.end_episode(agent_indices)
  def _define_summaries():
    """Reset the average score and duration, and return them as summary.
    Returns:
      Summary string.
    """
    score_summary = tf.cond(
        tf.logical_and(log, tf.cast(mean_score.count, tf.bool)),
        lambda: tf.summary.scalar('mean_score', mean_score.clear()), str)
    length_summary = tf.cond(
        tf.logical_and(log, tf.cast(mean_length.count, tf.bool)),
        lambda: tf.summary.scalar('mean_length', mean_length.clear()), str)
    return tf.summary.merge([score_summary, length_summary])
  with tf.name_scope('simulate'):
    log = tf.convert_to_tensor(log)
    reset = tf.convert_to_tensor(reset)
    # Per-agent running score/length live in local (non-trainable) variables.
    with tf.variable_scope('simulate_temporary'):
      score_var = tf.get_variable(
          'score', (len(batch_env),), tf.float32,
          tf.constant_initializer(0),
          trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
      length_var = tf.get_variable(
          'length', (len(batch_env),), tf.int32,
          tf.constant_initializer(0),
          trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
    mean_score = streaming_mean.StreamingMean((), tf.float32, 'mean_score')
    mean_length = streaming_mean.StreamingMean((), tf.float32, 'mean_length')
    # Phase 1: begin new episodes for agents that are done (or all on reset).
    agent_indices = tf.cond(
        reset,
        lambda: tf.range(len(batch_env)),
        lambda: tf.cast(tf.where(batch_env.done)[:, 0], tf.int32))
    begin_episode, score, length = tf.cond(
        tf.cast(tf.shape(agent_indices)[0], tf.bool),
        lambda: _define_begin_episode(agent_indices),
        lambda: (str(), score_var, length_var))
    # Phase 2: act and step the environments (after resets have happened).
    with tf.control_dependencies([begin_episode]):
      step, score, length = _define_step()
    # Phase 3: close out episodes that finished during this step.
    with tf.control_dependencies([step]):
      agent_indices = tf.cast(tf.where(batch_env.done)[:, 0], tf.int32)
      end_episode = tf.cond(
          tf.cast(tf.shape(agent_indices)[0], tf.bool),
          lambda: _define_end_episode(agent_indices), str)
    with tf.control_dependencies([end_episode]):
      summary = tf.summary.merge([
          _define_summaries(), begin_episode, step, end_episode])
    with tf.control_dependencies([summary]):
      score = 0.0 + score
      done = batch_env.done
    return done, score, summary
| 38.510373 | 109 | 0.707898 |
b1388fd18225e9188b2c6ec5ce1444a127244d4d | 1,851 | py | Python | DeepLearningServer/Scripts/Falconapp.py | Criscraft/DeepVisionVRServer | a649cbf236effd2a9bab61be0ed370e690d17ef0 | [
"MIT"
] | null | null | null | DeepLearningServer/Scripts/Falconapp.py | Criscraft/DeepVisionVRServer | a649cbf236effd2a9bab61be0ed370e690d17ef0 | [
"MIT"
] | null | null | null | DeepLearningServer/Scripts/Falconapp.py | Criscraft/DeepVisionVRServer | a649cbf236effd2a9bab61be0ed370e690d17ef0 | [
"MIT"
] | null | null | null | import falcon
def start_server(DLWebServer):
    """Create the falcon WSGI app and register all DLWebServer REST routes."""
    app = application = falcon.App()
    # Diagnostic endpoints.
    app.add_route('/testshortresource', DLWebServer.TestShortResource())
    app.add_route('/testlongresource', DLWebServer.TestLongResource())
    # Network listing / architecture and per-layer inspection endpoints.
    app.add_route('/network', DLWebServer.NetworkResource())
    app.add_route('/network/{networkid:int}', DLWebServer.NetworkArchitectureResource())
    app.add_route('/network/{networkid:int}/activation/layerid/{layerid:int}', DLWebServer.NetworkActivationImageResource())
    app.add_route('/network/{networkid:int}/featurevisualization/layerid/{layerid:int}', DLWebServer.NetworkFeatureVisualizationResource())
    app.add_route('/network/{networkid:int}/prepareforinput', DLWebServer.NetworkPrepareForInputResource())
    app.add_route('/network/{networkid:int}/classificationresult', DLWebServer.NetworkClassificationResultResource())
    app.add_route('/network/{networkid:int}/weighthistogram/layerid/{layerid:int}', DLWebServer.NetworkWeightHistogramResource())
    app.add_route('/network/{networkid:int}/activationhistogram/layerid/{layerid:int}', DLWebServer.NetworkActivationHistogramResource())
    # Feature-visualization management (generate / load / delete) and export.
    app.add_route('/network/{networkid:int}/setnetworkgenfeatvis', DLWebServer.NetworkSetNetworkGenFeatVisResource())
    app.add_route('/network/{networkid:int}/setnetworkloadfeatvis', DLWebServer.NetworkSetNetworkLoadFeatVisResource())
    app.add_route('/network/{networkid:int}/setnetworkdeletefeatvis', DLWebServer.NetworkSetNetworkDeleteFeatVisResource())
    app.add_route('/network/{networkid:int}/export/layerid/{layerid:int}', DLWebServer.NetworkExportLayerResource())
    # Dataset and noise-image endpoints.
    app.add_route('/dataset/{datasetid:int}/images', DLWebServer.DataImagesResource())
    app.add_route('/noiseimage/{noiseid:int}', DLWebServer.DataNoiseImageResource())
return application | 61.7 | 140 | 0.776877 |
adb1a3a590a3acaa310d639c501e22246499c5cf | 251 | py | Python | conftest.py | pitbulk/sentry-auth-okta | 19d9e0304b55e64a1f475091907ebf1816a9f1b4 | [
"Apache-2.0"
] | null | null | null | conftest.py | pitbulk/sentry-auth-okta | 19d9e0304b55e64a1f475091907ebf1816a9f1b4 | [
"Apache-2.0"
] | null | null | null | conftest.py | pitbulk/sentry-auth-okta | 19d9e0304b55e64a1f475091907ebf1816a9f1b4 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
# Run tests against sqlite for simplicity
os.environ.setdefault('DB', 'sqlite')
pytest_plugins = [
'sentry.utils.pytest'
]
| 17.928571 | 59 | 0.752988 |
27e221b4aa5123c90105554b4efb7b497496878a | 2,663 | py | Python | 2017/day15.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
] | null | null | null | 2017/day15.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
] | null | null | null | 2017/day15.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
] | null | null | null | """
2017 Day 15
https://adventofcode.com/2017/day/15
"""
from collections import deque
from typing import Dict, Sequence
import aocd # type: ignore
import regex as re # type: ignore
class Judge:
    """Counts matching value pairs streamed in from generators "A" and "B"."""

    def __init__(self, comparisons: int):
        self.comparisons = comparisons  # comparisons still to perform
        self.count = 0                  # matches observed so far
        self.queues: Dict[str, deque] = {"A": deque(), "B": deque()}

    @property
    def completed(self) -> bool:
        return self.comparisons <= 0

    def check(self) -> None:
        """Consume paired entries from both queues and tally equal pairs."""
        while self.queues["A"] and self.queues["B"]:
            a_value = self.queues["A"].popleft()
            b_value = self.queues["B"].popleft()
            self.comparisons -= 1
            if a_value == b_value:
                self.count += 1

    def report(self, source: str, hex_number: str) -> None:
        """Queue a value from generator `source` unless judging has finished."""
        if self.completed:
            return
        self.queues.get(source, deque()).append(hex_number)
        self.check()
class Generator:
    """One of the two puzzle generators; streams low-16-bit values to judges."""

    def __init__(
        self, name: str, start_value: int, judge: Judge, filtered_judge: Judge
    ):
        self.name = name
        self.value = start_value
        # Generator A multiplies by 16807 and is filtered on multiples of 4;
        # generator B uses 48271 and multiples of 8.
        if name == "A":
            self.factor, self.check = 16807, 4
        else:
            self.factor, self.check = 48271, 8
        self.judge = judge
        self.filtered_judge = filtered_judge

    @staticmethod
    def hexstart(number: int) -> str:
        """Low 16 bits of *number* as a zero-padded binary string."""
        return format(number, "016b")[-16:]

    def run(self) -> None:
        """Emit the current value to both judges, then advance the sequence."""
        low_bits = self.hexstart(self.value)
        self.judge.report(self.name, low_bits)
        if self.value % self.check == 0:
            self.filtered_judge.report(self.name, low_bits)
        self.value = (self.value * self.factor) % 2147483647
RE_GENERATORS = re.compile(r"Generator (\w) starts with (\d+)")


def read_start_values(text: str) -> Dict[str, int]:
    """Parse the puzzle input into a {generator name: start value} mapping."""
    values: Dict[str, int] = {}
    for name, start in RE_GENERATORS.findall(text):
        values[name] = int(start)
    return values
def run_full_check(start_values: Dict[str, int]) -> Sequence[int]:
    """Run both generators until each judge has made all its comparisons.

    Returns the (part-1, part-2) match counts.
    """
    plain = Judge(40_000_000)
    filtered = Judge(5_000_000)
    generators = tuple(
        Generator(name, start_values[name], plain, filtered)
        for name in ("A", "B")
    )
    while not (plain.completed and filtered.completed):
        for generator in generators:
            generator.run()
    return (plain.count, filtered.count)
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    puzzle_input = aocd.get_data(year=2017, day=15)
    start_values = read_start_values(puzzle_input)
    answers = run_full_check(start_values)
    print(f"Part 1: {answers[0]}")
    print(f"Part 2: {answers[1]}")


if __name__ == "__main__":
    main()
| 26.107843 | 78 | 0.591814 |
16861f728525914da70ebd4a41fe0de6dbf4afae | 1,221 | py | Python | tests/terraform/checks/resource/aws/test_AthenaWorkgroupConfiguration.py | antonblr/checkov | 9415c6593c537945c08f7a19f28bdd8b96966f67 | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | tests/terraform/checks/resource/aws/test_AthenaWorkgroupConfiguration.py | antonblr/checkov | 9415c6593c537945c08f7a19f28bdd8b96966f67 | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | tests/terraform/checks/resource/aws/test_AthenaWorkgroupConfiguration.py | antonblr/checkov | 9415c6593c537945c08f7a19f28bdd8b96966f67 | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | import unittest
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.AthenaWorkgroupConfiguration import check
class TestAthenaWorkgroupConfiguration(unittest.TestCase):
    """Unit tests for the AthenaWorkgroupConfiguration checkov check."""
    def test_failure(self):
        # Explicitly disabling enforce_workgroup_configuration must FAIL.
        resource_conf = {
            "name": "Example",
            "configuration": [
                {
                    "enforce_workgroup_configuration": False,
                }
            ],
        }
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.FAILED, scan_result)
    def test_success(self):
        # No configuration block at all is expected to PASS.
        resource_conf = {
            "name": "Example",
        }
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.PASSED, scan_result)
    def test_success_full(self):
        # Explicitly enabling enforcement must PASS.
        resource_conf = {
            "name": "Example",
            "configuration": [
                {
                    "enforce_workgroup_configuration": True,
                }
            ],
        }
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
    unittest.main()
| 27.133333 | 84 | 0.59869 |
070ddbf3091205484a63003a8812aa25de400bbf | 12,649 | py | Python | OGBL_Collab/unify/ogb/ogbl_collab/pruning.py | x-zho14/Unified-LTH-GNN | edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55 | [
"MIT"
] | 29 | 2021-02-17T02:46:54.000Z | 2022-03-18T02:09:03.000Z | OGBL_Collab/unify/ogb/ogbl_collab/pruning.py | x-zho14/Unified-LTH-GNN | edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55 | [
"MIT"
] | 1 | 2021-09-03T13:30:50.000Z | 2021-09-03T13:30:50.000Z | OGBL_Collab/unify/ogb/ogbl_collab/pruning.py | x-zho14/Unified-LTH-GNN | edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55 | [
"MIT"
] | 10 | 2021-04-01T16:27:03.000Z | 2022-03-07T09:20:38.000Z | import torch
import torch.nn as nn
from abc import ABC
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import pdb
import torch.nn.init as init
import math
from tqdm import tqdm
def resume_change(resume_ckpt, model, args):
    """Rebuild the rewind mask from a resumed checkpoint and report sparsity.

    Copies the checkpoint's *fixed* masks into both the trainable and fixed
    slots of the rewind mask (so the next IMP round starts from the surviving
    structure), then computes the remaining adjacency / weight percentages.

    Returns (rewind_weight_mask, adj_sparsity_pct, weight_sparsity_pct).
    """
    model_state_dict = resume_ckpt['model_state_dict']
    rewind_weight_mask = resume_ckpt['rewind_weight_mask']
    rewind_weight_mask['edge_mask1_train'] = model_state_dict['edge_mask2_fixed']
    rewind_weight_mask['edge_mask2_fixed'] = model_state_dict['edge_mask2_fixed']
    adj_remain = rewind_weight_mask['edge_mask2_fixed'].sum()
    adj_total = rewind_weight_mask['edge_mask2_fixed'].numel()
    wei_remain = 0
    wei_total = 0
    for i in range(args.num_layers):
        key_train = 'gcns.{}.mlp.0.weight_mask_train'.format(i)
        key_fixed = 'gcns.{}.mlp.0.weight_mask_fixed'.format(i)
        rewind_weight_mask[key_train] = model_state_dict[key_fixed]
        rewind_weight_mask[key_fixed] = rewind_weight_mask[key_train]
        wei_total += rewind_weight_mask[key_fixed].numel()
        wei_remain += rewind_weight_mask[key_fixed].sum()
    # Percentage of surviving (unpruned) entries.
    adj_spar = adj_remain * 100 / adj_total
    wei_spar = wei_remain * 100 / wei_total
    print("resume :adj{:.2f} \t wei{:.2f}".format(adj_spar, wei_spar))
    return rewind_weight_mask, adj_spar, wei_spar
def change(rewind_weight, model, args):
    """Copy the model's current fixed masks into the rewind mask.

    Like :func:`resume_change` but reads from a live ``model`` rather than a
    saved checkpoint.  Returns (rewind_weight, adj_sparsity_pct,
    weight_sparsity_pct), where sparsity is the percentage of surviving
    entries.
    """
    rewind_weight['edge_mask1_train'] = model.state_dict()['edge_mask2_fixed']
    rewind_weight['edge_mask2_fixed'] = model.state_dict()['edge_mask2_fixed']
    adj_remain = rewind_weight['edge_mask2_fixed'].sum()
    adj_total = rewind_weight['edge_mask2_fixed'].numel()
    wei_remain = 0
    wei_total = 0
    for i in range(args.num_layers):
        key_train = 'gcns.{}.mlp.0.weight_mask_train'.format(i)
        key_fixed = 'gcns.{}.mlp.0.weight_mask_fixed'.format(i)
        rewind_weight[key_train] = model.gcns[i].mlp[0].state_dict()['weight_mask_fixed']
        rewind_weight[key_fixed] = rewind_weight[key_train]
        wei_total += rewind_weight[key_fixed].numel()
        wei_remain += rewind_weight[key_fixed].sum()
    adj_spar = adj_remain * 100 / adj_total
    wei_spar = wei_remain * 100 / wei_total
    return rewind_weight, adj_spar, wei_spar
def save_all(model, predictor, rewind_weight, optimizer, imp_num, epoch, save_path, save_name='default'):
    """Checkpoint model, predictor, rewind mask and optimizer state.

    Writes ``<save_path>/<save_name>.pth`` via ``torch.save``; the target
    directory is created when missing.
    """
    state = {
        'imp_num': imp_num,
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'predictor_state_dict': predictor.state_dict(),
        'rewind_weight_mask': rewind_weight,
        'optimizer_state_dict': optimizer.state_dict()
    }
    if not os.path.exists(save_path):
        # makedirs (not mkdir) so nested save paths are created as well.
        os.makedirs(save_path)
        print("Directory ", save_path, " is created.")
    filename = '{}/{}.pth'.format(save_path, save_name)
    torch.save(state, filename)
def print_args(args, str_num=80):
    """Pretty-print every attribute of ``args`` as a ``name....value`` row."""
    for name, value in args.__dict__.items():
        filler = '.' * (str_num - len(name) - len(str(value)))
        print(name + filler + str(value))
    print()
def setup_seed(seed):
    """Seed torch (CPU + all GPUs), numpy and random for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels trade speed for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    np.random.seed(seed)
    random.seed(seed)
class AddTrainableMask(ABC):
    """Forward-pre-hook that reparameterizes a module weight as
    ``weight_mask_train * weight_mask_fixed * weight_orig_weight``.

    ``apply`` replaces the named parameter with three registered parameters
    (trainable mask, fixed mask, original weight) and installs an instance of
    this class as a hook so the masked product is recomputed before every
    forward pass.
    """
    # Name of the parameter being masked (e.g. "weight"); set by ``apply``.
    _tensor_name: str
    def __init__(self):
        pass
    def __call__(self, module, inputs):
        # Recompute the masked weight right before each forward pass.
        setattr(module, self._tensor_name, self.apply_mask(module))
    def apply_mask(self, module):
        """Return trainable mask * fixed mask * original weight."""
        mask_train = getattr(module, self._tensor_name + "_mask_train")
        mask_fixed = getattr(module, self._tensor_name + "_mask_fixed")
        orig_weight = getattr(module, self._tensor_name + "_orig_weight")
        pruned_weight = mask_train * mask_fixed * orig_weight
        return pruned_weight
    @classmethod
    def apply(cls, module, name, mask_train, mask_fixed, *args, **kwargs):
        """Install the mask parametrization for ``module.<name>``."""
        method = cls(*args, **kwargs)
        method._tensor_name = name
        orig = getattr(module, name)
        module.register_parameter(name + "_mask_train", mask_train.to(dtype=orig.dtype))
        module.register_parameter(name + "_mask_fixed", mask_fixed.to(dtype=orig.dtype))
        module.register_parameter(name + "_orig_weight", orig)
        # The plain parameter is removed; the masked product takes its place.
        del module._parameters[name]
        setattr(module, name, method.apply_mask(module))
        module.register_forward_pre_hook(method)
        return method
def add_mask(model, args):
    """Attach all-ones trainable/fixed masks to every GCN layer's first MLP weight."""
    for i in range(args.num_layers):
        mask_train = nn.Parameter(torch.ones_like(model.gcns[i].mlp[0].weight))
        # The fixed mask is never trained; it records already-pruned entries.
        mask_fixed = nn.Parameter(torch.ones_like(model.gcns[i].mlp[0].weight), requires_grad=False)
        AddTrainableMask.apply(model.gcns[i].mlp[0], 'weight', mask_train, mask_fixed)
def subgradient_update_mask(model, args):
    """Add L1 subgradient terms to the mask gradients (s1: adjacency mask, s2: weight masks)."""
    adj_mask = model.edge_mask1_train
    adj_mask.grad.data.add_(args.s1 * torch.sign(adj_mask.data))
    for layer in range(args.num_layers):
        wei_mask = model.gcns[layer].mlp[0].weight_mask_train
        wei_mask.grad.data.add_(args.s2 * torch.sign(wei_mask.data))
def get_soft_mask_distribution(model, args):
    """Collect the non-zero soft-mask values for the adjacency and all layer weights.

    Returns:
        adj_mask_vector: 1-D CPU tensor of non-zero adjacency-mask entries
            (trainable mask gated by the fixed mask).
        weight_mask_vector: 1-D CPU tensor of non-zero weight-mask entries
            concatenated over all `args.num_layers` layers.
        weight_total: total number of weight-mask elements (zero or not).
    """
    weight_total = 0
    edge_mask1_train = (model.edge_mask1_train * model.edge_mask2_fixed).detach()
    adj_mask_vector = edge_mask1_train.flatten()
    adj_mask_vector = adj_mask_vector[torch.abs(adj_mask_vector) > 0]
    # Accumulate on the same device as the masks. This was hard-coded to
    # torch.device("cuda:0"), which crashed on CPU-only runs and on models
    # living on any other GPU.
    weight_mask_vector = torch.tensor([], device=model.edge_mask1_train.device)
    for i in range(args.num_layers):
        weight_mask = model.gcns[i].mlp[0].weight_mask_train
        weight_total += weight_mask.numel()
        flat = weight_mask.flatten()
        flat = flat[torch.abs(flat) > 0]
        weight_mask_vector = torch.cat((weight_mask_vector, flat))
    return adj_mask_vector.detach().cpu(), weight_mask_vector.detach().cpu(), weight_total
# def get_each_mask(mask_weight_tensor, threshold):
# ones = torch.ones_like(mask_weight_tensor)
# zeros = torch.zeros_like(mask_weight_tensor)
# mask = torch.where(mask_weight_tensor.abs() > threshold, ones, zeros)
# return mask
def get_each_mask(mask_weight_tensor, threshold, ifone=True):
    """Threshold a soft mask by magnitude.

    Entries with |value| > threshold survive; the rest become zero. When
    `ifone` is True the survivors become 1.0 (hard binary mask); otherwise
    their original values are kept (pruned soft mask).
    """
    keep = mask_weight_tensor.abs() > threshold
    if ifone:
        survivors = torch.ones_like(mask_weight_tensor)
    else:
        survivors = mask_weight_tensor
    return torch.where(keep, survivors, torch.zeros_like(mask_weight_tensor))
def pruning_mask(model, args):
    """One soft-pruning step: zero out the lowest-magnitude fraction of the
    adjacency mask and of the per-layer weight masks, in place on `model`.

    The per-call pruning fraction is the target percentage spread evenly over
    `args.mask_epochs`. Returns a dict of sparsity bookkeeping values.
    """
    pruning_info_dict = {}
    # Fraction to remove in *this* call (total percent amortized over epochs).
    pruning_adj_percent = args.pruning_percent_adj / args.mask_epochs
    pruning_wei_percent = args.pruning_percent_wei / args.mask_epochs
    adj_total = model.edge_num
    adj_mask, wei_mask, wei_total = get_soft_mask_distribution(model, args)
    # print(adj_mask.shape, wei_mask.shape)
    ### sort
    adj_y, adj_i = torch.sort(adj_mask.abs())
    wei_y, wei_i = torch.sort(wei_mask.abs())
    ### get threshold
    # NOTE(review): the threshold index is computed from the *total* element
    # count but indexes the sorted vector of *non-zero* entries; this assumes
    # the cumulative pruned fraction stays small enough that the index remains
    # in range — TODO confirm for large mask_epochs counts.
    adj_thre_index = int(adj_total * pruning_adj_percent)
    adj_thre = adj_y[adj_thre_index]
    wei_thre_index = int(wei_total * pruning_wei_percent)
    wei_thre = wei_y[wei_thre_index]
    ### pruning soft and hard mask on model
    # model.edge_mask1_train = nn.Parameter(get_each_mask(model.edge_mask1_train, adj_thre, ifone=False), requires_grad=True)
    # Temporarily freeze the trainable adjacency mask so the in-place ops
    # below do not interact with autograd.
    model.edge_mask1_train.requires_grad = False
    mask1_train = model.edge_mask1_train.detach()
    # Binary keep/drop mask computed from the gated soft mask.
    fixed_mask = get_each_mask(mask1_train * model.edge_mask2_fixed, adj_thre, ifone=True)
    model.edge_mask1_train.mul_(fixed_mask)
    model.edge_mask2_fixed = nn.Parameter(fixed_mask, requires_grad=False)
    model.edge_mask1_train.requires_grad = True
    adj_remain = model.edge_mask2_fixed.detach().sum()
    wei_remain = 0
    for i in range(args.num_layers):
        # Soft mask keeps the surviving values; the fixed mask is its binary
        # version (note it is derived from the already-pruned train mask).
        model.gcns[i].mlp[0].weight_mask_train = nn.Parameter(get_each_mask(model.gcns[i].mlp[0].weight_mask_train, wei_thre, ifone=False), requires_grad=True)
        model.gcns[i].mlp[0].weight_mask_fixed = nn.Parameter(get_each_mask(model.gcns[i].mlp[0].weight_mask_train, wei_thre, ifone=True), requires_grad=False)
        wei_remain += model.gcns[i].mlp[0].weight_mask_fixed.detach().sum()
    # Percent of elements surviving after this step.
    adj_spar = adj_remain * 100 / adj_total
    wei_spar = wei_remain * 100 / wei_total
    pruning_info_dict['wei_spar']= wei_spar
    pruning_info_dict['adj_spar'] = adj_spar
    pruning_info_dict['wei_total'] = wei_total
    pruning_info_dict['adj_total'] = adj_total
    pruning_info_dict['wei_prune'] = wei_total - wei_remain
    pruning_info_dict['adj_prune'] = adj_total - adj_remain
    return pruning_info_dict
##### pruning remain mask percent #######
def get_final_mask_epoch(model, rewind_weight, args):
    """Derive hard 0/1 masks at the target sparsity and write them into
    `rewind_weight` (the rewind checkpoint's state dict).

    Thresholds are chosen so that `args.pruning_percent_adj` /
    `args.pruning_percent_wei` of the surviving entries fall below them.

    Returns:
        The updated `rewind_weight` dict, with `edge_mask*` and per-layer
        `weight_mask_*` entries replaced by binary masks (train == fixed).
    """
    # BUG FIX: get_soft_mask_distribution returns three values (the last is
    # the total weight-element count); the original two-name unpack raised
    # "too many values to unpack" at runtime. Totals here are recomputed from
    # the non-zero vectors, matching the original intent.
    adj_mask, wei_mask, _ = get_soft_mask_distribution(model, args)
    adj_total = adj_mask.shape[0]
    wei_total = wei_mask.shape[0]
    ### sort
    adj_y, adj_i = torch.sort(adj_mask.abs())
    wei_y, wei_i = torch.sort(wei_mask.abs())
    ### get threshold
    adj_thre_index = int(adj_total * args.pruning_percent_adj)
    adj_thre = adj_y[adj_thre_index]
    wei_thre_index = int(wei_total * args.pruning_percent_wei)
    wei_thre = wei_y[wei_thre_index]
    ### create mask dict
    ori_edge_mask = model.edge_mask1_train.detach().cpu()
    rewind_weight['edge_mask1_train'] = get_each_mask(ori_edge_mask, adj_thre)
    rewind_weight['edge_mask2_fixed'] = rewind_weight['edge_mask1_train']
    for i in range(args.num_layers):
        key_train = 'gcns.{}.mlp.0.weight_mask_train'.format(i)
        key_fixed = 'gcns.{}.mlp.0.weight_mask_fixed'.format(i)
        rewind_weight[key_train] = get_each_mask(
            model.gcns[i].mlp[0].state_dict()['weight_mask_train'], wei_thre)
        rewind_weight[key_fixed] = rewind_weight[key_train]
    return rewind_weight
def random_pruning(model, args):
    """Randomly zero out a fraction of the adjacency mask and of each layer's
    weight mask, in place on `model`.

    `args.pruning_percent_adj` / `args.pruning_percent_wei` give the fraction
    (of the *total* element count) to prune. Pruned positions are drawn
    uniformly without replacement from the currently non-zero entries, and
    both the trainable and fixed masks are zeroed there.
    """
    model.edge_mask1_train.requires_grad = False
    adj_total = model.edge_mask1_train.numel()
    adj_pruned_num = int(adj_total * args.pruning_percent_adj)
    adj_nonzero = model.edge_mask1_train.nonzero()
    # BUG FIX: sample row indices of the non-zero list. The original sampled
    # from range(adj_total), which over-runs adj_nonzero as soon as some
    # entries are already zero (IndexError on any call after the first
    # pruning round). The tqdm progress wrapper is dropped; the loop is cheap.
    adj_pruned_index = random.sample(range(adj_nonzero.shape[0]), adj_pruned_num)
    adj_pruned_list = adj_nonzero[adj_pruned_index].tolist()
    print("pruning adj ......")
    for i, j in adj_pruned_list:
        model.edge_mask1_train[i][j] = 0
        model.edge_mask2_fixed[i][j] = 0
    model.edge_mask1_train.requires_grad = True
    for layer in range(args.num_layers):
        wei_train = model.gcns[layer].mlp[0].weight_mask_train
        wei_fixed = model.gcns[layer].mlp[0].weight_mask_fixed
        wei_train.requires_grad = False
        wei_total = wei_train.numel()
        wei_pruned_num = int(wei_total * args.pruning_percent_wei)
        wei_nonzero = wei_train.nonzero()
        # Same fix as above: population is the non-zero entries.
        wei_pruned_index = random.sample(range(wei_nonzero.shape[0]), wei_pruned_num)
        for ai, wj in wei_nonzero[wei_pruned_index].tolist():
            wei_train[ai][wj] = 0
            wei_fixed[ai][wj] = 0
        wei_train.requires_grad = True
def print_sparsity(model, args):
    """Report and return the surviving percentage of graph edges and of layer weights."""
    adj_kept = model.edge_mask2_fixed.sum().item()
    adj_spar = adj_kept * 100 / model.edge_num
    wei_total, wei_kept = 0, 0
    for layer in range(args.num_layers):
        fixed = model.gcns[layer].mlp[0].weight_mask_fixed
        wei_total += fixed.numel()
        wei_kept += fixed.sum().item()
    wei_spar = wei_kept * 100 / wei_total
    separator = "-" * 100
    print(separator)
    print("Sparsity: Adj:[{:.2f}%] Wei:[{:.2f}%]".format(adj_spar, wei_spar))
    print(separator)
    return adj_spar, wei_spar
def add_trainable_mask_noise(model, args, c=1e-5):
    """Perturb every trainable mask in place by uniform noise in [-c, c],
    scaled by the mask itself (so exactly-zero entries stay zero).

    Gradients are temporarily disabled around the in-place update and
    restored afterwards.
    """
    def _perturb(mask):
        # Freeze the leaf so the in-place add does not touch autograd.
        mask.requires_grad = False
        noise = (2 * torch.rand(mask.shape) - 1) * c
        noise = noise.to(mask.device)
        mask.add_(noise * mask)
        mask.requires_grad = True

    _perturb(model.edge_mask1_train)
    for layer in range(args.num_layers):
        _perturb(model.gcns[layer].mlp[0].weight_mask_train)
| 37.423077 | 159 | 0.696814 |
dca2922801c2dedff67fa57e6569608100677c64 | 2,326 | py | Python | api_v2/urls.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 22 | 2015-01-16T01:36:32.000Z | 2020-06-08T00:46:18.000Z | api_v2/urls.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 8 | 2015-12-28T18:56:19.000Z | 2019-04-01T17:33:48.000Z | api_v2/urls.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 13 | 2015-01-13T20:56:22.000Z | 2022-02-23T06:01:17.000Z | from django.conf.urls.defaults import *
from piston.resource import Resource
from api_v2.system_handler import SystemHandler
from api_v2.networkadapter_handler import NetworkAdapterHandler
from api_v2.keyvalue_handler import KeyValueHandler
from api_v2.truth_handler import TruthHandler
from api_v2.oncall_handler import OncallHandler
from api_v2.dhcp_handler import DHCPHandler
from api_v2.reverse_dns_handler import ReverseDNSHandler
from api_v2.system_rack_handler import SystemRackHandler
from api_v2.system_status_handler import SystemStatusHandler
from django.views.decorators.cache import cache_control
cached_resource = cache_control(public=True, maxage=600, s_maxage=600)
systems_handler = Resource(SystemHandler)
network_adapter_handler = Resource(NetworkAdapterHandler)
keyvalue_handler = Resource(KeyValueHandler)
reverse_dns_handler = Resource(ReverseDNSHandler)
dhcp_handler = Resource(DHCPHandler)
system_rack_handler = Resource(SystemRackHandler)
system_status_handler = Resource(SystemStatusHandler)
oncall_handler = Resource(OncallHandler)
urlpatterns = patterns('',
url(r'^dhcp/(?P<dhcp_scope>[^/]+)/(?P<dhcp_action>[^/]+)', cached_resource(dhcp_handler)),
url(r'^dhcp/', cached_resource(dhcp_handler)),
url(r'^reverse_dns/(?P<reverse_dns_zone>[^/]+)/(?P<reverse_dns_action>[^/]+)', cached_resource(reverse_dns_handler)),
url(r'^reverse_dns/', cached_resource(reverse_dns_handler)),
url(r'^system/(?P<system_id>[^/]+)/', cached_resource(systems_handler)),
url(r'^systems/', cached_resource(systems_handler)),
url(r'^systemrack/(?P<system_rack_id>[^/]+)/', cached_resource(system_rack_handler)),
url(r'^systemrack/', cached_resource(system_rack_handler)),
url(r'^systemstatus/(?P<system_status_id>[^/]+)/', cached_resource(system_status_handler)),
url(r'^systemstatus/', cached_resource(system_status_handler)),
url(r'^keyvalue/(?P<key_value_id>[^/]+)/', cached_resource(keyvalue_handler)),
url(r'^keyvalue/', cached_resource(keyvalue_handler), name='api_v2_keyvalue_get'),
url(r'^networkadapter/(?P<network_adapter_id>[^/]+)/', cached_resource(network_adapter_handler)),
url(r'^networkadapter/', cached_resource(network_adapter_handler)),
url(r'^oncall/(?P<oncall_type>[^/]+)/(?P<display_type>[^/]+)/', cached_resource(oncall_handler)),
)
| 56.731707 | 121 | 0.781599 |
d78f9c7e752823704154d6a4f8c3ae860ef36164 | 214 | py | Python | Python3/1079-Letter-Tile-Possibilities/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/1079-Letter-Tile-Possibilities/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/1079-Letter-Tile-Possibilities/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | import functools
class Solution:
def numTilePossibilities(self, tiles: str) -> int:
return len(functools.reduce(operator.or_, [set(itertools.permutations(tiles, i)) for i in range(1, len(tiles) + 1)]))
| 42.8 | 125 | 0.71028 |
85a9975181565a4fee7936afb1b1a00004c75ad9 | 58 | py | Python | src/striga/service/sqlobjectsvc/__init__.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | src/striga/service/sqlobjectsvc/__init__.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | src/striga/service/sqlobjectsvc/__init__.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | #Interface
from ._stsvcso_service import SQLObjectFactory
| 19.333333 | 46 | 0.87931 |
6a5b8a928802f558e2836edb3a64a09cbeeddfd9 | 2,474 | py | Python | .history/my_classes/basic/for_loop_20210430210110.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/basic/for_loop_20210430210110.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/basic/for_loop_20210430210110.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | i = 0 # This is an example of the while loop
while i < 5:
print(i)
i += 1
i = None # 0
# 1
# 2
# 3
# 4
for i in range(5): # This example uses the iterable object 'range'
print(i) # 0
# 1
# 2
# 3
# 4
for i in [1, 2, 3, 4]:
print(i) # 1
# 2
# 3
# 4
for c in 'hello':
print(c) # h
# e
# l
# l
# o
for x in ('a',"b", 'c', 4):
print(x) # a
# b
# c
# 4
for x in [(1, 2), (3, 4), (5, 6)]:
print(x) # (1, 2)
# (3, 4)
# (5, 6)
for i in range(5):
if i==3:
continue
print(i) # 0
# 1
# 2
# 4
for i in range(5):
if i==3:
break
print(i) # 0
# 1
# 2
for i in range(1, 5):
print(i)
if i % 7 == 0:
print('multiple of 7 found')
break
else:
print('no multiple of 7 found') # 1
# 2
# 3
# 4
# no multiple of 7 found
for i in range(1, 8):
print(i)
if i % 7 == 0:
print('multiple of 7 found')
break
else:
print('no multiple of 7 found') # 1
# 2
# 3
# 4
# 5
# 6
# 7
# multiple of 7 found
for i in range(6):
print('------------------')
try:
10 / i-3
except ZeroDivisionError:
print('divided by 0')
continue
finally:
print("always run")
print(i) # -----------------
# divided by 0
# always run
# ------------------
# always run
# 1
# ------------------
# always run
# 2
# ------------------
# always run
# 3
# ------------------
# always run
# 4
# ------------------
# always run
# 5
s = 'hello'
for c in s
| 17.798561 | 67 | 0.260307 |
2dae3d1bbaa4e236d1f1b9c758b03fd9c489f83f | 47,466 | py | Python | discussion/fun_mcmc/fun_mcmc_test.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | 2 | 2020-02-21T06:30:00.000Z | 2021-08-08T19:29:15.000Z | discussion/fun_mcmc/fun_mcmc_test.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | null | null | null | discussion/fun_mcmc/fun_mcmc_test.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | 1 | 2020-05-31T13:08:33.000Z | 2020-05-31T13:08:33.000Z | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for fun_mcmc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
# Dependency imports
from absl.testing import parameterized
from jax.config import config as jax_config
import numpy as np
import tensorflow.compat.v2 as real_tf
from discussion.fun_mcmc import backend
from discussion.fun_mcmc import fun_mcmc_lib as fun_mcmc
from discussion.fun_mcmc import prefab
from tensorflow_probability.python.internal import test_util as tfp_test_util
tf = backend.tf  # Backend-selected TensorFlow-compatible API (TF or JAX shim).
tfp = backend.tfp  # Backend-selected TensorFlow Probability API.
util = backend.util  # Backend-selected tree/seed utilities.
# The tests rely on TF2 eager semantics and on 64-bit floats under JAX.
real_tf.enable_v2_behavior()
jax_config.update('jax_enable_x64', True)
# Minimal two-field namedtuple used to exercise structured-state handling.
TestNamedTuple = collections.namedtuple('TestNamedTuple', 'x, y')
def _test_seed():
  """Return the TFP test seed folded into the 32-bit unsigned range."""
  return tfp_test_util.test_seed() % (2**32 - 1)
def _no_compile(fn):
return fn
def _fwd_mclachlan_optimal_4th_order_step(*args, **kwargs):
  """McLachlan optimal 4th-order integrator step, forward direction."""
  step = fun_mcmc.mclachlan_optimal_4th_order_step
  return step(*args, forward=True, **kwargs)
def _rev_mclachlan_optimal_4th_order_step(*args, **kwargs):
  """McLachlan optimal 4th-order integrator step, reverse direction."""
  step = fun_mcmc.mclachlan_optimal_4th_order_step
  return step(*args, forward=False, **kwargs)
def _skip_on_jax(fn):
@functools.wraps(fn)
def _wrapper(self, *args, **kwargs):
if not self._is_on_jax:
return fn(self, *args, **kwargs)
return _wrapper
def _gen_cov(data, axis):
  """This computes a generalized covariance, supporting batch and reduction.

  This computes a batch of covariances from data, with the relevant dimensions
  are determined as follows:

  - Dimensions specified by `axis` are reduced over.
  - The final non-reduced over dimension is taken as the event dimension.
  - The remaining dimensions are batch dimensions.

  Args:
    data: An NDArray.
    axis: An integer or a tuple of integers.

  Returns:
    cov: A batch of covariances.
  """
  axis = tuple(util.flatten_tree(axis))
  shape = tuple(data.shape)
  rank = len(shape)
  centered_data = data - np.mean(data, axis, keepdims=True)
  # Einsum subscript alphabet; supports inputs up to rank 7.
  symbols = 'abcdefg'
  # Destination is missing the axes we reduce over.
  dest = []
  last_unaggregated_src_dim = None
  last_unaggregated_dest_dim = None
  for i in range(rank):
    if i not in axis:
      dest.append(symbols[i])
      last_unaggregated_src_dim = i
      last_unaggregated_dest_dim = len(dest) - 1
  # Two copies of the source subscripts; the event dimension (the last
  # non-reduced dim) is relabeled 'x' in one and 'y' in the other so the
  # einsum forms the outer product over that dimension.
  source_1 = list(symbols[:rank])
  source_1[last_unaggregated_src_dim] = 'x'
  source_2 = list(symbols[:rank])
  source_2[last_unaggregated_src_dim] = 'y'
  # The destination replaces the event axis with the ('x', 'y') pair.
  dest = dest[:last_unaggregated_dest_dim] + [
      'x', 'y'
  ] + dest[last_unaggregated_dest_dim + 1:]
  formula = '{source_1},{source_2}->{dest}'.format(
      source_1=''.join(source_1),
      source_2=''.join(source_2),
      dest=''.join(dest))
  # Normalize by the number of reduced-over elements (biased estimator,
  # matching np.cov(..., bias=True) in the non-batched case).
  cov = (
      np.einsum(formula, centered_data, centered_data) /
      np.prod(np.array(shape)[np.array(axis)]))
  return cov
class GenCovTest(real_tf.test.TestCase):
  """Sanity-checks `_gen_cov` against `np.cov` on a small 2-D example."""

  def testGenCov(self):
    data = np.arange(10).reshape(5, 2)
    # Reducing over axis 0 leaves columns as the event dimension.
    self.assertAllClose(np.cov(data, rowvar=False, bias=True), _gen_cov(data, 0))
    # Reducing over axis 1 leaves rows as the event dimension.
    self.assertAllClose(np.cov(data, rowvar=True, bias=True), _gen_cov(data, 1))
class FunMCMCTest(real_tf.test.TestCase, parameterized.TestCase):
_is_on_jax = backend.BACKEND_NAME == 'jax'
def _make_seed(self, seed):
return util.make_tensor_seed([seed, 0])
@property
def _dtype(self):
raise NotImplementedError()
def _constant(self, value):
return tf.constant(value, self._dtype)
@parameterized.named_parameters(
('Unrolled', True),
('NotUnrolled', False),
)
def testTraceSingle(self, unroll):
def fun(x):
return x + 1., 2 * x
x, e_trace = fun_mcmc.trace(
state=0.,
fn=fun,
num_steps=5,
trace_fn=lambda _, xp1: xp1,
unroll=unroll)
self.assertAllEqual(5., x)
self.assertAllEqual([0., 2., 4., 6., 8.], e_trace)
@parameterized.named_parameters(
('Unrolled', True),
('NotUnrolled', False),
)
def testTraceNested(self, unroll):
def fun(x, y):
return (x + 1., y + 2.), ()
(x, y), (x_trace, y_trace) = fun_mcmc.trace(
state=(0., 0.),
fn=fun,
num_steps=5,
trace_fn=lambda xy, _: xy,
unroll=unroll)
self.assertAllEqual(5., x)
self.assertAllEqual(10., y)
self.assertAllEqual([1., 2., 3., 4., 5.], x_trace)
self.assertAllEqual([2., 4., 6., 8., 10.], y_trace)
@parameterized.named_parameters(
('Unrolled', True),
('NotUnrolled', False),
)
def testTraceTrace(self, unroll):
def fun(x):
return fun_mcmc.trace(
x, lambda x: (x + 1., x + 1.), 2, trace_mask=False, unroll=unroll)
x, trace = fun_mcmc.trace(0., fun, 2)
self.assertAllEqual(4., x)
self.assertAllEqual([2., 4.], trace)
def testTraceDynamic(self):
@tf.function
def trace_n(num_steps):
return fun_mcmc.trace(0, lambda x: (x + 1, ()), num_steps)[0]
x = trace_n(5)
self.assertAllEqual(5, x)
@parameterized.named_parameters(
('Unrolled', True),
('NotUnrolled', False),
)
def testTraceMask(self, unroll):
def fun(x):
return x + 1, (2 * x, 3 * x)
x, (trace_1, trace_2) = fun_mcmc.trace(
state=0, fn=fun, num_steps=3, trace_mask=(True, False), unroll=unroll)
self.assertAllEqual(3, x)
self.assertAllEqual([0, 2, 4], trace_1)
self.assertAllEqual(6, trace_2)
x, (trace_1, trace_2) = fun_mcmc.trace(
state=0, fn=fun, num_steps=3, trace_mask=False, unroll=unroll)
self.assertAllEqual(3, x)
self.assertAllEqual(4, trace_1)
self.assertAllEqual(6, trace_2)
def testCallFn(self):
sum_fn = lambda *args: sum(args)
self.assertEqual(1, fun_mcmc.call_fn(sum_fn, 1))
self.assertEqual(3, fun_mcmc.call_fn(sum_fn, (1, 2)))
def testCallFnDict(self):
sum_fn = lambda a, b: a + b
self.assertEqual(3, fun_mcmc.call_fn(sum_fn, [1, 2]))
self.assertEqual(3, fun_mcmc.call_fn(sum_fn, {'a': 1, 'b': 2}))
@parameterized.named_parameters(
('ArgsToTuple1', (1,), {}, (1,)),
('ArgsToList1', (1,), {}, [1]),
('ArgsToTuple3', (1, 2, 3), {}, [1, 2, 3]),
('ArgsToList3', (1, 2, 3), {}, [1, 2, 3]),
('ArgsToOrdDict3',
(1, 2, 3), {}, collections.OrderedDict([('c', 1), ('b', 2), ('a', 3)])),
('ArgsKwargsToOrdDict3', (1, 2), {
'a': 3
}, collections.OrderedDict([('c', 1), ('b', 2), ('a', 3)])),
('KwargsToOrdDict3', (), {
'a': 3,
'b': 2,
'c': 1
}, collections.OrderedDict([('c', 1), ('b', 2), ('a', 3)])),
('KwargsToDict3', (), {
'a': 3,
'b': 2,
'c': 1
}, {
'c': 1,
'b': 2,
'a': 3
}),
('ArgsToNamedTuple', (TestNamedTuple(1, 2),), {}, TestNamedTuple(1, 2)),
('KwargsToNamedTuple', (), {
'a': TestNamedTuple(1, 2)
}, TestNamedTuple(1, 2)),
('ArgsToScalar', (1,), {}, 1),
('KwargsToScalar', (), {
'a': 1
}, 1),
('Tuple0', (), {}, ()),
('List0', (), {}, []),
('Dict0', (), {}, {}),
)
def testRecoverStateFromArgs(self, args, kwargs, state_structure):
state = fun_mcmc.recover_state_from_args(args, kwargs, state_structure)
self.assertEqual(type(state_structure), type(state))
self.assertAllEqual(state_structure, state)
@parameterized.named_parameters(
('BadKwargs', (), {
'a': 1,
'b': 2
}, 'c'),
('ArgsOverlap', (1, 2), {
'c': 1,
'b': 2
}, 'a'),
)
def testRecoverStateFromArgsMissing(self, args, kwargs, missing):
state_structure = collections.OrderedDict([('c', 1), ('b', 2), ('a', 3)])
with self.assertRaisesRegexp(ValueError,
'Missing \'{}\' from kwargs.'.format(missing)):
fun_mcmc.recover_state_from_args(args, kwargs, state_structure)
@parameterized.named_parameters(
('Tuple1', {
'a': 1
}, (1,)),
('List1', {
'a': 1
}, [1]),
)
def testRecoverStateFromArgsNoKwargs(self, kwargs, state_structure):
with self.assertRaisesRegexp(ValueError, 'This wrapper does not'):
fun_mcmc.recover_state_from_args((), kwargs, state_structure)
def testBroadcastStructure(self):
struct = fun_mcmc.maybe_broadcast_structure(1, [1, 2])
self.assertEqual([1, 1], struct)
struct = fun_mcmc.maybe_broadcast_structure([3, 4], [1, 2])
self.assertEqual([3, 4], struct)
def testCallPotentialFn(self):
def potential(x):
return x, ()
x, extra = fun_mcmc.call_potential_fn(potential, 0.)
self.assertEqual(0., x)
self.assertEqual((), extra)
def testCallPotentialFnMissingExtra(self):
def potential(x):
return x
with self.assertRaisesRegexp(TypeError, 'A common solution is to adjust'):
fun_mcmc.call_potential_fn(potential, 0.)
def testCallTransitionOperator(self):
def kernel(x, y):
del y
return [x, [1]], ()
[x, [y]], extra = fun_mcmc.call_transition_operator(kernel, [0., None])
self.assertEqual(0., x)
self.assertEqual(1, y)
self.assertEqual((), extra)
def testCallTransitionOperatorMissingExtra(self):
def potential(x):
return x
with self.assertRaisesRegexp(TypeError, 'A common solution is to adjust'):
fun_mcmc.call_transition_operator(potential, 0.)
def testCallTransitionOperatorBadArgs(self):
def potential(x, y, z):
del z
return (x, y), ()
with self.assertRaisesRegexp(TypeError, 'The structure of `new_args=`'):
fun_mcmc.call_transition_operator(potential, (1, 2, 3))
def testTransformLogProbFn(self):
def log_prob_fn(x, y):
return (tfp.distributions.Normal(self._constant(0.), 1.).log_prob(x) +
tfp.distributions.Normal(self._constant(1.), 1.).log_prob(y)), ()
bijectors = [
tfp.bijectors.Scale(scale=self._constant(2.)),
tfp.bijectors.Scale(scale=self._constant(3.))
]
(transformed_log_prob_fn,
transformed_init_state) = fun_mcmc.transform_log_prob_fn(
log_prob_fn, bijectors,
[self._constant(2.), self._constant(3.)])
self.assertIsInstance(transformed_init_state, list)
self.assertAllClose([1., 1.], transformed_init_state)
tlp, (orig_space, _) = (
transformed_log_prob_fn(self._constant(1.), self._constant(1.)))
lp = log_prob_fn(self._constant(2.), self._constant(3.))[0] + sum(
b.forward_log_det_jacobian(self._constant(1.), event_ndims=0)
for b in bijectors)
self.assertAllClose([2., 3.], orig_space)
self.assertAllClose(lp, tlp)
def testTransformLogProbFnKwargs(self):
def log_prob_fn(x, y):
return (tfp.distributions.Normal(self._constant(0.), 1.).log_prob(x) +
tfp.distributions.Normal(self._constant(1.), 1.).log_prob(y)), ()
bijectors = {
'x': tfp.bijectors.Scale(scale=self._constant(2.)),
'y': tfp.bijectors.Scale(scale=self._constant(3.))
}
(transformed_log_prob_fn,
transformed_init_state) = fun_mcmc.transform_log_prob_fn(
log_prob_fn, bijectors, {
'x': self._constant(2.),
'y': self._constant(3.),
})
self.assertIsInstance(transformed_init_state, dict)
self.assertAllClose({
'x': self._constant(1.),
'y': self._constant(1.),
}, transformed_init_state)
tlp, (orig_space, _) = transformed_log_prob_fn(
x=self._constant(1.), y=self._constant(1.))
lp = log_prob_fn(
x=self._constant(2.), y=self._constant(3.))[0] + sum(
b.forward_log_det_jacobian(self._constant(1.), event_ndims=0)
for b in bijectors.values())
self.assertAllClose({
'x': self._constant(2.),
'y': self._constant(3.)
}, orig_space)
self.assertAllClose(lp, tlp)
# The +1's here are because we initialize the `state_grads` at 1, which
# require an extra call to `target_log_prob_fn`.
@parameterized.named_parameters(
('Leapfrog', lambda: fun_mcmc.leapfrog_step, 1 + 1),
('Ruth4', lambda: fun_mcmc.ruth4_step, 3 + 1),
('Blanes3', lambda: fun_mcmc.blanes_3_stage_step, 3 + 1),
('McLachlan4Fwd', lambda: _fwd_mclachlan_optimal_4th_order_step, 4 + 1,
9),
('McLachlan4Rev', lambda: _rev_mclachlan_optimal_4th_order_step, 4 + 1,
9),
)
def testIntegratorStep(self,
method_fn,
num_tlp_calls,
num_tlp_calls_jax=None):
method = method_fn()
tlp_call_counter = [0]
def target_log_prob_fn(q):
tlp_call_counter[0] += 1
return -q**2, 1.
def kinetic_energy_fn(p):
return tf.abs(p)**3., 2.
state = self._constant(1.)
_, _, state_grads = fun_mcmc.call_potential_fn_with_grads(
target_log_prob_fn,
state,
)
state, extras = method(
integrator_step_state=fun_mcmc.IntegratorStepState(
state=state, state_grads=state_grads, momentum=self._constant(2.)),
step_size=self._constant(0.1),
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
if num_tlp_calls_jax is not None and self._is_on_jax:
num_tlp_calls = num_tlp_calls_jax
self.assertEqual(num_tlp_calls, tlp_call_counter[0])
self.assertEqual(1., extras.state_extra)
self.assertEqual(2., extras.kinetic_energy_extra)
initial_hamiltonian = -target_log_prob_fn(
self._constant(1.))[0] + kinetic_energy_fn(self._constant(2.))[0]
fin_hamiltonian = -target_log_prob_fn(state.state)[0] + kinetic_energy_fn(
state.momentum)[0]
self.assertAllClose(fin_hamiltonian, initial_hamiltonian, atol=0.2)
@parameterized.named_parameters(
('Leapfrog', fun_mcmc.leapfrog_step),
('Ruth4', fun_mcmc.ruth4_step),
('Blanes3', fun_mcmc.blanes_3_stage_step),
)
def testIntegratorStepReversible(self, method):
def target_log_prob_fn(q):
return -q**2, []
def kinetic_energy_fn(p):
return p**2., []
seed = self._make_seed(_test_seed())
state = self._constant(1.)
_, _, state_grads = fun_mcmc.call_potential_fn_with_grads(
target_log_prob_fn,
state,
)
state_fwd, _ = method(
integrator_step_state=fun_mcmc.IntegratorStepState(
state=state,
state_grads=state_grads,
momentum=util.random_normal([], self._dtype, seed)),
step_size=self._constant(0.1),
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
state_rev, _ = method(
integrator_step_state=state_fwd._replace(momentum=-state_fwd.momentum),
step_size=self._constant(0.1),
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
self.assertAllClose(state, state_rev.state, atol=1e-6)
def testMclachlanIntegratorStepReversible(self):
def target_log_prob_fn(q):
return -q**2, []
def kinetic_energy_fn(p):
return p**2., []
seed = self._make_seed(_test_seed())
state = self._constant(1.)
_, _, state_grads = fun_mcmc.call_potential_fn_with_grads(
target_log_prob_fn,
state,
)
state_fwd, _ = _fwd_mclachlan_optimal_4th_order_step(
integrator_step_state=fun_mcmc.IntegratorStepState(
state=state,
state_grads=state_grads,
momentum=util.random_normal([], self._dtype, seed)),
step_size=self._constant(0.1),
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
state_rev, _ = _rev_mclachlan_optimal_4th_order_step(
integrator_step_state=state_fwd._replace(momentum=-state_fwd.momentum),
step_size=self._constant(0.1),
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
self.assertAllClose(state, state_rev.state, atol=1e-6)
def testMetropolisHastingsStep(self):
seed = self._make_seed(_test_seed())
zero = self._constant(0.)
one = self._constant(1.)
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=zero,
proposed_state=one,
energy_change=-np.inf,
seed=seed)
self.assertAllEqual(one, accepted)
self.assertAllEqual(True, mh_extra.is_accepted)
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=zero, proposed_state=one, energy_change=np.inf, seed=seed)
self.assertAllEqual(zero, accepted)
self.assertAllEqual(False, mh_extra.is_accepted)
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=zero, proposed_state=one, energy_change=np.nan, seed=seed)
self.assertAllEqual(zero, accepted)
self.assertAllEqual(False, mh_extra.is_accepted)
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=zero, proposed_state=one, energy_change=np.nan, seed=seed)
self.assertAllEqual(zero, accepted)
self.assertAllEqual(False, mh_extra.is_accepted)
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=zero,
proposed_state=one,
log_uniform=-one,
energy_change=self._constant(-np.log(0.5)),
seed=seed)
self.assertAllEqual(one, accepted)
self.assertAllEqual(True, mh_extra.is_accepted)
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=zero,
proposed_state=one,
log_uniform=zero,
energy_change=self._constant(-np.log(0.5)),
seed=seed)
self.assertAllEqual(zero, accepted)
self.assertAllEqual(False, mh_extra.is_accepted)
accepted, _ = fun_mcmc.metropolis_hastings_step(
current_state=tf.zeros(1000, dtype=self._dtype),
proposed_state=tf.ones(1000, dtype=self._dtype),
energy_change=-tf.math.log(0.5 * tf.ones(1000, dtype=self._dtype)),
seed=seed)
self.assertAllClose(0.5, tf.reduce_mean(accepted), rtol=0.1)
def testMetropolisHastingsStepStructure(self):
struct_type = collections.namedtuple('Struct', 'a, b')
current = struct_type([1, 2], (3, [4, [0, 0]]))
proposed = struct_type([5, 6], (7, [8, [0, 0]]))
accepted, mh_extra = fun_mcmc.metropolis_hastings_step(
current_state=current,
proposed_state=proposed,
energy_change=-np.inf,
seed=self._make_seed(_test_seed()))
self.assertAllEqual(True, mh_extra.is_accepted)
self.assertAllEqual(
util.flatten_tree(proposed), util.flatten_tree(accepted))
@parameterized.named_parameters(
('Unrolled', True),
('NotUnrolled', False),
)
def testBasicHMC(self, unroll):
step_size = self._constant(0.2)
num_steps = 2000
num_leapfrog_steps = 10
state = tf.ones([16, 2], dtype=self._dtype)
base_mean = self._constant([2., 3.])
base_scale = self._constant([2., 0.5])
def target_log_prob_fn(x):
return -tf.reduce_sum(0.5 * tf.square(
(x - base_mean) / base_scale), -1), ()
def kernel(hmc_state, seed):
hmc_seed, seed = util.split_seed(seed, 2)
hmc_state, _ = fun_mcmc.hamiltonian_monte_carlo(
hmc_state,
step_size=step_size,
num_integrator_steps=num_leapfrog_steps,
target_log_prob_fn=target_log_prob_fn,
unroll_integrator=unroll,
seed=hmc_seed)
return (hmc_state, seed), hmc_state.state
seed = self._make_seed(_test_seed())
# Subtle: Unlike TF, JAX needs a data dependency from the inputs to outputs
# for the jit to do anything.
_, chain = tf.function(lambda state, seed: fun_mcmc.trace( # pylint: disable=g-long-lambda
state=(fun_mcmc.hamiltonian_monte_carlo_init(state, target_log_prob_fn),
seed),
fn=kernel,
num_steps=num_steps))(state, seed)
# Discard the warmup samples.
chain = chain[1000:]
sample_mean = tf.reduce_mean(chain, axis=[0, 1])
sample_var = tf.math.reduce_variance(chain, axis=[0, 1])
true_samples = util.random_normal(
shape=[4096, 2], dtype=self._dtype, seed=seed) * base_scale + base_mean
true_mean = tf.reduce_mean(true_samples, axis=0)
true_var = tf.math.reduce_variance(true_samples, axis=0)
self.assertAllClose(true_mean, sample_mean, rtol=0.1, atol=0.1)
self.assertAllClose(true_var, sample_var, rtol=0.1, atol=0.1)
def testPreconditionedHMC(self):
    """Checks HMC on a Softplus-transformed correlated Gaussian target.

    The chain runs in the unconstrained space via a Softplus bijector;
    the samples mapped back to the original space (via ``state_extra``)
    should match the mean/covariance of direct draws from the target.
    """
    step_size = self._constant(0.2)
    num_steps = 2000
    num_leapfrog_steps = 10
    state = tf.ones([16, 2], dtype=self._dtype)

    base_mean = self._constant([1., 0])
    base_cov = self._constant([[1, 0.5], [0.5, 1]])

    bijector = tfp.bijectors.Softplus()
    base_dist = tfp.distributions.MultivariateNormalFullCovariance(
        loc=base_mean, covariance_matrix=base_cov)
    target_dist = bijector(base_dist)

    def orig_target_log_prob_fn(x):
        return target_dist.log_prob(x), ()

    target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(
        orig_target_log_prob_fn, bijector, state)

    # pylint: disable=g-long-lambda
    def kernel(hmc_state, seed):
        hmc_seed, seed = util.split_seed(seed, 2)
        hmc_state, _ = fun_mcmc.hamiltonian_monte_carlo(
            hmc_state,
            step_size=step_size,
            num_integrator_steps=num_leapfrog_steps,
            target_log_prob_fn=target_log_prob_fn,
            seed=hmc_seed)
        # state_extra[0] holds the state mapped back to the original space.
        return (hmc_state, seed), hmc_state.state_extra[0]

    seed = self._make_seed(_test_seed())

    # Subtle: Unlike TF, JAX needs a data dependency from the inputs to outputs
    # for the jit to do anything.
    _, chain = tf.function(lambda state, seed: fun_mcmc.trace(
        state=(fun_mcmc.hamiltonian_monte_carlo_init(state, target_log_prob_fn),
               seed),
        fn=kernel,
        num_steps=num_steps))(state, seed)
    # Discard the warmup samples.
    chain = chain[1000:]

    sample_mean = tf.reduce_mean(chain, axis=[0, 1])
    sample_cov = tfp.stats.covariance(chain, sample_axis=[0, 1])

    true_samples = target_dist.sample(4096, seed=self._make_seed(_test_seed()))

    true_mean = tf.reduce_mean(true_samples, axis=0)
    # BUG FIX: the reference covariance was previously computed from `chain`
    # itself, making the covariance assertion compare the sample covariance
    # against itself. Derive it from independent draws of the target instead
    # (mirrors the mean/variance reference in testBasicHMC).
    true_cov = tfp.stats.covariance(true_samples, sample_axis=0)

    self.assertAllClose(true_mean, sample_mean, rtol=0.1, atol=0.1)
    self.assertAllClose(true_cov, sample_cov, rtol=0.1, atol=0.1)
@parameterized.parameters((tf.function, 1), (_no_compile, 2))
@_skip_on_jax  # `trace` doesn't have an efficient path in JAX yet.
def testHMCCountTargetLogProb(self, compile_fn, expected_count):
    """Counts Python-side evaluations of the target log-prob function.

    When the target is wrapped in `tf.function` its Python body runs once
    (a single trace); with `_no_compile` it runs twice — presumably once
    at init and once inside the kernel (TODO confirm against fun_mcmc).
    """
    counter = [0]

    @compile_fn
    def target_log_prob_fn(x):
        # Python-side side effect; counts traces/invocations only.
        counter[0] += 1
        return -tf.square(x), []

    # pylint: disable=g-long-lambda
    @tf.function
    def trace():
        kernel = lambda state: fun_mcmc.hamiltonian_monte_carlo(
            state,
            step_size=self._constant(0.1),
            num_integrator_steps=3,
            target_log_prob_fn=target_log_prob_fn,
            seed=_test_seed())

        fun_mcmc.trace(
            state=fun_mcmc.hamiltonian_monte_carlo_init(
                tf.zeros([1], dtype=self._dtype), target_log_prob_fn),
            fn=kernel,
            num_steps=4,
            trace_fn=lambda *args: ())

    trace()

    self.assertEqual(expected_count, counter[0])
@_skip_on_jax  # `trace` doesn't have an efficient path in JAX yet.
def testHMCCountTargetLogProbEfficient(self):
    """Verifies HMC does not re-evaluate the target log-prob redundantly.

    With the entire loop inside one `tf.function`, the Python body of the
    (uncompiled) target function should be traced exactly twice.
    """
    counter = [0]

    def target_log_prob_fn(x):
        # Python-side counter; incremented once per trace of this body.
        counter[0] += 1
        return -tf.square(x), []

    @tf.function
    def trace():
        # pylint: disable=g-long-lambda
        kernel = lambda state: fun_mcmc.hamiltonian_monte_carlo(
            state,
            step_size=self._constant(0.1),
            num_integrator_steps=3,
            target_log_prob_fn=target_log_prob_fn,
            seed=self._make_seed(_test_seed()))

        fun_mcmc.trace(
            state=fun_mcmc.hamiltonian_monte_carlo_init(
                state=tf.zeros([1], dtype=self._dtype),
                target_log_prob_fn=target_log_prob_fn),
            fn=kernel,
            num_steps=4,
            trace_fn=lambda *args: ())

    trace()

    self.assertEqual(2, counter[0])
def testAdaptiveStepSize(self):
    """Adapts the log step size with Adam toward 0.9 acceptance.

    Runs HMC on a Softplus-transformed correlated Gaussian while tuning
    the step size for `num_adapt_steps` steps, then checks that the
    post-adaptation samples match the target's moments and that the mean
    acceptance probability is near the 0.9 set point.
    """
    step_size = self._constant(0.2)
    num_steps = 200
    num_adapt_steps = 100
    num_leapfrog_steps = 10
    state = tf.ones([16, 2], dtype=self._dtype)

    base_mean = self._constant([1., 0])
    base_cov = self._constant([[1, 0.5], [0.5, 1]])

    @tf.function
    def computation(state, seed):
        bijector = tfp.bijectors.Softplus()
        base_dist = tfp.distributions.MultivariateNormalFullCovariance(
            loc=base_mean, covariance_matrix=base_cov)
        target_dist = bijector(base_dist)

        def orig_target_log_prob_fn(x):
            return target_dist.log_prob(x), ()

        target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(
            orig_target_log_prob_fn, bijector, state)

        def kernel(hmc_state, step_size_state, step, seed):
            hmc_seed, seed = util.split_seed(seed, 2)
            hmc_state, hmc_extra = fun_mcmc.hamiltonian_monte_carlo(
                hmc_state,
                # Step size is optimized in log space, so exponentiate here.
                step_size=tf.exp(step_size_state.state),
                num_integrator_steps=num_leapfrog_steps,
                target_log_prob_fn=target_log_prob_fn,
                seed=hmc_seed)

            # Learning rate decays polynomially to 0 over the adaptation
            # phase, effectively freezing the step size afterwards.
            rate = prefab._polynomial_decay(  # pylint: disable=protected-access
                step=step,
                step_size=self._constant(0.01),
                power=0.5,
                decay_steps=num_adapt_steps,
                final_step_size=0.)
            mean_p_accept = tf.reduce_mean(
                tf.exp(tf.minimum(self._constant(0.),
                                  hmc_extra.log_accept_ratio)))
            # Surrogate loss whose gradient pushes acceptance toward 0.9.
            loss_fn = fun_mcmc.make_surrogate_loss_fn(
                lambda _: (0.9 - mean_p_accept, ()))
            step_size_state, _ = fun_mcmc.adam_step(
                step_size_state, loss_fn, learning_rate=rate)

            return ((hmc_state, step_size_state, step + 1, seed),
                    (hmc_state.state_extra[0], hmc_extra.log_accept_ratio))

        _, (chain, log_accept_ratio_trace) = fun_mcmc.trace(
            state=(fun_mcmc.hamiltonian_monte_carlo_init(state,
                                                         target_log_prob_fn),
                   fun_mcmc.adam_init(tf.math.log(step_size)), 0, seed),
            fn=kernel,
            num_steps=num_adapt_steps + num_steps,
        )
        true_samples = target_dist.sample(
            4096, seed=self._make_seed(_test_seed()))
        return chain, log_accept_ratio_trace, true_samples

    seed = self._make_seed(_test_seed())
    chain, log_accept_ratio_trace, true_samples = computation(state, seed)

    # Discard the adaptation phase.
    log_accept_ratio_trace = log_accept_ratio_trace[num_adapt_steps:]
    chain = chain[num_adapt_steps:]

    sample_mean = tf.reduce_mean(chain, axis=[0, 1])
    sample_cov = tfp.stats.covariance(chain, sample_axis=[0, 1])

    true_mean = tf.reduce_mean(true_samples, axis=0)
    # BUG FIX: the reference covariance was previously computed from `chain`
    # itself, so the covariance assertion compared the sample covariance to
    # itself. Derive it from independent draws of the target instead.
    true_cov = tfp.stats.covariance(true_samples, sample_axis=0)

    self.assertAllClose(true_mean, sample_mean, rtol=0.05, atol=0.05)
    self.assertAllClose(true_cov, sample_cov, rtol=0.05, atol=0.05)

    self.assertAllClose(
        tf.reduce_mean(tf.exp(tf.minimum(0., log_accept_ratio_trace))),
        0.9,
        rtol=0.1)
def testSignAdaptation(self):
    """Checks that sign adaptation nudges the control in both directions."""
    control = self._constant(1.)
    output = self._constant(0.5)
    rate = self._constant(0.1)

    # Output (0.5) is below the set point (1.) -> control shrinks.
    shrunk = fun_mcmc.sign_adaptation(
        control=control,
        output=output,
        set_point=self._constant(1.),
        adaptation_rate=rate)
    self.assertAllClose(shrunk, 1. / 1.1)

    # Output (0.5) is above the set point (0.) -> control grows.
    grown = fun_mcmc.sign_adaptation(
        control=control,
        output=output,
        set_point=self._constant(0.),
        adaptation_rate=rate)
    self.assertAllClose(grown, 1. * 1.1)
def testRaggedIntegrator(self):
    """Checks batched Hamiltonian integration with per-batch step counts.

    Integrates a 2-element batch for [1, 2] steps in one ragged call and
    verifies it agrees with separate non-ragged integrations of 1 and 2
    steps, both for the final states and for the integrator trace.
    """

    def target_log_prob_fn(q):
        return -q**2, q

    def kinetic_energy_fn(p):
        return tf.abs(p)**3., p

    integrator_fn = lambda state, num_steps: fun_mcmc.hamiltonian_integrator(  # pylint: disable=g-long-lambda
        state,
        num_steps=num_steps,
        integrator_step_fn=lambda state: fun_mcmc.leapfrog_step(  # pylint: disable=g-long-lambda
            state,
            step_size=0.1,
            target_log_prob_fn=target_log_prob_fn,
            kinetic_energy_fn=kinetic_energy_fn),
        kinetic_energy_fn=kinetic_energy_fn,
        integrator_trace_fn=lambda state, extra: (state, extra))

    state = tf.zeros([2], dtype=self._dtype)
    momentum = tf.ones([2], dtype=self._dtype)
    target_log_prob, _, state_grads = fun_mcmc.call_potential_fn_with_grads(
        target_log_prob_fn, state)

    start_state = fun_mcmc.IntegratorState(
        target_log_prob=target_log_prob,
        momentum=momentum,
        state=state,
        state_grads=state_grads,
        state_extra=state,
    )

    state_1 = integrator_fn(start_state, 1)
    state_2 = integrator_fn(start_state, 2)
    state_1_2 = integrator_fn(start_state, [1, 2])

    # Make sure integrators actually integrated to different points.
    self.assertFalse(np.all(state_1[0].state == state_2[0].state))

    # Ragged integration should be consistent with the non-ragged equivalent.
    def get_batch(state, idx):
        # For the integrator trace, we'll grab the final value.
        return util.map_tree(
            lambda x: x[idx] if len(x.shape) == 1 else x[-1, idx], state)

    self.assertAllClose(get_batch(state_1, 0), get_batch(state_1_2, 0))
    self.assertAllClose(get_batch(state_2, 0), get_batch(state_1_2, 1))

    # Ragged traces should be equal up to the number of steps for the batch
    # element.
    def get_slice(state, num, idx):
        return util.map_tree(lambda x: x[:num, idx], state[1].integrator_trace)

    self.assertAllClose(get_slice(state_1, 1, 0), get_slice(state_1_2, 1, 0))
    self.assertAllClose(get_slice(state_2, 2, 0), get_slice(state_1_2, 2, 1))
def testAdam(self):
    """Optimizes a simple quadratic with Adam and checks convergence."""

    def quadratic_loss(x, y):
        # Minimum at (1, 2) with a loss of 0.
        return tf.square(x - 1.) + tf.square(y - 2.), []

    def step(opt_state):
        return fun_mcmc.adam_step(
            opt_state, quadratic_loss, learning_rate=self._constant(0.01))

    initial = fun_mcmc.adam_init([self._constant(0.), self._constant(0.)])
    _, [(x_trace, y_trace), loss_trace] = fun_mcmc.trace(
        initial,
        step,
        num_steps=1000,
        trace_fn=lambda state, extra: [state.state, extra.loss])

    self.assertAllClose(1., x_trace[-1], atol=1e-3)
    self.assertAllClose(2., y_trace[-1], atol=1e-3)
    self.assertAllClose(0., loss_trace[-1], atol=1e-3)
def testGradientDescent(self):
    """Optimizes a simple quadratic with plain gradient descent."""

    def quadratic_loss(x, y):
        # Minimum at (1, 2) with a loss of 0.
        return tf.square(x - 1.) + tf.square(y - 2.), []

    def step(gd_state):
        return fun_mcmc.gradient_descent_step(
            gd_state, quadratic_loss, learning_rate=self._constant(0.01))

    start = fun_mcmc.GradientDescentState(
        [self._constant(0.), self._constant(0.)])
    _, [(x_trace, y_trace), loss_trace] = fun_mcmc.trace(
        start,
        step,
        num_steps=1000,
        trace_fn=lambda state, extra: [state.state, extra.loss])

    self.assertAllClose(1., x_trace[-1], atol=1e-3)
    self.assertAllClose(2., y_trace[-1], atol=1e-3)
    self.assertAllClose(0., loss_trace[-1], atol=1e-3)
def testSimpleDualAverages(self):
    """Minimizes a quadratic via simple dual averaging.

    The running mean of the SDA iterates (not the last iterate) should
    converge to the minimizer (1, 2).
    """

    def loss_fn(x, y):
        return tf.square(x - 1.) + tf.square(y - 2.), []

    def kernel(sda_state, rms_state):
        sda_state, _ = fun_mcmc.simple_dual_averages_step(sda_state, loss_fn, 1.)
        # Track the running mean of the iterates alongside the optimizer.
        rms_state, _ = fun_mcmc.running_mean_step(rms_state, sda_state.state)
        return (sda_state, rms_state), rms_state.mean

    _, (x, y) = fun_mcmc.trace(
        (
            fun_mcmc.simple_dual_averages_init(
                [self._constant(0.), self._constant(0.)]),
            fun_mcmc.running_mean_init([[], []], [self._dtype, self._dtype]),
        ),
        kernel,
        num_steps=1000,
    )

    self.assertAllClose(1., x[-1], atol=1e-1)
    self.assertAllClose(2., y[-1], atol=1e-1)
def testRandomWalkMetropolis(self):
    """Samples a 4-state categorical via RWM with an asymmetric proposal.

    The proposal supplies its forward/reverse log-ratio correction, so
    the chain's one-hot sample mean should match softmax(target_logits).
    """
    num_steps = 1000
    state = tf.ones([16], dtype=tf.int32)
    target_logits = self._constant([1., 2., 3., 4.]) + 2.
    proposal_logits = self._constant([4., 3., 2., 1.]) + 2.

    def target_log_prob_fn(x):
        return tf.gather(target_logits, x), ()

    def proposal_fn(x, seed):
        current_logits = tf.gather(proposal_logits, x)
        proposal = util.random_categorical(proposal_logits[tf.newaxis],
                                           x.shape[0], seed)[0]
        proposed_logits = tf.gather(proposal_logits, proposal)
        # Second element carries the log proposal ratio (Hastings correction).
        return tf.cast(proposal, x.dtype), ((), proposed_logits - current_logits)

    def kernel(rwm_state, seed):
        rwm_seed, seed = util.split_seed(seed, 2)
        rwm_state, rwm_extra = fun_mcmc.random_walk_metropolis(
            rwm_state,
            target_log_prob_fn=target_log_prob_fn,
            proposal_fn=proposal_fn,
            seed=rwm_seed)
        return (rwm_state, seed), rwm_extra

    seed = self._make_seed(_test_seed())

    # Subtle: Unlike TF, JAX needs a data dependency from the inputs to outputs
    # for the jit to do anything.
    _, chain = tf.function(lambda state, seed: fun_mcmc.trace(  # pylint: disable=g-long-lambda
        state=(fun_mcmc.random_walk_metropolis_init(state, target_log_prob_fn),
               seed),
        fn=kernel,
        num_steps=num_steps,
        trace_fn=lambda state, extra: state[0].state))(state, seed)
    # Discard the warmup samples.
    chain = chain[500:]

    sample_mean = tf.reduce_mean(tf.one_hot(chain, 4), axis=[0, 1])
    self.assertAllClose(tf.nn.softmax(target_logits), sample_mean, atol=0.11)
@parameterized.named_parameters(
    ('Basic', (10, 3), None),
    ('Batched', (10, 4, 3), None),
    ('Aggregated0', (10, 4, 3), 0),
    ('Aggregated1', (10, 4, 3), 1),
    ('Aggregated01', (10, 4, 3), (0, 1)),
    ('Aggregated02', (10, 4, 5, 3), (0, 2)),
)
def testRunningMean(self, shape, aggregation):
    """Streaming mean over the stream axis plus optional aggregation axes.

    Feeds `data` one slice at a time and compares the streaming result
    against `np.mean` over the corresponding axes of the stacked array.
    """
    rng = np.random.RandomState(_test_seed())
    data = self._constant(rng.randn(*shape))

    def kernel(rms, idx):
        rms, _ = fun_mcmc.running_mean_step(rms, data[idx], axis=aggregation)
        return (rms, idx + 1), ()

    # `aggregation` axes are relative to each fed slice; shift by one to
    # get the matching axes of the full array (axis 0 is the stream).
    true_aggregation = (0,) + (() if aggregation is None else tuple(
        [a + 1 for a in util.flatten_tree(aggregation)]))
    true_mean = np.mean(data, true_aggregation)

    (rms, _), _ = fun_mcmc.trace(
        state=(fun_mcmc.running_mean_init(true_mean.shape, data.dtype), 0),
        fn=kernel,
        num_steps=len(data),
        trace_fn=lambda *args: ())

    self.assertAllClose(true_mean, rms.mean)
def testRunningMeanMaxPoints(self):
    """Windowed running mean tracks a shift in the data distribution.

    The first `window_size` points are averaged exactly; past that the
    estimator degrades to an exponential moving average that should pick
    up the shifted mean of the second half of the data.
    """
    window_size = 100
    rng = np.random.RandomState(_test_seed())
    # First window_size points ~ N(0, 1); the rest ~ N(1, 2).
    data = self._constant(
        np.concatenate(
            [rng.randn(window_size), 1. + 2. * rng.randn(window_size * 10)],
            axis=0))

    def kernel(rms, idx):
        rms, _ = fun_mcmc.running_mean_step(
            rms, data[idx], window_size=window_size)
        return (rms, idx + 1), rms.mean

    _, mean = fun_mcmc.trace(
        state=(fun_mcmc.running_mean_init([], data.dtype), 0),
        fn=kernel,
        num_steps=len(data),
    )
    # Up to window_size, we compute the running mean exactly.
    self.assertAllClose(np.mean(data[:window_size]), mean[window_size - 1])
    # After window_size, we're doing exponential moving average, and pick up the
    # mean after the change in the distribution. Since the moving average is
    # computed only over ~window_size points, this test is rather noisy.
    self.assertAllClose(1., mean[-1], atol=0.2)
@parameterized.named_parameters(
    ('Basic', (10, 3), None),
    ('Batched', (10, 4, 3), None),
    ('Aggregated0', (10, 4, 3), 0),
    ('Aggregated1', (10, 4, 3), 1),
    ('Aggregated01', (10, 4, 3), (0, 1)),
    ('Aggregated02', (10, 4, 5, 3), (0, 2)),
)
def testRunningVariance(self, shape, aggregation):
    """Streaming mean/variance match np.mean/np.var over the same axes."""
    rng = np.random.RandomState(_test_seed())
    data = self._constant(rng.randn(*shape))

    # `aggregation` axes are relative to each fed slice; shift by one to
    # get the matching axes of the full array (axis 0 is the stream).
    true_aggregation = (0,) + (() if aggregation is None else tuple(
        [a + 1 for a in util.flatten_tree(aggregation)]))
    true_mean = np.mean(data, true_aggregation)
    true_var = np.var(data, true_aggregation)

    def kernel(rvs, idx):
        rvs, _ = fun_mcmc.running_variance_step(rvs, data[idx], axis=aggregation)
        return (rvs, idx + 1), ()

    (rvs, _), _ = fun_mcmc.trace(
        state=(fun_mcmc.running_variance_init(true_mean.shape,
                                              data[0].dtype), 0),
        fn=kernel,
        num_steps=len(data),
        trace_fn=lambda *args: ())
    self.assertAllClose(true_mean, rvs.mean)
    self.assertAllClose(true_var, rvs.variance)
def testRunningVarianceMaxPoints(self):
    """Windowed running variance tracks a distribution shift.

    Exact statistics up to `window_size` points, exponential moving
    average behaviour afterwards (should approach the shifted N(1, 2^2)).
    """
    window_size = 100
    rng = np.random.RandomState(_test_seed())
    # First window_size points ~ N(0, 1); the rest ~ N(1, 4).
    data = self._constant(
        np.concatenate(
            [rng.randn(window_size), 1. + 2. * rng.randn(window_size * 10)],
            axis=0))

    def kernel(rvs, idx):
        rvs, _ = fun_mcmc.running_variance_step(
            rvs, data[idx], window_size=window_size)
        return (rvs, idx + 1), (rvs.mean, rvs.variance)

    _, (mean, var) = fun_mcmc.trace(
        state=(fun_mcmc.running_variance_init([], data.dtype), 0),
        fn=kernel,
        num_steps=len(data),
    )
    # Up to window_size, we compute the running mean/variance exactly.
    self.assertAllClose(np.mean(data[:window_size]), mean[window_size - 1])
    self.assertAllClose(np.var(data[:window_size]), var[window_size - 1])
    # After window_size, we're doing exponential moving average, and pick up the
    # mean/variance after the change in the distribution. Since the moving
    # average is computed only over ~window_size points, this test is rather
    # noisy.
    self.assertAllClose(1., mean[-1], atol=0.2)
    self.assertAllClose(4., var[-1], atol=0.8)
@parameterized.named_parameters(
    ('Basic', (10, 3), None),
    ('Batched', (10, 4, 3), None),
    ('Aggregated0', (10, 4, 3), 0),
    ('Aggregated01', (10, 4, 5, 3), (0, 1)),
)
def testRunningCovariance(self, shape, aggregation):
    """Streaming covariance matches the `_gen_cov` reference estimate."""
    rng = np.random.RandomState(_test_seed())
    data = self._constant(rng.randn(*shape))

    # `aggregation` axes are relative to each fed slice; shift by one to
    # get the matching axes of the full array (axis 0 is the stream).
    true_aggregation = (0,) + (() if aggregation is None else tuple(
        [a + 1 for a in util.flatten_tree(aggregation)]))
    true_mean = np.mean(data, true_aggregation)
    true_cov = _gen_cov(data, true_aggregation)

    def kernel(rcs, idx):
        rcs, _ = fun_mcmc.running_covariance_step(
            rcs, data[idx], axis=aggregation)
        return (rcs, idx + 1), ()

    (rcs, _), _ = fun_mcmc.trace(
        state=(fun_mcmc.running_covariance_init(true_mean.shape,
                                                data[0].dtype), 0),
        fn=kernel,
        num_steps=len(data),
        trace_fn=lambda *args: ())
    self.assertAllClose(true_mean, rcs.mean)
    self.assertAllClose(true_cov, rcs.covariance)
def testRunningCovarianceMaxPoints(self):
    """Windowed running covariance tracks a 2-D distribution shift.

    Exact statistics up to `window_size` points, exponential moving
    average behaviour afterwards (should approach mean [1, 2] and a
    diagonal covariance of [4, 9]).
    """
    window_size = 100
    rng = np.random.RandomState(_test_seed())
    # First window_size rows ~ N(0, I); the rest have mean [1, 2] and
    # per-component scales [2, 3].
    data = self._constant(
        np.concatenate(
            [
                rng.randn(window_size, 2),
                np.array([1., 2.]) +
                np.array([2., 3.]) * rng.randn(window_size * 10, 2)
            ],
            axis=0,
        ))

    def kernel(rvs, idx):
        rvs, _ = fun_mcmc.running_covariance_step(
            rvs, data[idx], window_size=window_size)
        return (rvs, idx + 1), (rvs.mean, rvs.covariance)

    _, (mean, cov) = fun_mcmc.trace(
        state=(fun_mcmc.running_covariance_init([2], data.dtype), 0),
        fn=kernel,
        num_steps=len(data),
    )
    # Up to window_size, we compute the running mean/variance exactly.
    self.assertAllClose(
        np.mean(data[:window_size], axis=0), mean[window_size - 1])
    self.assertAllClose(
        _gen_cov(data[:window_size], axis=0), cov[window_size - 1])
    # After window_size, we're doing exponential moving average, and pick up the
    # mean/variance after the change in the distribution. Since the moving
    # average is computed only over ~window_size points, this test is rather
    # noisy.
    self.assertAllClose(np.array([1., 2.]), mean[-1], atol=0.2)
    self.assertAllClose(np.array([[4., 0.], [0., 9.]]), cov[-1], atol=1.)
@parameterized.named_parameters(
    ('BasicScalar', (10, 20), 1),
    ('BatchedScalar', (10, 5, 20), 2),
    ('BasicVector', (10, 5, 20), 1),
    ('BatchedVector', (10, 5, 20, 7), 2),
)
def testPotentialScaleReduction(self, chain_shape, independent_chain_ndims):
    """Streaming R-hat matches TFP's batch `potential_scale_reduction`."""
    rng = np.random.RandomState(_test_seed())
    # Give each chain a distinct mean so R-hat is non-trivial.
    chain_means = rng.randn(*((1,) + chain_shape[1:])).astype(np.float32)
    chains = 0.4 * rng.randn(*chain_shape).astype(np.float32) + chain_means

    true_rhat = tfp.mcmc.potential_scale_reduction(
        chains, independent_chain_ndims=independent_chain_ndims)

    chains = self._constant(chains)
    psrs, _ = fun_mcmc.trace(
        state=fun_mcmc.potential_scale_reduction_init(chain_shape[1:],
                                                      self._dtype),
        # Feed one time-slice per step; `num_points` indexes the stream.
        fn=lambda psrs: fun_mcmc.potential_scale_reduction_step(  # pylint: disable=g-long-lambda
            psrs, chains[psrs.num_points]),
        num_steps=chain_shape[0],
        trace_fn=lambda *_: ())

    running_rhat = fun_mcmc.potential_scale_reduction_extract(
        psrs, independent_chain_ndims=independent_chain_ndims)
    self.assertAllClose(true_rhat, running_rhat)
@parameterized.named_parameters(
    ('Basic', (), None, None),
    ('Batched1', (2,), 1, None),
    ('Batched2', (3, 2), 1, None),
    ('Aggregated0', (3, 2), 1, 0),
    ('Aggregated01', (3, 4, 2), 1, (0, 1)),
)
def testRunningApproximateAutoCovariance(self, state_shape, event_ndims,
                                         aggregation):
    """Streaming auto-covariance vs. TFP's batched auto-correlation.

    The streaming auto-covariance normalized by its lag-0 term should
    approximate `tfp.stats.auto_correlation` on the same chain.
    """
    # We'll use HMC as the source of our chain.
    # While HMC is being sampled, we also compute the running autocovariance.
    step_size = 0.2
    num_steps = 1000
    num_leapfrog_steps = 10
    max_lags = 300

    state = tf.zeros(state_shape, dtype=self._dtype)

    def target_log_prob_fn(x):
        # Standard normal; reduce over the event dim when one is declared.
        lp = -0.5 * tf.square(x)
        if event_ndims is None:
            return lp, ()
        else:
            return tf.reduce_sum(lp, -1), ()

    def kernel(hmc_state, raac_state, seed):
        hmc_seed, seed = util.split_seed(seed, 2)
        hmc_state, hmc_extra = fun_mcmc.hamiltonian_monte_carlo(
            hmc_state,
            step_size=step_size,
            num_integrator_steps=num_leapfrog_steps,
            target_log_prob_fn=target_log_prob_fn,
            seed=hmc_seed)
        raac_state, _ = fun_mcmc.running_approximate_auto_covariance_step(
            raac_state, hmc_state.state, axis=aggregation)
        return (hmc_state, raac_state, seed), hmc_extra

    seed = self._make_seed(_test_seed())

    # Subtle: Unlike TF, JAX needs a data dependency from the inputs to outputs
    # for the jit to do anything.
    (_, raac_state, _), chain = tf.function(lambda state, seed: fun_mcmc.trace(  # pylint: disable=g-long-lambda
        state=(
            fun_mcmc.hamiltonian_monte_carlo_init(state, target_log_prob_fn),
            fun_mcmc.running_approximate_auto_covariance_init(
                max_lags=max_lags,
                state_shape=state_shape,
                dtype=state.dtype,
                axis=aggregation),
            seed,
        ),
        fn=kernel,
        num_steps=num_steps,
        trace_fn=lambda state, extra: state[0].state))(state, seed)

    # `aggregation` axes are relative to the fed state; shift by one to
    # address the stacked chain (axis 0 is time).
    true_aggregation = (0,) + (() if aggregation is None else tuple(
        [a + 1 for a in util.flatten_tree(aggregation)]))
    true_variance = np.array(
        tf.math.reduce_variance(np.array(chain), true_aggregation))
    true_autocov = np.array(
        tfp.stats.auto_correlation(np.array(chain), axis=0, max_lags=max_lags))
    if aggregation is not None:
        true_autocov = tf.reduce_mean(
            true_autocov, [a + 1 for a in util.flatten_tree(aggregation)])

    self.assertAllClose(true_variance, raac_state.auto_covariance[0], 1e-5)
    self.assertAllClose(
        true_autocov,
        raac_state.auto_covariance / raac_state.auto_covariance[0],
        atol=0.1)
@parameterized.named_parameters(
    ('Positional1', 0.),
    ('Positional2', (0., 1.)),
    ('Named1', {'a': 0.}),
    ('Named2', {'a': 0., 'b': 1.}),
)
def testSurrogateLossFn(self, state):
    """Surrogate loss reports 0. but exposes the grad fn's grads/extra."""

    def grad_fn(*args, **kwargs):
        # This is uglier than user code due to the parameterized test...
        new_state = util.unflatten_tree(state, util.flatten_tree((args, kwargs)))
        return util.map_tree(lambda x: x + 1., new_state), new_state

    loss_fn = fun_mcmc.make_surrogate_loss_fn(grad_fn)

    # Mutate the state to make sure we didn't capture anything.
    state = util.map_tree(lambda x: self._constant(x + 1.), state)
    ret, extra, grads = fun_mcmc.call_potential_fn_with_grads(loss_fn, state)
    # The default is 0.
    self.assertAllClose(0., ret)
    # The gradients of the surrogate loss are state + 1.
    self.assertAllClose(util.map_tree(lambda x: x + 1., state), grads)
    self.assertAllClose(state, extra)
def testSurrogateLossFnDecorator(self):
    """Checks the decorator form of `make_surrogate_loss_fn`."""

    @fun_mcmc.make_surrogate_loss_fn(loss_value=1.)
    def surrogate(_):
        # (gradient, extra) pair returned by the wrapped grad fn.
        return 3., 2.

    loss, extra, grads = fun_mcmc.call_potential_fn_with_grads(surrogate, 0.)

    self.assertAllClose(1., loss)
    self.assertAllClose(2., extra)
    self.assertAllClose(3., grads)
@parameterized.named_parameters(
    ('Probability', True),
    ('Loss', False),
)
def testReparameterizeFn(self, track_volume):
    """Reparameterizes a potential through an invertible transport map.

    With `track_volume=True` the transformed potential also includes the
    map's log-Jacobian volume correction.
    """

    def potential_fn(x, y):
        return -x**2 + -y**2, ()

    def transport_map_fn(x, y):
        # Returns (mapped state, (extra, log-det-Jacobian)).
        return [2 * x, 3 * y], ((), tf.math.log(2.) + tf.math.log(3.))

    def inverse_map_fn(x, y):
        return [x / 2, y / 3], ((), -tf.math.log(2.) - tf.math.log(3.))

    transport_map_fn.inverse = inverse_map_fn

    (transformed_potential_fn,
     transformed_init_state) = fun_mcmc.reparameterize_potential_fn(
         potential_fn,
         transport_map_fn,
         [self._constant(2.), self._constant(3.)],
         track_volume=track_volume)

    # init state [2, 3] pulls back to [1, 1] under the inverse map.
    self.assertIsInstance(transformed_init_state, list)
    self.assertAllClose([1., 1.], transformed_init_state)
    transformed_potential, (orig_space, _, _) = transformed_potential_fn(1., 1.)
    potential = potential_fn(2., 3.)[0]
    if track_volume:
        potential += tf.math.log(2.) + tf.math.log(3.)

    self.assertAllClose([2., 3.], orig_space)
    self.assertAllClose(potential, transformed_potential)
@backend.multi_backend_test(globals(), 'fun_mcmc_test')
class FunMCMCTest32(FunMCMCTest):
    """Runs the FunMCMCTest suite with float32 precision."""

    @property
    def _dtype(self):
        return tf.float32
@backend.multi_backend_test(globals(), 'fun_mcmc_test')
class FunMCMCTest64(FunMCMCTest):
    """Runs the FunMCMCTest suite with float64 precision."""

    @property
    def _dtype(self):
        return tf.float64
# Remove the dtype-agnostic base class so the test runner only picks up the
# concrete float32/float64 subclasses defined above.
del FunMCMCTest

if __name__ == '__main__':
    real_tf.test.main()
| 33.85592 | 112 | 0.645304 |
3609324ab2e387d9e64cd991fb6adda8c5f952b1 | 84,382 | py | Python | src/config/api-server/vnc_cfg_api_server/tests/in_place_upgrade/test_r2002.py | atsgen/contrail-controller | 6f552316d9bacab29deb7d2507a49fcc96f70b52 | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server/tests/in_place_upgrade/test_r2002.py | atsgen/contrail-controller | 6f552316d9bacab29deb7d2507a49fcc96f70b52 | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server/tests/in_place_upgrade/test_r2002.py | atsgen/contrail-controller | 6f552316d9bacab29deb7d2507a49fcc96f70b52 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
import logging
import unittest
import six
from vnc_api.exceptions import RefsExistError
# Pawel Z.: I decided to import with an asterisk because in this file
# I need all available vnc_api objects. Importing hundreds of objects
# line by line isn't effective and introduces a lot of unnecessary noise.
from vnc_api.vnc_api import * # noqa: F403
from vnc_cfg_api_server.tests.in_place_upgrade import test_case
logger = logging.getLogger(__name__)
class TestInPlaceUpgradeR2002(test_case.InPlaceUpgradeTestCase):
def setUp(self, **kwargs):
super(TestInPlaceUpgradeR2002, self).setUp()
gsc_fq_name = GlobalSystemConfig().fq_name
self.gsc = self.api.global_system_config_read(gsc_fq_name)
def id(self): # noqa: A003
"""ID method is a workaround for conflicting names.
While run tests under PY2 and PY3 at the same time.
The other way would be to randomize fq_names for each object.
:return:
"""
py_v = '-py2' if six.PY2 else '-py3'
return super(TestInPlaceUpgradeR2002, self).id() + py_v
@property
def api(self):
return self._vnc_lib
def test_enable_4byte_asn(self):
self.gsc.enable_4byte_as = True
self.api.global_system_config_update(self.gsc)
def _project_fetch_or_create(self, test_id):
project = Project(name='project-{}'.format(test_id))
try:
uuid = self.api.project_create(project)
except RefsExistError:
uuid = self.api.fq_name_to_id('project', project.fq_name)
project.set_uuid(uuid)
return project
def test_job_template_create(self):
exe_info_list = ExecutableInfoListType()
exe_info_list.set_executable_info([
ExecutableInfoType(executable_path='/tmp/fake',
executable_args='a b c',
job_completion_weightage=10)])
playbook_info_list = PlaybookInfoListType()
playbook_info_list.set_playbook_info([
PlaybookInfoType(playbook_uri='/tmp/fake/uri/playbook.yaml',
multi_device_playbook=False,
job_completion_weightage=5)])
prop_map = {'name': 'jt-{}'.format(self.id()),
'parent_obj': self.gsc,
'parent_type': 'global-system-config',
'display_name': 'job template test',
'job_template_executables': exe_info_list,
'job_template_output_schema': 'some_string',
'job_template_output_ui_schema': 'some_string',
'job_template_concurrency_level': 'fabric',
'job_template_description': 'test template',
'job_template_type': 'workflow',
'job_template_input_ui_schema': 'some_string',
'job_template_synchronous_job': False,
'job_template_input_schema': 'some_string',
'job_template_playbooks': playbook_info_list,
'annotations': {}}
obj = self.set_properties(JobTemplate, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_fabric_create(self):
fabric_credentials = DeviceCredentialList(device_credential=[
DeviceCredential(
credential=UserCredentials(username='admin', password='admin'),
vendor='juniper',
device_family='juniper')])
prop_map = {'name': 'f-{}'.format(self.id()),
'display_name': 'fabric test',
'parent_obj': self.gsc,
'parent_type': 'global-system-config',
'fabric_ztp': True,
'fabric_os_version': 'junos',
'fabric_credentials': fabric_credentials,
'fabric_enterprise_style': False,
'disable_vlan_vn_uniqueness_check': False,
'annotations': {}}
obj = self.set_properties(Fabric, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_device_chassis_create(self):
prop_map = {'name': 'dc-{}'.format(self.id()),
'annotations': {},
'device_chassis_type': 'fake_chassis_type',
'display_name': 'device chassis'}
obj = self.set_properties(DeviceChassis, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_device_functional_group_create(self):
project = self._project_fetch_or_create(self.id())
bridging_roles = RoutingBridgingRolesType(rb_roles=['CRB', 'ERB'])
prop_map = {'name': 'dfg-{}'.format(self.id()),
'annotations': {},
'parent_obj': project,
'parent_type': 'project',
'device_functional_group_description': 'some description',
'device_functional_group_os_version': 'junos',
'device_functional_group_routing_bridging_roles':
bridging_roles,
'display_name': 'device functional group'}
obj = self.set_properties(DeviceFunctionalGroup, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_hardware_inventory_create(self):
default_physical_router = PhysicalRouter()
try:
uuid = self.api.physical_router_create(default_physical_router)
except RefsExistError:
uuid = self.api.fq_name_to_id('physical_router',
default_physical_router.fq_name)
default_physical_router.set_uuid(uuid)
prop_map = {'name': 'hi-{}'.format(self.id()),
'annotations': {},
'parent_obj': default_physical_router,
'parent_type': 'physical-router',
'display_name': 'hardware inventory',
'hardware_inventory_inventory_info':
'{"id": 123, "name": "fake"}'}
obj = self.set_properties(HardwareInventory, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_physical_router_create(self):
dnsmasq = DnsmasqLeaseParameters(
lease_expiry_time=100,
client_id='4d972916-3204-11ea-a6fa-d7a3d77d36a2')
auto_system = AutonomousSystemsType(asn=[294967295])
junos_service_ports = JunosServicePorts(service_port=['2010'])
snmp_credentials = SNMPCredentials(
version=1,
local_port=8081,
retries=3,
timeout=300,
v2_community='some_string',
v3_security_name='some_string',
v3_security_level='some_string',
v3_security_engine_id='some_string',
v3_context='some_string',
v3_context_engine_id='some_string',
v3_authentication_protocol='some_string',
v3_authentication_password='some_string',
v3_privacy_protocol='some_string',
v3_privacy_password='some_string',
v3_engine_id='some_string',
v3_engine_boots=3,
v3_engine_time=300)
user_credentials = UserCredentials(username='admin', password='admin')
routing_bridging_roles = RoutingBridgingRolesType(rb_roles=['CRB'])
telemetry_info = TelemetryStateInfo(server_ip='10.100.0.100',
server_port='8080',
resource=[TelemetryResourceInfo()])
prop_map = {'name': 'pr-{}'.format(self.id()),
'annotations': {},
'parent_obj': self.gsc,
'parent_type': 'global-system-config',
'display_name': 'some_string',
'physical_router_autonomous_system': auto_system,
'physical_router_cli_commit_state': 'in_sync',
'physical_router_dataplane_ip': 'some_string',
'physical_router_device_family': 'some_string',
'physical_router_dhcp_parameters': dnsmasq,
'physical_router_encryption_type': 'none',
'physical_router_hostname': 'some_string',
'physical_router_junos_service_ports': junos_service_ports,
'physical_router_lldp': False,
'physical_router_loopback_ip': '127.0.0.1',
'physical_router_managed_state': 'dhcp',
'physical_router_management_ip': '10.100.100.255',
'physical_router_management_mac': 'some_string',
'physical_router_os_version': 'some_string',
'physical_router_product_name': 'some_string',
'physical_router_replicator_loopback_ip': 'some_string',
'physical_router_role': 'spine',
'physical_router_serial_number': 'some_string',
'physical_router_snmp': False,
'physical_router_snmp_credentials': snmp_credentials,
'physical_router_supplemental_config': 'some_string',
'physical_router_underlay_config': 'some_string',
'physical_router_underlay_managed': False,
'physical_router_user_credentials': user_credentials,
'physical_router_vendor_name': 'some_string',
'physical_router_vnc_managed': False,
'routing_bridging_roles': routing_bridging_roles,
'telemetry_info': telemetry_info}
obj = self.set_properties(PhysicalRouter, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_telemetry_profile_create(self):
project = self._project_fetch_or_create(self.id())
prop_map = {'name': 'tp-{}'.format(self.id()),
'annotations': {},
'parent_obj': project,
'parent_type': 'project',
'display_name': 'some_string',
'telemetry_profile_is_default': False}
obj = self.set_properties(TelemetryProfile, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_storm_control_profile_create(self):
project = self._project_fetch_or_create(self.id())
scp_params = StormControlParameters(
storm_control_actions=['interface-shutdown'],
recovery_timeout=30,
no_unregistered_multicast=True,
no_registered_multicast=False,
no_unknown_unicast=False,
no_multicast=False,
no_broadcast=False,
bandwidth_percent=40)
prop_map = {'name': 'scp-{}'.format(self.id()),
'annotations': {},
'parent_obj': project,
'parent_type': 'project',
'display_name': 'some_string',
'storm_control_parameters': scp_params}
obj = self.set_properties(StormControlProfile, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_sflow_profile_create(self):
project = self._project_fetch_or_create(self.id())
stats_collection_freq = StatsCollectionFrequency(sample_rate=42,
polling_interval=10,
direction='ingress')
sfp_params = SflowParameters(
stats_collection_frequency=stats_collection_freq,
agent_id='10.100.150.126',
adaptive_sample_rate=500,
enabled_interface_type='custom',
enabled_interface_params=[EnabledInterfaceParams(
name='default-interface-params',
stats_collection_frequency=stats_collection_freq,
)])
prop_map = {'name': 'sfp-{}'.format(self.id()),
'annotations': {},
'parent_obj': project,
'parent_type': 'project',
'display_name': 'sflow profile',
'sflow_parameters': sfp_params,
'sflow_profile_is_default': False}
obj = self.set_properties(SflowProfile, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_routing_instance_create(self):
        """Verify the schema transformer accepts a RoutingInstance create/update.

        Builds a parent VirtualNetwork first (reusing an existing one if the
        create collides), then populates the routing instance with IPv4/IPv6
        service-chain info, static routes, and default CE protocol settings.
        """
        project = self._project_fetch_or_create(self.id())
        vn = VirtualNetwork(name='vn-{}'.format(self.id()), parent_obj=project)
        # Re-running the test can hit an already-created VN; fall back to
        # resolving its existing UUID instead of failing.
        try:
            uuid = self.api.virtual_network_create(vn)
        except RefsExistError:
            uuid = self.api.fq_name_to_id('virtual_network', vn.fq_name)
        vn.set_uuid(uuid)
        ri_name = 'default-routing-instance'
        ri_fq_name = ':'.join(vn.fq_name + [ri_name])
        sci = ServiceChainInfo(
            service_chain_id=ri_fq_name,
            prefix=['20.0.0.0/24'],
            routing_instance=ri_name,
            service_chain_address='0.255.255.250',
            service_instance='default-domain:default-project:test_service',
            sc_head=True)
        sciv6 = ServiceChainInfo(
            service_chain_id=ri_fq_name,
            prefix=['1000::/16'],
            routing_instance=ri_name,
            service_chain_address='::0.255.255.252',
            service_instance='default-domain:default-project:test_service_v6',
            sc_head=False)
        static_route_entries = StaticRouteEntriesType(route=[
            StaticRouteType(prefix='test',
                            next_hop='10.100.100.100',
                            route_target=['test-route-target'],
                            community='')])
        default_ce_protocol = DefaultProtocolType(
            bgp=ProtocolBgpType(autonomous_system=42),
            ospf=ProtocolOspfType(area=1))
        prop_map = {'name': ri_name,
                    'annotations': {},
                    'parent_obj': vn,
                    'parent_type': vn.object_type,
                    'default_ce_protocol': default_ce_protocol,
                    'display_name': 'some_string',
                    'evpn_ipv6_service_chain_information': sciv6,
                    'evpn_service_chain_information': sci,
                    'ipv6_service_chain_information': sciv6,
                    'routing_instance_fabric_snat': False,
                    'routing_instance_has_pnf': False,
                    'routing_instance_is_default': False,
                    'service_chain_information': sci,
                    'static_route_entries': static_route_entries}
        obj = self.set_properties(RoutingInstance, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_control_node_zone_create(self):
prop_map = {
'name': 'cnz-{}'.format(self.id()),
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ControlNodeZone, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_service_endpoint_create(self):
prop_map = {
'annotations': {},
'display_name': 'service endpoint name'
}
obj = self.set_properties(ServiceEndpoint, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_instance_ip_create(self):
        """Verify the schema transformer accepts an InstanceIp create/update.

        Creates a NetworkIpam and a VirtualNetwork with a 10.100.0.0/16
        subnet, then allocates an instance IP inside that subnet and links
        it to both objects before the create/update assertion.
        """
        net_ipam = NetworkIpam(name=self.id())
        net_ipam.uuid = self.api.network_ipam_create(net_ipam)
        vn = VirtualNetwork(name=self.id())
        vn_properties = VirtualNetworkType()
        # Different VXLAN id per interpreter major version — presumably to
        # avoid collisions when py2 and py3 runs share a backend; TODO confirm.
        vn_properties.set_vxlan_network_identifier(2001 if six.PY2 else 2002)
        vn_properties.set_forwarding_mode('l2_l3')
        vn.set_virtual_network_properties(vn_properties)
        vn.add_network_ipam(net_ipam, VnSubnetsType(
            [IpamSubnetType(SubnetType('10.100.0.0', 16))]))
        vn.uuid = self.api.virtual_network_create(vn)
        prop_map = {
            'name': self.id(),
            'instance_ip_address': '10.100.0.34',
            'instance_ip_family': 'v4',
            'instance_ip_mode': 'active-standby',
            'secondary_ip_tracking_ip': None,
            'subnet_uuid': None,
            'instance_ip_subscriber_tag': 'somestring',
            'instance_ip_secondary': False,
            'instance_ip_local_ip': False,
            'service_instance_ip': False,
            'service_health_check_ip': False,
            'instance_ip_subnet': None,
            'annotations': {},
            'display_name': 'some text'
        }
        obj = self.set_properties(InstanceIp, prop_map)
        obj.set_virtual_network(vn)
        obj.set_network_ipam(net_ipam)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_service_appliance_set_create(self):
prop_map = {
'name': 'service appliance test name',
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'service_appliance_set_virtualization_type': 'virtual-machine',
'service_appliance_set_properties': {},
'service_appliance_driver': 'Juniper',
'service_appliance_ha_mode': 'active-active',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceApplianceSet, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_route_target_create(self):
prop_map = {
'name': 'target:1:1',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(RouteTarget, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_load_balancer_listener_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'loadbalancer_listener_properties': LoadbalancerListenerType(
protocol='HTTP', protocol_port=80, connection_limit=15),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(LoadbalancerListener, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_config_root_create(self):
prop_map = {
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ConfigRoot, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_service_template_create(self):
prop_map = {
'service_template_properties': ServiceTemplateType(
version=12, service_mode='transparent',
service_type='firewall', service_scaling=False,
interface_type=[ServiceTemplateInterfaceType(
service_interface_type='left',
shared_ip=False, static_route_enable=True)],
ordered_interfaces=False, availability_zone_enable=True,
service_virtualization_type='virtual-machine',
vrouter_instance_type='libvirt-qemu'),
'service_config_managed': False,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceTemplate, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_firewall_policy_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(FirewallPolicy, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_route_table_create(self):
prop_map = {
'routes': RouteTableType(route=[RouteType(
prefix='10.100.77.1',
next_hop='10.100.77.13',
next_hop_type='ip-address')]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(RouteTable, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_provider_attachment_create(self):
prop_map = {
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ProviderAttachment, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_overlay_role_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(OverlayRole, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_multicast_policy_create(self):
prop_map = {
'multicast_source_groups': MulticastSourceGroups(
multicast_source_group=[MulticastSourceGroup(
name='test name', path='/etc/contrail/foo', rate='')]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(MulticastPolicy, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_network_device_config_create(self):
prop_map = {
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(NetworkDeviceConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_virtual_dns_record_create(self):
        """Verify the schema transformer accepts a VirtualDnsRecord create/update.

        Creates a dedicated Domain and a VirtualDns under it, then adds a
        CNAME record as a child of that virtual DNS.
        """
        domain = Domain(name='custom-{}'.format(self.id()))
        self.api.domain_create(domain)
        virtual_dns = VirtualDns('inplaceupgrade',
                                 parent_obj=domain,
                                 virtual_DNS_data=VirtualDnsType(
                                     domain_name='www',
                                     dynamic_records_from_client=True,
                                     record_order='random',
                                     default_ttl_seconds=23))
        self.api.virtual_DNS_create(virtual_dns)
        prop_map = {
            'parent_obj': virtual_dns,
            'parent_type': virtual_dns.object_type,
            'virtual_DNS_record_data': VirtualDnsRecordType(
                record_name='www', record_type='CNAME',
                record_class='IN', record_data='foo',
                record_ttl_seconds=123),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(VirtualDnsRecord, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_dsa_rule_create(self):
        """Verify the schema transformer accepts a DsaRule create/update.

        Creates a parent DiscoveryServiceAssignment first, then adds a rule
        with one publisher and one subscriber endpoint.
        """
        dsa = DiscoveryServiceAssignment(name=self.id())
        self.api.discovery_service_assignment_create(dsa)
        prop_map = {
            'parent_obj': dsa,
            'parent_type': dsa.object_type,
            'dsa_rule_entry': DiscoveryServiceAssignmentType(
                publisher=DiscoveryPubSubEndPointType(
                    ep_type='some string'),
                subscriber=[DiscoveryPubSubEndPointType(
                    ep_type='some string')]),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(DsaRule, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_structured_syslog_config_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(StructuredSyslogConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_discovery_service_assignment_create(self):
prop_map = {
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(DiscoveryServiceAssignment, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_logical_interface_create(self):
        """Verify the schema transformer accepts a LogicalInterface create/update.

        Creates a parent PhysicalRouter first, then attaches an L3 logical
        interface with VLAN tag 2 and basic port parameters.
        """
        pr = PhysicalRouter(name=self.id())
        self.api.physical_router_create(pr)
        prop_map = {
            'parent_obj': pr,
            'parent_type': pr.object_type,
            'logical_interface_vlan_tag': 2,
            'logical_interface_type': 'l3',
            'logical_interface_port_params': PortParameters(
                port_disable=False,
                port_mtu=1500,
                port_description='some string'),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(LogicalInterface, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_flow_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'flow_node_ip_address': '10.100.0.222',
'flow_node_load_balancer_ip': '10.100.2.12',
'flow_node_inband_interface': 'some string',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(FlowNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_port_group_create(self):
        """Verify the schema transformer accepts a PortGroup create/update.

        Creates a parent Node first; the baremetal port-group info points
        back at that node's UUID.
        """
        node = Node(name=self.id())
        self.api.node_create(node)
        prop_map = {
            'parent_obj': node,
            'parent_type': node.object_type,
            'bms_port_group_info': BaremetalPortGroupInfo(
                standalone_ports_supported=False,
                node_uuid=node.uuid,
            ),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(PortGroup, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_route_aggregate_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'aggregate_route_entries': RouteListType(route=['100.0.0.0/24']),
'aggregate_route_nexthop': 'some string',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(RouteAggregate, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_logical_router_create(self):
        """Verify the schema transformer accepts a LogicalRouter create/update.

        Builds a VXLAN-routing logical router under a project, including a
        DHCP relay server address list.
        """
        project = self._project_fetch_or_create(self.id())
        lr_name = 'lr-{}'.format(self.id())
        ip = IpAddressesType(["1.1.1.1"])
        prop_map = {
            "name": lr_name,
            'parent_type': project.object_type,
            "parent_obj": project,
            "display_name": lr_name,
            "fq_name": ['lr' + lr_name],
            "id_perms": IdPermsType(enable=True),
            "logical_router_gateway_external": False,
            "logical_router_type": 'vxlan-routing',
            # Distinct VXLAN ids per interpreter major version — presumably
            # to avoid collisions when py2 and py3 runs share a backend;
            # TODO confirm.
            "vxlan_network_identifier": '1111' if six.PY2 else '1212',
            "configured_route_target_list": None,
            "logical_router_dhcp_relay_server": ip,
            'annotations': {}
        }
        obj = self.set_properties(LogicalRouter, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_structured_syslog_hostname_record_create(self):
        """Verify the schema transformer accepts a StructuredSyslogHostnameRecord.

        Creates a parent StructuredSyslogConfig first, then adds a hostname
        record with link-map and LAN-segment data.
        """
        ssc = StructuredSyslogConfig(
            name=self.id(),
            parent_obj=self._project_fetch_or_create(self.id()))
        self.api.structured_syslog_config_create(ssc)
        prop_map = {
            'parent_obj': ssc,
            'parent_type': ssc.object_type,
            'structured_syslog_hostaddr': '10.100.0.124',
            'structured_syslog_tenant': 'tenant name',
            'structured_syslog_location': '/var/log/foo/',
            'structured_syslog_device': '9b5e5547-a7b2-4cd2-99ed-87eff7ed34da',
            'structured_syslog_hostname_tags': 'some,tags',
            'structured_syslog_linkmap': StructuredSyslogLinkmap(
                links=[StructuredSyslogLinkType(
                    overlay='', underlay='', link_type='',
                    traffic_destination='', metadata='')]),
            'structured_syslog_lan_segment_list':
                StructuredSyslogLANSegmentList(
                    LANSegmentList=[StructuredSyslogLANSegmentType(
                        vpn='', network_ranges='')]),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(StructuredSyslogHostnameRecord, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_service_instance_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'service_instance_properties': ServiceInstanceType(
auto_policy=True, availability_zone='ZoneOne',
interface_list=[ServiceInstanceInterfaceType(
virtual_network='test')]),
'service_instance_bindings': {},
'service_instance_bgp_enabled': True,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceInstance, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_node_profile_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'node_profile_type': 'end-system',
'node_profile_vendor': 'Juniper',
'node_profile_device_family': 'device family name',
'node_profile_hitless_upgrade': True,
'node_profile_roles': NodeProfileRolesType(
role_mappings=[NodeProfileRoleType(
physical_role='admin', rb_roles=['admin'])]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(NodeProfile, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_bridge_domain_create(self):
        """Verify the schema transformer accepts a BridgeDomain create/update.

        Creates a parent VirtualNetwork first, then adds a bridge domain
        with MAC limit/move controls and aging settings.
        """
        vn = VirtualNetwork(
            name=self.id(),
            parent_obj=self._project_fetch_or_create(self.id()))
        self.api.virtual_network_create(vn)
        prop_map = {
            'parent_obj': vn,
            'parent_type': vn.object_type,
            'mac_learning_enabled': False,
            'mac_limit_control': MACLimitControlType(
                mac_limit=23, mac_limit_action='alarm'),
            'mac_move_control': MACMoveLimitControlType(
                mac_move_limit=10, mac_move_time_window=23,
                mac_move_limit_action='drop'),
            'mac_aging_time': 1423,
            'isid': 3435,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(BridgeDomain, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_webui_node_create(self):
prop_map = {
'webui_node_ip_address': '197.12.31.11',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(WebuiNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_port_create(self):
        """Verify the schema transformer accepts a Port create/update.

        Creates a parent Node first; the baremetal port info references the
        node's UUID and a local-link connection descriptor.
        """
        node = Node(self.id())
        self.api.node_create(node)
        prop_map = {
            'parent_obj': node,
            'parent_type': node.object_type,
            'port_group_uuid': '74b6a80d-5c53-41c1-8ca1-88ff8d044a11',
            'bms_port_info': BaremetalPortInfo(
                pxe_enabled=True, local_link_connection=LocalLinkConnection(
                    switch_info='48bf14a6-b780-49fd-beb1-597b7361ebb2',
                    port_index='12', port_id='123', switch_id='5'),
                node_uuid=node.uuid, address='00-10-FA-6E-38-4A'),
            'esxi_port_info': ESXIProperties(dvs_name='some string',
                                             dvs_id='M0id'),
            'label': 'some string',
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(Port, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_bgp_as_a_service_create(self):
        """Verify the schema transformer accepts a BgpAsAService create/update.

        Exercises a deeply nested BgpSessionAttributes tree: auth data,
        per-family attributes with prefix limits, and private-AS handling.
        The AS number is taken from the global system config so it matches
        the running cluster.
        """
        prop_map = {
            'parent_obj': self._project_fetch_or_create(self.id()),
            'parent_type': 'project',
            'autonomous_system': self.gsc.autonomous_system,
            'bgpaas_shared': False,
            'bgpaas_ip_address': '172.142.142.1',
            'bgpaas_session_attributes': BgpSessionAttributes(
                bgp_router=None, admin_down=True, passive=False,
                as_override=False, hold_time=15503, loop_count=4,
                local_autonomous_system=124,
                address_families=AddressFamilies(family=['inet']),
                auth_data=AuthenticationData(
                    key_type='md5', key_items=[AuthenticationKeyItem(
                        key_id=0, key='somestring')]),
                family_attributes=[BgpFamilyAttributes(
                    address_family='inet', loop_count=4,
                    prefix_limit=BgpPrefixLimit(
                        maximum=16, idle_timeout=40000),
                    default_tunnel_encap=['vxlan'])],
                private_as_action='remove',
                route_origin_override=RouteOriginOverride()),
            'bgpaas_ipv4_mapped_ipv6_nexthop': False,
            'bgpaas_suppress_route_advertisement': True,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(BgpAsAService, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_subnet_create(self):
prop_map = {
'subnet_ip_prefix': SubnetType(ip_prefix='192.168.0.0',
ip_prefix_len=16),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Subnet, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_sub_cluster_create(self):
prop_map = {
'name': 'test-subcluster-{}'.format(self.id()),
'annotations': {},
'display_name': 'some string',
'sub_cluster_asn': 124,
}
obj = self.set_properties(SubCluster, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_forwarding_class_create(self):
prop_map = {
'forwarding_class_dscp': 0,
'forwarding_class_mpls_exp': 2,
'forwarding_class_vlan_priority': 1,
'forwarding_class_id': 0,
'sub_cluster_asn': 12,
'sub_cluster_id': 235,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ForwardingClass, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_service_group_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'service_group_firewall_service_list': FirewallServiceGroupType(
firewall_service=[FirewallServiceType(
protocol='udp', protocol_id=17,
src_ports=PortType(start_port=1, end_port=5),
dst_ports=PortType(start_port=6, end_port=10),
)]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceGroup, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_global_analytics_config_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(GlobalAnalyticsConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_address_group_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'address_group_prefix': SubnetListType(subnet=[SubnetType(
ip_prefix='192.168.0.0', ip_prefix_len=16)]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(AddressGroup, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_application_policy_set_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ApplicationPolicySet, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_virtual_ip_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'virtual_ip_properties': VirtualIpType(
address='192.168.1.1', status='UP',
status_description='some string', admin_state=True,
protocol='HTTP', protocol_port=80, connection_limit=5,
subnet_id='b48448f8-33ee-4f79-a530-e400c5e8d930',
persistence_cookie_name='somestring',
persistence_type='APP_COOKIE'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(VirtualIp, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_intent_map_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'intent_map_intent_type': 'assisted-replicator',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(IntentMap, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_port_tuple_create(self):
        """Verify the schema transformer accepts a PortTuple create/update.

        Creates a parent ServiceInstance first, then adds the port tuple as
        its child.
        """
        si = ServiceInstance(
            name=self.id(), service_instance_properties=ServiceInstanceType(
                auto_policy=True),
            parent_obj=self._project_fetch_or_create(self.id()))
        self.api.service_instance_create(si)
        prop_map = {
            'parent_obj': si,
            'parent_type': si.object_type,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(PortTuple, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_analytics_alarm_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'analytics_alarm_node_ip_address': '172.172.10.10',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(AnalyticsAlarmNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_qos_queue_create(self):
        """Verify the schema transformer accepts a QosQueue create/update.

        Creates a parent GlobalQosConfig first, then adds a queue with
        bandwidth bounds and an identifier.
        """
        qos_cfg = GlobalQosConfig(name=self.id(), parent_obj=self.gsc,
                                  control_traffic_dscp=ControlTrafficDscpType(
                                      control=10, analytics=5, dns=2))
        self.api.global_qos_config_create(qos_cfg)
        prop_map = {
            'parent_obj': qos_cfg,
            'parent_type': qos_cfg.object_type,
            'min_bandwidth': 1255,
            'max_bandwidth': 4634,
            'qos_queue_identifier': 35,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(QosQueue, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_physical_role_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(PhysicalRole, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_card_create(self):
prop_map = {
'interface_map': InterfaceMapType(port_info=[PortInfoType(
name='some string', type='fc', port_speed='10G',
channelized=True, channelized_port_speed='1G',
port_group='some string', labels=['some', 'strings'])]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Card, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_security_logging_object_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'security_logging_object_rules': SecurityLoggingObjectRuleListType(
rule=[SecurityLoggingObjectRuleEntryType(
rule_uuid='f1abda8b-5456-4774-a741-b4d236a7ba8e',
rate=23)]),
'security_logging_object_rate': 23,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(SecurityLoggingObject, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_qos_config_create(self):
qos_forwarding = QosIdForwardingClassPairs(
qos_id_forwarding_class_pair=[
QosIdForwardingClassPair(key=1, forwarding_class_id=255),
])
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'qos_config_type': 'vhost',
'dscp_entries': qos_forwarding,
'vlan_priority_entries': qos_forwarding,
'mpls_exp_entries': qos_forwarding,
'default_forwarding_class_id': 125,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(QosConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_analytics_snmp_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'analytics_snmp_node_ip_address': '192.168.10.10',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(AnalyticsSnmpNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_virtual_machine_interface_create(self):
        """Verify the schema transformer accepts a VirtualMachineInterface.

        Builds a VMI with nearly every optional property populated (ECMP
        hashing, DHCP options, host routes, allowed address pairs, VRF
        assignment, mirroring, fat-flow protocols) and links it to a freshly
        created VirtualNetwork before the create/update assertion.
        """
        vn = VirtualNetwork(self.id())
        self.api.virtual_network_create(vn)
        prop_map = {
            'parent_obj': self._project_fetch_or_create(self.id()),
            'parent_type': 'project',
            'ecmp_hashing_include_fields': EcmpHashingIncludeFields(
                hashing_configured=True, source_ip=True,
                destination_ip=True, ip_protocol=True,
                source_port=False, destination_port=False),
            'port_security_enabled': True,
            'virtual_machine_interface_mac_addresses': MacAddressesType(),
            'virtual_machine_interface_dhcp_option_list':
                DhcpOptionsListType(dhcp_option=[DhcpOptionType(
                    dhcp_option_name='an option name',
                    dhcp_option_value='an option value',
                    dhcp_option_value_bytes='some string')]),
            'virtual_machine_interface_host_routes': RouteTableType(
                route=[RouteType(prefix='10.10.100.0/24',
                                 next_hop='10.10.101.20',
                                 next_hop_type='ip-address')]),
            'virtual_machine_interface_allowed_address_pairs':
                AllowedAddressPairs(allowed_address_pair=[AllowedAddressPair(
                    ip=SubnetType(ip_prefix='10.10.100.0', ip_prefix_len=24),
                    mac='8:0:27:90:7a:75', address_mode='active-active')]),
            'vrf_assign_table': VrfAssignTableType(
                vrf_assign_rule=[VrfAssignRuleType(
                    match_condition=MatchConditionType(
                        protocol='UDP', ethertype='IPv4'),
                    vlan_tag=23, routing_instance='somestring',
                    ignore_acl=False)]),
            'virtual_machine_interface_device_owner': 'some string',
            'virtual_machine_interface_disable_policy': False,
            'virtual_machine_interface_properties':
                VirtualMachineInterfacePropertiesType(
                    service_interface_type='left',
                    interface_mirror=InterfaceMirrorType(
                        traffic_direction='ingress',
                        mirror_to=MirrorActionType(
                            analyzer_ip_address='10.10.100.24',
                            routing_instance='some string')),
                    local_preference=10, sub_interface_vlan_tag=23,
                    max_flows=235325),
            'virtual_machine_interface_bindings': {'key_value_pair': tuple()},
            'virtual_machine_interface_fat_flow_protocols': FatFlowProtocols(
                fat_flow_protocol=[ProtocolType(
                    protocol='UDP', port=22, ignore_address='none')]),
            'vlan_tag_based_bridge_domain': False,
            'igmp_enable': True,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(VirtualMachineInterface, prop_map)
        obj.set_virtual_network(vn)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_cli_config_create(self):
        """Verify the schema transformer accepts a CliConfig create/update.

        Creates a parent PhysicalRouter first, then attaches a CLI config
        carrying an accepted-config blob and a commit-diff history entry.
        """
        pr = PhysicalRouter(name=self.id())
        self.api.physical_router_create(pr)
        prop_map = {
            'parent_obj': pr,
            'parent_type': pr.object_type,
            'accepted_cli_config': 'some string',
            'commit_diff_list': CliDiffListType(
                commit_diff_info=[CliDiffInfoType(
                    username='admin', time='2020-06-06 22:00:32',
                    config_changes='some string')]),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(CliConfig, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_service_object_create(self):
prop_map = {
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceObject, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_feature_flag_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'feature_id': 'default',
'feature_flag_version': 'R2008',
'enable_feature': True,
'feature_state': 'experimental',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(FeatureFlag, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_load_balancer_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'loadbalancer_provider': 'Juniper',
'loadbalancer_properties': LoadbalancerType(),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Loadbalancer, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_structured_syslog_tenant_record_create(self):
        """Verify the schema transformer accepts a StructuredSyslogTenantRecord.

        Creates a parent StructuredSyslogConfig first, then adds a tenant
        record with a DSCP alias map.
        """
        ssc = StructuredSyslogConfig(
            name=self.id(),
            parent_obj=self._project_fetch_or_create(self.id()))
        self.api.structured_syslog_config_create(ssc)
        prop_map = {
            'parent_obj': ssc,
            'parent_type': ssc.object_type,
            'structured_syslog_tenant': 'admin',
            'structured_syslog_tenantaddr': 'some string',
            'structured_syslog_tenant_tags': 'some,tags',
            'structured_syslog_dscpmap': StructuredSyslogDSCPMap(
                dscpListIPv4=[StructuredSyslogDSCPType(
                    dscp_value='some string', alias_code='some string')]),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(StructuredSyslogTenantRecord, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_peering_policy_create(self):
prop_map = {
'peering_service': 'public-peering',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(PeeringPolicy, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_structured_syslog_application_record_create(self):
        """Verify the schema transformer accepts a StructuredSyslogApplicationRecord.

        Creates a parent StructuredSyslogConfig first, then adds an
        application record with category/risk/tag metadata.
        """
        ssc = StructuredSyslogConfig(
            name=self.id(),
            parent_obj=self._project_fetch_or_create(self.id()))
        self.api.structured_syslog_config_create(ssc)
        prop_map = {
            'parent_obj': ssc,
            'parent_type': ssc.object_type,
            'structured_syslog_app_category': 'some string',
            'structured_syslog_app_subcategory': 'yet another string',
            'structured_syslog_app_groups': 'a string',
            'structured_syslog_app_risk': 'string',
            'structured_syslog_app_service_tags': 'red,blue,green',
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(StructuredSyslogApplicationRecord,
                                  prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_floating_ip_create(self):
        """Verify the schema transformer accepts a FloatingIp create/update.

        Builds the full dependency chain — flat-subnet NetworkIpam,
        l3-forwarding VirtualNetwork, FloatingIpPool — then allocates a
        floating IP with port mappings inside the pool.
        """
        net_ipam = NetworkIpam(
            name=self.id(),
            ipam_subnet_method='flat-subnet',
            ipam_subnets=IpamSubnets(subnets=[
                IpamSubnetType(subnet=SubnetType(ip_prefix='10.100.0.0',
                                                 ip_prefix_len=16))]))
        self.api.network_ipam_create(net_ipam)
        vn = VirtualNetwork(
            name=self.id(),
            virtual_network_properties=VirtualNetworkType(
                forwarding_mode='l3'))
        vn.add_network_ipam(net_ipam, VnSubnetsType())
        self.api.virtual_network_create(vn)
        fip_pool = FloatingIpPool(
            parent_obj=vn, name=self.id())
        self.api.floating_ip_pool_create(fip_pool)
        prop_map = {
            'parent_obj': fip_pool,
            'parent_type': fip_pool.object_type,
            'floating_ip_address': '10.100.0.123',
            'floating_ip_is_virtual_ip': False,
            'floating_ip_fixed_ip_address': '10.10.100.10',
            'floating_ip_address_family': 'v4',
            'floating_ip_port_mappings_enable': False,
            'floating_ip_port_mappings': PortMappings(port_mappings=[
                PortMap(protocol='TCP', src_port=9124, dst_port=9354)]),
            'floating_ip_traffic_direction': 'ingress',
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(FloatingIp, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_link_aggregation_group_create(self):
        """Verify the schema transformer accepts a LinkAggregationGroup.

        Creates a parent PhysicalRouter first, then adds a LACP-enabled
        link aggregation group under it.
        """
        pr = PhysicalRouter(name=self.id())
        self.api.physical_router_create(pr)
        prop_map = {
            'parent_obj': pr,
            'parent_type': pr.object_type,
            'link_aggregation_group_lacp_enabled': True,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(LinkAggregationGroup, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_virtual_router_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'virtual_router_type': 'embedded',
'virtual_router_dpdk_enabled': True,
'virtual_router_ip_address': '10.100.124.12',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(VirtualRouter, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'port_profile_params': PortProfileParameters(
port_params=PortParameters(port_disable=False, port_mtu=1500,
port_description='some string'),
lacp_params=LacpParams(lacp_enable=True, lacp_interval='slow',
lacp_mode='passive'),
flow_control=False,
bpdu_loop_protection=False,
port_cos_untrust=False),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(PortProfile, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_policy_management_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(PolicyManagement, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_e2_service_provider_create(self):
prop_map = {
'e2_service_provider_promiscuous': True,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(E2ServiceProvider, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_routing_policy_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'routing_policy_entries': PolicyStatementType(
term=[PolicyTermType(
term_match_condition=TermMatchConditionType(
protocol=['xmpp'], prefix=[PrefixMatchType(
prefix='some string', prefix_type='exact')],
community='some string',
community_list=['some string'],
community_match_all=True,
extcommunity_list=['some string'],
extcommunity_match_all=False, family='inet',
as_path=[124], external='ospf-type-1', local_pref=124,
nlri_route_type=[124], prefix_list=[
PrefixListMatchType(
interface_route_table_uuid=[
'0f107c6b-0d36-4f3e-b6e4-7a2c5372617e'],
prefix_type='longer')],
route_filter=RouteFilterType(
route_filter_properties=[RouteFilterProperties(
route='some string', rote_type='longer',
route_type_value='some string')]),
term_action_list=TermActionListType(
update=ActionUpdateType(
as_path=ActionAsPathType(
expand=AsListType(asn_list=235)),
community=ActionCommunityType(
add=CommunityListType(
community='no-export'),
remove=CommunityListType(
community='no-export'),
set=CommunityListType(
community='no-export')),
extcommunity=ActionExtCommunityType(
add=CommunityListType(
community='no-export'),
remove=CommunityListType(
community='no-export'),
set=CommunityListType(
community='no-export')),
local_pref=124, med=125), action='reject',
external='ospf-type-1',
as_path_expand='some string',
as_path_prepend='some string')))]),
'term_type': 'vrouter',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(RoutingPolicy, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_role_config_create(self):
node_profile = NodeProfile(name=self.id(), parent_obj=self.gsc)
self.api.node_profile_create(node_profile)
prop_map = {
'parent_obj': node_profile,
'parent_type': node_profile.object_type,
'role_config_config': 'some string',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(RoleConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_structured_syslog_message_create(self):
ssc = StructuredSyslogConfig(
name=self.id(),
parent_obj=self._project_fetch_or_create(self.id()))
self.api.structured_syslog_config_create(ssc)
field_names_list = FieldNamesList(field_names=['some_field_name'])
prop_map = {
'parent_obj': ssc,
'parent_type': ssc.object_type,
'structured_syslog_message_tagged_fields': field_names_list,
'structured_syslog_message_integer_fields': field_names_list,
'structured_syslog_message_process_and_store': False,
'structured_syslog_message_process_and_summarize': True,
'structured_syslog_message_process_and_summarize_user': False,
'structured_syslog_message_forward': 'do-not-forward',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(StructuredSyslogMessage, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_load_balancer_pool_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'loadbalancer_pool_properties': LoadbalancerPoolType(
status='UP', status_description='ok', admin_state=True,
protocol='HTTP', loadbalancer_method='ROUND_ROBIN',
subnet_id='432c6811-dba6-411e-9152-d8a40a9e38b3',
session_persistence='APP_COOKIE',
persistence_cookie_name='some string'),
'loadbalancer_pool_provider': 'silver',
'loadbalancer_pool_custom_attributes': {},
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(LoadbalancerPool, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_global_qos_config_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'control_traffic_dscp': ControlTrafficDscpType(control=10,
analytics=5,
dns=2),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(GlobalQosConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_analytics_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'analytics_node_ip_address': '172.102.135.43',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(AnalyticsNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_virtual_dns_create(self):
domain = Domain(name='admin-{}'.format(self.id()))
self.api.domain_create(domain)
prop_map = {
'parent_obj': domain,
'parent_type': domain.object_type,
'virtual_DNS_data': VirtualDnsType(
domain_name='www', dynamic_records_from_client=True,
record_order='random', default_ttl_seconds=23),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(VirtualDns, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_config_database_node_create(self):
        """Create/update a ConfigDatabaseNode parented under ``self.gsc``."""
        prop_map = {
            'parent_obj': self.gsc,
            'parent_type': self.gsc.object_type,
            'config_database_node_ip_address': '234.234.234.234',
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(ConfigDatabaseNode, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_config_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'config_node_ip_address': '111.10.110.24',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ConfigNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_firewall_rule_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'action_list': ActionListType(simple_action='deny'),
'service': FirewallServiceType(
protocol='udp', protocol_id=17,
src_ports=PortType(start_port=1, end_port=5),
dst_ports=PortType(start_port=6, end_port=10)),
'endpoint_1': FirewallRuleEndpointType(any=True),
'endpoint_2': FirewallRuleEndpointType(any=True),
'match_tags': FirewallRuleMatchTagsType(tag_list=[
'application', 'tier', 'site']),
'match_tag_types': FirewallRuleMatchTagsTypeIdList(),
'direction': '<>',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(FirewallRule, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_bgp_vpn_create(self):
rt_list = RouteTargetList(route_target=['target:3:1'])
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'route_target_list': rt_list,
'import_route_target_list': rt_list,
'export_route_target_list': rt_list,
'bgpvpn_type': 'l3',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Bgpvpn, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_role_definition_create(self):
        """Create/update a RoleDefinition parented under ``self.gsc``."""
        prop_map = {
            'parent_obj': self.gsc,
            'parent_type': self.gsc.object_type,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(RoleDefinition, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_service_connection_module_create(self):
prop_map = {
'e2_service': 'point-to-point',
'service_type': 'vpws-l2ckt',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceConnectionModule, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_database_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'database_node_ip_address': '234.234.234.234',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(DatabaseNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_load_balancer_health_monitor_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'loadbalancer_healthmonitor_properties':
LoadbalancerHealthmonitorType(
admin_state=True, monitor_type='PING', delay=10,
timeout=2424, max_retries=10, http_method='GET',
url_path='http://localhost/check', expected_codes='200'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(LoadbalancerHealthmonitor, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_devicemgr_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'devicemgr_node_ip_address': '10.100.100.100',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(DevicemgrNode, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_fabric_namespace_create(self):
fabric = Fabric(name=self.id(), parent_obj=self.gsc)
self.api.fabric_create(fabric)
prop_map = {
'parent_obj': fabric,
'parent_type': fabric.object_type,
'fabric_namespace_type': 'ASN',
'fabric_namespace_value': NamespaceValue(
ipv4_cidr=SubnetListType(subnet=[SubnetType(
ip_prefix='10.100.0.0', ip_prefix_len=16)]),
asn=AutonomousSystemsType(asn=[self.gsc.autonomous_system])),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(FabricNamespace, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_network_ipam_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'ipam_subnets': IpamSubnets(subnets=[IpamSubnetType(
subnet=SubnetType(ip_prefix='10.100.0.0', ip_prefix_len=16))]),
'ipam_subnet_method': 'flat-subnet',
'ipam_subnetting': True,
'network_ipam_mgmt': IpamType(
ipam_method='dhcp', ipam_dns_method='none'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(NetworkIpam, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_network_policy_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'network_policy_entries': PolicyEntriesType(
policy_rule=[PolicyRuleType(
direction='<>', protocol='tcp', src_addresses=[AddressType(
virtual_network='124.23.23.12')],
src_ports=[PortType(start_port=10, end_port=12)],
dst_addresses=[AddressType(
virtual_network='125.23.23.12')],
dst_ports=[PortType(start_port=13, end_port=15)],
action_list=ActionListType(simple_action='deny'),
ethertype='IPv4')]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(NetworkPolicy, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_hardware_create(self):
        """Create/update a Hardware object with only the generic properties."""
        prop_map = {
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(Hardware, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
    def test_tag_create(self):
        """Create/update a project-scoped Tag with a foo=bar type/value pair."""
        prop_map = {
            'parent_obj': self._project_fetch_or_create(self.id()),
            'parent_type': 'project',
            'tag_type_name': 'foo',
            'tag_value': 'bar',
            'tag_predefined': False,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(Tag, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_feature_config_create(self):
rd = RoleDefinition(name=self.id(), parent_obj=self.gsc)
self.api.role_definition_create(rd)
prop_map = {
'parent_obj': rd,
'parent_type': rd.object_type,
'feature_config_additional_params': {},
'feature_config_vendor_config': {},
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(FeatureConfig, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
@unittest.skip("Failing CAT test")
def test_bgp_router_create(self):
project = self._project_fetch_or_create(self.id())
vn = VirtualNetwork(name='vn-{}'.format(self.id()), parent_obj=project)
self.api.virtual_network_create(vn)
ri = RoutingInstance(name=self.id(), parent_obj=vn)
self.api.routing_instance_create(ri)
prop_map = {
'parent_obj': ri,
'parent_type': ri.object_type,
'bgp_router_parameters': BgpRouterParams(
autonomous_system=self.gsc.autonomous_system,
identifier='some string', address='10.10.10.10'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(BgpRouter, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_virtual_network_create(self):
project = self._project_fetch_or_create(self.id())
vn_name = 'vn-{}'.format(self.id())
rtl = RouteTargetList(route_target=['target:3:1'])
prop_map = {"name": vn_name,
"display_name": vn_name,
"fq_name": ['vm' + vn_name],
"parent_obj": project,
"is_shared": True,
"router_external": True,
"virtual_network_category": "routed",
"port_security_enabled": True,
"route_target_list": rtl,
"virtual_network_properties": VirtualNetworkType(
forwarding_mode='l3'),
"address_allocation_mode": 'flat-subnet-only',
"mac_learning_enabled": True,
"pbb_evpn_enable": True,
"pbb_etree_enable": True,
"igmp_enable": True,
"id_perms": IdPermsType(enable=True),
'annotations': {},
"mac_aging_time": 400,
"fabric_snat": True,
"virtual_network_routed_properties":
VirtualNetworkRoutedPropertiesType(),
"ecmp_hashing_include_fields": False,
"provider_properties": None,
"flood_unknown_unicast": True,
"layer2_control_word": True,
"mac_move_control":
MACMoveLimitControlType(
mac_move_limit=1024,
mac_move_limit_action='log',
mac_move_time_window=60),
"export_route_target_list": rtl,
"mac_limit_control":
MACLimitControlType(mac_limit=1024,
mac_limit_action='log'),
"virtual_network_fat_flow_protocols":
FatFlowProtocols([
ProtocolType(protocol='p1', port=1),
ProtocolType(protocol='p2', port=2)]),
"virtual_network_network_id": None,
"import_route_target_list": rtl,
"external_ipam": True,
"multi_policy_service_chains_enabled": False
}
obj = self.set_properties(VirtualNetwork, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_virtual_port_group_create(self):
# port = Port(name=self.id())
# port.uuid = self.api.port_create(port)
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'virtual_port_group_lacp_enabled': True,
'virtual_port_group_trunk_port_id': None,
'virtual_port_group_user_created': True,
'virtual_port_group_type': 'access',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(VirtualPortGroup, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_service_appliance_create(self):
sas = ServiceApplianceSet(name=self.id(), parent_obj=self.gsc)
self.api.service_appliance_set_create(sas)
prop_map = {
'parent_obj': sas,
'parent_type': sas.object_type,
'service_appliance_user_credentials': UserCredentials(),
'service_appliance_ip_address': '10.100.10.100',
'service_appliance_virtualization_type': 'virtual-machine',
'service_appliance_properties': {},
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceAppliance, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_namespace_create(self):
domain = Domain(name=self.id())
self.api.domain_create(domain)
prop_map = {
'parent_obj': domain,
'parent_type': domain.object_type,
'namespace_cidr': SubnetType(ip_prefix='10.100.100.0',
ip_prefix_len=24),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Namespace, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_feature_create(self):
        """Create/update a Feature parented under ``self.gsc``."""
        prop_map = {
            'parent_obj': self.gsc,
            'parent_type': self.gsc.object_type,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(Feature, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_device_image_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'device_image_file_name': 'some_file_name',
'device_image_vendor_name': 'Juniper',
'device_image_device_family': 'JunOS',
'device_image_supported_platforms': DevicePlatformListType(),
'device_image_os_version': '1',
'device_image_file_uri': 'some string uri',
'device_image_size': 235235,
'device_image_md5': 'eb2a9193443f8aac2e9d83362f02fd86',
'device_image_sha1': '3DA541559918A808C2402BBA5012F6C60B27661C',
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(DeviceImage, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_physical_interface_create(self):
pr = PhysicalRouter(name=self.id())
self.api.physical_router_create(pr)
prop_map = {
'parent_obj': pr,
'parent_type': pr.object_type,
'ethernet_segment_identifier': '00:11:22:33:44:55:66:77:88:99',
'physical_interface_type': 'regular',
'physical_interface_mac_addresses': MacAddressesType(),
'physical_interface_port_id': 'some string with ID',
'physical_interface_lacp_force_up': False,
'physical_interface_flow_control': False,
'physical_interface_port_params': PortParameters(
port_disable=False, port_mtu=1500,
port_description='some string'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(PhysicalInterface, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_access_control_list_create(self):
vn = VirtualNetwork(
name=self.id(),
parent_obj=self._project_fetch_or_create(self.id()))
self.api.virtual_network_create(vn)
prop_map = {
'parent_obj': vn,
'parent_type': vn.object_type,
'access_control_list_entries': AclEntriesType(
dynamic=True, acl_rule=[AclRuleType(
match_condition=MatchConditionType(protocol='UDP',
ethertype='IPv4'),
action_list=ActionListType(simple_action='deny'),
)]),
'access_control_list_hash': 23534214,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(AccessControlList, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_node_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'node_type': 'baremetal',
'esxi_info': ESXIHostInfo(
username='admin', datacenter='default', esxi_name='testhost',
cluster='test', mac='42:7f:33:d1:76:82', datastore='default',
password='secret123', vcenter_server='default'),
'ip_address': '10.10.10.10',
'hostname': 'test-host',
'bms_info': BaremetalServerInfo(
network_interface='some string', driver='some string',
properties=BaremetalProperties(), driver_info=DriverInfo(),
name='some string'),
'mac_address': '2e:37:05:05:54:b5',
'disk_partition': 'sda,sdb',
'interface_name': 'some string',
'cloud_info': CloudInstanceInfo(
os_version='1805', operating_system='centos7'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Node, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_customer_attachment_create(self):
        """Create/update a CustomerAttachment with an empty attachment address."""
        prop_map = {
            'attachment_address': AttachmentAddressType(),
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(CustomerAttachment, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_structured_syslog_sla_profile_create(self):
ssc = StructuredSyslogConfig(
name=self.id(),
parent_obj=self._project_fetch_or_create(self.id()))
self.api.structured_syslog_config_create(ssc)
prop_map = {
'parent_obj': ssc,
'parent_type': ssc.object_type,
'annotations': {},
'display_name': 'some string',
'structured_syslog_sla_params': '',
}
obj = self.set_properties(StructuredSyslogSlaProfile, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_virtual_machine_create(self):
project = self._project_fetch_or_create(self.id())
vm_name = 'vm-{}'.format(self.id())
prop_map = {
"name": vm_name,
"parent_obj": project,
"display_name": 'vm' + vm_name,
"fq_name": ['vm' + vm_name],
"server_type": 'baremetal-server',
"id_perms": IdPermsType(enable=True),
'annotations': {}
}
obj = self.set_properties(VirtualMachine, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_interface_route_table_create(self):
prop_map = {
'interface_route_table_routes': RouteTableType(
route=[RouteType(prefix='10.10.100.0/24',
next_hop='10.10.101.20',
next_hop_type='ip-address')]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(InterfaceRouteTable, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_load_balancer_member_create(self):
lbp = LoadbalancerPool(
name=self.id(),
arent_obj=self._project_fetch_or_create(self.id()))
self.api.loadbalancer_pool_create(lbp)
prop_map = {
'parent_obj': lbp,
'parent_type': lbp.object_type,
'loadbalancer_member_properties': LoadbalancerMemberType(
admin_state=True, status='UP'),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(LoadbalancerMember, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_service_health_check_create(self):
prop_map = {
'parent_obj': self._project_fetch_or_create(self.id()),
'parent_type': 'project',
'service_health_check_properties': ServiceHealthCheckType(
enabled=True, health_check_type='link-local',
monitor_type='PING', delay=10, delayUsecs=1000, timeout=10,
timeoutUsecs=1000, max_retries=2),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ServiceHealthCheck, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_alarm_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'uve_keys': UveKeysType(uve_key=['somestring']),
'alarm_rules': AlarmOrList(or_list=[AlarmAndList(
and_list=[AlarmExpression(
operation='==',
operand1='NodeStatus.process_info.process_state',
operand2=AlarmOperand2(
uve_attribute='NodeStatus.process_info.process_state',
))])]),
'alarm_severity': 0,
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(Alarm, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_api_access_list_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'api_access_list_entries': RbacRuleEntriesType(
rbac_rule=[RbacRuleType(
rule_object='config', rule_perms=[RbacPermType(
role_name='admin', role_crud='CRUD')])]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(ApiAccessList, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
    def test_alias_ip_pool_create(self):
        """Create/update an AliasIpPool under a freshly created virtual network."""
        vn = VirtualNetwork(
            name=self.id(),
            parent_obj=self._project_fetch_or_create(self.id()))
        self.api.virtual_network_create(vn)
        prop_map = {
            'parent_obj': vn,
            'parent_type': vn.object_type,
            'annotations': {},
            'display_name': 'some string',
        }
        obj = self.set_properties(AliasIpPool, prop_map)
        self.assertSchemaObjCreateOrUpdate(obj)
def test_data_center_interconnect_create(self):
prop_map = {
'parent_obj': self.gsc,
'parent_type': self.gsc.object_type,
'data_center_interconnect_bgp_hold_time': 10,
'data_center_interconnect_mode': 'l3',
'data_center_interconnect_bgp_address_families': AddressFamilies(
family=['inet']),
'data_center_interconnect_configured_route_target_list':
RouteTargetList(route_target=['target:3:1']),
'data_center_interconnect_type': 'inter_fabric',
'destination_physical_router_list': LogicalRouterPRListType(
logical_router_list=[LogicalRouterPRListParams(
logical_router_uuid='432c6811-dba6-411e-9152-d8a40a9e38b3',
physical_router_uuid_list=[
'f42862ae-45c1-4d70-b152-ee30d9caf985'])]),
'annotations': {},
'display_name': 'some string',
}
obj = self.set_properties(DataCenterInterconnect, prop_map)
self.assertSchemaObjCreateOrUpdate(obj)
def test_hbs_create(self):
project = self._project_fetch_or_create(self.id())
project.set_quota(QuotaType(host_based_service=1))
self.api.project_update(project)
hbs = HostBasedService('hbs-%s' % self.id(), parent_obj=project)
self.api.host_based_service_create(hbs)
| 42.148851 | 79 | 0.581309 |
8c937bad45e39b651b93f987589c1997aafa9b37 | 2,209 | py | Python | pbsuite/utils/quickN50.py | shokrof/pbhoney | 288dbb9cbbd16621f815c0e9eb99e1d1455c5c26 | [
"MIT"
] | null | null | null | pbsuite/utils/quickN50.py | shokrof/pbhoney | 288dbb9cbbd16621f815c0e9eb99e1d1455c5c26 | [
"MIT"
] | null | null | null | pbsuite/utils/quickN50.py | shokrof/pbhoney | 288dbb9cbbd16621f815c0e9eb99e1d1455c5c26 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys, math
def getStats(seqLengths):
    """Compute assembly-style length statistics (N50/N90/N95, quartiles).

    NOTE: ``seqLengths`` is sorted in place (descending).

    Args:
        seqLengths: list of numeric sequence lengths.

    Returns:
        dict with itemSum, numItems, n50/n90/n95 (plus the count of items
        longer than each), min, quartiles, mean, median and max.  The
        N-values stay None for empty input, and min/quartiles/mean/median/
        max are absent in that case.
    """
    data = {}
    seqLengths.sort(reverse=True)
    data["numItems"] = len(seqLengths)
    data["itemSum"] = sum(seqLengths)
    # Nxx is the length of the item at which xx% of the total sum is
    # first exceeded when walking from the longest item down.
    n50_mark = data["itemSum"] * .5
    n90_mark = data["itemSum"] * .90
    n95_mark = data["itemSum"] * .95
    data["n50"] = None
    data["n50_gt_count"] = None
    data["n90"] = None
    data["n90_gt_count"] = None
    data["n95"] = None
    data["n95_gt_count"] = None
    basesSeen = 0
    for pos, n in enumerate(seqLengths):
        basesSeen += n
        if data["n50"] is None and basesSeen > n50_mark:
            data["n50"] = n
            data["n50_gt_count"] = pos
        if data["n90"] is None and basesSeen > n90_mark:
            data["n90"] = n
            data["n90_gt_count"] = pos
        if data["n95"] is None and basesSeen > n95_mark:
            data["n95"] = n
            data["n95_gt_count"] = pos
            break
    # may not have any items at all
    if data["numItems"] == 0:
        return data
    data["min"] = seqLengths[-1]
    # List is sorted descending, so the element 75% of the way in is the
    # first (lower) quartile and 25% of the way in is the third quartile.
    data["FstQu"] = seqLengths[int(math.floor(data["numItems"] * .75))]
    # BUG FIX: the median previously averaged the same element with
    # itself; for even-length input it now averages the two middle
    # elements (result truncated to int, as before).
    mid = data["numItems"]
    data["median"] = int((seqLengths[(mid - 1) // 2] + seqLengths[mid // 2]) / 2)
    data["mean"] = data["itemSum"] / data["numItems"]
    data["TrdQu"] = seqLengths[int(math.floor(data["numItems"] * .25))]
    data["max"] = seqLengths[0]
    return data
def run(data):
    """
    Print length statistics for an iterable of numbers (numeric strings
    are fine), one "key<TAB>value" line per statistic.

    NOTE: Python 2 only -- uses the print statement, and relies on
    map() returning a list that getStats() can sort in place.
    """
    data = map(float, data)
    ret = getStats(data)
    # Fixed, human-friendly ordering for the report.
    outputOrder = ["itemSum",
                   "numItems",
                   "min",
                   "FstQu",
                   "mean",
                   "median",
                   "n50",
                   "n50_gt_count",
                   "TrdQu",
                   "n90",
                   "n90_gt_count",
                   "n95",
                   "n95_gt_count",
                   "max"]
    for key in outputOrder:
        print "{0}\t{1:.2f}".format(key, ret[key])
run(sys.stdin.read().strip().split('\n'))
| 28.320513 | 72 | 0.492078 |
52c7c25e771b65ed6ee4780b532cd829d3757f44 | 6,953 | py | Python | mlnext/io.py | PLCnext/MLnext-Framework | aff791ace391e46c7cee12e5901090551d7c2103 | [
"MIT"
] | null | null | null | mlnext/io.py | PLCnext/MLnext-Framework | aff791ace391e46c7cee12e5901090551d7c2103 | [
"MIT"
] | null | null | null | mlnext/io.py | PLCnext/MLnext-Framework | aff791ace391e46c7cee12e5901090551d7c2103 | [
"MIT"
] | null | null | null | """ Module for loading and saving files.
"""
import glob
import json
import os
from typing import Any
from typing import Dict
from typing import List
import yaml
from pydantic import BaseModel
__all__ = [
'save_json',
'load_json',
'save_yaml',
'load_yaml',
'save_config',
'load',
'get_files',
'get_folders'
]
def save_json(data: Dict[str, Any], *, name: str, folder: str = '.'):
    """Saves `data` to name.json in `folder`.

    If `name` has no extension, `.json` is appended; any extension other
    than `.json` is rejected.

    Args:
        data (Dict[str, Any]): Data to save.
        folder (str): Path to an existing folder.
        name (str): Name of file (with or without `.json` extension).

    Raises:
        ValueError: If `folder` is not a directory, or `name` has an
          extension other than `.json`.

    Example:
        >>> # Save a dictionary to disk
        >>> save_json(data={'name': 'mlnext'}, name='mlnext.json')
    """
    if not os.path.isdir(folder):
        raise ValueError(f'{folder} is not a valid directory.')

    filename, ext = os.path.splitext(name)
    if not ext:
        # BUG FIX: previously the literal '(unknown).json' was used here,
        # discarding the caller's file name.
        name = f'{filename}.json'
    elif ext not in {'.json'}:
        raise ValueError(f'Invalid extension "{ext}".')

    with open(os.path.join(folder, name), mode='w') as file:
        json.dump(data, file, indent=2)
def load_json(path: str) -> Dict[str, Any]:
    """Read and parse a `.json` file.

    Args:
        path (str): Path to the file to read.

    Raises:
        FileNotFoundError: If `path` does not point to an existing file.

    Returns:
        Dict[str, Any]: Returns the parsed JSON content.

    Example:
        >>> # Load a json file
        >>> load_json('mlnext.json')
        {'name': 'mlnext'}
    """
    if not os.path.isfile(path):
        raise FileNotFoundError(f'Path {path} invalid.')

    with open(path, 'r') as fh:
        return json.load(fh)
def save_yaml(data: Dict[str, Any], *, name: str, folder: str = '.'):
    """Saves `data` to name.yaml in `folder`.

    If `name` has no extension, `.yaml` is appended; anything other than
    `.yaml`/`.yml` is rejected.

    Args:
        data (Dict[str, Any]): Data to save.
        folder (str): Path to an existing folder.
        name (str): Name of file (with or without `.yaml`/`.yml`
          extension).

    Raises:
        ValueError: If `folder` is not a directory, or `name` has an
          extension other than `.yaml`/`.yml`.

    Example:
        >>> # Save dictionary to yaml
        >>> save_yaml(data={'name': 'mlnext'}, name='mlnext.yaml')
    """
    if not os.path.isdir(folder):
        raise ValueError(f'{folder} is not a valid directory.')

    filename, ext = os.path.splitext(name)
    if not ext:
        # BUG FIX: previously the literal '(unknown).yaml' was used here,
        # discarding the caller's file name.
        name = f'{filename}.yaml'
    elif ext not in {'.yaml', '.yml'}:
        raise ValueError(f'Invalid extension "{ext}".')

    with open(os.path.join(folder, name), mode='w') as file:
        yaml.dump(data, file, indent=2, sort_keys=False)
def load_yaml(path: str) -> Dict[str, Any]:
    """Read and parse a `.yaml`/`.yml` file.

    Args:
        path (str): Path to the file to read.

    Raises:
        FileNotFoundError: If `path` does not point to an existing file.

    Returns:
        Dict[str, Any]: Returns the parsed YAML content.

    Example:
        >>> # Load a yaml file
        >>> load_yaml('mlnext.yaml')
        {'name': 'mlnext'}
    """
    if not os.path.isfile(path):
        raise FileNotFoundError(f'Path {path} invalid.')

    with open(path, 'r') as fh:
        return yaml.safe_load(fh)
def save_config(config: BaseModel, *, name: str, folder: str = '.'):
    """Saves a `pydantic.BaseModel` as `yaml`.

    Unset and None fields are omitted from the output.

    Args:
        config (BaseModel): Model instance to save.
        folder (str): Path to an existing folder.
        name (str): Name of file.

    Raises:
        ValueError: Raised if `folder` is invalid.

    Example:
        >>> # Save a pydantic model to yaml
        >>> class User(pydantic.BaseModel): id: int
        >>> save_config(config=User(id=1), name='user.yaml')
    """
    if not os.path.isdir(folder):
        raise ValueError(f'{folder} is not a valid directory.')

    # Round-trip through JSON so nested models become plain dicts/lists.
    serialized = config.json(exclude_unset=True, exclude_none=True)  # type: ignore
    save_yaml(data=yaml.safe_load(serialized), folder=folder, name=name)
def load(path: str) -> Dict[str, Any]:
    """Loads a file from `path`, choosing the parser by file extension.

    Args:
        path (str): Path to file (`.json`, `.yaml` or `.yml`).

    Raises:
        ValueError: Raised for an unsupported extension.

    Returns:
        Dict[str, Any]: Returns the parsed content.

    Example:
        >>> # Loads file from path
        >>> load('./resources/task.json')
        {'name': 'task'}
    """
    _, ext = os.path.splitext(path)

    parsers = {
        '.json': load_json,
        '.yaml': load_yaml,
        '.yml': load_yaml,
    }
    if ext not in parsers:
        raise ValueError(f'Incompatible extension "{ext}".'
                         f'Supported extensions: {parsers.keys()}.')

    return parsers[ext](path)
def get_files(
    path: str,
    *,
    name: str = '*',
    ext: str = '*',
    absolute: bool = False
) -> List[str]:
    """List the files in `path` matching `name`.`ext`.

    Args:
        path (str): Path of the directory.
        name (str): Glob pattern for the file name part.
        ext (str): File extension (without dot).
        absolute (bool): Whether to return full paths instead of bare
          file names.

    Raises:
        ValueError: Raised if `path` is not a directory.

    Returns:
        List[str]: Returns the matching file names (or paths).

    Example:
        >>> # lists all files in dir
        >>> get_files(path='./resources/tasks', ext='json')
        ['task.json']

        >>> # get all files named task
        >>> get_files(path='./resources/tasks', name='task')
        ['task.json', 'task.yaml']

        >>> # get the absolute path of the files
        >>> get_files(path='./resources/tasks', ext='json',
        ...           absolute=True)
        ['.../resources/tasks/task.json']
    """
    if not os.path.isdir(path):
        raise ValueError(f'Path "{path}" is not a directory.')

    matches = glob.glob(f'{path}/{name}.{ext}')
    if absolute:
        return matches

    return [os.path.basename(m) for m in matches]
def get_folders(
    path: str,
    *,
    filter: str = '',
    absolute: bool = False
) -> List[str]:
    """Lists the sub-folders of `path`.

    Args:
        path (str): Path of the directory.
        filter (str): Keep only folders whose name starts with this
          prefix.
        absolute (bool): Whether to return full paths instead of bare
          folder names.

    Raises:
        ValueError: Raised if `path` is not a directory.

    Returns:
        List[str]: Returns the folder names (or paths).

    Example:
        >>> # list all folders in a directory
        >>> get_folders('./resources')
        ['tasks', 'models']

        >>> # Get all folders that start with the letter m
        >>> get_folders('./resources', filter='m')
        ['models']

        >>> # Get the absolute path of the folders
        >>> get_folders('./resources', absolute=True)
        ['.../resources/tasks', '.../resources/models']
    """
    if not os.path.isdir(path):
        raise ValueError(f'Path "{path}" is not a directory.')

    result = []
    for entry in os.listdir(path):
        if os.path.isdir(os.path.join(path, entry)) and entry.startswith(filter):
            result.append(os.path.join(path, entry) if absolute else entry)
    return result
| 25.101083 | 78 | 0.565799 |
c6b10a3b5ddf37fa1fe98e29f0c6a3eb4f64a206 | 6,311 | py | Python | Operators.py | clavigne/feedback-control-retinal | 5b2349961ae5fe09a21ddfb35e64ee958bc0a7aa | [
"BSD-2-Clause"
] | 1 | 2020-07-20T13:40:57.000Z | 2020-07-20T13:40:57.000Z | Operators.py | clavigne/feedback-control-retinal | 5b2349961ae5fe09a21ddfb35e64ee958bc0a7aa | [
"BSD-2-Clause"
] | null | null | null | Operators.py | clavigne/feedback-control-retinal | 5b2349961ae5fe09a21ddfb35e64ee958bc0a7aa | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import numpy.linalg as npl
import scipy.sparse as sps
from functools import reduce
# This is the max order of a polynomial potential we will be
# declaring. Basically the x and p matrices internally will be nbasis
# + MAX_ORDER so that q^MAX_ORDER is correct up to nbas.
MAX_ORDER = 4
class System:
    """Container for a set of quantum modes.

    Modes are registered with :meth:`define_mode`; their registration
    order (``mode_list``) fixes the tensor-product ordering used by
    :meth:`get_index` and :meth:`trace`.
    """

    def __init__(self):
        self.modes = {}       # name -> Mode instance
        self.nmodes = 0       # number of registered modes
        self.constants = {}   # constants available when interpreting operators
        self.mode_list = []   # mode names in registration order
        self.shape = []       # basis size per mode, in registration order

    def define_mode(self, name, nbas, typ='harmonic', **params):
        """Register a mode of the given type under `name`.

        Args:
            name: key under which the mode is stored.
            nbas: number of basis functions.
            typ: 'harmonic', 'electronic' or 'grid'.
            **params: extra keyword arguments (grid modes only).

        Raises:
            NotImplementedError: for an unknown `typ`.
        """
        if typ == 'harmonic':
            mode = HarmMode(name, nbas)
        elif typ == 'electronic':
            mode = ElecMode(name, nbas)
        elif typ == 'grid':
            mode = GridMode(name, nbas, **params)
        else:
            # BUG FIX: was ``raise NotImplemented(...)``, which raised a
            # TypeError because NotImplemented is not callable/raisable.
            raise NotImplementedError("Type " + typ + " not implemented")
        self.modes[name] = mode
        self.mode_list += [name]
        self.shape += [nbas]
        self.nmodes += 1

    def build_operator(self, dictionary):
        """Build an Operator from a {mode name: expression string} map."""
        op = Operator(self)
        for key, val in dictionary.items():
            # BUG FIX: was ``self.consts``, which is never defined;
            # __init__ creates ``self.constants``.
            m = self.modes[key].interpret(val, self.constants)
            op.add_term(key, m)
        return op

    def get_index(self, index_set):
        """Map per-mode indices ({name: index}) to a flat, row-major
        composite index (last registered mode varies fastest)."""
        ind = 0
        siz = [1]
        for key in self.mode_list[::-1]:
            # np.prod: np.product was removed in NumPy 2.0.
            ind = ind + index_set[key] * np.prod(siz)
            siz += [self.modes[key].nbas]
        return ind

    def trace(self, matrix, modes):
        """Partial trace of a full-system matrix over the given modes."""
        tensor = matrix.reshape(self.shape + self.shape)
        nmode_list = self.mode_list[:]
        for mode in modes:
            il = nmode_list.index(mode)
            # Right-side axis of the same mode sits len(remaining modes)
            # positions further along.
            ir = il + len(nmode_list)
            tensor = np.trace(tensor, axis1=il, axis2=ir)
            nmode_list.pop(il)
        return tensor
class Operator:
    """Tensor-product operator with at most one matrix factor per mode.

    ``terms[mode]`` is either a dense ndarray or None (identity).
    """

    def __init__(self, system):
        self.parent = system
        # Start with the identity (None) on every mode.
        self.terms = {key: None for key in self.parent.modes.keys()}

    def add_term(self, mode, matrix):
        """Store a dense factor for `mode`, densifying sparse input."""
        if type(matrix) is np.ndarray:
            self.terms[mode] = matrix
        else:
            self.terms[mode] = np.array(matrix.todense())

    def get_term(self, key):
        """Return the factor stored for `key` (None if identity/absent)."""
        return self.terms.get(key, None)

    def to_fullmat(self):
        """Full matrix (Kronecker product over all modes), for testing."""
        factors = []
        for name in self.parent.mode_list:
            term = self.terms[name]
            factors.append(self.parent.modes[name].get_I()
                           if term is None else term)
        return reduce(lambda a, b: sps.kron(a, b, format='csr'), factors)

    def to_fullterms(self):
        """List of the non-identity factors in mode order, for testing."""
        return [self.terms[name] for name in self.parent.mode_list
                if self.terms[name] is not None]

    def term_inv(self):
        """New Operator whose non-identity factors are matrix-inverted."""
        new = Operator(self.parent)
        for k, v in self.terms.items():
            if v is not None:
                new.terms[k] = npl.inv(v)
        return new
class Mode:
    """Base class for modes; subclasses set ``nbas`` and ``mode_consts``."""

    def interpret(self, string, consts):
        """Evaluate ``string`` against the shared and per-mode constants,
        then truncate the result to this mode's nbas x nbas block."""
        full = eval(string, consts, self.mode_consts)
        return full[:self.nbas, :self.nbas]

    def get_I(self):
        """Identity of this mode's dimension, as sparse CSR."""
        return sps.eye(self.nbas, self.nbas, format='csr')
class HarmMode(Mode):
    """Harmonic-oscillator mode in the number-state (ladder) basis.

    Operators are built with ``off`` (default MAX_ORDER) extra basis
    functions so products of truncated operators stay accurate before the
    final truncation performed by Mode.interpret.
    """

    def __init__(self, name, nbas):
        self.name = name
        self.nbas = nbas
        self.mode_consts = dict(q=self.get_q(),
                                p=self.get_p(),
                                I=self.get_harm_I())

    def _upper_ladder(self, off, amplitudes):
        """Sparse matrix with ``amplitudes`` on the (i, i+1) superdiagonal."""
        size = self.nbas + off
        rows = np.arange(0, size - 1)
        cols = np.arange(1, size)
        return sps.csr_matrix((amplitudes, [rows, cols]), shape=(size, size))

    def get_q(self, off=MAX_ORDER):
        """Position-like operator q = (a + a^T)/sqrt(2) in the padded basis."""
        amp = np.sqrt(0.5 * (np.arange(0, self.nbas + off - 1) + 1))
        upper = self._upper_ladder(off, amp)
        return upper + upper.T

    def get_p(self, off=MAX_ORDER):
        """Momentum-like operator: antisymmetric, purely imaginary ladder difference."""
        amp = 1j * np.sqrt(0.5 * (np.arange(0, self.nbas + off - 1) + 1))
        upper = self._upper_ladder(off, amp)
        return upper - upper.T

    def get_harm_I(self, off=MAX_ORDER):
        """Identity on the padded (nbas + off) basis."""
        size = self.nbas + off
        return sps.eye(size, size, format='csr')

    def get_grid_points(self):
        """Diagonalize the unpadded q to get DVR grid points and the transform."""
        grid, transform = npl.eigh(self.get_q(off=0).todense())
        return grid, transform
class ElecMode(Mode):
    """Discrete electronic-state mode.

    Exposes one constant ``SiSj`` per state pair: the |i><j| transfer
    matrix, usable inside interpreted operator expressions.
    """

    def __init__(self, name, nbas):
        self.name = name
        self.nbas = nbas
        self.mode_consts = {
            "S%iS%i" % (i, j): self.get_el(i, j)
            for i in range(nbas)
            for j in range(nbas)
        }

    def get_el(self, iel, jel):
        """Sparse nbas x nbas matrix with a single 1.0 at (iel, jel)."""
        return sps.csr_matrix(([1.0], [[iel], [jel]]), shape=(self.nbas, self.nbas))
class GridMode(Mode):
    """Mode represented on an equidistant coordinate grid (Fourier/DVR style).

    Exposes ``dx2`` (kinetic/Laplacian matrix), ``I``, ``V`` (np.diag, for
    building potentials from grid values) and ``x`` (the grid itself) for
    interpreted operator expressions.
    """

    def __init__(self, name, nbas, low=0.0, high=2*np.pi):
        self.name = name
        self.nbas = nbas
        self.bound = [low, high]
        self.grid = np.linspace(low, high, nbas)
        self.delta = self.grid[1] - self.grid[0]
        self.mode_consts = dict(dx2=self.get_lapl(),
                                I=self.get_I(),
                                V=np.diag,
                                x=self.grid)

    def get_lapl(self):
        """Dense second-derivative (kinetic) matrix for the periodic grid."""
        # BUG FIX: np.complex was a deprecated alias for the builtin `complex`
        # and was removed in NumPy 1.24; use the builtin directly.
        T = np.zeros((self.nbas, self.nbas), dtype=complex)
        # Calculate prefactors and stuff
        k_squared = (np.pi/self.delta)**2.0
        n_squared = self.nbas**2.0
        diagonalElements = (k_squared/3.0)*(1.0+(2.0/n_squared))
        notDiagonal = (2*k_squared / n_squared)
        fun = (lambda i,j: # DO NOT CHANGE THIS (Formula from Tannor)
               diagonalElements if i==j
               else notDiagonal * (
                   np.power(-1.0,j-i) * np.power(np.sin(np.pi*(j-i)/self.nbas),-2)))
        for i in range(self.nbas):
            for j in range(self.nbas):
                T[i,j] = fun(i,j)
        return T

    def define_term(self, name, fun):
        """Register an extra named constant/function for expression parsing."""
        self.mode_consts[name] = fun
| 31.713568 | 88 | 0.536048 |
bcf823c3bf2bbeabdb81d5e5e5ad0885ded16aed | 7,578 | py | Python | backups/tojson.bak.py | ono7/f5config2json | 9ad0372f33d58fca7403cda03e7c02325d04d11f | [
"MIT"
] | null | null | null | backups/tojson.bak.py | ono7/f5config2json | 9ad0372f33d58fca7403cda03e7c02325d04d11f | [
"MIT"
] | null | null | null | backups/tojson.bak.py | ono7/f5config2json | 9ad0372f33d58fca7403cda03e7c02325d04d11f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
""" Utilities for implementing stacks to track stanza config objects
and generate JSON output
e.g.
virtual-server /Common/best_vs {
attributes {
key1 value1
key2 value2
}
empty-definition { }
}
Fri Jul 23 13:11:27 2021
__author__ = 'Jose Lima'
"""
import re
from base64 import b64encode
from typing import Tuple, List, Any
from context import kv_context, storage_context
### rock stars ###
class Storage:
    """Storage container whose layout follows the (k1, k2) pair from is_parent.

    * ``k2`` is a list -> ``{k1: []}``; update() appends entries.
    * ``k2`` is a dict -> ``{}``; update() merges at the top level.
    * ``k2`` is a str  -> ``{k1: {k2: {}}}``; update() merges into the inner dict.
    * otherwise        -> ``{k1: {}}``; update() merges under k1.
    """

    def __init__(self, k1: str = None, k2: Any = None):
        self.k1 = k1
        self.k2 = k2
        self.parent = None
        self.root = None
        if isinstance(k2, list):
            self.storage = {k1: []}        # collect repeated children
        elif isinstance(k2, dict):
            self.storage = {}              # anonymous brace block
        elif isinstance(k2, str):
            self.storage = {k1: {k2: {}}}  # two-level parent key
        else:
            self.storage = {k1: {}}        # plain single-key parent

    def update(self, data: dict) -> None:
        """Merge ``data`` into the structure chosen in __init__."""
        target = self.storage if isinstance(self.k2, dict) else self.storage[self.k1]
        if isinstance(self.k2, list):
            target.append(data)
        elif isinstance(self.k2, str):
            target[self.k2].update(data)
        else:
            target.update(data)

    def get_store(self):
        """Return the underlying dict."""
        return self.storage
class Stack:
    """Tracks brace nesting of one stanza block, line by line.

    ``state`` flips to True once every opened brace has been closed, i.e.
    the stanza that started this stack is complete.
    """

    def __init__(self):
        self.stack = []      # one "{" entry per currently-open block
        self.state = False   # True once nesting returns to zero
        self.last = None     # previously seen line
        self.current = None  # most recently seen line
        self.len = 0         # current nesting depth
        self.by_who = None   # line that last changed the balance

    def update_state(self, line: str) -> bool:
        """Feed one config line; return whether the stanza is now balanced."""
        self.last, self.current = self.current, line
        if line.endswith("{"):
            self.stack.append("{")
            self.len += 1
            self.by_who = line
        elif line.strip() == "}":
            self.stack.pop()
            self.len -= 1
            if self.len == 0:
                self.state = True
                self.by_who = line
        return self.state

    def is_balanced(self):
        """True once the block has closed completely."""
        return self.state

    def get_stack(self):
        """Expose the raw brace stack (mainly for debugging)."""
        return self.stack
### regex compile ###
re_quotes = re.compile(r"\b(\S+) (.*)")  # key followed by the rest of the line (value)
re_kv = re.compile(r"\S+")  # whitespace-separated tokens
# re_keys = re.compile(r"[^{} ]+")
# re_keys below covers cases where there are spaces in "/Common/space here"
re_keys = re.compile(r'("[^{}]+"|[^{} ]+)')
re_list = re.compile(r"(\S+) {(?:([^{}]*))}")  # NOTE(review): appears unused in this module
store_contex = {"ltm:virtual": ["stuff"]}  # NOTE(review): appears unused; possibly leftover
# Keys whose stanza bodies should be collected into a list rather than a dict.
list_keys = [
    "images",
    "variables",
    "rows",
    "log-settings",
    "\d+ {",  # NOTE(review): "\d" in a non-raw string; works, but r"\d+ {" would be cleaner
    "attributes",
    "assertion-consumer-services",
]
# Longest-first so the regex alternation prefers the most specific key.
list_keys = sorted(list_keys, key=len, reverse=True)
list_keys = "|".join(list_keys)
value_is_list = re.compile(f"({list_keys})")
## policy helpers ##
def clean_data_chunk(chunk: str) -> str:
    """Normalize one F5 configuration chunk before parsing.

    Strips surrounding whitespace, collapses runs of newlines into a single
    newline, and re-joins lines that were broken mid-statement: a trailing
    run of whitespace followed by a newline becomes a single space (seen in
    long quoted strings in F5 configs).
    """
    text = chunk.strip()
    text = re.sub(r"[\n]+", "\n", text)
    return re.sub(r"\s+\n", " ", text)
def create_new_objects(line: str, storage_stack: object, obj_stack: object) -> object:
    """Open a new stanza: push a Storage node and a brace Stack for ``line``.

    The fresh Storage remembers the node currently on top of
    ``storage_stack`` as its parent, so the finished sub-tree can be merged
    upward when the stanza's closing brace is reached.
    """
    node = Storage(*is_parent(line))
    if storage_stack:
        node.parent = storage_stack[-1]
    storage_stack.append(node)
    brace_tracker = Stack()
    brace_tracker.update_state(line)
    obj_stack.append(brace_tracker)
    return brace_tracker
### parsers ###
def parse_singleton(data: str) -> object:
    """Parse a one-line (braceless) object into its dict form, e.g.
    ``apm client-packaging /Common/client-packaging``.
    """
    return Storage(*is_parent(data)).get_store()
def is_parent(line: str) -> Tuple:
    """Derive the (k1, k2) storage keys for a line that opens an object.

    ``level1 word2 /Common/level2 {`` -> ("level1:word2", "/Common/level2"):
    all tokens but the last are joined with ":" to form the parent key.
    ``level1 {`` -> ("level1", []) when level1 is a known list-valued key,
    otherwise ("level1", None). A bare "{" yields (None, {}) for an
    anonymous block. Works together with Storage to pick the right layout.
    """
    if line.strip() == "{":
        return None, {}
    tokens = re_keys.findall(line)
    if tokens and len(tokens) > 1:
        child = tokens.pop(-1)
        return ":".join(tokens), child
    if tokens and value_is_list.search(tokens[0]):
        return tokens[0], []
    return tokens[0], None
def parse_policy(policy: str, b64: bool = False, encode_this: list = None) -> object:
    """parse a stanza object from f5 and return python dict
    policy: a block of F5 config, e.g. parent { attribute value }
    one probably should regex-fu the blocks out of a config text document
    before sending them here
    b64: optionaly embed original config block encoded in base64
    parse_policy(data, b64=True)
    encode_this: list of object parent keys e.g. "ltm:rule" to by pass parsing
    and skip to encoding. avoids building complex expressions for data that
    is not necessary for migration correlation.
    """
    if not encode_this:
        encode_this = []
    lines = clean_data_chunk(policy).splitlines()
    # Single-line chunks are braceless objects; no stack machinery needed.
    if len(lines) == 1:
        return parse_singleton(lines[0])
    storage_stack: List[object] = []  # Storage nodes for currently-open stanzas
    obj_stack: List[object] = []      # matching brace-tracking Stacks
    for line in lines:
        # NOTE(review): `this_stack` is first assigned in the `endswith("{")`
        # branch below; if the very first line were "}", this reference would
        # raise NameError. Presumably inputs always open with a "{" line.
        # Case 1: closing brace after the tracker already reported balanced —
        # fold the finished sub-tree into its parent and pop both stacks.
        if line.strip() == "}" and this_stack.is_balanced():
            if storage_stack[-1].parent and len(storage_stack) != 1:
                storage_stack[-1].parent.update(storage_stack[-1].get_store())
            storage_stack.pop()
            this_stack = obj_stack.pop()
            continue
        # Case 2: closing brace that may complete the current tracker now.
        if line.strip() == "}":
            this_stack.update_state(line)
            if this_stack.is_balanced() and len(obj_stack) != 0:
                this_stack = obj_stack.pop()
            if storage_stack[-1].parent and len(storage_stack) != 1:
                storage_stack[-1].parent.update(storage_stack[-1].get_store())
                storage_stack.pop()
            continue
        # Case 3: a line opening a new (possibly nested) stanza.
        if line.endswith("{"):
            this_stack = create_new_objects(line, storage_stack, obj_stack)
            # Parent keys listed in encode_this skip parsing entirely: the
            # whole chunk is embedded base64-encoded and we return early.
            if storage_stack[-1].k1 in encode_this:
                storage_stack[-1].update(
                    {"b64": f"{b64encode(policy.encode()).decode()}"}
                )
                return storage_stack[0].get_store()
            continue
        # Default: a key/value attribute line inside the current stanza.
        storage_stack[-1].update(kv_context(line, context=storage_stack[0].k1))
    if b64:
        storage_stack[0].update({"b64": f"{b64encode(policy.encode()).decode()}"})
    return storage_stack[0].get_store()
| 31.057377 | 92 | 0.601742 |
4668cf52f03734858cfcf0e889b2c822d7eba157 | 4,457 | py | Python | carbon/lib/carbon/instrumentation.py | ehazlett/graphite | b20573e92cb90de254505baa160210483f203be9 | [
"Apache-2.0"
] | 1 | 2015-05-21T10:23:03.000Z | 2015-05-21T10:23:03.000Z | carbon/lib/carbon/instrumentation.py | Cue/graphite | 450eeeb0eacc433bc5914c1dff2e05dbf420cf8d | [
"Apache-2.0"
] | null | null | null | carbon/lib/carbon/instrumentation.py | Cue/graphite | 450eeeb0eacc433bc5914c1dff2e05dbf420cf8d | [
"Apache-2.0"
] | null | null | null | import os
import time
import socket
from resource import getrusage, RUSAGE_SELF
from twisted.application.service import Service
from twisted.internet.task import LoopingCall
from carbon.conf import settings
# Shared in-memory counters/samples, flushed periodically by recordMetrics.
stats = {}
HOSTNAME = socket.gethostname().replace('.','_')
PAGESIZE = os.sysconf('SC_PAGESIZE')
# Baseline CPU usage captured at import time, updated by getCpuUsage().
rusage = getrusage(RUSAGE_SELF)
lastUsage = rusage.ru_utime + rusage.ru_stime
lastUsageTime = time.time()
# TODO(chrismd) refactor the graphite metrics hierarchy to be cleaner,
# more consistent, and make room for frontend metrics.
#metric_prefix = "Graphite.backend.%(program)s.%(instance)s." % settings


def increment(stat, increase=1):
    """Add ``increase`` to the named counter, creating it on first use."""
    stats[stat] = stats.get(stat, 0) + increase


def append(stat, value):
    """Append ``value`` to the named sample list, creating it on first use."""
    stats.setdefault(stat, []).append(value)


def getCpuUsage():
    """Percent CPU used by this process since the previous call."""
    global lastUsage, lastUsageTime
    usage = getrusage(RUSAGE_SELF)
    totalCpu = usage.ru_utime + usage.ru_stime
    now = time.time()
    elapsed = now - lastUsageTime
    if elapsed == 0:  # shouldn't be possible, but ZeroDivisionError was observed
        elapsed = 0.000001
    cpuUsagePercent = ((totalCpu - lastUsage) / elapsed) * 100.0
    lastUsage = totalCpu
    lastUsageTime = now
    return cpuUsagePercent


def getMemUsage():
    """Resident set size in bytes, read from /proc (Linux only)."""
    with open('/proc/self/statm') as statm:
        rss_pages = int(statm.read().split()[1])
    return rss_pages * PAGESIZE
def recordMetrics():
    """Snapshot and reset the shared stats dict, then re-emit the values as
    carbon self-instrumentation metrics for the running program."""
    global lastUsage
    # Take a snapshot and reset so concurrent increments go to the next window.
    myStats = stats.copy()
    stats.clear()
    # cache metrics
    if settings.program == 'carbon-cache':
        record = cache_record
        updateTimes = myStats.get('updateTimes', [])
        committedPoints = myStats.get('committedPoints', 0)
        creates = myStats.get('creates', 0)
        errors = myStats.get('errors', 0)
        cacheQueries = myStats.get('cacheQueries', 0)
        cacheOverflow = myStats.get('cache.overflow', 0)
        if updateTimes:
            avgUpdateTime = sum(updateTimes) / len(updateTimes)
            record('avgUpdateTime', avgUpdateTime)
        if committedPoints:
            # committedPoints nonzero implies at least one update occurred,
            # so len(updateTimes) is presumably > 0 here — confirm.
            pointsPerUpdate = float(committedPoints) / len(updateTimes)
            record('pointsPerUpdate', pointsPerUpdate)
        record('updateOperations', len(updateTimes))
        record('committedPoints', committedPoints)
        record('creates', creates)
        record('errors', errors)
        record('cache.queries', cacheQueries)
        record('cache.queues', len(cache.MetricCache))
        record('cache.size', cache.MetricCache.size)
        record('cache.overflow', cacheOverflow)
    # aggregator metrics
    elif settings.program == 'carbon-aggregator':
        record = aggregator_record
        record('allocatedBuffers', len(BufferManager))
        record('bufferedDatapoints',
               sum([b.size for b in BufferManager.buffers.values()]))
        record('aggregateDatapointsSent', myStats.get('aggregateDatapointsSent', 0))
    # common metrics
    # NOTE(review): `record` is only bound in the two branches above; for any
    # other settings.program (e.g. carbon-relay) this raises NameError — confirm.
    record('metricsReceived', myStats.get('metricsReceived', 0))
    record('cpuUsage', getCpuUsage())
    try: # This only works on Linux
        record('memUsage', getMemUsage())
    except:
        # Deliberate best-effort: /proc is unavailable on non-Linux platforms.
        pass
def cache_record(metric, value):
    """Write a self-instrumentation datapoint straight into the local cache
    (used when running as carbon-cache)."""
    if settings.instance is None:
        node = HOSTNAME
    else:
        node = '%s-%s' % (HOSTNAME, settings.instance)
    fullMetric = 'carbon.agents.%s.%s' % (node, metric)
    datapoint = (time.time(), value)
    cache.MetricCache.store(fullMetric, datapoint)
def relay_record(metric, value):
    """Emit a self-instrumentation datapoint through the relay event bus
    (used when running as carbon-relay)."""
    if settings.instance is None:
        node = HOSTNAME
    else:
        node = '%s-%s' % (HOSTNAME, settings.instance)
    fullMetric = 'carbon.relays.%s.%s' % (node, metric)
    datapoint = (time.time(), value)
    events.metricGenerated(fullMetric, datapoint)
def aggregator_record(metric, value):
    """Emit a self-instrumentation datapoint through the event bus
    (used when running as carbon-aggregator)."""
    if settings.instance is None:
        node = HOSTNAME
    else:
        node = '%s-%s' % (HOSTNAME, settings.instance)
    fullMetric = 'carbon.aggregator.%s.%s' % (node, metric)
    datapoint = (time.time(), value)
    events.metricGenerated(fullMetric, datapoint)
class InstrumentationService(Service):
    # Twisted service that periodically flushes self-instrumentation metrics.
    def __init__(self):
        # Re-emit the collected stats once per minute via recordMetrics.
        self.record_task = LoopingCall(recordMetrics)
    def startService(self):
        # start(60, now=False): run every 60s, skipping the immediate first call.
        self.record_task.start(60, False)
        Service.startService(self)
    def stopService(self):
        self.record_task.stop()
        Service.stopService(self)
# Avoid import circularities
from carbon import state, events, cache
from carbon.aggregator.buffers import BufferManager
| 29.130719 | 96 | 0.709221 |
bd77d7b1add8435d92a021947c43de5150f97abd | 1,783 | py | Python | Features/Stylistic Sets/Synchronize ssXX glyphs.py | danielgamage/Mekkablue-Scripts | 0b0b4468ec938f8c669b3552e2fa429080b65bf1 | [
"Apache-2.0"
] | 1 | 2021-09-04T18:41:30.000Z | 2021-09-04T18:41:30.000Z | Stylistic Sets/Synchronize ssXX glyphs.py | davidtahim/Glyphs-Scripts | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | [
"Apache-2.0"
] | null | null | null | Stylistic Sets/Synchronize ssXX glyphs.py | davidtahim/Glyphs-Scripts | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | [
"Apache-2.0"
] | 1 | 2021-06-09T19:16:47.000Z | 2021-06-09T19:16:47.000Z | #MenuTitle: Synchronize ssXX glyphs
# -*- coding: utf-8 -*-
__doc__="""
Creates missing ssXX glyphs so that you have synchronous groups of ssXX glyphs.
E.g. you have a.ss01 b.ss01 c.ss01 a.ss02 c.ss02 --> the script creates b.ss02
"""
Font = Glyphs.font  # frontmost font open in Glyphs.app
allGlyphs = [ x.name for x in list( Font.glyphs ) ]  # names of every glyph in the font
linelength = 70  # NOTE(review): appears unused in this script
def ssXXsuffix( i ):
	"""Clamp i into 1..20 and format it as a '.ssXX' suffix, e.g. 5 -> '.ss05'."""
	clamped = min( max( i, 1 ), 20 )
	return ".ss%0.2d" % clamped
def stripsuffix( glyphname ):
	"""Returns the glyph name without its dot suffix, e.g. 'a.ss01' -> 'a'.

	Fixed: the old find()-based slice returned the name minus its last
	character when no dot was present (find() returns -1); names without
	a suffix are now returned unchanged.
	"""
	return glyphname.split( ".", 1 )[0]
# Find the highest populated stylistic-set number: probe .ss01, .ss02, ...
# until a suffix with no matching glyphs is found.
i = 1
ssXX_exists = True
while ssXX_exists:
	ssXX = ssXXsuffix(i)
	# NOTE(review): `is not -1` is an identity comparison against an int; it
	# only behaves like `!= -1` thanks to CPython's small-int caching.
	ssXXglyphs = [ x for x in allGlyphs if x.find( ssXX ) is not -1 ]
	if len(ssXXglyphs) == 0:
		# Overshot: step back to the last suffix that had glyphs.
		i-=1
		ssXX = ssXXsuffix(i)
		ssXXglyphs = [ x for x in allGlyphs if x.find( ssXX ) is not -1 ]
		ssXX_exists = False
	else:
		i+=1
if i == 0:
	print "No ssXX glyphs in the font. Aborting."
else:
	print "Highest ssXX:", ssXX
	print "Creating",
	# For every pair of set numbers (XX -> YY), copy each glyph that exists
	# in .ssXX but is missing in .ssYY, so all sets cover the same bases.
	for XX in range( i ):
		ssXXglyphs = [ x for x in allGlyphs if x.find( ssXXsuffix( XX+1 ) ) is not -1 ]
		baseglyphs = [ stripsuffix( x ) for x in ssXXglyphs ]
		for YY in range( i ):
			if XX != YY:
				allGlyphs = [ x.name for x in list( Font.glyphs ) ] # re-fetch: the glyph list may already have changed
				for thisglyphname in baseglyphs:
					targetglyphname = thisglyphname + ssXXsuffix( YY+1 )
					if not targetglyphname in allGlyphs:
						# Duplicate the source-set glyph under the target suffix.
						sourceglyphname = thisglyphname + ssXXsuffix( XX+1 )
						sourceglyph = Font.glyphs[ sourceglyphname ]
						targetglyph = sourceglyph.copy()
						targetglyph.name = targetglyphname
						Font.glyphs.append( targetglyph )
						print targetglyphname,
	print
85f4224d2319b4ea1507b9c11ab31b77237a80f6 | 260 | py | Python | Dataset/Leetcode/test/4/676.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/4/676.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/4/676.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, nums1: List[int], nums2: List[int]) -> float:
num=nums1+nums2
num.sort()
if len(num)%2==0:
return (num[len(num)//2]+num[len(num)//2-1])/2
else:
return num[len(num)//2]
| 26 | 63 | 0.5 |
f5eb75e24a49d469354c2b9d15b57fdbf852cd11 | 3,979 | py | Python | ECS/ecs-taskstopped-handling/lambda/TaskStoppedHandler.py | terratenney/aws-tools | d8ca07d56d812deb819b039752b94a0f1b9e6eb2 | [
"MIT"
] | 8 | 2020-12-27T18:44:17.000Z | 2022-03-10T22:20:28.000Z | ECS/ecs-taskstopped-handling/lambda/TaskStoppedHandler.py | terratenney/aws-tools | d8ca07d56d812deb819b039752b94a0f1b9e6eb2 | [
"MIT"
] | 28 | 2020-08-30T02:57:03.000Z | 2021-05-12T09:13:15.000Z | ECS/ecs-taskstopped-handling/lambda/TaskStoppedHandler.py | kyhau/arki | b5d6b160ef0780032f231362158dd9dd892f4e8e | [
"MIT"
] | 8 | 2020-09-03T19:00:13.000Z | 2022-03-31T05:31:35.000Z | """
This is a Lambda function to be triggered from CloudWatch Events (ECS Task Stopped).
"""
import boto3
import json
import logging
# Update the root logger to get messages at DEBUG and above
logging.getLogger().setLevel(logging.DEBUG)
# Silence the chatty AWS SDK / HTTP client loggers so DEBUG output stays readable.
logging.getLogger("botocore").setLevel(logging.CRITICAL)
logging.getLogger("boto3").setLevel(logging.CRITICAL)
logging.getLogger("urllib3.connectionpool").setLevel(logging.CRITICAL)
# Logged once per Lambda cold start (module top level).
logging.info(f"boto3.__version__: {boto3.__version__}")
def lambda_handler(event, context):
    """Entry point for the CloudWatch Events "ECS Task State Change" rule.

    Validates the event source, pulls the fields of interest out of the
    task-STOPPED event, and forwards them to
    process_ecs_task_stopped_event.

    :param event: CloudWatch event dict. ``event["detail"]`` carries the
        task state, e.g.::

            {
                "source": "aws.ecs",
                "detail": {
                    "stoppedAt": "2019-06-30T03:36:57.935Z",
                    "taskArn": "arn:aws:ecs:...:task/xxxx",
                    "overrides": {
                        "containerOverrides": [
                            {"environment": [{"name": "S3_BUCKET",
                                              "value": "bucket-name-xxx"}]}
                        ]
                    },
                    ...
                }
            }

    :param context: Lambda runtime context (unused).
    :returns: result of process_ecs_task_stopped_event (currently 0).
    :raises ValueError: if the event did not originate from ``aws.ecs``.
    :raises KeyError: if a required ``detail`` field is missing.
    """
    logging.debug("Received event: " + json.dumps(event))

    if event["source"] != "aws.ecs":
        raise ValueError("Function only supports input from events with a source type of: aws.ecs")

    # Pull out only the pieces downstream processing needs.
    detail = event["detail"]
    params = {
        "timestamp": detail["stoppedAt"],
        "task_arn": detail["taskArn"],
        "other_env_data": detail["overrides"]["containerOverrides"][0]["environment"],
    }

    return process_ecs_task_stopped_event(params)


def process_ecs_task_stopped_event(params):
    """Handle the extracted task-stopped parameters.

    Currently a stub that reports success.
    """
    # TODO
    return 0
| 37.537736 | 110 | 0.546117 |
98221d020546f646e478df4af65e07dcdb54b855 | 2,535 | py | Python | strands_monitored_nav_states/src/strands_monitored_nav_states/recover_stuck_on_carpet_no_help.py | cdondrup/strands_recovery_behaviours | 8fd1fd5dee1a7a473403dadab7ce91adab9590df | [
"MIT"
] | null | null | null | strands_monitored_nav_states/src/strands_monitored_nav_states/recover_stuck_on_carpet_no_help.py | cdondrup/strands_recovery_behaviours | 8fd1fd5dee1a7a473403dadab7ce91adab9590df | [
"MIT"
] | null | null | null | strands_monitored_nav_states/src/strands_monitored_nav_states/recover_stuck_on_carpet_no_help.py | cdondrup/strands_recovery_behaviours | 8fd1fd5dee1a7a473403dadab7ce91adab9590df | [
"MIT"
] | null | null | null | import rospy
import smach
from monitored_navigation.recover_state_machine import RecoverStateMachine
from monitored_navigation.recover_state import RecoverState
from geometry_msgs.msg import Twist
class RecoverStuckOnCarpetNoHelp(RecoverStateMachine):
    """Recovery state machine that tries to free a robot stuck on carpet
    without asking a human for help.

    Wraps a single CarpetState and maps its outcomes onto this machine's
    outcomes.
    """
    def __init__(self, max_recovery_attempts=float("inf")):
        RecoverStateMachine.__init__(self)
        self.state = CarpetState(max_recovery_attempts=max_recovery_attempts)
        with self:
            # FIX: the transitions dict previously listed 'preempted' twice
            # (with the identical value), which Python silently collapses;
            # the duplicate entry has been removed.
            # NOTE(review): CarpetState can also return
            # 'recovered_with_help'/'not_recovered_with_help', which are not
            # mapped here — confirm that is intentional.
            smach.StateMachine.add('CARPET_STATE',
                                   self.state,
                                   transitions={'preempted': 'preempted',
                                                'recovered_without_help': 'recovered_without_help',
                                                'not_active': 'not_recovered_without_help'})
class CarpetState(RecoverState):
    """Recovery state that wiggles the robot forward to free it from carpet."""
    def __init__(self,
                 name="recover_stuck_on_carpet",
                 is_active=True,
                 max_recovery_attempts=float("inf")):
        RecoverState.__init__(self,
                              name=name,
                              outcomes=['recovered_with_help', 'recovered_without_help','not_recovered_with_help', 'not_recovered_without_help', 'preempted'],
                              is_active=is_active,
                              max_recovery_attempts=max_recovery_attempts
                              )
        self.was_helped=False
        # Publishes velocity commands directly to the base.
        self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.vel_cmd = Twist()
    def active_execute(self,userdata):
        """Drive forward while turning, ramping the speed down, then stop."""
        if self.preempt_requested():
            self.service_preempt()
            return 'preempted'
        #small forward vel to unstuck robot
        self.vel_cmd.linear.x=0.8
        self.vel_cmd.angular.z=0.4
        # Four pulses, 0.2s apart, decreasing by 0.2 each time
        # (linear.x: 0.8 -> 0.2; angular.z goes 0.4 -> -0.2).
        for i in range(0,4):
            self.vel_pub.publish(self.vel_cmd)
            self.vel_cmd.linear.x=self.vel_cmd.linear.x-0.2
            self.vel_cmd.angular.z=self.vel_cmd.angular.z-0.2
            rospy.sleep(0.2)
        # Full stop after the wiggle.
        self.vel_cmd.linear.x=0.0
        self.vel_cmd.angular.z=0.0
        self.vel_pub.publish(self.vel_cmd)
        if self.preempt_requested():
            self.service_preempt()
            return 'preempted'
        #TODO: find way to check if behaviour was successful
        # NOTE(review): success is currently assumed unconditionally; the
        # else branch below is unreachable until the check above is written.
        if True:
            return 'recovered_without_help'
        else:
            return 'not_recovered_with_help'
| 40.238095 | 152 | 0.575542 |
ffadcc9b870712e8c360f551470ccfc684e4d711 | 5,404 | py | Python | venv/lib/python3.6/site-packages/twilio/rest/api/v2010/account/new_signing_key.py | fernandoleira/stocktext | f755f83ffdaee3b179e21de955854354aced9134 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/twilio/rest/api/v2010/account/new_signing_key.py | fernandoleira/stocktext | f755f83ffdaee3b179e21de955854354aced9134 | [
"MIT"
] | 11 | 2019-12-26T17:21:03.000Z | 2022-03-21T22:17:07.000Z | venv/lib/python3.6/site-packages/twilio/rest/api/v2010/account/new_signing_key.py | fernandoleira/stocktext | f755f83ffdaee3b179e21de955854354aced9134 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class NewSigningKeyList(ListResource):
    """List resource for creating SigningKey credentials (auto-generated by the Twilio code generator)."""

    def __init__(self, version, account_sid):
        """
        Initialize the NewSigningKeyList

        :param Version version: Version that contains the resource
        :param account_sid: A 34 character string that uniquely identifies this resource.

        :returns: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyList
        :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyList
        """
        super(NewSigningKeyList, self).__init__(version)

        # Path Solution
        self._solution = {'account_sid': account_sid, }
        self._uri = '/Accounts/{account_sid}/SigningKeys.json'.format(**self._solution)

    def create(self, friendly_name=values.unset):
        """
        Create a new NewSigningKeyInstance

        :param unicode friendly_name: A string to describe the resource

        :returns: Newly created NewSigningKeyInstance
        :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
        """
        data = values.of({'FriendlyName': friendly_name, })

        payload = self._version.create(
            'POST',
            self._uri,
            data=data,
        )

        return NewSigningKeyInstance(self._version, payload, account_sid=self._solution['account_sid'], )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.NewSigningKeyList>'
class NewSigningKeyPage(Page):
    """Pagination wrapper that builds NewSigningKeyInstance objects from API pages (auto-generated)."""

    def __init__(self, version, response, solution):
        """
        Initialize the NewSigningKeyPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param account_sid: A 34 character string that uniquely identifies this resource.

        :returns: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyPage
        :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyPage
        """
        super(NewSigningKeyPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of NewSigningKeyInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
        :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
        """
        return NewSigningKeyInstance(self._version, payload, account_sid=self._solution['account_sid'], )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.NewSigningKeyPage>'
class NewSigningKeyInstance(InstanceResource):
    """Materialized SigningKey resource returned by a create call (auto-generated)."""

    def __init__(self, version, payload, account_sid):
        """
        Initialize the NewSigningKeyInstance

        :returns: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
        :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
        """
        super(NewSigningKeyInstance, self).__init__(version)

        # Marshaled Properties
        # NOTE(review): direct payload['...'] indexing raises KeyError if the
        # API omits a field; newer generated code uses payload.get — confirm
        # before regenerating.
        self._properties = {
            'sid': payload['sid'],
            'friendly_name': payload['friendly_name'],
            'date_created': deserialize.rfc2822_datetime(payload['date_created']),
            'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
            'secret': payload['secret'],
        }

        # Context
        self._context = None
        self._solution = {'account_sid': account_sid, }

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def friendly_name(self):
        """
        :returns: The string that you assigned to describe the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def date_created(self):
        """
        :returns: The RFC 2822 date and time in GMT that the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: The RFC 2822 date and time in GMT that the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def secret(self):
        """
        :returns: The secret your application uses to sign Access Tokens and to authenticate to the REST API.
        :rtype: unicode
        """
        return self._properties['secret']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.NewSigningKeyInstance>'
| 30.531073 | 109 | 0.642117 |
98110bd45ba4c564eea535635683d77272792126 | 2,001 | py | Python | pygazebo/msg/param_v_pb2.py | dmacmill/py3gazebo | b9b303aa8338d25993362ae469bc593bfb29ba2c | [
"Apache-2.0"
] | 6 | 2019-04-28T09:48:02.000Z | 2021-06-27T07:39:15.000Z | pygazebo/msg/param_v_pb2.py | dmacmill/py3gazebo | b9b303aa8338d25993362ae469bc593bfb29ba2c | [
"Apache-2.0"
] | null | null | null | pygazebo/msg/param_v_pb2.py | dmacmill/py3gazebo | b9b303aa8338d25993362ae469bc593bfb29ba2c | [
"Apache-2.0"
] | 3 | 2021-06-18T20:05:09.000Z | 2022-02-03T16:54:47.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: param_v.proto
# NOTE(review): auto-generated module ("DO NOT EDIT" header above) — change
# param_v.proto and regenerate with protoc instead of hand-editing this file.

import sys
# Py2/Py3 shim: serialized descriptor bytes need latin1 encoding under Py3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# NOTE(review): Python-2-era implicit-relative-style import; works only when
# param_pb2 is importable from sys.path — regenerate for package-relative use.
import param_pb2 as param__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
  name='param_v.proto',
  package='gazebo.msgs',
  syntax='proto2',
  serialized_pb=_b('\n\rparam_v.proto\x12\x0bgazebo.msgs\x1a\x0bparam.proto\",\n\x07Param_V\x12!\n\x05param\x18\x01 \x03(\x0b\x32\x12.gazebo.msgs.Param')
  ,
  dependencies=[param__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


# Message descriptor for Param_V: a repeated field of gazebo.msgs.Param.
_PARAM_V = _descriptor.Descriptor(
  name='Param_V',
  full_name='gazebo.msgs.Param_V',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='param', full_name='gazebo.msgs.Param_V.param', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=43,
  serialized_end=87,
)

_PARAM_V.fields_by_name['param'].message_type = param__pb2._PARAM
DESCRIPTOR.message_types_by_name['Param_V'] = _PARAM_V

# Concrete message class synthesized from the descriptor via reflection.
Param_V = _reflection.GeneratedProtocolMessageType('Param_V', (_message.Message,), dict(
  DESCRIPTOR = _PARAM_V,
  __module__ = 'param_v_pb2'
  # @@protoc_insertion_point(class_scope:gazebo.msgs.Param_V)
  ))
_sym_db.RegisterMessage(Param_V)


# @@protoc_insertion_point(module_scope)
| 27.410959 | 153 | 0.754623 |
a3fb1cee8c21464feecf1f17413f284a9c01e8b7 | 3,049 | py | Python | bigquery_erd/bigquery.py | janjagusch/bigquery-eralchemy | 3d0c5b33a7bcb306d00bc3569213465905e7cc9f | [
"Apache-2.0"
] | 2 | 2021-02-14T16:42:21.000Z | 2022-03-08T18:59:25.000Z | bigquery_erd/bigquery.py | janjagusch/bigquery-eralchemy | 3d0c5b33a7bcb306d00bc3569213465905e7cc9f | [
"Apache-2.0"
] | null | null | null | bigquery_erd/bigquery.py | janjagusch/bigquery-eralchemy | 3d0c5b33a7bcb306d00bc3569213465905e7cc9f | [
"Apache-2.0"
] | null | null | null | """
Converts Google BigQuery tables into the eralchemy intermediate representation.
"""
import os
import re
from collections import namedtuple
from typing import Iterable, Tuple, Union
from google.cloud.bigquery import Table
from eralchemy.models import (
Column as ERColumn,
Table as ERTable,
Relation as ERRelation,
)
_BQColumn = namedtuple("BQColumn", ("name", "field_type", "mode", "description"))
_DEFAULT_PATTERN = r"->\s([?*+1 ]:[?*+1 ]\s)?(.*\.)?(.*)\.(.*)$"
_DEFAULT_CARDINALITY = ("*", "1")
_PATTERN = re.compile(os.environ.get("GBQ_RELATION_PATTERN") or _DEFAULT_PATTERN)
def _walk_columns(fields, name_prefix=""):
for col in fields:
name = ".".join((name_prefix, col.name)) if name_prefix else col.name
yield _BQColumn(name, col.field_type, col.mode, col.description)
if col.fields:
for nested_col in _walk_columns(col.fields, name):
yield nested_col
def _process_relation(column_description, right_dataset, right_table):
if not column_description:
return None
result = re.search(_PATTERN, column_description)
if not result:
return None
cardinality = result.group(1)
cardinality = (
tuple(cardinality.strip().split(":")) if cardinality else _DEFAULT_CARDINALITY
)
left_dataset = result.group(2)
left_dataset = left_dataset.strip(".") if left_dataset else right_dataset
left_table = result.group(3)
return ERRelation(
left_col=f"{left_dataset}.{left_table}",
right_col=f"{right_dataset}.{right_table}",
left_cardinality=cardinality[1],
right_cardinality=cardinality[0],
)
def _process_column_type(column):
mode = column.mode
if mode:
return f"{mode}({column.field_type})"
return column.field_type
# pylint: disable=unused-argument
def _process_column_is_key(column) -> bool:
return False
# pylint: enable=unused-argument
def _process_table(table: Table) -> ERTable:
columns = [
ERColumn(col.name, _process_column_type(col), _process_column_is_key(col))
for col in _walk_columns(table.schema)
]
table = ERTable(f"{table.dataset_id}.{table.table_id}", columns)
return table
def bigquery_to_intermediary(
tables: Iterable[Table],
) -> Tuple[Iterable[Union[ERTable, ERRelation]]]:
"""
Converts BigQuery tables into the eralchemy intermediary format.
Args:
tables (Iterable[google.cloud.bigquery.Table]): An iterable of Table instances.
Returns:
A tuple with two elements:
- The first element is an iterable of eralchemy.models.Table
- The second element is an iterable of eralchemy.models.Relation
"""
tables_ = [_process_table(table) for table in tables]
relations = [
_process_relation(col.description, table.dataset_id, table.table_id)
for table in tables
for col in _walk_columns(table.schema)
]
relations = [relation for relation in relations if relation]
return tables_, relations
| 29.601942 | 87 | 0.686783 |
ee6fbb1890b092e1ac69b2e57a452e4f9ad960f8 | 1,927 | py | Python | tpdatasrc/co8fixes/scr/py00167feldrin.py | dolio/TemplePlus | 37446bb3d1fcbf460e611a4fcb2caff167e9ac08 | [
"MIT"
] | 69 | 2015-05-05T14:09:25.000Z | 2022-02-15T06:13:04.000Z | tpdatasrc/co8fixes/scr/py00167feldrin.py | anatoliy-savchak/TemplePlus | 50922bb14cc2d7dcf8fceeccf45c3b905c1b512f | [
"MIT"
] | 457 | 2015-05-01T22:07:45.000Z | 2022-03-31T02:19:10.000Z | tpdatasrc/co8fixes/scr/py00167feldrin.py | anatoliy-savchak/TemplePlus | 50922bb14cc2d7dcf8fceeccf45c3b905c1b512f | [
"MIT"
] | 25 | 2016-02-04T21:19:53.000Z | 2021-11-15T23:14:51.000Z | from utilities import *
from Co8 import *
from toee import *
from combat_standard_routines import *
def san_dialog(attachee, triggerer):
    """Suppress the default dialog handler for this NPC."""
    return SKIP_DEFAULT
def san_first_heartbeat(attachee, triggerer):
    """Hide this NPC permanently once global flag 372 has been set."""
    if game.global_flags[372] == 1:
        attachee.object_flag_set(OF_OFF)
    return RUN_DEFAULT
def san_dying(attachee, triggerer):
    """On death: adjust CR when required and record the death in flag 177."""
    if should_modify_CR(attachee):
        modify_CR(attachee, get_av_level())
    game.global_flags[177] = 1
    return RUN_DEFAULT
def san_start_combat(attachee, triggerer):
    """Combat-start hook: below 75% HP, break off combat and start dialog 1
    with the last player character found in the party."""
    if attachee.is_unconscious():  # fixes issue that just stops combat
        return RUN_DEFAULT
    # Strip every copy of item 8903 from the NPC's inventory.
    while attachee.item_find(8903) != OBJ_HANDLE_NULL:
        attachee.item_find(8903).destroy()
    if obj_percent_hp(attachee) < 75:
        found_pc = OBJ_HANDLE_NULL
        for pc in game.party:
            if pc.type == obj_t_pc:
                # Keep iterating: the *last* PC wins, and every PC is
                # removed from the NPC's hostility list.
                found_pc = pc
                attachee.ai_shitlist_remove(pc)
        if found_pc != OBJ_HANDLE_NULL:
            StopCombat(attachee, 1)
            found_pc.begin_dialog(attachee, 1)
            game.new_sid = 0
            return SKIP_DEFAULT
    # Clean up lingering Spiritual Weapon effects before default combat AI.
    Spiritual_Weapon_Begone(attachee)
    return RUN_DEFAULT
def san_resurrect(attachee, triggerer):
    """Clear the death flag (177) when this NPC is resurrected."""
    game.global_flags[177] = 0
    return RUN_DEFAULT
def san_heartbeat(attachee, triggerer):
    """Once flag 176 is set, stop fighting the party and run to (560, 437)."""
    if game.global_flags[176] == 1:
        for pc in game.party:
            attachee.ai_shitlist_remove(pc)
        destination = location_from_axis(560, 437)
        attachee.runoff(destination)
    return RUN_DEFAULT
def run_off(attachee, triggerer):
    """Make the NPC flee and, the first time only (guarded by flag 176),
    schedule kill_brunk 28,800,000 ms (8 hours) later."""
    attachee.runoff(attachee.location - 3)
    if game.global_flags[176] == 0:
        game.timevent_add(kill_brunk, (attachee,), 28800000)
        game.global_flags[176] = 1
    return RUN_DEFAULT
def kill_brunk(attachee):
    """Timed event callback: set global flag 174 (Brunk's off-screen death)."""
    game.global_flags[174] = 1
    return RUN_DEFAULT
9ac13cfbb9de30a4dbda59bdcfc3c82888a076a0 | 12,910 | py | Python | config/settings/base.py | Andrew-Chen-Wang/cookiecutter-django-ec2-gh-action | 3a65464eb37fc8dad60641b8d7a125d1b04d333e | [
"BSD-3-Clause"
] | 1 | 2021-01-14T05:38:40.000Z | 2021-01-14T05:38:40.000Z | config/settings/base.py | Andrew-Chen-Wang/cookiecutter-django-ec2-gh-action | 3a65464eb37fc8dad60641b8d7a125d1b04d333e | [
"BSD-3-Clause"
] | null | null | null | config/settings/base.py | Andrew-Chen-Wang/cookiecutter-django-ec2-gh-action | 3a65464eb37fc8dad60641b8d7a125d1b04d333e | [
"BSD-3-Clause"
] | 1 | 2021-12-10T11:17:08.000Z | 2021-12-10T11:17:08.000Z | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).parents[2]
# github_ec2_deploy/)
APPS_DIR = ROOT_DIR / "github_ec2_deploy"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_celery_beat",
"rest_framework",
"rest_framework.authtoken",
]
LOCAL_APPS = [
"github_ec2_deploy.users.apps.UsersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "github_ec2_deploy.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"github_ec2_deploy.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Andrew Chen Wang""", "acwangpython@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "github_ec2_deploy.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "github_ec2_deploy.users.adapters.SocialAccountAdapter"
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# Your stuff...
# ------------------------------------------------------------------------------
| 42.607261 | 100 | 0.648722 |
c981cc5724ec45541c44997be45c347677e35002 | 2,207 | py | Python | cfgov/v1/models/browse_page.py | m3brown/cfgov-refresh | 9582dccc97498a27fcf78a70bb50ef06efa2ce74 | [
"CC0-1.0"
] | null | null | null | cfgov/v1/models/browse_page.py | m3brown/cfgov-refresh | 9582dccc97498a27fcf78a70bb50ef06efa2ce74 | [
"CC0-1.0"
] | null | null | null | cfgov/v1/models/browse_page.py | m3brown/cfgov-refresh | 9582dccc97498a27fcf78a70bb50ef06efa2ce74 | [
"CC0-1.0"
] | null | null | null | import itertools
from django.db import models
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \
StreamFieldPanel, FieldPanel
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.models import PAGE_TEMPLATE_VAR
from .base import CFGOVPage
from ..atomic_elements import molecules, organisms
from ..util.util import get_secondary_nav_items
class BrowsePage(CFGOVPage):
    """Wagtail page with a header/content StreamField and optional
    sibling-aware secondary navigation."""
    header = StreamField([
        ('text_introduction', molecules.TextIntroduction()),
        ('featured_content', molecules.FeaturedContent()),
    ], blank=True)
    content = StreamField([
        ('image_text_25_75_group', organisms.ImageText2575Group()),
        ('image_text_50_50_group', organisms.ImageText5050Group()),
        ('half_width_link_blob_group', organisms.HalfWidthLinkBlobGroup()),
        ('well', organisms.Well()),
        ('full_width_text', organisms.FullWidthText()),
        ('expandable', organisms.Expandable()),
        ('expandable_group', organisms.ExpandableGroup()),
        ('table', organisms.Table()),
    ], blank=True)
    secondary_nav_exclude_sibling_pages = models.BooleanField(default=False)
    # General content tab
    content_panels = CFGOVPage.content_panels + [
        StreamFieldPanel('header'),
        StreamFieldPanel('content'),
    ]
    sidefoot_panels = CFGOVPage.sidefoot_panels + [
        FieldPanel('secondary_nav_exclude_sibling_pages'),
    ]
    # Tab handler interface
    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='General Content'),
        ObjectList(sidefoot_panels, heading='Sidebar'),
        ObjectList(CFGOVPage.settings_panels, heading='Configuration'),
    ])
    template = 'browse-basic/index.html'

    def add_page_js(self, js):
        """Append the secondary-navigation script to this template's JS."""
        super(BrowsePage, self).add_page_js(js)
        js['template'] += ['secondary-navigation.js']

    def full_width_serif(self):
        """Flag used by templates for full-width serif rendering.

        Bug fix: previously returned the undefined name ``true`` (lowercase),
        which raised NameError whenever the method was called.
        """
        return True

    def get_context(self, request, *args, **kwargs):
        """Expose the secondary-nav helper to the template context."""
        context = super(BrowsePage, self).get_context(request, *args, **kwargs)
        context.update({'get_secondary_nav_items': get_secondary_nav_items})
        return context
| 33.953846 | 79 | 0.707748 |
2bf9f8bf8d975b421c3021e86187a7f943187ad8 | 12,212 | py | Python | test_net.py | kris-singh/fasterrcnn | 82ff26639b3e90b6f8e727a98051752bb780d6f6 | [
"MIT"
] | null | null | null | test_net.py | kris-singh/fasterrcnn | 82ff26639b3e90b6f8e727a98051752bb780d6f6 | [
"MIT"
] | null | null | null | test_net.py | kris-singh/fasterrcnn | 82ff26639b3e90b6f8e727a98051752bb780d6f6 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import pdb
# Py2/Py3 compatibility shim: on Python 3, alias ``xrange`` to ``range`` so
# the loops below work unchanged under both interpreters.
try:
  xrange  # Python 2
except NameError:
  xrange = range  # Python 3
def parse_args(argv=None):
  """
  Parse input arguments.

  Args:
      argv: Optional list of argument strings. Defaults to ``None``, in which
          case argparse reads ``sys.argv[1:]`` — so existing ``parse_args()``
          callers are unaffected, while tests can pass an explicit list.

  Returns:
      argparse.Namespace: the parsed command line options.
  """
  parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
  parser.add_argument('--dataset', dest='dataset',
                      help='training dataset',
                      default='pascal_voc', type=str)
  parser.add_argument('--cfg', dest='cfg_file',
                      help='optional config file',
                      default='cfgs/vgg16.yml', type=str)
  parser.add_argument('--net', dest='net',
                      help='vgg16, res50, res101, res152',
                      default='res101', type=str)
  parser.add_argument('--set', dest='set_cfgs',
                      help='set config keys', default=None,
                      nargs=argparse.REMAINDER)
  parser.add_argument('--load_dir', dest='load_dir',
                      help='directory to load models', default="models",
                      type=str)
  parser.add_argument('--cuda', dest='cuda',
                      help='whether use CUDA',
                      action='store_true')
  parser.add_argument('--ls', dest='large_scale',
                      help='whether use large imag scale',
                      action='store_true')
  parser.add_argument('--mGPUs', dest='mGPUs',
                      help='whether use multiple GPUs',
                      action='store_true')
  parser.add_argument('--cag', dest='class_agnostic',
                      help='whether perform class_agnostic bbox regression',
                      action='store_true')
  parser.add_argument('--parallel_type', dest='parallel_type',
                      help='which part of model to parallel, 0: all, 1: model before roi pooling',
                      default=0, type=int)
  parser.add_argument('--checksession', dest='checksession',
                      help='checksession to load model',
                      default=1, type=int)
  parser.add_argument('--checkepoch', dest='checkepoch',
                      help='checkepoch to load network',
                      default=1, type=int)
  parser.add_argument('--checkpoint', dest='checkpoint',
                      help='checkpoint to load network',
                      default=10021, type=int)
  parser.add_argument('--vis', dest='vis',
                      help='visualization mode',
                      action='store_true')

  args = parser.parse_args(argv)
  return args
# Training hyper-parameters mirrored from the global config. NOTE(review):
# they appear unused in this evaluation script — presumably kept for parity
# with trainval_net.py; confirm before removing.
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
print("input_dir", input_dir)
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
# initilize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= data[1][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in xrange(1, imdb.num_classes):
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
cv2.imwrite('result.png', im2show)
pdb.set_trace()
#cv2.imshow('test', im2show)
#cv2.waitKey(0)
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
end = time.time()
print("test time: %0.4fs" % (end - start))
| 37.231707 | 111 | 0.623076 |
17c08694d17784b172b9f5845cadb19cc6fc370a | 11,206 | py | Python | owtf/api/reporter.py | Lonewolf-Information-systems/owtf | 65355ce8bf4a4ea0177e24ee106f77e2f87c17fa | [
"BSD-3-Clause"
] | 1 | 2018-02-05T12:10:28.000Z | 2018-02-05T12:10:28.000Z | owtf/api/reporter.py | Lonewolf-Information-systems/owtf | 65355ce8bf4a4ea0177e24ee106f77e2f87c17fa | [
"BSD-3-Clause"
] | 2 | 2021-03-11T03:35:23.000Z | 2022-02-10T23:40:23.000Z | owtf/api/reporter.py | Lonewolf-Information-systems/owtf | 65355ce8bf4a4ea0177e24ee106f77e2f87c17fa | [
"BSD-3-Clause"
] | null | null | null | """
owtf.interface.reporter
~~~~~~~~~~~~~~~~~~~~~~~
The reporter module is in charge of producing the HTML Report as well as
provide plugins with common HTML Rendering functions
.note::
This is being deprecated.
"""
import cgi
from tornado.template import Loader
from owtf.dependency_management.dependency_resolver import BaseComponent
from owtf.dependency_management.interfaces import ReporterInterface
class Reporter(BaseComponent, ReporterInterface):
COMPONENT_NAME = "reporter"
    def __init__(self):
        """Register this component and resolve its immediate dependencies."""
        self.register_in_service_locator()
        self.config = self.get_component("config")
        self.resource = self.get_component("resource")
        self.transaction = self.get_component("transaction")
        self.plugin_handler = self.get_component("plugin_handler")
        # Resolved later in init() — presumably the requester component is not
        # registered yet at construction time; confirm against component wiring.
        self.requester = None
        self.Init = False
        # Tornado template loader rooted at the plugin-output templates dir.
        self.Loader = Loader(self.config.get_val('POUTPUT_TEMPLATES_DIR'))
        self.mNumLinesToShow = 15
        self.CounterList = []
    def init(self):
        """Late initialisation: resolve the requester component (left as None
        by __init__)."""
        self.requester = self.get_component("requester")
def TransactionTableFromIDs(self, TransactionIDs, NumLinesReq=15, NumLinesRes=15):
""" Draws a table of HTTP Transactions """
# functions to get the first lines of a long string
transactions = self.transaction.get_by_ids(TransactionIDs)
return self.TransactionTableForTransactions(transactions)
def TransactionTableForURL(self, UseCache, URL, Method=None, Data=None):
transaction = self.requester.get_transaction(UseCache, URL, method=Method, data=Data)
return self.TransactionTableForTransactions([transaction])
def TransactionTableForURLList(self, UseCache, URLList, Method=None, Data=None):
transactions = self.requester.get_transactions(UseCache, URLList, method=Method, data=Data)
return self.TransactionTableForTransactions(transactions)
def TransactionTableForTransactions(self, Transactions):
return self.Loader.load("transaction_table.html").generate(TransactionList=Transactions)
def str(self, *args):
try:
return str(*args)
except TypeError:
return args[0] # Input is already Unicode
def reset_loader(self):
return self.Loader.reset()
# ----------------------------------- Methods exported from plugin_helper.py ---------------------------------
def cmd_table(self, command):
return self.Loader.load("command_table.html").generate(Command=Command)
def link_list(self, link_listName, Links):
"""
Wrapper to allow rendering a bunch of links -without name- as resource
links with name = link
"""
return self.Loader.load("link_list.html").generate(link_listName=link_listName, Links=Links)
def resource_linklist(self, ResourceListName, ResourceList):
"""
Draws an HTML Search box for defined Vuln Search resources
"""
return self.Loader.load("resource_link_list.html").generate(ResourceListName=ResourceListName,
ResourceList=ResourceList)
def Tabbedresource_linklist(self, ResourcesList):
"""
ResourceList = [
"ResourceListName", [["Name1","Resource1"],["Name2","Resource2"]]
]
"""
TabData = []
Resources = []
for ResourceListName, ResourceList in ResourcesList:
TabID = ResourceListName.replace(' ', '_')
TabData.append([ResourceListName, TabID])
Resources.append([TabID, ResourceList])
return self.Loader.load("tabbed_resource_link_list.html").generate(TabData=TabData, Resources=Resources)
def ListPostProcessing(self, ResourceListName, link_list, HTMLlink_list):
return self.Loader.load("list_post_processing.html").generate(ResourceListName=ResourceListName,
link_list=link_list, HTMLlink_list=HTMLlink_list)
def Requestlink_list(self, ResourceListName, link_list):
return self.Loader.load("request_link_list.html").generate(ResourceListName=ResourceListName, link_list=link_list)
def VulnerabilitySearchBox(self, SearchStr):
"""
Draws an HTML Search box for defined Vuln Search resources
"""
VulnSearchResources = self.resource.get_resources('VulnSearch')
return self.Loader.load("vulnerability_search_box.html").generate(SearchStr=SearchStr,
VulnSearchResources=VulnSearchResources)
def SuggestedCommandBox(self, PluginOutputDir, CommandCategoryList, Header=''):
"""
Draws HTML tabs for a list of TabName => Resource Group (i.e. how to run hydra, etc)
"""
TitleList = []
CommandList = []
for item in CommandCategoryList:
TitleList.append(item[0])
CommandList.append(self.resource.get_resources(item[1]))
# TODO: Fix up the plugin
return self.Loader.load("suggested_command_box.html").generate(Header=Header, TitleList=TitleList,
CommandList=CommandList)
def CommandDump(self, Name, CommandIntro, ModifiedCommand, RelativeFilePath, OutputIntro, TimeStr):
AbsPath = self.plugin_handler.get_abs_path(RelativeFilePath)
OutputLines = open(AbsPath, "r").readlines()
longOutput = (len(OutputLines) > self.mNumLinesToShow)
if (len(OutputLines) > self.mNumLinesToShow):
OutputLines = ''.join(OutputLines[0:self.mNumLinesToShow])
else:
OutputLines = ''.join(OutputLines)
table_vars = {
"Name": Name,
"CommandIntro": CommandIntro,
"ModifiedCommand": ModifiedCommand,
"FilePath": RelativeFilePath,
"OutputIntro": OutputIntro,
"OutputLines": OutputLines,
"TimeStr": TimeStr,
"mNumLinesToShow": self.mNumLinesToShow,
"longOutput": longOutput
}
return self.Loader.load("command_dump.html").generate(**table_vars)
def URLsFromStr(self, TimeStr, VisitURLs, URLList, NumFound):
html_content = self.Loader.load("urls_from_str.html").generate(TimeStr=TimeStr, VisitURLs=VisitURLs,
NumURLs=len(URLList), NumFound=NumFound)
if URLList:
html_content += self.link_list("URLs Scraped", URLList)
return html_content
def Robots(self, NotStr, NumLines, NumAllow, NumDisallow, NumSitemap, SavePath, EntriesList, NumAddedURLs):
vars = {
"robots_found": NotStr,
"num_lines": NumLines,
"num_allow": NumAllow,
"num_disallow": NumDisallow,
"num_sitemap": NumSitemap,
"save_path": SavePath
}
TestResult = self.Loader.load("robots.html").generate(**vars)
# robots.txt contains some entries, show browsable list! :)
if NumDisallow > 0 or NumAllow > 0 or NumSitemap > 0:
for Display, Links in EntriesList:
if Links: # Filters empty lists
TestResult += self.resource_linklist(Display, Links)
return TestResult
def HtmlString(self, String):
return String
# ---------------------- Grep Plugin Outputs -------------------- #
def ResponseBodyMatches(self, ResponseRegexpName):
RegexpName, GrepOutputs, TransactionIDS, match_percent = self.transaction.search_by_regex_name(ResponseRegexpName,
stats=True)
variables = {
"name": RegexpName.replace("RESPONSE_REGEXP_FOR_", "").replace('_', ' '),
"matches": GrepOutputs,
"transaction_ids": TransactionIDS,
"match_percent": match_percent
}
return self.Loader.load("response_matches.html").generate(**variables)
def ResponseHeaderMatches(self, HeaderRegexpName):
return self.ResearchHeaders(HeaderRegexpName)[0]
def ResearchHeaders(self, RegexName):
regex_name, grep_outputs, transaction_ids, match_percent = self.transaction.search_by_regex_name(RegexName,
stats=True)
# [[unique_matches, matched_transactions, matched_percentage]]
searches = self.Loader.load("header_searches.html").generate(match_percent=match_percent, matches=grep_outputs,
transaction_ids=transaction_ids)
return [searches, grep_outputs]
def FingerprintData(self):
HeaderTable, matches = self.ResearchHeaders('HEADERS_FOR_FINGERPRINT')
for item in matches:
# Add Vulnerability search boxes after table
HeaderTable += self.VulnerabilitySearchBox(item[1])
return HeaderTable
def TopTransactionsBySpeed(self, Order):
transactions = self.transaction.get_top_by_speed(Order)
return self.TransactionTableForTransactions(transactions)
def CookieAttributeAnalysis(self, CookieValueList, Header2TransacDict):
vars = {
"Cookies": [{
"Name": Cookie.split('=')[0],
"Link": Header2TransacDict[self.config.get('HEADERS_FOR_COOKIES').lower() + Cookie],
"Attribs": Cookie.replace(Cookie.split('=')[0] + "=", "").replace("; ", ";").split(";"),
} for Cookie in CookieValueList],
}
Table = self.Render.CreateTable({'class': 'report_intro'})
SetCookie = self.config.get('HEADERS_FOR_COOKIES').lower()
PossibleCookieAttributes = self.config.get('COOKIE_ATTRIBUTES').split(',')
for Cookie in CookieValueList:
CookieName = Cookie.split('=')[0]
CookieLink = self.Render.DrawButtonLink(cgi.escape(CookieName), Header2TransacDict[SetCookie + Cookie])
CookieAttribs = Cookie.replace(CookieName + "=", "").replace("; ", ";").split(";")
Table.CreateCustomRow('<tr><th colspan="2">Cookie: %s</th></tr>' % CookieLink)
Table.CreateRow(['Attribute', 'Value'], True)
NotFoundStr = "<b>Not Found</b>"
if CookieAttribs[0]:
CookieValue = CookieAttribs[0]
else:
CookieValue = NotFoundStr
Table.CreateRow(['Value', CookieValue])
for Attrib in PossibleCookieAttributes:
DisplayAttribute = NotFoundStr
for PresentAttrib in CookieAttribs:
# Avoid false positives due to cookie contents
if PresentAttrib.lower().startswith(Attrib.lower()):
DisplayAttribute = PresentAttrib
break
Table.CreateRow([Attrib, DisplayAttribute])
if Table.GetNumRows() == 0:
return "" # No Attributes found
return "<h3>Cookie Attribute Analysis</h3>%s" % Table.Render()
| 46.305785 | 122 | 0.618151 |
974e83c61b680149ecc4c92c18acbc333beae7c7 | 776 | py | Python | qudt/units/area.py | gnmerritt/pyqudt | 2579e824b2e002d2e27a40eae84f1b1449006487 | [
"BSD-3-Clause"
] | null | null | null | qudt/units/area.py | gnmerritt/pyqudt | 2579e824b2e002d2e27a40eae84f1b1449006487 | [
"BSD-3-Clause"
] | null | null | null | qudt/units/area.py | gnmerritt/pyqudt | 2579e824b2e002d2e27a40eae84f1b1449006487 | [
"BSD-3-Clause"
] | null | null | null | ################################################################################
#
# Copyright (C) 2019 Garrett Brown
# This file is part of pyqudt - https://github.com/eigendude/pyqudt
#
# pyqudt is derived from jQUDT
# Copyright (C) 2012-2013 Egon Willighagen <egonw@users.sf.net>
#
# SPDX-License-Identifier: BSD-3-Clause
# See the file LICENSE for more information.
#
################################################################################
from qudt.unit import Unit
from qudt.ontology.unit_factory import UnitFactory
class AreaUnit(object):
    """Common area units, resolved once at import time via the QUDT ontology."""
    SQUARE_METER: Unit = UnitFactory.get_qudt('M2')
    # Not part of the core QUDT vocabulary; resolved from an OpenPHACTS
    # extension URI instead of a QUDT short code.
    SQUARE_ANGSTROM: Unit = UnitFactory.get_unit('http://www.openphacts.org/units/SquareAngstrom')
    ACRE: Unit = UnitFactory.get_qudt('AC')
fa07666229ed01214352287a238558d506d573f6 | 2,394 | py | Python | apps/dc_algorithm/tasks.py | pinkerltm/datacube-ui | 325d404a994d49c23922e7de10c7ab244b78500b | [
"Apache-2.0"
] | 1 | 2019-07-22T05:24:40.000Z | 2019-07-22T05:24:40.000Z | apps/dc_algorithm/tasks.py | SivaramakrishnanKN/NE-GeoCloud | affcae49e0ccd7d29360a2771a9517147ed56590 | [
"Apache-2.0"
] | 1 | 2019-06-06T18:31:29.000Z | 2019-06-06T18:31:29.000Z | apps/dc_algorithm/tasks.py | SivaramakrishnanKN/NE-GeoCloud | affcae49e0ccd7d29360a2771a9517147ed56590 | [
"Apache-2.0"
] | 5 | 2019-06-05T07:26:13.000Z | 2019-06-08T06:53:11.000Z | import celery
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from datetime import datetime, timedelta
import os
import shutil
from django.apps import apps
from .models import Application
class DCAlgorithmBase(celery.Task):
    """Serves as a base class for all DC algorithm celery tasks"""

    # Subclasses must set this to the Django app label of the algorithm app
    # (e.g. 'custom_mosaic_tool'); used to resolve that app's Task and
    # UserHistory models by name.
    app_name = None

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Onfailure call for celery tasks

        all tasks should have a kwarg 'task_id' that can be used to 'get' the model
        from the app.
        """
        # The model pk comes from the task's kwargs, not celery's positional
        # task_id argument (which is the celery-internal id).
        task_id = kwargs.get('task_id')
        # App label 'foo_bar' -> model class name 'FooBarTask'.
        camel_case = "".join(x.title() for x in self._get_app_name().split('_'))
        task_model = apps.get_model(".".join([self._get_app_name(), camel_case + "Task"]))
        history_model = apps.get_model(".".join([self._get_app_name(), "UserHistory"]))
        try:
            task = task_model.objects.get(pk=task_id)
            if task.complete:
                # Task already finished (or already marked failed); nothing to do.
                return
            task.complete = True
            task.update_status("ERROR", "There was an unhandled exception during the processing of your task.")
            # Remove the user's history entry so the failed task no longer
            # shows up in their task list.
            history_model.objects.filter(task_id=task.pk).delete()
        except task_model.DoesNotExist:
            # The task row was never created or was already deleted.
            pass

    def _get_app_name(self):
        """Get the app name of the task - raise an error if None"""
        if self.app_name is None:
            raise NotImplementedError(
                "You must specify an app_name in classes that inherit DCAlgorithmBase. See the DCAlgorithmBase docstring for more details."
            )
        return self.app_name
@periodic_task(
    name="dc_algorithm.clear_cache",
    #run_every=(30.0),
    run_every=(crontab(hour=0, minute=0)),
    ignore_result=True)
def clear_cache():
    """Nightly janitor task.

    For every registered application, deletes task rows whose execution
    started more than two days ago, along with their user-history entries
    and on-disk result directories.
    """
    cutoff = datetime.now() - timedelta(days=2)
    for application in Application.objects.all():
        # App label 'foo_bar' -> model class name 'FooBarTask'.
        model_prefix = "".join(part.title() for part in application.pk.split('_'))
        task_model = apps.get_model(".".join([application.pk, model_prefix + "Task"]))
        history_model = apps.get_model(".".join([application.pk, "UserHistory"]))
        stale_tasks = task_model.objects.filter(execution_start__lt=cutoff)
        for stale_task in stale_tasks:
            history_model.objects.filter(task_id=stale_task.pk).delete()
            shutil.rmtree(stale_task.get_result_path())
            stale_task.delete()
    print("Cache Cleared.")
| 37.40625 | 139 | 0.649123 |
b8df3b07111fad02c6791d67a0d67e6a8a108c5a | 958 | py | Python | examples/get_color_all.py | ceboxsell/LocalLifxLan | c13975661e52abfbd5d5b22a27bf01c362c42781 | [
"MIT"
] | null | null | null | examples/get_color_all.py | ceboxsell/LocalLifxLan | c13975661e52abfbd5d5b22a27bf01c362c42781 | [
"MIT"
] | null | null | null | examples/get_color_all.py | ceboxsell/LocalLifxLan | c13975661e52abfbd5d5b22a27bf01c362c42781 | [
"MIT"
] | 1 | 2020-06-03T09:15:00.000Z | 2020-06-03T09:15:00.000Z | #!/usr/bin/env python
# coding=utf-8
import sys
from lifxlan import LifxLAN
def main():
    """Discover LIFX lights on the LAN and print each one's label, MAC and color.

    An optional single CLI argument hints at how many lights exist, which
    speeds up discovery; without it discovery is open-ended.
    """
    num_lights = None
    if len(sys.argv) != 2:
        print("\nDiscovery will go much faster if you provide the number of lights on your LAN:")
        print(" python {} <number of lights on LAN>\n".format(sys.argv[0]))
    else:
        try:
            num_lights = int(sys.argv[1])
        except ValueError:
            # Previously a non-numeric argument crashed with a traceback;
            # fall back to open-ended discovery instead.
            print("Ignoring non-numeric light count {!r}; discovering without a hint.".format(sys.argv[1]))

    # instantiate LifxLAN client, num_lights may be None (unknown).
    # In fact, you don't need to provide LifxLAN with the number of bulbs at all.
    # lifx = LifxLAN() works just as well. Knowing the number of bulbs in advance
    # simply makes initial bulb discovery faster.
    lifx = LifxLAN(num_lights)

    # get devices
    print("Discovering lights...")
    devices = lifx.get_lights()
    print("Found {} lights:".format(len(devices)))
    for d in devices:
        print("{} ({}) HSBK: {}".format(d.get_label(), d.mac_addr, d.get_color()))

if __name__=="__main__":
    main()
| 29.030303 | 97 | 0.646138 |
e817b4a26a08cde5896f63b57f6ae8054318cba3 | 10,670 | py | Python | src/.history/Test/HiwinRT605_Strategy_test_20190625201520.py | SamKaiYang/2019_Hiwin_Shaking | d599f8c87dc4da89eae266990d12eb3a8b0f3e16 | [
"MIT"
] | null | null | null | src/.history/Test/HiwinRT605_Strategy_test_20190625201520.py | SamKaiYang/2019_Hiwin_Shaking | d599f8c87dc4da89eae266990d12eb3a8b0f3e16 | [
"MIT"
] | null | null | null | src/.history/Test/HiwinRT605_Strategy_test_20190625201520.py | SamKaiYang/2019_Hiwin_Shaking | d599f8c87dc4da89eae266990d12eb3a8b0f3e16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# license removed for brevity
#策略 機械手臂 四點來回跑
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
import Hiwin_RT605_Arm_Command as ArmTask
##----Arm state-----------
Arm_state_flag = 0
Strategy_flag = 0
Sent_data_flag = False
##----Arm status enum
class Arm_status(enum.IntEnum):
    """Status codes reported by the HIWIN arm driver over the ROS service."""
    Idle = 0      # arm is ready for the next command
    Isbusy = 1    # arm is currently executing a motion
    Error = 2     # driver reported a fault
    shutdown = 6  # driver/program is shutting down
##-----------server feedback arm state----------
def Arm_state(req):
    """ROS service callback: mirror the driver-reported arm state into globals.

    Updates ``Arm_state_flag`` and ``Strategy_flag`` and echoes a numeric
    acknowledgement back to the caller.
    """
    global CurrentMissionType,Strategy_flag,Arm_state_flag
    Arm_state_flag = int('%s'%req.Arm_state)
    if Arm_state_flag == Arm_status.Isbusy: # arm is busy
        Strategy_flag = False
        return(1)
    if Arm_state_flag == Arm_status.Idle: # arm is ready
        Strategy_flag = True
        return(0)
    if Arm_state_flag == Arm_status.shutdown: # program interrupted
        # NOTE(review): Strategy_flag holds booleans above but the int 6 here,
        # while downstream code compares it with `== True` -- confirm this
        # mixed bool/int usage is intentional.
        Strategy_flag = 6
        return(6)
##-----------server feedback Sent_flag----------
def Sent_flag(req):
    """ROS service callback: record the driver's sent-data acknowledgement.

    Stores ``req.sent_flag`` (coerced to int) in the module-level
    ``Sent_data_flag`` and always acknowledges with 1.
    """
    global Sent_data_flag
    Sent_data_flag = int('%s' % req.sent_flag)
    return 1
def arm_state_server():
    """Register the ROS services through which the arm driver pushes updates.

    Service types ``arm_state`` and ``sent_flag`` come from the
    ``ROS_Socket.srv`` wildcard import at the top of the file.
    """
    #rospy.init_node(NAME)
    s = rospy.Service('arm_state',arm_state, Arm_state) ##server arm state
    a = rospy.Service('sent_flag',sent_flag,Sent_flag)
    #rospy.spin() ## spin one
##-----------switch define------------##
class switch(object):
    """One-shot value matcher emulating a C-style switch statement.

    Usage:
        for case in switch(value):
            if case(1): ...; break
            if case():  ...  # default

    ``case(*args)`` returns True when the wrapped value matches any arg (or
    unconditionally once a previous case has matched -- fall-through -- or
    when called with no args as the default case).
    """

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # BUG FIX: the original `raise StopIteration` inside this generator
        # is converted to RuntimeError on Python 3.7+ (PEP 479), crashing
        # whenever a for-loop body did not `break`. A plain return ends the
        # generator cleanly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
##------------class-------
class point():
def __init__(self,x,y,z,pitch,roll,yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##-------------------------strategy---------------------
##-----Mission 參數
GetInfoFlag = False
ExecuteFlag = False
GetKeyFlag = False
MotionSerialKey = []
MissionType_Flag = 0
MotionStep = 0
##-----手臂動作位置資訊
angle_SubCue = 0
LinePtpFlag = False
MoveFlag = False
PushBallHeight = 6
ObjAboveHeight = 10
SpeedValue = 10
MissionEndFlag = False
CurrentMissionType = 0
##---------------Enum---------------##
class ArmMotionCommand(enum.IntEnum):
    """Atomic arm motions that mission sequences are built from."""
    Arm_Stop = 0                 # end of a motion sequence
    Arm_MoveToTargetUpside = 1   # PTP move above the target
    Arm_MoveFowardDown = 2       # move forward/down (sic: 'Foward' kept for compatibility)
    Arm_MoveVision = 3           # return to the vision/home pose
    Arm_PushBall = 4             # sweep through the ball
    Arm_LineUp = 5               # linear move up to clearance height
    Arm_LineDown = 6             # linear move down to push height
    Arm_Angle = 7                # angle adjustment (unused in this file)
    Arm_StopPush = 8             # stop and request an object rescan
class MissionType(enum.IntEnum):
    """High-level mission states for the strategy state machine."""
    Get_Img = 0      # acquire an image (unused in this test script)
    PushBall = 1     # push the ball sequence
    Pushback = 2     # return-to-vision sequence
    Mission_End = 3  # terminal state; main loop shuts down
# class pos():
# def __init__(self, x, y, z, pitch, roll, yaw):
# self.x = 0
# self.y = 36.8
# self.z = 11.35
# self.pitch = -90
# self.roll = 0
# self.yaw = 0
class Target_pos():
    """Pose of the target, initialised to the fixed home pose.

    NOTE(review): the constructor parameters are accepted but ignored -- every
    instance gets the hard-coded values below (same pattern as the
    commented-out ``pos`` class above). Confirm whether the arguments were
    meant to be assigned.
    """
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x = 0
        self.y = 36.8
        self.z = 11.35
        self.pitch = -90
        self.roll = 0
        self.yaw = 0
class TargetPush_pos():
    """Push-target pose, initialised to the fixed home pose.

    NOTE(review): as with Target_pos, the constructor parameters are accepted
    but ignored; every instance gets the hard-coded values below. Confirm
    whether the arguments were meant to be assigned.
    """
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x = 0
        self.y = 36.8
        self.z = 11.35
        self.pitch = -90
        self.roll = 0
        self.yaw = 0
class Item():
    """A detected object: planar coordinates plus its classifier label."""
    def __init__(self,x,y,label):
        self.x = x
        self.y = y
        self.label = label
def Mission_Trigger():
    """Run whichever mission stage the three exclusive stage flags select.

    The three checks are deliberately independent ``if`` statements (not
    elif): each stage flips the flags for the next one, so a single call can
    cascade through info -> key -> execute in one pass, exactly as before.
    """
    if GetInfoFlag and not GetKeyFlag and not ExecuteFlag:
        GetInfo_Mission()
    if GetKeyFlag and not GetInfoFlag and not ExecuteFlag:
        GetKey_Mission()
    if ExecuteFlag and not GetInfoFlag and not GetKeyFlag:
        Execute_Mission()
def GetInfo_Mission():
    """Vision stage (placeholder): advance the state machine to key generation."""
    global GetInfoFlag, GetKeyFlag, ExecuteFlag
    #Billiards_Calculation()
    GetInfoFlag, GetKeyFlag, ExecuteFlag = False, True, False
def GetKey_Mission():
    """Key stage: pick the next mission, build its motion list, then execute."""
    global GetInfoFlag, GetKeyFlag, ExecuteFlag, MotionKey, MotionSerialKey
    mission = Get_MissionType()
    MissionItem(mission)
    MotionSerialKey = MotionKey
    GetInfoFlag, GetKeyFlag, ExecuteFlag = False, False, True
def Get_MissionType():
    """Alternate between the PushBall and Pushback missions.

    ``MissionType_Flag`` toggles 0 <-> 1 on each call; the chosen mission is
    also remembered in ``CurrentMissionType``.
    """
    global MissionType_Flag, CurrentMissionType
    if MissionType_Flag == 0:
        mission = MissionType.PushBall
        MissionType_Flag += 1
    elif MissionType_Flag == 1:
        mission = MissionType.Pushback
        MissionType_Flag -= 1
    CurrentMissionType = mission
    return mission
def MissionItem(ItemNo):
    """Select the arm-motion command sequence for the given mission type.

    Stores the chosen sequence in the module-level ``MotionKey`` and returns
    it; an unknown mission type leaves ``MotionKey`` untouched.
    """
    global MotionKey
    push_ball_sequence = [
        ArmMotionCommand.Arm_MoveToTargetUpside,
        ArmMotionCommand.Arm_LineDown,
        ArmMotionCommand.Arm_PushBall,
        ArmMotionCommand.Arm_LineUp,
        ArmMotionCommand.Arm_Stop,
    ]
    push_back_sequence = [
        ArmMotionCommand.Arm_MoveVision,
        ArmMotionCommand.Arm_Stop,
        ArmMotionCommand.Arm_StopPush,
    ]
    if ItemNo == MissionType.PushBall:
        MotionKey = push_ball_sequence
    elif ItemNo == MissionType.Pushback:
        MotionKey = push_back_sequence
    return MotionKey
def Execute_Mission():
    """Execute stage: run one motion key per arm-idle cycle, then advance the
    state machine when the sequence's Arm_Stop marker is reached."""
    global GetInfoFlag,GetKeyFlag,ExecuteFlag,MotionKey,MotionStep,MotionSerialKey,MissionEndFlag,CurrentMissionType,Strategy_flag,Arm_state_flag
    # Only act when the arm is idle and the previous command was acknowledged.
    if Arm_state_flag == Arm_status.Idle and Strategy_flag == True:
        Strategy_flag = False
        if MotionKey[MotionStep] == ArmMotionCommand.Arm_Stop:
            if MissionEndFlag == True:
                # All missions done: move to the terminal state so the main
                # loop can shut the node down.
                CurrentMissionType = MissionType.Mission_End
                GetInfoFlag = False
                GetKeyFlag = False
                ExecuteFlag = False
                print("Mission_End")
            elif CurrentMissionType == MissionType.PushBall:
                # PushBall finished: skip vision and go straight back to
                # key generation for the return sequence.
                GetInfoFlag = False
                GetKeyFlag = True
                ExecuteFlag = False
                MotionStep = 0
                print("PushBall")
            else:
                # Otherwise restart from the vision stage.
                GetInfoFlag = True
                GetKeyFlag = False
                ExecuteFlag = False
                MotionStep = 0
        else:
            # Issue the next motion in the sequence and advance the cursor.
            MotionItem(MotionSerialKey[MotionStep])
            MotionStep += 1
def MotionItem(ItemNo):
    """Translate one ArmMotionCommand into a target pose and send it to the arm.

    Mutates the shared ``pos`` pose and speed/flag globals, then issues the
    move through ArmTask (PTP when LinePtpFlag is False, linear when True).

    NOTE(review): ``MoveFlag`` is a local that is only assigned inside the
    case branches -- if the default case runs, the ``if MoveFlag == True``
    check below raises UnboundLocalError. Confirm whether MoveFlag should be
    initialised to False at the top.
    """
    global angle_SubCue,SpeedValue,PushFlag,LinePtpFlag,MissionEndFlag
    SpeedValue = 5
    for case in switch(ItemNo): # send the selected arm motion command to the socket
        if case(ArmMotionCommand.Arm_Stop):
            MoveFlag = False
            print("Arm_Stop")
            break
        if case(ArmMotionCommand.Arm_StopPush):
            MoveFlag = False
            PushFlag = True # rescan objects
            print("Arm_StopPush")
            break
        if case(ArmMotionCommand.Arm_MoveToTargetUpside):
            pos.x = 10
            pos.y = 36.8
            pos.z = 11.35
            pos.pitch = -90
            pos.roll = 0
            pos.yaw = 10
            MoveFlag = True
            LinePtpFlag = False
            SpeedValue = 10
            print("Arm_MoveToTargetUpside")
            break
        if case(ArmMotionCommand.Arm_LineUp):
            # Linear move up to the clearance height only; x/y unchanged.
            pos.z = ObjAboveHeight
            MoveFlag = True
            LinePtpFlag = True
            SpeedValue = 5
            print("Arm_LineUp")
            break
        if case(ArmMotionCommand.Arm_LineDown):
            # Linear move down to the push height only; x/y unchanged.
            pos.z = PushBallHeight
            MoveFlag = True
            LinePtpFlag = True
            SpeedValue = 5
            print("Arm_LineDown")
            break
        if case(ArmMotionCommand.Arm_PushBall):
            pos.x = -10
            pos.y = 36.8
            pos.z = 11.35
            pos.pitch = -90
            pos.roll = 0
            pos.yaw = -10
            SpeedValue = 10 # to be tested
            MoveFlag = True
            LinePtpFlag = False
            print("Arm_PushBall")
            break
        if case(ArmMotionCommand.Arm_MoveVision):
            pos.x = 0
            pos.y = 36.8
            pos.z = 11.35
            pos.pitch = -90
            pos.roll = 0
            pos.yaw = 0
            SpeedValue = 10
            MoveFlag = True
            LinePtpFlag = False
            # mission-end flag: signals Execute_Mission to terminate after
            # this sequence finishes.
            MissionEndFlag = True
            print("Arm_MoveVision")
            break
        if case(ArmMotionCommand.Arm_MoveFowardDown):
            pos.x = 0
            pos.y = 36.8
            pos.z = 11.35
            pos.pitch = -90
            pos.roll = 0
            pos.yaw = 0
            MoveFlag = True
            LinePtpFlag = False
            print("Arm_MoveFowardDown")
            break
        if case(): # default, could also just omit condition or 'if True'
            print ("something else!")
            # No need to break here, it'll stop anyway
    if MoveFlag == True:
        if LinePtpFlag == False:
            # Point-to-point move.
            print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
            #strategy_client_Arm_Mode(0,1,0,30,2)#action,ra,grip,vel,both
            # ArmTask.strategy_client_Speed_Mode(1)
            # ArmTask.strategy_client_Arm_Mode(4,1,0,SpeedValue,2)
            ArmTask.strategy_client_Arm_Mode(2,1,0,SpeedValue,2)#action,ra,grip,vel,both
            ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
        elif LinePtpFlag == True:
            # Linear move.
            #strategy_client_Arm_Mode(0,1,0,40,2)#action,ra,grip,vel,both
            print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
            # ArmTask.strategy_client_Speed_Mode(0)
            # ArmTask.strategy_client_Arm_Mode(4,1,0,SpeedValue,2)
            ArmTask.strategy_client_Arm_Mode(2,1,2,SpeedValue,2)#action,ra,grip,vel,both
            ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
    #action: ptp line
    #ra : abs rel
    #grip : gripper
    #vel speed
    #both : Ctrl_Mode
##-------------strategy end ------------
def myhook():
    """Shutdown hook registered with rospy; announces that the node is stopping."""
    print("shutdown time!")
if __name__ == '__main__':
    argv = rospy.myargv()
    rospy.init_node('strategy', anonymous=True)
    GetInfoFlag = True #Test no data
    arm_state_server()
    # Busy-loop the state machine; callbacks update the flags it reads.
    while 1:
        Mission_Trigger()
        if CurrentMissionType == MissionType.Mission_End:
            ArmTask.rospy.on_shutdown(myhook)
            ArmTask.rospy.spin()
    # NOTE(review): the while-loop above never breaks, so this spin() is
    # effectively unreachable except via an exception.
    rospy.spin()
| 31.017442 | 145 | 0.579288 |
8264dc5be18dc59bad2c7bf54ca9dee0f183b3ea | 228 | py | Python | datahub/omis/market/apps.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/omis/market/apps.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/omis/market/apps.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | from django.apps import AppConfig
class MarketConfig(AppConfig):
    """Django App Config for the OMIS Market app."""

    name = 'datahub.omis.market'
    label = 'omis_market'  # namespaced app. Use this e.g. when migrating
8d255ca944750ef19e3db56e8e749d8ec3f6c567 | 1,444 | py | Python | web/watch.py | intoj/intoj-legacy | 2384bcc78ca360c83324a6d5de10c2fd3fbbe2d6 | [
"BSD-2-Clause"
] | 7 | 2020-01-09T06:42:58.000Z | 2021-06-27T04:18:46.000Z | web/watch.py | intoj/intoj-legacy | 2384bcc78ca360c83324a6d5de10c2fd3fbbe2d6 | [
"BSD-2-Clause"
] | null | null | null | web/watch.py | intoj/intoj-legacy | 2384bcc78ca360c83324a6d5de10c2fd3fbbe2d6 | [
"BSD-2-Clause"
] | null | null | null | #coding: utf-8
__author__ = 'Michael Liao'
import os, sys, time, subprocess
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def log(s):
    """Print a message prefixed with the monitor tag."""
    print('[Monitor] %s' % s)
class MyFileSystemEventHander(FileSystemEventHandler):
    """Watchdog handler that invokes a restart callback on .py file changes.

    (Class name misspells 'Handler'; kept as-is since it is part of the
    public interface.)
    """
    def __init__(self, fn):
        super(MyFileSystemEventHander, self).__init__()
        # Zero-arg callable invoked whenever a Python source file changes.
        self.restart = fn

    def on_any_event(self, event):
        # Only react to Python source changes; ignore everything else
        # (editor temp files, logs, etc.).
        if event.src_path.endswith('.py'):
            log('Python source file changed: %s' % event.src_path)
            self.restart()
command = ['echo', 'ok']
process = None
def kill_process():
    """Terminate the currently tracked child process, if any, and clear it."""
    global process
    if not process:
        return
    log('Kill process [%s]...' % process.pid)
    process.kill()
    process.wait()
    log('Process ended with code %s.' % process.returncode)
    process = None
def start_process():
    """Spawn the managed child process defined by the module-level ``command``,
    wiring it to this process's stdio."""
    global process, command
    log('Start process %s...' % ' '.join(command))
    process = subprocess.Popen(command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
def restart_process():
    """Kill the managed child process (if running) and start a fresh one."""
    kill_process()
    start_process()
def start_watch(path, callback):
    """Watch *path* recursively for .py changes and keep the child process fresh.

    Args:
        path: directory to watch recursively.
        callback: optional zero-arg callable invoked on each .py change.
            Falls back to restarting the managed process when None.

    BUG FIX: ``callback`` was previously accepted but ignored (the handler
    was always given ``restart_process``). It is now honoured, with the old
    behavior preserved for the existing ``start_watch(path, None)`` caller.
    """
    observer = Observer()
    observer.schedule(MyFileSystemEventHander(callback or restart_process), path, recursive=True)
    observer.start()
    log('Watching directory %s...' % path)
    start_process()
    try:
        while True:
            time.sleep(.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
if __name__ == '__main__':
    # Run the app under gunicorn and restart it whenever a .py file changes.
    command = ['gunicorn','-c','config.py','app:app']
    path = os.path.abspath('.')
    start_watch(path, None)
| 23.290323 | 91 | 0.715374 |
2104f82a9661db47141cb41409d01501e9adf12b | 3,155 | py | Python | Project-5/predict.py | kshntn/DLND | c573748bb191157ba59683fb08ec6b429dd86d29 | [
"MIT"
] | null | null | null | Project-5/predict.py | kshntn/DLND | c573748bb191157ba59683fb08ec6b429dd86d29 | [
"MIT"
] | null | null | null | Project-5/predict.py | kshntn/DLND | c573748bb191157ba59683fb08ec6b429dd86d29 | [
"MIT"
] | 1 | 2019-12-09T08:03:39.000Z | 2019-12-09T08:03:39.000Z | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory."""
    print("Loading model.")

    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)

    print("model_info: {}".format(model_info))

    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])

    # Load the stored model parameters (state dict saved at training time).
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))

    # Load the saved word_dict used to map review words to vocabulary ids.
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)

    # Inference only: move to the device and disable training-mode layers.
    model.to(device).eval()

    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Deserialize a text/plain request body into a UTF-8 string.

    Raises for any other content type, mirroring the SageMaker contract.
    """
    print('Deserializing the input data.')
    if content_type != 'text/plain':
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    return serialized_input_data.decode('utf-8')
def output_fn(prediction_output, accept):
    """Serialize the model output by stringifying it (``accept`` is ignored)."""
    print('Serializing the generated output.')
    serialized = str(prediction_output)
    return serialized
def predict_fn(input_data, model):
    """Run sentiment inference on one raw review string.

    Returns a numpy array holding a single rounded 0/1 sentiment value.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Process input_data so that it is ready to be sent to our model:
    # data_X  - a fixed-length sequence of vocabulary ids for the review
    # data_len - the original (pre-padding) review length
    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)

    # Using data_X and data_len we construct an appropriate input tensor.
    # The model expects input rows of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)  # batch of one

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # Forward pass without gradients; round the sigmoid output to 0/1.
    print('computing output')
    with torch.no_grad():
        output = model.forward(data)
    result = np.round(output.numpy())

    return result
| 33.210526 | 107 | 0.698257 |
11612295d92d1ba3e390ba2b39271d2875a3c9de | 25,692 | py | Python | tensorflow/python/framework/common_shapes.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/framework/common_shapes.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/framework/common_shapes.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape.

  Eager tensors always have concrete shapes; graph tensors are checked via
  their static shape.
  """
  return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
def rank(tensor):
  """Return a rank if it is a tensor, else return None."""
  if isinstance(tensor, ops.Tensor):
    return tensor._rank()  # pylint: disable=protected-access
  # Non-graph-Tensor inputs (including eager tensors) report no static rank.
  return None
def scalar_shape(unused_op):
  """Shape function for ops that output a scalar value."""
  # A scalar is a rank-0 tensor: the empty shape.
  return [tensor_shape.TensorShape([])]
def unchanged_shape(op):
  """Shape function for ops that output a tensor like their first input."""
  return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: The exact rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with a particular rank.
  """

  def _shape_fn(op):
    input_shape = op.inputs[0].get_shape()
    return [input_shape.with_rank(rank)]

  return _shape_fn
def unchanged_shape_with_rank_at_least(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: A lower bound on the rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with a particular rank.
  """

  def _shape_fn(op):
    input_shape = op.inputs[0].get_shape()
    return [input_shape.with_rank_at_least(rank)]

  return _shape_fn
def unchanged_shape_with_rank_at_most(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: An upper bound on the rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with a particular rank.
  """

  def _shape_fn(op):
    input_shape = op.inputs[0].get_shape()
    return [input_shape.with_rank_at_most(rank)]

  return _shape_fn
def matmul_shape(op):
  """Shape function for a MatMul op."""
  # Both operands must be rank-2 matrices.
  a_shape = op.inputs[0].get_shape().with_rank(2)
  transpose_a = op.get_attr("transpose_a")
  b_shape = op.inputs[1].get_shape().with_rank(2)
  transpose_b = op.get_attr("transpose_b")
  # Output dims are the outer dims of the (possibly transposed) operands.
  output_rows = a_shape[1] if transpose_a else a_shape[0]
  output_cols = b_shape[0] if transpose_b else b_shape[1]
  # The shared inner dimensions must be compatible.
  inner_a = a_shape[0] if transpose_a else a_shape[1]
  inner_b = b_shape[1] if transpose_b else b_shape[0]
  inner_a.assert_is_compatible_with(inner_b)
  return [tensor_shape.TensorShape([output_rows, output_cols])]
def get_conv_output_size(input_size, filter_size, strides, padding_type):
  """Returns the spatial size of a n-d convolution/pooling output.

  Args:
    input_size: Iterable of spatial input dimensions (Dimension-like; may be
      unknown).
    filter_size: Iterable of filter spatial dimensions (Dimension-like).
    strides: Iterable of per-dimension strides (int-convertible).
    padding_type: The padding attr value; note this is compared against the
      byte strings b"VALID"/b"SAME" as stored on the NodeDef.

  Returns:
    A tuple of output sizes; entries are None where the size is unknown.

  Raises:
    ValueError: If the filter is larger than the input, or the padding type
      is unrecognized.
  """
  # Normalize everything to plain ints, with None marking unknown dims.
  input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
  filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
  strides = [int(x) for x in strides]

  # Degenerate 1x...x1 input with 1x...x1 filter: output equals input
  # regardless of stride/padding.
  if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
    return input_size

  if any(x is not None and y is not None and x > y for x, y in
         zip(filter_size, input_size)):
    raise ValueError("Filter must not be larger than the input: "
                     "Filter: %r Input: %r" % (filter_size, input_size))

  if padding_type == b"VALID":

    def _valid(in_dim, k_dim, s_dim):
      # ceil((in - k + 1) / s), written with integer arithmetic; None
      # (unknown) inputs propagate as None.
      if in_dim is not None and k_dim is not None:
        return (in_dim - k_dim + s_dim) // s_dim
      else:
        return None

    output_size = [
        _valid(in_dim, k_dim, s_dim)
        for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
    ]
  elif padding_type == b"SAME":

    def _same(in_dim, s_dim):
      # ceil(in / s); SAME padding output size is independent of filter size.
      if in_dim is not None:
        return (in_dim + s_dim - 1) // s_dim
      else:
        return None

    output_size = [_same(in_dim, s_dim)
                   for in_dim, s_dim in zip(input_size, strides)]
  else:
    raise ValueError("Invalid padding: %r" % padding_type)

  return tuple(output_size)
def get2d_conv_output_size(input_height, input_width, filter_height,
                           filter_width, row_stride, col_stride, padding_type):
  """Returns the number of rows and columns in a convolution/pooling output."""
  # Bundle the 2-D spatial arguments and delegate to the n-d helper.
  spatial_input = (input_height, input_width)
  spatial_filter = (filter_height, filter_width)
  spatial_strides = (row_stride, col_stride)
  return get_conv_output_size(spatial_input, spatial_filter, spatial_strides,
                              padding_type)
def conv2d_shape(op):
  """Shape function for a Conv2D op.

  This op has two inputs:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  * filter, a 4D tensor with shape = [filter_rows, filter_cols,
    depth_in, depth_out]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows and out_cols depend on the
  value of the op's "padding" and "strides" attrs.

  Args:
    op: A Conv2D Operation.

  Returns:
    A list containing the Shape of the Conv2D output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  filter_shape = op.inputs[1].get_shape().with_rank(4)

  # get_attr raises ValueError when the attr is absent; in that case fall
  # through to the default (NHWC) handling below.
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  if data_format == b"NCHW":
    # Convert input shape to the default NHWC for inference.
    input_shape = [input_shape[0], input_shape[2], input_shape[3],
                   input_shape[1]]

  # From here on input_shape is in NHWC order regardless of data_format.
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]

  filter_rows = filter_shape[0]
  filter_cols = filter_shape[1]
  depth_out = filter_shape[3]
  # Check that the input depths are compatible.
  input_shape[3].assert_is_compatible_with(filter_shape[2])

  # The "strides" attr is stored in the op's own data_format order.
  if data_format == b"NCHW":
    stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
  else:
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")

  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
                                              filter_cols, stride_r, stride_c,
                                              padding)
  output_shape = [batch_size, out_rows, out_cols, depth_out]
  if data_format == b"NCHW":
    # Convert output shape back to NCHW.
    output_shape = [output_shape[0], output_shape[3], output_shape[1],
                    output_shape[2]]
  return [tensor_shape.TensorShape(output_shape)]
def depthwise_conv2d_native_shape(op):
  """Shape function for a DepthwiseConv2D op.

  This op has two inputs:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  * filter, a 4D tensor with shape = [filter_rows, filter_cols,
    depth_in, depthwise_multiplier]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
  on the value of the op's "padding" and "strides" attrs.

  Args:
    op: A DepthwiseConv2dNative Operation.

  Returns:
    A list containing the Shape of the DepthwiseConv2DNative output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  filter_shape = op.inputs[1].get_shape().with_rank(4)

  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]

  filter_rows = filter_shape[0]
  filter_cols = filter_shape[1]
  # Output depth = input depth * depthwise multiplier.
  depth_out = filter_shape[3] * filter_shape[2]
  # Check that the input depths are compatible.
  input_shape[3].assert_is_compatible_with(filter_shape[2])

  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  if stride_r != stride_c:
    # TODO(shlens): Add support for this.
    raise ValueError("Current implementation only supports equal length "
                     "strides in the row and column dimensions.")

  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  stride = stride_r
  padding = op.get_attr("padding")

  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
                                              filter_cols, stride, stride,
                                              padding)

  return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def separable_conv2d_shape(op):
  """Shape function for a SeparableConv2D op.

  This op has three inputs:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  * depthwise_filter, a 4D tensor with shape = [filter_rows,
    filter_cols, depth_in, depth_multiplier]
  * pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
    depth_multiplier, depth_out]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows and out_cols depend on the
  value of the op's "padding" and "strides" attrs.

  Args:
    op: A SeparableConv2D Operation.

  Returns:
    A list containing the Shape of the SeparableConv2D output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  # merge_with both validates rank 4 and ties the filter's depth_in to the
  # input's depth.
  depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
      tensor_shape.TensorShape([None, None, input_shape[3], None]))
  # The pointwise filter consumes depth_in * depth_multiplier channels.
  pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]

  pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
      tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))

  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]

  filter_rows = depthwise_filter_shape[0]
  filter_cols = depthwise_filter_shape[1]
  depth_out = pointwise_filter_shape[3]

  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  if stride_r != stride_c:
    # TODO(shlens): Add support for this.
    raise ValueError("Current implementation only supports equal length "
                     "strides in the row and column dimensions.")

  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  stride = stride_r
  padding = op.get_attr("padding")

  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
                                              filter_cols, stride, stride,
                                              padding)

  return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
  """Shape function for an AvgPool op.

  This op has one input:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows and out_cols depend on the
  value of the op's "ksize", "strides", and "padding" attrs.

  Args:
    op: An AvgPool Operation.

  Returns:
    A single-element list containing the Shape of the AvgPool output.

  Raises:
    ValueError: If the shape of the input is invalid or incompatible with
      the values of the attrs.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  # get_attr raises ValueError when the attr is absent; treat that as the
  # default (NHWC) layout.
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  if data_format == b"NCHW":
    # Convert input shape to the default NHWC for inference.
    input_shape = [input_shape[0], input_shape[2], input_shape[3],
                   input_shape[1]]

  # "ksize" and "strides" attrs are stored in the op's own data_format order.
  if data_format == b"NCHW":
    ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
    stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
  else:
    ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")

  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  depth = input_shape[3]

  if ksize_b != 1 or ksize_d != 1:
    raise ValueError("Current implementation does not support pooling "
                     "in the batch and depth dimensions.")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not support strides "
                     "in the batch and depth dimensions.")

  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")

  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
                                              ksize_c, stride_r, stride_c,
                                              padding)

  output_shape = [batch_size, out_rows, out_cols, depth]
  if data_format == b"NCHW":
    # Convert output shape back to NCHW.
    output_shape = [output_shape[0], output_shape[3], output_shape[1],
                    output_shape[2]]

  return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
  """Shape function for a MaxPool op.

  This op has one input:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows, out_cols, and depth_out depend
  on the value of the op's "ksize", "strides", and "padding" attrs.

  Args:
    op: A MaxPool Operation.

  Returns:
    A single-element list containing the Shape of the MaxPool output.

  Raises:
    ValueError: If the shape of the input is invalid or incompatible with
      the values of the attrs.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  # get_attr raises ValueError when the attr is absent; treat that as the
  # default (NHWC) layout.
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  if data_format == b"NCHW":
    # Convert input shape to the default NHWC for inference.
    input_shape = [input_shape[0], input_shape[2], input_shape[3],
                   input_shape[1]]

  # "ksize" and "strides" attrs are stored in the op's own data_format order.
  if data_format == b"NCHW":
    ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
    stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
  else:
    ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")

  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  depth = input_shape[3]

  if ksize_b != 1:
    raise ValueError("Current implementation does not support pooling "
                     "in the batch dimension.")
  if stride_b != 1:
    raise ValueError("Current implementation does not support strides "
                     "in the batch dimension.")

  # Pooling either spatially (ksize_d == 1) or across depth
  # (ksize_r == ksize_c == 1), but not both at once.
  if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
    raise ValueError("MaxPooling supports exactly one of pooling across depth "
                     "or pooling across width/height.")

  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  if ksize_d == 1:
    # Spatial pooling: rows/cols shrink, depth is preserved.
    padding = op.get_attr("padding")
    out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
                                                ksize_c, stride_r, stride_c,
                                                padding)
    output_shape = [batch_size, out_rows, out_cols, depth]
  else:
    # Depthwise pooling: spatial size is preserved, depth shrinks by ksize_d.
    if depth % ksize_d > 0:
      raise ValueError("Depthwise max pooling requires the depth window "
                       "to evenly divide the input depth.")
    if stride_d != ksize_d:
      raise ValueError("Depthwise max pooling requires the depth window "
                       "to equal the depth stride.")
    output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]

  if data_format == b"NCHW":
    # Convert output shape back to NCHW.
    output_shape = [output_shape[0], output_shape[3], output_shape[1],
                    output_shape[2]]

  return [tensor_shape.TensorShape(output_shape)]
def no_outputs(unused_op):
  """Shape function for use with ops that have no outputs.

  Args:
    unused_op: The Operation (ignored).

  Returns:
    An empty list, since there are no output shapes to report.
  """
  return list()
def unknown_shape(op):
  """Shape function for use with ops whose output shapes are unknown.

  Returns one fresh unknown `TensorShape` per output of `op`.
  """
  shapes = []
  for _ in op.outputs:
    shapes.append(tensor_shape.unknown_shape())
  return shapes
def _broadcast_shape_helper(shape_x, shape_y):
  """Helper functions for is_broadcast_compatible and broadcast_shape.

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    Returns None if the shapes are not broadcast compatible,
    a list of the broadcast dimensions otherwise. Entries may be None where
    the broadcast result is unknown.
  """
  # To compute the broadcasted dimensions, we zip together shape_x and shape_y,
  # and pad with 1 to make them the same length.  Zipping from the right
  # (reversed) aligns trailing dimensions, as numpy broadcasting requires.
  broadcasted_dims = reversed(list(six.moves.zip_longest(
      reversed(shape_x.dims),
      reversed(shape_y.dims),
      fillvalue=tensor_shape.Dimension(1))))
  # Next we combine the dimensions according to the numpy broadcasting rules.
  # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
  return_dims = []
  for (dim_x, dim_y) in broadcasted_dims:
    if dim_x.value is None or dim_y.value is None:
      # One or both dimensions is unknown. If either dimension is greater than
      # 1, we assume that the program is correct, and the other dimension will
      # be broadcast to match it.
      # TODO(mrry): If we eliminate the shape checks in C++, we must still
      # assert that the unknown dim is either 1 or the same as the known dim.
      if dim_x.value is not None and dim_x.value > 1:
        return_dims.append(dim_x)
      elif dim_y.value is not None and dim_y.value > 1:
        return_dims.append(dim_y)
      else:
        # Both dims are unknown or <= 1; the result is unknown.
        return_dims.append(None)
    elif dim_x.value == 1:
      # We will broadcast dim_x to dim_y.
      return_dims.append(dim_y)
    elif dim_y.value == 1:
      # We will broadcast dim_y to dim_x.
      return_dims.append(dim_x)
    elif dim_x.value == dim_y.value:
      # The dimensions are compatible, so output is the same size in that
      # dimension.
      return_dims.append(dim_x.merge_with(dim_y))
    else:
      # Known, unequal, and neither is 1: not broadcast compatible.
      return None
  return return_dims
def is_broadcast_compatible(shape_x, shape_y):
  """Returns True if `shape_x` and `shape_y` are broadcast compatible.

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    True if a shape exists that both `shape_x` and `shape_y` can be broadcasted
    to.  False otherwise (including when either rank is unknown).
  """
  ranks_known = shape_x.ndims is not None and shape_y.ndims is not None
  return ranks_known and _broadcast_shape_helper(shape_x, shape_y) is not None
def broadcast_shape(shape_x, shape_y):
  """Returns the broadcasted shape between `shape_x` and `shape_y`.

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    A `TensorShape` representing the broadcasted shape.  If either rank is
    unknown, the result is an unknown shape.

  Raises:
    ValueError: If the two shapes can not be broadcasted.
  """
  # An unknown rank on either side means we can say nothing about the result.
  if shape_x.ndims is None or shape_y.ndims is None:
    return tensor_shape.unknown_shape()

  broadcast_dims = _broadcast_shape_helper(shape_x, shape_y)
  if broadcast_dims is None:
    raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                     % (shape_x, shape_y))
  return tensor_shape.TensorShape(broadcast_dims)
def call_cpp_shape_fn(op, require_shape_fn=True):
  """A shape function that delegates to the registered C++ shape function.

  Args:
    op: the node in the graph for which to compute output shapes.
    require_shape_fn: If true, and the C++ shape function is not registered
      in the current binary then an exception is raised; otherwise, if the
      C++ shape function is not registered then unknown_shape is used.

  Returns:
    A dictionary with the following keys:
      shapes: A TensorShape list of the output shapes of the op, as computed
        using the C++ shape inference function registered for the op.
      handle_shapes: A TensorShape list of the shapes for handle outputs, if
        any.
      handle_dtypes: A list of DataType enums for the handle outputs, if any.

  Raises:
    ValueError: If the C++ shape function returned an error (e.g. because the
      shapes of the inputs are of the wrong rank or otherwise incompatible
      according to the shape function).
    RuntimeError: If the C++ shape function is not registered and
      <require_shape_fn> is True.
  """
  if op.type == "Const":
    # To avoid serializing large constants, we special-case constant
    # here, even though it has a C++ shape function.  When Python
    # calls the C / C-API directly, we should be able to remove this.
    return {
        "shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
        "handle_data": [None]
    }

  input_tensors_needed = []
  input_tensors_as_shapes_needed = []

  # The C++ shape function may ask for the *values* of some inputs (e.g. the
  # shape argument of Reshape).  Loop, accumulating requested inputs, until
  # the C++ side stops asking for more or makes no progress.
  while True:
    res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
                                  input_tensors_as_shapes_needed,
                                  require_shape_fn)
    if not isinstance(res, dict):
      # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
      return res

    # See if we need to evaluate some inputs.
    if not res["inputs_needed"]:
      return res
    p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
    p = p.FromString(res["inputs_needed"])
    changed = False
    for idx in p.input_tensors_needed:
      if idx not in input_tensors_needed:
        input_tensors_needed.append(idx)
        changed = True
    for idx in p.input_tensors_as_shapes_needed:
      if idx not in input_tensors_as_shapes_needed:
        input_tensors_as_shapes_needed.append(idx)
        changed = True
    # No new requests were added, so a retry cannot produce anything new;
    # return the current result to avoid looping forever.
    if not changed:
      return res
def _call_cpp_shape_fn_impl(
    op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):
  """Core implementation of call_cpp_shape_fn.

  Serializes the op's input shapes (and any requested constant input values)
  to protos, invokes the C++ shape inference via pywrap_tensorflow, and
  deserializes the results.

  Args:
    op: The Operation to run shape inference for.
    input_tensors_needed: Indices of inputs whose constant *values* should be
      supplied to the C++ shape function.
    input_tensors_as_shapes_needed: Indices of inputs whose values should be
      interpreted as partial shapes and supplied to the C++ shape function.
    require_shape_fn: If True, a missing C++ shape function is a RuntimeError;
      otherwise unknown shapes are returned.

  Returns:
    Either the result of unknown_shape(op) (a list) when the C++ shape
    function is missing and not required, or a dict with keys "shapes",
    "handle_data" and "inputs_needed".

  Raises:
    ValueError: If the C++ shape function reported an error other than a
      missing shape function.
    RuntimeError: If the C++ shape function is missing and required.
  """
  graph_def_version = op.graph.graph_def_versions.producer
  node_def_str = op.node_def.SerializeToString()

  def tensor_to_inference_result(t):
    # Serialize a tensor's shape (and resource handle data, if any) into a
    # CppShapeInferenceResult proto string.
    r = cpp_shape_inference_pb2.CppShapeInferenceResult()
    r.shape.CopyFrom(t.get_shape().as_proto())
    # pylint: disable=protected-access
    if t._handle_data is not None:
      r.handle_data.CopyFrom(t._handle_data)
    # pylint: enable=protected-access
    return r.SerializeToString()

  input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

  # Statically evaluate the requested constant input values; entries stay
  # None when the value cannot be determined.
  input_tensors = [None for i in input_shapes]
  for idx in input_tensors_needed:
    v = tensor_util.constant_value(op.inputs[idx])
    if v is not None:
      input_tensors[idx] = np.asarray(v)

  serialized_unknown_shape = (
      tensor_shape.TensorShape(None).as_proto().SerializeToString())
  arr = [serialized_unknown_shape for i in input_shapes]
  # Replace entries with the statically-known partial shape, where available.
  for idx in input_tensors_as_shapes_needed:
    s = tensor_util.constant_value_as_shape(op.inputs[idx])
    if s is not None:
      arr[idx] = s.as_proto().SerializeToString()
  input_tensors_as_shapes = arr

  missing_shape_fn = False
  try:
    output = pywrap_tensorflow.RunCppShapeInference(
        graph_def_version, node_def_str, input_shapes, input_tensors,
        input_tensors_as_shapes)
  except errors.InvalidArgumentError as err:
    # A specific error message marks "no C++ shape function registered";
    # everything else is a genuine shape error.
    if err.message.startswith("No shape inference function exists for op"):
      missing_shape_fn = True
    else:
      raise ValueError(err.message)

  if missing_shape_fn:
    if require_shape_fn:
      raise RuntimeError(
          "No C++ shape function registered for standard op: %s" % op.type)
    return unknown_shape(op)

  # The last element of `output` is the serialized "inputs needed" request;
  # everything before it is one serialized result per op output.
  output_shapes = output[:-1]

  # Convert TensorShapeProto values in output_shapes.
  result_protos = [
      cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
      for s in output_shapes
  ]
  result = [r.shape for r in result_protos]
  result_handle_data = [
      r.handle_data if r.handle_data.is_set else None for r in result_protos
  ]

  return {
      "shapes": result,
      "handle_data": result_handle_data,
      "inputs_needed": output[-1]
  }
# Register this module's C++-delegating shape function with the ops module
# via its private hook.
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
| 35.146375 | 80 | 0.691577 |
e3bf5bf8d35661a001652415aa8a0c09d97ff0b3 | 11,665 | py | Python | kfac/python/kernel_tests/estimator_test.py | ntselepidis/kfac | ddad6375bbdebfae809bccfd3a5c3db073128764 | [
"Apache-2.0"
] | 179 | 2018-02-08T00:10:26.000Z | 2022-02-25T06:58:28.000Z | kfac/python/kernel_tests/estimator_test.py | ntselepidis/kfac | ddad6375bbdebfae809bccfd3a5c3db073128764 | [
"Apache-2.0"
] | 40 | 2018-02-02T00:10:00.000Z | 2022-02-09T01:46:32.000Z | kfac/python/kernel_tests/estimator_test.py | isabella232/kfac | 3ee1bec8dcd851d50618cd542a8d1aff92512f7c | [
"Apache-2.0"
] | 40 | 2018-03-11T10:10:23.000Z | 2022-01-24T12:03:48.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kfac.estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from kfac.python.ops import estimator
from kfac.python.ops import fisher_factors as ff
from kfac.python.ops import layer_collection as lc
from kfac.python.ops import utils
# We need to set these constants since the numerical values used in the tests
# were chosen when these used to be the defaults.
ff.set_global_constants(zero_debias=False)

# Estimation modes the estimator accepts.
# NOTE(review): not referenced in this part of the file — presumably used by
# tests further down; do not remove without checking.
_ALL_ESTIMATION_MODES = ["gradients", "empirical", "curvature_prop", "exact"]
class EstimatorTest(tf.test.TestCase):
  def setUp(self):
    """Builds a small affine model graph with only the weights registered.

    Registering `self.weights` but not `self.bias` lets individual tests
    verify that the estimator distinguishes registered from unregistered
    variables.
    """
    self._graph = tf.Graph()
    with self._graph.as_default():
      self.layer_collection = lc.LayerCollection()

      self.inputs = tf.random_normal((2, 2), dtype=tf.float32)
      self.weights = tf.get_variable("w", shape=(2, 2), dtype=tf.float32)
      self.bias = tf.get_variable(
          "b", initializer=tf.zeros_initializer(), shape=(2, 1))
      self.output = tf.matmul(self.inputs, self.weights) + self.bias

      # Only register the weights.
      self.layer_collection.register_fully_connected(
          params=(self.weights,), inputs=self.inputs, outputs=self.output)

      self.outputs = tf.tanh(self.output)
      self.targets = tf.zeros_like(self.outputs)
      self.layer_collection.register_categorical_predictive_distribution(
          logits=self.outputs, targets=self.targets)
  def testEstimatorInitManualRegistration(self):
    """Estimator construction succeeds/fails based on variable registration."""
    with self._graph.as_default():
      # We should be able to build an estimator for only the registered vars.
      estimator.FisherEstimatorRoundRobin(
          variables=[self.weights],
          cov_ema_decay=0.1,
          damping=0.2,
          layer_collection=self.layer_collection
      )

      # Check that we throw an error if we try to build an estimator for vars
      # that were not manually registered (self.bias is unregistered; see
      # setUp).
      with self.assertRaises(ValueError):
        est = estimator.FisherEstimatorRoundRobin(
            variables=[self.weights, self.bias],
            cov_ema_decay=0.1,
            damping=0.2,
            layer_collection=self.layer_collection
        )
        est.make_vars_and_create_op_thunks()

      # Check that we throw an error if we don't include registered variables,
      # i.e. self.weights
      with self.assertRaises(ValueError):
        est = estimator.FisherEstimatorRoundRobin(
            variables=[],
            cov_ema_decay=0.1,
            damping=0.2,
            layer_collection=self.layer_collection)
        est.make_vars_and_create_op_thunks()
@tf.test.mock.patch.object(utils.SubGraph, "variable_uses", return_value=42)
def testVariableWrongNumberOfUses(self, mock_uses):
with self.assertRaises(ValueError):
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection)
est.make_vars_and_create_op_thunks()
def testInvalidEstimationMode(self):
with self.assertRaises(ValueError):
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="not_a_real_mode")
est.make_vars_and_create_op_thunks()
def testGradientsModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="gradients")
est.make_vars_and_create_op_thunks()
def testEmpiricalModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="empirical")
est.make_vars_and_create_op_thunks()
def testCurvaturePropModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="curvature_prop")
est.make_vars_and_create_op_thunks()
def testExactModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="exact")
est.make_vars_and_create_op_thunks()
  def test_cov_update_thunks(self):
    """Ensures covariance update ops run once per global_step."""
    with self._graph.as_default(), self.test_session() as sess:
      fisher_estimator = estimator.FisherEstimatorRoundRobin(
          variables=[self.weights],
          layer_collection=self.layer_collection,
          damping=0.2,
          cov_ema_decay=0.0)

      # Construct an op that executes one covariance update per step:
      # tf.case selects the thunk whose index equals the current global_step.
      global_step = tf.train.get_or_create_global_step()
      (cov_variable_thunks, cov_update_op_thunks, _,
       _) = fisher_estimator.create_ops_and_vars_thunks()
      for thunk in cov_variable_thunks:
        thunk()
      cov_matrices = [
          fisher_factor.cov
          for fisher_factor in self.layer_collection.get_factors()
      ]
      cov_update_op = tf.case([(tf.equal(global_step, i), thunk)
                               for i, thunk in enumerate(cov_update_op_thunks)])
      increment_global_step = global_step.assign_add(1)

      sess.run(tf.global_variables_initializer())
      initial_cov_values = sess.run(cov_matrices)

      # Ensure there's one update per covariance matrix.
      self.assertEqual(len(cov_matrices), len(cov_update_op_thunks))

      # Test is no-op if only 1 covariance matrix.
      assert len(cov_matrices) > 1

      for i in range(len(cov_matrices)):
        # Compare new and old covariance values; after i steps exactly i of
        # the matrices should have changed from their initial values.
        new_cov_values = sess.run(cov_matrices)
        is_cov_equal = [
            np.allclose(initial_cov_value, new_cov_value)
            for (initial_cov_value,
                 new_cov_value) in zip(initial_cov_values, new_cov_values)
        ]
        num_cov_equal = sum(is_cov_equal)

        # Ensure exactly one covariance matrix changes per step.
        self.assertEqual(num_cov_equal, len(cov_matrices) - i)

        # Run the covariance update op selected by the current global_step,
        # then advance the step.
        sess.run(cov_update_op)
        sess.run(increment_global_step)
  def test_round_robin_placement(self):
    """Check if the ops and variables are placed on devices correctly."""
    with self._graph.as_default():
      # Two cov/inv devices so round-robin assignment alternates CPU:0/CPU:1.
      fisher_estimator = estimator.FisherEstimatorRoundRobin(
          variables=[self.weights],
          layer_collection=self.layer_collection,
          damping=0.2,
          cov_ema_decay=0.0,
          cov_devices=["/cpu:{}".format(i) for i in range(2)],
          inv_devices=["/cpu:{}".format(i) for i in range(2)])

      # Construct an op that executes one covariance update per step.
      (cov_update_thunks,
       inv_update_thunks) = fisher_estimator.make_vars_and_create_op_thunks(
           scope="test")
      cov_update_ops = tuple(thunk() for thunk in cov_update_thunks)
      inv_update_ops = tuple(thunk() for thunk in inv_update_thunks)
      self.assertEqual(cov_update_ops[0].device, "/device:CPU:0")
      self.assertEqual(cov_update_ops[1].device, "/device:CPU:1")
      self.assertEqual(inv_update_ops[0].device, "/device:CPU:0")
      self.assertEqual(inv_update_ops[1].device, "/device:CPU:1")
      # pylint-style note: the following reach into factor internals
      # (_cov._var, _matpower_by_exp_and_damping) to inspect placement.
      cov_matrices = [
          fisher_factor._cov._var
          for fisher_factor in self.layer_collection.get_factors()
      ]
      inv_matrices = [
          matrix
          for fisher_factor in self.layer_collection.get_factors()
          for matrix in fisher_factor._matpower_by_exp_and_damping.values()
      ]
      self.assertEqual(cov_matrices[0].device, "/device:CPU:0")
      self.assertEqual(cov_matrices[1].device, "/device:CPU:1")
      # Inverse matrices need to be explicitly placed (empty device string
      # means no placement was assigned).
      self.assertEqual(inv_matrices[0].device, "")
      self.assertEqual(inv_matrices[1].device, "")
def test_inv_update_thunks(self):
"""Ensures inverse update ops run once per global_step."""
with self._graph.as_default(), self.test_session() as sess:
fisher_estimator = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
layer_collection=self.layer_collection,
damping=0.2,
cov_ema_decay=0.0)
# Construct op that updates one inverse per global step.
global_step = tf.train.get_or_create_global_step()
(cov_variable_thunks, _, inv_variable_thunks,
inv_update_op_thunks) = fisher_estimator.create_ops_and_vars_thunks()
for thunk in cov_variable_thunks:
thunk()
for thunk in inv_variable_thunks:
thunk()
inv_matrices = [
matrix
for fisher_factor in self.layer_collection.get_factors()
for matrix in fisher_factor._matpower_by_exp_and_damping.values()
]
inv_update_op = tf.case([(tf.equal(global_step, i), thunk)
for i, thunk in enumerate(inv_update_op_thunks)])
increment_global_step = global_step.assign_add(1)
sess.run(tf.global_variables_initializer())
initial_inv_values = sess.run(inv_matrices)
# Ensure there's one update per inverse matrix. This is true as long as
# there's no fan-in/fan-out or parameter re-use.
self.assertEqual(len(inv_matrices), len(inv_update_op_thunks))
# Test is no-op if only 1 invariance matrix.
assert len(inv_matrices) > 1
# Assign each covariance matrix a value other than the identity. This
# ensures that the inverse matrices are updated to something different as
# well.
sess.run([
fisher_factor._cov.add_to_average(
2 * tf.eye(int(fisher_factor._cov_shape[0])))
for fisher_factor in self.layer_collection.get_factors()
])
for i in range(len(inv_matrices)):
# Compare new and old inverse values
new_inv_values = sess.run(inv_matrices)
is_inv_equal = [
np.allclose(initial_inv_value, new_inv_value)
for (initial_inv_value,
new_inv_value) in zip(initial_inv_values, new_inv_values)
]
num_inv_equal = sum(is_inv_equal)
# Ensure exactly one inverse matrix changes per step.
self.assertEqual(num_inv_equal, len(inv_matrices) - i)
# Run all inverse update ops.
sess.run(inv_update_op)
sess.run(increment_global_step)
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
| 38.49835 | 80 | 0.67724 |
4c3b7f7d4f798204831e9c4b8f2f5853cc0978e5 | 27,462 | py | Python | applications/welcome/languages/es.py | misl6/web2py | 4191d4c48c37c66cc7eb293b610a6b6e86870571 | [
"BSD-3-Clause"
] | 5 | 2020-03-11T17:03:49.000Z | 2021-12-26T11:22:49.000Z | applications/welcome/languages/es.py | misl6/web2py | 4191d4c48c37c66cc7eb293b610a6b6e86870571 | [
"BSD-3-Clause"
] | 16 | 2020-03-30T13:00:10.000Z | 2020-05-16T16:42:52.000Z | applications/welcome/languages/es.py | misl6/web2py | 4191d4c48c37c66cc7eb293b610a6b6e86870571 | [
"BSD-3-Clause"
] | 2 | 2020-09-18T15:12:26.000Z | 2020-11-10T22:09:59.000Z | # -*- coding: utf-8 -*-
{
'!langcode!': 'es',
'!langname!': 'Español',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualice" es una expresión opcional como "campo1=\'nuevo_valor\'". No se puede actualizar o eliminar resultados de un JOIN',
'%(nrows)s records found': '%(nrows)s registros encontrados',
'%s %%{position}': '%s %%{posición}',
'%s %%{row} deleted': '%s %%{fila} %%{eliminada}',
'%s %%{row} updated': '%s %%{fila} %%{actualizada}',
'%s selected': '%s %%{seleccionado}',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'(something like "it-it")': '(algo como "it-it")',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**no disponible** (requiere la libreria [[guppy http://pypi.python.org/pypi/guppy/ popup]] de Python)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ha ocurrido un error, por favor [[recargar %s]] la página',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Número de entradas: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**no disponible**``:red (Necesita libreria de Python: [[guppy http://pypi.python.org/pypi/guppy/ popup]])',
'A new password was emailed to you': 'A new password was emailed to you',
'A new version of web2py is available': 'Hay una nueva versión de web2py disponible',
'A new version of web2py is available: %s': 'Hay una nueva versión de web2py disponible: %s',
'About': 'Acerca de',
'about': 'acerca de',
'About application': 'Acerca de la aplicación',
'Access Control': 'Control de Acceso',
'Add': 'Añadir',
'additional code for your application': 'código adicional para su aplicación',
'admin': 'administrar',
'admin disabled because no admin password': 'admin deshabilitado por falta de contraseña',
'admin disabled because not supported on google app engine': 'admin deshabilitado, no es soportado en GAE',
'admin disabled because unable to access password file': 'admin deshabilitado, imposible acceder al archivo con la contraseña',
'Admin is disabled because insecure channel': 'Admin deshabilitado, el canal no es seguro',
'Admin is disabled because unsecure channel': 'Admin deshabilitado, el canal no es seguro',
'Administrative interface': 'Interfaz administrativa',
'Administrative Interface': 'Interfaz Administrativa',
'Administrator Password:': 'Contraseña del Administrador:',
'Ajax Recipes': 'Recetas AJAX',
'An error occured, please %s the page': 'Ha ocurrido un error, por favor %s la página',
'An error occured, please [[reload %s]] the page': 'Ha ocurrido un error, por favor [[reload %s]] la pagina',
'And': 'Y',
'and rename it (required):': 'y renómbrela (requerido):',
'and rename it:': ' y renómbrelo:',
'API Example': 'API Example',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin deshabilitado, el canal no es seguro',
'application "%s" uninstalled': 'aplicación "%s" desinstalada',
'application compiled': 'aplicación compilada',
'application is compiled and cannot be designed': 'la aplicación está compilada y no puede ser modificada',
'Apply changes': 'Aplicar cambios',
'Appointment': 'Nombramiento',
'Are you sure you want to delete file "%s"?': '¿Está seguro que desea eliminar el archivo "%s"?',
'Are you sure you want to delete this object?': '¿Está seguro que desea borrar este objeto?',
'Are you sure you want to uninstall application "%s"': '¿Está seguro que desea desinstalar la aplicación "%s"',
'Are you sure you want to uninstall application "%s"?': '¿Está seguro que desea desinstalar la aplicación "%s"?',
'at': 'en',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCION: Inicio de sesión requiere una conexión segura (HTTPS) o corriendo en localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.',
'ATTENTION: you cannot edit the running application!': 'ATENCION: no puede modificar la aplicación que está ejecutandose!',
'Authentication': 'Autenticación',
'Authentication code': 'Authentication code',
'Authentication failed at client DB!': '¡La autenticación ha fallado en la BDD cliente!',
'Authentication failed at main DB!': '¡La autenticación ha fallado en la BDD principal!',
'Available Databases and Tables': 'Bases de datos y tablas disponibles',
'Back': 'Atrás',
'Buy this book': 'Compra este libro',
"Buy web2py's book": 'Compra el libro de web2py',
'Cache': 'Caché',
'cache': 'caché',
'Cache Cleared': 'Cache Limpiada',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'La Cache contiene items con **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} de antiguedad.',
'Cache Keys': 'Llaves de la Caché',
'cache, errors and sessions cleaned': 'caché, errores y sesiones eliminados',
'Cannot be empty': 'No puede estar vacío',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se puede compilar: hay errores en su aplicación. Depure, corrija errores y vuelva a intentarlo.',
'cannot create file': 'no es posible crear archivo',
'cannot upload file "%(filename)s"': 'no es posible subir archivo "%(filename)s"',
'Change Password': 'Cambie la Contraseña',
'Change password': 'Cambie la contraseña',
'change password': 'cambie la contraseña',
'check all': 'marcar todos',
'Check to delete': 'Marque para eliminar',
'choose one': 'escoja uno',
'clean': 'limpiar',
'Clear': 'Limpiar',
'Clear CACHE?': '¿Limpiar CACHÉ?',
'Clear DISK': 'Limpiar DISCO',
'Clear RAM': 'Limpiar RAM',
'Click on the link %(link)s to reset your password': 'Pulse en el enlace %(link)s para reiniciar su contraseña',
'click to check for upgrades': 'haga clic para buscar actualizaciones',
'client': 'cliente',
'Client IP': 'IP del Cliente',
'Close': 'Cerrar',
'Community': 'Comunidad',
'compile': 'compilar',
'compiled application removed': 'aplicación compilada eliminada',
'Components and Plugins': 'Componentes y Plugins',
'Config.ini': 'Config.ini',
'contains': 'contiene',
'Controller': 'Controlador',
'Controllers': 'Controladores',
'controllers': 'controladores',
'Copyright': 'Copyright',
'create file with filename:': 'cree archivo con nombre:',
'Create new application': 'Cree una nueva aplicación',
'create new application:': 'cree una nueva aplicación:',
'Created By': 'Creado Por',
'Created On': 'Creado En',
'CSV (hidden cols)': 'CSV (columnas ocultas)',
'Current request': 'Solicitud en curso',
'Current response': 'Respuesta en curso',
'Current session': 'Sesión en curso',
'currently saved or': 'actualmente guardado o',
'customize me!': '¡Personalizame!',
'data uploaded': 'datos subidos',
'Database': 'Base de datos',
'Database %s select': 'selección en base de datos %s',
'database administration': 'administración de base de datos',
'Database Administration (appadmin)': 'Administración de Base de Datos (appadmin)',
'Date and Time': 'Fecha y Hora',
'DB': 'BDD',
'db': 'bdd',
'DB Model': 'Modelo BDD',
'defines tables': 'define tablas',
'Delete': 'Eliminar',
'delete': 'eliminar',
'delete all checked': 'eliminar marcados',
'Delete:': 'Eliminar:',
'Demo': 'Demostración',
'Deploy on Google App Engine': 'Despliegue en Google App Engine',
'Deployment Recipes': 'Recetas de despliegue',
'Description': 'Descripción',
'design': 'diseño',
'DESIGN': 'DISEÑO',
'Design': 'Diseño',
'Design for': 'Diseño por',
'detecting': 'detectando',
'DISK': 'DISCO',
'Disk Cache Keys': 'Llaves de Caché en Disco',
'Disk Cleared': 'Disco limpiado',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'El DISCO contiene items con **%(hours)02d** %%{hora(hours)} **%(min)02d** %%{minuto(min)} **%(sec)02d** %%{segundo(sec)} de antiguedad.',
'Documentation': 'Documentación',
"Don't know what to do?": '¿No sabe que hacer?',
'done!': '¡hecho!',
'Download': 'Descargas',
'E-mail': 'Correo electrónico',
'edit': 'editar',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'Edit application': 'Editar aplicación',
'edit controller': 'editar controlador',
'Edit current record': 'Edite el registro actual',
'Edit Profile': 'Editar Perfil',
'edit profile': 'editar perfil',
'Edit This App': 'Edite esta App',
'Editing file': 'Editando archivo',
'Editing file "%s"': 'Editando archivo "%s"',
'Email and SMS': 'Correo electrónico y SMS',
'Email sent': 'Correo electrónico enviado',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'End of impersonation': 'Fin de suplantación',
'enter a number between %(min)g and %(max)g': 'introduzca un numero entre %(min)g y %(max)g',
'enter a value': 'introduzca un valor',
'enter an integer between %(min)g and %(max)g': 'introduzca un número entero entre %(min)g y %(max)g',
'enter date and time as %(format)s': 'introduzca fecha y hora como %(format)s',
'Error logs for "%(app)s"': 'Bitácora de errores para "%(app)s"',
'errors': 'errores',
'Errors': 'Errores',
'Errors in form, please check it out.': 'Hay errores en el formulario, por favor compruébelo.',
'export as csv file': 'exportar como archivo CSV',
'Export:': 'Exportar:',
'exposes': 'expone',
'extends': 'extiende',
'failed to reload module': 'la recarga del módulo ha fallado',
'FAQ': 'Preguntas frecuentes',
'file "%(filename)s" created': 'archivo "%(filename)s" creado',
'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado',
'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido',
'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado',
'file "%s" of %s restored': 'archivo "%s" de %s restaurado',
'file changed on disk': 'archivo modificado en el disco',
'file does not exist': 'archivo no existe',
'file saved on %(time)s': 'archivo guardado %(time)s',
'file saved on %s': 'archivo guardado %s',
'First name': 'Nombre',
'Forgot username?': '¿Olvidó el nombre de usuario?',
'Forms and Validators': 'Formularios y validadores',
'Free Applications': 'Aplicaciones Libres',
'Function disabled': 'Function disabled',
'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].',
'Graph Model': 'Modelo en Grafo',
'Grid Example': 'Grid Example',
'Group %(group_id)s created': 'Grupo %(group_id)s creado',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'ID de Grupo',
'Group uniquely assigned to user %(id)s': 'Grupo asignado únicamente al usuario %(id)s',
'Groups': 'Grupos',
'Hello World': 'Hola Mundo',
'help': 'ayuda',
'Helping web2py': 'Ayudando a web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Inicio',
'How did you get here?': '¿Cómo llegaste aquí?',
'htmledit': 'htmledit',
'Impersonate': 'Suplantar',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'in': 'en',
'includes': 'incluye',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Index': 'Índice',
'insert new': 'inserte nuevo',
'insert new %s': 'inserte nuevo %s',
'Installed applications': 'Aplicaciones instaladas',
'Insufficient privileges': 'Privilegios insuficientes',
'internal error': 'error interno',
'Internal State': 'Estado Interno',
'Introduction': 'Introducción',
'Invalid action': 'Acción inválida',
'Invalid email': 'Correo electrónico inválido',
'invalid expression': 'expresión inválida',
'Invalid key': 'Invalid key',
'Invalid login': 'Inicio de sesión inválido',
'invalid password': 'contraseña inválida',
'Invalid password': 'Invalid password',
'Invalid Query': 'Consulta inválida',
'invalid request': 'Solicitud inválida',
'Invalid reset password': 'Reinicio de contraseña inválido',
'invalid ticket': 'Tiquete inválido',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Is Active': 'Está Activo',
'Key': 'Llave',
'Key verified': 'Key verified',
'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado',
'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados',
'languages': 'lenguajes',
'Languages': 'Lenguajes',
'languages updated': 'lenguajes actualizados',
'Last name': 'Apellido',
'Last saved on:': 'Guardado en:',
'Layout': 'Diseño de página',
'Layout Plugins': 'Plugins de diseño',
'Layouts': 'Diseños de páginas',
'License for': 'Licencia para',
'Live Chat': 'Chat en vivo',
'loading...': 'cargando...',
'Log In': 'Iniciar sesion',
'Logged in': 'Sesión iniciada',
'Logged out': 'Sesión finalizada',
'Login': 'Inicio de sesión',
'login': 'inicio de sesión',
'Login disabled by administrator': 'Inicio de sesión deshabilitado por el administrador',
'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa',
'logout': 'fin de sesión',
'Logout': 'Fin de sesión',
'Lost Password': 'Contraseña perdida',
'Lost password?': '¿Olvidó la contraseña?',
'lost password?': '¿olvidó la contraseña?',
'Lost your password?': 'Lost your password?',
'Main Menu': 'Menú principal',
'Manage %(action)s': 'Gestionar %(action)s',
'Manage Access Control': 'Gestionar control de acceso',
'Manage Cache': 'Gestionar la Caché',
'Memberships': 'Membresias',
'Menu Model': 'Modelo "menu"',
'merge': 'Combinar',
'Models': 'Modelos',
'models': 'modelos',
'Modified By': 'Modificado Por',
'Modified On': 'Modificado En',
'Modules': 'Módulos',
'modules': 'módulos',
'must be YYYY-MM-DD HH:MM:SS!': '¡debe ser DD/MM/YYYY HH:MM:SS!',
'must be YYYY-MM-DD!': '¡debe ser DD/MM/YYYY!',
'My Sites': 'Mis Sitios',
'Name': 'Nombre',
'New': 'Nuevo',
'New %(entity)s': 'Nuevo %(entity)s',
'new application "%s" created': 'nueva aplicación "%s" creada',
'New password': 'Contraseña nueva',
'New Record': 'Registro nuevo',
'new record inserted': 'nuevo registro insertado',
'next %s rows': 'siguiente %s filas',
'next 100 rows': '100 filas siguientes',
'NO': 'NO',
'No databases in this application': 'No hay bases de datos en esta aplicación',
'No records found': 'No se han encontrado registros',
'Not authorized': 'No autorizado',
'not in': 'no en',
'Number of entries: **%s**': 'Numero de entradas: **%s**',
'Object or table name': 'Nombre del objeto o tabla',
'Old password': 'Contraseña vieja',
'Online book': 'Libro Online',
'Online examples': 'Ejemplos en línea',
'Or': 'O',
'or import from csv file': 'o importar desde archivo CSV',
'or provide application url:': 'o provea URL de la aplicación:',
'Origin': 'Origen',
'Original/Translation': 'Original/Traducción',
'Other Plugins': 'Otros Plugins',
'Other Recipes': 'Otras Recetas',
'Overview': 'Resumen',
'pack all': 'empaquetar todo',
'pack compiled': 'empaquetar compilados',
'Password': 'Contraseña',
'Password changed': 'Contraseña cambiada',
"Password fields don't match": 'Los campos de contraseña no coinciden',
'Password reset': 'Reinicio de contraseña',
'Password retrieve': 'Password retrieve',
'Peeking at file': 'Visualizando archivo',
'Permission': 'Permiso',
'Permissions': 'Permisos',
'Phone': 'Teléfono',
'please input your password again': 'por favor introduzca su contraseña otra vez',
'Plugins': 'Plugins',
'Powered by': 'Este sitio usa',
'Preface': 'Prefacio',
'previous %s rows': 'fila %s anterior',
'previous 100 rows': '100 filas anteriores',
'Profile': 'Perfil',
'Profile updated': 'Perfil actualizado',
'pygraphviz library not found': 'Libreria pygraphviz no encontrada',
'Python': 'Python',
'Query Not Supported: %s': 'Consulta No Soportada: %s',
'Query:': 'Consulta:',
'Quick Examples': 'Ejemplos Rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'Llaves de la Caché en RAM',
'Ram Cleared': 'Ram Limpiada',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'La RAM contiene items con **%(hours)02d** %%{hora(hours)} **%(min)02d** %%{minuto(min)} **%(sec)02d** %%{segundo(sec)} de antiguedad.',
'Recipes': 'Recetas',
'Record': 'Registro',
'Record %(id)s created': 'Registro %(id)s creado',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Registro Creado',
'Record Deleted': 'Record Deleted',
'record does not exist': 'el registro no existe',
'Record ID': 'ID de Registro',
'Record id': 'Id de registro',
'Record Updated': 'Record Updated',
'register': 'regístrese',
'Register': 'Regístrese',
'Registration identifier': 'Identificador de Registro',
'Registration is pending approval': 'Registration is pending approval',
'Registration key': 'Llave de registro',
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'Registro con éxito',
'reload': 'recargar',
'Remember me (for 30 days)': 'Recuérdame (durante 30 días)',
'remove compiled': 'eliminar compiladas',
'Request reset password': 'Solicitar reinicio de contraseña',
'Reset password': 'Reiniciar contraseña',
'Reset Password key': 'Restaurar Llave de la Contraseña',
'Resolve Conflict file': 'archivo Resolución de Conflicto',
'restore': 'restaurar',
'Retrieve username': 'Recuperar nombre de usuario',
'revert': 'revertir',
'Role': 'Rol',
'Roles': 'Roles',
'Rows in Table': 'Filas en la tabla',
'Rows selected': 'Filas seleccionadas',
'save': 'guardar',
'Save model as...': 'Guardar modelo como...',
'Saved file hash:': 'Hash del archivo guardado:',
'Search': 'Buscar',
'Semantic': 'Semántica',
'Services': 'Servicios',
'session expired': 'sesión expirada',
'shell': 'terminal',
'Sign Up': 'Registrarse',
'Sign up': 'Sign up',
'site': 'sitio',
'Size of cache:': 'Tamaño de la Caché:',
'some files could not be removed': 'algunos archivos no pudieron ser removidos',
'start': 'inicio',
'starts with': 'comienza por',
'state': 'estado',
'static': 'estático',
'Static files': 'Archivos estáticos',
'Statistics': 'Estadísticas',
'Stylesheet': 'Hoja de estilo',
'Submit': 'Enviar',
'submit': 'enviar',
'Success!': '¡Correcto!',
'Support': 'Soporte',
'Sure you want to delete this object?': '¿Está seguro que desea eliminar este objeto?',
'Table': 'Tabla',
'Table name': 'Nombre de la tabla',
'test': 'probar',
'Testing application': 'Probando aplicación',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" es una condición como "db.tabla1.campo1==\'valor\'". Algo como "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'la lógica de la aplicación, cada ruta URL se mapea en una función expuesta en el controlador',
'The Core': 'El Núcleo',
'the data representation, define database tables and sets': 'la representación de datos, define tablas y conjuntos de base de datos',
'The output of the file is a dictionary that was rendered by the view %s': 'La salida de dicha función es un diccionario que es desplegado por la vista %s',
'the presentations layer, views are also known as templates': 'la capa de presentación, las vistas también son llamadas plantillas',
'The Views': 'Las Vistas',
'There are no controllers': 'No hay controladores',
'There are no models': 'No hay modelos',
'There are no modules': 'No hay módulos',
'There are no static files': 'No hay archivos estáticos',
'There are no translators, only default language is supported': 'No hay traductores, sólo el lenguaje por defecto es soportado',
'There are no views': 'No hay vistas',
'these files are served without processing, your images go here': 'estos archivos son servidos sin procesar, sus imágenes van aquí',
'This App': 'Esta Aplicación',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'Este correo electrónico ya tiene una cuenta',
'This is a copy of the scaffolding application': 'Esta es una copia de la aplicación de andamiaje',
'This is the %(filename)s template': 'Esta es la plantilla %(filename)s',
'Ticket': 'Tiquete',
'Time in Cache (h:m:s)': 'Tiempo en Caché (h:m:s)',
'Timestamp': 'Marca de tiempo',
'to previous version.': 'a la versión previa.',
'To emulate a breakpoint programatically, write:': 'Emular un punto de ruptura programáticamente, escribir:',
'to use the debugger!': '¡usar el depurador!',
'toggle breakpoint': 'alternar punto de ruptura',
'Toggle comment': 'Alternar comentario',
'Toggle Fullscreen': 'Alternar pantalla completa',
'too short': 'demasiado corto',
'Traceback': 'Rastreo',
'translation strings for the application': 'cadenas de caracteres de traducción para la aplicación',
'try': 'intente',
'try something like': 'intente algo como',
'TSV (Excel compatible)': 'TSV (compatible con Excel)',
'TSV (Excel compatible, hidden cols)': 'TSV (compatible con Excel, columnas ocultas)',
'Twitter': 'Twitter',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'Unable to check for upgrades': 'No es posible verificar la existencia de actualizaciones',
'unable to create application "%s"': 'no es posible crear la aplicación "%s"',
'unable to delete file "%(filename)s"': 'no es posible eliminar el archivo "%(filename)s"',
'Unable to download': 'No es posible la descarga',
'Unable to download app': 'No es posible descargar la aplicación',
'unable to parse csv file': 'no es posible analizar el archivo CSV',
'Unable to send email': 'Unable to send email',
'unable to uninstall "%s"': 'no es posible desinstalar "%s"',
'uncheck all': 'desmarcar todos',
'uninstall': 'desinstalar',
'unknown': 'desconocido',
'update': 'actualizar',
'update all languages': 'actualizar todos los lenguajes',
'Update:': 'Actualice:',
'upload application:': 'subir aplicación:',
'Upload existing application': 'Suba esta aplicación',
'upload file:': 'suba archivo:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) para NOT, para crear consultas más complejas.',
'User': 'Usuario',
'User %(id)s is impersonating %(other_id)s': 'El usuario %(id)s está suplantando %(other_id)s',
'User %(id)s Logged-in': 'El usuario %(id)s inició la sesión',
'User %(id)s Logged-out': 'El usuario %(id)s finalizó la sesión',
'User %(id)s Password changed': 'Contraseña del usuario %(id)s cambiada',
'User %(id)s Password reset': 'Contraseña del usuario %(id)s reiniciada',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'Actualizado el perfil del usuario %(id)s',
'User %(id)s Registered': 'Usuario %(id)s Registrado',
'User %(id)s Username retrieved': 'Se ha recuperado el nombre de usuario del usuario %(id)s',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User %(username)s Logged-in': 'El usuario %(username)s inició la sesión',
"User '%(username)s' Logged-in": "El usuario '%(username)s' inició la sesión",
"User '%(username)s' Logged-out": "El usuario '%(username)s' finalizó la sesión",
'User Id': 'Id de Usuario',
'User ID': 'ID de Usuario',
'User Logged-out': 'El usuario finalizó la sesión',
'Username': 'Nombre de usuario',
'Username already taken': 'Username already taken',
'Username retrieve': 'Recuperar nombre de usuario',
'Users': 'Usuarios',
'value already in database or empty': 'el valor ya existe en la base de datos o está vacío',
'value not allowed': 'valor no permitido',
'value not in database': 'el valor no está en la base de datos',
'Verify Password': 'Verificar Contraseña',
'Version': 'Versión',
'versioning': 'versionado',
'Videos': 'Vídeos',
'View': 'Vista',
'view': 'vista',
'View %(entity)s': 'Ver %(entity)s',
'Views': 'Vistas',
'views': 'vistas',
'web2py is up to date': 'web2py está actualizado',
'web2py Recent Tweets': 'Tweets Recientes de web2py',
'Welcome': 'Bienvenido',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome %s': 'Bienvenido %s',
'Welcome to web2py': 'Bienvenido a web2py',
'Welcome to web2py!': '¡Bienvenido a web2py!',
'Which called the function %s located in the file %s': 'La cual llamó la función %s localizada en el archivo %s',
'Wiki Example': 'Wiki Example',
'Working...': 'Trabajando...',
'YES': 'SÍ',
'You are successfully running web2py': 'Usted está ejecutando web2py exitosamente',
'You can modify this application and adapt it to your needs': 'Usted puede modificar esta aplicación y adaptarla a sus necesidades',
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': 'Usted visitó la url %s',
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Su nombre de usuario es: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
| 53.22093 | 281 | 0.69161 |
e089c7505483cb4e816086d0c7c6373af6509759 | 382 | py | Python | Scripts/myspider.py | Car105JuanDev/Scrapy | a3cad8e429e462ab6ce6d7089f469accb2dadb97 | [
"bzip2-1.0.6"
] | null | null | null | Scripts/myspider.py | Car105JuanDev/Scrapy | a3cad8e429e462ab6ce6d7089f469accb2dadb97 | [
"bzip2-1.0.6"
] | null | null | null | Scripts/myspider.py | Car105JuanDev/Scrapy | a3cad8e429e462ab6ce6d7089f469accb2dadb97 | [
"bzip2-1.0.6"
] | 1 | 2021-08-31T13:31:19.000Z | 2021-08-31T13:31:19.000Z | import scrapy
class BlogSpider(scrapy.Spider):
    """Crawl the Scrapinghub blog and yield one item per post title."""

    name = 'blogspider'
    start_urls = ['https://blog.scrapinghub.com']

    def parse(self, response):
        """Emit the titles on this listing page, then queue the next page."""
        # One item per post header on the current page.
        for header in response.css('.post-header>h2'):
            yield {'title': header.css('a ::text').get()}
        # Follow pagination, re-entering this callback for the next page.
        yield from (response.follow(link, self.parse)
                    for link in response.css('a.next-posts-link'))
51b4c720ffd6ab407d3429e2d97ffaeb4f05d92e | 230 | py | Python | 03_Listas/01_criando uma lista.py | Basilio40/exercicios_Python | 327a6430e36e8a37596bed521617a2eba3024ddb | [
"Apache-2.0"
] | null | null | null | 03_Listas/01_criando uma lista.py | Basilio40/exercicios_Python | 327a6430e36e8a37596bed521617a2eba3024ddb | [
"Apache-2.0"
] | null | null | null | 03_Listas/01_criando uma lista.py | Basilio40/exercicios_Python | 327a6430e36e8a37596bed521617a2eba3024ddb | [
"Apache-2.0"
] | null | null | null | '''
Faça um Programa que leia uma lista de 5 números inteiros e mostre-os.
'''
# Resposta:
num = list()
for i in range(5):
num.append(int(input(f'Digite o {i+1}º número: ')))
print(f'\n{num} ')
print(f'Tipo: {type(num)}')
| 16.428571 | 70 | 0.626087 |
d3801c414b2a4f31c9e765d154cc4a0b15222a24 | 2,483 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/EipNotifyPaidRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/EipNotifyPaidRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/EipNotifyPaidRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class EipNotifyPaidRequest(RpcRequest):
    """RPC request wrapper for the ECS ``EipNotifyPaid`` action (API version 2014-05-26)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'EipNotifyPaid', 'ecs')
        # Fill in endpoint resolution data only when the installed core SDK
        # exposes these attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # Each accessor pair below reads/writes one query parameter of the request.

    def get_ResourceOwnerId(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_data(self):
        params = self.get_query_params()
        return params.get('data')

    def set_data(self, data):
        self.add_query_param('data', data)

    def get_ClientToken(self):
        params = self.get_query_params()
        return params.get('ClientToken')

    def set_ClientToken(self, ClientToken):
        self.add_query_param('ClientToken', ClientToken)

    def get_UserCidr(self):
        params = self.get_query_params()
        return params.get('UserCidr')

    def set_UserCidr(self, UserCidr):
        self.add_query_param('UserCidr', UserCidr)

    def get_ResourceOwnerAccount(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        params = self.get_query_params()
        return params.get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
c4022a67c9f5c879907d015a3466c1436192a13a | 44,982 | py | Python | tle/cogs/graphs.py | kuroni/TLE | 092eee3e23e421a699360759e0976d43810a086b | [
"MIT"
] | 1 | 2020-08-12T11:54:45.000Z | 2020-08-12T11:54:45.000Z | tle/cogs/graphs.py | shim1998/TLE | 83391a3662f4c47465d60e1aee4f0fc32176bb7e | [
"MIT"
] | null | null | null | tle/cogs/graphs.py | shim1998/TLE | 83391a3662f4c47465d60e1aee4f0fc32176bb7e | [
"MIT"
] | null | null | null | import bisect
import collections
import datetime as dt
import time
import itertools
import math
from typing import List
import discord
import numpy as np
import pandas as pd
import seaborn as sns
from discord.ext import commands
from matplotlib import pyplot as plt
from matplotlib import patches as patches
from matplotlib import lines as mlines
from matplotlib import dates as mdates
from matplotlib.ticker import MultipleLocator
from tle import constants
from tle.util import codeforces_api as cf
from tle.util import codeforces_common as cf_common
from tle.util import discord_common
from tle.util import graph_common as gc
pd.plotting.register_matplotlib_converters()
# A user is considered active if the duration since his last contest is not more than this
CONTEST_ACTIVE_TIME_CUTOFF = 90 * 24 * 60 * 60 # 90 days
class GraphCogError(commands.CommandError):
pass
def nice_sub_type(types):
nice_map = {'CONTESTANT':'Contest: {}',
'OUT_OF_COMPETITION':'Unofficial: {}',
'VIRTUAL':'Virtual: {}',
'PRACTICE':'Practice: {}'}
return [nice_map[t] for t in types]
def _plot_rating(resp, mark='o'):
for rating_changes in resp:
ratings, times = [], []
for rating_change in rating_changes:
ratings.append(rating_change.newRating)
times.append(dt.datetime.fromtimestamp(rating_change.ratingUpdateTimeSeconds))
plt.plot(times,
ratings,
linestyle='-',
marker=mark,
markersize=3,
markerfacecolor='white',
markeredgewidth=0.5)
gc.plot_rating_bg(cf.RATED_RANKS)
plt.gcf().autofmt_xdate()
def _classify_submissions(submissions):
solved_by_type = {sub_type: [] for sub_type in cf.Party.PARTICIPANT_TYPES}
for submission in submissions:
solved_by_type[submission.author.participantType].append(submission)
return solved_by_type
def _plot_scatter(regular, practice, virtual, point_size):
for contest in [practice, regular, virtual]:
if contest:
times, ratings = zip(*contest)
plt.scatter(times, ratings, zorder=10, s=point_size)
def _running_mean(x, bin_size):
n = len(x)
cum_sum = [0] * (n + 1)
for i in range(n):
cum_sum[i + 1] = x[i] + cum_sum[i]
res = [0] * (n - bin_size + 1)
for i in range(bin_size, n + 1):
res[i - bin_size] = (cum_sum[i] - cum_sum[i - bin_size]) / bin_size
return res
def _get_extremes(contest, problemset, submissions):
def in_contest(sub):
return (sub.author.participantType == 'CONTESTANT' or
(cf_common.is_rated_for_onsite_contest(contest) and
sub.author.participantType == 'OUT_OF_COMPETITION'))
problemset = [prob for prob in problemset if prob.rating is not None]
submissions = [sub for sub in submissions
if in_contest(sub) and sub.problem.rating is not None]
solved = {sub.problem.index: sub.problem.rating for sub in submissions if
sub.verdict == 'OK'}
max_solved = max(solved.values(), default=None)
min_unsolved = min((prob.rating for prob in problemset if prob.index not in solved),
default=None)
return min_unsolved, max_solved
def _plot_extreme(handle, rating, packed_contest_subs_problemset, solved, unsolved, legend):
extremes = [
(dt.datetime.fromtimestamp(contest.end_time), _get_extremes(contest, problemset, subs))
for contest, problemset, subs in packed_contest_subs_problemset
]
regular = []
fullsolves = []
nosolves = []
for t, (mn, mx) in extremes:
if mn and mx:
regular.append((t, mn, mx))
elif mx:
fullsolves.append((t, mx))
elif mn:
nosolves.append((t, mn))
else:
# No rated problems in the contest, which means rating is not yet available for
# problems in this contest. Skip this data point.
pass
solvedcolor = 'tab:orange'
unsolvedcolor = 'tab:blue'
linecolor = '#00000022'
outlinecolor = '#00000022'
def scatter_outline(*args, **kwargs):
plt.scatter(*args, **kwargs)
kwargs['zorder'] -= 1
kwargs['color'] = outlinecolor
if kwargs['marker'] == '*':
kwargs['s'] *= 3
elif kwargs['marker'] == 's':
kwargs['s'] *= 1.5
else:
kwargs['s'] *= 2
if 'alpha' in kwargs:
del kwargs['alpha']
if 'label' in kwargs:
del kwargs['label']
plt.scatter(*args, **kwargs)
plt.clf()
time_scatter, plot_min, plot_max = zip(*regular)
if unsolved:
scatter_outline(time_scatter, plot_min, zorder=10,
s=14, marker='o', color=unsolvedcolor,
label='Easiest unsolved')
if solved:
scatter_outline(time_scatter, plot_max, zorder=10,
s=14, marker='o', color=solvedcolor,
label='Hardest solved')
ax = plt.gca()
if solved and unsolved:
for t, mn, mx in regular:
ax.add_line(mlines.Line2D((t, t), (mn, mx), color=linecolor))
if fullsolves:
scatter_outline(*zip(*fullsolves), zorder=15,
s=42, marker='*',
color=solvedcolor)
if nosolves:
scatter_outline(*zip(*nosolves), zorder=15,
s=32, marker='X',
color=unsolvedcolor)
if legend:
plt.legend(title=f'{handle}: {rating}', title_fontsize=plt.rcParams['legend.fontsize'],
loc='upper left').set_zorder(20)
gc.plot_rating_bg(cf.RATED_RANKS)
plt.gcf().autofmt_xdate()
def _plot_average(practice, bin_size, label: str = ''):
if len(practice) > bin_size:
sub_times, ratings = map(list, zip(*practice))
sub_timestamps = [sub_time.timestamp() for sub_time in sub_times]
mean_sub_timestamps = _running_mean(sub_timestamps, bin_size)
mean_sub_times = [dt.datetime.fromtimestamp(timestamp) for timestamp in mean_sub_timestamps]
mean_ratings = _running_mean(ratings, bin_size)
plt.plot(mean_sub_times,
mean_ratings,
linestyle='-',
marker='',
markerfacecolor='white',
markeredgewidth=0.5,
label=label)
class Graphs(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.converter = commands.MemberConverter()
@commands.group(brief='Graphs for analyzing Codeforces activity',
invoke_without_command=True)
async def plot(self, ctx):
"""Plot various graphs. Wherever Codeforces handles are accepted it is possible to
use a server member's name instead by prefixing it with '!',
for name with spaces use "!name with spaces" (with quotes)."""
await ctx.send_help('plot')
@plot.command(brief='Plot Codeforces rating graph', usage='[+zoom] [+peak] [handles...] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]')
async def rating(self, ctx, *args: str):
"""Plots Codeforces rating graph for the handles provided."""
(zoom, peak), args = cf_common.filter_flags(args, ['+zoom' , '+peak'])
filt = cf_common.SubFilter()
args = filt.parse(args)
handles = args or ('!' + str(ctx.author),)
handles = await cf_common.resolve_handles(ctx, self.converter, handles)
resp = [await cf.user.rating(handle=handle) for handle in handles]
resp = [filt.filter_rating_changes(rating_changes) for rating_changes in resp]
if not any(resp):
handles_str = ', '.join(f'`{handle}`' for handle in handles)
if len(handles) == 1:
message = f'User {handles_str} is not rated'
else:
message = f'None of the given users {handles_str} are rated'
raise GraphCogError(message)
def max_prefix(user):
max_rate = 0
res = []
for data in user:
old_rating = data.oldRating
if old_rating == 0:
old_rating = 1500
if data.newRating - old_rating >= 0 and data.newRating >= max_rate:
max_rate = data.newRating
res.append(data)
return(res)
if peak:
resp = [max_prefix(user) for user in resp]
plt.clf()
plt.axes().set_prop_cycle(gc.rating_color_cycler)
_plot_rating(resp)
current_ratings = [rating_changes[-1].newRating if rating_changes else 'Unrated' for rating_changes in resp]
labels = [gc.StrWrap(f'{handle} ({rating})') for handle, rating in zip(handles, current_ratings)]
plt.legend(labels, loc='upper left')
if not zoom:
min_rating = 1100
max_rating = 1800
for rating_changes in resp:
for rating in rating_changes:
min_rating = min(min_rating, rating.newRating)
max_rating = max(max_rating, rating.newRating)
plt.ylim(min_rating - 100, max_rating + 200)
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title='Rating graph on Codeforces')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief='Plot Codeforces extremes graph',
usage='[handles] [+solved] [+unsolved] [+nolegend]')
async def extreme(self, ctx, *args: str):
"""Plots pairs of lowest rated unsolved problem and highest rated solved problem for every
contest that was rated for the given user.
"""
(solved, unsolved, nolegend), args = cf_common.filter_flags(args, ['+solved', '+unsolved', '+nolegend'])
legend, = cf_common.negate_flags(nolegend)
if not solved and not unsolved:
solved = unsolved = True
handles = args or ('!' + str(ctx.author),)
handle, = await cf_common.resolve_handles(ctx, self.converter, handles)
ratingchanges = await cf.user.rating(handle=handle)
if not ratingchanges:
raise GraphCogError(f'User {handle} is not rated')
contest_ids = [change.contestId for change in ratingchanges]
subs_by_contest_id = {contest_id: [] for contest_id in contest_ids}
for sub in await cf.user.status(handle=handle):
if sub.contestId in subs_by_contest_id:
subs_by_contest_id[sub.contestId].append(sub)
packed_contest_subs_problemset = [
(cf_common.cache2.contest_cache.get_contest(contest_id),
cf_common.cache2.problemset_cache.get_problemset(contest_id),
subs_by_contest_id[contest_id])
for contest_id in contest_ids
]
rating = max(ratingchanges, key=lambda change: change.ratingUpdateTimeSeconds).newRating
_plot_extreme(handle, rating, packed_contest_subs_problemset, solved, unsolved, legend)
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title='Codeforces extremes graph')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief="Show histogram of solved problems' rating on CF",
usage='[handles] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [c+marker..] [i+index..]')
async def solved(self, ctx, *args: str):
"""Shows a histogram of solved problems' rating on Codeforces for the handles provided.
e.g. ;plot solved meooow +contest +virtual +outof +dp"""
filt = cf_common.SubFilter()
args = filt.parse(args)
handles = args or ('!' + str(ctx.author),)
handles = await cf_common.resolve_handles(ctx, self.converter, handles)
resp = [await cf.user.status(handle=handle) for handle in handles]
all_solved_subs = [filt.filter_subs(submissions) for submissions in resp]
if not any(all_solved_subs):
raise GraphCogError(f'There are no problems within the specified parameters.')
plt.clf()
plt.xlabel('Problem rating')
plt.ylabel('Number solved')
if len(handles) == 1:
# Display solved problem separately by type for a single user.
handle, solved_by_type = handles[0], _classify_submissions(all_solved_subs[0])
all_ratings = [[sub.problem.rating for sub in solved_by_type[sub_type]]
for sub_type in filt.types]
nice_names = nice_sub_type(filt.types)
labels = [name.format(len(ratings)) for name, ratings in zip(nice_names, all_ratings)]
step = 100
# shift the range to center the text
hist_bins = list(range(filt.rlo - step // 2, filt.rhi + step // 2 + 1, step))
plt.hist(all_ratings, stacked=True, bins=hist_bins, label=labels)
total = sum(map(len, all_ratings))
plt.legend(title=f'{handle}: {total}', title_fontsize=plt.rcParams['legend.fontsize'],
loc='upper right')
else:
all_ratings = [[sub.problem.rating for sub in solved_subs]
for solved_subs in all_solved_subs]
labels = [gc.StrWrap(f'{handle}: {len(ratings)}')
for handle, ratings in zip(handles, all_ratings)]
step = 200 if filt.rhi - filt.rlo > 3000 // len(handles) else 100
hist_bins = list(range(filt.rlo - step // 2, filt.rhi + step // 2 + 1, step))
plt.hist(all_ratings, bins=hist_bins)
plt.legend(labels, loc='upper right')
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title='Histogram of problems solved on Codeforces')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief='Show histogram of solved problems on CF over time',
usage='[handles] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [phase_days=] [c+marker..] [i+index..]')
async def hist(self, ctx, *args: str):
"""Shows the histogram of problems solved on Codeforces over time for the handles provided"""
filt = cf_common.SubFilter()
args = filt.parse(args)
phase_days = 1
handles = []
for arg in args:
if arg[0:11] == 'phase_days=':
phase_days = int(arg[11:])
else:
handles.append(arg)
if phase_days < 1:
raise GraphCogError('Invalid parameters')
phase_time = dt.timedelta(days=phase_days)
handles = handles or ['!' + str(ctx.author)]
handles = await cf_common.resolve_handles(ctx, self.converter, handles)
resp = [await cf.user.status(handle=handle) for handle in handles]
all_solved_subs = [filt.filter_subs(submissions) for submissions in resp]
if not any(all_solved_subs):
raise GraphCogError(f'There are no problems within the specified parameters.')
plt.clf()
plt.xlabel('Time')
plt.ylabel('Number solved')
if len(handles) == 1:
handle, solved_by_type = handles[0], _classify_submissions(all_solved_subs[0])
all_times = [[dt.datetime.fromtimestamp(sub.creationTimeSeconds) for sub in solved_by_type[sub_type]]
for sub_type in filt.types]
nice_names = nice_sub_type(filt.types)
labels = [name.format(len(times)) for name, times in zip(nice_names, all_times)]
dlo = min(itertools.chain.from_iterable(all_times)).date()
dhi = min(dt.datetime.today() + dt.timedelta(days=1), dt.datetime.fromtimestamp(filt.dhi)).date()
phase_cnt = math.ceil((dhi - dlo) / phase_time)
plt.hist(
all_times,
stacked=True,
label=labels,
range=(dhi - phase_cnt * phase_time, dhi),
bins=min(40, phase_cnt))
total = sum(map(len, all_times))
plt.legend(title=f'{handle}: {total}', title_fontsize=plt.rcParams['legend.fontsize'])
else:
all_times = [[dt.datetime.fromtimestamp(sub.creationTimeSeconds) for sub in solved_subs]
for solved_subs in all_solved_subs]
# NOTE: matplotlib ignores labels that begin with _
# https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
# Add zero-width space to work around this
labels = [gc.StrWrap(f'{handle}: {len(times)}')
for handle, times in zip(handles, all_times)]
dlo = min(itertools.chain.from_iterable(all_times)).date()
dhi = min(dt.datetime.today() + dt.timedelta(days=1), dt.datetime.fromtimestamp(filt.dhi)).date()
phase_cnt = math.ceil((dhi - dlo) / phase_time)
plt.hist(
all_times,
range=(dhi - phase_cnt * phase_time, dhi),
bins=min(40 // len(handles), phase_cnt))
plt.legend(labels)
# NOTE: In case of nested list, matplotlib decides type using 1st sublist,
# it assumes float when 1st sublist is empty.
# Hence explicitly assigning locator and formatter is must here.
locator = mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(locator)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
plt.gcf().autofmt_xdate()
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title='Histogram of number of solved problems over time')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief='Plot count of solved CF problems over time',
usage='[handles] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [c+marker..] [i+index..]')
async def curve(self, ctx, *args: str):
"""Plots the count of problems solved over time on Codeforces for the handles provided."""
filt = cf_common.SubFilter()
args = filt.parse(args)
handles = args or ('!' + str(ctx.author),)
handles = await cf_common.resolve_handles(ctx, self.converter, handles)
resp = [await cf.user.status(handle=handle) for handle in handles]
all_solved_subs = [filt.filter_subs(submissions) for submissions in resp]
if not any(all_solved_subs):
raise GraphCogError(f'There are no problems within the specified parameters.')
plt.clf()
plt.xlabel('Time')
plt.ylabel('Cumulative solve count')
all_times = [[dt.datetime.fromtimestamp(sub.creationTimeSeconds) for sub in solved_subs]
for solved_subs in all_solved_subs]
for times in all_times:
cumulative_solve_count = list(range(1, len(times)+1)) + [len(times)]
timestretched = times + [min(dt.datetime.now(), dt.datetime.fromtimestamp(filt.dhi))]
plt.plot(timestretched, cumulative_solve_count)
labels = [gc.StrWrap(f'{handle}: {len(times)}')
for handle, times in zip(handles, all_times)]
plt.legend(labels)
plt.gcf().autofmt_xdate()
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title='Curve of number of solved problems over time')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief='Show history of problems solved by rating',
aliases=['chilli'], usage='[handle] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [b=10] [s=3] [c+marker..] [i+index..] [+nolegend]')
async def scatter(self, ctx, *args):
"""Plot Codeforces rating overlaid on a scatter plot of problems solved.
Also plots a running average of ratings of problems solved in practice."""
(nolegend,), args = cf_common.filter_flags(args, ['+nolegend'])
legend, = cf_common.negate_flags(nolegend)
filt = cf_common.SubFilter()
args = filt.parse(args)
handle, bin_size, point_size = None, 10, 3
for arg in args:
if arg[0:2] == 'b=':
bin_size = int(arg[2:])
elif arg[0:2] == 's=':
point_size = int(arg[2:])
else:
if handle:
raise GraphCogError('Only one handle allowed.')
handle = arg
if bin_size < 1 or point_size < 1 or point_size > 100:
raise GraphCogError('Invalid parameters')
handle = handle or '!' + str(ctx.author)
handle, = await cf_common.resolve_handles(ctx, self.converter, (handle,))
rating_resp = [await cf.user.rating(handle=handle)]
rating_resp = [filt.filter_rating_changes(rating_changes) for rating_changes in rating_resp]
submissions = filt.filter_subs(await cf.user.status(handle=handle))
def extract_time_and_rating(submissions):
return [(dt.datetime.fromtimestamp(sub.creationTimeSeconds), sub.problem.rating)
for sub in submissions]
if not any(submissions):
raise GraphCogError(f'No submissions for user `{handle}`')
solved_by_type = _classify_submissions(submissions)
regular = extract_time_and_rating(solved_by_type['CONTESTANT'] +
solved_by_type['OUT_OF_COMPETITION'])
practice = extract_time_and_rating(solved_by_type['PRACTICE'])
virtual = extract_time_and_rating(solved_by_type['VIRTUAL'])
plt.clf()
_plot_scatter(regular, practice, virtual, point_size)
labels = []
if practice:
labels.append('Practice')
if regular:
labels.append('Regular')
if virtual:
labels.append('Virtual')
if legend:
plt.legend(labels, loc='upper left')
_plot_average(practice, bin_size)
_plot_rating(rating_resp, mark='')
# zoom
ymin, ymax = plt.gca().get_ylim()
plt.ylim(max(ymin, filt.rlo - 100), min(ymax, filt.rhi + 100))
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title=f'Rating vs solved problem rating for {handle}')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
async def _rating_hist(self, ctx, ratings, mode, binsize, title):
if mode not in ('log', 'normal'):
raise GraphCogError('Mode should be either `log` or `normal`')
ratings = [r for r in ratings if r >= 0]
assert ratings, 'Cannot histogram plot empty list of ratings'
assert 100%binsize == 0 # because bins is semi-hardcoded
bins = 39*100//binsize
colors = []
low, high = 0, binsize * bins
for rank in cf.RATED_RANKS:
for r in range(max(rank.low, low), min(rank.high, high), binsize):
colors.append('#' + '%06x' % rank.color_embed)
assert len(colors) == bins, f'Expected {bins} colors, got {len(colors)}'
height = [0] * bins
for r in ratings:
height[r // binsize] += 1
csum = 0
cent = [0]
users = sum(height)
for h in height:
csum += h
cent.append(round(100 * csum / users))
x = [k * binsize for k in range(bins)]
label = [f'{r} ({c})' for r,c in zip(x, cent)]
l,r = 0,bins-1
while not height[l]: l += 1
while not height[r]: r -= 1
x = x[l:r+1]
cent = cent[l:r+1]
label = label[l:r+1]
colors = colors[l:r+1]
height = height[l:r+1]
plt.clf()
fig = plt.figure(figsize=(15, 5))
plt.xticks(rotation=45)
plt.xlim(l * binsize - binsize//2, r * binsize + binsize//2)
plt.bar(x, height, binsize*0.9, color=colors, linewidth=0, tick_label=label, log=(mode == 'log'))
plt.xlabel('Rating')
plt.ylabel('Number of users')
discord_file = gc.get_current_figure_as_file()
plt.close(fig)
embed = discord_common.cf_color_embed(title=title)
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief='Show server rating distribution')
async def distrib(self, ctx):
"""Plots rating distribution of users in this server"""
def in_purgatory(userid):
member = ctx.guild.get_member(int(userid))
return not member or 'Purgatory' in {role.name for role in member.roles}
res = cf_common.user_db.get_cf_users_for_guild(ctx.guild.id)
ratings = [cf_user.rating for user_id, cf_user in res
if cf_user.rating is not None and not in_purgatory(user_id)]
await self._rating_hist(ctx,
ratings,
'normal',
binsize=100,
title='Rating distribution of server members')
@plot.command(brief='Show Codeforces rating distribution', usage='[normal/log] [active/all] [contest_cutoff=5]')
async def cfdistrib(self, ctx, mode: str = 'log', activity = 'active', contest_cutoff: int = 5):
"""Plots rating distribution of either active or all users on Codeforces, in either normal or log scale.
Default mode is log, default activity is active (competed in last 90 days)
Default contest cutoff is 5 (competed at least five times overall)
"""
if activity not in ['active', 'all']:
raise GraphCogError('Activity should be either `active` or `all`')
time_cutoff = int(time.time()) - CONTEST_ACTIVE_TIME_CUTOFF if activity == 'active' else 0
handles = cf_common.cache2.rating_changes_cache.get_users_with_more_than_n_contests(time_cutoff, contest_cutoff)
if not handles:
raise GraphCogError('No Codeforces users meet the specified criteria')
ratings = [cf_common.cache2.rating_changes_cache.get_current_rating(handle) for handle in handles]
title = f'Rating distribution of {activity} Codeforces users ({mode} scale)'
await self._rating_hist(ctx,
ratings,
mode,
binsize=100,
title=title)
@plot.command(brief='Show percentile distribution on codeforces', usage='[+zoom] [+nomarker] [handles...] [+exact]')
async def centile(self, ctx, *args: str):
"""Show percentile distribution of codeforces and mark given handles in the plot. If +zoom and handles are given, it zooms to the neighborhood of the handles."""
(zoom, nomarker, exact), args = cf_common.filter_flags(args, ['+zoom', '+nomarker', '+exact'])
# Prepare data
intervals = [(rank.low, rank.high) for rank in cf.RATED_RANKS]
colors = [rank.color_graph for rank in cf.RATED_RANKS]
ratings = cf_common.cache2.rating_changes_cache.get_all_ratings()
ratings = np.array(sorted(ratings))
n = len(ratings)
perc = 100*np.arange(n)/n
users_to_mark = {}
if not nomarker:
handles = args or ('!' + str(ctx.author),)
handles = await cf_common.resolve_handles(ctx,
self.converter,
handles,
mincnt=0,
maxcnt=50)
infos = await cf.user.info(handles=list(set(handles)))
for info in infos:
if info.rating is None:
raise GraphCogError(f'User `{info.handle}` is not rated')
ix = bisect.bisect_left(ratings, info.rating)
cent = 100*ix/len(ratings)
users_to_mark[info.handle] = info.rating,cent
# Plot
plt.clf()
fig,ax = plt.subplots(1)
ax.plot(ratings, perc, color='#00000099')
plt.xlabel('Rating')
plt.ylabel('Percentile')
for pos in ['right','top','bottom','left']:
ax.spines[pos].set_visible(False)
ax.tick_params(axis='both', which='both',length=0)
# Color intervals by rank
for interval,color in zip(intervals,colors):
alpha = '99'
l,r = interval
col = color + alpha
rect = patches.Rectangle((l,-50), r-l, 200,
edgecolor='none',
facecolor=col)
ax.add_patch(rect)
if users_to_mark:
ymin = min(point[1] for point in users_to_mark.values())
ymax = max(point[1] for point in users_to_mark.values())
if zoom:
ymargin = max(0.5, (ymax - ymin) * 0.1)
ymin -= ymargin
ymax += ymargin
else:
ymin = min(-1.5, ymin - 8)
ymax = max(101.5, ymax + 8)
else:
ymin, ymax = -1.5, 101.5
if users_to_mark and zoom:
xmin = min(point[0] for point in users_to_mark.values())
xmax = max(point[0] for point in users_to_mark.values())
xmargin = max(20, (xmax - xmin) * 0.1)
xmin -= xmargin
xmax += xmargin
else:
xmin, xmax = ratings[0], ratings[-1]
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# Mark users in plot
for user, point in users_to_mark.items():
astr = f'{user} ({round(point[1], 2)})' if exact else user
apos = ('left', 'top') if point[0] <= (xmax + xmin) // 2 else ('right', 'bottom')
plt.annotate(astr,
xy=point,
xytext=(0, 0),
textcoords='offset points',
ha=apos[0],
va=apos[1])
plt.plot(*point,
marker='o',
markersize=5,
color='red',
markeredgecolor='darkred')
# Draw tick lines
linecolor = '#00000022'
inf = 10000
def horz_line(y):
l = mlines.Line2D([-inf,inf], [y,y], color=linecolor)
ax.add_line(l)
def vert_line(x):
l = mlines.Line2D([x,x], [-inf,inf], color=linecolor)
ax.add_line(l)
for y in ax.get_yticks():
horz_line(y)
for x in ax.get_xticks():
vert_line(x)
# Discord stuff
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title=f'Rating/percentile relationship')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
@plot.command(brief='Plot histogram of gudgiting')
async def howgud(self, ctx, *members: discord.Member):
members = members or (ctx.author,)
if len(members) > 5:
raise GraphCogError('Please specify at most 5 gudgitters.')
# shift the [-300, 300] gitgud range to center the text
hist_bins = list(range(-300 - 50, 300 + 50 + 1, 100))
deltas = [[x[0] for x in cf_common.user_db.howgud(member.id)] for member in members]
labels = [gc.StrWrap(f'{member.display_name}: {len(delta)}')
for member, delta in zip(members, deltas)]
plt.clf()
plt.margins(x=0)
plt.hist(deltas, bins=hist_bins, rwidth=1)
plt.xlabel('Problem delta')
plt.ylabel('Number solved')
plt.legend(labels, prop=gc.fontprop)
discord_file = gc.get_current_figure_as_file()
embed = discord_common.cf_color_embed(title='Histogram of gudgitting')
discord_common.attach_image(embed, discord_file)
discord_common.set_author_footer(embed, ctx.author)
await ctx.send(embed=embed, file=discord_file)
    @plot.command(brief='Plot distribution of server members by country')
    async def country(self, ctx, *countries):
        """Plots distribution of server members by countries. When no countries are specified, plots
        a bar graph of all members by country. When one or more countries are specified, plots a
        swarmplot of members by country and rating. Only members with registered handles and
        countries set on Codeforces are considered.
        """
        max_countries = 8
        if len(countries) > max_countries:
            raise GraphCogError(f'At most {max_countries} countries may be specified.')
        users = cf_common.user_db.get_cf_users_for_guild(ctx.guild.id)
        # Member count per country; users without a country set are skipped.
        counter = collections.Counter(user.country for _, user in users if user.country)
        if not countries:
            # No filter given: bar chart of member counts for every country.
            # list because seaborn complains for tuple.
            countries, counts = map(list, zip(*counter.most_common()))
            plt.clf()
            fig = plt.figure(figsize=(15, 5))
            with sns.axes_style(rc={'xtick.bottom': True}):
                sns.barplot(x=countries, y=counts)
            # Show counts on top of bars.
            ax = plt.gca()
            for p in ax.patches:
                x = p.get_x() + p.get_width() / 2
                y = p.get_y() + p.get_height() + 0.5
                ax.text(x, y, int(p.get_height()), horizontalalignment='center', color='#30304f',
                        fontsize='x-small')
            plt.xticks(rotation=40, horizontalalignment='right')
            ax.tick_params(axis='x', length=4, color=ax.spines['bottom'].get_edgecolor())
            plt.xlabel('Country')
            plt.ylabel('Number of members')
            discord_file = gc.get_current_figure_as_file()
            plt.close(fig)
            embed = discord_common.cf_color_embed(title='Distribution of server members by country')
        else:
            # Countries given: swarmplot of rated members from those countries.
            countries = [country.title() for country in countries]
            data = [[user.country, user.rating]
                    for _, user in users if user.rating and user.country and user.country in countries]
            if not data:
                raise GraphCogError('No rated members from the specified countries are present.')
            # Map each rating to its Codeforces rank color for the plot points.
            color_map = {rating: f'#{cf.rating2rank(rating).color_embed:06x}' for _, rating in data}
            df = pd.DataFrame(data, columns=['Country', 'Rating'])
            # Order columns by member count, most populous country first.
            column_order = sorted((country for country in countries if counter[country]),
                                  key=counter.get, reverse=True)
            plt.clf()
            if len(column_order) <= 5:
                sns.swarmplot(x='Country', y='Rating', hue='Rating', data=df, order=column_order,
                              palette=color_map)
            else:
                # Add ticks and rotate tick labels to avoid overlap.
                with sns.axes_style(rc={'xtick.bottom': True}):
                    sns.swarmplot(x='Country', y='Rating', hue='Rating', data=df,
                                  order=column_order, palette=color_map)
                plt.xticks(rotation=30, horizontalalignment='right')
                ax = plt.gca()
                ax.tick_params(axis='x', color=ax.spines['bottom'].get_edgecolor())
            plt.legend().remove()
            plt.xlabel('Country')
            plt.ylabel('Rating')
            discord_file = gc.get_current_figure_as_file()
            embed = discord_common.cf_color_embed(title='Rating distribution of server members by '
                                                        'country')
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @plot.command(brief='Show rating changes by rank', usage='contest_id [+server] [+zoom] [handles..]')
    async def visualrank(self, ctx, contest_id: int, *args: str):
        """Plot rating changes by rank. Add handles to specify a handle in the plot.
        if arguments contains `+server`, it will include just server members and not all codeforces users.
        Specify `+zoom` to zoom to the neighborhood of handles."""
        args = set(args)
        (in_server, zoom), handles = cf_common.filter_flags(args, ['+server', '+zoom'])
        handles = await cf_common.resolve_handles(ctx, self.converter, handles, mincnt=0, maxcnt=20)
        rating_changes = await cf.contest.ratingChanges(contest_id=contest_id)
        if in_server:
            # Restrict to handles registered in this guild; explicitly
            # requested handles are always kept.
            guild_handles = set(handle for discord_id, handle
                                in cf_common.user_db.get_handles_for_guild(ctx.guild.id))
            rating_changes = [rating_change for rating_change in rating_changes
                              if rating_change.handle in guild_handles or rating_change.handle in handles]
        if not rating_changes:
            raise GraphCogError(f'No rating changes for contest `{contest_id}`')
        # (rank, delta) for each explicitly requested handle, to annotate later.
        users_to_mark = {}
        for rating_change in rating_changes:
            user_delta = rating_change.newRating - rating_change.oldRating
            if rating_change.handle in handles:
                users_to_mark[rating_change.handle] = (rating_change.rank, user_delta)
        ymargin = 50
        xmargin = 50
        if users_to_mark and zoom:
            # Zoom the axes to the bounding box of the marked users.
            xmin = min(point[0] for point in users_to_mark.values())
            xmax = max(point[0] for point in users_to_mark.values())
            ymin = min(point[1] for point in users_to_mark.values())
            ymax = max(point[1] for point in users_to_mark.values())
        else:
            # Full view: symmetric y-range of at least +-200 around zero.
            ylim = 0
            if users_to_mark:
                ylim = max(abs(point[1]) for point in users_to_mark.values())
            ylim = max(ylim, 200)
            xmin = 0
            xmax = max(rating_change.rank for rating_change in rating_changes)
            ymin = -ylim
            ymax = ylim
        ranks = []
        delta = []
        color = []
        for rating_change in rating_changes:
            user_delta = rating_change.newRating - rating_change.oldRating
            # Only plot points that fall inside the (margin-padded) view.
            if (xmin - xmargin <= rating_change.rank <= xmax + xmargin
                    and ymin - ymargin <= user_delta <= ymax + ymargin):
                ranks.append(rating_change.rank)
                delta.append(user_delta)
                color.append(cf.rating2rank(rating_change.oldRating).color_graph)
        title = rating_changes[0].contestName
        plt.clf()
        fig = plt.figure(figsize=(12, 8))
        plt.title(title)
        plt.xlabel('Rank')
        plt.ylabel('Rating Changes')
        # Scale marker size inversely with the number of plotted points.
        mark_size = 2e4 / len(ranks)
        plt.xlim(xmin - xmargin, xmax + xmargin)
        plt.ylim(ymin - ymargin, ymax + ymargin)
        plt.scatter(ranks, delta, s=mark_size, c=color)
        for handle, point in users_to_mark.items():
            plt.annotate(handle,
                         xy=point,
                         xytext=(0, 0),
                         textcoords='offset points',
                         ha='left',
                         va='bottom',
                         fontsize='large')
            plt.plot(*point,
                     marker='o',
                     markersize=5,
                     color='black')
        discord_file = gc.get_current_figure_as_file()
        plt.close(fig)
        embed = discord_common.cf_color_embed(title=title)
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @plot.command(brief='Show speed of solving problems by rating',
                  usage='[handles...] [+contest] [+virtual] [+outof] [+scatter] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [s=3]')
    async def speed(self, ctx, *args):
        """Plot average time spent on problems of particular rating during contest."""
        (add_scatter,), args = cf_common.filter_flags(args, ['+scatter'])
        filt = cf_common.SubFilter()
        args = filt.parse(args)
        if 'PRACTICE' in filt.types:
            filt.types.remove('PRACTICE') # can't estimate time for practice submissions
        # Remaining args are either a scatter point size (s=N) or handles.
        handles, point_size = [], 3
        for arg in args:
            if arg[0:2] == 's=':
                point_size = int(arg[2:])
            else:
                handles.append(arg)
        handles = handles or ['!' + str(ctx.author)]
        handles = await cf_common.resolve_handles(ctx, self.converter, handles)
        resp = [await cf.user.status(handle=handle) for handle in handles]
        all_solved_subs = [filt.filter_subs(submissions) for submissions in resp]
        plt.clf()
        plt.xlabel('Rating')
        plt.ylabel('Minutes spent')
        max_time = 0 # for ylim
        for submissions in all_solved_subs:
            scatter_points = [] # only matters if +scatter
            solved_by_contest = collections.defaultdict(lambda: [])
            for submission in submissions:
                # [solve_time, problem rating, problem index] for each solved problem
                solved_by_contest[submission.contestId].append([
                    submission.relativeTimeSeconds,
                    submission.problem.rating,
                    submission.problem.index
                ])
            time_by_rating = collections.defaultdict(lambda: [])
            for events in solved_by_contest.values():
                # Sort by AC time; the time charged to each problem is the gap
                # since the previous AC in the same contest.
                events.sort()
                solved_subproblems = dict()
                last_ac_time = 0
                for (current_ac_time, rating, problem_index) in events:
                    time_to_solve = current_ac_time - last_ac_time
                    last_ac_time = current_ac_time
                    # if there are subproblems, add total time for previous subproblems to current one
                    if len(problem_index) == 2 and problem_index[1].isdigit():
                        time_to_solve += solved_subproblems.get(problem_index[0], 0)
                        solved_subproblems[problem_index[0]] = time_to_solve
                    time_by_rating[rating].append(time_to_solve / 60) # in minutes
            # Collapse each per-rating list of times into its average.
            for rating in time_by_rating.keys():
                times = time_by_rating[rating]
                time_by_rating[rating] = sum(times) / len(times)
                if add_scatter:
                    for t in times:
                        scatter_points.append([rating, t])
                        max_time = max(max_time, t)
            xs = sorted(time_by_rating.keys())
            ys = [time_by_rating[rating] for rating in xs]
            max_time = max(max_time, max(ys, default=0))
            plt.plot(xs, ys)
            if add_scatter:
                plt.scatter(*zip(*scatter_points), s=point_size)
        labels = [gc.StrWrap(handle) for handle in handles]
        plt.legend(labels)
        plt.ylim(0, max_time + 5)
        # make xticks divisible by 100
        ticks = plt.gca().get_xticks()
        base = ticks[1] - ticks[0]
        plt.gca().get_xaxis().set_major_locator(MultipleLocator(base = max(base // 100 * 100, 100)))
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(title='Plot of average time spent on a problem')
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @discord_common.send_error_if(GraphCogError, cf_common.ResolveHandleError,
                                  cf_common.FilterError)
    async def cog_command_error(self, ctx, error):
        # Errors of the types listed in the decorator are reported to the
        # user by send_error_if; nothing further to do here.
        pass
def setup(bot):
    """discord.py extension hook: register the Graphs cog with the bot."""
    bot.add_cog(Graphs(bot))
| 43.377049 | 224 | 0.595305 |
dfc637507ab2e5bec7cab6d8f871cdbe0f8df8d1 | 2,548 | py | Python | util/epidemic_model/sir_model.py | yuhsiangfu/network-spreading | eac6bc0d4c2de62dfa6563f40d4b36c522fab9b8 | [
"MIT"
] | 9 | 2018-09-04T12:31:17.000Z | 2021-01-12T12:33:02.000Z | util/epidemic_model/sir_model.py | yuhsiangfu/network-spreading | eac6bc0d4c2de62dfa6563f40d4b36c522fab9b8 | [
"MIT"
] | null | null | null | util/epidemic_model/sir_model.py | yuhsiangfu/network-spreading | eac6bc0d4c2de62dfa6563f40d4b36c522fab9b8 | [
"MIT"
] | 4 | 2018-09-04T06:47:53.000Z | 2021-01-12T12:32:19.000Z | """
SIR model, set version
@auth: Yu-Hsiang Fu
@date: 2014/10/02
@update: 2018/03/22
"""
# --------------------------------------------------------------------------------
# 1.Import modular
# --------------------------------------------------------------------------------
# import packages
import copy as c
import random as r
# --------------------------------------------------------------------------------
# 2.Define functions
# --------------------------------------------------------------------------------
def convert_susceptible_to_infected(g, susceptible_set, infected_set, rate_infection=0.1):
    """Return the set of susceptible nodes that become infected this step."""
    newly_infected = set()
    for infected_node in infected_set:
        for neighbor in g.neighbors(infected_node):
            # A coin is flipped for every neighbor of an infected node; only
            # neighbors still in the susceptible set can actually turn infected.
            if r.random() < rate_infection and neighbor in susceptible_set:
                newly_infected.add(neighbor)
    return newly_infected
def convert_infected_to_recovered(infected_set, rate_recovery=1):
    """Return the infected nodes that recover during this time step."""
    if rate_recovery == 1:
        # Deterministic recovery: every infected node moves to the R state.
        return infected_set
    # Stochastic recovery: each infected node recovers independently with
    # probability rate_recovery.
    return {node for node in infected_set if r.random() < rate_recovery}
def spreading(g, initial_node, num_time_step=50, rate_infection=0.1, rate_recovery=1):
    """Run an SIR spreading simulation on graph g.

    Returns a dict mapping each time step t to the fraction of nodes that
    are no longer susceptible: p(t) = 1 - S(t)/|V|.
    """
    susceptible = set(g.nodes())
    infected = set()
    recovered = set()
    node_count = g.number_of_nodes()
    result = {}
    for step in range(num_time_step + 1):
        if step == 0:
            # Seed the epidemic: move the initial nodes from S into I.
            infected |= set(initial_node)
            susceptible -= infected
        else:
            # One synchronous update: infections happen before recoveries
            # are removed from the infected set.
            newly_infected = convert_susceptible_to_infected(
                g, susceptible, infected, rate_infection)
            newly_recovered = convert_infected_to_recovered(infected, rate_recovery)
            recovered |= newly_recovered
            infected = (infected | newly_infected) - newly_recovered
            susceptible -= newly_infected
        result[step] = 1 - (len(susceptible) / node_count)
    return result
| 31.85 | 90 | 0.496075 |
da42e2b906f1df83d3d8fffb3f74bdfd7a380028 | 3,993 | py | Python | acrilib/acrilib/idioms/singleton.py | Acrisel/acrilib | 1a313aaf30e8a4c7732232313ec4859bd6604dc7 | [
"BSD-3-Clause"
] | null | null | null | acrilib/acrilib/idioms/singleton.py | Acrisel/acrilib | 1a313aaf30e8a4c7732232313ec4859bd6604dc7 | [
"BSD-3-Clause"
] | null | null | null | acrilib/acrilib/idioms/singleton.py | Acrisel/acrilib | 1a313aaf30e8a4c7732232313ec4859bd6604dc7 | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
##############################################################################
#
# Acrisel LTD
# Copyright (C) 2008- Acrisel (acrisel.com) . All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from threading import Lock
class SingletonType(type):
    '''
    Thread friendly Singleton construct.

    Metaclass that caches one instance per `name` key.  Note the cache is a
    metaclass attribute, so it is shared by every class using this
    metaclass (entries are keyed on name only).
    '''
    __locker = Lock()
    __instance = {}

    def __call__(self, name='', *args, **kwargs):
        # Hold the lock for the whole check-or-create sequence so two
        # threads cannot both construct an instance for the same name.
        # Using `with` (rather than bare acquire()/release()) fixes a bug in
        # the original: if the wrapped constructor raised, release() was
        # never called and every later instantiation deadlocked.
        with SingletonType.__locker:
            try:
                instance = self.__instance[name]
            except KeyError:
                instance = super(SingletonType, self).__call__(*args, **kwargs)
                self.__instance[name] = instance
        return instance
class Singleton(metaclass=SingletonType):
    """Convenience base class giving subclasses per-name singleton behaviour."""
    pass
class NamedSingletonType(type):
    '''
    Thread friendly Singleton construct that also forwards the lookup name
    to the instance constructor as the `name` keyword argument.
    '''
    __locker = Lock()
    __instance = {}

    def __call__(self, name='', *args, **kwargs):
        # `with` guarantees the lock is released even when the wrapped
        # constructor raises; the original's bare acquire()/release() pair
        # left the lock held forever in that case (deadlock on next call).
        with NamedSingletonType.__locker:
            try:
                instance = self.__instance[name]
            except KeyError:
                instance = super(NamedSingletonType, self).__call__(*args, name=name, **kwargs)
                self.__instance[name] = instance
        return instance
class NamedSingleton(metaclass=NamedSingletonType):
    """Base class for per-name singletons whose __init__ receives the name."""
    pass
if __name__ == '__main__':
    # Manual demo: SingTest shares one instance per name key.
    class SingTest(Singleton):
        __data=None
        def __init__(self, mydata=None, *args, **kwargs):
            print('init', mydata, args, kwargs)
            self.__data = mydata
        def load(self, mydata=None):
            # Only the first load sticks; later loads are no-ops.
            if not self.__data:
                self.__data = mydata
            return self
        def get(self):
            return self.__data
    s1=SingTest('S1', 55, a=23).load(1)
    print('s1', s1.get())
    s2=SingTest('S2')
    print('s2', s2.get())
    s2.load(2)
    print('s2', s2.get())
    s2.load(3)
    print('s2', s2.get())
    print('s1', s1.get())
    # NamedSingleton variant: the name key is also passed to __init__.
    class NamedSingTest(NamedSingleton):
        __data=None
        def __init__(self, mydata=None, name='', *args, **kwargs):
            print('init', mydata, name, args, kwargs)
            self.name=name
            self.__data = mydata
        def load(self, mydata=None):
            if not self.__data:
                self.__data = mydata
            return self
        def get(self):
            return self.__data
    s1=NamedSingTest('S1', 55, a=23).load(1)
    print('s1', 'named', s1.name, s1.get())
    s2=NamedSingTest('S2')
    print('s2', 'named', s2.name, s2.get())
    s2.load(2)
    print('s2', 'named', s2.name, s2.get())
    s2.load(3)
    print('s2', 'named', s2.name, s2.get())
    print('s1', 'named', s1.name, s1.get())
    # A per-name counter: each named Sequence keeps its own step_id.
    class Sequence(NamedSingleton):
        step_id=0
        def __init__(self, name=''):
            self.name=name
        def __call__(self,):
            step_id=self.step_id
            self.step_id += 1
            return step_id
    A=Sequence('A')
    print(A.name, A())
    print(A.name, A())
    B=Sequence('B')
    print(B.name, B())
| 27.537931 | 89 | 0.545705 |
72b5e2d2304cdcc8b8317f96f5c719b0f0dd3164 | 1,082 | py | Python | db_report.py | Build-Week-SpotifySong4/DataScience | 6a7a19dea55d3a75d674cb66a0a32060ca453a5c | [
"MIT"
] | 1 | 2020-03-02T18:05:16.000Z | 2020-03-02T18:05:16.000Z | db_report.py | Build-Week-SpotifySong4/DataScience | 6a7a19dea55d3a75d674cb66a0a32060ca453a5c | [
"MIT"
] | null | null | null | db_report.py | Build-Week-SpotifySong4/DataScience | 6a7a19dea55d3a75d674cb66a0a32060ca453a5c | [
"MIT"
] | 1 | 2020-03-03T22:44:45.000Z | 2020-03-03T22:44:45.000Z | # db_report.py
# from ml_component.server import db
from ml_component.server.models import Song
import sys
import os
import psycopg2
def local():
    """Print a summary of the local database: song count and a sample id."""
    print("DB REPORT (local):")
    songs = Song.query.all()
    print(f"Song #: {len(songs)}")
    # Guard against an empty table: songs[0] raised IndexError before.
    if songs:
        print(f"Example track_id: {songs[0].track_id}")
def remote():
    """Print a summary of the remote (PostgreSQL) database: song count.

    Connection parameters are read from the remote_db_* environment vars.
    """
    print("DB REPORT (remote):")
    conn = psycopg2.connect(
        dbname=os.getenv("remote_db_dbname"),
        user=os.getenv("remote_db_user"),
        password=os.getenv("remote_db_password"),
        host=os.getenv("remote_db_host")
    )
    try:
        curs = conn.cursor()
        try:
            curs.execute("SELECT COUNT(*) FROM songs;")
            song_count = curs.fetchone()[0]
            print(f"Song #: {song_count}")
        finally:
            # Close the cursor even if the query fails.
            curs.close()
    finally:
        # Fix: the original never closed the connection (resource leak).
        conn.close()
if __name__ == "__main__":
    # Dispatch on the first CLI argument: "local" or "remote".
    if len(sys.argv) == 1:
        print("Must have argument local or remote")
    elif sys.argv[1] == "local":
        local()
    elif sys.argv[1] == "remote":
        remote()
    else:
        print("Second argument must be local or remote.")
6e4e362b5d520cf6ac0a21cb76030b256ae19c2a | 1,846 | py | Python | py_gcn/layers.py | Guo-lab/Graph | c4c5fbc8fb5d645c16da20351b9746019cf75aab | [
"MIT"
] | null | null | null | py_gcn/layers.py | Guo-lab/Graph | c4c5fbc8fb5d645c16da20351b9746019cf75aab | [
"MIT"
] | null | null | null | py_gcn/layers.py | Guo-lab/Graph | c4c5fbc8fb5d645c16da20351b9746019cf75aab | [
"MIT"
] | null | null | null | import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
#@ https://blog.csdn.net/Xiao_CangTian/article/details/116474734
class GraphConvolution(Module):
    """Simple GCN layer computing out = adj @ (input @ W) (+ bias).

    One propagation step H' = A_hat * H * W, where adj is the (typically
    normalized) adjacency matrix and W the learned weight matrix.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Registering None keeps the attribute present without a tensor.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise weight (and bias) uniformly in [-b, b], b = 1/sqrt(out_features)."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Propagate features: adj @ (input @ weight), plus bias if present."""
        projected = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, projected)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.in_features} -> {self.out_features})'
8c226ba8dda426754be163ca5ba87bff7b7747b6 | 244 | py | Python | docs/conf.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 4 | 2018-08-29T02:51:38.000Z | 2021-11-16T11:36:11.000Z | docs/conf.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 1 | 2019-05-12T08:21:19.000Z | 2019-05-12T08:21:19.000Z | from docs_conf.conf import *
branch = 'latest'
master_doc = 'index'
# URLs that the Sphinx linkcheck builder should skip.
linkcheck_ignore = [
    'http://localhost',
]
intersphinx_mapping = {}
html_last_updated_fmt = '%d-%b-%y %H:%M'
def setup(app):
    # NOTE(review): app.add_stylesheet is deprecated in newer Sphinx in
    # favour of app.add_css_file -- confirm the pinned Sphinx version.
    app.add_stylesheet("css/ribbon.css")
| 14.352941 | 40 | 0.672131 |
27e939403d0fc1879a43e21b56ef61edadaac9f6 | 213 | py | Python | beneficiaries/beneficiaries/doctype/beneficiary_logs/test_beneficiary_logs.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/beneficiary_logs/test_beneficiary_logs.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/beneficiary_logs/test_beneficiary_logs.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | 1 | 2021-08-31T18:47:58.000Z | 2021-08-31T18:47:58.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Baida and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestBeneficiarylogs(unittest.TestCase):
	"""Placeholder test case for the Beneficiary Logs doctype (no tests yet)."""
	pass
| 19.363636 | 45 | 0.765258 |
d464c7c65d96afd9161c4a7651f75c4380570204 | 2,374 | py | Python | heat/common/netutils.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 265 | 2015-01-02T09:33:22.000Z | 2022-03-26T23:19:54.000Z | heat/common/netutils.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 8 | 2015-09-01T15:43:19.000Z | 2021-12-14T05:18:23.000Z | heat/common/netutils.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 295 | 2015-01-06T07:00:40.000Z | 2021-09-06T08:05:06.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
from heat.common.i18n import _
DNS_LABEL_MAX_LEN = 63
DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN
FQDN_MAX_LEN = 255
def is_prefix_subset(orig_prefixes, new_prefixes):
    """Check whether orig_prefixes is subset of new_prefixes.

    Both arguments are lists of valid prefixes; returns True when every
    address covered by orig_prefixes is also covered by new_prefixes.
    """
    return netaddr.IPSet(orig_prefixes).issubset(netaddr.IPSet(new_prefixes))
def validate_dns_format(data):
    """Validate that data is a well-formed DNS name.

    Empty input is accepted; the first violation raises ValueError.
    """
    if not data:
        return
    # A trailing dot marks a FQDN; strip it before the length/label checks.
    hostname = data[:-1] if data.endswith('.') else data
    if len(hostname) > FQDN_MAX_LEN:
        raise ValueError(
            _("'%(data)s' exceeds the %(max_len)s character FQDN limit") % {
                'data': hostname,
                'max_len': FQDN_MAX_LEN})
    labels = hostname.split('.')
    for label in labels:
        if not label:
            raise ValueError(_("Encountered an empty component."))
        if label.startswith('-') or label.endswith('-'):
            raise ValueError(
                _("Name '%s' must not start or end with a hyphen.") % label)
        if not re.match(DNS_LABEL_REGEX, label):
            raise ValueError(
                _("Name '%(name)s' must be 1-%(max_len)s characters long, "
                  "each of which can only be alphanumeric or "
                  "a hyphen.") % {'name': label,
                                  'max_len': DNS_LABEL_MAX_LEN})
    # RFC 1123 hints that a Top Level Domain (TLD) can't be all numeric;
    # this check only applies to FQDNs (trailing dot, more than one label).
    if (data.endswith('.') and len(labels) > 1
            and re.match("^[0-9]+$", labels[-1])):
        raise ValueError(_("TLD '%s' must not be all numeric.") % labels[-1])
| 37.68254 | 78 | 0.631003 |
4db85412e1f67a217996e58a112c7faf7cc26a1c | 2,531 | py | Python | tita_do.py | KomeilParseh/TITA-DO | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 9 | 2020-08-27T10:10:11.000Z | 2021-04-21T04:46:15.000Z | tita_do.py | mdk1384/TITA-DO-1 | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 2 | 2020-08-27T12:09:57.000Z | 2021-01-05T09:29:19.000Z | tita_do.py | mdk1384/TITA-DO-1 | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 2 | 2020-08-27T10:10:18.000Z | 2021-01-01T06:20:20.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TITA-DO MIT licance.
"""
#import pickle
from time import sleep
from sys import exit
from models.ask_name import ask_name
from models.enigma import Enigma_starter
# Startup banner.
print("\n\t :) You Todo with TITA-DO (:")
print("      TITA-DO =.|.= Hyd_Prime version")
print("\n\n")
# Abort the program if the user fails the name check.
if ask_name() == False:
    print("\n\n\t***TITA-DO Ver Hyd 1 ***")
    exit()
def write_file(data, name):
    '''Append data to the file called name and return data unchanged.'''
    # Context manager guarantees the file is closed even if writing fails
    # (the original leaked the handle on error).
    with open(name, 'a') as file_tita:
        file_tita.writelines(data)
    return data
def load_file():
    '''Return the first line of TITA-DO's temp file (Temp.txt).'''
    # Bug fix: the original read a line, discarded it, and returned the
    # unrelated module-level global `data` instead of the file contents.
    with open('Temp.txt', 'r') as file_tita:
        return file_tita.readline()
#Hi!
# Prompt for the first todo, persist it, and index it in data_base.
data_base = {}
NUMBER_TODO = 1
data = write_file(str(input("==> pleas enter your todo:")), "Temp.txt")
data_base[load_file()] = "==>", NUMBER_TODO
NUMBER_TODO += 1
if load_file() == 'parseh':
    print("\n Hi Admin! \n")
#it's operator case
OPERATOR_CASE = str(input("If you want to continue? 1.Yes 2.No = "))
# Re-prompt until the user answers "1" (yes) or "2" (no).
while OPERATOR_CASE != "1" and OPERATOR_CASE != "2":
    print("Your answer not found in program")
    print("Enter correct answer please")
    sleep(0.25)
    OPERATOR_CASE = str(input("If you want to continue? 1.Yes 2.No = "))
    if OPERATOR_CASE == "1" or OPERATOR_CASE == "2":
        break
# Answer: yes -- keep collecting todos until the user answers "2".
while OPERATOR_CASE == "1":
    try:
        print("-> You Todo with TITA-DO! <-")
    except:
        print("Answer Error")
        print("Your answer is not found in program")
        print("Enter correct answer please")
        sleep(0.5)
        print("Your answer not found in program")
        print("Enter correct answer please")
    data = write_file(input(str("==>pleas enter your todo:")), "Temp.txt")
    data_base[load_file()] = "======>", NUMBER_TODO
    NUMBER_TODO += 1
    OPERATOR_CASE = str(input("If you want to continue? 1.Yes 2.No = "))
    while OPERATOR_CASE != "1" and OPERATOR_CASE != "2":
        print("Your answer not found in program")
        print("Enter correct answer please")
        sleep(0.5)
        OPERATOR_CASE = str(input("If you want to continue? 1.Yes 2.No = "))
        if OPERATOR_CASE == "1" or OPERATOR_CASE == "2":
            break
    if OPERATOR_CASE == "2":
        break
# Answer: no -- dump everything collected to tita.do and say goodbye.
if OPERATOR_CASE == "2":
    print("")
    a = write_file(data_base, "tita.do")
    print(a)
    print(" \n Thanks for using this program ")
    print("      Goodbye       ")
    print("author", "==> TITA-DO <==", "Komeil Parseh")
# Done!
| 26.364583 | 76 | 0.604109 |
1510816be3b8e9ba49df738882724e1fdcaa84e1 | 1,841 | py | Python | baby.py | kusuwada/baby-language | ded4cb1cc2cdb9db21a15cab01e72cd7dac80d59 | [
"MIT"
] | 3 | 2020-02-10T06:22:03.000Z | 2020-02-10T10:38:14.000Z | baby.py | kusuwada/baby-language | ded4cb1cc2cdb9db21a15cab01e72cd7dac80d59 | [
"MIT"
] | null | null | null | baby.py | kusuwada/baby-language | ded4cb1cc2cdb9db21a15cab01e72cd7dac80d59 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import yaml
import morse
class Baby():
    """Translate between plain text and 'baby language' via Morse code.

    The syllables standing for dot, dash, and newline are loaded from
    baby.yml for the configured lang_type.
    """
    # Class-level defaults; __init__ replaces baby_map from baby.yml.
    lang_type = 'English'
    baby_map = {}
    morse = morse.Morse()
    def __init__(self):
        with open('baby.yml', 'r') as f:
            self.baby_map = yaml.safe_load(f)
    def dec(self, baby):
        """Decode a baby-language string back into a plain-text message."""
        morse = ''
        for m in baby.split(' '):
            # Greedily strip known syllables off the front of each word,
            # emitting the corresponding Morse symbol for each one.
            while m != '':
                if m.startswith(self.baby_map[self.lang_type]['first']):
                    morse += '.'
                    m = m[len(self.baby_map[self.lang_type]['first']):]
                elif m.startswith(self.baby_map[self.lang_type]['second']):
                    morse += '-'
                    m = m[len(self.baby_map[self.lang_type]['second']):]
                elif m.startswith(self.baby_map[self.lang_type]['newline']):
                    morse += '\n'
                    m = m[len(self.baby_map[self.lang_type]['newline']):]
                else:
                    morse += m # for invalid input
                    m = ''
            if not morse.endswith('\n'):
                morse += ' '
        message = self.morse.dec(morse)
        return message
    def enc(self, message):
        """Encode a plain-text message into baby language via Morse."""
        baby = ''
        morse = self.morse.enc(message)
        for m in morse:
            if m == '.':
                baby += self.baby_map[self.lang_type]['first']
            elif m == '-':
                baby += self.baby_map[self.lang_type]['second']
            elif m == '\n':
                baby += self.baby_map[self.lang_type]['newline']
                baby += ' '
            elif m == ' ':
                baby += ' '
            elif m == '*': # for error morse code
                baby += ''
            else:
                baby += '*' + m + '*'
        return baby
if __name__ == '__main__':
    baby = Baby()
    # Normalise the mode answer to 'd' (decode) or 'e' (encode).
    m_mode = input('input mode (d: decode, e: encode) > ')
    if m_mode == 'd' or m_mode == 'D' or m_mode == 'decode':
        m_mode = 'd'
    elif m_mode == 'e' or m_mode == 'E' or m_mode == 'encode':
        m_mode = 'e'
    else:
        raise Exception('[ERRROR] invalid mode input!')
    # REPL: translate every entered line until interrupted (Ctrl-C quits).
    while(True):
        source = input('input your message > ')
        if m_mode == 'd':
            result = baby.dec(source)
        elif m_mode == 'e':
            result = baby.enc(source)
        print(result)
ba6f8521da72a82aed597fd49811bc1e3c54b365 | 10,790 | py | Python | samples/searchconsole/sc3.py | a1043332/sc_api | 5472924f7f16c8d97b31a8bbda02d9a09abf044b | [
"Apache-2.0"
] | null | null | null | samples/searchconsole/sc3.py | a1043332/sc_api | 5472924f7f16c8d97b31a8bbda02d9a09abf044b | [
"Apache-2.0"
] | null | null | null | samples/searchconsole/sc3.py | a1043332/sc_api | 5472924f7f16c8d97b31a8bbda02d9a09abf044b | [
"Apache-2.0"
] | null | null | null | #copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example for using the Google Search Analytics API (part of Search Console API).
A basic python command-line example that uses the searchAnalytics.query method
of the Google Search Console API. This example demonstrates how to query Google
search results data for your property. Learn more at
https://developers.google.com/webmaster-tools/
To use:
1) Install the Google Python client library, as shown at https://developers.google.com/webmaster-tools/v3/libraries.
2) Sign up for a new project in the Google APIs console at https://code.google.com/apis/console.
3) Register the project to use OAuth2.0 for installed applications.
4) Copy your client ID, client secret, and redirect URL into the client_secrets.json file included in this package.
5) Run the app in the command-line as shown below.
Sample usage:
$ python search_analytics_api_sample.py 'https://www.example.com/' '2015-05-01' '2015-05-30'
"""
from __future__ import print_function
import argparse
import sys
from googleapiclient import sample_tools
# Module-level accumulators collecting every generated SQL statement.
allsql=''
allsql2=''
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('property_uri', type=str,
                       help=('Site or app URI to query data for (including '
                             'trailing slash).'))
argparser.add_argument('start_date', type=str,
                       help=('Start date of the requested date range in '
                             'YYYY-MM-DD format.'))
argparser.add_argument('end_date', type=str,
                       help=('End date of the requested date range in '
                             'YYYY-MM-DD format.'))
def _build_insert_sql(table, response):
    """Build one INSERT statement for the rows of a Search Analytics response.

    Args:
      table: Target table name (query/page/country/device/searchAppearance).
      response: Decoded searchanalytics.query response dict.

    Returns:
      The INSERT statement string, or None when the response has no rows
      (responses without data omit the 'rows' key entirely).

    Bug fix vs. the original: the column list is
    (SCKey,Click,Impressions,CTR,Position) but values were emitted as
    impressions-then-clicks, swapping the two columns.  Values are now
    emitted in declared column order.

    NOTE(review): values are spliced directly into the SQL text, so a key
    containing a quote breaks the statement (SQL-injection risk).  The
    statement is later run through mysql-connector; parameterized
    cursor.executemany() would be safer.
    """
    if 'rows' not in response:
        return None
    values = []
    for row in response['rows']:
        values.append("('%s',%s,%s,%s,%s)" % (
            row['keys'][0], row['clicks'], row['impressions'],
            row['ctr'], row['position']))
    return ('insert into %s (SCKey,Click,Impressions,CTR,Position) values'
            % table) + ','.join(values) + ';'


def _store_sql(sql, mode):
    """Print *sql* and write it to qqq.db ('w' truncates, 'a' appends)."""
    print(sql)
    with open("qqq.db", mode, encoding="utf-8") as f:
        print(sql, file=f)


def main(argv):
    """Run a set of Search Analytics queries and dump the results as SQL.

    Accumulates the generated INSERT statements into the module globals
    `allsql` (all tables) and `allsql2` (only the `query` table, which the
    bottom of the script pushes into MySQL).
    """
    global allsql, allsql2
    service, flags = sample_tools.init(
        argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/webmasters.readonly')

    # First run a query to learn which dates we have data for.  Always check
    # which days in a date range have data before running the main query;
    # days without data are simply missing from the results.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['date'],
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Available dates')

    # Totals for the whole date range.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Totals')

    # Top 10 queries, sorted by click count descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top Queries')
    # Raw response dump kept for debugging (original behavior).
    with open("test.txt", "w", encoding="utf-8") as f:
        print("[", file=f)
        print(response, file=f)
    sql = _build_insert_sql('query', response)
    if sql is not None:
        _store_sql(sql, "w")  # first table truncates qqq.db
        allsql += sql
        allsql2 += sql

    # Top 11-20 mobile queries, sorted by click count descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'dimensionFilterGroups': [{
            'filters': [{
                'dimension': 'device',
                'expression': 'mobile',
            }]
        }],
        'rowLimit': 10,
        'startRow': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top 11-20 Mobile Queries')

    # Top 10 pages, sorted by click count descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['page'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top Pages')
    sql = _build_insert_sql('page', response)
    if sql is not None:
        _store_sql(sql, "a")
        allsql += sql

    # Top 10 queries in India, sorted by click count descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'dimensionFilterGroups': [{
            'filters': [{
                'dimension': 'country',
                'expression': 'ind',
            }]
        }],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top queries in India')

    # Group by both country and device.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['country', 'device'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Group by country and device')

    # Group by total Search Appearance count.  Note: searchAppearance cannot
    # be combined with other dimensions.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['searchAppearance'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Search Appearance Features')

    # country table
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['country'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'country')
    sql = _build_insert_sql('country', response)
    if sql is not None:
        _store_sql(sql, "a")
        allsql += sql

    # device table
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['device'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Group by country and device')
    sql = _build_insert_sql('device', response)
    if sql is not None:
        _store_sql(sql, "a")
        allsql += sql

    # searchAppearance table
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['searchAppearance'],
        'rowLimit': 10,
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'searchAppearance')
    sql = _build_insert_sql('searchAppearance', response)
    if sql is not None:
        _store_sql(sql, "a")
        allsql += sql
def execute_request(service, property_uri, request):
    """Execute a searchAnalytics.query request.

    Args:
      service: The webmasters service used to run the query.
      property_uri: The site or app URI to request data for.
      request: The request body to execute.

    Returns:
      The decoded query response (an array of response rows).
    """
    query = service.searchanalytics().query(siteUrl=property_uri, body=request)
    return query.execute()
def print_table(response, title):
    """Print a response table.

    Each printed row contains the key(s), clicks, impressions, CTR and
    average position.

    Args:
      response: The server response to render.
      title: The table title.
    """
    print('\n --' + title + ':')
    if 'rows' not in response:
        print('Empty response')
        return
    fmt = '{:<20}' + '{:>20}' * 4
    print(fmt.format('Keys', 'Clicks', 'Impressions', 'CTR', 'Position'))
    for entry in response['rows']:
        # Keys are returned only when one or more dimensions were requested.
        key_text = ''
        if 'keys' in entry:
            key_text = u','.join(entry['keys']).encode('utf-8').decode()
        print(fmt.format(key_text, entry['clicks'], entry['impressions'],
                         entry['ctr'], entry['position']))
if __name__ == '__main__':
    main(sys.argv)

# NOTE(review): this block appears at module level in the dump; if it was
# originally inside the __main__ guard, the indentation was lost.  It pushes
# the accumulated `query` INSERT statements (allsql2) into a local MySQL
# database with hard-coded credentials.
import mysql.connector
from mysql.connector import Error
try:
    connection = mysql.connector.connect(
        host='localhost',
        database='SC',
        user='hdd',
        password='hdd')
    if connection.is_connected():
        db_Info = connection.get_server_info()
        print("version", db_Info)
        cursor = connection.cursor()
        sql= allsql2
        cursor.execute(sql)
        connection.commit()
except Error as e:
    print("error", e)
finally:
    # NOTE(review): if connect() itself raises, `connection` is unbound here
    # and this finally-block raises NameError.
    if (connection.is_connected()):
        cursor.close()
        connection.close()
        print("closed")
| 30.480226 | 144 | 0.624467 |
a6a3417600b5aae00c45f91deb27a7c002e7748a | 13,134 | py | Python | cube/generic_networks/ner.py | victor-armegioiu/NLP-Cube | f19ee754e14b400adaabe2fa4ec084395a44e66d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cube/generic_networks/ner.py | victor-armegioiu/NLP-Cube | f19ee754e14b400adaabe2fa4ec084395a44e66d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cube/generic_networks/ner.py | victor-armegioiu/NLP-Cube | f19ee754e14b400adaabe2fa4ec084395a44e66d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
# Author: Tiberiu Boros
#
# Copyright (c) 2018 Adobe Systems Incorporated. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dynet as dy
import numpy as np
def get_link(seq, iSrc, iDst):
    """Return 1 when tokens iSrc and iDst belong to the same expression.

    Labels have the form "idx:LABEL[;idx:LABEL...]" or "*" for unlabelled
    tokens.  Position 0 (the root) links to any labelled token.
    """
    src_label = seq[iSrc].label
    dst_label = seq[iDst].label
    # The root position links to every labelled token.
    if iSrc == 0 and dst_label != '*':
        return 1
    if iDst == 0 and src_label != '*':
        return 1
    if src_label == '*' or dst_label == '*':
        return 0
    # Two tokens are linked when they share at least one expression index.
    src_indices = {part.split(':')[0] for part in src_label.split(';')}
    dst_indices = {part.split(':')[0] for part in dst_label.split(';')}
    return 1 if src_indices & dst_indices else 0
def _has_index(index, label):
parts = label.split(";")
for part in parts:
pp = part.split(":")
if pp[0] == str(index):
return True
return False
class GDBNer:
    """Graph-based expression recognizer built on DyNet.

    Token pairs are scored for a "link" (same expression) by a sigmoid over
    projected BiLSTM states; connected token groups are then decoded and an
    LSTM over each group's tokens predicts the expression label.
    """

    def __init__(self, config, encodings, embeddings, runtime=False):
        # config: network hyper-parameters; encodings: vocab/label maps;
        # embeddings: pretrained word-embedding store; runtime=True disables
        # training-only initialization (orthonormal LSTMs).
        self.model = dy.Model()
        self.config = config
        self.encodings = encodings
        self.embeddings = embeddings
        self.trainer = dy.AdamTrainer(self.model, alpha=2e-3, beta_1=0.9, beta_2=0.9)
        self.word_lookup = self.model.add_lookup_parameters((len(self.encodings.word2int), self.config.embeddings_size))
        self.upos_lookup = self.model.add_lookup_parameters((len(self.encodings.upos2int), self.config.embeddings_size))
        self.xpos_lookup = self.model.add_lookup_parameters((len(self.encodings.xpos2int), self.config.embeddings_size))
        self.attrs_lookup = self.model.add_lookup_parameters(
            (len(self.encodings.attrs2int), self.config.embeddings_size))
        if self.config.use_char_embeddings:
            from cube.generic_networks.character_embeddings import CharacterNetwork
            self.character_network = CharacterNetwork(self.config.embeddings_size, encodings,
                                                      rnn_size=self.config.char_rnn_size,
                                                      rnn_layers=self.config.char_rnn_layers,
                                                      embeddings_size=self.config.embeddings_size, model=self.model,
                                                      runtime=runtime)
        # Projects pretrained word embeddings into the network input space.
        self.we_proj = self.model.add_parameters(
            (self.config.embeddings_size, self.embeddings.word_embeddings_size))
        self.encoder_fw = []
        self.encoder_bw = []
        lstm_builder = dy.VanillaLSTMBuilder
        if not runtime:
            # Orthonormal initialization is only relevant while training.
            from cube.generic_networks.utils import orthonormal_VanillaLSTMBuilder
            lstm_builder = orthonormal_VanillaLSTMBuilder
        input_size = self.config.embeddings_size
        for layer_size in self.config.arc_rnn_layers:
            self.encoder_fw.append(lstm_builder(1, input_size, layer_size, self.model))
            self.encoder_bw.append(lstm_builder(1, input_size, layer_size, self.model))
            input_size = layer_size * 2
        # Pairwise link scorer (sigmoid over concatenated projections).
        self.link_w = self.model.add_parameters((1, self.config.proj_size * 2))
        self.link_b = self.model.add_parameters((1))
        # LSTM that reads an expression's tokens and emits its label.
        self.label_decoder = lstm_builder(1, self.config.proj_size, self.config.label_rnn_size, self.model)
        self.proj_w1 = self.model.add_parameters((self.config.proj_size, input_size))
        self.proj_w2 = self.model.add_parameters((self.config.proj_size, input_size))
        self.proj_w3 = self.model.add_parameters((self.config.proj_size, input_size))
        self.proj_b1 = self.model.add_parameters((self.config.proj_size))
        self.proj_b2 = self.model.add_parameters((self.config.proj_size))
        self.proj_b3 = self.model.add_parameters((self.config.proj_size))
        self.label_w = self.model.add_parameters((len(self.encodings.label2int), self.config.label_rnn_size))
        self.label_b = self.model.add_parameters((len(self.encodings.label2int)))
        # Losses accumulated by learn() and flushed by end_batch().
        self.losses = []

    def start_batch(self):
        # Clears accumulated losses and starts a fresh computation graph.
        self.losses = []
        dy.renew_cg()

    def end_batch(self):
        # Sums accumulated losses, backpropagates and updates parameters.
        # Returns the scalar batch loss (0 when nothing was accumulated).
        total_loss = 0
        if len(self.losses) > 0:
            loss = dy.esum(self.losses)
            total_loss = loss.value()
            loss.backward()
            self.trainer.update()
        return total_loss

    def _make_input(self, seq, runtime=False):
        # Builds one input vector per token: projected word embedding +
        # scaled sum of tag embeddings + character embedding, with
        # whole-sub-vector dropout during training.
        seq_input = []
        zero_vec_tag = dy.inputVector([0 for i in range(self.config.embeddings_size)])
        for entry in seq:
            word = entry.word
            upos = entry.upos
            xpos = entry.xpos
            attrs = entry.attrs
            word_vec, found = self.embeddings.get_word_embeddings(word)
            if not found:
                # Fall back to the sentence-end embedding for OOV words.
                word_vec, found = self.embeddings.get_word_embeddings('</s>')
            word_vector = self.we_proj.expr(update=True) * dy.inputVector(word_vec)
            tag_mult = 1.0
            if upos in self.encodings.upos2int:
                upos_vec = self.upos_lookup[self.encodings.upos2int[upos]]
            else:
                upos_vec = zero_vec_tag
                tag_mult += 1.0
            if xpos in self.encodings.xpos2int:
                xpos_vec = self.xpos_lookup[self.encodings.xpos2int[xpos]]
            else:
                xpos_vec = zero_vec_tag
                tag_mult += 1.0
            # NOTE(review): 'is' below looks like a typo for 'in' — the
            # identity test is always False, so attrs embeddings are never
            # used and tag_mult is always incremented here.
            if attrs is self.encodings.attrs2int:
                attrs_vec = self.attrs_lookup[self.encodings.attrs2int[attrs]]
            else:
                attrs_vec = zero_vec_tag
                tag_mult += 1.0
            tag_vector = (upos_vec + xpos_vec + attrs_vec) * dy.scalarInput(tag_mult)
            if self.config.use_char_embeddings:
                char_vector, states = self.character_network.compute_embeddings(word, runtime=runtime)
            else:
                char_vector = zero_vec_tag
            # Input dropout: randomly zero whole sub-vectors while training
            # and rescale the remaining ones.
            p1 = np.random.random()
            p2 = np.random.random()
            p3 = np.random.random()
            if not self.config.use_char_embeddings:
                # No char vector to drop, so never trigger the p3 branch.
                p3 = 1.0
            scale = 1.0
            if not runtime:
                if p1 < 0.34:
                    word_vector = zero_vec_tag
                    scale += 1.0
                if p2 < 0.34:
                    tag_vector = zero_vec_tag
                    scale += 1.0
                if p3 < 0.34:
                    char_vector = zero_vec_tag
                    scale += 1.0
            seq_input.append((word_vector + tag_vector + char_vector) * dy.scalarInput(scale))
        return seq_input

    def save(self, path):
        # Persists all model parameters to *path*.
        self.model.save(path)

    def load(self, path):
        # Restores model parameters previously written by save().
        self.model.populate(path)

    def tag(self, seq):
        # Annotates a sentence: prepends a ROOT entry, predicts pairwise
        # links, then decodes expressions and their labels.
        dy.renew_cg()
        # NOTE(review): CUPTEntry is not defined/imported in the visible part
        # of this module — confirm it is in scope at runtime.
        seq = [CUPTEntry(0, '<ROOT>', '<ROOT>', '<ROOT>', '<ROOT>', '<ROOT>', '<ROOT>', '*', '<ROOT>',
                         '<ROOT>')] + seq  # append root
        output, proj_x = self._predict(seq, runtime=True)
        return self._decode(output, proj_x)

    def _predict(self, seq, runtime=True):
        # Returns (upper-triangular pairwise link probabilities,
        # per-token label projections).
        x_list = self._make_input(seq, runtime=runtime)
        for fw, bw in zip(self.encoder_fw, self.encoder_bw):
            x_fw = fw.initial_state().transduce(x_list)
            x_bw = list(reversed(bw.initial_state().transduce(reversed(x_list))))
            x_list = [dy.concatenate([x1, x2]) for x1, x2 in zip(x_fw, x_bw)]
        proj_x1 = [dy.tanh(self.proj_w1.expr(update=True) * x + self.proj_b1.expr(update=True)) for x in x_list]
        proj_x2 = [dy.tanh(self.proj_w2.expr(update=True) * x + self.proj_b2.expr(update=True)) for x in x_list]
        proj_x3 = [dy.tanh(self.proj_w3.expr(update=True) * x + self.proj_b3.expr(update=True)) for x in x_list]
        output = []
        # Only iDst > iSrc is scored; links are treated as symmetric later.
        for iSrc in range(len(seq)):
            out_row = []
            for iDst in range(len(seq)):
                if iDst > iSrc:
                    x = dy.concatenate([proj_x1[iSrc], proj_x2[iDst]])
                    out_row.append(dy.logistic(self.link_w.expr(update=True) * x + self.link_b.expr(update=True)))
                else:
                    out_row.append(None)
            output.append(out_row)
        return output, proj_x3

    def _get_gs_chains(self, seq):
        # Extracts gold expression token-index chains and their labels from
        # "idx:LABEL[;idx:LABEL...]" annotations.
        indices = []
        for row in seq:
            if row.label != "*":
                parts = row.label.split(";")
                for part in parts:
                    pp = part.split(":")
                    expr_index = int(pp[0])
                    if expr_index not in indices:
                        indices.append(expr_index)
        chains = []
        labels = []
        for index in indices:
            first = True
            lst = []
            label = ""
            i = 0
            for row in seq:
                if _has_index(index, row.label):
                    if first:
                        # The expression label is read off its first member.
                        first = False
                        parts = row.label.split(";")
                        for part in parts:
                            pp = part.split(":")
                            if len(pp) == 1:
                                # Debug output for malformed annotations.
                                print(str(row.index) + "\t" + row.word + "\t" + row.label)
                            if pp[0] == str(index):
                                label = pp[1]
                                break
                    lst.append(i)
                i += 1
            if label == "":
                # Debug dump when no label could be recovered.
                for row in seq:
                    print (row.orig_line)
            chains.append(lst)
            labels.append(label)
        return chains, labels

    def _valid(self, a, current_nodes, node):
        # A candidate node must be linked to every node already in the group.
        for other_node in current_nodes:
            if a[node, other_node] == 0:
                return False
        return True

    def _backtrack(self, a, current_nodes, solutions):
        # Depth-first search for maximal fully-connected node groups
        # reachable from current_nodes[-1] in adjacency matrix `a`.
        recursed = False
        for i in range(a.shape[0]):
            if a[current_nodes[-1], i] == 1:
                if i not in current_nodes:
                    if self._valid(a, current_nodes, i):
                        current_nodes.append(i)  # push
                        recursed = True
                        self._backtrack(a, current_nodes, solutions)
                        current_nodes = current_nodes[:-1]  # pop
        if not recursed and len(current_nodes) > 1:
            import copy
            solutions.append(copy.deepcopy(current_nodes))

    def learn(self, seq):
        # Accumulates link and label losses for one gold-standard sentence
        # (flushed by end_batch()).
        output, proj_x3 = self._predict(seq, runtime=False)
        # arcs: binary log-loss on each upper-triangular link probability
        for iSrc in range(len(seq)):
            for iDst in range(len(seq)):
                if iDst > iSrc:
                    o = output[iSrc][iDst]  # the softmax portion
                    t = get_link(seq, iSrc, iDst)
                    # if t==1:
                    # self.losses.append(-dy.log(dy.pick(o, t)))
                    self.losses.append(dy.binary_log_loss(o, dy.scalarInput(t)))
        # labels: cross-entropy on the label LSTM run over each gold chain
        gs_chains, labels = self._get_gs_chains(seq)
        for chain, label in zip(gs_chains, labels):
            label_rnn = self.label_decoder.initial_state()
            for index in chain:
                label_rnn = label_rnn.add_input(proj_x3[index])
            label_softmax = dy.softmax(
                self.label_w.expr(update=True) * label_rnn.output() + self.label_b.expr(update=True))
            self.losses.append(-dy.log(dy.pick(label_softmax, self.encodings.label2int[label])))

    def _decode(self, output, proj_x):
        # Thresholds link probabilities at 0.5 into a symmetric adjacency
        # matrix, collects fully-connected token groups via backtracking,
        # deduplicates them, then labels each group with the label LSTM.
        expressions = []
        labels = []
        a = np.zeros((len(output), len(output)))
        for iSrc in range(len(output)):
            for iDst in range(len(output)):
                if iDst > iSrc:
                    if output[iSrc][iDst].value() >= 0.5:
                        a[iSrc][iDst] = 1
                        a[iDst][iSrc] = 1
        for iSrc in range(len(output)):
            exprs = []
            current_nodes = [iSrc]
            self._backtrack(a, current_nodes, exprs)
            [expr.sort() for expr in exprs]
            # check for duplicates
            for expr in exprs:
                valid = True
                for e_expr in expressions:
                    if e_expr == expr:
                        valid = False
                        break
                if valid:
                    expressions.append(expr)
        for expression in expressions:
            lstm_label = self.label_decoder.initial_state()
            for index in expression:
                lstm_label = lstm_label.add_input(proj_x[index])
            label_soft = self.label_w.expr(update=True) * lstm_label.output() + self.label_b.expr(update=True)
            label_index = np.argmax(label_soft.npvalue())
            labels.append(self.encodings.labels[label_index])
        return expressions, labels
| 37.959538 | 120 | 0.551241 |
89a529b9a3e25587a11feaa73e5ce9883fa6b6b9 | 6,327 | py | Python | tests/test_api.py | irishgordo/newrelic-lambda-cli | 94039961ee2ca28ada1c4e78eba50a5ba3e84597 | [
"Apache-2.0"
] | 29 | 2019-11-21T18:39:02.000Z | 2022-02-11T19:28:25.000Z | tests/test_api.py | irishgordo/newrelic-lambda-cli | 94039961ee2ca28ada1c4e78eba50a5ba3e84597 | [
"Apache-2.0"
] | 118 | 2019-11-21T18:32:00.000Z | 2022-03-31T23:58:31.000Z | tests/test_api.py | irishgordo/newrelic-lambda-cli | 94039961ee2ca28ada1c4e78eba50a5ba3e84597 | [
"Apache-2.0"
] | 28 | 2019-11-21T18:02:38.000Z | 2022-02-26T10:08:38.000Z | from unittest.mock import Mock
from newrelic_lambda_cli.api import (
create_integration_account,
enable_lambda_integration,
NewRelicGQL,
)
from .conftest import integration_install
def test_create_integration_account():
    """create_integration_account() returns the linked AWS account,
    issuing the cloudLinkAccount mutation only when no link exists yet."""
    mock_gql = NewRelicGQL("123456789", "foobar")
    # Case 1: the account is already linked — the existing linked account is
    # returned as-is and no mutation is issued (single query call).
    mock_gql.query = Mock(
        return_value={
            "actor": {
                "account": {
                    "cloud": {
                        "linkedAccounts": [
                            {
                                "authLabel": "arn:aws:iam::123456789:role/FooBar",
                                "externalId": "123456789",
                                "name": "Foo Bar",
                            }
                        ]
                    }
                }
            }
        }
    )
    # NOTE(review): `input` shadows the builtin of the same name.
    input = integration_install(nr_account_id=123456789, linked_account_name="Foo Bar")
    role = {"Role": {"Arn": "arn:aws:iam::123456789:role/FooBar"}}
    assert create_integration_account(mock_gql, input, role) == {
        "authLabel": "arn:aws:iam::123456789:role/FooBar",
        "externalId": "123456789",
        "name": "Foo Bar",
    }
    # Case 2: no existing link — the first query returns an empty list, the
    # second (the cloudLinkAccount mutation) returns the newly created link.
    mock_gql.query = Mock(
        side_effect=(
            {"actor": {"account": {"cloud": {"linkedAccounts": []}}}},
            {
                "cloudLinkAccount": {
                    "linkedAccounts": [
                        {
                            "authLabel": "arn:aws:iam::123456789:role/FooBar",
                            "externalId": "123456789",
                            "name": "Foo Bar",
                        }
                    ]
                }
            },
        )
    )
    assert create_integration_account(mock_gql, input, role) == {
        "authLabel": "arn:aws:iam::123456789:role/FooBar",
        "externalId": "123456789",
        "name": "Foo Bar",
    }
def test_enable_lambda_integration():
    """enable_lambda_integration() behavior across link/metric-mode states.

    The Mock side_effect tuples model successive GraphQL responses; their
    ordering matters (linked-accounts lookup first, then integration lookup,
    then the enable mutation)."""
    mock_gql = NewRelicGQL("123456789", "foobar")
    # Case 1: account not linked at all — integration cannot be enabled.
    mock_gql.query = Mock(
        return_value={"actor": {"account": {"cloud": {"linkedAccounts": []}}}},
    )
    # NOTE(review): `input` shadows the builtin of the same name.
    input = integration_install(nr_account_id=123456789, linked_account_name="Foo Bar")
    lambda_enabled = enable_lambda_integration(mock_gql, input, 123456789)
    assert (
        lambda_enabled is False
    ), "Account should be linked to enable the lambda integration"
    assert mock_gql.query.call_count == 1
    # Case 2: linked account in PUSH mode (CloudWatch Metric Streams) — the
    # Lambda integration is implicitly enabled; no further queries needed.
    mock_gql.query = Mock(
        side_effect=(
            {
                "actor": {
                    "account": {
                        "cloud": {
                            "linkedAccounts": [
                                {
                                    "authLabel": "arn:aws:iam::123456789:role/FooBar",
                                    "externalId": "123456789",
                                    "id": 123456789,
                                    "name": "Foo Bar",
                                    "metricCollectionMode": "PUSH",
                                }
                            ]
                        }
                    }
                }
            },
        )
    )
    lambda_enabled = enable_lambda_integration(mock_gql, input, 123456789)
    assert mock_gql.query.call_count == 1
    assert (
        lambda_enabled is True
    ), "Accounts in PUSH mode (using Cloudwatch Metrics stream) should already have the Lambda integration enabled"
    # Case 3: linked in PULL mode and the integration is already enabled —
    # two queries (link lookup + integration lookup), no mutation.
    mock_gql.query = Mock(
        side_effect=(
            {
                "actor": {
                    "account": {
                        "cloud": {
                            "linkedAccounts": [
                                {
                                    "authLabel": "arn:aws:iam::123456789:role/FooBar",
                                    "externalId": "123456789",
                                    "id": 123456789,
                                    "name": "Foo Bar",
                                    "metricCollectionMode": "PULL",
                                }
                            ]
                        }
                    }
                }
            },
            {
                "actor": {
                    "account": {
                        "cloud": {
                            "linkedAccount": {
                                "integrations": [
                                    {"service": {"isEnabled": True, "slug": "lambda"}}
                                ]
                            }
                        }
                    },
                }
            },
        )
    )
    lambda_enabled = enable_lambda_integration(mock_gql, input, 123456789)
    assert mock_gql.query.call_count == 2
    assert (
        lambda_enabled is True
    ), "Account is linked and already has the lambda integration enabled"
    # Case 4: linked in PULL mode without the integration — a third call (the
    # cloudConfigureIntegration mutation) enables it.
    mock_gql.query = Mock(
        side_effect=(
            {
                "actor": {
                    "account": {
                        "cloud": {
                            "linkedAccounts": [
                                {
                                    "authLabel": "arn:aws:iam::123456789:role/FooBar",
                                    "externalId": "123456789",
                                    "id": 123456789,
                                    "name": "Foo Bar",
                                    "metricCollectionMode": "PULL",
                                }
                            ]
                        }
                    }
                }
            },
            {
                "actor": {
                    "account": {"cloud": {"linkedAccount": {"integrations": []}}},
                }
            },
            {
                "cloudConfigureIntegration": {
                    "integrations": [
                        {
                            "id": 123456789,
                            "name": "Foo Bar",
                            "service": {"isEnabled": True, "slug": "lambda"},
                        }
                    ]
                }
            },
        )
    )
    lambda_enabled = enable_lambda_integration(mock_gql, input, 123456789)
    assert mock_gql.query.call_count == 3
    assert (
        lambda_enabled is True
    ), "Account is linked but didn't have the lambda integration enabled, so it should be configured"
| 33.47619 | 115 | 0.3896 |
0cbb70da99f850917f201776aa1fd98ae978b38e | 49,519 | py | Python | test/text_generation_test/build_rl_transformer.py | AnonymousAuthor2013/PostRec | a1461f716d177e28b96ca29d1398f96b5717c1e1 | [
"MIT"
] | null | null | null | test/text_generation_test/build_rl_transformer.py | AnonymousAuthor2013/PostRec | a1461f716d177e28b96ca29d1398f96b5717c1e1 | [
"MIT"
] | null | null | null | test/text_generation_test/build_rl_transformer.py | AnonymousAuthor2013/PostRec | a1461f716d177e28b96ca29d1398f96b5717c1e1 | [
"MIT"
] | null | null | null | import random
import os
import importlib
from torchtext import data
import tensorflow as tf
import texar as tx
import numpy as np
from post_rec.models.rl_utility import utils,data_utils
from post_rec.models.rl_utility.config_data import eos_token_id, bos_token_id
from post_rec.models.GenerationNets.BertRLAnswerNet import BertRLTransformer
from post_rec.models.GenerationNets.BertRLAnswerNet import VocabWrapper
from bert import modeling
import argparse
import tqdm
import logging
from texar.utils import transformer_utils
from texar.modules.decoders import transformer_decoders
from bert import optimization
# Root logger configuration for this training script.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
# Uses the best sample by beam search
# Tracks the best corpus BLEU seen so far and the epoch that produced it;
# computeScore() updates this and checkpoints the model on improvement.
best_results = {'score': 0, 'epoch': -1}
def computeScore(epoch, sess, hypotheses, references=None, step=None, mode="eval"):
    """Detokenize hypotheses/references, dump them to disk and score BLEU.

    When *references* is None or empty, only the predictions are written and
    no score is computed.  On a new best mean sentence-BLEU the current model
    is checkpointed via BertRLTransformer.saveModel().

    Bug fix vs. the original: the first log line called len(references)
    unconditionally, which raised TypeError for the declared default
    references=None.
    """
    n_refs = len(references) if references else 0
    logger.info("computing score and save predicting results: {}/{}".format(
        len(hypotheses), n_refs))
    hypotheses_text = []
    references_text = []
    for i in range(len(hypotheses)):
        # hypotheses: an empty beam decodes to a placeholder token
        if not hypotheses[i]:
            hypotheses_text.append("unk")
        else:
            hypotheses_text.append(tx.utils.map_ids_to_strs(
                hypotheses[i], vocab, strip_bos="[BOS]", strip_pad="[PAD]",
                strip_eos="[EOS]", join=True))
        # references (only when provided)
        if references and len(references) > 0:
            references_text.append(tx.utils.map_ids_to_strs(
                references[i], vocab, strip_bos="[BOS]", strip_pad="[PAD]",
                strip_eos="[EOS]", join=True))
    logger.info("hypo:%d" % len(hypotheses_text))
    for h in hypotheses_text[:3]:
        logger.info(h)
    logger.info("refs:%d" % len(references_text))
    for r in references_text[:3]:
        logger.info(r)
    if not references:
        # Prediction-only mode: write hypotheses and return without scoring.
        out_path = os.path.join(
            FLAGS.model_dir, 'tmp.{}.{}.predict'.format(machine_host, mode))
        with open(out_path, "w") as f:
            f.writelines(map(lambda l: l + "\n", hypotheses_text))
        return
    fname = os.path.join(FLAGS.model_dir, 'tmp.{}.{}'.format(machine_host, mode))
    tx.utils.write_paired_text(
        hypotheses_text, references_text, fname, mode='s',
        src_fname_suffix="predict", tgt_fname_suffix="truth")
    # Corpus score = mean of smoothed sentence-level BLEU.
    bleu_scores = []
    for ref, hyp in zip(references_text, hypotheses_text):
        bleu_scores.append(tx.evals.sentence_bleu([ref], hyp, smooth=True))
    eval_bleu = np.mean(bleu_scores)
    logger.info('epoch: {}, step: {}, eval_bleu {}'.format(epoch, step, eval_bleu))
    if eval_bleu > best_results['score']:
        logger.info('epoch: {}, best bleu: {}'.format(epoch, eval_bleu))
        best_results['score'] = eval_bleu
        best_results['epoch'] = epoch
        model_path = os.path.join(FLAGS.model_dir, model_name)
        logger.info('saving model to %s' % model_path)
        BertRLTransformer.saveModel(sess, model_name)
def testModel(epoch, src_data, tgt_data=None):
    # Runs beam-search inference over src_data and (when tgt_data is given)
    # scores the hypotheses against it via computeScore().
    references, hypotheses = [], []
    bsize = config_data.batch_size
    beam_width = config_model.beam_width
    encoder_input, predictions = BertRLTransformer.createInferenceModel()
    # Uses the best sample by beam search (beam index 0).
    beam_search_ids = predictions['sample_id'][:, :, 0]
    logger.info("evaluating epoch:{} with beam size={}".format(epoch, beam_width))
    with tf.Session() as sess:
        logger.info("init variables !")
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.tables_initializer())
        BertRLTransformer.loadModel(sess, model_name)
        # computing evaluation output
        for i in tqdm.trange(0, len(src_data), bsize):
            sources = src_data[i:i+bsize]
            if tgt_data is not None:
                targets = tgt_data[i:i+bsize]
            x_block = data_utils.source_pad_concat_convert(sources)
            feed_dict = {
                encoder_input: x_block,
            }
            fetches = {
                'beam_search_ids': beam_search_ids,
            }
            fetches_ = sess.run(fetches, feed_dict=feed_dict)
            # NOTE(review): indentation was lost in the dump; the EOS
            # stripping is reconstructed inside the batch loop here (it is
            # idempotent), but it may originally have run once after the loop.
            hypotheses.extend(h.tolist() for h in fetches_['beam_search_ids'])
            hypotheses = utils.list_strip_eos(hypotheses, eos_token_id)
            if tgt_data is not None:
                references.extend(r.tolist() for r in targets)
                references = utils.list_strip_eos(references, eos_token_id)
        logger.info("get {} h and {} ref".format(len(hypotheses), len(references)))
        computeScore(epoch, sess, hypotheses, references, mode="test")
def train_model():
    # MLE training of the BERT-encoder / Transformer-decoder seq2seq model
    # with label-smoothed cross entropy, periodic evaluation and
    # best-accuracy checkpointing.

    # Build model graph
    encoder_input = tf.placeholder(tf.int64, shape=(None, None))
    decoder_input = tf.placeholder(tf.int64, shape=(None, None))
    # (text sequence length excluding padding; pad id is 0)
    encoder_input_length = tf.reduce_sum(
        1 - tf.to_int32(tf.equal(encoder_input, 0)), axis=1)
    decoder_input_length = tf.reduce_sum(
        1 - tf.to_int32(tf.equal(decoder_input, 0)), axis=1)
    labels = tf.placeholder(tf.int64, shape=(None, None))
    # Mask that zeroes the loss on padding positions.
    is_target = tf.to_float(tf.not_equal(labels, 0))
    global_step = tf.Variable(0, dtype=tf.int64, trainable=False)
    learning_rate = tf.placeholder(tf.float64, shape=(), name='lr')
    vocab_size = BertRLTransformer.bert_config.vocab_size
    encoder_output, embedder = BertRLTransformer.bertTransformerEncoder(True, encoder_input)
    # The decoder shares BERT's token-embedding table.
    tgt_embedding = embedder

    def __computeEmbedding(embedding_table, input_ids):
        # Gathers embeddings for input_ids from the BERT embedding table,
        # reshaping back to [batch, seq, hidden].
        if input_ids.shape.ndims == 2:
            input_ids = tf.expand_dims(input_ids, axis=[-1])
        flat_decoder_input_ids = tf.reshape(input_ids, [-1])
        embedded = tf.gather(embedding_table, flat_decoder_input_ids)
        input_shape = modeling.get_shape_list(input_ids)
        embedded = tf.reshape(embedded,
                              input_shape[0:-1] + [input_shape[-1] * BertRLTransformer.bert_config.hidden_size])
        return embedded

    decoder_emb_input = __computeEmbedding(embedder, decoder_input)
    decoder = transformer_decoders.TransformerDecoder(embedding=tgt_embedding,
                                                      hparams=BertRLTransformer.config_model.decoder)
    # Teacher forcing during training.
    outputs = decoder(
        memory=encoder_output,
        memory_sequence_length=encoder_input_length,
        inputs=decoder_emb_input,  # embedder(decoder_input),
        sequence_length=decoder_input_length,
        decoding_strategy='train_greedy',
        mode=tf.estimator.ModeKeys.TRAIN
    )
    # test accuracy
    accuracy = tx.evals.accuracy(labels=labels, preds=outputs.sample_id)
    # Label-smoothed cross entropy, averaged over non-pad positions.
    mle_loss = transformer_utils.smoothing_cross_entropy(
        outputs.logits, labels, vocab_size, BertRLTransformer.config_model.loss_label_confidence)
    mle_loss = tf.reduce_sum(mle_loss * is_target) / tf.reduce_sum(is_target)
    train_op = tx.core.get_train_op(
        mle_loss,
        learning_rate=learning_rate,
        global_step=global_step,
        hparams=BertRLTransformer.config_model.opt)
    tf.summary.scalar('lr', learning_rate)
    tf.summary.scalar('mle_loss', mle_loss)
    tf.summary.scalar("accuracy", accuracy)
    summary_merged = tf.summary.merge_all()
    logger.info("transformer graph defined !")

    # Mutable holder for the best eval accuracy seen so far.
    # NOTE(review): class name "AccurayPerformanceRecord" is a typo for
    # "Accuracy..." — left unchanged since it is referenced below.
    class AccurayPerformanceRecord:
        best_acc = 0
        step = 0

    def _eval(epoch, sess:tf.Session):
        # Evaluates accuracy/loss on eval_data, checkpointing on improvement.
        logger.info("evaluating")
        bsize = config_data.batch_size
        for i in range(0, len(eval_data), bsize):
            in_arrays = data_utils.seq2seq_pad_concat_convert(eval_data[i:i+bsize])
            '''feed_dict = {
                encoder_input: in_arrays[0],
                decoder_input: in_arrays[1],
                labels: in_arrays[2],
                learning_rate: 0.1,
                tx.global_mode(): tf.estimator.ModeKeys.EVAL,
            }
            fetches={
                "sample_ids":predictions
            }'''
            handle = sess.partial_run_setup([accuracy, mle_loss], [encoder_input, decoder_input, labels, tx.global_mode()])
            acc, loss = sess.partial_run(handle, fetches=[accuracy, mle_loss], feed_dict=
                {encoder_input: in_arrays[0], decoder_input: in_arrays[1], labels: in_arrays[2], tx.global_mode(): tf.estimator.ModeKeys.EVAL})
            # NOTE(review): `step` is closed over from train_model's body and
            # is only bound once training has started.
            if acc > AccurayPerformanceRecord.best_acc:
                BertRLTransformer.saveModel(sess, model_name)
                AccurayPerformanceRecord.best_acc = acc
                AccurayPerformanceRecord.step = step
            logger.info("test=> epoch:{}, acc/best_acc:{}/{}, loss:{}".format(epoch, acc, AccurayPerformanceRecord.best_acc, loss))

    # begin train or eval
    def _train_epoch(sess, epoch, step, smry_writer):
        # Runs one epoch of MLE training; returns the updated global step.
        logger.info("training epoch:{}".format(epoch))
        random.shuffle(train_data)
        train_iter = data.iterator.pool(
            train_data,
            config_data.batch_size,
            key=lambda x: (len(x[0]), len(x[1])),
            random_shuffler=data.iterator.RandomShuffler()
        )
        for train_batch in tqdm.tqdm(train_iter, desc="training"):
            # logger.info("batch",len(train_batch),)
            in_arrays = data_utils.seq2seq_pad_concat_convert(train_batch)
            # logger.info(in_arrays[0].shape,in_arrays[1].shape,in_arrays[2].shape)
            feed_dict = {
                encoder_input: in_arrays[0],
                decoder_input: in_arrays[1],
                labels: in_arrays[2],
                learning_rate: utils.get_lr(step, config_model.lr),
                # tx.global_mode(): tf.estimator.ModeKeys.TRAIN,
            }
            fetches = {
                'step': global_step,
                'train_op': train_op,
                'smry': summary_merged,
                'loss': mle_loss,
                'acc': accuracy
            }
            fetches_ = sess.run(fetches, feed_dict=feed_dict)
            step, loss, acc = fetches_['step'], fetches_['loss'], fetches_["acc"]
            if step and step % config_data.display_steps == 0:
                logger.info('step: %d, loss: %.4f, acc: %.4f' % (step, loss, acc))
                smry_writer.add_summary(fetches_['smry'], global_step=step)
            if step and step % config_data.eval_steps == 0:
                _eval(epoch, sess)
        return step

    # Run the graph
    with tf.Session() as sess:
        logger.info("init variables !")
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.tables_initializer())
        if FLAGS.train_from:
            # Resume from a previous checkpoint.
            BertRLTransformer.loadModel(sess, model_name)
        else:
            # Fresh run: initialize the encoder from pretrained BERT weights.
            BertRLTransformer.initBert(sess)
        smry_writer = tf.summary.FileWriter(FLAGS.model_dir, graph=sess.graph)
        logger.info('Begin running with train_and_evaluate mode')
        best_acc = 0
        step = 0
        for epoch in range(config_data.max_train_epoch):
            if step >= config_data.train_steps:
                break
            step = _train_epoch(sess, epoch, step, smry_writer)
def train_rl():
# Build model graph
encoder_input = tf.placeholder(tf.int64, shape=(None, None))
decoder_input = tf.placeholder(tf.int64, shape=(None, None))
# (text sequence length excluding padding)
encoder_input_length = tf.reduce_sum(
1 - tf.to_int32(tf.equal(encoder_input, 0)), axis=1)
decoder_input_length = tf.reduce_sum(
1 - tf.to_int32(tf.equal(decoder_input, 0)), axis=1)
global_step = tf.Variable(0, dtype=tf.int64, trainable=False)
qvalue_inputs = tf.placeholder(dtype=tf.float32,shape=[None, None],name='qvalue_inputs')
def transformer_rl_model(enc_x,dec_x,enc_len,dec_len):
encoder_output, embedder=BertRLTransformer.bertTransformerEncoder(True,enc_x)
tgt_embedding=embedder
def __computeEmbedding(embedding_table,input_ids):
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
flat_decoder_input_ids = tf.reshape(input_ids, [-1])
embedded = tf.gather(embedding_table, flat_decoder_input_ids)
input_shape = modeling.get_shape_list(input_ids)
embedded = tf.reshape(embedded,
input_shape[0:-1] + [input_shape[-1] * BertRLTransformer.bert_config.hidden_size])
return embedded
decoder_emb_input=__computeEmbedding(embedder,dec_x)
decoder = transformer_decoders.TransformerDecoder(embedding=tgt_embedding,
hparams=BertRLTransformer.config_model.decoder)
outputs = decoder(
memory=encoder_output,
memory_sequence_length=enc_len,
inputs=decoder_emb_input,
sequence_length=dec_len,
decoding_strategy='train_greedy',
mode=tf.estimator.ModeKeys.TRAIN
)
# For training
start_tokens = tf.fill([tx.utils.get_batch_size(enc_x)],
bos_token_id)
'''decoder_emb_input=__computeEmbedding(embedder,dec_x)
helper = tx.modules.TopKSampleEmbeddingHelper(
embedding=EmbedderWrapper(embedding_table=embedder),
start_tokens=start_tokens,
end_token=eos_token_id,
top_k=1,
softmax_temperature=0.7)
outputs, sequence_length = decoder(
max_decoding_length=config_model.max_decoding_length,
helper=helper,
mode=tf.estimator.ModeKeys.TRAIN)
'''
outputs, sequence_length = decoder(
memory=encoder_output,
memory_sequence_length=enc_len,
inputs=decoder_emb_input,
sequence_length=dec_len,
start_tokens=start_tokens,
end_token=eos_token_id,
decoding_strategy='infer_sample',
mode=tf.estimator.ModeKeys.TRAIN
)
'''from post_rec.models.rl_utility.seq_agent import SeqPGAgent
agent = SeqPGAgent(
samples=outputs.sample_id,
logits=outputs.logits,
sequence_length=sequence_length,
hparams=BertRLTransformer.config_model.agent)'''
from texar.losses.pg_losses import pg_loss_with_logits
from texar.losses.entropy import sequence_entropy_with_logits
agent_hparams=tx.HParams(config_model.agent,None)
loss_hparams = agent_hparams.loss
pg_loss = pg_loss_with_logits(
actions=outputs.sample_id,
logits=outputs.logits,
sequence_length=sequence_length,
advantages=qvalue_inputs,
batched=True,
average_across_batch=loss_hparams.average_across_batch,
average_across_timesteps=loss_hparams.average_across_timesteps,
sum_over_batch=loss_hparams.sum_over_batch,
sum_over_timesteps=loss_hparams.sum_over_timesteps,
time_major=loss_hparams.time_major)
if agent_hparams.entropy_weight > 0:
entropy=sequence_entropy_with_logits(
outputs.logits,
sequence_length=sequence_length,
average_across_batch=loss_hparams.average_across_batch,
average_across_timesteps=loss_hparams.average_across_timesteps,
sum_over_batch=loss_hparams.sum_over_batch,
sum_over_timesteps=loss_hparams.sum_over_timesteps,
time_major=loss_hparams.time_major)
pg_loss -= agent_hparams.entropy_weight * entropy
return pg_loss, outputs, sequence_length
agent_hparams=tx.HParams(config_model.agent,None)
agent_loss,outputs, dec_out_seq_len =transformer_rl_model(encoder_input,decoder_input,encoder_input_length,decoder_input_length)
predictions=outputs.sample_id
tvars=tf.trainable_variables()
grads=tf.gradients(agent_loss,tvars)
grads,_=tf.clip_by_global_norm(grads,clip_norm=1.0)
grads=list(zip(grads,tvars))
#train method
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=config_data.init_lr, shape=[], dtype=tf.float32, name="lr")
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
config_data.train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if config_data.warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(config_data.warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = config_data.init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
#'''
optimizer = optimization.AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
train_op=optimizer.apply_gradients(grads,global_step=global_step)
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
tf.summary.scalar('lr', learning_rate)
tf.summary.scalar('agent_loss', agent_loss)
summary_merged = tf.summary.merge_all()
logger.info("parallel gpu computing graph for reinforcement learning defined!")
from texar.losses.rewards import discount_reward
def _eval(epoch, step, sess:tf.Session):
logger.info("evaluating")
hypotheses,references=[],[]
bsize=config_data.batch_size
for i in range(0,len(eval_data),bsize):
in_arrays=data_utils.seq2seq_pad_concat_convert(eval_data[i:i+bsize])
feed_dict = {
encoder_input: in_arrays[0],
#decoder_input: in_arrays[1],
tx.global_mode(): tf.estimator.ModeKeys.EVAL,
}
fetches={
"sample_ids":predictions
}
fetches_ = sess.run(fetches, feed_dict=feed_dict)
hypotheses.extend(h.tolist() for h in fetches_['sample_ids'])
references.extend(r.tolist() for r in in_arrays[1])
hypotheses = utils.list_strip_eos(hypotheses, eos_token_id)
references = utils.list_strip_eos(references, eos_token_id)
computeScore(epoch, sess, hypotheses,references, step)
def _train_epoch(epoch, step):
logger.info("training epoch:{}".format(epoch))
random.shuffle(train_data)
train_iter = data.iterator.pool(
train_data,
config_data.batch_size,
key=lambda x: (len(x[0]), len(x[1])),
random_shuffler=data.iterator.RandomShuffler()
)
#rl train
for train_batch in tqdm.tqdm(train_iter,desc="training"):
in_arrays = data_utils.seq2seq_pad_concat_convert(train_batch)
handle=sess.partial_run_setup(fetches=[predictions,dec_out_seq_len,global_step,agent_loss,train_op,summary_merged],
feeds=[encoder_input,decoder_input,qvalue_inputs])
fetches=sess.partial_run(handle,fetches={"samples":predictions,"dec_len":dec_out_seq_len},
feed_dict={encoder_input:in_arrays[0] } )
samples, decoder_out_length_py=fetches["samples"], fetches["dec_len"]
sample_text = tx.utils.map_ids_to_strs(
samples, vocab,
strip_pad="[PAD]",strip_bos="[BOS]",strip_eos="[EOS]",
join=False)
truth_text = tx.utils.map_ids_to_strs(
in_arrays[1], vocab,
strip_pad="[PAD]",strip_bos="[BOS]",strip_eos="[EOS]",
join=False)
# Computes rewards
reward = []
for ref, hyp in zip(truth_text, sample_text):
r = tx.evals.sentence_bleu([ref], hyp, smooth=True)
reward.append(r)
qvalues = discount_reward(
reward,
decoder_out_length_py,
discount=agent_hparams.discount_factor,
normalize=agent_hparams.normalize_reward)
feed_dict = {
encoder_input: in_arrays[0],
decoder_input: in_arrays[1],
qvalue_inputs:qvalues,
tx.global_mode(): tf.estimator.ModeKeys.TRAIN,
}
# Samples
fetches = {
'step': global_step,
'loss':agent_loss,
'train_op':train_op,
"sumry":summary_merged,
}
fetches = sess.run(fetches,feed_dict=feed_dict)
# Displays
step = fetches['step']
loss=fetches["loss"]
if step and step % config_data.display_steps == 0:
logger.info("rl:epoch={}, step={}, loss={:.4f}, reward={:.4f}".format(
epoch, step, loss, np.mean(reward)))
smry_writer.add_summary(fetches['smry'], global_step=step)
if step and step%config_data.eval_steps==0:
_eval(epoch,step,sess)
return step
# Run the graph
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
with tf.Session(config=sess_config) as sess:
logger.info("init variables !")
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
BertRLTransformer.initBert(sess,2)
smry_writer = tf.summary.FileWriter(FLAGS.model_dir, graph=sess.graph)
logger.info('Begin running with train_and_evaluate mode')
step = 0
for epoch in range(config_data.max_train_epoch):
if step>=config_data.train_steps:
break
step = _train_epoch( epoch, step)
#parallel
def average_gradients(tower_grads, verbose=0):
    """Average per-tower (gradient, variable) lists element-wise.

    Args:
        tower_grads: list of length n_group (one entry per GPU tower); each
            entry is a list of (grad, var) pairs in the same variable order.
        verbose: when > 0, log every averaged (grad, var) pair.

    Returns:
        A single list of (averaged_grad, var) pairs. A variable whose
        gradient is None in every tower keeps a None gradient.
    """
    average_grads_and_vars = []
    n_group = len(tower_grads)
    var_num = len(tower_grads[0])
    # Fix: removed a leftover debug `print(len(tower_grads))` that wrote to
    # stdout on every call; the logger.info summary below already reports it.
    for i in range(var_num):
        grads = []
        var = None
        for j in range(n_group):
            grad, var = tower_grads[j][i]
            if grad is not None:
                # Add a leading "tower" axis so the grads can be stacked.
                grads.append(tf.expand_dims(grad, axis=0))
        assert var is not None
        if not grads:
            grad_var = (None, var)
        else:
            stacked = tf.concat(grads, axis=0)
            grad_var = (tf.reduce_mean(stacked, axis=0), var)
        if verbose > 0:
            logger.info(grad_var)
        average_grads_and_vars.append(grad_var)
    logger.info("n_group:{}, var_num:{}/{}".format(
        n_group, len(average_grads_and_vars), var_num))
    return average_grads_and_vars
def train_transformer_parallel():
    """Entrypoint: MLE training of the BERT-encoder transformer on multiple GPUs.

    Builds one model replica ("tower") per GPU on its own slice of the batch,
    averages the per-tower gradients, then runs the train/eval loop.
    """
    from texar.modules.decoders import TransformerDecoder
    from texar.utils import transformer_utils

    def transformerModel(enc_x, dec_x, enc_len, dec_len):
        # Encode with BERT and reuse its token-embedding table for the decoder.
        encoder_output, emb_tabel = BertRLTransformer.bertTransformerEncoder(True, enc_x)
        tgt_embedding = emb_tabel

        def __computeEmbedding(embedding_table, input_ids):
            # Look up embeddings for a [batch, time] id tensor.
            if input_ids.shape.ndims == 2:
                input_ids = tf.expand_dims(input_ids, axis=[-1])
            flat_decoder_input_ids = tf.reshape(input_ids, [-1])
            embedded = tf.gather(embedding_table, flat_decoder_input_ids)
            input_shape = modeling.get_shape_list(input_ids)
            embedded = tf.reshape(
                embedded,
                input_shape[0:-1] + [input_shape[-1] * BertRLTransformer.bert_config.hidden_size])
            return embedded

        decoder_emb_input = __computeEmbedding(emb_tabel, dec_x)
        decoder = TransformerDecoder(embedding=tgt_embedding,
                                     hparams=config_model.decoder)
        # Teacher-forcing decode for training.
        outputs = decoder(
            memory=encoder_output,
            memory_sequence_length=enc_len,
            inputs=decoder_emb_input,
            sequence_length=dec_len,
            decoding_strategy='train_greedy',
            mode=tf.estimator.ModeKeys.TRAIN)
        return outputs

    def computeLoss(logits, labels):
        # Label-smoothed cross entropy, averaged over non-padding positions
        # (pad id == 0).
        is_target = tf.to_float(tf.not_equal(labels, 0))
        mle_loss = transformer_utils.smoothing_cross_entropy(
            logits, labels, BertRLTransformer.bert_config.vocab_size,
            config_model.loss_label_confidence)
        return tf.reduce_sum(mle_loss * is_target) / tf.reduce_sum(is_target)

    # ---- Graph inputs ----
    encoder_input = tf.placeholder(tf.int64, shape=(None, None))
    decoder_input = tf.placeholder(tf.int64, shape=(None, None))
    # Text sequence lengths excluding padding.
    encoder_input_length = tf.reduce_sum(
        1 - tf.to_int32(tf.equal(encoder_input, 0)), axis=1)
    decoder_input_length = tf.reduce_sum(
        1 - tf.to_int32(tf.equal(decoder_input, 0)), axis=1)
    labels = tf.placeholder(tf.int64, shape=(None, None))

    # ---- One tower per GPU, each consuming its own slice of the batch ----
    tower_grads = []
    n_gpu = FLAGS.gpu_num
    batch_size = config_data.batch_size
    mle_loss = []
    predictions = []
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        for i in range(n_gpu):
            with tf.device("%s:%d" % (device_name, i)):
                with tf.name_scope("tower_%d" % i):
                    enc_x = encoder_input[i * batch_size:(i + 1) * batch_size]
                    enc_len = encoder_input_length[i * batch_size:(i + 1) * batch_size]
                    dec_y = decoder_input[i * batch_size:(i + 1) * batch_size]
                    dec_len = decoder_input_length[i * batch_size:(i + 1) * batch_size]
                    dec_out = transformerModel(enc_x=enc_x, dec_x=dec_y,
                                               enc_len=enc_len, dec_len=dec_len)
                    dec_label = labels[i * batch_size:(i + 1) * batch_size]
                    predictions.append(dec_out.sample_id)
                    # Share variables across towers.
                    tf.get_variable_scope().reuse_variables()
                    loss = computeLoss(dec_out.logits, dec_label)
                    mle_loss.append(loss)
                    tvars = tf.trainable_variables()
                    grads = tf.gradients(loss, tvars)
                    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
                    tower_grads.append(list(zip(grads, tvars)))
    grads = average_gradients(tower_grads, verbose=2)
    mle_loss = tf.reduce_mean(tf.stack(mle_loss, axis=0), axis=0)
    predictions = tf.concat(predictions, axis=0, name="predictions")
    accuracy = tx.evals.accuracy(labels, predictions)

    # ---- LR schedule: polynomial decay with linear warmup ----
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.constant(value=config_data.init_lr, shape=[],
                                dtype=tf.float32, name="lr")
    learning_rate = tf.train.polynomial_decay(
        learning_rate,
        global_step,
        config_data.train_steps,
        end_learning_rate=0.0,
        power=1.0,
        cycle=False)
    # If global_step < num_warmup_steps the learning rate is
    # `global_step / num_warmup_steps * init_lr`.
    if config_data.warmup_steps:
        global_steps_int = tf.cast(global_step, tf.int32)
        warmup_steps_int = tf.constant(config_data.warmup_steps, dtype=tf.int32)
        global_steps_float = tf.cast(global_steps_int, tf.float32)
        warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
        warmup_percent_done = global_steps_float / warmup_steps_float
        warmup_learning_rate = config_data.init_lr * warmup_percent_done
        is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
        learning_rate = (
            (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)

    from bert import optimization
    optimizer = optimization.AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
    train_op = optimizer.apply_gradients(grads, global_step=global_step)
    # The BERT optimizer does not advance global_step itself; do it here.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    tf.summary.scalar('lr', learning_rate)
    tf.summary.scalar('mle_loss', mle_loss)
    tf.summary.scalar('accuracy', accuracy)
    summary_merged = tf.summary.merge_all()
    logger.info("parallel gpu computing graph defined!")

    # The effective batch spans all towers.
    bsize = config_data.batch_size * n_gpu

    def _eval(epoch, step, sess: tf.Session):
        logger.info("evaluating")
        accs = []
        losses = []
        for i in range(0, len(eval_data), bsize):
            in_arrays = data_utils.seq2seq_pad_concat_convert(eval_data[i:i + bsize])
            feed_dict = {
                encoder_input: in_arrays[0],
                decoder_input: in_arrays[1],
                labels: in_arrays[2],
                tx.global_mode(): tf.estimator.ModeKeys.EVAL,
            }
            fetches_ = sess.run({"acc": accuracy, "loss": mle_loss},
                                feed_dict=feed_dict)
            accs.append(fetches_["acc"])
            losses.append(fetches_["loss"])
        logger.info("eval epoch:{}, step: {}, acc: {}, loss: {}".format(
            epoch, step, np.mean(accs), np.mean(losses)))

    def _train_epoch(sess, epoch, step, smry_writer):
        random.shuffle(train_data)
        train_iter = data.iterator.pool(
            train_data,
            bsize,
            key=lambda x: (len(x[0]), len(x[1])),
            random_shuffler=data.iterator.RandomShuffler())
        accs = []
        losses = []
        for train_batch in tqdm.tqdm(train_iter):
            in_arrays = data_utils.seq2seq_pad_concat_convert(train_batch)
            # NOTE(review): feeding `learning_rate` overrides the in-graph
            # decay/warmup schedule built above — confirm this is intended.
            feed_dict = {
                encoder_input: in_arrays[0],
                decoder_input: in_arrays[1],
                labels: in_arrays[2],
                learning_rate: utils.get_lr(step, config_model.lr)
            }
            fetches = {
                'step': global_step,
                'train_op': train_op,
                'smry': summary_merged,
                'loss': mle_loss,
                'acc': accuracy
            }
            fetches_ = sess.run(fetches, feed_dict=feed_dict)
            step, loss, acc = fetches_['step'], fetches_['loss'], fetches_["acc"]
            accs.append(acc)
            losses.append(loss)
            if step and step % config_data.display_steps == 0:
                logger.info('step: %d, batch_size: %d, loss: %.4f, acc:%.4f' % (
                    step, config_data.batch_size * n_gpu, np.mean(losses), np.mean(accs)))
                smry_writer.add_summary(fetches_['smry'], global_step=step)
                accs.clear()
                losses.clear()
            if step and step % config_data.eval_steps == 0:
                _eval(epoch, step, sess)
        return step

    # ---- Run the graph ----
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        logger.info("init vars!")
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.tables_initializer())
        BertRLTransformer.initBert(sess, 1)
        smry_writer = tf.summary.FileWriter(FLAGS.model_dir, graph=sess.graph)
        logger.info('Begin running with train_and_evaluate mode')
        step = 0
        for epoch in range(config_data.max_train_epoch):
            step = _train_epoch(sess, epoch, step, smry_writer)
def train_rl_parallel():
    """Policy-gradient (RL) fine-tuning of the transformer on multiple GPUs.

    Per tower: samples a translation with `infer_sample` decoding, scores it
    with sentence-BLEU against the reference, and minimizes the
    policy-gradient loss weighted by discounted rewards fed back in through
    `qvalue_inputs`.
    """
    # Local import for consistency with train_transformer_parallel.
    from bert import optimization

    # ---- Graph inputs ----
    encoder_input = tf.placeholder(tf.int64, shape=(None, None))
    decoder_input = tf.placeholder(tf.int64, shape=(None, None))
    # Text sequence lengths excluding padding (pad id == 0).
    encoder_input_length = tf.reduce_sum(
        1 - tf.to_int32(tf.equal(encoder_input, 0)), axis=1)
    decoder_input_length = tf.reduce_sum(
        1 - tf.to_int32(tf.equal(decoder_input, 0)), axis=1)
    # Per-step advantages (discounted rewards) computed in Python per batch.
    # (A dead `global_step = tf.Variable(...)` that was immediately shadowed
    # by tf.train.get_or_create_global_step() below has been removed.)
    qvalue_inputs = tf.placeholder(dtype=tf.float32, shape=[None, None],
                                   name='qvalue_inputs')
    agent_hparams = tx.HParams(config_model.agent, None)

    def transformer_rl_model(enc_x, dec_x, enc_len, dec_len, q_val, agent_hparams):
        encoder_output, embedder = BertRLTransformer.bertTransformerEncoder(True, enc_x)
        tgt_embedding = embedder

        def __computeEmbedding(embedding_table, input_ids):
            if input_ids.shape.ndims == 2:
                input_ids = tf.expand_dims(input_ids, axis=[-1])
            flat_decoder_input_ids = tf.reshape(input_ids, [-1])
            embedded = tf.gather(embedding_table, flat_decoder_input_ids)
            input_shape = modeling.get_shape_list(input_ids)
            embedded = tf.reshape(
                embedded,
                input_shape[0:-1] + [input_shape[-1] * BertRLTransformer.bert_config.hidden_size])
            return embedded

        decoder_emb_input = __computeEmbedding(embedder, dec_x)
        decoder = transformer_decoders.TransformerDecoder(
            embedding=tgt_embedding,
            hparams=BertRLTransformer.config_model.decoder)
        # NOTE(review): the result of this train_greedy call is unused; it
        # appears to exist only so the decoder variables are created under
        # the training scope — confirm before removing.
        outputs = decoder(
            memory=encoder_output,
            memory_sequence_length=enc_len,
            inputs=decoder_emb_input,
            sequence_length=dec_len,
            decoding_strategy='train_greedy',
            mode=tf.estimator.ModeKeys.TRAIN)
        # Sample a translation for this tower's slice.
        # Fix: the batch size must come from the tower slice `enc_x`; the
        # original used the full `encoder_input` batch here.
        start_tokens = tf.fill([tx.utils.get_batch_size(enc_x)], bos_token_id)
        outputs, sequence_length = decoder(
            memory=encoder_output,
            memory_sequence_length=enc_len,
            inputs=decoder_emb_input,
            sequence_length=dec_len,
            start_tokens=start_tokens,
            end_token=eos_token_id,
            decoding_strategy='infer_sample',
            mode=tf.estimator.ModeKeys.TRAIN)

        from texar.losses.pg_losses import pg_loss_with_logits
        from texar.losses.entropy import sequence_entropy_with_logits
        loss_hparams = agent_hparams.loss
        pg_loss = pg_loss_with_logits(
            actions=outputs.sample_id,
            logits=outputs.logits,
            sequence_length=sequence_length,
            advantages=q_val,
            batched=True,
            average_across_batch=loss_hparams.average_across_batch,
            average_across_timesteps=loss_hparams.average_across_timesteps,
            sum_over_batch=loss_hparams.sum_over_batch,
            sum_over_timesteps=loss_hparams.sum_over_timesteps,
            time_major=loss_hparams.time_major)
        # Optional entropy bonus to keep the sampling policy exploratory.
        if agent_hparams.entropy_weight > 0:
            entropy = sequence_entropy_with_logits(
                outputs.logits,
                sequence_length=sequence_length,
                average_across_batch=loss_hparams.average_across_batch,
                average_across_timesteps=loss_hparams.average_across_timesteps,
                sum_over_batch=loss_hparams.sum_over_batch,
                sum_over_timesteps=loss_hparams.sum_over_timesteps,
                time_major=loss_hparams.time_major)
            pg_loss -= agent_hparams.entropy_weight * entropy
        return pg_loss, outputs, sequence_length

    # ---- One tower per GPU, each consuming its own slice of the batch ----
    tower_grads = []
    n_gpu = FLAGS.gpu_num
    batch_size = config_data.batch_size
    agent_loss = []
    predictions = []
    dec_out_seq_len = []
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        for i in range(n_gpu):
            with tf.device("%s:%d" % (device_name, i)):
                with tf.name_scope("tower_%d" % i):
                    enc_x = encoder_input[i * batch_size:(i + 1) * batch_size]
                    enc_len = encoder_input_length[i * batch_size:(i + 1) * batch_size]
                    dec_y = decoder_input[i * batch_size:(i + 1) * batch_size]
                    dec_len = decoder_input_length[i * batch_size:(i + 1) * batch_size]
                    q_val = qvalue_inputs[i * batch_size:(i + 1) * batch_size]
                    loss, dec_out, seq_len = transformer_rl_model(
                        enc_x=enc_x, dec_x=dec_y, enc_len=enc_len,
                        dec_len=dec_len, q_val=q_val, agent_hparams=agent_hparams)
                    agent_loss.append(loss)
                    predictions.append(dec_out.sample_id)
                    dec_out_seq_len.append(seq_len)
                    # Share variables across towers.
                    tf.get_variable_scope().reuse_variables()
                    tvars = tf.trainable_variables()
                    grads = tf.gradients(loss, tvars)
                    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
                    tower_grads.append(list(zip(grads, tvars)))
    grads = average_gradients(tower_grads, verbose=2)
    agent_loss = tf.reduce_mean(tf.stack(agent_loss, axis=0), axis=0)
    predictions = tf.concat(predictions, axis=0, name="predictions")
    dec_out_seq_len = tf.concat(dec_out_seq_len, axis=0,
                                name="decoder_output_length")

    # ---- LR schedule: polynomial decay with linear warmup ----
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.constant(value=config_data.init_lr, shape=[],
                                dtype=tf.float32, name="lr")
    learning_rate = tf.train.polynomial_decay(
        learning_rate,
        global_step,
        config_data.train_steps,
        end_learning_rate=0.0,
        power=1.0,
        cycle=False)
    # If global_step < num_warmup_steps the learning rate is
    # `global_step / num_warmup_steps * init_lr`.
    if config_data.warmup_steps:
        global_steps_int = tf.cast(global_step, tf.int32)
        warmup_steps_int = tf.constant(config_data.warmup_steps, dtype=tf.int32)
        global_steps_float = tf.cast(global_steps_int, tf.float32)
        warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
        warmup_percent_done = global_steps_float / warmup_steps_float
        warmup_learning_rate = config_data.init_lr * warmup_percent_done
        is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
        learning_rate = (
            (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)

    # Fix: the optimizer / train_op / summary section below was duplicated
    # verbatim in the original (copy-paste); it is built exactly once now.
    optimizer = optimization.AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
    train_op = optimizer.apply_gradients(grads, global_step=global_step)
    # The BERT optimizer does not advance global_step itself; do it here.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    tf.summary.scalar('lr', learning_rate)
    tf.summary.scalar('agent_loss', agent_loss)
    summary_merged = tf.summary.merge_all()
    logger.info("parallel gpu computing graph for reinforcement learning defined!")

    from texar.losses.rewards import discount_reward

    def _eval(epoch, step, sess: tf.Session):
        logger.info("evaluating")
        hypotheses, references = [], []
        bsize = config_data.batch_size
        for i in range(0, len(eval_data), bsize):
            in_arrays = data_utils.seq2seq_pad_concat_convert(eval_data[i:i + bsize])
            feed_dict = {
                encoder_input: in_arrays[0],
                tx.global_mode(): tf.estimator.ModeKeys.EVAL,
            }
            fetches_ = sess.run({"sample_ids": predictions}, feed_dict=feed_dict)
            hypotheses.extend(h.tolist() for h in fetches_['sample_ids'])
            references.extend(r.tolist() for r in in_arrays[1])
        hypotheses = utils.list_strip_eos(hypotheses, eos_token_id)
        references = utils.list_strip_eos(references, eos_token_id)
        computeScore(epoch, sess, hypotheses, references, step)

    def _train_epoch(epoch, step):
        logger.info("training epoch:{}".format(epoch))
        random.shuffle(train_data)
        train_iter = data.iterator.pool(
            train_data,
            config_data.batch_size,
            key=lambda x: (len(x[0]), len(x[1])),
            random_shuffler=data.iterator.RandomShuffler())
        for train_batch in tqdm.tqdm(train_iter, desc="training"):
            in_arrays = data_utils.seq2seq_pad_concat_convert(train_batch)
            # Stage 1: sample translations for this batch.
            handle = sess.partial_run_setup(
                fetches=[predictions, dec_out_seq_len, global_step,
                         agent_loss, train_op, summary_merged],
                feeds=[encoder_input, decoder_input, qvalue_inputs])
            fetches = sess.partial_run(
                handle,
                fetches={"samples": predictions, "dec_len": dec_out_seq_len},
                feed_dict={encoder_input: in_arrays[0]})
            samples, decoder_out_length_py = fetches["samples"], fetches["dec_len"]
            sample_text = tx.utils.map_ids_to_strs(
                samples, vocab,
                strip_pad="[PAD]", strip_bos="[BOS]", strip_eos="[EOS]",
                join=False)
            truth_text = tx.utils.map_ids_to_strs(
                in_arrays[1], vocab,
                strip_pad="[PAD]", strip_bos="[BOS]", strip_eos="[EOS]",
                join=False)
            # Stage 2: reward each sample with sentence-BLEU vs the reference.
            reward = []
            for ref, hyp in zip(truth_text, sample_text):
                reward.append(tx.evals.sentence_bleu([ref], hyp, smooth=True))
            qvalues = discount_reward(
                reward,
                decoder_out_length_py,
                discount=agent_hparams.discount_factor,
                normalize=agent_hparams.normalize_reward)
            # Stage 3: policy-gradient update.
            # NOTE(review): this full sess.run re-executes the sampling
            # decoder, so the actions being reinforced may differ from the
            # ones scored above; the partial_run handle from stage 1 is
            # abandoned — confirm this is intended.
            feed_dict = {
                encoder_input: in_arrays[0],
                decoder_input: in_arrays[1],
                qvalue_inputs: qvalues,
                tx.global_mode(): tf.estimator.ModeKeys.TRAIN,
            }
            fetches = sess.run({
                'step': global_step,
                'loss': agent_loss,
                'train_op': train_op,
                "sumry": summary_merged,
            }, feed_dict=feed_dict)
            step = fetches['step']
            loss = fetches["loss"]
            if step and step % config_data.display_steps == 0:
                logger.info("rl:epoch={}, step={}, loss={:.4f}, reward={:.4f}".format(
                    epoch, step, loss, np.mean(reward)))
                # Fix: the fetch dict key is "sumry"; the original read
                # fetches['smry'] here, which raised KeyError at the first
                # display step.
                smry_writer.add_summary(fetches['sumry'], global_step=step)
            if step and step % config_data.eval_steps == 0:
                _eval(epoch, step, sess)
        return step

    # ---- Run the graph ----
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    with tf.Session(config=sess_config) as sess:
        logger.info("init variables !")
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.tables_initializer())
        BertRLTransformer.initBert(sess, 2)
        smry_writer = tf.summary.FileWriter(FLAGS.model_dir, graph=sess.graph)
        logger.info('Begin running with train_and_evaluate mode')
        step = 0
        for epoch in range(config_data.max_train_epoch):
            if step >= config_data.train_steps:
                break
            step = _train_epoch(epoch, step)
if __name__ == '__main__':
    flags = argparse.ArgumentParser()
    # Fix: typos in user-facing help strings ("wthether", "ouput").
    flags.add_argument("--use_rl", action="store_true",
                       help="whether or not use reinforcement learning.")
    flags.add_argument("--model_name", default="transformer-rl",
                       help="name of the output model file.")
    flags.add_argument("--run_mode", default="train_and_evaluate",
                       help="Either train_and_evaluate or test.")
    flags.add_argument("--model_dir", default="/home/LAB/zhangzy/ProjectModels/rlmodel",
                       help="Directory to save the trained model and logs.")
    flags.add_argument("--bert_config",
                       default="/home/LAB/zhangzy/ShareModels/uncased_L-12_H-768_A-12/bert_config.json",
                       help="Path to the bert config json file.")
    flags.add_argument("--bert_ckpt",
                       default="/home/LAB/zhangzy/ShareModels/uncased_L-12_H-768_A-12/bert_model.ckpt",
                       help="Path to the bert model checkpoint.")
    flags.add_argument("--train_from", action="store_true",
                       help="train from a previous ckpt.")
    flags.add_argument("--gpu_num", default=0, type=int, help="how many gpu to use")
    flags.add_argument("--device_name", default="/device:GPU", type=str,
                       help="name prefix to gpu device")
    FLAGS = flags.parse_args()

    # Wire the run configuration into the shared BertRLTransformer holder.
    config_model = importlib.import_module("post_rec.models.rl_utility.config_model")
    config_data = importlib.import_module("post_rec.models.rl_utility.config_data")
    utils.set_random_seed(config_model.random_seed)
    BertRLTransformer.config_data = config_data
    BertRLTransformer.config_model = config_model
    BertRLTransformer.bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config)
    BertRLTransformer.bert_model_ckpt = FLAGS.bert_ckpt
    BertRLTransformer.transformer_model_dir = FLAGS.model_dir
    device_name = FLAGS.device_name

    # Get host name of the running machine.
    import socket
    machine_host = socket.gethostname()

    # Create model dir if it does not exist.
    tx.utils.maybe_create_dir(FLAGS.model_dir)

    # Load data and vocab.
    train_data, eval_data = data_utils.load_data_numpy(
        config_data.input_dir, config_data.filename_prefix)
    vocab = VocabWrapper(config_data.vocab)
    model_name = FLAGS.model_name + ".ckpt"
    logger.info(FLAGS)

    if FLAGS.run_mode == "train_and_evaluate":
        if FLAGS.use_rl:
            if FLAGS.gpu_num < 2:
                logger.info("traditional training use rl")
                train_rl()
            else:
                logger.info("training use rl with multi-gpu({})".format(FLAGS.gpu_num))
                train_rl_parallel()
        else:
            if FLAGS.gpu_num < 2:
                logger.info("traditional training method")
                train_model()
            else:
                # Fix: "mulit-gpu" typo in the log message.
                logger.info("traditional training method with multi-gpu({})".format(FLAGS.gpu_num))
                train_transformer_parallel()
    elif FLAGS.run_mode == "test":
        logger.info("run test")
        sources, targets = zip(*eval_data)
        testModel(0, sources)
    else:
        raise ValueError("run mode: {} =>not defined!".format(FLAGS.run_mode))
| 37.945594 | 160 | 0.635554 |
c981135969d2efddb302e26e7c8ccc5096b2c506 | 225 | py | Python | pysqs_extended_client/config.py | danbernstein/boto3-sqs-extended-client-lib | 37c90d58a880167ea6b998c3db267541ef5ea89c | [
"Apache-2.0"
] | 19 | 2018-08-23T20:49:59.000Z | 2022-02-09T09:51:02.000Z | pysqs_extended_client/config.py | danbernstein/boto3-sqs-extended-client-lib | 37c90d58a880167ea6b998c3db267541ef5ea89c | [
"Apache-2.0"
] | 8 | 2019-06-20T19:36:27.000Z | 2021-08-18T06:39:03.000Z | pysqs_extended_client/config.py | danbernstein/boto3-sqs-extended-client-lib | 37c90d58a880167ea6b998c3db267541ef5ea89c | [
"Apache-2.0"
] | 19 | 2018-11-30T20:54:01.000Z | 2022-01-31T23:16:11.000Z | import os
# AWS connection settings, read from the environment at import time.
# Each value is None when the corresponding variable is not set.
AWS_SQS_QUEUE_URL = os.getenv('AWS_SQS_QUEUE_URL')
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_DEFAULT_REGION = os.getenv('AWS_DEFAULT_REGION')
| 32.142857 | 58 | 0.826667 |
8c1d3cdb4380bb89c2fadc29aa8a74d6950e1109 | 111 | py | Python | LuoguCodes/AT2653.py | Anguei/OI-Codes | 0ef271e9af0619d4c236e314cd6d8708d356536a | [
"MIT"
] | null | null | null | LuoguCodes/AT2653.py | Anguei/OI-Codes | 0ef271e9af0619d4c236e314cd6d8708d356536a | [
"MIT"
] | null | null | null | LuoguCodes/AT2653.py | Anguei/OI-Codes | 0ef271e9af0619d4c236e314cd6d8708d356536a | [
"MIT"
] | null | null | null | n, m = map(int, raw_input().split())
a = map(int, raw_input().split())
a.sort(reverse = True)
print sum(a[:m])
| 22.2 | 36 | 0.621622 |
2d28a9225d3d34c45a42c0d1bd2b0affd7d576da | 2,182 | py | Python | tests/test_utils.py | martinghunt/cluster_vcf_records | 6411723081132b7b3790dfe85548e982525044a7 | [
"MIT"
] | 1 | 2018-01-08T18:28:34.000Z | 2018-01-08T18:28:34.000Z | tests/test_utils.py | martinghunt/cluster_vcf_records | 6411723081132b7b3790dfe85548e982525044a7 | [
"MIT"
] | 10 | 2018-07-24T09:39:57.000Z | 2020-12-16T14:28:13.000Z | tests/test_utils.py | martinghunt/cluster_vcf_records | 6411723081132b7b3790dfe85548e982525044a7 | [
"MIT"
] | 6 | 2018-01-08T14:09:19.000Z | 2019-02-11T16:27:55.000Z | import filecmp
import os
import pytest
import pyfastaq
from cluster_vcf_records import vcf_file_read, utils
# Test fixtures live next to this file under data/utils.
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "utils")
def test_simplify_vcf():
    """simplify_vcf output matches the expected files, with and without REF calls."""
    vcf_in = os.path.join(data_dir, "simplify_vcf.in.vcf")
    ref_fasta = os.path.join(data_dir, "simplify_vcf.ref.fa")
    ref_seqs = {}
    pyfastaq.tasks.file_to_dict(ref_fasta, ref_seqs)
    tmp_out = "tmp.simplify_vcf.out.vcf"
    utils.rm_rf(tmp_out)

    # Default run.
    utils.simplify_vcf(vcf_in, tmp_out, ref_seqs=ref_seqs)
    expect = os.path.join(data_dir, "simplify_vcf.expect.vcf")
    assert filecmp.cmp(tmp_out, expect, shallow=False)
    os.unlink(tmp_out)

    # The gzipped copy of the same input must give identical output.
    utils.simplify_vcf(vcf_in + ".gz", tmp_out, ref_seqs=ref_seqs)
    assert filecmp.cmp(tmp_out, expect, shallow=False)

    # keep_ref_calls=True compares against a different expected file.
    utils.simplify_vcf(vcf_in, tmp_out, keep_ref_calls=True, ref_seqs=ref_seqs)
    expect = os.path.join(data_dir, "simplify_vcf.expect_keep_ref_calls.vcf")
    assert filecmp.cmp(tmp_out, expect, shallow=False)
    os.unlink(tmp_out)
def test_normalise_vcf():
    """normalise_vcf records match the expected files, with/without allele breaking."""
    vcf_in = os.path.join(data_dir, "normalise_vcf.in.vcf")
    ref_fasta = os.path.join(data_dir, "normalise_vcf.in.fa")
    tmp_out = "tmp.normalise_vcf.vcf"
    utils.rm_rf(tmp_out)

    def records_of(path):
        # The normalizing commands add lots of lines to the header; we only
        # care about the actual VCF records, so drop the header.
        _header, records = vcf_file_read.vcf_file_to_list(path)
        return records

    utils.normalise_vcf(vcf_in, ref_fasta, tmp_out)
    expect = os.path.join(data_dir, "normalise_vcf.out.vcf")
    assert records_of(tmp_out) == records_of(expect)
    os.unlink(tmp_out)

    # Test again, but without breaking alleles into separate records.
    utils.normalise_vcf(vcf_in, ref_fasta, tmp_out, break_alleles=False)
    expect = os.path.join(data_dir, "normalise_vcf.out.no_break_alleles.vcf")
    assert records_of(tmp_out) == records_of(expect)
    os.unlink(tmp_out)
| 41.961538 | 82 | 0.752979 |
81005e7deead3e79d85f7f3a72344768cb10cc14 | 5,117 | py | Python | cogs/calendar/cog.py | mathis-chapuis/TCCalendarBot | 244365227b55f3e9286b07fb258f8658fe1f56c2 | [
"MIT"
] | 2 | 2021-03-21T20:26:30.000Z | 2021-03-26T07:18:43.000Z | cogs/calendar/cog.py | mathis-chapuis/TCCalendarBot | 244365227b55f3e9286b07fb258f8658fe1f56c2 | [
"MIT"
] | null | null | null | cogs/calendar/cog.py | mathis-chapuis/TCCalendarBot | 244365227b55f3e9286b07fb258f8658fe1f56c2 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from discord.ext import commands, tasks
from .utils import format_response, get_course_by_date, download_calendar, get_week_calendar, get_offset
import discord
import os
import re
ROOT_CALENDAR = "cogs/calendar/Assets"
def setup(bot):
    """Extension entry point: register the calendar cog on *bot*."""
    print("Help command load")
    cog = CogCalendar(bot)
    bot.add_cog(cog)
class CogCalendar(commands.Cog):
    """
    Cog exposing the calendar commands: weekly view, today and tomorrow.

    Calendar files are stored as ``<year>TC<group>.ical`` under
    ``ROOT_CALENDAR`` and refreshed every 24 hours by a background task.
    """

    # Accepted class identifiers: 3/4TC with group 1-3 or A, 5TC with group
    # 1-3, with a case-insensitive "TC". fullmatch is used so trailing
    # characters (e.g. "3TCA2") are rejected; the previous re.match check
    # only anchored at the start and silently accepted them.
    _ARG_RE = re.compile(r"(([34])(TC|tc|Tc|tC)([123Aa])|([5])(TC|tc|Tc|tC)([123]))")

    def __init__(self, bot):
        """Store the bot and start the daily calendar refresh loop."""
        self.bot = bot
        self.update_calendars.start()
        self.dobby = None

    def cog_unload(self):
        """Cancel calendar updates on cog unload."""
        self.update_calendars.cancel()

    async def _parse_class(self, arg):
        """
        Validate a ``<year>TC<group>`` argument (shared by all commands).

        On success, updates the bot presence and returns ``(year, group)``;
        returns ``None`` for invalid input.
        """
        if self._ARG_RE.fullmatch(arg) is None:
            return None
        await self.bot.change_presence(
            activity=discord.Activity(
                name=f"Calendrier des {arg}",
                type=discord.ActivityType.watching
            )
        )
        year = arg[0]
        # Numbered groups keep their digit; any letter group maps to "A".
        group = arg[-1] if arg[-1].isnumeric() else "A"
        return year, group

    async def _send_day_agenda(self, ctx, arg, prompt_date, invalid_msg):
        """Send the list of courses of *prompt_date* for the class in *arg*."""
        parsed = await self._parse_class(arg)
        if parsed is None:
            await ctx.send(invalid_msg)
            return
        year, group = parsed
        response = "<@629369223377977364> pour que tu ne sois pas en retard...\n"
        courses = get_course_by_date(
            prompt_date=prompt_date,
            calendar_path=ROOT_CALENDAR + f"/{year}TC{group}.ical"
        )
        if not courses:
            # No courses scheduled: free day.
            await ctx.send("It's <:dobby:823315794472730655> time")
        else:
            for course in courses:
                response += format_response(course) + "\n"
            await ctx.send(response)

    @commands.command(aliases=["Calendar", "cal", "week"])
    async def calendar(self, ctx, arg="3TCA", offset="+0"):
        """
        Get the calendar of a week.
        """
        # Get the dobby emoji from the server.
        self.dobby = self.bot.get_emoji(823315794472730655)
        parsed = await self._parse_class(arg)
        if parsed is None:
            await ctx.send("please enter a valid input <year>TC<group>")
            return
        year, group = parsed
        calendar = get_week_calendar(
            calendar_path=ROOT_CALENDAR + f"/{year}TC{group}.ical",
            offset=get_offset(offset),
            dobby=self.dobby,
            group_displayed=year + "TC" + group
        )
        await ctx.send(
            content="<@629369223377977364> pour que tu ne sois pas en retard...",
            embed=calendar
        )

    @commands.command(aliases=["Today", "aujourd'hui", "auj", "tod"])
    async def today(self, ctx, arg="3TCA"):
        """
        Get the agenda of the current day.
        """
        await self._send_day_agenda(
            ctx, arg, datetime.now().date(),
            "please enter a valid prompt <year>TC<group>")

    @commands.command(aliases=["Tomorow", "demain", "tom"])
    async def tomorrow(self, ctx, arg="3TCA"):
        """
        Get the agenda of the next day.
        """
        await self._send_day_agenda(
            ctx, arg, (datetime.now() + timedelta(days=1)).date(),
            "please enter a valid input <year>TC<group>")

    @tasks.loop(hours=24)
    async def update_calendars(self):
        """
        Refresh the calendar files on startup and then every 24 hours.
        """
        print("Deleting calendar assets")
        assets_dir = "cogs/calendar/Assets"
        if not os.path.exists(assets_dir):
            os.makedirs(assets_dir)
        for file in os.listdir(assets_dir):
            os.remove(os.path.join(assets_dir, file))
        download_calendar()
| 33.664474 | 104 | 0.525503 |
bffe3443b994fd339b5e28f95198806da22703b1 | 1,529 | py | Python | mysite/wsgi.py | gizmo126/django-app-starter | 76a454c447216dec92983de7f7fc236a409dccf8 | [
"MIT"
] | null | null | null | mysite/wsgi.py | gizmo126/django-app-starter | 76a454c447216dec92983de7f7fc236a409dccf8 | [
"MIT"
] | 3 | 2020-02-11T21:47:49.000Z | 2021-03-22T17:10:49.000Z | mysite/wsgi.py | gizmo126/django-app-starter | 76a454c447216dec92983de7f7fc236a409dccf8 | [
"MIT"
] | null | null | null | """
WSGI config for mysite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
print(BASE_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(application)
| 41.324324 | 79 | 0.803793 |
a8c1dc3aa422322ea4105a824845aa405690b4ec | 428 | py | Python | PeptideConstructor/__init__.py | CharlesHahn/DL-PeptideBuilder | 098c873a2e428f9a48b1bee2e1a21dd92806d119 | [
"MIT"
] | 1 | 2021-12-24T14:35:24.000Z | 2021-12-24T14:35:24.000Z | PeptideConstructor/__init__.py | CharlesHahn/DL-PeptideBuilder | 098c873a2e428f9a48b1bee2e1a21dd92806d119 | [
"MIT"
] | null | null | null | PeptideConstructor/__init__.py | CharlesHahn/DL-PeptideBuilder | 098c873a2e428f9a48b1bee2e1a21dd92806d119 | [
"MIT"
] | 1 | 2021-12-20T13:48:02.000Z | 2021-12-20T13:48:02.000Z | """``PeptideConstructor`` package for creating (DL-)peptide models in PDB format based on geometrical parameters.
Written by Charles Hahn. This package is based on Lun4m/PeptideBuilder.git and clauswilke/PeptideBuilder.git.
Python modules
----------------
The package consists of the following Python modules:
* PeptideBuilder
* Geometry
* PCcli
"""
__version__ = "0.2.1"
from .PeptideBuilder import *
from .Geometry import *
| 30.571429 | 113 | 0.754673 |
634beef0f20688eedb85c2a7fb846b57f15f63d5 | 4,397 | py | Python | tests/test_router_bandwidth.py | liuzhengzheng12/probe_generation_algorithm | 1636b43c3053593bfae9ab2eeec6cd203c13633d | [
"MIT"
] | null | null | null | tests/test_router_bandwidth.py | liuzhengzheng12/probe_generation_algorithm | 1636b43c3053593bfae9ab2eeec6cd203c13633d | [
"MIT"
] | null | null | null | tests/test_router_bandwidth.py | liuzhengzheng12/probe_generation_algorithm | 1636b43c3053593bfae9ab2eeec6cd203c13633d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from topology.fat_tree import FatTreeTopology
from optimize_algorithm.generate_probe import optimize_generate_probe_set
from query_primitives.query import PathQuery, NodeQuery
from utils.const import metadata_list
from random import randint
# 计算100,500,1000Hz下的500个query
def get_query_list_dict(port_range, router_range):
    """Build 500 random queries (250 node + 250 path) for each frequency.

    NOTE(review): this is Python 2 code (``xrange``) and it reads the globals
    ``metadata_list``, ``metadata_len`` and ``category``; the latter two are
    only defined inside the ``__main__`` guard below, so this function must be
    called from that script context.

    port_range / router_range: inclusive upper bounds passed to randint when
    picking ports and nodes.
    Returns a dict mapping frequency (100/500/1000 Hz) to its query list.
    """
    query_list_dict = {100: [], 500: [], 1000: []}
    for index in xrange(250):
        # Node query: up to `index` random metadata fields in where/return.
        node_query_where = {}
        node_query_return = {}
        for _ in xrange(index):
            node_query_where[metadata_list[randint(0, index) % metadata_len]] = {}
            node_query_return[metadata_list[randint(0, index) % metadata_len]] = {}
        node = randint(0, router_range)
        port = randint(0, port_range)
        cat = category[randint(0, 1)]
        # The same query is registered once per frequency bucket.
        query_list_dict[100].append(NodeQuery(node, port, node_query_where, 100, node_query_return, cat))
        query_list_dict[500].append(NodeQuery(node, port, node_query_where, 500, node_query_return, cat))
        query_list_dict[1000].append(NodeQuery(node, port, node_query_where, 1000, node_query_return, cat))
        # Path query: random source/destination endpoints.
        path_query_where = {}
        path_query_return = {}
        for _ in xrange(index):
            path_query_where[metadata_list[randint(0, index) % metadata_len]] = {}
            path_query_return[metadata_list[randint(0, index) % metadata_len]] = {}
        src_node = randint(0, router_range)
        src_port = randint(0, port_range)
        dst_node = randint(0, router_range)
        dst_port = randint(0, port_range)
        cat = category[randint(0, 1)]
        query_list_dict[100].append(PathQuery(src_node, src_port, dst_node, dst_port,
                                              path_query_where, 100, path_query_return,
                                              cat))
        query_list_dict[500].append(PathQuery(src_node, src_port, dst_node, dst_port,
                                              path_query_where, 500, path_query_return,
                                              cat))
        query_list_dict[1000].append(PathQuery(src_node, src_port, dst_node, dst_port,
                                               path_query_where, 1000, path_query_return,
                                               cat))
    return query_list_dict
if __name__ == '__main__':
    # NOTE(review): Python 2 syntax throughout (xrange, dict.iteritems, the
    # print statement, integer division); this script will not run on Python 3.
    k_fwd = 100
    k_tele = 10
    # Globals consumed by get_query_list_dict().
    metadata_len = len(metadata_list)
    category = ['performance', 'failure']
    f = open('testsdata/fig_router_bandwidth.txt', 'w')
    # Sweep fat-tree sizes; k is the fat-tree arity.
    for k in xrange(4, 100, 2):
        fat_tree = FatTreeTopology(k=k)
        # A k-ary fat tree has 5*k^2/4 switches.
        router_cnt = 5 * k * k / 4
        query_list_dict = get_query_list_dict(k / 2 - 1, router_cnt - 1)
        # Total probe bytes per second at 100 Hz, reported in MB/s.
        optimize_probe_pkt_list = optimize_generate_probe_set(query_list_dict[100], fat_tree, k_fwd, k_tele)
        bandwidth_100 = 0
        for probe_type in ['performance', 'failure']:
            probe_set_dict = optimize_probe_pkt_list[probe_type]
            for freq, probe_set in probe_set_dict.iteritems():
                for probe in probe_set:
                    if probe:
                        bandwidth_100 += len(probe)
        bandwidth_100 *= 100
        bandwidth_100 /= 1000000.0
        # Same computation at 500 Hz.
        optimize_probe_pkt_list = optimize_generate_probe_set(query_list_dict[500], fat_tree, k_fwd, k_tele)
        bandwidth_500 = 0
        for probe_type in ['performance', 'failure']:
            probe_set_dict = optimize_probe_pkt_list[probe_type]
            for freq, probe_set in probe_set_dict.iteritems():
                for probe in probe_set:
                    if probe:
                        bandwidth_500 += len(probe)
        bandwidth_500 *= 500
        bandwidth_500 /= 1000000.0
        # Same computation at 1000 Hz.
        optimize_probe_pkt_list = optimize_generate_probe_set(query_list_dict[1000], fat_tree, k_fwd, k_tele)
        bandwidth_1000 = 0
        for probe_type in ['performance', 'failure']:
            probe_set_dict = optimize_probe_pkt_list[probe_type]
            for freq, probe_set in probe_set_dict.iteritems():
                for probe in probe_set:
                    if probe:
                        bandwidth_1000 += len(probe)
        bandwidth_1000 *= 1000
        bandwidth_1000 /= 1000000.0
        print k, router_cnt, bandwidth_100, bandwidth_500, bandwidth_1000
        f.write('{} {} {} {} {} \n'.format(k, router_cnt, bandwidth_100, bandwidth_500, bandwidth_1000))
    f.close()
| 45.329897 | 109 | 0.615647 |
6c81de12c48a91b5fa5d0fb63869d4a8aa9914b3 | 1,088 | py | Python | scripts/convert/convert_bedgraph_to_gff.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 10 | 2015-04-28T14:15:04.000Z | 2021-03-15T00:07:38.000Z | scripts/convert/convert_bedgraph_to_gff.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | null | null | null | scripts/convert/convert_bedgraph_to_gff.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 6 | 2017-03-16T22:38:41.000Z | 2021-08-11T00:22:52.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Routines import AnnotationsRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input bedgraph file(0-based, python notation)")
parser.add_argument("-o", "--output", action="store", dest="output", required=True,
help="Output gff file")
parser.add_argument("-f", "--feature_type", action="store", dest="feature_type", required=True,
help="Feature type to use in gff file")
parser.add_argument("-s", "--source", action="store", dest="source", default="source",
help="Source to use in gff file")
parser.add_argument("-d", "--id_prefix", action="store", dest="id_prefix",
default="ID", help="Id prefix for gff file")
args = parser.parse_args()
AnnotationsRoutines.convert_bedgraph_to_gff(args.input, args.output, args.feature_type,
id_prefix=args.id_prefix, source=args.source)
| 43.52 | 95 | 0.641544 |
5009020a9c808128b899d503ed4810822e2d83e8 | 4,429 | py | Python | static/js/jsxgraph/server/fft.py | evheubel/content-mit-latex2edx-demo | 4a9c37be58ae7defd095d2ab811fa9f8d13e4d96 | [
"MIT"
] | 4 | 2015-02-28T02:30:18.000Z | 2021-05-30T17:09:41.000Z | static/js/jsxgraph/server/fft.py | evheubel/content-mit-latex2edx-demo | 4a9c37be58ae7defd095d2ab811fa9f8d13e4d96 | [
"MIT"
] | 2 | 2015-07-15T13:49:56.000Z | 2015-07-15T15:43:50.000Z | static/js/jsxgraph/server/fft.py | evheubel/content-mit-latex2edx-demo | 4a9c37be58ae7defd095d2ab811fa9f8d13e4d96 | [
"MIT"
] | 10 | 2015-01-11T20:21:14.000Z | 2021-06-10T04:41:43.000Z | from JXGServerModule import JXGServerModule
import numpy
import numpy.fft
import wave, struct, uuid
import os, subprocess
import StringIO, gzip, base64
import datetime, math, random
# Should be changed to something more persistent but must be writable by
# the webserver (usually user www-data)
#if not 'MPLCONFIGDIR' in os.environ:
# os.environ['MPLCONFIGDIR'] = '/tmp/'
# os.environ['MPLCONFIGDIR'] = 'C:/xampp/tmp'
#import matplotlib
#import matplotlib.pyplot as plt
class FFT(JXGServerModule):
    """
    JSXGraph server module exposing FFT-based audio helpers.

    NOTE(review): this module appears to target Python 2 — ``map`` results are
    passed straight to ``resp.addData`` / ``len`` and a str is written to a
    subprocess' stdin. Those call sites were left untouched; confirm the
    runtime before porting.
    """

    def __init__(self):
        JXGServerModule.__init__(self)

    def init(self, resp):
        """Register the public handlers on the server response object."""
        resp.addHandler(self.fft, 'function(data) { }')
        resp.addHandler(self.ifft, 'function(data) { }')
        resp.addHandler(self.cutoutrange, 'function(data) { }')
        resp.addHandler(self.makeAudio, 'function(data) { }')
        resp.addHandler(self.loadAudio, 'function(data) { }')
        resp.addHandler(self.sampleifft, 'function(data) { }')
        return

    def fft(self, resp, x):
        """Add the magnitudes of the real FFT of *x* under the 'y' key."""
        y = numpy.fft.rfft(x)
        y = map(abs, y)
        resp.addData('y', y)
        return

    def _real(self, val):
        # Extract the real part of a complex sample.
        return val.real

    def ifft(self, resp, x):
        """Add the real part of the inverse real FFT of *x* under 'y'."""
        y = numpy.fft.irfft(x)
        y = map(self._real, y)
        resp.addData('y', y)
        return

    def _set0(self, val):
        # Fixed: the method was missing its ``self`` parameter. (Currently
        # unused; kept for API compatibility.)
        return 0

    def sampleifft(self, resp, name, s, e, factor):
        """
        Band-filter the wav file *name*: scale the FFT bins outside the index
        band [s, e) by *factor*, inverse-transform, and return the samples
        plus the filtered audio as ogg.
        """
        pathtowavefiles = '/share8/home/michael/www-store/audio/'
        fname = pathtowavefiles + os.path.basename(name) + '.wav'
        w = wave.open(fname, 'r')
        try:
            (nchannels, sampwidth, framerate, nframes, comptype, compname) = w.getparams()
            frames = w.readframes(nframes * nchannels)
        finally:
            w.close()
        # Scale 16-bit samples down to a small float range. The format string
        # "%dh" % (nframes * nchannels) replaces the old
        # ("%dh" % nframes) * nchannels, which repeated the format string
        # nchannels times and only unpacked the same count by coincidence.
        out = map(lambda value: value / 8192., struct.unpack_from("%dh" % (nframes * nchannels), frames))
        # Apply the fft.
        x = numpy.fft.rfft(out)
        # Attenuate everything outside the kept band [s, e).
        l = len(x)
        for i in range(0, s):
            x[i] = x[i] * factor
        for i in range(e, l):
            x[i] = x[i] * factor
        # Inverse transform and keep only the real parts.
        y = numpy.fft.irfft(x)
        y = map(self._real, y)
        resp.addData('y', y)
        self.makeAudio(resp, 'ogg', framerate, y)
        return

    # s: 0 < Start < len(x)/2
    # e: 0 < End < len(x)/2
    def cutoutrange(self, resp, x, s, e, factor):
        """Scale the coefficients of *x* outside [s, e) by *factor* in place."""
        l = len(x)
        for i in range(0, s):
            x[i] = x[i] * factor
        for i in range(e, l):
            x[i] = x[i] * factor
        resp.addData('y', x)
        return

    def loadAudio(self, resp, type, name):
        """Load *name*.{ogg,wav}: return base64 ogg audio plus raw samples."""
        pathtowavefiles = '/share8/home/michael/www-store/audio/'
        fname = pathtowavefiles + os.path.basename(name) + '.wav'
        fogg = pathtowavefiles + os.path.basename(name) + '.ogg'
        # Read the ogg and ship it as a data URI.
        # (Fixed: the file handle was previously leaked.)
        with open(fogg, "r") as f:
            audio = f.read()
        audio = "data:audio/ogg;base64," + base64.b64encode(audio)
        resp.addData('audioB64', audio)
        # Read the raw wav samples.
        w = wave.open(fname, 'r')
        try:
            (nchannels, sampwidth, framerate, nframes, comptype, compname) = w.getparams()
            frames = w.readframes(nframes * nchannels)
        finally:
            w.close()
        out = map(lambda value: value / 8192., struct.unpack_from("%dh" % (nframes * nchannels), frames))
        # A downsampling step was previously computed here but disabled;
        # the full sample list is returned.
        resp.addData('audioData', out)
        resp.addData('seconds', (nframes * 1.0) / framerate)
        resp.addData('samplerate', framerate)
        return

    def makeAudio(self, resp, type, samplerate, data):
        """
        Encode *data* (float samples, clamped to [-4, 4]) as a temporary wav,
        convert it to ogg with oggenc, and return it as a base64 data URI.
        """
        fname = '/tmp/' + str(uuid.uuid4())
        fogg = fname + '.ogg'
        w = wave.open(fname, 'w')
        try:
            w.setnchannels(1)
            w.setsampwidth(2)
            w.setframerate(samplerate)
            w.setnframes(len(data))
            for sample in data:
                # Clamp before scaling to 16-bit integer range.
                if sample < -4:
                    sample = -4
                if sample > 4:
                    sample = 4
                w.writeframes(struct.pack('h', int(sample * 4000)))
        finally:
            w.close()
        ogg_process = subprocess.Popen(["oggenc", fname, "-Q", "-o", fogg],
                                       stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                                       stderr=subprocess.PIPE, shell=False)
        ogg_process.communicate('')
        # (Fixed: the ogg file handle was previously leaked before removal.)
        with open(fogg, "r") as f:
            audio = f.read()
        audio = "data:audio/ogg;base64," + base64.b64encode(audio)
        resp.addData('audioB64', audio)
        os.remove(fname)
        os.remove(fogg)
        return
| 33.80916 | 159 | 0.567171 |
0f19dc0e3993f475210b6f1b4d6a6d109697084d | 9,826 | py | Python | tests/relational_db_test.py | mdemeke/mickysbenchmarker | a61896f273577ea61a646f74bd3c4aa3d5996db2 | [
"Apache-2.0"
] | null | null | null | tests/relational_db_test.py | mdemeke/mickysbenchmarker | a61896f273577ea61a646f74bd3c4aa3d5996db2 | [
"Apache-2.0"
] | null | null | null | tests/relational_db_test.py | mdemeke/mickysbenchmarker | a61896f273577ea61a646f74bd3c4aa3d5996db2 | [
"Apache-2.0"
] | null | null | null | # Copyright 20121 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.relational_db."""
import unittest
from unittest import mock
from absl import flags
from perfkitbenchmarker import relational_db
from perfkitbenchmarker.configs import benchmark_config_spec
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
_COMPONENT = 'test_component'
def CreateTestLinuxVm():
  """Return a TestLinuxVirtualMachine built from a default test VM spec."""
  return pkb_common_test_case.TestLinuxVirtualMachine(
      vm_spec=pkb_common_test_case.CreateTestVmSpec())
class FakeRelationalDb(relational_db.BaseRelationalDb):
  """Concrete no-op BaseRelationalDb so the base-class helpers can be tested.

  All abstract lifecycle hooks are stubbed out; tests assign the attributes
  they need (endpoint, client_vm, server_vm) directly.
  """

  def GetEndpoint(self):
    # Unused; tests set db.endpoint directly.
    pass

  def GetPort(self):
    # Unused in these tests.
    pass

  def _Create(self):
    # No resource is actually provisioned.
    pass

  def _Delete(self):
    # Nothing to tear down.
    pass

  def GetDefaultEngineVersion(self, _):
    # The engine version comes from the spec in these tests.
    pass

  def _FailoverHA(self):
    # HA failover is out of scope for the fake.
    pass
class RelationalDbUnamangedTestCase(pkb_common_test_case.PkbCommonTestCase):
  """Tests for the self-managed (non-cloud-managed) relational DB helpers.

  Verifies the shell commands produced by the client/server query tools for
  mysql, postgres and sqlserver, plus the full MySQL install command stream.
  NOTE(review): "Unamanged" looks like a typo for "Unmanaged"; renaming the
  class could break external references, so the name is kept.
  """

  def setUp(self):
    super(RelationalDbUnamangedTestCase, self).setUp()
    # Minimal spec dicts per engine; only the fields _RelationalDbSpec
    # requires are provided.
    self.min_mysql_spec = {
        'cloud': 'GCP',
        'engine': 'mysql',
        'engine_version': '5.7',
        'db_spec': {
            'GCP': {
                'machine_type': 'n1-standard-1'
            }
        },
        'db_disk_spec': {
            'GCP': {
                'disk_size': 500
            }
        }
    }
    self.min_postgres_spec = {
        'cloud': 'GCP',
        'engine': 'postgres',
        'engine_version': '11',
        'db_spec': {
            'GCP': {
                'machine_type': 'n1-standard-1'
            }
        },
        'db_disk_spec': {
            'GCP': {
                'disk_size': 500
            }
        }
    }
    self.min_sqlserver_spec = {
        'cloud': 'GCP',
        'engine': 'sqlserver',
        'engine_version': '2019',
        'db_spec': {
            'GCP': {
                'machine_type': 'n1-standard-1'
            }
        },
        'db_disk_spec': {
            'GCP': {
                'disk_size': 500
            }
        }
    }
    self.mysql_spec = benchmark_config_spec._RelationalDbSpec(
        _COMPONENT, flag_values=FLAGS, **self.min_mysql_spec)
    self.postgres_spec = benchmark_config_spec._RelationalDbSpec(
        _COMPONENT, flag_values=FLAGS, **self.min_postgres_spec)
    self.sqlserver_spec = benchmark_config_spec._RelationalDbSpec(
        _COMPONENT, flag_values=FLAGS, **self.min_sqlserver_spec)

  def testMakePostgresClientCommand(self):
    # Client-side psql command should target the remote endpoint.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.postgres_spec)
    db.endpoint = '1.1.1.1'
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    self.assertEqual(
        db.client_vm_query_tools.MakeSqlCommand(
            'Select 1', database_name='postgresql'),
        'psql \'host=1.1.1.1 user=root password=perfkitbenchmarker dbname=postgresql\' -c "Select 1"'
    )

  def testIssuePostgresClientCommand(self):
    # IssueSqlCommand should run exactly one psql RemoteCommand on the client.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.postgres_spec)
    db.endpoint = '1.1.1.1'
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    with mock.patch.object(db.client_vm, 'RemoteCommand') as remote_command:
      db.client_vm_query_tools.IssueSqlCommand('Select 1', database_name='abc')
    command = [
        mock.call(
            'psql \'host=1.1.1.1 user=root password=perfkitbenchmarker'
            ' dbname=abc\' -c "Select 1"')
    ]
    self.assertCountEqual(remote_command.call_args_list, command)

  def testMakePostgresServerCommand(self):
    # Server-side psql command should target localhost, not the endpoint.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.postgres_spec)
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    db.endpoint = '1.1.1.1'
    self.assertEqual(
        db.server_vm_query_tools.MakeSqlCommand(
            'Select 1', database_name='postgresql'),
        'psql \'host=localhost user=root password=perfkitbenchmarker dbname=postgresql\' -c "Select 1"'
    )

  def testMakeMysqlCientCommand(self):
    # Client-side mysql command targets the remote endpoint on port 3306.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.mysql_spec)
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    db.endpoint = '1.1.1.1'
    self.assertEqual(
        db.client_vm_query_tools.MakeSqlCommand('Select 1'),
        'mysql -h 1.1.1.1 -P 3306 -u root'
        ' -pperfkitbenchmarker -e "Select 1"')

  def testMakeMysqlCommandWithLocalHost(self):
    # Server-side mysql command targets localhost.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.mysql_spec)
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    db.endpoint = '1.1.1.1'
    self.assertEqual(
        db.server_vm_query_tools.MakeSqlCommand('Select 1'),
        'mysql -h localhost -P 3306 -u root '
        '-pperfkitbenchmarker -e "Select 1"')

  def testMakeSqlserverCommand(self):
    # Client-side sqlcmd command targets the remote endpoint.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.sqlserver_spec)
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    db.endpoint = '1.1.1.1'
    self.assertEqual(
        db.client_vm_query_tools.MakeSqlCommand('Select 1'),
        '/opt/mssql-tools/bin/sqlcmd -S 1.1.1.1 -U root -P perfkitbenchmarker -Q "Select 1"'
    )

  def testMakeSqlserverCommandWithLocalHost(self):
    # Server-side sqlcmd command targets localhost.
    FLAGS['use_managed_db'].parse(False)
    db = FakeRelationalDb(self.sqlserver_spec)
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    db.endpoint = '1.1.1.1'
    self.assertEqual(
        db.server_vm_query_tools.MakeSqlCommand('Select 1'),
        '/opt/mssql-tools/bin/sqlcmd -S localhost -U root -P perfkitbenchmarker -Q "Select 1"'
    )

  def testInstallMYSQLServer(self):
    # Verifies the full stream of RemoteCommands issued by
    # _InstallMySQLServer (scratch relocation, apparmor config, my.cnf
    # edits, service restart and user grants), order-insensitively.
    FLAGS['use_managed_db'].parse(False)
    FLAGS['innodb_buffer_pool_size'].parse(100)
    db = FakeRelationalDb(self.mysql_spec)
    db.endpoint = '1.1.1.1'
    db.client_vm = CreateTestLinuxVm()
    db.server_vm = CreateTestLinuxVm()
    db.server_vm.IS_REBOOTABLE = False
    db.client_vm.IS_REBOOTABLE = False
    db.server_vm.GetScratchDir = mock.MagicMock(return_value='scratch')
    with mock.patch.object(db.server_vm, 'RemoteCommand') as remote_command:
      db._InstallMySQLServer()
    command = [
        mock.call('chmod 777 scratch'),
        mock.call('sudo service None stop'),
        mock.call('sudo mkdir -p /scratch/mysql'),
        mock.call('sudo mkdir -p /scratch/tmp'),
        mock.call('sudo chown mysql:mysql /scratch/mysql'),
        mock.call('sudo chown mysql:mysql /scratch/tmp'),
        mock.call('sudo rsync -avzh /var/lib/mysql/ /scratch/mysql'),
        mock.call('sudo rsync -avzh /tmp/ /scratch/tmp'),
        mock.call('df', should_log=True),
        mock.call(
            'echo "alias /var/lib/mysql -> /scratch/mysql," | sudo tee -a /etc/apparmor.d/tunables/alias'
        ),
        mock.call(
            'echo "alias /tmp -> /scratch/tmp," | sudo tee -a /etc/apparmor.d/tunables/alias'
        ),
        mock.call(
            'sudo sed -i "s|# Allow data files dir access| /scratch/mysql/ r, /scratch/mysql/** rwk, /scratch/tmp/ r, /scratch/tmp/** rwk, /proc/*/status r, /sys/devices/system/node/ r, /sys/devices/system/node/node*/meminfo r, /sys/devices/system/node/*/* r, /sys/devices/system/node/* r, # Allow data files dir access|g" /etc/apparmor.d/usr.sbin.mysqld'
        ),
        mock.call('sudo apparmor_parser -r /etc/apparmor.d/usr.sbin.mysqld'),
        mock.call('sudo systemctl restart apparmor'),
        mock.call(
            'sudo sed -i "s|datadir\t\t= /var/lib/mysql|datadir\t\t= /scratch/mysql|g" None'
        ),
        mock.call(
            'sudo sed -i "s|tmpdir\t\t= /tmp|tmpdir\t\t= /scratch/tmp|g" None'),
        mock.call(
            'echo "\ninnodb_buffer_pool_size = 100G\ninnodb_flush_method = O_DIRECT\ninnodb_flush_neighbors = 0\ninnodb_log_file_size = 1000M" | sudo tee -a None'
        ),
        mock.call(
            'echo "\nskip-name-resolve\nconnect_timeout = 86400\nwait_timeout = 86400\ninteractive_timeout = 86400" | sudo tee -a None'
        ),
        mock.call('sudo sed -i "s/bind-address/#bind-address/g" None'),
        mock.call(
            'sudo sed -i "s/max_allowed_packet\t= 16M/max_allowed_packet\t= 1024M/g" None'
        ),
        mock.call('echo "\nlog_error_verbosity = 3" | sudo tee -a None'),
        mock.call(
            'sudo cat /etc/mysql/mysql.conf.d/mysql.sock',
            ignore_failure=True,
            should_log=True),
        mock.call('sudo service None restart'),
        mock.call('sudo cat None', should_log=True),
        mock.call(
            'sudo mysql -h localhost -P 3306 -u root -pperfkitbenchmarker '
            '-e "SET GLOBAL max_connections=8000;"'),
        mock.call(
            'sudo mysql -h localhost -P 3306 -u root -pperfkitbenchmarker -e '
            '"CREATE USER \'root\'@\'None\' '
            'IDENTIFIED BY \'perfkitbenchmarker\';"'),
        mock.call(
            'sudo mysql -h localhost -P 3306 -u root -pperfkitbenchmarker -e '
            '"GRANT ALL PRIVILEGES ON *.* TO \'root\'@\'None\';"'),
        mock.call(
            'sudo mysql -h localhost -P 3306 -u root -pperfkitbenchmarker -e '
            '"FLUSH PRIVILEGES;"')
    ]
    self.assertCountEqual(remote_command.call_args_list, command)
if __name__ == '__main__':
unittest.main()
| 35.601449 | 356 | 0.638103 |
9227817dcff0732d942678701a58332f2195db22 | 6,493 | py | Python | databricks_sync/sdk/generators/__init__.py | databrickslabs/databricks-sync | 24bece3390df5f07ca3c5556da384c4b4087e57a | [
"Apache-2.0"
] | 20 | 2020-11-13T22:19:02.000Z | 2022-01-21T22:17:31.000Z | databricks_sync/sdk/generators/__init__.py | databrickslabs/databricks-sync | 24bece3390df5f07ca3c5556da384c4b4087e57a | [
"Apache-2.0"
] | 54 | 2020-11-17T19:50:59.000Z | 2022-03-29T21:31:34.000Z | databricks_sync/sdk/generators/__init__.py | databrickslabs/databricks-sync | 24bece3390df5f07ca3c5556da384c4b4087e57a | [
"Apache-2.0"
] | 6 | 2020-12-03T18:03:22.000Z | 2022-02-19T20:21:26.000Z | import copy
import fnmatch
import os
import re
from typing import List, Dict, Any
from dotty_dict import Dotty
from databricks_sync import log
def drop_all_but(dictionary, *fields, dictionary_name=None):
    """Return a deep copy of *dictionary* keeping only the given *fields*.

    Every dropped key is logged at debug level; *dictionary_name* is only
    used to make those log lines readable.
    """
    kept = copy.deepcopy(dictionary)
    # Snapshot the keys first so we can delete while "iterating".
    for key in [k for k in kept if k not in fields]:
        log.debug(f"Found key: {key} in {dictionary_name} but it should not be there for terraform.")
        del kept[key]
    return kept
def match_patterns(string, patterns) -> (bool, List[str]):
    """Glob-match *string* against every pattern in *patterns*.

    Returns a tuple ``(matched_any, matching_patterns)``.
    """
    hits = [pattern for pattern in patterns if fnmatch.fnmatch(string, pattern)]
    return len(hits) > 0, hits
class PathInclusionParser(object):
    """Answers whether a path should be exported, given inclusion globs.

    Patterns without a wildcard get an implicit recursive ``/**`` suffix, and
    a ``dbfs:`` filesystem prefix is stripped before matching. The parser also
    remembers every path it has been asked about so repeated subtree walks can
    be pruned (see ``is_path_included``).
    """

    def __init__(self, path_patterns: List[str], resource_type):
        self.__resource_type = resource_type
        self.__path_patterns = [self.__add_implicit_recursive_glob(path) for path in path_patterns]
        self.__base_paths = self.get_base_paths()
        # Normalize for dbfs and remove any file system for processing pattern matching
        self.__all_path_patterns = [pat.lstrip("dbfs:") for pat in list(set(self.__path_patterns + self.__base_paths))]
        # Paths already answered; used to short-circuit repeated queries.
        self.__processed_paths = set()

    def __add_implicit_recursive_glob(self, path: str):
        # A pattern with no wildcard implicitly includes everything below it.
        if "*" in path:
            return path
        if path.endswith("/"):
            return path + "**"
        else:
            return path + "/**"

    def __path_parts(self, path):
        # Split *path* into its components, leftmost component first.
        path_part_list = []
        while True:
            parts = os.path.split(path)
            if parts[0] == path:
                break
            else:
                path = parts[0]
            path_part_list.insert(0, parts[1])
        return path_part_list

    def __get_base_path(self, path):
        # Longest literal (wildcard-free) prefix of the pattern.
        parts = self.__path_parts(path.lstrip("dbfs:"))
        actual_path_parts = []
        for part in parts:
            if "*" in part:
                break
            actual_path_parts.append(part)
        # Normalize for dbfs and remove any file system
        if path.startswith("dbfs:"):
            return "dbfs:/" + "/".join(actual_path_parts)
        else:
            return "/" + "/".join(actual_path_parts)

    def get_base_paths(self):
        # Literal roots of all configured patterns (tree-walk entry points).
        return [self.__get_base_path(path) for path in self.__path_patterns]

    def __is_path_processed(self, path):
        # Record-and-test: True only for paths seen on an earlier call.
        if path in self.__processed_paths:
            return True
        else:
            self.__processed_paths.add(path)
            return False

    def is_path_included(self, path):
        """Return True when *path* matches an inclusion or base-path pattern.

        Side effect: marks *path* as processed, so asking about the same
        path a second time always returns False (prunes repeated walks).
        """
        # Skip any path this parser has already answered for.
        if self.__is_path_processed(path) is True:
            log.debug(f"[PathInclusion] Path: {path} has been processed will skip going down this tree.")
            return False
        # Normalize for dbfs and remove any file system
        is_included, matched_patterns = match_patterns(path.lstrip("dbfs:"), self.__all_path_patterns)
        if is_included is True:
            log.debug(f"[PathInclusion] {self.__resource_type}: {path} path matched the following inclusion patterns: "
                      f"{matched_patterns} from the full set of: {self.__all_path_patterns}")
        return is_included

    @property
    def base_paths(self):
        # Public read-only view of the literal pattern roots.
        return self.__base_paths
class PathExclusionParser(object):
    """Decides whether a path matches any configured exclusion glob."""

    def __init__(self, exclude_path, resource_type):
        self.__resource_type = resource_type
        # Normalize input to None (no exclusions) or a list of glob patterns.
        if exclude_path is None:
            self.__exclude_paths = None
        elif isinstance(exclude_path, str):
            self.__exclude_paths = [exclude_path]
        else:
            self.__exclude_paths = exclude_path

    def is_path_excluded(self, path):
        """Return True when *path* matches at least one exclusion pattern."""
        if self.__exclude_paths is None:
            # Nothing configured: nothing is ever excluded.
            return False
        # Strip any dbfs: filesystem prefix on both sides before matching.
        normalized_path = path.lstrip("dbfs:")
        normalized_patterns = [pat.lstrip("dbfs:") for pat in self.__exclude_paths]
        excluded, hits = match_patterns(normalized_path, normalized_patterns)
        if excluded:
            log.debug(f"[PathExclusion] {self.__resource_type}: {path} path matched the following exclusion patterns: "
                      f"{hits} from the full set of: {self.__exclude_paths}")
        return excluded
class RegexFilterCompileError(ValueError):
    """Raised when a configured filter pattern is not a valid regular expression."""
    pass
class LocalFilterBy:
    """Compiles a {dotted-key: regex(es)} filter and tests raw API objects.

    NOTE(review): ``is_in_criteria`` returns True when ANY configured pattern
    fully matches its key's value (an OR across all keys and patterns) —
    confirm that is the intended semantics before relying on it.
    """

    def __init__(self, filter_dictionary: Dict[str, Any], resource, raw_id_func):
        self._filter_dictionary = filter_dictionary
        # key -> list of compiled regex patterns
        self._compiled_dictionary = {}
        self._resource = resource
        # Callable that extracts a log-friendly id from the raw input data.
        self._raw_id_func = raw_id_func
        self.__compile()

    def __compile(self):
        # Eagerly compile every configured pattern; fail fast on bad regexes.
        if self._filter_dictionary is None:
            return
        for key, value in self._filter_dictionary.items():
            self._compiled_dictionary[key] = []
            values = self._listify(value)
            for pattern in values:
                try:
                    self._compiled_dictionary[key].append(re.compile(pattern))
                except re.error as e:
                    raise RegexFilterCompileError(f'Failed to compile "{pattern}" for key: "{key}"')

    def _listify(self, val) -> List[Any]:
        # Wrap scalars in a list; pass lists through; anything else becomes [].
        if isinstance(val, str) or isinstance(val, int) or isinstance(val, float):
            return [val]
        elif isinstance(val, list):
            return val
        else:
            return []

    def is_in_criteria(self, input_data: Dict[str, Any]):
        """Return True if *input_data* matches the filter (or no filter set)."""
        if self._filter_dictionary is None:
            return True
        # Track which patterns matched / missed (misses are only collected,
        # not otherwise used).
        results = {
            True: [],
            False: []
        }
        # Dotty allows dotted-path lookups such as "settings.cluster_name".
        d = Dotty(input_data)
        for key, value in self._compiled_dictionary.items():
            values = self._listify(value)
            for pattern in values:
                actual_value = d.get(key, None)
                if actual_value is not None and re.fullmatch(pattern, actual_value):
                    results[True].append(pattern)
                else:
                    results[False].append(pattern)
            log.debug(f"[{self._resource}][{self._raw_id_func(input_data)}] Key: {key} Matched {results[True]}")
        return any(results[True])
| 35.872928 | 119 | 0.615894 |
4604dbc7bdea5ede54c22d5077851a0ff418036d | 107 | py | Python | project/transactions/apps.py | njncalub/mapera | e9f636d74de2f0df568dcba95d159dfe2a4d4b5f | [
"MIT"
] | null | null | null | project/transactions/apps.py | njncalub/mapera | e9f636d74de2f0df568dcba95d159dfe2a4d4b5f | [
"MIT"
] | 14 | 2019-07-13T10:35:04.000Z | 2021-09-08T01:13:56.000Z | project/transactions/apps.py | njncalub/ippon | e9f636d74de2f0df568dcba95d159dfe2a4d4b5f | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class TransactionsConfig(AppConfig):
    """Django application configuration for the transactions app."""
    # Dotted Python path to the application this config applies to.
    name = 'project.transactions'
| 17.833333 | 36 | 0.785047 |
9e7374abd1bc77472e599eb87f86c25a3ab4e603 | 668 | py | Python | qiskit/aqua/parser/__init__.py | chunfuchen/aqua | fde435203a2799433a4e50897554fa226c8ff1dc | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/parser/__init__.py | chunfuchen/aqua | fde435203a2799433a4e50897554fa226c8ff1dc | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/parser/__init__.py | chunfuchen/aqua | fde435203a2799433a4e50897554fa226c8ff1dc | [
"Apache-2.0"
] | 2 | 2020-02-13T02:17:58.000Z | 2020-08-09T07:56:25.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Parser and schema packages """
from .json_schema import JSONSchema
from .base_parser import BaseParser
__all__ = ['JSONSchema',
'BaseParser']
| 30.363636 | 77 | 0.730539 |
caac9ecf55342b1354e40f8aac10c9a43934d0f6 | 424 | py | Python | foodx_devops_tools/release_flow_entry.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 3 | 2021-06-23T20:53:43.000Z | 2022-01-26T14:19:43.000Z | foodx_devops_tools/release_flow_entry.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 33 | 2021-08-09T15:44:51.000Z | 2022-03-03T18:28:02.000Z | foodx_devops_tools/release_flow_entry.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 1 | 2021-06-23T20:53:52.000Z | 2021-06-23T20:53:52.000Z | #!python3
# Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Release flow utility."""
from .release_flow import release_flow
def flit_entry() -> None:
    """Console-script entry point for the ``foodx-release-flow`` utility (registered via flit)."""
    release_flow()
| 24.941176 | 72 | 0.724057 |
149f2f951760a17643da9d57d8741f501d39e656 | 41,237 | py | Python | MQTT Shims.indigoPlugin/Contents/Server Plugin/plugin.py | hishamk/Indigo-Shims | 802002aa866d9ad96b04f8553c0a82f37e25deca | [
"Unlicense"
] | null | null | null | MQTT Shims.indigoPlugin/Contents/Server Plugin/plugin.py | hishamk/Indigo-Shims | 802002aa866d9ad96b04f8553c0a82f37e25deca | [
"Unlicense"
] | null | null | null | MQTT Shims.indigoPlugin/Contents/Server Plugin/plugin.py | hishamk/Indigo-Shims | 802002aa866d9ad96b04f8553c0a82f37e25deca | [
"Unlicense"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
import logging
import json
import pystache
from Queue import Queue
kCurDevVersCount = 0 # current version of plugin devices
################################################################################
class Plugin(indigo.PluginBase):
########################################
# Main Plugin methods
########################################
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
pfmt = logging.Formatter('%(asctime)s.%(msecs)03d\t[%(levelname)8s] %(name)20s.%(funcName)-25s%(msg)s', datefmt='%Y-%m-%d %H:%M:%S')
self.plugin_file_handler.setFormatter(pfmt)
try:
self.logLevel = int(self.pluginPrefs[u"logLevel"])
except:
self.logLevel = logging.INFO
self.indigo_log_handler.setLevel(self.logLevel)
self.logger.threaddebug(u"logLevel = " + str(self.logLevel))
    def startup(self):
        """Initialize runtime state and subscribe to MQTT Connector broadcasts."""
        indigo.server.log(u"Starting MQTT Shims")
        self.triggers = {}            # trigger.id -> trigger, maintained by triggerStart/StopProcessing
        self.shimDevices = []         # device ids of active shim devices
        self.messageTypesWanted = []  # message_type of each active device (parallel to shimDevices)
        self.messageQueue = Queue()   # notifications queued by message_handler, drained in runConcurrentThread
        self.mqttPlugin = indigo.server.getPlugin("com.flyingdiver.indigoplugin.mqtt")
        # Broadcast fires whenever the MQTT Connector plugin queues a message.
        indigo.server.subscribeToBroadcast(u"com.flyingdiver.indigoplugin.mqtt", u"com.flyingdiver.indigoplugin.mqtt-message_queued", "message_handler")
    def message_handler(self, notification):
        """Broadcast callback from the MQTT Connector plugin.

        Only enqueues the notification; the actual fetch/dispatch work is
        deferred to runConcurrentThread via messageQueue.
        """
        self.logger.debug(u"received notification of MQTT message type {} from {}".format(notification["message_type"], indigo.devices[int(notification["brokerID"])].name))
        self.messageQueue.put(notification)
    def shutdown(self):
        """Plugin shutdown hook; nothing to release beyond a log message."""
        indigo.server.log(u"Shutting down MQTT Shims")
def deviceStartComm(self, device):
self.logger.info(u"{}: Starting Device".format(device.name))
instanceVers = int(device.pluginProps.get('devVersCount', 0))
if instanceVers >= kCurDevVersCount:
self.logger.threaddebug(u"{}: Device Version is up to date".format(device.name))
elif instanceVers < kCurDevVersCount:
newProps = device.pluginProps
instanceVers = int(device.pluginProps.get('devVersCount', 0))
newProps["devVersCount"] = kCurDevVersCount
device.replacePluginPropsOnServer(newProps)
device.stateListOrDisplayStateIdChanged()
self.logger.threaddebug(u"{}: Updated to version {}".format(device.name, kCurDevVersCount))
else:
self.logger.error(u"{}: Unknown device version: {}".format(device.name. instanceVers))
assert device.id not in self.shimDevices
self.shimDevices.append(device.id)
self.messageTypesWanted.append(device.pluginProps['message_type'])
    def deviceStopComm(self, device):
        """Deactivate a shim device: unregister it and its wanted message type."""
        self.logger.info(u"{}: Stopping Device".format(device.name))
        assert device.id in self.shimDevices
        self.shimDevices.remove(device.id)
        # list.remove() drops only the first occurrence, matching the entry
        # appended for this device in deviceStartComm.
        self.messageTypesWanted.remove(device.pluginProps['message_type'])
def didDeviceCommPropertyChange(self, oldDevice, newDevice):
if oldDevice.pluginProps.get('SupportsBatteryLevel', None) != newDevice.pluginProps.get('SupportsBatteryLevel', None):
return True
if oldDevice.pluginProps.get('message_type', None) != newDevice.pluginProps.get('message_type', None):
return True
return False
    def triggerStartProcessing(self, trigger):
        """Register a plugin trigger so update() can fire it on device changes."""
        self.logger.debug("{}: Adding Trigger".format(trigger.name))
        # Only the two trigger types defined by this plugin are expected.
        assert trigger.pluginTypeId in ["deviceUpdated", "stateUpdated"]
        assert trigger.id not in self.triggers
        self.triggers[trigger.id] = trigger
    def triggerStopProcessing(self, trigger):
        """Unregister a plugin trigger previously added by triggerStartProcessing."""
        self.logger.debug("{}: Removing Trigger".format(trigger.name))
        assert trigger.id in self.triggers
        del self.triggers[trigger.id]
    def runConcurrentThread(self):
        """Main worker loop: drain queued MQTT notifications until stopped."""
        try:
            while True:
                if not self.mqttPlugin.isEnabled():
                    # Cannot fetch messages without the MQTT Connector; retry later.
                    self.logger.error(u"processMessages: MQTT Connector plugin not enabled, aborting.")
                    self.sleep(60)
                else:
                    self.processMessages()
                self.sleep(0.1)
        except self.stopThread:
            # Raised from self.sleep() when Indigo asks the plugin to stop.
            pass
def processMessages(self):
while not self.messageQueue.empty():
notification = self.messageQueue.get()
if not notification:
return
if notification["message_type"] in self.messageTypesWanted:
props = { 'message_type': notification["message_type"] }
brokerID = int(notification['brokerID'])
while True:
message_data = self.mqttPlugin.executeAction("fetchQueuedMessage", deviceId=brokerID, props=props, waitUntilDone=True)
if message_data == None:
break
for deviceID in self.shimDevices:
device = indigo.devices[deviceID]
if device.pluginProps['message_type'] == notification["message_type"]:
self.logger.debug(u"{}: processMessages: '{}' {} -> {}".format(device.name, notification["message_type"], '/'.join(message_data["topic_parts"]), message_data["payload"]))
self.update(device, message_data)
def update(self, device, message_data):
try:
if device.pluginProps['uid_location'] == "topic":
message_address = message_data["topic_parts"][int(device.pluginProps['uid_location_topic_field'])]
self.logger.threaddebug(u"{}: update topic message_address = {}".format(device.name, message_address))
elif device.pluginProps['uid_location'] == "payload":
try:
payload = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for uid_location = payload, aborting".format(device.name))
return
message_address = payload[device.pluginProps['uid_location_payload_key']]
self.logger.debug(u"{}: update json message_address = {}".format(device.name, message_address))
else:
self.logger.debug(u"{}: update can't determine address location".format(device.name))
return
except Exception as e:
self.logger.error(u"{}: Failed to find Unique ID in '{}': {}".format(device.name, device.pluginProps['uid_location'], e))
return
if device.pluginProps['address'] != message_address:
self.logger.debug(u"{}: update address mismatch: {} != {}".format(device.name, device.pluginProps['address'], message_address))
return
try:
if device.pluginProps.get('state_location', None) == "topic":
i = int(device.pluginProps['state_location_topic'])
self.logger.threaddebug(u"{}: update state_location_topic = {}".format(device.name, i))
state_key = 'value'
state_data = { state_key : message_data["topic_parts"][i] }
elif (device.pluginProps.get('state_location', None) == "payload") and (device.pluginProps.get('state_location_payload_type', None) == "raw"):
state_key = 'value'
state_data = { state_key : message_data["payload"]}
elif (device.pluginProps.get('state_location', None) == "payload") and (device.pluginProps.get('state_location_payload_type', None) == "json"):
state_key = device.pluginProps.get('state_location_payload_key', None)
try:
state_data = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for state_location = payload, aborting".format(device.name))
return
self.logger.threaddebug(u"{}: update state_location_payload, key = {}".format(device.name, state_key))
else:
self.logger.debug(u"{}: update can't determine value location".format(device.name))
except Exception as e:
self.logger.error(u"{}: update error determining message value: {}".format(device.name, e))
return
if device.deviceTypeId == "shimRelay":
value = self.recurseDict(state_key, state_data)
self.logger.debug(u"{}: shimRelay, state_key = {}, state_data = {}, value = {}".format(device.name, state_key, state_data, value))
if value == None:
self.logger.debug(u"{}: state_key {} not found in payload".format(device.name, state_key))
return
on_value = device.pluginProps.get('state_on_value', None)
if not on_value:
if value.lower() in ['off', 'false', '0']:
value = False
else:
value = True
else:
value = (value == on_value)
self.logger.debug(u"{}: Updating state to {}".format(device.name, value))
if device.pluginProps["shimSensorSubtype"] == "Generic":
device.updateStateOnServer(key='onOffState', value=value)
if value:
device.updateStateImageOnServer(indigo.kStateImageSel.SensorOn)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
elif device.pluginProps["shimSensorSubtype"] == "MotionSensor":
device.updateStateOnServer(key='onOffState', value=value)
if value:
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
elif device.pluginProps["shimSensorSubtype"] == "Power":
device.updateStateOnServer(key='onOffState', value=value)
if value:
device.updateStateImageOnServer(indigo.kStateImageSel.PowerOn)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.PowerOff)
if bool(device.pluginProps.get('SupportsBatteryLevel', False)):
battery = self.recurseDict(device.pluginProps['battery_payload_key'], state_data)
device.updateStateOnServer('batteryLevel', battery, uiValue='{}%'.format(battery))
if bool(device.pluginProps.get('SupportsEnergyMeter', False)) and ("accumEnergyTotal" in device.states):
energy = self.recurseDict(device.pluginProps['energy_payload_key'], state_data)
device.updateStateOnServer('accumEnergyTotal', energy, uiValue='{} kWh'.format(energy))
if bool(device.pluginProps.get('SupportsEnergyMeterCurPower', False)) and ("curEnergyLevel" in device.states):
power = self.recurseDict(device.pluginProps['power_payload_key'], state_data)
device.updateStateOnServer('curEnergyLevel', power, uiValue='{} W'.format(power))
states_key = device.pluginProps.get('state_dict_payload_key', None)
if not states_key:
return
try:
data = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for payload, aborting".format(device.name))
return
self.logger.threaddebug(u"{}: update state_dict_payload_key, key = {}".format(device.name, states_key))
states_dict = self.recurseDict(states_key, data)
if not states_dict:
return
elif type(states_dict) != dict:
self.logger.error(u"{}: Device config error, bad Multi-States Key value: {}".format(device.name, states_key))
return
elif not len(states_dict) > 0:
self.logger.warning(u"{}: Possible device config error, Multi-States Key {} returns empty dict.".format(device.name, states_key))
return
old_states = device.pluginProps.get("states_list", indigo.List())
new_states = indigo.List()
states_list = []
for key in states_dict:
new_states.append(key)
states_list.append({'key': key, 'value': states_dict[key], 'decimalPlaces': 2})
if old_states != new_states:
self.logger.threaddebug(u"{}: update, new states_list: {}".format(device.name, new_states))
newProps = device.pluginProps
newProps["states_list"] = new_states
device.replacePluginPropsOnServer(newProps)
device.stateListOrDisplayStateIdChanged()
device.updateStatesOnServer(states_list)
elif device.deviceTypeId == "shimDimmer":
state = self.recurseDict(state_key, state_data)
if state == None:
self.logger.debug(u"{}: state_key {} not found in payload".format(device.name, state_key))
return
self.logger.debug(u"{}: state = {}".format(device.name, state))
value_key = device.pluginProps['value_location_payload_key']
value = self.recurseDict(value_key, state_data)
self.logger.debug(u"{}: shimDimmer, state_key = {}, value_key = {}, data = {}, state = {}, value = {}".format(device.name, state_key, value_key, state_data, state, value))
if value != None:
self.logger.debug(u"{}: Updating brightnessLevel to {}".format(device.name, value))
device.updateStateOnServer(key='brightnessLevel', value=value)
else:
if state.lower() in ['off', 'false', '0']:
state = False
else:
state = True
self.logger.debug(u"{}: No brightnessLevel, setting onOffState to {}".format(device.name, state))
device.updateStateOnServer(key='onOffState', value=state)
if bool(device.pluginProps.get('SupportsBatteryLevel', False)):
battery = self.recurseDict(device.pluginProps['battery_payload_key'], state_data)
device.updateStateOnServer('batteryLevel', battery, uiValue='{}%'.format(battery))
if bool(device.pluginProps.get('SupportsEnergyMeter', False)) and ("accumEnergyTotal" in device.states):
energy = self.recurseDict(device.pluginProps['energy_payload_key'], state_data)
device.updateStateOnServer('accumEnergyTotal', energy, uiValue='{} kWh'.format(energy))
if bool(device.pluginProps.get('SupportsEnergyMeterCurPower', False)) and ("curEnergyLevel" in device.states):
power = self.recurseDict(device.pluginProps['power_payload_key'], state_data)
device.updateStateOnServer('curEnergyLevel', power, uiValue='{} W'.format(power))
states_key = device.pluginProps.get('state_dict_payload_key', None)
if not states_key:
return
try:
data = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for payload, aborting".format(device.name))
return
self.logger.threaddebug(u"{}: update state_dict_payload_key, key = {}".format(device.name, states_key))
states_dict = self.recurseDict(states_key, data)
if not states_dict:
return
elif type(states_dict) != dict:
self.logger.error(u"{}: Device config error, bad Multi-States Key value: {}".format(device.name, states_key))
return
elif not len(states_dict) > 0:
self.logger.warning(u"{}: Possible device config error, Multi-States Key {} returns empty dict.".format(device.name, states_key))
return
old_states = device.pluginProps.get("states_list", indigo.List())
new_states = indigo.List()
states_list = []
for key in states_dict:
new_states.append(key)
states_list.append({'key': key, 'value': states_dict[key], 'decimalPlaces': 2})
if old_states != new_states:
self.logger.threaddebug(u"{}: update, new states_list: {}".format(device.name, new_states))
newProps = device.pluginProps
newProps["states_list"] = new_states
device.replacePluginPropsOnServer(newProps)
device.stateListOrDisplayStateIdChanged()
device.updateStatesOnServer(states_list)
elif device.deviceTypeId == "shimOnOffSensor":
value = self.recurseDict(state_key, state_data)
self.logger.debug(u"{}: shimOnOffSensor, state_key = {}, data = {}, value = {}".format(device.name, state_key, state_data, value))
on_value = device.pluginProps.get('state_on_value', None)
if not on_value:
if value in ['off', 'Off', 'OFF', False, '0', 0]:
value = False
else:
value = True
else:
value = (value == on_value)
self.logger.debug(u"{}: Updating state to {}".format(device.name, value))
if device.pluginProps["shimSensorSubtype"] == "Generic":
device.updateStateOnServer(key='onOffState', value=value)
if value:
device.updateStateImageOnServer(indigo.kStateImageSel.SensorOn)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
elif device.pluginProps["shimSensorSubtype"] == "MotionSensor":
device.updateStateOnServer(key='onOffState', value=value)
if value:
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
elif device.pluginProps["shimSensorSubtype"] == "Power":
device.updateStateOnServer(key='onOffState', value=value)
if value:
device.updateStateImageOnServer(indigo.kStateImageSel.PowerOn)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.PowerOff)
if bool(device.pluginProps.get('SupportsBatteryLevel', False)):
battery = self.recurseDict(device.pluginProps['battery_payload_key'], state_data)
device.updateStateOnServer('batteryLevel', battery, uiValue='{}%'.format(battery))
if bool(device.pluginProps.get('SupportsEnergyMeter', False)) and ("accumEnergyTotal" in device.states):
energy = self.recurseDict(device.pluginProps['energy_payload_key'], state_data)
device.updateStateOnServer('accumEnergyTotal', energy, uiValue='{} kWh'.format(energy))
if bool(device.pluginProps.get('SupportsEnergyMeterCurPower', False)) and ("curEnergyLevel" in device.states):
power = self.recurseDict(device.pluginProps['power_payload_key'], state_data)
device.updateStateOnServer('curEnergyLevel', power, uiValue='{} W'.format(power))
states_key = device.pluginProps.get('state_dict_payload_key', None)
if not states_key:
return
try:
data = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for payload, aborting".format(device.name))
return
self.logger.threaddebug(u"{}: update state_dict_payload_key, key = {}".format(device.name, states_key))
states_dict = self.recurseDict(states_key, data)
if not states_dict:
return
elif type(states_dict) != dict:
self.logger.error(u"{}: Device config error, bad Multi-States Key value: {}".format(device.name, states_key))
return
elif not len(states_dict) > 0:
self.logger.warning(u"{}: Possible device config error, Multi-States Key {} returns empty dict.".format(device.name, states_key))
return
old_states = device.pluginProps.get("states_list", indigo.List())
new_states = indigo.List()
states_list = []
for key in states_dict:
new_states.append(key)
states_list.append({'key': key, 'value': states_dict[key], 'decimalPlaces': 2})
if old_states != new_states:
self.logger.threaddebug(u"{}: update, new states_list: {}".format(device.name, new_states))
newProps = device.pluginProps
newProps["states_list"] = new_states
device.replacePluginPropsOnServer(newProps)
device.stateListOrDisplayStateIdChanged()
device.updateStatesOnServer(states_list)
elif device.deviceTypeId == "shimValueSensor":
value = self.recurseDict(state_key, state_data)
self.logger.debug(u"{}: shimValueSensor, key = {}, data = {}, value = {}".format(device.name, state_key, state_data, value))
if value == None:
self.logger.debug(u"{}: state_key {} not found in payload".format(device.name, state_key))
return
try:
value = float(value)
except (TypeError, ValueError) as e:
self.logger.error(u"{}: update unable to convert '{}' to float: {}".format(device.name, value, e))
return
function = device.pluginProps.get("adjustmentFunction", None)
self.logger.threaddebug(u"{}: update adjustmentFunction: '{}'".format(device.name, function))
if function:
prohibited = ['indigo', 'requests', 'pyserial', 'oauthlib']
if any(x in function for x in prohibited):
self.logger.warning(u"{}: Invalid method in adjustmentFunction: '{}'".format(device.name, function))
else:
x = value
value = eval(function)
self.logger.debug(u"{}: Updating state to {}".format(device.name, value))
if device.pluginProps["shimSensorSubtype"] == "Generic":
precision = device.pluginProps.get("shimSensorPrecision", "2")
device.updateStateImageOnServer(indigo.kStateImageSel.None)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f}'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Temperature-F":
precision = device.pluginProps.get("shimSensorPrecision", "1")
device.updateStateImageOnServer(indigo.kStateImageSel.TemperatureSensorOn)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} °F'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Temperature-C":
precision = device.pluginProps.get("shimSensorPrecision", "1")
device.updateStateImageOnServer(indigo.kStateImageSel.TemperatureSensorOn)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} °C'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Humidity":
precision = device.pluginProps.get("shimSensorPrecision", "0")
device.updateStateImageOnServer(indigo.kStateImageSel.HumiditySensorOn)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f}%'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Pressure-inHg":
precision = device.pluginProps.get("shimSensorPrecision", "2")
device.updateStateImageOnServer(indigo.kStateImageSel.None)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} inHg'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Pressure-mb":
precision = device.pluginProps.get("shimSensorPrecision", "2")
device.updateStateImageOnServer(indigo.kStateImageSel.None)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} mb'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Power-W":
precision = device.pluginProps.get("shimSensorPrecision", "0")
device.updateStateImageOnServer(indigo.kStateImageSel.EnergyMeterOn)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} W'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Luminance":
precision = device.pluginProps.get("shimSensorPrecision", "0")
device.updateStateImageOnServer(indigo.kStateImageSel.LightSensorOn)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} lux'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "Luminance%":
precision = device.pluginProps.get("shimSensorPrecision", "0")
device.updateStateImageOnServer(indigo.kStateImageSel.LightSensorOn)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f}%'.format(value, prec=precision))
elif device.pluginProps["shimSensorSubtype"] == "ppm":
precision = device.pluginProps.get("shimSensorPrecision", "0")
device.updateStateImageOnServer(indigo.kStateImageSel.None)
device.updateStateOnServer(key='sensorValue', value=value, decimalPlaces=int(precision), uiValue=u'{:.{prec}f} ppm'.format(value, prec=precision))
else:
self.logger.debug(u"{}: update, unknown shimSensorSubtype: {}".format(device.name, device.pluginProps["shimSensorSubtype"]))
if bool(device.pluginProps.get('SupportsBatteryLevel', False)):
battery = self.recurseDict(device.pluginProps['battery_payload_key'], state_data)
device.updateStateOnServer('batteryLevel', battery, uiValue='{}%'.format(battery))
if bool(device.pluginProps.get('SupportsEnergyMeter', False)) and ("accumEnergyTotal" in device.states):
energy = self.recurseDict(device.pluginProps['energy_payload_key'], state_data)
device.updateStateOnServer('accumEnergyTotal', energy, uiValue='{} kWh'.format(energy))
if bool(device.pluginProps.get('SupportsEnergyMeterCurPower', False)) and ("curEnergyLevel" in device.states):
power = self.recurseDict(device.pluginProps['power_payload_key'], state_data)
device.updateStateOnServer('curEnergyLevel', power, uiValue='{} W'.format(power))
states_key = device.pluginProps.get('state_dict_payload_key', None)
if not states_key:
return
try:
data = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for payload, aborting".format(device.name))
return
self.logger.threaddebug(u"{}: update state_dict_payload_key, key = {}".format(device.name, states_key))
states_dict = self.recurseDict(states_key, data)
if not states_dict:
return
elif type(states_dict) != dict:
self.logger.error(u"{}: Device config error, bad Multi-States Key value: {}".format(device.name, states_key))
return
elif not len(states_dict) > 0:
self.logger.warning(u"{}: Possible device config error, Multi-States Key {} returns empty dict.".format(device.name, states_key))
return
old_states = device.pluginProps.get("states_list", indigo.List())
new_states = indigo.List()
states_list = []
for key in states_dict:
new_states.append(key)
states_list.append({'key': key, 'value': states_dict[key], 'decimalPlaces': 2})
if old_states != new_states:
self.logger.threaddebug(u"{}: update, new states_list: {}".format(device.name, new_states))
newProps = device.pluginProps
newProps["states_list"] = new_states
device.replacePluginPropsOnServer(newProps)
device.stateListOrDisplayStateIdChanged()
device.updateStatesOnServer(states_list)
elif device.deviceTypeId == "shimGeneric":
if bool(device.pluginProps.get('SupportsBatteryLevel', False)):
battery = self.recurseDict(device.pluginProps['battery_payload_key'], state_data)
device.updateStateOnServer('batteryLevel', battery, uiValue='{}%'.format(battery))
if bool(device.pluginProps.get('SupportsEnergyMeter', False)) and ("accumEnergyTotal" in device.states):
energy = self.recurseDict(device.pluginProps['energy_payload_key'], state_data)
device.updateStateOnServer('accumEnergyTotal', energy, uiValue='{} kWh'.format(energy))
if bool(device.pluginProps.get('SupportsEnergyMeterCurPower', False)) and ("curEnergyLevel" in device.states):
power = self.recurseDict(device.pluginProps['power_payload_key'], state_data)
device.updateStateOnServer('curEnergyLevel', power, uiValue='{} W'.format(power))
states_key = device.pluginProps.get('state_dict_payload_key', None)
if not states_key:
return
try:
data = json.loads(message_data["payload"])
except:
self.logger.debug(u"{}: JSON decode error for payload, aborting".format(device.name))
return
self.logger.threaddebug(u"{}: update state_dict_payload_key, key = {}".format(device.name, states_key))
states_dict = self.recurseDict(states_key, data)
if not states_dict:
return
elif type(states_dict) != dict:
self.logger.error(u"{}: Device config error, bad Multi-States Key value: {}".format(device.name, states_key))
return
elif not len(states_dict) > 0:
self.logger.warning(u"{}: Possible device config error, Multi-States Key {} returns empty dict.".format(device.name, states_key))
return
old_states = device.pluginProps.get("states_list", indigo.List())
new_states = indigo.List()
states_list = []
for key in states_dict:
new_states.append(key)
states_list.append({'key': key, 'value': states_dict[key], 'decimalPlaces': 2})
if old_states != new_states:
self.logger.threaddebug(u"{}: update, new states_list: {}".format(device.name, new_states))
newProps = device.pluginProps
newProps["states_list"] = new_states
device.replacePluginPropsOnServer(newProps)
device.stateListOrDisplayStateIdChanged()
device.updateStatesOnServer(states_list)
else:
self.logger.warning(u"{}: Invalid device type: {}".format(device.name, device.deviceTypeId))
# Now do any triggers
for trigger in self.triggers.values():
if trigger.pluginProps["shimDevice"] == str(device.id):
if trigger.pluginTypeId == "deviceUpdated":
indigo.trigger.execute(trigger)
elif trigger.pluginTypeId == "stateUpdated":
state_name = trigger.pluginProps["deviceState"]
if state_name in states_dict:
indigo.trigger.execute(trigger)
def recurseDict(self, key_string, data_dict):
self.logger.threaddebug(u"recurseDict key_string = {}, data_dict= {}".format(key_string, data_dict))
try:
if key_string == u'.':
return data_dict
elif '.' not in key_string:
try:
if key_string[0] == '[':
new_data = data_dict[int(key_string[1:-1])]
else:
new_data = data_dict.get(key_string, None)
except:
return None
else:
return new_data
else:
split = key_string.split('.', 1)
self.logger.threaddebug(u"recurseDict split[0] = {}, split[1] = {}".format(split[0], split[1]))
try:
if split[0][0] == '[':
new_data = data_dict[int(split[0][1:-1])]
else:
new_data = data_dict[split[0]]
except:
return None
else:
return self.recurseDict(split[1], new_data)
except Exception as e:
self.logger.error(u"recurseDict error: {}".format(e))
def getStateList(self, filter, valuesDict, typeId, deviceId):
returnList = list()
if 'states_list' in valuesDict:
for topic in valuesDict['states_list']:
returnList.append(topic)
return returnList
def getDeviceStateList(self, device):
stateList = indigo.PluginBase.getDeviceStateList(self, device)
add_states = device.pluginProps.get("states_list", indigo.List())
for key in add_states:
dynamic_state = self.getDeviceStateDictForStringType(unicode(key), unicode(key), unicode(key))
stateList.append(dynamic_state)
self.logger.threaddebug(u"{}: getDeviceStateList returning: {}".format(device.name, stateList))
return stateList
def getBrokerDevices(self, filter="", valuesDict=None, typeId="", targetId=0):
retList = []
devicePlugin = valuesDict.get("devicePlugin", None)
for dev in indigo.devices.iter():
if dev.protocol == indigo.kProtocol.Plugin and \
dev.pluginId == "com.flyingdiver.indigoplugin.mqtt" and \
dev.deviceTypeId != 'aggregator' :
retList.append((dev.id, dev.name))
retList.sort(key=lambda tup: tup[1])
return retList
########################################
# Relay / Dimmer Action callback
########################################
def actionControlDevice(self, action, device):
if action.deviceAction == indigo.kDeviceAction.TurnOn:
action_template = device.pluginProps.get("action_template", None)
if not action_template:
self.logger.error(u"{}: actionControlDevice: no action template".format(device.name))
return
payload = device.pluginProps.get("on_action_payload", "on")
topic = pystache.render(action_template, {'uniqueID': device.address})
self.publish_topic(device, topic, payload)
self.logger.info(u"Sent '{}' On".format(device.name))
elif action.deviceAction == indigo.kDeviceAction.TurnOff:
action_template = device.pluginProps.get("action_template", None)
if not action_template:
self.logger.error(u"{}: actionControlDevice: no action template".format(device.name))
return
payload = device.pluginProps.get("off_action_payload", "off")
topic = pystache.render(action_template, {'uniqueID': device.address})
self.publish_topic(device, topic, payload)
self.logger.info(u"Sent '{}' Off".format(device.name))
elif action.deviceAction == indigo.kDeviceAction.SetBrightness:
newBrightness = action.actionValue
action_template = device.pluginProps.get("dimmer_action_template", None)
if not action_template:
self.logger.error(u"{}: actionControlDevice: no action template".format(device.name))
return
payload_template = device.pluginProps.get("dimmer_action_payload", None)
if not payload_template:
self.logger.error(u"{}: actionControlDevice: no payload template".format(device.name))
return
topic = pystache.render(action_template, {'uniqueID': device.address})
payload = pystache.render(payload_template, {'brightness': newBrightness})
self.publish_topic(device, topic, payload)
self.logger.info(u"Sent '{}' Brightness = {}".format(device.name, newBrightness))
else:
self.logger.error(u"{}: actionControlDevice: Unsupported action requested: {}".format(device.name, action.deviceAction))
########################################
# General Action callback
########################################
def actionControlUniversal(self, action, device):
action_template = device.pluginProps.get("action_template", None)
if not action_template:
self.logger.error(u"{}: actionControlDevice: no action template".format(device.name))
return
topic = pystache.render(action_template, {'uniqueID': device.address})
if action.deviceAction == indigo.kUniversalAction.RequestStatus or action.deviceAction == indigo.kUniversalAction.EnergyUpdate:
self.logger.debug(u"{}: actionControlUniversal: RequestStatus".format(device.name))
if not bool(device.pluginProps.get('SupportsStatusRequest', False)):
self.logger.warning(u"{}: actionControlUniversal: device does not support status requests".format(device.name))
else:
action_template = device.pluginProps.get("status_action_template", None)
if not action_template:
self.logger.error(u"{}: actionControlUniversal: no action template".format(device.name))
return
payload = device.pluginProps.get("status_action_payload", "")
topic = pystache.render(action_template, {'uniqueID': device.address})
self.publish_topic(device, topic, payload)
self.logger.info(u"Sent '{}' Status Request".format(device.name))
# elif action.deviceAction == indigo.kUniversalAction.EnergyReset:
# self.logger.debug(u"{}: actionControlUniversal: EnergyReset".format(device.name))
# dev.updateStateOnServer("accumEnergyTotal", 0.0)
else:
self.logger.error(u"{}: actionControlUniversal: Unsupported action requested: {}".format(device.name, action.deviceAction))
def publish_topic(self, device, topic, payload):
mqttPlugin = indigo.server.getPlugin("com.flyingdiver.indigoplugin.mqtt")
if not mqttPlugin.isEnabled():
self.logger.error(u"MQTT plugin not enabled, publish_topic aborting.")
return
brokerID = int(device.pluginProps['brokerID'])
props = {
'topic': topic,
'payload': payload,
'qos': 0,
'retain': 0,
}
mqttPlugin.executeAction("publish", deviceId=brokerID, props=props, waitUntilDone=False)
self.logger.debug(u"{}: publish_topic: {} -> {}".format(device.name, topic, payload))
########################################
# PluginConfig methods
########################################
def closedPrefsConfigUi(self, valuesDict, userCancelled):
if not userCancelled:
try:
self.logLevel = int(valuesDict[u"logLevel"])
except:
self.logLevel = logging.INFO
self.indigo_log_handler.setLevel(self.logLevel)
| 52.935815 | 198 | 0.597012 |
4ea1c4e429d144861d24aaa285ff6dfe1771af60 | 3,152 | py | Python | src/test/test_sign.py | cvxgrp/qcml | ff5e378cfeeebcf3f85a6e30c3449585f9af869f | [
"BSD-2-Clause-FreeBSD"
] | 26 | 2015-02-06T02:59:17.000Z | 2021-11-15T18:13:27.000Z | src/test/test_sign.py | cvxgrp/qcml | ff5e378cfeeebcf3f85a6e30c3449585f9af869f | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2015-06-14T04:43:43.000Z | 2019-10-27T11:03:30.000Z | src/test/test_sign.py | cvxgrp/qcml | ff5e378cfeeebcf3f85a6e30c3449585f9af869f | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2015-03-14T07:40:56.000Z | 2019-12-30T23:11:36.000Z | from .. properties.sign import Positive, Negative, Neither
#from nose.tools import assert_raises
#import operator
signs = ['positive', 'negative', 'neither']
def create_sign(s):
    """Build a sign property object from its lowercase name.

    Unrecognized names fall back to Neither, mirroring the original
    if/elif/else chain.
    """
    factories = {'positive': Positive, 'negative': Negative}
    return factories.get(s, Neither)()
def make_sign(s):
    """Round-trip check: constructing a sign and stringifying it yields its name."""
    assert str(create_sign(s)) == s
def add_sign(s1, s2, exp):
    """Assert that adding signs named s1 and s2 yields the sign named exp."""
    p1 = create_sign(s1)
    p2 = create_sign(s2)
    result = p1 + p2
    # Leftover debug `print` statements removed: they were Python-2-only
    # syntax and the sibling helpers (sub_sign, mul_sign) do not print.
    assert str(result) == exp
def sub_sign(s1, s2, exp):
    """Assert that subtracting signs named s1 and s2 yields the sign named exp."""
    left, right = create_sign(s1), create_sign(s2)
    assert str(left - right) == exp
def negate_sign(s, exp):
    """Assert that unary negation of the sign named s yields the sign named exp."""
    assert str(-create_sign(s)) == exp
def mul_sign(s1, s2, exp):
    """Assert that multiplying signs named s1 and s2 yields the sign named exp."""
    product = create_sign(s1) * create_sign(s2)
    assert str(product) == exp
def test_add():
    """Yield one add_sign case per ordered pair of sign names.

    Addition keeps a definite sign only when both operands share it;
    every mixed pair (and anything involving 'neither') is 'neither'.
    Iteration order matches the original hand-written table.
    """
    for s1 in signs:
        for s2 in signs:
            expected = s1 if s1 == s2 else 'neither'
            yield add_sign, s1, s2, expected
def test_sub():
    """Yield one sub_sign case per ordered pair of sign names.

    Subtraction is definite only for opposite definite signs
    (positive - negative -> positive, negative - positive -> negative);
    everything else is 'neither'. Iteration order matches the original
    hand-written table.
    """
    opposite = {'positive': 'negative', 'negative': 'positive'}
    for s1 in signs:
        for s2 in signs:
            expected = s1 if opposite.get(s1) == s2 else 'neither'
            yield sub_sign, s1, s2, expected
def test_negate():
    """Yield one negate_sign case per sign name.

    Negation flips definite signs and leaves 'neither' unchanged.
    """
    expectations = (
        ('positive', 'negative'),
        ('negative', 'positive'),
        ('neither', 'neither'),
    )
    for name, expected in expectations:
        yield negate_sign, name, expected
def test_mul():
    """Yield one mul_sign case per ordered pair of sign names.

    Multiplication follows the usual sign rule (same signs -> positive,
    opposite signs -> negative) and 'neither' absorbs everything.
    Iteration order matches the original hand-written table.
    """
    def product(a, b):
        if a == 'neither' or b == 'neither':
            return 'neither'
        return 'positive' if a == b else 'negative'

    for s1 in signs:
        for s2 in signs:
            yield mul_sign, s1, s2, product(s1, s2)
# def equals(s1,s2):
# p1 = Sign(str.upper(s1))
# p2 = Sign(str.upper(s2))
# result = (p1 == p2)
# exp = (s1 == s2)
# assert(result == exp)
#
# def not_equals(s1,s2):
# p1 = Sign(str.upper(s1))
# p2 = Sign(str.upper(s2))
# result = (p1 != p2)
# exp = (s1 != s2)
# assert(result == exp)
#
# def test_sign_bools():
# for s1 in signs:
# for s2 in signs:
# yield equals, s1, s2
# yield not_equals, s1, s2
def test_sign_creation():
    """Yield one construction/round-trip case per known sign name."""
    for name in signs:
        yield make_sign, name
| 25.015873 | 58 | 0.545368 |
de7dd461de702da0b58750ad0483780583a92915 | 5,116 | py | Python | ppdet/modeling/architectures/ssd.py | wanghaoshuang/PaddleDetection | d2829c5e6da5943e509f454b99407c2a55a28807 | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/architectures/ssd.py | wanghaoshuang/PaddleDetection | d2829c5e6da5943e509f454b99407c2a55a28807 | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/architectures/ssd.py | wanghaoshuang/PaddleDetection | d2829c5e6da5943e509f454b99407c2a55a28807 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import paddle.fluid as fluid
from ppdet.experimental import mixed_precision_global_state
from ppdet.core.workspace import register
from ppdet.modeling.ops import SSDOutputDecoder
__all__ = ['SSD']
@register
class SSD(object):
    """
    Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325
    Args:
        backbone (object): backbone instance
        multi_box_head (object): `MultiBoxHead` instance
        output_decoder (object): `SSDOutputDecoder` instance
        num_classes (int): number of output classes
    """
    __category__ = 'architecture'
    __inject__ = ['backbone', 'multi_box_head', 'output_decoder']
    __shared__ = ['num_classes']

    def __init__(self,
                 backbone,
                 multi_box_head='MultiBoxHead',
                 output_decoder=SSDOutputDecoder().__dict__,
                 num_classes=21):
        super(SSD, self).__init__()
        self.backbone = backbone
        self.multi_box_head = multi_box_head
        self.num_classes = num_classes
        self.output_decoder = output_decoder
        # A dict here means the decoder was configured declaratively
        # (e.g. from a YAML config); build the real decoder from it.
        if isinstance(output_decoder, dict):
            self.output_decoder = SSDOutputDecoder(**output_decoder)

    def build(self, feed_vars, mode='train'):
        """Build the SSD program for one mode.

        Args:
            feed_vars (dict): input variables; must contain 'image', plus
                'gt_bbox' and 'gt_class' in 'train'/'eval' mode.
            mode (str): 'train', 'eval' or 'test'.

        Returns:
            dict: {'loss': loss_var} in train mode,
                  {'bbox': decoded_predictions} otherwise.
        """
        im = feed_vars['image']
        if mode == 'train' or mode == 'eval':
            gt_bbox = feed_vars['gt_bbox']
            gt_class = feed_vars['gt_class']
        mixed_precision_enabled = mixed_precision_global_state() is not None
        # cast inputs to FP16
        if mixed_precision_enabled:
            im = fluid.layers.cast(im, 'float16')
        # backbone
        body_feats = self.backbone(im)
        # Some backbones return an OrderedDict of named feature maps;
        # flatten it to a list for the multi-box head.
        if isinstance(body_feats, OrderedDict):
            body_feat_names = list(body_feats.keys())
            body_feats = [body_feats[name] for name in body_feat_names]
        # cast features back to FP32
        if mixed_precision_enabled:
            body_feats = [fluid.layers.cast(v, 'float32') for v in body_feats]
        locs, confs, box, box_var = self.multi_box_head(
            inputs=body_feats, image=im, num_classes=self.num_classes)
        if mode == 'train':
            loss = fluid.layers.ssd_loss(locs, confs, gt_bbox, gt_class, box,
                                         box_var)
            loss = fluid.layers.reduce_sum(loss)
            return {'loss': loss}
        else:
            pred = self.output_decoder(locs, confs, box, box_var)
            return {'bbox': pred}

    def _inputs_def(self, image_shape):
        """Return the shape/dtype/lod definition for every possible input."""
        im_shape = [None] + image_shape
        # yapf: disable
        inputs_def = {
            'image': {'shape': im_shape, 'dtype': 'float32', 'lod_level': 0},
            'im_id': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 0},
            'gt_bbox': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1},
            'gt_class': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1},
            'im_shape': {'shape': [None, 3], 'dtype': 'int32', 'lod_level': 0},
            'is_difficult': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1},
        }
        # yapf: enable
        return inputs_def

    def build_inputs(
            self,
            image_shape=[3, None, None],
            fields=['image', 'im_id', 'gt_bbox', 'gt_class'],  # for train
            use_dataloader=True,
            iterable=False):
        """Create feed variables (and optionally a DataLoader) for `fields`.

        Returns:
            (OrderedDict of feed vars, DataLoader or None)
        """
        inputs_def = self._inputs_def(image_shape)
        feed_vars = OrderedDict([(key, fluid.data(
            name=key,
            shape=inputs_def[key]['shape'],
            dtype=inputs_def[key]['dtype'],
            lod_level=inputs_def[key]['lod_level'])) for key in fields])
        loader = fluid.io.DataLoader.from_generator(
            feed_list=list(feed_vars.values()),
            capacity=64,
            use_double_buffer=True,
            iterable=iterable) if use_dataloader else None
        return feed_vars, loader

    def train(self, feed_vars):
        """Build the network in training mode (returns {'loss': ...})."""
        return self.build(feed_vars, 'train')

    def eval(self, feed_vars):
        """Build the network in evaluation mode (returns {'bbox': ...})."""
        return self.build(feed_vars, 'eval')

    def test(self, feed_vars):
        """Build the network in inference mode (returns {'bbox': ...})."""
        return self.build(feed_vars, 'test')

    def is_bbox_normalized(self):
        # SSD use output_decoder in output layers, bbox is normalized
        # to range [0, 1], is_bbox_normalized is used in eval.py and infer.py
        return True
| 36.805755 | 85 | 0.620602 |
5a8c596b694f9bb7e3f5dca41399df39f7b24c4f | 2,830 | py | Python | endpoint/admin/taskMerge.py | fi-ksi/web-backend | 57c4e52d4722bd083a74bf4cf742a7222a532fac | [
"MIT"
] | 4 | 2017-12-11T00:14:22.000Z | 2022-02-07T15:08:13.000Z | endpoint/admin/taskMerge.py | fi-ksi/web-backend | 57c4e52d4722bd083a74bf4cf742a7222a532fac | [
"MIT"
] | 94 | 2016-04-29T10:38:37.000Z | 2022-02-10T13:41:29.000Z | endpoint/admin/taskMerge.py | fi-ksi/web-backend | 57c4e52d4722bd083a74bf4cf742a7222a532fac | [
"MIT"
] | null | null | null | import falcon
import json
import git
from lockfile import LockFile
from sqlalchemy.exc import SQLAlchemyError
from db import session
import model
import util
class TaskMerge(object):
    """Falcon resource: merge a task's working git branch back into master.

    Only administrators and the guarantor of the task's wave may perform
    the merge. Responses carry Czech user-facing error strings (left
    untranslated — they are part of the API behavior).
    """

    def on_post(self, req, resp, id):
        """
        Returns JSON:
        {
            "result": "ok" | "error",
            "error": String
        }
        """
        try:
            user = req.context['user']
            # The task must exist.
            task = session.query(model.Task).get(id)
            if task is None:
                req.context['result'] = 'Neexistujici uloha'
                resp.status = falcon.HTTP_404
                return
            # The task must have both a git branch and a git path configured.
            if (task.git_path is None) or (task.git_branch is None):
                req.context['result'] = ('Uloha nema zadanou gitovskou vetev '
                                         'nebo adresar')
                resp.status = falcon.HTTP_400
                return
            # Already on master: nothing to merge.
            if task.git_branch == "master":
                req.context['result'] = 'Uloha je jiz ve vetvi master'
                resp.status = falcon.HTTP_400
                return
            wave = session.query(model.Wave).get(task.wave)
            # Only administrators and the wave's guarantor may merge.
            if (not user.is_logged_in()) or (not user.is_admin() and
                                             user.id != wave.garant):
                req.context['result'] = 'Nedostatecna opravneni'
                resp.status = falcon.HTTP_400
                return
            # Abort (409) if another git operation currently holds the
            # repository lock; the client is told to retry in ~20 seconds.
            lock = util.lock.git_locked()
            if lock:
                req.context['result'] = ('GIT uzamcen zámkem '+lock +
                                         '\nNekdo momentalne provadi akci s '
                                         'gitem, opakujte prosim akci za 20 '
                                         'sekund.')
                resp.status = falcon.HTTP_409
                return
            try:
                mergeLock = LockFile(util.admin.taskMerge.LOCKFILE)
                mergeLock.acquire(60)  # lock acquisition timeout: one minute

                # Open the seminar repository and delete the task's branch
                # (switching to master first, since the checked-out branch
                # cannot be deleted), then record the task as merged.
                repo = git.Repo(util.git.GIT_SEMINAR_PATH)
                if task.git_branch in repo.heads:
                    # Cannot delete branch we are on
                    repo.git.checkout("master")
                    repo.git.branch('-D', task.git_branch)
                task.git_branch = 'master'
                session.commit()
                resp.status = falcon.HTTP_200
                req.context['result'] = {}
            finally:
                mergeLock.release()
        except SQLAlchemyError:
            # Roll back the DB transaction and let falcon report the error.
            session.rollback()
            raise
        finally:
            session.close()
| 31.797753 | 78 | 0.488339 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.