hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4b0150ee290b442cd65c440717a7b6965f923dc2 | 9,823 | py | Python | gen_config.py | dfresh613/couchbase-python-client | c77af56490ed4c6d364fcf8fc1a374570de0239b | [
"Apache-2.0"
] | null | null | null | gen_config.py | dfresh613/couchbase-python-client | c77af56490ed4c6d364fcf8fc1a374570de0239b | [
"Apache-2.0"
] | null | null | null | gen_config.py | dfresh613/couchbase-python-client | c77af56490ed4c6d364fcf8fc1a374570de0239b | [
"Apache-2.0"
] | null | null | null | import abc
import logging
import warnings
import pathlib
import os
from abc import abstractmethod
from enum import IntEnum
import json
import sys
import ssl
import platform
import posixpath
from enum import Enum
import argparse
import urllib.request
import datetime
import time
import traceback
curdir = pathlib.Path(__file__).parent
lcb_min_version_baseline = (3, 0, 1)
def win_cmake_path(orig_path):
    """Normalize *orig_path* and convert backslashes to forward slashes.

    CMake expects forward-slash separators even on Windows, so this makes a
    path safe to embed in generated CMake configuration.

    :param orig_path: path string, possibly containing ``\\`` separators.
    :return: normalized path using ``/`` separators only.
    """
    # ``posixpath`` is already imported at module scope; the previous
    # function-local re-import was redundant and has been removed.
    return posixpath.normpath(orig_path).replace('\\', '/')
def get_lcb_min_version():
    """Determine the minimum libcouchbase version to build against.

    Starts from the hard-coded ``lcb_min_version_baseline`` and bumps it up
    to the version advertised by the ``libcouchbase_version`` substitution
    in README.rst, whichever is greater.  Any problem (missing docutils,
    missing README, parse failure) falls back to the baseline and emits a
    warning rather than failing the build.

    :return: version as a tuple of ints, e.g. ``(3, 0, 1)``.
    """
    result = lcb_min_version_baseline
    try:
        # check the version listed in README.rst isn't greater than lcb_min_version
        # bump it up to the specified version if it is
        # docutils is imported lazily so a build without it still works.
        import docutils.parsers.rst
        import docutils.utils
        import docutils.frontend
        parser = docutils.parsers.rst.Parser()
        with open(str(curdir.joinpath("README.rst"))) as README:
            settings = docutils.frontend.OptionParser().get_default_values()
            settings.update(
                dict(tab_width=4, report_level=1, pep_references=False, rfc_references=False, syntax_highlight=False),
                docutils.frontend.OptionParser())
            document = docutils.utils.new_document(README.name, settings=settings)
            parser.parse(README.read(), document)
            # The README declares the version as an RST substitution named
            # "libcouchbase_version"; parse it into an int tuple for comparison.
            readme_min_version = tuple(
                map(int, document.substitution_defs.get("libcouchbase_version").astext().split('.')))
            result = max(result, readme_min_version)
            logging.info("min version is {}".format(result))
    except Exception as e:
        warnings.warn("problem: {}".format(traceback.format_exc()))
    return result
# Resolved once at import time: the max of the baseline and the README value.
lcb_min_version = get_lcb_min_version()
# Maps OpenSSL's status nibble (the low 4 bits of the last element of
# ssl.OPENSSL_VERSION_INFO) onto a readable name:
# 0x0 = dev, 0x1..0xe = beta 1..14, 0xf = release.
SSL_MinVer = IntEnum(
    "SSL_MinVer",
    [("dev", 0x0)]
    + [("beta_{}".format(n), n) for n in range(1, 15)]
    + [("release", 0xf)],
)
# OPENSSL_VERSION_INFO is (major, minor, fix, patch, status); element [-2] is
# the patch number, which OpenSSL 1.x renders as a letter ('a' is patch 1,
# so e.g. patch 7 becomes 'g').
ssl_letter = bytes.decode(bytes((str.encode('a', 'utf-8')[0] + ssl.OPENSSL_VERSION_INFO[-2] - 1,)), 'utf-8')
# Dotted numeric version plus the patch letter, e.g. "1.1.1g"; used below to
# select the matching prebuilt-OpenSSL tag in python/cpython-bin-deps.
ssl_major = "{}{}".format(".".join(map(str, ssl.OPENSSL_VERSION_INFO[:-2])), ssl_letter)
class DownloadableRepo(object):
    """Wrapper around a PyGithub repository that throttles API calls against
    the GitHub rate limit and can mirror a directory tree to local disk.

    Requires the third-party ``github`` (PyGithub) package, imported lazily
    inside ``__init__`` so the module imports without it.
    """

    def __init__(self,
                 repository_name,  # type: str
                 gh_client=None,  # type: github.Github
                 timeout=None  # type: datetime.timedelta
                 ):
        """Connect to *repository_name*, optionally via an existing client.

        :param repository_name: "owner/repo" slug on GitHub.
        :param gh_client: pre-built Github client; a new one is created from
            the PYCBC_GH_TOKEN_ENCRYPTED environment variable when omitted.
        :param timeout: overall budget for throttled calls (default 1 minute).
        """
        import github
        # Absolute wall-clock deadline for all throttled calls.
        # (``__add__`` is just ``+ (timeout or 1 minute)``.)
        self._deadline = datetime.datetime.now().__add__(timeout or datetime.timedelta(minutes=1))
        self._last_op = None  # type: datetime.datetime
        self.__rate_limit = None
        self._gh_client = gh_client or github.Github(login_or_token=os.getenv("PYCBC_GH_TOKEN_ENCRYPTED"))
        self._ghrepo = self.throttle_command(self._gh_client.get_repo, repository_name)

    @property
    def _rate_limit(self):
        """Cached rate-limit snapshot from the GitHub API.

        NOTE(review): the refresh condition looks inverted — ``reset > now``
        is true while the cached reset time is still in the *future*, so
        this refreshes on almost every access and would keep a stale
        snapshot once its reset time has passed.  ``reset < now`` appears to
        be the intent — confirm before changing.
        """
        if not self.__rate_limit or self.__rate_limit.core.reset > datetime.datetime.now():
            self._last_op = None
            self.__rate_limit = self._gh_client.get_rate_limit()
        return self.__rate_limit

    @property
    def min_wait(self):
        """Minimum spacing between API calls to stay under the hourly core limit."""
        return datetime.timedelta(seconds=60 * 60 / self._rate_limit.core.limit)

    @property
    def op_wait_time(self):
        """Time still to wait before the next API call is allowed.

        NOTE(review): the condition appears inverted — it returns zero
        precisely when ``_last_op + min_wait`` is still in the future (i.e.
        when a wait *is* required) and otherwise computes a delta that is
        non-positive.  Confirm the intended throttling behaviour.
        """
        if not self._last_op or self._last_op + self.min_wait > datetime.datetime.now():
            return datetime.timedelta(seconds=0)
        return self._last_op + self.min_wait - datetime.datetime.now()

    def throttle_command(self, cmd, *args, **kwargs):
        """Invoke ``cmd(*args, **kwargs)``, sleeping as needed to respect the
        GitHub rate limit; retries on RateLimitExceededException and raises
        TimeoutError when waiting would exceed the deadline set in __init__."""
        from github.GithubException import RateLimitExceededException
        while True:
            if not self._rate_limit.core.remaining:
                # Out of quota: wait until the limit resets, or give up if
                # that would blow past the overall deadline.
                remainder = self._rate_limit.core.reset - datetime.datetime.now()
                if self._rate_limit.core.reset > self._deadline:
                    raise TimeoutError("Can't download all files in time, reset is {} away, but deadline is {} away".format(remainder, self._deadline - datetime.datetime.now()))
            else:
                remainder = self.op_wait_time
                logging.info("remainder = {}".format(remainder))
            if remainder:
                logging.warning("Rate limit exceeded, waiting {}".format(remainder))
                # NOTE(review): ``.seconds`` ignores the days component (and
                # wraps for negative deltas); ``.total_seconds()`` is
                # probably what is wanted here — confirm.
                time.sleep(remainder.seconds)
            self._last_op = datetime.datetime.now()
            assert (self._last_op)
            try:
                return cmd(*args, **kwargs)
            except RateLimitExceededException as e:
                # Log and loop back around to wait for the limit to clear.
                logging.warning(traceback.format_exc())

    def get_sha_for_tag(self,  # type: github.Repository
                        tag  # type: str
                        ):
        """
        Returns a commit PyGithub object for the specified repository and tag.

        Branches with a matching name take precedence over tags; returns the
        commit sha string, or None when nothing matches.
        """
        branches = self._ghrepo.get_branches()
        matched_branches = [match for match in branches if match.name == tag]
        if matched_branches:
            return matched_branches[0].commit.sha
        y = next(iter({x for x in self._ghrepo.get_tags() if x.name == tag}), None)
        return y.commit.sha if y else None

    def download_directory(self, sha, server_path, dest):
        """
        Download all contents at server_path with commit tag sha in
        the repository.

        Recurses into subdirectories; *dest* is created if missing, and an
        already-existing *dest* is treated as "done" and skipped.

        NOTE(review): the existence check happens *after* the directory
        listing API call, costing one request when ``dest`` already exists;
        confirm whether the ordering (which validates the remote path) is
        intentional.
        """
        contents = self.throttle_command(self._ghrepo.get_dir_contents, server_path, ref=sha)
        if os.path.exists(dest):
            return
        os.makedirs(dest, exist_ok=True)
        for content in contents:
            print("Processing %s" % content.path)
            if content.type == 'dir':
                self.download_directory(sha, content.path, os.path.join(dest, content.path))
            else:
                dl_url = content.download_url
                dest_path = os.path.join(dest, content.name)
                # NOTE: the "Donwloading" typo is in a runtime message and is
                # deliberately left unchanged by this documentation-only pass.
                print("Donwloading {} to {} from {}".format(content.path, dest_path, dl_url))
                urllib.request.urlretrieve(dl_url, dest_path)
class AbstractOpenSSL(abc.ABC):
    """Interface for fetching prebuilt OpenSSL artefacts for one platform.

    Concrete subclasses implement :meth:`get_arch_content`; the two helpers
    below just select which part of the artefact tree to fetch.
    """

    def get_headers(self, dest=os.path.abspath(os.path.curdir)):
        """Fetch only the ``include`` tree into *dest*."""
        self.get_arch_content(dest, ('include',))

    def get_all(self, dest=os.path.abspath(os.path.curdir)):
        """Fetch the complete artefact tree into *dest*."""
        self.get_arch_content(dest, ())

    @abstractmethod
    def get_arch_content(self, dest, rel_path):
        """Download *rel_path* (a tuple of path components) into *dest*."""
class Windows(object):
    """Windows platform helpers: map the CPU architecture onto the matching
    prebuilt-OpenSSL directory in python/cpython-bin-deps and download it."""

    class Machine(Enum):
        # Maps platform.machine() spellings onto cpython-bin-deps directory
        # names.  Members repeating an earlier value (e.g. AMD64) are Enum
        # aliases of the first member with that value, so order matters.
        x86_64 = 'amd64'
        x86_32 = 'win32'
        aarch_be = 'arm64'
        aarch = 'arm64'
        armv8b = 'arm64'
        armv8l = 'arm64'
        AMD64 = 'amd64'
        WIN32 = 'win32'

    class OpenSSL(AbstractOpenSSL):
        """Downloader for the prebuilt OpenSSL matching one architecture."""

        def __init__(self,
                     arch  # type: Windows.Machine
                     ):
            self.arch = arch
            self.repo = DownloadableRepo('python/cpython-bin-deps')
            # The binary deps repo tags OpenSSL drops as "openssl-bin-<version>".
            tag = "openssl-bin-{}".format(ssl_major)
            self.sha = self.repo.get_sha_for_tag(tag)

        def get_arch_content(self, dest, rel_path):
            """Mirror *rel_path* of this architecture's OpenSSL tree into *dest*."""
            if not self.sha:
                # No matching tag/branch for this OpenSSL version: nothing to do.
                return
            remote_dir = posixpath.join(self.arch.value, *rel_path)
            self.repo.download_directory(self.sha, remote_dir, dest)

    @classmethod
    def get_arch(cls):
        """Resolve the current machine to a Machine member (KeyError if unknown)."""
        return cls.Machine[platform.machine()]

    @classmethod
    def get_openssl(cls):
        """Build an OpenSSL downloader for the current architecture."""
        return cls.OpenSSL(cls.get_arch())
def get_system():
    """Return the helper class for the current OS, or None when unsupported."""
    on_windows = platform.system().lower().startswith('win')
    return Windows if on_windows else None
def get_openssl():
    """Instantiate the platform OpenSSL downloader.

    Returns None when the OS is unsupported or initialisation fails (the
    failure is logged, not raised)."""
    system = get_system()
    if system is None:
        return None
    try:
        return system().get_openssl()
    except Exception:
        logging.warning("Couldn't initialise OpenSSL repository {}".format(traceback.format_exc()))
        return None
def gen_config(temp_build_dir=None, ssl_relative_path=None, couchbase_core='couchbase_core'):
    """Generate build-time configuration artefacts.

    Always writes ``build/lcb_min_version.h``.  When *temp_build_dir* is
    given, also computes the OpenSSL root directory, dumps a summary to
    ``openssl_version.json`` in the current directory and (when
    *ssl_relative_path* is given) attempts to download the prebuilt OpenSSL
    tree for the current platform.

    :param temp_build_dir: build staging directory; enables the OpenSSL steps.
    :param ssl_relative_path: OpenSSL subdirectory inside *temp_build_dir*
        (defaults to ``openssl``); triggers the download when not None.
    :param couchbase_core: package name embedded in the generated header.
    :return: the ssl_info dict when *temp_build_dir* was given, else None.
    """
    build_dir = curdir.joinpath('build')
    if not os.path.exists(str(build_dir)):
        os.mkdir(str(build_dir))
    # Emit the libcouchbase minimum-version header consumed by the C extension.
    with open(str(build_dir.joinpath("lcb_min_version.h")), "w+") as LCB_MIN_VERSION:
        LCB_MIN_VERSION.write('\n'.join(
            ["#define LCB_MIN_VERSION 0x{}".format(''.join(map(lambda x: "{0:02d}".format(x), lcb_min_version))),
             '#define LCB_MIN_VERSION_TEXT "{}"'.format('.'.join(map(str, lcb_min_version))),
             '#define PYCBC_PACKAGE_NAME "{}"'.format(couchbase_core)]))
    if temp_build_dir:
        posix_temp_build_dir = os.path.normpath(temp_build_dir)
        ssl_abs_path = os.path.join(os.path.abspath(posix_temp_build_dir), ssl_relative_path or 'openssl')
        print("From: temp_build_dir {} and ssl_relative_path {} Got ssl_abs_path {}".format(temp_build_dir, ssl_relative_path, ssl_abs_path))
        #ssl_root_dir_pattern = os.getenv("OPENSSL_ROOT_DIR", ssl_abs_path)
        # NOTE(review): ``.format(ssl_major)`` only substitutes when the path
        # contains a "{}" placeholder (e.g. from the commented-out env
        # pattern above); for a plain path it is a no-op — confirm intent.
        ssl_root_dir = win_cmake_path(ssl_abs_path.format(ssl_major))
        ssl_info = dict(major=ssl_major,
                        minor=SSL_MinVer(ssl.OPENSSL_VERSION_INFO[-1]).name.replace('_', ' '),
                        original=ssl.OPENSSL_VERSION,
                        ssl_root_dir=ssl_root_dir,
                        python_version=sys.version_info,
                        raw_version_info=".".join(map(str, ssl.OPENSSL_VERSION_INFO[:-2])))
        # Snapshot for later build steps; written to the current directory.
        # (sys.version_info is a tuple subclass, so json serializes it as a list.)
        with open("openssl_version.json", "w+") as OUTPUT:
            json.dump(ssl_info, OUTPUT)
        if ssl_relative_path is not None:
            openssl = get_openssl()
            if openssl:
                try:
                    openssl.get_all(ssl_abs_path)
                except Exception as e:
                    # Best effort: a failed download should not fail the build.
                    logging.warning("Couldn't get OpenSSL headers: {}".format(traceback.format_exc()))
        return ssl_info
    return None
if __name__ == "__main__":
    # Build the CLI, parse argv once, and forward the options to gen_config().
    # (The original called parse_args() twice and discarded the first result.)
    parser = argparse.ArgumentParser()
    parser.add_argument('--temp_build_dir', type=str, default=None)
    parser.add_argument('--ssl_relative_path', type=str, default=None)
    gen_config(**vars(parser.parse_args()))
| 35.72 | 174 | 0.634837 |
aa123ec8701bca8093bfe1a533ac7750eb85714b | 8,986 | py | Python | test/offline/test_gail.py | BFAnas/tianshou | 6e86a0bed7d1117c5ad6a421b483b45a6adfe336 | [
"MIT"
] | null | null | null | test/offline/test_gail.py | BFAnas/tianshou | 6e86a0bed7d1117c5ad6a421b483b45a6adfe336 | [
"MIT"
] | null | null | null | test/offline/test_gail.py | BFAnas/tianshou | 6e86a0bed7d1117c5ad6a421b483b45a6adfe336 | [
"MIT"
] | null | null | null | import argparse
import os
import pickle
import pprint
import gym
import numpy as np
import torch
from torch.distributions import Independent, Normal
from torch.utils.tensorboard import SummaryWriter
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.env import DummyVectorEnv
from tianshou.policy import GAILPolicy
from tianshou.trainer import onpolicy_trainer
from tianshou.utils import TensorboardLogger
from tianshou.utils.net.common import ActorCritic, Net
from tianshou.utils.net.continuous import ActorProb, Critic
if __name__ == "__main__":
from gather_pendulum_data import expert_file_name, gather_data
else: # pytest
from test.offline.gather_pendulum_data import expert_file_name, gather_data
def get_args():
    """Build the argument namespace for the GAIL Pendulum test.

    Unknown command-line flags (e.g. pytest's own) are tolerated via
    ``parse_known_args``.
    """
    parser = argparse.ArgumentParser()
    # (flag, add_argument keyword options) pairs, registered in order.
    arg_specs = [
        ('--task', dict(type=str, default='Pendulum-v1')),
        ('--reward-threshold', dict(type=float, default=None)),
        ('--seed', dict(type=int, default=1)),
        ('--buffer-size', dict(type=int, default=20000)),
        ('--lr', dict(type=float, default=1e-3)),
        ('--disc-lr', dict(type=float, default=5e-4)),
        ('--gamma', dict(type=float, default=0.95)),
        ('--epoch', dict(type=int, default=5)),
        ('--step-per-epoch', dict(type=int, default=150000)),
        ('--episode-per-collect', dict(type=int, default=16)),
        ('--repeat-per-collect', dict(type=int, default=2)),
        ('--disc-update-num', dict(type=int, default=2)),
        ('--batch-size', dict(type=int, default=128)),
        ('--hidden-sizes', dict(type=int, nargs='*', default=[64, 64])),
        ('--training-num', dict(type=int, default=16)),
        ('--test-num', dict(type=int, default=100)),
        ('--logdir', dict(type=str, default='log')),
        ('--render', dict(type=float, default=0.)),
        ('--device', dict(type=str, default='cuda' if torch.cuda.is_available() else 'cpu')),
        # ppo special
        ('--vf-coef', dict(type=float, default=0.25)),
        ('--ent-coef', dict(type=float, default=0.0)),
        ('--eps-clip', dict(type=float, default=0.2)),
        ('--max-grad-norm', dict(type=float, default=0.5)),
        ('--gae-lambda', dict(type=float, default=0.95)),
        ('--rew-norm', dict(type=int, default=1)),
        ('--dual-clip', dict(type=float, default=None)),
        ('--value-clip', dict(type=int, default=1)),
        ('--norm-adv', dict(type=int, default=1)),
        ('--recompute-adv', dict(type=int, default=0)),
        ('--resume', dict(action="store_true")),
        ('--save-interval', dict(type=int, default=4)),
        ('--load-buffer-name', dict(type=str, default=expert_file_name())),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_known_args()[0]
def test_gail(args=get_args()):
    """Train GAIL on Pendulum from saved expert data and assert the reward
    threshold is reached.

    NOTE: the ``args=get_args()`` default is evaluated once at import time,
    so repeated calls without an explicit ``args`` share a single namespace.
    """
    # Expert demonstrations: load from disk (hdf5 or pickle), else gather anew.
    if os.path.exists(args.load_buffer_name) and os.path.isfile(args.load_buffer_name):
        if args.load_buffer_name.endswith(".hdf5"):
            buffer = VectorReplayBuffer.load_hdf5(args.load_buffer_name)
        else:
            # NOTE(review): the file handle opened here is never closed explicitly.
            buffer = pickle.load(open(args.load_buffer_name, "rb"))
    else:
        buffer = gather_data()
    env = gym.make(args.task)
    if args.reward_threshold is None:
        # Fall back to a per-task default, then the env spec's own threshold.
        default_reward_threshold = {"Pendulum-v0": -1100, "Pendulum-v1": -1100}
        args.reward_threshold = default_reward_threshold.get(
            args.task, env.spec.reward_threshold
        )
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    args.max_action = env.action_space.high[0]
    # you can also use tianshou.env.SubprocVectorEnv
    # train_envs = gym.make(args.task)
    train_envs = DummyVectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.training_num)]
    )
    # test_envs = gym.make(args.task)
    test_envs = DummyVectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.test_num)]
    )
    # seed everything for reproducibility
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)
    # actor-critic model
    net = Net(args.state_shape, hidden_sizes=args.hidden_sizes, device=args.device)
    actor = ActorProb(
        net, args.action_shape, max_action=args.max_action, device=args.device
    ).to(args.device)
    critic = Critic(
        Net(args.state_shape, hidden_sizes=args.hidden_sizes, device=args.device),
        device=args.device
    ).to(args.device)
    actor_critic = ActorCritic(actor, critic)
    # orthogonal initialization
    for m in actor_critic.modules():
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.orthogonal_(m.weight)
            torch.nn.init.zeros_(m.bias)
    optim = torch.optim.Adam(actor_critic.parameters(), lr=args.lr)
    # discriminator: scores (state, action) pairs against the expert buffer
    disc_net = Critic(
        Net(
            args.state_shape,
            action_shape=args.action_shape,
            hidden_sizes=args.hidden_sizes,
            activation=torch.nn.Tanh,
            device=args.device,
            concat=True,
        ),
        device=args.device
    ).to(args.device)
    for m in disc_net.modules():
        if isinstance(m, torch.nn.Linear):
            # orthogonal initialization
            torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
            torch.nn.init.zeros_(m.bias)
    disc_optim = torch.optim.Adam(disc_net.parameters(), lr=args.disc_lr)

    # replace DiagGuassian with Independent(Normal) which is equivalent
    # pass *logits to be consistent with policy.forward
    def dist(*logits):
        return Independent(Normal(*logits), 1)

    policy = GAILPolicy(
        actor,
        critic,
        optim,
        dist,
        buffer,
        disc_net,
        disc_optim,
        disc_update_num=args.disc_update_num,
        discount_factor=args.gamma,
        max_grad_norm=args.max_grad_norm,
        eps_clip=args.eps_clip,
        vf_coef=args.vf_coef,
        ent_coef=args.ent_coef,
        reward_normalization=args.rew_norm,
        advantage_normalization=args.norm_adv,
        recompute_advantage=args.recompute_adv,
        dual_clip=args.dual_clip,
        value_clip=args.value_clip,
        gae_lambda=args.gae_lambda,
        action_space=env.action_space,
    )
    # collector
    train_collector = Collector(
        policy, train_envs, VectorReplayBuffer(args.buffer_size, len(train_envs))
    )
    test_collector = Collector(policy, test_envs)
    # log
    log_path = os.path.join(args.logdir, args.task, "gail")
    writer = SummaryWriter(log_path)
    logger = TensorboardLogger(writer, save_interval=args.save_interval)

    def save_best_fn(policy):
        # Persist the best-performing policy seen so far.
        torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))

    def stop_fn(mean_rewards):
        # Stop training once mean reward clears the threshold.
        return mean_rewards >= args.reward_threshold

    def save_checkpoint_fn(epoch, env_step, gradient_step):
        # see also: https://pytorch.org/tutorials/beginner/saving_loading_models.html
        ckpt_path = os.path.join(log_path, "checkpoint.pth")
        # Example: saving by epoch num
        # ckpt_path = os.path.join(log_path, f"checkpoint_{epoch}.pth")
        torch.save(
            {
                "model": policy.state_dict(),
                "optim": optim.state_dict(),
            }, ckpt_path
        )
        return ckpt_path

    if args.resume:
        # load from existing checkpoint
        print(f"Loading agent under {log_path}")
        ckpt_path = os.path.join(log_path, "checkpoint.pth")
        if os.path.exists(ckpt_path):
            checkpoint = torch.load(ckpt_path, map_location=args.device)
            policy.load_state_dict(checkpoint["model"])
            optim.load_state_dict(checkpoint["optim"])
            print("Successfully restore policy and optim.")
        else:
            print("Fail to restore policy and optim.")
    # trainer
    result = onpolicy_trainer(
        policy,
        train_collector,
        test_collector,
        args.epoch,
        args.step_per_epoch,
        args.repeat_per_collect,
        args.test_num,
        args.batch_size,
        episode_per_collect=args.episode_per_collect,
        stop_fn=stop_fn,
        save_best_fn=save_best_fn,
        logger=logger,
        resume_from_log=args.resume,
        save_checkpoint_fn=save_checkpoint_fn,
    )
    assert stop_fn(result["best_reward"])

    if __name__ == "__main__":
        pprint.pprint(result)
        # Let's watch its performance!
        env = gym.make(args.task)
        policy.eval()
        collector = Collector(policy, env)
        result = collector.collect(n_episode=1, render=args.render)
        rews, lens = result["rews"], result["lens"]
        print(f"Final reward: {rews.mean()}, length: {lens.mean()}")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_gail()
| 38.566524 | 87 | 0.671044 |
f68929ad3b36d5a0bf145a93b30172f0422dc9f9 | 1,996 | py | Python | python/paddle/fluid/tests/unittests/collective_scatter_api.py | Huangheyl/Paddle | a1b640bc66a5cc9583de503e7406aeba67565e8d | [
"Apache-2.0"
] | 8 | 2019-06-16T12:36:11.000Z | 2021-03-05T05:33:21.000Z | python/paddle/fluid/tests/unittests/collective_scatter_api.py | zlsh80826/Paddle | c560a7d57aad990f374ebadd330351f18e2ca65f | [
"Apache-2.0"
] | 1 | 2020-09-10T09:05:52.000Z | 2020-09-10T09:06:22.000Z | python/paddle/fluid/tests/unittests/collective_scatter_api.py | zlsh80826/Paddle | c560a7d57aad990f374ebadd330351f18e2ca65f | [
"Apache-2.0"
] | 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import os
import sys
import signal
import time
import socket
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
class TestCollectiveScatterAPI(TestCollectiveAPIRunnerBase):
    """Worker program exercising ``paddle.distributed.scatter`` in static-graph mode."""

    def __init__(self):
        # Communication ring used for the collective op.
        self.global_ring_id = 0

    def get_model(self, main_prog, startup_program, rank):
        """Build the scatter test program for one worker.

        Rank 1 (the source) splits its [10, 1000] input into two [5, 1000]
        shards and scatters them; every rank receives its shard in
        ``toutdata``.
        """
        with fluid.program_guard(main_prog, startup_program):
            tindata = layers.data(
                name="tindata",
                shape=[10, 1000],
                dtype='float64',
                append_batch_size=False)
            # Pre-filled destination tensor, overwritten by the scattered shard.
            toutdata = layers.fill_constant(
                shape=[5, 1000], dtype='float64', value=1.0)
            tensor_list = None
            if rank == 1:
                # Only the source rank provides the list of shards to scatter.
                tensor_list = paddle.split(tindata, 2, axis=0)
            paddle.distributed.scatter(toutdata, tensor_list, src=1)
            return [toutdata]
# Entry point: launched as a worker script by the collective-op test harness.
if __name__ == "__main__":
    runtime_main(TestCollectiveScatterAPI, "scatter")
| 32.721311 | 78 | 0.720441 |
34448367c8aff0711f459de7cc6564e7ce8a3d16 | 1,219 | py | Python | nycrecords_flask/public/forms.py | joelbcastillo/nycrecords_flask | 0ee139a632db9a7e1b35875d0dbd05a7dd11f4fa | [
"MIT"
] | null | null | null | nycrecords_flask/public/forms.py | joelbcastillo/nycrecords_flask | 0ee139a632db9a7e1b35875d0dbd05a7dd11f4fa | [
"MIT"
] | 2 | 2019-04-13T00:09:11.000Z | 2019-04-13T00:09:13.000Z | nycrecords_flask/public/forms.py | joelbcastillo/nycrecords_flask | 0ee139a632db9a7e1b35875d0dbd05a7dd11f4fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from nycrecords_flask.user.models import User
class LoginForm(FlaskForm):
    """Login form.

    On successful validation, ``self.user`` holds the authenticated
    :class:`~nycrecords_flask.user.models.User` row for the view to log in.
    """

    username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])

    def __init__(self, *args, **kwargs):
        """Create instance."""
        super(LoginForm, self).__init__(*args, **kwargs)
        # Populated by validate() when credentials check out.
        self.user = None

    def validate(self, extra_validators=None):
        """Validate the form.

        Accepts and forwards ``extra_validators`` to match the base
        ``FlaskForm.validate`` signature (required by Flask-WTF >= 1.0,
        harmless on earlier versions since WTForms' ``Form.validate``
        already takes it).

        :return: True only when the field validators pass, the username
            exists, the password matches, and the account is active.
        """
        initial_validation = super(LoginForm, self).validate(extra_validators)
        if not initial_validation:
            return False

        self.user = User.query.filter_by(username=self.username.data).first()
        if not self.user:
            self.username.errors.append("Unknown username")
            return False

        if not self.user.check_password(self.password.data):
            self.password.errors.append("Invalid password")
            return False

        if not self.user.active:
            self.username.errors.append("User not activated")
            return False
        return True
| 30.475 | 77 | 0.645611 |
23d7c308453406890a1db74979d4ac568352f093 | 7,556 | py | Python | tensorflow/compiler/plugin/poplar/tests/casts_elimination_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/compiler/plugin/poplar/tests/casts_elimination_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/compiler/plugin/poplar/tests/casts_elimination_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pva
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import googletest
from tensorflow.python.framework import ops
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
class IpuFuseOpsTest(xla_test.XLATestCase):
    """Checks the IPU backend's cast-elimination pass.

    Each test builds a small graph containing (implicit or explicit)
    fp16<->fp32 converts, runs it on the IPU model, then inspects the PVA
    profiling report to assert that only the expected compute sets exist —
    i.e. that redundant Cast operations were removed (or kept when still
    needed).
    """

    def testReductionSumVectorF16NoConverts(self):
        # A plain f16 reduction should lower to Reduce stages with no Casts.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, [4096], name="a")
                output = math_ops.reduce_sum(pa, axis=[0])

            fd = {pa: np.ones([4096])}
            result = sess.run(output, fd)
            self.assertAllClose(result, 4096)

        report = pva.openReport(report_helper.find_report())
        # Check that there are no casts to float at the beginning.
        ok = [
            'Sum/reduce*/ReduceOnTile/InToIntermediateNoExchange/Reduce',
            'Sum/reduce*/ReduceFinalStage/IntermediateToOutput/Reduce'
        ]
        self.assert_all_compute_sets_and_list(report, ok)

    def testNoCastsF32ToF16ToF32(self):
        # A f32 -> f16 -> f32 round trip should be eliminated entirely,
        # so no compiled report is produced at all.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float32, [3])
                b = math_ops.cast(pa, np.float16)
                c = math_ops.cast(b, np.float32)

            fd = {pa: [2.0, 0.5, 1.0]}
            result = sess.run(c, fd)
            self.assertAllClose(result, [2.0, 0.5, 1.0])

        self.assert_num_reports(report_helper, 0)

    def testNoCastsF16ReduceWithReshape(self):
        # Reshape + f16 reduce: only the Reduce compute set should remain.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, [3, 4])
                a = gen_array_ops.reshape(pa, [4, 3])
                a = math_ops.reduce_sum(a, axis=(1))

            fd = {pa: np.ones([3, 4])}
            result = sess.run(a, fd)
            self.assertAllClose(result, [3.0, 3.0, 3.0, 3.0])

        report = pva.openReport(report_helper.find_report())
        ok = [
            'Sum/reduce*/Reduce',
        ]
        self.assert_all_compute_sets_and_list(report, ok)

    def testMultipleReduces(self):
        # Two cast-wrapped reductions feeding an add: the casts around each
        # reduction should be removed, leaving two Reduces and an add.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, [3])
                pb = array_ops.placeholder(np.float16, [3])
                a = math_ops.cast(pa, np.float32)
                a = math_ops.reduce_sum(a)
                a = math_ops.cast(a, np.float16)
                b = math_ops.cast(pb, np.float32)
                b = math_ops.reduce_sum(b)
                b = math_ops.cast(b, np.float16)
                c = a + b

            fd = {pa: [2.0, 0.5, 1.0], pb: [1.0, 1.0, 2.0]}
            result = sess.run(c, fd)
            self.assertAllClose(result, 7.5)

        report = pva.openReport(report_helper.find_report())
        ok = ['Sum/reduce*/Reduce', 'Sum_1/reduce*/Reduce', 'add/add']
        self.assert_all_compute_sets_and_list(report, ok)

    def testNoCastsF16ToF32ToF16(self):
        # The symmetric f16 -> f32 -> f16 round trip is also removed entirely.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, [3])
                b = math_ops.cast(pa, np.float32)
                c = math_ops.cast(b, np.float16)

            fd = {pa: [2.0, 0.5, 1.0]}
            result = sess.run(c, fd)
            self.assertAllClose(result, [2.0, 0.5, 1.0])

        self.assert_num_reports(report_helper, 0)

    def testDontRemoveCastsIfUsed(self):
        # The intermediate f32 value participates in an add, so the casts
        # must be kept (fused into the add's expression plus a final Cast).
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, [3])
                b = math_ops.cast(pa, np.float32)
                const = array_ops.constant(1.0, np.float32)
                b = b + const
                c = math_ops.cast(b, np.float16)

            fd = {pa: [2.0, 0.5, 1.0]}
            result = sess.run(c, fd)
            self.assertAllClose(result, [3.0, 1.5, 2.0])

        report = pva.openReport(report_helper.find_report())
        ok = [
            'add/*/expression/Cast', 'add/*/expression/Op/Add',
            'Cast_1/convert.*/Cast'
        ]
        self.assert_all_compute_sets_and_list(report, ok)

    def testReduceMean(self):
        # f16 mean fuses into Reduce + scale (Multiply) + one final Cast.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            shape = [2, 10000]
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, shape)
                output = math_ops.reduce_mean(pa, axis=[1])

            # Half the f16 max so the mean itself stays representable.
            val = np.finfo(np.float16).max / 2
            result = sess.run(output, {pa: np.full(shape, val)})
            self.assertAllClose(result, [val, val])

        report = pva.openReport(report_helper.find_report())
        ok = [
            'Mean/fusion/Reduce', 'Mean/fusion*/Op/Multiply', 'Mean/convert*/Cast'
        ]
        self.assert_all_compute_sets_and_list(report, ok)

    def testReduceMax(self):
        # Max reduction over an up-cast input: one Reduce plus one Cast remain.
        cfg = IPUConfig()
        report_helper = tu.ReportHelper()
        report_helper.set_autoreport_options(cfg)
        cfg.ipu_model.compile_ipu_code = False
        cfg.configure_ipu_system()

        with self.session() as sess:
            shape = [2, 10000]
            with ops.device("/device:IPU:0"):
                pa = array_ops.placeholder(np.float16, shape)
                a = math_ops.cast(pa, np.float32)
                output = math_ops.reduce_max(a, axis=[1])

            val = np.finfo(np.float16).max / 2
            result = sess.run(output, {pa: np.full(shape, val)})
            self.assertAllClose(result, [val, val])

        report = pva.openReport(report_helper.find_report())
        ok = ['Max/reduce*/Reduce', 'Cast/convert*/Cast']
        self.assert_all_compute_sets_and_list(report, ok)
if __name__ == "__main__":
    # Force XLA to cluster even tiny graphs so the cast-elimination pass runs.
    os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=2 ' +
                                  os.environ.get('TF_XLA_FLAGS', ''))
    googletest.main()
| 33.883408 | 80 | 0.663579 |
a798e3c3b06f1c0042644c481274a0c2777ebe13 | 20,612 | py | Python | omni_diff_rl/scripts/TD3-master/real_env.py | CHH3213/two_loggers | 11b2016e424753aac85404e6a48f3b15b2448734 | [
"MIT"
] | null | null | null | omni_diff_rl/scripts/TD3-master/real_env.py | CHH3213/two_loggers | 11b2016e424753aac85404e6a48f3b15b2448734 | [
"MIT"
] | null | null | null | omni_diff_rl/scripts/TD3-master/real_env.py | CHH3213/two_loggers | 11b2016e424753aac85404e6a48f3b15b2448734 | [
"MIT"
] | null | null | null | # encoding: utf-8
#!/usr/bin/env python
"""
Doubld escape environment with discrete action space
"""
from __future__ import absolute_import, division, print_function
from gym import spaces, core
from gym import spaces
from gym.envs.registration import EnvSpec
import sys
import os
import math
import numpy as np
from numpy import pi
from numpy import random
import time
from math import *
import rospy
# import tf
from std_srvs.srv import Empty
from geometry_msgs.msg import Pose, Twist
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
# from gazebo_msgs.srv import SetModelState, GetModelState # 设置模型状态、得到模型状态
# from gazebo_msgs.msg import ModelState, ModelStates
import copy
def quart_to_rpy(x, y, z, w):
    """Convert a quaternion (x, y, z, w) to roll/pitch/yaw Euler angles.

    Uses the ZYX (yaw-pitch-roll) convention.  The asin argument is clamped
    to [-1, 1] so slightly non-normalized quaternions near the pitch
    singularity no longer raise ValueError.

    :return: (roll, pitch, yaw) in radians.
    """
    roll = math.atan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
    # Clamp to asin's domain to guard against floating-point rounding.
    sin_pitch = max(-1.0, min(1.0, 2 * (w * y - z * x)))
    pitch = math.asin(sin_pitch)
    # Distinct name avoids the original's rebinding of the parameter ``y``.
    yaw = math.atan2(2 * (w * z + x * y), 1 - 2 * (z * z + y * y))
    return roll, pitch, yaw
def euler_to_quaternion(roll, pitch, yaw):
    """Convert roll/pitch/yaw Euler angles to a quaternion (x, y, z, w).

    Uses the 'sxyz' convention, matching
    ``tf.transformations.quaternion_from_euler(roll, pitch, yaw)`` that the
    commented-out reference implementation below points at.

    Bug fix: the previous implementation had the signs of the second terms
    of ``x`` and ``w`` flipped relative to the convention its ``y`` and
    ``z`` components already used, yielding wrong quaternions whenever more
    than one angle was non-zero.
    """
    cr, sr = cos(roll / 2), sin(roll / 2)
    cp, sp = cos(pitch / 2), sin(pitch / 2)
    cy, sy = cos(yaw / 2), sin(yaw / 2)
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy
    w = cr * cp * cy + sr * sp * sy
    # import tf
    # (x, y, z, w) = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
    return x, y, z, w
class DoubleEscape(core.Env):
    """Pursuit-evasion gym environment driven by real-robot ROS topics.

    A differential-drive pursuer (``logger0``), an omnidirectional evader
    (``logger1``) and a moving obstacle (``barrier``) report their poses on
    odometry topics; actions are published back as ``Twist`` commands on the
    corresponding ``cmd_vel`` topics.  The evader is the learning agent: its
    action is a single heading angle, converted to (vx, vy) at speed
    ``self.ve``.
    """
    def __init__(self):
        # Initialize the ROS node, the observation/action spaces and all of
        # the pursuit-evasion bookkeeping state.
        self.env_type = 'continuous'
        self.name = 'omni_diff_environment'
        rospy.init_node(self.name, anonymous=True, log_level=rospy.DEBUG)
        # env properties
        self.rate = rospy.Rate(1000)
        self.flag = True
        # map parameter
        self.ve = 0.787 # evader speed (0.787/4)
        self.vp = 1.0 # pursuer speed (1/4)
        self.v_barrier = 0.7
        self.p_l = 1
        self.p_b = 1
        self.theta_bs = 0
        self.init_angle = 0
        self.old_state = ["straight", "cb_T1"]
        self.number = 1
        self.angular_p = 0
        # state parameter
        self.fuhao = 1
        self.close_barrier = 0
        self.angle_amount = 0
        # NOTE(review): ModelStates comes from gazebo_msgs, whose import is
        # commented out above -- this line would raise NameError at
        # construction time; confirm whether it is still needed.
        self.model_states = ModelStates()
        # position quaternion euler
        self.logger0_pos_x = 0.0
        self.logger0_pos_y = 0.0
        self.logger0_x = 0.0
        self.logger0_y = 0.0
        self.logger0_z = 0.0
        self.logger1_y = 0.0
        self.logger1_z = 0.0
        self.logger1_w = 1.0
        self.logger1_euler = [0, 0, 0]
        # relative position
        self.distance = 0.0
        self.theta = 0
        self.re_x = 0
        self.re_y = 0
        self.u_range = 2*np.pi
        self.obs_dim = 2 # per-agent observation dimension; omni + differential + moving obstacle: 3+3+3; obstacle uses position + velocity
        self.act_dim = 1 # action dimension (count)
        self.obs = np.zeros([3,3])
        self.action_space=spaces.Box(low=-self.u_range, high=+self.u_range, shape=(self.act_dim,), dtype=np.float32)
        self.observation_space=spaces.Box(low=-np.inf, high=+np.inf, shape=(self.obs_dim,), dtype=np.float32)
        # self.set_model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        # robot properties
        self.status = ['deactivated']*2
        # initial distances
        self.target = np.array([3,2])
        self.distance_pv =3
        self.distance_barrier = 3
        self.distance_goal = np.sqrt(np.sum(np.square(np.array(self.obs[1][:2]) - self.target )))
        self.cmd_vel0_pub = rospy.Publisher("/logger0/cmd_vel", Twist, queue_size=1)
        self.cmd_vel1_pub = rospy.Publisher("/logger1/cmd_vel", Twist, queue_size=1)
        self.cmd_vel2_pub = rospy.Publisher("/barrier/cmd_vel", Twist, queue_size=1)
        # topic subscriber
        self.logger_0_pose = rospy.Subscriber('/logger0/odom', Odometry, self.logger_0_pose_callback)
        self.logger_1_pose = rospy.Subscriber('/logger1/odom', Odometry, self.logger_1_pose_callback)
        self.barrier_pose = rospy.Subscriber('/barrier/odom', Odometry, self.barrier_pose_callback)
    # multisenser subscriber callback
    def logger_0_pose_callback(self, pos):
        '''Odometry callback for logger0 (the pursuer): caches its
        orientation quaternion, position and twist.'''
        self.logger0_x = pos.pose.pose.orientation.x
        self.logger0_y = pos.pose.pose.orientation.y
        self.logger0_z = pos.pose.pose.orientation.z
        self.logger0_w = pos.pose.pose.orientation.w
        # initial position(0,0)
        self.logger0_pos_x = pos.pose.pose.position.x
        self.logger0_pos_y = pos.pose.pose.position.y
        self.logger0_linear_x = pos.twist.twist.linear.x
        self.logger0_linear_y = pos.twist.twist.linear.y
        self.logger0_angular_z = pos.twist.twist.angular.z
        #rospy.loginfo("logger0_w\n %f", pos.pose.pose.orientation.w)
        rospy.loginfo("logger0_x\n %f", pos.pose.pose.position.x)
        rospy.loginfo("logger0_y\n %f", pos.pose.pose.position.y)
    def logger_1_pose_callback(self, pos):
        '''
        Odometry callback for logger1 (the evader): caches its orientation
        quaternion, position (shifted by the 1 m initial offset) and twist.'''
        self.logger1_x = pos.pose.pose.orientation.x
        self.logger1_y = pos.pose.pose.orientation.y
        self.logger1_z = pos.pose.pose.orientation.z
        self.logger1_w = pos.pose.pose.orientation.w
        # initial position(1,0)
        self.logger1_pos_x = pos.pose.pose.position.x + 1.0 # initial distance between two loggers
        self.logger1_pos_y = pos.pose.pose.position.y
        self.logger1_linear_x = pos.twist.twist.linear.x
        self.logger1_linear_y = pos.twist.twist.linear.y
        self.logger1_angular_z = pos.twist.twist.angular.z
        #rospy.loginfo("logger1_w\n %f", pos.pose.pose.orientation.w)
        rospy.loginfo("logger1_x\n %f", pos.pose.pose.position.x)
        rospy.loginfo("logger1_y\n %f", pos.pose.pose.position.y)
    def barrier_pose_callback(self, pos):
        '''
        Odometry callback for the moving obstacle ("barrier"): caches its
        orientation quaternion, position and twist.'''
        self.barrier_x = pos.pose.pose.orientation.x
        self.barrier_y = pos.pose.pose.orientation.y
        self.barrier_z = pos.pose.pose.orientation.z
        self.barrier_w = pos.pose.pose.orientation.w
        # initial position(1,0)
        self.barrier_pos_x = pos.pose.pose.position.x + 1.0 # initial distance between two loggers
        self.barrier_pos_y = pos.pose.pose.position.y
        self.barrier_linear_x = pos.twist.twist.linear.x
        self.barrier_linear_y = pos.twist.twist.linear.y
        self.barrier_angular_z = pos.twist.twist.angular.z
        #rospy.loginfo("logger1_w\n %f", pos.pose.pose.orientation.w)
        rospy.loginfo("barrier_x\n %f", pos.pose.pose.position.x)
        rospy.loginfo("barrier_y\n %f", pos.pose.pose.position.y)
    # def get_pose_gazebo(self):
    #     id_logger0 = self.model_states.name.index("logger0")  # pursuer
    #     id_logger1 = self.model_states.name.index("logger1")  # evader
    #     id_barrier = self.model_states.name.index("barrier")  # obstacle
    #     logger_pose0 = self.model_states.pose[id_logger0]
    #     logger_pose1 = self.model_states.pose[id_logger1]
    #     barrier_pose = self.model_states.pose[id_barrier]
    #     self.logger0_x = logger_pose0.orientation.x
    #     self.logger0_y = logger_pose0.orientation.y
    #     self.logger0_z = logger_pose0.orientation.z
    #     self.logger0_w = logger_pose0.orientation.w
    #     self.logger0_pos_x = logger_pose0.position.x
    #     self.logger0_pos_y = logger_pose0.position.y
    #     self.logger1_x = logger_pose1.orientation.x
    #     self.logger1_y = logger_pose1.orientation.y
    #     self.logger1_z = logger_pose1.orientation.z
    #     self.logger1_w = logger_pose1.orientation.w
    #     self.logger1_pos_x = logger_pose1.position.x
    #     self.logger1_pos_y = logger_pose1.position.y
    #     self.barrier_pos_x = barrier_pose.position.x
    #     self.barrier_pos_y = barrier_pose.position.y
    #     self.barrier_x = barrier_pose.orientation.x
    #     self.barrier_y = barrier_pose.orientation.y
    #     self.barrier_z = barrier_pose.orientation.z
    #     self.barrier_w = barrier_pose.orientation.w
    # establish the strategy map
    def calculate_state(self):
        """Classify the pursuer/evader situation into discrete strategy
        states stored in ``self.status`` ([pursuer_state, evader_state]),
        based on the reduced-space geometry from get_place()."""
        self.old_state = copy.deepcopy(self.status)
        # print('oldstate',self.old_state)
        if self.distance <= self.p_l:
            self.status[0] = "catch it"
            self.status[1] = "trapped"
        pv = (self.ve / self.vp)
        pd = (self.p_b / self.p_l)
        yc = (self.p_l / pv)
        cos_bs = pv
        self.theta_bs = np.arccos(cos_bs)
        bs_x = (self.p_l * np.sin(self.theta_bs))
        bs_y = (self.p_l * np.cos(self.theta_bs))
        tao_c = (self.p_l / self.ve)
        tao_s = (self.p_b / (self.vp * np.tan(self.theta_bs)))
        # calculate if the barrier is closed
        if tao_c <= tao_s:
            self.close_barrier = 1
            #print("closed barrier!!!!!")
        else:
            self.close_barrier = 0
        # if barrier is closed
        if self.close_barrier == 1:
            k = ((yc-bs_y)/(-bs_x))
            # -------------------------------------------------------------- straight-drive region --------------------------------------------------#
            if ( abs(self.theta) < 0.2) and (abs(self.re_y) > yc):
                # if (abs(abs(self.logger0_euler[2]) - abs(self.logger1_euler[2])) < 0.1) and (abs(self.re_y) > (self.p_l / pv + 0.3)):
                # print('ppppppppppppppppppp')
                self.status[0] = "straight"
                self.status[1] = "cb_T1"
            # elif (self.status[0] == "straight") and (self.theta < 0.45):
            elif (self.status[0] == "straight") and (self.theta <self.theta_bs):
                self.status[0] = "straight"
                self.status[1] = "cb_T23"
            else:
                self.status[0] = "rotate"
                self.status[1] = "cb_T23"
            # elif (k * abs(self.re_x) + yc + 0.5 - abs(self.re_y)) < 0:
            #     self.status[0] = "rotate"
            #     self.status[1] = "cb_T23"
            # else:
            #     self.status[0] = "straight"
            #     self.status[1] = "cb_T23"
            # #print("---------------------------------------------------------------------------")
    # reset at the start of the round
    def reset(self):
        """
        Reset environment
        Usage:
            obs = env.reset()
        """
        # rospy.logdebug("\nStart Environment Reset")
        # set init pose
        state = self.get_place()
        # rospy.logerr("\nEnvironment Reset!!!\n")
        return state
    # get present place from multisensor
    def get_place(self):
        """Build the 9-element state vector [x, y, yaw] for pursuer, evader
        and barrier, and refresh the reduced-space quantities (re_x, re_y,
        distance, theta) used by calculate_state()."""
        # NOTE(review): get_pose_gazebo() is commented out above, so this
        # call would raise AttributeError; confirm whether the odometry
        # callbacks are meant to replace it entirely.
        self.get_pose_gazebo()
        quat0 = [
            self.logger0_x,
            self.logger0_y,
            self.logger0_z,
            self.logger0_w
        ]
        quat1 = [
            self.logger1_x,
            self.logger1_y,
            self.logger1_z,
            self.logger1_w
        ]
        #barrier
        quat2 = [
            self.barrier_x,
            self.barrier_y,
            self.barrier_z,
            self.barrier_w
        ]
        # self.logger0_euler = tf.transformations.euler_from_quaternion(quat0)
        # self.logger1_euler = tf.transformations.euler_from_quaternion(quat1)
        self.logger0_euler = quart_to_rpy(quat0[0],quat0[1],quat0[2],quat0[3])
        self.logger1_euler = quart_to_rpy(quat1[0],quat1[1],quat1[2],quat1[3])
        self.barrier_euler = quart_to_rpy(quat2[0], quat2[1], quat2[2], quat2[3])
        delta_x = self.logger1_pos_x - self.logger0_pos_x
        delta_y = self.logger1_pos_y - self.logger0_pos_y
        pos = [0, 0]
        pos[0] = delta_x
        pos[1] = delta_y
        # #print('c',delta_x)
        dist = np.sqrt(np.sum(np.square(pos)))
        # position in reduced space
        self.re_x = (pos[0] * np.sin(self.logger0_euler[2]) - pos[1] * np.cos(self.logger0_euler[2]))
        self.re_y = (pos[0] * np.cos(self.logger0_euler[2]) + pos[1] * np.sin(self.logger0_euler[2]))
        # self.distance = np.sqrt(np.sum(np.square(pos)))
        self.distance = np.sqrt(np.square(self.re_x) + np.square(self.re_y))
        self.theta = np.arctan(self.re_x/self.re_y)
        # self.re_x = self.re_x[0]
        # self.re_y = self.re_y[0]
        # #print('a', self.re_x)
        # all the information needed to calculate strategy
        # self.obs[0][0] = dist
        # self.obs[0][1] = self.theta
        # self.obs[0][2] = self.logger0_euler[2]
        # self.obs[0][3] = self.re_x
        # self.obs[0][4] = self.re_y
        # self.obs[1][0] = dist
        # self.obs[1][1] = self.theta
        # self.obs[1][2] = self.logger1_euler[2]
        # self.obs[1][3] = self.re_x
        # self.obs[1][4] = self.re_y
        self.obs[0][0] = self.logger0_pos_x
        self.obs[0][1] = self.logger0_pos_y
        self.obs[0][2] = self.logger0_euler[2]
        self.obs[1][0] = self.logger1_pos_x
        self.obs[1][1] = self.logger1_pos_y
        self.obs[1][2] = self.logger1_euler[2]
        self.obs[2][0] = self.barrier_pos_x
        self.obs[2][1] = self.barrier_pos_y
        self.obs[2][2] = self.barrier_euler[2]
        self.obs = np.array(self.obs)
        state = [self.obs[0][0], self.obs[0][1],self.obs[0][2],
                 self.obs[1][0], self.obs[1][1], self.obs[1][2],
                 self.obs[2][0], self.obs[2][1], self.obs[2][2]]
        # print(state)
        return state
    def get_reward_done(self):
        """
        Compute reward and done based on current status:
        -500 (terminal) if caught by the pursuer or touching the barrier,
        +1000 (terminal) on reaching the goal, otherwise a small penalty
        proportional to the remaining goal distance.
        Return:
            reward
            done
        """
        done = False
        r = 0
        self.distance_pv = np.sqrt(np.sum(np.square(np.array(self.obs[0][:2]) - np.array(self.obs[1][:2]))))
        self.distance_goal = np.sqrt(np.sum(np.square(np.array(self.obs[1][:2]) - self.target )))
        self.distance_barrier = np.sqrt(np.sum(np.square(np.array(self.obs[1][:2]-self.obs[2][:2]))))
        # print('distance',distance_pv)
        if self.distance_pv <= 0.6:
            r -=500
            done =True
        # elif self.distance_pv>1:
        #     r += 0.01*self.distance_pv
        #############1##################
        # if distance_goal<=0.3:
        #     r +=1000
        #     done =True
        # elif distance > 1 and distance_goal > 0.3:
        #     r -=0.01*distance_goal
        # elif distance <=1 and distance_goal>0.3:
        #     r+=0.1*distance
        ##############################
        ##########2##########
        if self.distance_goal<=0.3:
            r +=1000
            done =True
        else:
            r -= 0.01*self.distance_goal
        ##########################
        # if self.obs[1][0] > 8 or self.obs[1][0] < -8 or self.obs[1][1] > 8 or self.obs[1][1] < -8:
        if self.distance_barrier<=0.6:
            r -=500
            done =True
        # print('done',done)
        return r, done
    def take_action(self):
        """
        Publish pursuer control: drive straight toward/away from the evader
        or rotate in place, depending on the strategy state in
        ``self.status[0]``.
        Returns:cmd_vel
        """
        # rospy.logdebug("\nStart Taking Action")
        cmd_vel0 = Twist()
        if self.status[0] == "straight":
            if(self.re_y > 0):
                cmd_vel0.linear.x = self.vp
                cmd_vel0.angular.z = 0
            else:
                cmd_vel0.linear.x = -self.vp
                cmd_vel0.angular.z = 0
        if self.status[0] == "rotate" and (self.theta > 0):
            cmd_vel0.linear.x = 0
            cmd_vel0.angular.z = (self.vp/self.p_b)
        if self.status[0] == "rotate" and (self.theta < 0):
            cmd_vel0.linear.x = 0
            cmd_vel0.angular.z = (-self.vp/self.p_b)
        if self.status[0] == "catch it":
            # cmd_vel0.linear.x = 0
            # cmd_vel0.angular.z = 0
            if(self.re_y > 0):
                cmd_vel0.linear.x = self.vp
                cmd_vel0.angular.z = 0
            else:
                cmd_vel0.linear.x = -self.vp
                cmd_vel0.angular.z = 0
        # cmd_vel1 = Twist()
        # print('state',self.status)
        # print('oldstate',self.old_state)
        # if self.status[1] == "cb_T23":
        #     # print('ksksksksdfsfs')
        #     # if (self.old_state == "cb_T1"):
        #     #     self.angle_amount += 1
        #     #     self.init_angle = (self.angle_amount * self.theta_bs)
        #     if (self.old_state[1] == "cb_T1"):
        #         print('hahdfsasffsdfgghghahaah')
        #         self.init_angle = (self.logger0_euler[2] - self.theta_bs)
        #         # seems the angle never actually gets applied?
        #         self.number = self.number + 1
        #     cmd_vel1.linear.y = self.ve * np.sin(self.init_angle)
        #     cmd_vel1.linear.x = self.ve * np.cos(self.init_angle)
        # if self.status[1] == "cb_T1":
        #     cmd_vel1.linear.y = self.ve * np.sin(self.init_angle)
        #     cmd_vel1.linear.x = self.ve * np.cos(self.init_angle)
        # if self.status[1] == "trapped":
        #     cmd_vel1.linear.y = self.ve * np.sin(self.init_angle)
        #     cmd_vel1.linear.x = self.ve * np.cos(self.init_angle)
        # print('init_angle',self.init_angle)
        # print('x',cmd_vel1.linear.x)
        # print('y',cmd_vel1.linear.y)
        # rospy.logdebug("cmd_vel0: {} \ncmd_vel1: {}".format(cmd_vel0, cmd_vel1))
        # self.pausePhysics()
        # rospy.logdebug("\nEnd Taking Action\n")
        return cmd_vel0
    def take_e_action(self):
        '''
        it's the evader's optimal control: pick (and keep) a heading angle
        depending on the evader strategy state in ``self.status[1]``.
        Returns the chosen heading angle (radians).
        '''
        cmd_vel1 = Twist()
        # print('state',self.status)
        # print('oldstate',self.old_state)
        if self.status[1] == "cb_T23":
            # print('ksksksksdfsfs')
            # if (self.old_state == "cb_T1"):
            #     self.angle_amount += 1
            #     self.init_angle = (self.angle_amount * self.theta_bs)
            if (self.old_state[1] == "cb_T1"):
                # print('hahdfsasffsdfgghghahaah')
                if (self.theta > 0):
                    self.init_angle = (self.logger0_euler[2] - self.theta_bs)
                else:
                    self.init_angle = (self.logger0_euler[2] + self.theta_bs)
                # self.init_angle = (self.logger0_euler[2] - self.theta_bs)
                # seems the angle never actually gets applied?
                self.number = self.number + 1
            cmd_vel1.linear.y = self.ve * np.sin(self.init_angle)
            cmd_vel1.linear.x = self.ve * np.cos(self.init_angle)
        if self.status[1] == "cb_T1":
            cmd_vel1.linear.y = self.ve * np.sin(self.init_angle)
            cmd_vel1.linear.x = self.ve * np.cos(self.init_angle)
        if self.status[1] == "trapped":
            cmd_vel1.linear.y = self.ve * np.sin(self.init_angle)
            cmd_vel1.linear.x = self.ve * np.cos(self.init_angle)
        # print('init_angle',self.init_angle)
        # print('x',cmd_vel1.linear.x)
        # print('y',cmd_vel1.linear.y)
        return self.init_angle
    def take_barrier_action(self):
        '''
        it's a nominal control to barrier:
        after touching the obstacle the evader flees in the opposite direction
        '''
        angle = self.init_angle+np.pi
        return angle
    def take_target_action(self):
        '''
        it's a nominal control to barrier:
        the evader heads toward the target
        '''
        delta_xy = np.array(self.obs[1][:2]) - self.target
        if delta_xy[0] !=0:
            self.target2evader = np.arctan(delta_xy[1]/delta_xy[0]) # bearing between the target point and the evader
        else:
            self.target2evader = np.pi/2
        angle = self.target2evader
        return angle
    # with algorithm
    def step(self, action_e):
        """
        Advance the environment one tick: publish the evader command for the
        heading ``action_e``, then recompute reward/done and the new state.
        obs, rew, done, info = env.step(action_indices)
        """
        # rospy.logdebug("\nStart environment step")
        # #print(action)
        info = self.status
        self.calculate_state() # to evader train
        cmd_vel0 = self.take_action() # pursuer optimal control
        cmd_vel1 = Twist()
        cmd_vel1.linear.y = self.ve * np.sin(action_e)
        cmd_vel1.linear.x = self.ve * np.cos(action_e)
        # print('vy', cmd_vel1.linear.y)
        # print('vx', cmd_vel1.linear.x)
        cmd_vel2 = Twist()
        cmd_vel2.linear.x = self.v_barrier
        for _ in range(1):
            # self.cmd_vel0_pub.publish(cmd_vel0)  # pursuer
            rospy.sleep(0.0001)
            self.cmd_vel1_pub.publish(cmd_vel1)  # evader
            # self.cmd_vel2_pub.publish(cmd_vel2) #obstacle
        # self.flag=False
        # update status
        reward, done = self.get_reward_done()
        state = self.get_place()
        self.rate.sleep()
        # rospy.logdebug("\nEnd environment step\n")
        # self.angular_p = cmd_vel0.angular.z
        return state, reward, done, info
if __name__ =='__main__':
    # simple standalone sanity check (prints 300000); the environment
    # class above is used by importing this module.
    print(int(3e5))
| 39.260952 | 138 | 0.566854 |
67ce3117335de3265c1cd921d9246800c228cd54 | 13,568 | py | Python | shap/explainers/deep/deep_pytorch.py | YONGHAN-KIM/shap | 4c76cdb0a5ba2e5769a1d35c22eef117dd65e924 | [
"MIT"
] | 1 | 2019-05-22T02:48:13.000Z | 2019-05-22T02:48:13.000Z | shap/explainers/deep/deep_pytorch.py | YONGHAN-KIM/shap | 4c76cdb0a5ba2e5769a1d35c22eef117dd65e924 | [
"MIT"
] | null | null | null | shap/explainers/deep/deep_pytorch.py | YONGHAN-KIM/shap | 4c76cdb0a5ba2e5769a1d35c22eef117dd65e924 | [
"MIT"
] | 1 | 2018-12-21T16:11:00.000Z | 2018-12-21T16:11:00.000Z | import numpy as np
import warnings
from shap.explainers.explainer import Explainer
from distutils.version import LooseVersion
torch = None
class PyTorchDeepExplainer(Explainer):
    """DeepLIFT-style SHAP explainer for PyTorch models.

    Registers forward/backward hooks on every non-container layer so that
    backpropagated gradients are replaced by DeepLIFT multipliers, then
    averages (gradient * input-delta) over the background ``data`` samples.

    Fixes over the previous revision:
    * ``deeplift_grad`` formatted the builtin ``type`` into its warning
      instead of the offending module's class name.
    * ``shap_values`` reused loop variable ``i`` in the interim branch,
      clobbering the outer output-rank index for subsequent samples.
    """

    def __init__(self, model, data):
        """Set up the explainer.

        Args:
            model: a torch model, or a ``(model, layer)`` tuple to explain the
                inputs of an interim layer instead of the model inputs.
            data: background dataset tensor, or a list of tensors for
                multi-input models.
        """
        # try and import pytorch
        global torch
        if torch is None:
            import torch
            if LooseVersion(torch.__version__) < LooseVersion("0.4"):
                warnings.warn("Your PyTorch version is older than 0.4 and not supported.")

        # check if we have multiple inputs
        self.multi_input = False
        if type(data) == list:
            self.multi_input = True
        if type(data) != list:
            data = [data]
        self.data = data
        self.layer = None
        self.input_handle = None
        self.interim = False
        self.interim_inputs_shape = None
        self.expected_value = None  # to keep the DeepExplainer base happy
        if type(model) == tuple:
            self.interim = True
            model, layer = model
            model = model.eval()
            self.layer = layer
            self.add_target_handle(self.layer)

            # if we are taking an interim layer, the 'data' is going to be the input
            # of the interim layer; we will capture this using a forward hook
            with torch.no_grad():
                _ = model(*data)
                interim_inputs = self.layer.target_input
                if type(interim_inputs) is tuple:
                    # this should always be true, but just to be safe
                    self.interim_inputs_shape = [i.shape for i in interim_inputs]
                else:
                    self.interim_inputs_shape = [interim_inputs.shape]
            self.target_handle.remove()
            del self.layer.target_input
        self.model = model.eval()

        # probe the model once to learn how many outputs need explaining
        self.multi_output = False
        self.num_outputs = 1
        with torch.no_grad():
            outputs = model(*data)
            if outputs.shape[1] > 1:
                self.multi_output = True
                self.num_outputs = outputs.shape[1]

    def add_target_handle(self, layer):
        """Register a forward hook that captures `layer`'s input tensors."""
        input_handle = layer.register_forward_hook(self.get_target_input)
        self.target_handle = input_handle

    def add_handles(self, model, forward_handle, backward_handle):
        """
        Add handles to all non-container layers in the model.
        Recursively for non-container layers

        Returns:
            list of hook handles, so they can all be removed later.
        """
        handles_list = []
        for child in model.children():
            if 'nn.modules.container' in str(type(child)):
                handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
            else:
                handles_list.append(child.register_forward_hook(forward_handle))
                handles_list.append(child.register_backward_hook(backward_handle))
        return handles_list

    def remove_attributes(self, model):
        """
        Removes the x and y attributes which were added by the forward handles
        Recursively searches for non-container layers
        """
        for child in model.children():
            if 'nn.modules.container' in str(type(child)):
                self.remove_attributes(child)
            else:
                try:
                    del child.x
                except AttributeError:
                    pass
                try:
                    del child.y
                except AttributeError:
                    pass

    @staticmethod
    def get_target_input(module, input, output):
        """Saves the tensor - attached to its graph.
        Used if we want to explain the interim outputs of a model
        """
        try:
            del module.target_input
        except AttributeError:
            pass
        setattr(module, 'target_input', input)

    @staticmethod
    def add_interim_values(module, input, output):
        """If necessary, saves interim tensors detached from the graph.
        Used to calculate multipliers
        """
        try:
            del module.x
        except AttributeError:
            pass
        try:
            del module.y
        except AttributeError:
            pass
        module_type = module.__class__.__name__
        if module_type in op_handler:
            func_name = op_handler[module_type].__name__
            # First, check for cases where we don't need to save the x and y tensors
            if func_name == 'passthrough':
                pass
            else:
                # check only the 0th input varies
                for i in range(len(input)):
                    if i != 0 and type(output) is tuple:
                        assert input[i] == output[i], "Only the 0th input may vary!"
                # if a new method is added, it must be added here too. This ensures tensors
                # are only saved if necessary
                if func_name in ['maxpool', 'nonlinear_1d']:
                    # only save tensors if necessary
                    if type(input) is tuple:
                        setattr(module, 'x', input[0].detach())
                    else:
                        setattr(module, 'x', input.detach())
                    if type(output) is tuple:
                        setattr(module, 'y', output[0].detach())
                    else:
                        setattr(module, 'y', output.detach())

    @staticmethod
    def deeplift_grad(module, grad_input, grad_output):
        """Backward hook dispatching to the DeepLIFT handler for this
        module type; returning None leaves the gradient unchanged."""
        # first, get the module type
        module_type = module.__class__.__name__
        # first, check the module is supported
        if module_type in op_handler:
            if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
                return op_handler[module_type](module, grad_input, grad_output)
        else:
            # BUGFIX: previously formatted the builtin `type` instead of the
            # unrecognized module's class name.
            print('Warning: unrecognized nn.Module: {}'.format(module_type))
            return grad_input

    def gradient(self, idx, inputs):
        """Gradient of output `idx` w.r.t. `inputs` (or the interim layer's
        inputs when in interim mode), as numpy arrays."""
        self.model.zero_grad()
        X = [x.requires_grad_() for x in inputs]
        outputs = self.model(*X)
        selected = [val for val in outputs[:, idx]]
        if self.interim:
            interim_inputs = self.layer.target_input
            grads = [torch.autograd.grad(selected, input)[0].cpu().numpy() for input in interim_inputs]
            del self.layer.target_input
            return grads, [i.detach().cpu().numpy() for i in interim_inputs]
        else:
            grads = [torch.autograd.grad(selected, x)[0].cpu().numpy() for x in X]
            return grads

    def shap_values(self, X, ranked_outputs=None, output_rank_order="max"):
        """Estimate SHAP values for the samples in X.

        Args:
            X: tensor (or list of tensors) of samples to explain.
            ranked_outputs: if given (and the model is multi-output), explain
                only this many outputs per sample, chosen by rank order.
            output_rank_order: "max", "min" or "max_abs" ranking of outputs.

        Returns:
            attributions per output (plus the rank matrix when
            ``ranked_outputs`` is used).
        """
        # X ~ self.model_input
        # X_data ~ self.data

        # check if we have multiple inputs
        if not self.multi_input:
            assert type(X) != list, "Expected a single tensor model input!"
            X = [X]
        else:
            assert type(X) == list, "Expected a list of model inputs!"

        if ranked_outputs is not None and self.multi_output:
            with torch.no_grad():
                model_output_values = self.model(*X)
            # rank and determine the model outputs that we will explain
            if output_rank_order == "max":
                _, model_output_ranks = torch.sort(model_output_values, descending=True)
            elif output_rank_order == "min":
                _, model_output_ranks = torch.sort(model_output_values, descending=False)
            elif output_rank_order == "max_abs":
                _, model_output_ranks = torch.sort(torch.abs(model_output_values), descending=True)
            else:
                assert False, "output_rank_order must be max, min, or max_abs!"
            model_output_ranks = model_output_ranks[:, :ranked_outputs]
        else:
            model_output_ranks = (torch.ones((X[0].shape[0], self.num_outputs)).int() *
                                  torch.arange(0, self.num_outputs).int())

        # add the gradient handles
        handles = self.add_handles(self.model, self.add_interim_values, self.deeplift_grad)
        if self.interim:
            self.add_target_handle(self.layer)

        # compute the attributions
        output_phis = []
        for i in range(model_output_ranks.shape[1]):
            phis = []
            if self.interim:
                for k in range(len(self.interim_inputs_shape)):
                    phis.append(np.zeros((X[0].shape[0], ) + self.interim_inputs_shape[k][1: ]))
            else:
                for k in range(len(X)):
                    phis.append(np.zeros(X[k].shape))
            for j in range(X[0].shape[0]):
                # tile the inputs to line up with the background data samples
                tiled_X = [X[l][j:j + 1].repeat(
                                   (self.data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape) - 1)])) for l
                           in range(len(X))]
                joint_x = [torch.cat((tiled_X[l], self.data[l]), dim=0) for l in range(len(X))]
                # run attribution computation graph
                feature_ind = model_output_ranks[j, i]
                sample_phis = self.gradient(feature_ind, joint_x)
                # assign the attributions to the right part of the output arrays
                if self.interim:
                    sample_phis, output = sample_phis
                    x, data = [], []
                    # BUGFIX: loop variable renamed from `i` to `k` -- it used
                    # to clobber the outer output-rank index `i`.
                    for k in range(len(output)):
                        x_temp, data_temp = np.split(output[k], 2)
                        x.append(x_temp)
                        data.append(data_temp)
                    for l in range(len(self.interim_inputs_shape)):
                        phis[l][j] = (sample_phis[l][self.data[l].shape[0]:] * (x[l] - data[l])).mean(0)
                else:
                    for l in range(len(X)):
                        phis[l][j] = (sample_phis[l][self.data[l].shape[0]:] * (X[l][j: j + 1] - self.data[l])).mean(0)
            output_phis.append(phis[0] if not self.multi_input else phis)

        # cleanup; remove all gradient handles
        for handle in handles:
            handle.remove()
        self.remove_attributes(self.model)
        if self.interim:
            self.target_handle.remove()

        if not self.multi_output:
            return output_phis[0]
        elif ranked_outputs is not None:
            return output_phis, model_output_ranks
        else:
            return output_phis
def passthrough(module, grad_input, grad_output):
    """Backward-hook handler that leaves the gradient untouched
    (returning None from a backward hook keeps the computed gradient)."""
    return None
def maxpool(module, grad_input, grad_output):
    """DeepLIFT backward-hook handler for MaxPool1d/2d/3d modules.

    The joint batch stored on the module stacks the explained samples on top
    of the reference samples; the handler re-runs the pooling to get the
    argmax indices, unpools the gradient-weighted output differences, and
    divides by the input delta to form the multipliers.
    """
    pool_to_unpool = {
        'MaxPool1d': torch.nn.functional.max_unpool1d,
        'MaxPool2d': torch.nn.functional.max_unpool2d,
        'MaxPool3d': torch.nn.functional.max_unpool3d
    }
    pool_to_function = {
        'MaxPool1d': torch.nn.functional.max_pool1d,
        'MaxPool2d': torch.nn.functional.max_pool2d,
        'MaxPool3d': torch.nn.functional.max_pool3d
    }
    # input delta between the explained half and the reference half
    delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]
    dup0 = [2] + [1 for i in delta_in.shape[1:]]
    # we also need to check if the output is a tuple
    y, ref_output = torch.chunk(module.y, 2)
    cross_max = torch.max(y, ref_output)
    diffs = torch.cat([cross_max - ref_output, y - cross_max], 0)
    # all of this just to unpool the outputs
    with torch.no_grad():
        # re-run the pooling with return_indices=True to recover argmaxes
        _, indices = pool_to_function[module.__class__.__name__](
            module.x, module.kernel_size, module.stride, module.padding,
            module.dilation, module.ceil_mode, True)
        xmax_pos, rmax_pos = torch.chunk(pool_to_unpool[module.__class__.__name__](
            grad_output[0] * diffs, indices, module.kernel_size, module.stride,
            module.padding, delta_in.shape), 2)
    grad_input = [None for _ in grad_input]
    # zero out positions where the input delta is (numerically) zero
    grad_input[0] = torch.where(torch.abs(delta_in) < 1e-7, torch.zeros_like(delta_in),
                                (xmax_pos + rmax_pos) / delta_in).repeat(dup0)
    # delete the attributes
    del module.x
    del module.y
    return tuple(grad_input)
def linear_1d(module, grad_input, grad_output):
    """Handler for linear modules: the ordinary gradient is already the
    correct DeepLIFT multiplier, so no change is made (returns None)."""
    return None
def nonlinear_1d(module, grad_input, grad_output):
    """DeepLIFT rescale rule for elementwise nonlinearities.

    The joint batch saved on the module stacks the explained samples over the
    reference samples; the gradient is replaced by delta_out / delta_in,
    falling back to the plain gradient where delta_in is numerically tiny.
    """
    half_out = int(module.y.shape[0] / 2)
    delta_out = module.y[:half_out] - module.y[half_out:]
    half_in = int(module.x.shape[0] / 2)
    delta_in = module.x[:half_in] - module.x[half_in:]
    tile_shape = [2] + [1] * (len(delta_in.shape) - 1)
    near_zero = torch.abs(delta_in.repeat(tile_shape)) < 1e-6
    multipliers = (delta_out / delta_in).repeat(tile_shape)
    new_grads = [None] * len(grad_input)
    new_grads[0] = torch.where(near_zero, grad_input[0], grad_output[0] * multipliers)
    # drop the saved tensors so stale values never leak into the next pass
    del module.x
    del module.y
    return tuple(new_grads)
# Dispatch table mapping an nn.Module class name to the DeepLIFT
# backward-hook handler that should process its gradients.
op_handler = {
    # passthrough ops, where we make no change to the gradient
    'Dropout3d': passthrough,
    'Dropout2d': passthrough,
    'Dropout': passthrough,
    'AlphaDropout': passthrough,
    # linear ops, handled by linear_1d (gradient left unchanged)
    'Conv2d': linear_1d,
    'Linear': linear_1d,
    'AvgPool1d': linear_1d,
    'AvgPool2d': linear_1d,
    'AvgPool3d': linear_1d,
    # elementwise nonlinearities, handled by the rescale rule
    'ReLU': nonlinear_1d,
    'ELU': nonlinear_1d,
    'Sigmoid': nonlinear_1d,
    'Tanh': nonlinear_1d,
    'Softplus': nonlinear_1d,
    'Softmax': nonlinear_1d,
    # max pooling, handled via unpooling of output differences
    'MaxPool1d': maxpool,
    'MaxPool2d': maxpool,
    'MaxPool3d': maxpool,
}
| 40.261128 | 119 | 0.585938 |
3b832e24a31f28f2b2bcec5ab6db5263befc1a32 | 466 | py | Python | OpenSeesAPI/Analysis/__init__.py | alok230460/Open | ccd7c43c82c13bc87e6c208302f7448060b856ba | [
"MIT",
"Unlicense"
] | 41 | 2015-09-08T09:41:13.000Z | 2022-03-26T08:40:02.000Z | OpenSeesAPI/Analysis/__init__.py | alok230460/Open | ccd7c43c82c13bc87e6c208302f7448060b856ba | [
"MIT",
"Unlicense"
] | 4 | 2015-09-11T17:20:46.000Z | 2016-05-02T20:42:58.000Z | OpenSeesAPI/Analysis/__init__.py | alok230460/Open | ccd7c43c82c13bc87e6c208302f7448060b856ba | [
"MIT",
"Unlicense"
] | 31 | 2015-09-11T17:01:36.000Z | 2021-11-08T17:48:27.000Z | __author__ = 'Nasser'
# Import Folder with Several Classes
from OpenSeesAPI.Analysis import Algorithm
from OpenSeesAPI.Analysis import Analysis
from OpenSeesAPI.Analysis import Analyze
from OpenSeesAPI.Analysis import Constraints
from OpenSeesAPI.Analysis import Integrator
from OpenSeesAPI.Analysis import Numberer
from OpenSeesAPI.Analysis import System
from OpenSeesAPI.Analysis import Test
# Import One Class Files
from OpenSeesAPI.Analysis.Eigen import Eigen
| 31.066667 | 44 | 0.856223 |
db0e979272b12cfcfe0c1d6ad9676e51616150ff | 349 | py | Python | toolkit/routes/templates/lib.py | salonimalhotra-ui/seo-audits-toolkit | 99af8b53dffad45f679eaf06b4a8080df75fcd72 | [
"MIT"
] | 1 | 2020-12-21T18:21:34.000Z | 2020-12-21T18:21:34.000Z | toolkit/routes/templates/lib.py | x0rzkov/seo-audits-toolkit | 29994cbab51bd0697c717b675df3c176096e4f03 | [
"MIT"
] | null | null | null | toolkit/routes/templates/lib.py | x0rzkov/seo-audits-toolkit | 29994cbab51bd0697c717b675df3c176096e4f03 | [
"MIT"
] | null | null | null | from flask import current_app as app
from datetime import datetime
@app.template_filter('formatdatetime')
def format_datetime(value, format="%d %b %Y %I:%M %p"):
    """Jinja filter rendering a datetime as (default) ``d Mon YYYY HH:MM P``.

    ``None`` renders as the empty string and plain strings pass through
    unchanged; anything else is formatted with ``strftime``.
    """
    if value is None:
        return ""
    return value if isinstance(value, str) else value.strftime(format)
825a0f9d9e69ca9b0478674e9e5a6275644b13fc | 1,148 | py | Python | src/zsl/application/modules/logger_module.py | AtteqCom/zsl | 0d418ef957c9780263b1031dbc59482cd974bc04 | [
"MIT"
] | 2 | 2017-05-17T08:08:52.000Z | 2019-03-25T00:24:51.000Z | src/zsl/application/modules/logger_module.py | AtteqCom/zsl | 0d418ef957c9780263b1031dbc59482cd974bc04 | [
"MIT"
] | 100 | 2017-01-11T13:43:11.000Z | 2022-02-10T09:27:18.000Z | src/zsl/application/modules/logger_module.py | AtteqCom/zsl | 0d418ef957c9780263b1031dbc59482cd974bc04 | [
"MIT"
] | 1 | 2017-05-10T10:27:01.000Z | 2017-05-10T10:27:01.000Z | """
:mod:`zsl.application.modules.logger_module`
--------------------------------------------
"""
from __future__ import unicode_literals
import logging.config
from injector import Binder, Module
from zsl import Config, Zsl, inject
class LoggerModule(Module):
    """Configure the application logger.

    Applies a ``logging.config.dictConfig`` taken from the app config key
    ``LOGGING`` (falling back to a minimal default whose root level depends
    on the ``DEBUG`` flag), then rebuilds the application's own logger so it
    picks up the new configuration.
    """
    LOGGING_CONFIG_NAME = 'LOGGING'
    def configure(self, binder):
        # type: (Binder) -> None
        super(LoggerModule, self).configure(binder)
        self.configure_logging()
    @inject(config=Config, app=Zsl)
    def configure_logging(self, config, app):
        # type: (Config, Zsl) -> None
        # Fallback used when the config has no LOGGING section: root logger
        # at DEBUG when the app runs in debug mode, WARNING otherwise.
        default_config = dict(
            version=1,
            root=dict(
                level='DEBUG' if config.get('DEBUG', False) else 'WARNING'
            )
        )
        logging.config.dictConfig(config.get(LoggerModule.LOGGING_CONFIG_NAME, default_config))
        self._recreate_app_logger(app)
    def _recreate_app_logger(self, app):
        # Drop the logger cached under the app's name so the getLogger()
        # call below builds a fresh one under the just-applied config.
        # NOTE: relies on private logging APIs (_acquireLock/_releaseLock
        # and the manager's loggerDict); these may change between Python
        # versions.
        logging._acquireLock()
        del logging.getLogger(app.name).manager.loggerDict[app.name]
        logging._releaseLock()
        app._logger = logging.getLogger(app.name)
| 28 | 95 | 0.629791 |
a65a9c5af8456d362ca1a18da4440eb522006b49 | 4,034 | py | Python | tests/io_utils_test.py | angelolab/mibi-bin-tools | 369f62dfb64af41966d25ca188e96dce676f5e38 | [
"Apache-2.0"
] | null | null | null | tests/io_utils_test.py | angelolab/mibi-bin-tools | 369f62dfb64af41966d25ca188e96dce676f5e38 | [
"Apache-2.0"
] | 18 | 2021-11-23T14:39:27.000Z | 2022-03-08T23:52:27.000Z | tests/io_utils_test.py | angelolab/mibi-bin-tools | 369f62dfb64af41966d25ca188e96dce676f5e38 | [
"Apache-2.0"
] | 1 | 2021-11-21T11:20:36.000Z | 2021-11-21T11:20:36.000Z | import os
import tempfile
import pathlib
import pytest
from mibi_bin_tools import io_utils as iou
def test_list_files():
    """list_files: substring matching and exact-name matching."""
    # substring/extension matching
    with tempfile.TemporaryDirectory() as temp_dir:
        base = pathlib.Path(temp_dir)
        names = ['tf.txt', 'othertf.txt', 'test.out', 'test.csv']
        for name in names:
            (base / name).touch()
        # directories must never be returned
        os.mkdir(os.path.join(temp_dir, 'badfolder_test'))

        # default: substrs=None returns every file
        assert sorted(iou.list_files(temp_dir)) == sorted(names)
        # substrs as a single string
        assert sorted(iou.list_files(temp_dir, substrs='.txt')) == sorted(names[0:2])
        # substrs as a list
        assert sorted(iou.list_files(temp_dir, substrs=['.txt', '.out'])) == sorted(names[:3])

    # exact file-name matching
    with tempfile.TemporaryDirectory() as temp_dir:
        base = pathlib.Path(temp_dir)
        names = ['chan0.tif', 'chan.tif', 'c.tif']
        for name in names:
            (base / name).touch()
        # directories must never be returned
        os.mkdir(os.path.join(temp_dir, 'badfolder_test'))

        # default: substrs=None returns every file
        assert sorted(iou.list_files(temp_dir, exact_match=True)) == sorted(names)
        # substrs as a single string
        assert sorted(iou.list_files(temp_dir, substrs='c', exact_match=True)) == [names[2]]
        # substrs as a list
        assert sorted(iou.list_files(temp_dir, substrs=['c', 'chan'], exact_match=True)) == sorted(names[1:])
def test_remove_file_extensions():
    """``iou.remove_file_extensions`` strips the final extension from each
    file name, passing ``None``/empty lists straight through."""
    # degenerate inputs are returned unchanged
    assert iou.remove_file_extensions(None) is None
    assert iou.remove_file_extensions([]) == []

    # a mixture of file paths and extensions is reduced to the stems
    files = [
        'fov1.tiff',
        'fov2.tif',
        'fov3.png',
        'fov4.jpg'
    ]
    files_sans_ext = ['fov1', 'fov2', 'fov3', 'fov4']
    new_files = iou.remove_file_extensions(files)
    assert new_files == files_sans_ext

    # multi-dot names lose only the last extension and raise a warning
    with pytest.warns(UserWarning):
        new_files = iou.remove_file_extensions(['fov5.tar.gz', 'fov6.sample.csv'])
        assert new_files == ['fov5.tar', 'fov6.sample']
def test_extract_delimited_names():
    """``iou.extract_delimited_names`` keeps only the text before the
    delimiter, warning when a required delimiter is missing."""
    # degenerate inputs (None / []) are returned unchanged
    assert iou.extract_delimited_names(None) is None
    assert iou.extract_delimited_names([]) == []

    # a name without the delimiter warns when the delimiter is non-optional
    with pytest.warns(UserWarning):
        iou.extract_delimited_names(['fov2'], delimiter='_', delimiter_optional=False)

    # regular usage: everything after the first '_' is discarded
    names = [
        'fov1_restofname',
        'fov2',
    ]
    assert iou.extract_delimited_names(names, delimiter='_') == ['fov1', 'fov2']
def test_list_folders():
    """Unit tests for ``iou.list_folders``: returns sub-directories (never
    files) whose names match the given substrings.

    Bug fix: the original assertions compared the results of
    ``list.sort()``, which returns ``None`` on both sides, so every assert
    was vacuously true and the test could never fail. They are replaced
    with ``sorted(...)`` comparisons that actually verify the output.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # set up temp_dir subdirs
        dirnames = [
            'tf_txt',
            'othertf_txt',
            'test_csv',
            'test_out',
        ]
        for dirname in dirnames:
            os.mkdir(os.path.join(temp_dir, dirname))

        # add extra file (shouldn't be picked up)
        pathlib.Path(os.path.join(temp_dir, 'test_badfile.txt')).touch()

        # test substrs is None (default): all folders, no files
        get_all = iou.list_folders(temp_dir)
        assert sorted(get_all) == sorted(dirnames)

        # test substrs is not list (single string)
        get_txt = iou.list_folders(temp_dir, substrs='_txt')
        assert sorted(get_txt) == sorted(dirnames[0:2])

        # test substrs is list
        get_test_and_other = iou.list_folders(temp_dir, substrs=['test_', 'other'])
        assert sorted(get_test_and_other) == sorted(dirnames[1:])
| 31.030769 | 94 | 0.619732 |
d73fec548dd141572c64dd01d3a8193f314ba845 | 6,297 | py | Python | oldclient/models.py | dativebase/oldclient | ac15e0a2d8a84954f40a75ffd86a1aff76af64d5 | [
"Apache-2.0"
] | null | null | null | oldclient/models.py | dativebase/oldclient | ac15e0a2d8a84954f40a75ffd86a1aff76af64d5 | [
"Apache-2.0"
] | null | null | null | oldclient/models.py | dativebase/oldclient | ac15e0a2d8a84954f40a75ffd86a1aff76af64d5 | [
"Apache-2.0"
] | null | null | null | APPLICATION_SETTINGS = {
'object_language_name': u'',
'object_language_id': u'',
'metalanguage_name': u'',
'metalanguage_id': u'',
'metalanguage_inventory': u'',
'orthographic_validation': u'None', # Value should be one of [u'None', u'Warning', u'Error']
'narrow_phonetic_inventory': u'',
'narrow_phonetic_validation': u'None',
'broad_phonetic_inventory': u'',
'broad_phonetic_validation': u'None',
'morpheme_break_is_orthographic': u'',
'morpheme_break_validation': u'None',
'phonemic_inventory': u'',
'morpheme_delimiters': u'',
'punctuation': u'',
'grammaticalities': u'',
'unrestricted_users': [], # A list of user ids
'storage_orthography': u'', # An orthography id
'input_orthography': u'', # An orthography id
'output_orthography': u'' # An orthography id
}
# Template (empty) payload dicts for each OLD resource type; a client copies
# one of these and fills in values before sending a request. The templates
# are registered by resource name in the MODELS dict at the bottom.

COLLECTION = {
    'title': u'',
    'type': u'',
    'url': u'',
    'description': u'',
    'markup_language': u'',
    'contents': u'',
    'speaker': u'',
    'source': u'',
    'elicitor': u'',
    'enterer': u'',
    'date_elicited': u'',
    'tags': [],
    'files': []
}

CORPUS = {
    'name': u'',
    'description': u'',
    'content': u'',
    'form_search': u'',
    'tags': []
}

FILE = {
    'name': u'',
    'description': u'',
    'date_elicited': u'',  # mm/dd/yyyy
    'elicitor': u'',
    'speaker': u'',
    'utterance_type': u'',
    'embedded_file_markup': u'',
    'embedded_file_password': u'',
    'tags': [],
    'forms': [],
    'file': ''  # file data Base64 encoded
}

# Variant of FILE where the payload carries the file content inline.
FILE_BASE64 = {
    'filename': u'',  # Will be filtered out on update requests
    'description': u'',
    'date_elicited': u'',  # mm/dd/yyyy
    'elicitor': u'',
    'speaker': u'',
    'utterance_type': u'',
    'tags': [],
    'forms': [],
    'base64_encoded_file': ''  # file data Base64 encoded; will be filtered out on update requests
}

# Variant of FILE for multipart/form-data uploads (indexed tags/forms keys).
FILE_MPFD = {
    'filename': u'',  # Will be filtered out on update requests
    'description': u'',
    'date_elicited': u'',  # mm/dd/yyyy
    'elicitor': u'',
    'speaker': u'',
    'utterance_type': u'',
    'tags-0': u'',
    'forms-0': u''
}

# A file defined as a (start, end) sub-interval of a parent file.
FILE_SUB_REF = {
    'parent_file': u'',
    'name': u'',
    'start': u'',
    'end': u'',
    'description': u'',
    'date_elicited': u'',  # mm/dd/yyyy
    'elicitor': u'',
    'speaker': u'',
    'utterance_type': u'',
    'tags': [],
    'forms': []
}

# A file hosted externally and referenced by URL.
FILE_EXT_HOST = {
    'url': u'',
    'name': u'',
    'password': u'',
    'MIME_type': u'',
    'description': u'',
    'date_elicited': u'',  # mm/dd/yyyy
    'elicitor': u'',
    'speaker': u'',
    'utterance_type': u'',
    'tags': [],
    'forms': []
}

FORM = {
    'transcription': u'',
    'phonetic_transcription': u'',
    'narrow_phonetic_transcription': u'',
    'morpheme_break': u'',
    'grammaticality': u'',
    'morpheme_gloss': u'',
    'translations': [],
    'comments': u'',
    'speaker_comments': u'',
    'elicitation_method': u'',
    'tags': [],
    'syntactic_category': u'',
    'speaker': u'',
    'elicitor': u'',
    'verifier': u'',
    'source': u'',
    'status': u'tested',
    'date_elicited': u'',  # mm/dd/yyyy
    'syntax': u'',
    'semantics': u''
}

FORM_SEARCH = {
    'name': u'',
    'search': u'',
    'description': u'',
    'searcher': u''
}

MORPHEME_LANGUAGE_MODEL = {
    'name': u'',
    'description': u'',
    'corpus': u'',
    'vocabulary_morphology': u'',
    'toolkit': u'',
    'order': u'',
    'smoothing': u'',
    'categorial': False
}

MORPHOLOGY = {
    'name': u'',
    'description': u'',
    'lexicon_corpus': u'',
    'rules_corpus': u'',
    'script_type': u'lexc',
    'extract_morphemes_from_rules_corpus': False,
    'rules': u'',
    'rich_upper': True,
    'rich_lower': False,
    'include_unknowns': False
}

MORPHOLOGICAL_PARSER = {
    'name': u'',
    'phonology': u'',
    'morphology': u'',
    'language_model': u'',
    'description': u''
}

ORTHOGRAPHY = {
    'name': u'',
    'orthography': u'',
    'lowercase': False,
    'initial_glottal_stops': True
}

PAGE = {
    'name': u'',
    'heading': u'',
    'markup_language': u'',
    'content': u'',
    'html': u''
}

PHONOLOGY = {
    'name': u'',
    'description': u'',
    'script': u''
}

# BibTeX-style bibliographic source record.
SOURCE = {
    'file': u'',
    'type': u'',
    'key': u'',
    'address': u'',
    'annote': u'',
    'author': u'',
    'booktitle': u'',
    'chapter': u'',
    'crossref': u'',
    'edition': u'',
    'editor': u'',
    'howpublished': u'',
    'institution': u'',
    'journal': u'',
    'key_field': u'',
    'month': u'',
    'note': u'',
    'number': u'',
    'organization': u'',
    'pages': u'',
    'publisher': u'',
    'school': u'',
    'series': u'',
    'title': u'',
    'type_field': u'',
    'url': u'',
    'volume': u'',
    'year': u'',
    'affiliation': u'',
    'abstract': u'',
    'contents': u'',
    'copyright': u'',
    'ISBN': u'',
    'ISSN': u'',
    'keywords': u'',
    'language': u'',
    'location': u'',
    'LCCN': u'',
    'mrnumber': u'',
    'price': u'',
    'size': u'',
}

SPEAKER = {
    'first_name': u'',
    'last_name': u'',
    'page_content': u'',
    'dialect': u'dialect',
    'markup_language': u'reStructuredText'
}

SYNTACTIC_CATEGORY = {
    'name': u'',
    'type': u'',
    'description': u''
}

USER = {
    'username': u'',
    'password': u'',
    'password_confirm': u'',
    'first_name': u'',
    'last_name': u'',
    'email': u'',
    'affiliation': u'',
    'role': u'',
    'markup_language': u'',
    'page_content': u'',
    'input_orthography': None,
    'output_orthography': None
}

# Registry mapping resource name -> template dict.
MODELS = {
    'application_settings': APPLICATION_SETTINGS,
    'collection': COLLECTION,
    'corpus': CORPUS,
    'file': FILE,
    'file_base64': FILE_BASE64,
    'file_mpfd': FILE_MPFD,
    'file_sub_ref': FILE_SUB_REF,
    'file_ext_host': FILE_EXT_HOST,
    'form': FORM,
    'form_search': FORM_SEARCH,
    'morpheme_language_model': MORPHEME_LANGUAGE_MODEL,
    'morphology': MORPHOLOGY,
    'morphological_parser': MORPHOLOGICAL_PARSER,
    'orthography': ORTHOGRAPHY,
    'page': PAGE,
    'phonology': PHONOLOGY,
    'source': SOURCE,
    'speaker': SPEAKER,
    'syntactic_category': SYNTACTIC_CATEGORY,
    'user': USER,
}
| 21.639175 | 97 | 0.522471 |
56fdf3d490b9eff1f65f1ab36bf1cf5cc01f68fb | 19,654 | py | Python | mmpose/models/heads/interhand_3d_head.py | SummerVideoAnalysis/mmpose | 70d60e03b7eaa0ae1ec66cc7a22c00916f00c9e1 | [
"Apache-2.0"
] | 38 | 2021-06-01T01:31:36.000Z | 2022-03-23T14:42:20.000Z | mmpose/models/heads/interhand_3d_head.py | connor-john/mmpose | f5eabbf33ba514a1ddaf914e835d6abc7accee39 | [
"Apache-2.0"
] | 5 | 2021-07-26T15:43:16.000Z | 2021-10-31T07:34:50.000Z | mmpose/models/heads/interhand_3d_head.py | connor-john/mmpose | f5eabbf33ba514a1ddaf914e835d6abc7accee39 | [
"Apache-2.0"
] | 8 | 2021-06-22T09:49:23.000Z | 2022-02-14T03:45:18.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,
constant_init, normal_init)
from mmpose.core.evaluation.top_down_eval import (
keypoints_from_heatmaps3d, multilabel_classification_accuracy)
from mmpose.core.post_processing import flip_back
from mmpose.models.builder import build_loss
from mmpose.models.necks import GlobalAveragePooling
from ..builder import HEADS
class Heatmap3DHead(nn.Module):
    """Heatmap3DHead is a sub-module of Interhand3DHead, and outputs 3D
    heatmaps. Heatmap3DHead is composed of (>=0) number of deconv layers and a
    simple conv2d layer.

    Args:
        in_channels (int): Number of input channels
        out_channels (int): Number of output channels. Must be divisible by
            depth_size (the 2D conv output is reshaped to a depth axis).
        depth_size (int): Number of depth discretization size
        num_deconv_layers (int): Number of deconv layers.
            num_deconv_layers should >= 0. Note that 0 means no deconv layers.
        num_deconv_filters (list|tuple): Number of filters.
        num_deconv_kernels (list|tuple): Kernel sizes.
        extra (dict): Configs for extra conv layers. Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 depth_size=64,
                 num_deconv_layers=3,
                 num_deconv_filters=(256, 256, 256),
                 num_deconv_kernels=(4, 4, 4),
                 extra=None):
        super().__init__()

        # out_channels is later reshaped to (K, depth_size) in forward().
        assert out_channels % depth_size == 0
        self.depth_size = depth_size
        self.in_channels = in_channels

        if extra is not None and not isinstance(extra, dict):
            raise TypeError('extra should be dict or None.')

        if num_deconv_layers > 0:
            self.deconv_layers = self._make_deconv_layer(
                num_deconv_layers,
                num_deconv_filters,
                num_deconv_kernels,
            )
        elif num_deconv_layers == 0:
            self.deconv_layers = nn.Identity()
        else:
            raise ValueError(
                f'num_deconv_layers ({num_deconv_layers}) should >= 0.')

        identity_final_layer = False
        if extra is not None and 'final_conv_kernel' in extra:
            assert extra['final_conv_kernel'] in [0, 1, 3]
            if extra['final_conv_kernel'] == 3:
                padding = 1
            elif extra['final_conv_kernel'] == 1:
                padding = 0
            else:
                # 0 for Identity mapping.
                identity_final_layer = True
            kernel_size = extra['final_conv_kernel']
        else:
            kernel_size = 1
            padding = 0

        if identity_final_layer:
            self.final_layer = nn.Identity()
        else:
            conv_channels = num_deconv_filters[
                -1] if num_deconv_layers > 0 else self.in_channels

            layers = []
            if extra is not None:
                # Optional extra conv+BN+ReLU stack before the output conv.
                num_conv_layers = extra.get('num_conv_layers', 0)
                num_conv_kernels = extra.get('num_conv_kernels',
                                             [1] * num_conv_layers)

                for i in range(num_conv_layers):
                    layers.append(
                        build_conv_layer(
                            dict(type='Conv2d'),
                            in_channels=conv_channels,
                            out_channels=conv_channels,
                            kernel_size=num_conv_kernels[i],
                            stride=1,
                            padding=(num_conv_kernels[i] - 1) // 2))
                    layers.append(
                        build_norm_layer(dict(type='BN'), conv_channels)[1])
                    layers.append(nn.ReLU(inplace=True))

            layers.append(
                build_conv_layer(
                    cfg=dict(type='Conv2d'),
                    in_channels=conv_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    stride=1,
                    padding=padding))

            if len(layers) > 1:
                self.final_layer = nn.Sequential(*layers)
            else:
                self.final_layer = layers[0]

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Make deconv layers (each: ConvTranspose2d + BN + ReLU, stride 2)."""
        if num_layers != len(num_filters):
            error_msg = f'num_layers({num_layers}) ' \
                        f'!= length of num_filters({len(num_filters)})'
            raise ValueError(error_msg)
        if num_layers != len(num_kernels):
            error_msg = f'num_layers({num_layers}) ' \
                        f'!= length of num_kernels({len(num_kernels)})'
            raise ValueError(error_msg)

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i])

            planes = num_filters[i]
            layers.append(
                build_upsample_layer(
                    dict(type='deconv'),
                    in_channels=self.in_channels,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=False))
            layers.append(nn.BatchNorm2d(planes))
            layers.append(nn.ReLU(inplace=True))
            # subsequent layers consume this layer's output channels
            self.in_channels = planes

        return nn.Sequential(*layers)

    @staticmethod
    def _get_deconv_cfg(deconv_kernel):
        """Get configurations (padding/output_padding) for deconv layers."""
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')

        return deconv_kernel, padding, output_padding

    def forward(self, x):
        """Forward function.

        Returns a tensor of shape (N, C // depth_size, depth_size, H, W).
        """
        x = self.deconv_layers(x)
        x = self.final_layer(x)
        N, C, H, W = x.shape
        # reshape the 2D heatmap to 3D heatmap
        x = x.reshape(N, C // self.depth_size, self.depth_size, H, W)
        return x

    def init_weights(self):
        """Initialize model weights."""
        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.ConvTranspose2d):
                normal_init(m, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                constant_init(m, 1)
        for m in self.final_layer.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001, bias=0)
            elif isinstance(m, nn.BatchNorm2d):
                constant_init(m, 1)
class Heatmap1DHead(nn.Module):
    """Heatmap1DHead is a sub-module of Interhand3DHead, and outputs 1D
    heatmaps.

    A small MLP maps the input feature vector to ``heatmap_size`` logits,
    which are converted to a single continuous coordinate via soft-argmax.

    Args:
        in_channels (int): Number of input channels
        heatmap_size (int): Heatmap size
        hidden_dims (list|tuple): Number of feature dimension of FC layers.
    """

    def __init__(self, in_channels=2048, heatmap_size=64, hidden_dims=(512, )):
        super().__init__()

        self.in_channels = in_channels
        self.heatmap_size = heatmap_size

        dims = [in_channels, *hidden_dims, heatmap_size]
        self.fc = self._make_linear_layers(dims, relu_final=False)

    def soft_argmax_1d(self, heatmap1d):
        """Differentiable argmax: softmax-weighted average of bin indices."""
        probs = F.softmax(heatmap1d, 1)
        positions = torch.arange(
            self.heatmap_size, dtype=probs.dtype, device=probs.device)
        return (probs * positions[None, :]).sum(dim=1)

    def _make_linear_layers(self, feat_dims, relu_final=False):
        """Make linear layers."""
        layers = []
        num_pairs = len(feat_dims) - 1
        for idx in range(num_pairs):
            layers.append(nn.Linear(feat_dims[idx], feat_dims[idx + 1]))
            # ReLU after every layer except (optionally) the last one
            is_last = idx == num_pairs - 1
            if not is_last or relu_final:
                layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward function. Returns a (N, 1) coordinate tensor."""
        heatmap1d = self.fc(x)
        return self.soft_argmax_1d(heatmap1d).view(-1, 1)

    def init_weights(self):
        """Initialize model weights."""
        for module in self.fc.modules():
            if isinstance(module, nn.Linear):
                normal_init(module, mean=0, std=0.01, bias=0)
class MultilabelClassificationHead(nn.Module):
    """MultilabelClassificationHead is a sub-module of Interhand3DHead, and
    outputs hand type classification.

    An MLP maps the input feature vector to ``num_labels`` independent
    sigmoid probabilities (multi-label, not softmax).

    Args:
        in_channels (int): Number of input channels
        num_labels (int): Number of labels
        hidden_dims (list|tuple): Number of hidden dimension of FC layers.
    """

    def __init__(self, in_channels=2048, num_labels=2, hidden_dims=(512, )):
        super().__init__()

        self.in_channels = in_channels
        # Fix: the attribute was originally misspelled as ``num_labesl``.
        # Expose the correctly spelled name; keep the old one so any
        # external code reading the typo'd attribute keeps working.
        self.num_labels = num_labels
        self.num_labesl = num_labels

        feature_dims = [in_channels, *hidden_dims, num_labels]
        self.fc = self._make_linear_layers(feature_dims, relu_final=False)

    def _make_linear_layers(self, feat_dims, relu_final=False):
        """Make linear layers (ReLU between layers; optional final ReLU)."""
        layers = []
        for i in range(len(feat_dims) - 1):
            layers.append(nn.Linear(feat_dims[i], feat_dims[i + 1]))
            if i < len(feat_dims) - 2 or \
                    (i == len(feat_dims) - 2 and relu_final):
                layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward function. Returns per-label probabilities in [0, 1]."""
        labels = torch.sigmoid(self.fc(x))
        return labels

    def init_weights(self):
        """Initialize model weights."""
        for m in self.fc.modules():
            if isinstance(m, nn.Linear):
                normal_init(m, mean=0, std=0.01, bias=0)
@HEADS.register_module()
class Interhand3DHead(nn.Module):
    """Interhand 3D head of paper ref: Gyeongsik Moon. "InterHand2.6M: A
    Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single
    RGB Image".

    Combines three sub-heads: per-hand 3D keypoint heatmaps (right + left),
    a 1D heatmap for relative hand-root depth, and a multi-label hand-type
    classifier.

    Args:
        keypoint_head_cfg (dict): Configs of Heatmap3DHead for hand
            keypoint estimation.
        root_head_cfg (dict): Configs of Heatmap1DHead for relative
            hand root depth estimation.
        hand_type_head_cfg (dict): Configs of MultilabelClassificationHead
            for hand type classification.
        loss_keypoint (dict): Config for keypoint loss. Default: None.
        loss_root_depth (dict): Config for relative root depth loss.
            Default: None.
        loss_hand_type (dict): Config for hand type classification
            loss. Default: None.
    """

    def __init__(self,
                 keypoint_head_cfg,
                 root_head_cfg,
                 hand_type_head_cfg,
                 loss_keypoint=None,
                 loss_root_depth=None,
                 loss_hand_type=None,
                 train_cfg=None,
                 test_cfg=None):
        super().__init__()

        # build sub-module heads (left/right keypoint heads share one config)
        self.right_hand_head = Heatmap3DHead(**keypoint_head_cfg)
        self.left_hand_head = Heatmap3DHead(**keypoint_head_cfg)
        self.root_head = Heatmap1DHead(**root_head_cfg)
        self.hand_type_head = MultilabelClassificationHead(
            **hand_type_head_cfg)
        self.neck = GlobalAveragePooling()

        # build losses
        self.keypoint_loss = build_loss(loss_keypoint)
        self.root_depth_loss = build_loss(loss_root_depth)
        self.hand_type_loss = build_loss(loss_hand_type)
        self.train_cfg = {} if train_cfg is None else train_cfg
        self.test_cfg = {} if test_cfg is None else test_cfg
        self.target_type = self.test_cfg.get('target_type', 'GaussianHeatMap')

    def init_weights(self):
        """Initialize the weights of all sub-heads."""
        self.left_hand_head.init_weights()
        self.right_hand_head.init_weights()
        self.root_head.init_weights()
        self.hand_type_head.init_weights()

    def get_loss(self, output, target, target_weight):
        """Calculate loss for hand keypoint heatmaps, relative root depth and
        hand type.

        Args:
            output (list[Tensor]): a list of outputs from multiple heads.
            target (list[Tensor]): a list of targets for multiple heads.
            target_weight (list[Tensor]): a list of targets weight for
                multiple heads.
        """
        losses = dict()

        # hand keypoint loss
        assert not isinstance(self.keypoint_loss, nn.Sequential)
        out, tar, tar_weight = output[0], target[0], target_weight[0]
        assert tar.dim() == 5 and tar_weight.dim() == 3
        losses['hand_loss'] = self.keypoint_loss(out, tar, tar_weight)

        # relative root depth loss
        assert not isinstance(self.root_depth_loss, nn.Sequential)
        out, tar, tar_weight = output[1], target[1], target_weight[1]
        assert tar.dim() == 2 and tar_weight.dim() == 2
        losses['rel_root_loss'] = self.root_depth_loss(out, tar, tar_weight)

        # hand type loss
        assert not isinstance(self.hand_type_loss, nn.Sequential)
        out, tar, tar_weight = output[2], target[2], target_weight[2]
        assert tar.dim() == 2 and tar_weight.dim() in [1, 2]
        losses['hand_type_loss'] = self.hand_type_loss(out, tar, tar_weight)

        return losses

    def get_accuracy(self, output, target, target_weight):
        """Calculate accuracy for hand type.

        Args:
            output (list[Tensor]): a list of outputs from multiple heads.
            target (list[Tensor]): a list of targets for multiple heads.
            target_weight (list[Tensor]): a list of targets weight for
                multiple heads.
        """
        accuracy = dict()
        accuracy['acc_classification'] = multilabel_classification_accuracy(
            output[2].detach().cpu().numpy(),
            target[2].detach().cpu().numpy(),
            target_weight[2].detach().cpu().numpy(),
        )
        return accuracy

    def forward(self, x):
        """Forward function.

        Returns a list: [3D heatmaps (right+left concatenated on the keypoint
        axis), relative root depth, hand-type probabilities].
        """
        outputs = []
        outputs.append(
            torch.cat([self.right_hand_head(x),
                       self.left_hand_head(x)], dim=1))
        # root depth and hand type operate on globally pooled features
        x = self.neck(x)
        outputs.append(self.root_head(x))
        outputs.append(self.hand_type_head(x))
        return outputs

    def inference_model(self, x, flip_pairs=None):
        """Inference function.

        Returns:
            output (list[np.ndarray]): list of output hand keypoint
            heatmaps, relative root depth and hand type.

        Args:
            x (torch.Tensor[NxKxHxW]): Input features.
            flip_pairs (None | list[tuple()):
                Pairs of keypoints which are mirrored.
        """
        output = self.forward(x)

        if flip_pairs is not None:
            # flip 3D heatmap
            heatmap_3d = output[0]
            N, K, D, H, W = heatmap_3d.shape
            # reshape 3D heatmap to 2D heatmap so flip_back can be reused
            heatmap_3d = heatmap_3d.reshape(N, K * D, H, W)
            # 2D heatmap flip
            heatmap_3d_flipped_back = flip_back(
                heatmap_3d.detach().cpu().numpy(),
                flip_pairs,
                target_type=self.target_type)
            # reshape back to 3D heatmap
            heatmap_3d_flipped_back = heatmap_3d_flipped_back.reshape(
                N, K, D, H, W)
            # feature is not aligned, shift flipped heatmap for higher accuracy
            if self.test_cfg.get('shift_heatmap', False):
                heatmap_3d_flipped_back[...,
                                        1:] = heatmap_3d_flipped_back[..., :-1]
            output[0] = heatmap_3d_flipped_back

            # flip relative hand root depth (depth sign inverts under mirror)
            output[1] = -output[1].detach().cpu().numpy()

            # flip hand type (swap left/right probabilities)
            hand_type = output[2].detach().cpu().numpy()
            hand_type_flipped_back = hand_type.copy()
            hand_type_flipped_back[:, 0] = hand_type[:, 1]
            hand_type_flipped_back[:, 1] = hand_type[:, 0]
            output[2] = hand_type_flipped_back
        else:
            output = [out.detach().cpu().numpy() for out in output]

        return output

    def decode(self, img_metas, output, **kwargs):
        """Decode hand keypoint, relative root depth and hand type.

        Args:
            img_metas (list(dict)): Information about data augmentation
                By default this includes:

                - "image_file: path to the image file
                - "center": center of the bbox
                - "scale": scale of the bbox
                - "rotation": rotation of the bbox
                - "bbox_score": score of bbox
                - "heatmap3d_depth_bound": depth bound of hand keypoint
                    3D heatmap
                - "root_depth_bound": depth bound of relative root depth
                    1D heatmap
            output (list[np.ndarray]): model predicted 3D heatmaps, relative
                root depth and hand type.
        """
        batch_size = len(img_metas)
        result = {}

        heatmap3d_depth_bound = np.ones(batch_size, dtype=np.float32)
        root_depth_bound = np.ones(batch_size, dtype=np.float32)
        center = np.zeros((batch_size, 2), dtype=np.float32)
        scale = np.zeros((batch_size, 2), dtype=np.float32)
        image_paths = []
        score = np.ones(batch_size, dtype=np.float32)
        if 'bbox_id' in img_metas[0]:
            bbox_ids = []
        else:
            bbox_ids = None

        for i in range(batch_size):
            heatmap3d_depth_bound[i] = img_metas[i]['heatmap3d_depth_bound']
            root_depth_bound[i] = img_metas[i]['root_depth_bound']
            center[i, :] = img_metas[i]['center']
            scale[i, :] = img_metas[i]['scale']
            image_paths.append(img_metas[i]['image_file'])

            if 'bbox_score' in img_metas[i]:
                score[i] = np.array(img_metas[i]['bbox_score']).reshape(-1)
            if bbox_ids is not None:
                bbox_ids.append(img_metas[i]['bbox_id'])

        all_boxes = np.zeros((batch_size, 6), dtype=np.float32)
        all_boxes[:, 0:2] = center[:, 0:2]
        all_boxes[:, 2:4] = scale[:, 0:2]
        # scale is defined as: bbox_size / 200.0, so we
        # need multiply 200.0 to get bbox size
        all_boxes[:, 4] = np.prod(scale * 200.0, axis=1)
        all_boxes[:, 5] = score
        result['boxes'] = all_boxes
        result['image_paths'] = image_paths
        result['bbox_ids'] = bbox_ids

        # decode 3D heatmaps of hand keypoints
        heatmap3d = output[0]
        preds, maxvals = keypoints_from_heatmaps3d(heatmap3d, center, scale)
        keypoints_3d = np.zeros((batch_size, preds.shape[1], 4),
                                dtype=np.float32)
        keypoints_3d[:, :, 0:3] = preds[:, :, 0:3]
        keypoints_3d[:, :, 3:4] = maxvals
        # transform keypoint depth to camera space
        keypoints_3d[:, :, 2] = \
            (keypoints_3d[:, :, 2] / self.right_hand_head.depth_size - 0.5) \
            * heatmap3d_depth_bound[:, np.newaxis]
        result['preds'] = keypoints_3d

        # decode relative hand root depth
        # transform relative root depth to camera space
        result['rel_root_depth'] = (output[1] / self.root_head.heatmap_size -
                                    0.5) * root_depth_bound

        # decode hand type (threshold the sigmoid probabilities)
        result['hand_type'] = output[2] > 0.5
        return result
| 37.796154 | 79 | 0.578254 |
a192133bb754497c69a441f4900c60afb9bd0a81 | 2,855 | py | Python | src/scenic/simulators/carla/actions.py | BehaviorPredictionTestingPlatform/scenic-1 | a2e0d08d8dd0be06aaf0071058ba337209361328 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T08:40:23.000Z | 2020-12-02T08:40:23.000Z | src/scenic/simulators/carla/actions.py | BehaviorPredictionTestingPlatform/scenic-1 | a2e0d08d8dd0be06aaf0071058ba337209361328 | [
"BSD-3-Clause"
] | null | null | null | src/scenic/simulators/carla/actions.py | BehaviorPredictionTestingPlatform/scenic-1 | a2e0d08d8dd0be06aaf0071058ba337209361328 | [
"BSD-3-Clause"
] | 4 | 2021-08-09T18:21:26.000Z | 2022-03-22T22:20:02.000Z | import math as _math
import carla as _carla
from scenic.domains.driving.actions import *
import scenic.simulators.carla.utils.utils as _utils
import scenic.simulators.carla.model as _carlaModel
################################################
# Actions available to all carla.Actor objects #
################################################
# Backward-compatible alias for the driving-domain SetPositionAction.
SetLocationAction = SetPositionAction  # TODO refactor
class SetAngularVelocityAction(Action):
    """Set the CARLA actor's angular velocity, decomposing the scalar value
    along the actor's current heading."""

    def __init__(self, angularVel):
        self.angularVel = angularVel

    def applyTo(self, obj, sim):
        velX = self.angularVel * _math.cos(obj.heading)
        velY = self.angularVel * _math.sin(obj.heading)
        obj.carlaActor.set_angular_velocity(
            _utils.scalarToCarlaVector3D(velX, velY))
class SetTransformAction(Action):  # TODO eliminate
    """Teleport a CARLA actor to the given position and heading.

    Args:
        pos: Scenic position (converted to a CARLA location at the object's
            current elevation).
        heading: Scenic heading (converted to a CARLA rotation).
    """

    def __init__(self, pos, heading):
        self.pos = pos
        self.heading = heading

    def applyTo(self, obj, sim):
        # Bug fix: the original referenced the bare names ``pos``/``heading``
        # (NameError at runtime) instead of the attributes stored in __init__.
        loc = _utils.scenicToCarlaLocation(self.pos, z=obj.elevation)
        rot = _utils.scenicToCarlaRotation(self.heading)
        transform = _carla.Transform(loc, rot)
        obj.carlaActor.set_transform(transform)
#############################################
# Actions specific to carla.Vehicle objects #
#############################################
class VehicleAction(Action):
    """Base class for actions that can only be taken by vehicle agents."""

    def canBeTakenBy(self, agent):
        return isinstance(agent, _carlaModel.Vehicle)
class SetManualGearShiftAction(VehicleAction):
    """Enable or disable manual gear shifting on a CARLA vehicle."""

    def __init__(self, manualGearShift):
        if not isinstance(manualGearShift, bool):
            raise RuntimeError('Manual gear shift must be a boolean.')
        self.manualGearShift = manualGearShift

    def applyTo(self, obj, sim):
        # read-modify-write the vehicle's current control state
        carlaVehicle = obj.carlaActor
        control = carlaVehicle.get_control()
        control.manual_gear_shift = self.manualGearShift
        carlaVehicle.apply_control(control)
class SetGearAction(VehicleAction):
    """Select a specific gear on a CARLA vehicle."""

    def __init__(self, gear):
        if not isinstance(gear, int):
            raise RuntimeError('Gear must be an int.')
        self.gear = gear

    def applyTo(self, obj, sim):
        # read-modify-write the vehicle's current control state
        carlaVehicle = obj.carlaActor
        control = carlaVehicle.get_control()
        control.gear = self.gear
        carlaVehicle.apply_control(control)
class SetManualFirstGearShiftAction(VehicleAction):  # TODO eliminate
    """Enable manual gear shifting and engage first gear in a single control
    command (overwrites the vehicle's current control state)."""

    def applyTo(self, obj, sim):
        ctrl = _carla.VehicleControl(manual_gear_shift=True, gear=1)
        obj.carlaActor.apply_control(ctrl)
#################################################
# Actions available to all carla.Walker objects #
#################################################
class PedestrianAction(Action):
    """Base class for actions that can only be taken by pedestrian agents."""

    def canBeTakenBy(self, agent):
        return isinstance(agent, _carlaModel.Pedestrian)
class SetJumpAction(PedestrianAction):
    """Make a CARLA walker start (or stop) jumping."""

    def __init__(self, jump):
        if not isinstance(jump, bool):
            raise RuntimeError('Jump must be a boolean.')
        self.jump = jump

    def applyTo(self, obj, sim):
        # read-modify-write the walker's current control state
        carlaWalker = obj.carlaActor
        control = carlaWalker.get_control()
        control.jump = self.jump
        carlaWalker.apply_control(control)
| 29.739583 | 72 | 0.691769 |
e5d2e848de20341f0d6efd660a72194e3b55ad9f | 551 | py | Python | 1-pack_web_static.py | devephy/AirBnB_clone_v2 | b9f0ba65d76f730c0b2ef98b10764424af426570 | [
"Apache-2.0"
] | null | null | null | 1-pack_web_static.py | devephy/AirBnB_clone_v2 | b9f0ba65d76f730c0b2ef98b10764424af426570 | [
"Apache-2.0"
] | null | null | null | 1-pack_web_static.py | devephy/AirBnB_clone_v2 | b9f0ba65d76f730c0b2ef98b10764424af426570 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
'''Fabric script to generate .tgz archive'''
from fabric.api import local
from datetime import datetime
from fabric.decorators import runs_once
@runs_once
def do_pack():
    '''generates .tgz archive from the contents of the web_static folder'''
    # archives are collected under ./versions (created if missing)
    local("mkdir -p versions")
    # timestamped name, e.g. versions/web_static_20240101120000.tgz
    path = ("versions/web_static_{}.tgz"
            .format(datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")))
    result = local("tar -cvzf {} web_static"
                   .format(path))
    # Fabric's local() result exposes .failed; return None if tar failed
    if result.failed:
        return None
    return path
| 25.045455 | 75 | 0.653358 |
ce61f2a54b9ffc84cbbeef47b59ec06a011ba961 | 6,211 | py | Python | contrib/zmq/zmq_sub3.4.py | zentoshi/zentoshi | 99ef65d56ac5d702556f2b718298c34f07168498 | [
"MIT"
] | 4 | 2019-12-08T19:54:02.000Z | 2020-02-04T20:11:14.000Z | contrib/zmq/zmq_sub3.4.py | zentoshi/zentoshi | 99ef65d56ac5d702556f2b718298c34f07168498 | [
"MIT"
] | 1 | 2019-11-10T14:06:54.000Z | 2019-11-10T14:06:54.000Z | contrib/zmq/zmq_sub3.4.py | zentoshi/zentoshi | 99ef65d56ac5d702556f2b718298c34f07168498 | [
"MIT"
] | 13 | 2019-11-09T17:09:35.000Z | 2021-12-21T07:07:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
ZenX should be started with the command line arguments:
zenxd -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
# This example relies on asyncio syntax introduced in Python 3.4
# (@asyncio.coroutine / yield from); bail out on older interpreters.
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
    print("This example only works with Python 3.4 and greater")
    sys.exit(1)

# Port the zenxd ZMQ publishers were started on (see module docstring).
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashchainlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtxlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernancevote")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernanceobject")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashinstantsenddoublespend")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlocksig")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlocksig")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernancevote")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernanceobject")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawinstantsenddoublespend")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashchainlock":
print('- HASH CHAINLOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashtx":
print ('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashtxlock":
print('- HASH TX LOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashgovernancevote":
print('- HASH GOVERNANCE VOTE ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashgovernanceobject":
print('- HASH GOVERNANCE OBJECT ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashinstantsenddoublespend":
print('- HASH IS DOUBLE SPEND ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawchainlock":
print('- RAW CHAINLOCK ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawchainlocksig":
print('- RAW CHAINLOCK SIG ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawtxlock":
print('- RAW TX LOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawtxlocksig":
print('- RAW TX LOCK SIG ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawgovernancevote":
print('- RAW GOVERNANCE VOTE ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawgovernanceobject":
print('- RAW GOVERNANCE OBJECT ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawinstantsenddoublespend":
print('- RAW IS DOUBLE SPEND ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
    def start(self):
        """Register a SIGINT handler, schedule the first receive, and block in the loop."""
        # Route Ctrl-C to self.stop() so the loop and ZMQ context shut down cleanly.
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        # Kick off the first receive; handle() re-schedules itself after each message.
        self.loop.create_task(self.handle())
        self.loop.run_forever()
    def stop(self):
        """Stop the event loop and tear down the ZMQ context (releasing its sockets)."""
        self.loop.stop()
        self.zmqContext.destroy()
# Script entry point: build the handler and block in its event loop until SIGINT.
# NOTE(review): this runs on import as well; consider an `if __name__ == "__main__":` guard.
daemon = ZMQHandler()
daemon.start()
| 45.007246 | 107 | 0.6379 |
04d599d9d3ee8bb47cbde96c3efed7d13109ebc8 | 2,957 | py | Python | gent/objects/textBox.py | flywinged/TGL | a567ae717e7f9390eb9a5ce3383e5b796389af05 | [
"MIT"
] | 1 | 2020-06-19T18:59:02.000Z | 2020-06-19T18:59:02.000Z | gent/objects/textBox.py | flywinged/TGL | a567ae717e7f9390eb9a5ce3383e5b796389af05 | [
"MIT"
] | 16 | 2020-06-17T22:26:18.000Z | 2020-07-28T21:39:10.000Z | gent/objects/textBox.py | flywinged/TGL | a567ae717e7f9390eb9a5ce3383e5b796389af05 | [
"MIT"
] | null | null | null | # Copyright Clayton Brown 2019. See LICENSE file.
from ..internal import GameObject, Box
from ..utilities import recursiveSplit
from .textLine import TextLine
from typing import List
from dataclasses import dataclass
@dataclass
class TextLineData:
    '''
    Metadata pairing one rendered TextLine with the position of its content
    in the owning TextBox's full text.
    '''
    # The TextLine object responsible for rendering one row of the TextBox.
    textLine: TextLine = None
    # Index into the TextBox's original text where this line's content starts.
    textIndex: int = 0
class TextBox(GameObject):
    '''
    A GameObject which displays text, split to fit its width, at a certain
    position on a canvas. One TextLine child is created per row of the box.
    Parameters
    ----------
    box: Bounding Box of the text box on the screen
    text: The text the TextBox is displaying on the screen
    textColor: Color tuple used to draw the text
    backgroundColor: Color tuple drawn behind the text
    justify: "L", "R", or "C" (Left, Right, or Center). Justifies the text
        display in the TextBox. If a value is omitted or incorrectly given,
        text is left justified.
    '''
    def __init__(self, box: Box, text: str, textColor: tuple, backgroundColor: tuple, justify: str = "L", **kwargs):
        GameObject.__init__(self, box, **kwargs)
        # Set the initialization parameters
        self.text: str = text
        self.textColor: tuple = textColor
        self.backgroundColor: tuple = backgroundColor
        self.justify: str = justify
        # One TextLineData per row of the box (self.h/self.w presumably come
        # from the GameObject/Box base -- TODO confirm).
        self.textLineDataList: List[TextLineData] = []
        for i in range(self.h):
            textLineBox = Box(0, i, self.w, 1)
            textLine = TextLine(textLineBox, "", self.textColor, self.backgroundColor, justify = self.justify)
            self.textLineDataList.append(TextLineData(textLine, 0))
        self.clearText()
    def clearText(self):
        """Blank every row and reset each row's start index to 0."""
        for textLineData in self.textLineDataList:
            textLineData.textLine.text = ""
            textLineData.textLine._render()
            textLineData.textIndex = 0
    def render(self):
        '''
        Split self.text to the box width (recursiveSplit) and render one
        TextLine per row, blanking rows past the end of the text.
        '''
        # Get all the text to split
        lines = recursiveSplit(self.text, self.w)
        # Keep track of the start index for each line
        lineStart = 0
        for i in range(len(self.textLineDataList)):
            textLineData = self.textLineDataList[i]
            if i < len(lines):
                textLineData.textIndex = lineStart
                textLineData.textLine.text = lines[i]
                lineStart += len(lines[i])
            else:
                # Row has no text; index is extrapolated past the end of the
                # split text -- NOTE(review): formula assumes fixed-width rows,
                # confirm against how textIndex is consumed.
                textLineData.textLine.text = ""
                textLineData.textIndex = (i - len(lines)) * self.w + lineStart
            textLineData.textLine._render()
            textLineData.textLine.draw(self.bufferCanvas, (self.xOffset, self.yOffset))
| 32.494505 | 162 | 0.607711 |
244a9437bcf6b14bf46f4906632237e5cd0c7d44 | 4,216 | py | Python | gen_SIP.py | thegreatunclean/bloodstained-castle-graph | 66301cb3f8103fa36955000700b1eb0d61fd275d | [
"MIT"
] | null | null | null | gen_SIP.py | thegreatunclean/bloodstained-castle-graph | 66301cb3f8103fa36955000700b1eb0d61fd275d | [
"MIT"
] | null | null | null | gen_SIP.py | thegreatunclean/bloodstained-castle-graph | 66301cb3f8103fa36955000700b1eb0d61fd275d | [
"MIT"
] | null | null | null | def generate_Galleon_Minerva(g):
count = g.number_of_nodes()
#Galleon Minerva
g.add_bi_path(['SIP000_Tutorial','SIP000','SIP001','SIP002','SIP003','SIP005','SIP006','SIP008'])
g.add_bi_path(['SIP008-1','SIP009','SIP010','SIP011','SIP012','SIP013','SIP014','SIP015','SIP016','SIP017','SIP018','SIP019','SIP020','Vepar'])
g.add_edge('SIP001','SIP022',req='(DJ|INV|HJ|RR|DS)') #Checked
g.add_edge('SIP022','SIP001')
g.add_bi_edge('SIP021','SIP024')
g.add_bi_edge('SIP003','SIP004')
g.add_edge('SIP003','SIP003-1', req='(INV|HJ)') #Checked
g.add_edge('SIP003-1','SIP003')
g.add_bi_star('SIP006',['SIP007','SIP025'])
g.add_bi_edge('SIP008','SIP008-1', req='SIP008_SWITCH') #Cannon fired from SIP9 side
g.add_bi_edge('SIP006','SIP017',req='(SIP17_SWITCH)') #Needs cannon fired from SIP18 to open
g.add_edge('SIP019','SIP026', req='( (ACC&DJ) |INV|HJ|RR|DS)') #Checked
g.add_edge('SIP026','SIP019')
g.add_bi_edge('SIP020','SIP021')
g.add_edge('SIP014','SIP014-1', req='(RR|DS|DJ|INV|HJ)') #Checked
g.add_edge('SIP014-1', 'SIP014')
#Galleon Mineva attributes
#Triggers
g.add_attribute('SIP008', {'key':'SIP008_SWITCH'}) #Trigger cannon from SIP9
g.add_attribute('SIP017', {'key':'SIP17_SWITCH'}) #Trigger cannon from SIP18
g.add_attribute('Vepar', {'key':'VEPAR_DEAD'}) #Kill Vepar
#Hard Mode Mobs
g.add_attribute('SIP000_Tutorial', {'mobs' : ['148992']}) #Opening demo #TODO: ensure valid shard is dropped
g.add_attribute('SIP001', {'mobs' : ['Dullahammer', 'Morte']})
g.add_attribute('SIP003', {'mobs' : ['Aello', 'Seama', 'Dullahammer']})
g.add_attribute('SIP004', {'mobs' : ['Aello']})
g.add_attribute('SIP005', {'mobs' : ['Morte']})
g.add_attribute('SIP006', {'mobs' : ['Seama']})
g.add_attribute('SIP008', {'mobs' : ['149504', 'Morte']}) #FireCannon special drop
g.add_attribute('SIP009', {'mobs' : ['Bat', 'Morte']})
g.add_attribute('SIP010', {'mobs' : ['Bone Morte', 'Seama']})
g.add_attribute('SIP011', {'mobs' : ['Cannon Morte', 'Seama', 'Bat', 'Bone Morte', 'Morte']})
g.add_attribute('SIP014', {'mobs' : ['Morte', 'Bone Morte', 'Seama', 'Dullahammer']})
g.add_attribute('SIP015', {'mobs' : ['Ghost', 'Bone Morte', 'Cannon Morte']})
g.add_attribute('SIP015', {'mobs' : ['Seama']})
g.add_attribute('SIP016', {'mobs' : ['Seama']})
g.add_attribute('SIP017', {'mobs' : ['Seama', 'Giant Rat', 'Dullahammer']})
g.add_attribute('SIP019', {'mobs' : ['Aello','Seama','Dullahammer']})
g.add_attribute('SIP021', {'mobs' : ['Giant Rat']})
g.add_attribute('SIP021', {'mobs' : ['Poltergeist']})
g.add_attribute('Vepar', {'mobs' : ['Vepar']})
#Chests
g.add_attribute('SIP000_Tutorial', {'chests': ['13568']})
g.add_attribute('SIP002', {'chests' : ['13824']})
g.add_attribute('SIP003', {'chests' : ['14080']})
g.add_attribute('SIP003-1', {'chests' : ['125952']})
g.add_attribute('SIP004', {'chests' : ['14336']})
g.add_attribute('SIP005', {'chests' : ['14592', '14848']})
g.add_attribute('SIP006', {'chests' : ['15104']})
g.add_attribute('SIP007', {'chests' : ['15360', '15616']})
g.add_attribute('SIP009', {'chests' : ['15872']})
g.add_attribute('SIP011', {'chests' : ['16128','16384','16640','16896']})
g.add_attribute('SIP012', {'chests' : ['17152']})
g.add_attribute('SIP013', {'chests' : ['17408']})
g.add_attribute('SIP014-1', {'chests' : ['17664']})
g.add_attribute('SIP015', {'chests' : ['18176']})
g.add_attribute('SIP016', {'chests' : ['18432']})
g.add_attribute('SIP017', {'chests' : ['18688']})
g.add_attribute('SIP018', {'chests' : ['18944']})
g.add_attribute('SIP019', {'chests' : ['19200']})
g.add_attribute('SIP020', {'chests' : ['19456']})
g.add_attribute('SIP021', {'chests' : ['19968']})
g.add_attribute('SIP024', {'chests' : ['20480']})
g.add_attribute('SIP025', {'chests' : ['20736']})
g.add_attribute('SIP026', {'chests' : ['21248']})
#Walls
g.add_attribute('SIP004', {'walls' : ['127744']})
g.add_attribute('SIP009', {'walls' : ['128000']})
g.add_attribute('SIP014-1', {'walls' : ['128256']})
g.add_attribute('SIP016', {'walls' : ['128512']})
#print("Galleon Minerva: {} nodes".format( g.number_of_nodes() - count)) | 47.370787 | 145 | 0.625 |
94c2788fa2959ca57a295cf1003e5bba06629db8 | 3,530 | py | Python | benchmark/compare/Mesa/Flocking/boid.py | Corvince/Agents.jl | 6276084756287bd0a7b1e0dc9fcf5638f9840c73 | [
"MIT"
] | null | null | null | benchmark/compare/Mesa/Flocking/boid.py | Corvince/Agents.jl | 6276084756287bd0a7b1e0dc9fcf5638f9840c73 | [
"MIT"
] | null | null | null | benchmark/compare/Mesa/Flocking/boid.py | Corvince/Agents.jl | 6276084756287bd0a7b1e0dc9fcf5638f9840c73 | [
"MIT"
] | null | null | null | import numpy as np
from mesa import Agent
class Boid(Agent):
    """
    A Boid-style flocker agent.

    Movement combines the three classic flocking rules:
      - Cohesion: steer toward the average position of neighbors.
      - Separation: steer away from neighbors that are too close.
      - Alignment: steer toward the average heading of neighbors.

    A boid only considers neighbors within its ``vision`` radius. Its motion
    is described by a scalar ``speed`` and a 2D ``velocity`` vector;
    ``separation`` is the minimum distance it tries to keep from other boids.
    """

    def __init__(
        self,
        unique_id,
        model,
        pos,
        speed,
        velocity,
        vision,
        separation,
        cohere=0.03,
        separate=0.015,
        match=0.05,
    ):
        """
        Create a new Boid flocker agent.

        Args:
            unique_id: Unique agent identifier.
            model: The model the agent belongs to.
            pos: Starting position.
            speed: Distance to move per step.
            velocity: numpy vector for the Boid's direction of movement.
            vision: Radius to look around for nearby Boids.
            separation: Minimum distance to maintain from other Boids.
            cohere: Relative importance of matching neighbors' positions.
            separate: Relative importance of avoiding close neighbors.
            match: Relative importance of matching neighbors' headings.
        """
        super().__init__(unique_id, model)
        self.pos = np.array(pos)
        self.speed = speed
        self.velocity = velocity
        self.vision = vision
        self.separation = separation
        self.cohere_factor = cohere
        self.separate_factor = separate
        self.match_factor = match

    def cohere(self, neighbors):
        """Return the vector toward the center of mass of the local neighbors."""
        steer = np.zeros(2)
        if not neighbors:
            return steer
        for other in neighbors:
            steer += self.model.space.get_heading(self.pos, other.pos)
        return steer / len(neighbors)

    def separate(self, neighbors):
        """Return a vector away from any neighbors closer than the separation distance."""
        repulsion = np.zeros(2)
        for other in neighbors:
            if self.model.space.get_distance(self.pos, other.pos) < self.separation:
                repulsion -= self.model.space.get_heading(self.pos, other.pos)
        return repulsion

    def match_heading(self, neighbors):
        """Return a vector equal to the neighbors' average velocity."""
        mean_velocity = np.zeros(2)
        if neighbors:
            for other in neighbors:
                mean_velocity += other.velocity
            mean_velocity /= len(neighbors)
        return mean_velocity

    def step(self):
        """Look around, combine the three flocking rules, and move accordingly."""
        neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)
        steering = (
            self.cohere(neighbors) * self.cohere_factor
            + self.separate(neighbors) * self.separate_factor
            + self.match_heading(neighbors) * self.match_factor
        )
        self.velocity += steering / 2
        # Keep velocity a unit direction vector; speed carries the magnitude.
        self.velocity /= np.linalg.norm(self.velocity)
        self.model.space.move_agent(self, self.pos + self.velocity * self.speed)
| 32.990654 | 80 | 0.607082 |
c8feb8ad63b5b5de935f1234dda9bea2a69784c4 | 6,072 | py | Python | drawing/drawer/mesh/group/outline.py | akyruu/blender-cartography-addon | 4f34b029d9b6a72619227ab3ceaed9393506934e | [
"Apache-2.0"
] | null | null | null | drawing/drawer/mesh/group/outline.py | akyruu/blender-cartography-addon | 4f34b029d9b6a72619227ab3ceaed9393506934e | [
"Apache-2.0"
] | null | null | null | drawing/drawer/mesh/group/outline.py | akyruu/blender-cartography-addon | 4f34b029d9b6a72619227ab3ceaed9393506934e | [
"Apache-2.0"
] | null | null | null | """
Module for outline of mesh group drawer
"""
import logging
from typing import Dict, List, Optional, Tuple
from bmesh.types import BMEdge, BMesh, BMVert
from model import CartographyCategory, CartographyGroup, CartographyPoint, CartographyRoom
from utils.blender import bmesh as bmesh_utils
from utils.collection import list as list_utils, dict as dict_utils
from .common import CartographyMeshGroupContext, CartographyMeshGroupGeometry
from .extruded import CartographyMeshExtrudedGroupDrawer
# CLASSES =====================================================================
class CartographyMeshOutlineGroupDrawer(CartographyMeshExtrudedGroupDrawer):
    """Drawer of mesh for cartography outline group.
    Specializes the extruded drawer: GATE vertices get dedicated edge-closing
    logic, and the ground face is drawn last, merged with the based edges of
    the room's other (junction) groups.
    """
    # Fields ------------------------------------------------------------------
    __logger = logging.getLogger('CartographyMeshOutlineGroupDrawer')
    # Constructor -------------------------------------------------------------
    def __init__(self):
        """Initialize state; gate vertices and group are filled during _reset/drawing."""
        CartographyMeshExtrudedGroupDrawer.__init__(self)
        self.__gate_vertices: List[BMVert] = []
        self.__group: Optional[CartographyGroup] = None
    # Methods -----------------------------------------------------------------
    # Reset
    def _reset(self, context: CartographyMeshGroupContext):  # override
        CartographyMeshExtrudedGroupDrawer._reset(self, context)
        self.__gate_vertices = []
        self.__group = context.group
    # Vertices
    def _create_vertex(self, bm: BMesh, point: CartographyPoint, append=True) -> BMVert:  # overridden
        # Remember vertices belonging to GATE points; they drive edge closing below.
        vertex = CartographyMeshExtrudedGroupDrawer._create_vertex(self, bm, point, append)
        if point.category == CartographyCategory.GATE:
            self.__gate_vertices.append(vertex)
        return vertex
    # Edges
    def _close_edge(self, bm: BMesh, vertices: List[BMVert]):
        """Close the outline: link the last relevant vertex back to the first.
        If the first vertex is a gate vertex, the closing partner is the
        highest (max z) vertex of the trailing run of gate vertices;
        otherwise it is simply the last vertex.
        """
        vert1 = vertices[0]
        if vert1 in self.__gate_vertices:
            reverse_vertices = vertices.copy()
            reverse_vertices.reverse()
            vert2: Optional[BMVert] = None
            # Scan the trailing gate-vertex run (seen first in reversed order);
            # stop at the first non-gate vertex after the run began.
            # NOTE(review): if vert1 is the only gate vertex, vert2 ends up
            # equal to vert1 -- confirm bmesh tolerates that degenerate edge.
            for i in range(0, len(reverse_vertices)):
                vert = reverse_vertices[i]
                if vert in self.__gate_vertices:
                    if not vert2 or vert.co.z > vert2.co.z:
                        vert2 = vert
                elif vert2:
                    break
        else:
            vert2 = vertices[len(vertices) - 1]
        self._create_edge(bm, vert2, vert1)
    def _is_edge_to_level(self, edge: BMEdge):  # overridden
        # Gate edges are never leveled.
        return CartographyMeshExtrudedGroupDrawer._is_edge_to_level(self, edge) \
            and not self.__is_gate_edge(edge)
    def __is_gate_edge(self, edge: BMEdge):
        """True when both endpoints of the edge are gate vertices."""
        vert1, vert2 = edge.verts
        return vert1 in self.__gate_vertices and vert2 in self.__gate_vertices
    # Faces - Ground
    def _draw_ground_face(self, context: CartographyMeshGroupContext):  # overridden
        # The ground must be draw at the end (after all others structural forms)
        pass
    def draw_ground_face(self, context: CartographyMeshGroupContext):
        """Delayed draw of ground face to the end (after all others structural geometries)"""
        CartographyMeshExtrudedGroupDrawer._draw_ground_face(self, context)
    def _get_ground_edges(self, context: CartographyMeshGroupContext) -> List[BMEdge]:  # overridden
        """Return the outline's edges with junction groups spliced in and
        standalone groups appended."""
        edges = self._edges
        junction, standalone = self.__split_geoms(context.room, context.geom_by_group)
        # Insert junction geometries
        for group_name, geom in junction.items():
            self.__insert_junction_edges(edges, group_name, geom)
        # Add standalone geometries at the end
        for group_name, geom in standalone.items():
            edges += geom.based_edges
        return edges
    def __split_geoms(self, room: CartographyRoom, geom_by_group: Dict[str, CartographyMeshGroupGeometry]) \
            -> Tuple[Dict[str, CartographyMeshGroupGeometry], Dict[str, CartographyMeshGroupGeometry]]:
        """Partition the room's group geometries into (junction, standalone)
        relative to the outline group, dropping geometries of linked groups."""
        junction = {}
        standalone = {}
        # Filter geoms
        filtered_geom_by_group = geom_by_group.copy()
        for group_name, geom in geom_by_group.items():
            group = room.groups.get(group_name)
            linked_names = [g.name for g in group.linked]
            if linked_names:
                self.__logger.debug('Delete linked geometry to group <%s>: <%s>', group_name, str(linked_names))
                dict_utils.pop_all(filtered_geom_by_group, linked_names)
        # Split filtered geoms
        for group_name, geom in filtered_geom_by_group.items():  # Outline group isn't in this dictionary
            group = room.groups.get(group_name)
            geoms = junction if room.has_junction(group, self.__group) else standalone
            geoms[group_name] = geom
        return junction, standalone
    def __insert_junction_edges(self, edges: List[BMEdge], group_name: str, geom: CartographyMeshGroupGeometry):
        """Replace the outline edges colliding with `geom` by its based edges."""
        # Collect edges in junction
        junction_edges = []
        based_edges = geom.based_edges
        for i, edge in enumerate(edges):
            for based_edge in based_edges:
                if bmesh_utils.edge.has_3d_junction(edge, based_edge):
                    junction_edges.append(edge)
        # Remove outline edges in collision and insert based edges of geometry
        if junction_edges:
            start_index = edges.index(junction_edges[0]) + 1
            end_index = edges.index(junction_edges[len(junction_edges) - 1])
            if start_index < end_index:
                self.__logger.debug(
                    'Replace <%d> outline edges by <%d> based edges from group <%s>',
                    end_index - start_index, len(based_edges), group_name
                )
                list_utils.remove_sublist(edges, start_index, end_index)
                list_utils.insert_values(edges, start_index, based_edges)
            else:
                # TODO
                print('TODO')
        else:
            self.__logger.warning('No junction edge found for group <%s>', group_name)
| 42.760563 | 112 | 0.63307 |
15316da6dcb46dfb69a6992f854780dd113c431b | 18,188 | py | Python | cartography/intel/github/repos.py | Relys/cartography | 0f71b3f0246665d5fa065afa2e3dc46c22d6c689 | [
"Apache-2.0"
] | 1 | 2021-03-26T12:00:26.000Z | 2021-03-26T12:00:26.000Z | cartography/intel/github/repos.py | srics/cartography | 19a06766e304d657d956246179a2bb01a6d9aef6 | [
"Apache-2.0"
] | 1 | 2021-02-23T18:08:04.000Z | 2021-03-31T08:17:23.000Z | cartography/intel/github/repos.py | srics/cartography | 19a06766e304d657d956246179a2bb01a6d9aef6 | [
"Apache-2.0"
] | 1 | 2021-03-31T17:55:31.000Z | 2021-03-31T17:55:31.000Z | import logging
from string import Template
from packaging.requirements import InvalidRequirement
from packaging.requirements import Requirement
from packaging.utils import canonicalize_name
from cartography.intel.github.util import fetch_all
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
GITHUB_ORG_REPOS_PAGINATED_GRAPHQL = """
query($login: String!, $cursor: String) {
organization(login: $login)
{
url
login
repositories(first: 100, after: $cursor){
pageInfo{
endCursor
hasNextPage
}
nodes{
name
nameWithOwner
primaryLanguage{
name
}
url
sshUrl
createdAt
description
updatedAt
homepageUrl
languages(first: 25){
totalCount
nodes{
name
}
}
defaultBranchRef{
name
id
}
isPrivate
isArchived
isDisabled
isLocked
owner{
url
login
__typename
}
collaborators(affiliation: OUTSIDE, first: 100) {
edges {
permission
}
nodes {
url
login
name
email
company
}
}
requirements:object(expression: "HEAD:requirements.txt") {
... on Blob {
text
}
}
}
}
}
}
"""
# Note: In the above query, `HEAD` references the default branch.
# See https://stackoverflow.com/questions/48935381/github-graphql-api-default-branch-in-repository
@timeit
def get(token, api_url, organization):
    """
    Retrieve a list of repos from a Github organization as described in
    https://docs.github.com/en/graphql/reference/objects#repository.
    :param token: The Github API token as string.
    :param api_url: The Github v4 API endpoint as string.
    :param organization: The name of the target Github organization as string.
    :return: A list of dicts representing repos. See tests.data.github.repos for data shape.
    """
    # TODO: link the Github organization to the repositories
    results, _cursor = fetch_all(
        token,
        api_url,
        organization,
        GITHUB_ORG_REPOS_PAGINATED_GRAPHQL,
        'repositories',
        'nodes',
    )
    return results
def transform(repos_json):
    """
    Parses the JSON returned from GitHub API to create data for graph ingestion.
    :param repos_json: the list of individual repository nodes from GitHub. See
        tests.data.github.repos.GET_REPOS for data shape.
    :return: Dict containing the repos, repo->language mapping, owners->repo
        mapping, outside collaborators->repo mapping, and Python requirements
        files (if any) in a repo.
    """
    repos = []
    languages = []
    owners = []
    # Permission buckets per https://docs.github.com/en/graphql/reference/enums#repositorypermission
    collaborators = {'ADMIN': [], 'MAINTAIN': [], 'READ': [], 'TRIAGE': [], 'WRITE': []}
    requirements = []
    for repo in repos_json:
        _transform_repo_languages(repo['url'], repo, languages)
        _transform_repo_objects(repo, repos)
        _transform_repo_owners(repo['owner']['url'], repo, owners)
        _transform_collaborators(repo['collaborators'], repo['url'], collaborators)
        _transform_python_requirements(repo, requirements)
    return {
        'repos': repos,
        'repo_languages': languages,
        'repo_owners': owners,
        'repo_collaborators': collaborators,
        'python_requirements': requirements,
    }
def _create_default_branch_id(repo_url, default_branch_ref_id):
"""
Return a unique node id for a repo's defaultBranchId using the given repo_url and default_branch_ref_id.
This ensures that default branches for each GitHub repo are unique nodes in the graph.
"""
return f"{repo_url}:{default_branch_ref_id}"
def _create_git_url_from_ssh_url(ssh_url):
"""
Return a git:// URL from the given ssh_url
"""
return ssh_url.replace("/", ":").replace("git@", "git://")
def _transform_repo_objects(input_repo_object, out_repo_list):
    """
    Performs data transforms, including creating the IDs needed for unique
    GitHub repo and default-branch nodes in the graph.
    :param input_repo_object: A repository node from GitHub; see
        tests.data.github.repos.GET_REPOS for data shape.
    :param out_repo_list: Out-param to append the transformed repo to.
    :return: Nothing.
    """
    # Unique GitHubBranch node id for this repo's default branch, when one exists.
    branch_ref = input_repo_object['defaultBranchRef']
    if branch_ref:
        default_branch_name = branch_ref['name']
        default_branch_id = _create_default_branch_id(input_repo_object['url'], branch_ref['id'])
    else:
        default_branch_name = None
        default_branch_id = None
    # Derive a git:// URL from the SSH URL, if present.
    ssh_url = input_repo_object.get('sshUrl')
    git_url = _create_git_url_from_ssh_url(ssh_url) if ssh_url else None
    transformed = {
        'id': input_repo_object['url'],
        'createdat': input_repo_object['createdAt'],
        'name': input_repo_object['name'],
        'fullname': input_repo_object['nameWithOwner'],
        'description': input_repo_object['description'],
        'primarylanguage': input_repo_object['primaryLanguage'],
        'homepage': input_repo_object['homepageUrl'],
        'defaultbranch': default_branch_name,
        'defaultbranchid': default_branch_id,
        'private': input_repo_object['isPrivate'],
        'disabled': input_repo_object['isDisabled'],
        'archived': input_repo_object['isArchived'],
        'locked': input_repo_object['isLocked'],
        'giturl': git_url,
        'url': input_repo_object['url'],
        'sshurl': ssh_url,
        'updatedat': input_repo_object['updatedAt'],
    }
    out_repo_list.append(transformed)
def _transform_repo_owners(owner_id, repo, repo_owners):
"""
Helper function to transform repo owners.
:param owner_id: The URL of the owner object (either of type Organization or User).
:param repo: The repo object; see tests.data.github.repos.GET_REPOS for data shape.
:param repo_owners: Output array to append transformed results to.
:return: Nothing.
"""
repo_owners.append({
'repo_id': repo['url'],
'owner': repo['owner']['login'],
'owner_id': owner_id,
'type': repo['owner']['__typename'],
})
def _transform_repo_languages(repo_url, repo, repo_languages):
"""
Helper function to transform the languages in a GitHub repo.
:param repo_url: The URL of the repo.
:param repo: The repo object; see tests.data.github.repos.GET_REPOS for data shape.
:param repo_languages: Output array to append transformed results to.
:return: Nothing.
"""
if repo['languages']['totalCount'] > 0:
for language in repo['languages']['nodes']:
repo_languages.append({
'repo_id': repo_url,
'language_name': language['name'],
})
def _transform_collaborators(collaborators, repo_url, transformed_collaborators):
"""
Performs data adjustments for outside collaborators in a GitHub repo.
Output data shape = [{permission, repo_url, url (the user's URL), login, name}, ...]
:param collaborators: See cartography.tests.data.github.repos for data shape.
:param repo_url: The URL of the GitHub repo.
:param transformed_collaborators: Output dict. Data shape =
{'ADMIN': [{ user }, ...], 'MAINTAIN': [{ user }, ...], 'READ': [ ... ], 'TRIAGE': [ ... ], 'WRITE': [ ... ]}
:return: Nothing.
"""
# `collaborators` is sometimes None
if collaborators:
for idx, user in enumerate(collaborators['nodes']):
user_permission = collaborators['edges'][idx]['permission']
user['repo_url'] = repo_url
transformed_collaborators[user_permission].append(user)
def _transform_python_requirements(repo_object, out_requirements_files):
    """
    Performs data transformations for the requirements.txt files in a GitHub repo, if available.
    :param repo_object: The repo object.
    :param out_requirements_files: Output array to append transformed results to.
    :return: Nothing.
    """
    req_file_contents = repo_object['requirements']
    if not req_file_contents or not req_file_contents.get('text'):
        return
    text_contents = req_file_contents['text']
    parsed_list = []
    for line in text_contents.split("\n"):
        # Remove trailing comments and extra whitespace
        line = line.partition('#')[0].strip()
        # Skip blank and comment-only lines up front: Requirement('') raises
        # InvalidRequirement, which previously emitted a spurious info log
        # line for every blank line in the file.
        if not line:
            continue
        try:
            req = Requirement(line)
            parsed_list.append(req)
        except InvalidRequirement as e:
            logger.info(
                f"Failed to parse line \"{line}\" in repo {repo_object['url']}'s requirements.txt; skipping line. "
                f"Details: {e}. This is probably ok since we don't support all ways to specify Python "
                f"requirements.",
            )
            continue
    for req in parsed_list:
        # A version is considered "pinned" only for a single '==' specifier.
        pinned_version = None
        if len(req.specifier) == 1:
            specifier = next(iter(req.specifier))
            if specifier.operator == '==':
                pinned_version = specifier.version
        # Set `spec` to a default value. Example values for str(req.specifier): "<4.0,>=3.0" or "==1.0.0".
        spec = str(req.specifier)
        # Set spec to `None` instead of empty string so that the Neo4j driver will leave the library.specifier field
        # undefined. As convention, we prefer undefined values over empty strings in the graph.
        if spec == '':
            spec = None
        canon_name = canonicalize_name(req.name)
        requirement_id = f"{canon_name}|{pinned_version}" if pinned_version else canon_name
        out_requirements_files.append({
            "id": requirement_id,
            "name": canon_name,
            "specifier": spec,
            "version": pinned_version,
            "repo_url": repo_object['url'],
        })
@timeit
def load_github_repos(neo4j_session, update_tag, repo_data):
    """
    Ingest the GitHub repository information
    :param neo4j_session: Neo4J session object for server communication
    :param update_tag: Timestamp used to determine data freshness
    :param repo_data: repository data objects
    :return: None
    """
    # The second half of the query only runs for repos that actually have a
    # default branch (WHERE ... IS NOT NULL) and links repo-[:BRANCH]->branch.
    ingest_repo = """
    UNWIND {RepoData} as repository
    MERGE (repo:GitHubRepository{id: repository.id})
    ON CREATE SET repo.firstseen = timestamp(),
    repo.createdat = repository.createdat
    SET repo.name = repository.name,
    repo.fullname = repository.fullname,
    repo.description = repository.description,
    repo.primarylanguage = repository.primarylanguage.name,
    repo.homepage = repository.homepage,
    repo.defaultbranch = repository.defaultbranch,
    repo.defaultbranchid = repository.defaultbranchid,
    repo.private = repository.private,
    repo.disabled = repository.disabled,
    repo.archived = repository.archived,
    repo.locked = repository.locked,
    repo.giturl = repository.giturl,
    repo.url = repository.url,
    repo.sshurl = repository.sshurl,
    repo.updatedat = repository.updatedat,
    repo.lastupdated = {UpdateTag}
    WITH repo
    WHERE repo.defaultbranch IS NOT NULL AND repo.defaultbranchid IS NOT NULL
    MERGE (branch:GitHubBranch{id: repo.defaultbranchid})
    ON CREATE SET branch.firstseen = timestamp()
    SET branch.name = repo.defaultbranch,
    branch.lastupdated = {UpdateTag}
    MERGE (repo)-[r:BRANCH]->(branch)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {UpdateTag}
    """
    # Fix: the BRANCH relationship's lastupdated was previously set from
    # `r.UpdateTag` (a nonexistent relationship property, i.e. always null)
    # instead of the {UpdateTag} parameter used everywhere else.
    neo4j_session.run(
        ingest_repo,
        RepoData=repo_data,
        UpdateTag=update_tag,
    )
@timeit
def load_github_languages(neo4j_session, update_tag, repo_languages):
    """
    Ingest the relationships for repo languages
    :param neo4j_session: Neo4J session object for server communication
    :param update_tag: Timestamp used to determine data freshness
    :param repo_languages: list of language to repo mappings
    :return: Nothing
    """
    # ProgrammingLanguage nodes are keyed by language name; each repo is
    # attached through a LANGUAGE relationship.
    ingest_languages = """
    UNWIND {Languages} as lang
    MERGE (pl:ProgrammingLanguage{id: lang.language_name})
    ON CREATE SET pl.firstseen = timestamp(),
    pl.name = lang.language_name
    SET pl.lastupdated = {UpdateTag}
    WITH pl, lang
    MATCH (repo:GitHubRepository{id: lang.repo_id})
    MERGE (pl)<-[r:LANGUAGE]-(repo)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {UpdateTag}"""
    neo4j_session.run(
        ingest_languages,
        Languages=repo_languages,
        UpdateTag=update_tag,
    )
@timeit
def load_github_owners(neo4j_session, update_tag, repo_owners):
    """
    Ingest the relationships for repo owners
    :param neo4j_session: Neo4J session object for server communication
    :param update_tag: Timestamp used to determine data freshness
    :param repo_owners: list of owner to repo mappings
    :return: Nothing
    """
    # Node label per GitHub owner type; built once instead of per owner.
    account_type = {'User': "GitHubUser", 'Organization': "GitHubOrganization"}
    # Template hoisted out of the loop: only the node label ($account_type)
    # varies per owner, and Cypher labels cannot be passed as parameters.
    ingest_owner_template = Template("""
        MERGE (user:$account_type{id: {Id}})
        ON CREATE SET user.firstseen = timestamp()
        SET user.username = {UserName},
        user.lastupdated = {UpdateTag}
        WITH user
        MATCH (repo:GitHubRepository{id: {RepoId}})
        MERGE (user)<-[r:OWNER]-(repo)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {UpdateTag}""")
    for owner in repo_owners:
        neo4j_session.run(
            ingest_owner_template.safe_substitute(account_type=account_type[owner['type']]),
            Id=owner['owner_id'],
            UserName=owner['owner'],
            RepoId=owner['repo_id'],
            UpdateTag=update_tag,
        )
@timeit
def load_collaborators(neo4j_session, update_tag, collaborators):
    """
    Ingest outside collaborators and link them to their repos.
    :param neo4j_session: Neo4J session object for server communication
    :param update_tag: Timestamp used to determine data freshness
    :param collaborators: dict keyed by permission level ('ADMIN', 'MAINTAIN',
        'READ', 'TRIAGE', 'WRITE'); each value is a list of user dicts carrying
        a 'repo_url' field (see the transform step).
    :return: Nothing
    """
    query = Template("""
    UNWIND {UserData} as user
    MERGE (u:GitHubUser{id: user.url})
    ON CREATE SET u.firstseen = timestamp()
    SET u.fullname = user.name,
    u.username = user.login,
    u.permission = user.permission,
    u.email = user.email,
    u.company = user.company,
    u.lastupdated = {UpdateTag}
    WITH u, user
    MATCH (repo:GitHubRepository{id: user.repo_url})
    MERGE (repo)<-[o:$rel_label]-(u)
    ON CREATE SET o.firstseen = timestamp()
    SET o.lastupdated = {UpdateTag}
    """)
    # One run per permission level: the relationship type encodes the
    # permission (e.g. OUTSIDE_COLLAB_ADMIN) and Cypher relationship types
    # cannot be parameterized, hence the Template substitution.
    for collab_type in collaborators.keys():
        relationship_label = f"OUTSIDE_COLLAB_{collab_type}"
        neo4j_session.run(
            query.safe_substitute(rel_label=relationship_label),
            UserData=collaborators[collab_type],
            UpdateTag=update_tag,
        )
@timeit
def load(neo4j_session, common_job_parameters, repo_data):
    """Ingest every transformed GitHub dataset into the graph."""
    update_tag = common_job_parameters['UPDATE_TAG']
    load_github_repos(neo4j_session, update_tag, repo_data['repos'])
    load_github_owners(neo4j_session, update_tag, repo_data['repo_owners'])
    load_github_languages(neo4j_session, update_tag, repo_data['repo_languages'])
    load_collaborators(neo4j_session, update_tag, repo_data['repo_collaborators'])
    load_python_requirements(neo4j_session, update_tag, repo_data['python_requirements'])
@timeit
def load_python_requirements(neo4j_session, update_tag, requirements_objects):
    """
    Ingest Python library requirements and link repos to them via REQUIRES.
    :param neo4j_session: Neo4J session object for server communication
    :param update_tag: Timestamp used to determine data freshness
    :param requirements_objects: list of requirement dicts produced by the
        transform step (id, name, specifier, version, repo_url)
    :return: Nothing
    """
    query = """
    UNWIND {Requirements} AS req
    MERGE (lib:PythonLibrary:Dependency{id: req.id})
    ON CREATE SET lib.firstseen = timestamp(),
    lib.name = req.name
    SET lib.lastupdated = {UpdateTag},
    lib.version = req.version
    WITH lib, req
    MATCH (repo:GitHubRepository{id: req.repo_url})
    MERGE (repo)-[r:REQUIRES]->(lib)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {UpdateTag},
    r.specifier = req.specifier
    """
    neo4j_session.run(
        query,
        Requirements=requirements_objects,
        UpdateTag=update_tag,
    )
def sync(neo4j_session, common_job_parameters, github_api_key, github_url, organization):
    """
    Performs the sequential tasks to collect, transform, and sync github data
    :param neo4j_session: Neo4J session for database interface
    :param common_job_parameters: Common job parameters containing UPDATE_TAG
    :param github_api_key: The API key to access the GitHub v4 API
    :param github_url: The URL for the GitHub v4 endpoint to use
    :param organization: The organization to query GitHub for
    :return: Nothing
    """
    logger.info("Syncing GitHub repos")
    # Pipeline: fetch from the API, reshape for the graph, ingest, then purge
    # stale nodes/relationships via the cleanup job.
    repos_json = get(github_api_key, github_url, organization)
    repo_data = transform(repos_json)
    load(neo4j_session, common_job_parameters, repo_data)
    run_cleanup_job('github_repos_cleanup.json', neo4j_session, common_job_parameters)
| 38.371308 | 120 | 0.637453 |
95fbe0ea61b8aee02b1682cab7eea850dd5ad29b | 11,803 | py | Python | Python27/Lib/test/test_sysconfig.py | xuyue1998/Json2Lua_Tools | 539c4e52cb8ac1257f660bf902d964e1a476257c | [
"MIT"
] | 66 | 2015-01-17T23:13:44.000Z | 2018-04-20T21:44:35.000Z | Python27/Lib/test/test_sysconfig.py | xuyue1998/Json2Lua_Tools | 539c4e52cb8ac1257f660bf902d964e1a476257c | [
"MIT"
] | 20 | 2015-04-20T12:09:12.000Z | 2022-03-12T01:25:04.000Z | Python27/Lib/test/test_sysconfig.py | xuyue1998/Json2Lua_Tools | 539c4e52cb8ac1257f660bf902d964e1a476257c | [
"MIT"
] | 24 | 2015-07-22T08:08:54.000Z | 2021-12-28T06:56:09.000Z | """Tests for 'sysconfig'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
import sys
import os
import shutil
import subprocess
from copy import copy, deepcopy
from test.test_support import run_unittest, TESTFN, unlink, get_attribute
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var)
class TestSysConfig(unittest.TestCase):
    """Exercise the public sysconfig API (paths, schemes, platform string).

    setUp/tearDown snapshot and restore interpreter state (sys.path,
    os/sys attributes, sysconfig._CONFIG_VARS, os.environ) because the
    platform tests monkey-patch all of it.
    """
    def setUp(self):
        """Make a copy of sys.path"""
        super(TestSysConfig, self).setUp()
        self.sys_path = sys.path[:]
        self.makefile = None
        # patching os.uname
        if hasattr(os, 'uname'):
            self.uname = os.uname
            self._uname = os.uname()
        else:
            self.uname = None
            self._uname = None
        os.uname = self._get_uname
        # saving the environment
        self.name = os.name
        self.platform = sys.platform
        self.version = sys.version
        self.sep = os.sep
        self.join = os.path.join
        self.isabs = os.path.isabs
        self.splitdrive = os.path.splitdrive
        self._config_vars = copy(sysconfig._CONFIG_VARS)
        self.old_environ = deepcopy(os.environ)
    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
        if self.makefile is not None:
            os.unlink(self.makefile)
        self._cleanup_testfn()
        if self.uname is not None:
            os.uname = self.uname
        else:
            del os.uname
        os.name = self.name
        sys.platform = self.platform
        sys.version = self.version
        os.sep = self.sep
        os.path.join = self.join
        os.path.isabs = self.isabs
        os.path.splitdrive = self.splitdrive
        sysconfig._CONFIG_VARS = copy(self._config_vars)
        # Restore os.environ in place: first reset changed keys, then drop added ones.
        for key, value in self.old_environ.items():
            if os.environ.get(key) != value:
                os.environ[key] = value
        for key in os.environ.keys():
            if key not in self.old_environ:
                del os.environ[key]
        super(TestSysConfig, self).tearDown()
    # Stand-in for os.uname so tests can fake the platform tuple.
    def _set_uname(self, uname):
        self._uname = uname
    def _get_uname(self):
        return self._uname
    def _cleanup_testfn(self):
        # Remove the scratch file/directory (used by test_symlink), if present.
        path = TESTFN
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
    def test_get_path_names(self):
        self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
    def test_get_paths(self):
        # get_paths() must equal the default scheme expanded by hand.
        scheme = get_paths()
        default_scheme = _get_default_scheme()
        wanted = _expand_vars(default_scheme, None)
        wanted = wanted.items()
        wanted.sort()
        scheme = scheme.items()
        scheme.sort()
        self.assertEqual(scheme, wanted)
    def test_get_path(self):
        # xxx make real tests here
        for scheme in _INSTALL_SCHEMES:
            for name in _INSTALL_SCHEMES[scheme]:
                res = get_path(name, scheme)
    def test_get_config_vars(self):
        cvars = get_config_vars()
        self.assertIsInstance(cvars, dict)
        self.assertTrue(cvars)
    def test_get_platform(self):
        # Fake os.name / sys.version / sys.platform (plus uname and CFLAGS
        # for OS X) and verify the computed platform string for each case.
        # windows XP, 32bits
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Intel)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win32')
        # windows XP, amd64
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Amd64)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-amd64')
        # windows XP, itanium
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Itanium)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-ia64')
        # macbook
        os.name = 'posix'
        sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
                       '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
        sys.platform = 'darwin'
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                   ('Darwin Kernel Version 8.11.1: '
                    'Wed Oct 10 18:23:28 PDT 2007; '
                    'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        # sys.maxint distinguishes 32- and 64-bit interpreters here.
        maxint = sys.maxint
        try:
            sys.maxint = 2147483647
            self.assertEqual(get_platform(), 'macosx-10.3-ppc')
            sys.maxint = 9223372036854775807
            self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
        finally:
            sys.maxint = maxint
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                   ('Darwin Kernel Version 8.11.1: '
                    'Wed Oct 10 18:23:28 PDT 2007; '
                    'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        maxint = sys.maxint
        try:
            sys.maxint = 2147483647
            self.assertEqual(get_platform(), 'macosx-10.3-i386')
            sys.maxint = 9223372036854775807
            self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
        finally:
            sys.maxint = maxint
        # macbook with fat binaries (fat, universal or fat64)
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
        get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat')
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-intel')
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat3')
        get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-universal')
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat64')
        for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
            get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
                                           '/Developer/SDKs/MacOSX10.4u.sdk  '
                                           '-fno-strict-aliasing -fno-common '
                                           '-dynamic -DNDEBUG -g -O3'%(arch,))
            self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))
        # linux debian sarge
        os.name = 'posix'
        sys.version = ('2.3.5 (#1, Jul  4 2007, 17:28:59) '
                       '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
        sys.platform = 'linux2'
        self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
                    '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
        self.assertEqual(get_platform(), 'linux-i686')
        # XXX more platforms to tests here
    def test_get_config_h_filename(self):
        config_h = sysconfig.get_config_h_filename()
        self.assertTrue(os.path.isfile(config_h), config_h)
    def test_get_scheme_names(self):
        wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
                  'posix_home', 'posix_prefix', 'posix_user')
        self.assertEqual(get_scheme_names(), wanted)
    def test_symlink(self):
        # Issue 7880
        symlink = get_attribute(os, "symlink")
        def get(python):
            cmd = [python, '-c',
                   'import sysconfig; print sysconfig.get_platform()']
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            return p.communicate()
        # get_platform() must be identical through a symlinked interpreter.
        real = os.path.realpath(sys.executable)
        link = os.path.abspath(TESTFN)
        symlink(real, link)
        try:
            self.assertEqual(get(real), get(link))
        finally:
            unlink(link)
    def test_user_similar(self):
        # Issue 8759 : make sure the posix scheme for the users
        # is similar to the global posix_prefix one
        base = get_config_var('base')
        user = get_config_var('userbase')
        for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
            global_path = get_path(name, 'posix_prefix')
            user_path = get_path(name, 'posix_user')
            self.assertEqual(user_path, global_path.replace(base, user))
    @unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
    def test_platform_in_subprocess(self):
        my_platform = sysconfig.get_platform()
        # Test without MACOSX_DEPLOYMENT_TARGET in the environment
        env = os.environ.copy()
        if 'MACOSX_DEPLOYMENT_TARGET' in env:
            del env['MACOSX_DEPLOYMENT_TARGET']
        with open('/dev/null', 'w') as devnull_fp:
            p = subprocess.Popen([
                    sys.executable, '-c',
                   'import sysconfig; print(sysconfig.get_platform())',
                ],
                stdout=subprocess.PIPE,
                stderr=devnull_fp,
                env=env)
        test_platform = p.communicate()[0].strip()
        test_platform = test_platform.decode('utf-8')
        status = p.wait()
        self.assertEqual(status, 0)
        self.assertEqual(my_platform, test_platform)
        # Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
        # using a value that is unlikely to be the default one.
        env = os.environ.copy()
        env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
        p = subprocess.Popen([
                sys.executable, '-c',
                'import sysconfig; print(sysconfig.get_platform())',
            ],
            stdout=subprocess.PIPE,
            stderr=open('/dev/null'),
            env=env)
        test_platform = p.communicate()[0].strip()
        test_platform = test_platform.decode('utf-8')
        status = p.wait()
        self.assertEqual(status, 0)
        self.assertEqual(my_platform, test_platform)
def test_main():
    # Entry point used by regrtest: run the whole TestSysConfig suite.
    run_unittest(TestSysConfig)
if __name__ == "__main__":
    test_main()
| 37.233438 | 97 | 0.546895 |
d4d86e28e8bd5f81ee612d3941b6bab52f203b43 | 3,027 | py | Python | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/package/scripts/zookeeper.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 25 | 2019-12-04T03:09:55.000Z | 2022-03-08T10:52:06.000Z | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/package/scripts/zookeeper.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 29 | 2019-12-04T03:00:39.000Z | 2022-03-02T06:25:44.000Z | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/package/scripts/zookeeper.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 33 | 2019-12-04T02:51:30.000Z | 2022-03-24T02:47:38.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
from resource_management import *
import sys
def zookeeper(type = None):
  """
  Lay down the ZooKeeper on-disk configuration: config/pid/log/data
  directories, zookeeper-env.sh, templated config files and, for servers,
  the myid file plus optional security (JAAS) configs.

  :param type: 'server' to also write the server-only artifacts (myid,
               server JAAS file); anything else gets a client-only layout.
  """
  import params

  # Config directory and environment file, owned by the ZooKeeper user/group.
  Directory(params.config_dir,
            owner=params.zk_user,
            create_parents=True,
            group=params.user_group
  )
  File(format("{config_dir}/zookeeper-env.sh"),
       content=InlineTemplate(params.zk_env_sh_template),
       owner=params.zk_user,
       group=params.user_group
  )
  configFile("zoo.cfg", template_name="zoo.cfg.j2")
  configFile("configuration.xsl", template_name="configuration.xsl.j2")

  # Runtime directories, created in the same order as before: pid, log, data.
  for runtime_dir in (params.zk_pid_dir, params.zk_log_dir, params.zk_data_dir):
    Directory(runtime_dir,
              owner=params.zk_user,
              create_parents=True,
              group=params.user_group
    )

  if type == 'server':
    # A server's 1-based position in the sorted host list becomes its myid.
    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
    File(format("{zk_data_dir}/myid"),
         mode=0o644,
         content=myid
    )

  # log4j.properties: write configured content if provided; otherwise just
  # fix ownership/permissions of a pre-existing file.
  if params.log4j_props != None:
    File(format("{params.config_dir}/log4j.properties"),
         mode=0o644,
         group=params.user_group,
         owner=params.zk_user,
         content=params.log4j_props
    )
  elif os.path.exists(format("{params.config_dir}/log4j.properties")):
    File(format("{params.config_dir}/log4j.properties"),
         mode=0o644,
         group=params.user_group,
         owner=params.zk_user
    )

  if params.security_enabled:
    # Servers need both JAAS files; clients only the client-side one.
    if type == "server":
      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
    else:
      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")

  File(format("{config_dir}/zoo_sample.cfg"),
       owner=params.zk_user,
       group=params.user_group
  )
def configFile(name, template_name=None):
  """Render the Jinja template *template_name* into the ZooKeeper config
  directory under the given file *name*, owned by the ZK user/group."""
  import params

  dest = format("{config_dir}/{name}")
  File(dest,
       content=Template(template_name),
       owner=params.zk_user,
       group=params.user_group)
| 27.27027 | 93 | 0.68814 |
a7ae20e633d4de6682cc5f619088bd79c50d15dc | 7,696 | py | Python | test/functional/test_framework/test_node.py | SinduNagalingam/Runway | f1c7f6295f03b89358529c18d19c9443d4afb574 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_node.py | SinduNagalingam/Runway | f1c7f6295f03b89358529c18d19c9443d4afb574 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_node.py | SinduNagalingam/Runway | f1c7f6295f03b89358529c18d19c9443d4afb574 | [
"MIT"
] | 1 | 2018-10-08T07:13:43.000Z | 2018-10-08T07:13:43.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for runwaycoind node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import subprocess
import time
from .util import (
assert_equal,
get_rpc_proxy,
rpc_url,
wait_until,
)
from .authproxy import JSONRPCException
# Seconds to wait for the daemon process to exit after a stop request.
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
    """A class for representing a runwaycoind node under test.
    This class contains:
    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    To make things easier for the test writer, a bit of magic is happening under the covers.
    Any unrecognised messages will be dispatched to the RPC connection."""
    def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
        self.index = i
        self.datadir = os.path.join(dirname, "node" + str(i))
        self.rpchost = rpchost
        if timewait:
            self.rpc_timeout = timewait
        else:
            # Wait for up to 60 seconds for the RPC server to respond
            self.rpc_timeout = 60
        if binary is None:
            self.binary = os.getenv("LITECOIND", "runwaycoind")
        else:
            self.binary = binary
        self.stderr = stderr
        self.coverage_dir = coverage_dir
        # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly.
        self.extra_args = extra_args
        self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
        self.cli = TestNodeCLI(os.getenv("LITECOINCLI", "runwaycoin-cli"), self.datadir)
        # Process / RPC state, populated by start() and wait_for_rpc_connection().
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
    def __getattr__(self, *args, **kwargs):
        """Dispatches any unrecognised messages to the RPC connection."""
        assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
        return self.rpc.__getattr__(*args, **kwargs)
    def start(self, extra_args=None, stderr=None):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        if stderr is None:
            stderr = self.stderr
        self.process = subprocess.Popen(self.args + extra_args, stderr=stderr)
        self.running = True
        self.log.debug("runwaycoind started, waiting for RPC to come up")
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the runwaycoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            # Fail fast if the daemon died during startup.
            assert self.process.poll() is None, "runwaycoind exited with status %i during initialization" % self.process.returncode
            try:
                self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                self.rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.rpc_connected = True
                self.url = self.rpc.url
                self.log.debug("RPC successfully started")
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                if e.error['code'] != -28:  # RPC in warmup?
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcassword. bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        raise AssertionError("Unable to connect to runwaycoind")
    def get_wallet_rpc(self, wallet_name):
        # Return a proxy scoped to a specific wallet endpoint.
        assert self.rpc_connected
        assert self.rpc
        wallet_path = "wallet/%s" % wallet_name
        return self.rpc / wallet_path
    def stop_node(self):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop()
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
    def is_node_stopped(self):
        """Checks whether the node has stopped.
        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False
        # process has stopped. Assert that it didn't return an error code.
        assert_equal(return_code, 0)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True
    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        wait_until(self.is_node_stopped, timeout=timeout)
    def node_encrypt_wallet(self, passphrase):
        """"Encrypts the wallet.
        This causes runwaycoind to shutdown, so this method takes
        care of cleaning up resources."""
        self.encryptwallet(passphrase)
        self.wait_until_stopped()
class TestNodeCLI():
    """Proxy object that runs runwaycoin-cli against a single node's datadir."""
    def __init__(self, binary, datadir):
        # Extra global command-line options captured by __call__.
        self.args = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
    def __call__(self, *args, input=None):
        # Calling the proxy records global cli options (and optional stdin)
        # to be prepended to the next subcommand invocation; returns self
        # so calls can be chained fluently.
        self.args = [str(a) for a in args]
        self.input = input
        return self
    def __getattr__(self, command):
        # Unknown attribute lookups become runwaycoin-cli subcommands.
        def dispatcher(*args, **kwargs):
            return self.send_cli(command, *args, **kwargs)
        return dispatcher
    def send_cli(self, command, *args, **kwargs):
        """Invoke runwaycoin-cli and deserialize its JSON output."""
        pos_args = [str(a) for a in args]
        named_args = ["%s=%s" % (k, v) for k, v in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same runwaycoin-cli call"
        full_args = [self.binary, "-datadir=" + self.datadir] + self.args
        if named_args:
            full_args.append("-named")
        full_args.append(command)
        full_args.extend(pos_args)
        full_args.extend(named_args)
        proc = subprocess.Popen(full_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        out, err = proc.communicate(input=self.input)
        rc = proc.poll()
        if rc:
            # Non-zero exit: surface stderr via CalledProcessError.
            raise subprocess.CalledProcessError(rc, self.binary, output=err)
        # Monetary amounts must stay exact, hence Decimal for floats.
        return json.loads(out, parse_float=decimal.Decimal)
| 40.293194 | 248 | 0.641502 |
6f24f58d5e87916ed6ab6e723a8ee3b9f0859241 | 21,505 | py | Python | coreemu-read-only/daemon/core/services/utility.py | ermin-sakic/Common-Open-Research-Emulator-CORE- | 9c246b0ae0e9182dcf61acc4faee41841d5cbd51 | [
"BSD-3-Clause"
] | 3 | 2016-05-19T09:46:30.000Z | 2021-01-31T16:41:06.000Z | coreemu-read-only/daemon/core/services/utility.py | ermin-sakic/Common-Open-Research-Emulator-CORE- | 9c246b0ae0e9182dcf61acc4faee41841d5cbd51 | [
"BSD-3-Clause"
] | null | null | null | coreemu-read-only/daemon/core/services/utility.py | ermin-sakic/Common-Open-Research-Emulator-CORE- | 9c246b0ae0e9182dcf61acc4faee41841d5cbd51 | [
"BSD-3-Clause"
] | 1 | 2018-09-02T09:37:14.000Z | 2018-09-02T09:37:14.000Z | #
# CORE
# Copyright (c)2010-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
utility.py: defines miscellaneous utility services.
'''
import os
from core.service import CoreService, addservice
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
from core.misc.utils import *
from core.constants import *
class UtilService(CoreService):
    ''' Parent class for utility services.
    '''
    _name = "UtilityProcess"
    _group = "Utility"      # GUI grouping for all utility services
    _depends = ()
    _dirs = ()              # per-node private directories to create
    _configs = ()           # config files generated by generateconfig()
    _startindex = 80
    _startup = ()
    _shutdown = ()
    @classmethod
    def generateconfig(cls, node, filename, services):
        # Base implementation: no config content; subclasses override.
        return ""
class IPForwardService(UtilService):
    ''' Enable IPv4/IPv6 forwarding on a node via a generated sysctl script.
    '''
    _name = "IPForward"
    _configs = ("ipforward.sh", )
    _startindex = 5
    _startup = ("sh ipforward.sh", )
    @classmethod
    def generateconfig(cls, node, filename, services):
        # Dispatch on the host OS; the generated script differs per platform.
        if os.uname()[0] == "Linux":
            return cls.generateconfiglinux(node, filename, services)
        elif os.uname()[0] == "FreeBSD":
            return cls.generateconfigbsd(node, filename, services)
        else:
            raise Exception, "unknown platform"
    @classmethod
    def generateconfiglinux(cls, node, filename, services):
        # Global forwarding sysctls first, then per-interface settings.
        cfg = """\
#!/bin/sh
# auto-generated by IPForward service (utility.py)
%s -w net.ipv4.conf.all.forwarding=1
%s -w net.ipv6.conf.all.forwarding=1
%s -w net.ipv4.conf.all.send_redirects=0
%s -w net.ipv4.conf.all.rp_filter=0
%s -w net.ipv4.conf.default.rp_filter=0
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
        for ifc in node.netifs():
            # sysctl keys use '/' for VLAN-style names; translate device name.
            name = sysctldevname(ifc.name)
            cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (SYSCTL_BIN, name)
            cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \
                   (SYSCTL_BIN, name)
            cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (SYSCTL_BIN, name)
        return cfg
    @classmethod
    def generateconfigbsd(cls, node, filename, services):
        return """\
#!/bin/sh
# auto-generated by IPForward service (utility.py)
%s -w net.inet.ip.forwarding=1
%s -w net.inet6.ip6.forwarding=1
%s -w net.inet.icmp.bmcastecho=1
%s -w net.inet.icmp.icmplim=0
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
addservice(IPForwardService)
class DefaultRouteService(UtilService):
    ''' Add a default route toward the first address of each attached subnet.
    '''
    _name = "DefaultRoute"
    _configs = ("defaultroute.sh",)
    _startup = ("sh defaultroute.sh",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by DefaultRoute service (utility.py)\n"
        for ifc in node.netifs():
            # Skip the management/control interface.
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "\n".join(map(cls.addrstr, ifc.addrlist))
            cfg += "\n"
        return cfg
    @staticmethod
    def addrstr(x):
        # x is a "addr/prefixlen" string; the gateway is assumed to be the
        # lowest address of the subnet (conventionally the router).
        if x.find(":") >= 0:
            net = IPv6Prefix(x)
            fam = "inet6 ::"
        else:
            net = IPv4Prefix(x)
            fam = "inet 0.0.0.0"
        if net.maxaddr() == net.minaddr():
            # Host route (/32 or /128): no usable gateway, emit nothing.
            return ""
        else:
            if os.uname()[0] == "Linux":
                rtcmd = "ip route add default via"
            elif os.uname()[0] == "FreeBSD":
                rtcmd = "route add -%s" % fam
            else:
                raise Exception, "unknown platform"
            return "%s %s" % (rtcmd, net.minaddr())
addservice(DefaultRouteService)
class DefaultMulticastRouteService(UtilService):
    ''' Route all multicast traffic (224.0.0.0/4) out the first data interface.
    '''
    _name = "DefaultMulticastRoute"
    _configs = ("defaultmroute.sh",)
    _startup = ("sh defaultmroute.sh",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n"
        cfg += "# the first interface is chosen below; please change it "
        cfg += "as needed\n"
        for ifc in node.netifs():
            # Skip the management/control interface.
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            if os.uname()[0] == "Linux":
                rtcmd = "ip route add 224.0.0.0/4 dev"
            elif os.uname()[0] == "FreeBSD":
                rtcmd = "route add 224.0.0.0/4 -iface"
            else:
                raise Exception, "unknown platform"
            cfg += "%s %s\n" % (rtcmd, ifc.name)
            cfg += "\n"
            # Only the first non-control interface gets the route.
            break
        return cfg
addservice(DefaultMulticastRouteService)
class StaticRouteService(UtilService):
    ''' Emit commented-out sample static routes; the user must customize them.
    '''
    _name = "StaticRoute"
    _configs = ("staticroute.sh",)
    _startup = ("sh staticroute.sh",)
    # The generated file is only a template of samples (see routestr below).
    _custom_needed = True
    @classmethod
    def generateconfig(cls, node, filename, services):
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n"
        cfg += "# NOTE: this service must be customized to be of any use\n"
        cfg += "# Below are samples that you can uncomment and edit.\n#\n"
        for ifc in node.netifs():
            # Skip the management/control interface.
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "\n".join(map(cls.routestr, ifc.addrlist))
            cfg += "\n"
        return cfg
    @staticmethod
    def routestr(x):
        # Produce a commented sample route for the subnet of address x,
        # using a placeholder destination (10.9.8.0/24 or 3ffe:4::/64).
        if x.find(":") >= 0:
            net = IPv6Prefix(x)
            fam = "inet6"
            dst = "3ffe:4::/64"
        else:
            net = IPv4Prefix(x)
            fam = "inet"
            dst = "10.9.8.0/24"
        if net.maxaddr() == net.minaddr():
            # Host route: no usable next hop on this subnet.
            return ""
        else:
            if os.uname()[0] == "Linux":
                rtcmd = "#/sbin/ip route add %s via" % dst
            elif os.uname()[0] == "FreeBSD":
                rtcmd = "#/sbin/route add -%s %s" % (fam, dst)
            else:
                raise Exception, "unknown platform"
            return "%s %s" % (rtcmd, net.minaddr())
addservice(StaticRouteService)
class SshService(UtilService):
    ''' Run an OpenSSH server inside the node, generating its host key
        and configuration on first start.
    '''
    _name = "SSH"
    # On FreeBSD the config lives in the per-node directory; on Linux the
    # standard /etc/ssh and /var/run/sshd paths are created per node.
    if os.uname()[0] == "FreeBSD":
        _configs = ("startsshd.sh", "sshd_config",)
        _dirs = ()
    else:
        _configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
        _dirs = ("/etc/ssh", "/var/run/sshd",)
    _startup = ("sh startsshd.sh",)
    _shutdown = ("killall sshd",)
    _validate = ()
    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Use a startup script for launching sshd in order to wait for host
        key generation.
        '''
        if os.uname()[0] == "FreeBSD":
            sshcfgdir = node.nodedir
            sshstatedir = node.nodedir
            sshlibdir = "/usr/libexec"
        else:
            sshcfgdir = cls._dirs[0]
            sshstatedir = cls._dirs[1]
            sshlibdir = "/usr/lib/openssh"
        if filename == "startsshd.sh":
            return """\
#!/bin/sh
# auto-generated by SSH service (utility.py)
ssh-keygen -q -t rsa -N "" -f %s/ssh_host_rsa_key
chmod 655 %s
# wait until RSA host key has been generated to launch sshd
/usr/sbin/sshd -f %s/sshd_config
""" % (sshcfgdir, sshstatedir, sshcfgdir)
        else:
            return """\
# auto-generated by SSH service (utility.py)
Port 22
Protocol 2
HostKey %s/ssh_host_rsa_key
UsePrivilegeSeparation yes
PidFile %s/sshd.pid
KeyRegenerationInterval 3600
ServerKeyBits 768
SyslogFacility AUTH
LogLevel INFO
LoginGraceTime 120
PermitRootLogin yes
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
IgnoreRhosts yes
RhostsRSAAuthentication no
HostbasedAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
AcceptEnv LANG LC_*
Subsystem sftp %s/sftp-server
UsePAM yes
UseDNS no
""" % (sshcfgdir, sshstatedir, sshlibdir)
addservice(SshService)
class DhcpService(UtilService):
    ''' Run an ISC DHCP server with one pool per attached IPv4 subnet.
    '''
    _name = "DHCP"
    _configs = ("/etc/dhcp/dhcpd.conf",)
    _dirs = ("/etc/dhcp",)
    _startup = ("dhcpd",)
    _shutdown = ("killall dhcpd",)
    _validate = ("pidof dhcpd",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a dhcpd config file using the network address of
        each interface.
        '''
        cfg = """\
# auto-generated by DHCP service (utility.py)
# NOTE: move these option lines into the desired pool { } block(s) below
#option domain-name "test.com";
#option domain-name-servers 10.0.0.1;
#option routers 10.0.0.1;
log-facility local6;
default-lease-time 600;
max-lease-time 7200;
ddns-update-style none;
"""
        for ifc in node.netifs():
            # Skip the management/control interface.
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
            cfg += "\n"
        return cfg
    @staticmethod
    def subnetentry(x):
        ''' Generate a subnet declaration block given an IPv4 prefix string
        for inclusion in the dhcpd3 config file.
        '''
        if x.find(":") >= 0:
            # IPv6 addresses are not served by dhcpd; skip them.
            return ""
        else:
            addr = x.split("/")[0]
            net = IPv4Prefix(x)
            # divide the address space in half
            rangelow = net.addr(net.numaddr() / 2)
            rangehigh = net.maxaddr()
            # The node's own address is advertised as the default router.
            return """
subnet %s netmask %s {
  pool {
    range %s %s;
    default-lease-time 600;
    option routers %s;
  }
}
""" % (net.prefixstr(), net.netmaskstr(), rangelow, rangehigh, addr)
addservice(DhcpService)
class DhcpClientService(UtilService):
    ''' Use a DHCP client for all interfaces for addressing.
    '''
    _name = "DHCPClient"
    _configs = ("startdhcpclient.sh",)
    _startup = ("sh startdhcpclient.sh",)
    _shutdown = ("killall dhclient",)
    _validate = ("pidof dhclient",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a script to invoke dhclient on all interfaces.
        '''
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by DHCPClient service (utility.py)\n"
        cfg += "# uncomment this mkdir line and symlink line to enable client-"
        cfg += "side DNS\n# resolution based on the DHCP server response.\n"
        cfg += "#mkdir -p /var/run/resolvconf/interface\n"
        for ifc in node.netifs():
            # Skip the management/control interface.
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name
            cfg += " /var/run/resolvconf/resolv.conf\n"
            # -nw: don't wait for a lease; per-interface pid and lease files.
            cfg += "/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % ifc.name
            cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name,  ifc.name)
        return cfg
addservice(DhcpClientService)
class FtpService(UtilService):
    ''' Start a vsftpd server.
    '''
    _name = "FTP"
    _configs = ("vsftpd.conf",)
    _dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
    _startup = ("vsftpd ./vsftpd.conf",)
    _shutdown = ("killall vsftpd",)
    _validate = ("pidof vsftpd",)
    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a vsftpd.conf configuration file.
        '''
        # Static config: anonymous + local logins, chrooted to the dirs above.
        return """\
# vsftpd.conf auto-generated by FTP service (utility.py)
listen=YES
anonymous_enable=YES
local_enable=YES
dirmessage_enable=YES
use_localtime=YES
xferlog_enable=YES
connect_from_port_20=YES
xferlog_file=/var/log/vsftpd.log
ftpd_banner=Welcome to the CORE FTP service
secure_chroot_dir=/var/run/vsftpd/empty
anon_root=/var/ftp
"""
addservice(FtpService)
class HttpService(UtilService):
''' Start an apache server.
'''
_name = "HTTP"
_configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
"/var/www/index.html",)
_dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
"/run/lock", "/var/lock/apache2", "/var/www", )
_startup = ("chown www-data /var/lock/apache2", "apache2ctl start",)
_shutdown = ("apache2ctl stop",)
_validate = ("pidof apache2",)
APACHEVER22, APACHEVER24 = (22, 24)
@classmethod
def generateconfig(cls, node, filename, services):
''' Generate an apache2.conf configuration file.
'''
if filename == cls._configs[0]:
return cls.generateapache2conf(node, filename, services)
elif filename == cls._configs[1]:
return cls.generateenvvars(node, filename, services)
elif filename == cls._configs[2]:
return cls.generatehtml(node, filename, services)
else:
return ""
@classmethod
def detectversionfromcmd(cls):
''' Detect the apache2 version using the 'a2query' command.
'''
try:
status, result = cmdresult(['a2query', '-v'])
except Exception:
status = -1
if status == 0 and result[:3] == '2.4':
return cls.APACHEVER24
return cls.APACHEVER22
@classmethod
def generateapache2conf(cls, node, filename, services):
    """Generate the apache2.conf contents for this node.

    Version-dependent directives (mutex/lock file, MPM module loading,
    access-control syntax and authz module name) are chosen from the
    per-version lookup tables below, keyed by the detected apache
    version. All other content mirrors a stock Ubuntu installation.
    """
    # Directive snippets that differ between apache 2.2 and 2.4.
    lockstr = { cls.APACHEVER22:
        'LockFile ${APACHE_LOCK_DIR}/accept.lock\n',
        cls.APACHEVER24:
        'Mutex file:${APACHE_LOCK_DIR} default\n', }
    mpmstr = { cls.APACHEVER22: '', cls.APACHEVER24:
        'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', }
    permstr = { cls.APACHEVER22:
        ' Order allow,deny\n Deny from all\n Satisfy all\n',
        cls.APACHEVER24:
        ' Require all denied\n', }
    authstr = { cls.APACHEVER22:
        'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n',
        cls.APACHEVER24:
        'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', }
    permstr2 = { cls.APACHEVER22:
        '\t\tOrder allow,deny\n\t\tallow from all\n',
        cls.APACHEVER24:
        '\t\tRequire all granted\n', }
    version = cls.detectversionfromcmd()
    cfg ="# apache2.conf generated by utility.py:HttpService\n"
    cfg += lockstr[version]
    cfg += """\
PidFile ${APACHE_PID_FILE}
Timeout 300
KeepAlive On
MaxKeepAliveRequests 100
KeepAliveTimeout 5
"""
    # MPM selection (2.4 must load a worker module explicitly).
    cfg += mpmstr[version]
    cfg += """\
<IfModule mpm_prefork_module>
StartServers 5
MinSpareServers 5
MaxSpareServers 10
MaxClients 150
MaxRequestsPerChild 0
</IfModule>
<IfModule mpm_worker_module>
StartServers 2
MinSpareThreads 25
MaxSpareThreads 75
ThreadLimit 64
ThreadsPerChild 25
MaxClients 150
MaxRequestsPerChild 0
</IfModule>
<IfModule mpm_event_module>
StartServers 2
MinSpareThreads 25
MaxSpareThreads 75
ThreadLimit 64
ThreadsPerChild 25
MaxClients 150
MaxRequestsPerChild 0
</IfModule>
User ${APACHE_RUN_USER}
Group ${APACHE_RUN_GROUP}
AccessFileName .htaccess
<Files ~ "^\.ht">
"""
    # Deny access to .htaccess files using version-appropriate syntax.
    cfg += permstr[version]
    cfg += """\
</Files>
DefaultType None
HostnameLookups Off
ErrorLog ${APACHE_LOG_DIR}/error.log
LogLevel warn
#Include mods-enabled/*.load
#Include mods-enabled/*.conf
LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so
LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so
"""
    # Authz module name changed between 2.2 and 2.4.
    cfg += authstr[version]
    cfg += """\
LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so
LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so
LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
LoadModule env_module /usr/lib/apache2/modules/mod_env.so
NameVirtualHost *:80
Listen 80
<IfModule mod_ssl.c>
Listen 443
</IfModule>
<IfModule mod_gnutls.c>
Listen 443
</IfModule>
LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined
LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined
LogFormat "%h %l %u %t \\"%r\\" %>s %O" common
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent
ServerTokens OS
ServerSignature On
TraceEnable Off
<VirtualHost *:80>
ServerAdmin webmaster@localhost
DocumentRoot /var/www
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
<Directory /var/www/>
Options Indexes FollowSymLinks MultiViews
AllowOverride None
"""
    # Grant access to the document root with version-appropriate syntax.
    cfg += permstr2[version]
    cfg += """\
</Directory>
ErrorLog ${APACHE_LOG_DIR}/error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
"""
    return cfg
@classmethod
def generateenvvars(cls, node, filename, services):
    """Return the environment file consumed by apache2ctl.

    Values mirror a default Ubuntu apache2 installation; all of the
    generated apache2.conf ${...} variables are defined here.
    """
    return """\
# this file is used by apache2ctl - generated by utility.py:HttpService
# these settings come from a default Ubuntu apache2 installation
export APACHE_RUN_USER=www-data
export APACHE_RUN_GROUP=www-data
export APACHE_PID_FILE=/var/run/apache2.pid
export APACHE_RUN_DIR=/var/run/apache2
export APACHE_LOCK_DIR=/var/lock/apache2
export APACHE_LOG_DIR=/var/log/apache2
export LANG=C
export LANG
"""
@classmethod
def generatehtml(cls, node, filename, services):
    """Produce the placeholder index page served by this node.

    The page lists every non-control interface together with its
    address list, so the default page doubles as a quick status view.
    """
    header = """\
<!-- generated by utility.py:HttpService -->
<h1>%s web server</h1>
<p>This is the default web page for this server.</p>
<p>The web server software is running but no content has been added, yet.</p>
""" % node.name
    # One list item per interface, skipping control interfaces.
    interface_items = "".join(
        "<li>%s - %s</li>\n" % (netif.name, netif.addrlist)
        for netif in node.netifs()
        if not (hasattr(netif, 'control') and netif.control == True)
    )
    return "<html><body>%s</body></html>" % (header + interface_items)
addservice(HttpService)
class PcapService(UtilService):
    ''' Pcap service for logging packets.

    Generates a pcap.sh script that starts one tcpdump per interface and
    collects the resulting capture files on shutdown.
    '''
    # Registration metadata consumed by the CORE services framework.
    _name = "pcap"
    _configs = ("pcap.sh", )
    _dirs = ()
    _startindex = 1
    _startup = ("sh pcap.sh start",)
    _shutdown = ("sh pcap.sh stop",)
    _validate = ("pidof tcpdump",)
    _meta = "logs network traffic to pcap packet capture files"

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a startpcap.sh traffic logging script.
        '''
        cfg = """
#!/bin/sh
# set tcpdump options here (see 'man tcpdump' for help)
# (-s snap length, -C limit pcap file length, -n disable name resolution)
DUMPOPTS="-s 12288 -C 10 -n"
if [ "x$1" = "xstart" ]; then
"""
        for ifc in node.netifs():
            # Control interfaces get their capture line commented out
            # (prefixed with '# ') rather than omitted entirely.
            if hasattr(ifc, 'control') and ifc.control == True:
                cfg += '# '
            redir = "< /dev/null"
            cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \
                (node.name, ifc.name, ifc.name, redir)
        cfg += """
elif [ "x$1" = "xstop" ]; then
mkdir -p ${SESSION_DIR}/pcap
mv *.pcap ${SESSION_DIR}/pcap
fi;
"""
        return cfg
addservice(PcapService)
class RadvdService(UtilService):
    """Router Advertisement Daemon (radvd) service.

    Generates /etc/radvd/radvd.conf advertising the IPv6 prefixes of
    every non-control interface on the node.
    """
    _name = "radvd"
    _configs = ("/etc/radvd/radvd.conf",)
    _dirs = ("/etc/radvd",)
    _startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
    _shutdown = ("pkill radvd",)
    _validate = ("pidof radvd",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Generate a RADVD router advertisement daemon config file
        using the network address of each interface.
        '''
        cfg = "# auto-generated by RADVD service (utility.py)\n"
        for ifc in node.netifs():
            if hasattr(ifc, 'control') and ifc.control == True:
                continue
            # Materialize the prefixes as a list: map() returns a lazy
            # iterator on Python 3, which has no len() and can only be
            # consumed once, so the original map()/len() combination
            # raised TypeError there.
            prefixes = [cls.subnetentry(x) for x in ifc.addrlist]
            if not prefixes:
                continue
            cfg += """\
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
AdvDefaultPreference low;
AdvHomeAgentFlag off;
""" % ifc.name
            for prefix in prefixes:
                # subnetentry() returns "" for non-IPv6 addresses.
                if prefix == "":
                    continue
                cfg += """\
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
AdvRouterAddr on;
};
""" % prefix
            cfg += "};\n"
        return cfg

    @staticmethod
    def subnetentry(x):
        ''' Generate a subnet declaration block given an IPv6 prefix string
        for inclusion in the RADVD config file. Returns "" for addresses
        that are not IPv6 (no ':' present).
        '''
        if x.find(":") >= 0:
            net = IPv6Prefix(x)
            return str(net)
        else:
            return ""
addservice(RadvdService)
class AtdService(UtilService):
    ''' Atd service for scheduling at jobs
    '''
    # Registration metadata consumed by the CORE services framework.
    _name = "atd"
    _configs = ("startatd.sh",)
    _dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
    _startup = ("sh startatd.sh", )
    _shutdown = ("pkill atd", )

    @classmethod
    def generateconfig(cls, node, filename, services):
        # Seed the at(1) job sequence file and fix spool ownership and
        # permissions before launching the daemon.
        return """
#!/bin/sh
echo 00001 > /var/spool/cron/atjobs/.SEQ
chown -R daemon /var/spool/cron/*
chmod -R 700 /var/spool/cron/*
atd
"""
addservice(AtdService)
class UserDefinedService(UtilService):
    ''' Dummy service allowing customization of anything.

    Provides only registration metadata; users edit this service in the
    GUI/config to run arbitrary commands at startup.
    '''
    _name = "UserDefined"
    _startindex = 50
    _meta = "Customize this service to do anything upon startup."
addservice(UserDefinedService)
| 29.785319 | 102 | 0.599163 |
8d61e420089ca748f5ec4826806792fc02c44fc7 | 703 | py | Python | notes/bio/src/py/split-tabfile-by-column1.py | csiu/tokens | 8a7f865d921d91aae4019e43677435ad78a8a703 | [
"MIT"
] | null | null | null | notes/bio/src/py/split-tabfile-by-column1.py | csiu/tokens | 8a7f865d921d91aae4019e43677435ad78a8a703 | [
"MIT"
] | 1 | 2015-04-20T03:28:48.000Z | 2015-04-20T03:40:44.000Z | notes/bio/src/py/split-tabfile-by-column1.py | csiu/tokens | 8a7f865d921d91aae4019e43677435ad78a8a703 | [
"MIT"
] | null | null | null | # author: csiu
# date: Monday Nov 2, 2015
import sys
"""
USAGE: python <this-script>.py INFILE
Split file by chromosome
- Assumes is tab separated
- Assumes more than 1 columns
- Assumes first column is sorted and is chromosome field
- Assumes chromosomes are sorted
"""
# Split the input into one file per chromosome (first tab column).
original_file = sys.argv[1]

with open(original_file) as f:
    out = None  # handle for the current per-chromosome output file
    prev_chrom = ""
    for line in f:
        # First tab-separated column is the (sorted) chromosome name.
        now_chrom, _ = line.split('\t', 1)
        if now_chrom != prev_chrom:
            # Chromosome changed: finish the previous file, start a new
            # one named "<input>.<chrom>".
            if out is not None:
                out.close()
            outfile = original_file + '.' + now_chrom
            out = open(outfile, 'w')
            prev_chrom = now_chrom
        out.write(line)

# Close the last output file. The None guard fixes a NameError the
# original raised on completely empty input, where 'out' was never bound.
if out is not None:
    out.close()
| 20.676471 | 56 | 0.607397 |
d38264a656f30f2e4bb1f423fd65a2e88f32a00b | 1,911 | py | Python | Obsidian2Org/obsidian-to-org.py | AnweshGangula/PKMigrator | d6a070eb0453b31505e595191b724fb736c8438a | [
"MIT"
] | 24 | 2021-10-02T11:52:08.000Z | 2022-02-28T17:59:58.000Z | Obsidian2Org/obsidian-to-org.py | AnweshGangula/PKMigrator | d6a070eb0453b31505e595191b724fb736c8438a | [
"MIT"
] | null | null | null | Obsidian2Org/obsidian-to-org.py | AnweshGangula/PKMigrator | d6a070eb0453b31505e595191b724fb736c8438a | [
"MIT"
] | 3 | 2021-11-13T16:02:44.000Z | 2021-12-12T01:12:01.000Z | #!/usr/bin/python
import sys,re,os
# Create the pandoc output directory on first run.
if not os.path.isdir("out/"):
    os.mkdir("out/")

md_file = sys.argv[1]  # markdown input path (command-line argument)
org_file = md_file[:-3] + ".org"  # same basename, ".md" -> ".org"
def replace(pattern, substitution, filename):
    """Rewrite *filename* in place, applying re.sub(pattern, substitution).

    The whole file is read into memory, substituted once, and written
    back; truncate() guards against the new text being shorter than the
    original. Using a context manager (instead of the original explicit
    open/close pair) guarantees the handle is released even if re.sub
    or the write raises.
    """
    with open(filename, "r+") as f:
        content = re.sub(pattern, substitution, f.read())
        f.seek(0)
        f.write(content)
        f.truncate()
# Treat all comments in file: tag Obsidian %%...%% comments so they
# survive the pandoc conversion (restored to org '#' comments below).
re_comm = re.compile(r"^%%(.*?)%%", re.MULTILINE)
replace(re_comm, r"#!#comment: \1", md_file)

# Ensure space after "---" so pandoc does not treat the following line
# as part of a YAML/metadata block.
re_ruler = re.compile(r"^---\n(.+)", re.MULTILINE)
replace(re_ruler, r"---\n\n\1", md_file)

# Convert from md to org via pandoc.
# NOTE(review): md_file/org_file are interpolated into a shell string;
# names containing quotes would break the command (or inject into the
# shell) — consider shlex.quote. TODO confirm expected input names.
pandoc_command = 'pandoc -f markdown "{0}" --lua-filter=remove-header-attr.lua'\
    ' --wrap=preserve -o out/"{1}"'.format(md_file,org_file)
os.system(pandoc_command)

# Regularize comments: turn the tagged lines back into org comments.
re_comm_org = re.compile(r"^#!#comment:(.*?)$", re.MULTILINE)
replace(re_comm_org, r"#\1", "out/" + org_file)
# Convert all kinds of links from wiki syntax to org-mode syntax.
re_url = re.compile(r"\[\[(.*?)\]\[(.*?)\]\]")             # [[url][label]]
re_link = re.compile(r"\[\[(.*?)\]\]")                     # [[page]]
re_link_description = re.compile(r"\[\[(.*?)\|(.*?)\]\]")  # [[page|label]]
with open("out/" + org_file, "r+") as f:
    content = f.read()
    new_content = ""
    # Walk every [[...]] occurrence, rewriting it according to its form
    # and copying the text between occurrences through verbatim.
    matches = re.finditer(r"\[\[.*?\]\]", content)
    pos = 0
    for m in matches:
        s = m.start()
        e = m.end()
        m_string = m.group(0)
        if "://" in m_string:
            # External URL: already org-style [[url][label]]; keep as-is.
            new_content = new_content + content[pos:s] + re.sub(re_url, r"[[\1][\2]]", m_string)
        elif "|" in m_string:
            # Wiki link with a display label -> file link with that label.
            new_content = new_content + content[pos:s] + re.sub(re_link_description, r"[[file:\1.org][\2]]", m_string)
        else:
            # Bare wiki link: the page name doubles as the label.
            new_content = new_content + content[pos:s] + re.sub(re_link, r"[[file:\1.org][\1]]", m_string)
        pos = e
    new_content = new_content + content[pos:]
    # Write the converted text back over the file in place.
    f.seek(0)
    f.write(new_content)
    f.truncate()
print("Converted " + org_file) | 29.859375 | 118 | 0.581371 |
124a58fc015de09b942656aa1025cc8edcd8c009 | 1,754 | py | Python | geneticAlgorithm/population.py | Morgan-Dee/VirtualCreatures | fd581687c027bbd50c03cebb75225c9c00f0ffa9 | [
"MIT"
] | 1 | 2021-03-18T13:10:33.000Z | 2021-03-18T13:10:33.000Z | geneticAlgorithm/population.py | Morgan-Dee/VirtualCreatures | fd581687c027bbd50c03cebb75225c9c00f0ffa9 | [
"MIT"
] | null | null | null | geneticAlgorithm/population.py | Morgan-Dee/VirtualCreatures | fd581687c027bbd50c03cebb75225c9c00f0ffa9 | [
"MIT"
] | 1 | 2022-02-02T13:09:38.000Z | 2022-02-02T13:09:38.000Z | from geneticAlgorithm import individual as I
from copy import deepcopy
import random as rd
class POPULATION:
    """A pool of INDIVIDUALs plus the GA's selection/variation operators."""

    def __init__(self, popSize=5, initialize=True):
        # Either seed popSize fresh individuals, or start empty so the
        # caller can fill the population later (see fill_from).
        self.p = [I.INDIVIDUAL(idx) for idx in range(popSize)] if initialize else []

    def print(self, precede=''):
        # Non-empty populations print an optional prefix, each member,
        # then a trailing newline; empty populations print nothing.
        if len(self.p) > 0:
            print(precede, end=' ')
            for member in self.p:
                member.print()
            print()

    def evaluate(self, play_blind=False, best=False):
        """Simulate and score either the whole pool or only the fittest."""
        if best:
            self.eval_best()
            return
        for member in self.p:
            member.Start_Evaluation(play_blind=play_blind)
        for member in self.p:
            member.Compute_Fitness()

    def eval_best(self):
        champion = max(self.p, key=lambda member: member.fitness)
        champion.Start_Evaluation()
        champion.Compute_Fitness()

    def mutate(self):
        for member in self.p:
            member.mutate()

    def replaceWith(self, other):
        # Per-slot elitism: keep whichever individual at each index has
        # the higher fitness.
        for idx in range(len(self.p)):
            if other.p[idx].fitness > self.p[idx].fitness:
                self.p[idx] = other.p[idx]

    def fill_from(self, other):
        self.copy_best_from(other)
        self.collect_children_from(other)

    def copy_best_from(self, other):
        fittest = max(other.p, key=lambda member: member.fitness)
        self.p.append(deepcopy(fittest))

    def collect_children_from(self, other):
        # One child per remaining slot, chosen by tournament selection;
        # duplicates are mutated instead of copied verbatim.
        for _ in other.p[1:]:
            child = deepcopy(self.winner_of_tournament_selection(other))
            if child in self.p:
                # NOTE(review): appends mutate()'s return value here —
                # confirm INDIVIDUAL.mutate returns the individual.
                self.p.append(child.mutate())
            else:
                self.p.append(child)

    def winner_of_tournament_selection(self, other):
        first, second = 0, 0
        while first == second:
            # Draw two random members until they are distinct.
            first, second = [other.p[rd.randint(0, len(other.p) - 1)] for _ in [1, 2]]
        return max(first, second, key=lambda member: member.fitness)
| 31.321429 | 91 | 0.582098 |
cef01c48ab61e3602a94bd3d807d68b16da4d429 | 88,911 | py | Python | django/db/models/sql/query.py | brstrat/django | 90c76564669fa03caefcf4318ffdf9ba8fa4d40b | [
"BSD-3-Clause"
] | null | null | null | django/db/models/sql/query.py | brstrat/django | 90c76564669fa03caefcf4318ffdf9ba8fa4d40b | [
"BSD-3-Clause"
] | null | null | null | django/db/models/sql/query.py | brstrat/django | 90c76564669fa03caefcf4318ffdf9ba8fa4d40b | [
"BSD-3-Clause"
] | null | null | null | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.utils.tree import Node
from django.utils import six
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
ORDER_PATTERN, JoinInfo)
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
    """
    A single raw SQL query
    """

    def __init__(self, sql, using, params=None):
        self.sql = sql
        self.using = using
        self.params = params or ()
        self.cursor = None
        # Mirror some properties of a normal query so that
        # the compiler can be used to process results.
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.extra_select = {}
        self.aggregate_select = {}

    def clone(self, using):
        """Return a copy of this raw query bound to the given alias."""
        return RawQuery(self.sql, using, params=self.params)

    def convert_values(self, value, field, connection):
        """Convert the database-returned value into a type that is consistent
        across database backends.

        By default, this defers to the underlying backend operations, but
        it can be overridden by Query classes for specific backends.
        """
        return connection.ops.convert_values(value, field)

    def get_columns(self):
        # Run the query lazily the first time column metadata is needed.
        if self.cursor is None:
            self._execute_query()
        convert = connections[self.using].introspection.table_name_converter
        return [convert(description[0]) for description in self.cursor.description]

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        if connections[self.using].features.can_use_chunked_reads:
            rows = self.cursor
        else:
            # Backends without chunked reads must be evaluated entirely
            # up front.
            rows = list(self.cursor)
        return iter(rows)

    def __repr__(self):
        rendered = self.sql % tuple(self.params)
        return "<RawQuery: %r>" % rendered

    def _execute_query(self):
        # Open a cursor on the configured connection and run the raw SQL.
        self.cursor = connections[self.using].cursor()
        self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = SortedDict()
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# type they are. The key is the alias of the joined table (possibly
# the table name) and the value is JoinInfo from constants.py.
self.alias_map = {}
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.ordering_aliases = []
self.related_select_fields = []
self.dupe_avoidance = {}
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
self.select = []
# For each to-be-selected field in self.select there must be a
# corresponding entry in self.select - git seems to need this.
self.select_fields = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
self.related_select_cols = []
# SQL aggregate-related attributes
self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
subsituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def __getstate__(self):
"""
Pickling support.
"""
obj_dict = self.__dict__.copy()
obj_dict['related_select_fields'] = []
obj_dict['related_select_cols'] = []
# Fields can't be pickled, so if a field list has been
# specified, we pickle the list of field names instead.
# None is also a possible value; that can pass as-is
obj_dict['select_fields'] = [
f is not None and f.name or None
for f in obj_dict['select_fields']
]
return obj_dict
def __setstate__(self, obj_dict):
"""
Unpickling support.
"""
# Rebuild list of field instances
opts = obj_dict['model']._meta
obj_dict['select_fields'] = [
name is not None and opts.get_field(name) or None
for name in obj_dict['select_fields']
]
self.__dict__.update(obj_dict)
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.ordering_aliases = []
obj.select_fields = self.select_fields[:]
obj.related_select_fields = self.related_select_fields[:]
obj.dupe_avoidance = self.dupe_avoidance.copy()
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = copy.deepcopy(self.where, memo=memo)
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = copy.deepcopy(self.having, memo=memo)
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.related_select_cols = []
obj.aggregates = copy.deepcopy(self.aggregates, memo=memo)
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.deepcopy(self.deferred_loading, memo=memo)
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregate_select[alias] = aggregate
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self.extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_for_update = False
query.select_related = False
query.related_select_cols = []
query.related_select_fields = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict([
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
])
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
# add_subquery evaluates the query, if it's an EmptyResultSet
# then there are can be no results, and therefore there the
# count is obviously 0
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def has_results(self, using):
q = self.clone()
q.clear_select_clause()
q.add_extra({'a': 1}, None, None, None, None, None)
q.set_extra_mask(['a'])
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return bool(compiler.execute_sql(SINGLE))
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
used = set()
conjunction = (connector == AND)
# Add the joins in the rhs query into the new query.
first = True
for alias in rhs.tables:
if not rhs.alias_refcount[alias]:
# An unused alias.
continue
table, _, join_type, lhs, lhs_col, col, _ = rhs.alias_map[alias]
promote = join_type == self.LOUTER
# If the left side of the join was already relabeled, use the
# updated alias.
lhs = change_map.get(lhs, lhs)
new_alias = self.join((lhs, table, lhs_col, col),
conjunction and not first, used, promote, not conjunction)
used.add(new_alias)
change_map[alias] = new_alias
first = False
# So that we don't exclude valid results in an "or" query combination,
# all joins exclusive to either the lhs or the rhs must be converted
# to an outer join.
if not conjunction:
l_tables = set(self.tables)
r_tables = set(rhs.tables)
# Update r_tables aliases.
for alias in change_map:
if alias in r_tables:
# r_tables may contain entries that have a refcount of 0
# if the query has references to a table that can be
# trimmed because only the foreign key is used.
# We only need to fix the aliases for the tables that
# actually have aliases.
if rhs.alias_refcount[alias]:
r_tables.remove(alias)
r_tables.add(change_map[alias])
# Find aliases that are exclusive to rhs or lhs.
# These are promoted to outer joins.
outer_tables = (l_tables | r_tables) - (l_tables & r_tables)
for alias in outer_tables:
# Again, some of the tables won't have aliases due to
# the trimming of unnecessary tables.
if self.alias_refcount.get(alias) or rhs.alias_refcount.get(alias):
self.promote_joins([alias], True)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = copy.deepcopy(rhs.where)
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
if isinstance(col, (list, tuple)):
self.select.append((change_map.get(col[0], col[0]), col[1]))
else:
item = copy.deepcopy(col)
item.relabel_aliases(change_map)
self.select.append(item)
self.select_fields = rhs.select_fields[:]
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.model._meta
seen = {}
must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
cur_model = source.rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# to the things we select.
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in six.iteritems(seen):
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in six.iteritems(must_include):
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in six.iteritems(workset):
callback(target, model, values)
else:
for model, values in six.iteritems(must_include):
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in six.iteritems(seen):
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
    """
    Callback for deferred_to_columns(): records the column names of
    ``fields`` under the model's database table inside the ``target``
    mapping (table name -> set of column names).
    """
    table_name = model._meta.db_table
    columns = target.setdefault(table_name, set())
    columns.update(f.column for f in fields)
def table_alias(self, table_name, create=False):
    """
    Returns an (alias, is_new) pair for table_name.

    Unless 'create' is True, the first alias registered for the table is
    reused (its reference count is bumped) and is_new is False. Otherwise
    a fresh alias is allocated: the first alias for a table is the table
    name itself; later ones are synthetic '<prefix><n>' names.
    """
    existing = self.table_map.get(table_name)
    if existing and not create:
        # Reuse the primary alias for this table.
        reused = existing[0]
        self.alias_refcount[reused] += 1
        return reused, False

    # Allocate a new alias for this table.
    if existing:
        # Subsequent occurrences of the table get a generated alias.
        alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
        existing.append(alias)
    else:
        # The first occurrence of a table uses the table name directly.
        alias = table_name
        self.table_map[alias] = [alias]
    self.alias_refcount[alias] = 1
    self.tables.append(alias)
    return alias, True
def ref_alias(self, alias):
    """Increases the reference count for this alias by one."""
    self.alias_refcount[alias] = self.alias_refcount[alias] + 1
def unref_alias(self, alias, amount=1):
    """Decreases the reference count for this alias by ``amount``."""
    self.alias_refcount[alias] = self.alias_refcount[alias] - amount
def promote_joins(self, aliases, unconditional=False):
    """
    Promotes recursively the join type of given aliases and its children to
    an outer join. If 'unconditional' is False, the join is only promoted if
    it is nullable or the parent join is an outer join.

    Note about join promotion: When promoting any alias, we make sure all
    joins which start from that alias are promoted, too. When adding a join
    in join(), we make sure any join added to already existing LOUTER join
    is generated as LOUTER. This ensures we don't ever have broken join
    chains which contain first a LOUTER join, then an INNER JOIN, that is
    this kind of join should never be generated: a LOUTER b INNER c. The
    reason for avoiding this type of join chain is that the INNER after
    the LOUTER will effectively remove any effect the LOUTER had.
    """
    # Worklist algorithm: pop an alias, promote it if the conditions hold,
    # and push all joins hanging off it so the whole subtree is examined.
    aliases = list(aliases)
    while aliases:
        alias = aliases.pop(0)
        if self.alias_map[alias].rhs_join_col is None:
            # This is the base table (first FROM entry) - this table
            # isn't really joined at all in the query, so we should not
            # alter its join type.
            continue
        parent_alias = self.alias_map[alias].lhs_alias
        parent_louter = (parent_alias
            and self.alias_map[parent_alias].join_type == self.LOUTER)
        already_louter = self.alias_map[alias].join_type == self.LOUTER
        if ((unconditional or self.alias_map[alias].nullable
            or parent_louter) and not already_louter):
            # alias_map entries support _replace, so build an updated copy
            # rather than mutating in place.
            data = self.alias_map[alias]._replace(join_type=self.LOUTER)
            self.alias_map[alias] = data
            # Join type of 'alias' changed, so re-examine all aliases that
            # refer to this one.
            aliases.extend(
                join for join in self.alias_map.keys()
                if (self.alias_map[join].lhs_alias == alias
                    and join not in aliases))
def reset_refcounts(self, to_counts):
    """
    Resets alias reference counts so they match the values given in
    ``to_counts`` (aliases absent from ``to_counts`` drop to zero).
    """
    # Iterate over a snapshot: unref_alias() mutates alias_refcount.
    snapshot = self.alias_refcount.copy()
    for alias, current in snapshot.items():
        self.unref_alias(alias, current - to_counts.get(alias, 0))
def promote_unused_aliases(self, initial_refcounts, used_aliases):
    """
    Given a "before" copy of the alias_refcounts dictionary
    ('initial_refcounts') and a collection of aliases that may have been
    changed or created, promotes to outer joins every alias in
    'used_aliases' that is either new since the snapshot or whose
    reference count has not changed since then (i.e. it was not reused).
    """
    for alias in self.tables:
        if alias not in used_aliases:
            continue
        is_new = alias not in initial_refcounts
        unchanged = (not is_new and
            self.alias_refcount[alias] == initial_refcounts[alias])
        if is_new or unchanged:
            self.promote_joins([alias])
def change_aliases(self, change_map):
    """
    Changes the aliases in change_map (which maps old-alias -> new-alias),
    relabelling any references to them in select columns and the where
    clause.
    """
    # No alias may appear as both a key and a value; otherwise one rename
    # could be applied twice.
    assert set(change_map.keys()).intersection(set(change_map.values())) == set()
    # 1. Update references in "select" (normal columns plus aliases),
    # "group by", "where" and "having".
    self.where.relabel_aliases(change_map)
    self.having.relabel_aliases(change_map)
    for columns in [self.select, self.group_by or []]:
        for pos, col in enumerate(columns):
            if isinstance(col, (list, tuple)):
                # A plain (alias, column) pair: rewrite the alias part.
                old_alias = col[0]
                columns[pos] = (change_map.get(old_alias, old_alias), col[1])
            else:
                # An expression object that knows how to relabel itself.
                col.relabel_aliases(change_map)
    for mapping in [self.aggregates]:
        for key, col in mapping.items():
            if isinstance(col, (list, tuple)):
                old_alias = col[0]
                mapping[key] = (change_map.get(old_alias, old_alias), col[1])
            else:
                col.relabel_aliases(change_map)
    # 2. Rename the alias in the internal table/alias datastructures.
    for k, aliases in self.join_map.items():
        aliases = tuple([change_map.get(a, a) for a in aliases])
        self.join_map[k] = aliases
    for old_alias, new_alias in six.iteritems(change_map):
        alias_data = self.alias_map[old_alias]
        alias_data = alias_data._replace(rhs_alias=new_alias)
        # Move the refcount and join data over to the new alias key.
        self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
        del self.alias_refcount[old_alias]
        self.alias_map[new_alias] = alias_data
        del self.alias_map[old_alias]
        table_aliases = self.table_map[alias_data.table_name]
        for pos, alias in enumerate(table_aliases):
            if alias == old_alias:
                table_aliases[pos] = new_alias
                break
        for pos, alias in enumerate(self.tables):
            if alias == old_alias:
                self.tables[pos] = new_alias
                break
    for key, alias in self.included_inherited_models.items():
        if alias in change_map:
            self.included_inherited_models[key] = change_map[alias]
    # 3. Update any joins that refer to the old alias.
    for alias, data in six.iteritems(self.alias_map):
        lhs = data.lhs_alias
        if lhs in change_map:
            data = data._replace(lhs_alias=change_map[lhs])
            self.alias_map[alias] = data
def bump_prefix(self, exceptions=()):
    """
    Advances the alias prefix to the next letter of the alphabet and
    relabels all the aliases. Even tables that previously had no alias
    will get one after this call (it's mostly used for nested queries,
    where the outer query keeps the non-aliased table names).

    Subclasses who create their own prefix should override this method to
    produce a similar result (a new prefix and relabelled aliases).

    Aliases listed in the 'exceptions' container are left unchanged.
    """
    old_ord = ord(self.alias_prefix)
    # Running past 'Z' would stop producing unique single-letter prefixes.
    assert old_ord < ord('Z')
    self.alias_prefix = chr(old_ord + 1)
    change_map = SortedDict()
    for pos, alias in enumerate(self.tables):
        if alias in exceptions:
            continue
        renamed = '%s%d' % (self.alias_prefix, pos)
        change_map[alias] = renamed
        self.tables[pos] = renamed
    self.change_aliases(change_map)
def get_initial_alias(self):
    """
    Returns the first alias for this query, after increasing its reference
    count. If the query has no tables yet, the base table is joined in
    first.
    """
    if not self.tables:
        return self.join((None, self.model._meta.db_table, None, None))
    first = self.tables[0]
    self.ref_alias(first)
    return first
def count_active_tables(self):
    """
    Returns how many tables in this query have a non-zero reference
    count. Note that after execution, the reference counts are zeroed, so
    tables added in compiler will not be seen by this method.
    """
    return sum(1 for count in six.itervalues(self.alias_refcount) if count)
def join(self, connection, always_create=False, exclusions=(),
        promote=False, outer_if_first=False, nullable=False, reuse=None):
    """
    Returns an alias for the join in 'connection', either reusing an
    existing alias for that join or creating a new one. 'connection' is a
    tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
    table alias or a table name. The join correspods to the SQL equivalent
    of::

        lhs.lhs_col = table.col

    If 'always_create' is True and 'reuse' is None, a new alias is always
    created, regardless of whether one already exists or not. If
    'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
    matches the connection will be returned, if possible. If
    'always_create' is False, the first existing alias that matches the
    'connection' is returned, if any. Otherwise a new join is created.

    If 'exclusions' is specified, it is something satisfying the container
    protocol ("foo in exclusions" must work) and specifies a list of
    aliases that should not be returned, even if they satisfy the join.

    If 'promote' is True, the join type for the alias will be LOUTER (if
    the alias previously existed, the join type will be promoted from INNER
    to LOUTER, if necessary).

    If 'outer_if_first' is True and a new join is created, it will have the
    LOUTER join type. This is used when joining certain types of querysets
    and Q-objects together.

    A join is always created as LOUTER if the lhs alias is LOUTER to make
    sure we do not generate chains like a LOUTER b INNER c.

    If 'nullable' is True, the join can potentially involve NULL values and
    is a candidate for promotion (to "left outer") when combining querysets.
    """
    lhs, table, lhs_col, col = connection
    if lhs in self.alias_map:
        lhs_table = self.alias_map[lhs].table_name
    else:
        lhs_table = lhs
    if reuse and always_create and table in self.table_map:
        # Convert the 'reuse' to case to be "exclude everything but the
        # reusable set, minus exclusions, for this table".
        exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
        always_create = False
    # Identity of the join, used as key into join_map for reuse lookups.
    t_ident = (lhs_table, table, lhs_col, col)
    if not always_create:
        for alias in self.join_map.get(t_ident, ()):
            if alias not in exclusions:
                if lhs_table and not self.alias_refcount[self.alias_map[alias].lhs_alias]:
                    # The LHS of this join tuple is no longer part of the
                    # query, so skip this possibility.
                    continue
                if self.alias_map[alias].lhs_alias != lhs:
                    continue
                self.ref_alias(alias)
                if promote or (lhs and self.alias_map[lhs].join_type == self.LOUTER):
                    self.promote_joins([alias])
                return alias
    # No reuse is possible, so we need a new alias.
    alias, _ = self.table_alias(table, True)
    if not lhs:
        # Not all tables need to be joined to anything. No join type
        # means the later columns are ignored.
        join_type = None
    elif (promote or outer_if_first
            or self.alias_map[lhs].join_type == self.LOUTER):
        # We need to use LOUTER join if asked by promote or outer_if_first,
        # or if the LHS table is left-joined in the query. Adding inner join
        # to an existing outer join effectively cancels the effect of the
        # outer join.
        join_type = self.LOUTER
    else:
        join_type = self.INNER
    join = JoinInfo(table, alias, join_type, lhs, lhs_col, col, nullable)
    self.alias_map[alias] = join
    # Register the new alias so later identical joins can be reused.
    if t_ident in self.join_map:
        self.join_map[t_ident] += (alias,)
    else:
        self.join_map[t_ident] = (alias,)
    return alias
def setup_inherited_models(self):
    """
    If the model that is the basis for this QuerySet inherits other models,
    we need to ensure that those other models have their tables included in
    the query.

    We do this as a separate step so that subclasses know which
    tables are going to be active in the query, without needing to compute
    all the select columns (this method is called from pre_sql_setup(),
    whereas column determination is a later part, and side-effect, of
    as_sql()).
    """
    # Skip all proxy models
    opts = self.model._meta.concrete_model._meta
    root_alias = self.tables[0]
    # Map of model -> alias; the None key marks the root (child) table.
    seen = {None: root_alias}
    for field, model in opts.get_fields_with_model():
        if model not in seen:
            # Join each parent table through the implicit ancestor link
            # (parent-link column -> parent pk column).
            link_field = opts.get_ancestor_link(model)
            seen[model] = self.join((root_alias, model._meta.db_table,
                    link_field.column, model._meta.pk.column))
    self.included_inherited_models = seen
def remove_inherited_models(self):
    """
    Undoes the effects of setup_inherited_models(): drops the reference
    taken on every inherited-model alias (the root entry, keyed by None,
    holds no reference of its own) and clears the bookkeeping dict.
    Should be called whenever select columns (self.select) are set
    explicitly.
    """
    for model, alias in self.included_inherited_models.items():
        if model:
            self.unref_alias(alias)
    self.included_inherited_models = {}
def need_force_having(self, q_object):
    """
    Returns True when any leaf of this q_object references an aggregate,
    meaning all elements of the tree need to be put together in the
    HAVING clause.
    """
    for child in q_object.children:
        if isinstance(child, Node):
            if self.need_force_having(child):
                return True
        elif child[0].split(LOOKUP_SEP)[0] in self.aggregates:
            return True
    return False
def add_aggregate(self, aggregate, model, alias, is_summary):
    """
    Adds a single aggregate expression to the Query.

    'aggregate' is the aggregate object, 'model' the model it applies to,
    'alias' the name the result will be known by, and 'is_summary' is
    True when the aggregate is computed over the whole queryset rather
    than per row. Raises FieldError when aggregating over an aggregate in
    a non-summary context.
    """
    opts = model._meta
    field_list = aggregate.lookup.split(LOOKUP_SEP)
    if len(field_list) == 1 and aggregate.lookup in self.aggregates:
        # Aggregate is over an annotation
        field_name = field_list[0]
        col = field_name
        source = self.aggregates[field_name]
        if not is_summary:
            # Aggregating over another aggregate only makes sense for a
            # whole-queryset summary value.
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                aggregate.name, field_name, field_name))
    elif ((len(field_list) > 1) or
        (field_list[0] not in [i.name for i in opts.fields]) or
        self.group_by is None or
        not is_summary):
        # If:
        # - the field descriptor has more than one part (foo__bar), or
        # - the field descriptor is referencing an m2m/m2o field, or
        # - this is a reference to a model field (possibly inherited), or
        # - this is an annotation over a model field
        # then we need to explore the joins that are required.
        field, source, opts, join_list, last, _ = self.setup_joins(
            field_list, opts, self.get_initial_alias(), False)
        # Process the join chain to see if it can be trimmed
        col, _, join_list = self.trim_joins(source, join_list, last, False)
        # If the aggregate references a model or field that requires a join,
        # those joins must be LEFT OUTER - empty join rows must be returned
        # in order for zeros to be returned for those aggregates.
        self.promote_joins(join_list, True)
        col = (join_list[-1], col)
    else:
        # The simplest cases. No joins required -
        # just reference the provided column alias.
        field_name = field_list[0]
        source = opts.get_field(field_name)
        col = field_name
    # Add the aggregate to the query
    aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
        can_reuse=None, process_extras=True, force_having=False):
    """
    Add a single filter to the query. The 'filter_expr' is a pair:
    (filter_string, value). E.g. ('name__contains', 'fred')

    If 'negate' is True, this is an exclude() filter. It's important to
    note that this method does not negate anything in the where-clause
    object when inserting the filter constraints. This is because negated
    filters often require multiple calls to add_filter() and the negation
    should only happen once. So the caller is responsible for this (the
    caller will normally be add_q(), so that as an example).

    If 'trim' is True, we automatically trim the final join group (used
    internally when constructing nested queries).

    If 'can_reuse' is a set, we are processing a component of a
    multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
    will be a set of table aliases that can be reused in this filter, even
    if we would otherwise force the creation of new aliases for a join
    (needed for nested Q-filters). The set is updated by this method.

    If 'process_extras' is set, any extra filters returned from the table
    joining process will be processed. This parameter is set to False
    during the processing of extra filters to avoid infinite recursion.
    """
    arg, value = filter_expr
    parts = arg.split(LOOKUP_SEP)
    if not parts:
        raise FieldError("Cannot parse keyword query %r" % arg)

    # Work out the lookup type and remove it from the end of 'parts',
    # if necessary.
    lookup_type = 'exact'  # Default lookup type
    num_parts = len(parts)
    if (len(parts) > 1 and parts[-1] in self.query_terms
            and arg not in self.aggregates):
        # Traverse the lookup query to distinguish related fields from
        # lookup types.
        lookup_model = self.model
        for counter, field_name in enumerate(parts):
            try:
                lookup_field = lookup_model._meta.get_field(field_name)
            except FieldDoesNotExist:
                # Not a field. Bail out.
                lookup_type = parts.pop()
                break
            # Unless we're at the end of the list of lookups, let's attempt
            # to continue traversing relations.
            if (counter + 1) < num_parts:
                try:
                    lookup_model = lookup_field.rel.to
                except AttributeError:
                    # Not a related field. Bail out.
                    lookup_type = parts.pop()
                    break

    # By default, this is a WHERE clause. If an aggregate is referenced
    # in the value, the filter will be promoted to a HAVING
    having_clause = False

    # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
    # uses of None as a query value.
    if value is None:
        if lookup_type != 'exact':
            raise ValueError("Cannot use None as a query value")
        lookup_type = 'isnull'
        value = True
    elif callable(value):
        # Callables are evaluated lazily, at filter time.
        value = value()
    elif isinstance(value, ExpressionNode):
        # If value is a query expression, evaluate it
        value = SQLEvaluator(value, self)
        having_clause = value.contains_aggregate

    # A filter directly against an aggregate alias goes straight into
    # HAVING; no join setup is needed.
    for alias, aggregate in self.aggregates.items():
        if alias in (parts[0], LOOKUP_SEP.join(parts)):
            entry = self.where_class()
            entry.add((aggregate, lookup_type, value), AND)
            if negate:
                entry.negate()
            self.having.add(entry, connector)
            return

    opts = self.get_meta()
    alias = self.get_initial_alias()
    allow_many = trim or not negate

    try:
        field, target, opts, join_list, last, extra_filters = self.setup_joins(
                parts, opts, alias, True, allow_many, allow_explicit_fk=True,
                can_reuse=can_reuse, negate=negate,
                process_extras=process_extras)
    except MultiJoin as e:
        # An excluded multi-valued relation must become a subquery.
        self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                can_reuse)
        return

    table_promote = False
    join_promote = False

    if (lookup_type == 'isnull' and value is True and not negate and
            len(join_list) > 1):
        # If the comparison is against NULL, we may need to use some left
        # outer joins when creating the join chain. This is only done when
        # needed, as it's less efficient at the database level.
        self.promote_joins(join_list)
        join_promote = True

    # Process the join list to see if we can remove any inner joins from
    # the far end (fewer tables in a query is better).
    nonnull_comparison = (lookup_type == 'isnull' and value is False)
    col, alias, join_list = self.trim_joins(target, join_list, last, trim,
            nonnull_comparison)

    if connector == OR:
        # Some joins may need to be promoted when adding a new filter to a
        # disjunction. We walk the list of new joins and where it diverges
        # from any previous joins (ref count is 1 in the table list), we
        # make the new additions (and any existing ones not used in the new
        # join list) an outer join.
        join_it = iter(join_list)
        table_it = iter(self.tables)
        # Skip the base table entry in both sequences.
        next(join_it), next(table_it)
        unconditional = False
        for join in join_it:
            table = next(table_it)
            # Once we hit an outer join, all subsequent joins must
            # also be promoted, regardless of whether they have been
            # promoted as a result of this pass through the tables.
            unconditional = (unconditional or
                self.alias_map[join].join_type == self.LOUTER)
            if join == table and self.alias_refcount[join] > 1:
                # We have more than one reference to this join table.
                # This means that we are dealing with two different query
                # subtrees, so we don't need to do any join promotion.
                continue
            join_promote = join_promote or self.promote_joins([join], unconditional)
            if table != join:
                table_promote = self.promote_joins([table])
                # We only get here if we have found a table that exists
                # in the join list, but isn't on the original tables list.
                # This means we've reached the point where we only have
                # new tables, so we can break out of this promotion loop.
                break
        self.promote_joins(join_it, join_promote)
        self.promote_joins(table_it, table_promote or join_promote)

    if having_clause or force_having:
        if (alias, col) not in self.group_by:
            self.group_by.append((alias, col))
        self.having.add((Constraint(alias, col, field), lookup_type, value),
            connector)
    else:
        self.where.add((Constraint(alias, col, field), lookup_type, value),
            connector)

    if negate:
        self.promote_joins(join_list)
        if lookup_type != 'isnull':
            if len(join_list) > 1:
                for alias in join_list:
                    if self.alias_map[alias].join_type == self.LOUTER:
                        j_col = self.alias_map[alias].rhs_join_col
                        # The join promotion logic should never produce
                        # a LOUTER join for the base join - assert that.
                        assert j_col is not None
                        entry = self.where_class()
                        entry.add(
                            (Constraint(alias, j_col, None), 'isnull', True),
                            AND
                        )
                        entry.negate()
                        self.where.add(entry, AND)
                        break
            if self.is_nullable(field):
                # In SQL NULL = anyvalue returns unknown, and NOT unknown
                # is still unknown. However, in Python None = anyvalue is False
                # (and not False is True...), and we want to return this Python's
                # view of None handling. So we need to specifically exclude the
                # NULL values, and because we are inside NOT branch they will
                # be included in the final resultset. We are essentially creating
                # SQL like this here: NOT (col IS NOT NULL), where the first NOT
                # is added in upper layers of the code.
                self.where.add((Constraint(alias, col, None), 'isnull', False), AND)

    if can_reuse is not None:
        can_reuse.update(join_list)
    if process_extras:
        for filter in extra_filters:
            self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                    process_extras=False)
def add_q(self, q_object, used_aliases=None, force_having=False):
    """
    Adds a Q-object to the current filter.

    Can also be used to add anything that has an 'add_to_query()' method.
    """
    if used_aliases is None:
        used_aliases = self.used_aliases
    if hasattr(q_object, 'add_to_query'):
        # Complex custom objects are responsible for adding themselves.
        q_object.add_to_query(self, used_aliases)
    else:
        # Wrap a multi-child OR tree in an AND subtree so it combines
        # correctly with the existing where clause.
        if self.where and q_object.connector != AND and len(q_object) > 1:
            self.where.start_subtree(AND)
            subtree = True
        else:
            subtree = False
        connector = AND
        if q_object.connector == OR and not force_having:
            force_having = self.need_force_having(q_object)
        for child in q_object.children:
            if connector == OR:
                # Snapshot refcounts so new/unused aliases added by this
                # child can be detected and promoted afterwards.
                refcounts_before = self.alias_refcount.copy()
            if force_having:
                self.having.start_subtree(connector)
            else:
                self.where.start_subtree(connector)
            if isinstance(child, Node):
                self.add_q(child, used_aliases, force_having=force_having)
            else:
                self.add_filter(child, connector, q_object.negated,
                        can_reuse=used_aliases, force_having=force_having)
            if force_having:
                self.having.end_subtree()
            else:
                self.where.end_subtree()
            if connector == OR:
                # Aliases that were newly added or not used at all need to
                # be promoted to outer joins if they are nullable relations.
                # (they shouldn't turn the whole conditional into the empty
                # set just because they don't match anything).
                self.promote_unused_aliases(refcounts_before, used_aliases)
            connector = q_object.connector
        if q_object.negated:
            self.where.negate()
        if subtree:
            self.where.end_subtree()
    if self.filter_is_sticky:
        self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
        allow_explicit_fk=False, can_reuse=None, negate=False,
        process_extras=True):
    """
    Compute the necessary table joins for the passage through the fields
    given in 'names'. 'opts' is the Options class for the current model
    (which gives the table we are joining to), 'alias' is the alias for the
    table we are joining to. If dupe_multis is True, any many-to-many or
    many-to-one joins will always create a new alias (necessary for
    disjunctive filters). If can_reuse is not None, it's a list of aliases
    that can be reused in these joins (nothing else can be reused in this
    case). Finally, 'negate' is used in the same sense as for add_filter()
    -- it indicates an exclude() filter, or something similar. It is only
    passed in here so that it can be passed to a field's extra_filter() for
    customized behavior.

    Returns the final field involved in the join, the target database
    column (used for any 'where' constraint), the final 'opts' value and the
    list of tables joined.
    """
    joins = [alias]
    # 'last' records offsets into 'joins' per lookup component; consumed
    # later by trim_joins().
    last = [0]
    dupe_set = set()
    exclusions = set()
    extra_filters = []
    int_alias = None
    for pos, name in enumerate(names):
        if int_alias is not None:
            exclusions.add(int_alias)
        exclusions.add(alias)
        last.append(len(joins))
        if name == 'pk':
            name = opts.pk.name
        try:
            field, model, direct, m2m = opts.get_field_by_name(name)
        except FieldDoesNotExist:
            for f in opts.fields:
                if allow_explicit_fk and name == f.attname:
                    # XXX: A hack to allow foo_id to work in values() for
                    # backwards compatibility purposes. If we dropped that
                    # feature, this could be removed.
                    field, model, direct, m2m = opts.get_field_by_name(f.name)
                    break
            else:
                names = opts.get_all_field_names() + list(self.aggregate_select)
                raise FieldError("Cannot resolve keyword %r into field. "
                        "Choices are: %s" % (name, ", ".join(names)))

        if not allow_many and (m2m or not direct):
            # Multi-valued relations are forbidden here; release the
            # references taken so far before bailing out.
            for alias in joins:
                self.unref_alias(alias)
            raise MultiJoin(pos + 1)
        if model:
            # The field lives on a base class of the current model.
            # Skip the chain of proxy to the concrete proxied model
            proxied_model = opts.concrete_model

            for int_model in opts.get_base_chain(model):
                if int_model is proxied_model:
                    opts = int_model._meta
                else:
                    lhs_col = opts.parents[int_model].column
                    dedupe = lhs_col in opts.duplicate_targets
                    if dedupe:
                        exclusions.update(self.dupe_avoidance.get(
                                (id(opts), lhs_col), ()))
                        dupe_set.add((opts, lhs_col))
                    opts = int_model._meta
                    alias = self.join((alias, opts.db_table, lhs_col,
                            opts.pk.column), exclusions=exclusions)
                    joins.append(alias)
                    exclusions.add(alias)
                    for (dupe_opts, dupe_col) in dupe_set:
                        self.update_dupe_avoidance(dupe_opts, dupe_col,
                                alias)
        cached_data = opts._join_cache.get(name)
        orig_opts = opts
        dupe_col = direct and field.column or field.field.column
        dedupe = dupe_col in opts.duplicate_targets
        if dupe_set or dedupe:
            if dedupe:
                dupe_set.add((opts, dupe_col))
            exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
                    ()))

        if process_extras and hasattr(field, 'extra_filters'):
            extra_filters.extend(field.extra_filters(names, pos, negate))
        if direct:
            if m2m:
                # Many-to-many field defined on the current model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2,
                            to_col2, opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                        field.m2m_target_field_name())[0].column
                    to_col1 = field.m2m_column_name()
                    opts = field.rel.to._meta
                    table2 = opts.db_table
                    from_col2 = field.m2m_reverse_name()
                    to_col2 = opts.get_field_by_name(
                        field.m2m_reverse_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1,
                            to_col1, table2, from_col2, to_col2, opts,
                            target)

                int_alias = self.join((alias, table1, from_col1, to_col1),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                if int_alias == table2 and from_col2 == to_col2:
                    joins.append(int_alias)
                    alias = int_alias
                else:
                    alias = self.join(
                            (int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
            elif field.rel:
                # One-to-one or many-to-one field
                if cached_data:
                    (table, from_col, to_col, opts, target) = cached_data
                else:
                    opts = field.rel.to._meta
                    target = field.rel.get_related_field()
                    table = opts.db_table
                    from_col = field.column
                    to_col = target.column
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                            opts, target)

                alias = self.join((alias, table, from_col, to_col),
                        exclusions=exclusions,
                        nullable=self.is_nullable(field))
                joins.append(alias)
            else:
                # Non-relation fields.
                target = field
                break
        else:
            orig_field = field
            field = field.field
            if m2m:
                # Many-to-many field defined on the target model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2,
                            to_col2, opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                        field.m2m_reverse_target_field_name())[0].column
                    to_col1 = field.m2m_reverse_name()
                    opts = orig_field.opts
                    table2 = opts.db_table
                    from_col2 = field.m2m_column_name()
                    to_col2 = opts.get_field_by_name(
                        field.m2m_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1,
                            to_col1, table2, from_col2, to_col2, opts,
                            target)

                int_alias = self.join((alias, table1, from_col1, to_col1),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                alias = self.join((int_alias, table2, from_col2, to_col2),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                joins.extend([int_alias, alias])
            else:
                # One-to-many field (ForeignKey defined on the target model)
                if cached_data:
                    (table, from_col, to_col, opts, target) = cached_data
                else:
                    local_field = opts.get_field_by_name(
                            field.rel.field_name)[0]
                    opts = orig_field.opts
                    table = opts.db_table
                    from_col = local_field.column
                    to_col = field.column
                    # In case of a recursive FK, use the to_field for
                    # reverse lookups as well
                    if orig_field.model is local_field.model:
                        target = opts.get_field_by_name(
                            field.rel.field_name)[0]
                    else:
                        target = opts.pk
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                            opts, target)

                alias = self.join((alias, table, from_col, to_col),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                joins.append(alias)
        for (dupe_opts, dupe_col) in dupe_set:
            if int_alias is None:
                to_avoid = alias
            else:
                to_avoid = int_alias
            self.update_dupe_avoidance(dupe_opts, dupe_col, to_avoid)

    if pos != len(names) - 1:
        # We broke out early (non-relation field) with components left
        # over: the remaining part is not a valid lookup.
        if pos == len(names) - 2:
            raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
        else:
            raise FieldError("Join on field %r not permitted." % name)

    return field, target, opts, joins, last, extra_filters
def trim_joins(self, target, join_list, last, trim, nonnull_check=False):
    """
    Sometimes joins at the end of a multi-table sequence can be trimmed. If
    the final join is against the same column as we are comparing against,
    and is an inner join, we can go back one step in a join chain and
    compare against the LHS of the join instead (and then repeat the
    optimization). The result, potentially, involves fewer table joins.

    The 'target' parameter is the final field being joined to, 'join_list'
    is the full list of join aliases.

    The 'last' list contains offsets into 'join_list', corresponding to
    each component of the filter. Many-to-many relations, for example, add
    two tables to the join list and we want to deal with both tables the
    same way, so 'last' has an entry for the first of the two tables and
    then the table immediately after the second table, in that case.

    The 'trim' parameter forces the final piece of the join list to be
    trimmed before anything. See the documentation of add_filter() for
    details about this.

    The 'nonnull_check' parameter is True when we are using inner joins
    between tables explicitly to exclude NULL entries. In that case, the
    tables shouldn't be trimmed, because the very action of joining to them
    alters the result set.

    Returns the final active column and table alias and the new active
    join_list.
    """
    final = len(join_list)
    penultimate = last.pop()
    if penultimate == final:
        penultimate = last.pop()
    if trim and final > 1:
        # Drop the last filter component's joins entirely, releasing
        # their references, and compare against the join's LHS column.
        extra = join_list[penultimate:]
        join_list = join_list[:penultimate]
        final = penultimate
        penultimate = last.pop()
        col = self.alias_map[extra[0]].lhs_join_col
        for alias in extra:
            self.unref_alias(alias)
    else:
        col = target.column
    alias = join_list[-1]

    # Walk backwards while the last join is an INNER join on exactly the
    # column being compared; the comparison can then use the LHS column.
    while final > 1:
        join = self.alias_map[alias]
        if (col != join.rhs_join_col or join.join_type != self.INNER or
                nonnull_check):
            break
        self.unref_alias(alias)
        alias = join.lhs_alias
        col = join.lhs_join_col
        join_list.pop()
        final -= 1
        if final == penultimate:
            penultimate = last.pop()
    return col, alias, join_list
def update_dupe_avoidance(self, opts, col, alias):
    """
    For a column that is one of multiple pointing to the same table,
    records in the internal bookkeeping that 'alias' must not be reused
    for any of the other columns on 'opts' sharing that target.
    """
    ident = id(opts)
    for name in opts.duplicate_targets[col]:
        self.dupe_avoidance.setdefault((ident, name), set()).add(alias)
def split_exclude(self, filter_expr, prefix, can_reuse):
    """
    When doing an exclude against any kind of N-to-many relation, we need
    to use a subquery. This method constructs the nested query, given the
    original exclude filter (filter_expr) and the portion up to the first
    N-to-many relation field.
    """
    # Build an inner query that matches the excluded condition; the outer
    # query then filters with NOT (prefix IN <subquery>).
    query = Query(self.model)
    query.add_filter(filter_expr)
    # Use a distinct alias prefix so the inner query's aliases cannot
    # clash with ours.
    query.bump_prefix()
    query.clear_ordering(True)
    query.set_start(prefix)
    # Adding extra check to make sure the selected field will not be null
    # since we are adding a IN <subquery> clause. This prevents the
    # database from tripping over IN (...,NULL,...) selects and returning
    # nothing
    alias, col = query.select[0]
    query.where.add((Constraint(alias, col, None), 'isnull', False), AND)
    self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
        can_reuse=can_reuse)
    # If there's more than one join in the inner query (before any initial
    # bits were trimmed -- which means the last active table is more than
    # two places into the alias list), we need to also handle the
    # possibility that the earlier joins don't match anything by adding a
    # comparison to NULL (e.g. in
    # Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
    # would otherwise be overlooked).
    active_positions = [pos for (pos, count) in
        enumerate(six.itervalues(query.alias_refcount)) if count]
    if active_positions[-1] > 1:
        self.add_filter(('%s__isnull' % prefix, False), negate=True,
            trim=True, can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
    """
    Adjust the limits on the rows retrieved.

    'low'/'high' read Pythonically, like slice bounds; when the SQL query
    is created they become OFFSET/LIMIT.  Limits passed here are applied
    relative to the existing constraints: 'low' is added to the current
    low mark and both marks are clamped to any existing high mark.
    """
    if high is not None:
        candidate_high = self.low_mark + high
        self.high_mark = (candidate_high if self.high_mark is None
                          else min(self.high_mark, candidate_high))
    if low is not None:
        candidate_low = self.low_mark + low
        self.low_mark = (candidate_low if self.high_mark is None
                         else min(self.high_mark, candidate_low))
def clear_limits(self):
    """Reset the low/high marks to "no limits"."""
    self.low_mark = 0
    self.high_mark = None
def can_filter(self):
    """
    Return True if adding filters to this instance is still possible,
    i.e. no offset (low mark) or limit (high mark) has been applied.
    """
    if self.low_mark:
        return False
    return self.high_mark is None
def clear_select_clause(self):
    """Remove every field from the SELECT clause."""
    self.select, self.select_fields = [], []
    self.default_cols = False
    self.select_related = False
    # Also hide all extra-select and aggregate columns.
    self.set_extra_mask(())
    self.set_aggregate_mask(())
def clear_select_fields(self):
    """
    Clear the list of fields to select (but not extra_select columns);
    some queryset types completely replace the existing select list.
    """
    self.select, self.select_fields = [], []
def add_distinct_fields(self, *field_names):
    """Record the given fields as the query's "distinct on" columns."""
    self.distinct = True
    self.distinct_fields = field_names
def add_fields(self, field_names, allow_m2m=True):
    """
    Adds the given (model) fields to the select set. The field names are
    added in the order specified.
    """
    alias = self.get_initial_alias()
    opts = self.get_meta()
    try:
        for name in field_names:
            field, target, u2, joins, u3, u4 = self.setup_joins(
                name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
                True)
            final_alias = joins[-1]
            col = target.column
            if len(joins) > 1:
                join = self.alias_map[final_alias]
                if col == join.rhs_join_col:
                    # The final join is a no-op (it joins on the very
                    # column we want); drop it and select from the
                    # previous table instead.
                    self.unref_alias(final_alias)
                    final_alias = join.lhs_alias
                    col = join.lhs_join_col
                    joins = joins[:-1]
                # Promote intermediate joins so rows are not lost when the
                # related object is absent.
                self.promote_joins(joins[1:])
            self.select.append((final_alias, col))
            self.select_fields.append(field)
    except MultiJoin:
        raise FieldError("Invalid field name: '%s'" % name)
    except FieldError:
        if LOOKUP_SEP in name:
            # For lookups spanning over relationships, show the error
            # from the model on which the lookup failed.
            raise
        else:
            names = sorted(opts.get_all_field_names() + list(self.extra)
                + list(self.aggregate_select))
            raise FieldError("Cannot resolve keyword %r into field. "
                "Choices are: %s" % (name, ", ".join(names)))
    self.remove_inherited_models()
def add_ordering(self, *ordering):
    """
    Append the given 'ordering' items to the query's "order by" clause.

    Items are field names (not column names), optionally prefixed with a
    direction ('-' or '?'), or ordinals referring to select-list columns.
    An empty 'ordering' clears all ordering, including the model default.
    """
    invalid = [term for term in ordering if not ORDER_PATTERN.match(term)]
    if invalid:
        raise FieldError('Invalid order_by arguments: %s' % invalid)
    if ordering:
        self.order_by.extend(ordering)
    else:
        self.default_ordering = False
def clear_ordering(self, force_empty=False):
    """
    Remove any ordering settings; with force_empty=True the resulting
    query has no ordering at all (not even the model's default).
    """
    self.order_by, self.extra_order_by = [], ()
    if force_empty:
        self.default_ordering = False
def set_group_by(self):
    """
    Expand the GROUP BY clause required by the query: usually the set of
    all non-aggregate selected columns.  (When the backend supports
    grouping by primary key and the query is equivalent, that
    optimization is applied automatically elsewhere.)
    """
    # A fresh copy of the select list: same elements, independent list.
    self.group_by = list(self.select)
def add_count_column(self):
    """
    Converts the query to do count(...) or count(distinct(pk)) in order to
    get its size.
    """
    if not self.distinct:
        if not self.select:
            # No explicit columns: simply count every row.
            count = self.aggregates_module.Count('*', is_summary=True)
        else:
            assert len(self.select) == 1, \
                "Cannot add count col with multiple cols in 'select': %r" % self.select
            count = self.aggregates_module.Count(self.select[0])
    else:
        opts = self.model._meta
        if not self.select:
            # Distinct with no explicit select: count distinct primary
            # keys of the base table.
            count = self.aggregates_module.Count((self.join((None, opts.db_table, None, None)), opts.pk.column),
                is_summary=True, distinct=True)
        else:
            # Because of SQL portability issues, multi-column, distinct
            # counts need a sub-query -- see get_count() for details.
            assert len(self.select) == 1, \
                "Cannot add count col with multiple cols in 'select'."
            count = self.aggregates_module.Count(self.select[0], distinct=True)
        # Distinct handling is done in Count(), so don't do it at this
        # level.
        self.distinct = False
    # Set only aggregate to be the count column.
    # Clear out the select cache to reflect the new unmasked aggregates.
    self.aggregates = {None: count}
    self.set_aggregate_mask(None)
    self.group_by = None
def add_select_related(self, fields):
    """
    Set up the select_related data structure so that only the given
    related models are selected (rather than all of them, as with
    select_related=True).
    """
    # Build a nested dict, e.g. "a__b" and "a__c" become
    # {"a": {"b": {}, "c": {}}}.
    related_tree = {}
    for field in fields:
        level = related_tree
        for part in field.split(LOOKUP_SEP):
            level = level.setdefault(part, {})
    self.select_related = related_tree
    self.related_select_cols = []
    self.related_select_fields = []
def add_extra(self, select, select_params, where, params, tables, order_by):
    """
    Adds data to the various extra_* attributes for user-created additions
    to the query.
    """
    if select:
        # We need to pair any placeholder markers in the 'select'
        # dictionary with their parameters in 'select_params' so that
        # subsequent updates to the select dictionary also adjust the
        # parameters appropriately.
        select_pairs = SortedDict()
        if select_params:
            param_iter = iter(select_params)
        else:
            param_iter = iter([])
        for name, entry in select.items():
            entry = force_text(entry)
            entry_params = []
            pos = entry.find("%s")
            while pos != -1:
                # Consume one parameter for every "%s" placeholder found.
                entry_params.append(next(param_iter))
                pos = entry.find("%s", pos + 2)
            select_pairs[name] = (entry, entry_params)
        # This is order preserving, since self.extra_select is a SortedDict.
        self.extra.update(select_pairs)
    if where or params:
        self.where.add(ExtraWhere(where, params), AND)
    if tables:
        self.extra_tables += tuple(tables)
    if order_by:
        self.extra_order_by = order_by
def clear_deferred_loading(self):
    """Reset deferred loading: no field names, back in "defer" mode."""
    self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
    """
    Mark the given model field names as excluded from automatic column
    loading.  Related-model fields use the literal double-underscore
    format ("foo__bar"); the splitting happens later, in get_columns().

    In "defer" mode the names are added to the deferred set; in
    "immediate load" mode they are removed from the immediate set.
    """
    current, defer = self.deferred_loading
    if defer:
        # Grow the set of deferred names.
        self.deferred_loading = current.union(field_names), True
    else:
        # Deferring a field means dropping it from the immediate-load set.
        self.deferred_loading = current.difference(field_names), False
def add_immediate_loading(self, field_names):
    """
    Replace the set of "immediate loading" field names with the given
    names.  Existing deferrals are respected: any name currently deferred
    is removed from the new immediate set.  'pk' is translated to the
    model's actual primary-key field name.
    """
    current, defer = self.deferred_loading
    wanted = set(field_names)
    if 'pk' in wanted:
        wanted.remove('pk')
        wanted.add(self.model._meta.pk.name)
    if defer:
        # Honour existing deferrals: drop deferred names from the new set.
        self.deferred_loading = wanted.difference(current), False
    else:
        # Immediate loading overrides any existing immediate values.
        self.deferred_loading = wanted, False
def get_loaded_field_names(self):
    """
    If any fields are marked to be deferred, return a dictionary mapping
    models to the set of field names that WILL be loaded; models absent
    from the dictionary have no deferred fields.  Returns an empty dict
    when nothing is deferred.

    The result is cached because this is called several times per query
    (compiler.fill_related_selections, query.iterator).
    """
    if not hasattr(self, '_loaded_field_names_cache'):
        collection = {}
        self.deferred_to_data(collection, self.get_loaded_field_names_cb)
        self._loaded_field_names_cache = collection
    return self._loaded_field_names_cache
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
    """Set the mask of aggregates that will actually be returned by the SELECT."""
    self.aggregate_select_mask = None if names is None else set(names)
    # Invalidate the cached masked-aggregate mapping.
    self._aggregate_select_cache = None
def set_extra_mask(self, names):
    """
    Set the mask of extra select items that will be returned by SELECT.
    The items themselves are kept on the Query, since they might be used
    again later.
    """
    self.extra_select_mask = None if names is None else set(names)
    # Invalidate the cached masked extra-select mapping.
    self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
def set_start(self, start):
    """
    Sets the table from which to start joining. The start position is
    specified by the related attribute from the base model. This will
    automatically set to the select column to be the column linked from the
    previous table.

    This method is primarily for internal use and the error checking isn't
    as friendly as add_filter(). Mostly useful for querying directly
    against the join table of many-to-many relation in a subquery.
    """
    opts = self.model._meta
    alias = self.get_initial_alias()
    field, col, opts, joins, last, extra = self.setup_joins(
        start.split(LOOKUP_SEP), opts, alias, False)
    select_col = self.alias_map[joins[1]].lhs_join_col
    select_alias = alias
    # The call to setup_joins added an extra reference to everything in
    # joins. Reverse that.
    for alias in joins:
        self.unref_alias(alias)
    # We might be able to trim some joins from the front of this query,
    # providing that we only traverse "always equal" connections (i.e. rhs
    # is *always* the same value as lhs).
    for alias in joins[1:]:
        join_info = self.alias_map[alias]
        if (join_info.lhs_join_col != select_col
                or join_info.join_type != self.INNER):
            break
        # This join is redundant for the starting point: advance the
        # selected alias/column one table forward and drop the reference.
        self.unref_alias(select_alias)
        select_alias = join_info.rhs_alias
        select_col = join_info.rhs_join_col
    self.select = [(select_alias, select_col)]
    self.remove_inherited_models()
def is_nullable(self, field):
    """
    Check whether the given field should be treated as nullable.

    Some backends treat '' as NULL, and Django then treats such fields as
    nullable for those backends even when field.null is False.
    """
    # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
    # (nor should it have) knowledge of which connection is going to be
    # used. The proper fix would be to defer all decisions where
    # is_nullable() is needed to the compiler stage, but that is not easy
    # to do currently.
    features = connections[DEFAULT_DB_ALIAS].features
    if features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:
        return True
    return field.null
def get_order_dir(field, default='ASC'):
    """
    Return (field_name, direction) for an order specification; '-foo'
    yields ('foo', 'DESC').  'default' names the direction used for an
    unprefixed (or '+'-prefixed) field; '-' always sorts the opposite way.
    """
    directions = ORDER_DIR[default]
    if field[0] == '-':
        # Strip the '-' prefix and flip to the reversed direction.
        return field[1:], directions[1]
    return field, directions[0]
def setup_join_cache(sender, **kwargs):
    """
    The information needed to join between model fields is invariant over
    the life of the model, so it is cached on the model's Options class
    rather than recomputed every time.  This initialises the (empty)
    cache when the model class is created.
    """
    sender._meta._join_cache = {}
signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
    """
    A helper function to add "value" to the set of values for "key",
    whether or not "key" already exists.
    """
    # setdefault covers both the "key present" and "key missing" cases in
    # a single dict operation.
    data.setdefault(key, set()).add(value)
| 43.690909 | 133 | 0.588555 |
d2786b1195c9145fbc39291d0355ab877150d331 | 1,589 | py | Python | pqviz/check_suppressed.py | mitre/PQViz | 229e662c408e0532df44585d134b8e79eb6c4cf8 | [
"Apache-2.0"
] | null | null | null | pqviz/check_suppressed.py | mitre/PQViz | 229e662c408e0532df44585d134b8e79eb6c4cf8 | [
"Apache-2.0"
] | null | null | null | pqviz/check_suppressed.py | mitre/PQViz | 229e662c408e0532df44585d134b8e79eb6c4cf8 | [
"Apache-2.0"
] | 1 | 2022-01-18T21:00:39.000Z | 2022-01-18T21:00:39.000Z | import glob
from pathlib import Path
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def check_suppressed(df, attribute):
    """Summarise suppressed (missing) prevalence values per subpopulation.

    Groups ``df`` by "Weight Category" and ``attribute`` and counts, per
    group, how many rows have a missing value.  Groups with no suppressed
    values are dropped.  Prints a message and returns None when nothing
    is suppressed; otherwise returns a DataFrame with the group keys and
    a "Number of subpopulations with suppressed values" column.
    """
    grouped = df.groupby(["Weight Category", attribute])
    # size() - count() == number of NaN entries per column in each group.
    missing = grouped.count().rsub(grouped.size(), axis=0)
    missing = missing[missing["Prevalence"] > 0].reset_index()
    if missing.empty:
        print("There are no suppressed values for this demographic level.")
    else:
        wanted_cols = ["Weight Category", attribute, "Prevalence"]
        summary = missing.loc[:, wanted_cols].copy()
        summary = summary.rename(
            columns={"Prevalence": "Number of subpopulations with suppressed values"}
        )
        return summary
def suppressed_zcta3(df, category, prevalence_type):
    """
    Return the ZCTA3 values with suppressed prevalence for the specified
    category and prevalence type.

    Parameters:
        df: DataFrame of prevalence data.
        category: Weight category/class to match.
        prevalence_type: One of ['Age-Adjusted', 'Crude', 'Weighted'].

    Returns:
        A plain Python list (not a numpy array, despite the original
        docstring) of ZCTA3 values whose "Prevalence" is missing.
    """
    mask = (
        (df["Weight Category"] == category)
        & (df["Prevalence type"] == prevalence_type)
        & (df["Prevalence"].isna())
    )
    return df.loc[mask, "zcta3"].tolist()
| 33.808511 | 88 | 0.705475 |
8ccbeb129b8038555fb9c85a50ac7986000415df | 294 | py | Python | xlrdconvert.py | cropleyc/sturdy-bassoon | 4245656753fffeb06e87ae9b6f579a51da1a9e63 | [
"MIT"
] | null | null | null | xlrdconvert.py | cropleyc/sturdy-bassoon | 4245656753fffeb06e87ae9b6f579a51da1a9e63 | [
"MIT"
] | null | null | null | xlrdconvert.py | cropleyc/sturdy-bassoon | 4245656753fffeb06e87ae9b6f579a51da1a9e63 | [
"MIT"
] | null | null | null | import xlrd
import csv
# Convert the first sheet of "Query (2).xls" to "new.csv".
with xlrd.open_workbook('Query (2).xls') as wb:
    sheet = wb.sheet_by_index(0)  # first sheet; use wb.sheet_by_name(...) for a named one
    with open('new.csv', 'w', newline="") as out_file:
        writer = csv.writer(out_file)
        for row_idx in range(sheet.nrows):
            writer.writerow(sheet.row_values(row_idx))
45e3628c468b6275f5646d4484a431496e0fca04 | 2,137 | py | Python | tfx/components/bulk_inferrer/component_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 1,813 | 2019-02-04T17:17:30.000Z | 2022-03-29T13:39:30.000Z | tfx/components/bulk_inferrer/component_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 2,710 | 2019-02-14T00:41:00.000Z | 2022-03-31T07:23:00.000Z | tfx/components/bulk_inferrer/component_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 731 | 2019-02-04T17:59:18.000Z | 2022-03-31T06:45:51.000Z | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.bulk_inferrer.component."""
import tensorflow as tf
from tfx.components.bulk_inferrer import component
from tfx.proto import bulk_inferrer_pb2
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
class ComponentTest(tf.test.TestCase):
  """Checks BulkInferrer's output-channel wiring for both output modes."""

  def setUp(self):
    super().setUp()
    # Input channels shared by every test case.
    self._examples = channel_utils.as_channel([standard_artifacts.Examples()])
    self._model = channel_utils.as_channel([standard_artifacts.Model()])
    self._model_blessing = channel_utils.as_channel(
        [standard_artifacts.ModelBlessing()])

  def testConstructInferenceResult(self):
    # Without an output_example_spec the component emits an InferenceResult.
    inferrer = component.BulkInferrer(
        examples=self._examples,
        model=self._model,
        model_blessing=self._model_blessing)
    self.assertEqual(
        'InferenceResult',
        inferrer.outputs[
            standard_component_specs.INFERENCE_RESULT_KEY].type_name)
    self.assertNotIn('output_examples', inferrer.outputs.keys())

  def testConstructOutputExample(self):
    # With an output_example_spec the component emits Examples instead.
    inferrer = component.BulkInferrer(
        examples=self._examples,
        model=self._model,
        model_blessing=self._model_blessing,
        output_example_spec=bulk_inferrer_pb2.OutputExampleSpec())
    self.assertEqual(
        'Examples',
        inferrer.outputs[
            standard_component_specs.OUTPUT_EXAMPLES_KEY].type_name)
    self.assertNotIn('inference_result', inferrer.outputs.keys())
# Allow running this test module directly: `python component_test.py`.
if __name__ == '__main__':
  tf.test.main()
| 36.844828 | 78 | 0.755264 |
49e0661732666518e7c0080c1b9b1d2539b97088 | 1,992 | py | Python | Dynamic Programming/unique_binary_search_trees.py | lim1202/LeetCode | 931c8d5a8d80206fb329dc7792416d45804d2ba3 | [
"MIT"
] | null | null | null | Dynamic Programming/unique_binary_search_trees.py | lim1202/LeetCode | 931c8d5a8d80206fb329dc7792416d45804d2ba3 | [
"MIT"
] | null | null | null | Dynamic Programming/unique_binary_search_trees.py | lim1202/LeetCode | 931c8d5a8d80206fb329dc7792416d45804d2ba3 | [
"MIT"
] | null | null | null | r"""Unique Binary Search Trees
Given n, how many structurally unique BSTs (binary search trees) that store values 1...n?
For example, Given n = 3, there are a total of 5 unique BSTs.
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
"""
from binarytree import Node
def num_trees(n):
    """Return the number of structurally unique BSTs storing values 1..n.

    Uses the Catalan-number recurrence: with j nodes in the left subtree
    the right subtree holds i - j - 1 nodes, so
    dp[i] = sum(dp[j] * dp[i - j - 1] for j in range(i)).

    Returns 0 for n == 0, matching the original implementation (even
    though combinatorially C(0) == 1).
    """
    if n == 0:
        return 0
    dp = [0] * (n + 1)
    dp[0] = dp[1] = 1
    for i in range(2, n + 1):
        for j in range(i):
            # Left subtree has j nodes, right subtree i - j - 1 nodes.
            dp[i] += dp[j] * dp[i - j - 1]
    return dp[n]
r"""
Given n, generate all structurally unique BST's (binary search trees) that store values 1...n.
For example, Given n = 3, your program should return all 5 unique BST's shown below.
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
"""
def generate_trees(n):
    """Return the root nodes of every unique BST over the values 1..n."""
    return generate(1, n)
def generate(start, stop):
    """Recursively build every unique BST whose keys are start..stop.

    Returns a list of root Nodes.  An empty range yields [None] so that
    callers can attach "no subtree" uniformly.
    """
    if start > stop:
        # No keys left: the single possibility is the absent subtree.
        return [None]
    trees = []
    for root_value in range(start, stop + 1):
        # Every pairing of a left subtree over the smaller keys with a
        # right subtree over the larger keys yields a distinct BST.
        for left_subtree in generate(start, root_value - 1):
            for right_subtree in generate(root_value + 1, stop):
                root = Node(root_value)
                root.left = left_subtree
                root.right = right_subtree
                trees.append(root)
    return trees
if __name__ == '__main__':
    # Demo: count unique BSTs for n = 1..9, then print all trees for n = 3.
    # Fix: the original strings used SQL-style doubled quotes ('BST''s'),
    # which Python concatenates to "BSTs"; a real apostrophe was intended.
    # The stray `pass` statements after the print calls were removed.
    for i in range(1, 10):
        print("Given n = {0}, there are a total of {1} unique BST's.".format(i, num_trees(i)))
    print("Given n = 3, return all 5 unique BST's shown below")
    for node in generate_trees(3):
        print(node)
| 25.21519 | 95 | 0.471888 |
d33328a234f96b34d00e2710300f323f474dcc2c | 14,531 | py | Python | fixture/contact.py | mlaxmla/python_training | 3b6854ca92826d19091aa473ec1ba11e7be7ba56 | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | mlaxmla/python_training | 3b6854ca92826d19091aa473ec1ba11e7be7ba56 | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | mlaxmla/python_training | 3b6854ca92826d19091aa473ec1ba11e7be7ba56 | [
"Apache-2.0"
] | null | null | null | __author__ = 'mla'
from selenium.webdriver.support.ui import Select
from model.contact import Contact
import re
# from fixture.application import Application #ASK4IT: czy tego nie potrzebujemy dzieki temu ze przenieslismy fixtury do conftest.py i 'przedrostek' "app." odwoluje sie do nich?
class ContactHelper:
def __init__(self, app):
    # Keep a reference to the application fixture to reach its WebDriver.
    self.app = app
def open_home_page2(self):
    wd = self.app.wd
    # Navigate only if we are not already on the address book home page.
    if not (wd.current_url.endswith("/addressbook/")):
        wd.get("http://localhost/addressbook/")
def create(self, contact):
    """Create a new contact from the given model object."""
    wd = self.app.wd
    # init new-contact creation
    wd.find_element_by_link_text("add new").click()
    self.fill_contact_form(contact)
    # submit contact creation
    wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
    # Invalidate the cached contact list; it is stale after creation.
    self.contact_cache = None
def change_field_value(self, field_name, text):
    """Clear the named input and type ``text``; None leaves it untouched."""
    wd = self.app.wd
    if text is None:
        return
    wd.find_element_by_name(field_name).clear()
    wd.find_element_by_name(field_name).send_keys(text)
def fill_contact_form(self, contact):
    """Type all of ``contact``'s attributes into the open contact form.

    Fields whose value is None are left untouched (see change_field_value).
    The birthday/anniversary day and month dropdowns are disabled (to-do).
    """
    wd = self.app.wd
    # fill contact form
    self.change_field_value("firstname", contact.firstname)
    self.change_field_value("middlename", contact.middlename)
    self.change_field_value("lastname", contact.lastname)
    self.change_field_value("nickname", contact.nickname)
    self.change_field_value("title", contact.title)
    self.change_field_value("company", contact.company)
    self.change_field_value("address", contact.address)
    self.change_field_value("home", contact.home)
    self.change_field_value("mobile", contact.mobile)
    self.change_field_value("work", contact.work)
    self.change_field_value("fax", contact.fax)
    self.change_field_value("email", contact.email)
    self.change_field_value("email2", contact.email2)
    self.change_field_value("email3", contact.email3)
    # NOTE(review): duplicate of the "firstname" call above; harmless but
    # redundant -- candidate for removal.
    self.change_field_value("firstname", contact.firstname)
    # to-do
    #wd.find_element_by_name("bday").click()
    #Select(wd.find_element_by_name("bday")).select_by_visible_text("1")
    #wd.find_element_by_xpath("//option[@value='1']").click()
    #wd.find_element_by_name("bmonth").click()
    #Select(wd.find_element_by_name("bmonth")).select_by_visible_text("June")
    #wd.find_element_by_xpath("//option[@value='June']").click()
    self.change_field_value("byear", contact.byear)
    # to-do
    #wd.find_element_by_name("aday").click()
    #Select(wd.find_element_by_name("aday")).select_by_visible_text("2")
    #wd.find_element_by_css_selector("select[name=\"aday\"] > option[value=\"2\"]").click()
    #wd.find_element_by_name("amonth").click()
    # to-do
    #Select(wd.find_element_by_name("amonth")).select_by_visible_text("August")
    #wd.find_element_by_xpath("(//option[@value='August'])[2]").click()
    self.change_field_value("ayear", contact.ayear)
    self.change_field_value("address2", contact.address2)
    # wd.find_element_by_name("phone").clear()
    # wd.find_element_by_name("phone").send_keys(contact.phone)
    self.change_field_value("phone2", contact.phone2)
    self.change_field_value("notes", contact.notes)
def delete_first_contact(self):
    # Convenience wrapper: delete the contact shown in the first row.
    self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
    wd = self.app.wd
    # ASK4IT (translated from Polish): how can I make this work through
    # "app." from conftest.py? Could someone explain this to me, please...
    self.open_home_page2()
    # select the contact at the given position
    wd.find_elements_by_name("selected[]")[index].click()
    # submit deletion
    wd.find_element_by_xpath("//input[@value='Delete']").click()
    # close alert
    wd.switch_to.alert.accept()
    # home_page opened Application.open_home_page()
    self.contact_cache = None
def delete_contact_by_id(self, id):
    wd = self.app.wd
    self.open_home_page2()
    # select the contact whose checkbox value equals the given id
    wd.find_element_by_css_selector("input[value='%s']" % id).click()
    # submit deletion
    wd.find_element_by_xpath("//input[@value='Delete']").click()
    # close alert
    wd.switch_to.alert.accept()
    # home_page opened Application.open_home_page()
    self.contact_cache = None
def modify_first_contact(self, new_contact_data):
    # Convenience wrapper: edit the contact shown in the first row.
    self.modify_contact_by_index(0, new_contact_data)
def modify_contact_by_index(self, index, new_contact_data):
    wd = self.app.wd
    self.open_home_page2()
    # init edit of the contact at the given position
    wd.find_elements_by_xpath("//a//img[@title='Edit']")[index].click()  # and @xpath='1'
    #self.modify(new_group_data)
    self.fill_contact_form(new_contact_data)
    # submit update
    wd.find_element_by_name("update").click()
    self.contact_cache = None
def modify_contact_by_id(self, id, new_contact_data):
    wd = self.app.wd
    self.open_home_page2()
    self.choose_contact_by_id(id)
    self.fill_contact_form(new_contact_data)
    # Save the change (translated from Polish: "Zapisz zmiane")
    wd.find_element_by_xpath("//input[@value='Update']").click()
    self.contact_cache = None
def choose_contact_by_id(self, id):
    wd = self.app.wd
    self.open_home_page2()
    # Open the edit page of the contact with the given id.
    wd.find_element_by_xpath("//a[contains(@href, 'edit.php?id=%s')]" % id).click()
def choose_contact_by_index(self, index):
    wd = self.app.wd
    # Tick the selection checkbox of the contact at the given position.
    wd.find_elements_by_name("selected[]")[index].click()
def del_contact_from_group(self):
    wd = self.app.wd
    # Remove the selected contact(s) from the currently shown group.
    wd.find_element_by_xpath("//input[@name='remove']").click()
    # return to the group from which the contact was removed
    # (translated from Polish)
    wd.find_element_by_xpath("//a[contains(@href, './?group=')]").click()
def modify(self, contact):
    wd = self.app.wd
    # Assumes the edit form is already open; retype the fields and save.
    self.fill_contact_form(contact)
    # submit update
    wd.find_element_by_name("update").click()
def choose_group_for_contact_by_id(self, id):
    wd = self.app.wd
    # check webdriver issue...
    wd.find_element_by_xpath("//select[@name='to_group']//option[@value='%s']" % id).click()
    # Confirm the choice with the button (translated from Polish)
    wd.find_element_by_xpath("//input[@value='Add to']").click()
    self.open_home_page2()
def choose_group_for_contact_del_by_id(self, id):
    wd = self.app.wd
    #self.otworz_strone_startowa()
    # Filter the contact list to the group with the given id.
    wd.find_element_by_xpath("//select[@name='group']//option[@value='%s']" % id).click()
def count(self):
    wd = self.app.wd
    self.open_home_page2()
    # Each contact row carries exactly one "selected[]" checkbox.
    return len(wd.find_elements_by_name("selected[]"))
def count_contacts_in_choosen_group(self):
    # NOTE(review): "choosen" is a typo for "chosen", but the name is part
    # of the public interface, so it is kept.  Assumes a group filter is
    # already applied to the contact list.
    wd = self.app.wd
    return len(wd.find_elements_by_name("selected[]"))
# Cached result of the last contact-list scrape; reset to None by any
# operation that changes the contact list.
contact_cache = None

def get_contacts_list(self):
    """Scrape (id, lastname, firstname, merged phones) for every row.

    The result is cached in contact_cache until a mutating operation
    invalidates it.  Fix: the original also fetched column 6 a second
    time into an unused ``all_phones_from_contactlist`` variable -- a
    dead assignment costing one extra WebDriver round-trip per row.
    """
    if self.contact_cache is None:
        wd = self.app.wd
        self.open_home_page2()
        self.contact_cache = []
        row = 1
        for element in wd.find_elements_by_xpath("//*[@name='entry']"):
            id = element.find_element_by_name("selected[]").get_attribute("value")
            row_xpath = "//*[@id='maintable']/tbody/tr[%d]" % (row + 1)
            lastname_text = element.find_element_by_xpath(row_xpath + "/td[2]").text
            firstname_text = element.find_element_by_xpath(row_xpath + "/td[3]").text
            # All phone numbers are shown merged in column 6.
            all_phones = element.find_element_by_xpath(row_xpath + "/td[6]").text
            row += 1
            self.contact_cache.append(Contact(lastname=lastname_text,
                                              firstname=firstname_text, id=id,
                                              all_phones_from_home_page=all_phones))
    return list(self.contact_cache)
def get_all_contacts_list(self):
    """Scrape id, names, address and the merged e-mail/phone columns for
    every contact row; results share the contact_cache like
    get_contacts_list()."""
    if self.contact_cache is None:
        wd = self.app.wd
        self.open_home_page2()
        self.contact_cache = []
        r = 1
        for element in wd.find_elements_by_xpath("//*[@name='entry']"):
            id = element.find_element_by_name("selected[]").get_attribute("value")
            lastname_text = element.find_element_by_xpath("//*[@id='maintable']/tbody/tr["+str(r+1)+"]/td[2]").text
            firstname_text = element.find_element_by_xpath("//*[@id='maintable']/tbody/tr["+str(r+1)+"]/td[3]").text
            address_text = element.find_element_by_xpath("//*[@id='maintable']/tbody/tr["+str(r+1)+"]/td[4]").text
            # Column 5 holds all e-mails, column 6 all phone numbers.
            all_emails = element.find_element_by_xpath("//*[@id='maintable']/tbody/tr[" + str(r + 1) + "]/td[5]").text
            all_phones = element.find_element_by_xpath("//*[@id='maintable']/tbody/tr[" + str(r + 1) + "]/td[6]").text
            r = r + 1
            self.contact_cache.append(Contact(lastname=lastname_text, firstname=firstname_text, id=id, address=address_text, all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones))
    return list(self.contact_cache)
#how2FIX-IT-Alexei?
#def get_contacts_list_webinar(self):
# if self.contact_cache is None:
# wd = self.app.wd
# self.open_home_page2()
# self.contact_cache = []
# for row in wd.find_elements_by_name("entry"):
# cells = row.find_elements_by_tag_name("td")
# id = cells[0].find_element_by_tag_name("input").get_attribute("value")
# firstname_text = cells[1].text
# lastname_text = cells[2].text
# self.contact_cache.append(Contact(lastname=lastname_text, firstname=firstname_text, id=id))
# return list(self.contact_cache)
def open_contact_to_edit_by_index(self, index):
    wd = self.app.wd
    self.open_home_page2()
    row = wd.find_elements_by_name("entry")[index]
    # Column 8 holds the edit (pencil) icon link.
    cell = row.find_elements_by_tag_name("td")[7]
    cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
    wd = self.app.wd
    self.open_home_page2()
    row = wd.find_elements_by_name("entry")[index]
    # Column 7 holds the details (view) icon link.
    cell = row.find_elements_by_tag_name("td")[6]
    cell.find_element_by_tag_name("a").click()
def clear(self, s):
    """Strip phone-number formatting characters: '(', ')', space, '=', '-'.

    Fix: the original pattern "[() -=]" placed '-' between ' ' and '=',
    which the regex engine reads as the character RANGE 0x20-0x3D -- that
    range includes the digits 0-9, so the digits themselves were stripped
    too.  Moving '-' to the end of the class makes it a literal.
    """
    return re.sub("[() =-]", "", s)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home = (None if wd.find_element_by_name("home").get_attribute("value") == "" else wd.find_element_by_name("home").get_attribute("value"))
work = (None if wd.find_element_by_name("work").get_attribute("value") == "" else wd.find_element_by_name("work").get_attribute("value"))
mobile = (None if wd.find_element_by_name("mobile").get_attribute("value") == "" else wd.find_element_by_name("mobile").get_attribute("value"))
phone2 = (None if wd.find_element_by_name("phone2").get_attribute("value") == "" else wd.find_element_by_name("phone2").get_attribute("value"))
return Contact(firstname=firstname, lastname=lastname, id=id, home=home, work=work, mobile=mobile, phone2=phone2) # all_phones=all_phones_from_edit_witohout_nulls
def get_all_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home = (None if wd.find_element_by_name("home").get_attribute("value") == "" else wd.find_element_by_name("home").get_attribute("value"))
work = (None if wd.find_element_by_name("work").get_attribute("value") == "" else wd.find_element_by_name("work").get_attribute("value"))
mobile = (None if wd.find_element_by_name("mobile").get_attribute("value") == "" else wd.find_element_by_name("mobile").get_attribute("value"))
phone2 = (None if wd.find_element_by_name("phone2").get_attribute("value") == "" else wd.find_element_by_name("phone2").get_attribute("value"))
address = (None if wd.find_element_by_name("address").get_attribute("value") == "" else wd.find_element_by_name("address").get_attribute("value"))
email = (None if wd.find_element_by_name("email").get_attribute("value") == "" else wd.find_element_by_name("email").get_attribute("value"))
email2 = (None if wd.find_element_by_name("email2").get_attribute("value") == "" else wd.find_element_by_name("email2").get_attribute("value"))
email3 = (None if wd.find_element_by_name("email3").get_attribute("value") == "" else wd.find_element_by_name("email3").get_attribute("value"))
return Contact(firstname=firstname, lastname=lastname, id=id, home=home, work=work, mobile=mobile, phone2=phone2, address=address, email=email, email2=email2, email3=email3) # all_phones=all_phones_from_edit_witohout_nulls
def get_contacts_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
home = (None if re.search("H: (.*)", text) is None else re.search("H: (.*)", text).group(1))
work = (None if re.search("W: (.*)", text) is None else re.search("W: (.*)", text).group(1))
mobile = (None if re.search("M: (.*)", text) is None else re.search("M: (.*)", text).group(1))
phone2 = (None if re.search("P: (.*)", text) is None else re.search("P: (.*)", text).group(1))
return Contact(home=home, work=work, mobile=mobile, phone2=phone2) # home=home, work=work, mobile=mobile, phone2=phone2 # firstname=firstname, lastname=lastname, id=id, all_phones=all_phones_from_edit_witohout_nulls
| 53.422794 | 231 | 0.642282 |
f21fdfd7a07c33781679b9954dcbd99a220196b5 | 1,061 | py | Python | nginx_router/backend/synth_project/urls.py | BennettDixon/sleep_tracker_full_stack | 83b46635762db857f0c291c62c4415a1a550ec3d | [
"MIT"
] | null | null | null | nginx_router/backend/synth_project/urls.py | BennettDixon/sleep_tracker_full_stack | 83b46635762db857f0c291c62c4415a1a550ec3d | [
"MIT"
] | null | null | null | nginx_router/backend/synth_project/urls.py | BennettDixon/sleep_tracker_full_stack | 83b46635762db857f0c291c62c4415a1a550ec3d | [
"MIT"
] | null | null | null | """synth_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from graphene_django.views import GraphQLView
from synth_project.schema import schema
# allow cross site resource sharing
from django.views.decorators.csrf import csrf_exempt
# Route table: Django admin, the versioned REST API, and the GraphQL endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/v1/', include('synth_app.urls')),
    # csrf_exempt lets external (cross-site) clients POST GraphQL queries
    # without a CSRF token; graphiql=True serves the in-browser GraphiQL IDE.
    path('graphql/v1/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
]
| 37.892857 | 77 | 0.73327 |
81fdc3ae15badf96bc94474f9b6e275c73edd415 | 5,539 | py | Python | src_old/tests/scripts/mpi/core/poisson_fd_2d.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | src_old/tests/scripts/mpi/core/poisson_fd_2d.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | src_old/tests/scripts/mpi/core/poisson_fd_2d.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | # coding: utf-8
from pyccel.stdlib.parallel.mpi import mpi_init
from pyccel.stdlib.parallel.mpi import mpi_finalize
from pyccel.stdlib.parallel.mpi import mpi_comm_size
from pyccel.stdlib.parallel.mpi import mpi_comm_rank
from pyccel.stdlib.parallel.mpi import mpi_comm_world
from pyccel.stdlib.parallel.mpi import mpi_status_size
from pyccel.stdlib.parallel.mpi import MPI_INTEGER
from pyccel.stdlib.parallel.mpi import MPI_DOUBLE
from pyccel.stdlib.parallel.mpi import MPI_SUM
from pyccel.stdlib.parallel.mpi import mpi_sendrecv
from pyccel.stdlib.parallel.mpi import mpi_dims_create
from pyccel.stdlib.parallel.mpi import mpi_cart_create
from pyccel.stdlib.parallel.mpi import mpi_cart_coords
from pyccel.stdlib.parallel.mpi import mpi_cart_shift
from pyccel.stdlib.parallel.mpi import mpi_comm_free
from pyccel.stdlib.parallel.mpi import mpi_type_contiguous
from pyccel.stdlib.parallel.mpi import mpi_type_vector
from pyccel.stdlib.parallel.mpi import mpi_type_commit
from pyccel.stdlib.parallel.mpi import mpi_type_free
from pyccel.stdlib.parallel.mpi import mpi_allreduce
# ...
# 2D Poisson problem on the unit square, discretized with finite differences
# on a (ntx x nty) interior grid and solved by Jacobi iteration, domain-
# decomposed over a 2D MPI Cartesian topology.
# NOTE(review): this script targets the pyccel MPI DSL -- the mpi_* calls use
# Fortran-style output arguments (ierr, size, rank, ...) which plain Python
# would not mutate; confirm it is only run through pyccel.
ntx = 16
nty = 16
# Grid spacing
hx = 1.0/(ntx+1)
hy = 1.0/(nty+1)
# Equation Coefficients (Jacobi update: u_new = c0*(c1*(E+W) + c2*(N+S) - f))
c0 = (0.5*hx*hx*hy*hy)/(hx*hx+hy*hy)
c1 = 1.0/(hx*hx)
c2 = 1.0/(hy*hy)
# ...
# we need to declare these variables somehow,
# since we are calling mpi subroutines
ierr = -1
size = -1
# rank in comm world
rank = -1
# rank in 2d cart
rank_in_topo = -1
# 2d cart communicator
comm_2d = -1
mpi_init(ierr)
comm = mpi_comm_world
mpi_comm_size(comm, size, ierr)
mpi_comm_rank(comm, rank, ierr)
# Symbolic indices into the 'neighbour' array
north = 0
east = 1
south = 2
west = 3
ndims = 2
steps = [1, 1]
# Periodic along y only
periods = [False, True]
reorder = False
# 'zeros' here is presumably the pyccel array builtin, not numpy -- confirm.
neighbour = zeros(4, int)
coords = zeros(2, int)
dims = zeros(ndims, int)
# Know the number of processes along x and y
mpi_dims_create (size, ndims, dims, ierr)
# ...
# Create a 2d mpi cart
mpi_cart_create (comm, ndims, dims, periods, reorder, comm_2d, ierr)
# Know my coordinates in the topology
mpi_comm_rank (comm_2d, rank_in_topo, ierr)
mpi_cart_coords (comm_2d, rank_in_topo, ndims, coords, ierr)
# X-axis limits of this rank's subdomain
# NOTE(review): '/' relies on integer-division semantics (pyccel/py2); under
# plain Python 3 these bounds become floats -- confirm pyccel handles this.
sx = (coords[0]*ntx)/dims[0]
ex = ((coords[0]+1)*ntx)/dims[0] - 1
# Y-axis limits
sy = (coords[1]*nty)/dims[1]
ey = ((coords[1]+1)*nty)/dims[1] - 1
# ... Neighbours
# Search of my West and East neighbours
mpi_cart_shift (comm_2d, 0, steps[0], neighbour[west], neighbour[east], ierr)
# Search of my South and North neighbours
mpi_cart_shift (comm_2d, 1, steps[1], neighbour[south], neighbour[north], ierr)
# ...
# ... Derived Types
# Creation of the type_line derived datatype to exchange points
# with northern to southern neighbours
type_line = -1
# ex-sx+3 stride accounts for the one-cell ghost layer on each side.
mpi_type_vector (ey-sy+1, 1, ex-sx+3, MPI_DOUBLE, type_line, ierr)
mpi_type_commit (type_line, ierr)
# Creation of the type_column derived datatype to exchange points
# with western to eastern neighbours
type_column = -1
mpi_type_contiguous (ex - sx + 1, MPI_DOUBLE, type_column, ierr)
mpi_type_commit (type_column, ierr)
# ...
# ...
# Local fields including one ghost cell on every side.
# 'vector' is presumably the pyccel allocator taking (lower, upper) index
# bounds per dimension -- confirm against the pyccel stdlib.
u = vector((sx-1,sy-1), (ex+1, ey+1))
u_new = vector((sx-1,sy-1), (ex+1, ey+1))
u_exact = vector((sx-1,sy-1), (ex+1, ey+1))
f = vector((sx-1,sy-1), (ex+1, ey+1))
# Initialization: right-hand side f and the analytic solution
# u_exact = x*y*(x-1)*(y-1) on this rank's interior points.
x = 0.0
y = 0.0
for i in range(sx, ex+1):
    for j in range(sy, ey+1):
        x = i*hx
        y = j*hy
        # print('> rank : ',rank_in_topo, '(i,j) = ',i,j)
        f[i, j] = 2.0*(x*x-x+y*y-y)
        u_exact[i, j] = x*y*(x-1.0)*(y-1.0)
# ...
# Linear solver tolerance
tol = 1.0e-10
tag = 1234
status = zeros (mpi_status_size, int)
n_iterations = 1000
# Jacobi iteration: halo exchange with the four neighbours, pointwise
# update, then an all-reduce of the local error to test convergence.
for it in range(0, n_iterations):
    u[sx:ex+1,sy:ey+1] = u_new[sx:ex+1,sy:ey+1]
    # ... Communication (halo exchange via the derived datatypes)
    # Send to neighbour north and receive from neighbour south
    mpi_sendrecv ( u[sx, sy], 1, type_line, neighbour[north], tag, u[ex+1, sy], 1, type_line, neighbour[south], tag, comm_2d, status, ierr)
    # Send to neighbour south and receive from neighbour north
    mpi_sendrecv ( u[ex, sy], 1, type_line, neighbour[south], tag, u[sx-1, sy], 1, type_line, neighbour[north], tag, comm_2d, status, ierr)
    # Send to neighbour west and receive from neighbour east
    mpi_sendrecv ( u[sx, sy], 1, type_column, neighbour[west], tag, u[sx, ey+1], 1, type_column, neighbour[east], tag, comm_2d, status, ierr)
    # Send to neighbour east and receive from neighbour west
    mpi_sendrecv ( u[sx, ey], 1, type_column, neighbour[east], tag, u[sx, sy-1], 1, type_column, neighbour[west], tag, comm_2d, status, ierr)
    # ...
    # ... Computation of u at the n+1 iteration (5-point stencil)
    for i in range(sx, ex+1):
        for j in range(sy, ey+1):
            u_new[i, j] = c0 * (c1*(u[i+1, j] + u[i-1, j]) + c2*(u[i, j+1] + u[i, j-1]) - f[i, j])
    # ...
    # ... Computation of the global error
    # L1 difference between successive iterates, averaged over the global grid
    u_error = 0.0
    for i in range(sx, ex+1):
        for j in range(sy, ey+1):
            u_error += abs(u[i,j]-u_new[i,j])
    local_error = u_error/(ntx*nty)
    # Reduction
    global_error = 0.0
    mpi_allreduce (local_error, global_error, 1, MPI_DOUBLE, MPI_SUM, comm_2d, ierr)
    # ...
    # ...
    # Stop when converged, or report after the final iteration.
    if (global_error < tol) or (it == n_iterations - 1):
        if rank == 0:
            print ("> convergence after ", it, " iterations")
            print (" local error = ", local_error)
            print (" global error = ", global_error)
        break
    # ...
# Free the datatype
mpi_type_free (type_line, ierr)
mpi_type_free (type_column, ierr)
# Destruction of the communicators
mpi_comm_free (comm_2d, ierr)
mpi_finalize(ierr)
| 28.848958 | 142 | 0.674671 |
331cf383a7d98bda2c88070060e1fab8b16224c7 | 337 | py | Python | src/phocnet/evaluation/time.py | FlorianWestphal/phocnet | 737b7bdd58441fc0a1fa35013db885bfa2cfdfe0 | [
"BSD-3-Clause"
] | 39 | 2016-09-12T12:58:00.000Z | 2021-09-09T01:24:34.000Z | src/phocnet/evaluation/time.py | FlorianWestphal/phocnet | 737b7bdd58441fc0a1fa35013db885bfa2cfdfe0 | [
"BSD-3-Clause"
] | 13 | 2017-11-07T09:35:11.000Z | 2021-02-05T12:13:48.000Z | src/phocnet/evaluation/time.py | FlorianWestphal/phocnet | 737b7bdd58441fc0a1fa35013db885bfa2cfdfe0 | [
"BSD-3-Clause"
] | 32 | 2016-09-12T12:58:54.000Z | 2022-02-20T19:10:54.000Z | '''
Created on Jul 10, 2016
@author: ssudholt
'''
def convert_secs2HHMMSS(secs):
    """Format a duration given in seconds as a human-readable string.

    Takes a float/int timing interval in seconds (fractions are truncated)
    and returns it in the form '<h>h<mm>m<ss>s', e.g. 3661 -> '1h01m01s'.
    Note: despite the function name, the output is NOT colon-separated
    'hh:mm:ss' -- the previous docstring was wrong about the format, and
    callers may rely on the 'XhMMmSSs' shape, so it is kept unchanged.
    """
    secs = int(secs)
    m, s = divmod(secs, 60)
    h, m = divmod(m, 60)
    return '%dh%02dm%02ds' % (h, m, s)
621383832a66b63d8c9e6e6ea0bc8475b5daee9e | 6,292 | py | Python | mbbl_envs/mbbl/trainer/mbmf_trainer.py | hbutsuak95/iv_rl | 0f72a8f077a238237027ea96b7d1160c35ac9959 | [
"MIT"
] | 9 | 2022-01-16T11:27:00.000Z | 2022-03-13T14:04:48.000Z | mbbl_envs/mbbl/trainer/mbmf_trainer.py | hbutsuak95/iv_rl | 0f72a8f077a238237027ea96b7d1160c35ac9959 | [
"MIT"
] | null | null | null | mbbl_envs/mbbl/trainer/mbmf_trainer.py | hbutsuak95/iv_rl | 0f72a8f077a238237027ea96b7d1160c35ac9959 | [
"MIT"
] | null | null | null | import time
import numpy as np
from .base_trainer import base_trainer
from mbbl.util.common import logger
from mbbl.util.common import misc_utils
from mbbl.util.common import parallel_util
class trainer(base_trainer):
    """MBMF (model-based / model-free) trainer worker.

    Runs as a separate process: it pulls control signals and rollout data
    from ``task_queue``, trains the networks, and pushes updated weights
    plus training statistics back through ``result_queue``.
    """

    def __init__(self, args, network_type, task_queue, result_queue,
                 name_scope='trainer'):
        # the base agent
        super(trainer, self).__init__(
            args=args, network_type=network_type,
            task_queue=task_queue, result_queue=result_queue,
            name_scope=name_scope
        )
        # self._base_path = init_path.get_abs_base_dir()

    def _update_parameters(self, rollout_data, training_info):
        """Train the requested networks on one batch of rollouts.

        Returns a dict of training statistics (at least 'avg_reward').
        """
        # get the observation list
        self._update_whitening_stats(rollout_data)
        training_data = self._preprocess_data(rollout_data)
        training_stats = {'avg_reward': training_data['avg_reward']}
        # train the policy: each requested network key may hold an ensemble
        for key in training_info['network_to_train']:
            for i_network in range(self._num_model_ensemble[key]):
                i_stats = self._network[key][i_network].train(
                    training_data, self._replay_buffer, training_info={}
                )
                if i_stats is not None:
                    training_stats.update(i_stats)
        # keep the processed batch for future off-policy training
        self._replay_buffer.add_data(training_data)
        return training_stats

    def _update_initial_parameters(self, rollout_data, training_info):
        """Dagger-style warm start: imitation-train only the policy networks.

        Unlike _update_parameters, the processed batch is NOT added to the
        replay buffer.
        """
        # get the observation list
        self._update_whitening_stats(rollout_data)
        training_data = self._preprocess_dagger_data(rollout_data)
        training_stats = {'avg_reward': training_data['avg_reward']}
        # train the policy
        key = 'policy'
        for i_network in range(self._num_model_ensemble[key]):
            i_stats = self._network[key][i_network].train_initial_policy(
                training_data, self._replay_buffer, training_info={}
            )
            if i_stats is not None:
                training_stats.update(i_stats)
        return training_stats

    def run(self):
        """Main worker loop: build models, then serve queue signals forever.

        Recognized signals (first element of each task tuple):
        END_SIGNAL/None  -> shut down; START_SIGNAL -> send current weights;
        MBMF_INITIAL     -> dagger warm-start training;
        GET/SET_POLICY_WEIGHT -> exchange policy-net weights;
        anything else must be TRAIN_SIGNAL -> regular training step.
        """
        self._set_io_size()
        self._build_models()
        self._init_replay_buffer()
        self._init_whitening_stats()
        # load the model if needed
        if self.args.ckpt_name is not None:
            self._restore_all()
        # the main training process
        while True:
            next_task = self._task_queue.get()

            if next_task[0] is None or next_task[0] == parallel_util.END_SIGNAL:
                # kill the learner
                self._task_queue.task_done()
                break

            elif next_task[0] == parallel_util.START_SIGNAL:
                # get network weights
                self._task_queue.task_done()
                self._result_queue.put(self._get_weights())

            elif next_task[0] == parallel_util.MBMF_INITIAL:
                stats = self._update_initial_parameters(
                    next_task[1]['data'], next_task[1]['training_info']
                )
                self._task_queue.task_done()
                self._iteration += 1
                return_data = {
                    'network_weights': self._get_weights(),
                    'stats': stats,
                    'totalsteps': self._timesteps_so_far,
                    'iteration': self._iteration,
                    'replay_buffer': self._replay_buffer
                }
                self._result_queue.put(return_data)

            elif next_task[0] == parallel_util.GET_POLICY_WEIGHT:
                self._task_queue.task_done()
                # only the first policy network's weights are shipped
                #self._result_queue.put(self._get_weights())
                self._result_queue.put(self._get_weights()['policy'][0])

            elif next_task[0] == parallel_util.SET_POLICY_WEIGHT:
                # set parameters of the actor policy
                self._network['policy'][0].set_weights(next_task[1])
                time.sleep(0.001) # yield the process
                self._task_queue.task_done()

            else:
                # training
                assert next_task[0] == parallel_util.TRAIN_SIGNAL
                stats = self._update_parameters(
                    next_task[1]['data'], next_task[1]['training_info']
                )
                self._task_queue.task_done()
                self._iteration += 1
                return_data = {
                    'network_weights': self._get_weights(),
                    'stats': stats,
                    'totalsteps': self._timesteps_so_far,
                    'iteration': self._iteration,
                    'replay_buffer': self._replay_buffer
                }
                self._result_queue.put(return_data)

    def _preprocess_dagger_data(self, rollout_data):
        """Flatten a list of episode dicts into one training batch.

        Builds 'start_state'/'end_state' (obs shifted by one step), the
        concatenated 'action'/'reward'/'return' arrays, per-episode lengths,
        the current whitening statistics, and the mean episodic reward.
        """
        # get the observations
        training_data = {}
        # get the returns (might be needed to train policy)
        for i_episode in rollout_data:
            i_episode["returns"] = \
                misc_utils.get_return(i_episode["rewards"], self.args.gamma)
        training_data['start_state'] = np.concatenate(
            [i_episode['obs'][:-1] for i_episode in rollout_data]
        )
        training_data['end_state'] = np.concatenate(
            [i_episode['obs'][1:] for i_episode in rollout_data]
        )
        for key in ['action', 'reward', 'return']:
            training_data[key] = np.concatenate(
                [i_episode[key + 's'][:] for i_episode in rollout_data]
            )
        # record the length
        training_data['episode_length'] = \
            [len(i_episode['rewards']) for i_episode in rollout_data]
        # get the episodic reward
        for i_episode in rollout_data:
            i_episode['episodic_reward'] = sum(i_episode['rewards'])
        avg_reward = np.mean([i_episode['episodic_reward']
                              for i_episode in rollout_data])
        logger.info('Mean reward: {}'.format(avg_reward))
        training_data['whitening_stats'] = self._whitening_stats
        training_data['avg_reward'] = avg_reward
        return training_data
| 38.133333 | 80 | 0.582645 |
e3f25e0f9f933076bed861116b0e4bec5c1c3baf | 19,643 | py | Python | zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import uuid
import ddt
import falcon
import mock
from oslo_serialization import jsonutils
import six
from zaqar.storage import errors as storage_errors
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
@ddt.ddt
class TestQueueLifecycleMongoDB(base.V2Base):
config_file = 'wsgi_mongodb.conf'
@testing.requires_mongodb
def setUp(self):
super(TestQueueLifecycleMongoDB, self).setUp()
self.queue_path = self.url_prefix + '/queues'
self.gumshoe_queue_path = self.queue_path + '/gumshoe'
self.fizbat_queue_path = self.queue_path + '/fizbat'
self.headers = {
'Client-ID': str(uuid.uuid4()),
'X-Project-ID': '3387309841abc_'
}
def tearDown(self):
control = self.boot.control
storage = self.boot.storage._storage
connection = storage.connection
connection.drop_database(control.queues_database)
for db in storage.message_databases:
connection.drop_database(db)
super(TestQueueLifecycleMongoDB, self).tearDown()
def test_without_project_id(self):
headers = {
'Client-ID': str(uuid.uuid4()),
}
self.simulate_put(self.gumshoe_queue_path, headers=headers,
need_project_id=False)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_delete(self.gumshoe_queue_path, headers=headers,
need_project_id=False)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_empty_project_id(self):
headers = {
'Client-ID': str(uuid.uuid4()),
'X-Project-ID': ''
}
self.simulate_put(self.gumshoe_queue_path, headers=headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_delete(self.gumshoe_queue_path, headers=headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data('480924', 'foo')
def test_basics_thoroughly(self, project_id):
headers = {
'Client-ID': str(uuid.uuid4()),
'X-Project-ID': project_id
}
gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats'
# Stats are empty - queue not created yet
self.simulate_get(gumshoe_queue_path_stats, headers=headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Create
doc = '{"messages": {"ttl": 600}}'
self.simulate_put(self.gumshoe_queue_path,
headers=headers, body=doc)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
location = self.srmock.headers_dict['Location']
self.assertEqual(location, self.gumshoe_queue_path)
# Fetch metadata
result = self.simulate_get(self.gumshoe_queue_path,
headers=headers)
result_doc = jsonutils.loads(result[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
ref_doc = jsonutils.loads(doc)
ref_doc['_default_message_ttl'] = 3600
ref_doc['_max_messages_post_size'] = 262144
self.assertEqual(ref_doc, result_doc)
# Stats empty queue
self.simulate_get(gumshoe_queue_path_stats, headers=headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Delete
self.simulate_delete(self.gumshoe_queue_path, headers=headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Get non-existent stats
self.simulate_get(gumshoe_queue_path_stats, headers=headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
def test_name_restrictions(self):
self.simulate_put(self.queue_path + '/Nice-Boat_2',
headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
self.simulate_put(self.queue_path + '/Nice-Bo@t',
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8,
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_project_id_restriction(self):
muvluv_queue_path = self.queue_path + '/Muv-Luv'
self.simulate_put(muvluv_queue_path,
headers={'Client-ID': str(uuid.uuid4()),
'X-Project-ID': 'JAM Project' * 24})
self.assertEqual(falcon.HTTP_400, self.srmock.status)
# no charset restrictions
self.simulate_put(muvluv_queue_path,
headers={'Client-ID': str(uuid.uuid4()),
'X-Project-ID': 'JAM Project'})
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_non_ascii_name(self):
test_params = ((u'/queues/non-ascii-n\u0153me', 'utf-8'),
(u'/queues/non-ascii-n\xc4me', 'iso8859-1'))
for uri, enc in test_params:
uri = self.url_prefix + uri
if six.PY2:
uri = uri.encode(enc)
self.simulate_put(uri, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_delete(uri, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_no_metadata(self):
self.simulate_put(self.fizbat_queue_path,
headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
self.simulate_put(self.fizbat_queue_path, body='',
headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
result = self.simulate_get(self.fizbat_queue_path,
headers=self.headers)
result_doc = jsonutils.loads(result[0])
self.assertEqual(256 * 1024,
result_doc.get('_max_messages_post_size'))
self.assertEqual(3600,
result_doc.get('_default_message_ttl'))
@ddt.data('{', '[]', '.', ' ')
def test_bad_metadata(self, document):
self.simulate_put(self.fizbat_queue_path,
headers=self.headers,
body=document)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_too_much_metadata(self):
self.simulate_put(self.fizbat_queue_path, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size - (len(doc) - 10) + 1
doc = doc.format(pad='x' * padding_len)
self.simulate_put(self.fizbat_queue_path,
headers=self.headers,
body=doc)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_way_too_much_metadata(self):
self.simulate_put(self.fizbat_queue_path, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size * 100
doc = doc.format(pad='x' * padding_len)
self.simulate_put(self.fizbat_queue_path,
headers=self.headers, body=doc)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_custom_metadata(self):
# Set
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size - (len(doc) - 2)
doc = doc.format(pad='x' * padding_len)
self.simulate_put(self.fizbat_queue_path,
headers=self.headers,
body=doc)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
# Get
result = self.simulate_get(self.fizbat_queue_path,
headers=self.headers)
result_doc = jsonutils.loads(result[0])
ref_doc = jsonutils.loads(doc)
ref_doc['_default_message_ttl'] = 3600
ref_doc['_max_messages_post_size'] = 262144
self.assertEqual(ref_doc, result_doc)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
def test_update_metadata(self):
xyz_queue_path = self.url_prefix + '/queues/xyz'
xyz_queue_path_metadata = xyz_queue_path
headers = {
'Client-ID': str(uuid.uuid4()),
'X-Project-ID': str(uuid.uuid4())
}
# Create
self.simulate_put(xyz_queue_path, headers=headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
headers.update({'Content-Type':
"application/openstack-messaging-v2.0-json-patch"})
# add metadata
doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},'
'{"op":"add", "path": "/metadata/key2", "value": 1}]')
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc1)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# remove reserved metadata, zaqar will do nothing and return 200,
# because
doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc3)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# replace metadata
doc2 = '[{"op":"replace", "path": "/metadata/key1", "value": 2}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc2)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# replace reserved metadata, zaqar will store the reserved metadata
doc2 = ('[{"op":"replace", "path": "/metadata/_default_message_ttl",'
'"value": 300}]')
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc2)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Get
result = self.simulate_get(xyz_queue_path_metadata,
headers=headers)
result_doc = jsonutils.loads(result[0])
self.assertEqual({'key1': 2, 'key2': 1,
'_default_message_ttl': 300,
'_max_messages_post_size': 262144}, result_doc)
# remove metadata
doc3 = '[{"op":"remove", "path": "/metadata/key1"}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc3)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# remove reserved metadata
doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc3)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Get
result = self.simulate_get(xyz_queue_path_metadata,
headers=headers)
result_doc = jsonutils.loads(result[0])
self.assertEqual({'key2': 1, '_default_message_ttl': 3600,
'_max_messages_post_size': 262144}, result_doc)
# replace non-existent metadata
doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc4)
self.assertEqual(falcon.HTTP_409, self.srmock.status)
# remove non-existent metadata
doc5 = '[{"op":"remove", "path": "/metadata/key3"}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc5)
self.assertEqual(falcon.HTTP_409, self.srmock.status)
self.simulate_delete(xyz_queue_path, headers=headers)
# add metadata to non-existent queue
doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},'
'{"op":"add", "path": "/metadata/key2", "value": 1}]')
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc1)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# replace metadata in non-existent queue
doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc4)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# remove metadata from non-existent queue
doc5 = '[{"op":"remove", "path": "/metadata/key3"}]'
self.simulate_patch(xyz_queue_path_metadata,
headers=headers,
body=doc5)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_list(self):
arbitrary_number = 644079696574693
project_id = str(arbitrary_number)
client_id = str(uuid.uuid4())
header = {
'X-Project-ID': project_id,
'Client-ID': client_id
}
# NOTE(kgriffs): It's important that this one sort after the one
# above. This is in order to prove that bug/1236605 is fixed, and
# stays fixed!
alt_project_id = str(arbitrary_number + 1)
# List empty
result = self.simulate_get(self.queue_path, headers=header)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
results = jsonutils.loads(result[0])
self.assertEqual([], results['queues'])
self.assertIn('links', results)
self.assertEqual(0, len(results['links']))
# Payload exceeded
self.simulate_get(self.queue_path, headers=header,
query_string='limit=21')
self.assertEqual(falcon.HTTP_400, self.srmock.status)
# Create some
def create_queue(name, project_id, body):
altheader = {'Client-ID': client_id}
if project_id is not None:
altheader['X-Project-ID'] = project_id
uri = self.queue_path + '/' + name
self.simulate_put(uri, headers=altheader, body=body)
create_queue('q1', project_id, '{"node": 31}')
create_queue('q2', project_id, '{"node": 32}')
create_queue('q3', project_id, '{"node": 33}')
create_queue('q3', alt_project_id, '{"alt": 1}')
# List (limit)
result = self.simulate_get(self.queue_path, headers=header,
query_string='limit=2')
result_doc = jsonutils.loads(result[0])
self.assertEqual(2, len(result_doc['queues']))
# List (no metadata, get all)
result = self.simulate_get(self.queue_path,
headers=header, query_string='limit=5')
result_doc = jsonutils.loads(result[0])
[target, params] = result_doc['links'][0]['href'].split('?')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Ensure we didn't pick up the queue from the alt project.
queues = result_doc['queues']
self.assertEqual(3, len(queues))
# List with metadata
result = self.simulate_get(self.queue_path, headers=header,
query_string='detailed=true')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
result_doc = jsonutils.loads(result[0])
[target, params] = result_doc['links'][0]['href'].split('?')
queue = result_doc['queues'][0]
result = self.simulate_get(queue['href'], headers=header)
result_doc = jsonutils.loads(result[0])
self.assertEqual(queue['metadata'], result_doc)
self.assertEqual({'node': 31, '_default_message_ttl': 3600,
'_max_messages_post_size': 262144}, result_doc)
# List tail
self.simulate_get(target, headers=header, query_string=params)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# List manually-constructed tail
self.simulate_get(target, headers=header, query_string='marker=zzz')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
def test_list_returns_503_on_nopoolfound_exception(self):
arbitrary_number = 644079696574693
project_id = str(arbitrary_number)
client_id = str(uuid.uuid4())
header = {
'X-Project-ID': project_id,
'Client-ID': client_id
}
queue_controller = self.boot.storage.queue_controller
with mock.patch.object(queue_controller, 'list') as mock_queue_list:
def queue_generator():
raise storage_errors.NoPoolFound()
# This generator tries to be like queue controller list generator
# in some ways.
def fake_generator():
yield queue_generator()
yield {}
mock_queue_list.return_value = fake_generator()
self.simulate_get(self.queue_path, headers=header)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
class TestQueueLifecycleFaultyDriver(base.V2BaseFaulty):
    """Run the lifecycle operations against a deliberately faulty storage
    driver: every request must surface as 503 Service Unavailable."""

    config_file = 'wsgi_faulty.conf'

    def test_simple(self):
        self.headers = {
            'Client-ID': str(uuid.uuid4()),
            'X-Project-ID': '338730984abc_1'
        }
        gumshoe_queue_path = self.url_prefix + '/queues/gumshoe'
        doc = '{"messages": {"ttl": 600}}'
        self.simulate_put(gumshoe_queue_path,
                          headers=self.headers,
                          body=doc)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        # The failed create must not set a Location header.
        location = ('Location', gumshoe_queue_path)
        self.assertNotIn(location, self.srmock.headers)

        result = self.simulate_get(gumshoe_queue_path,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])
        self.assertEqual(falcon.HTTP_503, self.srmock.status)
        self.assertNotEqual(result_doc, jsonutils.loads(doc))

        self.simulate_get(gumshoe_queue_path + '/stats',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_get(self.url_prefix + '/queues',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_delete(gumshoe_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)
| 39.129482 | 79 | 0.600672 |
3b9825f776c01a024ff865d95a1bc16983cf6f46 | 21,901 | py | Python | src/autoencoder.py | pcheng2/SindyAutoencoders | 9d6cb364a9806090f0184150818c5fa6c37601f1 | [
"MIT"
] | 190 | 2019-05-23T07:04:18.000Z | 2022-03-31T13:09:07.000Z | src/autoencoder.py | Jimmy-INL/SindyAutoencoders | d50acb28cb97f2af50a3ef4e1b3fdd0d30d3a6f8 | [
"MIT"
] | null | null | null | src/autoencoder.py | Jimmy-INL/SindyAutoencoders | d50acb28cb97f2af50a3ef4e1b3fdd0d30d3a6f8 | [
"MIT"
] | 61 | 2019-05-17T09:09:12.000Z | 2022-03-18T13:07:02.000Z | import tensorflow as tf
def full_network(params):
    """
    Define the full SINDy-autoencoder network architecture.

    Arguments:
        params - Dictionary object containing the parameters that specify the training.
        See README file for a description of the parameters.

    Returns:
        network - Dictionary containing the tensorflow objects that make up the network.
    """
    input_dim = params['input_dim']
    latent_dim = params['latent_dim']
    activation = params['activation']
    poly_order = params['poly_order']
    # 'include_sine' is optional; default to a polynomial-only library.
    # (idiomatic params.get replaces the previous "key in params.keys()" check)
    include_sine = params.get('include_sine', False)
    library_dim = params['library_dim']
    model_order = params['model_order']

    network = {}

    x = tf.placeholder(tf.float32, shape=[None, input_dim], name='x')
    dx = tf.placeholder(tf.float32, shape=[None, input_dim], name='dx')
    if model_order == 2:
        # Second-order models additionally require second time derivatives.
        ddx = tf.placeholder(tf.float32, shape=[None, input_dim], name='ddx')

    # Autoencoder: map x into the latent space z and back out to x_decode.
    if activation == 'linear':
        z, x_decode, encoder_weights, encoder_biases, decoder_weights, decoder_biases = linear_autoencoder(x, input_dim, latent_dim)
    else:
        z, x_decode, encoder_weights, encoder_biases, decoder_weights, decoder_biases = nonlinear_autoencoder(x, input_dim, latent_dim, params['widths'], activation=activation)

    # Latent-space time derivatives (chain rule through the encoder) and
    # the SINDy candidate-function library Theta.
    if model_order == 1:
        dz = z_derivative(x, dx, encoder_weights, encoder_biases, activation=activation)
        Theta = sindy_library_tf(z, latent_dim, poly_order, include_sine)
    else:
        dz,ddz = z_derivative_order2(x, dx, ddx, encoder_weights, encoder_biases, activation=activation)
        Theta = sindy_library_tf_order2(z, dz, latent_dim, poly_order, include_sine)

    # SINDy coefficient matrix Xi; the initialization scheme is configurable.
    if params['coefficient_initialization'] == 'xavier':
        sindy_coefficients = tf.get_variable('sindy_coefficients', shape=[library_dim,latent_dim], initializer=tf.contrib.layers.xavier_initializer())
    elif params['coefficient_initialization'] == 'specified':
        sindy_coefficients = tf.get_variable('sindy_coefficients', initializer=params['init_coefficients'])
    elif params['coefficient_initialization'] == 'constant':
        sindy_coefficients = tf.get_variable('sindy_coefficients', shape=[library_dim,latent_dim], initializer=tf.constant_initializer(1.0))
    elif params['coefficient_initialization'] == 'normal':
        sindy_coefficients = tf.get_variable('sindy_coefficients', shape=[library_dim,latent_dim], initializer=tf.initializers.random_normal())

    # Optionally mask out coefficients removed by sequential thresholding.
    if params['sequential_thresholding']:
        coefficient_mask = tf.placeholder(tf.float32, shape=[library_dim,latent_dim], name='coefficient_mask')
        sindy_predict = tf.matmul(Theta, coefficient_mask*sindy_coefficients)
        network['coefficient_mask'] = coefficient_mask
    else:
        sindy_predict = tf.matmul(Theta, sindy_coefficients)

    # Push the predicted latent derivatives back through the decoder.
    if model_order == 1:
        dx_decode = z_derivative(z, sindy_predict, decoder_weights, decoder_biases, activation=activation)
    else:
        dx_decode,ddx_decode = z_derivative_order2(z, dz, sindy_predict, decoder_weights, decoder_biases,
                                                   activation=activation)

    network['x'] = x
    network['dx'] = dx
    network['z'] = z
    network['dz'] = dz
    network['x_decode'] = x_decode
    network['dx_decode'] = dx_decode
    network['encoder_weights'] = encoder_weights
    network['encoder_biases'] = encoder_biases
    network['decoder_weights'] = decoder_weights
    network['decoder_biases'] = decoder_biases
    network['Theta'] = Theta
    network['sindy_coefficients'] = sindy_coefficients

    if model_order == 1:
        network['dz_predict'] = sindy_predict
    else:
        network['ddz'] = ddz
        network['ddz_predict'] = sindy_predict
        network['ddx'] = ddx
        network['ddx_decode'] = ddx_decode

    return network
def define_loss(network, params):
    """
    Create the loss functions.

    Arguments:
        network - Dictionary object containing the elements of the network
        architecture.  This will be the output of the full_network() function.
        params - Dictionary of training parameters, including the loss weights.

    Returns:
        loss - Total weighted training loss.
        losses - Dictionary of the individual (unweighted) loss terms.
        loss_refinement - Weighted loss without the SINDy regularization term,
        used during the refinement phase of training.
    """
    x = network['x']
    x_decode = network['x_decode']
    # Pick the derivative order the model was built for.
    if params['model_order'] == 1:
        deriv_z, deriv_z_predict = network['dz'], network['dz_predict']
        deriv_x, deriv_x_decode = network['dx'], network['dx_decode']
    else:
        deriv_z, deriv_z_predict = network['ddz'], network['ddz_predict']
        deriv_x, deriv_x_decode = network['ddx'], network['ddx_decode']
    masked_coefficients = params['coefficient_mask']*network['sindy_coefficients']

    losses = {
        'decoder': tf.reduce_mean((x - x_decode)**2),
        'sindy_z': tf.reduce_mean((deriv_z - deriv_z_predict)**2),
        'sindy_x': tf.reduce_mean((deriv_x - deriv_x_decode)**2),
        'sindy_regularization': tf.reduce_mean(tf.abs(masked_coefficients)),
    }

    # Refinement loss drops the L1 regularization; the full loss adds it back.
    loss_refinement = params['loss_weight_decoder'] * losses['decoder'] \
                      + params['loss_weight_sindy_z'] * losses['sindy_z'] \
                      + params['loss_weight_sindy_x'] * losses['sindy_x']
    loss = loss_refinement \
           + params['loss_weight_sindy_regularization'] * losses['sindy_regularization']
    return loss, losses, loss_refinement
def linear_autoencoder(x, input_dim, latent_dim):
    """
    Construct a linear autoencoder (single affine layer each way, no activation).

    Arguments:
        x - 2D tensorflow array, input to the network (shape is [?, input_dim])
        input_dim - Integer, number of state variables in the input
        latent_dim - Integer, number of state variables in the latent space

    Returns:
        z - Tensorflow array, latent representation (shape is [?, latent_dim])
        x_decode - Tensorflow array, reconstruction of the input
        encoder_weights, encoder_biases - Lists of encoder parameters
        decoder_weights, decoder_biases - Lists of decoder parameters
    """
    # BUG FIX: the third parameter was previously named `d` while the body
    # referenced `latent_dim`, so this code path (activation == 'linear')
    # always raised NameError.  The parameter is now named `latent_dim`,
    # matching the callers, which pass it positionally.
    z,encoder_weights,encoder_biases = build_network_layers(x, input_dim, latent_dim, [], None, 'encoder')
    x_decode,decoder_weights,decoder_biases = build_network_layers(z, latent_dim, input_dim, [], None, 'decoder')
    return z, x_decode, encoder_weights, encoder_biases,decoder_weights,decoder_biases
def nonlinear_autoencoder(x, input_dim, latent_dim, widths, activation='elu'):
    """
    Construct a nonlinear autoencoder.

    Arguments:
        x - 2D tensorflow array, input to the network (shape is [?, input_dim])
        input_dim - Integer, number of state variables in the input
        latent_dim - Integer, number of state variables in the latent space
        widths - List of integers, hidden layer widths (mirrored for the decoder)
        activation - String naming the activation function: 'elu', 'relu' or 'sigmoid'

    Returns:
        z - Tensorflow array, latent representation
        x_decode - Tensorflow array, reconstruction of the input
        encoder_weights - List of tensorflow arrays containing the encoder weights
        encoder_biases - List of tensorflow arrays containing the encoder biases
        decoder_weights - List of tensorflow arrays containing the decoder weights
        decoder_biases - List of tensorflow arrays containing the decoder biases

    Raises:
        ValueError - if `activation` is not one of the supported names.
    """
    # Dispatch table replaces the former if/elif chain.
    supported_activations = {
        'relu': tf.nn.relu,
        'elu': tf.nn.elu,
        'sigmoid': tf.sigmoid,
    }
    if activation not in supported_activations:
        raise ValueError('invalid activation function')
    activation_function = supported_activations[activation]

    z, encoder_weights, encoder_biases = build_network_layers(
        x, input_dim, latent_dim, widths, activation_function, 'encoder')
    # The decoder mirrors the encoder's hidden-layer widths.
    x_decode, decoder_weights, decoder_biases = build_network_layers(
        z, latent_dim, input_dim, widths[::-1], activation_function, 'decoder')
    return z, x_decode, encoder_weights, encoder_biases, decoder_weights, decoder_biases
def build_network_layers(input, input_dim, output_dim, widths, activation, name):
    """
    Construct one portion of the network (either encoder or decoder).

    Arguments:
        input - 2D tensorflow array, input to the network (shape is [?,input_dim])
        input_dim - Integer, number of state variables in the input to the first layer
        output_dim - Integer, number of state variables to output from the final layer
        widths - List of integers representing how many units are in each network layer
        activation - Tensorflow function to be used as the activation function at each layer
        name - String, prefix to be used in naming the tensorflow variables

    Returns:
        output - Tensorflow array, output of the network layers (shape is [?,output_dim])
        weights - List of tensorflow arrays containing the network weights
        biases - List of tensorflow arrays containing the network biases
    """
    weights = []
    biases = []
    # Fan-in/fan-out pairs for every layer, final layer included.
    layer_dims = [input_dim] + list(widths) + [output_dim]
    output = input
    for i, (fan_in, fan_out) in enumerate(zip(layer_dims[:-1], layer_dims[1:])):
        W = tf.get_variable(name+'_W'+str(i), shape=[fan_in, fan_out],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(name+'_b'+str(i), shape=[fan_out],
                            initializer=tf.constant_initializer(0.0))
        output = tf.matmul(output, W) + b
        # Hidden layers get the nonlinearity; the final layer stays affine.
        if activation is not None and i < len(widths):
            output = activation(output)
        weights.append(W)
        biases.append(b)
    return output, weights, biases
# def encoder(input, input_dim, d, widths, activation, name):
# """
# Construct the encoder.
# Arguments:
# input - 2D tensorflow array, input to the network (shape is [?,input_dim])
# input_dim - Integer, number of state variables in the original data space
# d - Integer, number of state variables in the decoder space
# widths - List of integers representing how many units are in each network layer
# activation - Tensorflow function to be used as the activation function at each layer
# name - String, prefix to be used in naming the tensorflow variables
# Returns:
# input - Tensorflow array, output of the encoder (shape is [?,d])
# weights - List of tensorflow arrays containing the network weights
# biases - List of tensorflow arrays containing the network biases
# """
# weights = []
# biases = []
# last_width=input_dim
# for i,n_units in enumerate(widths):
# W = tf.get_variable(name+'_W'+str(i), shape=[last_width,n_units],
# initializer=tf.contrib.layers.xavier_initializer())
# b = tf.get_variable(name+'_b'+str(i), shape=[n_units],
# initializer=tf.constant_initializer(0.0))
# input = tf.matmul(input, W) + b
# if activation is not None:
# input = activation(input)
# last_width = n_units
# weights.append(W)
# biases.append(b)
# W = tf.get_variable(name+'_W'+str(len(widths)), shape=[last_width,latent_dim],
# initializer=tf.contrib.layers.xavier_initializer())
# b = tf.get_variable(name+'_b'+str(len(widths)), shape=[latent_dim],
# initializer=tf.constant_initializer(0.0))
# input = tf.matmul(input,W) + b
# weights.append(W)
# biases.append(b)
# return input, weights, biases
# def decoder(input, input_dim, latent_dim, widths, activation, name):
# """
# Construct the decoder.
# Arguments:
# input - 2D tensorflow array, input to the network (shape is [?,latent_dim])
# input_dim - Integer, number of state variables in the original data space
# latent_dim - Integer, number of state variables in the decoder space
# widths - List of integers representing how many units are in each network layer
# activation - Tensorflow function to be used as the activation function at each layer
# name - String, prefix to be used in naming the tensorflow variables
# Returns:
# input - Tensorflow array, output of the decoder (shape is [?,input_dim])
# weights - List of tensorflow arrays containing the network weights
# biases - List of tensorflow arrays containing the network biases
# """
# weights = []
# biases = []
# last_width=latent_dim
# for i,n_units in enumerate(widths):
# W = tf.get_variable(name+'_W'+str(i), shape=[last_width,n_units],
# initializer=tf.contrib.layers.xavier_initializer())
# b = tf.get_variable(name+'_b'+str(i), shape=[n_units],
# initializer=tf.constant_initializer(0.0))
# input = tf.matmul(input, W) + b
# if activation is not None:
# input = activation(input)
# last_width = n_units
# weights.append(W)
# biases.append(b)
# W = tf.get_variable(name+'_W'+str(len(widths)), shape=[last_width,input_dim],
# initializer=tf.contrib.layers.xavier_initializer())
# b = tf.get_variable(name+'_b'+str(len(widths)), shape=[input_dim],
# initializer=tf.constant_initializer(0.0))
# input = tf.matmul(input,W) + b
# weights.append(W)
# biases.append(b)
# return input, weights, biases
def sindy_library_tf(z, latent_dim, poly_order, include_sine=False):
    """
    Build the SINDy library.

    Arguments:
        z - 2D tensorflow array of the snapshots on which to build the library. Shape is number of
        time points by the number of state variables.
        latent_dim - Integer, number of state variable in z.
        poly_order - Integer, polynomial order to which to build the library.
        include_sine - Boolean, whether or not to include sine terms in the library. Default False.

    Returns:
        2D tensorflow array containing the constructed library. Shape is number of time points by
        number of library functions. The number of library functions is determined by the number
        of state variables of the input, the polynomial order, and whether or not sines are included.
    """
    from itertools import combinations_with_replacement

    # Constant term.
    library = [tf.ones(tf.shape(z)[0])]
    # Polynomial terms: for each degree d, every monomial z_i1*...*z_id with
    # i1 <= ... <= id.  combinations_with_replacement yields index tuples in
    # the same lexicographic order as the original hand-written nested loops,
    # so the column ordering of the library is unchanged.  This also
    # generalizes the previous implementation, which was capped at order 5.
    for degree in range(1, poly_order + 1):
        for index_tuple in combinations_with_replacement(range(latent_dim), degree):
            term = z[:, index_tuple[0]]
            for i in index_tuple[1:]:
                term = term * z[:, i]
            library.append(term)
    if include_sine:
        for i in range(latent_dim):
            library.append(tf.sin(z[:, i]))
    return tf.stack(library, axis=1)
def sindy_library_tf_order2(z, dz, latent_dim, poly_order, include_sine=False):
    """
    Build the SINDy library for a second order system. This is essentially the same as for a first
    order system, but library terms are also built for the derivatives: the candidate functions
    are monomials (and optionally sines) in the concatenation [z, dz].
    """
    from itertools import combinations_with_replacement

    # Constant term, then work over the concatenated state [z, dz].
    library = [tf.ones(tf.shape(z)[0])]
    z_combined = tf.concat([z, dz], 1)
    n_vars = 2*latent_dim
    # Same lexicographic monomial ordering as the original nested loops
    # (i1 <= i2 <= ... per degree); also lifts the previous order-5 cap.
    for degree in range(1, poly_order + 1):
        for index_tuple in combinations_with_replacement(range(n_vars), degree):
            term = z_combined[:, index_tuple[0]]
            for i in index_tuple[1:]:
                term = term * z_combined[:, i]
            library.append(term)
    if include_sine:
        for i in range(n_vars):
            library.append(tf.sin(z_combined[:, i]))
    return tf.stack(library, axis=1)
def z_derivative(input, dx, weights, biases, activation='elu'):
    """
    Compute the first order time derivatives by propagating through the network.

    Arguments:
        input - 2D tensorflow array, input to the network. Dimensions are number of time points
        by number of state variables.
        dx - First order time derivatives of the input to the network.
        weights - List of tensorflow arrays containing the network weights
        biases - List of tensorflow arrays containing the network biases
        activation - String specifying which activation function to use. Options are
        'elu' (exponential linear unit), 'relu' (rectified linear unit), 'sigmoid',
        or linear.

    Returns:
        dz - Tensorflow array, first order time derivatives of the network output.
    """
    # Layer-by-layer chain rule: at each hidden layer with pre-activation
    # s = input@W + b, the derivative propagates as dz <- f'(s) * (dz @ W).
    # The final layer is affine (no activation), hence the trailing matmul.
    dz = dx
    if activation == 'elu':
        for i in range(len(weights)-1):
            input = tf.matmul(input, weights[i]) + biases[i]
            # elu'(s) = exp(s) for s < 0 and 1 for s >= 0, i.e. min(exp(s), 1).
            dz = tf.multiply(tf.minimum(tf.exp(input),1.0),
                             tf.matmul(dz, weights[i]))
            input = tf.nn.elu(input)
        dz = tf.matmul(dz, weights[-1])
    elif activation == 'relu':
        for i in range(len(weights)-1):
            input = tf.matmul(input, weights[i]) + biases[i]
            # relu'(s) is the indicator of s > 0.
            dz = tf.multiply(tf.to_float(input>0), tf.matmul(dz, weights[i]))
            input = tf.nn.relu(input)
        dz = tf.matmul(dz, weights[-1])
    elif activation == 'sigmoid':
        for i in range(len(weights)-1):
            input = tf.matmul(input, weights[i]) + biases[i]
            input = tf.sigmoid(input)
            # sigmoid'(s) = sig*(1-sig); `input` already holds sig = sigmoid(s).
            dz = tf.multiply(tf.multiply(input, 1-input), tf.matmul(dz, weights[i]))
        dz = tf.matmul(dz, weights[-1])
    else:
        # Linear network: the derivative passes through the same linear maps.
        for i in range(len(weights)-1):
            dz = tf.matmul(dz, weights[i])
        dz = tf.matmul(dz, weights[-1])
    return dz
def z_derivative_order2(input, dx, ddx, weights, biases, activation='elu'):
    """
    Compute the first and second order time derivatives by propagating through the network.

    Arguments:
        input - 2D tensorflow array, input to the network. Dimensions are number of time points
        by number of state variables.
        dx - First order time derivatives of the input to the network.
        ddx - Second order time derivatives of the input to the network.
        weights - List of tensorflow arrays containing the network weights
        biases - List of tensorflow arrays containing the network biases
        activation - String specifying which activation function to use. Options are
        'elu' (exponential linear unit), 'relu' (rectified linear unit), 'sigmoid',
        or linear.

    Returns:
        dz - Tensorflow array, first order time derivatives of the network output.
        ddz - Tensorflow array, second order time derivatives of the network output.
    """
    # Second-order chain rule per layer, with s = input@W + b and ds = dz@W:
    #   dz  <- f'(s) * ds
    #   ddz <- f''(s) * ds**2 + f'(s) * (ddz @ W)
    # The final layer is affine, hence the trailing matmuls.
    dz = dx
    ddz = ddx
    if activation == 'elu':
        for i in range(len(weights)-1):
            input = tf.matmul(input, weights[i]) + biases[i]
            dz_prev = tf.matmul(dz, weights[i])
            # elu'(s) = min(exp(s), 1); elu''(s) = exp(s) for s < 0, else 0.
            elu_derivative = tf.minimum(tf.exp(input),1.0)
            elu_derivative2 = tf.multiply(tf.exp(input), tf.to_float(input<0))
            dz = tf.multiply(elu_derivative, dz_prev)
            ddz = tf.multiply(elu_derivative2, tf.square(dz_prev)) \
                  + tf.multiply(elu_derivative, tf.matmul(ddz, weights[i]))
            input = tf.nn.elu(input)
        dz = tf.matmul(dz, weights[-1])
        ddz = tf.matmul(ddz, weights[-1])
    elif activation == 'relu':
        # NOTE: currently having trouble assessing accuracy of 2nd derivative due to discontinuity
        # (relu'' is zero almost everywhere, so the f'' term is dropped here).
        for i in range(len(weights)-1):
            input = tf.matmul(input, weights[i]) + biases[i]
            relu_derivative = tf.to_float(input>0)
            dz = tf.multiply(relu_derivative, tf.matmul(dz, weights[i]))
            ddz = tf.multiply(relu_derivative, tf.matmul(ddz, weights[i]))
            input = tf.nn.relu(input)
        dz = tf.matmul(dz, weights[-1])
        ddz = tf.matmul(ddz, weights[-1])
    elif activation == 'sigmoid':
        for i in range(len(weights)-1):
            input = tf.matmul(input, weights[i]) + biases[i]
            input = tf.sigmoid(input)
            dz_prev = tf.matmul(dz, weights[i])
            # sigmoid'(s) = sig*(1-sig); sigmoid''(s) = sig*(1-sig)*(1-2*sig),
            # written in terms of sig = sigmoid(s), which `input` now holds.
            sigmoid_derivative = tf.multiply(input, 1-input)
            sigmoid_derivative2 = tf.multiply(sigmoid_derivative, 1 - 2*input)
            dz = tf.multiply(sigmoid_derivative, dz_prev)
            ddz = tf.multiply(sigmoid_derivative2, tf.square(dz_prev)) \
                  + tf.multiply(sigmoid_derivative, tf.matmul(ddz, weights[i]))
        dz = tf.matmul(dz, weights[-1])
        ddz = tf.matmul(ddz, weights[-1])
    else:
        # Linear network: both derivatives pass through the same linear maps.
        for i in range(len(weights)-1):
            dz = tf.matmul(dz, weights[i])
            ddz = tf.matmul(ddz, weights[i])
        dz = tf.matmul(dz, weights[-1])
        ddz = tf.matmul(ddz, weights[-1])
    return dz,ddz
| 43.714571 | 176 | 0.641386 |
5fa009cfe078a529c246bcfbf937f1affbfd168d | 1,813 | py | Python | 2020/Python/day24/part1.py | tymscar/Advent-Of-Code | cd7b96b0253191e236bd704b0d8b5540fb3e8ef6 | [
"MIT"
] | 4 | 2019-12-08T08:20:53.000Z | 2021-12-17T12:04:11.000Z | 2020/Python/day24/part1.py | tymscar/AdventOfCode2018 | 9742ddb6bbbc917062baad87d6b6de75375f1ae8 | [
"MIT"
] | null | null | null | 2020/Python/day24/part1.py | tymscar/AdventOfCode2018 | 9742ddb6bbbc917062baad87d6b6de75375f1ae8 | [
"MIT"
] | 4 | 2020-12-11T22:10:24.000Z | 2021-12-25T22:39:05.000Z | class Tile():
def __init__(self, pos):
self.white = True
self.pos = pos
def flip(self):
self.white = not self.white
def get_new_coord(from_tile, instructions):
    """Follow a list of hex-direction instructions from a tile's position.

    Arguments:
        from_tile - object with a `pos` attribute holding the (x, y) axial
            coordinate of the starting hex tile.
        instructions - iterable of direction tokens: "e", "w", "ne", "nw",
            "se", "sw".

    Returns:
        (x, y) tuple: the axial coordinate reached after applying all
        instructions in order.

    Raises:
        KeyError - if an instruction is not one of the six hex directions
        (the previous implementation silently ignored unknown tokens).
    """
    # Axial-coordinate offset for each of the six hex directions; replaces
    # the former if/elif chain with a single table lookup.
    offsets = {
        "ne": (0, 1),
        "e": (1, 0),
        "se": (1, -1),
        "sw": (0, -1),
        "w": (-1, 0),
        "nw": (-1, 1),
    }
    x, y = from_tile.pos
    for instruction in instructions:
        dx, dy = offsets[instruction]
        x += dx
        y += dy
    return (x, y)
def part_1():
    """Solve Advent of Code 2020 day 24, part 1.

    Reads one hex-walk instruction string per line from input.txt, flips
    the tile each walk ends on, and returns how many tiles end up black.
    """
    origin = Tile((0, 0))
    black = set()  # coordinates of tiles currently black-side up
    # BUG FIX: the file handle was previously opened without ever being
    # closed; a `with` block closes it deterministically.
    with open('input.txt', 'r') as file:
        for line in file:
            line = line.strip("\n")
            instructions = []
            index = 0
            while index < len(line):
                # "e" and "w" are one-character tokens; the other four
                # directions ("ne", "nw", "se", "sw") are two characters.
                if line[index] in ("e", "w"):
                    instructions.append(line[index])
                    index += 1
                else:
                    instructions.append(line[index:index + 2])
                    index += 2
            coord = get_new_coord(origin, instructions)
            # Flipping twice returns a tile to white, so toggle membership
            # in the black set instead of tracking Tile objects per coord.
            if coord in black:
                black.remove(coord)
            else:
                black.add(coord)
    return len(black)

print(part_1())
4189b8192a7296f660a1c130442ad801cb6f3460 | 43,622 | py | Python | sympy/utilities/enumerative.py | Sushmita10062002/sympy | a6051c36e180e980e9f430502d906ada221f08a2 | [
"BSD-3-Clause"
] | 1 | 2022-01-31T16:02:46.000Z | 2022-01-31T16:02:46.000Z | sympy/utilities/enumerative.py | Sushmita10062002/sympy | a6051c36e180e980e9f430502d906ada221f08a2 | [
"BSD-3-Clause"
] | 3 | 2022-02-04T14:45:16.000Z | 2022-02-04T14:45:45.000Z | sympy/utilities/enumerative.py | Sushmita10062002/sympy | a6051c36e180e980e9f430502d906ada221f08a2 | [
"BSD-3-Clause"
] | 1 | 2022-02-04T13:50:29.000Z | 2022-02-04T13:50:29.000Z | """
Algorithms and classes to support enumerative combinatorics.
Currently just multiset partitions, but more could be added.
Terminology (following Knuth, algorithm 7.1.2.5M TAOCP)
*multiset* aaabbcccc has a *partition* aaabc | bccc
The submultisets, aaabc and bccc of the partition are called
*parts*, or sometimes *vectors*. (Knuth notes that multiset
partitions can be thought of as partitions of vectors of integers,
where the ith element of the vector gives the multiplicity of
element i.)
The values a, b and c are *components* of the multiset. These
correspond to elements of a set, but in a multiset can be present
with a multiplicity greater than 1.
The algorithm deserves some explanation.
Think of the part aaabc from the multiset above. If we impose an
ordering on the components of the multiset, we can represent a part
with a vector, in which the value of the first element of the vector
corresponds to the multiplicity of the first component in that
part. Thus, aaabc can be represented by the vector [3, 1, 1]. We
can also define an ordering on parts, based on the lexicographic
ordering of the vector (leftmost vector element, i.e., the element
with the smallest component number, is the most significant), so
that [3, 1, 1] > [3, 1, 0] and [3, 1, 1] > [2, 1, 4]. The ordering
on parts can be extended to an ordering on partitions: First, sort
the parts in each partition, left-to-right in decreasing order. Then
partition A is greater than partition B if A's leftmost/greatest
part is greater than B's leftmost part. If the leftmost parts are
equal, compare the second parts, and so on.
In this ordering, the greatest partition of a given multiset has only
one part. The least partition is the one in which the components
are spread out, one per part.
The enumeration algorithms in this file yield the partitions of the
argument multiset in decreasing order. The main data structure is a
stack of parts, corresponding to the current partition. An
important invariant is that the parts on the stack are themselves in
decreasing order. This data structure is decremented to find the
next smaller partition. Most often, decrementing the partition will
only involve adjustments to the smallest parts at the top of the
stack, much as adjacent integers *usually* differ only in their last
few digits.
Knuth's algorithm uses two main operations on parts:
Decrement - change the part so that it is smaller in the
(vector) lexicographic order, but reduced by the smallest amount possible.
For example, if the multiset has vector [5,
3, 1], and the bottom/greatest part is [4, 2, 1], this part would
decrement to [4, 2, 0], while [4, 0, 0] would decrement to [3, 3,
1]. A singleton part is never decremented -- [1, 0, 0] is not
decremented to [0, 3, 1]. Instead, the decrement operator needs
to fail for this case. In Knuth's pseudocode, the decrement
operator is step m5.
Spread unallocated multiplicity - Once a part has been decremented,
it cannot be the rightmost part in the partition. There is some
multiplicity that has not been allocated, and new parts must be
created above it in the stack to use up this multiplicity. To
maintain the invariant that the parts on the stack are in
decreasing order, these new parts must be less than or equal to
the decremented part.
For example, if the multiset is [5, 3, 1], and its most
significant part has just been decremented to [5, 3, 0], the
spread operation will add a new part so that the stack becomes
[[5, 3, 0], [0, 0, 1]]. If the most significant part (for the
same multiset) has been decremented to [2, 0, 0] the stack becomes
[[2, 0, 0], [2, 0, 0], [1, 3, 1]]. In the pseudocode, the spread
operation for one part is step m2. The complete spread operation
is a loop of steps m2 and m3.
In order to facilitate the spread operation, Knuth stores, for each
component of each part, not just the multiplicity of that component
in the part, but also the total multiplicity available for this
component in this part or any lesser part above it on the stack.
One added twist is that Knuth does not represent the part vectors as
arrays. Instead, he uses a sparse representation, in which a
component of a part is represented as a component number (c), plus
the multiplicity of the component in that part (v) as well as the
total multiplicity available for that component (u). This saves
time that would be spent skipping over zeros.
"""
class PartComponent:
    """Internal class used in support of the multiset partitions
    enumerators and the associated visitor functions.

    Represents one component of one part of the current partition.

    A stack of these, plus an auxiliary frame array, f, represents a
    partition of the multiset.

    Knuth's pseudocode makes c, u, and v separate arrays.
    """

    __slots__ = ('c', 'u', 'v')

    def __init__(self):
        # c: component number.
        self.c = 0
        # u: the as-yet unpartitioned amount in component c *before* it is
        # allocated by this triple.  Invariant: the next higher triple for
        # this component (if there is one) has u - v in its u attribute.
        self.u = 0
        # v: amount of component c in the current part (v <= u).
        self.v = 0

    def __repr__(self):
        """Compact rendering for debug/algorithm animation purposes."""
        return 'c:%d u:%d v:%d' % (self.c, self.u, self.v)

    def __eq__(self, other):
        """Value-oriented equality, which is useful for testers."""
        if not isinstance(other, self.__class__):
            return False
        return (self.c, self.u, self.v) == (other.c, other.u, other.v)

    def __ne__(self, other):
        """Defined for consistency with __eq__."""
        return not self == other
# This function tries to be a faithful implementation of algorithm
# 7.1.2.5M in Volume 4A, Combinatorial Algorithms, Part 1, of The Art
# of Computer Programming, by Donald Knuth. This includes using
# (mostly) the same variable names, etc. This makes for rather
# low-level Python.
# Changes from Knuth's pseudocode include
# - use PartComponent struct/object instead of 3 arrays
# - make the function a generator
# - map (with some difficulty) the GOTOs to Python control structures.
# - Knuth uses 1-based numbering for components, this code is 0-based
# - renamed variable l to lpart.
# - flag variable x takes on values True/False instead of 1/0
#
def multiset_partitions_taocp(multiplicities):
    """Enumerates partitions of a multiset.

    Parameters
    ==========

    multiplicities
         list of integer multiplicities of the components of the multiset.

    Yields
    ======

    state
        Internal data structure which encodes a particular partition.
        This output is then usually processed by a visitor function
        which combines the information from this data structure with
        the components themselves to produce an actual partition.

        Unless they wish to create their own visitor function, users will
        have little need to look inside this data structure.  But, for
        reference, it is a 3-element list with components:

        f
            is a frame array, which is used to divide pstack into parts.

        lpart
            points to the base of the topmost part.

        pstack
            is an array of PartComponent objects.

        The ``state`` output offers a peek into the internal data
        structures of the enumeration function.  The client should
        treat this as read-only; any modification of the data
        structure will cause unpredictable (and almost certainly
        incorrect) results.  Also, the components of ``state`` are
        modified in place at each iteration.  Hence, the visitor must
        be called at each loop iteration.  Accumulating the ``state``
        instances and processing them later will not work.

    Examples
    ========

    >>> from sympy.utilities.enumerative import list_visitor
    >>> from sympy.utilities.enumerative import multiset_partitions_taocp
    >>> # variables components and multiplicities represent the multiset 'abb'
    >>> components = 'ab'
    >>> multiplicities = [1, 2]
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> list(list_visitor(state, components) for state in states)
    [[['a', 'b', 'b']],
    [['a', 'b'], ['b']],
    [['a'], ['b', 'b']],
    [['a'], ['b'], ['b']]]

    See Also
    ========

    sympy.utilities.iterables.multiset_partitions: Takes a multiset
        as input and directly yields multiset partitions.  It
        dispatches to a number of functions, including this one, for
        implementation.  Most users will find it more convenient to
        use than multiset_partitions_taocp.

    """
    # This follows Knuth's algorithm 7.1.2.5M step for step; the step
    # labels (M1..M6) below match his pseudocode.

    # Important variables.
    # m is the number of components, i.e., number of distinct elements
    m = len(multiplicities)
    # n is the cardinality, total number of elements whether or not distinct
    n = sum(multiplicities)

    # The main data structure, f segments pstack into parts.  See
    # list_visitor() for example code indicating how this internal
    # state corresponds to a partition.

    # Note: allocation of space for stack is conservative.  Knuth's
    # exercise 7.2.1.5.68 gives some indication of how to tighten this
    # bound, but this is not implemented.
    pstack = [PartComponent() for i in range(n * m + 1)]
    f = [0] * (n + 1)

    # Step M1 in Knuth (Initialize)
    # Initial state - entire multiset in one part.
    for j in range(m):
        ps = pstack[j]
        ps.c = j
        ps.u = multiplicities[j]
        ps.v = multiplicities[j]

    # Other variables
    f[0] = 0
    a = 0
    lpart = 0
    f[1] = m
    b = m  # in general, current stack frame is from a to b - 1

    while True:
        # Spread loop: keep copying the unallocated remainder of the
        # current part into a new (smaller or equal) part above it until
        # nothing is left over.
        while True:
            # Step M2 (Subtract v from u)
            # Copy frame [a, b) into [b, k), setting each new u to the
            # remainder u - v of the triple below it.
            j = a
            k = b
            x = False
            while j < b:
                pstack[k].u = pstack[j].u - pstack[j].v
                if pstack[k].u == 0:
                    x = True
                elif not x:
                    # Still tied with the part below: the new part may not
                    # exceed it, so clamp v at the available amount.
                    pstack[k].c = pstack[j].c
                    pstack[k].v = min(pstack[j].v, pstack[k].u)
                    x = pstack[k].u < pstack[j].v
                    k = k + 1
                else:  # x is True
                    # Already strictly less than the part below; take all
                    # remaining multiplicity for this component.
                    pstack[k].c = pstack[j].c
                    pstack[k].v = pstack[k].u
                    k = k + 1
                j = j + 1
                # Note: x is True iff v has changed

            # Step M3 (Push if nonzero.)
            if k > b:
                a = b
                b = k
                lpart = lpart + 1
                f[lpart + 1] = b
                # Return to M2
            else:
                break  # Continue to M4

        # M4  Visit a partition
        state = [f, lpart, pstack]
        yield state

        # M5 (Decrease v)
        # Find the next smaller partition by decrementing the topmost
        # part that can legally be decremented; backtrack when it can't.
        while True:
            j = b - 1
            while (pstack[j].v == 0):
                j = j - 1
            if j == a and pstack[j].v == 1:
                # M6 (Backtrack)
                # A singleton part is never decremented; pop it instead.
                if lpart == 0:
                    return
                lpart = lpart - 1
                b = a
                a = f[lpart]
                # Return to M5
            else:
                pstack[j].v = pstack[j].v - 1
                # Everything above the decremented position reverts to
                # its maximum available multiplicity.
                for k in range(j + 1, b):
                    pstack[k].v = pstack[k].u
                break  # GOTO M2
# --------------- Visitor functions for multiset partitions ---------------
# A visitor takes the partition state generated by
# multiset_partitions_taocp or other enumerator, and produces useful
# output (such as the actual partition).
def factoring_visitor(state, primes):
    """Use with multiset_partitions_taocp to enumerate the ways a
    number can be expressed as a product of factors.  For this usage,
    the exponents of the prime factors of a number are arguments to
    the partition enumerator, while the corresponding prime factors
    are input here.
    Each part of the partition supplies the exponents for one factor:
    component ``c`` with multiplicity ``v`` contributes
    ``primes[c]**v`` to that factor.
    Examples
    ========
    To enumerate the factorings of a number we can think of the elements of the
    partition as being the prime factors and the multiplicities as being their
    exponents.
    >>> from sympy.utilities.enumerative import factoring_visitor
    >>> from sympy.utilities.enumerative import multiset_partitions_taocp
    >>> from sympy import factorint
    >>> primes, multiplicities = zip(*factorint(24).items())
    >>> primes
    (2, 3)
    >>> multiplicities
    (3, 1)
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> list(factoring_visitor(state, primes) for state in states)
    [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], [6, 2, 2], [2, 2, 2, 3]]
    """
    f, lpart, pstack = state
    factors = []
    # Walk consecutive frame boundaries; each (base, top) pair
    # delimits one part of the partition within pstack.
    for base, top in zip(f[:lpart + 1], f[1:lpart + 2]):
        prod = 1
        for comp in pstack[base:top]:
            if comp.v > 0:
                prod *= primes[comp.c] ** comp.v
        factors.append(prod)
    return factors
def list_visitor(state, components):
    """Return a list of lists to represent the partition.
    Each inner list is one part; component ``c`` with multiplicity
    ``v`` appears as ``components[c]`` repeated ``v`` times.
    Examples
    ========
    >>> from sympy.utilities.enumerative import list_visitor
    >>> from sympy.utilities.enumerative import multiset_partitions_taocp
    >>> states = multiset_partitions_taocp([1, 2, 1])
    >>> s = next(states)
    >>> list_visitor(s, 'abc')  # for multiset 'a b b c'
    [['a', 'b', 'b', 'c']]
    >>> s = next(states)
    >>> list_visitor(s, [1, 2, 3])  # for multiset '1 2 2 3'
    [[1, 2, 2], [3]]
    """
    f, lpart, pstack = state
    # The frame array f segments pstack: part i occupies
    # pstack[f[i]:f[i + 1]].  Zero-multiplicity components are skipped.
    return [
        [elem
         for comp in pstack[f[i]:f[i + 1]] if comp.v > 0
         for elem in [components[comp.c]] * comp.v]
        for i in range(lpart + 1)
    ]
class MultisetPartitionTraverser():
    """
    Has methods to ``enumerate`` and ``count`` the partitions of a multiset.
    This implements a refactored and extended version of Knuth's algorithm
    7.1.2.5M [AOCP]_.
    The enumeration methods of this class are generators and return
    data structures which can be interpreted by the same visitor
    functions used for the output of ``multiset_partitions_taocp``.
    Examples
    ========
    >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
    >>> m = MultisetPartitionTraverser()
    >>> m.count_partitions([4,4,4,2])
    127750
    >>> m.count_partitions([3,3,3])
    686
    See Also
    ========
    multiset_partitions_taocp
    sympy.utilities.iterables.multiset_partitions
    References
    ==========
    .. [AOCP] Algorithm 7.1.2.5M in Volume 4A, Combinatoral Algorithms,
           Part 1, of The Art of Computer Programming, by Donald Knuth.
    .. [Factorisatio] On a Problem of Oppenheim concerning
           "Factorisatio Numerorum" E. R. Canfield, Paul Erdos, Carl
           Pomerance, JOURNAL OF NUMBER THEORY, Vol. 17, No. 1. August
           1983.  See section 7 for a description of an algorithm
           similar to Knuth's.
    .. [Yorgey] Generating Multiset Partitions, Brent Yorgey, The
           Monad.Reader, Issue 8, September 2007.
    """
    def __init__(self):
        # When True, db_trace() raises; see its docstring.
        self.debug = False
        # TRACING variables.  These are useful for gathering
        # statistics on the algorithm itself, but have no particular
        # benefit to a user of the code.
        self.k1 = 0  # count of failures of Knuth's test 7.2.1.5.69
        self.k2 = 0  # count of failures of the "oddball" test 3
        self.p1 = 0  # count of early lpart-vs-ub rejections
        # Enumeration state (allocated by _initialize_enumeration):
        self.pstack = None  # partition stack of PartComponent objects
        self.f = None       # frame array segmenting pstack into parts
        self.lpart = 0      # index of the topmost part
        self.discarded = 0  # iterations that did not yield a partition
        # dp_stack is list of lists of (part_key, start_count) pairs
        self.dp_stack = []
        # dp_map is map part_key-> count, where count represents the
        # number of multiset which are descendants of a part with this
        # key, **or any of its decrements**
        # Thus, when we find a part in the map, we add its count
        # value to the running total, cut off the enumeration, and
        # backtrack
        if not hasattr(self, 'dp_map'):
            self.dp_map = {}
    def db_trace(self, msg):
        """Useful for understanding/debugging the algorithms.  Not
        generally activated in end-user code."""
        if self.debug:
            # XXX: animation_visitor is undefined... Clearly this does not
            # work and was not tested. Previous code in comments below.
            raise RuntimeError
            #letters = 'abcdefghijklmnopqrstuvwxyz'
            #state = [self.f, self.lpart, self.pstack]
            #print("DBG:", msg,
            #      ["".join(part) for part in list_visitor(state, letters)],
            #      animation_visitor(state))
    #
    # Helper methods for enumeration
    #
    def _initialize_enumeration(self, multiplicities):
        """Allocates and initializes the partition stack.
        This is called from the enumeration/counting routines, so
        there is no need to call it separately."""
        num_components = len(multiplicities)
        # cardinality is the total number of elements, whether or not distinct
        cardinality = sum(multiplicities)
        # pstack is the partition stack, which is segmented by
        # f into parts.
        # NOTE: allocation is conservative (see Knuth exercise 7.2.1.5.68).
        self.pstack = [PartComponent() for i in
                       range(num_components * cardinality + 1)]
        self.f = [0] * (cardinality + 1)
        # Initial state - entire multiset in one part.
        for j in range(num_components):
            ps = self.pstack[j]
            ps.c = j
            ps.u = multiplicities[j]
            ps.v = multiplicities[j]
        self.f[0] = 0
        self.f[1] = num_components
        self.lpart = 0
    # The decrement_part() method corresponds to step M5 in Knuth's
    # algorithm.  This is the base version for enum_all().  Modified
    # versions of this method are needed if we want to restrict
    # sizes of the partitions produced.
    def decrement_part(self, part):
        """Decrements part (a subrange of pstack), if possible, returning
        True iff the part was successfully decremented.
        If you think of the v values in the part as a multi-digit
        integer (least significant digit on the right) this is
        basically decrementing that integer, but with the extra
        constraint that the leftmost digit cannot be decremented to 0.
        Parameters
        ==========
        part
           The part, represented as a list of PartComponent objects,
           which is to be decremented.
        """
        plen = len(part)
        for j in range(plen - 1, -1, -1):
            # Leftmost component (j == 0) may not drop to 0; others may.
            if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0:
                # found val to decrement
                part[j].v -= 1
                # Reset trailing parts back to maximum
                for k in range(j + 1, plen):
                    part[k].v = part[k].u
                return True
        return False
    # Version to allow number of parts to be bounded from above.
    # Corresponds to (a modified) step M5.
    def decrement_part_small(self, part, ub):
        """Decrements part (a subrange of pstack), if possible, returning
        True iff the part was successfully decremented.
        Parameters
        ==========
        part
            part to be decremented (topmost part on the stack)
        ub
            the maximum number of parts allowed in a partition
            returned by the calling traversal.
        Notes
        =====
        The goal of this modification of the ordinary decrement method
        is to fail (meaning that the subtree rooted at this part is to
        be skipped) when it can be proved that this part can only have
        child partitions which are larger than allowed by ``ub``. If a
        decision is made to fail, it must be accurate, otherwise the
        enumeration will miss some partitions.  But, it is OK not to
        capture all the possible failures -- if a part is passed that
        shouldn't be, the resulting too-large partitions are filtered
        by the enumeration one level up.  However, as is usual in
        constrained enumerations, failing early is advantageous.
        The tests used by this method catch the most common cases,
        although this implementation is by no means the last word on
        this problem.  The tests include:
        1) ``lpart`` must be less than ``ub`` by at least 2.  This is because
           once a part has been decremented, the partition
           will gain at least one child in the spread step.
        2) If the leading component of the part is about to be
           decremented, check for how many parts will be added in
           order to use up the unallocated multiplicity in that
           leading component, and fail if this number is greater than
           allowed by ``ub``.  (See code for the exact expression.)  This
           test is given in the answer to Knuth's problem 7.2.1.5.69.
        3) If there is *exactly* enough room to expand the leading
           component by the above test, check the next component (if
           it exists) once decrementing has finished.  If this has
           ``v == 0``, this next component will push the expansion over the
           limit by 1, so fail.
        """
        if self.lpart >= ub - 1:
            self.p1 += 1  # increment to keep track of usefulness of tests
            return False
        plen = len(part)
        for j in range(plen - 1, -1, -1):
            # Knuth's mod, (answer to problem 7.2.1.5.69)
            if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u:
                self.k1 += 1
                return False
            if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0:
                # found val to decrement
                part[j].v -= 1
                # Reset trailing parts back to maximum
                for k in range(j + 1, plen):
                    part[k].v = part[k].u
                # Have now decremented part, but are we doomed to
                # failure when it is expanded?  Check one oddball case
                # that turns out to be surprisingly common - exactly
                # enough room to expand the leading component, but no
                # room for the second component, which has v=0.
                if (plen > 1 and part[1].v == 0 and
                    (part[0].u - part[0].v) ==
                        ((ub - self.lpart - 1) * part[0].v)):
                    self.k2 += 1
                    self.db_trace("Decrement fails test 3")
                    return False
                return True
        return False
    def decrement_part_large(self, part, amt, lb):
        """Decrements part, while respecting size constraint.
        A part can have no children which are of sufficient size (as
        indicated by ``lb``) unless that part has sufficient
        unallocated multiplicity.  When enforcing the size constraint,
        this method will decrement the part (if necessary) by an
        amount needed to ensure sufficient unallocated multiplicity.
        Returns True iff the part was successfully decremented.
        Parameters
        ==========
        part
            part to be decremented (topmost part on the stack)
        amt
            Can only take values 0 or 1.  A value of 1 means that the
            part must be decremented, and then the size constraint is
            enforced.  A value of 0 means just to enforce the ``lb``
            size constraint.
        lb
            The partitions produced by the calling enumeration must
            have more parts than this value.
        """
        if amt == 1:
            # In this case we always need to increment, *before*
            # enforcing the "sufficient unallocated multiplicity"
            # constraint.  Easiest for this is just to call the
            # regular decrement method.
            if not self.decrement_part(part):
                return False
        # Next, perform any needed additional decrementing to respect
        # "sufficient unallocated multiplicity" (or fail if this is
        # not possible).
        min_unalloc = lb - self.lpart
        if min_unalloc <= 0:
            return True
        total_mult = sum(pc.u for pc in part)
        total_alloc = sum(pc.v for pc in part)
        if total_mult <= min_unalloc:
            return False
        deficit = min_unalloc - (total_mult - total_alloc)
        if deficit <= 0:
            return True
        # Free up `deficit` units of multiplicity, scanning from the
        # rightmost component leftward.
        for i in range(len(part) - 1, -1, -1):
            if i == 0:
                if part[0].v > deficit:
                    part[0].v -= deficit
                    return True
                else:
                    return False  # This shouldn't happen, due to above check
            else:
                if part[i].v >= deficit:
                    part[i].v -= deficit
                    return True
                else:
                    deficit -= part[i].v
                    part[i].v = 0
    def decrement_part_range(self, part, lb, ub):
        """Decrements part (a subrange of pstack), if possible, returning
        True iff the part was successfully decremented.
        Parameters
        ==========
        part
            part to be decremented (topmost part on the stack)
        ub
            the maximum number of parts allowed in a partition
            returned by the calling traversal.
        lb
            The partitions produced by the calling enumeration must
            have more parts than this value.
        Notes
        =====
        Combines the constraints of _small and _large decrement
        methods.  If returns success, part has been decremented at
        least once, but perhaps by quite a bit more if needed to meet
        the lb constraint.
        """
        # Constraint in the range case is just enforcing both the
        # constraints from _small and _large cases.  Note the 0 as the
        # second argument to the _large call -- this is the signal to
        # decrement only as needed to for constraint enforcement.  The
        # short circuiting and left-to-right order of the 'and'
        # operator is important for this to work correctly.
        return self.decrement_part_small(part, ub) and \
            self.decrement_part_large(part, 0, lb)
    def spread_part_multiplicity(self):
        """Returns True if a new part has been created, and
        adjusts pstack, f and lpart as needed.
        Notes
        =====
        Spreads unallocated multiplicity from the current top part
        into a new part created above the current on the stack.  This
        new part is constrained to be less than or equal to the old in
        terms of the part ordering.
        This call does nothing (and returns False) if the current top
        part has no unallocated multiplicity.
        """
        j = self.f[self.lpart]  # base of current top part
        k = self.f[self.lpart + 1]  # ub of current; potential base of next
        base = k  # save for later comparison
        changed = False  # Set to true when the new part (so far) is
                         # strictly less than (as opposed to less than
                         # or equal) to the old.
        for j in range(self.f[self.lpart], self.f[self.lpart + 1]):
            # New component's u is the multiplicity left over after the
            # current top part takes its v.
            self.pstack[k].u = self.pstack[j].u - self.pstack[j].v
            if self.pstack[k].u == 0:
                changed = True
            else:
                self.pstack[k].c = self.pstack[j].c
                if changed:  # Put all available multiplicity in this part
                    self.pstack[k].v = self.pstack[k].u
                else:  # Still maintaining ordering constraint
                    if self.pstack[k].u < self.pstack[j].v:
                        self.pstack[k].v = self.pstack[k].u
                        changed = True
                    else:
                        self.pstack[k].v = self.pstack[j].v
                k = k + 1
        if k > base:
            # Adjust for the new part on stack
            self.lpart = self.lpart + 1
            self.f[self.lpart + 1] = k
            return True
        return False
    def top_part(self):
        """Return current top part on the stack, as a slice of pstack.
        """
        return self.pstack[self.f[self.lpart]:self.f[self.lpart + 1]]
    # Same interface and functionality as multiset_partitions_taocp(),
    # but some might find this refactored version easier to follow.
    def enum_all(self, multiplicities):
        """Enumerate the partitions of a multiset.
        Examples
        ========
        >>> from sympy.utilities.enumerative import list_visitor
        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_all([2,2])
        >>> list(list_visitor(state, 'ab') for state in states)
        [[['a', 'a', 'b', 'b']],
        [['a', 'a', 'b'], ['b']],
        [['a', 'a'], ['b', 'b']],
        [['a', 'a'], ['b'], ['b']],
        [['a', 'b', 'b'], ['a']],
        [['a', 'b'], ['a', 'b']],
        [['a', 'b'], ['a'], ['b']],
        [['a'], ['a'], ['b', 'b']],
        [['a'], ['a'], ['b'], ['b']]]
        See Also
        ========
        multiset_partitions_taocp():
            which provides the same result as this method, but is
            about twice as fast.  Hence, enum_all is primarily useful
            for testing.  Also see the function for a discussion of
            states and visitors.
        """
        self._initialize_enumeration(multiplicities)
        while True:
            # Spread until the top part has no unallocated multiplicity.
            while self.spread_part_multiplicity():
                pass
            # M4  Visit a partition
            state = [self.f, self.lpart, self.pstack]
            yield state
            # M5 (Decrease v)
            while not self.decrement_part(self.top_part()):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1
    def enum_small(self, multiplicities, ub):
        """Enumerate multiset partitions with no more than ``ub`` parts.
        Equivalent to enum_range(multiplicities, 0, ub)
        Parameters
        ==========
        multiplicities
             list of multiplicities of the components of the multiset.
        ub
            Maximum number of parts
        Examples
        ========
        >>> from sympy.utilities.enumerative import list_visitor
        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_small([2,2], 2)
        >>> list(list_visitor(state, 'ab') for state in states)
        [[['a', 'a', 'b', 'b']],
        [['a', 'a', 'b'], ['b']],
        [['a', 'a'], ['b', 'b']],
        [['a', 'b', 'b'], ['a']],
        [['a', 'b'], ['a', 'b']]]
        The implementation is based, in part, on the answer given to
        exercise 69, in Knuth [AOCP]_.
        See Also
        ========
        enum_all, enum_large, enum_range
        """
        # Keep track of iterations which do not yield a partition.
        # Clearly, we would like to keep this number small.
        self.discarded = 0
        if ub <= 0:
            return
        self._initialize_enumeration(multiplicities)
        while True:
            while self.spread_part_multiplicity():
                self.db_trace('spread 1')
                if self.lpart >= ub:
                    # Too many parts; discard and jump the stack back so
                    # the next decrement lands within the bound.
                    self.discarded += 1
                    self.db_trace('  Discarding')
                    self.lpart = ub - 2
                    break
            else:
                # M4  Visit a partition
                state = [self.f, self.lpart, self.pstack]
                yield state
            # M5 (Decrease v)
            while not self.decrement_part_small(self.top_part(), ub):
                self.db_trace("Failed decrement, going to backtrack")
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1
                self.db_trace("Backtracked to")
            self.db_trace("decrement ok, about to expand")
    def enum_large(self, multiplicities, lb):
        """Enumerate the partitions of a multiset with lb < num(parts)
        Equivalent to enum_range(multiplicities, lb, sum(multiplicities))
        Parameters
        ==========
        multiplicities
            list of multiplicities of the components of the multiset.
        lb
            Number of parts in the partition must be greater than
            this lower bound.
        Examples
        ========
        >>> from sympy.utilities.enumerative import list_visitor
        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_large([2,2], 2)
        >>> list(list_visitor(state, 'ab') for state in states)
        [[['a', 'a'], ['b'], ['b']],
        [['a', 'b'], ['a'], ['b']],
        [['a'], ['a'], ['b', 'b']],
        [['a'], ['a'], ['b'], ['b']]]
        See Also
        ========
        enum_all, enum_small, enum_range
        """
        self.discarded = 0
        if lb >= sum(multiplicities):
            return
        self._initialize_enumeration(multiplicities)
        # Enforce the lb constraint on the initial single-part state.
        self.decrement_part_large(self.top_part(), 0, lb)
        while True:
            good_partition = True
            while self.spread_part_multiplicity():
                if not self.decrement_part_large(self.top_part(), 0, lb):
                    # Failure here should be rare/impossible
                    self.discarded += 1
                    good_partition = False
                    break
            # M4  Visit a partition
            if good_partition:
                state = [self.f, self.lpart, self.pstack]
                yield state
            # M5 (Decrease v)
            while not self.decrement_part_large(self.top_part(), 1, lb):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1
    def enum_range(self, multiplicities, lb, ub):
        """Enumerate the partitions of a multiset with
        ``lb < num(parts) <= ub``.
        In particular, if partitions with exactly ``k`` parts are
        desired, call with ``(multiplicities, k - 1, k)``.  This
        method generalizes enum_all, enum_small, and enum_large.
        Examples
        ========
        >>> from sympy.utilities.enumerative import list_visitor
        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_range([2,2], 1, 2)
        >>> list(list_visitor(state, 'ab') for state in states)
        [[['a', 'a', 'b'], ['b']],
        [['a', 'a'], ['b', 'b']],
        [['a', 'b', 'b'], ['a']],
        [['a', 'b'], ['a', 'b']]]
        """
        # combine the constraints of the _large and _small
        # enumerations.
        self.discarded = 0
        if ub <= 0 or lb >= sum(multiplicities):
            return
        self._initialize_enumeration(multiplicities)
        self.decrement_part_large(self.top_part(), 0, lb)
        while True:
            good_partition = True
            while self.spread_part_multiplicity():
                self.db_trace("spread 1")
                if not self.decrement_part_large(self.top_part(), 0, lb):
                    # Failure here - possible in range case?
                    self.db_trace("  Discarding (large cons)")
                    self.discarded += 1
                    good_partition = False
                    break
                elif self.lpart >= ub:
                    self.discarded += 1
                    good_partition = False
                    self.db_trace("  Discarding small cons")
                    self.lpart = ub - 2
                    break
            # M4  Visit a partition
            if good_partition:
                state = [self.f, self.lpart, self.pstack]
                yield state
            # M5 (Decrease v)
            while not self.decrement_part_range(self.top_part(), lb, ub):
                self.db_trace("Failed decrement, going to backtrack")
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1
                self.db_trace("Backtracked to")
            self.db_trace("decrement ok, about to expand")
    def count_partitions_slow(self, multiplicities):
        """Returns the number of partitions of a multiset whose elements
        have the multiplicities given in ``multiplicities``.
        Primarily for comparison purposes.  It follows the same path as
        enumerate, and counts, rather than generates, the partitions.
        See Also
        ========
        count_partitions
            Has the same calling interface, but is much faster.
        """
        # number of partitions so far in the enumeration
        self.pcount = 0
        self._initialize_enumeration(multiplicities)
        while True:
            while self.spread_part_multiplicity():
                pass
            # M4  Visit (count) a partition
            self.pcount += 1
            # M5 (Decrease v)
            while not self.decrement_part(self.top_part()):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return self.pcount
                self.lpart -= 1
    def count_partitions(self, multiplicities):
        """Returns the number of partitions of a multiset whose components
        have the multiplicities given in ``multiplicities``.
        For larger counts, this method is much faster than calling one
        of the enumerators and counting the result.  Uses dynamic
        programming to cut down on the number of nodes actually
        explored.  The dictionary used in order to accelerate the
        counting process is stored in the ``MultisetPartitionTraverser``
        object and persists across calls.  If the user does not
        expect to call ``count_partitions`` for any additional
        multisets, the object should be cleared to save memory.  On
        the other hand, the cache built up from one count run can
        significantly speed up subsequent calls to ``count_partitions``,
        so it may be advantageous not to clear the object.
        Examples
        ========
        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
        >>> m = MultisetPartitionTraverser()
        >>> m.count_partitions([9,8,2])
        288716
        >>> m.count_partitions([2,2])
        9
        >>> del m
        Notes
        =====
        If one looks at the workings of Knuth's algorithm M [AOCP]_, it
        can be viewed as a traversal of a binary tree of parts.  A
        part has (up to) two children, the left child resulting from
        the spread operation, and the right child from the decrement
        operation.  The ordinary enumeration of multiset partitions is
        an in-order traversal of this tree, and with the partitions
        corresponding to paths from the root to the leaves. The
        mapping from paths to partitions is a little complicated,
        since the partition would contain only those parts which are
        leaves or the parents of a spread link, not those which are
        parents of a decrement link.
        For counting purposes, it is sufficient to count leaves, and
        this can be done with a recursive in-order traversal.  The
        number of leaves of a subtree rooted at a particular part is a
        function only of that part itself, so memoizing has the
        potential to speed up the counting dramatically.
        This method follows a computational approach which is similar
        to the hypothetical memoized recursive function, but with two
        differences:
        1) This method is iterative, borrowing its structure from the
           other enumerations and maintaining an explicit stack of
           parts which are in the process of being counted.  (There
           may be multisets which can be counted reasonably quickly by
           this implementation, but which would overflow the default
           Python recursion limit with a recursive implementation.)
        2) Instead of using the part data structure directly, a more
           compact key is constructed.  This saves space, but more
           importantly coalesces some parts which would remain
           separate with physical keys.
        Unlike the enumeration functions, there is currently no _range
        version of count_partitions.  If someone wants to stretch
        their brain, it should be possible to construct one by
        memoizing with a histogram of counts rather than a single
        count, and combining the histograms.
        """
        # number of partitions so far in the enumeration
        self.pcount = 0
        # dp_stack is list of lists of (part_key, start_count) pairs
        self.dp_stack = []
        self._initialize_enumeration(multiplicities)
        pkey = part_key(self.top_part())
        self.dp_stack.append([(pkey, 0), ])
        while True:
            while self.spread_part_multiplicity():
                pkey = part_key(self.top_part())
                if pkey in self.dp_map:
                    # Already have a cached value for the count of the
                    # subtree rooted at this part.  Add it to the
                    # running counter, and break out of the spread
                    # loop.  The -1 below is to compensate for the
                    # leaf that this code path would otherwise find,
                    # and which gets incremented for below.
                    self.pcount += (self.dp_map[pkey] - 1)
                    self.lpart -= 1
                    break
                else:
                    self.dp_stack.append([(pkey, self.pcount), ])
            # M4  count a leaf partition
            self.pcount += 1
            # M5 (Decrease v)
            while not self.decrement_part(self.top_part()):
                # M6 (Backtrack)
                # Every key opened at this level now has a known subtree
                # count; record it in the memo before popping the level.
                for key, oldcount in self.dp_stack.pop():
                    self.dp_map[key] = self.pcount - oldcount
                if self.lpart == 0:
                    return self.pcount
                self.lpart -= 1
            # At this point have successfully decremented the part on
            # the stack and it does not appear in the cache.  It needs
            # to be added to the list at the top of dp_stack
            pkey = part_key(self.top_part())
            self.dp_stack[-1].append((pkey, self.pcount),)
def part_key(part):
    """Helper for MultisetPartitionTraverser.count_partitions that
    creates a key for ``part``, that only includes information which can
    affect the count for that part.  (Any irrelevant information just
    reduces the effectiveness of dynamic programming.)
    The key is a flat tuple interleaving the ``u`` (total) and ``v``
    (allocated) multiplicities of each component, in order.
    Notes
    =====
    This member function is a candidate for future exploration. There
    are likely symmetries that can be exploited to coalesce some
    ``part_key`` values, and thereby save space and improve
    performance.
    """
    # The component number (ps.c) is irrelevant for counting
    # partitions, so it is deliberately left out of the memo key.
    return tuple(value for ps in part for value in (ps.u, ps.v))
| 37.670121 | 79 | 0.590826 |
9de6d2075fbd9e8a5ba259a8ea1b20e500c79f4d | 31,958 | py | Python | pysteps/io/exporters.py | wdewettin/pysteps | 7a093281ab3e00642054708d5073aef72a134b4a | [
"BSD-3-Clause"
] | 285 | 2018-07-11T10:42:43.000Z | 2022-03-23T13:44:54.000Z | pysteps/io/exporters.py | wdewettin/pysteps | 7a093281ab3e00642054708d5073aef72a134b4a | [
"BSD-3-Clause"
] | 246 | 2018-07-16T06:17:12.000Z | 2022-03-22T15:45:08.000Z | pysteps/io/exporters.py | wdewettin/pysteps | 7a093281ab3e00642054708d5073aef72a134b4a | [
"BSD-3-Clause"
] | 97 | 2018-07-12T12:05:45.000Z | 2022-03-31T14:56:31.000Z | # -*- coding: utf-8 -*-
"""
pysteps.io.exporters
====================
Methods for exporting forecasts of 2d precipitation fields into various file
formats.
Each exporter method in this module has its own initialization function that
implements the following interface::
initialize_forecast_exporter_xxx(outpath, outfnprefix, startdate, timestep,
n_timesteps, shape, metadata,
n_ens_members=1,
incremental=None, **kwargs)
where xxx specifies the file format.
This function creates the output files and writes the metadata. See the
documentation of the initialization methods for the format of the output files
and their names. The datasets are written by calling
:py:func:`pysteps.io.exporters.export_forecast_dataset`, and the files are
closed by calling :py:func:`pysteps.io.exporters.close_forecast_files`.
The arguments of initialize_forecast_exporter_xxx are described in the
following table:
.. tabularcolumns:: |p{2cm}|p{2cm}|L|
+---------------+-------------------+-----------------------------------------+
| Argument | Type/values | Description |
+===============+===================+=========================================+
| outpath | str | output path |
+---------------+-------------------+-----------------------------------------+
| outfnprefix | str | prefix of output file names |
+---------------+-------------------+-----------------------------------------+
| startdate | datetime.datetime | start date of the forecast |
+---------------+-------------------+-----------------------------------------+
| timestep | int | length of the forecast time step |
| | | (minutes) |
+---------------+-------------------+-----------------------------------------+
| n_timesteps | int | number of time steps in the forecast |
| | | this argument is ignored if |
| | | incremental is set to 'timestep'. |
+---------------+-------------------+-----------------------------------------+
| shape | tuple | two-element tuple defining the shape |
| | | (height,width) of the forecast grids |
+---------------+-------------------+-----------------------------------------+
| metadata | dict | metadata dictionary containing the |
| | | projection,x1,x2,y1,y2 and unit |
| | | attributes described in the |
| | | documentation of pysteps.io.importers |
+---------------+-------------------+-----------------------------------------+
| n_ens_members | int | number of ensemble members in the |
| | | forecast |
| | | this argument is ignored if incremental |
| | | is set to 'member' |
+---------------+-------------------+-----------------------------------------+
| incremental | {None, 'timestep',| allow incremental writing of datasets |
| | 'member'} | the available options are: |
| | | 'timestep' = write a forecast or a |
| | | forecast ensemble for a given |
| | | time step |
| | | 'member' = write a forecast sequence |
| | | for a given ensemble member |
+---------------+-------------------+-----------------------------------------+
Optional exporter-specific arguments are passed with ``kwargs``.
The return value is a dictionary containing an exporter object.
This can be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
to write the datasets to the output files.
Available Exporters
-------------------
.. autosummary::
:toctree: ../generated/
initialize_forecast_exporter_geotiff
initialize_forecast_exporter_kineros
initialize_forecast_exporter_netcdf
Generic functions
-----------------
.. autosummary::
:toctree: ../generated/
export_forecast_dataset
close_forecast_files
"""
import os
from datetime import datetime
import numpy as np
from pysteps.exceptions import MissingOptionalDependency
try:
from osgeo import gdal, osr
GDAL_IMPORTED = True
except ImportError:
GDAL_IMPORTED = False
try:
import netCDF4
NETCDF4_IMPORTED = True
except ImportError:
NETCDF4_IMPORTED = False
try:
import pyproj
PYPROJ_IMPORTED = True
except ImportError:
PYPROJ_IMPORTED = False
def initialize_forecast_exporter_geotiff(
    outpath,
    outfnprefix,
    startdate,
    timestep,
    n_timesteps,
    shape,
    metadata,
    n_ens_members=1,
    incremental=None,
    **kwargs,
):
    """Initialize a GeoTIFF forecast exporter.

    The output files are named as '<outfnprefix>_<startdate>_<t>.tif', where
    startdate is in YYmmddHHMM format and t is lead time (minutes). GDAL needs
    to be installed to use this exporter.

    Parameters
    ----------
    outpath: str
        Output path.
    outfnprefix: str
        Prefix for output file names.
    startdate: datetime.datetime
        Start date of the forecast.
    timestep: int
        Time step of the forecast (minutes).
    n_timesteps: int
        Number of time steps in the forecast. This argument is ignored if
        incremental is set to 'timestep'.
    shape: tuple of int
        Two-element tuple defining the shape (height,width) of the forecast
        grids.
    metadata: dict
        Metadata dictionary containing the projection,x1,x2,y1,y2 and unit
        attributes described in the documentation of
        :py:mod:`pysteps.io.importers`.
    n_ens_members: int
        Number of ensemble members in the forecast.
    incremental: {None,'timestep'}, optional
        Allow incremental writing of datasets into the GeoTIFF files. Set to
        'timestep' to enable writing forecasts or forecast ensembles separately
        for each time step. If set to None, incremental writing is disabled and
        the whole forecast is written in a single function call. The 'member'
        option is not currently implemented.

    Returns
    -------
    exporter: dict
        The return value is a dictionary containing an exporter object.
        This can be used with
        :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write the datasets.

    Raises
    ------
    ValueError
        If *shape* does not have two elements or *incremental* is not one of
        None or 'timestep'.
    MissingOptionalDependency
        If GDAL is not installed.
    """
    if len(shape) != 2:
        raise ValueError("shape has %d elements, 2 expected" % len(shape))

    del kwargs  # not used by this exporter; accepted for API symmetry

    if not GDAL_IMPORTED:
        raise MissingOptionalDependency(
            "gdal package is required for GeoTIFF " "exporters but it is not installed"
        )

    if incremental == "member":
        raise ValueError(
            "incremental writing of GeoTIFF files with"
            + " the 'member' option is not supported"
        )
    if incremental not in (None, "timestep"):
        # Fail early on typos instead of silently pre-creating all files.
        raise ValueError(
            f"unknown option {incremental}: incremental must be None or 'timestep'"
        )

    exporter = dict(
        method="geotiff",
        outfnprefix=outfnprefix,
        startdate=startdate,
        timestep=timestep,
        num_timesteps=n_timesteps,
        shape=shape,
        metadata=metadata,
        num_ens_members=n_ens_members,
        incremental=incremental,
        dst=[],
    )

    driver = gdal.GetDriverByName("GTiff")
    exporter["driver"] = driver

    if incremental != "timestep":
        # pre-create one file per time step, with one band per ensemble member
        for i in range(n_timesteps):
            outfn = _get_geotiff_filename(
                outfnprefix, startdate, n_timesteps, timestep, i
            )
            outfn = os.path.join(outpath, outfn)
            dst = _create_geotiff_file(outfn, driver, shape, metadata, n_ens_members)
            exporter["dst"].append(dst)
    else:
        # files are created lazily, one per exported time step
        exporter["num_files_written"] = 0

    return exporter
# TODO(exporters): This is a draft version of the kineros exporter.
# Revise the variable names and
# the structure of the file if necessary.
def initialize_forecast_exporter_kineros(
    outpath,
    outfnprefix,
    startdate,
    timestep,
    n_timesteps,
    shape,
    metadata,
    n_ens_members=1,
    incremental=None,
    **kwargs,
):
    """Initialize a KINEROS2 format exporter for the rainfall ".pre" files
    specified in https://www.tucson.ars.ag.gov/kineros/.

    Grid points are treated as individual rain gauges and a separate file is
    produced for each ensemble member. The output files are named as
    <outfnprefix>_N<n>.pre, where <n> is the index of ensemble member starting
    from zero.

    Parameters
    ----------
    outpath: str
        Output path.
    outfnprefix: str
        Prefix for output file names.
    startdate: datetime.datetime
        Start date of the forecast.
    timestep: int
        Time step of the forecast (minutes).
    n_timesteps: int
        Number of time steps in the forecast.
    shape: tuple of int
        Two-element tuple defining the shape (height,width) of the forecast
        grids.
    metadata: dict
        Metadata dictionary containing the projection,x1,x2,y1,y2 and unit
        attributes described in the documentation of
        :py:mod:`pysteps.io.importers`.
    n_ens_members: int
        Number of ensemble members in the forecast. Capped at 99 because the
        member index in the file name has two digits.
    incremental: {None}, optional
        Currently not implemented for this method.

    Returns
    -------
    exporter: dict
        The return value is a dictionary containing an exporter object. This
        can be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write datasets into the given file format.

    Raises
    ------
    ValueError
        If *incremental* is not None or the metadata unit is unsupported.
    """
    del kwargs  # not used by this exporter; accepted for API symmetry

    if incremental is not None:
        # BUG FIX: the message was previously assembled with a misplaced '%'
        # operator ("..." + "..." % incremental), which raised TypeError
        # instead of this ValueError.
        raise ValueError(
            f"unknown option {incremental}: incremental writing is not supported"
        )

    exporter = {}

    # one file for each member; the two-digit member index caps the count at 99
    n_ens_members = min(99, n_ens_members)
    fns = []
    for i in range(n_ens_members):
        outfn = "%s_N%02d%s" % (outfnprefix, i, ".pre")
        outfn = os.path.join(outpath, outfn)
        # the 'with' block closes the file; the previous explicit fd.close()
        # inside it was redundant
        with open(outfn, "w") as fd:
            # write header
            fd.writelines("! pysteps-generated nowcast.\n")
            fd.writelines("! created the %s.\n" % datetime.now().strftime("%c"))
            # TODO(exporters): Add pySTEPS version here
            fd.writelines("! Member = %02d.\n" % i)
            fd.writelines("! Startdate = %s.\n" % startdate.strftime("%c"))
        fns.append(outfn)

    h, w = shape

    if metadata["unit"] == "mm/h":
        var_name = "Intensity"
        var_long_name = "Intensity in mm/hr"
        var_unit = "mm/hr"
    elif metadata["unit"] == "mm":
        var_name = "Depth"
        var_long_name = "Accumulated depth in mm"
        var_unit = "mm"
    else:
        raise ValueError("unsupported unit %s" % metadata["unit"])

    # grid-cell center coordinates; each grid point acts as one "rain gauge"
    xr = np.linspace(metadata["x1"], metadata["x2"], w + 1)[:-1]
    xr += 0.5 * (xr[1] - xr[0])
    yr = np.linspace(metadata["y1"], metadata["y2"], h + 1)[:-1]
    yr += 0.5 * (yr[1] - yr[0])
    xy_coords = np.stack(np.meshgrid(xr, yr))

    exporter["method"] = "kineros"
    exporter["ncfile"] = fns
    exporter["XY_coords"] = xy_coords
    exporter["var_name"] = var_name
    exporter["var_long_name"] = var_long_name
    exporter["var_unit"] = var_unit
    exporter["startdate"] = startdate
    exporter["timestep"] = timestep
    exporter["metadata"] = metadata
    exporter["incremental"] = incremental
    exporter["num_timesteps"] = n_timesteps
    exporter["num_ens_members"] = n_ens_members
    exporter["shape"] = shape

    return exporter
# TODO(exporters): This is a draft version of the netcdf exporter.
# Revise the variable names and
# the structure of the file if necessary.
def initialize_forecast_exporter_netcdf(
    outpath,
    outfnprefix,
    startdate,
    timestep,
    n_timesteps,
    shape,
    metadata,
    n_ens_members=1,
    incremental=None,
    **kwargs,
):
    """Initialize a netCDF forecast exporter. All outputs are written to a
    single file named as '<outfnprefix>_.nc'.

    Parameters
    ----------
    outpath: str
        Output path.
    outfnprefix: str
        Prefix for output file names.
    startdate: datetime.datetime
        Start date of the forecast.
    timestep: int
        Time step of the forecast (minutes).
    n_timesteps: int
        Number of time steps in the forecast. This argument is ignored if
        incremental is set to 'timestep'.
    shape: tuple of int
        Two-element tuple defining the shape (height,width) of the forecast
        grids.
    metadata: dict
        Metadata dictionary containing the projection, x1, x2, y1, y2,
        unit attributes (projection and variable units) described in the
        documentation of :py:mod:`pysteps.io.importers`. The 'yorigin' and
        'cartesian_unit' attributes are read as well.
    n_ens_members: int
        Number of ensemble members in the forecast. This argument is ignored if
        incremental is set to 'member'.
    incremental: {None,'timestep','member'}, optional
        Allow incremental writing of datasets into the netCDF files.\n
        The available options are: 'timestep' = write a forecast or a forecast
        ensemble for a given time step; 'member' = write a forecast sequence
        for a given ensemble member. If set to None, incremental writing is
        disabled.

    Returns
    -------
    exporter: dict
        The return value is a dictionary containing an exporter object. This
        can be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write datasets into the given file format.
    """
    if not NETCDF4_IMPORTED:
        raise MissingOptionalDependency(
            "netCDF4 package is required for netcdf "
            "exporters but it is not installed"
        )

    if not PYPROJ_IMPORTED:
        raise MissingOptionalDependency(
            "pyproj package is required for netcdf " "exporters but it is not installed"
        )

    if incremental not in [None, "timestep", "member"]:
        raise ValueError(
            f"unknown option {incremental}: incremental must be "
            + "'timestep' or 'member'"
        )

    # The incremental dimension is created as unlimited (size=None) so that
    # it grows as successive increments are written. (A second validation
    # branch for unknown incremental values was unreachable here because the
    # check above already rejects them; it has been removed.)
    if incremental == "timestep":
        n_timesteps = None
    elif incremental == "member":
        n_ens_members = None

    # True when the file must carry an explicit ensemble dimension
    n_ens_gt_one = n_ens_members is not None and n_ens_members > 1

    exporter = {}

    outfn = os.path.join(outpath, outfnprefix + ".nc")
    ncf = netCDF4.Dataset(outfn, "w", format="NETCDF4")

    # global CF attributes
    ncf.Conventions = "CF-1.7"
    ncf.title = "pysteps-generated nowcast"
    ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
    ncf.source = "pysteps"  # TODO(exporters): Add pySTEPS version here
    ncf.history = ""
    ncf.references = ""
    ncf.comment = ""

    h, w = shape

    ncf.createDimension("ens_number", size=n_ens_members)
    ncf.createDimension("time", size=n_timesteps)
    ncf.createDimension("y", size=h)
    ncf.createDimension("x", size=w)

    if metadata["unit"] == "mm/h":
        var_name = "precip_intensity"
        var_standard_name = None
        var_long_name = "instantaneous precipitation rate"
        var_unit = "mm h-1"
    elif metadata["unit"] == "mm":
        var_name = "precip_accum"
        var_standard_name = None
        var_long_name = "accumulated precipitation"
        var_unit = "mm"
    elif metadata["unit"] == "dBZ":
        var_name = "reflectivity"
        var_long_name = "equivalent reflectivity factor"
        var_standard_name = "equivalent_reflectivity_factor"
        var_unit = "dBZ"
    else:
        raise ValueError("unknown unit %s" % metadata["unit"])

    # grid-cell center coordinates in the projected coordinate system
    xr = np.linspace(metadata["x1"], metadata["x2"], w + 1)[:-1]
    xr += 0.5 * (xr[1] - xr[0])
    yr = np.linspace(metadata["y1"], metadata["y2"], h + 1)[:-1]
    yr += 0.5 * (yr[1] - yr[0])

    # flip yr vector if yorigin is upper
    if metadata["yorigin"] == "upper":
        yr = np.flip(yr)

    var_xc = ncf.createVariable("x", np.float32, dimensions=("x",))
    var_xc[:] = xr
    var_xc.axis = "X"
    var_xc.standard_name = "projection_x_coordinate"
    var_xc.long_name = "x-coordinate in Cartesian system"
    var_xc.units = metadata["cartesian_unit"]

    var_yc = ncf.createVariable("y", np.float32, dimensions=("y",))
    var_yc[:] = yr
    var_yc.axis = "Y"
    var_yc.standard_name = "projection_y_coordinate"
    var_yc.long_name = "y-coordinate in Cartesian system"
    var_yc.units = metadata["cartesian_unit"]

    # auxiliary lon/lat coordinates derived from the projection
    x_2d, y_2d = np.meshgrid(xr, yr)
    pr = pyproj.Proj(metadata["projection"])
    lon, lat = pr(x_2d.flatten(), y_2d.flatten(), inverse=True)

    var_lon = ncf.createVariable("lon", float, dimensions=("y", "x"))
    var_lon[:] = lon.reshape(shape)
    var_lon.standard_name = "longitude"
    var_lon.long_name = "longitude coordinate"
    # TODO(exporters): Don't hard-code the unit.
    var_lon.units = "degrees_east"

    var_lat = ncf.createVariable("lat", float, dimensions=("y", "x"))
    var_lat[:] = lat.reshape(shape)
    var_lat.standard_name = "latitude"
    var_lat.long_name = "latitude coordinate"
    # TODO(exporters): Don't hard-code the unit.
    var_lat.units = "degrees_north"

    ncf.projection = metadata["projection"]

    (
        grid_mapping_var_name,
        grid_mapping_name,
        grid_mapping_params,
    ) = _convert_proj4_to_grid_mapping(metadata["projection"])
    # skip writing the grid mapping if a matching name was not found
    if grid_mapping_var_name is not None:
        var_gm = ncf.createVariable(grid_mapping_var_name, int, dimensions=())
        var_gm.grid_mapping_name = grid_mapping_name
        for i in grid_mapping_params.items():
            var_gm.setncattr(i[0], i[1])

    if incremental == "member" or n_ens_gt_one:
        var_ens_num = ncf.createVariable("ens_number", int, dimensions=("ens_number",))
        if incremental != "member":
            var_ens_num[:] = list(range(1, n_ens_members + 1))
        var_ens_num.long_name = "ensemble member"
        var_ens_num.standard_name = "realization"
        var_ens_num.units = ""

    var_time = ncf.createVariable("time", int, dimensions=("time",))
    if incremental != "timestep":
        var_time[:] = [i * timestep * 60 for i in range(1, n_timesteps + 1)]
    var_time.long_name = "forecast time"
    startdate_str = datetime.strftime(startdate, "%Y-%m-%d %H:%M:%S")
    var_time.units = "seconds since %s" % startdate_str

    if incremental == "member" or n_ens_gt_one:
        var_f = ncf.createVariable(
            var_name,
            np.float32,
            dimensions=("ens_number", "time", "y", "x"),
            zlib=True,
            complevel=9,
        )
    else:
        var_f = ncf.createVariable(
            var_name, np.float32, dimensions=("time", "y", "x"), zlib=True, complevel=9
        )

    if var_standard_name is not None:
        var_f.standard_name = var_standard_name
    var_f.long_name = var_long_name
    var_f.coordinates = "y x"
    var_f.units = var_unit
    if grid_mapping_var_name is not None:
        var_f.grid_mapping = grid_mapping_var_name

    exporter["method"] = "netcdf"
    exporter["ncfile"] = ncf
    exporter["var_F"] = var_f
    if incremental == "member" or n_ens_gt_one:
        exporter["var_ens_num"] = var_ens_num
    exporter["var_time"] = var_time
    exporter["var_name"] = var_name
    exporter["startdate"] = startdate
    exporter["timestep"] = timestep
    exporter["metadata"] = metadata
    exporter["incremental"] = incremental
    exporter["num_timesteps"] = n_timesteps
    exporter["num_ens_members"] = n_ens_members
    exporter["shape"] = shape

    return exporter
def export_forecast_dataset(field, exporter):
    """Write a forecast array into a file.

    If the exporter was initialized with n_ens_members>1, the written dataset
    has dimensions (n_ens_members,num_timesteps,shape[0],shape[1]), where shape
    refers to the shape of the two-dimensional forecast grids. Otherwise, the
    dimensions are (num_timesteps,shape[0],shape[1]). If the exporter was
    initialized with incremental!=None, the array is appended to the existing
    dataset either along the ensemble member or time axis.

    Parameters
    ----------
    field: array_like
        The array to write. The required shape depends on the choice of the
        'incremental' parameter the exporter was initialized with:

        +-----------------+---------------------------------------------------+
        |  incremental    |                    required shape                 |
        +=================+===================================================+
        |  None           | (num_ens_members,num_timesteps,shape[0],shape[1]) |
        +-----------------+---------------------------------------------------+
        |  'timestep'     | (num_ens_members,shape[0],shape[1])               |
        +-----------------+---------------------------------------------------+
        |  'member'       | (num_timesteps,shape[0],shape[1])                 |
        +-----------------+---------------------------------------------------+

        If the exporter was initialized with num_ens_members=1,
        the num_ens_members dimension is dropped.
    exporter: dict
        An exporter object created with any initialization method implemented
        in :py:mod:`pysteps.io.exporters`.

    Raises
    ------
    ValueError
        If the shape of *field* does not match the exporter configuration, or
        if the exporter method is unknown.
    """
    if exporter["method"] == "netcdf" and not NETCDF4_IMPORTED:
        raise MissingOptionalDependency(
            "netCDF4 package is required for netcdf "
            "exporters but it is not installed"
        )

    # Determine the expected array shape once instead of repeating the same
    # check in every branch.
    h, w = exporter["shape"]
    incremental = exporter["incremental"]
    if incremental is None:
        if exporter["num_ens_members"] > 1:
            shp = (exporter["num_ens_members"], exporter["num_timesteps"], h, w)
        else:
            shp = (exporter["num_timesteps"], h, w)
    elif incremental == "timestep":
        if exporter["num_ens_members"] > 1:
            shp = (exporter["num_ens_members"], h, w)
        else:
            shp = (h, w)
    elif incremental == "member":
        shp = (exporter["num_timesteps"], h, w)
    else:
        shp = None  # unknown modes are rejected by the initializers

    if shp is not None and field.shape != shp:
        raise ValueError(
            "field has invalid shape: %s != %s" % (str(field.shape), str(shp))
        )

    if exporter["method"] == "geotiff":
        _export_geotiff(field, exporter)
    elif exporter["method"] == "netcdf":
        _export_netcdf(field, exporter)
    elif exporter["method"] == "kineros":
        _export_kineros(field, exporter)
    else:
        raise ValueError("unknown exporter method %s" % exporter["method"])
def close_forecast_files(exporter):
    """Close the files associated with a forecast exporter.

    Finish writing forecasts and close the output files opened by a forecast
    exporter.

    Parameters
    ----------
    exporter: dict
        An exporter object created with any initialization method implemented
        in :py:mod:`pysteps.io.exporters`.
    """
    if exporter["method"] == "geotiff":
        # NOTE: There is no explicit "close" method in GDAL. The files are
        # closed when all objects referencing the GDAL datasets are deleted
        # (i.e. when the exporter object is deleted).
        pass
    elif exporter["method"] == "kineros":
        pass  # the KINEROS2 files are opened and closed at write time
    else:
        # BUG FIX: the two checks above used to be independent "if"
        # statements, so geotiff exporters fell through to this branch and
        # raised KeyError on the missing "ncfile" entry.
        exporter["ncfile"].close()
def _export_geotiff(F, exporter):
    """Write a forecast (or forecast increment) into the exporter's GeoTIFF
    dataset(s)."""

    def _prepare_band(band):
        # every band carries identity scaling and the physical unit
        band.SetScale(1.0)
        band.SetOffset(0.0)
        band.SetUnitType(exporter["metadata"]["unit"])

    incremental = exporter["incremental"]
    num_members = exporter["num_ens_members"]

    if incremental is None:
        # one pre-created file per time step, one band per ensemble member
        for t in range(exporter["num_timesteps"]):
            dst = exporter["dst"][t]
            if num_members == 1:
                band = dst.GetRasterBand(1)
                _prepare_band(band)
                band.WriteArray(F[t, :, :])
            else:
                for m in range(num_members):
                    band = dst.GetRasterBand(m + 1)
                    _prepare_band(band)
                    band.WriteArray(F[m, t, :, :])
    elif incremental == "timestep":
        idx = exporter["num_files_written"]
        # NOTE(review): unlike the init-time path, the file name is not joined
        # with an output directory here, so the file lands in the current
        # working directory — confirm whether that is intended.
        outfn = _get_geotiff_filename(
            exporter["outfnprefix"],
            exporter["startdate"],
            exporter["num_timesteps"],
            exporter["timestep"],
            idx,
        )
        dst = _create_geotiff_file(
            outfn,
            exporter["driver"],
            exporter["shape"],
            exporter["metadata"],
            num_members,
        )
        for m in range(num_members):
            band = dst.GetRasterBand(m + 1)
            _prepare_band(band)
            if num_members > 1:
                band.WriteArray(F[m, :, :])
            else:
                band.WriteArray(F)
        exporter["num_files_written"] += 1
    elif incremental == "member":
        for t in range(exporter["num_timesteps"]):
            # NOTE: This does not work because the GeoTIFF driver does not
            # support adding bands. An alternative solution needs to be
            # implemented.
            dst = exporter["dst"][t]
            dst.AddBand(gdal.GDT_Float32)
            band = dst.GetRasterBand(dst.RasterCount)
            _prepare_band(band)
            band.WriteArray(F[t, :, :])
def _export_kineros(field, exporter):
num_timesteps = exporter["num_timesteps"]
num_ens_members = exporter["num_ens_members"]
timestep = exporter["timestep"]
xgrid = exporter["XY_coords"][0, :, :].flatten()
ygrid = exporter["XY_coords"][1, :, :].flatten()
timemin = [(t + 1) * timestep for t in range(num_timesteps)]
if field.ndim == 3:
field = field.reshape((1,) + field.shape)
for n in range(num_ens_members):
file_name = exporter["ncfile"][n]
field_tmp = field[n, :, :, :].reshape((num_timesteps, -1))
if exporter["var_name"] == "Depth":
field_tmp = np.cumsum(field_tmp, axis=0)
with open(file_name, "a") as fd:
for m in range(field_tmp.shape[1]):
fd.writelines("BEGIN RG%03d\n" % (m + 1))
fd.writelines(" X = %.2f, Y = %.2f\n" % (xgrid[m], ygrid[m]))
fd.writelines(" N = %i\n" % num_timesteps)
fd.writelines(" TIME %s\n" % exporter["var_name"].upper())
fd.writelines("! (min) (%s)\n" % exporter["var_unit"])
for t in range(num_timesteps):
line_new = "{:6.1f} {:11.2f}\n".format(timemin[t], field_tmp[t, m])
fd.writelines(line_new)
fd.writelines("END\n\n")
def _export_netcdf(field, exporter):
    """Write *field* into the exporter's open netCDF variable.

    The incremental modes rely on netCDF unlimited dimensions: assigning at
    the index one past the current end grows the variable along that axis.
    """
    var_f = exporter["var_F"]

    if exporter["incremental"] is None:
        # whole forecast in one assignment; shape was validated by the caller
        var_f[:] = field
    elif exporter["incremental"] == "timestep":
        if exporter["num_ens_members"] > 1:
            # variable dims are (ens_number, time, y, x): time axis is 1
            var_f[:, var_f.shape[1], :, :] = field
        else:
            # BUG FIX: the variable dims are (time, y, x), so the time axis
            # is 0; the previous code indexed shape[1] (the grid height),
            # growing the time dimension to the wrong length.
            var_f[var_f.shape[0], :, :] = field
        var_time = exporter["var_time"]
        var_time[len(var_time) - 1] = len(var_time) * exporter["timestep"] * 60
    else:
        # incremental == 'member': append one full member along axis 0
        var_f[var_f.shape[0], :, :, :] = field
        var_ens_num = exporter["var_ens_num"]
        var_ens_num[len(var_ens_num) - 1] = len(var_ens_num)
# TODO(exporters): Write methods for converting Proj.4 projection definitions
# into CF grid mapping attributes. Currently this has been implemented for
# the stereographic projection.
# The conversions implemented here are take from:
# https://github.com/cf-convention/cf-convention.github.io/blob/master/wkt-proj-4.md
def _convert_proj4_to_grid_mapping(proj4str):
tokens = proj4str.split("+")
d = {}
for t in tokens[1:]:
t = t.split("=")
if len(t) > 1:
d[t[0]] = t[1].strip()
params = {}
# TODO(exporters): implement more projection types here
if d["proj"] == "stere":
grid_mapping_var_name = "polar_stereographic"
grid_mapping_name = "polar_stereographic"
v = d["lon_0"] if d["lon_0"][-1] not in ["E", "W"] else d["lon_0"][:-1]
params["straight_vertical_longitude_from_pole"] = float(v)
v = d["lat_0"] if d["lat_0"][-1] not in ["N", "S"] else d["lat_0"][:-1]
params["latitude_of_projection_origin"] = float(v)
if "lat_ts" in list(d.keys()):
params["standard_parallel"] = float(d["lat_ts"])
elif "k_0" in list(d.keys()):
params["scale_factor_at_projection_origin"] = float(d["k_0"])
params["false_easting"] = float(d["x_0"])
params["false_northing"] = float(d["y_0"])
elif d["proj"] == "aea": # Albers Conical Equal Area
grid_mapping_var_name = "proj"
grid_mapping_name = "albers_conical_equal_area"
params["false_easting"] = float(d["x_0"]) if "x_0" in d else float(0)
params["false_northing"] = float(d["y_0"]) if "y_0" in d else float(0)
v = d["lon_0"] if "lon_0" in d else float(0)
params["longitude_of_central_meridian"] = float(v)
v = d["lat_0"] if "lat_0" in d else float(0)
params["latitude_of_projection_origin"] = float(v)
v1 = d["lat_1"] if "lat_1" in d else float(0)
v2 = d["lat_2"] if "lat_2" in d else float(0)
params["standard_parallel"] = (float(v1), float(v2))
else:
print("unknown projection", d["proj"])
return None, None, None
return grid_mapping_var_name, grid_mapping_name, params
def _create_geotiff_file(outfn, driver, shape, metadata, num_bands):
    """Create a DEFLATE-compressed float32 GeoTIFF dataset with *num_bands*
    bands, georeferenced from the metadata extent, and return it."""
    height, width = shape
    dst = driver.Create(
        outfn,
        width,
        height,
        num_bands,
        gdal.GDT_Float32,
        ["COMPRESS=DEFLATE", "PREDICTOR=3"],
    )

    # pixel sizes from the domain extent; the y step is negative because
    # GeoTIFF rows run downwards from the upper-left corner (y2)
    pixel_sx = (metadata["x2"] - metadata["x1"]) / width
    pixel_sy = (metadata["y2"] - metadata["y1"]) / height
    dst.SetGeoTransform([metadata["x1"], pixel_sx, 0.0, metadata["y2"], 0.0, -pixel_sy])

    srs = osr.SpatialReference()
    srs.ImportFromProj4(metadata["projection"])
    dst.SetProjection(srs.ExportToWkt())

    return dst
def _get_geotiff_filename(prefix, startdate, n_timesteps, timestep, timestep_index):
if n_timesteps * timestep == 0:
raise ValueError("n_timesteps x timestep can't be 0.")
timestep_format_str = (
f"{{time_str:0{int(np.floor(np.log10(n_timesteps * timestep))) + 1}d}}"
)
startdate_str = datetime.strftime(startdate, "%Y%m%d%H%M")
timestep_str = timestep_format_str.format(time_str=(timestep_index + 1) * timestep)
return f"{prefix}_{startdate_str}_{timestep_str}.tif"
| 35.667411 | 88 | 0.577821 |
8d20716986c50e1cfccaaecd88f6da1c17a05437 | 848 | py | Python | pytknvim/test_screen.py | timeyyy/pytknvim | d5fae053c3d39ded52afeb76b4d0d4d0e28ed20c | [
"0BSD"
] | 26 | 2016-05-23T22:40:14.000Z | 2021-11-23T00:46:17.000Z | pytknvim/test_screen.py | timeyyy/pytknvim | d5fae053c3d39ded52afeb76b4d0d4d0e28ed20c | [
"0BSD"
] | 12 | 2016-05-23T22:29:17.000Z | 2019-06-24T22:57:53.000Z | pytknvim/test_screen.py | timeyyy/pytknvim | d5fae053c3d39ded52afeb76b4d0d4d0e28ed20c | [
"0BSD"
] | 5 | 2016-05-23T23:18:44.000Z | 2016-07-05T08:46:55.000Z |
import pytest

from pytknvim.screen import DirtyScreen
from pytknvim.screen import Screen

# Module-level DirtyScreen instance shared by all tests below; each test
# starts by calling dscreen.reset() to clear previously recorded ranges.
dscreen = DirtyScreen()
def assrt(screen, *values):
    """Assert that *screen* reports exactly *values* as its dirty ranges."""
    # BUG FIX: compare against the screen that was passed in; the original
    # ignored this parameter and always consulted the module-level ``dscreen``.
    assert list(screen.get()) == [*values]
def test_simple():
    """A single dirty range is reported back unchanged."""
    dscreen.reset()
    span = (1, 1, 1, 2)
    dscreen.changed(*span)
    assrt(dscreen, span)
def test_second_range_added_after():
    """Two disjoint ranges stay as two separate entries."""
    dscreen.reset()
    first, second = (1, 1, 1, 2), (1, 3, 1, 5)
    dscreen.changed(*first)
    dscreen.changed(*second)
    assrt(dscreen, first, second)
def test_second_range_added_touching_previous():
    """Ranges that touch are merged into a single larger range."""
    dscreen.reset()
    for span in ((1, 1, 1, 2), (1, 2, 1, 5)):
        dscreen.changed(*span)
    assrt(dscreen, (1, 1, 1, 5))
def test_second_range_added_before():
    """A range added before an existing one is kept separate, in insertion
    order (no sorting or merging)."""
    dscreen.reset()
    later, earlier = (1, 5, 1, 6), (1, 2, 1, 3)
    dscreen.changed(*later)
    dscreen.changed(*earlier)
    assrt(dscreen, later, earlier)
# screen = Screen()
# def test_iter_works():
| 21.74359 | 48 | 0.636792 |
1680dee33afef206e0805476f390969bb16368e3 | 4,928 | py | Python | test2.py | FCChinen/lab2 | b04b4e0788678e4de2365b74db505ef1a72b7103 | [
"MIT"
] | 1 | 2020-05-31T20:58:38.000Z | 2020-05-31T20:58:38.000Z | test2.py | FCChinen/lab2 | b04b4e0788678e4de2365b74db505ef1a72b7103 | [
"MIT"
] | null | null | null | test2.py | FCChinen/lab2 | b04b4e0788678e4de2365b74db505ef1a72b7103 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import random
from collections import OrderedDict
from mytcputils import *
from mytcp import Servidor
class CamadaRede:
    """Fake network layer: records outgoing segments in a queue instead of
    sending them, so the test can inspect what the server transmitted."""

    def __init__(self):
        self.callback = None
        self.fila = []

    def registrar_recebedor(self, callback):
        """Register the function invoked when a segment 'arrives'."""
        self.callback = callback

    def enviar(self, segmento, dest_addr):
        """Queue an outgoing (segment, destination) pair for later inspection."""
        item = (segmento, dest_addr)
        self.fila.append(item)
# Bookkeeping shared between the callbacks below and the assertions at the
# end: seq_list/ack_list track expected vs. observed ACK numbers, and
# esperado/recebido the expected vs. received payload bytes.
seq_list = []
ack_list = OrderedDict()
esperado = b''
recebido = b''
def dados_recebidos(conexao, dados):
    # Connection data callback: accumulate every payload delivered to the
    # application layer into the module-level 'recebido' buffer.
    global recebido
    recebido += dados
def conexao_aceita(conexao):
    # New-connection callback: wire up the data receiver for this connection.
    conexao.registrar_recebedor(dados_recebidos)
# --- set up a server on a random port behind the fake network layer ---
rede = CamadaRede()
dst_port = random.randint(10, 1023)
servidor = Servidor(rede, dst_port)
servidor.registrar_monitor_de_conexoes_aceitas(conexao_aceita)
src_port = random.randint(1024, 0xffff)
seq_no = random.randint(0, 0xffff)
src_addr, dst_addr = '172.16.%d.%d'%(random.randint(1, 10), random.randint(0,255)), '172.16.%d.%d'%(random.randint(11, 20), random.randint(0, 255))
# --- handshake: deliver a SYN and expect a payload-free SYN+ACK back ---
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, 0, FLAGS_SYN), src_addr, dst_addr))
segmento, _ = rede.fila[0]
_, _, ack_no, ack, flags, _, _, _ = read_header(segmento)
assert 4*(flags>>12) == len(segmento), 'O SYN+ACK não deveria ter payload'
assert (flags & FLAGS_ACK) == FLAGS_ACK
ack_list[ack] = None
rede.fila.clear()
seq_no += 1
ack_no += 1
# --- segments with out-of-window sequence numbers must be discarded ---
payload = os.urandom(random.randint(16, MSS))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no+random.randint(1, 128), ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
payload = os.urandom(random.randint(4, MSS))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no+random.randint(1, 128), ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
# --- in-order payload b'ola'; the duplicate must be delivered only once ---
payload = b'ola'
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
seq_list.append(seq_no)
seq_no += len(payload)
esperado += payload
# --- b', ': an overlapping retransmission plus duplicates of the good one ---
payload = b', '
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no-3, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
seq_list.append(seq_no)
seq_no += len(payload)
esperado += payload
# --- b'mundo': several bad sequence numbers around the correct one ---
payload = b'mundo'
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no-random.randint(1, 128), ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no-2, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no+2, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no+random.randint(1, 128), ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
seq_list.append(seq_no)
seq_no += len(payload)
esperado += payload
# only the in-order payloads should have reached the application
print('esperado: %r' % esperado)
print('recebido: %r' % recebido)
assert esperado == recebido
# --- larger random payloads, again interleaved with bad sequence numbers ---
payload = os.urandom(random.randint(16, MSS))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no+random.randint(1, 128), ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
payload = os.urandom(MSS)
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no+random.randint(1, 128), ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
payload = os.urandom(random.randint(16, MSS))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
seq_list.append(seq_no)
seq_no += len(payload)
esperado += payload
payload = os.urandom(MSS)
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
seq_list.append(seq_no)
seq_no += len(payload)
seq_list.append(seq_no)
esperado += payload
assert esperado == recebido
# --- every segment the server sent must be a pure ACK (no payload) ---
for segmento, _ in rede.fila:
    _, _, _, ack, flags, _, _, _ = read_header(segmento)
    assert 4*(flags>>12) == len(segmento), 'Este teste não gera envios: não deveria haver payloads'
    if (flags & FLAGS_ACK) == FLAGS_ACK:
        ack_list[ack] = None
# OrderedDict keys preserve first-ACK order for the comparison below
ack_list = list(ack_list.keys())
print('ACKs esperados: %r' % seq_list) # compare expected vs. observed ACKs
print('ACKs recebidos: %r' % ack_list)
assert seq_list == ack_list
| 44.396396 | 160 | 0.755885 |
076bf76150dcf7d5313876f896e7365330ae5d99 | 2,118 | py | Python | txstatsd/tests/metrics/test_metermetric.py | drawks/txstatsd | da674d7a86e0e5ec40eaa53fe81310ef19d1ed9e | [
"MIT"
] | null | null | null | txstatsd/tests/metrics/test_metermetric.py | drawks/txstatsd | da674d7a86e0e5ec40eaa53fe81310ef19d1ed9e | [
"MIT"
] | 1 | 2020-07-10T23:35:49.000Z | 2020-07-10T23:35:49.000Z | txstatsd/tests/metrics/test_metermetric.py | drawks/txstatsd | da674d7a86e0e5ec40eaa53fe81310ef19d1ed9e | [
"MIT"
] | 1 | 2020-07-13T05:31:58.000Z | 2020-07-13T05:31:58.000Z | # Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import random
from twisted.trial.unittest import TestCase
from txstatsd.metrics.metermetric import MeterMetricReporter
class TestDeriveMetricReporter(TestCase):
    """Tests for MeterMetricReporter's count/rate reporting."""

    def test_fastpoll(self):
        """Reporting at the same wall time as creation yields no messages."""
        wall_time = 42
        reporter = MeterMetricReporter(
            "test", wall_time_func=lambda: wall_time)
        # CONSISTENCY: use assertEqual throughout; assertEquals is a
        # deprecated alias of the same method.
        self.assertEqual([], reporter.report(wall_time))

    def test_interface(self):
        """Count and rate are derived from marked values over elapsed time."""
        random.seed(1)
        wall_time = [0]
        reporter = MeterMetricReporter("test", prefix="some.prefix",
                                       wall_time_func=lambda: wall_time[0])
        reporter.mark(42)
        reporter.mark(60)
        reporter.mark(38)
        wall_time = [10]

        reported = reporter.report(10)
        self.assertEqual(2, len(reported))

        # 42 + 60 + 38 = 140 events over 10 seconds -> rate of 14/s
        self.assertEqual(140, reported[0][1])
        self.assertEqual(14, reported[1][1])
        self.assertEqual(
            ['some.prefix.test.count', 'some.prefix.test.rate'],
            [reported[0][0], reported[1][0]])
| 38.509091 | 75 | 0.702077 |
2153968cebda82058e392fd9abbff6e95e5da2a4 | 68,670 | py | Python | discord/ext/commands/core.py | TurnrDev/discord.py | 4203e7a5a830ee2a05e9642ef83721d05c9e21f6 | [
"MIT"
] | null | null | null | discord/ext/commands/core.py | TurnrDev/discord.py | 4203e7a5a830ee2a05e9642ef83721d05c9e21f6 | [
"MIT"
] | null | null | null | discord/ext/commands/core.py | TurnrDev/discord.py | 4203e7a5a830ee2a05e9642ef83721d05c9e21f6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import functools
import inspect
import typing
import datetime
import discord
from .errors import *
from .cooldowns import Cooldown, BucketType, CooldownMapping, MaxConcurrency
from . import converter as converters
from ._types import _BaseCommand
from .cog import Cog
# Public API of this module -- the names re-exported by ``from ... import *``
# and considered stable for external use.
__all__ = (
    'Command',
    'Group',
    'GroupMixin',
    'command',
    'group',
    'has_role',
    'has_permissions',
    'has_any_role',
    'check',
    'check_any',
    'before_invoke',
    'after_invoke',
    'bot_has_role',
    'bot_has_permissions',
    'bot_has_any_role',
    'cooldown',
    'max_concurrency',
    'dm_only',
    'guild_only',
    'is_owner',
    'is_nsfw',
    'has_guild_permissions',
    'bot_has_guild_permissions'
)
def wrap_callback(coro):
    """Decorate *coro* so that unexpected exceptions surface uniformly.

    ``CommandError`` subclasses propagate untouched, a cancelled task quietly
    returns ``None``, and anything else is re-raised wrapped in
    :exc:`CommandInvokeError` with the original exception chained as cause.
    """
    @functools.wraps(coro)
    async def wrapped(*args, **kwargs):
        try:
            result = await coro(*args, **kwargs)
        except asyncio.CancelledError:
            return
        except CommandError:
            raise
        except Exception as exc:
            raise CommandInvokeError(exc) from exc
        else:
            return result
    return wrapped
def hooked_wrapped_callback(command, ctx, coro):
    """Decorate a command callback for full invocation handling.

    Like :func:`wrap_callback`, but additionally flags ``ctx.command_failed``
    on any failure, releases the command's max-concurrency slot (if one is
    configured) and always runs the post-invoke hooks, success or not.
    """
    @functools.wraps(coro)
    async def wrapped(*args, **kwargs):
        try:
            result = await coro(*args, **kwargs)
        except asyncio.CancelledError:
            ctx.command_failed = True
            return
        except CommandError:
            ctx.command_failed = True
            raise
        except Exception as exc:
            ctx.command_failed = True
            raise CommandInvokeError(exc) from exc
        finally:
            # Release the concurrency slot before after-hooks so the hooks
            # observe the command as no longer "running".
            if command._max_concurrency is not None:
                await command._max_concurrency.release(ctx)
            await command.call_after_hooks(ctx)
        return result
    return wrapped
def _convert_to_bool(argument):
lowered = argument.lower()
if lowered in ('yes', 'y', 'true', 't', '1', 'enable', 'on'):
return True
elif lowered in ('no', 'n', 'false', 'f', '0', 'disable', 'off'):
return False
else:
raise BadArgument(lowered + ' is not a recognised boolean option')
class _CaseInsensitiveDict(dict):
def __contains__(self, k):
return super().__contains__(k.casefold())
def __delitem__(self, k):
return super().__delitem__(k.casefold())
def __getitem__(self, k):
return super().__getitem__(k.casefold())
def get(self, k, default=None):
return super().get(k.casefold(), default)
def pop(self, k, default=None):
return super().pop(k.casefold(), default)
def __setitem__(self, k, v):
super().__setitem__(k.casefold(), v)
class Command(_BaseCommand):
    r"""A class that implements the protocol for a bot text command.
    These are not created manually, instead they are created via the
    decorator or functional interface.
    Attributes
    -----------
    name: :class:`str`
        The name of the command.
    callback: :ref:`coroutine <coroutine>`
        The coroutine that is executed when the command is called.
    help: :class:`str`
        The long help text for the command.
    brief: Optional[:class:`str`]
        The short help text for the command.
    usage: :class:`str`
        A replacement for arguments in the default help text.
    aliases: Union[List[:class:`str`], Tuple[:class:`str`]]
        The list of aliases the command can be invoked under.
    enabled: :class:`bool`
        A boolean that indicates if the command is currently enabled.
        If the command is invoked while it is disabled, then
        :exc:`.DisabledCommand` is raised to the :func:`.on_command_error`
        event. Defaults to ``True``.
    parent: Optional[:class:`Command`]
        The parent command that this command belongs to. ``None`` if there
        isn't one.
    cog: Optional[:class:`Cog`]
        The cog that this command belongs to. ``None`` if there isn't one.
    checks: List[Callable[..., :class:`bool`]]
        A list of predicates that verifies if the command could be executed
        with the given :class:`.Context` as the sole parameter. If an exception
        is necessary to be thrown to signal failure, then one inherited from
        :exc:`.CommandError` should be used. Note that if the checks fail then
        :exc:`.CheckFailure` exception is raised to the :func:`.on_command_error`
        event.
    description: :class:`str`
        The message prefixed into the default help command.
    hidden: :class:`bool`
        If ``True``\, the default help command does not show this in the
        help output.
    rest_is_raw: :class:`bool`
        If ``False`` and a keyword-only argument is provided then the keyword
        only argument is stripped and handled as if it was a regular argument
        that handles :exc:`.MissingRequiredArgument` and default values in a
        regular matter rather than passing the rest completely raw. If ``True``
        then the keyword-only argument will pass in the rest of the arguments
        in a completely raw matter. Defaults to ``False``.
    invoked_subcommand: Optional[:class:`Command`]
        The subcommand that was invoked, if any.
    ignore_extra: :class:`bool`
        If ``True``\, ignores extraneous strings passed to a command if all its
        requirements are met (e.g. ``?foo a b c`` when only expecting ``a``
        and ``b``). Otherwise :func:`.on_command_error` and local error handlers
        are called with :exc:`.TooManyArguments`. Defaults to ``True``.
    cooldown_after_parsing: :class:`bool`
        If ``True``\, cooldown processing is done after argument parsing,
        which calls converters. If ``False`` then cooldown processing is done
        first and then the converters are called second. Defaults to ``False``.
    """
    def __new__(cls, *args, **kwargs):
        # if you're wondering why this is done, it's because we need to ensure
        # we have a complete original copy of **kwargs even for classes that
        # mess with it by popping before delegating to the subclass __init__.
        # In order to do this, we need to control the instance creation and
        # inject the original kwargs through __new__ rather than doing it
        # inside __init__.
        self = super().__new__(cls)
        # we do a shallow copy because it's probably the most common use case.
        # this could potentially break if someone modifies a list or something
        # while it's in movement, but for now this is the cheapest and
        # fastest way to do what we want.
        self.__original_kwargs__ = kwargs.copy()
        return self
    def __init__(self, func, **kwargs):
        # ``func`` must be a coroutine function; it becomes the callback.
        if not asyncio.iscoroutinefunction(func):
            raise TypeError('Callback must be a coroutine.')
        self.name = name = kwargs.get('name') or func.__name__
        if not isinstance(name, str):
            raise TypeError('Name of a command must be a string.')
        self.callback = func
        self.enabled = kwargs.get('enabled', True)
        # Help text: explicit kwarg wins, otherwise fall back to the
        # callback's docstring (decoded if it is somehow bytes).
        help_doc = kwargs.get('help')
        if help_doc is not None:
            help_doc = inspect.cleandoc(help_doc)
        else:
            help_doc = inspect.getdoc(func)
            if isinstance(help_doc, bytes):
                help_doc = help_doc.decode('utf-8')
        self.help = help_doc
        self.brief = kwargs.get('brief')
        self.usage = kwargs.get('usage')
        self.rest_is_raw = kwargs.get('rest_is_raw', False)
        self.aliases = kwargs.get('aliases', [])
        if not isinstance(self.aliases, (list, tuple)):
            raise TypeError("Aliases of a command must be a list or a tuple of strings.")
        self.description = inspect.cleandoc(kwargs.get('description', ''))
        self.hidden = kwargs.get('hidden', False)
        # Checks attached via the @commands.check decorator are stored on the
        # function; they were prepended, so reverse (in place) to restore
        # declaration order.  The try/finally pattern assigns either source.
        try:
            checks = func.__commands_checks__
            checks.reverse()
        except AttributeError:
            checks = kwargs.get('checks', [])
        finally:
            self.checks = checks
        # Cooldown: decorator-attached value takes precedence over the kwarg.
        try:
            cooldown = func.__commands_cooldown__
        except AttributeError:
            cooldown = kwargs.get('cooldown')
        finally:
            self._buckets = CooldownMapping(cooldown)
        # Same precedence rule for max concurrency.
        try:
            max_concurrency = func.__commands_max_concurrency__
        except AttributeError:
            max_concurrency = kwargs.get('max_concurrency')
        finally:
            self._max_concurrency = max_concurrency
        self.ignore_extra = kwargs.get('ignore_extra', True)
        self.cooldown_after_parsing = kwargs.get('cooldown_after_parsing', False)
        self.cog = None
        # bandaid for the fact that sometimes parent can be the bot instance
        parent = kwargs.get('parent')
        self.parent = parent if isinstance(parent, _BaseCommand) else None
        # Pre/post-invoke hooks attached via decorators on the raw function.
        try:
            before_invoke = func.__before_invoke__
        except AttributeError:
            self._before_invoke = None
        else:
            self.before_invoke(before_invoke)
        try:
            after_invoke = func.__after_invoke__
        except AttributeError:
            self._after_invoke = None
        else:
            self.after_invoke(after_invoke)
    @property
    def callback(self):
        # The underlying coroutine function executed on invocation.
        return self._callback
    @callback.setter
    def callback(self, function):
        self._callback = function
        self.module = function.__module__
        signature = inspect.signature(function)
        self.params = signature.parameters.copy()
        # PEP-563 allows postponing evaluation of annotations with a __future__
        # import. When postponed, Parameter.annotation will be a string and must
        # be replaced with the real value for the converters to work later on
        for key, value in self.params.items():
            if isinstance(value.annotation, str):
                # NOTE: eval() here executes annotation text in the function's
                # module globals -- standard for resolving PEP 563 strings, but
                # it means annotations must be trusted code.
                self.params[key] = value = value.replace(annotation=eval(value.annotation, function.__globals__))
            # fail early for when someone passes an unparameterized Greedy type
            if value.annotation is converters.Greedy:
                raise TypeError('Unparameterized Greedy[...] is disallowed in signature.')
    def add_check(self, func):
        """Adds a check to the command.
        This is the non-decorator interface to :func:`.check`.
        .. versionadded:: 1.3
        Parameters
        -----------
        func
            The function that will be used as a check.
        """
        self.checks.append(func)
    def remove_check(self, func):
        """Removes a check from the command.
        This function is idempotent and will not raise an exception
        if the function is not in the command's checks.
        .. versionadded:: 1.3
        Parameters
        -----------
        func
            The function to remove from the checks.
        """
        try:
            self.checks.remove(func)
        except ValueError:
            pass
    def update(self, **kwargs):
        """Updates :class:`Command` instance with updated attribute.
        This works similarly to the :func:`.command` decorator in terms
        of parameters in that they are passed to the :class:`Command` or
        subclass constructors, sans the name and callback.
        """
        # Re-runs __init__ with the original kwargs overlaid by the new ones.
        self.__init__(self.callback, **dict(self.__original_kwargs__, **kwargs))
    async def __call__(self, *args, **kwargs):
        """|coro|
        Calls the internal callback that the command holds.
        .. note::
            This bypasses all mechanisms -- including checks, converters,
            invoke hooks, cooldowns, etc. You must take care to pass
            the proper arguments and types to this function.
        .. versionadded:: 1.3
        """
        if self.cog is not None:
            # Cog commands are unbound methods; inject the cog as ``self``.
            return await self.callback(self.cog, *args, **kwargs)
        else:
            return await self.callback(*args, **kwargs)
    def _ensure_assignment_on_copy(self, other):
        # Carry over mutable/derived state that __init__ cannot reconstruct
        # from __original_kwargs__ alone (hooks, live checks, cooldown buckets).
        other._before_invoke = self._before_invoke
        other._after_invoke = self._after_invoke
        if self.checks != other.checks:
            other.checks = self.checks.copy()
        if self._buckets.valid and not other._buckets.valid:
            other._buckets = self._buckets.copy()
        if self._max_concurrency != other._max_concurrency:
            other._max_concurrency = self._max_concurrency.copy()
        try:
            other.on_error = self.on_error
        except AttributeError:
            pass
        return other
    def copy(self):
        """Creates a copy of this command.
        Returns
        --------
        :class:`Command`
            A new instance of this command.
        """
        ret = self.__class__(self.callback, **self.__original_kwargs__)
        return self._ensure_assignment_on_copy(ret)
    def _update_copy(self, kwargs):
        # Copy with extra kwargs; note the ORIGINAL kwargs intentionally win
        # over the supplied ones (command-specific settings beat group-level
        # defaults).
        if kwargs:
            kw = kwargs.copy()
            kw.update(self.__original_kwargs__)
            copy = self.__class__(self.callback, **kw)
            return self._ensure_assignment_on_copy(copy)
        else:
            return self.copy()
    async def dispatch_error(self, ctx, error):
        # Error propagation order: command-local handler, then the cog's
        # cog_command_error, and finally the global command_error event.
        ctx.command_failed = True
        cog = self.cog
        try:
            coro = self.on_error
        except AttributeError:
            pass
        else:
            injected = wrap_callback(coro)
            if cog is not None:
                await injected(cog, ctx, error)
            else:
                await injected(ctx, error)
        try:
            if cog is not None:
                local = Cog._get_overridden_method(cog.cog_command_error)
                if local is not None:
                    wrapped = wrap_callback(local)
                    await wrapped(ctx, error)
        finally:
            ctx.bot.dispatch('command_error', ctx, error)
    async def _actual_conversion(self, ctx, converter, argument, param):
        # Core single-value conversion. Tries, in order: the bool special
        # case, a discord.* model swapped for its registered Converter,
        # Converter subclasses/instances, then calling the converter directly.
        if converter is bool:
            return _convert_to_bool(argument)
        try:
            module = converter.__module__
        except AttributeError:
            pass
        else:
            # Map raw discord models (e.g. discord.Member) to their
            # corresponding converter class in the converters module.
            if module is not None and (module.startswith('discord.') and not module.endswith('converter')):
                converter = getattr(converters, converter.__name__ + 'Converter', converter)
        try:
            if inspect.isclass(converter):
                if issubclass(converter, converters.Converter):
                    instance = converter()
                    ret = await instance.convert(ctx, argument)
                    return ret
                else:
                    method = getattr(converter, 'convert', None)
                    if method is not None and inspect.ismethod(method):
                        ret = await method(ctx, argument)
                        return ret
            elif isinstance(converter, converters.Converter):
                ret = await converter.convert(ctx, argument)
                return ret
        except CommandError:
            raise
        except Exception as exc:
            raise ConversionError(converter, exc) from exc
        # Fallback: treat the annotation as a plain callable (e.g. int, str).
        try:
            return converter(argument)
        except CommandError:
            raise
        except Exception as exc:
            try:
                name = converter.__name__
            except AttributeError:
                name = converter.__class__.__name__
            raise BadArgument('Converting to "{}" failed for parameter "{}".'.format(name, param.name)) from exc
    async def do_conversion(self, ctx, converter, argument, param):
        # Handles typing.Union annotations (including Optional) by trying
        # each member in order; delegates everything else to
        # _actual_conversion.
        try:
            origin = converter.__origin__
        except AttributeError:
            pass
        else:
            if origin is typing.Union:
                errors = []
                _NoneType = type(None)
                for conv in converter.__args__:
                    # if we got to this part in the code, then the previous conversions have failed
                    # so we should just undo the view, return the default, and allow parsing to continue
                    # with the other parameters
                    if conv is _NoneType and param.kind != param.VAR_POSITIONAL:
                        ctx.view.undo()
                        return None if param.default is param.empty else param.default
                    try:
                        value = await self._actual_conversion(ctx, conv, argument, param)
                    except CommandError as exc:
                        errors.append(exc)
                    else:
                        return value
                # if we're here, then we failed all the converters
                raise BadUnionArgument(param, converter.__args__, errors)
        return await self._actual_conversion(ctx, converter, argument, param)
    def _get_converter(self, param):
        # Unannotated parameters default to str, or to the type of the
        # parameter's default value when one is given (None default -> str).
        converter = param.annotation
        if converter is param.empty:
            if param.default is not param.empty:
                converter = str if param.default is None else type(param.default)
            else:
                converter = str
        return converter
    async def transform(self, ctx, param):
        # Consumes text from ctx.view for a single parameter and converts it.
        required = param.default is param.empty
        converter = self._get_converter(param)
        consume_rest_is_special = param.kind == param.KEYWORD_ONLY and not self.rest_is_raw
        view = ctx.view
        view.skip_ws()
        # The greedy converter is simple -- it keeps going until it fails in which case,
        # it undos the view ready for the next parameter to use instead
        if type(converter) is converters._Greedy:
            if param.kind == param.POSITIONAL_OR_KEYWORD:
                return await self._transform_greedy_pos(ctx, param, required, converter.converter)
            elif param.kind == param.VAR_POSITIONAL:
                return await self._transform_greedy_var_pos(ctx, param, converter.converter)
            else:
                # if we're here, then it's a KEYWORD_ONLY param type
                # since this is mostly useless, we'll helpfully transform Greedy[X]
                # into just X and do the parsing that way.
                converter = converter.converter
        if view.eof:
            if param.kind == param.VAR_POSITIONAL:
                raise RuntimeError() # break the loop
            if required:
                if self._is_typing_optional(param.annotation):
                    return None
                raise MissingRequiredArgument(param)
            return param.default
        previous = view.index
        if consume_rest_is_special:
            # Keyword-only parameter consumes the remainder of the message.
            argument = view.read_rest().strip()
        else:
            argument = view.get_quoted_word()
        view.previous = previous
        return await self.do_conversion(ctx, converter, argument, param)
    async def _transform_greedy_pos(self, ctx, param, required, converter):
        # Greedy positional: keep converting words until one fails, then
        # rewind the view to just before the failing word.
        view = ctx.view
        result = []
        while not view.eof:
            # for use with a manual undo
            previous = view.index
            view.skip_ws()
            try:
                argument = view.get_quoted_word()
                value = await self.do_conversion(ctx, converter, argument, param)
            except (CommandError, ArgumentParsingError):
                view.index = previous
                break
            else:
                result.append(value)
        if not result and not required:
            return param.default
        return result
    async def _transform_greedy_var_pos(self, ctx, param, converter):
        # Greedy *args variant: one conversion per call; a failure rewinds
        # the view and signals the caller's loop to stop via RuntimeError.
        view = ctx.view
        previous = view.index
        try:
            argument = view.get_quoted_word()
            value = await self.do_conversion(ctx, converter, argument, param)
        except (CommandError, ArgumentParsingError):
            view.index = previous
            raise RuntimeError() from None # break loop
        else:
            return value
    @property
    def clean_params(self):
        """OrderedDict[:class:`str`, :class:`inspect.Parameter`]:
        Retrieves the parameter OrderedDict without the context or self parameters.
        Useful for inspecting signature.
        """
        # NOTE: relies on self.params being an OrderedDict copy of
        # Signature.parameters so that popitem(last=False) is available.
        result = self.params.copy()
        if self.cog is not None:
            # first parameter is self
            result.popitem(last=False)
        try:
            # first/second parameter is context
            result.popitem(last=False)
        except Exception:
            raise ValueError('Missing context parameter') from None
        return result
    @property
    def full_parent_name(self):
        """:class:`str`: Retrieves the fully qualified parent command name.
        This the base command name required to execute it. For example,
        in ``?one two three`` the parent name would be ``one two``.
        """
        entries = []
        command = self
        while command.parent is not None:
            command = command.parent
            entries.append(command.name)
        return ' '.join(reversed(entries))
    @property
    def parents(self):
        """List[:class:`Command`]: Retrieves the parents of this command.
        If the command has no parents then it returns an empty :class:`list`.
        For example in commands ``?a b c test``, the parents are ``[c, b, a]``.
        .. versionadded:: 1.1
        """
        entries = []
        command = self
        while command.parent is not None:
            command = command.parent
            entries.append(command)
        return entries
    @property
    def root_parent(self):
        """Optional[:class:`Command`]: Retrieves the root parent of this command.
        If the command has no parents then it returns ``None``.
        For example in commands ``?a b c test``, the root parent is ``a``.
        """
        if not self.parent:
            return None
        return self.parents[-1]
    @property
    def qualified_name(self):
        """:class:`str`: Retrieves the fully qualified command name.
        This is the full parent name with the command name as well.
        For example, in ``?one two three`` the qualified name would be
        ``one two three``.
        """
        parent = self.full_parent_name
        if parent:
            return parent + ' ' + self.name
        else:
            return self.name
    def __str__(self):
        return self.qualified_name
    async def _parse_arguments(self, ctx):
        # Fills ctx.args / ctx.kwargs by walking the callback signature and
        # consuming the string view one parameter at a time.
        ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
        ctx.kwargs = {}
        args = ctx.args
        kwargs = ctx.kwargs
        view = ctx.view
        iterator = iter(self.params.items())
        if self.cog is not None:
            # we have 'self' as the first parameter so just advance
            # the iterator and resume parsing
            try:
                next(iterator)
            except StopIteration:
                fmt = 'Callback for {0.name} command is missing "self" parameter.'
                raise discord.ClientException(fmt.format(self))
        # next we have the 'ctx' as the next parameter
        try:
            next(iterator)
        except StopIteration:
            fmt = 'Callback for {0.name} command is missing "ctx" parameter.'
            raise discord.ClientException(fmt.format(self))
        for name, param in iterator:
            if param.kind == param.POSITIONAL_OR_KEYWORD:
                transformed = await self.transform(ctx, param)
                args.append(transformed)
            elif param.kind == param.KEYWORD_ONLY:
                # kwarg only param denotes "consume rest" semantics
                if self.rest_is_raw:
                    converter = self._get_converter(param)
                    argument = view.read_rest()
                    kwargs[name] = await self.do_conversion(ctx, converter, argument, param)
                else:
                    kwargs[name] = await self.transform(ctx, param)
                break
            elif param.kind == param.VAR_POSITIONAL:
                while not view.eof:
                    try:
                        transformed = await self.transform(ctx, param)
                        args.append(transformed)
                    except RuntimeError:
                        # RuntimeError is the sentinel used by transform /
                        # _transform_greedy_var_pos to end *args consumption.
                        break
        if not self.ignore_extra:
            if not view.eof:
                raise TooManyArguments('Too many arguments passed to ' + self.qualified_name)
    async def call_before_hooks(self, ctx):
        # now that we're done preparing we can call the pre-command hooks
        # first, call the command local hook:
        cog = self.cog
        if self._before_invoke is not None:
            try:
                instance = self._before_invoke.__self__
                # should be cog if @commands.before_invoke is used
            except AttributeError:
                # __self__ only exists for methods, not functions
                # however, if @command.before_invoke is used, it will be a function
                if self.cog:
                    await self._before_invoke(cog, ctx)
                else:
                    await self._before_invoke(ctx)
            else:
                await self._before_invoke(instance, ctx)
        # call the cog local hook if applicable:
        if cog is not None:
            hook = Cog._get_overridden_method(cog.cog_before_invoke)
            if hook is not None:
                await hook(ctx)
        # call the bot global hook if necessary
        hook = ctx.bot._before_invoke
        if hook is not None:
            await hook(ctx)
    async def call_after_hooks(self, ctx):
        # Mirror of call_before_hooks: command-local, then cog, then global.
        cog = self.cog
        if self._after_invoke is not None:
            try:
                instance = self._after_invoke.__self__
            except AttributeError:
                if self.cog:
                    await self._after_invoke(cog, ctx)
                else:
                    await self._after_invoke(ctx)
            else:
                await self._after_invoke(instance, ctx)
        # call the cog local hook if applicable:
        if cog is not None:
            hook = Cog._get_overridden_method(cog.cog_after_invoke)
            if hook is not None:
                await hook(ctx)
        hook = ctx.bot._after_invoke
        if hook is not None:
            await hook(ctx)
    def _prepare_cooldowns(self, ctx):
        # Applies the rate limit using the message timestamp (edit time wins
        # over creation time), normalised to a UTC epoch timestamp.
        if self._buckets.valid:
            dt = ctx.message.edited_at or ctx.message.created_at
            current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
            bucket = self._buckets.get_bucket(ctx.message, current)
            retry_after = bucket.update_rate_limit(current)
            if retry_after:
                raise CommandOnCooldown(bucket, retry_after)
    async def prepare(self, ctx):
        # Full pre-invocation pipeline: checks, cooldown + argument parsing
        # (order controlled by cooldown_after_parsing), concurrency slot,
        # then the before-invoke hooks.
        ctx.command = self
        if not await self.can_run(ctx):
            raise CheckFailure('The check functions for command {0.qualified_name} failed.'.format(self))
        if self.cooldown_after_parsing:
            await self._parse_arguments(ctx)
            self._prepare_cooldowns(ctx)
        else:
            self._prepare_cooldowns(ctx)
            await self._parse_arguments(ctx)
        if self._max_concurrency is not None:
            await self._max_concurrency.acquire(ctx)
        await self.call_before_hooks(ctx)
    def is_on_cooldown(self, ctx):
        """Checks whether the command is currently on cooldown.
        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to use when checking the commands cooldown status.
        Returns
        --------
        :class:`bool`
            A boolean indicating if the command is on cooldown.
        """
        if not self._buckets.valid:
            return False
        bucket = self._buckets.get_bucket(ctx.message)
        dt = ctx.message.edited_at or ctx.message.created_at
        current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
        return bucket.get_tokens(current) == 0
    def reset_cooldown(self, ctx):
        """Resets the cooldown on this command.
        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to reset the cooldown under.
        """
        if self._buckets.valid:
            bucket = self._buckets.get_bucket(ctx.message)
            bucket.reset()
    def get_cooldown_retry_after(self, ctx):
        """Retrieves the amount of seconds before this command can be tried again.
        .. versionadded:: 1.4
        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to retrieve the cooldown from.
        Returns
        --------
        :class:`float`
            The amount of time left on this command's cooldown in seconds.
            If this is ``0.0`` then the command isn't on cooldown.
        """
        if self._buckets.valid:
            bucket = self._buckets.get_bucket(ctx.message)
            dt = ctx.message.edited_at or ctx.message.created_at
            current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
            return bucket.get_retry_after(current)
        return 0.0
    async def invoke(self, ctx):
        await self.prepare(ctx)
        # terminate the invoked_subcommand chain.
        # since we're in a regular command (and not a group) then
        # the invoked subcommand is None.
        ctx.invoked_subcommand = None
        ctx.subcommand_passed = None
        injected = hooked_wrapped_callback(self, ctx, self.callback)
        await injected(*ctx.args, **ctx.kwargs)
    async def reinvoke(self, ctx, *, call_hooks=False):
        # Like invoke, but skips checks/cooldowns/concurrency; hooks are
        # opt-in via ``call_hooks``.
        ctx.command = self
        await self._parse_arguments(ctx)
        if call_hooks:
            await self.call_before_hooks(ctx)
        ctx.invoked_subcommand = None
        try:
            await self.callback(*ctx.args, **ctx.kwargs)
        except:
            ctx.command_failed = True
            raise
        finally:
            if call_hooks:
                await self.call_after_hooks(ctx)
    def error(self, coro):
        """A decorator that registers a coroutine as a local error handler.
        A local error handler is an :func:`.on_command_error` event limited to
        a single command. However, the :func:`.on_command_error` is still
        invoked afterwards as the catch-all.
        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the local error handler.
        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The error handler must be a coroutine.')
        self.on_error = coro
        return coro
    def before_invoke(self, coro):
        """A decorator that registers a coroutine as a pre-invoke hook.
        A pre-invoke hook is called directly before the command is
        called. This makes it a useful function to set up database
        connections or any type of set up required.
        This pre-invoke hook takes a sole parameter, a :class:`.Context`.
        See :meth:`.Bot.before_invoke` for more info.
        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the pre-invoke hook.
        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The pre-invoke hook must be a coroutine.')
        self._before_invoke = coro
        return coro
    def after_invoke(self, coro):
        """A decorator that registers a coroutine as a post-invoke hook.
        A post-invoke hook is called directly after the command is
        called. This makes it a useful function to clean-up database
        connections or any type of clean up required.
        This post-invoke hook takes a sole parameter, a :class:`.Context`.
        See :meth:`.Bot.after_invoke` for more info.
        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the post-invoke hook.
        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The post-invoke hook must be a coroutine.')
        self._after_invoke = coro
        return coro
    @property
    def cog_name(self):
        """Optional[:class:`str`]: The name of the cog this command belongs to, if any."""
        return type(self.cog).__cog_name__ if self.cog is not None else None
    @property
    def short_doc(self):
        """:class:`str`: Gets the "short" documentation of a command.
        By default, this is the :attr:`brief` attribute.
        If that lookup leads to an empty string then the first line of the
        :attr:`help` attribute is used instead.
        """
        if self.brief is not None:
            return self.brief
        if self.help is not None:
            return self.help.split('\n', 1)[0]
        return ''
    def _is_typing_optional(self, annotation):
        # Detects typing.Optional[X]; relies on typing normalising Optional
        # so that NoneType is the last member of __args__.
        try:
            origin = annotation.__origin__
        except AttributeError:
            return False
        if origin is not typing.Union:
            return False
        return annotation.__args__[-1] is type(None)
    @property
    def signature(self):
        """:class:`str`: Returns a POSIX-like signature useful for help command output."""
        if self.usage is not None:
            return self.usage
        params = self.clean_params
        if not params:
            return ''
        result = []
        for name, param in params.items():
            greedy = isinstance(param.annotation, converters._Greedy)
            if param.default is not param.empty:
                # We don't want None or '' to trigger the [name=value] case and instead it should
                # do [name] since [name=None] or [name=] are not exactly useful for the user.
                should_print = param.default if isinstance(param.default, str) else param.default is not None
                if should_print:
                    result.append('[%s=%s]' % (name, param.default) if not greedy else
                                  '[%s=%s]...' % (name, param.default))
                    continue
                else:
                    result.append('[%s]' % name)
            elif param.kind == param.VAR_POSITIONAL:
                result.append('[%s...]' % name)
            elif greedy:
                result.append('[%s]...' % name)
            elif self._is_typing_optional(param.annotation):
                result.append('[%s]' % name)
            else:
                result.append('<%s>' % name)
        return ' '.join(result)
    async def can_run(self, ctx):
        """|coro|
        Checks if the command can be executed by checking all the predicates
        inside the :attr:`.checks` attribute. This also checks whether the
        command is disabled.
        .. versionchanged:: 1.3
            Checks whether the command is disabled or not
        Parameters
        -----------
        ctx: :class:`.Context`
            The ctx of the command currently being invoked.
        Raises
        -------
        :class:`CommandError`
            Any command error that was raised during a check call will be propagated
            by this function.
        Returns
        --------
        :class:`bool`
            A boolean indicating if the command can be invoked.
        """
        if not self.enabled:
            raise DisabledCommand('{0.name} command is disabled'.format(self))
        # Temporarily point ctx.command at this command so checks see the
        # right target; restored in the finally block.
        original = ctx.command
        ctx.command = self
        try:
            if not await ctx.bot.can_run(ctx):
                raise CheckFailure('The global check functions for command {0.qualified_name} failed.'.format(self))
            cog = self.cog
            if cog is not None:
                local_check = Cog._get_overridden_method(cog.cog_check)
                if local_check is not None:
                    ret = await discord.utils.maybe_coroutine(local_check, ctx)
                    if not ret:
                        return False
            predicates = self.checks
            if not predicates:
                # since we have no checks, then we just return True.
                return True
            return await discord.utils.async_all(predicate(ctx) for predicate in predicates)
        finally:
            ctx.command = original
class GroupMixin:
    """A mixin that implements common functionality for classes that behave
    similar to :class:`.Group` and are allowed to register commands.
    Attributes
    -----------
    all_commands: :class:`dict`
        A mapping of command name to :class:`.Command`
        objects.
    case_insensitive: :class:`bool`
        Whether the commands should be case insensitive. Defaults to ``False``.
    """
    def __init__(self, *args, **kwargs):
        # Command registry: aliases are stored alongside primary names, so a
        # command may appear under several keys.
        case_insensitive = kwargs.get('case_insensitive', False)
        self.all_commands = _CaseInsensitiveDict() if case_insensitive else {}
        self.case_insensitive = case_insensitive
        super().__init__(*args, **kwargs)
    @property
    def commands(self):
        """Set[:class:`.Command`]: A unique set of commands without aliases that are registered."""
        return set(self.all_commands.values())
    def recursively_remove_all_commands(self):
        # Depth-first removal of every registered command and subcommand.
        for command in self.all_commands.copy().values():
            if isinstance(command, GroupMixin):
                command.recursively_remove_all_commands()
            self.remove_command(command.name)
    def add_command(self, command):
        """Adds a :class:`.Command` into the internal list of commands.
        This is usually not called, instead the :meth:`~.GroupMixin.command` or
        :meth:`~.GroupMixin.group` shortcut decorators are used instead.
        .. versionchanged:: 1.4
             Raise :exc:`.CommandRegistrationError` instead of generic :exc:`.ClientException`
        Parameters
        -----------
        command: :class:`Command`
            The command to add.
        Raises
        -------
        :exc:`.CommandRegistrationError`
            If the command or its alias is already registered by different command.
        TypeError
            If the command passed is not a subclass of :class:`.Command`.
        """
        if not isinstance(command, Command):
            raise TypeError('The command passed must be a subclass of Command')
        if isinstance(self, Command):
            # Registering onto a Group: the group becomes the parent.
            command.parent = self
        if command.name in self.all_commands:
            raise CommandRegistrationError(command.name)
        self.all_commands[command.name] = command
        for alias in command.aliases:
            if alias in self.all_commands:
                raise CommandRegistrationError(alias, alias_conflict=True)
            self.all_commands[alias] = command
    def remove_command(self, name):
        """Remove a :class:`.Command` from the internal list
        of commands.
        This could also be used as a way to remove aliases.
        Parameters
        -----------
        name: :class:`str`
            The name of the command to remove.
        Returns
        --------
        Optional[:class:`.Command`]
            The command that was removed. If the name is not valid then
            ``None`` is returned instead.
        """
        command = self.all_commands.pop(name, None)
        # does not exist
        if command is None:
            return None
        if name in command.aliases:
            # we're removing an alias so we don't want to remove the rest
            return command
        # we're not removing the alias so let's delete the rest of them.
        for alias in command.aliases:
            self.all_commands.pop(alias, None)
        return command
    def walk_commands(self):
        """An iterator that recursively walks through all commands and subcommands.
        .. versionchanged:: 1.4
            Duplicates due to aliases are no longer returned
        """
        for command in self.commands:
            yield command
            if isinstance(command, GroupMixin):
                yield from command.walk_commands()
    def get_command(self, name):
        """Get a :class:`.Command` from the internal list
        of commands.
        This could also be used as a way to get aliases.
        The name could be fully qualified (e.g. ``'foo bar'``) will get
        the subcommand ``bar`` of the group command ``foo``. If a
        subcommand is not found then ``None`` is returned just as usual.
        Parameters
        -----------
        name: :class:`str`
            The name of the command to get.
        Returns
        --------
        Optional[:class:`Command`]
            The command that was requested. If not found, returns ``None``.
        """
        # fast path, no space in name.
        if ' ' not in name:
            return self.all_commands.get(name)
        names = name.split()
        if not names:
            return None
        obj = self.all_commands.get(names[0])
        if not isinstance(obj, GroupMixin):
            return obj
        # Walk the remaining qualified-name components down the group tree.
        for name in names[1:]:
            try:
                obj = obj.all_commands[name]
            except (AttributeError, KeyError):
                return None
        return obj
    def command(self, *args, **kwargs):
        """A shortcut decorator that invokes :func:`.command` and adds it to
        the internal command list via :meth:`~.GroupMixin.add_command`.
        """
        def decorator(func):
            kwargs.setdefault('parent', self)
            result = command(*args, **kwargs)(func)
            self.add_command(result)
            return result
        return decorator
    def group(self, *args, **kwargs):
        """A shortcut decorator that invokes :func:`.group` and adds it to
        the internal command list via :meth:`~.GroupMixin.add_command`.
        """
        def decorator(func):
            kwargs.setdefault('parent', self)
            result = group(*args, **kwargs)(func)
            self.add_command(result)
            return result
        return decorator
class Group(GroupMixin, Command):
    """A class that implements a grouping protocol for commands to be
    executed as subcommands.
    This class is a subclass of :class:`.Command` and thus all options
    valid in :class:`.Command` are valid in here as well.
    Attributes
    -----------
    invoke_without_command: :class:`bool`
        Indicates if the group callback should begin parsing and
        invocation only if no subcommand was found. Useful for
        making it an error handling function to tell the user that
        no subcommand was found or to have different functionality
        in case no subcommand was found. If this is ``False``, then
        the group callback will always be invoked first. This means
        that the checks and the parsing dictated by its parameters
        will be executed. Defaults to ``False``.
    case_insensitive: :class:`bool`
        Indicates if the group's commands should be case insensitive.
        Defaults to ``False``.
    """
    def __init__(self, *args, **attrs):
        # pop before super() so Command.__init__ never sees the group-only kwarg
        self.invoke_without_command = attrs.pop('invoke_without_command', False)
        super().__init__(*args, **attrs)
    def copy(self):
        """Creates a copy of this :class:`Group`.
        Returns
        --------
        :class:`Group`
            A new instance of this group.
        """
        ret = super().copy()
        # copy subcommands too, so the returned group is fully independent
        for cmd in self.commands:
            ret.add_command(cmd.copy())
        return ret
    async def invoke(self, ctx):
        ctx.invoked_subcommand = None
        ctx.subcommand_passed = None
        # unless invoke_without_command is set, the group callback runs
        # before (and regardless of) any matched subcommand
        early_invoke = not self.invoke_without_command
        if early_invoke:
            await self.prepare(ctx)
        # peek at the next word to see if it names a subcommand
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        if early_invoke:
            injected = hooked_wrapped_callback(self, ctx, self.callback)
            await injected(*ctx.args, **ctx.kwargs)
        if trigger and ctx.invoked_subcommand:
            ctx.invoked_with = trigger
            await ctx.invoked_subcommand.invoke(ctx)
        elif not early_invoke:
            # undo the trigger parsing
            view.index = previous
            view.previous = previous
            await super().invoke(ctx)
    async def reinvoke(self, ctx, *, call_hooks=False):
        ctx.invoked_subcommand = None
        early_invoke = not self.invoke_without_command
        if early_invoke:
            ctx.command = self
            await self._parse_arguments(ctx)
            if call_hooks:
                await self.call_before_hooks(ctx)
        # peek at the next word to see if it names a subcommand
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        if early_invoke:
            try:
                # reinvoke bypasses checks/cooldowns: call the raw callback
                await self.callback(*ctx.args, **ctx.kwargs)
            except:
                ctx.command_failed = True
                raise
            finally:
                if call_hooks:
                    await self.call_after_hooks(ctx)
        if trigger and ctx.invoked_subcommand:
            ctx.invoked_with = trigger
            await ctx.invoked_subcommand.reinvoke(ctx, call_hooks=call_hooks)
        elif not early_invoke:
            # undo the trigger parsing
            view.index = previous
            view.previous = previous
            await super().reinvoke(ctx, call_hooks=call_hooks)
# Decorators
def command(name=None, cls=None, **attrs):
    """A decorator that transforms a function into a :class:`.Command`
    (or, when called through :func:`.group`, a :class:`.Group`).

    By default the ``help`` attribute is taken from the docstring of the
    function and cleaned up with ``inspect.cleandoc``; a ``bytes`` docstring
    is decoded as utf-8. All checks added with the :func:`.check` & co.
    decorators are added to the command. There is no way to supply your own
    checks through this decorator.

    Parameters
    -----------
    name: :class:`str`
        The name to create the command with. By default this uses the
        function name unchanged.
    cls
        The class to construct with. By default this is :class:`.Command`.
        You usually do not change this.
    attrs
        Keyword arguments to pass into the construction of the class denoted
        by ``cls``.

    Raises
    -------
    TypeError
        If the function is not a coroutine or is already a command.
    """
    command_cls = Command if cls is None else cls

    def wrapper(func):
        if isinstance(func, Command):
            raise TypeError('Callback is already a command.')
        return command_cls(func, name=name, **attrs)

    return wrapper
def group(name=None, **attrs):
    """A decorator that transforms a function into a :class:`.Group`.

    Identical to the :func:`.command` decorator except that ``cls``
    defaults to :class:`Group`.

    .. versionchanged:: 1.1
        The ``cls`` parameter can now be passed.
    """
    if 'cls' not in attrs:
        attrs['cls'] = Group
    return command(name=name, **attrs)
def check(predicate):
    r"""A decorator that adds a check to a :class:`.Command` or its
    subclasses. Checks are accessible via :attr:`.Command.checks`.

    A check is a predicate taking a single :class:`.Context`. If it returns
    a ``False``\-like value, invocation raises :exc:`.CheckFailure`, which
    is sent to the :func:`.on_command_error` event. Exceptions raised inside
    the predicate should derive from :exc:`.CommandError`; anything else is
    propagated unchanged.

    The value returned by this decorator carries a ``predicate`` attribute
    holding the (coroutine-wrapped) predicate, which allows introspection
    and chaining:

    .. code-block:: python3

        def owner_or_permissions(**perms):
            original = commands.has_permissions(**perms).predicate
            async def extended_check(ctx):
                if ctx.guild is None:
                    return False
                return ctx.guild.owner_id == ctx.author.id or await original(ctx)
            return commands.check(extended_check)

    .. note::
        The function stored on ``predicate`` is **always** a coroutine, even
        if the original predicate was not.

    .. versionchanged:: 1.3
        The ``predicate`` attribute was added.

    Parameters
    -----------
    predicate: Callable[[:class:`Context`], :class:`bool`]
        The predicate to check if the command should be invoked.
    """
    def decorator(func):
        # Commands store checks directly; plain functions stash them in an
        # attribute that the command constructor consumes later.
        if isinstance(func, Command):
            func.checks.append(predicate)
        else:
            stored = getattr(func, '__commands_checks__', None)
            if stored is None:
                stored = func.__commands_checks__ = []
            stored.append(predicate)
        return func

    # expose the predicate, always as a coroutine function
    if inspect.iscoroutinefunction(predicate):
        decorator.predicate = predicate
    else:
        @functools.wraps(predicate)
        async def async_predicate(ctx):
            return predicate(ctx)
        decorator.predicate = async_predicate
    return decorator
def check_any(*checks):
    r"""A :func:`check` that passes if **any** of the given checks pass,
    i.e. logical OR. If every check fails, :exc:`.CheckAnyFailure` (which
    inherits from :exc:`.CheckFailure`) is raised to signal the failure.

    .. note::
        The ``predicate`` attribute for this function **is** a coroutine.

    .. versionadded:: 1.3

    Parameters
    ------------
    \*checks: Callable[[:class:`Context`], :class:`bool`]
        An argument list of checks that have been decorated with
        the :func:`check` decorator.

    Raises
    -------
    TypeError
        A check passed has not been decorated with the :func:`check`
        decorator.

    Example: allowing either the bot owner or the guild owner::

        def is_guild_owner():
            def predicate(ctx):
                return ctx.guild is not None and ctx.guild.owner_id == ctx.author.id
            return commands.check(predicate)

        @bot.command()
        @commands.check_any(commands.is_owner(), is_guild_owner())
        async def only_for_owners(ctx):
            await ctx.send('Hello mister owner!')
    """
    unwrapped = []
    for wrapped in checks:
        try:
            unwrapped.append(wrapped.predicate)
        except AttributeError:
            raise TypeError('%r must be wrapped by commands.check decorator' % wrapped) from None

    async def predicate(ctx):
        failures = []
        for pred in unwrapped:
            try:
                ok = await pred(ctx)
            except CheckFailure as exc:
                failures.append(exc)
                continue
            if ok:
                return True
        # every check either failed or returned falsy
        raise CheckAnyFailure(unwrapped, failures)

    return check(predicate)
def has_role(item):
    """A :func:`.check` that verifies the invoking member has the role given
    by exact name or ID.

    A string must match the role name exactly (including caps and spelling);
    an integer must be the exact snowflake ID of the role. In a private
    message context the check raises :exc:`.NoPrivateMessage`; a missing
    role raises :exc:`.MissingRole`. Both inherit from :exc:`.CheckFailure`.

    .. versionchanged:: 1.1
        Raise :exc:`.MissingRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`

    Parameters
    -----------
    item: Union[:class:`int`, :class:`str`]
        The name or ID of the role to check.
    """
    def predicate(ctx):
        if not isinstance(ctx.channel, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        # integers are treated as role IDs, anything else as exact names
        lookup = {'id': item} if isinstance(item, int) else {'name': item}
        if discord.utils.get(ctx.author.roles, **lookup) is None:
            raise MissingRole(item)
        return True
    return check(predicate)
def has_any_role(*items):
    r"""A :func:`.check` that verifies the invoking member has **any** of
    the given roles. Having one out of the whole list is enough.

    As with :func:`.has_role`\, names and IDs must match exactly. When none
    of the roles are present :exc:`.MissingAnyRole` is raised; in a private
    message context :exc:`.NoPrivateMessage` is raised. Both inherit from
    :exc:`.CheckFailure`.

    .. versionchanged:: 1.1
        Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`

    Parameters
    -----------
    items: List[Union[:class:`str`, :class:`int`]]
        An argument list of names or IDs to check that the member has roles wise.

    Example
    --------
    .. code-block:: python3

        @bot.command()
        @commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
        async def cool(ctx):
            await ctx.send('You are cool indeed')
    """
    def predicate(ctx):
        if not isinstance(ctx.channel, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        roles = ctx.author.roles
        for item in items:
            # ints match by ID, anything else by exact name
            found = (discord.utils.get(roles, id=item)
                     if isinstance(item, int)
                     else discord.utils.get(roles, name=item))
            if found is not None:
                return True
        raise MissingAnyRole(items)
    return check(predicate)
def bot_has_role(item):
    """Similar to :func:`.has_role` except the role is checked on the bot
    itself.

    Raises :exc:`.BotMissingRole` if the bot lacks the role, or
    :exc:`.NoPrivateMessage` in a private message context. Both inherit
    from :exc:`.CheckFailure`.

    .. versionchanged:: 1.1
        Raise :exc:`.BotMissingRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    """
    def predicate(ctx):
        channel = ctx.channel
        if not isinstance(channel, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        bot_member = channel.guild.me
        # ints match by ID, anything else by exact name
        lookup = {'id': item} if isinstance(item, int) else {'name': item}
        if discord.utils.get(bot_member.roles, **lookup) is None:
            raise BotMissingRole(item)
        return True
    return check(predicate)
def bot_has_any_role(*items):
    """Similar to :func:`.has_any_role` except the roles are checked on the
    bot itself.

    Raises :exc:`.BotMissingAnyRole` if the bot has none of the listed
    roles, or :exc:`.NoPrivateMessage` in a private message context. Both
    inherit from :exc:`.CheckFailure`.

    .. versionchanged:: 1.1
        Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    """
    def predicate(ctx):
        channel = ctx.channel
        if not isinstance(channel, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        roles = channel.guild.me.roles
        for item in items:
            # ints match by ID, anything else by exact name
            found = (discord.utils.get(roles, id=item)
                     if isinstance(item, int)
                     else discord.utils.get(roles, name=item))
            if found is not None:
                return True
        raise BotMissingAnyRole(items)
    return check(predicate)
def has_permissions(**perms):
    """A :func:`.check` that verifies the member has all of the given
    permissions.

    Note that this operates on the **current channel** permissions, not the
    guild-wide permissions. The keyword arguments must be named exactly like
    the properties of :class:`.discord.Permissions`. Missing permissions
    raise :exc:`.MissingPermissions`, a subclass of :exc:`.CheckFailure`.

    Parameters
    ------------
    perms
        An argument list of permissions to check for.

    Example
    ---------
    .. code-block:: python3

        @bot.command()
        @commands.has_permissions(manage_messages=True)
        async def test(ctx):
            await ctx.send('You can manage messages.')
    """
    # reject unknown permission names up-front, at decoration time
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError('Invalid permission(s): %s' % (', '.join(invalid)))

    def predicate(ctx):
        permissions = ctx.channel.permissions_for(ctx.author)
        missing = [
            name for name, required in perms.items()
            if getattr(permissions, name) != required
        ]
        if missing:
            raise MissingPermissions(missing)
        return True

    return check(predicate)
def bot_has_permissions(**perms):
    """Similar to :func:`.has_permissions` except the permissions are
    checked for the bot itself.

    Raises :exc:`.BotMissingPermissions`, a subclass of
    :exc:`.CheckFailure`, when permissions are missing.
    """
    # reject unknown permission names up-front, at decoration time
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError('Invalid permission(s): %s' % (', '.join(invalid)))

    def predicate(ctx):
        guild = ctx.guild
        # outside a guild the bot is represented by its plain user account
        bot_member = guild.me if guild is not None else ctx.bot.user
        permissions = ctx.channel.permissions_for(bot_member)
        missing = [
            name for name, required in perms.items()
            if getattr(permissions, name) != required
        ]
        if missing:
            raise BotMissingPermissions(missing)
        return True

    return check(predicate)
def has_guild_permissions(**perms):
    """Similar to :func:`.has_permissions`, but operates on guild-wide
    permissions instead of the current channel permissions.

    If called in a DM context it raises :exc:`.NoPrivateMessage`.

    .. versionadded:: 1.3
    """
    # reject unknown permission names up-front, at decoration time
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError('Invalid permission(s): %s' % (', '.join(invalid)))

    def predicate(ctx):
        if not ctx.guild:
            raise NoPrivateMessage
        permissions = ctx.author.guild_permissions
        missing = [
            name for name, required in perms.items()
            if getattr(permissions, name) != required
        ]
        if missing:
            raise MissingPermissions(missing)
        return True

    return check(predicate)
def bot_has_guild_permissions(**perms):
    """Similar to :func:`.has_guild_permissions`, but checks the bot
    member's guild permissions.

    .. versionadded:: 1.3
    """
    # reject unknown permission names up-front, at decoration time
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError('Invalid permission(s): %s' % (', '.join(invalid)))

    def predicate(ctx):
        if not ctx.guild:
            raise NoPrivateMessage
        permissions = ctx.me.guild_permissions
        missing = [
            name for name, required in perms.items()
            if getattr(permissions, name) != required
        ]
        if missing:
            raise BotMissingPermissions(missing)
        return True

    return check(predicate)
def dm_only():
    """A :func:`.check` restricting the command to private (DM) contexts;
    only private messages are allowed when using the command.

    Raises :exc:`.PrivateMessageOnly`, a subclass of :exc:`.CheckFailure`,
    when used inside a guild.

    .. versionadded:: 1.1
    """
    def predicate(ctx):
        if ctx.guild is None:
            return True
        raise PrivateMessageOnly()
    return check(predicate)
def guild_only():
    """A :func:`.check` restricting the command to guild contexts;
    no private messages are allowed when using the command.

    Raises :exc:`.NoPrivateMessage`, a subclass of :exc:`.CheckFailure`,
    when used in a DM.
    """
    def predicate(ctx):
        if ctx.guild is not None:
            return True
        raise NoPrivateMessage()
    return check(predicate)
def is_owner():
    """A :func:`.check` that only allows the bot's owner (as reported by
    :meth:`.Bot.is_owner`) to invoke the command.

    Raises :exc:`.NotOwner`, derived from :exc:`.CheckFailure`, otherwise.
    """
    async def predicate(ctx):
        if await ctx.bot.is_owner(ctx.author):
            return True
        raise NotOwner('You do not own this bot.')
    return check(predicate)
def is_nsfw():
    """A :func:`.check` that requires the channel to be NSFW.

    Raises :exc:`.NSFWChannelRequired`, derived from :exc:`.CheckFailure`.

    .. versionchanged:: 1.1
        Raise :exc:`.NSFWChannelRequired` instead of generic :exc:`.CheckFailure`.
        DM channels will also now pass this check.
    """
    def pred(ctx):
        channel = ctx.channel
        # DMs always pass; guild text channels must be flagged NSFW
        if ctx.guild is None:
            return True
        if isinstance(channel, discord.TextChannel) and channel.is_nsfw():
            return True
        raise NSFWChannelRequired(channel)
    return check(pred)
def cooldown(rate, per, type=BucketType.default):
    """A decorator that attaches a cooldown to a :class:`.Command`.

    A cooldown allows a command to only be used a specific number of times
    within a time frame, bucketed per-guild, per-channel, per-user, per-role
    or globally according to ``type`` (a :class:`.BucketType`). A triggered
    cooldown raises :exc:`.CommandOnCooldown` in :func:`.on_command_error`
    and the local error handler. A command can only have a single cooldown.

    Parameters
    ------------
    rate: :class:`int`
        The number of times a command can be used before triggering a cooldown.
    per: :class:`float`
        The amount of seconds to wait for a cooldown when it's been triggered.
    type: :class:`.BucketType`
        The type of cooldown to have.
    """
    def decorator(func):
        bucket = Cooldown(rate, per, type)
        if isinstance(func, Command):
            func._buckets = CooldownMapping(bucket)
        else:
            # stashed for the command constructor to pick up later
            func.__commands_cooldown__ = bucket
        return func
    return decorator
def max_concurrency(number, per=BucketType.default, *, wait=False):
    """A decorator that limits how many invocations of a :class:`.Command`
    (or subclass) may be running at the same time.

    Unlike a cooldown there is no waiting period or token bucket -- only a
    fixed number of simultaneous invocations, bucketed by ``per``. Useful
    when a command takes too long or only one user should use it at a time.

    .. versionadded:: 1.3

    Parameters
    -------------
    number: :class:`int`
        The maximum number of invocations of this command that can be running at the same time.
    per: :class:`.BucketType`
        The bucket that this concurrency is based on, e.g. ``BucketType.guild`` would allow
        it to be used up to ``number`` times per guild.
    wait: :class:`bool`
        Whether the command should wait for the queue to be over. If this is set to ``False``
        then instead of waiting until the command can run again, the command raises
        :exc:`.MaxConcurrencyReached` to its error handler. If this is set to ``True``
        then the command waits until it can be executed.
    """
    def decorator(func):
        limit = MaxConcurrency(number, per=per, wait=wait)
        if isinstance(func, Command):
            func._max_concurrency = limit
        else:
            # stashed for the command constructor to pick up later
            func.__commands_max_concurrency__ = limit
        return func
    return decorator
def before_invoke(coro):
    """A decorator that registers a coroutine as a pre-invoke hook.

    This allows one shared before-invoke hook to be attached to several
    commands that do not have to be within the same cog.

    .. versionadded:: 1.4

    Example
    ---------
    .. code-block:: python3

        async def record_usage(ctx):
            print(ctx.author, 'used', ctx.command, 'at', ctx.message.created_at)

        @bot.command()
        @commands.before_invoke(record_usage)
        async def who(ctx): # Output: <User> used who at <Time>
            await ctx.send('i am a bot')

        class What(commands.Cog):
            @commands.before_invoke(record_usage)
            @commands.command()
            async def when(self, ctx): # Output: <User> used when at <Time>
                await ctx.send('and i have existed since {}'.format(ctx.bot.user.created_at))
            @commands.command()
            async def where(self, ctx): # Output: <Nothing>
                await ctx.send('on Discord')
        bot.add_cog(What())
    """
    def decorator(func):
        if isinstance(func, Command):
            func.before_invoke(coro)
            return func
        # stashed for the command constructor to pick up later
        func.__before_invoke__ = coro
        return func
    return decorator
def after_invoke(coro):
    """A decorator that registers a coroutine as a post-invoke hook.

    This allows one shared after-invoke hook to be attached to several
    commands that do not have to be within the same cog.

    .. versionadded:: 1.4
    """
    def decorator(func):
        if isinstance(func, Command):
            func.after_invoke(coro)
            return func
        # stashed for the command constructor to pick up later
        func.__after_invoke__ = coro
        return func
    return decorator
| 33.860947 | 122 | 0.608563 |
b8c459009f6061ebdc23feabd3096598c24025c4 | 1,041 | py | Python | ipython/mcs_pure_python.py | portfolioscout/py4fi | 9a65df340189ed52037456da221bf66fe89e787f | [
"CNRI-Python"
] | 15 | 2018-07-10T09:18:23.000Z | 2021-12-30T06:35:09.000Z | ipython/mcs_pure_python.py | ioancw/py4fi | bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec | [
"CNRI-Python"
] | null | null | null | ipython/mcs_pure_python.py | ioancw/py4fi | bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec | [
"CNRI-Python"
] | 13 | 2018-01-08T01:10:22.000Z | 2021-05-26T17:35:35.000Z | #
#
# Monte Carlo valuation of European call options with pure Python
# mcs_pure_python.py
#
# Fixed: the original used Python 2 `print` statements (a SyntaxError on
# Python 3); output now uses print() calls valid on both. The monolithic
# module-level loop is decomposed into parameterized functions behind a
# __main__ guard so the pricing logic is reusable and testable.
from time import time
from math import exp, sqrt, log
from random import gauss, seed


def simulate_paths(S0, r, sigma, T, M, I):
    """Simulate I geometric-Brownian-motion price paths with M steps each.

    S0: initial value; r: riskless short rate; sigma: volatility;
    T: maturity in years; M: number of time steps; I: number of paths.
    Returns a list of I paths, each a list of M + 1 price levels.
    """
    dt = T / M  # length of one time interval
    paths = []
    for _ in range(I):
        path = [S0]
        for _step in range(M):
            z = gauss(0.0, 1.0)
            # exact discretization of geometric Brownian motion
            St = path[-1] * exp((r - 0.5 * sigma ** 2) * dt
                                + sigma * sqrt(dt) * z)
            path.append(St)
        paths.append(path)
    return paths


def european_call_value(paths, K, r, T):
    """Monte Carlo estimator: discounted average terminal call payoff."""
    payoff_sum = sum(max(path[-1] - K, 0) for path in paths)
    return exp(-r * T) * payoff_sum / len(paths)


def main():
    """Run the simulation with the textbook parameters and print results."""
    seed(20000)
    t0 = time()
    # Parameters
    S0 = 100.  # initial value
    K = 105.  # strike price
    T = 1.0  # maturity
    r = 0.05  # riskless short rate
    sigma = 0.2  # volatility
    M = 50  # number of time steps
    I = 250000  # number of paths
    S = simulate_paths(S0, r, sigma, T, M, I)
    C0 = european_call_value(S, K, r, T)
    tpy = time() - t0
    print("European Option Value %7.3f" % C0)
    print("Duration in Seconds   %7.3f" % tpy)


if __name__ == '__main__':
    main()
28381b67299c6f7258a6e74319c83508a527d0ac | 16,761 | py | Python | openstates/data/tests/test_models.py | twentyforty/openstates-core | af80fa8caa2f435e66bae6d373fb220e24a1e385 | [
"MIT"
] | null | null | null | openstates/data/tests/test_models.py | twentyforty/openstates-core | af80fa8caa2f435e66bae6d373fb220e24a1e385 | [
"MIT"
] | null | null | null | openstates/data/tests/test_models.py | twentyforty/openstates-core | af80fa8caa2f435e66bae6d373fb220e24a1e385 | [
"MIT"
] | null | null | null | import pytest
from openstates.data.models import Jurisdiction, Division, Organization, Person, Post
from django.core.exceptions import ValidationError
# Division.subtypes_from_id should split an OCD division ID into typed
# components and report how many levels it found.
def test_division_subtypes_from_id():
    # simplest case
    assert Division.subtypes_from_id("ocd-division/country:us") == (
        {"country": "us"},
        1,
    )
    # ocd-divison part is optional
    assert Division.subtypes_from_id("country:us/state:ak/county:wild") == (
        {
            "country": "us",
            "subtype1": "state",
            "subid1": "ak",
            "subtype2": "county",
            "subid2": "wild",
        },
        3,
    )
    # country is not optional
    with pytest.raises(ValueError):
        Division.subtypes_from_id("state:nc/city:raleigh")
# creating a Division should populate the denormalized subtype columns
# from the ID, and str() should include the ID.
@pytest.mark.django_db
def test_division_create():
    division_id = "ocd-division/country:us/state:ak/county:wild"
    d = Division.objects.create(id=division_id, name="Wild County")
    assert d.country == "us"
    assert d.subtype1 == "state"
    assert d.subid1 == "ak"
    assert d.subtype2 == "county"
    assert d.subid2 == "wild"
    assert division_id in str(d)
# children_of should return divisions beneath a parent ID, optionally
# filtered by subtype and limited by depth.
@pytest.mark.django_db
def test_division_children_of():
    Division.objects.create("ocd-division/country:us", name="US")
    ak = Division.objects.create("ocd-division/country:us/state:ak", name="Alaska")
    Division.objects.create("ocd-division/country:us/state:ak/county:wild", name="Wild")
    Division.objects.create("ocd-division/country:us/state:ak/county:mild", name="Mild")
    Division.objects.create(
        "ocd-division/country:us/state:ak/county:wild/place:a", name="A"
    )
    Division.objects.create(
        "ocd-division/country:us/state:ak/county:wild/place:b", name="B"
    )
    Division.objects.create(
        "ocd-division/country:us/state:ak/county:wild/school:a", name="A"
    )
    Division.objects.create(
        "ocd-division/country:us/state:ak/county:mild/place:a", name="A"
    )
    Division.objects.create(
        "ocd-division/country:us/state:ak/county:mild/place:a/x:y", name="A"
    )
    # simplest ==
    assert Division.objects.children_of("ocd-division/country:us")[0].id == ak.id
    # 3 divisions within wild county
    assert (
        Division.objects.children_of(
            "ocd-division/country:us/state:ak/county:wild"
        ).count()
        == 3
    )
    # only one school in wild county
    assert (
        Division.objects.children_of(
            "ocd-division/country:us/state:ak/county:wild", subtype="school"
        ).count()
        == 1
    )
    # 6 divisions beneath alaska up to 2 levels
    assert (
        Division.objects.children_of(
            "ocd-division/country:us/state:ak", depth=2
        ).count()
        == 6
    )
    # 7 divisions beneath alaska up to 3 levels
    assert (
        Division.objects.children_of(
            "ocd-division/country:us/state:ak", depth=3
        ).count()
        == 7
    )
)
@pytest.mark.django_db
def test_ocdid_default():
o = Organization.objects.create(name="test org")
assert o.id.startswith("ocd-organization/")
assert o.pk == o.id
p = Person.objects.create(name="test person")
assert p.id.startswith("ocd-person/")
@pytest.mark.django_db
def test_ocdid_default_nondup():
"""ensure that defaults actually vary"""
p1 = Person(name="test person 1")
p2 = Person(name="test person 2")
assert p1.id != p2.id
@pytest.mark.django_db
def test_ocdid_validation_jurisdiction():
# this fails
with pytest.raises(ValidationError):
j = Jurisdiction(
name="test juris",
id="ocd-division/country:us/test:something/else",
url="http://example.com",
)
j.full_clean(exclude=["division"])
# this succeeds
j = Jurisdiction(
name="test juris",
id="ocd-jurisdiction/country:us/test:something/else",
url="http://example.com",
)
j.full_clean(exclude=["division"])
assert str(j) == j.name
@pytest.mark.django_db
def test_ocdid_validation_other():
# this test should handle everything that isn't a jurisdiction
# this succeeds
o = Organization(name="test org")
o.full_clean(exclude=["parent", "jurisdiction"])
# this raises
with pytest.raises(ValidationError):
o = Organization(name="this is a test", id="ocd-organization/3")
o.full_clean(exclude=["parent", "jurisdiction"])
# get_parents should walk the parent chain from nearest to furthest
@pytest.mark.django_db
def test_organization_get_parents():
    o1 = Organization.objects.create(
        name="National Organization on Bread-and-Cheese Products"
    )
    o2 = Organization.objects.create(name="Committee on Pizza", parent=o1)
    o3 = Organization.objects.create(name="Subcommittee on Sicilian Pizza", parent=o2)
    assert list(o3.get_parents()) == [o2, o1]
# str() of an organization should contain its name
@pytest.mark.django_db
def test_organization_str():
    o = Organization.objects.create(name="test org")
    assert "test org" in str(o)
# str() of a post should mention both its label and its organization
@pytest.mark.django_db
def test_organization_post():
    o = Organization.objects.create(name="test org")
    o.posts.create(label="CEO")
    assert "CEO" in str(o.posts.all()[0])
    assert "test org" in str(o.posts.all()[0])
# membership str() and the member_of query (by name or by ID)
@pytest.mark.django_db
def test_organization_membership():
    o = Organization.objects.create(name="test org")
    p = Person.objects.create(name="test person")
    o.memberships.create(person=p, role="CEO")
    assert "CEO" in str(o.memberships.all()[0])
    assert "test person" in str(o.memberships.all()[0])
    assert "test org" in str(o.memberships.all()[0])
    assert len(o.get_current_members()) == 1
    assert len(Person.objects.member_of("test org")) == 1
    assert len(Person.objects.member_of(o.id)) == 1
# member_of with division_id should distinguish same-label posts that
# belong to different divisions.
@pytest.mark.django_db
def test_organization_membership_multiple_divisions():
    o = Organization.objects.create(name="state")
    p1 = Person.objects.create(name="rep1")
    p2 = Person.objects.create(name="rep2")
    d1 = Division.objects.create(id="ocd-division/country:aa/place:locality1", name="locality1")
    d2 = Division.objects.create(id="ocd-division/country:aa/place:locality2", name="locality2")
    post1 = Post.objects.create(label="district rep", role="vip", organization=o, division=d1)
    post2 = Post.objects.create(label="district rep", role="vip", organization=o, division=d2)
    o.memberships.create(person=p1, post=post1)
    o.memberships.create(person=p2, post=post2)
    people = Person.objects.member_of(o.id, post="district rep").all()
    assert len(people) == 2
    assert p1 in people
    assert p2 in people
    people = Person.objects.member_of(o.id, post="district rep", division_id=d1.id).all()
    assert len(people) == 1
    assert p1 in people
    people = Person.objects.member_of(o.id, post="district rep", division_id=d2.id).all()
    assert len(people) == 1
    assert p2 in people
# ended memberships are excluded unless current_only=False is passed
@pytest.mark.django_db
def test_member_of_with_post():
    o = Organization.objects.create(name="The Org")
    p = Post.objects.create(organization=o, label="1")
    cur = Person.objects.create(name="current")
    prev = Person.objects.create(name="previous")
    o.memberships.create(person=cur, post=p)
    o.memberships.create(person=prev, post=p, end_date="2019-01-01")
    assert len(o.get_current_members()) == 1
    assert len(Person.objects.member_of("The Org")) == 1
    assert len(Person.objects.member_of("The Org", post="1")) == 1
    assert len(Person.objects.member_of("The Org", current_only=False)) == 2
# a person with an ended membership in one post and a current one in
# another should not be double-counted for the old post
@pytest.mark.django_db
def test_member_of_prior_role_conflict():
    # test for https://github.com/openstates/openstates.org/issues/304
    o = Organization.objects.create(name="The Org")
    p = Post.objects.create(organization=o, label="1")
    p2 = Post.objects.create(organization=o, label="2")
    cur = Person.objects.create(name="current")
    prev = Person.objects.create(name="previous")
    o.memberships.create(person=cur, post=p)
    o.memberships.create(person=prev, post=p, end_date="2019-01-01")
    o.memberships.create(person=prev, post=p2)
    assert len(o.get_current_members()) == 2
    assert len(Person.objects.member_of("The Org")) == 2
    assert len(Person.objects.member_of("The Org", post="1")) == 1
    assert len(Person.objects.member_of("The Org", post="1", current_only=False)) == 2
@pytest.mark.django_db
def test_person_query_active_members(senator):
assert Person.objects.count() == 1
people = Person.objects.active()
assert len(people) == 1
assert people[0].name == "Willy Worm"
@pytest.mark.django_db
def test_person_query_current_with_roles(senator, django_assert_num_queries):
senate = senator.memberships.filter(organization__classification="upper")[
0
].organization
# prefetch grabs membership, org, post too
with django_assert_num_queries(4):
willy = Person.objects.current_legislators_with_roles([senate])[0]
assert willy.name == "Willy Worm"
@pytest.mark.django_db
def test_person_search(senator, person):
assert Person.objects.search("Worm").count() == 1
assert Person.objects.search("Worm", state="mo").count() == 1
assert Person.objects.search("Worm", state="wy").count() == 0
assert Person.objects.search("Pig").count() == 0
@pytest.mark.django_db
def test_person_str(person):
assert person.name in str(person)
@pytest.mark.django_db
def test_legislative_session_str(legislative_session):
assert legislative_session.name in str(legislative_session)
@pytest.mark.django_db
def test_vote_event_str(vote_event):
    """str(VoteEvent) uses the motion text, or the identifier once one is set."""
    assert vote_event.motion_text in str(vote_event)
    # test adding identifier and alternate string repr
    vote_event.identifier = "Roll Call #2372"
    vote_event.save()
    assert "Roll Call #2372" in str(vote_event)
@pytest.mark.django_db
def test_vote_event_count(vote_event):
    """A vote count's string form includes its option label."""
    vote_event.counts.create(option="yes", value=36)
    assert "yes" in str(vote_event.counts.all()[0])
@pytest.mark.django_db
def test_vote_event_vote(vote_event):
    """An individual vote's string form includes the voter's name."""
    p = Person.objects.create(name="Maria Chappelle-Nadal")
    vote_event.votes.create(option="yes", voter_name="Maria Chappelle-Nadal", voter=p)
    assert "Maria Chappelle-Nadal" in str(vote_event.votes.all()[0])
@pytest.mark.django_db
def test_bill_str(bill):
    """str(Bill) includes the bill identifier."""
    assert bill.identifier in str(bill)
@pytest.mark.django_db
def test_bill_abstract(bill):
    """A bill abstract's string form includes the parent bill identifier."""
    bill.abstracts.create(
        abstract="This is the Senate's health care bill. The bill started off "
        "with text regarding an unrelated matter but the Senate is "
        "co-opted this bill as a vehicle for passage of their reform and "
        "changed the text in whole to the health care bill. They do this "
        "because the Constitution requires all revenue bills to start in "
        "the House, and their health reform plan involves revenue. So "
        "they have chosen to work off of a bill that started in the "
        "House, even if that bill is unrelated.",
        note="Library of Congress Summary",
    )
    assert bill.identifier in str(bill.abstracts.all()[0])
@pytest.mark.django_db
def test_bill_title(bill):
    """An alternate title's string form includes both the title and bill id."""
    o_t = bill.other_titles.create(title="Affordable Care Act")
    assert o_t.title in str(o_t)
    assert bill.identifier in str(o_t)
@pytest.mark.django_db
def test_bill_sponsorship(bill):
    """A sponsorship's string form includes the sponsor name and bill id."""
    spon = bill.sponsorships.create(classification="sponsor", name="Nancy Pelosi")
    assert spon.name in str(spon)
    assert bill.identifier in str(spon)
@pytest.mark.django_db
def test_bill_identifier(bill):
    """Alternate identifiers round-trip through the related manager."""
    bill.other_identifiers.create(identifier="1001")
    assert "1001" == bill.other_identifiers.all()[0].identifier
@pytest.mark.django_db
def test_related_bill(bill):
    """A related-bill link's string form includes the relation type and bill id."""
    r_b = bill.related_bills.create(
        legislative_session=bill.legislative_session,
        identifier="SB 22",
        relation_type="companion",
    )
    assert r_b.relation_type in str(r_b)
    assert bill.identifier in str(r_b)
@pytest.mark.django_db
def test_bill_action(bill):
    """Bill actions stringify with date and bill id; related entities with name."""
    o = Organization.objects.create(name="Missouri State Senate")
    a = bill.actions.create(
        organization=o,
        description="Third Reading and Final Passage",
        date="2016-02-16",
        order=1,
    )
    assert a.date in str(a)
    assert bill.identifier in str(a)
    # test adding related entity to bill action
    p = Person.objects.create(name="Maria Chappelle-Nadal")
    a.related_entities.create(person=p, name=p.name, entity_type="person")
    assert p.name in str(a.related_entities.all()[0])
@pytest.mark.django_db
def test_bill_version_with_links(bill):
    """Bill versions and their links stringify with date, URL, and bill id."""
    v = bill.versions.create(note="Engrossed", date="2017-03-15")
    assert v.date in str(v)
    assert bill.identifier in str(v)
    # test adding a link to the bill version
    v.links.create(
        url="http://committee.example.com/billversion1.pdf",
        media_type="application/pdf",
    )
    assert "http://committee.example.com/billversion1.pdf" in str(v.links.all()[0])
    assert bill.identifier in str(v.links.all()[0])
@pytest.mark.django_db
def test_bill_document_with_links(bill):
    """Bill documents and their links stringify with URL and bill id."""
    doc = bill.documents.create(note="Fiscal Note", date="2017-03-01")
    assert bill.identifier in str(doc)
    # test adding a link to the bill document
    doc.links.create(
        url="http://committee.example.com/bill_document.pdf",
        media_type="application/pdf",
    )
    assert "http://committee.example.com/bill_document.pdf" in str(doc.links.all()[0])
    assert bill.identifier in str(doc.links.all()[0])
@pytest.mark.django_db
def test_event_str(event):
    """str(Event) includes both the event name and start date."""
    assert event.name in str(event)
    assert event.start_date in str(event)
@pytest.mark.django_db
def test_event_location_str(event_location):
    """str(EventLocation) includes the location name."""
    assert event_location.name in str(event_location)
@pytest.mark.django_db
def test_event_participant_person(event):
    """A person participant resolves entity_name/entity_id from its entity_type."""
    p = Person.objects.create(name="Andrew Tobin")
    e_p = event.participants.create(
        name=p.name, person=p, entity_type="person", note="Speaker"
    )
    assert e_p.name in str(e_p)
    assert e_p.name in e_p.entity_name
    assert e_p.entity_id
    # remove entity_type value and re-try: falls back to the plain name, no id
    e_p.entity_type = ""
    assert e_p.name in e_p.entity_name
    assert e_p.entity_id is None
@pytest.mark.django_db
def test_event_participant_organization(event):
    """An organization participant resolves entity_name/entity_id from its entity_type."""
    o = Organization.objects.create(name="Committee on Energy")
    e_p = event.participants.create(
        name=o.name, organization=o, entity_type="organization", note="Host Committee"
    )
    assert e_p.name in str(e_p)
    assert e_p.name in e_p.entity_name
    assert e_p.entity_id
    # remove entity_type value and re-try: falls back to the plain name, no id
    e_p.entity_type = ""
    assert e_p.name in e_p.entity_name
    assert e_p.entity_id is None
@pytest.mark.django_db
def test_event_link(event):
    """Event links (stored as JSON dicts) include their URL in string form."""
    event.links.append(dict(note="EPA Website", url="http://www.epa.gov/"))
    event.save()
    assert "http://www.epa.gov/" in str(event.links[0])
@pytest.mark.django_db
def test_event_media_w_links(event):
    """Event media stringify with their note; nested links expose the URL."""
    # test adding media to event
    e_m = event.media.create(
        note="Recording of the meeting",
        date="2014-04-12",
        offset="19",
        links=[dict(media_type="video/webm", url="http://example.com/video.webm")],
    )
    assert "Recording of the meeting" in str(e_m)
    # test link on event media
    assert "http://example.com/video.webm" in str(e_m.links[0])
@pytest.mark.django_db
def test_event_agenda(event, vote_event, bill):
    """Agenda items, their media, and related entities stringify sensibly."""
    # test adding agenda item to event
    e_a = event.agenda.create(
        description="Presentation by Director Henry Darwin, Arizona Department "
        "of Environmental Quality, regarding the Environmental "
        "Protection Agency (EPA) Clean Power Plan proposed rule",
        order=2,
        subjects=["epa", "green energy", "environmental issues"],
    )
    assert event.name in str(e_a)
    # test adding media to event agenda item
    e_a_med = e_a.media.create(
        note="Recording Darwin presentation",
        date="2014-04-12",
        links=[dict(media_type="video/mp4", url="http://example.com/video.mp4")],
    )
    assert "Recording Darwin presentation" in str(e_a_med)
    # test adding link to event agenda item media
    assert "http://example.com/video.mp4" in str(e_a_med.links[0])
    # test adding related entities to event agenda
    e_a.related_entities.create(bill=bill, entity_type="bill", name=bill.identifier)
    e_a.related_entities.create(
        vote_event=vote_event, entity_type="vote", name=vote_event.identifier
    )
    for r_e in e_a.related_entities.all():
        assert r_e.name in str(r_e)
        assert r_e.name in r_e.entity_name
        assert r_e.entity_id
        # remove entity_type value and re-try: falls back to the plain name, no id
        r_e.entity_type = ""
        assert r_e.name in r_e.entity_name
        assert r_e.entity_id is None
| 32.672515 | 96 | 0.684506 |
ee6484e996ac617c7e81832b123b7b407ad856c6 | 3,377 | py | Python | generation/overland/from_platec.py | BenSmithers/MultiHex2 | 3a241d7b6e8681b56ac8f6dcc7f707bed47420ea | [
"MIT"
] | null | null | null | generation/overland/from_platec.py | BenSmithers/MultiHex2 | 3a241d7b6e8681b56ac8f6dcc7f707bed47420ea | [
"MIT"
] | null | null | null | generation/overland/from_platec.py | BenSmithers/MultiHex2 | 3a241d7b6e8681b56ac8f6dcc7f707bed47420ea | [
"MIT"
] | null | null | null |
from argparse import ArgumentError
from MultiHex2.tools import Clicker
from MultiHex2.core import hex_to_screen, screen_to_hex, Hex
from MultiHex2.core import DRAWSIZE
from ..utils import perlin
from PyQt5.QtCore import QPointF
from PyQt5.QtGui import QColor
import numpy.random as rnd
import numpy as np
from math import pi, acos, exp, sin
import platec
def sigmoid(val):
    """Logistic sigmoid: maps any real number onto the open interval (0, 1)."""
    denominator = 1 + exp(-val)
    return 1. / denominator
def get_color(alt):
    """Linearly interpolate a terrain color between the low and high palette.

    `alt` is the blend factor: 0 gives the light lowland tone, 1 the dark
    highland tone.
    """
    high = (138, 123, 63)
    low = (230, 217, 165)
    channels = [lo + (hi - lo) * alt for hi, lo in zip(high, low)]
    return QColor(channels[0], channels[1], channels[2])
def gen_land(map:Clicker, seed=None, **kwargs):
    """
    Generate base terrain for the map via a plate-tectonics simulation.

    Args:
        map: the Clicker map controller that receives the generated hexes.
        seed: optional RNG seed; a random one is drawn when omitted.
    Required kwargs:
        dimx, dimy: map dimensions in pixels.
    Raises:
        ValueError: if a required kwarg is missing.
    """
    if seed is not None:
        rnd.seed(seed)
    else:
        seed = rnd.randint(1, 10000)

    # validate required keyword arguments up front
    # BUGFIX: previously raised argparse.ArgumentError with a single string;
    # that constructor needs (argument, message) and would itself crash with a
    # TypeError at raise time, masking the real problem.
    required_args = ["dimx", "dimy"]
    for arg in required_args:
        if arg not in kwargs:
            raise ValueError("Could not find required arg {} in kwargs".format(arg))

    scale = 5  # one simulation cell covers `scale` screen pixels
    dimensions = [kwargs['dimx'], kwargs['dimy']]
    map.dimensions = tuple(dimensions)
    sea_level = 0.65

    print("doing platec stuff, seed {}".format(seed))
    # these were some of the generation presets, decided against changing them, not sure what they all do
    # seed, width, height, sea_level, erosion_period, folding_ratio, aggr_overlap_abs, aggr_overlap rel, cycle count, number of plates
    p = platec.create(seed, int(dimensions[1]/scale), int(dimensions[0]/scale), sea_level, 61, 0.010, 5000, 0.10, 2, 8)
    print("starting")
    # run the plate simulation to completion
    while platec.is_finished(p) == 0:
        platec.step(p)

    heightmap = np.reshape(platec.get_heightmap(p), (int(dimensions[0]/scale), int(dimensions[1]/scale)))
    peak = np.max(heightmap)
    trough = np.min(heightmap)
    print("Min/max alt {} and {}".format(2*sigmoid(trough-sea_level)-1.0, 2*sigmoid(peak-sea_level)-1.0))
    pnoise = perlin(dimensions[0], seed)
    # BUGFIX: arguments were swapped — the trough was printed under the "Max" label
    print("Max alt and min alt: {}, {}".format(peak, trough))

    # walk the simulation grid and materialize one Hex per screen location
    for i in range(len(heightmap)):
        for j in range(len(heightmap[i])):
            pos = QPointF(scale*i, scale*j)
            loc = screen_to_hex(pos)
            if loc not in map.hexCatalog:
                new_hex = Hex(hex_to_screen(loc))
                new_hex.set_param("is_land", 10*int(heightmap[i][j]>sea_level))
                new_hex.is_land = heightmap[i][j]>sea_level
                # squash raw altitude into (-1, 1) around sea level
                new_hex.set_param("altitude_base", 2*sigmoid(heightmap[i][j]-sea_level)-1.0)
                new_hex.set_param("rainfall_base", 0.0)
                fract = 1.0 - heightmap[i][j]/(2*peak)  # will range from 0 to 0.5, use it to make high places colder
                new_hex.set_param("temperature_base", pnoise[i][j]*0.5 + fract*sin(pi*j/len(heightmap[i])))
                if heightmap[i][j]>16:
                    new_hex.geography = "ridge"
                    new_hex.set_fill(QColor(99,88,60))
                elif heightmap[i][j]>3.2:
                    new_hex.geography = "mountain"
                    new_hex.set_fill(QColor(97, 78, 46))
                else:
                    if new_hex.is_land:
                        new_hex.set_fill(QColor(153, 171, 104))
                    else:
                        new_hex.set_fill(QColor(135, 208, 232))
                map.addHex(new_hex, loc)
| 35.925532 | 142 | 0.596091 |
6392a470b4c2247a1df0b53f54ca6a4c4557643a | 208 | py | Python | Recursion/CodingNinjas/5_fibo.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | [
"Apache-2.0"
] | null | null | null | Recursion/CodingNinjas/5_fibo.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | [
"Apache-2.0"
] | null | null | null | Recursion/CodingNinjas/5_fibo.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | [
"Apache-2.0"
] | null | null | null |
# 0 1 1 2 3 5
def fibo(n):
    """Return the n-th Fibonacci number, 1-indexed (fibo(1) == fibo(2) == 1).

    Iterative implementation; the naive double recursion it replaces ran in
    O(2**n) time and recursed without bound for n < 1.

    Raises:
        ValueError: if n is not a positive integer.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    a, b = 1, 1
    # after k iterations, b holds fibo(k + 2)
    for _ in range(n - 2):
        a, b = b, a + b
    return b
if __name__ == "__main__":
    # quick manual check: 5th Fibonacci number (expected output: 5)
    print(fibo(5))
| 13 | 27 | 0.552885 |
3539842815d4d8ebf7e23967426820b73bc77c94 | 23,171 | py | Python | datacatalog/google/cloud/datacatalog_v1beta1/proto/datacatalog_pb2_grpc.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | 1 | 2019-06-14T10:11:59.000Z | 2019-06-14T10:11:59.000Z | datacatalog/google/cloud/datacatalog_v1beta1/proto/datacatalog_pb2_grpc.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | null | null | null | datacatalog/google/cloud/datacatalog_v1beta1/proto/datacatalog_pb2_grpc.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.datacatalog_v1beta1.proto import (
datacatalog_pb2 as google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2,
)
from google.cloud.datacatalog_v1beta1.proto import (
tags_pb2 as google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# NOTE: generated by the gRPC Python protocol compiler — regenerate from the
# .proto definitions rather than hand-editing.
class DataCatalogStub(object):
    """Data Catalog API service allows clients to discover, understand, and manage
    their data.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Each attribute is a callable pre-bound to one RPC method, with the
        # request serializer and response deserializer for that method.
        self.SearchCatalog = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/SearchCatalog",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.SearchCatalogRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.SearchCatalogResponse.FromString,
        )
        self.UpdateEntry = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/UpdateEntry",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateEntryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.Entry.FromString,
        )
        self.GetEntry = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/GetEntry",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.GetEntryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.Entry.FromString,
        )
        self.LookupEntry = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/LookupEntry",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.LookupEntryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.Entry.FromString,
        )
        self.CreateTagTemplate = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/CreateTagTemplate",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.CreateTagTemplateRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplate.FromString,
        )
        self.GetTagTemplate = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/GetTagTemplate",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.GetTagTemplateRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplate.FromString,
        )
        self.UpdateTagTemplate = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/UpdateTagTemplate",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateTagTemplateRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplate.FromString,
        )
        self.DeleteTagTemplate = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/DeleteTagTemplate",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.DeleteTagTemplateRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.CreateTagTemplateField = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/CreateTagTemplateField",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.CreateTagTemplateFieldRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplateField.FromString,
        )
        self.UpdateTagTemplateField = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/UpdateTagTemplateField",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateTagTemplateFieldRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplateField.FromString,
        )
        self.RenameTagTemplateField = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/RenameTagTemplateField",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.RenameTagTemplateFieldRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplateField.FromString,
        )
        self.DeleteTagTemplateField = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/DeleteTagTemplateField",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.DeleteTagTemplateFieldRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.CreateTag = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/CreateTag",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.CreateTagRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.Tag.FromString,
        )
        self.UpdateTag = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/UpdateTag",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateTagRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.Tag.FromString,
        )
        self.DeleteTag = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/DeleteTag",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.DeleteTagRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ListTags = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/ListTags",
            request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.ListTagsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.ListTagsResponse.FromString,
        )
        self.SetIamPolicy = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/SetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.GetIamPolicy = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/GetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.TestIamPermissions = channel.unary_unary(
            "/google.cloud.datacatalog.v1beta1.DataCatalog/TestIamPermissions",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
        )
# NOTE: generated by the gRPC Python protocol compiler. Subclass and override
# the methods below to implement the service; each default raises UNIMPLEMENTED.
class DataCatalogServicer(object):
    """Data Catalog API service allows clients to discover, understand, and manage
    their data.
    """

    def SearchCatalog(self, request, context):
        """Searches Data Catalog for multiple resources like entries, tags that
        match a query.
        This is a custom method
        (https://cloud.google.com/apis/design/custom_methods) and does not return
        the complete resource, only the resource identifier and high level
        fields. Clients can subsequentally call Get methods.
        See [Data Catalog Search
        Syntax](/data-catalog/docs/how-to/search-reference)
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateEntry(self, request, context):
        """Updates an existing entry.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetEntry(self, request, context):
        """Gets an entry.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def LookupEntry(self, request, context):
        """Get an entry by target resource name. This method allows clients to use
        the resource name from the source Google Cloud Platform service to get the
        Data Catalog Entry.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateTagTemplate(self, request, context):
        """Creates a tag template.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetTagTemplate(self, request, context):
        """Gets a tag template.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateTagTemplate(self, request, context):
        """Updates a tag template. This method cannot be used to update the fields of
        a template. The tag template fields are represented as separate resources
        and should be updated using their own create/update/delete methods.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteTagTemplate(self, request, context):
        """Deletes a tag template and all tags using the template.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateTagTemplateField(self, request, context):
        """Creates a field in a tag template.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateTagTemplateField(self, request, context):
        """Updates a field in a tag template. This method cannot be used to update the
        field type.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def RenameTagTemplateField(self, request, context):
        """Renames a field in a tag template.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteTagTemplateField(self, request, context):
        """Deletes a field in a tag template and all uses of that field.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateTag(self, request, context):
        """Creates a tag on an [Entry][google.cloud.datacatalog.v1beta1.Entry].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateTag(self, request, context):
        """Updates an existing tag.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteTag(self, request, context):
        """Deletes a tag.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListTags(self, request, context):
        """Lists the tags on an [Entry][google.cloud.datacatalog.v1beta1.Entry].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def SetIamPolicy(self, request, context):
        """Sets the access control policy for a resource. Replaces any existing
        policy.
        Supported resources are:
        - Tag templates.
        Note, this method cannot be used to manage policies for BigQuery, Cloud
        Pub/Sub and any external Google Cloud Platform resources synced to Cloud
        Data Catalog.
        Callers must have following Google IAM permission
        `datacatalog.tagTemplates.setIamPolicy` to set policies on tag templates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetIamPolicy(self, request, context):
        """Gets the access control policy for a resource. A `NOT_FOUND` error
        is returned if the resource does not exist. An empty policy is returned
        if the resource exists but does not have a policy set on it.
        Supported resources are:
        - Tag templates.
        Note, this method cannot be used to manage policies for BigQuery, Cloud
        Pub/Sub and any external Google Cloud Platform resources synced to Cloud
        Data Catalog.
        Callers must have following Google IAM permission
        `datacatalog.tagTemplates.getIamPolicy` to get policies on tag templates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def TestIamPermissions(self, request, context):
        """Returns the caller's permissions on a resource.
        If the resource does not exist, an empty set of permissions is returned
        (We don't return a `NOT_FOUND` error).
        Supported resource are:
        - tag templates.
        Note, this method cannot be used to manage policies for BigQuery, Cloud
        Pub/Sub and any external Google Cloud Platform resources synced to Cloud
        Data Catalog.
        A caller is not required to have Google IAM permission to make this
        request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_DataCatalogServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on *server* under the DataCatalog service name.

    Generated by the gRPC Python protocol compiler; each entry pairs a servicer
    method with the deserializer/serializer for its request/response messages.
    """
    rpc_method_handlers = {
        "SearchCatalog": grpc.unary_unary_rpc_method_handler(
            servicer.SearchCatalog,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.SearchCatalogRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.SearchCatalogResponse.SerializeToString,
        ),
        "UpdateEntry": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateEntry,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateEntryRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.Entry.SerializeToString,
        ),
        "GetEntry": grpc.unary_unary_rpc_method_handler(
            servicer.GetEntry,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.GetEntryRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.Entry.SerializeToString,
        ),
        "LookupEntry": grpc.unary_unary_rpc_method_handler(
            servicer.LookupEntry,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.LookupEntryRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.Entry.SerializeToString,
        ),
        "CreateTagTemplate": grpc.unary_unary_rpc_method_handler(
            servicer.CreateTagTemplate,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.CreateTagTemplateRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplate.SerializeToString,
        ),
        "GetTagTemplate": grpc.unary_unary_rpc_method_handler(
            servicer.GetTagTemplate,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.GetTagTemplateRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplate.SerializeToString,
        ),
        "UpdateTagTemplate": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateTagTemplate,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateTagTemplateRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplate.SerializeToString,
        ),
        "DeleteTagTemplate": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTagTemplate,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.DeleteTagTemplateRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "CreateTagTemplateField": grpc.unary_unary_rpc_method_handler(
            servicer.CreateTagTemplateField,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.CreateTagTemplateFieldRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplateField.SerializeToString,
        ),
        "UpdateTagTemplateField": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateTagTemplateField,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateTagTemplateFieldRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplateField.SerializeToString,
        ),
        "RenameTagTemplateField": grpc.unary_unary_rpc_method_handler(
            servicer.RenameTagTemplateField,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.RenameTagTemplateFieldRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.TagTemplateField.SerializeToString,
        ),
        "DeleteTagTemplateField": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTagTemplateField,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.DeleteTagTemplateFieldRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "CreateTag": grpc.unary_unary_rpc_method_handler(
            servicer.CreateTag,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.CreateTagRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.Tag.SerializeToString,
        ),
        "UpdateTag": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateTag,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.UpdateTagRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_tags__pb2.Tag.SerializeToString,
        ),
        "DeleteTag": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTag,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.DeleteTagRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "ListTags": grpc.unary_unary_rpc_method_handler(
            servicer.ListTags,
            request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.ListTagsRequest.FromString,
            response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_datacatalog__pb2.ListTagsResponse.SerializeToString,
        ),
        "SetIamPolicy": grpc.unary_unary_rpc_method_handler(
            servicer.SetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        "GetIamPolicy": grpc.unary_unary_rpc_method_handler(
            servicer.GetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        "TestIamPermissions": grpc.unary_unary_rpc_method_handler(
            servicer.TestIamPermissions,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.datacatalog.v1beta1.DataCatalog", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| 56.652812 | 152 | 0.764706 |
bf917d75e3ced46c1be0743218d8464143236661 | 2,057 | py | Python | run_calendar2.py | themrmax/councilmatic | f8799011d08b9b4355934dd9f2ff31854938deb6 | [
"MIT"
] | null | null | null | run_calendar2.py | themrmax/councilmatic | f8799011d08b9b4355934dd9f2ff31854938deb6 | [
"MIT"
] | null | null | null | run_calendar2.py | themrmax/councilmatic | f8799011d08b9b4355934dd9f2ff31854938deb6 | [
"MIT"
] | null | null | null | import argparse
from scraper.controller.calendar import Calendar
from scraper.model.calendar import Calendar as CalendarModel
def get_args():
    """Parse the calendar-scraper command-line options and return the namespace."""
    flag_specs = [
        (("-sd", "--show_dates"),
         dict(help="show date values", dest='show_dates',
              action='store_true')),
        (("-sdn", "--show_dept_names"),
         dict(help="show dept names", dest='show_depts',
              action='store_true')),
        (("-s", "--search"),
         dict(help="search words", type=str, default="")),
        (("-d", "--date"),
         dict(help="date", type=str, default="All Years")),
        (("-dn", "--dept"),
         dict(help="dept name", type=str, default="All Departments")),
        (("-n", "--notes"),
         dict(help="notes flag (i.e. 0, 1)", type=int, default=0)),
        (("-c", "--cc"),
         dict(help="closed caption flag (i.e. 0, 1)", type=int, default=0)),
        (("-w", "--wait_time"),
         dict(help="wait time", type=int, default=5)),
    ]
    parser = argparse.ArgumentParser()
    for names, options in flag_specs:
        parser.add_argument(*names, **options)
    return parser.parse_args()
def show_dates():
    """Print every date value offered on the calendar page."""
    calendar = Calendar()
    calendar.go_to_cal_page()
    available_dates = calendar.get_dates()
    calendar.close()
    print("Dates:")
    print("------")
    for entry in available_dates:
        print(entry)
def show_depts():
    """Print every department name offered on the calendar page."""
    calendar = Calendar()
    calendar.go_to_cal_page()
    available_depts = calendar.get_depts()
    calendar.close()
    print("Depts:")
    print("------")
    for entry in available_depts:
        print(entry)
def scrape(args):
    """Run a calendar query built from *args* and print the rows as CSV."""
    query_kwargs = {
        'search_str': args.search,
        'date_sel': args.date,
        'dept': args.dept,
        'notes': args.notes,
        'closed_caption': args.cc,
        'sleep_time': args.wait_time,
        'wait_time': args.wait_time,
    }
    calendar = Calendar()
    calendar.go_to_cal_page()
    rows = calendar.query(**query_kwargs)
    calendar.close()
    print(CalendarModel.to_csv(rows))
def main():
    """Entry point: dispatch to a listing mode or to the scraper."""
    args = get_args()
    # The two listing flags short-circuit; scraping is the default action.
    if args.show_dates:
        show_dates()
        return
    if args.show_depts:
        show_depts()
        return
    scrape(args)
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 26.714286 | 79 | 0.606709 |
bd30f549f513b81730506fa63e54a0a8512fe39d | 12,382 | py | Python | ci/launcher/utils/ec2.py | vvekic/serve | f02a56bf1f0de1705fd9f399c1115d36e343c90c | [
"Apache-2.0"
] | 2 | 2022-03-26T05:17:45.000Z | 2022-03-26T05:44:53.000Z | ci/launcher/utils/ec2.py | vvekic/serve | f02a56bf1f0de1705fd9f399c1115d36e343c90c | [
"Apache-2.0"
] | 3 | 2022-03-12T01:08:09.000Z | 2022-03-15T10:56:14.000Z | ci/launcher/utils/ec2.py | vvekic/serve | f02a56bf1f0de1705fd9f399c1115d36e343c90c | [
"Apache-2.0"
] | null | null | null | import boto3
import os
import time
import re
from inspect import signature
import random
from retrying import retry
from fabric2 import Connection
from botocore.config import Config
from botocore.exceptions import ClientError
from invoke import run
from invoke.context import Context
from . import DEFAULT_REGION, LOGGER
EC2_INSTANCE_ROLE_NAME = "ec2InstanceCIRole"
def generate_ssh_keypair(ec2_client, key_name):
    """Create (or reuse) an EC2 key pair and return the local .pem path.

    If the .pem already exists locally it is reused. If AWS reports the key
    pair as a duplicate, another worker probably created it concurrently, so
    wait briefly and reuse the local file if it appeared.

    :param ec2_client: boto3 EC2 client used to create the key pair
    :param key_name: EC2 key pair name; the .pem is written to cwd
    :return: <str> absolute path of the local .pem file
    """
    pwd = run("pwd", hide=True).stdout.strip("\n")
    key_filename = os.path.join(pwd, f"{key_name}.pem")
    # Fast path: the key material was already saved by an earlier run.
    if os.path.exists(key_filename):
        run(f"chmod 400 {key_filename}")
        return key_filename
    try:
        key_pair = ec2_client.create_key_pair(KeyName=key_name)
    except ClientError as e:
        if "InvalidKeyPair.Duplicate" in f"{e}":
            # Wait 10 seconds for key to be created to avoid race condition
            time.sleep(10)
            if os.path.exists(key_filename):
                run(f"chmod 400 {key_filename}")
                return key_filename
        raise e
    # NOTE(review): the private key material is passed through a shell echo
    # here; consider writing it with Python file I/O instead.
    run(f"echo '{key_pair['KeyMaterial']}' > {key_filename}")
    run(f"chmod 400 {key_filename}")
    return key_filename
def destroy_ssh_keypair(ec2_client, key_filename):
    """Delete the EC2 key pair derived from *key_filename* and remove the local .pem.

    :return: (delete_key_pair response, key pair name)
    """
    pem_basename = os.path.basename(key_filename)
    key_name = pem_basename.split(".pem")[0]
    delete_response = ec2_client.delete_key_pair(KeyName=key_name)
    run(f"rm -f {key_filename}")
    return delete_response, key_name
def launch_instance(
    ami_id,
    instance_type,
    ec2_key_name=None,
    region="us-west-2",
    user_data=None,
    iam_instance_profile_name=None,
    instance_name="",
):
    """
    Launch an instance
    :param ami_id: AMI ID to be used for launched instance
    :param instance_type: Instance type of launched instance
    :param ec2_key_name: Name of the EC2 key pair to attach (required)
    :param region: Region where instance will be launched
    :param user_data: Script to run when instance is launched as a str
    :param iam_instance_profile_name: EC2 Role to be attached
    :param instance_name: Tag to display as Name on EC2 Console
    :return: <dict> Information about the instance that was launched
    :raises Exception: if ami_id/ec2_key_name is missing or no instance is returned
    """
    if not ami_id:
        raise Exception("No ami_id provided")
    if not ec2_key_name:
        raise Exception("Ec2 Key name must be provided")
    client = boto3.Session(region_name=region).client("ec2")
    # Construct the dictionary with the arguments for API call
    arguments_dict = {
        "KeyName": ec2_key_name,
        "ImageId": ami_id,
        "InstanceType": instance_type,
        "MaxCount": 1,
        "MinCount": 1,
        "TagSpecifications": [
            {
                "ResourceType": "instance",
                "Tags": [{"Key": "Name", "Value": f"CI-CD {instance_name}"}],
            },
        ],
        "BlockDeviceMappings": [
            {
                # Root volume; size is in GiB.
                "DeviceName": "/dev/sda1",
                "Ebs": {
                    "VolumeSize": 200,
                },
            }
        ],
    }
    # Optional fields are only added when supplied so AWS defaults apply.
    if user_data:
        arguments_dict["UserData"] = user_data
    if iam_instance_profile_name:
        arguments_dict["IamInstanceProfile"] = {"Name": iam_instance_profile_name}
    LOGGER.info(f"Launching instance with name: {instance_name}, and key: {ec2_key_name}")
    response = client.run_instances(**arguments_dict)
    if not response or len(response["Instances"]) < 1:
        raise Exception("Unable to launch the instance. Did not return any response")
    LOGGER.info(f"Instance launched successfully.")
    return response["Instances"][0]
def get_ec2_client(region):
    """Build an EC2 client for *region* with retries raised to 10 attempts."""
    retry_config = Config(retries={"max_attempts": 10})
    return boto3.client("ec2", region_name=region, config=retry_config)
def get_instance_from_id(instance_id, region=DEFAULT_REGION):
    """
    Get instance information using instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <dict> Information about instance with matching instance ID
    :raises Exception: if no instance_id is given or the API returns nothing
    """
    if not instance_id:
        raise Exception("No instance id provided")
    client = boto3.Session(region_name=region).client("ec2")
    instance = client.describe_instances(InstanceIds=[instance_id])
    if not instance:
        # Fixed copy-pasted message: this function describes an instance,
        # it does not launch one.
        raise Exception(
            "Unable to describe the instance. \
            Did not return any reservations object"
        )
    return instance["Reservations"][0]["Instances"][0]
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def get_public_ip(instance_id, region=DEFAULT_REGION):
    """
    Get Public IP of instance using instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <str> IP Address of instance with matching instance ID
    :raises Exception: if the IP is not assigned yet (triggers a retry)
    """
    instance = get_instance_from_id(instance_id, region)
    # Until AWS assigns a public IP the key is absent from the response
    # entirely, so use .get() instead of indexing: otherwise the final
    # failure after 16 retries surfaces as an opaque KeyError.
    public_ip = instance.get("PublicIpAddress")
    if not public_ip:
        raise Exception("IP address not yet available")
    return public_ip
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def get_public_ip_from_private_dns(private_dns, region=DEFAULT_REGION):
    """
    Get Public IP of instance using private DNS
    :param private_dns: Private DNS name of the instance to look up
    :param region: Region where query will be performed
    :return: <str> IP Address of instance with matching private DNS
    """
    client = boto3.Session(region_name=region).client("ec2")
    # describe_instances expects Filters as a *list* of {"Name", "Values"}
    # dicts; the previous single dict with a "Value" key failed boto3
    # parameter validation on every call.
    response = client.describe_instances(
        Filters=[{"Name": "private-dns-name", "Values": [private_dns]}]
    )
    return response.get("Reservations")[0].get("Instances")[0].get("PublicIpAddress")
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def get_instance_user(instance_id, region=DEFAULT_REGION):
    """
    Get "ubuntu" or "ec2-user" based on AMI used to launch instance
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <str> user name
    """
    # The lookup result is not needed; the call is kept so a missing
    # instance still raises (and gets retried) as before.
    get_instance_from_id(instance_id, region)
    # Modify here if an AMI other than Ubuntu AMI must be used.
    return "ubuntu"
def get_instance_state(instance_id, region=DEFAULT_REGION):
    """
    Get state of instance using instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <str> State of instance with matching instance ID
    """
    instance_info = get_instance_from_id(instance_id, region)
    return instance_info["State"]["Name"]
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def check_instance_state(instance_id, state="running", region=DEFAULT_REGION):
    """
    Compares the instance state with the state argument.
    Retries 16 times with 60 seconds gap between retries (see decorator).
    :param instance_id: Instance ID to be queried
    :param state: Expected instance state
    :param region: Region where query will be performed
    :return: <str> State of instance with matching instance ID
    """
    observed_state = get_instance_state(instance_id, region)
    if observed_state != state:
        raise Exception(f"Instance {instance_id} not in {state} state")
    return observed_state
def get_system_state(instance_id, region=DEFAULT_REGION):
    """
    Returns health checks state for instances
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <tuple> System state and Instance state of instance with matching instance ID
    :raises Exception: if no instance_id is given or no status is available
    """
    if not instance_id:
        raise Exception("No instance id provided")
    client = boto3.Session(region_name=region).client("ec2")
    response = client.describe_instance_status(InstanceIds=[instance_id])
    if not response:
        # Fixed copy-pasted "Unable to launch" wording from launch_instance.
        raise Exception(
            "Unable to describe instance status. Did not return any response"
        )
    instance_status_list = response["InstanceStatuses"]
    # The previous code checked emptiness twice (truthiness and len < 1);
    # a single check is sufficient. An empty list usually means a wrong
    # instance id or an instance that has no status yet.
    if not instance_status_list:
        raise Exception(
            "The instance id seems to be incorrect {}. "
            "InstanceStatuses is empty".format(instance_id)
        )
    instance_status = instance_status_list[0]
    return (
        instance_status["SystemStatus"]["Status"],
        instance_status["InstanceStatus"]["Status"],
    )
@retry(stop_max_attempt_number=96, wait_fixed=10000)
def check_system_state(instance_id, system_status="ok", instance_status="ok", region=DEFAULT_REGION):
    """
    Compares the system state (Health Checks).
    Retries 96 times with 10 seconds gap between retries
    :param instance_id: Instance ID to be queried
    :param system_status: Expected system state
    :param instance_status: Expected instance state
    :param region: Region where query will be performed
    :return: <tuple> System state and Instance state of instance with matching instance ID
    """
    state_pair = get_system_state(instance_id, region=region)
    # Compare both health checks in one tuple comparison.
    if (system_status, instance_status) != state_pair:
        raise Exception(
            "Instance {} not in \
            required state".format(
                instance_id
            )
        )
    return state_pair
def terminate_instance(instance_id, region=DEFAULT_REGION):
    """
    Terminate EC2 instances with matching instance ID
    :param instance_id: Instance ID to be terminated
    :param region: Region where instance is located
    :raises Exception: when the termination request fails or targets another instance
    """
    if not instance_id:
        raise Exception("No instance id provided")
    ec2 = boto3.Session(region_name=region).client("ec2")
    result = ec2.terminate_instances(InstanceIds=[instance_id])
    if not result:
        raise Exception("Unable to terminate instance. No response received.")
    terminating = result["TerminatingInstances"]
    if not terminating:
        raise Exception("Failed to terminate instance.")
    if terminating[0]["InstanceId"] != instance_id:
        raise Exception("Failed to terminate instance. Unknown error.")
def get_instance_type_details(instance_type, region=DEFAULT_REGION):
    """
    Get instance type details for a given instance type
    :param instance_type: Instance type to be queried
    :param region: Region where query will be performed
    :return: <dict> Information about instance type
    """
    ec2 = boto3.client("ec2", region_name=region)
    response = ec2.describe_instance_types(InstanceTypes=[instance_type])
    if not response or not response["InstanceTypes"]:
        raise Exception("Unable to get instance details. No response received.")
    details = response["InstanceTypes"][0]
    # Sanity check: the answer must match what was asked for.
    if details["InstanceType"] != instance_type:
        raise Exception(
            f"Bad response received. Requested {instance_type} "
            f"but got {response['InstanceTypes'][0]['InstanceType']}"
        )
    return details
def get_instance_details(instance_id, region=DEFAULT_REGION):
    """
    Get instance details for instance with given instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <dict> Information about instance with matching instance ID
    """
    if not instance_id:
        raise Exception("No instance id provided")
    described = get_instance_from_id(instance_id, region=region)
    if not described:
        raise Exception("Could not find instance")
    # Resolve the instance's type into its full type description.
    return get_instance_type_details(described["InstanceType"], region=region)
def get_ec2_fabric_connection(instance_id, instance_pem_file, region):
    """
    establish connection with EC2 instance if necessary
    :param instance_id: ec2_instance id
    :param instance_pem_file: instance key name
    :param region: Region where ec2 instance is launched
    :return: Fabric connection object
    """
    ssh_user = get_instance_user(instance_id, region=region)
    return Connection(
        user=ssh_user,
        host=get_public_ip(instance_id, region),
        inline_ssh_env=True,
        connect_kwargs={"key_filename": [instance_pem_file]},
    )
def get_ec2_instance_tags(instance_id, region=DEFAULT_REGION, ec2_client=None):
    """Return the tags of *instance_id* as a {key: value} dict."""
    if not ec2_client:
        ec2_client = get_ec2_client(region)
    tag_filter = [{"Name": "resource-id", "Values": [instance_id]}]
    response = ec2_client.describe_tags(Filters=tag_filter)
    return {tag["Key"]: tag["Value"] for tag in response.get("Tags")}
| 36.417647 | 102 | 0.687046 |
9f2b342e436a1ca71e24af95ee822cf81f350795 | 3,998 | py | Python | src/wagtail_2fa/middleware.py | tcss-tech/wagtail-2fa | 1a3fefbfab1515362a4a934ad5b8885d0674fd9c | [
"MIT"
] | 55 | 2018-09-13T15:59:25.000Z | 2022-03-23T08:09:25.000Z | src/wagtail_2fa/middleware.py | tcss-tech/wagtail-2fa | 1a3fefbfab1515362a4a934ad5b8885d0674fd9c | [
"MIT"
] | 108 | 2018-09-14T11:06:02.000Z | 2022-03-18T04:17:55.000Z | src/wagtail_2fa/middleware.py | tcss-tech/wagtail-2fa | 1a3fefbfab1515362a4a934ad5b8885d0674fd9c | [
"MIT"
] | 27 | 2018-09-13T14:10:26.000Z | 2022-02-21T08:03:41.000Z | from functools import partial
import django_otp
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.urls import resolve, reverse
from django.utils.functional import SimpleLazyObject
from django_otp.middleware import OTPMiddleware as _OTPMiddleware
class VerifyUserMiddleware(_OTPMiddleware):
    """OTP middleware that gates the Wagtail admin behind two-factor auth.

    Admin users who own a confirmed OTP device are redirected to the 2FA
    verification view until they verify; with ``WAGTAIL_2FA_REQUIRED`` set,
    users without a device are redirected to the device-registration view.
    """

    # URL names that never require verification (the login/logout and the
    # verification view itself, which would otherwise cause redirect loops).
    _allowed_url_names = [
        "wagtail_2fa_auth",
        "wagtailadmin_login",
        "wagtailadmin_logout",
    ]

    # These URLs do not require verification if the user has no devices
    _allowed_url_names_no_device = [
        "wagtail_2fa_device_list",
        "wagtail_2fa_device_new",
        "wagtail_2fa_device_qrcode",
    ]

    def __call__(self, request):
        """Run the request hook, fall through to get_response, then the response hook."""
        if hasattr(self, "process_request"):
            response = self.process_request(request)
        if not response:
            response = self.get_response(request)
        if hasattr(self, "process_response"):
            response = self.process_response(request, response)
        return response

    def process_request(self, request):
        """Wrap the user for OTP verification; redirect when 2FA is pending.

        Returns a redirect response when verification (or device setup) is
        required, or None to let the request continue.
        """
        if request.user:
            # Lazily wrap the user so django-otp's verification state is
            # attached on first access.
            request.user = SimpleLazyObject(
                partial(self._verify_user, request, request.user)
            )
        user = request.user
        if self._require_verified_user(request):
            user_has_device = django_otp.user_has_device(user, confirmed=True)
            if user_has_device and not user.is_verified():
                # A device exists but the session is unverified: prompt for
                # the OTP code first.
                return redirect_to_login(
                    request.get_full_path(), login_url=reverse("wagtail_2fa_auth")
                )
            elif not user_has_device and settings.WAGTAIL_2FA_REQUIRED:
                # only allow the user to visit the admin index page and the
                # admin setup page
                return redirect_to_login(
                    request.get_full_path(), login_url=reverse("wagtail_2fa_device_new")
                )

    def _require_verified_user(self, request):
        """Decide whether this request must come from an OTP-verified user."""
        user = request.user
        if not settings.WAGTAIL_2FA_REQUIRED:
            # If two factor authentication is disabled in the settings
            return False
        if not user.is_authenticated:
            return False
        # If the user has no access to the admin anyway then don't require a
        # verified user here
        if not (
            user.is_staff
            or user.is_superuser
            or user.has_perms(["wagtailadmin.access_admin"])
        ):
            return False
        # Don't require verification for specified URL names
        request_url_name = resolve(request.path_info).url_name
        if request_url_name in self._allowed_url_names:
            return False
        # If the user does not have a device, don't require verification
        # for the specified URL names
        if request_url_name in self._allowed_url_names_no_device:
            user_has_device = django_otp.user_has_device(user, confirmed=True)
            if not user_has_device:
                return False
        # For all other cases require that the user is verified via otp
        return True
class VerifyUserPermissionsMiddleware(VerifyUserMiddleware):
    """A variant of VerifyUserMiddleware which makes 2FA optional.

    Verification is only enforced for users with the
    ``wagtailadmin.enable_2fa`` permission -- except that anyone who already
    registered a device must always verify it.
    """

    def process_request(self, request):
        result = super().process_request(request)
        # Add an attribute to the user so we can easily determine if 2FA should
        # be enabled for them.
        request.user.enable_2fa = request.user.has_perms(["wagtailadmin.enable_2fa"])
        return result

    def _require_verified_user(self, request):
        result = super()._require_verified_user(request)
        # Always require verification if the user has a device, even if they have
        # 2FA disabled.
        user_has_device = django_otp.user_has_device(request.user, confirmed=True)
        if not user_has_device and not request.user.has_perms(
            ["wagtailadmin.enable_2fa"]
        ):
            return False
        return result
| 35.070175 | 88 | 0.656078 |
78c18f7c99d14284f13407720c7d16122436a4f4 | 660 | py | Python | settings/production.py | tsatsujnr139/vbs-management-app-api | dbc6360803cec1d100606b759893da747848a32c | [
"MIT"
] | null | null | null | settings/production.py | tsatsujnr139/vbs-management-app-api | dbc6360803cec1d100606b759893da747848a32c | [
"MIT"
] | null | null | null | settings/production.py | tsatsujnr139/vbs-management-app-api | dbc6360803cec1d100606b759893da747848a32c | [
"MIT"
] | null | null | null | """
Django settings for vbs_registration project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from .base import * # noqa
from decouple import config
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('DJANGO_SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# decouple returns the raw environment string, so split it into the list
# Django expects. Previously ``default=[]`` with no cast meant a configured
# value arrived as a single string (e.g. "a.com,b.com") and never matched
# any Host header; an unset variable still yields an empty list.
ALLOWED_HOSTS = config(
    'DJANGO_ALLOWED_HOSTS',
    default='',
    cast=lambda hosts: [h.strip() for h in hosts.split(',') if h.strip()],
)

# NOTE(review): allowing every cross-origin request in production is a
# security risk -- prefer an explicit CORS whitelist if possible.
CORS_ORIGIN_ALLOW_ALL = True
| 27.5 | 66 | 0.774242 |
4d48d16f6e693aa3832d630024c64cc7af8668d2 | 4,954 | py | Python | pi/images.py | vmagamedov/pi | 6ee98af69b757d96aa4eddc32513309e0fe05d1d | [
"BSD-3-Clause"
] | 7 | 2016-06-24T04:49:48.000Z | 2020-06-29T17:34:12.000Z | pi/images.py | vmagamedov/pi | 6ee98af69b757d96aa4eddc32513309e0fe05d1d | [
"BSD-3-Clause"
] | 11 | 2016-06-19T13:16:59.000Z | 2019-11-02T13:14:19.000Z | pi/images.py | vmagamedov/pi | 6ee98af69b757d96aa4eddc32513309e0fe05d1d | [
"BSD-3-Clause"
] | null | null | null | import json
import hashlib
from .http import HTTPError
from .types import DockerImage, Image, ActionType
class Hasher:
    """Visitor that turns config objects into the byte chunks to be hashed."""

    def visit(self, obj):
        # Double dispatch: each node routes back to its visit_* method
        # through its accept() hook.
        return obj.accept(self)

    def visit_image(self, obj):
        # An image hashes as its repository name plus all of its tasks.
        yield obj.repository.encode('utf-8')
        for task in obj.tasks:
            yield from self.visit(task)

    def visit_task(self, obj):
        yield obj.run.encode('utf-8')
        for value in obj.where.values():
            # Nested actions hash recursively; plain values hash as text.
            if isinstance(value, ActionType):
                yield from self.visit(value)
            else:
                yield str(value).encode('utf-8')

    def visit_download(self, obj):
        yield obj.url.encode('utf-8')

    def visit_file(self, obj):
        # Stream the file in 64 KiB chunks so large files are never held
        # in memory all at once.
        with open(obj.path, 'rb') as f:
            yield from iter(lambda: f.read(2**16), b'')

    def visit_bundle(self, obj):
        yield obj.path.encode('utf-8')
def image_hashes(images_map, images, *, _cache=None):
    """Compute a stable SHA-1 identity hash for each image in *images*.

    Each hash mixes the parent image's identity (recursively resolved via
    *images_map* unless the parent is already a concrete DockerImage) with
    the byte chunks produced by the Hasher visitor for the image itself.
    *_cache* memoizes results by image name across the recursion.
    """
    _cache = _cache or {}
    hasher = Hasher()
    hashes = []
    for image in images:
        if image.name in _cache:
            hashes.append(_cache[image.name])
            continue
        if isinstance(image.from_, DockerImage):
            # Concrete base image: its name alone identifies the parent.
            parent_hashable = image.from_.name
        else:
            # Named parent defined in this config: hash it recursively.
            parent = images_map.get(image.from_)
            parent_hashable, = image_hashes(images_map, [parent],
                                            _cache=_cache)
        h = hashlib.sha1()
        h.update(parent_hashable.encode('utf-8'))
        for chunk in hasher.visit(image):
            h.update(chunk)
        hex_digest = _cache[image.name] = h.hexdigest()
        hashes.append(hex_digest)
    return hashes
def image_versions(images_map, images):
    """Return the short (12 hex chars) identity version of each image."""
    return [full_hash[:12] for full_hash in image_hashes(images_map, images)]
def docker_image(images_map, image):
    """Resolve *image* (config name or DockerImage) to a versioned DockerImage."""
    if isinstance(image, str):
        # Look the name up in the config and stamp it with its identity
        # version.
        named = images_map.get(image)
        version, = image_versions(images_map, [named])
        return DockerImage.from_image(named, version)
    if isinstance(image, DockerImage):
        # Already concrete; pass through untouched.
        return image
    raise TypeError(repr(image))
def resolve_deps(deps):
    """Yield (name, parent) pairs in dependency order; raise on cycles."""
    remaining = dict(deps)
    while remaining:
        # A name is ready once its parent is no longer pending.
        ready = {name for name, parent in remaining.items()
                 if parent not in remaining}
        if not ready:
            raise TypeError('Images hierarchy build error, '
                            'circular dependency found in these images: {}'
                            .format(', '.join(sorted(remaining.keys()))))
        for name in ready:
            yield name, remaining[name]
        remaining = {k: v for k, v in remaining.items() if k not in ready}
def get_images(config):
    """Filter *config* down to its Image entries, preserving order."""
    return [entry for entry in config if isinstance(entry, Image)]
def _process_pull_progress(status, image):
key = status.add_task('=> Pulling image {}'.format(image))
steps = {}
while True:
event = yield
if event.get('status', '').startswith('Pulling from '):
continue
if 'id' in event:
title = ' [{}] '.format(event['id']) + event['status']
if 'progress' in event:
title += ': ' + event['progress']
if event['id'] in steps:
status.update(steps[event['id']], title)
else:
steps[event['id']] = status.add_step(key, title)
def _process_push_progress(status, image):
key = status.add_task('=> Pushing image {}'.format(image))
steps = {}
while True:
event = yield
if 'id' in event:
title = ' [{}] '.format(event['id']) + event['status']
if 'progress' in event:
title += ': ' + event['progress']
if event['id'] in steps:
status.update(steps[event['id']], title)
else:
steps[event['id']] = status.add_step(key, title)
async def pull(docker, docker_image_: DockerImage, *, status):
    """Pull *docker_image_* through the docker API, reporting progress.

    Returns True on success and False when the API call raises HTTPError.
    """
    repository, _, tag = docker_image_.name.partition(':')
    params = {'fromImage': repository, 'tag': tag}
    try:
        gen = _process_pull_progress(status, docker_image_.name)
        # Prime the coroutine so it is parked at its first ``yield``.
        gen.send(None)
        async for chunk in docker.create_image(params=params):
            # One HTTP chunk may carry several newline-separated JSON events.
            for doc in chunk.decode('utf-8').splitlines():
                gen.send(json.loads(doc))
    except HTTPError:
        return False
    else:
        return True
async def push(docker, docker_image_, *, status):
    """Push *docker_image_* through the docker API, reporting progress.

    Returns True on success and False when the API call raises HTTPError.
    """
    name, _, tag = docker_image_.name.partition(':')
    params = {'tag': tag}
    try:
        gen = _process_push_progress(status, docker_image_.name)
        # Prime the coroutine so it is parked at its first ``yield``.
        gen.send(None)
        async for chunk in docker.push(name, params=params):
            # One HTTP chunk may carry several newline-separated JSON events.
            for doc in chunk.decode('utf-8').splitlines():
                gen.send(json.loads(doc))
    except HTTPError:
        return False
    else:
        return True
| 30.392638 | 75 | 0.563585 |
60a6c34b24b44a8945dc9839b908760aa260358a | 417 | py | Python | time/calculateRuntime.py | aTechGuide/python | aa4fdfdd1676e3524a14b7e7b694c00672b2980c | [
"MIT"
] | null | null | null | time/calculateRuntime.py | aTechGuide/python | aa4fdfdd1676e3524a14b7e7b694c00672b2980c | [
"MIT"
] | null | null | null | time/calculateRuntime.py | aTechGuide/python | aa4fdfdd1676e3524a14b7e7b694c00672b2980c | [
"MIT"
] | null | null | null | import time
def powers(limit):
    """Return the squares of 0, 1, ..., limit - 1 as a list."""
    return list(map(lambda n: n * n, range(limit)))
# Timing helper (not a decorator: it calls *func* once and reports the time).
def measure_runtime(func):
    """Call *func* once, print its wall-clock runtime and return it in seconds."""
    start = time.time()
    func()
    end = time.time()
    elapsed = end - start
    print(elapsed)
    # Returning the measurement lets callers use it programmatically; existing
    # callers that ignored the (previously None) result are unaffected.
    return elapsed
#measure_runtime(lambda : powers(500000))
"""
timeit
- Average across many iterations
"""
# timeit executes the statement many times (1,000,000 by default) and
# reports the total elapsed seconds, smoothing out single-run noise.
import timeit
print(timeit.timeit("[x**2 for x in range(10)]"))
print(timeit.timeit("list(map(lambda x: x**2, range(10)))")) | 18.954545 | 61 | 0.676259 |
b1224214bb9f63d01f83ffd200abe4e27fd6acdf | 1,098 | py | Python | app/core/tests/test_models.py | ribicnejc/django-recipe-api | 61b123b8b5be455a92c224ba9cf65779730beddb | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | ribicnejc/django-recipe-api | 61b123b8b5be455a92c224ba9cf65779730beddb | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | ribicnejc/django-recipe-api | 61b123b8b5be455a92c224ba9cf65779730beddb | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the custom user model manager."""

    def test_create_user_with_email_successful(self):
        """Creating a user with an email stores the email and hashed password."""
        email = 'test@tiddlylabs.com'
        password = '123'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        # check_password verifies against the stored hash, not plaintext.
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """A new user's email address is normalized to lower case."""
        email = 'test@TIDDLYLABS.COM'
        user = get_user_model().objects.create_user(email, 'test123')
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, "test123")

    def test_create_new_superuser(self):
        """Creating new super user test"""
        user = get_user_model().objects.create_superuser(
            'nejc@tiddlylabs.com',
            'test123'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| 32.294118 | 69 | 0.661202 |
f03ce356e80c9d1af4ac7d6c652df8bcb4e5a69f | 29,876 | py | Python | cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 1 | 2019-02-08T05:24:58.000Z | 2019-02-08T05:24:58.000Z | cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 1 | 2021-03-21T11:38:29.000Z | 2021-03-21T11:38:29.000Z | cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 15 | 2017-01-12T10:35:10.000Z | 2019-04-19T08:22:10.000Z | # Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Block Storage Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
from cinder.volume.drivers.hitachi import hbsd_snm2
def _exec_hsnm(*args, **kargs):
    """Stand-in for HBSDSNM2.exec_hsnm: return the canned happy-path CLI reply for *args*."""
    return HBSDSNM2FCDriverTest.hsnm_vals.get(args)
def _exec_hsnm_get_lu_ret_err(*args, **kargs):
    """Stand-in for exec_hsnm returning an auluref reply with a non-zero return code."""
    return HBSDSNM2FCDriverTest.hsnm_get_lu_ret_err.get(args)
def _exec_hsnm_get_lu_vol_type_err(*args, **kargs):
    """Stand-in for exec_hsnm returning an auluref reply with a bad volume status."""
    return HBSDSNM2FCDriverTest.hsnm_get_lu_vol_type_err.get(args)
def _exec_hsnm_get_lu_dppool_err(*args, **kargs):
    """Stand-in for exec_hsnm returning an auluref reply with an N/A DP pool."""
    return HBSDSNM2FCDriverTest.hsnm_get_lu_dppool_err.get(args)
def _exec_hsnm_get_lu_size_err(*args, **kargs):
    """Stand-in for exec_hsnm returning an auluref reply with an unexpected LU size."""
    return HBSDSNM2FCDriverTest.hsnm_get_lu_size_err.get(args)
def _exec_hsnm_get_lu_num_port_err(*args, **kargs):
    """Stand-in for exec_hsnm returning an auluref reply with an unexpected path count."""
    return HBSDSNM2FCDriverTest.hsnm_get_lu_num_port_err.get(args)
class HBSDSNM2FCDriverTest(test.TestCase):
"""Test HBSDSNM2FCDriver."""
audppool_result = " DP RAID \
Current Utilization Current Over Replication\
Available Current Replication Rotational \
\
Stripe \
Needing Preparation\n\
Pool Tier Mode Level Total Capacity Consumed Capacity \
Percent Provisioning Percent Capacity \
Utilization Percent Type Speed Encryption Status \
\
Reconstruction Progress Size Capacity\n\
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
1% 24835% 532.0 GB \
1% SAS 10000rpm N/A Normal \
N/A \
256KB 0.0 GB"
aureplicationlocal_result = "Pair Name LUN Pair \
LUN Status Copy Type Group \
Point-in-Time MU Number\n\
0 10 0 Split( 99%) \
ShadowImage ---:Ungrouped N/A\
"
auluref_result = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
auluref_result1 = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
Name Port Name Host Group\n\
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
Assigned WWN\n Name Port Name \
Host Group\n abcdefg 10000000C97BCE7A \
001:HBSD-01"
aufibre1_result = "Port Information\n\
Port Address\n CTL Port\
Node Name Port Name Setting Current\n 0 0 \
50060E801053C2E0 50060E801053C2E0 0000EF 272700"
auhgmap_result = "Mapping Mode = ON\nPort Group \
H-LUN LUN\n 00 001:HBSD-00 0 1000"
hsnm_vals = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, "", ""],
('aureplicationlocal',
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
[1, "", ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -refer -pvol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -refer -svol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, "", ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, "", ""],
('auludel', '-unit None -lu 1 -f'): [0, 0, ""],
('auludel', '-unit None -lu 3 -f'): [1, 0, ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""],
('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""],
('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""],
('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""],
('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]}
auluref_ret_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
hsnm_get_lu_ret_err = {
('auluref', '-unit None -lu 0'): [1, "%s" % auluref_ret_err, ""],
}
auluref_vol_type_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
hsnm_get_lu_vol_type_err = {
('auluref', '-unit None -lu 0'):
[0, "%s" % auluref_vol_type_err, ""],
}
auluref_dppool_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 N/A Enable 0 Normal"
hsnm_get_lu_dppool_err = {
('auluref', '-unit None -lu 0'):
[0, "%s" % auluref_dppool_err, ""],
}
auluref_size_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097151 blocks 256KB N/A 0 Enable 0 Normal"
hsnm_get_lu_size_err = {
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_size_err, ""],
}
auluref_num_port_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 1 Normal"
hsnm_get_lu_num_port_err = {
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_num_port_err, ""],
}
# The following information is passed on to tests, when creating a volume
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
'provider_location': '1', 'name': 'test',
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
test_volume = {'name': 'test_volume', 'size': 128,
'id': 'test-volume-0',
'provider_location': '1', 'status': 'available'}
test_volume_error = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '3', 'status': 'available'}
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
'id': 'test-volume-error',
'provider_location': None, 'status': 'available'}
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '1', 'status': 'available'}
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
'id': 'test-volume3',
'volume_metadata': [{'key': 'type',
'value': 'V-VOL'}],
'provider_location': '1', 'status': 'available'}
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
'id': 'test-volume2',
'provider_location': '3', 'status': 'available'}
test_snapshot = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
'provider_location': '1', 'status': 'available'}
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0',
'volume': test_volume_error,
'provider_location': None, 'status': 'available'}
UNIT_NAME = 'HUS110_91122819'
test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME}
test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME}
test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME}
test_existing_no_ldev_ref = {'unit_name': UNIT_NAME}
test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None}
test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'}
test_existing_no_unit_ref = {'ldev': '0'}
def __init__(self, *args, **kwargs):
    """Plain pass-through constructor; all real setup happens in setUp()."""
    super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs)

def setUp(self):
    """Build the mocked configuration and the driver under test for each case."""
    super(HBSDSNM2FCDriverTest, self).setUp()
    self._setup_config()
    self._setup_driver()
def _setup_config(self):
    """Create a mocked cinder Configuration with default Hitachi test values.

    hitachi_serial_number / hitachi_unit_name are the string "None" (not the
    None object) because the driver compares them as strings.
    """
    self.configuration = mock.Mock(conf.Configuration)
    self.configuration.hitachi_pool_id = 30
    self.configuration.hitachi_target_ports = "00"
    self.configuration.hitachi_debug_level = 0
    self.configuration.hitachi_serial_number = "None"
    self.configuration.hitachi_unit_name = "None"
    self.configuration.hitachi_group_request = False
    self.configuration.hitachi_zoning_request = False
    self.configuration.config_group = "None"
    self.configuration.hitachi_ldev_range = [0, 100]
    self.configuration.hitachi_default_copy_method = 'SI'
    self.configuration.hitachi_copy_check_interval = 1
    self.configuration.hitachi_copy_speed = 3
def _setup_driver(self):
    """Instantiate the FC driver wired to the SNM2 command backend.

    The common layer's command object is replaced with a real HBSDSNM2
    instance; tests then patch its exec_hsnm method so no CLI is invoked.
    """
    self.driver = hbsd_fc.HBSDFCDriver(
        configuration=self.configuration)
    context = None
    db = None
    self.driver.common = hbsd_common.HBSDCommon(
        self.configuration, self.driver, context, db)
    self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
    self.driver.common.pair_flock = \
        self.driver.common.command.set_pair_flock()
    self.driver.common.horcmgr_flock = \
        self.driver.common.command.set_horcmgr_flock()
    # Mark setup as complete so driver entry points don't block waiting on it.
    self.driver.do_setup_status.set()
# API test cases
# NOTE: mock.patch.object decorators apply bottom-up, so the mock for the
# bottom-most decorator arrives as the first mock argument (arg1).
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume(self, arg1, arg2, arg3):
    """test create_volume."""
    ret = self.driver.create_volume(self._VOLUME)
    vol = self._VOLUME.copy()
    vol['provider_location'] = ret['provider_location']
    self.assertEqual('1', vol['provider_location'])

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_error(self, arg1, arg2, arg3):
    """test create_volume."""
    # test_volume_error's size/provider_location make the fake backend fail.
    self.assertRaises(exception.HBSDCmdError,
                      self.driver.create_volume,
                      self.test_volume_error)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats(self, arg1, arg2):
    """test get_volume_stats."""
    stats = self.driver.get_volume_stats(True)
    self.assertEqual('Hitachi', stats['vendor_name'])

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats_error(self, arg1, arg2):
    """test get_volume_stats."""
    # Pool 29 is unknown to the fake backend, so stats collection fails.
    self.configuration.hitachi_pool_id = 29
    stats = self.driver.get_volume_stats(True)
    self.assertEqual({}, stats)
    # restore the default pool id for subsequent assertions in this test run
    self.configuration.hitachi_pool_id = 30
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume(self, arg1, arg2):
    """test extend_volume."""
    self.driver.extend_volume(self._VOLUME, 256)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume_error(self, arg1, arg2):
    """test extend_volume."""
    # test_volume_error3 is a V-VOL, which cannot be extended.
    self.assertRaises(exception.HBSDError, self.driver.extend_volume,
                      self.test_volume_error3, 256)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume(self, arg1, arg2):
    """test delete_volume."""
    self.driver.delete_volume(self._VOLUME)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume_error(self, arg1, arg2):
    """test delete_volume."""
    self.assertRaises(exception.HBSDCmdError,
                      self.driver.delete_volume,
                      self.test_volume_error4)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                   return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                   return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                   return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
    """test create_snapshot."""
    ret = self.driver.create_volume(self._VOLUME)
    ret = self.driver.create_snapshot(self.test_snapshot)
    self.assertEqual('1', ret['provider_location'])

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                   return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                   return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                   return_value=test_volume_error)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
    """test create_snapshot."""
    self.assertRaises(exception.HBSDCmdError,
                      self.driver.create_snapshot,
                      self.test_snapshot_error2)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot(self, arg1, arg2):
    """test delete_snapshot."""
    self.driver.delete_snapshot(self.test_snapshot)
    return

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot_error(self, arg1, arg2):
    """test delete_snapshot."""
    # Deleting a snapshot with a broken backing volume must not raise.
    self.driver.delete_snapshot(self.test_snapshot_error2)
    return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                   return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
    """test create_volume_from_snapshot."""
    vol = self.driver.create_volume_from_snapshot(self._VOLUME,
                                                  self.test_snapshot)
    self.assertIsNotNone(vol)
    return

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                   return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
    """test create_volume_from_snapshot."""
    # Target volume is larger (256) than the snapshot (128) -> HBSDError.
    self.assertRaises(exception.HBSDError,
                      self.driver.create_volume_from_snapshot,
                      self.test_volume_error2, self.test_snapshot)
    return

@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                   return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                   return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
    """test create_cloned_volume."""
    vol = self.driver.create_cloned_volume(self._VOLUME,
                                           self.test_volume)
    self.assertIsNotNone(vol)
    return

@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                   return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                   return_value=test_volume_error1)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
    """test create_cloned_volume."""
    # Source volume has provider_location None -> clone must fail.
    self.assertRaises(exception.HBSDError,
                      self.driver.create_cloned_volume,
                      self._VOLUME, self.test_volume_error1)
    return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection(self, arg1, arg2):
    """test initialize connection."""
    connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
    rc = self.driver.initialize_connection(self._VOLUME, connector)
    self.assertEqual('fibre_channel', rc['driver_volume_type'])
    self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
    self.assertEqual(1, rc['data']['target_lun'])
    return

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection_error(self, arg1, arg2):
    """test initialize connection."""
    # 'x' is not a valid WWPN, so building the FC mapping must fail.
    connector = {'wwpns': 'x', 'ip': '0xc0a80100'}
    self.assertRaises(exception.HBSDError,
                      self.driver.initialize_connection,
                      self._VOLUME, connector)
    return

@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection(self, arg1):
    """test terminate connection."""
    connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
    rc = self.driver.terminate_connection(self._VOLUME, connector)
    self.assertEqual('fibre_channel', rc['driver_volume_type'])
    self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
    return

@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection_error(self, arg1):
    """test terminate connection."""
    # Connector without 'wwpns' cannot be resolved to an FC initiator.
    connector = {'ip': '0xc0a80100'}
    self.assertRaises(exception.HBSDError,
                      self.driver.terminate_connection,
                      self._VOLUME, connector)
    return

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_manage_existing(self, arg1, arg2):
    """Managing an existing LDEV stores it as the volume's provider_location."""
    rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref)
    self.assertEqual(0, rc['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size(self, arg1, arg2, arg3):
    """A valid existing reference reports the LDEV size (1 GB here)."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    size = self.driver.manage_existing_get_size(self._VOLUME,
                                                self.test_existing_ref)
    self.assertEqual(1, size)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3):
    """A reference whose 'ldev' value is None is rejected."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_none_ldev_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3):
    """A non-numeric 'ldev' value ('AAA') is rejected."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_invalid_ldev_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
    """A reference missing the 'ldev' key entirely is rejected."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_no_ldev_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3):
    """A reference whose 'unit_name' value is None is rejected."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_none_unit_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3):
    """A 'unit_name' that does not match the configured unit is rejected."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_invalid_unit_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3):
    """A reference missing the 'unit_name' key entirely is rejected."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_no_unit_ref)
# The following cases swap exec_hsnm's side_effect for variants that return
# malformed backend output (bad return code, volume type, DP pool, size, or
# port count) and verify each maps to ManageExistingInvalidReference.
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                   side_effect=_exec_hsnm_get_lu_ret_err)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_ret_err(self, arg1, arg2, arg3):
    """Backend returns a non-zero status for the LU query."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                   side_effect=_exec_hsnm_get_lu_vol_type_err)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_lu_vol_type_err(self, arg1, arg2, arg3):
    """Backend reports an unsupported volume type for the LU."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                   side_effect=_exec_hsnm_get_lu_dppool_err)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_lu_dppool_err(self, arg1, arg2, arg3):
    """Backend reports a bad DP pool for the LU."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                   side_effect=_exec_hsnm_get_lu_size_err)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_lu_size_err(self, arg1, arg2, arg3):
    """Backend reports an invalid LU size."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                   side_effect=_exec_hsnm_get_lu_num_port_err)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_lu_num_port_err(self, arg1, arg2, arg3):
    """Backend reports an invalid port count for the LU."""
    self.configuration.hitachi_unit_name = self.UNIT_NAME
    self.assertRaises(exception.ManageExistingInvalidReference,
                      self.driver.manage_existing_get_size, self._VOLUME,
                      self.test_existing_ref)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_unmanage(self, arg1, arg2):
    """Unmanaging a healthy volume succeeds without error."""
    self.driver.unmanage(self._VOLUME)

@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_unmanage_busy(self, arg1, arg2):
    """Unmanaging a V-VOL (busy) volume raises HBSDVolumeIsBusy."""
    self.assertRaises(exception.HBSDVolumeIsBusy,
                      self.driver.unmanage, self.test_volume_error3)
| 49.876461 | 79 | 0.618992 |
b82d069f11c362e5aa50974e88e0653676e86df9 | 390 | py | Python | v2.5.7/toontown/toonfest/DayAndNightGlobals.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-01T15:46:43.000Z | 2021-07-23T16:26:48.000Z | v2.5.7/toontown/toonfest/DayAndNightGlobals.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 1 | 2019-06-29T03:40:05.000Z | 2021-06-13T01:15:16.000Z | v2.5.7/toontown/toonfest/DayAndNightGlobals.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-28T21:18:46.000Z | 2021-02-25T06:37:25.000Z | SUNRISE = 0
# Time-of-day stage constants (SUNRISE = 0 is defined just above this span).
MORNING = 1
DAY = 2
SUNSET = 3
NIGHT = 4
# Zone ids that participate in the day/night lighting cycle
# (presumably ToonFest playground zones — confirm against the zone tables).
TIME_OF_DAY_ZONES = [
    7000, 7100]
# Phase durations in seconds; the "toonfest-short-tod-cycle" config flag
# (default True) selects a fast debug cycle instead of the full-length one.
if config.GetBool('toonfest-short-tod-cycle', True):
    DAY_TIME = 20.0
    SUNSET_TIME = 10.0
    NIGHT_TIME = 20.0
    SUNRISE_TIME = 10.0
else:
    DAY_TIME = 1200.0
    SUNSET_TIME = 600.0
    NIGHT_TIME = 1200.0
    SUNRISE_TIME = 600.0
# Ordered progression of stages over one full cycle
LIST_OF_TOD_STAGES = [SUNRISE, MORNING, DAY, SUNSET, NIGHT]
b1538a6d6a100e912cb21e27fd384d6093ddeab4 | 4,085 | py | Python | src/dataset/prep_splits_BigEarthNet-19.py | kungfuai/d3m-segmentation-research | 5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | [
"MIT"
] | 1 | 2020-12-07T02:25:53.000Z | 2020-12-07T02:25:53.000Z | src/dataset/prep_splits_BigEarthNet-19.py | kungfuai/d3m-segmentation-research | 5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | [
"MIT"
] | null | null | null | src/dataset/prep_splits_BigEarthNet-19.py | kungfuai/d3m-segmentation-research | 5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script creates splits with TFRecord files from 1) BigEarthNet
# image patches based on csv files that contain patch names and
# 2) the new class nomenclature (BigEarthNet-19)
#
# prep_splits_BigEarthNet-19.py --help can be used to learn how to use this script.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 16 Jan 2020
# Version: 1.0.1
# Usage: prep_splits_BigEarthNet-19.py [-h] [-r ROOT_FOLDER] [-o OUT_FOLDER] [--update_json]
# [-n PATCH_NAMES [PATCH_NAMES ...]]
import argparse
import os
import csv
import json
from tensorflow_utils import prep_tf_record_files
# Runtime feature flags: which GeoTIFF reader was found and whether patch
# json files should be rewritten with BigEarthNet-19 labels (set in __main__).
GDAL_EXISTED = False
RASTERIO_EXISTED = False
UPDATE_JSON = False

# Label-index table loaded at import time; presumably maps class labels to
# BigEarthNet-19 nomenclature indices — confirm against label_indices.json.
with open('data/label_indices.json', 'rb') as f:
    label_indices = json.load(f)
if __name__ == "__main__":
    # Command-line entry point: validates arguments, detects an available
    # GeoTIFF reader, reads the split csv files, and writes one TFRecord
    # file per split via prep_tf_record_files().
    parser = argparse.ArgumentParser(description=
        'This script creates TFRecord files for the BigEarthNet train, validation and test splits')
    parser.add_argument('-r', '--root_folder', dest = 'root_folder',
                        help = 'root folder path contains multiple patch folders')
    parser.add_argument('-o', '--out_folder', dest = 'out_folder',
                        help = 'folder path containing resulting TFRecord or LMDB files')
    parser.add_argument('--update_json', default = False, action = "store_true", help =
                        'flag for adding BigEarthNet-19 labels to the json file of each patch')
    parser.add_argument('-n', '--splits', dest = 'splits', help =
                        'csv files each of which contain list of patch names, patches with snow, clouds, and shadows already excluded', nargs = '+')
    parser.add_argument('-l', '--library', type=str, dest = 'library', help="Limit search to Sentinel mission", choices=['tensorflow'])
    args = parser.parse_args()

    # Checks the existence of the root folder containing patch folders.
    if args.root_folder:
        if not os.path.exists(args.root_folder):
            print('ERROR: folder', args.root_folder, 'does not exist')
            exit()
    else:
        # BUG FIX: the original printed args.patch_folder here, an attribute
        # argparse never defines, so a missing -r crashed with AttributeError
        # instead of a usable message.
        print('ERROR: root folder is required (use -r/--root_folder)')
        exit()

    # Checks the existence of required python packages: prefer GDAL for
    # reading GeoTIFF files and fall back to rasterio.
    try:
        import gdal
        GDAL_EXISTED = True
        print('INFO: GDAL package will be used to read GeoTIFF files')
    except ImportError:
        try:
            import rasterio
            RASTERIO_EXISTED = True
            print('INFO: rasterio package will be used to read GeoTIFF files')
        except ImportError:
            print('ERROR: please install either GDAL or rasterio package to read GeoTIFF files')
            exit()

    try:
        import numpy as np
    except ImportError:
        print('ERROR: please install numpy package')
        exit()

    # Read each split csv into a list of patch names. Initialised up front so
    # the prep_tf_record_files call below cannot hit a NameError when the
    # optional --splits argument is omitted.
    patch_names_list = []
    split_names = []
    if args.splits:
        try:
            for csv_file in args.splits:
                patch_names_list.append([])
                # split name = csv filename without extension (e.g. "train")
                split_names.append(os.path.basename(csv_file).split('.')[0])
                with open(csv_file, 'r') as fp:
                    csv_reader = csv.reader(fp, delimiter=',')
                    for row in csv_reader:
                        patch_names_list[-1].append(row[0].strip())
        except Exception:
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; keep the broad best-effort message otherwise
            print('ERROR: some csv files either do not exist or have been corrupted')
            exit()

    if args.update_json:
        UPDATE_JSON = True

    if args.library == 'tensorflow':
        try:
            import tensorflow as tf
        except ImportError:
            print('ERROR: please install tensorflow package to create TFRecord files')
            exit()
        prep_tf_record_files(
            args.root_folder,
            args.out_folder,
            split_names,
            patch_names_list,
            label_indices,
            GDAL_EXISTED,
            RASTERIO_EXISTED,
            UPDATE_JSON
        )
eb3fb75f5d7a17606cf53586695260aeb45ce2c8 | 224 | py | Python | tests/data/import_self.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 12 | 2015-01-15T01:13:42.000Z | 2022-03-04T21:14:27.000Z | tests/data/import_self.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 6 | 2015-01-19T19:46:51.000Z | 2019-02-05T20:20:26.000Z | tests/data/import_self.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2015-10-24T20:08:09.000Z | 2019-02-04T20:53:21.000Z | """This module will get W0406 warning for importing self,
which should be ignored in Cheesecake score.
"""
__revision__ = 'satisfy pylint checker'
import import_self
print(import_self.__revision__) # use imported module
| 22.4 | 57 | 0.785714 |
88d549d84bec79feba1cc2405cd61a83b45953ec | 5,532 | py | Python | tests/runner/test_runner.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | null | null | null | tests/runner/test_runner.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | 7 | 2021-11-19T21:40:47.000Z | 2021-12-24T16:10:06.000Z | tests/runner/test_runner.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | null | null | null | # flake8: noqa isort:skip
import os
import sys
from pathlib import Path
from unittest import mock
import subprocess
import pytest
# Skip the whole module at collection time on unsupported platforms.
# NOTE(review): "minor >= 7" assumes major version 3 — confirm before any
# hypothetical Python 4, and consider sys.version_info >= (3, 7) instead.
if not (sys.platform == "linux" and sys.version_info.minor >= 7):
    pytest.skip("skipping linux-only 3.7+ tests", allow_module_level=True)

import datapane as dp
from datapane.client.api.runtime import _report
from datapane.client.apps import build_bundle, DatapaneCfg
from datapane.common.config import RunnerConfig
from datapane.common import SDict, SSDict
from datapane.runner import __main__ as m
from datapane.runner.exec_script import exec_mod
from datapane.runner.typedefs import RunResult

# disabled for now - may re-enable when we support local
# running/rendering with our runner calling into user code
# (duplicates the module-level skip above as a per-test skipif marker)
pytestmark = pytest.mark.skipif(
    not (sys.platform == "linux" and sys.version_info.minor >= 7),
    reason="Only supported on Linux/py3.7+",
)
def test_make_env():
    """make_env keeps whitelisted variables (PATH) and drops others (PWD)."""
    env_vars = m.make_env(os.environ)
    assert "PATH" in env_vars and "PWD" not in env_vars
def test_exec(datadir: Path, monkeypatch, capsys):
    """Test running an isolated code snippet"""
    monkeypatch.chdir(datadir)
    # Execute the sample module with an injected initial global x=4.
    res = exec_mod(Path("sample_module.py"), init_state={"x": 4})
    # print(res)
    assert "x" in res
    assert res["x"] == 4
    assert res["y"] == 4
    assert res["foo"]() == "my_df"
    # run_path makes res static due to sandboxing
    # module is no longer "alive", it's a script that's finished executing
    assert res["y"] != 5
    # executed modules run under the sandbox name, not "__main__"
    assert res["__name__"] == "__datapane__"
    (out, err) = capsys.readouterr()
    # exact stdout produced by sample_module.py during execution
    assert out == "x is 4\nin foo\nmy_df\n5\n"
class MockApp(dp.App):
    """Use custom mock class to disable constructor but keep other methods"""

    # Class-level stand-ins for the server-provided app fields; individual
    # tests patch `script` (and `download_pkg`) as needed.
    script = ""
    id = "a"
    requirements = ["pytil"]
    pre_commands = ["echo PRE1", "echo PRE2"]
    post_commands = ["echo POST1", "echo POST2"]
    api_version = dp.__version__

    def __init__(self, *a, **kw):
        # intentionally skip dp.App.__init__ (no server round-trip in tests)
        pass

    @classmethod
    def get(cls, *a, **kw):
        # lookup always "succeeds" with a fresh mock instance
        return cls()

    @classmethod
    def by_id(cls, id_or_url: str):
        # id/url lookup always "succeeds" with a fresh mock instance
        return cls()
def mock_report_upload(self, **kwargs):
    """Stand-in for Report.upload: record a fake report, then defer to the mock.

    Appends a Mock with id "ABC" to the runtime _report list so tests can
    assert on res.report_id; returning mock.DEFAULT keeps normal mock
    return-value handling.
    """
    fake_report = mock.Mock()
    fake_report.id = "ABC"
    _report.append(fake_report)
    return mock.DEFAULT
@mock.patch("datapane.client.api.App", new=MockApp)
def _runner(params: SDict, env: SSDict, script: Path, sdist: Path = Path(".")) -> RunResult:
with mock.patch.object(MockApp, "script", new_callable=mock.PropertyMock) as ep, mock.patch.object(
MockApp, "download_pkg"
) as dp:
# setup app object
ep.return_value = script
dp.return_value = sdist
# main fn
x = RunnerConfig(app_id="ZBAmDk1", config=params, env=env)
res = m.run_api(x)
return res
# TODO - fix exception handling stacktraces
# mock.patch decorators apply bottom-up: rc is the Report.upload mock,
# isc is the setup_script mock.
@mock.patch("datapane.runner.exec_script.setup_script", autospec=True)
@mock.patch("datapane.client.api.Report.upload", autospec=True, side_effect=mock_report_upload)
def test_run_single_app(rc, isc, datadir: Path, monkeypatch, capfd):
    """Test running an isolated code snippet with params

    NOTE - we can simplify by calling exec_script.run directly, doesn't test as much of API however
    """
    monkeypatch.chdir(datadir)
    # monkeypatch.setenv("DATAPANE_ON_DATAPANE", "true")
    monkeypatch.setenv("DATAPANE_BY_DATAPANE", "true")
    monkeypatch.setenv("ENV_VAR", "env value")

    @mock.patch("datapane.runner.exec_script.script_env", autospec=True)
    def f(val: str, app_env):
        # test twice to ensure stateful params are handled correctly
        res = _runner({"p1": val}, {"ENV_VAR": "env_value"}, Path("dp_app.py"))
        # (out, err) = capsys.readouterr()
        (out, err) = capfd.readouterr()
        assert "on datapane" not in out
        assert "by datapane" in out
        # asserts
        isc.assert_called()
        (rc_args, rc_kwargs) = rc.call_args
        assert rc_kwargs["description"] == "Description"
        # first positional arg of Report.upload is the Report instance itself
        _r: dp.Report = rc_args[0]
        _blocks = _r.pages[0].blocks
        assert isinstance(_blocks, list)
        assert len(_blocks) == 3
        # the param value must have made it into the report content
        assert val in _blocks[0].content
        assert res.report_id == "ABC"
        # pre/post commands
        assert "PRE2" in out
        assert "POST2" in out

    f("HELLO")
    f("WORLD")
@mock.patch("datapane.client.api.Report.upload", autospec=True, side_effect=mock_report_upload)
def test_run_bundle(rc, datadir: Path, monkeypatch, capsys):
monkeypatch.chdir(datadir)
# monkeypatch.setenv("DATAPANE_ON_DATAPANE", "true")
monkeypatch.setenv("DATAPANE_BY_DATAPANE", "true")
# TODO - we should prob use a pre-built sdist here...
dp_config = DatapaneCfg.create_initial(config_file=Path("dp_test_mod.yaml"))
with build_bundle(dp_config) as sdist:
# whl_file = build_bundle(dp_config, sdist, shared_datadir, username="test", version=1)
try:
# NOTE - need to pass in all params as we're not setting defaults via dp-server
res = _runner(
{"p1": "VAL", "p2": "xyz", "p3": True}, {"ENV_VAR": "env_value"}, dp_config.script, sdist=sdist
)
finally:
subprocess.run([sys.executable, "-m", "pip", "uninstall", "--yes", "pytil"], check=True)
# asserts
(out, err) = capsys.readouterr()
assert "ran app" in out
assert "p2=xyz" in out
assert "ENV_VAR=env_value" in out
assert "WORLD" in out
assert dp.Result.get() == "hello , world!"
assert res.report_id is None
| 34.148148 | 111 | 0.662509 |
59e71317b34e7ead45f7a0a0b792ff1c02c724bf | 32,413 | py | Python | modules/meta.py | mikenobbs/Plex-Meta-Manager | 6788fa8ae4d3fe8ed97ca4d7751397226d02a90a | [
"MIT"
] | null | null | null | modules/meta.py | mikenobbs/Plex-Meta-Manager | 6788fa8ae4d3fe8ed97ca4d7751397226d02a90a | [
"MIT"
] | 25 | 2021-11-01T08:23:01.000Z | 2022-02-22T08:22:25.000Z | modules/meta.py | mikenobbs/Plex-Meta-Manager | 6788fa8ae4d3fe8ed97ca4d7751397226d02a90a | [
"MIT"
] | null | null | null | import logging, os, re
from datetime import datetime
from modules import plex, util
from modules.util import Failed, ImageData
from plexapi.exceptions import NotFound
from ruamel import yaml
logger = logging.getLogger("Plex Meta Manager")
github_base = "https://raw.githubusercontent.com/meisnate12/Plex-Meta-Manager-Configs/master/"
def get_dict(attribute, attr_data, check_list=None):
    """Extract a dict-of-dicts section from parsed YAML data.

    Returns a new dict whose keys are stringified and whose values are the
    per-item dicts, skipping (with a logged error) any entry that duplicates
    a name in check_list, has no data, or is not a dictionary. Returns None
    when the attribute is absent, blank, or not a dictionary.
    """
    if check_list is None:
        check_list = []
    if not attr_data or attribute not in attr_data:
        return None
    section = attr_data[attribute]
    if not section:
        logger.warning(f"Config Warning: {attribute} attribute is blank")
        return None
    if not isinstance(section, dict):
        logger.warning(f"Config Warning: {attribute} must be a dictionary")
        return None
    # naive singularisation used only for log messages ("collections" -> "collection")
    singular = attribute[:-1] if attribute[-1] == 's' else attribute
    out = {}
    for key, value in section.items():
        if key in check_list:
            logger.error(f"Config Warning: Skipping duplicate {singular}: {key}")
        elif value is None:
            logger.error(f"Config Warning: {singular}: {key} has no data")
        elif not isinstance(value, dict):
            logger.error(f"Config Warning: {singular}: {key} must be a dictionary")
        else:
            out[str(key)] = value
    return out
class DataFile:
    """Base class for a YAML-backed data file (metadata/playlist definitions).

    Handles loading the YAML from a local path, a direct URL, or a path in
    the community configs Git repo, and expanding `template` references into
    concrete attributes.
    """

    def __init__(self, config, file_type, path):
        self.config = config        # ConfigFile; provides .get() for HTTP fetches
        self.type = file_type       # "URL", "Git", or a local-file type
        self.path = path            # URL, repo-relative path, or filesystem path
        self.data_type = ""         # set by subclasses ("Collection"/"Playlist")
        self.templates = {}         # name -> template dict, populated by subclasses

    def load_file(self):
        """Fetch and parse the YAML content; returns the parsed data.

        Raises:
            Failed: if the file cannot be found/fetched or the YAML is invalid.
        """
        try:
            if self.type in ["URL", "Git"]:
                # "Git" paths are relative to the community configs repo
                content_path = self.path if self.type == "URL" else f"{github_base}{self.path}.yml"
                response = self.config.get(content_path)
                if response.status_code >= 400:
                    raise Failed(f"URL Error: No file found at {content_path}")
                content = response.content
            elif os.path.exists(os.path.abspath(self.path)):
                # BUG FIX: read and close the file; the original passed the
                # open handle straight to the parser and leaked it.
                with open(self.path, encoding="utf-8") as data_file:
                    content = data_file.read()
            else:
                raise Failed(f"File Error: File does not exist {os.path.abspath(self.path)}")
            data, _, _ = yaml.util.load_yaml_guess_indent(content)
            return data
        except yaml.scanner.ScannerError as ye:
            raise Failed(f"YAML Error: {util.tab_new_lines(ye)}")
        except Failed:
            # BUG FIX: propagate our own URL/File errors unchanged instead of
            # re-wrapping them as "YAML Error" with a spurious stacktrace.
            raise
        except Exception as e:
            util.print_stacktrace()
            raise Failed(f"YAML Error: {e}")

    def apply_template(self, name, data, template):
        """Expand one template reference into a dict of attributes.

        `template` is the raw value of a `template:` attribute (a dict or a
        list of dicts, each naming a template in self.templates plus variable
        values). Variables are substituted into the template via <<var>>
        placeholders; attributes already present in `data` are not overridden.

        Raises:
            Failed: on any malformed template reference or definition.
        """
        if not self.templates:
            raise Failed(f"{self.data_type} Error: No templates found")
        elif not template:
            raise Failed(f"{self.data_type} Error: template attribute is blank")
        else:
            logger.debug(f"Value: {template}")
            for variables in util.get_list(template, split=False):
                if not isinstance(variables, dict):
                    raise Failed(f"{self.data_type} Error: template attribute is not a dictionary")
                elif "name" not in variables:
                    raise Failed(f"{self.data_type} Error: template sub-attribute name is required")
                elif not variables["name"]:
                    raise Failed(f"{self.data_type} Error: template sub-attribute name is blank")
                elif variables["name"] not in self.templates:
                    raise Failed(f"{self.data_type} Error: template {variables['name']} not found")
                elif not isinstance(self.templates[variables["name"]], dict):
                    raise Failed(f"{self.data_type} Error: template {variables['name']} is not a dictionary")
                else:
                    for tm in variables:
                        if not variables[tm]:
                            raise Failed(f"{self.data_type} Error: template sub-attribute {tm} is blank")
                    # implicit variables: the collection/playlist's own name
                    if self.data_type == "Collection" and "collection_name" not in variables:
                        variables["collection_name"] = str(name)
                    if self.data_type == "Playlist" and "playlist_name" not in variables:
                        variables["playlist_name"] = str(name)
                    template_name = variables["name"]
                    template = self.templates[template_name]
                    # collect template-declared default values for variables
                    default = {}
                    if "default" in template:
                        if template["default"]:
                            if isinstance(template["default"], dict):
                                for dv in template["default"]:
                                    if template["default"][dv]:
                                        default[dv] = template["default"][dv]
                                    else:
                                        raise Failed(f"{self.data_type} Error: template default sub-attribute {dv} is blank")
                            else:
                                raise Failed(f"{self.data_type} Error: template sub-attribute default is not a dictionary")
                        else:
                            raise Failed(f"{self.data_type} Error: template sub-attribute default is blank")
                    # variables that may be left unset (their uses are dropped)
                    optional = []
                    if "optional" in template:
                        if template["optional"]:
                            for op in util.get_list(template["optional"]):
                                if op not in default:
                                    optional.append(str(op))
                                else:
                                    logger.warning(f"Template Warning: variable {op} cannot be optional if it has a default")
                        else:
                            raise Failed(f"{self.data_type} Error: template sub-attribute optional is blank")
                    # move_prefix: rotate leading articles ("The X" -> "X, The")
                    if "move_prefix" in template or "move_collection_prefix" in template:
                        prefix = None
                        if "move_prefix" in template:
                            prefix = template["move_prefix"]
                        elif "move_collection_prefix" in template:
                            logger.warning(f"{self.data_type} Error: template sub-attribute move_collection_prefix will run as move_prefix")
                            prefix = template["move_collection_prefix"]
                        if prefix:
                            for op in util.get_list(prefix):
                                variables["collection_name"] = variables["collection_name"].replace(f"{str(op).strip()} ", "") + f", {str(op).strip()}"
                        else:
                            raise Failed(f"{self.data_type} Error: template sub-attribute move_prefix is blank")

                    def check_data(_method, _data):
                        """Recursively substitute <<var>> placeholders in _data.

                        Raises the bare Failed sentinel (caught by callers) when
                        an optional variable is referenced but not supplied.
                        """
                        if isinstance(_data, dict):
                            final_data = {}
                            for sm, sd in _data.items():
                                try:
                                    final_data[sm] = check_data(_method, sd)
                                except Failed:
                                    continue
                        elif isinstance(_data, list):
                            final_data = []
                            for li in _data:
                                try:
                                    final_data.append(check_data(_method, li))
                                except Failed:
                                    continue
                        else:
                            txt = str(_data)

                            def scan_text(og_txt, var, var_value):
                                if og_txt == f"<<{var}>>":
                                    return str(var_value)
                                elif f"<<{var}>>" in str(og_txt):
                                    return str(og_txt).replace(f"<<{var}>>", str(var_value))
                                else:
                                    return og_txt

                            for option in optional:
                                if option not in variables and f"<<{option}>>" in txt:
                                    raise Failed
                            for variable, variable_data in variables.items():
                                # arr tag attributes cannot contain commas, so the
                                # name variable is comma-stripped for them
                                if (variable == "collection_name" or variable == "playlist_name") and _method in ["radarr_tag", "item_radarr_tag", "sonarr_tag", "item_sonarr_tag"]:
                                    txt = scan_text(txt, variable, variable_data.replace(",", ""))
                                elif variable != "name":
                                    txt = scan_text(txt, variable, variable_data)
                            for dm, dd in default.items():
                                txt = scan_text(txt, dm, dd)
                            # coerce substituted text back to bool/int/float where possible
                            if txt in ["true", "True"]:
                                final_data = True
                            elif txt in ["false", "False"]:
                                final_data = False
                            else:
                                try:
                                    num_data = float(txt)
                                    final_data = int(num_data) if num_data.is_integer() else num_data
                                except (ValueError, TypeError):
                                    final_data = txt
                        return final_data

                    # build the expanded attributes, skipping ones already set in data
                    new_attributes = {}
                    for method_name, attr_data in template.items():
                        if method_name not in data and method_name not in ["default", "optional", "move_collection_prefix", "move_prefix"]:
                            if attr_data is None:
                                logger.error(f"Template Error: template attribute {method_name} is blank")
                                continue
                            try:
                                new_attributes[method_name] = check_data(method_name, attr_data)
                            except Failed:
                                continue
                    return new_attributes
class MetadataFile(DataFile):
    """Loads a metadata YAML file and applies its edits to items in a Plex library.

    Exposes three dict attributes parsed from the file: ``metadata`` (per-item
    edits), ``collections``, and ``templates``.  ``update_metadata`` pushes the
    parsed edits (fields, advanced settings, tags, images, seasons, episodes)
    to the matching Plex items via ``self.library``.
    """

    def __init__(self, config, library, file_type, path):
        """Parse the file (or raw data) into metadata/collections/templates dicts.

        :param config: global Config object
        :param library: the Plex library the metadata applies to
        :param file_type: source kind; "Data" means ``path`` already holds parsed data
        :param path: file path, or pre-parsed data when file_type == "Data"
        :raises Failed: when a loaded file has neither metadata nor collections
        """
        super().__init__(config, file_type, path)
        self.data_type = "Collection"
        self.library = library
        if file_type == "Data":
            # path is already a parsed mapping, not a filesystem path
            self.metadata = None
            self.collections = get_dict("collections", path, library.collections)
            self.templates = get_dict("templates", path)
        else:
            logger.info("")
            logger.info(f"Loading Metadata {file_type}: {path}")
            data = self.load_file()
            self.metadata = get_dict("metadata", data, library.metadatas)
            self.templates = get_dict("templates", data)
            self.collections = get_dict("collections", data, library.collections)
            if self.metadata is None and self.collections is None:
                raise Failed("YAML Error: metadata or collections attribute is required")
            logger.info(f"Metadata File Loaded Successfully")

    def get_collections(self, requested_collections):
        """Return the subset of ``self.collections`` named in ``requested_collections``,
        or every collection when the argument is falsy."""
        if requested_collections:
            return {c: self.collections[c] for c in util.get_list(requested_collections) if c in self.collections}
        else:
            return self.collections

    def update_metadata(self):
        """Apply every entry of ``self.metadata`` to its matching Plex item.

        Returns None immediately when no metadata was loaded.  For each entry:
        locate the item by title/year (with "(SUB)" and alt_title fallbacks),
        optionally pull defaults from TMDb, then apply field edits, advanced
        edits, tag edits, images, and per-season / per-episode edits.
        """
        if not self.metadata:
            return None
        logger.info("")
        util.separator("Running Metadata")
        logger.info("")
        for mapping_name, meta in self.metadata.items():
            # case-insensitive lookup: lowercase attribute -> attribute as written
            methods = {mm.lower(): mm for mm in meta}

            # in test mode, only process entries explicitly flagged test: true
            if self.config.test_mode and ("test" not in methods or meta[methods["test"]] is not True):
                continue

            updated = False
            edits = {}
            advance_edits = {}

            def add_edit(name, current_item, group, alias, key=None, value=None, var_type="str"):
                """Queue a field edit in the enclosing ``edits`` dict if the new
                value differs from the item's current value.

                ``var_type`` controls validation: "date" (reformatted to
                %Y-%m-%d), "float" (must parse to a number in [0, 10]), or the
                default pass-through "str".
                """
                if value or name in alias:
                    if value or group[alias[name]]:
                        if key is None: key = name
                        if value is None: value = group[alias[name]]
                        try:
                            current = str(getattr(current_item, key, ""))
                            if var_type == "date":
                                final_value = util.validate_date(value, name, return_as="%Y-%m-%d")
                                # drop the time-of-day portion so dates compare equal
                                current = current[:-9]
                            elif var_type == "float":
                                if value is None:
                                    raise Failed(f"Metadata Error: {name} attribute is blank")
                                final_value = None
                                try:
                                    value = float(str(value))
                                    if 0 <= value <= 10:
                                        final_value = value
                                except ValueError:
                                    pass
                                if final_value is None:
                                    raise Failed(f"Metadata Error: {name} attribute must be a number between 0 and 10")
                            else:
                                final_value = value
                            if current != str(final_value):
                                edits[f"{key}.value"] = final_value
                                # lock the field so Plex agents don't overwrite it
                                edits[f"{key}.locked"] = 1
                                logger.info(f"Detail: {name} updated to {final_value}")
                        except Failed as ee:
                            logger.error(ee)
                    else:
                        logger.error(f"Metadata Error: {name} attribute is blank")

            def add_advanced_edit(attr, obj, group, alias, show_library=False, new_agent=False):
                """Queue an advanced-settings edit (episode sorting, language, ...)
                in the enclosing ``advance_edits`` dict after validating library
                type and agent requirements."""
                key, options = plex.item_advance_keys[f"item_{attr}"]
                if attr in alias:
                    if new_agent and self.library.agent not in plex.new_plex_agents:
                        logger.error(f"Metadata Error: {attr} attribute only works for with the New Plex Movie Agent and New Plex TV Agent")
                    elif show_library and not self.library.is_show:
                        logger.error(f"Metadata Error: {attr} attribute only works for show libraries")
                    elif group[alias[attr]]:
                        method_data = str(group[alias[attr]]).lower()
                        if method_data not in options:
                            logger.error(f"Metadata Error: {group[alias[attr]]} {attr} attribute invalid")
                        elif getattr(obj, key) != options[method_data]:
                            advance_edits[key] = options[method_data]
                            logger.info(f"Detail: {attr} updated to {method_data}")
                    else:
                        logger.error(f"Metadata Error: {attr} attribute is blank")

            def edit_tags(attr, obj, group, alias, extra=None, movie_library=False):
                """Apply add/remove/sync tag edits for one tag attribute.

                Rejects incompatible combinations (attr with attr.sync;
                attr.remove with attr.sync) and blank values, then delegates to
                ``self.library.edit_tags``.  Returns True when a change was made,
                False otherwise.
                """
                if movie_library and not self.library.is_movie and (attr in alias or f"{attr}.sync" in alias or f"{attr}.remove" in alias):
                    logger.error(f"Metadata Error: {attr} attribute only works for movie libraries")
                elif attr in alias and f"{attr}.sync" in alias:
                    logger.error(f"Metadata Error: Cannot use {attr} and {attr}.sync together")
                elif f"{attr}.remove" in alias and f"{attr}.sync" in alias:
                    logger.error(f"Metadata Error: Cannot use {attr}.remove and {attr}.sync together")
                elif attr in alias and group[alias[attr]] is None:
                    logger.error(f"Metadata Error: {attr} attribute is blank")
                elif f"{attr}.remove" in alias and group[alias[f"{attr}.remove"]] is None:
                    logger.error(f"Metadata Error: {attr}.remove attribute is blank")
                elif f"{attr}.sync" in alias and group[alias[f"{attr}.sync"]] is None:
                    logger.error(f"Metadata Error: {attr}.sync attribute is blank")
                elif attr in alias or f"{attr}.remove" in alias or f"{attr}.sync" in alias:
                    add_tags = util.get_list(group[alias[attr]]) if attr in alias else []
                    if extra:
                        add_tags.extend(extra)
                    remove_tags = util.get_list(group[alias[f"{attr}.remove"]]) if f"{attr}.remove" in alias else None
                    sync_tags = util.get_list(group[alias[f"{attr}.sync"]] if group[alias[f"{attr}.sync"]] else []) if f"{attr}.sync" in alias else None
                    return self.library.edit_tags(attr, obj, add_tags=add_tags, remove_tags=remove_tags, sync_tags=sync_tags)
                return False

            def set_image(attr, group, alias, is_poster=True, is_url=True):
                """Build an ImageData for one image attribute, or log an error
                and return None when the attribute is blank."""
                if group[alias[attr]]:
                    return ImageData(attr, group[alias[attr]], is_poster=is_poster, is_url=is_url)
                else:
                    logger.error(f"Metadata Error: {attr} attribute is blank")

            def set_images(obj, group, alias):
                """Upload poster/background images for ``obj``; url_* attributes
                take precedence over file_* attributes."""
                poster = None
                background = None
                if "url_poster" in alias:
                    poster = set_image("url_poster", group, alias)
                elif "file_poster" in alias:
                    poster = set_image("file_poster", group, alias, is_url=False)
                if "url_background" in alias:
                    background = set_image("url_background", group, alias, is_poster=False)
                elif "file_background" in alias:
                    background = set_image("file_background", group, alias, is_poster=False, is_url=False)
                if poster or background:
                    self.library.upload_images(obj, poster=poster, background=background)

            logger.info("")
            util.separator()
            logger.info("")
            # --- resolve the Plex item by title/year -------------------------
            year = None
            if "year" in methods:
                next_year = datetime.now().year + 1
                if meta[methods["year"]] is None:
                    raise Failed("Metadata Error: year attribute is blank")
                try:
                    year_value = int(str(meta[methods["year"]]))
                    if 1800 <= year_value <= next_year:
                        year = year_value
                except ValueError:
                    pass
                if year is None:
                    raise Failed(f"Metadata Error: year attribute must be an integer between 1800 and {next_year}")

            title = mapping_name
            if "title" in methods:
                if meta[methods["title"]] is None:
                    logger.error("Metadata Error: title attribute is blank")
                else:
                    title = meta[methods["title"]]

            item = self.library.search_item(title, year=year)

            if item is None:
                # retry with the "(SUB)" suffix convention used for subtitled items
                item = self.library.search_item(f"{title} (SUB)", year=year)

            if item is None and "alt_title" in methods:
                if meta[methods["alt_title"]] is None:
                    logger.error("Metadata Error: alt_title attribute is blank")
                else:
                    # NOTE(review): uses meta["alt_title"] directly instead of
                    # meta[methods["alt_title"]] — inconsistent with every other
                    # lookup and would KeyError on non-lowercase keys; confirm intent
                    alt_title = meta["alt_title"]
                    item = self.library.search_item(alt_title, year=year)

            if item is None:
                logger.error(f"Plex Error: Item {mapping_name} not found")
                logger.error(f"Skipping {mapping_name}")
                continue

            item_type = "Movie" if self.library.is_movie else "Show"
            logger.info(f"Updating {item_type}: {title}...")

            # --- optional TMDb lookup for default field values ---------------
            tmdb_item = None
            tmdb_is_movie = None
            if ("tmdb_show" in methods or "tmdb_id" in methods) and "tmdb_movie" in methods:
                logger.error("Metadata Error: Cannot use tmdb_movie and tmdb_show when editing the same metadata item")

            if "tmdb_show" in methods or "tmdb_id" in methods or "tmdb_movie" in methods:
                try:
                    if "tmdb_show" in methods or "tmdb_id" in methods:
                        data = meta[methods["tmdb_show" if "tmdb_show" in methods else "tmdb_id"]]
                        if data is None:
                            logger.error("Metadata Error: tmdb_show attribute is blank")
                        else:
                            tmdb_is_movie = False
                            tmdb_item = self.config.TMDb.get_show(util.regex_first_int(data, "Show"))
                    elif "tmdb_movie" in methods:
                        if meta[methods["tmdb_movie"]] is None:
                            logger.error("Metadata Error: tmdb_movie attribute is blank")
                        else:
                            tmdb_is_movie = True
                            tmdb_item = self.config.TMDb.get_movie(util.regex_first_int(meta[methods["tmdb_movie"]], "Movie"))
                except Failed as e:
                    logger.error(e)

            originally_available = None
            original_title = None
            rating = None
            studio = None
            tagline = None
            summary = None
            genres = []
            if tmdb_item:
                originally_available = tmdb_item.release_date if tmdb_is_movie else tmdb_item.first_air_date
                if tmdb_item and tmdb_is_movie is True and tmdb_item.original_title != tmdb_item.title:
                    original_title = tmdb_item.original_title
                elif tmdb_item and tmdb_is_movie is False and tmdb_item.original_name != tmdb_item.name:
                    original_title = tmdb_item.original_name
                rating = tmdb_item.vote_average
                if tmdb_is_movie is True and tmdb_item.production_companies:
                    studio = tmdb_item.production_companies[0].name
                elif tmdb_is_movie is False and tmdb_item.networks:
                    studio = tmdb_item.networks[0].name
                tagline = tmdb_item.tagline if len(tmdb_item.tagline) > 0 else None
                summary = tmdb_item.overview
                genres = [genre.name for genre in tmdb_item.genres]

            # --- plain field edits ------------------------------------------
            edits = {}
            add_edit("title", item, meta, methods, value=title)
            add_edit("sort_title", item, meta, methods, key="titleSort")
            add_edit("originally_available", item, meta, methods, key="originallyAvailableAt", value=originally_available, var_type="date")
            add_edit("critic_rating", item, meta, methods, value=rating, key="rating", var_type="float")
            add_edit("audience_rating", item, meta, methods, key="audienceRating", var_type="float")
            add_edit("user_rating", item, meta, methods, key="userRating", var_type="float")
            add_edit("content_rating", item, meta, methods, key="contentRating")
            add_edit("original_title", item, meta, methods, key="originalTitle", value=original_title)
            add_edit("studio", item, meta, methods, value=studio)
            add_edit("tagline", item, meta, methods, value=tagline)
            add_edit("summary", item, meta, methods, value=summary)
            if self.library.edit_item(item, mapping_name, item_type, edits):
                updated = True

            # --- advanced-settings edits ------------------------------------
            advance_edits = {}
            for advance_edit in ["episode_sorting", "keep_episodes", "delete_episodes", "season_display", "episode_ordering", "metadata_language", "use_original_title"]:
                is_show = advance_edit in ["episode_sorting", "keep_episodes", "delete_episodes", "season_display", "episode_ordering"]
                is_new_agent = advance_edit in ["metadata_language", "use_original_title"]
                add_advanced_edit(advance_edit, item, meta, methods, show_library=is_show, new_agent=is_new_agent)
            if self.library.edit_item(item, mapping_name, item_type, advance_edits, advanced=True):
                updated = True

            # --- tag edits (genre also merges TMDb genres) ------------------
            for tag_edit in ["genre", "label", "collection", "country", "director", "producer", "writer"]:
                is_movie = tag_edit in ["country", "director", "producer", "writer"]
                has_extra = genres if tag_edit == "genre" else None
                if edit_tags(tag_edit, item, meta, methods, movie_library=is_movie, extra=has_extra):
                    updated = True

            logger.info(f"{item_type}: {mapping_name} Details Update {'Complete' if updated else 'Not Needed'}")

            set_images(item, meta, methods)

            # --- per-season edits (show libraries only) ---------------------
            if "seasons" in methods and self.library.is_show:
                if meta[methods["seasons"]]:
                    for season_id in meta[methods["seasons"]]:
                        updated = False
                        logger.info("")
                        logger.info(f"Updating season {season_id} of {mapping_name}...")
                        if isinstance(season_id, int):
                            season = None
                            for s in item.seasons():
                                if s.index == season_id:
                                    season = s
                                    break
                            if season is None:
                                logger.error(f"Metadata Error: Season: {season_id} not found")
                            else:
                                season_dict = meta[methods["seasons"]][season_id]
                                season_methods = {sm.lower(): sm for sm in season_dict}
                                if "title" in season_methods and season_dict[season_methods["title"]]:
                                    title = season_dict[season_methods["title"]]
                                else:
                                    title = season.title
                                if "sub" in season_methods:
                                    # sub: true appends " (SUB)" to the title; sub: false strips it
                                    if season_dict[season_methods["sub"]] is None:
                                        logger.error("Metadata Error: sub attribute is blank")
                                    elif season_dict[season_methods["sub"]] is True and "(SUB)" not in title:
                                        title = f"{title} (SUB)"
                                    elif season_dict[season_methods["sub"]] is False and title.endswith(" (SUB)"):
                                        title = title[:-6]
                                    else:
                                        logger.error("Metadata Error: sub attribute must be True or False")
                                edits = {}
                                add_edit("title", season, season_dict, season_methods, value=title)
                                add_edit("summary", season, season_dict, season_methods)
                                if self.library.edit_item(season, season_id, "Season", edits):
                                    updated = True
                                set_images(season, season_dict, season_methods)
                        else:
                            logger.error(f"Metadata Error: Season: {season_id} invalid, it must be an integer")
                        logger.info(f"Season {season_id} of {mapping_name} Details Update {'Complete' if updated else 'Not Needed'}")
                else:
                    logger.error("Metadata Error: seasons attribute is blank")
            elif "seasons" in methods:
                logger.error("Metadata Error: seasons attribute only works for show libraries")

            # --- per-episode edits, keyed as "S##E##" -----------------------
            if "episodes" in methods and self.library.is_show:
                if meta[methods["episodes"]]:
                    for episode_str in meta[methods["episodes"]]:
                        updated = False
                        logger.info("")
                        match = re.search("[Ss]\\d+[Ee]\\d+", episode_str)
                        if match:
                            output = match.group(0)[1:].split("E" if "E" in match.group(0) else "e")
                            season_id = int(output[0])
                            episode_id = int(output[1])
                            logger.info(f"Updating episode S{season_id}E{episode_id} of {mapping_name}...")
                            try:
                                episode = item.episode(season=season_id, episode=episode_id)
                            except NotFound:
                                logger.error(f"Metadata Error: episode {episode_id} of season {season_id} not found")
                            else:
                                episode_dict = meta[methods["episodes"]][episode_str]
                                episode_methods = {em.lower(): em for em in episode_dict}
                                if "title" in episode_methods and episode_dict[episode_methods["title"]]:
                                    title = episode_dict[episode_methods["title"]]
                                else:
                                    title = episode.title
                                # NOTE(review): membership test is on episode_dict here,
                                # not episode_methods as in the season branch — a
                                # non-lowercase "Sub" key would be skipped; confirm intent
                                if "sub" in episode_dict:
                                    if episode_dict[episode_methods["sub"]] is None:
                                        logger.error("Metadata Error: sub attribute is blank")
                                    elif episode_dict[episode_methods["sub"]] is True and "(SUB)" not in title:
                                        title = f"{title} (SUB)"
                                    elif episode_dict[episode_methods["sub"]] is False and title.endswith(" (SUB)"):
                                        title = title[:-6]
                                    else:
                                        logger.error("Metadata Error: sub attribute must be True or False")
                                edits = {}
                                add_edit("title", episode, episode_dict, episode_methods, value=title)
                                add_edit("sort_title", episode, episode_dict, episode_methods, key="titleSort")
                                add_edit("rating", episode, episode_dict, episode_methods, var_type="float")
                                add_edit("originally_available", episode, episode_dict, episode_methods, key="originallyAvailableAt", var_type="date")
                                add_edit("summary", episode, episode_dict, episode_methods)
                                if self.library.edit_item(episode, f"{season_id} Episode: {episode_id}", "Season", edits):
                                    updated = True
                                if edit_tags("director", episode, episode_dict, episode_methods):
                                    updated = True
                                if edit_tags("writer", episode, episode_dict, episode_methods):
                                    updated = True
                                set_images(episode, episode_dict, episode_methods)
                            logger.info(f"Episode S{season_id}E{episode_id} of {mapping_name} Details Update {'Complete' if updated else 'Not Needed'}")
                        else:
                            logger.error(f"Metadata Error: episode {episode_str} invalid must have S##E## format")
                else:
                    logger.error("Metadata Error: episodes attribute is blank")
            elif "episodes" in methods:
                logger.error("Metadata Error: episodes attribute only works for show libraries")
class PlaylistFile(DataFile):
    """A data file that carries ``playlists`` (required) and ``templates`` mappings."""

    def __init__(self, config, file_type, path):
        """Load the playlist file and parse its playlists/templates sections.

        :raises Failed: when the file defines no playlists
        """
        super().__init__(config, file_type, path)
        self.data_type = "Playlist"
        self.playlists = {}
        logger.info("")
        logger.info(f"Loading Playlist File {file_type}: {path}")
        loaded = self.load_file()
        self.playlists = get_dict("playlists", loaded, self.config.playlist_names)
        self.templates = get_dict("templates", loaded)
        if not self.playlists:
            raise Failed("YAML Error: playlists attribute is required")
        logger.info("Playlist File Loaded Successfully")
| 57.165785 | 180 | 0.505229 |
be2239430d0d812035f5a1f58a5f6d6ac4faeae0 | 2,253 | py | Python | docs/conf.py | tamaswells/pysimm | 2586679a9eacdf1046baa2312c8f92c9247ac5be | [
"MIT"
] | 55 | 2017-01-14T09:11:54.000Z | 2022-01-28T17:17:34.000Z | docs/conf.py | tamaswells/pysimm | 2586679a9eacdf1046baa2312c8f92c9247ac5be | [
"MIT"
] | 13 | 2017-03-27T19:32:57.000Z | 2022-01-04T17:49:50.000Z | docs/conf.py | tamaswells/pysimm | 2586679a9eacdf1046baa2312c8f92c9247ac5be | [
"MIT"
] | 28 | 2016-08-29T12:30:58.000Z | 2021-12-04T07:24:07.000Z | # Configuration file for the Sphinx documentation builder.
#
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package root importable so autodoc/autoapi can find the modules.
sys.path.insert(0, os.path.abspath('..'))


# -- Project information -----------------------------------------------------

project = 'pysimm'
copyright = '2019, Mike Fortunato'
author = 'Mike Fortunato'

# The full version, including alpha/beta/rc tags
release = '1.0'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'autoapi.extension',
    'sphinx_rtd_theme'
]

# Directory scanned by sphinx-autoapi to generate API docs automatically.
autoapi_dirs = ['../pysimm']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Explicitly specify the name of the index file to avoid readTheDocs miss-interpretation
master_doc = 'index'
880c3c883300df3ccab841510028d60597441875 | 2,233 | py | Python | harvey/containers.py | gurneesh/harvey | 393308bfc2a833ddbbfe7aca4ddf157a7593aa73 | [
"MIT"
] | null | null | null | harvey/containers.py | gurneesh/harvey | 393308bfc2a833ddbbfe7aca4ddf157a7593aa73 | [
"MIT"
] | null | null | null | harvey/containers.py | gurneesh/harvey | 393308bfc2a833ddbbfe7aca4ddf157a7593aa73 | [
"MIT"
] | null | null | null | import requests
from harvey.globals import Global
class Container:
    """Static helpers that drive Docker container lifecycle operations
    through the Engine HTTP API (base URL and headers come from Global)."""

    @staticmethod
    def create_container(container_id):
        """Create a container whose name and image are both ``container_id``."""
        return requests.post(
            f'{Global.BASE_URL}containers/create',
            params={'name': container_id},
            json={'Image': container_id},
            headers=Global.JSON_HEADERS,
        )

    @staticmethod
    def start_container(container_id):
        """Start the named container."""
        return requests.post(f'{Global.BASE_URL}containers/{container_id}/start')

    @staticmethod
    def stop_container(container_id):
        """Stop the named container."""
        return requests.post(f'{Global.BASE_URL}containers/{container_id}/stop')

    @staticmethod
    def inspect_container(container_id):
        """Fetch the low-level JSON details of the named container."""
        return requests.get(f'{Global.BASE_URL}containers/{container_id}/json')

    @staticmethod
    def list_containers():
        """Return the daemon's container listing."""
        return requests.get(f'{Global.BASE_URL}containers/json')

    @staticmethod
    def inspect_container_logs(container_id):
        """Fetch the container's stdout and stderr log stream, decoded as text."""
        log_response = requests.get(
            f'{Global.BASE_URL}containers/{container_id}/logs',
            params={'stdout': True, 'stderr': True},
        )
        return log_response.content.decode()

    @staticmethod
    def wait_container(container_id):
        """Block until the named container exits."""
        return requests.post(f'{Global.BASE_URL}containers/{container_id}/wait')

    @staticmethod
    def remove_container(container_id):
        """Force-remove (delete) the named container."""
        return requests.delete(
            f'{Global.BASE_URL}containers/{container_id}',
            json={'force': True},
            headers=Global.JSON_HEADERS,
        )
| 30.589041 | 85 | 0.616659 |
d42ae6e7ed7a365702a95c69d7a29d7c3718f7f1 | 3,019 | py | Python | python/nlusvc/coords/dmo/character_level_tokenizer.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/nlusvc/coords/dmo/character_level_tokenizer.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/nlusvc/coords/dmo/character_level_tokenizer.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pprint
from base import BaseObject
class CharacterLevelTokenizer(BaseObject):
    """ Perform Character Level Tokenization

    The input text is scanned one character at a time: each character is
    classified as word-vs-punctuation (a boolean mask), the mask is segmented
    into runs, and each run is joined back into an original/normalized token
    pair.
    """

    def __init__(self,
                 input_text: str,
                 is_debug: bool = False):
        """
        Created:
            14-Jan-2020
            craig.trim@ibm.com
            *   the GitHub documentation is extremely detailed:
                https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1732
        :param input_text:
            the raw text to tokenize
        :param is_debug:
            if True, log the tokenization result at debug level
        """
        BaseObject.__init__(self, __name__)
        self._is_debug = is_debug
        self._input_text = input_text

    def _mask_input(self) -> list:
        """Return a boolean mask over the input text (True == punctuation)."""

        def _is_punctuation(some_char: str) -> bool:
            # a hyphen is explicitly treated as punctuation here,
            # even though it commonly joins compound words
            if some_char == '-':
                return True
            if some_char.isdigit():
                return False
            if some_char.isnumeric():
                return False
            if some_char.isalpha():
                return False
            return True

        return [_is_punctuation(x) for x in self._input_text]

    @staticmethod
    def _apply_mask(mask: list) -> list:
        """Segment the boolean mask into runs marking token boundaries.

        flag_F records that the current run contains a word character;
        flag_T records that it contains a punctuation character.  When a word
        character arrives after both kinds have been seen, the current run is
        flushed and a new one begins.
        """
        buffer = []
        master = []
        flag_F = False
        flag_T = False
        for i in range(0, len(mask)):
            if not mask[i]:
                # word character: close the previous run if it already
                # mixed word characters and trailing punctuation
                if flag_F and flag_T:
                    master.append(buffer)
                    buffer = []
                    flag_T = False
                flag_F = True
                buffer.append(mask[i])
            else:
                # punctuation character: keep accumulating in the current run
                flag_T = True
                buffer.append(mask[i])
        if len(buffer) > 0:  # flush the trailing run
            master.append(buffer)
        return master

    def _tokenize(self,
                  master: list) -> dict:
        """Join each mask segment back into text.

        For every segment, 'original' keeps all of its characters while
        'normalized' keeps only the non-punctuation ones.
        """
        ctr = 0  # absolute character position in the input text
        d_tokens = {}
        original_buffer = []
        token_buffer = []
        for i in range(0, len(master)):
            buffer = master[i]
            for j in range(0, len(buffer)):
                mask = buffer[j]
                if not mask:
                    token_buffer.append(self._input_text[ctr])
                original_buffer.append(self._input_text[ctr])
                ctr += 1
            d_tokens[i] = {
                "original": ''.join(original_buffer),
                "normalized": ''.join(token_buffer)}
            original_buffer = []
            token_buffer = []
        return d_tokens

    def process(self) -> dict:
        """Run the full mask -> segment -> join pipeline and return the token dict."""

        # Step 1: Mask the Input
        mask = self._mask_input()  # GIT-1732-17144827

        # Step 2: Apply the Mask
        master = self._apply_mask(mask)  # GIT-1732-17144919

        # Step 3: Tokenize the Input
        d_tokens = self._tokenize(master)  # GIT-1732-17145376

        if self._is_debug:
            self.logger.debug('\n'.join([
                "Character Level Tokenization Complete",
                f"\tInput Text: {self._input_text}",
                pprint.pformat(d_tokens)]))

        return d_tokens
| 25.369748 | 81 | 0.504803 |
05a9c35b3247ad8af186f013e02aed8784228f58 | 18,594 | py | Python | tests/test_pipeline/test_pipeline.py | bve81/etna | 5ef9223ce9c03b4e475ab4277cd2a1d4fde065be | [
"Apache-2.0"
] | null | null | null | tests/test_pipeline/test_pipeline.py | bve81/etna | 5ef9223ce9c03b4e475ab4277cd2a1d4fde065be | [
"Apache-2.0"
] | null | null | null | tests/test_pipeline/test_pipeline.py | bve81/etna | 5ef9223ce9c03b4e475ab4277cd2a1d4fde065be | [
"Apache-2.0"
] | null | null | null | import re
from copy import deepcopy
from datetime import datetime
from typing import List
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.metrics import MAE
from etna.metrics import MSE
from etna.metrics import SMAPE
from etna.metrics import Metric
from etna.metrics import MetricAggregationMode
from etna.models import LinearPerSegmentModel
from etna.models import MovingAverageModel
from etna.models import NaiveModel
from etna.models import ProphetModel
from etna.models import SARIMAXModel
from etna.pipeline import Pipeline
from etna.transforms import AddConstTransform
from etna.transforms import DateFlagsTransform
DEFAULT_METRICS = [MAE(mode=MetricAggregationMode.per_segment)]
@pytest.mark.parametrize("horizon,quantiles,prediction_interval_cv", ([(1, [0.025, 0.975], 2)]))
def test_init_pass(horizon, quantiles, prediction_interval_cv):
"""Check that Pipeline initialization works correctly in case of valid parameters."""
pipeline = Pipeline(
model=LinearPerSegmentModel(),
transforms=[],
horizon=horizon,
quantiles=quantiles,
n_folds=prediction_interval_cv,
)
assert pipeline.horizon == horizon
assert pipeline.quantiles == quantiles
assert prediction_interval_cv == prediction_interval_cv
@pytest.mark.parametrize(
    "horizon,quantiles,prediction_interval_cv,error_msg",
    (
        [
            (-1, [0.025, 0.975], 2, "At least one point in the future is expected"),
            (2, [0.05, 1.5], 2, "Quantile should be a number from"),
            (2, [0.025, 0.975], 1, "At least two folds for backtest are expected"),
        ]
    ),
)
def test_init_fail(horizon, quantiles, prediction_interval_cv, error_msg):
    """Check that Pipeline initialization works correctly in case of invalid parameters."""
    # each parametrized case makes exactly one constructor argument invalid
    # and expects the corresponding ValueError message
    with pytest.raises(ValueError, match=error_msg):
        _ = Pipeline(
            model=LinearPerSegmentModel(),
            transforms=[],
            horizon=horizon,
            quantiles=quantiles,
            n_folds=prediction_interval_cv,
        )
def test_fit(example_tsds):
    """Test that Pipeline correctly transforms dataset on fit stage."""
    # keep a pristine copy to replay the transform pipeline manually
    original_ts = deepcopy(example_tsds)
    model = LinearPerSegmentModel()
    transforms = [AddConstTransform(in_column="target", value=10, inplace=True), DateFlagsTransform()]
    pipeline = Pipeline(model=model, transforms=transforms, horizon=5)
    pipeline.fit(example_tsds)
    original_ts.fit_transform(transforms)
    original_ts.inverse_transform()
    # the pipeline's stored dataset must match a manual transform+inverse round-trip
    assert np.all(original_ts.df.values == pipeline.ts.df.values)


def test_forecast(example_tsds):
    """Test that the forecast from the Pipeline is correct."""
    original_ts = deepcopy(example_tsds)
    model = LinearPerSegmentModel()
    transforms = [AddConstTransform(in_column="target", value=10, inplace=True), DateFlagsTransform()]
    pipeline = Pipeline(model=model, transforms=transforms, horizon=5)
    pipeline.fit(example_tsds)
    forecast_pipeline = pipeline.forecast()
    # reproduce the same forecast manually: transform, fit, make_future, forecast
    original_ts.fit_transform(transforms)
    model.fit(original_ts)
    future = original_ts.make_future(5)
    forecast_manual = model.forecast(future)
    assert np.all(forecast_pipeline.df.values == forecast_manual.df.values)
@pytest.mark.parametrize("model", (ProphetModel(), SARIMAXModel()))
def test_forecast_prediction_interval_builtin(example_tsds, model):
"""Test that forecast method uses built-in prediction intervals for the listed models."""
np.random.seed(1234)
pipeline = Pipeline(model=model, transforms=[], horizon=5)
pipeline.fit(example_tsds)
forecast_pipeline = pipeline.forecast(prediction_interval=True)
np.random.seed(1234)
model = model.fit(example_tsds)
future = example_tsds.make_future(5)
forecast_model = model.forecast(ts=future, prediction_interval=True)
assert forecast_model.df.equals(forecast_pipeline.df)
@pytest.mark.parametrize("model", (MovingAverageModel(), LinearPerSegmentModel()))
def test_forecast_prediction_interval_interface(example_tsds, model):
"""Test the forecast interface for the models without built-in prediction intervals."""
pipeline = Pipeline(model=model, transforms=[DateFlagsTransform()], horizon=5, quantiles=[0.025, 0.975])
pipeline.fit(example_tsds)
forecast = pipeline.forecast(prediction_interval=True)
for segment in forecast.segments:
segment_slice = forecast[:, segment, :][segment]
assert {"target_0.025", "target_0.975", "target"}.issubset(segment_slice.columns)
assert (segment_slice["target_0.975"] - segment_slice["target_0.025"] >= 0).all()
@pytest.mark.parametrize("model", (MovingAverageModel(), LinearPerSegmentModel()))
def test_forecast_no_warning_prediction_intervals(example_tsds, model):
"""Test that forecast doesn't warn when called with prediction intervals."""
pipeline = Pipeline(model=model, transforms=[DateFlagsTransform()], horizon=5)
pipeline.fit(example_tsds)
with pytest.warns(None) as record:
_ = pipeline.forecast(prediction_interval=True)
# check absence of warnings about prediction intervals
assert (
len([warning for warning in record.list if re.match("doesn't support prediction intervals", str(warning))]) == 0
)
def test_forecast_prediction_interval(splited_piecewise_constant_ts):
"""Test that the prediction interval for piecewise-constant dataset is correct."""
train, test = splited_piecewise_constant_ts
pipeline = Pipeline(model=NaiveModel(lag=1), transforms=[], horizon=5)
pipeline.fit(train)
forecast = pipeline.forecast(prediction_interval=True)
assert np.allclose(forecast.df.values, test.df.values)
@pytest.mark.parametrize("quantiles_narrow,quantiles_wide", ([([0.2, 0.8], [0.025, 0.975])]))
def test_forecast_prediction_interval_size(example_tsds, quantiles_narrow, quantiles_wide):
"""Test that narrow quantile levels gives more narrow interval than wide quantile levels."""
pipeline = Pipeline(model=MovingAverageModel(), transforms=[], horizon=5, quantiles=quantiles_narrow)
pipeline.fit(example_tsds)
forecast = pipeline.forecast(prediction_interval=True)
narrow_interval_length = (
forecast[:, :, f"target_{quantiles_narrow[1]}"].values - forecast[:, :, f"target_{quantiles_narrow[0]}"].values
)
pipeline = Pipeline(model=MovingAverageModel(), transforms=[], horizon=5, quantiles=quantiles_wide)
pipeline.fit(example_tsds)
forecast = pipeline.forecast(prediction_interval=True)
wide_interval_length = (
forecast[:, :, f"target_{quantiles_wide[1]}"].values - forecast[:, :, f"target_{quantiles_wide[0]}"].values
)
assert (narrow_interval_length <= wide_interval_length).all()
def test_forecast_prediction_interval_noise(constant_ts, constant_noisy_ts):
"""Test that prediction interval for noisy dataset is wider then for the dataset without noise."""
pipeline = Pipeline(model=MovingAverageModel(), transforms=[], horizon=5, quantiles=[0.025, 0.975])
pipeline.fit(constant_ts)
forecast = pipeline.forecast(prediction_interval=True)
constant_interval_length = forecast[:, :, "target_0.975"].values - forecast[:, :, "target_0.025"].values
pipeline = Pipeline(model=MovingAverageModel(), transforms=[], horizon=5)
pipeline.fit(constant_noisy_ts)
forecast = pipeline.forecast(prediction_interval=True)
noisy_interval_length = forecast[:, :, "target_0.975"].values - forecast[:, :, "target_0.025"].values
assert (constant_interval_length <= noisy_interval_length).all()
@pytest.mark.parametrize("n_folds", (0, -1))
def test_invalid_n_folds(catboost_pipeline: Pipeline, n_folds: int, example_tsdf: TSDataset):
    """Backtest must reject a non-positive number of folds."""
    with pytest.raises(ValueError):
        catboost_pipeline.backtest(ts=example_tsdf, metrics=DEFAULT_METRICS, n_folds=n_folds)
@pytest.mark.parametrize("metrics", ([], [MAE(mode=MetricAggregationMode.macro)]))
def test_invalid_backtest_metrics(catboost_pipeline: Pipeline, metrics: List[Metric], example_tsdf: TSDataset):
    """Backtest must reject an empty metric list and macro-aggregated metrics."""
    with pytest.raises(ValueError):
        catboost_pipeline.backtest(ts=example_tsdf, metrics=metrics, n_folds=2)
def test_validate_backtest_dataset(catboost_pipeline_big: Pipeline, imbalanced_tsdf: TSDataset):
    """Backtest must fail on a dataframe too small to be divided into the requested number of splits."""
    with pytest.raises(ValueError):
        catboost_pipeline_big.backtest(ts=imbalanced_tsdf, n_folds=3, metrics=DEFAULT_METRICS)
def test_generate_expandable_timeranges_days():
    """Check train-test timerange generation in expand mode with daily frequency."""
    raw = pd.DataFrame({"timestamp": pd.date_range("2021-01-01", "2021-04-01"), "segment": "seg", "target": 1})
    wide = raw.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
    wide.columns.names = ["segment", "feature"]
    ts = TSDataset(wide, freq="D")
    # In expand mode every fold's train span starts at the very first timestamp.
    expected = (
        (("2021-01-01", "2021-02-24"), ("2021-02-25", "2021-03-08")),
        (("2021-01-01", "2021-03-08"), ("2021-03-09", "2021-03-20")),
        (("2021-01-01", "2021-03-20"), ("2021-03-21", "2021-04-01")),
    )
    folds = Pipeline._generate_folds_datasets(ts, n_folds=3, horizon=12, mode="expand")
    for stage_dfs, fold_borders in zip(folds, expected):
        for stage_df, (start, end) in zip(stage_dfs, fold_borders):
            assert stage_df.index.min() == datetime.strptime(start, "%Y-%m-%d").date()
            assert stage_df.index.max() == datetime.strptime(end, "%Y-%m-%d").date()
def test_generate_expandable_timeranges_hours():
    """Check train-test timerange generation in expand mode with hourly frequency."""
    raw = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", "2020-02-01", freq="H"), "segment": "seg", "target": 1})
    wide = raw.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
    wide.columns.names = ["segment", "feature"]
    ts = TSDataset(wide, freq="H")
    # In expand mode every fold's train span starts at the very first timestamp.
    expected = (
        (("2020-01-01 00:00:00", "2020-01-30 12:00:00"), ("2020-01-30 13:00:00", "2020-01-31 00:00:00")),
        (("2020-01-01 00:00:00", "2020-01-31 00:00:00"), ("2020-01-31 01:00:00", "2020-01-31 12:00:00")),
        (("2020-01-01 00:00:00", "2020-01-31 12:00:00"), ("2020-01-31 13:00:00", "2020-02-01 00:00:00")),
    )
    folds = Pipeline._generate_folds_datasets(ts, horizon=12, n_folds=3, mode="expand")
    for stage_dfs, fold_borders in zip(folds, expected):
        for stage_df, (start, end) in zip(stage_dfs, fold_borders):
            assert stage_df.index.min() == datetime.strptime(start, "%Y-%m-%d %H:%M:%S").date()
            assert stage_df.index.max() == datetime.strptime(end, "%Y-%m-%d %H:%M:%S").date()
def test_generate_constant_timeranges_days():
    """Check train-test timerange generation in constant mode with daily frequency."""
    raw = pd.DataFrame({"timestamp": pd.date_range("2021-01-01", "2021-04-01"), "segment": "seg", "target": 1})
    wide = raw.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
    wide.columns.names = ["segment", "feature"]
    ts = TSDataset(wide, freq="D")
    # In constant mode the train window keeps a fixed length and slides forward.
    expected = (
        (("2021-01-01", "2021-02-24"), ("2021-02-25", "2021-03-08")),
        (("2021-01-13", "2021-03-08"), ("2021-03-09", "2021-03-20")),
        (("2021-01-25", "2021-03-20"), ("2021-03-21", "2021-04-01")),
    )
    folds = Pipeline._generate_folds_datasets(ts, horizon=12, n_folds=3, mode="constant")
    for stage_dfs, fold_borders in zip(folds, expected):
        for stage_df, (start, end) in zip(stage_dfs, fold_borders):
            assert stage_df.index.min() == datetime.strptime(start, "%Y-%m-%d").date()
            assert stage_df.index.max() == datetime.strptime(end, "%Y-%m-%d").date()
def test_generate_constant_timeranges_hours():
    """Check train-test timerange generation in constant mode with hourly frequency."""
    raw = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", "2020-02-01", freq="H"), "segment": "seg", "target": 1})
    wide = raw.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
    wide.columns.names = ["segment", "feature"]
    ts = TSDataset(wide, freq="H")
    # In constant mode the train window keeps a fixed length and slides forward.
    expected = (
        (("2020-01-01 00:00:00", "2020-01-30 12:00:00"), ("2020-01-30 13:00:00", "2020-01-31 00:00:00")),
        (("2020-01-01 12:00:00", "2020-01-31 00:00:00"), ("2020-01-31 01:00:00", "2020-01-31 12:00:00")),
        (("2020-01-02 00:00:00", "2020-01-31 12:00:00"), ("2020-01-31 13:00:00", "2020-02-01 00:00:00")),
    )
    folds = Pipeline._generate_folds_datasets(ts, horizon=12, n_folds=3, mode="constant")
    for stage_dfs, fold_borders in zip(folds, expected):
        for stage_df, (start, end) in zip(stage_dfs, fold_borders):
            assert stage_df.index.min() == datetime.strptime(start, "%Y-%m-%d %H:%M:%S").date()
            assert stage_df.index.max() == datetime.strptime(end, "%Y-%m-%d %H:%M:%S").date()
@pytest.mark.parametrize(
    "aggregate_metrics,expected_columns",
    (
        (False, ["fold_number", "MAE", "MSE", "segment", "SMAPE"]),
        (True, ["MAE", "MSE", "segment", "SMAPE"]),
    ),
)
def test_get_metrics_interface(
    catboost_pipeline: Pipeline, aggregate_metrics: bool, expected_columns: List[str], big_daily_example_tsdf: TSDataset
):
    """Check that Pipeline.backtest returns the metrics dataframe with the expected columns."""
    metrics_df, _, _ = catboost_pipeline.backtest(
        ts=big_daily_example_tsdf,
        aggregate_metrics=aggregate_metrics,
        metrics=[MAE("per-segment"), MSE("per-segment"), SMAPE("per-segment")],
    )
    # Column order is irrelevant here; only the column set matters.
    assert sorted(metrics_df.columns) == sorted(expected_columns)
def test_get_forecasts_interface_daily(catboost_pipeline: Pipeline, big_daily_example_tsdf: TSDataset):
    """Check that Pipeline.backtest returns forecasts carrying the expected feature columns."""
    _, forecast, _ = catboost_pipeline.backtest(ts=big_daily_example_tsdf, metrics=DEFAULT_METRICS)
    features = set(forecast.columns.get_level_values("feature"))
    assert features == {
        "regressor_lag_feature_10", "regressor_lag_feature_11", "regressor_lag_feature_12", "fold_number", "target"
    }
def test_get_forecasts_interface_hours(catboost_pipeline: Pipeline, example_tsdf: TSDataset):
    """Check the forecast feature columns from Pipeline.backtest with non-daily seasonality."""
    _, forecast, _ = catboost_pipeline.backtest(ts=example_tsdf, metrics=DEFAULT_METRICS)
    features = set(forecast.columns.get_level_values("feature"))
    assert features == {
        "regressor_lag_feature_10", "regressor_lag_feature_11", "regressor_lag_feature_12", "fold_number", "target"
    }
def test_get_fold_info_interface_daily(catboost_pipeline: Pipeline, big_daily_example_tsdf: TSDataset):
    """Check that Pipeline.backtest returns the fold-info dataframe with the expected columns."""
    _, _, info_df = catboost_pipeline.backtest(ts=big_daily_example_tsdf, metrics=DEFAULT_METRICS)
    assert sorted(info_df.columns) == ["fold_number", "test_end_time", "test_start_time", "train_end_time", "train_start_time"]
def test_get_fold_info_interface_hours(catboost_pipeline: Pipeline, example_tsdf: TSDataset):
    """Check the fold-info dataframe columns from Pipeline.backtest with non-daily seasonality."""
    _, _, info_df = catboost_pipeline.backtest(ts=example_tsdf, metrics=DEFAULT_METRICS)
    assert sorted(info_df.columns) == ["fold_number", "test_end_time", "test_start_time", "train_end_time", "train_start_time"]
@pytest.mark.long
def test_backtest_with_n_jobs(catboost_pipeline: Pipeline, big_example_tsdf: TSDataset):
    """Check that Pipeline.backtest gives identical forecasts with one and several jobs."""
    forecasts = []
    for n_jobs in (1, 3):
        # Deep-copy both the pipeline and the data so the two runs are fully independent.
        pipeline = deepcopy(catboost_pipeline)
        _, forecast, _ = pipeline.backtest(ts=deepcopy(big_example_tsdf), n_jobs=n_jobs, metrics=DEFAULT_METRICS)
        forecasts.append(forecast)
    assert (forecasts[0] == forecasts[1]).all().all()
@pytest.fixture
def step_ts() -> Tuple[TSDataset, pd.DataFrame, pd.DataFrame]:
    """Step-function dataset plus the metrics and forecasts a naive backtest should produce.

    The series is constant over the train part, then jumps up by ``add_value``
    once per fold; a naive model therefore lags by exactly one step level.
    """
    horizon, n_folds, train_size = 5, 3, 20
    start_value, add_value = 10.0, 5.0
    segment = 0
    timestamp = pd.date_range(start="2020-01-01", periods=train_size + n_folds * horizon, freq="D")
    target = [start_value] * train_size
    for _ in range(n_folds):
        target.extend([target[-1] + add_value] * horizon)
    df = pd.DataFrame({"timestamp": timestamp, "target": target, "segment": 0})
    ts = TSDataset(TSDataset.to_dataset(df), freq="D")
    # Each fold's naive forecast misses the step by exactly add_value.
    metrics_df = pd.DataFrame(
        {"segment": [segment] * n_folds, "MAE": [add_value] * n_folds, "fold_number": list(range(n_folds))}
    )
    target_forecast = []
    fold_number_forecast = []
    for fold in range(n_folds):
        target_forecast.extend([start_value + fold * add_value] * horizon)
        fold_number_forecast.extend([fold] * horizon)
    forecast_df = pd.DataFrame(
        {"target": target_forecast, "fold_number": fold_number_forecast},
        index=timestamp[train_size:],
    )
    forecast_df.columns = pd.MultiIndex.from_product(
        [[segment], ["target", "fold_number"]], names=("segment", "feature")
    )
    return ts, metrics_df, forecast_df
def test_backtest_forecasts_sanity(step_ts):
    """Check Pipeline.backtest output against the hand-computed step-function case."""
    ts, metrics_expected, forecast_expected = step_ts
    backtest_pipeline = Pipeline(model=NaiveModel(), horizon=5)
    metrics_actual, forecast_actual, _ = backtest_pipeline.backtest(ts, metrics=[MAE()], n_folds=3)
    # Drop the index before comparing: fixture metrics use a default RangeIndex.
    assert np.all(metrics_actual.reset_index(drop=True) == metrics_expected)
    assert np.all(forecast_actual == forecast_expected)
def test_forecast_raise_error_if_not_fitted():
    """Forecasting before fit() must raise a ValueError with a clear message."""
    unfitted_pipeline = Pipeline(model=NaiveModel(), horizon=5)
    with pytest.raises(ValueError, match="Pipeline is not fitted!"):
        unfitted_pipeline.forecast()
| 46.138958 | 120 | 0.703184 |
c51657c8e65e93349a0c71f63ce1c4e048031470 | 6,632 | py | Python | t66y-spider/src/main.py | cd871127/hodgepodge-cloud | 3f1c943228d621d5a357bd1aa18ee5af50e2a75e | [
"Apache-2.0"
] | 2 | 2019-01-16T10:02:14.000Z | 2020-03-06T18:18:30.000Z | t66y-spider/src/main.py | cd871127/hodgepodge-cloud | 3f1c943228d621d5a357bd1aa18ee5af50e2a75e | [
"Apache-2.0"
] | null | null | null | t66y-spider/src/main.py | cd871127/hodgepodge-cloud | 3f1c943228d621d5a357bd1aa18ee5af50e2a75e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import getopt
import json
import sys
import pymysql
from spider import Spider
# Forum section ids ("fid" query parameter) mapped to their display names on the site.
fid_dict = {
    "2": "亞洲無碼原創區",
    "15": "亞洲有碼原創區",
    "4": "歐美原創區",
    "25": "國產原創區",
    "5": "動漫原創區",
    "26": "中字原創區",
    "27": "轉帖交流區"
}
def usage():
    """Print the command-line help text and terminate with exit status 0."""
    help_lines = (
        "usage: python package [OPTIONS]",
        "Options:",
        "    -t, --target",
        "    -c, --config",
        "    --thread-num default ",
        "    --base-url default ",
        "    --fid-list default",
        "    --batch-count default",
        "    --file-path default",
        "    -s, --t66y-session-id default",
        "    --config-file",
        "    --redis-host",
        "    --redis-port default",
        "    --redis-db",
        "    --mysql-host",
        "    --mysql-port default",
        "    --mysql-user",
        "    --mysql-password",
        "    --mysql-db",
        "    --mysql-charset default",
    )
    for help_line in help_lines:
        print(help_line)
    sys.exit(0)
def get_config(_opts):
    """Assemble the effective spider configuration from parsed getopt pairs.

    Precedence, lowest to highest: built-in defaults, the JSON file given via
    -c/--config-file, then individual command-line options.  The returned dict
    also carries "headers", "mysqlConfig" and "redisConfig" sub-dicts.
    """
    # Default HTTP request headers sent with every crawl request.
    _headers = {
        "Connection": "keep-alive",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": 1,
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
    }
    _default_redis_config = {
        "port": 6379,
        "db": 0
    }
    _default_mysql_config = {
        "cursorclass": pymysql.cursors.DictCursor,
        "use_unicode": True,
        "port": 3306,
        "charset": "utf8"
    }
    _default_config = {
        "threadNum": 1,
        "fidList": ["2", "4", "5", "15", "25", "26", "27"],
        "baseUrl": "www.t66y.com",
        "batchCount": 15
    }
    _cmd_config = dict()
    _cmd_redis_config = dict()
    _cmd_mysql_config = dict()
    _file_config = None
    _t66y_session_id = None
    for opt, value in _opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        if opt in ('-c', '--config-file'):
            with open(value, 'r') as file:
                _file_config = json.load(file)
        if opt in ('-t', '--target'):
            _cmd_config["target"] = value
        if opt == '--thread-num':
            _cmd_config["threadNum"] = int(value)
        if opt == '--base-url':
            # Bug fix: the base URL is a hostname string; int(value) always raised ValueError.
            _cmd_config["baseUrl"] = value
        if opt == '--fid-list':
            _cmd_config["fidList"] = value.split(",")
        if opt == '--redis-host':
            _cmd_redis_config["host"] = value
        if opt == '--redis-port':
            _cmd_redis_config["port"] = int(value)
        if opt == '--redis-db':
            _cmd_redis_config["db"] = int(value)
        if opt == '--mysql-host':
            _cmd_mysql_config["host"] = value
        if opt == '--mysql-port':
            _cmd_mysql_config["port"] = int(value)
        if opt == '--mysql-user':
            _cmd_mysql_config["user"] = value
        if opt == '--mysql-password':
            _cmd_mysql_config["password"] = value
        if opt == '--mysql-db':
            _cmd_mysql_config["db"] = value
        if opt == '--mysql-charset':
            _cmd_mysql_config["charset"] = value
        if opt == '--batch-count':
            # Bug fix: keep batchCount numeric, consistent with its int default (15).
            _cmd_config["batchCount"] = int(value)
        if opt == '--file-path':
            _cmd_config["filePath"] = value
        if opt in ('-s', '--t66y-session-id'):
            _t66y_session_id = "PHPSESSID=" + value
    # Command-line values override built-in defaults.
    _default_config.update(_cmd_config)
    _default_mysql_config.update(_cmd_mysql_config)
    _default_redis_config.update(_cmd_redis_config)
    if _file_config is not None:
        # Config-file values sit between the defaults and the command-line overrides.
        _file_mysql_config = _file_config.get("mysqlConfig", dict())
        _file_redis_config = _file_config.get("redisConfig", dict())
        _file_config.update(_cmd_config)
        _file_mysql_config.update(_cmd_mysql_config)
        _file_redis_config.update(_cmd_redis_config)
        _cmd_config = _file_config
        _cmd_mysql_config = _file_mysql_config
        _cmd_redis_config = _file_redis_config
        _default_config.update(_cmd_config)
        _default_mysql_config.update(_cmd_mysql_config)
        _default_redis_config.update(_cmd_redis_config)
    # The session cookie is only relevant when crawling pages or topics.
    if _default_config.get("target") in ["page", "topic"] and _t66y_session_id is not None:
        _headers["Cookie"] = _t66y_session_id
    _default_config["headers"] = _headers
    _default_config["mysqlConfig"] = _default_mysql_config
    _default_config["redisConfig"] = _default_redis_config
    return _default_config
def check_config(_config):
    """Validate the merged configuration and return a list of error messages.

    An empty list means the configuration is usable.
    """
    problems = []
    if _config.get("target") is None:
        problems.append("参数target不能为空")
    known_fids = ["2", "4", "5", "15", "25", "26", "27"]
    unknown_fids = [fid for fid in _config.get("fidList") if fid not in known_fids]
    if unknown_fids:
        problems.append(",".join(unknown_fids) + "不在列表中")
    mysql_cfg = _config.get("mysqlConfig")
    # MySQL connection parameters are always mandatory.
    for option in ("host", "user", "db", "password"):
        if mysql_cfg.get(option) is None:
            problems.append("参数mysql-" + option + "不能为空")
    # Redis is only required for page crawling; a file path only for downloads.
    if _config.get("target") == "page" and _config.get("redisConfig").get("host") is None:
        problems.append("参数redis-host不能为空")
    if _config.get("target") in ["image", "torrent"] and _config.get("filePath") is None:
        problems.append("参数file-path不能为空")
    return problems
def run():
    """Parse command-line options, validate the configuration and start the spider."""
    # Bug fix: '-s' consumes the t66y session id as an argument, so it needs a
    # trailing colon in the short-option spec (was "hc:t:s", which made getopt
    # treat '-s' as a bare flag and misparse the following argument).
    short_opts = "hc:t:s:"
    long_opts = ["help", "target=", "thread-num=", "base-url=", "fid-list=",
                 "redis-host=", "redis-port=", "redis-db=", "mysql-host=",
                 "mysql-port=", "mysql-user=", "mysql-password=", "mysql-db=",
                 "mysql-charset=", "batch-count=", "file-path=",
                 "t66y-session-id=", "config-file="]
    try:
        opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
    except getopt.GetoptError as e:
        print(e)
        usage()  # exits with status 0
    config = get_config(opts)
    error_list = check_config(config)
    if error_list:
        for err in error_list:
            print(err)
        usage()
    spider = Spider(config)
    spider.run()


if __name__ == '__main__':
    run()
| 34.010256 | 139 | 0.569361 |
ba87ce2e87fe4d6ebefda1b964e3ca9ba34e93b9 | 4,535 | py | Python | src/object_detection/builders/optimizer_builder.py | Tobias-Fischer/ros_people_object_detection_tensorflow | 2a0af311b4eef55c053bd2349e1dff10abe1f32a | [
"Apache-2.0"
] | null | null | null | src/object_detection/builders/optimizer_builder.py | Tobias-Fischer/ros_people_object_detection_tensorflow | 2a0af311b4eef55c053bd2349e1dff10abe1f32a | [
"Apache-2.0"
] | null | null | null | src/object_detection/builders/optimizer_builder.py | Tobias-Fischer/ros_people_object_detection_tensorflow | 2a0af311b4eef55c053bd2349e1dff10abe1f32a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
def build(optimizer_config):
  """Create optimizer based on config.

  Args:
    optimizer_config: A Optimizer proto message.

  Returns:
    An optimizer and a list of variables for summary.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  optimizer_type = optimizer_config.WhichOneof('optimizer')
  summary_vars = []

  if optimizer_type == 'rms_prop_optimizer':
    config = optimizer_config.rms_prop_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.RMSPropOptimizer(
        learning_rate,
        decay=config.decay,
        momentum=config.momentum_optimizer_value,
        epsilon=config.epsilon)
  elif optimizer_type == 'momentum_optimizer':
    config = optimizer_config.momentum_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.MomentumOptimizer(
        learning_rate,
        momentum=config.momentum_optimizer_value)
  elif optimizer_type == 'adam_optimizer':
    config = optimizer_config.adam_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
  else:
    raise ValueError('Optimizer %s not supported.' % optimizer_type)

  if optimizer_config.use_moving_average:
    # Wrap the base optimizer so trained weights track an exponential moving average.
    optimizer = tf.contrib.opt.MovingAverageOptimizer(
        optimizer, average_decay=optimizer_config.moving_average_decay)
  return optimizer, summary_vars
def _create_learning_rate(learning_rate_config):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
                                name='learning_rate')
  elif learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = tf.train.exponential_decay(
        config.initial_learning_rate,
        tf.train.get_or_create_global_step(),
        config.decay_steps,
        config.decay_factor,
        staircase=config.staircase, name='learning_rate')
  elif learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    # Step boundaries plus one rate per interval (initial rate first).
    boundaries = [x.step for x in config.schedule]
    rates = [config.initial_learning_rate]
    rates += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        tf.train.get_or_create_global_step(), boundaries,
        rates, config.warmup)
  elif learning_rate_type == 'cosine_decay_learning_rate':
    config = learning_rate_config.cosine_decay_learning_rate
    learning_rate = learning_schedules.cosine_decay_with_warmup(
        tf.train.get_or_create_global_step(),
        config.learning_rate_base,
        config.total_steps,
        config.warmup_learning_rate,
        config.warmup_steps,
        config.hold_base_rate_steps)
  else:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
  return learning_rate
| 36.28 | 80 | 0.744212 |
f93a85d864bd22ed0f4b9570c72ec2f6dee31239 | 37,980 | py | Python | instances/passenger_demand/pas-20210421-2109-int2e-1/65.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int2e-1/65.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int2e-1/65.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
# Total number of passengers generated for this demand instance
# (presumably the sum over the passenger_arriving table below — TODO confirm).
numPassengers = 475
passenger_arriving = (
(1, 1, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0), # 0
(0, 1, 2, 0, 1, 0, 2, 1, 0, 0, 0, 0), # 1
(0, 3, 1, 4, 1, 0, 1, 1, 2, 1, 0, 0), # 2
(0, 2, 0, 0, 0, 0, 4, 1, 2, 0, 0, 0), # 3
(0, 2, 4, 1, 1, 0, 1, 1, 1, 3, 0, 0), # 4
(1, 2, 2, 2, 2, 0, 2, 0, 0, 1, 1, 0), # 5
(1, 2, 0, 0, 0, 0, 2, 1, 1, 0, 0, 0), # 6
(0, 1, 0, 2, 0, 0, 3, 4, 1, 1, 1, 0), # 7
(0, 2, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0), # 8
(1, 0, 2, 2, 1, 0, 1, 1, 0, 1, 0, 0), # 9
(0, 2, 1, 0, 2, 0, 2, 0, 1, 0, 1, 0), # 10
(0, 0, 0, 1, 2, 0, 1, 1, 0, 0, 0, 0), # 11
(0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0), # 12
(0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0), # 13
(2, 2, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0), # 14
(0, 1, 0, 1, 0, 0, 2, 2, 2, 0, 0, 0), # 15
(1, 0, 1, 0, 1, 0, 1, 0, 2, 2, 0, 0), # 16
(0, 2, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0), # 17
(1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0), # 18
(1, 1, 1, 0, 0, 0, 0, 2, 0, 1, 1, 0), # 19
(1, 0, 1, 0, 0, 0, 1, 1, 1, 2, 0, 0), # 20
(0, 1, 0, 1, 0, 0, 4, 2, 2, 0, 0, 0), # 21
(1, 4, 0, 2, 1, 0, 0, 3, 0, 0, 0, 0), # 22
(1, 0, 1, 0, 1, 0, 1, 2, 2, 0, 0, 0), # 23
(2, 4, 1, 1, 0, 0, 0, 2, 1, 2, 0, 0), # 24
(0, 3, 1, 1, 0, 0, 2, 2, 0, 1, 0, 0), # 25
(1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 0, 0), # 26
(1, 2, 0, 3, 0, 0, 0, 1, 0, 1, 0, 0), # 27
(1, 1, 1, 0, 0, 0, 0, 2, 1, 1, 0, 0), # 28
(1, 2, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0), # 29
(0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0), # 30
(0, 1, 2, 1, 0, 0, 3, 2, 1, 1, 0, 0), # 31
(3, 2, 0, 0, 0, 0, 2, 1, 0, 1, 1, 0), # 32
(1, 1, 3, 1, 0, 0, 0, 0, 2, 0, 0, 0), # 33
(1, 0, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0), # 34
(0, 1, 1, 1, 1, 0, 0, 3, 0, 1, 0, 0), # 35
(2, 2, 1, 0, 0, 0, 1, 2, 1, 1, 0, 0), # 36
(0, 0, 2, 0, 0, 0, 0, 2, 0, 2, 1, 0), # 37
(0, 1, 1, 0, 0, 0, 0, 1, 1, 2, 0, 0), # 38
(2, 1, 0, 0, 0, 0, 0, 2, 1, 1, 0, 0), # 39
(0, 0, 0, 0, 0, 0, 1, 1, 0, 4, 1, 0), # 40
(0, 0, 1, 1, 1, 0, 1, 2, 0, 0, 0, 0), # 41
(1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0), # 42
(2, 2, 2, 3, 0, 0, 0, 1, 0, 0, 0, 0), # 43
(0, 1, 3, 0, 1, 0, 2, 4, 4, 1, 1, 0), # 44
(1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), # 45
(2, 2, 2, 0, 2, 0, 0, 0, 1, 2, 1, 0), # 46
(1, 2, 1, 1, 0, 0, 1, 2, 0, 0, 0, 0), # 47
(0, 3, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0), # 48
(0, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0), # 49
(1, 0, 0, 2, 0, 0, 1, 0, 1, 0, 0, 0), # 50
(0, 2, 1, 1, 0, 0, 0, 2, 2, 1, 0, 0), # 51
(1, 2, 3, 0, 1, 0, 1, 2, 1, 1, 0, 0), # 52
(2, 1, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0), # 53
(0, 3, 0, 0, 2, 0, 1, 2, 0, 0, 0, 0), # 54
(1, 1, 2, 2, 1, 0, 0, 3, 2, 1, 1, 0), # 55
(0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0), # 56
(0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0), # 57
(0, 1, 2, 1, 0, 0, 0, 2, 1, 1, 0, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(0.5299303116769096, 1.3592921401515152, 1.59884720437018, 1.2672554347826086, 1.4286057692307692, 0.951358695652174), # 0
(0.53490440200956, 1.3744083197425647, 1.607483107504999, 1.274312877415459, 1.4393133012820511, 0.9510344278381644), # 1
(0.5398216954443468, 1.3893002805836139, 1.6159140245644101, 1.2812149758454108, 1.4497948717948719, 0.9507002415458937), # 2
(0.544678017998244, 1.4039519531250002, 1.6241337965938305, 1.2879558423913042, 1.4600408653846157, 0.9503561820652175), # 3
(0.5494691956882256, 1.4183472678170597, 1.6321362646386746, 1.2945295893719808, 1.470041666666667, 0.9500022946859903), # 4
(0.5541910545312653, 1.4324701551101293, 1.6399152697443586, 1.3009303291062801, 1.4797876602564102, 0.9496386246980676), # 5
(0.5588394205443372, 1.4463045454545456, 1.6474646529562986, 1.3071521739130436, 1.4892692307692308, 0.9492652173913044), # 6
(0.5634101197444152, 1.4598343693006453, 1.6547782553199086, 1.3131892361111113, 1.4984767628205131, 0.9488821180555557), # 7
(0.5678989781484733, 1.4730435570987652, 1.6618499178806059, 1.3190356280193236, 1.507400641025641, 0.9484893719806764), # 8
(0.5723018217734855, 1.4859160392992425, 1.668673481683805, 1.3246854619565218, 1.51603125, 0.9480870244565218), # 9
(0.5766144766364257, 1.4984357463524132, 1.6752427877749214, 1.3301328502415461, 1.5243589743589745, 0.9476751207729468), # 10
(0.5808327687542679, 1.5105866087086142, 1.6815516771993715, 1.335371905193237, 1.5323741987179487, 0.9472537062198069), # 11
(0.584952524143986, 1.5223525568181817, 1.6875939910025708, 1.3403967391304348, 1.5400673076923077, 0.9468228260869564), # 12
(0.5889695688225538, 1.5337175211314538, 1.6933635702299341, 1.3452014643719807, 1.5474286858974362, 0.9463825256642512), # 13
(0.5928797288069457, 1.5446654320987658, 1.6988542559268778, 1.3497801932367148, 1.5544487179487179, 0.9459328502415458), # 14
(0.5966788301141351, 1.5551802201704543, 1.7040598891388172, 1.3541270380434782, 1.5611177884615386, 0.9454738451086957), # 15
(0.6003626987610965, 1.5652458157968576, 1.7089743109111684, 1.3582361111111112, 1.567426282051282, 0.9450055555555557), # 16
(0.6039271607648035, 1.5748461494283108, 1.713591362289346, 1.3621015247584543, 1.5733645833333334, 0.9445280268719808), # 17
(0.6073680421422301, 1.5839651515151516, 1.7179048843187663, 1.365717391304348, 1.5789230769230773, 0.9440413043478261), # 18
(0.6106811689103502, 1.592586752507716, 1.7219087180448445, 1.3690778230676328, 1.5840921474358975, 0.9435454332729469), # 19
(0.613862367086138, 1.600694882856341, 1.725596704512996, 1.37217693236715, 1.5888621794871796, 0.943040458937198), # 20
(0.6169074626865673, 1.6082734730113633, 1.728962684768638, 1.3750088315217392, 1.5932235576923075, 0.9425264266304348), # 21
(0.6198122817286118, 1.6153064534231203, 1.7320004998571836, 1.3775676328502415, 1.5971666666666664, 0.9420033816425122), # 22
(0.6225726502292459, 1.6217777545419474, 1.7347039908240505, 1.3798474486714978, 1.600681891025641, 0.9414713692632852), # 23
(0.6251843942054434, 1.6276713068181818, 1.7370669987146528, 1.3818423913043478, 1.6037596153846154, 0.9409304347826087), # 24
(0.6276433396741781, 1.6329710407021605, 1.7390833645744075, 1.383546573067633, 1.6063902243589743, 0.9403806234903382), # 25
(0.6299453126524241, 1.6376608866442197, 1.740746929448729, 1.384954106280193, 1.6085641025641024, 0.9398219806763285), # 26
(0.6320861391571554, 1.6417247750946968, 1.7420515343830332, 1.3860591032608698, 1.610271634615385, 0.9392545516304349), # 27
(0.6340616452053459, 1.6451466365039282, 1.7429910204227366, 1.3868556763285025, 1.611503205128205, 0.9386783816425122), # 28
(0.6358676568139694, 1.6479104013222505, 1.7435592286132533, 1.3873379378019326, 1.6122491987179488, 0.9380935160024155), # 29
(0.6375000000000001, 1.6500000000000001, 1.7437500000000001, 1.3875000000000002, 1.6125, 0.9375), # 30
(0.6390274056905372, 1.6517357599431817, 1.7436069897342994, 1.3874707312091505, 1.6124087322695038, 0.9366752519573547), # 31
(0.6405218350383632, 1.6534485795454548, 1.743182004830918, 1.387383496732026, 1.612136879432624, 0.9354049516908214), # 32
(0.6419839593989769, 1.6551382457386365, 1.742481114130435, 1.3872391544117648, 1.6116873670212766, 0.933701536731634), # 33
(0.6434144501278772, 1.6568045454545457, 1.74151038647343, 1.3870385620915036, 1.611063120567376, 0.9315774446110279), # 34
(0.6448139785805627, 1.6584472656249998, 1.7402758907004832, 1.3867825776143792, 1.610267065602837, 0.9290451128602366), # 35
(0.646183216112532, 1.6600661931818186, 1.7387836956521738, 1.3864720588235295, 1.6093021276595747, 0.9261169790104948), # 36
(0.6475228340792839, 1.6616611150568183, 1.7370398701690821, 1.3861078635620916, 1.6081712322695034, 0.9228054805930368), # 37
(0.6488335038363171, 1.6632318181818182, 1.7350504830917874, 1.3856908496732026, 1.606877304964539, 0.919123055139097), # 38
(0.6501158967391305, 1.6647780894886364, 1.7328216032608694, 1.385221875, 1.6054232712765957, 0.91508214017991), # 39
(0.6513706841432225, 1.666299715909091, 1.7303592995169084, 1.384701797385621, 1.6038120567375884, 0.9106951732467099), # 40
(0.6525985374040921, 1.6677964843749997, 1.7276696407004832, 1.3841314746732027, 1.6020465868794327, 0.9059745918707315), # 41
(0.6538001278772378, 1.6692681818181823, 1.724758695652174, 1.3835117647058823, 1.6001297872340428, 0.9009328335832084), # 42
(0.6549761269181587, 1.6707145951704545, 1.7216325332125604, 1.3828435253267974, 1.5980645833333333, 0.8955823359153756), # 43
(0.656127205882353, 1.6721355113636365, 1.7182972222222224, 1.382127614379085, 1.59585390070922, 0.8899355363984673), # 44
(0.6572540361253196, 1.6735307173295455, 1.7147588315217395, 1.3813648897058823, 1.5935006648936172, 0.8840048725637182), # 45
(0.6583572890025576, 1.6749000000000003, 1.711023429951691, 1.3805562091503267, 1.59100780141844, 0.8778027819423623), # 46
(0.6594376358695653, 1.6762431463068184, 1.707097086352657, 1.3797024305555556, 1.5883782358156031, 0.8713417020656339), # 47
(0.6604957480818415, 1.6775599431818184, 1.7029858695652174, 1.3788044117647058, 1.5856148936170213, 0.8646340704647678), # 48
(0.6615322969948849, 1.6788501775568179, 1.698695848429952, 1.3778630106209153, 1.58272070035461, 0.8576923246709978), # 49
(0.6625479539641944, 1.680113636363636, 1.6942330917874397, 1.3768790849673205, 1.5796985815602838, 0.8505289022155589), # 50
(0.6635433903452687, 1.681350106534091, 1.689603668478261, 1.375853492647059, 1.5765514627659574, 0.8431562406296852), # 51
(0.6645192774936062, 1.682559375, 1.684813647342995, 1.374787091503268, 1.5732822695035462, 0.8355867774446111), # 52
(0.6654762867647059, 1.683741228693182, 1.6798690972222223, 1.373680739379085, 1.5698939273049648, 0.8278329501915709), # 53
(0.6664150895140666, 1.6848954545454544, 1.6747760869565216, 1.3725352941176472, 1.5663893617021278, 0.8199071964017991), # 54
(0.6673363570971866, 1.6860218394886364, 1.6695406853864734, 1.3713516135620916, 1.5627714982269505, 0.8118219536065301), # 55
(0.6682407608695652, 1.6871201704545453, 1.6641689613526571, 1.3701305555555556, 1.5590432624113477, 0.8035896593369982), # 56
(0.6691289721867009, 1.6881902343750004, 1.6586669836956522, 1.3688729779411766, 1.555207579787234, 0.7952227511244377), # 57
(0.6700016624040921, 1.689231818181818, 1.6530408212560386, 1.3675797385620916, 1.5512673758865247, 0.7867336665000834), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(1, 1, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0), # 0
(1, 2, 2, 1, 1, 0, 2, 3, 0, 0, 0, 0), # 1
(1, 5, 3, 5, 2, 0, 3, 4, 2, 1, 0, 0), # 2
(1, 7, 3, 5, 2, 0, 7, 5, 4, 1, 0, 0), # 3
(1, 9, 7, 6, 3, 0, 8, 6, 5, 4, 0, 0), # 4
(2, 11, 9, 8, 5, 0, 10, 6, 5, 5, 1, 0), # 5
(3, 13, 9, 8, 5, 0, 12, 7, 6, 5, 1, 0), # 6
(3, 14, 9, 10, 5, 0, 15, 11, 7, 6, 2, 0), # 7
(3, 16, 11, 10, 5, 0, 15, 12, 8, 6, 2, 0), # 8
(4, 16, 13, 12, 6, 0, 16, 13, 8, 7, 2, 0), # 9
(4, 18, 14, 12, 8, 0, 18, 13, 9, 7, 3, 0), # 10
(4, 18, 14, 13, 10, 0, 19, 14, 9, 7, 3, 0), # 11
(4, 19, 14, 13, 10, 0, 19, 14, 9, 9, 3, 0), # 12
(4, 20, 14, 14, 10, 0, 19, 16, 10, 9, 3, 0), # 13
(6, 22, 14, 14, 10, 0, 21, 18, 10, 9, 3, 0), # 14
(6, 23, 14, 15, 10, 0, 23, 20, 12, 9, 3, 0), # 15
(7, 23, 15, 15, 11, 0, 24, 20, 14, 11, 3, 0), # 16
(7, 25, 15, 15, 11, 0, 25, 23, 15, 11, 3, 0), # 17
(8, 25, 16, 16, 11, 0, 25, 24, 15, 11, 3, 0), # 18
(9, 26, 17, 16, 11, 0, 25, 26, 15, 12, 4, 0), # 19
(10, 26, 18, 16, 11, 0, 26, 27, 16, 14, 4, 0), # 20
(10, 27, 18, 17, 11, 0, 30, 29, 18, 14, 4, 0), # 21
(11, 31, 18, 19, 12, 0, 30, 32, 18, 14, 4, 0), # 22
(12, 31, 19, 19, 13, 0, 31, 34, 20, 14, 4, 0), # 23
(14, 35, 20, 20, 13, 0, 31, 36, 21, 16, 4, 0), # 24
(14, 38, 21, 21, 13, 0, 33, 38, 21, 17, 4, 0), # 25
(15, 39, 23, 21, 14, 0, 33, 39, 22, 18, 4, 0), # 26
(16, 41, 23, 24, 14, 0, 33, 40, 22, 19, 4, 0), # 27
(17, 42, 24, 24, 14, 0, 33, 42, 23, 20, 4, 0), # 28
(18, 44, 24, 24, 15, 0, 33, 42, 24, 20, 4, 0), # 29
(18, 45, 24, 24, 15, 0, 33, 42, 24, 21, 4, 0), # 30
(18, 46, 26, 25, 15, 0, 36, 44, 25, 22, 4, 0), # 31
(21, 48, 26, 25, 15, 0, 38, 45, 25, 23, 5, 0), # 32
(22, 49, 29, 26, 15, 0, 38, 45, 27, 23, 5, 0), # 33
(23, 49, 29, 26, 15, 0, 38, 46, 30, 24, 5, 0), # 34
(23, 50, 30, 27, 16, 0, 38, 49, 30, 25, 5, 0), # 35
(25, 52, 31, 27, 16, 0, 39, 51, 31, 26, 5, 0), # 36
(25, 52, 33, 27, 16, 0, 39, 53, 31, 28, 6, 0), # 37
(25, 53, 34, 27, 16, 0, 39, 54, 32, 30, 6, 0), # 38
(27, 54, 34, 27, 16, 0, 39, 56, 33, 31, 6, 0), # 39
(27, 54, 34, 27, 16, 0, 40, 57, 33, 35, 7, 0), # 40
(27, 54, 35, 28, 17, 0, 41, 59, 33, 35, 7, 0), # 41
(28, 55, 36, 28, 17, 0, 42, 59, 33, 36, 7, 0), # 42
(30, 57, 38, 31, 17, 0, 42, 60, 33, 36, 7, 0), # 43
(30, 58, 41, 31, 18, 0, 44, 64, 37, 37, 8, 0), # 44
(31, 60, 41, 31, 18, 0, 44, 64, 38, 37, 8, 0), # 45
(33, 62, 43, 31, 20, 0, 44, 64, 39, 39, 9, 0), # 46
(34, 64, 44, 32, 20, 0, 45, 66, 39, 39, 9, 0), # 47
(34, 67, 45, 32, 20, 0, 47, 66, 39, 39, 9, 0), # 48
(34, 69, 47, 32, 21, 0, 47, 66, 39, 39, 9, 0), # 49
(35, 69, 47, 34, 21, 0, 48, 66, 40, 39, 9, 0), # 50
(35, 71, 48, 35, 21, 0, 48, 68, 42, 40, 9, 0), # 51
(36, 73, 51, 35, 22, 0, 49, 70, 43, 41, 9, 0), # 52
(38, 74, 51, 36, 22, 0, 51, 71, 43, 41, 9, 0), # 53
(38, 77, 51, 36, 24, 0, 52, 73, 43, 41, 9, 0), # 54
(39, 78, 53, 38, 25, 0, 52, 76, 45, 42, 10, 0), # 55
(39, 78, 53, 38, 25, 0, 53, 78, 46, 42, 10, 0), # 56
(39, 79, 54, 38, 25, 0, 54, 79, 46, 43, 10, 0), # 57
(39, 80, 56, 39, 25, 0, 54, 81, 47, 44, 10, 0), # 58
(39, 80, 56, 39, 25, 0, 54, 81, 47, 44, 10, 0), # 59
)
passenger_arriving_rate = (
(0.5299303116769096, 1.087433712121212, 0.959308322622108, 0.5069021739130434, 0.2857211538461538, 0.0, 0.951358695652174, 1.1428846153846153, 0.7603532608695651, 0.6395388817480719, 0.271858428030303, 0.0), # 0
(0.53490440200956, 1.0995266557940517, 0.9644898645029993, 0.5097251509661835, 0.2878626602564102, 0.0, 0.9510344278381644, 1.1514506410256409, 0.7645877264492754, 0.6429932430019996, 0.27488166394851293, 0.0), # 1
(0.5398216954443468, 1.111440224466891, 0.9695484147386461, 0.5124859903381642, 0.28995897435897433, 0.0, 0.9507002415458937, 1.1598358974358973, 0.7687289855072464, 0.646365609825764, 0.27786005611672276, 0.0), # 2
(0.544678017998244, 1.1231615625, 0.9744802779562982, 0.5151823369565216, 0.2920081730769231, 0.0, 0.9503561820652175, 1.1680326923076925, 0.7727735054347825, 0.6496535186375322, 0.280790390625, 0.0), # 3
(0.5494691956882256, 1.1346778142536476, 0.9792817587832047, 0.5178118357487923, 0.29400833333333337, 0.0, 0.9500022946859903, 1.1760333333333335, 0.7767177536231885, 0.6528545058554698, 0.2836694535634119, 0.0), # 4
(0.5541910545312653, 1.1459761240881035, 0.9839491618466152, 0.5203721316425121, 0.295957532051282, 0.0, 0.9496386246980676, 1.183830128205128, 0.7805581974637681, 0.6559661078977435, 0.28649403102202586, 0.0), # 5
(0.5588394205443372, 1.1570436363636363, 0.9884787917737792, 0.5228608695652174, 0.29785384615384614, 0.0, 0.9492652173913044, 1.1914153846153845, 0.7842913043478261, 0.6589858611825195, 0.28926090909090907, 0.0), # 6
(0.5634101197444152, 1.1678674954405162, 0.9928669531919452, 0.5252756944444444, 0.2996953525641026, 0.0, 0.9488821180555557, 1.1987814102564105, 0.7879135416666667, 0.6619113021279635, 0.29196687386012904, 0.0), # 7
(0.5678989781484733, 1.1784348456790121, 0.9971099507283635, 0.5276142512077294, 0.3014801282051282, 0.0, 0.9484893719806764, 1.2059205128205128, 0.7914213768115942, 0.6647399671522423, 0.29460871141975303, 0.0), # 8
(0.5723018217734855, 1.188732831439394, 1.001204089010283, 0.5298741847826087, 0.30320624999999995, 0.0, 0.9480870244565218, 1.2128249999999998, 0.7948112771739131, 0.6674693926735219, 0.2971832078598485, 0.0), # 9
(0.5766144766364257, 1.1987485970819305, 1.0051456726649528, 0.5320531400966184, 0.3048717948717949, 0.0, 0.9476751207729468, 1.2194871794871796, 0.7980797101449276, 0.6700971151099685, 0.2996871492704826, 0.0), # 10
(0.5808327687542679, 1.2084692869668912, 1.0089310063196228, 0.5341487620772947, 0.3064748397435897, 0.0, 0.9472537062198069, 1.2258993589743588, 0.8012231431159421, 0.6726206708797485, 0.3021173217417228, 0.0), # 11
(0.584952524143986, 1.2178820454545454, 1.0125563946015423, 0.5361586956521739, 0.30801346153846154, 0.0, 0.9468228260869564, 1.2320538461538462, 0.8042380434782609, 0.6750375964010282, 0.30447051136363634, 0.0), # 12
(0.5889695688225538, 1.2269740169051628, 1.0160181421379604, 0.5380805857487923, 0.30948573717948724, 0.0, 0.9463825256642512, 1.237942948717949, 0.8071208786231884, 0.6773454280919736, 0.3067435042262907, 0.0), # 13
(0.5928797288069457, 1.2357323456790126, 1.0193125535561267, 0.5399120772946859, 0.31088974358974353, 0.0, 0.9459328502415458, 1.2435589743589741, 0.8098681159420289, 0.6795417023707511, 0.30893308641975314, 0.0), # 14
(0.5966788301141351, 1.2441441761363634, 1.0224359334832902, 0.5416508152173912, 0.3122235576923077, 0.0, 0.9454738451086957, 1.2488942307692308, 0.8124762228260869, 0.6816239556555268, 0.31103604403409085, 0.0), # 15
(0.6003626987610965, 1.252196652637486, 1.025384586546701, 0.5432944444444444, 0.3134852564102564, 0.0, 0.9450055555555557, 1.2539410256410255, 0.8149416666666667, 0.6835897243644673, 0.3130491631593715, 0.0), # 16
(0.6039271607648035, 1.2598769195426485, 1.0281548173736075, 0.5448406099033817, 0.31467291666666664, 0.0, 0.9445280268719808, 1.2586916666666665, 0.8172609148550726, 0.6854365449157384, 0.3149692298856621, 0.0), # 17
(0.6073680421422301, 1.2671721212121212, 1.0307429305912597, 0.5462869565217392, 0.31578461538461544, 0.0, 0.9440413043478261, 1.2631384615384618, 0.8194304347826088, 0.6871619537275064, 0.3167930303030303, 0.0), # 18
(0.6106811689103502, 1.2740694020061727, 1.0331452308269067, 0.547631129227053, 0.3168184294871795, 0.0, 0.9435454332729469, 1.267273717948718, 0.8214466938405797, 0.6887634872179377, 0.3185173505015432, 0.0), # 19
(0.613862367086138, 1.2805559062850727, 1.0353580227077976, 0.5488707729468599, 0.31777243589743587, 0.0, 0.943040458937198, 1.2710897435897435, 0.82330615942029, 0.6902386818051983, 0.32013897657126816, 0.0), # 20
(0.6169074626865673, 1.2866187784090906, 1.0373776108611827, 0.5500035326086956, 0.31864471153846147, 0.0, 0.9425264266304348, 1.2745788461538459, 0.8250052989130435, 0.691585073907455, 0.32165469460227264, 0.0), # 21
(0.6198122817286118, 1.292245162738496, 1.0392002999143102, 0.5510270531400966, 0.31943333333333324, 0.0, 0.9420033816425122, 1.277733333333333, 0.8265405797101449, 0.6928001999428733, 0.323061290684624, 0.0), # 22
(0.6225726502292459, 1.2974222036335579, 1.0408223944944301, 0.551938979468599, 0.3201363782051282, 0.0, 0.9414713692632852, 1.2805455128205128, 0.8279084692028986, 0.6938815963296201, 0.32435555090838947, 0.0), # 23
(0.6251843942054434, 1.3021370454545453, 1.0422401992287915, 0.552736956521739, 0.3207519230769231, 0.0, 0.9409304347826087, 1.2830076923076923, 0.8291054347826087, 0.694826799485861, 0.32553426136363633, 0.0), # 24
(0.6276433396741781, 1.3063768325617282, 1.0434500187446445, 0.5534186292270532, 0.32127804487179484, 0.0, 0.9403806234903382, 1.2851121794871794, 0.8301279438405799, 0.6956333458297629, 0.32659420814043205, 0.0), # 25
(0.6299453126524241, 1.3101287093153757, 1.0444481576692373, 0.5539816425120772, 0.32171282051282046, 0.0, 0.9398219806763285, 1.2868512820512819, 0.8309724637681158, 0.6962987717794915, 0.32753217732884393, 0.0), # 26
(0.6320861391571554, 1.3133798200757574, 1.0452309206298198, 0.5544236413043478, 0.32205432692307695, 0.0, 0.9392545516304349, 1.2882173076923078, 0.8316354619565218, 0.6968206137532132, 0.32834495501893934, 0.0), # 27
(0.6340616452053459, 1.3161173092031424, 1.045794612253642, 0.5547422705314009, 0.322300641025641, 0.0, 0.9386783816425122, 1.289202564102564, 0.8321134057971015, 0.6971964081690946, 0.3290293273007856, 0.0), # 28
(0.6358676568139694, 1.3183283210578003, 1.046135537167952, 0.554935175120773, 0.32244983974358976, 0.0, 0.9380935160024155, 1.289799358974359, 0.8324027626811595, 0.6974236914453013, 0.3295820802644501, 0.0), # 29
(0.6375000000000001, 1.32, 1.0462500000000001, 0.555, 0.3225, 0.0, 0.9375, 1.29, 0.8325000000000001, 0.6975, 0.33, 0.0), # 30
(0.6390274056905372, 1.3213886079545452, 1.0461641938405797, 0.5549882924836602, 0.3224817464539007, 0.0, 0.9366752519573547, 1.2899269858156028, 0.8324824387254903, 0.6974427958937197, 0.3303471519886363, 0.0), # 31
(0.6405218350383632, 1.3227588636363636, 1.0459092028985506, 0.5549533986928104, 0.3224273758865248, 0.0, 0.9354049516908214, 1.2897095035460993, 0.8324300980392155, 0.6972728019323671, 0.3306897159090909, 0.0), # 32
(0.6419839593989769, 1.3241105965909092, 1.045488668478261, 0.5548956617647058, 0.3223374734042553, 0.0, 0.933701536731634, 1.2893498936170211, 0.8323434926470589, 0.696992445652174, 0.3310276491477273, 0.0), # 33
(0.6434144501278772, 1.3254436363636364, 1.044906231884058, 0.5548154248366014, 0.3222126241134752, 0.0, 0.9315774446110279, 1.2888504964539007, 0.8322231372549022, 0.696604154589372, 0.3313609090909091, 0.0), # 34
(0.6448139785805627, 1.3267578124999997, 1.0441655344202898, 0.5547130310457516, 0.3220534131205674, 0.0, 0.9290451128602366, 1.2882136524822696, 0.8320695465686275, 0.6961103562801932, 0.3316894531249999, 0.0), # 35
(0.646183216112532, 1.3280529545454547, 1.0432702173913042, 0.5545888235294117, 0.3218604255319149, 0.0, 0.9261169790104948, 1.2874417021276596, 0.8318832352941177, 0.6955134782608695, 0.3320132386363637, 0.0), # 36
(0.6475228340792839, 1.3293288920454547, 1.0422239221014493, 0.5544431454248366, 0.32163424645390065, 0.0, 0.9228054805930368, 1.2865369858156026, 0.831664718137255, 0.6948159480676328, 0.33233222301136367, 0.0), # 37
(0.6488335038363171, 1.3305854545454545, 1.0410302898550725, 0.5542763398692809, 0.3213754609929078, 0.0, 0.919123055139097, 1.2855018439716313, 0.8314145098039215, 0.6940201932367149, 0.33264636363636363, 0.0), # 38
(0.6501158967391305, 1.331822471590909, 1.0396929619565216, 0.55408875, 0.3210846542553191, 0.0, 0.91508214017991, 1.2843386170212765, 0.831133125, 0.6931286413043477, 0.33295561789772726, 0.0), # 39
(0.6513706841432225, 1.3330397727272727, 1.0382155797101449, 0.5538807189542484, 0.32076241134751765, 0.0, 0.9106951732467099, 1.2830496453900706, 0.8308210784313727, 0.6921437198067633, 0.3332599431818182, 0.0), # 40
(0.6525985374040921, 1.3342371874999996, 1.03660178442029, 0.553652589869281, 0.3204093173758865, 0.0, 0.9059745918707315, 1.281637269503546, 0.8304788848039216, 0.6910678562801932, 0.3335592968749999, 0.0), # 41
(0.6538001278772378, 1.3354145454545456, 1.0348552173913044, 0.5534047058823529, 0.3200259574468085, 0.0, 0.9009328335832084, 1.280103829787234, 0.8301070588235294, 0.6899034782608696, 0.3338536363636364, 0.0), # 42
(0.6549761269181587, 1.3365716761363635, 1.0329795199275362, 0.5531374101307189, 0.31961291666666664, 0.0, 0.8955823359153756, 1.2784516666666665, 0.8297061151960784, 0.688653013285024, 0.3341429190340909, 0.0), # 43
(0.656127205882353, 1.337708409090909, 1.0309783333333333, 0.552851045751634, 0.31917078014184397, 0.0, 0.8899355363984673, 1.2766831205673759, 0.829276568627451, 0.6873188888888889, 0.33442710227272726, 0.0), # 44
(0.6572540361253196, 1.3388245738636362, 1.0288552989130437, 0.5525459558823529, 0.3187001329787234, 0.0, 0.8840048725637182, 1.2748005319148936, 0.8288189338235293, 0.6859035326086957, 0.33470614346590905, 0.0), # 45
(0.6583572890025576, 1.33992, 1.0266140579710146, 0.5522224836601306, 0.31820156028368796, 0.0, 0.8778027819423623, 1.2728062411347518, 0.828333725490196, 0.6844093719806764, 0.33498, 0.0), # 46
(0.6594376358695653, 1.3409945170454545, 1.0242582518115941, 0.5518809722222222, 0.3176756471631206, 0.0, 0.8713417020656339, 1.2707025886524823, 0.8278214583333333, 0.6828388345410626, 0.3352486292613636, 0.0), # 47
(0.6604957480818415, 1.3420479545454547, 1.0217915217391305, 0.5515217647058823, 0.3171229787234042, 0.0, 0.8646340704647678, 1.268491914893617, 0.8272826470588235, 0.6811943478260869, 0.33551198863636367, 0.0), # 48
(0.6615322969948849, 1.3430801420454541, 1.0192175090579711, 0.5511452042483661, 0.31654414007092196, 0.0, 0.8576923246709978, 1.2661765602836879, 0.8267178063725492, 0.6794783393719807, 0.33577003551136353, 0.0), # 49
(0.6625479539641944, 1.3440909090909086, 1.0165398550724638, 0.5507516339869282, 0.31593971631205675, 0.0, 0.8505289022155589, 1.263758865248227, 0.8261274509803923, 0.6776932367149758, 0.33602272727272714, 0.0), # 50
(0.6635433903452687, 1.3450800852272726, 1.0137622010869565, 0.5503413970588236, 0.31531029255319143, 0.0, 0.8431562406296852, 1.2612411702127657, 0.8255120955882354, 0.6758414673913044, 0.33627002130681816, 0.0), # 51
(0.6645192774936062, 1.3460474999999998, 1.010888188405797, 0.5499148366013071, 0.31465645390070923, 0.0, 0.8355867774446111, 1.258625815602837, 0.8248722549019608, 0.673925458937198, 0.33651187499999996, 0.0), # 52
(0.6654762867647059, 1.3469929829545455, 1.0079214583333334, 0.549472295751634, 0.31397878546099295, 0.0, 0.8278329501915709, 1.2559151418439718, 0.824208443627451, 0.6719476388888889, 0.3367482457386364, 0.0), # 53
(0.6664150895140666, 1.3479163636363634, 1.004865652173913, 0.5490141176470589, 0.31327787234042553, 0.0, 0.8199071964017991, 1.2531114893617021, 0.8235211764705883, 0.6699104347826086, 0.33697909090909084, 0.0), # 54
(0.6673363570971866, 1.348817471590909, 1.001724411231884, 0.5485406454248366, 0.31255429964539005, 0.0, 0.8118219536065301, 1.2502171985815602, 0.822810968137255, 0.6678162741545893, 0.33720436789772723, 0.0), # 55
(0.6682407608695652, 1.3496961363636362, 0.9985013768115942, 0.5480522222222222, 0.3118086524822695, 0.0, 0.8035896593369982, 1.247234609929078, 0.8220783333333334, 0.6656675845410628, 0.33742403409090904, 0.0), # 56
(0.6691289721867009, 1.3505521875000002, 0.9952001902173913, 0.5475491911764706, 0.3110415159574468, 0.0, 0.7952227511244377, 1.2441660638297871, 0.821323786764706, 0.6634667934782609, 0.33763804687500004, 0.0), # 57
(0.6700016624040921, 1.3513854545454542, 0.991824492753623, 0.5470318954248365, 0.31025347517730495, 0.0, 0.7867336665000834, 1.2410139007092198, 0.8205478431372549, 0.6612163285024154, 0.33784636363636356, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
64, # 1
)
| 113.373134 | 220 | 0.729753 |
25ede33468fa00e9ce110dd26b19a90f81697eec | 93 | py | Python | anuvaad-etl/anuvaad-extractor/content-handler/src/utilities/__init__.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | anuvaad-etl/anuvaad-extractor/content-handler/src/utilities/__init__.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | anuvaad-etl/anuvaad-extractor/content-handler/src/utilities/__init__.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | from .app_context import AppContext
from .pymongo_data_handling import normalize_bson_to_json | 46.5 | 57 | 0.903226 |
71f3625e1204814d0de5e90dedde32752cb891cb | 335 | py | Python | src/PriceComparer/price_offer_validation_pipeline.py | bugsancho/PriceComparer | 21d1c1b38c1d570c914f5d870ed3f3dedb15a886 | [
"MIT"
] | null | null | null | src/PriceComparer/price_offer_validation_pipeline.py | bugsancho/PriceComparer | 21d1c1b38c1d570c914f5d870ed3f3dedb15a886 | [
"MIT"
] | null | null | null | src/PriceComparer/price_offer_validation_pipeline.py | bugsancho/PriceComparer | 21d1c1b38c1d570c914f5d870ed3f3dedb15a886 | [
"MIT"
] | null | null | null | from scrapy.exceptions import DropItem
class PriceOfferValidationPipeline(object):
def process_item(self, item, spider):
if not(('discounted_price' in item and item['discounted_price']) or
('price' in item and item['price'])):
raise DropItem("Missing price in %s" % item)
return item
| 27.916667 | 75 | 0.653731 |
1cc0c4dfd6efb7bed5953393ff9b3b6f2f744e04 | 1,238 | py | Python | algo/problems/subarray_with_given_sum.py | avi3tal/knowledgebase | fd30805aa94332a6c14c9d8631c7044673fb3e2c | [
"MIT"
] | null | null | null | algo/problems/subarray_with_given_sum.py | avi3tal/knowledgebase | fd30805aa94332a6c14c9d8631c7044673fb3e2c | [
"MIT"
] | null | null | null | algo/problems/subarray_with_given_sum.py | avi3tal/knowledgebase | fd30805aa94332a6c14c9d8631c7044673fb3e2c | [
"MIT"
] | 1 | 2021-11-19T13:45:59.000Z | 2021-11-19T13:45:59.000Z | """
http://practice.geeksforgeeks.org/problems/subarray-with-given-sum/0
Example:
Input:
2
5 12
1 2 3 7 5
10 15
1 2 3 4 5 6 7 8 9 10
Output:
2 4
1 5
"""
def find_sub_array_with_given_sum(arr, expected_sum):
    """Find a contiguous subarray of non-negative integers with a given sum.

    Uses the classic sliding-window technique: extend the window on the
    right, then shrink from the left while the running total overshoots
    the target. (Correct only for non-negative elements, as in the
    original problem statement.)

    Args:
        arr: sequence of non-negative integers.
        expected_sum: target sum of the subarray.

    Returns:
        A string "<start> <end>" with the 1-based inclusive indices of the
        first matching window, or -1 if no such subarray exists.
    """
    start = 1  # 1-based index of the window's left edge
    total = 0
    for end, value in enumerate(arr, start=1):
        total += value
        # Shrink from the left while the window overshoots the target.
        # (The original code skipped adding the current element while
        # shrinking and removed at most one element per iteration, which
        # missed valid windows such as [2, 3] in [10, 2, 3] for sum 5.)
        while total > expected_sum and start <= end:
            total -= arr[start - 1]
            start += 1
        if total == expected_sum and start <= end:
            return "{} {}".format(start, end)
    return -1
def main():
    """Interactive driver for the subarray-sum solver.

    Input format (stdin):
      * first line contains one integer: T = number of tests
      * for each test, one line with two integers "N S" (element count and
        target sum), then one line with the N space-separated integers.

    Fixes over the original: ``raw_input`` and ``dict.iteritems`` do not
    exist on Python 3, and ``map`` there returns a lazy iterator the solver
    cannot index; inputs are now read with ``input()`` into real lists.
    """
    t = int(input("Enter number of tests: "))
    results = {}
    for test_no in range(1, t + 1):
        # N is parsed to honour the input format but is not otherwise used.
        n, s = (int(tok) for tok in
                input("Enter Number of elements and Sum expected: ").split())
        arr = [int(tok) for tok in input("Enter int array: ").split()]
        results[test_no] = find_sub_array_with_given_sum(arr, s)
    for test_no, answer in results.items():
        print("arr number {} is {}".format(test_no, answer))
if __name__ == "__main__":
main()
| 22.509091 | 105 | 0.57916 |
641ae7c537f824c0fddd14768e335d0fcf684e86 | 183 | py | Python | kattis/codetosavelives.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | 2 | 2020-08-01T22:53:32.000Z | 2020-08-31T22:45:35.000Z | kattis/codetosavelives.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | null | null | null | kattis/codetosavelives.py | terror/Solutions | 1ad33daec95b565a38ac4730261593bcf249ac86 | [
"CC0-1.0"
] | null | null | null | def main(T):
for _ in range(T):
print(' '.join(list(str(sum([int(''.join(input().split())), int(''.join(input().split()))])))))
# The first stdin line holds the number of test cases T.
if __name__ == '__main__':
    main(int(input()))
| 26.142857 | 99 | 0.562842 |
c4fecfbf59b58a9c456179446aa55ccb8ff31281 | 1,199 | py | Python | google/ads/googleads/v9/enums/types/price_extension_price_qualifier.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/enums/types/price_extension_price_qualifier.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/enums/types/price_extension_price_qualifier.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# NOTE(review): this module appears to be generated from the Google Ads API
# .proto sources (proto-plus wrapper); prefer regenerating over hand-editing.
__protobuf__ = proto.module(
    package="google.ads.googleads.v9.enums",
    marshal="google.ads.googleads.v9",
    manifest={"PriceExtensionPriceQualifierEnum",},
)
class PriceExtensionPriceQualifierEnum(proto.Message):
    r"""Container for enum describing a price extension price
    qualifier.
    """
    class PriceExtensionPriceQualifier(proto.Enum):
        r"""Enums of price extension price qualifier."""
        # Enum member numbers are part of the API wire format; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        FROM = 2
        UP_TO = 3
        AVERAGE = 4
# Re-export exactly the names declared in the protobuf manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| 28.547619 | 74 | 0.709758 |
09b76f24556a8d87a269f2a7638d0a9c92e602d6 | 861 | py | Python | actor_critic/inference.py | novicasarenac/car-racing-rl | 5bb3b2c47fb6ceda3e8f2c149485652da5a079ba | [
"MIT"
] | 10 | 2019-08-08T03:17:39.000Z | 2021-12-15T08:43:29.000Z | actor_critic/inference.py | novicasarenac/car-racing-rl | 5bb3b2c47fb6ceda3e8f2c149485652da5a079ba | [
"MIT"
] | 7 | 2019-11-29T04:00:22.000Z | 2022-03-11T23:38:20.000Z | actor_critic/inference.py | novicasarenac/car-racing-rl | 5bb3b2c47fb6ceda3e8f2c149485652da5a079ba | [
"MIT"
] | 4 | 2019-11-28T10:14:48.000Z | 2020-04-08T08:10:37.000Z | import gym
import torch
from actor_critic.actor_critic import ActorCritic
from actor_critic.actions import get_action_space, get_actions
from actor_critic.environment_wrapper import EnvironmentWrapper
def actor_critic_inference(params, path):
    """Play one CarRacing episode with a trained actor-critic policy.

    Loads the network weights stored at *path*, rolls the policy out in a
    freshly created ``CarRacing-v0`` environment (rendering every frame and
    printing each chosen action), and returns the accumulated episode reward.

    :param params: configuration object; only ``stack_size`` is read here
    :param path: filesystem path of the saved ``state_dict``
    :return: total reward collected over the episode
    """
    policy = ActorCritic(params.stack_size, get_action_space())
    policy.load_state_dict(torch.load(path))
    policy.eval()

    env_wrapper = EnvironmentWrapper(gym.make('CarRacing-v0'), params.stack_size)
    state = torch.Tensor([env_wrapper.reset()])

    episode_reward = 0
    finished = False
    while not finished:
        probs, _, _ = policy(state)
        action = get_actions(probs)
        print(action)
        next_state, reward, finished = env_wrapper.step(action[0])
        state = torch.Tensor([next_state])
        episode_reward += reward
        env_wrapper.render()
    return episode_reward
| 29.689655 | 63 | 0.706156 |
de800f23266d71d98d03cf3486e79ca352e3f670 | 1,939 | py | Python | locations/spiders/aldi_nord_be.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | locations/spiders/aldi_nord_be.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | locations/spiders/aldi_nord_be.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import scrapy
from locations.items import GeojsonPointItem
class AldiNordBESpider(scrapy.Spider):
    """Scrapy spider that collects ALDI Nord store locations from www.aldi.be."""

    name = "aldi_nord_be"
    item_attributes = {"brand": "ALDI Nord", "brand_wikidata": "Q41171373"}
    allowed_domains = ["www.aldi.be"]
    start_urls = [
        "https://www.aldi.be/nl/informatie/winkels-en-openingsuren.html",
    ]

    def parse(self, response):
        """Walk the overview pages down to the individual store pages."""
        # Links to intermediate listing pages (region/city overviews).
        overview_links = response.xpath(
            '//div[@class="mod-stores__multicolumn"]/p/a/@href'
        ).extract()
        # Direct links to single-store detail pages.
        store_links = response.xpath(
            '//div[@class="mod mod-stores"]//div[@class="mod-stores__overview-company-tools"]/a/@href'
        ).extract()

        if store_links and not overview_links:
            for link in store_links:
                yield scrapy.Request(
                    response.urljoin(link), callback=self.parse_store
                )
        else:
            for link in overview_links:
                yield scrapy.Request(response.urljoin(link))

    def parse_store(self, response):
        """Extract one GeojsonPointItem from a store detail page."""
        # The last URL path component (sans trailing "/" or ".html") is used
        # as a stable per-store reference.
        store_ref = re.search(r".+/(.+?)/?(?:\.html|$)", response.url).group(1)
        # Two-letter country code taken from the domain (e.g. "aldi.be").
        country_code = re.search(r"aldi\.(\w{2}?)\/", response.url).group(1)

        yield GeojsonPointItem(
            ref=store_ref,
            name=response.xpath(
                '//div[@class="mod-overview-intro__content"]/h1/text()'
            ).extract_first(),
            addr_full=response.xpath(
                'normalize-space(//span[@itemprop="streetAddress"]//text())'
            ).extract_first(),
            city=response.xpath(
                'normalize-space(//span[@itemprop="addressLocality"]//text())'
            ).extract_first(),
            postcode=response.xpath(
                'normalize-space(//span[@itemprop="postalCode"]//text())'
            ).extract_first(),
            country=country_code,
            website=response.url,
        )
| 34.017544 | 102 | 0.553894 |
3217a5470818286008bd1a57b117c9dc0e198b40 | 1,053 | py | Python | docs/core/howto/listings/ssl/check_server_certificate.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | 1 | 2019-02-08T18:37:42.000Z | 2019-02-08T18:37:42.000Z | docs/core/howto/listings/ssl/check_server_certificate.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | 5 | 2020-06-05T18:16:39.000Z | 2022-01-13T00:45:49.000Z | docs/core/howto/listings/ssl/check_server_certificate.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | 1 | 2019-12-26T21:34:29.000Z | 2019-12-26T21:34:29.000Z | from __future__ import print_function
import sys
from twisted.internet import defer, endpoints, protocol, ssl, task, error
def main(reactor, host, port=443):
    """Connect to host:port over TLS, print the server certificate, and exit.

    Returns a Deferred that fires once the connection has been closed, which
    lets ``task.react`` shut the reactor down cleanly.
    """
    options = ssl.optionsForClientTLS(hostname=host.decode('utf-8'))
    port = int(port)
    class ShowCertificate(protocol.Protocol):
        # One-shot protocol: sends a minimal HTTP request to drive the TLS
        # handshake to completion, then inspects the peer certificate.
        def connectionMade(self):
            self.transport.write(b"GET / HTTP/1.0\r\n\r\n")
            self.done = defer.Deferred()
        def dataReceived(self, data):
            # Receiving any data implies the handshake succeeded; report the
            # certificate and hang up.
            certificate = ssl.Certificate(self.transport.getPeerCertificate())
            print("OK:", certificate)
            self.transport.abortConnection()
        def connectionLost(self, reason):
            print("Lost.")
            # A deliberate close is fine; anything else is a TLS/socket error.
            if not reason.check(error.ConnectionClosed):
                print("BAD:", reason.value)
            self.done.callback(None)
    return endpoints.connectProtocol(
        endpoints.SSL4ClientEndpoint(reactor, host, port, options),
        ShowCertificate()
    ).addCallback(lambda protocol: protocol.done)
# Run main() under the reactor, passing (host[, port]) from the command line.
task.react(main, sys.argv[1:])
| 36.310345 | 78 | 0.653371 |
e09303e2ca9d08237bfc99bec93fd7d49d90855a | 7,168 | py | Python | torba/tests/client_tests/unit/test_ledger.py | mittalkartik2000/lbry-sdk | a07b17ec0c9c5d0a88bc730caf6ab955e0971b38 | [
"MIT"
] | null | null | null | torba/tests/client_tests/unit/test_ledger.py | mittalkartik2000/lbry-sdk | a07b17ec0c9c5d0a88bc730caf6ab955e0971b38 | [
"MIT"
] | null | null | null | torba/tests/client_tests/unit/test_ledger.py | mittalkartik2000/lbry-sdk | a07b17ec0c9c5d0a88bc730caf6ab955e0971b38 | [
"MIT"
] | null | null | null | import os
from binascii import hexlify
from torba.coin.bitcoinsegwit import MainNetLedger
from torba.client.wallet import Wallet
from client_tests.unit.test_transaction import get_transaction, get_output
from client_tests.unit.test_headers import BitcoinHeadersTestCase, block_bytes
class MockNetwork:
    """In-memory stand-in for the ledger's network client.

    Serves canned history/transaction data and records which addresses and
    transactions were requested so tests can assert on the exact sequence
    of network calls.
    """

    def __init__(self, history, transaction):
        # Canned responses returned by get_history / get_transaction.
        self.history = history
        self.transaction = transaction
        # Call bookkeeping inspected by the tests.
        self.address = None
        self.get_history_called = []
        self.get_transaction_called = []
        self.is_connected = False

    def retriable_call(self, function, *args, **kwargs):
        # No retry machinery needed in tests; invoke the target directly.
        return function(*args, **kwargs)

    async def get_history(self, address):
        self.address = address
        self.get_history_called.append(address)
        return self.history

    async def get_merkle(self, txid, height):
        # Fixed merkle proof, sufficient for the synchronization tests.
        return {'pos': 1, 'merkle': ['abcd01']}

    async def get_transaction(self, tx_hash, _=None):
        self.get_transaction_called.append(tx_hash)
        return self.transaction[tx_hash]
class LedgerTestCase(BitcoinHeadersTestCase):
    """Base class for ledger tests: in-memory ledger plus header helpers."""

    async def asyncSetUp(self):
        # Both the wallet database and the header store live in memory so
        # every test starts from a clean slate.
        self.ledger = MainNetLedger({
            'db': MainNetLedger.database_class(':memory:'),
            'headers': MainNetLedger.headers_class(':memory:')
        })
        await self.ledger.db.open()

    async def asyncTearDown(self):
        await self.ledger.db.close()

    def make_header(self, **kwargs):
        """Serialize a block header built from defaults overridden by kwargs."""
        defaults = {
            'bits': 486604799,
            'block_height': 0,
            'merkle_root': b'4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b',
            'nonce': 2083236893,
            'prev_block_hash': b'0000000000000000000000000000000000000000000000000000000000000000',
            'timestamp': 1231006505,
            'version': 1
        }
        header = {**defaults, **kwargs}
        # The two hash fields must be exactly 64 bytes; pad short test values.
        header['merkle_root'] = header['merkle_root'].ljust(64, b'a')
        header['prev_block_hash'] = header['prev_block_hash'].ljust(64, b'0')
        return self.ledger.headers.serialize(header)

    def add_header(self, **kwargs):
        """Append a serialized header directly onto the header store."""
        raw = self.make_header(**kwargs)
        self.ledger.headers.io.seek(0, os.SEEK_END)
        self.ledger.headers.io.write(raw)
        # Invalidate the cached size so it is recomputed on next access.
        self.ledger.headers._size = None
class TestSynchronization(LedgerTestCase):
    """Checks that update_history fetches only new transactions per sync."""

    async def test_update_history(self):
        # Fresh address: no history recorded yet.
        account = self.ledger.account_class.generate(self.ledger, Wallet(), "torba")
        address = await account.receiving.get_or_create_usable_address()
        address_details = await self.ledger.db.get_address(address=address)
        self.assertEqual(address_details['history'], None)
        self.add_header(block_height=0, merkle_root=b'abcd04')
        self.add_header(block_height=1, merkle_root=b'abcd04')
        self.add_header(block_height=2, merkle_root=b'abcd04')
        self.add_header(block_height=3, merkle_root=b'abcd04')
        # Network serves three transactions at heights 0-2.
        self.ledger.network = MockNetwork([
            {'tx_hash': 'abcd01', 'height': 0},
            {'tx_hash': 'abcd02', 'height': 1},
            {'tx_hash': 'abcd03', 'height': 2},
        ], {
            'abcd01': hexlify(get_transaction(get_output(1)).raw),
            'abcd02': hexlify(get_transaction(get_output(2)).raw),
            'abcd03': hexlify(get_transaction(get_output(3)).raw),
        })
        # First sync: all three transactions must be downloaded.
        await self.ledger.update_history(address, '')
        self.assertListEqual(self.ledger.network.get_history_called, [address])
        self.assertListEqual(self.ledger.network.get_transaction_called, ['abcd01', 'abcd02', 'abcd03'])
        address_details = await self.ledger.db.get_address(address=address)
        self.assertEqual(
            address_details['history'],
            '252bda9b22cc902ca2aa2de3548ee8baf06b8501ff7bfb3b0b7d980dbd1bf792:0:'
            'ab9c0654dd484ac20437030f2034e25dcb29fc507e84b91138f80adc3af738f9:1:'
            'a2ae3d1db3c727e7d696122cab39ee20a7f81856dab7019056dd539f38c548a0:2:'
        )
        # Second sync with unchanged history: no transaction downloads.
        self.ledger.network.get_history_called = []
        self.ledger.network.get_transaction_called = []
        await self.ledger.update_history(address, '')
        self.assertListEqual(self.ledger.network.get_history_called, [address])
        self.assertListEqual(self.ledger.network.get_transaction_called, [])
        # One new transaction appears: only that one is downloaded.
        self.ledger.network.history.append({'tx_hash': 'abcd04', 'height': 3})
        self.ledger.network.transaction['abcd04'] = hexlify(get_transaction(get_output(4)).raw)
        self.ledger.network.get_history_called = []
        self.ledger.network.get_transaction_called = []
        await self.ledger.update_history(address, '')
        self.assertListEqual(self.ledger.network.get_history_called, [address])
        self.assertListEqual(self.ledger.network.get_transaction_called, ['abcd04'])
        address_details = await self.ledger.db.get_address(address=address)
        self.assertEqual(
            address_details['history'],
            '252bda9b22cc902ca2aa2de3548ee8baf06b8501ff7bfb3b0b7d980dbd1bf792:0:'
            'ab9c0654dd484ac20437030f2034e25dcb29fc507e84b91138f80adc3af738f9:1:'
            'a2ae3d1db3c727e7d696122cab39ee20a7f81856dab7019056dd539f38c548a0:2:'
            '047cf1d53ef68f0fd586d46f90c09ff8e57a4180f67e7f4b8dd0135c3741e828:3:'
        )
class MocHeaderNetwork(MockNetwork):
    """Mock network that serves only canned get_headers responses."""

    def __init__(self, responses):
        super().__init__(None, None)
        # Maps a starting height to the prepared get_headers response dict.
        self.responses = responses
    async def get_headers(self, height, blocks):
        return self.responses[height]
class BlockchainReorganizationTests(LedgerTestCase):
    """Checks that the ledger recovers when the chain tip is reorganized."""

    async def test_1_block_reorganization(self):
        # Responses the ledger will request while walking back from the tip.
        self.ledger.network = MocHeaderNetwork({
            20: {'height': 20, 'count': 5, 'hex': hexlify(
                self.get_bytes(after=block_bytes(20), upto=block_bytes(5))
            )},
            25: {'height': 25, 'count': 0, 'hex': b''}
        })
        headers = self.ledger.headers
        await headers.connect(0, self.get_bytes(upto=block_bytes(20)))
        # Append one fake header on top of the real chain (the block that
        # will be reorganized away).
        self.add_header(block_height=len(headers))
        self.assertEqual(headers.height, 20)
        await self.ledger.receive_header([{
            'height': 21, 'hex': hexlify(self.make_header(block_height=21))
        }])
    async def test_3_block_reorganization(self):
        self.ledger.network = MocHeaderNetwork({
            20: {'height': 20, 'count': 5, 'hex': hexlify(
                self.get_bytes(after=block_bytes(20), upto=block_bytes(5))
            )},
            21: {'height': 21, 'count': 1, 'hex': hexlify(self.make_header(block_height=21))},
            22: {'height': 22, 'count': 1, 'hex': hexlify(self.make_header(block_height=22))},
            25: {'height': 25, 'count': 0, 'hex': b''}
        })
        headers = self.ledger.headers
        await headers.connect(0, self.get_bytes(upto=block_bytes(20)))
        # Three fake headers on top of the real chain this time.
        self.add_header(block_height=len(headers))
        self.add_header(block_height=len(headers))
        self.add_header(block_height=len(headers))
        self.assertEqual(headers.height, 22)
        await self.ledger.receive_header(({
            'height': 23, 'hex': hexlify(self.make_header(block_height=23))
        },))
c1ea031df71c6256704b76bb7dfe252978435fb2 | 543 | py | Python | example/example_heatmap.py | summiee/tof_mass_calibration | 39d4248a24cee9ae2461b07cfe323be05bc8dd57 | [
"MIT"
] | null | null | null | example/example_heatmap.py | summiee/tof_mass_calibration | 39d4248a24cee9ae2461b07cfe323be05bc8dd57 | [
"MIT"
] | null | null | null | example/example_heatmap.py | summiee/tof_mass_calibration | 39d4248a24cee9ae2461b07cfe323be05bc8dd57 | [
"MIT"
] | null | null | null | import numpy as np
from camp.utils.mock import create_fake_trace
from camp.utils.heatmap import Heatmap
scale, offset, quantity = 10, -5, 1000
delays = scale * np.random.random(quantity) + offset
keys = ['delay','trace']
data = {key: [] for key in keys}
length_of_trace = 300
for delay in delays:
trace = create_fake_trace(length_of_trace, shift=delay, noise=0.3)
data['delay'].append(delay)
data['trace'].append(trace)
bins = [-5, 5, 1]
heatmap = Heatmap(data['trace'], data['delay'], bins)
heatmap.show()
heatmap.frequency() | 25.857143 | 70 | 0.707182 |
0c824f73c697deb02abf140e6383967ab5c3ca6e | 7,789 | py | Python | pyutilib/th/tests/test_misc.py | qtothec/pyutilib | 772671aa2a7b263b9a936099e70ec544c7fb397d | [
"BSD-3-Clause"
] | null | null | null | pyutilib/th/tests/test_misc.py | qtothec/pyutilib | 772671aa2a7b263b9a936099e70ec544c7fb397d | [
"BSD-3-Clause"
] | null | null | null | pyutilib/th/tests/test_misc.py | qtothec/pyutilib | 772671aa2a7b263b9a936099e70ec544c7fb397d | [
"BSD-3-Clause"
] | 1 | 2021-04-01T14:10:30.000Z | 2021-04-01T14:10:30.000Z | import os
import sys
from os.path import abspath, dirname
currdir = dirname(abspath(__file__)) + os.sep
import pyutilib.th as unittest
class Tester(unittest.TestCase):
    """Exercises assertFileEqualsBaseline against the fixtures in this
    directory (file1/file2 as .txt, .zip and .txt.gz).

    Matching pairs must pass; mismatched pairs are marked
    @unittest.expectedFailure.  ZIP cases are skipped on Python < 2.6 and
    GZ cases on 3.0 <= Python < 3.3.  The file3.* fixtures are referenced
    only from expected-failure cases.
    """

    def test1(self):
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt', currdir + 'file1.txt', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.txt', currdir + 'file2.txt', delete=False)
    @unittest.expectedFailure
    def test2(self):
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt', currdir + 'file2.txt', delete=False)
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test3(self):
        # Every txt/zip combination of a file compared with itself.
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt', currdir + 'file1.zip', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file1.zip', currdir + 'file1.txt', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file1.zip', currdir + 'file1.zip', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.txt', currdir + 'file2.zip', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.zip', currdir + 'file2.txt', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.zip', currdir + 'file2.zip', delete=False)
    @unittest.skipIf(sys.version_info[:2] >= (3, 0) and sys.version_info[:2] <
                     (3, 3), "Skipping tests with GZ files.")
    def test3gz(self):
        # Same matrix as test3, but with gzip-compressed baselines.
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt', currdir + 'file1.txt.gz', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt.gz', currdir + 'file1.txt', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt.gz', currdir + 'file1.txt.gz', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.txt', currdir + 'file2.txt.gz', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.txt.gz', currdir + 'file2.txt', delete=False)
        self.assertFileEqualsBaseline(
            currdir + 'file2.txt.gz', currdir + 'file2.txt.gz', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test4(self):
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt', currdir + 'file3.zip', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test5(self):
        self.assertFileEqualsBaseline(
            currdir + 'file3.zip', currdir + 'file1.txt', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test6(self):
        self.assertFileEqualsBaseline(
            currdir + 'file1.zip', currdir + 'file3.txt', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test7(self):
        self.assertFileEqualsBaseline(
            currdir + 'file3.zip', currdir + 'file3.zip', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test8(self):
        self.assertFileEqualsBaseline(
            currdir + 'file1.zip', currdir + 'file2.zip', delete=False)
    @unittest.expectedFailure
    def test8gz(self):
        self.assertFileEqualsBaseline(
            currdir + 'file1.txt.gz', currdir + 'file2.txt.gz', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] >= (2, 6),
                     "Skipping tests that don't fail.")
    def test9(self):
        # Only runs on Python < 2.6, where the ZIP comparison is expected
        # to fail (see the skip message above).
        self.assertFileEqualsBaseline(
            currdir + 'file1.zip', currdir + 'file2.zip', delete=False)
class TesterL(unittest.TestCase):
    """Same fixture matrix as Tester, but exercising
    assertFileEqualsLargeBaseline instead of assertFileEqualsBaseline.
    """

    def test1(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt', currdir + 'file1.txt', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.txt', currdir + 'file2.txt', delete=False)
    @unittest.expectedFailure
    def test2(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt', currdir + 'file2.txt', delete=False)
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test3(self):
        # Every txt/zip combination of a file compared with itself.
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt', currdir + 'file1.zip', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.zip', currdir + 'file1.txt', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.zip', currdir + 'file1.zip', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.txt', currdir + 'file2.zip', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.zip', currdir + 'file2.txt', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.zip', currdir + 'file2.zip', delete=False)
    @unittest.skipIf(sys.version_info[:2] >= (3, 0) and sys.version_info[:2] <
                     (3, 3), "Skipping tests with GZ files.")
    def test3gz(self):
        # Same matrix as test3, but with gzip-compressed baselines.
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt', currdir + 'file1.txt.gz', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt.gz', currdir + 'file1.txt', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt.gz', currdir + 'file1.txt.gz', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.txt', currdir + 'file2.txt.gz', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.txt.gz', currdir + 'file2.txt', delete=False)
        self.assertFileEqualsLargeBaseline(
            currdir + 'file2.txt.gz', currdir + 'file2.txt.gz', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test4(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt', currdir + 'file3.zip', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test5(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file3.zip', currdir + 'file1.txt', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test6(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.zip', currdir + 'file3.txt', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test7(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file3.zip', currdir + 'file3.zip', delete=False)
    @unittest.expectedFailure
    @unittest.skipIf(sys.version_info[:2] < (2, 6),
                     "Skipping tests with ZIP files.")
    def test8(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.zip', currdir + 'file2.zip', delete=False)
    @unittest.expectedFailure
    def test8gz(self):
        self.assertFileEqualsLargeBaseline(
            currdir + 'file1.txt.gz', currdir + 'file2.txt.gz', delete=False)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
54235fdb17cae37c20670e7dbf21de95b230d040 | 1,760 | py | Python | google/ads/googleads/v6/resources/types/carrier_constant.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/resources/types/carrier_constant.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/resources/types/carrier_constant.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# NOTE(review): this module appears to be generated from the Google Ads API
# .proto sources (proto-plus wrapper); prefer regenerating over hand-editing.
__protobuf__ = proto.module(
    package="google.ads.googleads.v6.resources",
    marshal="google.ads.googleads.v6",
    manifest={"CarrierConstant",},
)
class CarrierConstant(proto.Message):
    r"""A carrier criterion that can be used in campaign targeting.
    Attributes:
        resource_name (str):
            Output only. The resource name of the carrier criterion.
            Carrier criterion resource names have the form:
            ``carrierConstants/{criterion_id}``
        id (int):
            Output only. The ID of the carrier criterion.
        name (str):
            Output only. The full name of the carrier in
            English.
        country_code (str):
            Output only. The country code of the country
            where the carrier is located, e.g., "AR", "FR",
            etc.
    """
    # Field numbers are part of the protobuf wire format; do not renumber.
    resource_name = proto.Field(proto.STRING, number=1)
    id = proto.Field(proto.INT64, number=5, optional=True)
    name = proto.Field(proto.STRING, number=6, optional=True)
    country_code = proto.Field(proto.STRING, number=7, optional=True)
# Re-export exactly the names declared in the protobuf manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| 32 | 74 | 0.676136 |
9bd3831e7e4cbb26c5606e19ed26b4647b80eeac | 14,011 | py | Python | python/federatedml/feature/hetero_feature_binning/hetero_binning_guest.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/federatedml/feature/hetero_feature_binning/hetero_binning_guest.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/federatedml/feature/hetero_feature_binning/hetero_binning_guest.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
import numpy as np
from federatedml.cipher_compressor.packer import GuestIntegerPacker
from federatedml.feature.binning.iv_calculator import IvCalculator
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
from federatedml.feature.binning.optimal_binning.optimal_binning import OptimalBinning
from federatedml.feature.hetero_feature_binning.base_feature_binning import BaseFeatureBinning
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.fate_paillier import PaillierEncryptedNumber
from federatedml.statistic import data_overview
from federatedml.statistic import statics
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroFeatureBinningGuest(BaseFeatureBinning):
def __init__(self):
super().__init__()
self._packer: GuestIntegerPacker = None
def fit(self, data_instances):
"""
Apply binning method for both data instances in local party as well as the other one. Afterwards, calculate
the specific metric value for specific columns. Currently, iv is support for binary labeled data only.
"""
LOGGER.info("Start feature binning fit and transform")
self._abnormal_detection(data_instances)
# self._parse_cols(data_instances)
self._setup_bin_inner_param(data_instances, self.model_param)
if self.model_param.method == consts.OPTIMAL:
has_missing_value = self.iv_calculator.check_containing_missing_value(data_instances)
for idx in self.bin_inner_param.bin_indexes:
if idx in has_missing_value:
raise ValueError(f"Optimal Binning do not support missing value now.")
split_points = self.binning_obj.fit_split_points(data_instances)
if self.model_param.skip_static:
self.transform(data_instances)
return self.data_output
label_counts_dict = data_overview.get_label_count(data_instances)
if len(label_counts_dict) > 2:
if self.model_param.method == consts.OPTIMAL:
raise ValueError("Have not supported optimal binning in multi-class data yet")
self.labels = list(label_counts_dict.keys())
label_counts = [label_counts_dict[k] for k in self.labels]
label_table = IvCalculator.convert_label(data_instances, self.labels)
self.bin_result = self.iv_calculator.cal_local_iv(data_instances=data_instances,
split_points=split_points,
labels=self.labels,
label_counts=label_counts,
bin_cols_map=self.bin_inner_param.get_need_cal_iv_cols_map(),
label_table=label_table)
if self.model_param.local_only:
self.transform(data_instances)
self.set_summary(self.bin_result.summary())
return self.data_output
if self.model_param.encrypt_param.method == consts.PAILLIER:
paillier_encryptor = PaillierEncrypt()
paillier_encryptor.generate_key(self.model_param.encrypt_param.key_length)
else:
raise NotImplementedError("encrypt method not supported yet")
self._packer = GuestIntegerPacker(pack_num=len(self.labels), pack_num_range=label_counts,
encrypter=paillier_encryptor)
self.federated_iv(data_instances=data_instances, label_table=label_table,
result_counts=label_counts_dict, label_elements=self.labels)
total_summary = self.bin_result.summary()
for host_res in self.host_results:
total_summary = self._merge_summary(total_summary, host_res.summary())
self.set_schema(data_instances)
self.transform(data_instances)
LOGGER.info("Finish feature binning fit and transform")
self.set_summary(total_summary)
return self.data_output
def federated_iv(self, data_instances, label_table, result_counts, label_elements):
converted_label_table = label_table.mapValues(lambda x: [int(i) for i in x])
encrypted_label_table = self._packer.pack_and_encrypt(converted_label_table)
self.transfer_variable.encrypted_label.remote(encrypted_label_table,
role=consts.HOST,
idx=-1)
encrypted_bin_sum_infos = self.transfer_variable.encrypted_bin_sum.get(idx=-1)
encrypted_bin_infos = self.transfer_variable.optimal_info.get(idx=-1)
LOGGER.info("Get encrypted_bin_sum from host")
for host_idx, encrypted_bin_info in enumerate(encrypted_bin_infos):
host_party_id = self.component_properties.host_party_idlist[host_idx]
encrypted_bin_sum = encrypted_bin_sum_infos[host_idx]
# assert 1 == 2, f"encrypted_bin_sum: {list(encrypted_bin_sum.collect())}"
result_counts_table = self._packer.decrypt_cipher_package_and_unpack(encrypted_bin_sum)
# LOGGER.debug(f"unpack result: {result_counts_table.first()}")
bin_result = self.cal_bin_results(data_instances=data_instances,
host_idx=host_idx,
encrypted_bin_info=encrypted_bin_info,
result_counts_table=result_counts_table,
result_counts=result_counts,
label_elements=label_elements)
bin_result.set_role_party(role=consts.HOST, party_id=host_party_id)
self.host_results.append(bin_result)
def host_optimal_binning(self, data_instances, host_idx, encrypted_bin_info, result_counts, category_names):
optimal_binning_params = encrypted_bin_info['optimal_params']
host_model_params = copy.deepcopy(self.model_param)
host_model_params.bin_num = optimal_binning_params.get('bin_num')
host_model_params.optimal_binning_param.metric_method = optimal_binning_params.get('metric_method')
host_model_params.optimal_binning_param.mixture = optimal_binning_params.get('mixture')
host_model_params.optimal_binning_param.max_bin_pct = optimal_binning_params.get('max_bin_pct')
host_model_params.optimal_binning_param.min_bin_pct = optimal_binning_params.get('min_bin_pct')
event_total, non_event_total = self.get_histogram(data_instances)
result_counts = dict(result_counts.collect())
optimal_binning_cols = {x: y for x, y in result_counts.items() if x not in category_names}
host_binning_obj = OptimalBinning(params=host_model_params, abnormal_list=self.binning_obj.abnormal_list)
host_binning_obj.event_total = event_total
host_binning_obj.non_event_total = non_event_total
host_binning_obj = self.optimal_binning_sync(host_binning_obj, optimal_binning_cols, data_instances.count(),
data_instances.partitions,
host_idx)
return host_binning_obj
def cal_bin_results(self, data_instances, host_idx, encrypted_bin_info, result_counts_table,
result_counts, label_elements):
host_bin_methods = encrypted_bin_info['bin_method']
category_names = encrypted_bin_info['category_names']
result_counts_dict = dict(result_counts_table.collect())
host_party_id = self.component_properties.host_party_idlist[host_idx]
if host_bin_methods == consts.OPTIMAL:
if len(result_counts) > 2:
raise ValueError("Have not supported optimal binning in multi-class data yet")
host_binning_obj = self.host_optimal_binning(data_instances, host_idx,
encrypted_bin_info, result_counts_table,
category_names)
optimal_counts = {}
for col_name, bucket_list in host_binning_obj.bucket_lists.items():
optimal_counts[col_name] = [np.array([b.event_count, b.non_event_count]) for b in bucket_list]
for col_name, counts in result_counts_dict.items():
if col_name in category_names:
optimal_counts[col_name] = counts
# LOGGER.debug(f"optimal_counts: {optimal_counts}")
bin_res = self.iv_calculator.cal_iv_from_counts(optimal_counts, labels=label_elements,
role=consts.HOST, party_id=host_party_id)
else:
bin_res = self.iv_calculator.cal_iv_from_counts(result_counts_table,
label_elements,
role=consts.HOST,
party_id=host_party_id)
return bin_res
@staticmethod
def convert_decompress_format(encrypted_bin_sum):
"""
Parameters
----------
encrypted_bin_sum : dict.
{"keys": ['x1', 'x2' ...],
"event_counts": [...],
"non_event_counts": [...],
bin_num": [...]
}
returns
-------
{'x1': [[event_count, non_event_count], [event_count, non_event_count] ... ],
'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ],
...
}
"""
result = {}
start = 0
event_counts = [int(x) for x in encrypted_bin_sum['event_counts']]
non_event_counts = [int(x) for x in encrypted_bin_sum['non_event_counts']]
for idx, k in enumerate(encrypted_bin_sum["keys"]):
bin_num = encrypted_bin_sum["bin_nums"][idx]
result[k] = list(zip(event_counts[start: start + bin_num], non_event_counts[start: start + bin_num]))
start += bin_num
assert start == len(event_counts) == len(non_event_counts), \
f"Length of event/non-event does not match " \
f"with bin_num sums, all_counts: {start}, length of event count: {len(event_counts)}," \
f"length of non_event_counts: {len(non_event_counts)}"
return result
@staticmethod
def _merge_summary(summary_1, summary_2):
def merge_single_label(s1, s2):
res = {}
for k, v in s1.items():
if k == 'iv':
v.extend(s2[k])
v = sorted(v, key=lambda p: p[1], reverse=True)
else:
v.update(s2[k])
res[k] = v
return res
res = {}
for label, s1 in summary_1.items():
s2 = summary_2.get(label)
res[label] = merge_single_label(s1, s2)
return res
@staticmethod
def encrypt(x, cipher):
if not isinstance(x, np.ndarray):
return cipher.encrypt(x)
res = []
for idx, value in enumerate(x):
res.append(cipher.encrypt(value))
return np.array(res)
@staticmethod
def __decrypt_bin_sum(encrypted_bin_sum, cipher):
    """
    Decrypt every Paillier-encrypted count in a table of bin sums.

    ``encrypted_bin_sum`` is assumed to be a table-like object exposing
    ``mapValues`` whose values are lists of count containers; each container
    is mutated in place, replacing ``PaillierEncryptedNumber`` entries with
    their decrypted values. Plain numbers are left untouched.
    """
    def _decrypt_counts(count_rows):
        decrypted_rows = []
        for row in count_rows:
            for position, count in enumerate(row):
                if isinstance(count, PaillierEncryptedNumber):
                    row[position] = cipher.decrypt(count)
            decrypted_rows.append(row)
        return decrypted_rows

    return encrypted_bin_sum.mapValues(_decrypt_counts)
@staticmethod
def load_data(data_instance):
data_instance = copy.deepcopy(data_instance)
# Here suppose this is a binary question and the event label is 1
if data_instance.label != 1:
data_instance.label = 0
return data_instance
def optimal_binning_sync(self, host_binning_obj, result_counts, sample_count, partitions, host_idx):
    """
    Fit optimal binning for one host party and send the split points back.

    The decrypted ``result_counts`` are first converted to a bucket-list
    table, the buckets are fitted, and the resulting (still encoded) split
    points are shipped to the host identified by ``host_idx`` through the
    ``bucket_idx`` transfer variable.

    Returns the fitted ``host_binning_obj``.
    """
    LOGGER.debug("Start host party optimal binning train")
    bucket_list_table = host_binning_obj.bin_sum_to_bucket_list(result_counts, partitions)
    host_binning_obj.fit_buckets(bucket_list_table, sample_count)
    split_points = host_binning_obj.bin_results.all_split_points
    # Send the encoded split points to the corresponding host via federation.
    self.transfer_variable.bucket_idx.remote(split_points,
                                             role=consts.HOST,
                                             idx=host_idx)
    return host_binning_obj
@staticmethod
def get_histogram(data_instances):
    """
    Count event (label == 1) and non-event (label == 0) samples.

    Returns
    -------
    tuple
        ``(event_total, non_event_total)`` taken from the label histogram
        of ``data_instances``; a missing label defaults to 0. A warning is
        logged when either total is zero, since downstream IV computation
        presumably cannot handle a single-class dataset.
    """
    summary_obj = statics.MultivariateStatisticalSummary(data_instances, cols_index=-1)
    label_histogram = summary_obj.get_label_histogram()
    event_total = label_histogram.get(1, 0)
    non_event_total = label_histogram.get(0, 0)
    if event_total == 0 or non_event_total == 0:
        LOGGER.warning(f"event_total or non_event_total might have errors, event_total: {event_total},"
                       f" non_event_total: {non_event_total}")
    return event_total, non_event_total
| 49.334507 | 119 | 0.633716 |
64167e853825309c41ef17212952c5b95180699c | 4,064 | py | Python | kubernetes/test/test_custom_objects_api.py | itholic/python | dffe577a062e17057270ae80fa677ffd83e9d183 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_custom_objects_api.py | itholic/python | dffe577a062e17057270ae80fa677ffd83e9d183 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_custom_objects_api.py | itholic/python | dffe577a062e17057270ae80fa677ffd83e9d183 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.custom_objects_api import CustomObjectsApi # noqa: E501
from kubernetes.client.rest import ApiException
class TestCustomObjectsApi(unittest.TestCase):
"""CustomObjectsApi unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.custom_objects_api.CustomObjectsApi() # noqa: E501
def tearDown(self):
pass
def test_create_cluster_custom_object(self):
"""Test case for create_cluster_custom_object
"""
pass
def test_create_namespaced_custom_object(self):
"""Test case for create_namespaced_custom_object
"""
pass
def test_delete_cluster_custom_object(self):
"""Test case for delete_cluster_custom_object
"""
pass
def test_delete_namespaced_custom_object(self):
"""Test case for delete_namespaced_custom_object
"""
pass
def test_get_cluster_custom_object(self):
"""Test case for get_cluster_custom_object
"""
pass
def test_get_cluster_custom_object_scale(self):
"""Test case for get_cluster_custom_object_scale
"""
pass
def test_get_cluster_custom_object_status(self):
"""Test case for get_cluster_custom_object_status
"""
pass
def test_get_namespaced_custom_object(self):
"""Test case for get_namespaced_custom_object
"""
pass
def test_get_namespaced_custom_object_scale(self):
"""Test case for get_namespaced_custom_object_scale
"""
pass
def test_get_namespaced_custom_object_status(self):
"""Test case for get_namespaced_custom_object_status
"""
pass
def test_list_cluster_custom_object(self):
"""Test case for list_cluster_custom_object
"""
pass
def test_list_namespaced_custom_object(self):
"""Test case for list_namespaced_custom_object
"""
pass
def test_patch_cluster_custom_object(self):
"""Test case for patch_cluster_custom_object
"""
pass
def test_patch_cluster_custom_object_scale(self):
"""Test case for patch_cluster_custom_object_scale
"""
pass
def test_patch_cluster_custom_object_status(self):
"""Test case for patch_cluster_custom_object_status
"""
pass
def test_patch_namespaced_custom_object(self):
"""Test case for patch_namespaced_custom_object
"""
pass
def test_patch_namespaced_custom_object_scale(self):
"""Test case for patch_namespaced_custom_object_scale
"""
pass
def test_patch_namespaced_custom_object_status(self):
"""Test case for patch_namespaced_custom_object_status
"""
pass
def test_replace_cluster_custom_object(self):
"""Test case for replace_cluster_custom_object
"""
pass
def test_replace_cluster_custom_object_scale(self):
"""Test case for replace_cluster_custom_object_scale
"""
pass
def test_replace_cluster_custom_object_status(self):
"""Test case for replace_cluster_custom_object_status
"""
pass
def test_replace_namespaced_custom_object(self):
"""Test case for replace_namespaced_custom_object
"""
pass
def test_replace_namespaced_custom_object_scale(self):
"""Test case for replace_namespaced_custom_object_scale
"""
pass
def test_replace_namespaced_custom_object_status(self):
"""Test case for replace_namespaced_custom_object_status
"""
pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 22.831461 | 124 | 0.673228 |
2c1fbeb0e90e2e3f6a1855363944d57ff2c3056e | 803 | py | Python | examples/Apriori_TEST.py | ProblemTryer/MachineLearning | e3a75d787078da2a03ca1b824a0c319e3020a4cd | [
"Apache-2.0"
] | 1 | 2021-05-11T05:05:29.000Z | 2021-05-11T05:05:29.000Z | examples/Apriori_TEST.py | ProblemTryer/MachineLearning | e3a75d787078da2a03ca1b824a0c319e3020a4cd | [
"Apache-2.0"
] | null | null | null | examples/Apriori_TEST.py | ProblemTryer/MachineLearning | e3a75d787078da2a03ca1b824a0c319e3020a4cd | [
"Apache-2.0"
] | null | null | null | """
@ Filename: Apriori_TEST.py
@ Author: Danc1elion
@ Create Date: 2019-05-28
@ Update Date: 2019-05-31
@ Description: Implement Apriori_TEST
"""
from AssociationAnalysis import Apriori
import numpy as np
import pandas as pd
import time
# Toy market-basket dataset: each inner list is one customer transaction.
trainData = [['bread', 'milk', 'vegetable', 'fruit', 'eggs'],
             ['noodle', 'beef', 'pork', 'water', 'socks', 'gloves', 'shoes', 'rice'],
             ['socks', 'gloves'],
             ['bread', 'milk', 'shoes', 'socks', 'eggs'],
             ['socks', 'shoes', 'sweater', 'cap', 'milk', 'vegetable', 'gloves'],
             ['eggs', 'bread', 'milk', 'fish', 'crab', 'shrimp', 'rice']]
# Time one Apriori association-rule run end-to-end.
# NOTE(review): time.perf_counter() would be a better wall-clock timer for
# benchmarking than time.time() — confirm before changing the measurement.
time_start1 = time.time()
clf1 = Apriori()
pred1 = clf1.train(trainData)
time_end1 = time.time()
print("Runtime of Apriori:", time_end1-time_start1)
| 30.884615 | 83 | 0.589041 |
0723441feaf23343514e031edbc0f847df144e6e | 7,630 | py | Python | sdk/python/pulumi_aws/servicequotas/get_service_quota.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/servicequotas/get_service_quota.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/servicequotas/get_service_quota.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this module (names exported on star-import).
__all__ = [
    'GetServiceQuotaResult',
    'AwaitableGetServiceQuotaResult',
    'get_service_quota',
]
@pulumi.output_type
class GetServiceQuotaResult:
    """
    A collection of values returned by getServiceQuota.
    """
    def __init__(__self__, adjustable=None, arn=None, default_value=None, global_quota=None, id=None, quota_code=None, quota_name=None, service_code=None, service_name=None, value=None):
        # (argument name, value, expected type, human-readable label) in
        # declaration order; mirrors the generated SDK's per-argument checks.
        validations = (
            ('adjustable', adjustable, bool, 'a bool'),
            ('arn', arn, str, 'a str'),
            ('default_value', default_value, float, 'a float'),
            ('global_quota', global_quota, bool, 'a bool'),
            ('id', id, str, 'a str'),
            ('quota_code', quota_code, str, 'a str'),
            ('quota_name', quota_name, str, 'a str'),
            ('service_code', service_code, str, 'a str'),
            ('service_name', service_name, str, 'a str'),
            ('value', value, float, 'a float'),
        )
        for arg_name, arg_value, arg_type, type_label in validations:
            # Only truthy values are type-checked before being stored,
            # matching the original generated behaviour.
            if arg_value and not isinstance(arg_value, arg_type):
                raise TypeError("Expected argument '%s' to be %s" % (arg_name, type_label))
            pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter
    def adjustable(self) -> bool:
        """
        Whether the service quota is adjustable.
        """
        return pulumi.get(self, "adjustable")

    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        Amazon Resource Name (ARN) of the service quota.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> float:
        """
        Default value of the service quota.
        """
        return pulumi.get(self, "default_value")

    @property
    @pulumi.getter(name="globalQuota")
    def global_quota(self) -> bool:
        """
        Whether the service quota is global for the AWS account.
        """
        return pulumi.get(self, "global_quota")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="quotaCode")
    def quota_code(self) -> str:
        """
        Quota code within the service.
        """
        return pulumi.get(self, "quota_code")

    @property
    @pulumi.getter(name="quotaName")
    def quota_name(self) -> str:
        """
        Quota name within the service.
        """
        return pulumi.get(self, "quota_name")

    @property
    @pulumi.getter(name="serviceCode")
    def service_code(self) -> str:
        """
        Service code for the quota.
        """
        return pulumi.get(self, "service_code")

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> str:
        """
        Name of the service.
        """
        return pulumi.get(self, "service_name")

    @property
    @pulumi.getter
    def value(self) -> float:
        """
        Current value of the service quota.
        """
        return pulumi.get(self, "value")
class AwaitableGetServiceQuotaResult(GetServiceQuotaResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this a generator function, which is
        # what allows the result object to be awaited; it resolves
        # immediately with a plain GetServiceQuotaResult copy.
        if False:
            yield self
        return GetServiceQuotaResult(
            adjustable=self.adjustable,
            arn=self.arn,
            default_value=self.default_value,
            global_quota=self.global_quota,
            id=self.id,
            quota_code=self.quota_code,
            quota_name=self.quota_name,
            service_code=self.service_code,
            service_name=self.service_name,
            value=self.value)
def get_service_quota(quota_code: Optional[str] = None,
                      quota_name: Optional[str] = None,
                      service_code: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceQuotaResult:
    """
    Retrieve information about a Service Quota.

    > **NOTE:** Global quotas apply to all AWS regions, but can only be accessed in `us-east-1` in the Commercial partition or `us-gov-west-1` in the GovCloud partition. In other regions, the AWS API will return the error `The request failed because the specified service does not exist.`

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    by_quota_code = aws.servicequotas.get_service_quota(quota_code="L-F678F1CE",
        service_code="vpc")
    by_quota_name = aws.servicequotas.get_service_quota(quota_name="VPCs per Region",
        service_code="vpc")
    ```

    :param str quota_code: Quota code within the service; when set, the quota is looked up directly.
    :param str quota_name: Quota name within the service; when set, all service quotas are searched for a matching name.
    :param str service_code: Service code for the quota (see the `servicequotas.getService` data source).
    """
    # Invoke arguments use the provider's camelCase wire names.
    __args__ = {
        'quotaCode': quota_code,
        'quotaName': quota_name,
        'serviceCode': service_code,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:servicequotas/getServiceQuota:getServiceQuota', __args__, opts=opts, typ=GetServiceQuotaResult).value
    return AwaitableGetServiceQuotaResult(
        adjustable=__ret__.adjustable,
        arn=__ret__.arn,
        default_value=__ret__.default_value,
        global_quota=__ret__.global_quota,
        id=__ret__.id,
        quota_code=__ret__.quota_code,
        quota_name=__ret__.quota_name,
        service_code=__ret__.service_code,
        service_name=__ret__.service_name,
        value=__ret__.value)
| 39.947644 | 342 | 0.667497 |
258aa8ead1ccab94b35b82fc5b2d676e715d2c1f | 2,054 | py | Python | salt/executors/sudo.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 5 | 2017-02-07T05:39:29.000Z | 2020-06-13T02:07:33.000Z | salt/executors/sudo.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/executors/sudo.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # -*- coding: utf-8 -*-
'''
Sudo executor module
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.json
import salt.utils.path
import salt.syspaths
from salt.ext import six
from salt.ext.six.moves import shlex_quote as _cmd_quote
# Virtual name under which Salt loads this executor module.
__virtualname__ = 'sudo'
def __virtual__():
    '''
    Only load this executor when the ``sudo`` binary is available on the
    minion and a ``sudo_user`` is configured.
    '''
    sudo_binary = salt.utils.path.which('sudo')
    if sudo_binary and __opts__.get('sudo_user'):
        return __virtualname__
    return False
def execute(opts, data, func, args, kwargs):
    '''
    Run an execution module call through ``sudo``.

    This executor is selected by the minion when the ``sudo_user`` minion
    config option is present:

    .. code-block:: yaml

        sudo_user: saltdev

    With the config above, every execution module call made by the minion is
    re-run as ``sudo -u <sudo_user> salt-call --out json --metadata ...``,
    and the JSON result of the inner ``salt-call`` is unwrapped and
    returned. The inner call's return code is published via ``__context__``.
    '''
    # Base command: salt-call as the configured sudo user, JSON output so
    # the result can be parsed below.
    command = [
        'sudo',
        '-u', opts.get('sudo_user'),
        'salt-call',
        '--out', 'json',
        '--metadata',
        '-c', opts.get('config_dir'),
        '--',
        data.get('fun'),
    ]
    # State runs must be marked concurrent, otherwise the inner salt-call
    # refuses to run while the outer minion holds the state lock.
    if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):
        kwargs['concurrent'] = True
    command.extend(_cmd_quote(six.text_type(positional)) for positional in args)
    command.extend(_cmd_quote('{0}={1}'.format(name, kwargs[name])) for name in kwargs)
    cmd_ret = __salt__['cmd.run_all'](command, use_vt=True, python_shell=False)
    if cmd_ret['retcode'] == 0:
        # Successful run: unwrap the inner salt-call's JSON payload.
        cmd_meta = salt.utils.json.loads(cmd_ret['stdout'])['local']
        ret = cmd_meta['return']
        __context__['retcode'] = cmd_meta.get('retcode', 0)
    else:
        ret = cmd_ret['stderr']
        __context__['retcode'] = cmd_ret['retcode']
    return ret
| 25.675 | 83 | 0.621714 |
cd5ece1d76746855e060929f7693e08840ae3ae9 | 16,254 | py | Python | sdk/python/pulumi_azure_nextgen/cdn/v20190415/endpoint.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/cdn/v20190415/endpoint.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/cdn/v20190415/endpoint.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
# Public API of this module (names exported on star-import).
__all__ = ['Endpoint']
class Endpoint(pulumi.CustomResource):
    """
    CDN endpoint is the entity within a CDN profile containing configuration
    information such as origin, protocol, content caching and delivery
    behavior. The CDN endpoint uses the URL format
    <endpointname>.azureedge.net.
    """

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 content_types_to_compress: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 delivery_policy: Optional[pulumi.Input[pulumi.InputType['EndpointPropertiesUpdateParametersDeliveryPolicyArgs']]] = None,
                 endpoint_name: Optional[pulumi.Input[str]] = None,
                 geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GeoFilterArgs']]]]] = None,
                 is_compression_enabled: Optional[pulumi.Input[bool]] = None,
                 is_http_allowed: Optional[pulumi.Input[bool]] = None,
                 is_https_allowed: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 optimization_type: Optional[pulumi.Input[Union[str, 'OptimizationType']]] = None,
                 origin_host_header: Optional[pulumi.Input[str]] = None,
                 origin_path: Optional[pulumi.Input[str]] = None,
                 origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DeepCreatedOriginArgs']]]]] = None,
                 probe_path: Optional[pulumi.Input[str]] = None,
                 profile_name: Optional[pulumi.Input[str]] = None,
                 query_string_caching_behavior: Optional[pulumi.Input['QueryStringCachingBehavior']] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Create a CDN Endpoint resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param content_types_to_compress: List of content types (valid MIME types) on which compression applies.
        :param delivery_policy: A policy that specifies the delivery rules to be used for an endpoint.
        :param endpoint_name: Name of the endpoint under the profile which is unique globally.
        :param geo_filters: Rules defining the user's geo access within a CDN endpoint.
        :param is_compression_enabled: Whether content compression is enabled on CDN. Default value is false.
        :param is_http_allowed: Whether HTTP traffic is allowed on the endpoint. Default value is true.
        :param is_https_allowed: Whether HTTPS traffic is allowed on the endpoint. Default value is true.
        :param location: Resource location.
        :param optimization_type: Scenario this CDN endpoint should optimize for, e.g. Download, Media services.
        :param origin_host_header: Host header value sent to the origin with each request.
        :param origin_path: Directory path on the origin that CDN can use to retrieve content from.
        :param origins: The source of the content being delivered via CDN. Required.
        :param probe_path: Path to a file hosted on the origin used to optimize dynamic content delivery.
        :param profile_name: Name of the CDN profile which is unique within the resource group. Required.
        :param query_string_caching_behavior: How CDN caches requests that include query strings.
        :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
        :param tags: Resource tags.
        """
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # Required properties are only enforced when creating a fresh
            # resource (no URN yet); checked in declaration order.
            for required_name, required_value in (
                    ('origins', origins),
                    ('profile_name', profile_name),
                    ('resource_group_name', resource_group_name)):
                if required_value is None and not opts.urn:
                    raise TypeError("Missing required property '{0}'".format(required_name))
            __props__ = dict()
            __props__.update(
                content_types_to_compress=content_types_to_compress,
                delivery_policy=delivery_policy,
                endpoint_name=endpoint_name,
                geo_filters=geo_filters,
                is_compression_enabled=is_compression_enabled,
                is_http_allowed=is_http_allowed,
                is_https_allowed=is_https_allowed,
                location=location,
                optimization_type=optimization_type,
                origin_host_header=origin_host_header,
                origin_path=origin_path,
                origins=origins,
                probe_path=probe_path,
                profile_name=profile_name,
                query_string_caching_behavior=query_string_caching_behavior,
                resource_group_name=resource_group_name,
                tags=tags,
            )
            # Output-only attributes start unset; the engine fills them in.
            for output_name in ('host_name', 'name', 'provisioning_state',
                                'resource_state', 'type'):
                __props__[output_name] = None
        # Older type tokens this resource was previously published under.
        alias_opts = pulumi.ResourceOptions(aliases=[
            pulumi.Alias(type_="azure-nextgen:cdn:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/latest:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20150601:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20160402:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20161002:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20170402:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20171012:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20190615:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20190615preview:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20191231:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20200331:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20200415:Endpoint"),
            pulumi.Alias(type_="azure-nextgen:cdn/v20200901:Endpoint"),
        ])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Endpoint, __self__).__init__(
            'azure-nextgen:cdn/v20190415:Endpoint',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Endpoint':
        """
        Get an existing Endpoint resource's state with the given name and id.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        return Endpoint(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="contentTypesToCompress")
    def content_types_to_compress(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """List of content types on which compression applies. The value should be a valid MIME type."""
        return pulumi.get(self, "content_types_to_compress")

    @property
    @pulumi.getter(name="deliveryPolicy")
    def delivery_policy(self) -> pulumi.Output[Optional['outputs.EndpointPropertiesUpdateParametersResponseDeliveryPolicy']]:
        """A policy that specifies the delivery rules to be used for an endpoint."""
        return pulumi.get(self, "delivery_policy")

    @property
    @pulumi.getter(name="geoFilters")
    def geo_filters(self) -> pulumi.Output[Optional[Sequence['outputs.GeoFilterResponse']]]:
        """List of rules defining the user's geo access within a CDN endpoint."""
        return pulumi.get(self, "geo_filters")

    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> pulumi.Output[str]:
        """The host name of the endpoint structured as {endpointName}.{DNSZone}, e.g. contoso.azureedge.net"""
        return pulumi.get(self, "host_name")

    @property
    @pulumi.getter(name="isCompressionEnabled")
    def is_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
        """Indicates whether content compression is enabled on CDN. Default value is false."""
        return pulumi.get(self, "is_compression_enabled")

    @property
    @pulumi.getter(name="isHttpAllowed")
    def is_http_allowed(self) -> pulumi.Output[Optional[bool]]:
        """Indicates whether HTTP traffic is allowed on the endpoint. Default value is true."""
        return pulumi.get(self, "is_http_allowed")

    @property
    @pulumi.getter(name="isHttpsAllowed")
    def is_https_allowed(self) -> pulumi.Output[Optional[bool]]:
        """Indicates whether HTTPS traffic is allowed on the endpoint. Default value is true."""
        return pulumi.get(self, "is_https_allowed")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """Resource location."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="optimizationType")
    def optimization_type(self) -> pulumi.Output[Optional[str]]:
        """Scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services."""
        return pulumi.get(self, "optimization_type")

    @property
    @pulumi.getter(name="originHostHeader")
    def origin_host_header(self) -> pulumi.Output[Optional[str]]:
        """The host header value sent to the origin with each request."""
        return pulumi.get(self, "origin_host_header")

    @property
    @pulumi.getter(name="originPath")
    def origin_path(self) -> pulumi.Output[Optional[str]]:
        """A directory path on the origin that CDN can use to retrieve content from."""
        return pulumi.get(self, "origin_path")

    @property
    @pulumi.getter
    def origins(self) -> pulumi.Output[Sequence['outputs.DeepCreatedOriginResponse']]:
        """The source of the content being delivered via CDN."""
        return pulumi.get(self, "origins")

    @property
    @pulumi.getter(name="probePath")
    def probe_path(self) -> pulumi.Output[Optional[str]]:
        """Path to a file hosted on the origin which helps accelerate delivery of dynamic content."""
        return pulumi.get(self, "probe_path")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """Provisioning status of the endpoint."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="queryStringCachingBehavior")
    def query_string_caching_behavior(self) -> pulumi.Output[Optional[str]]:
        """Defines how CDN caches requests that include query strings."""
        return pulumi.get(self, "query_string_caching_behavior")

    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> pulumi.Output[str]:
        """Resource status of the endpoint."""
        return pulumi.get(self, "resource_state")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Resource type."""
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Map provider camelCase names to snake_case; fall back to the
        # original name when no mapping (or an empty one) exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Inverse of translate_output_property.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 55.474403 | 827 | 0.685308 |
46e49334c34fdb71be2db28711d497171703752a | 16,078 | py | Python | elevenclock/lang/lang_cs.py | mfpx/ElevenClock | 9a7dec3654b94c97fffe10f91d8669645413852c | [
"Apache-2.0"
] | null | null | null | elevenclock/lang/lang_cs.py | mfpx/ElevenClock | 9a7dec3654b94c97fffe10f91d8669645413852c | [
"Apache-2.0"
] | null | null | null | elevenclock/lang/lang_cs.py | mfpx/ElevenClock | 9a7dec3654b94c97fffe10f91d8669645413852c | [
"Apache-2.0"
] | null | null | null | # INSTRUCTIONS
# Translate the text and write it between the "
# EXAMPLE: original -> "This text is in english: value {0}"
# translation -> "Tento text je v angličtině: hodnota {0}"
# So it would look like: "ORIGINAL_TEXT" : "TRANSLATED_TEXT",
# If you see something like {0} or {1}, keep it in the translated sentence
# Pay special attention to elements like ":", etc.
# Strings first introduced in ElevenClock 3.3.1. Each later set below is
# layered on top of the previous one with the dict-union operator (PEP 584).
lang_3_3_1 = {
    "Invalid time format\nPlease follow the\nC 1989 Standards": "Špatný formát času.\nProsím, zkontrolujte si\nstandard 1989 C.",
    "Nothing to preview": "Žádný náhled",
    "Invalid time format\nPlease modify it\nin the settings": "Špatný formát času\nProsím, upravte se\njej v nastavení",
    "Disable the tooltip shown when the clock is hovered": "Vypnout popis po najetí myší na hodiny"
}
# Strings added in ElevenClock 3.3.
lang_3_3 = lang_3_3_1 | {
    "Custom format rules:": "Pravidla formátování:",
    "Any text can be placed here. To place items such as date and time, please use the 1989 C standard. More info on the following link": "Zde můžete vložit jakýkoliv text. Chcete-li umístit hodnoty jako je datum a čas, použijte standard 1989 C. Více informací se dozvíte na následujícím odkaze",
    "Python date and time formats": "Formáty datumu a času jazyka Pyhton",
    "To disable the zero-padding effect, add a # in between the % and the code: non-zero-padded hours would be %#H, and zero-padded hours would be %H": "Pokud nechcete doplňovat nuly před čísla, stačí přidat znak \"#\" mezi znak \"%\", například pro hodiny bez počáteční nuly by to bylo %#H, s případnou nulou pak %H", # Here please don't modify the %H and %#H values
    "Click on Apply to apply and preview the format": "Kliknutím na \"Použít\" aplikujete vlastní formát",
    "Apply": "Použít",
    "If you don't understand what is happening, please uncheck the checkbox over the text area": "Pokud nevíte co se děje, tak odškrtněte zaškrtávátko nad textovým polem",
    "Set a custom date and time format": "Nastavit vlastní formát datumu a času",
    "(for advanced users only)": "(pouze pro pokročilé uživatele)",
    "Move this clock to the left": "Přesunout hodiny vlevo",
    "Move this clock to the top": "Přesunout hodiny nahoru",
    "Move this clock to the right": "Přesunout hodiny vpravo",
    "Move this clock to the bottom": "Přesunout hodiny dolu",
    "Restore horizontal position": "Obnovit horizontální pozici",
    "Restore vertical position": "Obnovit vertikální pozici",
}
# Strings added in ElevenClock 3.2.1.
lang_3_2_1 = lang_3_3 | {
    "Open online help to troubleshoot problems": "Otevřít online nápovědu pro řešení problémů",
    "Reset ElevenClock preferences to defaults": "Obnovit aplikaci do výchozího nastavení",
    "Specify a minimum width for the clock": "Specifikovat minimální šířku pro hodiny",
    "Search on the settings": "Vyhledávání v nastavení",
    "No results were found": "Žádné výsledky",
}
# Strings added in ElevenClock 3.2.
lang_3_2 = lang_3_2_1 | {
    "Use system accent color as background color": "Použít barvu motivu jako barvu pozadí",
    "Check only the focused window on the fullscreen check": "Kontrolovat pouze aktivní okno při kontrole celé obrazovky",
    "Clock on monitor {0}": "Hodiny na monitoru {0}",
    "Move to the left": "Přesunout vlevo",
    "Show this clock on the left": "Zobrazit hodiny na levé straně",
    "Show this clock on the right": "Zobrazit hodiny na pravé straně",
    "Restore clock position": "Obnovit pozici hodin",
}
# Strings added in ElevenClock 3.1.
lang_3_1 = lang_3_2 | {
    "W": "T", # The initial of the word week in your language: W for week, S for setmana, etc.
    "Disable the notification badge": "Vypnout ikonku upozornění notifikací",
    "Override clock default height": "Přepsat výchozí výšku hodin",
    "Adjust horizontal clock position": "Upravit horizontální pozici hodin",
    "Adjust vertical clock position": "Upravit vertikální pozici hodin",
    "Export log as a file": "Exportovat protokol do souboru",
    "Copy log to clipboard": "Zkopírovat protokol do schránky",
    "Announcements:": "Oznámení:",
    "Fetching latest announcement, please wait...": "Získávám poslední oznámení, prosím vyčkejte...",
    "Couldn't load the announcements. Please try again later": "Nelze získat oznámení. Prosím, zkuste to později",
    "ElevenClock's log": "Protokol ElevenClock",
    "Pick a color": "Vybrat barvu"
}
# Strings added in ElevenClock 3.0.
lang_3 = lang_3_1 | {
    "Hide the clock during 10 seconds when clicked": "Skrýt hodiny po kliknutí na dobu 10 sekund",
    "Enable low-cpu mode": "Zapnout mód pro počítače se slabším procesorem",
    "You might lose functionalities, like the notification counter or the dynamic background": "Můžete ztratit některé funkce, jako počítadlo notifikací nebo dynamické pozadí",
    "Clock position and size:": "Umístění a velikost hodin",
    "Clock size preferences, position offset, clock at the left, etc.": "Velikost, pozice, umístění z leva a další",
    "Reset monitor blacklisting status": "Obnovit stav blacklistovaných monitorů",
    "Reset": "Obnovit",
    "Third party licenses": "Licence aplikací třetích stran",
    "View": "Zobrazit",
    "ElevenClock": "ElevenClock",
    "Monitor tools": "Nástroje monitoru",
    "Blacklist this monitor": "Umístit tento monitor na blacklist",
    "Third Party Open-Source Software in Elevenclock {0} (And their licenses)": "Aplikace s otevřeným zdrojovým kódem třetích stran v ElevenClock {0} (a jejich licence)",
    "ElevenClock is an Open-Source application made with the help of other libraries made by the community:": "ElevenClock je aplikace s otevřeným zdrojovým kódem, která vznikla za pomocí dalších knihovem vytvořených komunitou:",
    "Ok": "Ok",
    "More Info": "Více informací",
    "About Qt": "O Qt",
    "Success": "Úspěch",
    "The monitors were unblacklisted successfully.": "Všechny monitory byly odebrány z blacklistu.",
    "Now you should see the clock everywhere": "Hodiny nyní uvidíte všude",
    "Blacklist Monitor": "Blacklist monitoru",
    "Blacklisting a monitor will hide the clock on this monitor permanently.": "Blacklistování monitoru trvale skryje hodiny na daném monitoru.",
    "This action can be reverted from the settings window, under <b>Clock position and size</b>": "Tato akce může být změněna z nastavení pod volbou <b>Umístění a velikost hodin</b>",
    "Are you sure do you want to blacklist the monitor \"{0}\"?": "Jste si jistí, že chcete umístit monitor \"{0}\" na blacklist?",
    "Yes": "Ano",
    "No": "Ne",
}
# Strings added in ElevenClock 2.9.2.
lang_2_9_2 = lang_3 | {
    "Reload log": "Znovu načíst protokol",
    "Do not show the clock on secondary monitors": "Nezobrazovat hodiny na sekundárním monitoru",
    "Disable clock taskbar background color (make clock transparent)": "Vypnout barvu pozadí hodin na hlavní liště (hodiny budou průhledné)",
    "Open the welcome wizard": "Otevřít průvodce prvního spuštění",
    " (ALPHA STAGE, MAY NOT WORK)": " (TESTOVACÍ FÁZE, NEMUSÍ FUNGOVAT)",
    "Welcome to ElevenClock": "Vítejte v ElevenClock",
    "Skip": "Přeskočit",
    "Start": "Začít",
    "Next": "Další",
    "Finish": "Dokončit",
}
# Strings added in ElevenClock 2.9.
lang_2_9 = lang_2_9_2 | {
    "Task Manager": "Správce úloh",
    "Change date and time": "Upravit datum a čas",
    "Notification settings": "Nastavení oznámení",
    "Updates, icon tray, language": "Aktualizace, lokalizace a ikonka lišty",
    "Hide extended options from the clock right-click menu (needs a restart to be applied)": "Skrýt rozšířené možnosti z nabídky po kliknutí pravým tlačítkem myši (aplikace vyžaduje restart)",
    "Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "Chování na celé obrazovce, pozice hodin a další různá nastavení",
    'Add the "Show Desktop" button on the left corner of every clock': 'Přidat tlačítko "Zobrazit plochu" ke všem hodinám',
    'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': 'Pro správnou funkčnost budete muset nejspíš změnit barvu pozadí. Pro více info <a href="{0}" style="color:DodgerBlue">zde</a>',
    "Clock's font, font size, font color and background, text alignment": "Styl, velikost a barva písma, barva pozadí a zarovnání textu",
    "Date format, Time format, seconds,weekday, weeknumber, regional settings": "Formát data a času, sekundy, dny v týdnu, číslo dne a regionální nastavení",
    "Testing features and error-fixing tools": "Testovací funkce a nástroje pro opravu chyb",
    "Language pack author(s), help translating ElevenClock": "Autoři lokalizace a pomoc s překladem aplikace ElevenClock",
    "Info, report a bug, submit a feature request, donate, about": "Informace, hlášení chyb, žádosti o nové funknce a darování",
    "Log, debugging information": "Ladící informace a protokoly",
}
# Strings added in ElevenClock 2.8.
lang_2_8 = lang_2_9 | {
    "Force the clock to be at the top of the screen": "Vynutit, aby byly hodiny ve horní části obrazovky",
    "Show the clock on the primary screen": "Zobrazit hodiny na hlavní obrazovce",
    "Use a custom font color": "Použít vlastní barvu písma",
    "Use a custom background color": "Použít vlastní barvu pozadí",
    "Align the clock text to the center": "Vycentrovat text hodin",
    "Select custom color": "Vybrat vlastní barvu",
    "Hide the clock when a program occupies all screens": "Skrýt hodiny, když je aplikace na všech obrazovkách",
}
# Additional strings from the 2.7 series.
lang2_7_bis = lang_2_8 | {
    "Use a custom font": "Použít vlastní styl písma",
    "Use a custom font size": "Použít vlastní velikost písma",
    "Enable hide when multi-monitor fullscreen apps are running": "Umožnit skrytí, když beží celoobrazovkové aplikace pro více monitorů",
    "<b>{0}</b> needs to be enabled to change this setting": "Pro změnu tohoto nastavení je třeba povolit: <b>{0}</b>",
    "<b>{0}</b> needs to be disabled to change this setting": "Pro změnu tohoto nastavení je třeba deaktivovat: <b>{0}</b>",
}
# Strings added in ElevenClock 2.7.
lang2_7 = lang2_7_bis | {
    " (This feature has been disabled because it should work by default. If it is not, please report a bug)": "Tato funkce byla deaktivována, protože by měla fungovat ve výchozím nastavení. Pokud ne, nahlaste prosím chybu",
    "ElevenClock's language": "Jazyk ElevenClock"
}
# Strings added in ElevenClock 2.6.
lang2_6 = lang2_7 | {
    "About Qt6 (PySide6)": "O Qt6 (PySide6)",
    "About": "O",
    "Alternative non-SSL update server (This might help with SSL errors)": "Alternativní aktualizační server bez SSL (Může pomoci s SSL chybami)",
    "Fixes and other experimental features: (Use ONLY if something is not working)": "Opravy a další experimentální funkce (Použijte POUZE v případě, že něco nefunguje)",
    "Show week number on the clock": "Zobrazit číslo týdne na hodinách"
}
# Strings added in ElevenClock 2.5.
lang2_5 = lang2_6 | {
    "Hide the clock when RDP Client or Citrix Workspace are running": "Skrýt hodiny, když běží RDP Client nebo Citrix Workspace",
    "Clock Appearance:": "Vzhled hodin",
    "Force the clock to have black text": "Vynutit černý text hodin",
    " - It is required that the Dark Text checkbox is disabled": " - Toto vyžaduje vypnuté nastavení zobrazování černého textu",
    "Debbugging information:": "Informace o ladění",
    "Open ElevenClock's log": "Otevřít protokol ElevenClock",
}
lang2_4 = lang2_5 | {
    # Added text in version 2.4
    "Show the clock on the primary screen (Useful if clock is set on the left)": "Zobrazit hodiny na hlavní obrazovce (Užitečné, pokud jsou nastaveny hodiny vlevo)",
    "Show weekday on the clock": "Zobrazit den v týdnu na hodinách",
}
# Base string set (ElevenClock 2.3); all newer sets are layered on top of it.
lang2_3 = lang2_4 | {
    #Context menu
    "ElevenClock Settings" :"Nastavení ElevenClock", # Also settings title
    "Reload Clocks" :"Znovu načíst hodiny",
    "ElevenClock v{0}" :"ElevenClock verze {0}",
    "Restart ElevenClock" :"Restartovat ElevenClock",
    "Hide ElevenClock" :"Skrýt ElevenClock",
    "Quit ElevenClock" :"Ukončit ElevenClock",
    #General settings section
    "General Settings:" :"Obecné nastavení",
    "Automatically check for updates" :"Automaticky kontrolovat aktualizace",
    "Automatically install available updates" :"Automaticky instalovat dostupné aktualizace",
    "Enable really silent updates" :"Povolit opravdu tiché aktualizace",
    "Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"Vynechat kontrolu pravosti poskytovatele aktualizací (NEDOPORUČUJEME, NA VAŠE VLASTNÍ RIZIKO)",
    "Show ElevenClock on system tray" :"Zobrazit ElevenClock na systémové liště",
    "Alternative clock alignment (may not work)" :"Alternativní zarovnání hodin (nemusí fungovat)",
    "Change startup behaviour" :"Změňit chování při spouštění",
    "Change" :"Změnit",
    "<b>Update to the latest version!</b>" :"<b>Aktualizujte na nejnovější verzi!</b>",
    "Install update" :"Instalovat aktualizaci",
    #Clock settings
    "Clock Settings:" :"Nastavení hodin",
    "Hide the clock in fullscreen mode" :"Skrýt hodiny v celoobrazovém režimu",
    "Hide the clock when RDP client is active" :"Skrýt hodiny, když je RDP client aktivní",
    "Force the clock to be at the bottom of the screen" :"Vynutit, aby byly hodiny ve spodní části obrazovky",
    "Show the clock when the taskbar is set to hide automatically" :"Zobrazit hodiny, když je hlavní panel nastaven na automatické skrytí",
    "Fix the hyphen/dash showing over the month" :"Opravuje pomlčku zobrazovanou v průběhu měsíce",
    "Force the clock to have white text" :"Vynutit, aby hodiny měly bílý text",
    "Show the clock at the left of the screen" :"Zobrazit hodiny v levé části obrazovky",
    #Date & time settings
    "Date & Time Settings:" :"Nastavení data a času",
    "Show seconds on the clock" :"Zobrazit sekundy na hodinách",
    "Show date on the clock" :"Zobrazit datum na hodinách",
    "Show time on the clock" :"Ukázat čas na hodinách",
    "Change date and time format (Regional settings)" :"Změnit formát data a času (regionální nastavení)",
    "Regional settings" :"Regionální nastavení",
    #About the language pack
    "About the language pack:" :"O jazykovém balíčku",
    "Translated to English by martinet101" :"Do češtiny přeložili Matouš Adamů, panther7 a SunightMC", # Here, make sure to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
    "Translate ElevenClock to your language" :"Přeložte ElevenClock do svého jazyka",
    "Get started" :"Začít",
    #About ElevenClock
    "About ElevenClock version {0}:" :"O verzi ElevenClock {0}",
    "View ElevenClock's homepage" :"Zobrazit domovskou stránku aplikace ElevenClock",
    "Open" :"Otevřít",
    "Report an issue/request a feature" :"Nahlásit problém/požádat o funkci",
    "Report" :"Hlášení",
    "Support the dev: Give me a coffee☕" :"Podpořte vývojáře: Pošlete mi kávu ☕",
    "Open page" :"Otevřít stránku",
    "Icons by Icons8" :"Ikony vytvořil Icons8", # Here, the word "Icons8" should not be translated
    "Webpage" :"Webová stránka",
    "Close settings" :"Zavřít nastavení",
    "Close" :"Zavřít",
}
# The dictionary exported to the application: the full merged string set.
lang = lang2_3
| 68.127119 | 367 | 0.662209 |
ad4dbbfa82965cf582ef0646ff16a51fb5c4d6b6 | 854 | py | Python | stylemotery/ast_generators/DefaultPythonAstGenerator.py | ml-in-programming/ml-on-source-code-models | 28f206afcda761320550cefdd53a3f89d206f82f | [
"Apache-2.0"
] | 3 | 2020-07-05T22:21:00.000Z | 2021-07-06T08:32:13.000Z | stylemotery/ast_generators/DefaultPythonAstGenerator.py | ml-in-programming/ml-on-source-code-models | 28f206afcda761320550cefdd53a3f89d206f82f | [
"Apache-2.0"
] | null | null | null | stylemotery/ast_generators/DefaultPythonAstGenerator.py | ml-in-programming/ml-on-source-code-models | 28f206afcda761320550cefdd53a3f89d206f82f | [
"Apache-2.0"
] | 3 | 2019-11-20T14:16:12.000Z | 2020-07-05T22:21:02.000Z | import ast
class DefaultPythonAstGenerator:
NONE = "None"
def __init__(self) -> None:
super().__init__()
self.node_types = []
for x in dir(ast):
try:
if isinstance(ast.__getattribute__(x)(), ast.AST):
self.node_types.append(x)
except TypeError:
pass
self.node_types.append(self.NONE)
self.node_types_indices = {v: i for i, v in enumerate(self.node_types)}
def get_node_id(self, node):
return self.node_types_indices[node]
def __call__(self, filepath):
try:
with open(filepath, 'r', encoding="utf-8") as file:
tree = ast.parse(file.read())
return tree
except Exception as e:
print("ERROR during creating AST: ", e, " filename", filepath)
| 29.448276 | 79 | 0.556206 |
ad34f12c11dd75cb49404833309a57472c7c8a59 | 2,096 | py | Python | projetdrone-ShuttleControll/shuttleFlightcontroller.py | mariusweiler/raspberry-phantom2 | 39e9a85e6a9163618e4f49402f89dbdf6111dc9f | [
"MIT"
] | null | null | null | projetdrone-ShuttleControll/shuttleFlightcontroller.py | mariusweiler/raspberry-phantom2 | 39e9a85e6a9163618e4f49402f89dbdf6111dc9f | [
"MIT"
] | null | null | null | projetdrone-ShuttleControll/shuttleFlightcontroller.py | mariusweiler/raspberry-phantom2 | 39e9a85e6a9163618e4f49402f89dbdf6111dc9f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# import de la fonction permettant de detecter une personne
# et de renvoyer ses coordonnées
from shuttlePersondetection import persondetection
# import des fonctions pour la gestion des capteurs
from shuttleCaptorManagement import *
# impor de la class pour envoyer des donnes a mission control
from shuttleSendMissionControllData import *
# import des fonctions de temps pour faire des pauses
import time
# import des fonctions d'open cv
import cv2
# import de la class pour recevoir les infos de mission control
from shuttleCommManagement import *
# import de la class queue
from multiprocessing import Queue
# imports pour la camera
from picamera.array import PiRGBArray
from picamera import PiCamera
# Start-up initialisation.
## Open the outgoing connection first, then wait a few seconds so that mission
## control can create its receiving socket before we connect to it as well.
ssmcd = socketSendMissionControllData()
time.sleep(3)
scm = socketCommManagement()
stop = 0

# Camera initialisation.
camera = PiCamera()
rawCapture = PiRGBArray(camera)

while not stop:
    ## Ask mission control which command to execute next.
    wts = scm.whatToSend()
    ## Clean up the received payload and turn it into a command code.
    wts = wts.lstrip(' ')
    wts = int(wts)
    ## Command 0: mission control asks for the current height.
    if wts == 0:
        ssmcd.sendHeight()
    ## Command 1: mission control asks for the detected person's position.
    elif wts == 1:
        ## Capture one frame from the camera.
        camera.capture(rawCapture, format="bgr")
        image = rawCapture.array
        ## Ask the recognition function for the position array.
        tab = persondetection(image)
        ## Send the coordinates to mission control.
        ssmcd.sendXY(tab)
        ## Reset the capture buffer for the next frame.
        rawCapture.truncate()
        rawCapture.seek(0)
    ## Command 2: fire.
    elif wts == 2:
        ## Activate the relay to fire.
        activatePistol()
    ## Command 3: stop the control loop.
    elif wts == 3:
        # Bug fix: the original did `self.stop = 1`, which raises NameError at
        # module level (there is no `self` here); set the loop flag instead.
        stop = 1

## Close the receiving socket once the loop has finished.
scm.closeSocket()
0ae2e6c73d9aac922171a463c1160f05d9ae3017 | 4,241 | py | Python | azure-polaris-annotate-pr.py | synopsys-sig-community/synopsys-azure-tools | 4c1eb026f543c6df5b92e3690cca16018423bc3e | [
"Apache-2.0"
] | 1 | 2021-12-03T22:19:25.000Z | 2021-12-03T22:19:25.000Z | azure-polaris-annotate-pr.py | synopsys-sig-community/synopsys-azure-tools | 4c1eb026f543c6df5b92e3690cca16018423bc3e | [
"Apache-2.0"
] | null | null | null | azure-polaris-annotate-pr.py | synopsys-sig-community/synopsys-azure-tools | 4c1eb026f543c6df5b92e3690cca16018423bc3e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import json
import sys
import os
import argparse
import urllib
import glob
import requests
import base64
# Parse command line arguments (only a numeric --debug verbosity flag).
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Post Coverity issue summary to Azure Repos Pull Request Threads')
parser.add_argument('--debug', default=0, help='set debug level [0-9]')
args = parser.parse_args()

debug = int(args.debug)

# Locate the incremental analysis output produced by the Polaris CLI.
# NOTE(review): only the first matching file is read; IndexError is raised
# when no results file exists - presumably the pipeline guarantees one.
#jsonFiles = glob.glob("./.synopsys/polaris/diagnostics/analyze,*/local-analysis/results/incremental-results.json")
jsonFiles = glob.glob("./.synopsys/polaris/data/coverity/*/idir/incremental-results/incremental-results.json")
jsonFile = jsonFiles[0]

# Process output from Polaris CLI
with open(jsonFile) as f:
    data = json.load(f)

print("INFO: Reading Polaris incremental analysis results from " + jsonFile)
if(debug): print("DEBUG: " + json.dumps(data, indent = 4, sort_keys=True) + "\n")
# Loop through found issues for specified merge keys, and build out output map
# TODO: Can there be multiple entries for the merge key? I think the right thing would be to list all of them.
# NOTE(review): sast_report/vulnerabilities and most of the per-issue fields
# read below (checkerName, cwe, impact, ...) are never used afterwards -
# looks like leftovers from a GitLab SAST report format; verify before removing.
sast_report = dict()
sast_report["version"] = "2.0"
vulnerabilities = []
azComments = []
for item in data["issues"]:
    checkerName = item["checkerName"]
    checkerProperties = item["checkerProperties"]
    subcategoryShortDescription = checkerProperties["subcategoryShortDescription"]
    subcategoryLongDescription = checkerProperties["subcategoryLongDescription"]
    cwe = checkerProperties["cweCategory"]
    impact = checkerProperties["impact"]
    codeLangauge = item["code-language"]
    mergeKey = item["mergeKey"]
    strippedMainEventFilePathname = item["strippedMainEventFilePathname"]
    mainEventLineNumber = item["mainEventLineNumber"]
    eventNumber = 1
    description = ""
    start_line = 0
    location = dict();
    # Collect the issue's file/line (from the "main" event) and concatenate
    # every event description into one comment body.
    for event in item["events"]:
        if event["main"]:
            location["file"] = event["strippedFilePathname"]
            start_line = event["lineNumber"]
        description = description + "" + event["eventDescription"]
        # NOTE(review): this appends eventDescription a second time when the
        # event is flagged as remediation - presumably the remediation text
        # was intended here; also KeyError if "remediation" is absent. Confirm
        # against the Coverity incremental-results schema.
        if event["remediation"]:
            description = description + "\n\n" + event["eventDescription"]
    # Build one Azure DevOps PR thread payload per issue, anchored at the
    # issue's main-event line.
    newComment = dict()
    comments = []
    comment = dict()
    comment["parentCommentId"] = 0
    comment["commentType"] = 1
    commentContent = ":warning: Coverity Static Analysis found this issue with your code:\n\n" + description + "\n\n[View the full issue report in Coverity](http://synopsys.com)"
    comment["content"] = commentContent
    comments.append(comment)
    newComment["comments"] = comments
    threadContext = dict()
    rightFileEnd = dict()
    rightFileEnd["line"] = start_line
    rightFileEnd["offset"] = 1
    rightFileStart = dict()
    rightFileStart["line"] = start_line
    rightFileStart["offset"] = 1
    threadContext["filePath"] = "/" + location["file"]
    threadContext["rightFileEnd"] = rightFileEnd
    threadContext["rightFileStart"] = rightFileStart
    newComment["threadContext"] = threadContext
    newComment["status"] = "active"
    azComments.append(newComment)
# Add comments to the PR via the Azure DevOps REST API.
# The pipeline provides these predefined variables; SYSTEM_ACCESSTOKEN must be
# explicitly mapped into the job environment for the Basic-auth header to work.
SYSTEM_COLLECTIONURI = os.getenv('SYSTEM_COLLECTIONURI')
SYSTEM_PULLREQUEST_PULLREQUESTID = os.getenv('SYSTEM_PULLREQUEST_PULLREQUESTID')
SYSTEM_TEAMPROJECT = os.getenv('SYSTEM_TEAMPROJECT')
BUILD_REPOSITORY_ID = os.getenv('BUILD_REPOSITORY_ID')

url = f"{SYSTEM_COLLECTIONURI}{SYSTEM_TEAMPROJECT}/_apis/git/repositories/{BUILD_REPOSITORY_ID}/pullRequests/{SYSTEM_PULLREQUEST_PULLREQUESTID}/threads?api-version=6.0"

# Basic auth with an empty user name and the pipeline access token.
accessToken = os.getenv('SYSTEM_ACCESSTOKEN')
authorization = str(base64.b64encode(bytes(':' + accessToken, 'ascii')), 'ascii')

headers = {
    'Accept': 'application/json',
    'Authorization': 'Basic '+ authorization
}

# Post each thread; abort with a non-zero exit on the first failure.
for comment in azComments:
    if (debug): print("DEBUG: perform API Call to ADO" + url +" : " + json.dumps(comment, indent = 4, sort_keys=True) + "\n")
    r = requests.post(url=url, json=comment, headers=headers)
    if r.status_code == 200:
        if (debug): print("DEBUG: Success")
    else:
        print(f"ERROR: Unable to post PR comment to Azure DevOps. Error code: {r.status_code}")
        print(r.text)
        sys.exit(1)

# Any posted finding breaks the build so the PR cannot be merged silently.
if (len(azComments) > 0):
    print("INFO: New security weaknesses found, returning exit code 1 to break the build")
    sys.exit(1)
4c3858ebb549d252e37073e6e03df4847c9eefa0 | 56 | py | Python | kubee2etests/exceptions.py | RoseTeague/kube-e2etestapp | 210808bcdc585005d59a2fac9223c6a2125931de | [
"Apache-2.0"
] | 3 | 2018-04-10T13:03:10.000Z | 2020-02-15T07:03:35.000Z | kubee2etests/exceptions.py | RoseTeague/kube-e2etestapp | 210808bcdc585005d59a2fac9223c6a2125931de | [
"Apache-2.0"
] | 10 | 2018-04-12T13:10:01.000Z | 2019-05-03T14:21:49.000Z | kubee2etests/exceptions.py | RoseTeague/kube-e2etestapp | 210808bcdc585005d59a2fac9223c6a2125931de | [
"Apache-2.0"
] | 3 | 2018-04-19T13:46:59.000Z | 2019-02-26T17:22:32.000Z | class PrerequisiteMissingException(Exception):
pass
| 18.666667 | 46 | 0.821429 |
a6ab6500628927f29e8c5ccd0cfa0dbb3726240e | 2,688 | py | Python | logdevice/ops/ldops/util/convert.py | zhengxiaochuan-3/LogDevice | 01e2302a382db1d87f934e305c8cc74ffc0a24a4 | [
"BSD-3-Clause"
] | 1 | 2019-12-18T09:07:45.000Z | 2019-12-18T09:07:45.000Z | logdevice/ops/ldops/util/convert.py | zhengxiaochuan-3/LogDevice | 01e2302a382db1d87f934e305c8cc74ffc0a24a4 | [
"BSD-3-Clause"
] | null | null | null | logdevice/ops/ldops/util/convert.py | zhengxiaochuan-3/LogDevice | 01e2302a382db1d87f934e305c8cc74ffc0a24a4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
ldops.util.convert
~~~~~~~~~~~
A collection of helpful conversion utility functions
"""
import re
from typing import Dict, List, Mapping, Optional, Pattern
from logdevice.admin.common.types import (
LocationScope,
NodeID,
ReplicationProperty,
ShardID,
)
from logdevice.admin.nodes.types import ShardStorageState
# Public API of this module.
__all__: List[str] = ["to_shard_id", "to_storage_state", "to_replication"]

# A string representation of a shard or node. Consists of a node ID and an
# optional shard ID.
# Examples: N0, N1:S5
SHARD_PATTERN: Pattern[str] = re.compile(
    "^N(?P<node_index>[0-9]+)(:S(?P<shard_index>[0-9]+))?$"
)
def to_shard_id(scope: str) -> ShardID:
    """Parse an "Nx" or "Nx:Sy" string (case-insensitive) into a typed ShardID.

    The ":Sy" shard part is optional; when absent, shard_index is set to -1.
    Raises ValueError on empty or malformed input.
    """
    normalized = scope.upper()
    if not normalized:
        raise ValueError("Cannot parse empty scope")
    match = SHARD_PATTERN.match(normalized)
    if not match:
        raise ValueError(f"Cannot parse '{normalized}'. Invalid format!")
    node_part = match.group("node_index")
    shard_part = match.group("shard_index")
    shard_index = int(shard_part) if shard_part is not None else -1
    return ShardID(node=NodeID(node_index=int(node_part)), shard_index=shard_index)
def to_storage_state(raw_state: str) -> ShardStorageState:
    """Convert a case-insensitive string such as "read-only" or "READ_ONLY"
    into the corresponding ShardStorageState member.

    Hyphens are treated as underscores; raises ValueError on empty input and
    KeyError on unknown states.
    """
    if not raw_state:
        raise ValueError("Cannot parse empty storage-state")
    return ShardStorageState[raw_state.upper().replace("-", "_")]
def to_replication(
    raw_repl: Optional[Mapping[str, int]]
) -> Optional[ReplicationProperty]:
    """Convert a mapping like {'rack': 3, 'node': 2} into a ReplicationProperty
    keyed by LocationScope members; passes None through unchanged.
    """
    if raw_repl is None:
        return None
    scoped = {
        LocationScope[scope_name.upper()]: factor
        for scope_name, factor in raw_repl.items()
    }
    return ReplicationProperty(scoped)
| 29.866667 | 80 | 0.687872 |
359aa679367cf28d7e936437cc4517dbfee79e07 | 6,423 | py | Python | brainio/lookup.py | dapello/brainio | f8a218f9e796e6eea48edcd67d691c00a9bc5e07 | [
"MIT"
] | 1 | 2022-02-04T21:34:07.000Z | 2022-02-04T21:34:07.000Z | brainio/lookup.py | dapello/brainio | f8a218f9e796e6eea48edcd67d691c00a9bc5e07 | [
"MIT"
] | 2 | 2021-07-23T17:05:30.000Z | 2021-09-27T23:38:56.000Z | brainio/lookup.py | dapello/brainio | f8a218f9e796e6eea48edcd67d691c00a9bc5e07 | [
"MIT"
] | 8 | 2021-07-30T15:08:54.000Z | 2022-03-10T02:04:40.000Z | import hashlib
import logging
from pathlib import Path
import entrypoints
import numpy as np
import pandas as pd
# Column added to every catalog row recording which catalog it came from.
LOOKUP_SOURCE = "lookup_source"
# Entrypoint group under which catalog providers register themselves.
ENTRYPOINT = "brainio_lookups"
# Values of the 'lookup_type' column.
TYPE_ASSEMBLY = 'assembly'
TYPE_STIMULUS_SET = 'stimulus_set'
# DataFrame.attrs key holding the on-disk path of a catalog CSV.
CATALOG_PATH_KEY = "catalog_path"
_catalogs = {}  # cache: catalog name -> lookup DataFrame
_concat_catalogs = None  # cache: all catalogs concatenated into one DataFrame
_logger = logging.getLogger(__name__)
def list_catalogs():
    """Return the names of all catalogs registered under the brainio entrypoint group."""
    return [catalog_name for catalog_name in entrypoints.get_group_named(ENTRYPOINT)]
def load_lookup(name, entry_point):
    """Load one catalog DataFrame from its entrypoint and tag every row with its source name."""
    lookup_df = entry_point.load()()
    lookup_df[LOOKUP_SOURCE] = name
    return lookup_df
def get_lookups():
    """Load every registered catalog, keyed by its entrypoint name."""
    registered = entrypoints.get_group_named(ENTRYPOINT)
    return {name: load_lookup(name, entry_point)
            for name, entry_point in registered.items()}
def get_catalogs():
    """Return the cached name -> DataFrame mapping of catalogs, loading it
    from the registered entrypoints on first use."""
    global _catalogs
    if not _catalogs:
        # fix: dropped the stray print() that duplicated this message on stdout
        _logger.debug("Loading lookup from entrypoints")
        _catalogs = get_lookups()
    return _catalogs
def data():
    """Return (and lazily cache) all catalogs concatenated into one DataFrame."""
    global _concat_catalogs
    if _concat_catalogs is None:
        _concat_catalogs = pd.concat(get_catalogs().values(), ignore_index=True)
    return _concat_catalogs
def list_stimulus_sets():
    """Return the sorted, de-duplicated identifiers of all known stimulus sets."""
    stimuli_rows = data()[data()['lookup_type'] == TYPE_STIMULUS_SET]
    # sorted() accepts any iterable; the intermediate list() call was redundant
    return sorted(set(stimuli_rows['identifier']))
def list_assemblies():
    """Return the sorted, de-duplicated identifiers of all known assemblies."""
    assembly_rows = data()[data()['lookup_type'] == TYPE_ASSEMBLY]
    # sorted() accepts any iterable; the intermediate list() call was redundant
    return sorted(set(assembly_rows['identifier']))
def lookup_stimulus_set(identifier):
    """Return the (csv_row, zip_row) catalog entries for a stimulus set.

    Raises StimulusSetLookupError when no row (or no csv/zip part) exists.
    """
    mask = (data()['identifier'] == identifier) & (data()['lookup_type'] == TYPE_STIMULUS_SET)
    lookup = data()[mask]
    if lookup.empty:
        raise StimulusSetLookupError(f"stimulus_set {identifier} not found")
    csv_lookup = _lookup_stimulus_set_filtered(lookup, filter_func=_is_csv_lookup, label="CSV")
    zip_lookup = _lookup_stimulus_set_filtered(lookup, filter_func=_is_zip_lookup, label="zip")
    return csv_lookup, zip_lookup
def _lookup_stimulus_set_filtered(lookup, filter_func, label):
    """Reduce stimulus-set rows to the single row matching `filter_func`.

    Rows identical in every column except the catalog source are treated as
    duplicates of one entry; anything else is an inconsistency.
    """
    dedupe_cols = [column for column in lookup.columns if column != LOOKUP_SOURCE]
    # filter for csv vs. zip, then collapse rows that differ only in source
    matches = lookup[lookup.apply(filter_func, axis=1)]
    matches = matches.drop_duplicates(subset=dedupe_cols)
    identifier = lookup.iloc[0]['identifier']
    if len(matches) == 0:
        raise StimulusSetLookupError(f"{label} for stimulus set {identifier} not found")
    if len(matches) > 1:  # multiple rows remained that were not all identical
        raise RuntimeError(
            f"Internal data inconsistency: Found more than 2 lookup rows for stimulus_set {label} for identifier {identifier}")
    assert len(matches) == 1
    return matches.squeeze()
def lookup_assembly(identifier):
    """Return the single catalog row describing the assembly `identifier`.

    Rows identical in everything but their catalog source count as one entry.
    Raises AssemblyLookupError when absent, RuntimeError on inconsistent rows.
    """
    lookup = data()[(data()['identifier'] == identifier) & (data()['lookup_type'] == TYPE_ASSEMBLY)]
    if lookup.empty:
        raise AssemblyLookupError(f"assembly {identifier} not found")
    compare_cols = [column for column in lookup.columns if column != LOOKUP_SOURCE]
    deduped = lookup.drop_duplicates(subset=compare_cols)
    if len(deduped) > 1:  # multiple rows remained that were not all identical
        raise RuntimeError(f"Internal data inconsistency: Found multiple lookup rows for identifier {identifier}")
    assert len(deduped) == 1
    return deduped.squeeze()
class StimulusSetLookupError(KeyError):
    """Raised when no stimulus set with the requested identifier is in any catalog."""
class AssemblyLookupError(KeyError):
    """Raised when no assembly with the requested identifier is in any catalog."""
def append(catalog_name, object_identifier, cls, lookup_type,
           bucket_name, sha1, s3_key, stimulus_set_identifier=None):
    """Add one lookup row to the named catalog and persist it.

    Builds an S3-backed lookup entry, rejects duplicates (except the
    complementary csv/zip halves of one stimulus set), writes the updated
    catalog back to its CSV, updates the in-memory cache, and invalidates
    the concatenated-catalog cache.

    Raises ValueError when the identifier/lookup_type combination already
    exists (unless it is the other half of a stimulus set).
    """
    global _catalogs
    global _concat_catalogs
    catalogs = get_catalogs()
    catalog = catalogs[catalog_name]
    # Preserve attrs (notably the catalog's file path) across pd.concat below,
    # which does not propagate them to the result frame.
    catalog_attrs = dict(catalog.attrs)
    catalog_path = Path(catalog_attrs[CATALOG_PATH_KEY])
    _logger.debug(f"Adding {lookup_type} {object_identifier} to catalog {catalog_name}")
    object_lookup = {
        'identifier': object_identifier,
        'lookup_type': lookup_type,
        'class': cls,
        'location_type': "S3",
        'location': f"https://{bucket_name}.s3.amazonaws.com/{s3_key}",
        'sha1': sha1,
        'stimulus_set_identifier': stimulus_set_identifier,
        'lookup_source': catalog_name,
    }
    # check duplicates
    assert object_lookup['lookup_type'] in [TYPE_ASSEMBLY, TYPE_STIMULUS_SET]
    duplicates = catalog[(catalog['identifier'] == object_lookup['identifier']) &
                         (catalog['lookup_type'] == object_lookup['lookup_type'])]
    if len(duplicates) > 0:
        if object_lookup['lookup_type'] == TYPE_ASSEMBLY:
            raise ValueError(f"Trying to add duplicate identifier {object_lookup['identifier']}, "
                             f"existing \n{duplicates.to_string()}")
        elif object_lookup['lookup_type'] == TYPE_STIMULUS_SET:
            # A stimulus set is stored as two rows (csv + zip); adding the
            # missing half of an existing entry is the one permitted case.
            if len(duplicates) == 1 and duplicates.squeeze()['identifier'] == object_lookup['identifier'] and (
                    (_is_csv_lookup(duplicates.squeeze()) and _is_zip_lookup(object_lookup)) or
                    (_is_zip_lookup(duplicates.squeeze()) and _is_csv_lookup(object_lookup))):
                pass  # all good, we're just adding the second part of a stimulus set
            else:
                raise ValueError(
                    f"Trying to add duplicate identifier {object_lookup['identifier']}, existing {duplicates}")
    # append and save
    add_lookup = pd.DataFrame({key: [value] for key, value in object_lookup.items()})
    # fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported replacement.
    catalog = pd.concat([catalog, add_lookup])
    catalog.attrs = catalog_attrs  # restore catalog_path for future appends
    catalog.to_csv(catalog_path, index=False)
    _catalogs[catalog_name] = catalog
    _concat_catalogs = None
def _is_csv_lookup(data_row):
    """Return True if `data_row` is the csv part of a stimulus-set lookup.

    The csv part carries the metadata and therefore has a `class` entry.
    """
    if data_row['lookup_type'] != TYPE_STIMULUS_SET:
        return False
    if not data_row['location'].endswith('.csv'):
        return False
    return data_row['class'] not in [None, np.nan]
def _is_zip_lookup(data_row):
    """Return True if `data_row` is the zip part of a stimulus-set lookup.

    The zip part holds raw files only and therefore has no `class` entry.
    """
    if data_row['lookup_type'] != TYPE_STIMULUS_SET:
        return False
    if not data_row['location'].endswith('.zip'):
        return False
    return data_row['class'] in [None, np.nan]
def sha1_hash(path, buffer_size=64 * 2 ** 10):
    """Compute the SHA-1 hex digest of the file at `path`.

    Reads the file incrementally in `buffer_size`-byte chunks so that
    arbitrarily large files can be hashed without loading them into memory.
    """
    digest = hashlib.sha1()
    with open(path, "rb") as stream:
        # iter() with a b"" sentinel yields chunks until EOF
        for chunk in iter(lambda: stream.read(buffer_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
| 36.702857 | 127 | 0.682703 |
cb7fcc0b6523329864c122eabcea5bdba4fd84a7 | 141 | py | Python | kelte/procgen/encounters/api.py | brianbruggeman/rl | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | [
"MIT"
] | null | null | null | kelte/procgen/encounters/api.py | brianbruggeman/rl | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | [
"MIT"
] | null | null | null | kelte/procgen/encounters/api.py | brianbruggeman/rl | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | [
"MIT"
] | null | null | null | from pathlib import Path
def populate_encounters():
    """Print the path of every encounter definition (*.yml) under data/.

    Looks recursively inside the `data` directory that sits next to this
    module; prints nothing if the directory does not exist.
    """
    data_root = Path(__file__).parent / 'data'
    for definition_file in data_root.glob('**/*.yml'):
        print(definition_file)
| 20.142857 | 66 | 0.638298 |
06f80a8cf904e3f4e6b55f1fa36b981bbbb78e6e | 1,631 | py | Python | setup.py | eacooper/BerkeleyVisionStats | 39192eca0ade05f8a1473cd8032b08c2a1c19e7b | [
"MIT"
] | null | null | null | setup.py | eacooper/BerkeleyVisionStats | 39192eca0ade05f8a1473cd8032b08c2a1c19e7b | [
"MIT"
] | 12 | 2015-01-06T19:16:17.000Z | 2015-01-14T02:03:23.000Z | setup.py | eacooper/BerkeleyVisionStats | 39192eca0ade05f8a1473cd8032b08c2a1c19e7b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# setup.py build file
DESCRIPTION = "BinOcular Retinal Image Statistics"
LONG_DESCRIPTION = """\
Berkeley BORIS is a project to analyze the statistics of the visual world
during natural viewing.
"""
NAME = "boris"
AUTHOR = "Bill Sprague and Emily Cooper"
AUTHOR_EMAIL = "bill.sprague@berkeley.edu"
DOWNLOAD_URL = "https://github.com/Berkeley-BORIS/BORIS_Code.git"
LICENSE = "MIT"
VERSION = "0.1.dev2"
from setuptools import setup, find_packages
if __name__ == '__main__':
setup(name=NAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
packages=find_packages(exclude='boris.cli'),
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2'
'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3.3',
#'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
install_requires=['numpy', 'pandas', 'Click', 'tables', 'pyyaml'],
entry_points={
'console_scripts': ['boris=boris.cli:main']
}
) | 36.244444 | 74 | 0.57756 |
dc13ad02d854c65f8e83630b273733f6f7d417b9 | 1,208 | py | Python | CustomMobjects/CircleCurvy.py | piakos314/mathmizo | c565ea802cca30ac64b66282f11e8ba60b1ad669 | [
"MIT"
] | 1 | 2021-06-07T08:22:26.000Z | 2021-06-07T08:22:26.000Z | CustomMobjects/CircleCurvy.py | piakos314/mathmizo | c565ea802cca30ac64b66282f11e8ba60b1ad669 | [
"MIT"
] | null | null | null | CustomMobjects/CircleCurvy.py | piakos314/mathmizo | c565ea802cca30ac64b66282f11e8ba60b1ad669 | [
"MIT"
] | 1 | 2021-06-07T14:08:53.000Z | 2021-06-07T14:08:53.000Z | from manim import *
'''
class CircleCurvy(VMobject):
def __init__(
self,
theta = 2*PI,
step_size =0.05,
amplitude = 0.34,
radius = 3.1,
frequency =4,
phase = 0,
**kwargs):
VMobject.__init__(self, **kwargs)
theta = np.arange(0, theta+step_size, step_size)
points = [np.array([
(radius + amplitude * np.sin(phase + frequency * t)) * np.cos(t),
(radius + amplitude * np.sin(phase+ frequency * t)) * np.sin(t), 0])
for t in theta]
self.set_points_smoothly(points)
'''
class enscene(Scene):
    """Scene that draws four phase-shifted wavy circles and counter-rotates them."""
    # NOTE(review): CircleCurvy is only defined inside the triple-quoted
    # (disabled) block earlier in this file — confirm it is actually
    # importable/defined at runtime before rendering this scene.
    def construct(self):
        # four identical wavy circles, each phase-shifted by a quarter period
        cir = [
            CircleCurvy(color = RED_D),
            CircleCurvy(color = GOLD_D, phase = PI/2),
            CircleCurvy(color = PURPLE_D, phase = PI),
            CircleCurvy(color = MAROON_D, phase = 3*PI/2)
        ]
        self.add(*[x for x in cir])
        # alternate rotation directions so adjacent circles slide past each other
        self.play(
            cir[0].animate.rotate(PI/8),
            cir[1].animate.rotate(-PI/8),
            cir[2].animate.rotate(PI/8),
            cir[3].animate.rotate(-PI/8),
            run_time=2, rate_func = rate_functions.ease_in_out_sine
        )
self.wait() | 32.648649 | 80 | 0.51904 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.