id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
1671967
|
import logging
import datetime
import os
import pytest
from wuphf.endpoints import SmtpMessenger
from wuphf.endpoints.smtp_messenger import sample_msg
# Expected fragments of rendered messages; exp0 pins today's date in the subject,
# so these tests are only stable within a single calendar day.
exp0 = "To: <EMAIL>\nFrom: <EMAIL>\nSubject: Sample WUPHF generated at {}".format(datetime.datetime.now().date())
exp1 = "The WUPHF message is: \"Hello world\""
exp2 = "The WUPHF message is: \"Foo Bar Baz\""
@pytest.mark.skip(reason="Because it fails!")
def test_messenger_template():
    """Render a message from a dict payload and from an object exposing .meta."""
    messenger = SmtpMessenger(msg_t=sample_msg, target="<EMAIL>", user="admin")

    # Dict-style payload
    rendered = messenger.get({"msg_text": "Hello world"})
    assert exp0 in rendered
    assert exp1 in rendered

    # Object-style payload: message data carried in a `meta` mapping
    class Item(object):
        def __init__(self, msg_text):
            self.meta = {"msg_text": msg_text}

    rendered = messenger.get(Item("Foo Bar Baz"))
    assert exp0 in rendered
    assert exp2 in rendered
@pytest.mark.skip(reason="No way to check response automatically")
def test_messenger_stmp():
    """Send a real message over SMTP using connection details from env vars.

    Requires SMTP_HOST / SMTP_PORT / SMTP_FROM_ADDR / SMTP_TO_ADDRS to be set;
    skipped in CI because delivery cannot be asserted automatically.
    """
    M = SmtpMessenger(
        msg_t=sample_msg,
        host=os.environ.get("SMTP_HOST"),
        port=os.environ.get("SMTP_PORT"),
        from_addr=os.environ.get("SMTP_FROM_ADDR"),
        target=os.environ.get("SMTP_TO_ADDRS")
    )
    # check() presumably validates connectivity/configuration -- confirm in SmtpMessenger
    assert M.check()
    M.send({"msg_text": "Hello world"})
if __name__ == "__main__":
    # Manual entry point: enable debug logging and run the template test directly
    # (bypasses the pytest skip marker's collection, since the function is called).
    logging.basicConfig(level=logging.DEBUG)
    test_messenger_template()
|
1671986
|
from __future__ import absolute_import, unicode_literals
from kombu import Queue
from celery.utils.nodenames import worker_direct
class test_worker_direct:
    """Tests for celery.utils.nodenames.worker_direct."""

    def test_returns_if_queue(self):
        """A Queue instance must be passed through unchanged (identity, not copy)."""
        queue = Queue('foo')
        assert worker_direct(queue) is queue
|
1672003
|
from models.model_plain import ModelPlain
import numpy as np
class ModelPlain4(ModelPlain):
    """Train with four inputs (L, k, sf, sigma) and with pixel loss for USRNet"""

    # ----------------------------------------
    # feed L/H data
    # ----------------------------------------
    def feed_data(self, data, need_H=True):
        """Move one batch onto the model device.

        Args:
            data: mapping with keys 'L' (low-quality image), 'k' (blur kernel),
                'sf' (scale factor), 'sigma' (noise level), and optionally 'H'
                (high-quality target).
            need_H: also load the ground-truth image 'H' when True.
        """
        self.L = data['L'].to(self.device)          # low-quality image
        self.k = data['k'].to(self.device)          # blur kernel
        # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int() is the documented equivalent.
        self.sf = int(data['sf'][0, ...].squeeze().cpu().numpy())  # scale factor
        self.sigma = data['sigma'].to(self.device)  # noise level
        if need_H:
            self.H = data['H'].to(self.device)      # H
|
1672055
|
from indy_common.constants import GET_RICH_SCHEMA_OBJECT_BY_ID, RS_ID
from indy_common.config_util import getConfig
from indy_node.server.request_handlers.read_req_handlers.rich_schema.abstract_rich_schema_read_req_handler import \
AbstractRichSchemaReadRequestHandler
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.request import Request
from plenum.common.exceptions import InvalidClientRequest
from plenum.server.database_manager import DatabaseManager
class GetRichSchemaObjectByIdHandler(AbstractRichSchemaReadRequestHandler):
    """Read handler returning a rich-schema object looked up by its RS_ID."""

    def __init__(self, database_manager: DatabaseManager):
        super().__init__(database_manager, GET_RICH_SCHEMA_OBJECT_BY_ID, DOMAIN_LEDGER_ID)

    def get_result(self, request: Request):
        """Look up the object identified by request.operation[RS_ID].

        Returns a result message carrying data/seq_no/update_time/proof;
        all four are None when the id is unknown (KeyError from lookup).
        """
        super().get_result(request)
        self._validate_request_type(request)
        # Renamed local from `id` to avoid shadowing the builtin.
        rs_id = request.operation[RS_ID]
        try:
            value, seq_no, last_update_time, proof = self.lookup(rs_id, is_committed=True, with_proof=True)
        except KeyError:
            # Unknown id: respond with an empty (but well-formed) result.
            value, seq_no, last_update_time, proof = None, None, None, None
        return self.make_result(request=request,
                                data=value,
                                last_seq_no=seq_no,
                                update_time=last_update_time,
                                proof=proof)
|
1672074
|
import abc
from copy import copy
from dataclasses import dataclass, field
import functools
import multiprocessing
from multiprocessing import synchronize
import threading
import time
import typing as tp
import stopit
from pypeln import utils as pypeln_utils
from . import utils
from .queue import IterableQueue, OutputQueues
WorkerConstructor = tp.Callable[[int, "StageParams", IterableQueue], "Worker"]
Kwargs = tp.Dict[str, tp.Any]
T = tp.TypeVar("T")
class ProcessFn(pypeln_utils.Protocol):
    """Structural type of the callable a Worker executes: fn(worker, **kwargs)."""

    def __call__(self, worker: "Worker", **kwargs):
        ...
class StageParams(tp.NamedTuple):
    """Queues and shared state that every worker of one stage operates on."""

    # Queue this stage consumes from.
    input_queue: IterableQueue
    # Downstream queues this stage pushes results to.
    output_queues: OutputQueues
    # Shared state; holds the count of still-active workers.
    namespace: utils.Namespace

    @classmethod
    def create(
        cls, input_queue: IterableQueue, output_queues: OutputQueues, total_workers: int
    ) -> "StageParams":
        """Build StageParams with the active-worker counter initialized to total_workers."""
        return cls(
            namespace=utils.Namespace(active_workers=total_workers),
            input_queue=input_queue,
            output_queues=output_queues,
        )

    def worker_done(self):
        """Decrement the active-worker count.

        The namespace is used as a context manager here -- presumably a lock
        guarding the shared counter; confirm in utils.Namespace.
        """
        with self.namespace:
            self.namespace.active_workers -= 1
class WorkerInfo(tp.NamedTuple):
    """Identifies a worker within its stage; passed to on_start/on_done callbacks."""

    # Position of the worker within its stage (0-based).
    index: int
@dataclass
class Worker(tp.Generic[T]):
    """One unit of execution (process or thread) running `process_fn` for a stage.

    Lifecycle: start() spawns the process/thread, __call__ runs
    on_start -> process_fn -> on_done inside it, stop() terminates/interrupts it,
    and did_timeout() lets a supervisor detect a task exceeding `timeout` seconds.
    """

    process_fn: ProcessFn
    index: int
    timeout: float
    stage_params: StageParams
    main_queue: IterableQueue
    on_start: tp.Optional[tp.Callable[..., Kwargs]]
    on_done: tp.Optional[tp.Callable[..., Kwargs]]
    use_threads: bool
    f_args: tp.List[str]
    namespace: utils.Namespace = field(
        default_factory=lambda: utils.Namespace(done=False, task_start_time=None)
    )
    process: tp.Optional[tp.Union[multiprocessing.Process, threading.Thread]] = None

    def __call__(self):
        """Worker entry point, executed inside the spawned process/thread."""
        worker_info = WorkerInfo(index=self.index)
        # Callbacks only receive the keyword arguments they actually declare.
        on_start_args: tp.List[str] = (
            pypeln_utils.function_args(self.on_start) if self.on_start else []
        )
        on_done_args: tp.List[str] = (
            pypeln_utils.function_args(self.on_done) if self.on_done else []
        )
        try:
            if self.on_start is not None:
                on_start_kwargs = dict(worker_info=worker_info)
                kwargs = self.on_start(
                    **{
                        key: value
                        for key, value in on_start_kwargs.items()
                        if key in on_start_args
                    }
                )
            else:
                kwargs = {}
            if kwargs is None:
                kwargs = {}
            # kwargs produced by on_start are forwarded to process_fn, filtered by f_args.
            kwargs.setdefault("worker_info", worker_info)
            self.process_fn(
                self,
                **{key: value for key, value in kwargs.items() if key in self.f_args},
            )
            self.stage_params.worker_done()
            if self.on_done is not None:
                kwargs.setdefault(
                    "stage_status",
                    StageStatus(
                        namespace=self.stage_params.namespace,
                    ),
                )
                self.on_done(
                    **{
                        key: value
                        for key, value in kwargs.items()
                        if key in on_done_args
                    }
                )
            self.stage_params.output_queues.worker_done()
        except pypeln_utils.StopThreadException:
            # Raised into thread workers by stop(): exit quietly.
            pass
        except BaseException as e:
            self.main_queue.raise_exception(e)
            # Give the exception time to propagate before tearing down.
            time.sleep(0.01)
        finally:
            self.done()

    def start(self):
        """Spawn the single underlying process/thread (stored in self.process)."""
        [self.process] = start_workers(self, use_threads=self.use_threads)

    def stop(self):
        """Terminate the worker: terminate() for processes, async-raise for threads."""
        if self.process is None:
            return
        if not self.process.is_alive():
            return
        if isinstance(self.process, multiprocessing.Process):
            self.process.terminate()
        else:
            stopit.async_raise(
                self.process.ident,
                pypeln_utils.StopThreadException,
            )
        self.namespace.task_start_time = None

    def done(self):
        # Marks the worker finished; consulted by did_timeout().
        self.namespace.done = True

    def did_timeout(self):
        """True when the current task has run longer than `timeout` (falsy timeout disables)."""
        task_start_time = self.namespace.task_start_time
        done = self.namespace.done
        return (
            self.timeout
            and not done
            and task_start_time is not None
            and (time.time() - task_start_time > self.timeout)
        )

    @dataclass
    class MeasureTaskTime:
        """Context manager recording the wall-clock start of the current task."""

        worker: "Worker"

        def __enter__(self):
            self.worker.namespace.task_start_time = time.time()

        def __exit__(self, *args):
            self.worker.namespace.task_start_time = None

    def measure_task_time(self):
        """Wrap each task in this so did_timeout() can observe its start time."""
        return self.MeasureTaskTime(self)
class Applicable(pypeln_utils.Protocol):
    """Structural type for per-element stage logic: apply(worker, elem, **kwargs)."""

    def apply(self, worker: "Worker", elem: tp.Any, **kwargs):
        ...
class ApplyProcess(ProcessFn, Applicable):
    """ProcessFn that applies `self.apply` to every element of the stage's input queue."""

    def __call__(self, worker: Worker, **kwargs):
        for elem in worker.stage_params.input_queue:
            # Time each element so a supervisor can enforce worker.timeout.
            with worker.measure_task_time():
                self.apply(worker, elem, **kwargs)
class StageStatus:
    """Read-only view of a stage's progress, handed to `on_done` callbacks
    for any bookkeeping they need to do.
    """

    def __init__(self, namespace):
        self._namespace = namespace

    @property
    def active_workers(self):
        """`int` : Number of active workers."""
        return self._namespace.active_workers

    @property
    def done(self) -> bool:
        """`bool` : `True` if all workers finished."""
        return self.active_workers == 0

    def __str__(self):
        return (
            f"StageStatus(done = {self.done}, active_workers = {self.active_workers})"
        )
# ----------------------------------------------------------------
# create_daemon_workers
# ----------------------------------------------------------------
def start_workers(
    target: tp.Callable,
    n_workers: int = 1,
    args: tp.Tuple[tp.Any, ...] = tuple(),
    kwargs: tp.Optional[tp.Dict[tp.Any, tp.Any]] = None,
    use_threads: bool = False,
) -> tp.Union[tp.List[multiprocessing.Process], tp.List[threading.Thread]]:
    """Spawn `n_workers` daemon threads or processes running `target` and return them."""
    kwargs = {} if kwargs is None else kwargs
    # Pick the concrete worker type once; Thread and Process share this API.
    worker_cls = threading.Thread if use_threads else multiprocessing.Process
    workers = []
    for _ in range(n_workers):
        worker = worker_cls(target=target, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
        workers.append(worker)
    return workers
|
1672079
|
from ....Classes.Arc1 import Arc1
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
    """Return the full winding surface

    Parameters
    ----------
    self : SlotW22
        A SlotW22 object
    alpha : float
        float number for rotation (Default value = 0) [rad]
    delta : complex
        complex number for translation (Default value = 0)

    Returns
    -------
    surf_wind: Surface
        Surface corresponding to the Winding Area
    """
    # get the name of the lamination
    st = self.get_name_lam()

    # Create curve list: drop the first and last lines of the slot geometry
    # (presumably the slot opening -- confirm against build_geometry), then
    # close the contour with an arc between the two open ends.
    curve_list = self.build_geometry()[1:-1]
    curve_list.append(
        Arc1(
            begin=curve_list[-1].get_end(),
            end=curve_list[0].get_begin(),
            # Negative radius with is_trigo_direction=False: clockwise closing arc.
            radius=-abs(curve_list[-1].get_end()),
            is_trigo_direction=False,
        )
    )

    # Create surface; the reference point sits at mid-height of the winding area,
    # on either side of the bore radius depending on slot orientation.
    if self.is_outwards():
        Zmid = self.get_Rbo() + self.H0 + self.H2 / 2
    else:
        Zmid = self.get_Rbo() - self.H0 - self.H2 / 2
    surface = SurfLine(
        line_list=curve_list, label="Wind_" + st + "_R0_T0_S0", point_ref=Zmid
    )

    # Apply transformation
    surface.rotate(alpha)
    surface.translate(delta)
    return surface
|
1672083
|
import logging
from time import sleep
from vspk import v6 as vsdk
from vspk.utils import set_log_level
set_log_level(logging.ERROR)
def did_receive_push(data):
    """Push-center delegate: pretty-print each received event payload."""
    from pprint import PrettyPrinter

    PrettyPrinter(indent=4).pprint(data)
if __name__ == '__main__':
    import sys
    # NOTE(review): recursion limit of 50 is unusually low -- presumably meant to
    # surface runaway recursion in the SDK quickly; confirm the intent.
    sys.setrecursionlimit(50)

    # create a user session for user csproot
    session = vsdk.NUVSDSession(username="csproot", password="<PASSWORD>", enterprise="csp", api_url="https://localhost:8443")

    # start the session
    # now session contains a push center and the connected user
    session.start()
    # NOTE(review): start/reset/start sequence looks deliberate (session refresh?) -- verify.
    session.reset()
    session.start()

    # we get the push center from the session
    push_center = session.push_center

    # we register our delegate that will be called on each event
    push_center.add_delegate(did_receive_push)

    # and we start it
    push_center.start()

    # then we do nothing, welcome to the marvelous world of async programing ;)
    while True:
        sleep(10000)
|
1672120
|
import math
import mmcv
import torchvision.utils
from basicsr.data import create_dataloader, create_dataset
def main(mode='folder'):
    """Test vimeo90k dataset.

    Visual smoke test: builds the Vimeo90K train dataset/dataloader and dumps
    the first few LQ/GT batches as PNG grids under ./tmp for manual inspection.

    Args:
        mode: There are two modes: 'lmdb', 'folder'.
    """
    opt = {}
    opt['dist'] = False
    opt['phase'] = 'train'
    opt['name'] = 'Vimeo90K'
    opt['type'] = 'Vimeo90KDataset'
    if mode == 'folder':
        opt['dataroot_gt'] = 'datasets/vimeo90k/vimeo_septuplet/sequences'
        opt['dataroot_lq'] = 'datasets/vimeo90k/vimeo_septuplet_matlabLRx4/sequences'  # noqa E501
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt'  # noqa E501
        opt['io_backend'] = dict(type='disk')
    elif mode == 'lmdb':
        opt['dataroot_gt'] = 'datasets/vimeo90k/vimeo90k_train_GT_only4th.lmdb'
        opt['dataroot_lq'] = 'datasets/vimeo90k/vimeo90k_train_LR7frames.lmdb'
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt'  # noqa E501
        opt['io_backend'] = dict(type='lmdb')
    # Augmentation and loader options.
    opt['num_frame'] = 7
    opt['gt_size'] = 256
    opt['random_reverse'] = True
    opt['use_flip'] = True
    opt['use_rot'] = True
    opt['use_shuffle'] = True
    opt['num_worker_per_gpu'] = 1
    opt['batch_size_per_gpu'] = 16
    opt['scale'] = 4
    opt['dataset_enlarge_ratio'] = 1

    mmcv.mkdir_or_exist('tmp')
    dataset = create_dataset(opt)
    data_loader = create_dataloader(
        dataset, opt, num_gpu=0, dist=opt['dist'], sampler=None)

    # Grid layout: roughly square given the batch size; padding only for train phase.
    nrow = int(math.sqrt(opt['batch_size_per_gpu']))
    padding = 2 if opt['phase'] == 'train' else 0
    print('start...')
    for i, data in enumerate(data_loader):
        if i > 5:
            break  # only inspect the first few batches
        print(i)
        lq = data['lq']  # low-quality frames; assumes (batch, frame, C, H, W) -- see slicing below
        gt = data['gt']
        key = data['key']
        print(key)
        # Save each LQ frame of the clip, then the single GT image, as grids.
        for j in range(opt['num_frame']):
            torchvision.utils.save_image(
                lq[:, j, :, :, :],
                f'tmp/lq_{i:03d}_frame{j}.png',
                nrow=nrow,
                padding=padding,
                normalize=False)
        torchvision.utils.save_image(
            gt,
            f'tmp/gt_{i:03d}.png',
            nrow=nrow,
            padding=padding,
            normalize=False)

if __name__ == '__main__':
    main()
|
1672160
|
import os
from lib import action
class Plan(action.TerraformBaseAction):
    """StackStorm action wrapping `terraform plan`."""

    def run(self, plan_path, state_file_path, target_resources, terraform_exec,
            variable_dict, variable_files):
        """
        Plan the changes required to reach the desired state of the configuration

        Args:
        - plan_path: path of the Terraform files
        - state_file_path: path of the Terraform state file
        - target_resources: list of resources to target from the configuration
        - terraform_exec: path of the Terraform bin
        - variable_dict: dictionary of Terraform variables that will overwrite the
                         variable files if both are declared
        - variable_files: array of Terraform variable files

        Returns:
        - dict: Terraform output command output
        """
        os.chdir(plan_path)

        # Configure the terraform wrapper before invoking `plan`.
        self.terraform.terraform_bin_path = terraform_exec
        self.terraform.state = state_file_path
        self.terraform.targets = target_resources
        self.terraform.variables = variable_dict
        self.terraform.var_file = variable_files

        return_code, stdout, stderr = self.terraform.plan(plan_path, capture_output=False)

        # Both 0 and 2 are treated as success -- presumably 2 = "changes present"
        # (terraform's -detailed-exitcode convention); confirm in the wrapper.
        return self.check_result(
            return_code,
            stdout,
            stderr,
            return_output=True,
            valid_return_codes=[0, 2]
        )
|
1672167
|
import functools
import inspect
import logging
import os
from typing import Callable, Optional
from ..shared import constants
from ..shared.functions import resolve_truthy_env_var_choice
from ..tracing import Tracer
from .exceptions import MiddlewareInvalidArgumentError
logger = logging.getLogger(__name__)
def lambda_handler_decorator(decorator: Optional[Callable] = None, trace_execution: Optional[bool] = None):
    """Decorator factory for decorating Lambda handlers.

    You can use lambda_handler_decorator to create your own middlewares,
    where your function signature follows: `fn(handler, event, context)`

    Custom keyword arguments are also supported e.g. `fn(handler, event, context, option=value)`

    Middlewares created by this factory supports tracing to help you quickly troubleshoot
    any overhead that custom middlewares may cause - They will appear as custom subsegments.

    **Non-key value params are not supported** e.g. `fn(handler, event, context, option)`

    Environment variables
    ---------------------
    POWERTOOLS_TRACE_MIDDLEWARES : str
        uses `aws_lambda_powertools.tracing.Tracer`
        to create sub-segments per middleware (e.g. `"true", "True", "TRUE"`)

    Parameters
    ----------
    decorator: Callable
        Middleware to be wrapped by this factory
    trace_execution: bool
        Flag to explicitly enable trace execution for middlewares.
        `Env POWERTOOLS_TRACE_MIDDLEWARES="true"`

    Example
    -------
    **Create a middleware no params**

        from aws_lambda_powertools.middleware_factory import lambda_handler_decorator

        @lambda_handler_decorator
        def log_response(handler, event, context):
            any_code_to_execute_before_lambda_handler()
            response = handler(event, context)
            any_code_to_execute_after_lambda_handler()
            print(f"Lambda handler response: {response}")

        @log_response
        def lambda_handler(event, context):
            return True

    **Create a middleware with params**

        from aws_lambda_powertools.middleware_factory import lambda_handler_decorator

        @lambda_handler_decorator
        def obfuscate_sensitive_data(handler, event, context, fields=None):
            # Obfuscate email before calling Lambda handler
            if fields:
                for field in fields:
                    field = event.get(field, "")
                    event[field] = obfuscate_pii(field)
            response = handler(event, context)
            print(f"Lambda handler response: {response}")

        @obfuscate_sensitive_data(fields=["email"])
        def lambda_handler(event, context):
            return True

    **Trace execution of custom middleware**

        from aws_lambda_powertools import Tracer
        from aws_lambda_powertools.middleware_factory import lambda_handler_decorator

        tracer = Tracer(service="payment")  # or via env var
        ...
        @lambda_handler_decorator(trace_execution=True)
        def log_response(handler, event, context):
            ...

        @tracer.capture_lambda_handler
        @log_response
        def lambda_handler(event, context):
            return True

    Limitations
    -----------
    * Async middlewares not supported
    * Classes, class methods middlewares not supported

    Raises
    ------
    MiddlewareInvalidArgumentError
        When middleware receives non keyword=arguments
    """
    # Bare usage (@lambda_handler_decorator with no call): defer until the
    # middleware function itself is supplied.
    if decorator is None:
        return functools.partial(lambda_handler_decorator, trace_execution=trace_execution)

    # Explicit argument wins; otherwise fall back to the env var (default "false").
    trace_execution = resolve_truthy_env_var_choice(
        env=os.getenv(constants.MIDDLEWARE_FACTORY_TRACE_ENV, "false"), choice=trace_execution
    )

    @functools.wraps(decorator)
    def final_decorator(func: Optional[Callable] = None, **kwargs):
        # If called with kwargs return new func with kwargs
        if func is None:
            return functools.partial(final_decorator, **kwargs)
        if not inspect.isfunction(func):
            # @custom_middleware(True) vs @custom_middleware(log_event=True)
            raise MiddlewareInvalidArgumentError(
                f"Only keyword arguments is supported for middlewares: {decorator.__qualname__} received {func}"  # type: ignore # noqa: E501
            )

        @functools.wraps(func)
        def wrapper(event, context):
            try:
                # Freeze handler/event/context (+ user kwargs) so both branches
                # below can invoke the middleware identically.
                middleware = functools.partial(decorator, func, event, context, **kwargs)
                if trace_execution:
                    tracer = Tracer(auto_patch=False)
                    with tracer.provider.in_subsegment(name=f"## {decorator.__qualname__}"):
                        response = middleware()
                else:
                    response = middleware()
                return response
            except Exception:
                logger.exception(f"Caught exception in {decorator.__qualname__}")
                raise

        return wrapper

    return final_decorator
|
1672193
|
from datetime import date, timedelta, datetime
import numpy
import os
import requests
import sys
# Branches whose CircleCI workflows are analysed.
config = ["develop", "master"]
def getLastWeekTimeWindow():
    """Return (start, end) ISO dates spanning last week, Monday to Monday."""
    today = date.today()
    last_monday = today - timedelta(days=today.weekday() + 7)
    this_monday = last_monday + timedelta(days=7)
    return (last_monday.isoformat(), this_monday.isoformat())
def getLastWeekJobsFromBranch(branch_name):
    """Fetch all CircleCI jobs on `branch_name` that started during last week.

    Pages through the CircleCI v1.1 API (100 jobs per page) until the last
    fetched job predates the window start, then filters to the exact window.
    Relies on the module-level `circle_ci_token`.
    """
    start_date, end_date = getLastWeekTimeWindow()
    offset = 0
    # Ported from Python 2: print is now a function (Python 2 is EOL).
    print("Retrieving CircleCI jobs started after %s on branch %s" % (start_date, branch_name))
    print("Page %s..." % (str(offset + 1)))
    jobs = requests.get('https://circleci.com/api/v1.1/project/github/shared-components/shared-components/tree/' + branch_name + '?limit=100&circle-token=' + circle_ci_token).json()
    while jobs[-1]['start_time'] > start_date:
        offset += 1
        print("Page %s..." % (str(offset + 1)))
        jobs += requests.get('https://circleci.com/api/v1.1/project/github/shared-components/shared-components/tree/' + branch_name + '?limit=100&offset=' + str(100 * offset) + '&circle-token=' + circle_ci_token).json()
    print("Removing jobs started after %s" % end_date)
    # Comprehensions instead of filter(): Python 3's filter() is a lazy iterator.
    jobs = [job for job in jobs if job['start_time'] > start_date]
    jobs = [job for job in jobs if job['start_time'] < end_date]
    print("Number of jobs considered: %d" % (len(jobs)))
    return jobs
def buildWorkflows(jobs):
    """Group CircleCI jobs by workflow id.

    Args:
        jobs: job dicts as returned by the CircleCI v1.1 API.

    Returns:
        list of workflows, each a list of simplified job dicts
        (duration/name/stop_time/start_time/upstream/status).
    """
    workflows = {}
    for job in jobs:
        workflow_id = job['workflows']['workflow_id']
        # dict.has_key() was removed in Python 3; setdefault covers both cases.
        workflows.setdefault(workflow_id, []).append(dict(
            duration=job['build_time_millis'],
            name=job['workflows']['job_name'],
            stop_time=job['stop_time'],
            start_time=job['start_time'],
            upstream=job['workflows']['upstream_job_ids'],
            status=job['status']
        ))
    # Concrete list: Python 3's .values() is a view, callers expect a sequence.
    return list(workflows.values())
def removeFailedFlows(workflows):
    """Keep only workflows in which every job finished with status 'success'."""
    def everyJobIsSuccessful(workflow):
        return all(job["status"] == 'success' for job in workflow)
    # List comprehension instead of filter(): Python 3's filter() is lazy,
    # and callers take len() of the result.
    return [workflow for workflow in workflows if everyJobIsSuccessful(workflow)]
def computeWorkflowDuration(workflow):
    """Wall-clock seconds from the earliest job start to the latest job stop."""
    fmt = '%Y-%m-%dT%H:%M:%S.%f'

    def parse(timestamp):
        # Timestamps end in 'Z'; strip it and pad milliseconds to microseconds.
        return datetime.strptime(timestamp[:-1] + '000', fmt)

    started = min(job['start_time'] for job in workflow)
    stopped = max(job['stop_time'] for job in workflow)
    return (parse(stopped) - parse(started)).total_seconds()
def computeDurationIndicatorForBranch(branch):
    """Print the longest and median successful-workflow durations for `branch` last week."""
    jobs = getLastWeekJobsFromBranch(branch)
    workflows = buildWorkflows(jobs)
    successful_workflows = removeFailedFlows(workflows)
    print("Number of successful workflows: %d" % (len(successful_workflows)))
    # A list, not map(): Python 3's map() is a one-shot iterator and the
    # durations are consumed twice (max and median) below.
    wf_duration = [float(computeWorkflowDuration(wf)) for wf in successful_workflows]
    print("Longest %s workflow duration: %fs" % (branch, max(wf_duration)))
    print("Median %s workflow duration: %fs" % (branch, numpy.median(wf_duration)))
if __name__ == '__main__':
    # Ported from Python 2 print statements; Python 2 is EOL.
    try:
        circle_ci_token = os.environ["CIRCLE_CI_TOKEN"]
    except KeyError:
        print("Circle CI token is not set, please set environment variable CIRCLE_CI_TOKEN")
        sys.exit(1)
    for branch in config:
        print("Analysing workflows on " + branch + " branch")
        print("===================================")
        computeDurationIndicatorForBranch(branch)
        print("\n\n")
|
1672236
|
import argparse
import os
class TermColors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to the default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Prefix of generated binding-function names; must match the literal used in write_module.
FUNCTION_NAME_PREFIX = "pybind_output_fun_"
def write_module(out_file, module_name, files, additional_funcs):
    """Emit the C++ source of a pybind11 module.

    One binding function per input file (named pybind_output_fun_<basename>,
    with dots replaced by underscores) plus any extra user-supplied functions
    are forward-declared and then called with the module object.

    Args:
        out_file: writable text file receiving the generated C++.
        module_name: name passed to PYBIND11_MODULE.
        files: source files contributing one binding function each.
        additional_funcs: extra binding-function names to declare and call.
    """
    out_file.write("#include <pybind11/pybind11.h>\n")
    func_names = ["pybind_output_fun_" + os.path.basename(fn).replace(".", "_") for fn in files]
    func_names.extend(additional_funcs)
    # Forward-declare every binding function, then call each inside the module body.
    for fn in func_names:
        out_file.write("void %s(pybind11::module&);\n" % fn)
    out_file.write("PYBIND11_MODULE(%s, m) {\n" % module_name)
    out_file.write("m.doc() = \"TODO: Documentation\";\n")  # fixed generated typo "Dodumentation"
    for fn in func_names:
        out_file.write("%s(m);\n" % fn)
    # Bug fix: the "dev" fallback used to be emitted inside the same #ifdef,
    # unconditionally overwriting VERSION_INFO; it belongs in an #else branch.
    out_file.write("#ifdef VERSION_INFO\n")
    out_file.write("m.attr(\"__version__\") = VERSION_INFO;\n")
    out_file.write("#else\n")
    out_file.write("m.attr(\"__version__\") = \"dev\";\n")
    out_file.write("#endif\n")
    out_file.write("}\n")
if __name__ == "__main__":
    # CLI: generate a pybind11 module source from a list of binding files.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-o", "--output", type=str, default="a.out")
    arg_parser.add_argument("-m", "--module-name", type=str, required=True)
    arg_parser.add_argument("-f", "--files", type=str, nargs="+", required=True)
    arg_parser.add_argument("-e", "--extra-functions", type=str, nargs="*", required=True)
    args = arg_parser.parse_args()
    print(TermColors.OKGREEN + "NumpyEigen Module:" + TermColors.ENDC + args.module_name)
    # w+ truncates any existing output file before writing the module source.
    with open(args.output, 'w+') as outfile:
        write_module(outfile, args.module_name, args.files, args.extra_functions)
|
1672239
|
import os
import pytest
import manuel.ignore
import manuel.codeblock
import manuel.doctest
import manuel.testing
def make_manuel_suite(ns):
    """
    Prepare Manuel test suite.

    Test functions are injected in the given namespace.
    """
    # Wrap function so pytest does not expect an spurious "self" fixture.
    def _wrapped(func, name):
        wrapped = lambda: func()
        wrapped.__name__ = name
        return wrapped

    # Collect documentation files: every .rst/.txt under <root>/docs plus README.rst.
    cd = os.path.dirname
    path = cd(cd(cd(cd(__file__))))  # four levels up -- presumably the repo root; confirm
    doc_path = os.path.join(path, 'docs')
    readme = os.path.join(path, 'README.rst')
    files = sorted(os.path.join(doc_path, f) for f in os.listdir(doc_path))
    files = [f for f in files if f.endswith('.rst') or f.endswith('.txt')]
    files.append(readme)

    # Create manuel suite: doctests plus ignore/codeblock directives.
    m = manuel.ignore.Manuel()
    m += manuel.doctest.Manuel()
    m += manuel.codeblock.Manuel()

    # Copy tests from the suite to the global namespace
    suite = manuel.testing.TestSuite(m, *files)
    for i, test in enumerate(suite):
        name = 'test_doc_%s' % i
        # Expose each doc test as test_doc_<i>, tagged with the "documentation" mark.
        ns[name] = pytest.mark.documentation(_wrapped(test.runTest, name))
    return suite
# Build the doc-test suite at import time; when the docs tree is absent
# (e.g. an installed package without sources), skip the tests instead of failing.
try:
    make_manuel_suite(globals())
except OSError:
    print('Documentation files not found: disabling tests!')
|
1672294
|
from functools import partial
from typing import Callable, Type, Union
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import assume, given
from numpy.testing import assert_array_equal
import mygrad as mg
from mygrad import Tensor
from mygrad.math.misc.ops import MatMul
from mygrad.math.arithmetic.ops import (
Add,
Divide,
Multiply,
Positive,
Power,
Square,
Subtract,
)
from mygrad.operation_base import Operation
# Thin wrappers over Python's binary operators so each can be paired with its
# expected mygrad Op class in the parametrized test below.
def plus(x, y):
    return x + y

def minus(x, y):
    return x - y

def multiply(x, y):
    return x * y

def divide(x, y):
    return x / y

def power(x, y):
    return x ** y

def matmul(x, y):
    # matmul is only exercised for 1-D/2-D operands; other ranks are discarded
    # by hypothesis via assume().
    assume(0 < x.ndim < 3)
    return x @ y.T
@pytest.mark.parametrize(
    "func, op",
    [
        (plus, Add),
        (minus, Subtract),
        (multiply, Multiply),
        (divide, Divide),
        (power, (Power, Positive, Square)),  # can specialize
        (matmul, MatMul),
    ],
)
@given(
    arr=hnp.arrays(
        shape=hnp.array_shapes(min_dims=0, min_side=0),
        dtype=hnp.floating_dtypes(),
        elements=dict(min_value=1.0, max_value=2.0),
    )
)
def test_arithmetic_operators_between_array_and_tensor_cast_to_tensor(
    arr: np.ndarray,
    func: Callable[[Union[np.ndarray, Tensor], Union[np.ndarray, Tensor]], Tensor],
    op: Type[Operation],
):
    """Mixed ndarray/Tensor arithmetic must yield a Tensor created by `op`."""
    # tensor OP array
    tensor = Tensor(arr)
    out = func(tensor, arr)
    assert isinstance(out, Tensor)
    assert isinstance(out.creator, op)

    # array OP tensor (exercises the reflected-operator path)
    out = func(arr, tensor)
    assert isinstance(out, Tensor)
    assert isinstance(out.creator, op)

    # tensor OP tensor
    out = func(tensor, tensor)
    assert isinstance(out, Tensor)
    assert isinstance(out.creator, op)
# Factory for constant (non-differentiable) tensors used by the floor-div tests.
constant_tensor: Callable[..., Tensor] = partial(mg.tensor, constant=True)

@given(
    arr1=hnp.arrays(
        shape=st.just(tuple()) | st.just((3,)),
        dtype=st.sampled_from([float, int]),
        elements=dict(min_value=1, max_value=2),
    ),
    arr2=hnp.arrays(
        shape=st.just(tuple()) | st.just((3,)),
        dtype=st.sampled_from([float, int]),
        elements=dict(min_value=1, max_value=2),
    ),
)
@pytest.mark.parametrize(
    "f1, f2",
    [
        (constant_tensor, lambda x: x),
        (
            lambda x: x.tolist(),
            constant_tensor,
        ),  # `list/tensor` ensures __rfloordiv__ gets called
        (constant_tensor, constant_tensor),
    ],
)
def test_floor_div(arr1, arr2, f1, f2):
    """`//` involving constant tensors must match NumPy in value and dtype."""
    desired = arr1 // arr2
    actual = f1(arr1) // f2(arr2)
    assert actual.dtype == desired.dtype
    assert_array_equal(desired, actual)
def test_floor_div_is_raises_for_variable_tensors():
    # Floor division must be rejected on variable (non-constant) tensors,
    # in either operand position.
    with pytest.raises(ValueError):
        mg.tensor(1.0, constant=False) // 1
    with pytest.raises(ValueError):
        1 // mg.tensor(1.0, constant=False)
|
1672322
|
import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
_check_plot_works(df.boxplot, return_type="dict")
_check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=["one", "two"], by="indic")
_check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=["indic", "indic2"])
_check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
_check_plot_works(df.boxplot, notch=1, return_type="dict")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic", notch=1)
@pytest.mark.slow
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="X")
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot("Col1", by="X", ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
ax_axes = ax.axes
assert ax_axes is axes["A"]
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(
column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
)
assert axes["Col1"].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type="dict")
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
@pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
@pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
with pytest.raises(ValueError):
df.boxplot(return_type="NOTATYPE")
result = df.boxplot()
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="dict")
self._check_box_return_type(result, "dict")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="axes")
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="both")
self._check_box_return_type(result, "both")
@pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df["age"] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(["height", "weight", "age"], by="category")
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
_check_ax_limits(df["age"], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
@pytest.mark.slow
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
@pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = df.boxplot(return_type="axes", figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(
df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
)
def test_boxplot_numeric_data(self):
# GH 22799
df = DataFrame(
{
"a": date_range("2012-01-01", periods=100),
"b": np.random.randn(100),
"c": np.random.randn(100) + 2,
"d": date_range("2012-01-01", periods=100).astype(str),
"e": date_range("2012-01-01", periods=100, tz="UTC"),
"f": timedelta_range("1 days", periods=100),
}
)
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
@pytest.mark.parametrize(
"colors_kwd, expected",
[
(
dict(boxes="r", whiskers="b", medians="g", caps="c"),
dict(boxes="r", whiskers="b", medians="g", caps="c"),
),
(dict(boxes="r"), dict(boxes="r")),
("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
],
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
df = DataFrame(random.rand(10, 2))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"dict_colors, msg",
[(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
df = DataFrame(random.rand(10, 2))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@pytest.mark.parametrize(
    "props, expected",
    [
        ("boxprops", "boxes"),
        ("whiskerprops", "whiskers"),
        ("capprops", "caps"),
        ("medianprops", "medians"),
    ],
)
def test_specified_props_kwd(self, props, expected):
    # GH 30346
    # Matplotlib *props dicts must reach the corresponding artist group.
    df = DataFrame({k: np.random.random(100) for k in "ABC"})
    kwd = {props: dict(color="C1")}
    result = df.boxplot(return_type="dict", **kwd)
    assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Boxplot behaviour when plotting via groupby / the ``by=`` keyword."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        grouped = self.hist_df.groupby(by="gender")
        # _check_plot_works adds an ax, which triggers a UserWarning
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        # grouping on the second level of a MultiIndex
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.groupby(level=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy3(self):
        # grouping the columns after unstacking a MultiIndex
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.unstack(level=1).groupby(level=0, axis=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_grouped_plot_fignums(self):
        # groupby .plot() makes one figure per group; .boxplot() shares one
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        df = DataFrame({"height": height, "weight": weight, "gender": gender})
        gb = df.groupby("gender")
        res = gb.plot()
        assert len(self.plt.get_fignums()) == 2
        assert len(res) == 2
        tm.close()
        res = gb.boxplot(return_type="axes")
        assert len(self.plt.get_fignums()) == 1
        assert len(res) == 2
        tm.close()
        # now works with GH 5610 as gender is excluded
        res = df.groupby("gender").hist()
        tm.close()

    @pytest.mark.slow
    def test_grouped_box_return_type(self):
        df = self.hist_df
        # old style: return_type=None
        result = df.boxplot(by="gender")
        assert isinstance(result, np.ndarray)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )
        # now for groupby
        result = df.groupby("gender").boxplot(return_type="dict")
        self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
        # every explicit return_type must produce a consistent container,
        # keyed by group for groupby-boxplot and by column for by=-boxplot
        columns2 = "X B C D A G Y N Q O".split()
        df2 = DataFrame(random.randn(50, 10), columns=columns2)
        categories2 = "A B C D E F G H I J".split()
        df2["category"] = categories2 * 5
        for t in ["dict", "axes", "both"]:
            returned = df.groupby("classroom").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])
            returned = df.boxplot(by="classroom", return_type=t)
            self._check_box_return_type(
                returned, t, expected_keys=["height", "weight", "category"]
            )
            returned = df2.groupby("category").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=categories2)
            returned = df2.boxplot(by="category", return_type=t)
            self._check_box_return_type(returned, t, expected_keys=columns2)

    @pytest.mark.slow
    def test_grouped_box_layout(self):
        df = self.hist_df
        # layout too small for the number of plots must raise
        msg = "Layout of 1x1 must be larger than required size 2"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
        msg = "The 'layout' keyword is not supported when 'by' is None"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(
                column=["height", "weight", "category"],
                layout=(2, 1),
                return_type="dict",
            )
        msg = "At least one dimension of layout must be positive"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("gender").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        # GH 6769
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("classroom").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        # GH 5897
        axes = df.boxplot(
            column=["height", "weight", "category"], by="gender", return_type="axes"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        # only the bottom-row subplots keep visible x tick labels
        for ax in [axes["height"]]:
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes["weight"], axes["category"]]:
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, 2),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        # -1 in a layout dimension means "infer from the other dimension"
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, -1),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(4, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(-1, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
        box = df.groupby("classroom").boxplot(  # noqa
            column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))

    @pytest.mark.slow
    def test_grouped_box_multiple_axes(self):
        # GH 6970, GH 7069
        df = self.hist_df
        # check warning to ignore sharex / sharey
        # this check should be done in the first function which
        # passes multiple axes to plot, hist or boxplot
        # location should be changed if other test is added
        # which has earlier alphabetical order
        with tm.assert_produces_warning(UserWarning):
            fig, axes = self.plt.subplots(2, 2)
            df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
            self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        fig, axes = self.plt.subplots(2, 3)
        # draw on the first row of the caller-supplied axes grid
        with tm.assert_produces_warning(UserWarning):
            returned = df.boxplot(
                column=["height", "weight", "category"],
                by="gender",
                return_type="axes",
                ax=axes[0],
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[0])
        assert returned[0].figure is fig
        # draw on second row
        with tm.assert_produces_warning(UserWarning):
            returned = df.groupby("classroom").boxplot(
                column=["height", "weight", "category"], return_type="axes", ax=axes[1]
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[1])
        assert returned[0].figure is fig
        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            with tm.assert_produces_warning(UserWarning):
                axes = df.groupby("classroom").boxplot(ax=axes)

    def test_fontsize(self):
        # fontsize must apply to tick labels with by= grouping too
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
        self._check_ticks_props(
            df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
        )
|
1672369
|
from .lrs_layer import LowRankSig_FirstOrder, LowRankSig_HigherOrder
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import keras
from keras import backend as K
from keras.layers import Dense, BatchNormalization, Reshape, Activation
def init_lrs2_model(input_shape, num_levels, num_hidden, num_classes, recursive_tensors=True, reverse=False, use_batchnorm=True, order=1):
    """Build a sequential Keras model stacking low-rank signature (LRS) layers.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input sequence (excluding the batch axis).
    num_levels : int
        Number of signature levels returned by each LRS layer.
    num_hidden : int
        Total hidden width; shared across levels unless ``recursive_tensors``.
    num_classes : int
        Number of output classes for the final softmax layer.
    recursive_tensors : bool, optional
        If True, each layer keeps the full ``num_hidden`` width per level.
    reverse : bool, optional
        Forwarded to the LRS layers.
    use_batchnorm : bool, optional
        If True, insert a BatchNormalization layer after every activation.
    order : int, optional
        1 selects ``LowRankSig_FirstOrder``; any other value selects
        ``LowRankSig_HigherOrder`` with that order.

    Returns
    -------
    keras.Sequential
        The assembled (uncompiled) model; ``model.name`` encodes the config.
    """
    num_sig_layers = 3
    # Per-level width: without recursive tensors the hidden budget is split
    # across the signature levels (floor division keeps it an int, matching
    # the previous int(num_hidden / num_levels) for positive widths).
    num_sig_hidden = num_hidden if recursive_tensors else num_hidden // num_levels
    model = keras.Sequential()
    model.add(keras.layers.InputLayer(input_shape=input_shape))
    # All but the last LRS layer return full sequences so they can be chained;
    # each is followed by a flattening reshape + tanh (+ optional batch norm).
    for _ in range(num_sig_layers - 1):
        if order == 1:
            model.add(LowRankSig_FirstOrder(num_sig_hidden, num_levels, add_time=True, return_levels=True, return_sequences=True, reverse=reverse, recursive_tensors=recursive_tensors))
        else:
            model.add(LowRankSig_HigherOrder(num_sig_hidden, num_levels, add_time=True, return_levels=True, return_sequences=True, reverse=reverse, recursive_tensors=recursive_tensors, order=order))
        model.add(Reshape((-1, num_sig_hidden * num_levels,)))
        model.add(Activation('tanh'))
        if use_batchnorm:
            model.add(BatchNormalization(axis=-1))
    # Final LRS layer collapses the time dimension (no return_sequences).
    if order == 1:
        model.add(LowRankSig_FirstOrder(num_sig_hidden, num_levels, add_time=True, return_levels=True, reverse=reverse, recursive_tensors=recursive_tensors))
    else:
        model.add(LowRankSig_HigherOrder(num_sig_hidden, num_levels, add_time=True, return_levels=True, reverse=reverse, recursive_tensors=recursive_tensors, order=order))
    model.add(Reshape((num_sig_hidden * num_levels,)))
    model.add(Activation('tanh'))
    if use_batchnorm:
        model.add(BatchNormalization(axis=-1))
    model.add(Dense(num_classes, activation='softmax'))
    model.name = 'LRS2BN_M{}_H{}_D{}'.format(num_levels, num_hidden, order) if use_batchnorm else 'LRS2_M{}_H{}_D{}'.format(num_levels, num_hidden, order)
    return model
|
1672383
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from login.email import send_password_reset_email
from v1.models import Contact, PortalCategory, PortalTopic
# Plain registration for Contact; User is unregistered here and re-registered
# below with a customized admin that adds a password-reset action.
admin.site.register(Contact)
admin.site.unregister(User)
@admin.register(User)
class UserAdmin(UserAdmin):
    """Stock Django UserAdmin plus a bulk 'send password reset email' action."""

    # NOTE: the class deliberately shadows the imported UserAdmin; inside the
    # class body the name still resolves to the Django base class, so this
    # extends the stock action list.
    actions = UserAdmin.actions + ["send_password_reset_email"]

    def send_password_reset_email(self, request, queryset):
        # Inside the method body the name resolves to the module-level helper
        # imported from login.email (class attributes are not in method scope).
        for user in queryset:
            send_password_reset_email(user.email, request=request)
        self.message_user(
            request, "{} password reset email(s) sent".format(queryset.count())
        )

    send_password_reset_email.short_description = "Send password reset email"
# class AnswerPageInline(admin.TabularInline):
# from ask_cfpb.models import AnswerPage
# model = AnswerPage
@admin.register(PortalTopic)
class PortalTopicAdmin(admin.ModelAdmin):
    """Portal topic listing with linked Ask IDs and a page count column."""

    list_display = ("heading", "heading_es", "askids", "page_count")

    def askids(self, obj):
        # Unique Answer ids reachable through this topic's answer pages,
        # sorted ascending and rendered as a comma-separated string.
        unique_ids = {page.answer_base.pk for page in obj.answerpage_set.all()}
        return ", ".join(str(pk) for pk in sorted(unique_ids))

    def page_count(self, obj):
        # Number of answer pages attached to this topic, as a string.
        return str(obj.answerpage_set.count())

    askids.short_description = "Ask IDs"
    page_count.short_description = "Page count"
@admin.register(PortalCategory)
class PortalCategoryAdmin(admin.ModelAdmin):
    """Portal category listing with linked Ask IDs and a page count column."""

    list_display = ("heading", "heading_es", "askids", "page_count")

    def askids(self, obj):
        # Unique Answer ids reachable through this category's answer pages,
        # sorted ascending and rendered as a comma-separated string.
        unique_ids = {page.answer_base.pk for page in obj.answerpage_set.all()}
        return ", ".join(str(pk) for pk in sorted(unique_ids))

    def page_count(self, obj):
        # Number of answer pages attached to this category, as a string.
        return str(obj.answerpage_set.count())

    askids.short_description = "Ask IDs"
    page_count.short_description = "Page count"
|
1672387
|
import glob
import xml.etree.ElementTree as ET
import json
from tqdm import tqdm
import random
# Maximum taxonomy depth kept per label path.
MAX_DEPTH = 3
# +1 so slicing with [:MAX_DEPTH] keeps 3 levels besides the leading root
# segment of the classifier path — presumably; TODO confirm against the
# corpus taxonomy strings.
MAX_DEPTH += 1
# Fixed seed so the random train/test split below is reproducible.
random.seed(42)
class Hier(object):
    """A node of the label hierarchy: a label plus child nodes keyed by name."""

    def __init__(self, label):
        super(Hier, self).__init__()
        self.label = label
        self.children = {}

    def __repr__(self):
        # The node's label, a tab, then the child keys tab-separated
        # (note: a leaf renders as "label\t" — kept for file compatibility).
        return self.label + '\t' + '\t'.join(self.children)

    def __getitem__(self, key):
        # Auto-vivifying lookup: a missing child is created on first access.
        return self.children.setdefault(key, Hier(key))

    def write_hier(self, fd):
        """Write one line per internal node (leaves are skipped), depth-first."""
        if self.children:
            fd.write(repr(self) + '\n')
            for child in self.children.values():
                child.write_hier(fd)
# ---- Corpus conversion: NYT XML -> JSON train/test files + label taxonomy ----
total = 0
skipped = 0
train = 0
test = 0
all_labels = set()

train_file = open('nyt/nyt_train.json', 'w')
test_file = open('nyt/nyt_test.json', 'w')
error_file = open('nyt_err.txt', 'w')

hier_root = Hier('Root')

for file in tqdm(glob.glob('../nyt_corpus/data/*/*/*/*.xml')):
    tree = ET.parse(file)
    root = tree.getroot()
    # Taxonomic classifiers live under head/docdata/identified-content.
    meta = root.find('head').find('docdata')\
        .find('identified-content').findall('classifier')
    # Each label is the taxonomy path, truncated to MAX_DEPTH segments.
    labels = [t.text.replace(' ', '_').split('/')[:MAX_DEPTH] for t in meta
              if t.attrib['type'] == 'taxonomic_classifier']
    blocks = root.find('body').find('body.content').findall('block')
    lead_para = [t for t in blocks if t.attrib['class'] == 'lead_paragraph']
    if len(lead_para) != 1:
        skipped += 1
        error_file.write('Skipped %s: No lead-paragraph found\n' % (file))
        continue
    text = ' '.join([p.text for p in lead_para[0]])
    # Flatten the label paths into the unique set of individual segments.
    label = list(set(l for t in labels for l in t))
    if len(label) == 0:
        skipped += 1
        error_file.write('Skipped %s: No labels found\n' % (file))
        continue
    x = {}
    x["label"] = label
    x["text"] = text
    all_labels.update(label)
    total += 1
    # ~70/30 random train/test split (seeded above for reproducibility).
    if random.random() > 0.3:
        train_file.write(json.dumps(x) + "\n")
        train += 1
    else:
        test_file.write(json.dumps(x) + "\n")
        test += 1
    # Record every label path in the hierarchy (Hier auto-creates nodes).
    for label in labels:
        t_root = hier_root
        for i in label:
            t_root = t_root[i]

print('Total Docs: %i, Skipped: %i, Train: %i, Test: %i' \
    % (total, skipped, train, test))

# Close the output handles explicitly (previously they leaked until
# interpreter exit) and use context managers for the remaining writes.
train_file.close()
test_file.close()
error_file.close()

with open('nyt/nyt.taxonomy', 'w') as taxonomy_file:
    hier_root.write_hier(taxonomy_file)

with open('nyt/nyt_labels.txt', 'w') as f:
    for i in all_labels:
        f.write(i + '\n')
|
1672395
|
import pymclevel
from pymclevel.minecraft_server import MCServerChunkGenerator
from pymclevel import BoundingBox
import logging
logging.basicConfig(level=logging.INFO)
gen = MCServerChunkGenerator()
half_width = 4096
gen.createLevel("HugeWorld", BoundingBox((-half_width, 0, -half_width), (half_width, 0, half_width)))
|
1672448
|
from __future__ import absolute_import
import matplotlib
matplotlib.rc('xtick', labelsize=6)
matplotlib.rc('ytick', labelsize=6)
from numpy import arange
class small_multiples_plot(object):
    """A figure holding a grid of linked subplots plus a colorbar axes strip.

    Wraps small_multiples() and keeps the figure, the grid of axes
    (self.multiples) and a dedicated colorbar axes (self.colorbar_ax).
    """

    def __init__(self, fig=None, *args, **kwargs):
        # A figure is mandatory; remaining kwargs go to small_multiples().
        if fig is None:
            raise AssertionError("A valid figure must be passed in.")
        self.fig = fig
        # Leave room at the bottom of the figure for the colorbar strip.
        self.fig.subplots_adjust(bottom=0.20, left=0.1, right=0.9, top=0.9)
        self.colorbar_ax = fig.add_axes((0.1, 0.1, 0.8, 0.05))
        self.multiples = small_multiples(self.fig, **kwargs)

    def label_edges(self, bool_val):
        """Show/hide the axis labels on the outer (left column, bottom row) axes."""
        grid = self.multiples
        # First column: y ticks on the left edge only.
        for axis in grid[:, 0]:
            axis.yaxis.tick_left()
            axis.yaxis.set_visible(bool_val)
        # Last row: x ticks on the bottom edge only.
        for axis in grid[-1, :]:
            axis.xaxis.tick_bottom()
            axis.xaxis.set_visible(bool_val)
def small_multiples(f, rows=4, columns=5, margin=(0.0, 0.0), zoom_together=True):
    """Given a figure f, create linked subplots with given number of rows and columns.

    Parameters
    ----------
    f : matplotlib figure (or figure-like)
        Object providing subplots_adjust() and add_subplot().
    rows, columns : int
        Grid shape; subplots are created row-major, top-left first.
    margin : (float, float)
        (wspace, hspace) spacing forwarded to subplots_adjust.
    zoom_together : bool
        If True, every subplot shares the first subplot's x and y axes,
        so zooming one zooms them all.

    Returns
    -------
    numpy object array of axes instances [rows, columns], top left is [0, 0].
    All axes start with their x/y axis labels and ticks hidden; callers
    re-enable them on the edge subplots (see label_edges / __main__ demo).
    """
    from numpy import empty

    f.subplots_adjust(wspace=margin[0], hspace=margin[1])
    # Proper object array of axes (resolves the old inline TODO that built
    # it from arange + reshape).
    multiples = empty((rows, columns), dtype=object)

    commonaxis = None  # first axes created; shared by the rest when zooming together
    for row in range(rows):
        for column in range(columns):
            nth_plot = row * columns + column
            ax = f.add_subplot(rows, columns, nth_plot + 1,
                               sharex=commonaxis, sharey=commonaxis)
            if not commonaxis and zoom_together:
                commonaxis = ax
            # Leave the axes frame, but turn off axis labels and ticks.
            ax.xaxis.set_visible(False)
            ax.yaxis.set_visible(False)
            multiples[row, column] = ax
    return multiples
if __name__ == '__main__':
    # Demo: build the default 4x5 grid and re-enable axis labels on the
    # outer edges only (left column gets y labels, bottom row gets x labels).
    from pylab import figure, show  # , subplot, show
    f = figure()
    m = small_multiples(f)
    # first column
    leftside = m[:, 0]
    for ax in leftside:
        ax.yaxis.set_visible(True)
    # last row
    bottomedge = m[-1, :]
    for ax in bottomedge:
        ax.xaxis.set_visible(True)
    show()
|
1672460
|
class Document(object,IDisposable):
""" An object that represents an open Autodesk Revit project. """
def AutoJoinElements(self):
    """
    AutoJoinElements(self: Document)
    Forces the elements in the Revit document to automatically join to their
    neighbors where appropriate.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def CanEnableWorksharing(self):
    """
    CanEnableWorksharing(self: Document) -> bool
    Checks if worksharing can be enabled in the document.
    Returns: True if worksharing can be enabled in the document, False otherwise.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def Close(self,saveModified=None):
    """
    Close(self: Document, saveModified: bool) -> bool
    Closes the document with the option to save.
    saveModified: Indicates if the current document should be saved before close operation.
    Returns: False if closing procedure fails or if saving of a modified document was
    requested (saveModified=True) but failed. Also returns False if closing is
    cancelled by an external application during 'DocumentClosing' event.
    When the function succeeds, True is returned.

    Close(self: Document) -> bool
    Closes the document, saving the changes if there are any.
    Returns: False if either closing procedure fails or if saving of a modified document
    failed. Also returns False if closing is cancelled by an external application
    during 'DocumentClosing' event.
    When the function succeeds, True is returned.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def CombineElements(self,members):
    """
    CombineElements(self: Document, members: CombinableElementArray) -> GeomCombination
    Combine a set of combinable elements into a geometry combination.
    members: A list of combinable elements to be combined.
    Returns: If successful, the newly created geometry combination is returned, otherwise
    an exception with error information will be thrown.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def ConvertDetailToModelCurves(self,view,detailCurves):
    """
    ConvertDetailToModelCurves(self: Document, view: View, detailCurves: DetailCurveArray) -> ModelCurveArray
    Converts a group of DetailCurves to equivalent ModelCurves.
    view: The view where the new lines will be created.
    The lines are projected on the view workplane.
    The view workplane must be parallel to the view plane.
    detailCurves: The detail curve array to be converted.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def ConvertModelToDetailCurves(self,view,modelCurves):
    """
    ConvertModelToDetailCurves(self: Document, view: View, modelCurves: ModelCurveArray) -> DetailCurveArray
    Converts a group of ModelCurves to equivalent DetailCurves.
    view: The view where the new lines will be created.
    The lines are projected on the view plane.
    If the lines are not parallel to the view plane, lines are
    foreshortened and arcs are converted to ellipses.
    Splines are modified.
    modelCurves: The model curve array to be converted.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def ConvertModelToSymbolicCurves(self,view,modelCurves):
    """
    ConvertModelToSymbolicCurves(self: Document, view: View, modelCurves: ModelCurveArray) -> SymbolicCurveArray
    Converts a group of ModelCurves to equivalent SymbolicCurves.
    view: The view where the new lines will be created.
    The lines are projected on the view workplane.
    The view workplane must be parallel to the view plane.
    If the lines are not parallel to the view plane, lines are foreshortened
    and arcs are converted to ellipses.
    Splines are modified.
    modelCurves: The model curve array to be converted.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def ConvertSymbolicToModelCurves(self,view,symbolicCurve):
    """
    ConvertSymbolicToModelCurves(self: Document, view: View, symbolicCurve: SymbolicCurveArray) -> ModelCurveArray
    Converts a group of SymbolicCurves to equivalent ModelCurves.
    view: The view where the new lines will be created.
    The lines are projected on the view workplane.
    The view workplane must be parallel to the view plane.
    symbolicCurve: The symbolic curve array to be converted.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def Delete(self,*__args):
    """
    Delete(self: Document, elementId: ElementId) -> ICollection[ElementId]
    Deletes an element from the document given the id of that element.
    elementId: Id of the element to delete.
    Returns: The deleted element id set.

    Delete(self: Document, elementIds: ICollection[ElementId]) -> ICollection[ElementId]
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def Dispose(self):
    """Dispose(self: Document) -- releases the native document handle."""
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def EditFamily(self,loadedFamily):
    """
    EditFamily(self: Document, loadedFamily: Family) -> Document
    Gets the document of a loaded family to edit.
    loadedFamily: The loaded family in current document.
    Returns: Reference of the document of the family.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def EnableWorksharing(self,worksetNameGridLevel,worksetName):
    """
    EnableWorksharing(self: Document, worksetNameGridLevel: str, worksetName: str)
    Enables worksharing in the document.
    worksetNameGridLevel: Name of workset for grids and levels.
    worksetName: Name of workset for all other elements.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def Equals(self,obj):
    """
    Equals(self: Document, obj: object) -> bool
    Determines whether the specified System.Object equals this System.Object.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def Export(self,folder,name,*__args):
    """
    Export(self: Document, folder: str, name: str, views: ViewSet, options: FBXExportOptions) -> bool
    Exports the document in 3D-Studio Max (FBX) format.
    folder: Output folder, into which file(s) will be exported. The folder must exist.
    name: Either the name of a single file or a prefix for a set of files.
    If null or empty, automatic naming will be used.
    views: Selection of views to be exported. Only 3D views are allowed.
    options: Options applicable to the FBX format.
    Returns: Function returns true only if all specified views are exported successfully.
    The function returns False if exporting of any view fails, even if some views
    might have been exported successfully.

    Export(self: Document, folder: str, name: str, views: ICollection[ElementId], options: DWGExportOptions) -> bool
    Export(self: Document, folder: str, name: str, views: ICollection[ElementId], options: DXFExportOptions) -> bool

    Export(self: Document, folder: str, name: str, view: View3D, grossAreaPlan: ViewPlan, options: BuildingSiteExportOptions) -> bool
    Exports the document in the format of Civil Engineering design applications.
    folder: Output folder, into which file will be exported. The folder must exist.
    name: The name for the exported civil file.
    If null or empty, automatic naming will be used.
    view: 3D View to be exported.
    grossAreaPlan: All the areas on the view plan will be exported; it must be a
    'Gross Building' area plan. It must not be null.
    To check whether its area scheme is Gross Building, use the
    IsGrossBuildingArea property of Autodesk.Revit.DB.AreaScheme.
    options: Various options applicable to the format of Civil Engineering design
    applications.
    If null, all options will be set to their respective default values.
    Returns: True if successful, otherwise False.

    Export(self: Document, folder: str, name: str, views: ViewSet, options: DWFExportOptions) -> bool
    Exports the current view or a selection of views in DWF format.
    folder: Output folder, into which file(s) will be exported. The folder must exist.
    name: Either the name of a single file or a prefix for a set of files.
    If null or empty, automatic naming will be used.
    views: Selection of views to be exported.
    options: Various options applicable to the DWF format.
    If null, all options will be set to their respective default values.
    Returns: Function returns true only if all specified views are exported successfully.
    Returns False if exporting of any view fails, even if some views might have
    been exported successfully.

    Export(self: Document, folder: str, name: str, views: ViewSet, options: DWFXExportOptions) -> bool
    Exports the current view or a selection of views in DWFX format.
    folder: Output folder, into which file(s) will be exported. The folder must exist.
    name: Either the name of a single file or a prefix for a set of files.
    If null or empty, automatic naming will be used.
    views: Selection of views to be exported.
    options: Various options applicable to the DWFX format.
    If null, all options will be set to their respective default values.
    Returns: Function returns true only if all specified views are exported successfully.
    The function returns False if exporting of any view fails, even if some views
    might have been exported successfully.

    Export(self: Document, folder: str, name: str, options: NavisworksExportOptions)
    Exports a Revit project to the Navisworks .nwc format.
    folder: The name of the folder for the exported file.
    name: The name of the exported file. If it doesn't end in '.nwc', this extension
    will be added automatically.
    options: Options which control the contents of the export.

    Export(self: Document, folder: str, name: str, options: MassGBXMLExportOptions)
    Exports a gbXML file from a mass model document.
    folder: Indicates the path of a folder where to export the gbXML file.
    name: Indicates the name of the gbXML file to export. If it doesn't end with ".xml",
    extension ".xml" will be added automatically. The name cannot contain any of
    the following characters: \/:*?"<>|. Empty name is not acceptable.
    options: Options which control the contents of the export.

    Export(self: Document, folder: str, name: str, options: GBXMLExportOptions) -> bool
    Export the model in gbXML (green-building) format.
    folder: Indicates the path of a folder where to export the gbXML file.
    name: Indicates the name of the gbXML file to export. If it doesn't end with ".xml",
    extension ".xml" will be added automatically. The name cannot contain any of
    the following characters: \/:*?"<>|. Empty name is not acceptable.
    options: Options which control the contents of the export.
    Returns: True if successful, otherwise False.

    Export(self: Document, folder: str, name: str, views: ICollection[ElementId], options: DGNExportOptions) -> bool
    Export(self: Document, folder: str, name: str, views: ICollection[ElementId], options: SATExportOptions) -> bool

    Export(self: Document, folder: str, name: str, options: IFCExportOptions) -> bool
    Exports the document to the Industry Foundation Classes (IFC) format.
    folder: Output folder into which the file will be exported. The folder must exist.
    name: Either the name of a single file or a prefix for a set of files.
    If empty, automatic naming will be used.
    options: Various options applicable to the IFC format.
    If null, all options will be set to their respective default values.
    Returns: True if successful, otherwise False.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def ExportImage(self,options):
    """
    ExportImage(self: Document, options: ImageExportOptions)
    Exports a view or set of views into an image file.
    options: The options which govern the image export.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetDefaultElementTypeId(self,defaultTypeId):
    """
    GetDefaultElementTypeId(self: Document, defaultTypeId: ElementTypeGroup) -> ElementId
    Gets the default element type id with the given DefaultElementType id.
    defaultTypeId: The default element type id.
    Returns: The element type id.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetDefaultFamilyTypeId(self,familyCategoryId):
    """
    GetDefaultFamilyTypeId(self: Document, familyCategoryId: ElementId) -> ElementId
    Gets the default family type id with the given family category id.
    familyCategoryId: The family category id.
    Returns: The default family type id.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetDocumentPreviewSettings(self):
    """
    GetDocumentPreviewSettings(self: Document) -> DocumentPreviewSettings
    Returns the preview settings for the given document.
    Returns: The preview settings.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
@staticmethod
def GetDocumentVersion(doc):
    """
    GetDocumentVersion(doc: Document) -> DocumentVersion
    Gets the DocumentVersion that corresponds to a document.
    doc: The document whose DocumentVersion will be returned.
    Returns: The DocumentVersion corresponding to the given document.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetElement(self,*__args):
    """
    GetElement(self: Document, reference: Reference) -> Element
    Gets the Element referenced by the input reference.
    reference: The reference whose referenced Element will be retrieved from the model.
    Returns: The element referenced by the input argument.

    GetElement(self: Document, uniqueId: str) -> Element
    Gets the Element referenced by a unique id string.
    uniqueId: The element unique id (Autodesk.Revit.DB.Element.UniqueId) whose
    referenced Element will be retrieved from the model.
    Returns: The element referenced by the input argument.

    GetElement(self: Document, id: ElementId) -> Element
    Gets the Element referenced by the input ElementId.
    id: The ElementId whose referenced Element will be retrieved from the model.
    Returns: The element referenced by the input argument.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetHashCode(self):
    """
    GetHashCode(self: Document) -> int
    Gets the hash code of this document instance.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetPaintedMaterial(self,elementId,face):
    """
    GetPaintedMaterial(self: Document, elementId: ElementId, face: Face) -> ElementId
    Get the material painted on the element's face. Returns invalidElementId if the
    face is not painted.
    elementId: The element that the face belongs to.
    face: The painted element's face.
    Returns: The id of the material painted on the element's face.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetPrintSettingIds(self):
    """
    GetPrintSettingIds(self: Document) -> ICollection[ElementId]
    Retrieves all Print Settings of the current project.
    Returns: The ElementIds of all print setting elements.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetRoomAtPoint(self,point,phase=None):
    """
    GetRoomAtPoint(self: Document, point: XYZ, phase: Phase) -> Room
    Gets a room containing the point.
    point: Point to be checked.
    phase: Phase in which the room exists.
    Returns: The room containing the point.

    GetRoomAtPoint(self: Document, point: XYZ) -> Room
    Gets a room containing the point.
    point: Point to be checked.
    Returns: The room containing the point.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetSpaceAtPoint(self,point,phase=None):
    """
    GetSpaceAtPoint(self: Document, point: XYZ, phase: Phase) -> Space
    Gets a space containing the point.
    point: Point to be checked.
    phase: Phase in which the space exists.
    Returns: The space containing the point.

    GetSpaceAtPoint(self: Document, point: XYZ) -> Space
    Gets a space containing the point.
    point: Point to be checked.
    Returns: The space containing the point.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetUnits(self):
    """
    GetUnits(self: Document) -> Units
    Gets the Units object.
    Returns: The Units object.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetWorksetId(self,id):
    """
    GetWorksetId(self: Document, id: ElementId) -> WorksetId
    Get the Id of the Workset which owns the element.
    id: Id of the element.
    Returns: Id of the Workset which owns the element.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetWorksetTable(self):
    """
    GetWorksetTable(self: Document) -> WorksetTable
    Get the WorksetTable of this document.
    Returns: The WorksetTable of this document.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def GetWorksharingCentralModelPath(self):
    """
    GetWorksharingCentralModelPath(self: Document) -> ModelPath
    Gets the central model path of the worksharing model.
    Returns: The central model path, or null if the document is not workshared.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
def HasAllChangesFromCentral(self):
    """
    HasAllChangesFromCentral(self: Document) -> bool
    Returns whether the model in the current session is up to date with central.
    Returns: True means up to date; false means out of date.
    If central is locked but Revit can determine that the model in the current
    session is out of date without opening central, this method will return
    false instead of throwing CentralModelContentionException.
    """
    # Auto-generated stub: the implementation lives in the native Revit API.
    pass
 def Import(self,file,options,*__args):
  """
  Import(self: Document,file: str,options: DWGImportOptions,pDBView: View) -> (bool,ElementId)
  Imports a DWG or DXF file to the document.
  file: Full path of the file to import. File must exist and must be a valid DWG or DXF
  file.
  options: Various options applicable to the DWG or DXF format. If NULL,all options will be
  set to their respective default values.
  pDBView: The view into which the file will be imported.
  Returns: True if successful,otherwise False.
  Import(self: Document,file: str,options: GBXMLImportOptions) -> bool
  Imports a Green-Building XML file into the document.
  file: Full path of the file to import. File must exist.
  options: Various options applicable to GBXml import. If NULL,all options will be set to
  their respective default values.
  Returns: True if successful,otherwise False.
  Import(self: Document,file: str,options: ImageImportOptions,view: View) -> (bool,Element)
  Imports an image (a bitmap) into the document.
  file: Full path of the file to import. File must exist.
  options: Various options applicable to an image.
  If NULL,all options will be set to
  their respective default values.
  view: The view into which the image is going to be imported.
  Returns: True if successful,otherwise False.
  Import(self: Document,file: str,options: DGNImportOptions,pDBView: View) -> (bool,ElementId)
  Imports a DGN file to the document.
  file: Full path of the file to import. File must exist and must be a valid DGN file.
  options: Various options applicable to the DGN format. If NULL,all options will be set to
  their respective default values.
  pDBView: The view into which the file will be imported.
  Returns: True if successful,otherwise False.
  Import(self: Document,file: str,options: SKPImportOptions,pDBView: View) -> ElementId
  Imports an SKP file into the document.
  file: Full path of the file to link. File must exist and must be a valid SAT file.
  options: Various import options applicable to the SKP format. If NULL,all options will be
  set to their respective default values.
  pDBView: The view into which the file will be linked.
  Returns: Returns the element Id of the linked instance.
  Import(self: Document,file: str,options: SATImportOptions,pDBView: View) -> ElementId
  Imports an SAT file into the document.
  file: Full path of the file to link. File must exist and must be a valid SAT file.
  options: Various import options applicable to the SAT format. If NULL,all options will be
  set to their respective default values.
  pDBView: The view into which the file will be linked.
  Returns: Returns the element Id of the linked instance.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def IsDefaultElementTypeIdValid(self,defaultTypeId,typeId):
  """
  IsDefaultElementTypeIdValid(self: Document,defaultTypeId: ElementTypeGroup,typeId: ElementId) -> bool
  Checks whether the element type id is valid for the given DefaultElementType id.
  defaultTypeId: The default element type id.
  typeId: The element type id.
  Returns: True if the element type id is valid for the given DefaultElementType id,false
  otherwise.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def IsDefaultFamilyTypeIdValid(self,familyCategoryId,familyTypeId):
  """
  IsDefaultFamilyTypeIdValid(self: Document,familyCategoryId: ElementId,familyTypeId: ElementId) -> bool
  Checks whether the family type id is valid for the given family category.
  familyCategoryId: The family category id.
  familyTypeId: The default family type id.
  Returns: True if the family type id is valid for the given family category,false
  otherwise.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def IsPainted(self,elementId,face):
  """
  IsPainted(self: Document,elementId: ElementId,face: Face) -> bool
  Checks if the element's face is painted with a material.
  elementId: The element that the face belongs to.
  face: The painted element's face.
  Returns: True if the element's face is painted.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def Link(self,file,options,pDBView=None,elementId=None):
  """
  Link(self: Document,file: str,options: SATImportOptions,pDBView: View) -> ElementId
  Links an SAT file into the document.
  file: Full path of the file to link. File must exist and must be a valid SAT file.
  options: Various import options applicable to the SAT format. If NULL,all options will be
  set to their respective default values.
  pDBView: The view into which the file will be linked.
  Returns: Returns the element Id of the linked instance.
  Link(self: Document,file: str,options: DWGImportOptions,pDBView: View) -> (bool,ElementId)
  Links a DWG or DXF file to the document.
  file: Full path of the file to link. File must exist and must be a valid DWG or DXF
  file.
  options: Various import options applicable to the DWG or DXF format. If NULL,all options
  will be set to their respective default values.
  pDBView: The view into which the file will be linked.
  Returns: True if successful,otherwise False.
  Link(self: Document,file: str,options: SKPImportOptions,pDBView: View) -> ElementId
  Links an SKP file into the document.
  file: Full path of the file to link. File must exist and must be a valid SAT file.
  options: Various import options applicable to the SKP format. If NULL,all options will be
  set to their respective default values.
  pDBView: The view into which the file will be linked.
  Returns: Returns the element Id of the linked instance.
  Link(self: Document,file: str,options: DWFImportOptions) -> IList[ElementId]
  Links Markups in a DWF file to the document.
  file: Full path of the file to link. File must exist and must be a valid DWF file.
  options: Various link options applicable to the DWF format.
  Returns: A collection of link instance element ids created by the markup link.
  Link(self: Document,file: str,options: DGNImportOptions,pDBView: View) -> (bool,ElementId)
  Links a DGN file to the document.
  file: Full path of the file to link. File must exist and must be a valid DGN file.
  options: Various import options applicable to the DGN format. If NULL,all options will be
  set to their respective default values.
  pDBView: The view into which the file will be linked.
  Returns: True if successful,otherwise False.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def LoadFamily(self,*__args):
  """
  LoadFamily(self: Document,filename: str) -> (bool,Family)
  Loads an entire family and all its types/symbols into the document and provides
  a reference
  to the loaded family.
  filename: The fully qualified filename of the Family file,usually ending in .rfa.
  Returns: True if the entire family was loaded successfully into the project,otherwise
  False.
  LoadFamily(self: Document,filename: str) -> bool
  Loads an entire family and all its types/symbols into the document.
  filename: The fully qualified filename of the Family file,usually ending in .rfa.
  Returns: True if the entire family was loaded successfully into the project,otherwise
  False.
  LoadFamily(self: Document,filename: str,familyLoadOptions: IFamilyLoadOptions) -> (bool,Family)
  Loads an entire family and all its types/symbols into the document and provides
  a reference
  to the loaded family.
  filename: The fully qualified filename of the Family file,usually ending in .rfa.
  familyLoadOptions: The interface implementation to use when loading a family into the document.
  Returns: True if the entire family was loaded successfully into the project,otherwise
  False.
  LoadFamily(self: Document,targetDocument: Document,familyLoadOptions: IFamilyLoadOptions) -> Family
  Loads the contents of this family document into another document.
  targetDocument: The target document which the family will be loaded into.
  familyLoadOptions: The interface implementation to use when responding to conflicts during the
  load operation.
  Returns: Reference of the family in the target document.
  LoadFamily(self: Document,targetDocument: Document) -> Family
  Loads the contents of this family document into another document.
  targetDocument: The target document where the family will be loaded.
  Returns: Reference of the family in the target document.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def LoadFamilySymbol(self,filename,name,*__args):
  """
  LoadFamilySymbol(self: Document,filename: str,name: str) -> bool
  Loads only a specified family type/symbol from a family file into the document.
  filename: The fully qualified filename of the Family file,usually ending in .rfa.
  name: The name of the type/symbol to be loaded,such as "W11x14".
  Returns: True if the family type/symbol was loaded successfully into the project,
  otherwise False.
  LoadFamilySymbol(self: Document,filename: str,name: str) -> (bool,FamilySymbol)
  Loads only the specified family type/symbol from a family file into the
  document and
  provides a reference to the loaded family symbol.
  filename: The fully qualified filename of the Family file,usually ending in .rfa.
  name: The name of the type/symbol to be loaded,such as "W11x14".
  Returns: True if the family type/symbol was loaded successfully into the project,
  otherwise False.
  LoadFamilySymbol(self: Document,filename: str,name: str,familyLoadOptions: IFamilyLoadOptions) -> (bool,FamilySymbol)
  Loads only the specified family type/symbol from a family file into the
  document and
  provides a reference to the loaded family symbol.
  filename: The fully qualified filename of the Family file,usually ending in .rfa.
  name: The name of the type/symbol to be loaded,such as "W11x14".
  familyLoadOptions: The interface implementation to use when loading a family into the document.
  Returns: True if the family type/symbol was loaded successfully into the project,
  otherwise False.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def MakeTransientElements(self,maker):
  """
  MakeTransientElements(self: Document,maker: ITransientElementMaker)
  This method encapsulates the process of creating transient elements in the
  document.
  maker: An instance of a class that implements the
  Autodesk.Revit.DB.ITransientElementMaker interface.
  The maker will be
  called to create element(s) which would become transient.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def Paint(self,elementId,face,*__args):
  """
  Paint(self: Document,elementId: ElementId,face: Face,familyParameter: FamilyParameter)
  Paint the element's face with specified material.
  elementId: The element that the face belongs to.
  face: The painted element's face.
  familyParameter: The family parameter associated with a material.
  Paint(self: Document,elementId: ElementId,face: Face,materialId: ElementId)
  Paint the element's face with specified material.
  elementId: The element that the face belongs to.
  face: The painted element's face.
  materialId: The material to be painted on the face
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def PostFailure(self,failure):
  """
  PostFailure(self: Document,failure: FailureMessage) -> FailureMessageKey
  Posts a failure to be displayed to the user at the end of transaction.
  failure: The failure to be posted.
  Returns: A unique key that identifies posted failure message in a document. If exactly
  the same error is posted more than once,
  and not removed between the
  postings,returned key will be the same every time.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def Print(self,views,*__args):
  """
  Print(self: Document,views: ViewSet,useCurrentPrintSettings: bool)
  Prints a set of views with default view template and default print settings.
  views: The set of views which need to be printed.
  useCurrentPrintSettings: If true,print the view with the current print setting,
  otherwise with the
  print setting of the document of the view.
  Print(self: Document,views: ViewSet)
  Prints a set of views with default view template and default print settings.
  views: The set of views which need to be printed.
  Print(self: Document,views: ViewSet,viewTemplate: View,useCurrentPrintSettings: bool)
  Prints a set of views with a specified view template and default print settings.
  views: The set of views which need to be printed.
  viewTemplate: The view template which apply to the set of views.
  useCurrentPrintSettings: If true,print the view with the current print setting,
  otherwise with the
  print setting of the document of the view.
  Print(self: Document,views: ViewSet,viewTemplate: View)
  Prints a set of views with a specified view template and default print settings.
  views: The set of views which need to be printed.
  viewTemplate: The view template which apply to the set of views.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def Regenerate(self):
  """
  Regenerate(self: Document)
  Updates the elements in the Revit document to reflect all changes.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def ReleaseUnmanagedResources(self,*args):
  """ ReleaseUnmanagedResources(self: Document,disposing: bool) """
  pass  # stub: actual implementation is provided by the Revit runtime
 def ReleaseUnmanagedResources_(self,*args):
  """ ReleaseUnmanagedResources_(self: Document,disposing: bool) """
  pass  # stub: actual implementation is provided by the Revit runtime
 def ReloadLatest(self,reloadOptions):
  """
  ReloadLatest(self: Document,reloadOptions: ReloadLatestOptions)
  Fetches changes from central (due to one or more synchronizations with central)
  and merges them into the current session.
  reloadOptions: Various options to control behavior of reloadLatest.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def RemovePaint(self,elementId,face):
  """
  RemovePaint(self: Document,elementId: ElementId,face: Face)
  Remove the material painted on the element's face.
  If the face is currently
  not painted,it will do nothing.
  elementId: The element that the painted face belongs to.
  face: The painted element's face.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def Save(self,options=None):
  """
  Save(self: Document)
  Saves the document.
  Save(self: Document,options: SaveOptions)
  Saves the document.
  options: Options to control the Save operation.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SaveAs(self,*__args):
  """
  SaveAs(self: Document,filepath: str)
  Saves the document to a given file path.
  filepath: File name and path to be saved as. Either a relative or absolute path can be
  provided.
  SaveAs(self: Document,filepath: str,options: SaveAsOptions)
  Saves the document to a given file path.
  filepath: File name and path to be saved as. Either a relative or absolute path can be
  provided.
  options: Options to govern the SaveAs operation.
  SaveAs(self: Document,path: ModelPath,options: SaveAsOptions)
  Saves the document to a given path.
  path: Name and path to be saved as. For a file path,either a relative or absolute
  path can be provided.
  options: Options to govern the SaveAs operation.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SaveToProjectAsImage(self,options):
  """
  SaveToProjectAsImage(self: Document,options: ImageExportOptions) -> ElementId
  Creates an image view from the currently active view.
  options: The options which govern the image creation.
  Returns: Id of the newly created view if the operation succeeded,invalid element id
  otherwise.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SeparateElements(self,members):
  """
  SeparateElements(self: Document,members: CombinableElementArray)
  Separate a set of combinable elements out of combinations they currently belong
  to.
  members: A list of combinable elements to be separated.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SetDefaultElementTypeId(self,defaultTypeId,typeId):
  """
  SetDefaultElementTypeId(self: Document,defaultTypeId: ElementTypeGroup,typeId: ElementId)
  Sets the default element type id of the given DefaultElementType id.
  defaultTypeId: The default element type id.
  typeId: The element type id.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SetDefaultFamilyTypeId(self,familyCategoryId,familyTypeId):
  """
  SetDefaultFamilyTypeId(self: Document,familyCategoryId: ElementId,familyTypeId: ElementId)
  Sets the default family type id for the given family category.
  familyCategoryId: The family category id.
  familyTypeId: The default family type id.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SetUnits(self,units):
  """
  SetUnits(self: Document,units: Units)
  Sets the units.
  units: The units.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def SynchronizeWithCentral(self,transactOptions,syncOptions):
  """
  SynchronizeWithCentral(self: Document,transactOptions: TransactWithCentralOptions,syncOptions: SynchronizeWithCentralOptions)
  Performs reload latest until the model in the current session is up to date and
  then saves changes back to central.
  A save to central is performed even if
  no changes were made.
  transactOptions: Options to customize behavior accessing the central model.
  syncOptions: Options to customize behavior of SynchronizeWithCentral.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def UnpostFailure(self,messageKey):
  """
  UnpostFailure(self: Document,messageKey: FailureMessageKey)
  Deletes the posted failure message associated with a given FailureMessageKey.
  messageKey: The key of the FailureMessage to be deleted.
  """
  pass  # stub: actual implementation is provided by the Revit runtime
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass  # stub: actual implementation is provided by the Revit runtime
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==y """
  pass  # stub: actual implementation is provided by the Revit runtime
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass  # stub: actual implementation is provided by the Revit runtime
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass  # stub: actual implementation is provided by the Revit runtime
 def __ne__(self,*args):
  """ x.__ne__(y) <==> x!=y """
  pass  # stub: actual implementation is provided by the Revit runtime
 def __repr__(self,*args):
  """ __repr__(self: object) -> str """
  pass  # stub: actual implementation is provided by the Revit runtime
ActiveProjectLocation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieve the active project location.
Get: ActiveProjectLocation(self: Document) -> ProjectLocation
Set: ActiveProjectLocation(self: Document)=value
"""
ActiveView=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The document's active view.
Get: ActiveView(self: Document) -> View
"""
Application=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the Application in which the Document resides.
Get: Application(self: Document) -> Application
"""
Create=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""An object that can be used to create new instances of Autodesk Revit API elements
within a project.
Get: Create(self: Document) -> Document
"""
DisplayUnitSystem=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Provides access to display unit type with in the document.
Get: DisplayUnitSystem(self: Document) -> DisplayUnit
"""
FamilyCreate=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""An object that can be used to create new instances of Autodesk Revit API elements
within a family document.
Get: FamilyCreate(self: Document) -> FamilyItemFactory
"""
FamilyManager=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The family manager object provides access to family types and parameters.
Get: FamilyManager(self: Document) -> FamilyManager
"""
IsDetached=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Identifies if a workshared document is detached.
Also,see Autodesk.Revit.DB.Document.IsWorkshared
Get: IsDetached(self: Document) -> bool
"""
IsFamilyDocument=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Identifies if the current document is a family document.
Get: IsFamilyDocument(self: Document) -> bool
"""
IsLinked=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Identifies if a document is a linked RVT.
Get: IsLinked(self: Document) -> bool
"""
IsModifiable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The document's state of modifiability.
Get: IsModifiable(self: Document) -> bool
"""
IsModified=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The state of changes made to the document.
Get: IsModified(self: Document) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Identifies if the document is read-only or can possibly be modified.
Get: IsReadOnly(self: Document) -> bool
"""
IsReadOnlyFile=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Signals whether the document was opened from a read-only file.
Get: IsReadOnlyFile(self: Document) -> bool
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: Document) -> bool
"""
IsWorkshared=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Identifies if worksharing (i.e. editing permissions and multiple worksets) have been enabled in the document.
Also,see Autodesk.Revit.DB.Document.IsDetached
Get: IsWorkshared(self: Document) -> bool
"""
MassDisplayTemporaryOverride=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This setting controls temporary display in views of objects with mass category or subcategories.
Get: MassDisplayTemporaryOverride(self: Document) -> MassDisplayTemporaryOverrideType
Set: MassDisplayTemporaryOverride(self: Document)=value
"""
MullionTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is used to retrieve all the mullion types in current system.
Get: MullionTypes(self: Document) -> MullionTypeSet
"""
OwnerFamily=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get the Family of this Family Document.
Get: OwnerFamily(self: Document) -> Family
"""
PanelTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves a set of PanelType objects that contains all the panel types that are currently loaded into the
project.
Get: PanelTypes(self: Document) -> PanelTypeSet
"""
ParameterBindings=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves an object from which mappings between parameter definitions and categories can
be found.
Get: ParameterBindings(self: Document) -> BindingMap
"""
PathName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The fully qualified path of the document's disk file.
Get: PathName(self: Document) -> str
"""
Phases=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves all the object that represent phases within the project.
Get: Phases(self: Document) -> PhaseArray
"""
PrintManager=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieve the PrintManager of current project.
Get: PrintManager(self: Document) -> PrintManager
"""
ProjectInformation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Return the Project Information of the current project.
Get: ProjectInformation(self: Document) -> ProjectInfo
"""
ProjectLocations=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieve all the project locations associated with this project
Get: ProjectLocations(self: Document) -> ProjectLocationSet
"""
ReactionsAreUpToDate=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Reports if the analytical model has regenerated in a document with reaction loads.
Get: ReactionsAreUpToDate(self: Document) -> bool
"""
Settings=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Provides access to general application settings,such as Categories.
Get: Settings(self: Document) -> Settings
"""
SiteLocation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the site location information.
Get: SiteLocation(self: Document) -> SiteLocation
"""
Title=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The document's title.
Get: Title(self: Document) -> str
"""
WorksharingCentralGUID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The central GUID of the server-based model.
Get: WorksharingCentralGUID(self: Document) -> Guid
"""
DocumentClosing=None
DocumentPrinted=None
DocumentPrinting=None
DocumentSaved=None
DocumentSavedAs=None
DocumentSaving=None
DocumentSavingAs=None
ViewPrinted=None
ViewPrinting=None
|
1672465
|
import sqlite3
import unittest
from tests import load_resource
from tibiawikisql import Article, models, schema
class TestModels(unittest.TestCase):
    def setUp(self):
        """Create a fresh in-memory SQLite database with the wiki schema for each test."""
        self.conn = sqlite3.connect(":memory:")
        self.conn.row_factory = sqlite3.Row  # rows addressable by column name
        schema.create_tables(self.conn)
def test_achievement(self):
article = Article(1, "Demonic Barkeeper", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_achievement.txt"))
achievement = models.Achievement.from_article(article)
self.assertIsInstance(achievement, models.Achievement)
achievement.insert(self.conn)
db_achievement = models.Achievement.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_achievement, models.Achievement)
self.assertEqual(db_achievement.name, achievement.name)
db_achievement = models.Achievement.get_by_field(self.conn, "name", "demonic barkeeper", use_like=True)
self.assertIsInstance(db_achievement, models.Achievement)
def test_creature(self):
article = Article(1, "Demon", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_creature.txt"))
creature = models.Creature.from_article(article)
self.assertIsInstance(creature, models.Creature)
creature.insert(self.conn)
db_creature: models.Creature = models.Creature.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_creature, models.Creature)
self.assertEqual(db_creature.name, creature.name)
self.assertEqual(db_creature.modifier_earth, creature.modifier_earth)
# Dynamic properties
self.assertEqual(50, db_creature.charm_points)
self.assertEqual(2500, db_creature.bestiary_kills)
self.assertEqual(3, len(db_creature.immune_to))
self.assertEqual(4, len(db_creature.resistant_to))
self.assertEqual(2, len(db_creature.weak_to))
self.assertEqual(9, len(db_creature.abilities))
self.assertEqual(2500, db_creature.bestiary_kills)
self.assertEqual(50, db_creature.charm_points)
self.assertIsNotNone(db_creature.max_damage)
db_creature = models.Creature.get_by_field(self.conn, "name", "demon", use_like=True)
self.assertIsInstance(db_creature, models.Creature)
def test_house(self):
article = Article(1, "Crystal Glance", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_house.txt"))
house = models.House.from_article(article)
self.assertIsInstance(house, models.House)
house.insert(self.conn)
db_house = models.House.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_house, models.House)
self.assertEqual(db_house.name, house.name)
models.House.get_by_field(self.conn, "house_id", 55302)
self.assertIsInstance(db_house, models.House)
def test_imbuement(self):
article = Article(1, "Powerful Strike", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_imbuement.txt"))
imbuement = models.Imbuement.from_article(article)
self.assertIsInstance(imbuement, models.Imbuement)
imbuement.insert(self.conn)
db_imbuement = models.Imbuement.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_imbuement, models.Imbuement)
self.assertEqual(db_imbuement.name, imbuement.name)
self.assertEqual(db_imbuement.tier, imbuement.tier)
self.assertGreater(len(db_imbuement.materials), 0)
db_imbuement = models.Imbuement.get_by_field(self.conn, "name", "powerful strike", use_like=True)
self.assertIsInstance(db_imbuement, models.Imbuement)
def test_item(self):
article = Article(1, "Fire Sword", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_item.txt"))
item = models.Item.from_article(article)
self.assertIsInstance(item, models.Item)
item.insert(self.conn)
db_item = models.Item.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_item, models.Item)
self.assertEqual(db_item.name, item.name)
self.assertGreater(len(db_item.attributes), 0)
# Dynamic properties:
self.assertEqual(len(item.attributes_dict.keys()), len(item.attributes))
fire_sword_look_text = ('You see a fire sword (Atk:24 physical + 11 fire, Def:20 +1).'
' It can only be wielded properly by players of level 30 or higher.'
'\nIt weights 23.00 oz.\n'
'The blade is a magic flame.')
self.assertEqual(fire_sword_look_text, item.look_text)
db_item = models.Item.get_by_field(self.conn, "name", "fire sword", use_like=True)
self.assertIsInstance(db_item, models.Item)
def test_item_resist(self):
article = Article(1, "Dream Shroud", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_item_resist.txt"))
item = models.Item.from_article(article)
self.assertIsInstance(item, models.Item)
self.assertIn("energy%", item.attributes_dict)
self.assertEqual(item.attributes_dict['magic'], "+3")
dream_shroud_look_text = ('You see a dream shroud (Arm:12, magic level +3, protection energy +10%).'
' It can only be wielded properly by sorcerers and druids of level 180 or higher.'
'\nIt weights 25.00 oz.')
self.assertEqual(dream_shroud_look_text, item.look_text)
self.assertEqual(len(item.resistances), 1)
self.assertEqual(item.resistances["energy"], 10)
item.insert(self.conn)
db_item = models.Item.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_item, models.Item)
self.assertEqual(db_item.name, item.name)
self.assertGreater(len(db_item.attributes), 0)
db_item = models.Item.get_by_field(self.conn, "name", "dream shroud", use_like=True)
self.assertIsInstance(db_item, models.Item)
def test_item_sounds(self):
article = Article(1, "Mini NabBot", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_item_sounds.txt"))
item = models.Item.from_article(article)
self.assertIsInstance(item, models.Item)
self.assertEqual(len(item.sounds), 5)
item.insert(self.conn)
db_item = models.Item.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_item, models.Item)
self.assertEqual(db_item.name, item.name)
db_item = models.Item.get_by_field(self.conn, "name", "mini nabbot", use_like=True)
self.assertEqual(len(item.sounds), len(db_item.sounds))
self.assertIsInstance(db_item, models.Item)
def test_item_store(self):
article = Article(1, "Health Potion", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_item_store.txt"))
item = models.Item.from_article(article)
self.assertIsInstance(item, models.Item)
self.assertEqual(2, len(item.store_offers))
def test_item_perfect_shot(self):
article = Article(1, "Gilded Eldritch Wand", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_item_perfect_shot.txt"))
item = models.Item.from_article(article)
self.assertIsInstance(item, models.Item)
self.assertEqual(12, len(item.attributes))
self.assertIn("perfect_shot", item.attributes_dict)
self.assertIn("perfect_shot_range", item.attributes_dict)
def test_key(self):
article = Article(1, "Key 3940", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_key.txt"))
key = models.Key.from_article(article)
self.assertIsInstance(key, models.Key)
key.insert(self.conn)
db_key = models.Key.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_key, models.Key)
self.assertEqual(db_key.name, key.name)
db_key = models.Key.get_by_field(self.conn, "number", 3940)
self.assertIsInstance(db_key, models.Key)
def test_book(self):
article = Article(1, "Imperial Scripts (Book)", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_book.txt"))
book = models.Book.from_article(article)
self.assertIsInstance(book, models.Book)
book.insert(self.conn)
db_book = models.Book.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_book, models.Book)
self.assertEqual(db_book.name, book.name)
def test_npc(self):
article = Article(1, "Yaman", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_npc.txt"))
npc = models.Npc.from_article(article)
self.assertEqual(1, len(npc.jobs))
self.assertEqual("Shopkeeper", npc.job)
self.assertEqual(1, len(npc.races))
self.assertEqual("Djinn", npc.race)
self.assertIsInstance(npc, models.Npc)
npc.insert(self.conn)
db_npc = models.Npc.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_npc, models.Npc)
self.assertEqual(db_npc.name, npc.name)
article = Article(2, "Captain Bluebear", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_npc_travel.txt"))
npc = models.Npc.from_article(article)
self.assertIsInstance(npc, models.Npc)
self.assertGreater(len(npc.destinations), 0)
article = Article(3, "Shalmar", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_npc_spells.txt"))
npc = models.Npc.from_article(article)
self.assertIsInstance(npc, models.Npc)
def test_quest(self):
article = Article(1, "The Annihilator Quest", timestamp="2018-08-20T04:33:15Z",
content=load_resource("content_quest.txt"))
quest = models.Quest.from_article(article)
self.assertIsInstance(quest, models.Quest)
quest.insert(self.conn)
db_quest = models.Quest.get_by_field(self.conn, "article_id", 1)
self.assertIsInstance(db_quest, models.Quest)
self.assertEqual(db_quest.name, quest.name)
def test_spell(self):
    """Spell articles parse into Spell models and round-trip through the database."""
    # NOTE(review): the article title reads "The Annihilator Quest" but the
    # fixture is content_spell.txt — looks like a copy-paste from test_quest.
    # Confirm whether the title should be a spell name; the assertions below
    # only compare the parsed object against its own DB round-trip, so they
    # pass either way.
    article = Article(1, "The Annihilator Quest", timestamp="2018-08-20T04:33:15Z",
                      content=load_resource("content_spell.txt"))
    spell = models.Spell.from_article(article)
    self.assertIsInstance(spell, models.Spell)
    spell.insert(self.conn)
    db_spell = models.Spell.get_by_field(self.conn, "article_id", 1)
    self.assertIsInstance(db_spell, models.Spell)
    self.assertEqual(db_spell.name, spell.name)
def test_world(self):
    """Parse a game-world article, persist it, and read it back."""
    source = Article(1, "Mortera", timestamp="2018-08-20T04:33:15Z",
                     content=load_resource("content_world.txt"))
    parsed = models.World.from_article(source)
    self.assertIsInstance(parsed, models.World)
    self.assertIsInstance(parsed.trade_board, int)
    parsed.insert(self.conn)
    stored = models.World.get_by_field(self.conn, "article_id", 1)
    self.assertIsInstance(stored, models.World)
    self.assertEqual(stored.name, parsed.name)
def test_mount(self):
    """Parse a mount article (numeric fields included) and round-trip it."""
    source = Article(1, "Doombringer", timestamp="2018-08-20T04:33:15Z",
                     content=load_resource("content_mount.txt"))
    parsed = models.Mount.from_article(source)
    self.assertIsInstance(parsed, models.Mount)
    # Numeric columns must come back as ints, not strings.
    self.assertIsInstance(parsed.price, int)
    self.assertIsInstance(parsed.speed, int)
    self.assertIsInstance(parsed.buyable, int)
    parsed.insert(self.conn)
    stored = models.Mount.get_by_field(self.conn, "article_id", 1)
    self.assertIsInstance(stored, models.Mount)
    self.assertEqual(stored.name, parsed.name)
def test_charm(self):
    """Parse a charm article (cost, type, effect, version) and round-trip it."""
    source = Article(1, "Curse (Charm)", timestamp="2018-08-20T04:33:15Z",
                     content=load_resource("content_charm.txt"))
    parsed = models.Charm.from_article(source)
    self.assertIsInstance(parsed, models.Charm)
    self.assertEqual(900, parsed.cost)
    self.assertEqual("Offensive", parsed.type)
    self.assertIsInstance(parsed.effect, str)
    self.assertEqual("11.50.6055", parsed.version)
    parsed.insert(self.conn)
    stored = models.Charm.get_by_field(self.conn, "article_id", 1)
    self.assertIsInstance(stored, models.Charm)
    self.assertEqual(stored.name, parsed.name)
def test_outfit(self):
    """Parse an outfit article (premium flag, achievement) and round-trip it."""
    source = Article(1, "Barbarian Outfits", timestamp="2018-08-20T04:33:15Z",
                     content=load_resource("content_outfit.txt"))
    parsed = models.Outfit.from_article(source)
    self.assertIsInstance(parsed, models.Outfit)
    self.assertTrue(parsed.premium)
    self.assertEqual(parsed.achievement, "Brutal Politeness")
    parsed.insert(self.conn)
    stored = models.Outfit.get_by_field(self.conn, "article_id", 1)
    self.assertIsInstance(stored, models.Outfit)
    self.assertEqual(stored.name, parsed.name)
|
1672476
|
import torch
class AnchorGenerator(object):
    """Generate anchor boxes for a single feature-map level.

    Base anchors are formed from the cross product of ``scales`` and aspect
    ``ratios`` applied to a ``base_size`` square; ``grid_anchors`` then tiles
    those base anchors over a feature map at a given stride.
    """

    def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
        # Reference anchor side length, in input-image pixels.
        self.base_size = base_size
        self.scales = torch.Tensor(scales)
        self.ratios = torch.Tensor(ratios)
        # Controls the enumeration order of the (ratio, scale) cross product
        # in gen_base_anchors(); see the two branches there.
        self.scale_major = scale_major
        # Optional explicit (x, y) anchor centre; when None the centre of the
        # base_size square is used (with the "-1" inclusive-corner offset).
        self.ctr = ctr
        self.base_anchors = self.gen_base_anchors()

    @property
    def num_base_anchors(self):
        # Anchors per grid location (= len(scales) * len(ratios)).
        return self.base_anchors.size(0)

    def gen_base_anchors(self):
        """Return a (num_base_anchors, 4) tensor of [x1, y1, x2, y2] boxes
        centred on (x_ctr, y_ctr), rounded to integer coordinates."""
        w = self.base_size
        h = self.base_size
        if self.ctr is None:
            # Centre of a w x h box under the inclusive-corner convention
            # (corners at 0 and size-1) used throughout this class.
            x_ctr = 0.5 * (w - 1)
            y_ctr = 0.5 * (h - 1)
        else:
            x_ctr, y_ctr = self.ctr

        # Scale height by sqrt(ratio) and width by 1/sqrt(ratio) so the box
        # area stays constant while the aspect ratio changes.
        h_ratios = torch.sqrt(self.ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            # Outer index is ratio, inner index is scale: scales vary fastest.
            ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1)
            hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1)
        else:
            # Outer index is scale, inner index is ratio: ratios vary fastest.
            ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1)

        base_anchors = torch.stack(
            [
                x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
                x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
            ],
            dim=-1).round()

        return base_anchors

    def _meshgrid(self, x, y, row_major=True):
        # Flattened meshgrid: pairs every element of y with every element of x.
        # With row_major=True, x varies fastest in the returned vectors.
        xx = x.repeat(len(y))
        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
        if row_major:
            return xx, yy
        else:
            return yy, xx

    def grid_anchors(self, featmap_size, stride=16, device='cuda'):
        """Tile the base anchors over a (feat_h, feat_w) feature map.

        Each feature-map cell contributes num_base_anchors anchors, offset by
        `stride` input pixels per cell. Returns a (feat_h*feat_w*A, 4) tensor.
        """
        base_anchors = self.base_anchors.to(device)

        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0, feat_w, device=device) * stride
        shift_y = torch.arange(0, feat_h, device=device) * stride
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        # One (dx, dy, dx, dy) shift per grid cell, applied to both corners.
        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
        shifts = shifts.type_as(base_anchors)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)

        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        return all_anchors

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Return a bool mask of length feat_h*feat_w*num_base_anchors marking
        anchors whose grid cell lies inside the valid (e.g. un-padded) region.

        Order matches grid_anchors(): all anchors of a cell are contiguous.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        # A cell is valid when both its row and column are in range.
        valid = valid_xx & valid_yy
        # Replicate the per-cell flag across that cell's anchors.
        valid = valid[:, None].expand(
            valid.size(0), self.num_base_anchors).contiguous().view(-1)
        return valid
|
1672491
|
import base64
import contextlib
import os
from .analysis import get_names
from .emitter import Emitter, FileWriter
from .consts import (
INDENT, MAX_HIGHLIGHT_RANGE,
POSITION_ENCODING, PROTOCOL_VERSION,
)
class DefinitionMeta:
    """
    Book-keeping for a single exported source definition.

    Holds the LSIF identifiers generated when the definition was emitted,
    so that name references encountered later can be linked back to it.
    """

    def __init__(self, range_id, result_set_id, contents):
        # Identifiers emitted for the definition itself.
        self.range_id = range_id
        self.result_set_id = result_set_id
        # Hover contents associated with the definition.
        self.contents = contents
        # Accumulated as uses of this definition are discovered.
        self.reference_range_ids = set()
        self.definition_result_id = 0
class FileIndexer:
    """
    Analyse the definitions and uses in the given file and
    add an LSIF document to the emitter. As analysis is done
    on a per-file basis, this class holds the majority of the
    indexer logic.
    """

    def __init__(self, filename, emitter, project_id, verbose, exclude_content):
        self.filename = filename                # path of the source file to index
        self.emitter = emitter                  # LSIF vertex/edge emitter
        self.project_id = project_id            # id of the enclosing project vertex
        self.verbose = verbose                  # print per-def/use progress when True
        self.exclude_content = exclude_content  # skip embedding file content when True
        # Maps a definition name object -> DefinitionMeta; filled by
        # _export_definition and consumed when linking uses.
        self.definition_metas = {}

    def index(self):
        """Read the source file, emit its document vertex, and index its contents."""
        print('Indexing file {}'.format(self.filename))
        with open(self.filename) as f:
            source = f.read()

        self.source_lines = source.split('\n')

        document_args = [
            'py',
            'file://{}'.format(os.path.abspath(self.filename)),
        ]
        if not self.exclude_content:
            # Embed the raw source, base64-encoded, in the document vertex.
            encoded = base64.b64encode(source.encode('utf-8')).decode()
            document_args.append(encoded)
        self.document_id = self.emitter.emit_document(*document_args)

        with scope_events(self.emitter, 'document', self.document_id):
            self._index(source)

    def _index(self, source):
        """Emit all vertices and edges for this document."""
        # Do an initial analysis to get a list of names from
        # the source file. Some additional analysis may be
        # done lazily in later steps when needed.
        self.names = get_names(source, self.filename)

        if self.verbose:
            print('{}Searching for defs'.format(INDENT))

        # First emit everything for names defined in this
        # file. This needs to be done first as edges need
        # to be emitted only after both of their adjacent
        # vertices (i.e. defs must be emitted before uses).
        for name in self.names:
            if name.is_definition():
                self._export_definition(name)

        if self.verbose:
            print('{}Searching for uses'.format(INDENT))

        # Next, we can emit uses. Some of these names may
        # reference a definition from another file or a
        # builtin. The procedure below must account for
        # these cases.
        for name in self.names:
            self._export_uses(name)

        # Next, do any additional linking that we need to
        # do now that both definition and their uses are
        # emitted. Mainly, this populates hover tooltips
        # for uses.
        for definition, meta in self.definition_metas.items():
            self._link_uses(definition, meta)

        # Finally, link uses to their containing document
        self._emit_contains()

    def _export_definition(self, name):
        """
        Emit vertices and edges related directly to the definition of
        or assignment to a variable. Create a definition meta object
        with the generated LSIF identifiers and make it queryable by
        the same definition object.
        """
        contents = [{
            'language': 'py',
            'value': extract_text(self.source_lines, name),
        }]

        docstring = name.docstring
        if docstring:
            contents.append(docstring)

        # Emit hover tooltip and link it to a result set so that we can
        # re-use the same node for hover tooltips on usages.
        hover_id = self.emitter.emit_hoverresult({'contents': contents})
        result_set_id = self.emitter.emit_resultset()
        self.emitter.emit_textdocument_hover(result_set_id, hover_id)

        # Link result set to range
        range_id = self.emitter.emit_range(*make_ranges(name))
        self.emitter.emit_next(range_id, result_set_id)

        # Stash the identifiers generated above so we can use them
        # when exporting related uses.
        self.definition_metas[name] = DefinitionMeta(
            range_id,
            result_set_id,
            contents,
        )

        # Print progress
        self._debug_def(name)

    def _export_uses(self, name):
        """
        Emit vertices and edges related to any use of a definition.
        The definition must have already been exported by the above
        procedure.
        """
        try:
            definitions = name.definitions()
        except Exception as ex:
            # Definition resolution can fail on dynamic constructs; log and
            # skip this name rather than aborting the whole document.
            # (A stray `raise` here previously made this path unreachable.)
            print('Failed to retrieve definitions: {}'.format(str(ex)))
            return

        for definition in definitions:
            self._export_use(name, definition)

    def _export_use(self, name, definition):
        """
        Emit vertices and edges directly related to a single use of
        a definition.
        """
        meta = self.definition_metas.get(definition)
        if not meta:
            # The definition lives in another file or is a builtin.
            return

        # Print progress
        self._debug_use(name, definition)

        if name.is_definition():
            # The use and the definition are the same. It is against
            # spec to have overlapping or duplicate ranges in a single
            # document, so we re-use the one that we had generated
            # previously.
            range_id = meta.range_id
        else:
            # This must be a unique name, generate a new range vertex
            range_id = self.emitter.emit_range(*make_ranges(name))

        # Link use range to definition resultset
        self.emitter.emit_next(range_id, meta.result_set_id)

        # Lazily create the shared definitionResult the first time this
        # definition is referenced.
        if not meta.definition_result_id:
            result_id = self.emitter.emit_definitionresult()
            self.emitter.emit_textdocument_definition(meta.result_set_id, result_id)
            meta.definition_result_id = result_id

        self.emitter.emit_item(meta.definition_result_id, [meta.range_id], self.document_id)

        # Bookkeep this reference for the link procedure below
        meta.reference_range_ids.add(range_id)

    def _link_uses(self, name, meta):
        """
        Emit vertices and edges related to the relationship between a definition
        and its use(s).
        """
        if len(meta.reference_range_ids) == 0:
            return

        result_id = self.emitter.emit_referenceresult()
        self.emitter.emit_textdocument_references(meta.result_set_id, result_id)
        self.emitter.emit_item(
            result_id,
            [meta.range_id],
            self.document_id,
            'definitions',
        )
        self.emitter.emit_item(
            result_id,
            sorted(list(meta.reference_range_ids)),
            self.document_id,
            'references',
        )

    def _emit_contains(self):
        """
        Emit vertices and edges related to parentage relationship. Currently
        this links only range vertices to its containing document vertex.
        """
        all_range_ids = set()
        for meta in self.definition_metas.values():
            all_range_ids.add(meta.range_id)
            all_range_ids.update(meta.reference_range_ids)

        # Deduplicate (and sort for better testing)
        all_range_ids = sorted(list(all_range_ids))

        # Link document to project
        self.emitter.emit_contains(self.project_id, [self.document_id])
        if all_range_ids:
            # Link ranges to document
            self.emitter.emit_contains(self.document_id, all_range_ids)

    #
    # Debugging Methods

    def _debug_def(self, name):
        if not self.verbose:
            return

        print('{}Def #{}, line {}: {}'.format(
            INDENT * 2,
            self.definition_metas.get(name).range_id,
            name.line + 1,
            highlight_range(self.source_lines, name).strip()),
        )

    def _debug_use(self, name, definition):
        if not self.verbose or name == definition:
            return

        print('{}Use of #{}, line {}: {}'.format(
            INDENT * 2,
            self.definition_metas.get(definition).range_id,
            name.line + 1,
            highlight_range(self.source_lines, name),
        ))
def index(workspace, writer, verbose, exclude_content):
    """
    Recursively walk 'workspace', indexing every Python source file and
    writing the combined analysis as an LSIF dump to the given writer.
    """
    uri = 'file://{}'.format(os.path.abspath(workspace))
    emitter = Emitter(FileWriter(writer))
    emitter.emit_metadata(PROTOCOL_VERSION, POSITION_ENCODING, uri)
    project_id = emitter.emit_project('py')

    with scope_events(emitter, 'project', project_id):
        num_indexed = 0
        for root, dirs, filenames in os.walk(workspace):
            for filename in filenames:
                # Only Python sources are indexed.
                if os.path.splitext(filename)[1] != '.py':
                    continue

                num_indexed += 1
                FileIndexer(
                    os.path.join(root, filename),
                    emitter,
                    project_id,
                    verbose,
                    exclude_content,
                ).index()

        if num_indexed == 0:
            print('No files found to index')
@contextlib.contextmanager
def scope_events(emitter, scope, id):
    """Emit begin/end $event vertices around a block of emissions.

    The 'end' event is emitted even when the wrapped block raises (the
    exception still propagates); previously an exception at the yield
    point skipped the 'end' emission, leaving the dump unbalanced.
    """
    emitter.emit_event('begin', scope, id)
    try:
        yield
    finally:
        emitter.emit_event('end', scope, id)
def make_ranges(name):
    """Build the start/end position dicts for an LSIF range vertex."""
    start = {'line': name.line, 'character': name.lo}
    end = {'line': name.line, 'character': name.hi}
    return start, end
def extract_text(source_lines, name):
    """Return the (whitespace-stripped) source line containing the name."""
    # TODO(efritz) - highlight span
    line = source_lines[name.line]
    return line.strip()
def highlight_range(source_lines, name):
    """
    Return the source line where the name occurs with the range
    described by the name highlighted with an ANSI code.

    The line is trimmed at both ends (with '...' markers) so that it fits
    within MAX_HIGHLIGHT_RANGE characters while keeping the highlighted
    [lo, hi) span intact.
    """
    lo, hi = name.lo, name.hi
    trimmed_lo, trimmed_hi = False, False

    # Right-most whitespace is meaningless
    line = source_lines[name.line].rstrip()

    # Left-most whitespace is also meaningless, but we have
    # to be a bit more careful to maintain the correct range
    # of the highlighted region relative to the line.
    while line and line[0] in [' ', '\t']:
        line = line[1:]
        lo, hi = lo - 1, hi - 1

    # While we have more characters than we want AND the size
    # of the highlighted portion can be reduced to below this
    # size, try trimming single characters from the end of the
    # string and leave the highlighted portion somewhere in the
    # middle.
    while len(line) > MAX_HIGHLIGHT_RANGE and (hi - lo) < MAX_HIGHLIGHT_RANGE:
        # NOTE(review): trimmable_lo is a bool while trimmable_hi is a
        # character count; the comparisons below rely on bool/int coercion
        # (True == 1), so at most one leading character is considered
        # trimmable per iteration — confirm this asymmetry is intended.
        trimmable_lo = lo > 0
        trimmable_hi = len(line) - hi - 1
        if trimmable_lo > 0 and trimmable_lo >= trimmable_hi:
            # Drop one character from the front; shift the span left.
            line = line[1:]
            lo, hi = lo - 1, hi - 1
            trimmed_lo = True
        if trimmable_hi > 0 and trimmable_hi >= trimmable_lo:
            # Drop one character from the back; the span is unaffected.
            line = line[:-1].rstrip()
            trimmed_hi = True

    # \033[4;31m ... \033[0m underlines the span in red.
    return '{}{}\033[4;31m{}\033[0m{}{}'.format(
        '... ' if trimmed_lo else '',
        line[:lo].lstrip(),
        line[lo:hi],
        line[hi:].rstrip(),
        ' ...' if trimmed_hi else '',
    )
|
1672518
|
from alertaclient.utils import DateTime
class ApiKey:
    """An Alerta API key: owner, scopes, expiry and usage metadata."""

    def __init__(self, user, scopes, text='', expire_time=None, customer=None, **kwargs):
        # Server-assigned attributes arrive through kwargs.
        self.id = kwargs.get('id', None)
        self.key = kwargs.get('key', None)
        self.count = kwargs.get('count', 0)
        self.last_used_time = kwargs.get('last_used_time', None)
        # Caller-supplied attributes.
        self.user = user
        self.scopes = scopes
        self.text = text
        self.expire_time = expire_time
        self.customer = customer

    @property
    def type(self):
        """'read-write' or 'read-only', derived from the key's scopes."""
        return self.scopes_to_type(self.scopes)

    def __repr__(self):
        return 'ApiKey(key={!r}, user={!r}, scopes={!r}, expireTime={!r}, customer={!r})'.format(
            self.key, self.user, self.scopes, self.expire_time, self.customer)

    @classmethod
    def parse(cls, json):
        """Build an ApiKey from a decoded JSON document."""
        scopes = json.get('scopes', [])
        if not isinstance(scopes, list):
            raise ValueError('scopes must be a list')
        return ApiKey(
            id=json.get('id', None),
            key=json.get('key', None),
            user=json.get('user', None),
            scopes=scopes or list(),
            text=json.get('text', None),
            expire_time=DateTime.parse(json.get('expireTime')),
            count=json.get('count', None),
            last_used_time=DateTime.parse(json.get('lastUsedTime')),
            customer=json.get('customer', None)
        )

    def scopes_to_type(self, scopes):
        """Any write or admin scope makes the key read-write."""
        writable = any(scope.startswith(('write', 'admin')) for scope in scopes)
        return 'read-write' if writable else 'read-only'

    def tabular(self, timezone=None):
        """Row dict for tabular CLI display, with timestamps localised."""
        return {
            'id': self.id,
            'key': self.key,
            'user': self.user,
            'scopes': ','.join(self.scopes),
            'text': self.text,
            'expireTime': DateTime.localtime(self.expire_time, timezone),
            'count': self.count,
            'lastUsedTime': DateTime.localtime(self.last_used_time, timezone),
            'customer': self.customer
        }
|
1672549
|
# Read the grid of single-digit heights from input.txt.
# (Use a context manager so the file handle is always closed;
# the original left it open.)
with open('input.txt') as f:
    lines = [l.strip() for l in f.readlines()]
inputs = [[int(j) for j in i] for i in lines]
hlen = len(lines[0]) # 100
vlen = len(lines) # 100
def adj1(l, i):
    """Indices adjacent to position i on a line of length l (one or two)."""
    if i == 0:
        return [1]
    if i == l - 1:
        return [l - 2]
    return [i - 1, i + 1]
def adj(i, j):
    """Orthogonal in-grid neighbours of cell (i, j)."""
    vertical = [(ii, j) for ii in adj1(vlen, i)]
    horizontal = [(i, jj) for jj in adj1(hlen, j)]
    return vertical + horizontal
# Part 1: a low point is strictly lower than every orthogonal neighbour;
# its risk level is its height plus one.
min_s = []
for i in range(vlen):
    for j in range(hlen):
        here = inputs[i][j]
        if all(here < inputs[ii][jj] for (ii, jj) in adj(i, j)):
            min_s.append(here + 1)
print(f'part 1: {sum(min_s)}') # 423
def sizebasin(i, j):
    """Flood-fill the basin containing (i, j), marking visited cells as 9.

    Returns the basin size, or 0 if the cell is already height 9 (a wall
    or previously visited). Iterative with an explicit stack: the original
    recursive version would exceed the default recursion limit on any
    basin larger than ~1000 cells.
    """
    if inputs[i][j] >= 9:
        return 0
    inputs[i][j] = 9
    stack = [(i, j)]
    size = 0
    while stack:
        ci, cj = stack.pop()
        size += 1
        for (ii, jj) in adj(ci, cj):
            if inputs[ii][jj] < 9:
                inputs[ii][jj] = 9
                stack.append((ii, jj))
    return size
# Part 2: flood-fill every remaining basin and multiply the three largest.
basins = [sizebasin(i, j)
          for i in range(vlen)
          for j in range(hlen)
          if inputs[i][j] < 9]
basins.sort()
print(f'part 2: { basins[-1]*basins[-2]*basins[-3] }') # 1198704
|
1672556
|
import covertool
#!/usr/bin/env python
#
# Copyright (c) 2008, <NAME> <bbb [at] cs.unc.edu>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""An implementation of Binomial Heaps.
From Wikipedia:
A binomial heap is a heap similar to a binary heap but also supporting the
operation of merging two heaps quickly. This is achieved by using a special
tree structure.
All of the following operations work in O(log n) time on a binomial heap with
n elements:
- Insert a new element to the heap
- Find the element with minimum key
- Delete the element with minimum key from the heap
- Decrease key of a given element
- Delete given element from the heap
- Merge two given heaps to one heap
More details: http://en.wikipedia.org/wiki/Binomial_heap
This implementation is based on the description in CLRS.
"""
class ItemRef(object):
    """Reference to an item in the heap. Used for decreasing keys and deletion.

    Do not use this class directly; only use instances returned by
    BinomialHeap.insert()!

    You should only use ItemRef.delete() and ItemRef.decrease(new_priority).
    """
    # NOTE: Python 2 module; the covertool.cover() calls are coverage
    # instrumentation and part of observable behaviour — do not remove.

    def __init__(self, node, get_heap):
        covertool.cover("bh.py:59")
        # The referenced internal BinomialHeap.Node.
        self.ref = node
        covertool.cover("bh.py:60")
        # Callable resolving to the heap currently owning the node
        # (heaps can merge, so this is indirected).
        self.get_heap = get_heap
        covertool.cover("bh.py:61")
        # Cleared when the item is extracted from the heap.
        self.in_tree = True

    def __str__(self):
        covertool.cover("bh.py:64")
        if self.in_tree:
            covertool.cover("bh.py:65")
            return "<BinomialHeap Reference to '%s'>" % str(self.ref.val)
        else:
            covertool.cover("bh.py:67")
            return "<stale BinomialHeap Reference>"

    def decrease(self, new_key):
        covertool.cover("bh.py:70")
        "Update the priority of the referenced item to a lower value."
        covertool.cover("bh.py:71")
        assert self.in_tree
        covertool.cover("bh.py:72")
        assert self.ref.ref == self
        covertool.cover("bh.py:73")
        self.ref.decrease(new_key)

    def delete(self):
        """Remove the referenced item from the heap.
        """
        covertool.cover("bh.py:78")
        # Decrease the key to the ItemRef itself: __lt__ below makes it
        # behave like negative infinity, floating the node to the root...
        self.decrease(self)
        covertool.cover("bh.py:79")
        # ...so that extract_min removes exactly this item.
        v = self.get_heap().extract_min()
        covertool.cover("bh.py:80")
        assert not self.in_tree
        covertool.cover("bh.py:81")
        assert v is self.ref.val

    def in_heap(self, heap):
        """Returns True if the referenced item is part of the BinomialHeap 'heap';
        False otherwise.
        """
        covertool.cover("bh.py:87")
        return self.in_tree and self.get_heap() == heap

    def __lt__(self, other):
        covertool.cover("bh.py:90")
        "Behaves like negative infinity: always True."
        covertool.cover("bh.py:91")
        return True

    def __gt__(self, other):
        covertool.cover("bh.py:94")
        "Behaves like negative infinity: always False."
        covertool.cover("bh.py:95")
        return False
class BinomialHeap(object):
    """Usage:
    > H1 = BinomialHeap()
    > H1.insert(40, "fast.")
    > H1.insert(10, "Merging")
    > H2 = BinomialHeap([(30, "quite"), (20, "is")])
    > H1 += H2
    > for x in H1:
    > print x,
    => "Merging is quite fast."
    """
    # NOTE: Python 2 module (see the raise syntax in __contains__ and the
    # next()/__nonzero__ protocol methods). The covertool.cover() calls are
    # coverage instrumentation and part of observable behaviour.

    class Node(object):
        covertool.cover("bh.py:112")
        "Internal node of the heap. Don't use directly."

        def __init__(self, get_heap, key, val=None):
            covertool.cover("bh.py:114")
            self.degree = 0
            covertool.cover("bh.py:115")
            self.parent = None
            covertool.cover("bh.py:116")
            self.next = None
            covertool.cover("bh.py:117")
            self.child = None
            covertool.cover("bh.py:118")
            self.key = key
            covertool.cover("bh.py:119")
            # Stable external handle; survives key swaps in decrease().
            self.ref = ItemRef(self, get_heap)
            covertool.cover("bh.py:120")
            if val == None:
                covertool.cover("bh.py:121")
                val = key
            covertool.cover("bh.py:122")
            self.val = val

        # NOTE(review): defined in the class body without self and called
        # as a plain name from __str__ below; class-body names are not in
        # scope inside methods, so str(Node) would raise NameError unless
        # a module-level getStrKey exists — confirm.
        def getStrKey(x):
            covertool.cover("bh.py:125")
            if x:
                covertool.cover("bh.py:126")
                return str(x.key)
            else:
                covertool.cover("bh.py:128")
                return 'NIL'

        def __str__(self):
            covertool.cover("bh.py:131")
            return '(%s, c:%s, n:%s)' % (getStrKey(self), getStrKey(self.child), getStrKey(self.next))

        def link(self, other):
            covertool.cover("bh.py:134")
            "Makes other a subtree of self."
            covertool.cover("bh.py:135")
            other.parent = self
            covertool.cover("bh.py:136")
            other.next = self.child
            covertool.cover("bh.py:137")
            self.child = other
            covertool.cover("bh.py:138")
            self.degree += 1

        def decrease(self, new_key):
            # Lower this node's key, then restore the heap property by
            # swapping key/value/ref pairs up the tree (nodes stay put).
            covertool.cover("bh.py:141")
            node = self
            covertool.cover("bh.py:142")
            assert new_key < node.key
            covertool.cover("bh.py:143")
            node.key = new_key
            covertool.cover("bh.py:144")
            cur = node
            covertool.cover("bh.py:145")
            parent = cur.parent
            covertool.cover("bh.py:146")
            while parent and cur.key < parent.key:
                # need to bubble up
                # swap refs
                covertool.cover("bh.py:149")
                parent.ref.ref, cur.ref.ref = cur, parent
                covertool.cover("bh.py:150")
                parent.ref, cur.ref = cur.ref, parent.ref
                # now swap keys and payload
                covertool.cover("bh.py:152")
                parent.key, cur.key = cur.key, parent.key
                covertool.cover("bh.py:153")
                parent.val, cur.val = cur.val, parent.val
                # step up
                covertool.cover("bh.py:155")
                cur = parent
                covertool.cover("bh.py:156")
                parent = cur.parent

        @staticmethod
        def roots_merge(h1, h2):
            """Merge two lists of heap roots, sorted by degree.
            Returns the new head.
            """
            covertool.cover("bh.py:163")
            if not h1:
                covertool.cover("bh.py:164")
                return h2
            covertool.cover("bh.py:165")
            if not h2:
                covertool.cover("bh.py:166")
                return h1
            covertool.cover("bh.py:167")
            # Pick the smaller-degree head, then zip the remainders.
            if h1.degree < h2.degree:
                covertool.cover("bh.py:168")
                h = h1
                covertool.cover("bh.py:169")
                h1 = h.next
            else:
                covertool.cover("bh.py:171")
                h = h2
                covertool.cover("bh.py:172")
                h2 = h2.next
            covertool.cover("bh.py:173")
            p = h
            covertool.cover("bh.py:174")
            while h2 and h1:
                covertool.cover("bh.py:175")
                if h1.degree < h2.degree:
                    covertool.cover("bh.py:176")
                    p.next = h1
                    covertool.cover("bh.py:177")
                    h1 = h1.next
                else:
                    covertool.cover("bh.py:179")
                    p.next = h2
                    covertool.cover("bh.py:180")
                    h2 = h2.next
                covertool.cover("bh.py:181")
                p = p.next
            covertool.cover("bh.py:182")
            if h2:
                covertool.cover("bh.py:183")
                p.next = h2
            else:
                covertool.cover("bh.py:185")
                p.next = h1
            covertool.cover("bh.py:186")
            return h

        @staticmethod
        def roots_reverse(h):
            """Reverse the heap root list.
            Returns the new head. Also clears parent references.
            """
            covertool.cover("bh.py:193")
            if not h:
                covertool.cover("bh.py:194")
                return None
            covertool.cover("bh.py:195")
            tail = None
            covertool.cover("bh.py:196")
            next = h
            covertool.cover("bh.py:197")
            h.parent = None
            covertool.cover("bh.py:198")
            while h.next:
                covertool.cover("bh.py:199")
                next = h.next
                covertool.cover("bh.py:200")
                h.next = tail
                covertool.cover("bh.py:201")
                tail = h
                covertool.cover("bh.py:202")
                h = next
            covertool.cover("bh.py:203")
            h.parent = None
            covertool.cover("bh.py:204")
            h.next = tail
            covertool.cover("bh.py:205")
            return h

    class __Ref(object):
        # Forwarding pointer to the heap that currently owns a node.
        # After union(), the absorbed heap's __Ref points at the winner's,
        # forming a chain that get_heap_ref() path-compresses.
        def __init__(self, h):
            covertool.cover("bh.py:209")
            self.heap = h
            covertool.cover("bh.py:210")
            self.ref = None

        def get_heap_ref(self):
            covertool.cover("bh.py:212")
            if not self.ref:
                covertool.cover("bh.py:213")
                return self
            else:
                # compact
                covertool.cover("bh.py:216")
                self.ref = self.ref.get_heap_ref()
                covertool.cover("bh.py:217")
                return self.ref

        def get_heap(self):
            covertool.cover("bh.py:219")
            return self.get_heap_ref().heap

    def __init__(self, lst=[]):
        """Populate a new heap with the (key, value) pairs in 'lst'.
        If the elements of lst are not subscriptable, then they are treated as
        opaque elements and inserted into the heap themselves.
        """
        # NOTE(review): mutable default lst=[] — safe here because lst is
        # only iterated, never mutated.
        covertool.cover("bh.py:226")
        self.head = None
        covertool.cover("bh.py:227")
        self.size = 0
        covertool.cover("bh.py:228")
        self.ref = BinomialHeap.__Ref(self)
        covertool.cover("bh.py:229")
        for x in lst:
            covertool.cover("bh.py:230")
            try:
                covertool.cover("bh.py:231")
                self.insert(x[0], x[1])
            except TypeError:
                covertool.cover("bh.py:233")
                self.insert(x)

    def insert(self, key, value=None):
        """Insert 'value' in to the heap with priority 'key'. If 'value' is omitted,
        then 'key' is used as the value.
        Returns a reference (of type ItemRef) to the internal node in the tree.
        Use this reference to delete the key or to change its priority.
        """
        covertool.cover("bh.py:241")
        n = BinomialHeap.Node(self.ref.get_heap, key, value)
        covertool.cover("bh.py:242")
        self.__union(n)
        covertool.cover("bh.py:243")
        self.size += 1
        covertool.cover("bh.py:244")
        return n.ref

    def union(self, other):
        """Merge 'other' into 'self'. Returns None.
        Note: This is a destructive operation; 'other' is an empty heap afterwards.
        """
        covertool.cover("bh.py:250")
        assert (self != other)
        covertool.cover("bh.py:251")
        self.size = self.size + other.size
        covertool.cover("bh.py:252")
        h2 = other.head
        covertool.cover("bh.py:253")
        self.__union(h2)
        covertool.cover("bh.py:254")
        # Redirect other's heap-ref chain at us so stale ItemRefs resolve here.
        other.ref.ref = self.ref
        covertool.cover("bh.py:255")
        other.__init__()

    def min(self):
        """Returns the value with the minimum key (= highest priority) in the heap
        without removing it, or None if the heap is empty.
        """
        covertool.cover("bh.py:261")
        pos = self.__min()
        covertool.cover("bh.py:262")
        if pos:
            covertool.cover("bh.py:263")
            return pos[0].val
        else:
            covertool.cover("bh.py:265")
            return None

    def extract_min(self):
        """Returns the value with the minimum key (= highest priority) in the heap
        AND removes it from the heap, or None if the heap is empty.
        """
        # find mininum
        covertool.cover("bh.py:272")
        pos = self.__min()
        covertool.cover("bh.py:273")
        if not pos:
            covertool.cover("bh.py:274")
            return None
        else:
            covertool.cover("bh.py:276")
            (x, prev) = pos
            # remove from list
            covertool.cover("bh.py:278")
            if prev:
                covertool.cover("bh.py:279")
                prev.next = x.next
            else:
                covertool.cover("bh.py:281")
                self.head = x.next
            covertool.cover("bh.py:282")
            # Re-absorb the removed root's children (reversed so they are
            # again sorted by increasing degree).
            kids = BinomialHeap.Node.roots_reverse(x.child)
            covertool.cover("bh.py:283")
            self.__union(kids)
            covertool.cover("bh.py:284")
            x.ref.in_tree = False
            covertool.cover("bh.py:285")
            self.size -= 1
            covertool.cover("bh.py:286")
            return x.val

    def __nonzero__(self):
        """True if the heap is not empty; False otherwise."""
        # Python 2 truth-value protocol (no cover instrumentation here).
        return self.head != None

    def __iter__(self):
        """Returns a _destructive_ iterator over the values in the heap.

        This violates the iterator protocol slightly, but is very useful.
        """
        # NOTE(review): the instrumenter swallowed covertool.cover("bh.py:294")
        # into this docstring in the original; left un-instrumented here.
        return self

    def __len__(self):
        """Returns the number of items in this heap."""
        covertool.cover("bh.py:300")
        return self.size

    def __setitem__(self, key, value):
        """Insert.
        H[key] = value is equivalent to H.insert(key, value)
        """
        covertool.cover("bh.py:306")
        self.insert(key, value)

    def __iadd__(self, other):
        """Merge.
        a += b is equivalent to a.union(b).
        """
        covertool.cover("bh.py:312")
        self.union(other)
        covertool.cover("bh.py:313")
        return self

    def next(self):
        """Returns the value with the minimum key (= highest priority) in the heap
        AND removes it from the heap; raises StopIteration if the heap is empty.
        """
        covertool.cover("bh.py:319")
        if self.head:
            covertool.cover("bh.py:320")
            return self.extract_min()
        else:
            covertool.cover("bh.py:322")
            raise StopIteration

    def __contains__(self, ref):
        """Test whether a given reference 'ref' (of ItemRef) is in this heap.
        """
        covertool.cover("bh.py:327")
        if type(ref) != ItemRef:
            covertool.cover("bh.py:328")
            raise TypeError, "Expected an ItemRef"
        else:
            covertool.cover("bh.py:330")
            return ref.in_heap(self)

    def __min(self):
        # Scan the root list for the minimum key; returns (node, predecessor)
        # so extract_min can unlink it, or None when empty.
        covertool.cover("bh.py:333")
        if not self.head:
            covertool.cover("bh.py:334")
            return None
        covertool.cover("bh.py:335")
        min = self.head
        covertool.cover("bh.py:336")
        min_prev = None
        covertool.cover("bh.py:337")
        prev = min
        covertool.cover("bh.py:338")
        cur = min.next
        covertool.cover("bh.py:339")
        while cur:
            covertool.cover("bh.py:340")
            if cur.key < min.key:
                covertool.cover("bh.py:341")
                min = cur
                covertool.cover("bh.py:342")
                min_prev = prev
            covertool.cover("bh.py:343")
            prev = cur
            covertool.cover("bh.py:344")
            cur = cur.next
        covertool.cover("bh.py:345")
        return (min, min_prev)

    def __union(self, h2):
        # CLRS binomial-heap union: merge root lists by degree, then link
        # equal-degree trees so at most one tree of each degree remains.
        covertool.cover("bh.py:348")
        if not h2:
            # nothing to do
            covertool.cover("bh.py:350")
            return
        covertool.cover("bh.py:351")
        h1 = self.head
        covertool.cover("bh.py:352")
        if not h1:
            covertool.cover("bh.py:353")
            self.head = h2
            covertool.cover("bh.py:354")
            return
        covertool.cover("bh.py:355")
        h1 = BinomialHeap.Node.roots_merge(h1, h2)
        covertool.cover("bh.py:356")
        prev = None
        covertool.cover("bh.py:357")
        x = h1
        covertool.cover("bh.py:358")
        next = x.next
        covertool.cover("bh.py:359")
        while next:
            covertool.cover("bh.py:360")
            if x.degree != next.degree or \
               (next.next and next.next.degree == x.degree):
                covertool.cover("bh.py:362")
                prev = x
                covertool.cover("bh.py:363")
                x = next
            elif x.key <= next.key:
                # x becomes the root of next
                covertool.cover("bh.py:366")
                x.next = next.next
                covertool.cover("bh.py:367")
                x.link(next)
            else:
                # next becomes the root of x
                covertool.cover("bh.py:370")
                if not prev:
                    # update the "master" head
                    covertool.cover("bh.py:372")
                    h1 = next
                else:
                    # just update previous link
                    covertool.cover("bh.py:375")
                    prev.next = next
                covertool.cover("bh.py:376")
                next.link(x)
                # x is not toplevel anymore, update ref by advancing
                covertool.cover("bh.py:378")
                x = next
            covertool.cover("bh.py:379")
            next = x.next
        covertool.cover("bh.py:380")
        self.head = h1
def heap(lst=None):
    """Create a new heap. lst should be a sequence of (key, value) pairs.

    Shortcut for BinomialHeap(lst).  The default is None instead of a
    mutable [] literal (the shared-default-argument pitfall); omitting the
    argument or passing None yields an empty heap, exactly as before.
    """
    covertool.cover("bh.py:386")
    if lst is None:
        lst = []
    return BinomialHeap(lst)
covertool.cover("bh.py:388")
if __name__ == "__main__":
    covertool.cover("bh.py:389")
    # Demo / smoke test: each (key, text) token is inserted out of order;
    # iterating the merged heap in key order reassembles two UNC songs.
    tokens1 = [(24, 'all'), (16, 'star'), (9, 'true.\nSinging'), (7, 'clear'),
               (25, 'praises'), (13, 'to'), (5, 'Heel'),
               (6, 'voices\nRinging'), (26, 'thine.'), (21, 'shine\nCarolina'),
               (117, 'Rah,'), (102, 'Tar'), (108, 'bred\nAnd'), (125, 'Rah!'),
               (107, 'Heel'), (118, 'Rah,'), (111, "die\nI'm"),
               (115, 'dead.\nSo'), (120, 'Rah,'), (121, "Car'lina-lina\nRah,"),
               (109, 'when'), (105, 'a'), (123, "Car'lina-lina\nRah!"),
               (110, 'I'), (114, 'Heel'), (101, 'a'), (106, 'Tar'),
               (18, 'all\nClear'), (14, 'the')]
    covertool.cover("bh.py:398")
    tokens2 = [(113, 'Tar'), (124, 'Rah!'), (112, 'a'), (103, 'Heel'),
               (104, "born\nI'm"), (122, 'Rah,'), (119, "Car'lina-lina\nRah,"),
               (2, 'sound'), (20, 'radiance'), (12, 'N-C-U.\nHail'),
               (10, "Carolina's"), (3, 'of'), (17, 'of'),
               (23, 'gem.\nReceive'), (19, 'its'), (0, '\nHark'),
               (22, 'priceless'), (4, 'Tar'), (1, 'the'), (8, 'and'),
               (15, 'brightest'), (11, 'praises.\nShouting'),
               (100, "\nI'm"), (116, "it's")]
    covertool.cover("bh.py:406")
    h1 = heap(tokens1)
    covertool.cover("bh.py:407")
    h2 = heap(tokens2)
    covertool.cover("bh.py:408")
    h3 = heap()
    covertool.cover("bh.py:409")
    # Separator lines and song titles, keyed to land between/around the songs.
    line = "\n==================================="
    covertool.cover("bh.py:410")
    h3[90] = line
    covertool.cover("bh.py:411")
    h3[-2] = line
    covertool.cover("bh.py:412")
    h3[200] = line
    covertool.cover("bh.py:413")
    h3[201] = '\n\n'
    covertool.cover("bh.py:414")
    # insert() returns an ItemRef handle used below for decrease/delete.
    t1ref = h3.insert(1000, "\nUNC Alma Mater:")
    covertool.cover("bh.py:415")
    t2ref = h3.insert(120, "\nUNC Fight Song:")
    covertool.cover("bh.py:416")
    # Items that will be deleted via their refs before printing.
    bad = [h3.insert(666, "Dook"),
           h3.insert(666, "Go Devils!"),
           h3.insert(666, "Blue Devils") ]
    ref = bad[0]
    # Exercise ItemRef membership testing before and after each merge.
    print "%s: \n\tin h1: %s\n\tin h2: %s\n\tin h3: %s" % \
        (str(ref), ref in h1, ref in h2, ref in h3)
    print "Merging h3 into h2..."
    h2 += h3
    print "%s: \n\tin h1: %s\n\tin h2: %s\n\tin h3: %s" % \
        (str(ref), ref in h1, ref in h2, ref in h3)
    print "Merging h2 into h1..."
    h1 += h2
    print "%s: \n\tin h1: %s\n\tin h2: %s\n\tin h3: %s" % \
        (str(ref), ref in h1, ref in h2, ref in h3)
    # decrease-key moves each title in front of its song.
    t1ref.decrease(-1)
    t2ref.decrease(99)
    for ref in bad:
        covertool.cover("bh.py:440")
        ref.delete()
    covertool.cover("bh.py:441")
    # Iteration consumes the heap in key order; trailing comma keeps the
    # Python 2 print on one line per token.
    for x in h1:
        covertool.cover("bh.py:442")
        print x,
|
1672625
|
from unittest import TestCase
from app import create_app
class TestWelcome(TestCase):
    """Smoke test for the API root endpoint."""

    def setUp(self):
        # A fresh Flask test client per test keeps cases independent.
        self.app = create_app().test_client()

    def test_welcome(self):
        """The root endpoint responds with the hello-world JSON payload."""
        response = self.app.get('/api/')
        expected = {"message": 'Hello World!'}
        self.assertEqual(expected, response.get_json())
|
1672676
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from functools import partial
from model.rnncell import RNNCell
from model.orthogonalcell import OrthogonalLinear
from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer
from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual
# Accepted spellings for each ODE-discretization scheme; the cells below
# match their `discretization` argument against these lists.
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class MemoryCell(RNNCell):
    """Base class for RNN cells that carry an explicit memory state.

    The recurrent state is a triple (h, m, t): hidden state h of size
    hidden_size, memory m of shape (memory_size, memory_order), and the
    integer time step t.  Subclasses implement update_memory() to evolve m.

    The `architecture` flags (see default_architecture) switch individual
    connections on or off:
        ux / uh: input / hidden -> memory feature u
        um:      memory         -> memory feature u
        hx / hm: input / memory -> hidden
        hh:      hidden         -> hidden (recurrent matrix)
    NOTE(review): `self.initializers`, `self.architecture`,
    `self.hidden_activation` and `reset_hidden_to_hidden` are presumably
    populated by the RNNCell base class -- confirm in model/rnncell.py.
    """
    # Registry name; subclasses override (e.g. 'tlsi', 'tlti').
    name = None
    # Keys accepted in the `initializers` dict.
    valid_keys = ['uxh', 'ux', 'uh', 'um', 'hxm', 'hx', 'hm', 'hh', 'bias', ]
    def default_initializers(self):
        # Default weight-init scheme per connection; individual keys can be
        # overridden by passing `initializers` to the constructor.
        return {
            'uxh': 'uniform',
            'hxm': 'xavier',
            'hx': 'xavier',
            'hm': 'xavier',
            'um': 'zero',
            'hh': 'xavier',
        }
    def default_architecture(self):
        # Which connections exist by default (see class docstring).
        return {
            'ux': True,
            # 'uh': True,
            'um': False,
            'hx': True,
            'hm': True,
            'hh': False,
            'bias': True,
        }
    def __init__(self, input_size, hidden_size, memory_size, memory_order,
                 memory_activation='id',
                 gate='G', # 'N' | 'G' | 'UR'
                 memory_output=False,
                 **kwargs
                 ):
        """
        input_size / hidden_size: usual RNN dimensions.
        memory_size, memory_order: memory m has shape (memory_size, memory_order).
        memory_activation: nonlinearity applied to the memory feature u.
        gate: gating mechanism name forwarded to Gate (None disables gating).
        memory_output: if True, output() concatenates flattened m onto h.
        """
        self.memory_size = memory_size
        self.memory_order = memory_order
        self.memory_activation = memory_activation
        self.gate = gate
        self.memory_output = memory_output
        super(MemoryCell, self).__init__(input_size, hidden_size, **kwargs)
        # Disabled connections contribute a zero-width slice to the concat.
        self.input_to_hidden_size = self.input_size if self.architecture['hx'] else 0
        self.input_to_memory_size = self.input_size if self.architecture['ux'] else 0
        # Construct and initialize u
        self.W_uxh = nn.Linear(self.input_to_memory_size + self.hidden_size, self.memory_size,
                               bias=self.architecture['bias'])
        # nn.init.zeros_(self.W_uxh.bias)
        if 'uxh' in self.initializers:
            get_initializer(self.initializers['uxh'], self.memory_activation)(self.W_uxh.weight)
        if 'ux' in self.initializers:  # Re-init if passed in
            get_initializer(self.initializers['ux'], self.memory_activation)(self.W_uxh.weight[:, :self.input_size])
        if 'uh' in self.initializers:  # Re-init if passed in
            get_initializer(self.initializers['uh'], self.memory_activation)(self.W_uxh.weight[:, self.input_size:])
        # Construct and initialize h
        self.memory_to_hidden_size = self.memory_size * self.memory_order if self.architecture['hm'] else 0
        preact_ctor = Linear_
        preact_args = [self.input_to_hidden_size + self.memory_to_hidden_size, self.hidden_size,
                       self.architecture['bias']]
        self.W_hxm = preact_ctor(*preact_args)
        if self.initializers.get('hxm', None) is not None:  # Re-init if passed in
            get_initializer(self.initializers['hxm'], self.hidden_activation)(self.W_hxm.weight)
        if self.initializers.get('hx', None) is not None:  # Re-init if passed in
            get_initializer(self.initializers['hx'], self.hidden_activation)(self.W_hxm.weight[:, :self.input_size])
        if self.initializers.get('hm', None) is not None:  # Re-init if passed in
            get_initializer(self.initializers['hm'], self.hidden_activation)(self.W_hxm.weight[:, self.input_size:])
        if self.architecture['um']:
            # No bias here because the implementation is awkward otherwise, but probably doesn't matter
            self.W_um = nn.Parameter(torch.Tensor(self.memory_size, self.memory_order))
            get_initializer(self.initializers['um'], self.memory_activation)(self.W_um)
        if self.architecture['hh']:
            self.reset_hidden_to_hidden()
        else:
            self.W_hh = None
        if self.gate is not None:
            if self.architecture['hh']:
                # With a recurrent matrix the gate also sees h; widen its input.
                print("input to hidden size, memory to hidden size, hidden size:", self.input_to_hidden_size, self.memory_to_hidden_size, self.hidden_size)
                preact_ctor = Linear_
                preact_args = [self.input_to_hidden_size + self.memory_to_hidden_size + self.hidden_size, self.hidden_size,
                               self.architecture['bias']]
            self.W_gxm = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate)
    def reset_parameters(self):
        # super().reset_parameters()
        # Re-resolve the activation callables from their string names.
        self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size)  # TODO figure out how to remove this duplication
        self.memory_activation_fn = get_activation(self.memory_activation, self.memory_size)
    def forward(self, input, state):
        """One recurrent step: returns (output, (h', m', t+1))."""
        h, m, time_step = state
        # Zero-width placeholders keep the torch.cat calls uniform below.
        input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
        input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
        # Construct the update features
        memory_preact = self.W_uxh(torch.cat((input_to_memory, h), dim=-1))  # (batch, memory_size)
        if self.architecture['um']:
            memory_preact = memory_preact + (m * self.W_um).sum(dim=-1)
        u = self.memory_activation_fn(memory_preact)  # (batch, memory_size)
        # Update the memory
        m = self.update_memory(m, u, time_step)  # (batch, memory_size, memory_order)
        # Update hidden state from memory
        if self.architecture['hm']:
            memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
        else:
            memory_to_hidden = input.new_empty((0,))
        m_inputs = (torch.cat((input_to_hidden, memory_to_hidden), dim=-1),)
        hidden_preact = self.W_hxm(*m_inputs)
        if self.architecture['hh']:
            hidden_preact = hidden_preact + self.W_hh(h)
        hidden = self.hidden_activation_fn(hidden_preact)
        # Construct gate if necessary
        if self.gate is None:
            h = hidden
        else:
            if self.architecture['hh']:
                # Gate input also includes the previous hidden state.
                m_inputs = torch.cat((m_inputs[0], h), -1),
            g = self.W_gxm(*m_inputs)
            # Convex combination of old and candidate hidden state.
            h = (1.-g) * h + g * hidden
        next_state = (h, m, time_step + 1)
        output = self.output(next_state)
        return output, next_state
    def update_memory(self, m, u, time_step):
        """
        m: (B, M, N) [batch size, memory size, memory order]
        u: (B, M)
        Output: (B, M, N)
        """
        raise NotImplementedError
    def default_state(self, input, batch_size=None):
        # Zero hidden state, zero memory, time step 0.
        batch_size = input.size(0) if batch_size is None else batch_size
        return (input.new_zeros(batch_size, self.hidden_size, requires_grad=False),
                input.new_zeros(batch_size, self.memory_size, self.memory_order, requires_grad=False),
                0)
    def output(self, state):
        """ Converts a state into a single output (tensor) """
        h, m, time_step = state
        if self.memory_output:
            # Expose the flattened memory alongside the hidden state.
            hm = torch.cat((h, m.view(m.shape[0], self.memory_size*self.memory_order)), dim=-1)
            return hm
        else:
            return h
    def state_size(self):
        # Note: the scalar time step is not counted.
        return self.hidden_size + self.memory_size*self.memory_order
    def output_size(self):
        # Matches output(): wider when the memory is part of the output.
        if self.memory_output:
            return self.hidden_size + self.memory_size*self.memory_order
        else:
            return self.hidden_size
class LTICell(MemoryCell):
    """ A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf. """
    def __init__(self, input_size, hidden_size, memory_size, memory_order,
                 A, B,
                 trainable_scale=0., # how much to scale LR on A and B
                 dt=0.01,
                 discretization='zoh',
                 **kwargs
                 ):
        """A, B: continuous-time state-space matrices, discretized here with
        scipy.signal.cont2discrete at step size dt using `discretization`."""
        super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
        # C and D are dummies: only the discretized A and B are kept.
        C = np.ones((1, memory_order))
        D = np.zeros((1,))
        dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
        dA = dA - np.eye(memory_order)  # puts into form: x += Ax
        self.trainable_scale = np.sqrt(trainable_scale)
        if self.trainable_scale <= 0.:
            # Fixed dynamics: buffers move with .to()/.cuda() but are not trained.
            self.register_buffer('A', torch.Tensor(dA))
            self.register_buffer('B', torch.Tensor(dB))
        else:
            # Trainable dynamics, pre-divided by the scale so the effective
            # learning rate on A and B is reduced (re-multiplied in update_memory).
            self.A = nn.Parameter(torch.Tensor(dA / self.trainable_scale), requires_grad=True)
            self.B = nn.Parameter(torch.Tensor(dB / self.trainable_scale), requires_grad=True)
    # TODO: proper way to implement LR scale is a preprocess() function that occurs once per unroll
    # also very useful for orthogonal params
    def update_memory(self, m, u, time_step):
        """One discrete update: m <- m + A m + B u (rescaled when trainable)."""
        u = u.unsqueeze(-1) # (B, M, 1)
        if self.trainable_scale <= 0.:
            return m + F.linear(m, self.A) + F.linear(u, self.B)
        else:
            return m + F.linear(m, self.A * self.trainable_scale) + F.linear(u, self.B * self.trainable_scale)
class LSICell(MemoryCell):
    """ A cell implementing Linear 'Scale' Invariant dynamics: c' = 1/t (Ac + Bf). """
    def __init__(self, input_size, hidden_size, memory_size, memory_order,
                 A, B,
                 init_t = 0, # 0 for special case at t=0 (new code), else old code without special case
                 max_length=1024,
                 discretization='bilinear',
                 **kwargs
                 ):
        """
        # TODO: make init_t start at arbitrary time (instead of 0 or 1)
        """
        # B should have shape (N, 1)
        assert len(B.shape) == 2 and B.shape[1] == 1
        super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
        assert isinstance(init_t, int)
        self.init_t = init_t
        self.max_length = max_length
        # The transition depends on t (dynamics scale as 1/t), so precompute
        # the discretized (A_t, B_t) pair for every step t = 1..max_length.
        A_stacked = np.empty((max_length, memory_order, memory_order), dtype=A.dtype)
        B_stacked = np.empty((max_length, memory_order), dtype=B.dtype)
        B = B[:,0]
        N = memory_order
        for t in range(1, max_length + 1):
            At = A / t
            Bt = B / t
            if discretization in forward_aliases:
                # Forward Euler: x' = (I + A/t) x + B/t u.
                A_stacked[t - 1] = np.eye(N) + At
                B_stacked[t - 1] = Bt
            elif discretization in backward_aliases:
                # Backward Euler: solve (I - A/t) against I and B/t.
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
            elif discretization in bilinear_aliases:
                # Bilinear (Tustin): (I - A/2t)^{-1} (I + A/2t).
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
            elif discretization in zoh_aliases:
                # Zero-order hold over the log-time increment.
                A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
                B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
        B_stacked = B_stacked[:, :, None]
        A_stacked -= np.eye(memory_order) # puts into form: x += Ax
        self.register_buffer('A', torch.Tensor(A_stacked))
        self.register_buffer('B', torch.Tensor(B_stacked))
    def update_memory(self, m, u, time_step):
        """Apply the precomputed step-t transition; special-cases the first step."""
        u = u.unsqueeze(-1) # (B, M, 1)
        t = time_step - 1 + self.init_t
        if t < 0:
            # First step: the memory is just the input feature, zero-padded.
            return F.pad(u, (0, self.memory_order - 1))
        else:
            # Beyond max_length, clamp to the last precomputed transition.
            if t >= self.max_length: t = self.max_length - 1
            return m + F.linear(m, self.A[t]) + F.linear(u, self.B[t])
class TimeMemoryCell(MemoryCell):
    """ MemoryCell with timestamped data """
    def __init__(self, input_size, hidden_size, memory_size, memory_order, **kwargs):
        # The first input feature is the timestamp, so the wrapped
        # MemoryCell sees an effective input dimension of input_size - 1.
        super().__init__(input_size-1, hidden_size, memory_size, memory_order, **kwargs)
    def forward(self, input, state):
        """Same as MemoryCell.forward, except input[:, 0] is a timestamp that
        is split off and handed to update_memory; the state stores the
        previous timestamp instead of an integer step counter."""
        h, m, time_step = state
        timestamp, input = input[:, 0], input[:, 1:]
        # Zero-width placeholders keep the torch.cat calls uniform below.
        input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
        input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
        # Construct the update features
        memory_preact = self.W_uxh(torch.cat((input_to_memory, h), dim=-1))  # (batch, memory_size)
        if self.architecture['um']:
            memory_preact = memory_preact + (m * self.W_um).sum(dim=-1)
        u = self.memory_activation_fn(memory_preact)  # (batch, memory_size)
        # Update the memory
        m = self.update_memory(m, u, time_step, timestamp)  # (batch, memory_size, memory_order)
        # Update hidden state from memory
        if self.architecture['hm']:
            memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
        else:
            memory_to_hidden = input.new_empty((0,))
        m_inputs = (torch.cat((input_to_hidden, memory_to_hidden), dim=-1),)
        hidden_preact = self.W_hxm(*m_inputs)
        if self.architecture['hh']:
            hidden_preact = hidden_preact + self.W_hh(h)
        hidden = self.hidden_activation_fn(hidden_preact)
        # Construct gate if necessary
        if self.gate is None:
            h = hidden
        else:
            if self.architecture['hh']:
                # Gate input also includes the previous hidden state.
                m_inputs = torch.cat((m_inputs[0], h), -1),
            g = self.W_gxm(*m_inputs)
            h = (1.-g) * h + g * hidden
        # The current timestamp becomes the stored "time step".
        next_state = (h, m, timestamp)
        output = self.output(next_state)
        return output, next_state
class TimeLSICell(TimeMemoryCell):
    """ A cell implementing "Linear Scale Invariant" dynamics: c' = Ac + Bf with timestamped inputs. """
    name = 'tlsi'

    def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
                 measure='legs',
                 measure_args=None,
                 method='manual',
                 discretization='bilinear',
                 **kwargs
                 ):
        """
        measure: polynomial measure family ('legs' | 'legt' | 'lagt' | 'tlagt').
        measure_args: extra kwargs for the 'tlagt' transition (None -> {});
            the default used to be a mutable {} literal.
        method: transition implementation; only 'manual' is wired up here.
        discretization: must be a forward/backward/bilinear alias (zoh is
            not supported for scale-invariant dynamics).
        memory_order: -1 means "use hidden_size".
        """
        if measure_args is None:
            measure_args = {}
        if memory_order < 0:
            memory_order = hidden_size
        super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
        assert measure in ['legs', 'lagt', 'tlagt', 'legt']
        assert method in ['manual', 'linear', 'toeplitz']
        # NOTE: 'legs'/'legt' were previously two separate `if` statements;
        # the cases are mutually exclusive, so this is now one elif-chain
        # consistent with 'lagt'/'tlagt'.  `kwargs` is deliberately reused
        # below to carry transition options (the constructor **kwargs have
        # already been consumed by super().__init__).
        if measure == 'legs':
            if method == 'manual':
                self.transition = LegSAdaptiveTransitionManual(self.memory_order)
                kwargs = {'precompute': False}
        elif measure == 'legt':
            if method == 'manual':
                self.transition = LegTAdaptiveTransitionManual(self.memory_order)
                kwargs = {'precompute': False}
        elif measure == 'lagt':
            if method == 'manual':
                self.transition = LagTAdaptiveTransitionManual(self.memory_order)
                kwargs = {'precompute': False}
        elif measure == 'tlagt':
            if method == 'manual':
                self.transition = TLagTAdaptiveTransitionManual(self.memory_order, **measure_args)
                kwargs = {'precompute': False}
        if discretization in forward_aliases:
            self.transition_fn = partial(self.transition.forward_diff, **kwargs)
        elif discretization in backward_aliases:
            self.transition_fn = partial(self.transition.backward_diff, **kwargs)
        elif discretization in bilinear_aliases:
            self.transition_fn = partial(self.transition.bilinear, **kwargs)
        else: assert False  # zoh (and anything else) is unsupported here

    def update_memory(self, m, u, t0, t1):
        """
        m: (B, M, N) [batch, memory_size, memory_order]
        u: (B, M)
        t0: (B,) previous time
        t1: (B,) current time
        """
        # At time 0 the memory is simply the (zero-padded) input feature.
        if torch.eq(t1, 0.).any():
            return F.pad(u.unsqueeze(-1), (0, self.memory_order - 1))
        else:
            # Relative elapsed time; scale invariance divides by t1.
            dt = ((t1-t0)/t1).unsqueeze(-1)
            m = self.transition_fn(dt, m, u)
            return m
class TimeLTICell(TimeLSICell):
    """ A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf with timestamped inputs. """
    name = 'tlti'

    def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
                 dt=1.0,
                 **kwargs
                 ):
        # A negative memory_order means "default to the hidden size".
        memory_order = hidden_size if memory_order < 0 else memory_order
        self.dt = dt
        super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)

    def update_memory(self, m, u, t0, t1):
        """Advance the memory by the scaled elapsed time dt * (t1 - t0).

        m: (B, M, N) [batch, memory_size, memory_order]
        u: (B, M)
        t0: (B,) previous time
        t1: (B,) current time
        """
        elapsed = (t1 - t0).unsqueeze(-1) * self.dt
        return self.transition_fn(elapsed, m, u)
|
1672692
|
from setuptools import setup
name = 'zipdump'
version = '0.3'
setup(name=name,
version=version,
url='https://github.com/nlitsme/zipdump',
author='<NAME>',
author_email='<EMAIL>',
description='Analyze zipfile, either local, or from url',
classifiers=[
"Programming Language :: Python",
],
py_modules = [ 'urlstream', 'zipdump', 'webdump' ],
zip_safe=False,
entry_points="""
[console_scripts]
zipdump = zipdump:main
webdump = webdump:main
""",
)
|
1672707
|
from enum import Enum
import logging
import binascii
from blatann.nrf.nrf_dll_load import driver
import blatann.nrf.nrf_driver_types as util
from blatann.nrf.nrf_types.enums import *
from blatann.nrf.nrf_types.gap import BLEGapAddr
logger = logging.getLogger(__name__)
class BLEGapSecMode(object):
    """A GAP security mode paired with its security level."""

    def __init__(self, sec_mode, level):
        self.sm = sec_mode
        self.level = level

    def to_c(self):
        """Build the equivalent driver ble_gap_conn_sec_mode_t struct."""
        c_params = driver.ble_gap_conn_sec_mode_t()
        c_params.sm = self.sm
        c_params.lv = self.level
        return c_params

    @classmethod
    def from_c(cls, params):
        """Wrap a driver ble_gap_conn_sec_mode_t struct."""
        return cls(params.sm, params.lv)
class BLEGapSecMode
class BLEGapSecLevels(object):
def __init__(self, lv1, lv2, lv3, lv4):
self.lv1 = lv1
self.lv2 = lv2
self.lv3 = lv3
self.lv4 = lv4
@classmethod
def from_c(cls, sec_level):
return cls(lv1=sec_level.lv1,
lv2=sec_level.lv2,
lv3=sec_level.lv3,
lv4=sec_level.lv4)
def to_c(self):
sec_level = driver.ble_gap_sec_levels_t()
sec_level.lv1 = self.lv1
sec_level.lv2 = self.lv2
sec_level.lv3 = self.lv3
sec_level.lv4 = self.lv4
return sec_level
def __repr__(self):
return "{}(lv1={!r}, lv2={!r}, lv3={!r}, lv4={!r})".format(self.__class__.__name__,
self.lv1, self.lv2, self.lv3, self.lv4)
class BLEGapSecKeyDist(object):
def __init__(self, enc_key=False, id_key=False, sign_key=False, link_key=False):
self.enc_key = enc_key
self.id_key = id_key
self.sign_key = sign_key
self.link_key = link_key
@classmethod
def from_c(cls, kdist):
return cls(enc_key=kdist.enc,
id_key=kdist.id,
sign_key=kdist.sign,
link_key=kdist.link)
def to_c(self):
kdist = driver.ble_gap_sec_kdist_t()
kdist.enc = self.enc_key
kdist.id = self.id_key
kdist.sign = self.sign_key
kdist.link = self.link_key
return kdist
def __repr__(self):
return "{}(enc_key={!r}, id_key={!r}, sign_key={!r}, link_key={!r})".format(
self.__class__.__name__, self.enc_key, self.id_key, self.sign_key, self.link_key)
class BLEGapSecParams(object):
def __init__(self, bond, mitm, le_sec_pairing, keypress_noti, io_caps, oob, min_key_size, max_key_size, kdist_own,
kdist_peer):
self.bond = bond
self.mitm = mitm
self.le_sec_pairing = le_sec_pairing
self.keypress_noti = keypress_noti
self.io_caps = io_caps
self.oob = oob
self.min_key_size = min_key_size
self.max_key_size = max_key_size
self.kdist_own = kdist_own
self.kdist_peer = kdist_peer
@classmethod
def from_c(cls, sec_params):
return cls(bond=sec_params.bond,
mitm=sec_params.mitm,
le_sec_pairing=sec_params.lesc,
keypress_noti=sec_params.keypress,
io_caps=sec_params.io_caps,
oob=sec_params.oob,
min_key_size=sec_params.min_key_size,
max_key_size=sec_params.max_key_size,
kdist_own=BLEGapSecKeyDist.from_c(sec_params.kdist_own),
kdist_peer=BLEGapSecKeyDist.from_c(sec_params.kdist_peer))
def to_c(self):
sec_params = driver.ble_gap_sec_params_t()
sec_params.bond = self.bond
sec_params.mitm = self.mitm
sec_params.lesc = self.le_sec_pairing
sec_params.keypress = self.keypress_noti
sec_params.io_caps = self.io_caps
sec_params.oob = self.oob
sec_params.min_key_size = self.min_key_size
sec_params.max_key_size = self.max_key_size
sec_params.kdist_own = self.kdist_own.to_c()
sec_params.kdist_peer = self.kdist_peer.to_c()
return sec_params
def __repr__(self):
return "{}(bond={!r}, mitm={!r}, lesc={!r}, " \
"keypress_noti={!r}, io_caps={!r}, oob={!r}, " \
"min_key_size={!r}, max_key_size={!r}, " \
"kdist_own={!r}, kdist_peer={!r})".format(self.__class__.__name__, self.bond, self.mitm,
self.le_sec_pairing, self.keypress_noti, self.io_caps,
self.oob, self.min_key_size, self.max_key_size,
self.kdist_own, self.kdist_peer)
class BLEGapMasterId(object):
RAND_LEN = driver.BLE_GAP_SEC_RAND_LEN
RAND_INVALID = b"\x00" * RAND_LEN
def __init__(self, ediv=0, rand=b""):
self.ediv = ediv
self.rand = rand
def to_c(self):
rand_array = util.list_to_uint8_array(self.rand)
master_id = driver.ble_gap_master_id_t()
master_id.ediv = self.ediv
master_id.rand = rand_array.cast()
return master_id
@property
def is_valid(self) -> bool:
return len(self.rand) == self.RAND_LEN and self.rand != self.RAND_INVALID
@classmethod
def from_c(cls, master_id):
rand = util.uint8_array_to_list(master_id.rand, cls.RAND_LEN)
ediv = master_id.ediv
return cls(ediv, bytearray(rand))
def __eq__(self, other):
if not isinstance(other, BLEGapMasterId):
return False
return self.is_valid and self.ediv == other.ediv and self.rand == other.rand
def __repr__(self):
return "{}(e: {!r}, r: {!r})".format(self.__class__.__name__, self.ediv, binascii.hexlify(self.rand))
class BLEGapEncryptInfo(object):
KEY_LENGTH = driver.BLE_GAP_SEC_KEY_LEN
def __init__(self, ltk=b"", lesc=False, auth=False):
self.ltk = ltk
self.lesc = lesc
self.auth = auth
def to_c(self):
ltk = util.list_to_uint8_array(self.ltk)
info = driver.ble_gap_enc_info_t()
info.ltk = ltk.cast()
info.lesc = self.lesc
info.auth = self.auth
info.ltk_len = len(self.ltk)
return info
@classmethod
def from_c(cls, info):
ltk = bytearray(util.uint8_array_to_list(info.ltk, cls.KEY_LENGTH))
lesc = info.lesc
auth = info.auth
return cls(ltk, lesc, auth)
def __repr__(self):
if not self.ltk:
return ""
return "Encrypt(ltk: {}, lesc: {}, auth: {})".format(binascii.hexlify(self.ltk), self.lesc, self.auth)
class BLEGapEncryptKey(object):
def __init__(self, enc_info=None, master_id=None):
self.enc_info = enc_info or BLEGapEncryptInfo()
self.master_id = master_id or BLEGapMasterId()
def to_c(self):
key = driver.ble_gap_enc_key_t()
key.enc_info = self.enc_info.to_c()
key.master_id = self.master_id.to_c()
return key
@classmethod
def from_c(cls, key):
enc_info = BLEGapEncryptInfo.from_c(key.enc_info)
master_id = BLEGapMasterId.from_c(key.master_id)
return cls(enc_info, master_id)
def __repr__(self):
if not self.enc_info:
return ""
return "key: {}, master_id: {}".format(self.enc_info, self.master_id)
class BLEGapIdKey(object):
KEY_LENGTH = driver.BLE_GAP_SEC_KEY_LEN
def __init__(self, irk=b"", peer_addr=None):
self.irk = irk
self.peer_addr = peer_addr
def to_c(self):
irk_array = util.list_to_uint8_array(self.irk)
irk_key = driver.ble_gap_id_key_t()
irk = driver.ble_gap_irk_t()
irk.irk = irk_array.cast()
irk_key.id_info = irk
if self.peer_addr:
addr = self.peer_addr.to_c()
irk_key.id_addr_info = addr
return irk_key
@classmethod
def from_c(cls, id_key):
irk = bytearray(util.uint8_array_to_list(id_key.id_info.irk, cls.KEY_LENGTH))
addr = BLEGapAddr.from_c(id_key.id_addr_info)
return cls(irk, addr)
def __repr__(self):
if not self.irk:
return ""
return "irk: {}, peer: {}".format(binascii.hexlify(self.irk).decode("ascii"), self.peer_addr)
class BLEGapPublicKey(object):
KEY_LENGTH = driver.BLE_GAP_LESC_P256_PK_LEN
def __init__(self, key=b""):
self.key = key
def to_c(self):
pk = util.list_to_uint8_array(self.key)
key = driver.ble_gap_lesc_p256_pk_t()
key.pk = pk.cast()
return key
@classmethod
def from_c(cls, key):
key_data = bytearray(util.uint8_array_to_list(key.pk, cls.KEY_LENGTH))
return cls(key_data)
def __repr__(self):
if not self.key:
return ""
return binascii.hexlify(self.key).decode("ascii")
class BLEGapDhKey(object):
KEY_LENGTH = driver.BLE_GAP_LESC_DHKEY_LEN
def __init__(self, key=b""):
self.key = key
def to_c(self):
k = util.list_to_uint8_array(self.key)
key = driver.ble_gap_lesc_dhkey_t()
key.key = k.cast()
return key
@classmethod
def from_c(cls, key):
key_data = bytearray(util.uint8_array_to_list(key.key, cls.KEY_LENGTH))
return cls(key_data)
def __repr__(self):
if not self.key:
return ""
return binascii.hexlify(self.key).decode("ascii")
class BLEGapSignKey(object):
KEY_LENGTH = driver.BLE_GAP_SEC_KEY_LEN
def __init__(self, key=b""):
self.key = key
def to_c(self):
csrk = util.list_to_uint8_array(self.key)
key = driver.ble_gap_sign_info_t()
key.csrk = csrk.cast()
return key
@classmethod
def from_c(cls, key):
key_data = bytearray(util.uint8_array_to_list(key.csrk, cls.KEY_LENGTH))
return cls(key_data)
def __repr__(self):
if not self.key:
return ""
return binascii.hexlify(self.key).decode("ascii")
class BLEGapSecKeys(object):
def __init__(self, enc_key=None, id_key=None, sign_key=None, public_key=None):
if not enc_key:
enc_key = BLEGapEncryptKey()
if not id_key:
id_key = BLEGapIdKey()
if not sign_key:
sign_key = BLEGapSignKey()
if not public_key:
public_key = BLEGapPublicKey()
self.enc_key = enc_key # type: BLEGapEncryptKey
self.id_key = id_key # type: BLEGapIdKey
self.sign_key = sign_key # type: BLEGapSignKey
self.public_key = public_key # type: BLEGapPublicKey
def to_c(self):
keys = driver.ble_gap_sec_keys_t()
keys.p_enc_key = self.enc_key.to_c()
keys.p_id_key = self.id_key.to_c()
keys.p_sign_key = self.sign_key.to_c()
keys.p_pk = self.public_key.to_c()
return keys
@classmethod
def from_c(cls, keys):
enc_key = BLEGapEncryptKey.from_c(keys.p_enc_key)
id_key = BLEGapIdKey.from_c(keys.p_id_key)
sign_key = BLEGapSignKey.from_c(keys.p_sign_key)
pk = BLEGapPublicKey.from_c(keys.p_pk)
return cls(enc_key, id_key, sign_key, pk)
def __repr__(self):
return "{}(enc: {}, id: {}, sign: {}, pk: {})".format(self.__class__.__name__, self.enc_key, self.id_key,
self.sign_key, self.public_key)
class BLEGapSecKeyset(object):
def __init__(self, own_keys=None, peer_keys=None):
if not own_keys:
own_keys = BLEGapSecKeys()
if not peer_keys:
peer_keys = BLEGapSecKeys()
self.own_keys = own_keys
self.peer_keys = peer_keys
self.ble_keyset = self.to_c()
def to_c(self):
self.ble_keyset = driver.ble_gap_sec_keyset_t()
self.ble_keyset.keys_own = self.own_keys.to_c()
self.ble_keyset.keys_peer = self.peer_keys.to_c()
return self.ble_keyset
def reload(self):
self.own_keys = BLEGapSecKeys.from_c(self.ble_keyset.keys_own)
self.peer_keys = BLEGapSecKeys.from_c(self.ble_keyset.keys_peer)
@classmethod
def from_c(cls, keyset):
own_keys = BLEGapSecKeys.from_c(keyset.keys_own)
peer_keys = BLEGapSecKeys.from_c(keyset.keys_peer)
return cls(own_keys, peer_keys)
def __repr__(self):
return "{}(own: {!r}, peer: {!r})".format(self.__class__.__name__, self.own_keys, self.peer_keys)
|
1672717
|
def wavelet(Y,dt,pad=0.,dj=0.25,s0=-1,J1=-1,mother="MORLET",param=-1):
"""
This function is the translation of wavelet.m by Torrence and Compo
import wave_bases from wave_bases.py
The following is the original comment in wavelet.m
#WAVELET 1D Wavelet transform with optional singificance testing
%
% [WAVE,PERIOD,SCALE,COI] = wavelet(Y,DT,PAD,DJ,S0,J1,MOTHER,PARAM)
%
% Computes the wavelet transform of the vector Y (length N),
% with sampling rate DT.
%
% By default, the Morlet wavelet (k0=6) is used.
% The wavelet basis is normalized to have total energy=1 at all scales.
%
%
% INPUTS:
%
% Y = the time series of length N.
% DT = amount of time between each Y value, i.e. the sampling time.
%
% OUTPUTS:
%
% WAVE is the WAVELET transform of Y. This is a complex array
% of dimensions (N,J1+1). FLOAT(WAVE) gives the WAVELET amplitude,
% ATAN(IMAGINARY(WAVE),FLOAT(WAVE) gives the WAVELET phase.
% The WAVELET power spectrum is ABS(WAVE)^2.
% Its units are sigma^2 (the time series variance).
%
%
% OPTIONAL INPUTS:
%
% *** Note *** setting any of the following to -1 will cause the default
% value to be used.
%
% PAD = if set to 1 (default is 0), pad time series with enough zeroes to get
% N up to the next higher power of 2. This prevents wraparound
% from the end of the time series to the beginning, and also
% speeds up the FFT's used to do the wavelet transform.
% This will not eliminate all edge effects (see COI below).
%
% DJ = the spacing between discrete scales. Default is 0.25.
% A smaller # will give better scale resolution, but be slower to plot.
%
% S0 = the smallest scale of the wavelet. Default is 2*DT.
%
% J1 = the # of scales minus one. Scales range from S0 up to S0*2^(J1*DJ),
% to give a total of (J1+1) scales. Default is J1 = (LOG2(N DT/S0))/DJ.
%
% MOTHER = the mother wavelet function.
% The choices are 'MORLET', 'PAUL', or 'DOG'
%
% PARAM = the mother wavelet parameter.
% For 'MORLET' this is k0 (wavenumber), default is 6.
% For 'PAUL' this is m (order), default is 4.
% For 'DOG' this is m (m-th derivative), default is 2.
%
%
% OPTIONAL OUTPUTS:
%
% PERIOD = the vector of "Fourier" periods (in time units) that corresponds
% to the SCALEs.
%
% SCALE = the vector of scale indices, given by S0*2^(j*DJ), j=0...J1
% where J1+1 is the total # of scales.
%
% COI = if specified, then return the Cone-of-Influence, which is a vector
% of N points that contains the maximum period of useful information
% at that particular time.
% Periods greater than this are subject to edge effects.
% This can be used to plot COI lines on a contour plot by doing:
%
% contour(time,log(period),log(power))
% plot(time,log(coi),'k')
%
%----------------------------------------------------------------------------
% Copyright (C) 1995-2004, <NAME> and <NAME>
%
% This software may be used, copied, or redistributed as long as it is not
% sold and this copyright notice is reproduced on each copy made. This
% routine is provided as is without any express or implied warranties
% whatsoever.
%
% Notice: Please acknowledge the use of the above software in any publications:
% ``Wavelet software was provided by <NAME> and <NAME>,
% and is available at URL: http://paos.colorado.edu/research/wavelets/''.
%
% Reference: <NAME>. and <NAME>, 1998: A Practical Guide to
% Wavelet Analysis. <I>Bull. Amer. Meteor. Soc.</I>, 79, 61-78.
%
% Please send a copy of such publications to either <NAME> or G. Compo:
% Dr. <NAME> Dr. <NAME>
% Research Systems, Inc. Climate Diagnostics Center
% 4990 Pearl East Circle 325 Broadway R/CDC1
% Boulder, CO 80301, USA Boulder, CO 80305-3328, USA
% E-mail: chris[AT]rsinc[DOT]com E-mail: compo[AT]colorado[DOT]edu
%----------------------------------------------------------------------------"""
#modules
import numpy as np
from wave_bases import wave_bases
#set default
n1 = len(Y)
if (s0 == -1): s0=2.*dt
if (dj == -1): dj = 1./4.
if (J1 == -1): J1=np.fix((np.log(n1*dt/s0)/np.log(2))/dj)
if (mother == -1): mother = 'MORLET'
#print "s0=",s0
#print "J1=",J1
#....construct time series to analyze, pad if necessary
x = Y - np.mean(Y);
if (pad == 1):
base2 = np.fix(np.log(n1)/np.log(2) + 0.4999) # power of 2 nearest to N
temp=np.zeros((2**(int(base2)+1)-n1,))
x=np.concatenate((x,temp))
n = len(x)
#....construct wavenumber array used in transform [Eqn(5)]
k = np.arange(1,np.fix(n/2)+1)
k = k*(2.*np.pi)/(n*dt)
k = np.concatenate((np.zeros((1,)),k, -k[-2::-1]));
#....compute FFT of the (padded) time series
f = np.fft.fft(x) # [Eqn(3)]
#....construct SCALE array & empty PERIOD & WAVE arrays
scale=np.array([s0*2**(i*dj) for i in range(0,int(J1)+1)])
period = scale.copy()
wave = np.zeros((int(J1)+1,n),dtype=np.complex) # define the wavelet array # make it complex
# loop through all scales and compute transform
for a1 in range(0,int(J1)+1):
daughter,fourier_factor,coi,dofmin=wave_bases(mother,k,scale[a1],param)
wave[a1,:] = np.fft.ifft(f*daughter) # wavelet transform[Eqn(4)]
period = fourier_factor*scale
coi=coi*dt*np.concatenate(([1.E-5],np.arange(1.,(n1+1.)/2.-1),np.flipud(np.arange(1,n1/2.)),[1.E-5])) # COI [Sec.3g]
wave = wave[:,:n1] # get rid of padding before returning
return wave,period,scale,coi
# end of code
|
1672784
|
import torch
import os
def train_simple_conv(model, cfg, train_loader, optimizer, epoch):
    """Run one training epoch for a simple conv model.

    Args:
        model: torch module exposing ``forward`` and ``compute_loss(outputs, batch, cfg)``.
        cfg: config namespace providing ``device``, ``log_interval``, ``batch_size``,
            ``checkpoints_path`` and ``scheduler`` (an LR scheduler stepped on the mean loss).
        train_loader: iterable of batch dicts with a ``'data'`` tensor; must also expose
            ``__len__`` and a ``dataset`` attribute (DataLoader-like).
        optimizer: torch optimizer over ``model``'s parameters.
        epoch (int): current epoch index, used for logging and checkpoint names.
    """
    model.train()
    total_loss = 0.0
    print(len(optimizer.param_groups))
    for batch_idx, sampled_batch in enumerate(train_loader):
        optimizer.zero_grad()
        nn_outputs = model(sampled_batch['data'].to(cfg.device))
        loss = model.compute_loss(nn_outputs, sampled_batch, cfg)
        loss.backward()
        optimizer.step()
        # Accumulate a detached Python float. The original `total_loss += loss`
        # summed tensors, which keeps every batch's autograd graph alive and
        # steadily leaks GPU/CPU memory over the epoch.
        total_loss += loss.item()
        if batch_idx % cfg.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * cfg.batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            torch.save(model.state_dict(), os.path.join(cfg.checkpoints_path,"ChauffeurNet_{}_{}.pt".format(epoch,batch_idx)))
    total_loss /= len(train_loader)
    # ReduceLROnPlateau-style scheduler: stepped with the epoch's mean loss.
    cfg.scheduler.step(total_loss)
    for param_group in optimizer.param_groups:
        print(param_group['lr'])
|
1672828
|
import numpy as np
from copy import deepcopy
from rlcard.games.mahjong import Dealer
from rlcard.games.mahjong import Player
from rlcard.games.mahjong import Round
from rlcard.games.mahjong import Judger
class MahjongGame:
    """Four-player Mahjong game controller: dealing, stepping, and optional undo."""

    def __init__(self, allow_step_back=False):
        """Set up the game shell; ``allow_step_back`` enables state snapshots."""
        self.allow_step_back = allow_step_back
        self.np_random = np.random.RandomState()
        self.num_players = 4

    def init_game(self):
        """Start a fresh game.

        Returns:
            (tuple): the initial state dict and the starting player's id.
        """
        self.dealer = Dealer(self.np_random)
        self.players = [Player(pid, self.np_random) for pid in range(self.num_players)]
        self.judger = Judger(self.np_random)
        self.round = Round(self.judger, self.dealer, self.num_players, self.np_random)
        # Standard opening: every seat receives 13 tiles.
        for seat in self.players:
            self.dealer.deal_cards(seat, 13)
        # Snapshot stack used by step_back().
        self.history = []
        # The player to act draws a 14th tile before the first decision.
        self.dealer.deal_cards(self.players[self.round.current_player], 1)
        self.cur_state = self.get_state(self.round.current_player)
        return self.cur_state, self.round.current_player

    def step(self, action):
        """Apply ``action`` and advance the round.

        Returns:
            (tuple): the next player's state dict and id.
        """
        if self.allow_step_back:
            # Deep-copy the mutable pieces so step_back can restore them exactly.
            snapshot = (deepcopy(self.dealer), deepcopy(self.players), deepcopy(self.round))
            self.history.append(snapshot)
        self.round.proceed_round(self.players, action)
        self.cur_state = self.get_state(self.round.current_player)
        return self.cur_state, self.round.current_player

    def step_back(self):
        """Restore the previous snapshot; returns True on success, False if none exists."""
        if not self.history:
            return False
        self.dealer, self.players, self.round = self.history.pop()
        return True

    def get_state(self, player_id):
        """Return the state dict of ``player_id`` as produced by the round."""
        return self.round.get_state(self.players, player_id)

    @staticmethod
    def get_legal_actions(state):
        """Return the legal actions for the current player.

        When the only valid act is 'play', the playable cards themselves become
        the action set (and ``state['valid_act']`` is updated accordingly).
        """
        if state['valid_act'] != ['play']:
            return state['valid_act']
        state['valid_act'] = state['action_cards']
        return state['action_cards']

    @staticmethod
    def get_num_actions():
        """Return the size of the action space (38)."""
        return 38

    def get_num_players(self):
        """Return how many players are in the game."""
        return self.num_players

    def get_player_id(self):
        """Return the id of the player whose turn it is."""
        return self.round.current_player

    def is_over(self):
        """Judge the game; records the winner on ``self.winner`` and returns True if over."""
        win, winner, _ = self.judger.judge_game(self)
        self.winner = winner
        return win
|
1672848
|
import unittest
import sys
import re
import os
from androguard.misc import AnalyzeAPK
from androguard.decompiler.decompiler import DecompilerJADX
def which(program):
    """Locate *program* like the Unix ``which`` command.

    If *program* contains a directory component it is checked directly;
    otherwise each entry of ``$PATH`` is searched. Returns the full path of
    the first executable match, or ``None``.

    Thankfully copied from https://stackoverflow.com/a/377028/446140
    """
    def _runnable(candidate):
        # Must exist as a regular file and carry the execute bit.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        return program if _runnable(program) else None
    for folder in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(folder, program)
        if _runnable(candidate):
            return candidate
    return None
class DecompilerTest(unittest.TestCase):
    # Integration test for the JADX-backed decompiler. It shells out to the
    # external `jadx` binary, so the whole test is skipped when that tool is
    # not on PATH.
    @unittest.skipIf(which("jadx") is None, "Skipping JADX test as jadx "
                                            "executable is not in path")
    def testJadx(self):
        # Analyze a small fixture APK, attach the JADX decompiler to its first
        # DEX, and check that every class yields some decompiled source.
        a, d, dx = AnalyzeAPK("examples/tests/hello-world.apk")
        decomp = DecompilerJADX(d[0], dx)
        self.assertIsNotNone(decomp)
        d[0].set_decompiler(decomp)
        for c in d[0].get_classes():
            self.assertIsNotNone(c.get_source())
if __name__ == '__main__':
    unittest.main()
|
1672861
|
from __future__ import print_function
import argparse
import os
import random
import chainer
import numpy as np
from chainer import training, Variable
from chainer.training import extensions
from dataset import H5pyDataset
from model import DCGAN_G, DCGAN_D, init_bn, init_conv
from sampler import sampler
from updater import WassersteinUpdater
def make_optimizer(model, lr):
    """Create an RMSprop optimizer with learning rate *lr* bound to *model*."""
    opt = chainer.optimizers.RMSprop(lr=lr)
    opt.setup(model)
    return opt
def main():
    """CLI entry point: build DCGAN G/D, wire a Wasserstein updater, and train."""
    parser = argparse.ArgumentParser(description='Train Unsupervised Blending GAN')
    parser.add_argument('--nz', type=int, default=100, help='Size of the latent z vector')
    parser.add_argument('--ngf', type=int, default=64, help='# of base filters in G')
    parser.add_argument('--ndf', type=int, default=64, help='# of base filters in D')
    parser.add_argument('--nc', type=int, default=3, help='# of output channels in G')
    parser.add_argument('--load_size', type=int, default=64, help='Scale image to load_size')
    parser.add_argument('--image_size', type=int, default=64, help='The height / width of the input image to network')
    parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--lr_d', type=float, default=0.00005, help='Learning rate for Critic, default=0.00005')
    parser.add_argument('--lr_g', type=float, default=0.00005, help='Learning rate for Generator, default=0.00005')
    parser.add_argument('--d_iters', type=int, default=5, help='# of D iters per each G iter')
    parser.add_argument('--n_epoch', type=int, default=25, help='# of epochs to train for')
    parser.add_argument('--clamp_lower', type=float, default=-0.01, help='Lower bound for clipping')
    parser.add_argument('--clamp_upper', type=float, default=0.01, help='Upper bound for clipping')
    parser.add_argument('--data_root', help='Path to dataset')
    parser.add_argument('--experiment', default='Wasserstein_GAN_result', help='Where to store samples and models')
    parser.add_argument('--workers', type=int, default=10, help='# of data loading workers')
    parser.add_argument('--batch_size', type=int, default=128, help='input batch size')
    parser.add_argument('--test_size', type=int, default=64, help='Batch size for testing')
    parser.add_argument('--manual_seed', type=int, default=5, help='Manul seed')
    parser.add_argument('--resume', default='', help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval', type=int, default=1, help='Interval of snapshot (epoch)')
    parser.add_argument('--print_interval', type=int, default=1, help='Interval of printing log to console (iteration)')
    parser.add_argument('--plot_interval', type=int, default=10, help='Interval of plot (iteration)')
    args = parser.parse_args()
    # NOTE(review): only Python's `random` is seeded here; numpy/chainer RNGs
    # are not, so runs are not fully reproducible -- confirm if intended.
    random.seed(args.manual_seed)
    print('Input arguments:')
    for key, value in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')
    # Set up G & D
    print('Create & Init models ...')
    print('\tInit G network ...')
    G = DCGAN_G(args.image_size, args.nc, args.ngf, init_conv, init_bn)
    print('\tInit D network ...')
    # D outputs a single critic score (hence the `1`).
    D = DCGAN_D(args.image_size, args.ndf, 1, init_conv, init_bn)
    if args.gpu >= 0:
        print('\tCopy models to gpu {} ...'.format(args.gpu))
        chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current
        G.to_gpu() # Copy the model to the GPU
        D.to_gpu()
    print('Init models done ...\n')
    # Setup an optimizer
    optimizer_d = make_optimizer(D, args.lr_d)
    optimizer_g = make_optimizer(G, args.lr_g)
    ########################################################################################################################
    # Setup dataset & iterator
    print('Load images from {} ...'.format(args.data_root))
    trainset = H5pyDataset(args.data_root, load_size=args.load_size, crop_size=args.image_size)
    print('\tTrainset contains {} image files'.format(len(trainset)))
    print('')
    train_iter = chainer.iterators.MultiprocessIterator(trainset, args.batch_size, n_processes=args.workers,
                                                        n_prefetch=args.workers)
    ########################################################################################################################
    # Set up a trainer
    # 'main' is the conventional chainer key for the primary optimizer (G here).
    updater = WassersteinUpdater(
        models=(G, D),
        args=args,
        iterator=train_iter,
        optimizer={'main': optimizer_g, 'D': optimizer_d},
        device=args.gpu
    )
    trainer = training.Trainer(updater, (args.n_epoch, 'epoch'), out=args.experiment)
    # Snapshot
    snapshot_interval = (args.snapshot_interval, 'epoch')
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        G, 'g_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        D, 'd_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
    # Display
    print_interval = (args.print_interval, 'iteration')
    trainer.extend(extensions.LogReport(trigger=print_interval))
    trainer.extend(extensions.PrintReport([
        'iteration', 'main/loss', 'D/loss', 'D/loss_real', 'D/loss_fake'
    ]), trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=args.print_interval))
    trainer.extend(extensions.dump_graph('D/loss', out_name='TrainGraph.dot'))
    # Plot
    plot_interval = (args.plot_interval, 'iteration')
    trainer.extend(
        extensions.PlotReport(['main/loss'], 'iteration', file_name='loss.png', trigger=plot_interval),
        trigger=plot_interval)
    trainer.extend(
        extensions.PlotReport(['D/loss'], 'iteration', file_name='d_loss.png', trigger=plot_interval),
        trigger=plot_interval)
    trainer.extend(
        extensions.PlotReport(['D/loss_real'], 'iteration', file_name='loss_real.png', trigger=plot_interval),
        trigger=plot_interval)
    trainer.extend(
        extensions.PlotReport(['D/loss_fake'], 'iteration', file_name='loss_fake.png', trigger=plot_interval),
        trigger=plot_interval)
    # Eval
    path = os.path.join(args.experiment, 'samples')
    if not os.path.isdir(path):
        os.makedirs(path)
    print('Saving samples to {} ...\n'.format(path))
    # Fixed latent batch so sample images are comparable across epochs.
    noisev = Variable(np.asarray(np.random.normal(size=(args.test_size, args.nz, 1, 1)), dtype=np.float32))
    noisev.to_gpu(args.gpu)
    trainer.extend(sampler(G, path, noisev, 'fake_samples_{}.png'), trigger=plot_interval)
    if args.resume:
        # Resume from a snapshot
        print('Resume from {} ... \n'.format(args.resume))
        chainer.serializers.load_npz(args.resume, trainer)
    # Run the training
    print('Training start ...\n')
    trainer.run()
if __name__ == '__main__':
    main()
|
1672915
|
from glob import glob
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    """Compute (and print) the activations of a Keras model's layers.

    Args:
        model: compiled Keras model.
        model_inputs: a single input array, or a list of arrays for
            multi-input models.
        print_shape_only: if True, print only each activation's shape.
        layer_name: restrict the output to the layer with this name;
            None means every layer.

    Returns:
        list of numpy arrays, one activation tensor per selected layer.
    """
    import keras.backend as K
    print('----- activations -----')
    inp = model.input
    multi_input = isinstance(inp, list)
    if not multi_input:
        # Single-input model: wrap so K.function always gets a list.
        inp = [inp]
    selected = [layer.output for layer in model.layers
                if layer_name is None or layer.name == layer_name]
    # One evaluation function per selected layer output.
    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in selected]
    # Trailing 1. = learning phase "test" (disables dropout/BN training mode).
    if multi_input:
        feed = list(model_inputs) + [1.]
    else:
        feed = [model_inputs, 1.]
    activations = [fn(feed)[0] for fn in funcs]
    for act in activations:
        if print_shape_only:
            print(act.shape)
        else:
            print(act)
    return activations
if __name__ == '__main__':
    # Load the most recent checkpoint (natural sort -> latest epoch last) and
    # dump the activations of a few test samples.
    checkpoints = glob('checkpoints/*.h5')
    # pip3 install natsort
    from natsort import natsorted
    from keras.models import load_model
    if len(checkpoints) > 0:
        checkpoints = natsorted(checkpoints)
    assert len(checkpoints) != 0, 'No checkpoints found.'
    checkpoint_file = checkpoints[-1]
    print('Loading [{}]'.format(checkpoint_file))
    model = load_model(checkpoint_file)
    # Fixed: the original passed loss='mse ' (trailing space), which is not a
    # valid Keras loss identifier and raises ValueError at compile time.
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['accuracy'])
    print(model.summary())
    # NOTE(review): x_test is never defined in this script -- it must be bound
    # (e.g. loaded from the dataset) before these calls, otherwise NameError.
    get_activations(model, x_test[0:1], print_shape_only=True)  # with just one sample.
    get_activations(model, x_test[0:200], print_shape_only=True)  # with 200 samples.
|
1672958
|
from __future__ import absolute_import, division
import textwrap
from pprint import PrettyPrinter
from _plotly_utils.utils import *
# Pretty printing
def _list_repr_elided(v, threshold=200, edgeitems=3, indent=0, width=80):
"""
Return a string representation for of a list where list is elided if
it has more than n elements
Parameters
----------
v : list
Input list
threshold :
Maximum number of elements to display
Returns
-------
str
"""
if isinstance(v, list):
open_char, close_char = "[", "]"
elif isinstance(v, tuple):
open_char, close_char = "(", ")"
else:
raise ValueError("Invalid value of type: %s" % type(v))
if len(v) <= threshold:
disp_v = v
else:
disp_v = list(v[:edgeitems]) + ["..."] + list(v[-edgeitems:])
v_str = open_char + ", ".join([str(e) for e in disp_v]) + close_char
v_wrapped = "\n".join(
textwrap.wrap(
v_str,
width=width,
initial_indent=" " * (indent + 1),
subsequent_indent=" " * (indent + 1),
)
).strip()
return v_wrapped
class ElidedWrapper(object):
    """
    Helper that wraps a value of certain types and supplies a custom
    ``__repr__`` whose output may be elided, for use during pretty printing.
    """

    def __init__(self, v, threshold, indent):
        self.v = v
        self.indent = indent
        self.threshold = threshold

    @staticmethod
    def is_wrappable(v):
        """Return True for the types this wrapper knows how to elide."""
        numpy = get_module("numpy")
        if isinstance(v, str):
            return True
        if isinstance(v, (list, tuple)):
            # Only non-empty sequences whose first element is not a dict.
            return len(v) > 0 and not isinstance(v[0], dict)
        return bool(numpy) and isinstance(v, numpy.ndarray)

    def __repr__(self):
        numpy = get_module("numpy")
        if isinstance(self.v, (list, tuple)):
            # Sequences: delegate to the elided list formatter.
            return _list_repr_elided(
                self.v, threshold=self.threshold, indent=self.indent
            )
        if numpy and isinstance(self.v, numpy.ndarray):
            # Arrays: temporarily apply our print options, then restore.
            saved = numpy.get_printoptions()
            numpy.set_printoptions(
                **dict(saved, threshold=self.threshold, edgeitems=3, linewidth=80)
            )
            text = repr(self.v)
            numpy.set_printoptions(**saved)
            # Indent every line after the first.
            return ("\n" + " " * self.indent).join(text.split("\n"))
        if isinstance(self.v, str) and len(self.v) > 80:
            # Long strings: keep 30 chars on each side.
            return "(" + repr(self.v[:30]) + " ... " + repr(self.v[-30:]) + ")"
        return repr(self.v)
class ElidedPrettyPrinter(PrettyPrinter):
    """
    PrettyPrinter subclass that elides long lists/arrays/strings
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra option before handing the rest to PrettyPrinter.
        self.threshold = kwargs.pop("threshold", 200)
        PrettyPrinter.__init__(self, *args, **kwargs)

    def _format(self, val, stream, indent, allowance, context, level):
        if not ElidedWrapper.is_wrappable(val):
            return PrettyPrinter._format(
                self, val, stream, indent, allowance, context, level
            )
        # Wrap, then recurse so the wrapper's custom __repr__ is used.
        wrapped = ElidedWrapper(val, self.threshold, indent)
        return self._format(wrapped, stream, indent, allowance, context, level)
def node_generator(node, path=()):
    """
    Yield every dict nested inside *node*, paired with its access path.

    A path is a tuple of keys describing how to reach a nested value from the
    top level of the object. The top-level dict itself is yielded with the
    empty path. Non-dict input yields nothing.

    :param (dict) node: Part of a dict to be traversed.
    :param (tuple[str]) path: Defines the path of the current node.
    :return: (Generator)

    Example:

        >>> for node, path in node_generator({'a': {'b': 5}}):
        ...     print(node, path)
        {'a': {'b': 5}} ()
        {'b': 5} ('a',)
    """
    if not isinstance(node, dict):
        return  # guard: only dicts are traversed
    yield node, path
    for key, val in node.items():
        if isinstance(val, dict):
            for pair in node_generator(val, path + (key,)):
                yield pair
def get_by_path(obj, path):
    """
    Follow *path* into *obj*, indexing once per key.

    :param (list|dict) obj: The top-level object.
    :param (tuple[str]|tuple[int]) path: Keys to access parts of obj.
    :return: (*)

    Example:

        >>> figure = {'data': [{'x': [5]}]}
        >>> path = ('data', 0, 'x')
        >>> get_by_path(figure, path)
        [5]
    """
    current = obj
    for step in path:
        current = current[step]
    return current
def decode_unicode(coll):
    """Recursively normalize strings in a nested list/dict structure, in place.

    String values are passed through ``str`` (ignoring UnicodeEncodeError),
    and every dict key is re-inserted as its ``str`` form. The (mutated)
    input collection is returned.
    """
    if isinstance(coll, list):
        for idx, item in enumerate(coll):
            if isinstance(item, (dict, list)):
                coll[idx] = decode_unicode(item)
            elif isinstance(item, str):
                try:
                    coll[idx] = str(item)
                except UnicodeEncodeError:
                    pass
    elif isinstance(coll, dict):
        # Snapshot the items first: keys are popped/re-added while looping.
        for key, val in list(coll.items()):
            if isinstance(val, (dict, list)):
                coll[key] = decode_unicode(val)
            elif isinstance(val, str):
                try:
                    coll[key] = str(val)
                except UnicodeEncodeError:
                    pass
            # Re-key every entry under the stringified key.
            coll[str(key)] = coll.pop(key)
    return coll
|
1672978
|
import logging
from abc import abstractmethod, ABC
class PostProcess(ABC):
    """Abstract base for model post-processing steps.

    Subclasses implement ``__call__`` with keyword-only inputs; every
    instance carries the shared "barry" logger.
    """

    def __init__(self):
        self.logger = logging.getLogger("barry")

    @abstractmethod
    def __call__(self, **inputs):
        pass
class PkPostProcess(PostProcess):
    """Abstract PostProcess for power-spectrum models.

    Implementations provide ``postprocess(ks, pk, mask)``; the generic
    keyword interface is routed there with k values, p(k) values and a
    boolean mask.
    """

    def __call__(self, **inputs):
        # Unpack the generic kwargs into the Pk-specific hook.
        return self.postprocess(inputs["ks"], inputs["pk"], inputs["mask"])

    @abstractmethod
    def postprocess(self, ks, pk, mask):
        pass
class XiPostProcess(PostProcess):
    """Abstract PostProcess for correlation-function models.

    Implementations provide ``postprocess(dist, xi, mask)``, where xi is the
    monopole xi_0. As the BAO extractor is a Pk model only, this class is
    currently unused -- but it could be.
    """

    def __call__(self, **inputs):
        # Unpack the generic kwargs into the xi-specific hook.
        return self.postprocess(inputs["dist"], inputs["xi"], inputs["mask"])

    @abstractmethod
    def postprocess(self, dist, xi, mask):
        pass
|
1673014
|
from vibora import Vibora, Request
from vibora.tests import TestSuite
from vibora.responses import JsonResponse
from vibora.multipart import FileUpload
class FormsTestCase(TestSuite):
    # End-to-end form-handling tests: each spins up a Vibora app with a POST
    # route and exercises it through the in-process test client.
    async def test_simple_post__expects_correctly_interpreted(self):
        app = Vibora()
        @app.route('/', methods=['POST'])
        async def home(request: Request):
            # Echo the parsed form back; note values come through as strings.
            return JsonResponse((await request.form()))
        async with app.test_client() as client:
            response = await client.post('/', form={'a': 1, 'b': 2})
            self.assertDictEqual(response.json(), {'a': '1', 'b': '2'})
    async def test_file_upload_with_another_values(self):
        app = Vibora()
        @app.route('/', methods=['POST'])
        async def home(request: Request):
            # Mixed form: plain field 'a' plus uploaded file 'b' (read as bytes).
            form = await request.form()
            return JsonResponse({'a': form['a'], 'b': (await form['b'].read()).decode()})
        async with app.test_client() as client:
            response = await client.post('/', form={'a': 1, 'b': FileUpload(content=b'uploaded_file')})
            self.assertDictEqual(response.json(), {'a': '1', 'b': 'uploaded_file'})
|
1673025
|
import os
import matplotlib
# Select the non-interactive Agg backend; this must happen before pyplot is
# imported, otherwise the default backend is already locked in.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
# Serif/Times look with STIX math so figure text matches LaTeX-style papers.
plt.rc('font', family='serif')
plt.rc('font', serif='Times New Roman')
plt.rcParams["mathtext.fontset"] = "stix"
def smooth(y, sigma=0.6):
    """Return *y* smoothed with a 1-D Gaussian filter.

    Parameters
    ----------
    y : array-like
        Values to smooth.
    sigma : float, optional
        Standard deviation of the Gaussian kernel. Defaults to 0.6, the value
        previously hard-coded, so existing call sites are unchanged.
    """
    return gaussian_filter1d(y, sigma=sigma)
# NOTE(review): os.path.dirname(".") is always '' and base_path is unused below.
base_path = os.path.dirname(".")
# Pull the default color cycle so each dataset gets a distinct, stable color.
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
COLORS = {"mnist_balanced": colors[0],
          "mnist_unbalanced": colors[1],
          "synthetic_a0b0": colors[2],
          "synthetic_a1b1": colors[3]}
LABELS = {'mnist_balanced': 'mnist balanced',
          'mnist_unbalanced': 'mnist unbalanced',
          'synthetic_a0b0': r'synthetic$(0,0)$',
          "synthetic_a1b1": r'synthetic$(1,1)$'}
# Hard-coded experiment results: X = local-step settings, Y = smoothed
# round counts required to reach the target accuracy.
synthetic_a0b0_X = [5, 10, 20, 30, 50, 60, 80, 100, 125, 200]
synthetic_a0b0 = smooth([160, 101, 88, 87, 90, 92, 96, 106, 114, 139])
synthetic_a1b1_X = [5, 10, 20, 30, 50]
synthetic_a1b1 = smooth([189, 140, 143, 150, 194])
mnist_balanced_X = [10, 20, 30, 50, 60, 80, 100, 125, 150]
mnist_balanced = smooth([120, 50, 39, 28, 27, 22, 21, 20, 20])
mnist_unbalanced_X = [10, 20, 30, 50, 100, 200, 400]
mnist_unbalanced = smooth([400, 145, 114, 104, 119, 137, 205])
matplotlib.rcParams['font.family'] = 'Times New Roman'
stats_dict = {'mnist_unbalanced': (mnist_unbalanced_X, mnist_unbalanced),
              'mnist_balanced': (mnist_balanced_X, mnist_balanced),
              'synthetic_a0b0': (synthetic_a0b0_X, synthetic_a0b0),
              'synthetic_a1b1': (synthetic_a1b1_X, synthetic_a1b1)}
plt.figure(figsize=(4, 3))
for data, stat in stats_dict.items():
    # X values are scaled by 10 to convert the stored settings to local steps E.
    plt.plot(np.array(stat[0])*10, np.array(stat[1]), linewidth=1.0, color=COLORS[data], label=LABELS[data])
plt.grid(True)
plt.legend(loc=0, borderaxespad=0., prop={'size': 10})
plt.ylabel(r'Required rounds ($T_{\epsilon}/E$)', fontdict={'size': 10})
plt.xlabel('Local steps ($E$)', fontdict={'size': 10})
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xscale('log')
plt.tight_layout()
fig = plt.gcf()
fig.savefig('E.pdf')
1673029
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from core.models import BaseAbstractModel
from core.utils import iban_validator, phonenumber_validator
from orders import enums
from products.models import Product
class BillingAddress(BaseAbstractModel):
    """
    Billing Address Model
    """

    # Free-text recipient name; no structure enforced beyond max length.
    full_name = models.CharField(max_length=255, verbose_name=_("Full Name"))
    line_1 = models.CharField(max_length=255, verbose_name=_("Address Line 1"))
    # Second address line is optional.
    line_2 = models.CharField(
        max_length=255, verbose_name=_("Address Line 2"), blank=True, null=True
    )
    phone = models.CharField(
        max_length=20,
        verbose_name=_("Phone Number"),
        validators=[phonenumber_validator],
    )
    district = models.CharField(max_length=255, verbose_name=_("District"))
    zipcode = models.CharField(max_length=20, verbose_name=_("Zip Code"))
    # PROTECT: a city cannot be deleted while billing addresses reference it.
    city = models.ForeignKey(
        "customers.City", verbose_name=_("City"), on_delete=models.PROTECT
    )

    class Meta:
        verbose_name = _("Billing Address")
        verbose_name_plural = _("Billing Addresses")

    def __str__(self):
        return f"{self.full_name} - {self.line_1} - {self.line_2} - {self.district} - {self.city}"
class ShippingAddress(BaseAbstractModel):
    """
    Shipping Address Model
    """

    full_name = models.CharField(max_length=255, verbose_name=_("Full Name"))
    line_1 = models.CharField(max_length=255, verbose_name=_("Address Line 1"))
    line_2 = models.CharField(
        max_length=255, verbose_name=_("Address Line 2"), blank=True, null=True
    )
    phone = models.CharField(
        max_length=20,
        verbose_name=_("Phone Number"),
        validators=[phonenumber_validator],
    )
    district = models.CharField(max_length=255, verbose_name=_("District"))
    # NOTE(review): zipcode is optional here (blank=True) but required on
    # BillingAddress -- confirm this asymmetry is intentional.
    zipcode = models.CharField(max_length=20, verbose_name=_("Zip Code"), blank=True)
    # PROTECT: a city cannot be deleted while shipping addresses reference it.
    city = models.ForeignKey(
        "customers.City", verbose_name=_("City"), on_delete=models.PROTECT
    )

    class Meta:
        verbose_name = _("Shipping Address")
        verbose_name_plural = _("Shipping Addresses")

    def __str__(self):
        return f"{self.full_name} - {self.line_1} - {self.line_2} - {self.district} - {self.city}"
class OrderBankAccount(BaseAbstractModel):
    """
    Order Bank Account Model
    """

    # Account holder's display name.
    name = models.CharField(max_length=255, verbose_name=_("Name"))
    # IBAN is validated by the shared iban_validator from core.utils.
    iban = models.CharField(
        max_length=100, verbose_name=_("IBAN"), validators=[iban_validator]
    )
    bank_name = models.CharField(max_length=100, verbose_name=_("Bank Name"))
    # PROTECT: keep the bank-account record as long as its order exists.
    order = models.ForeignKey(
        "orders.Order", verbose_name=_("Order"), on_delete=models.PROTECT
    )

    class Meta:
        verbose_name = _("Order Bank Account")
        verbose_name_plural = _("Order Bank Accounts")

    def __str__(self):
        return f"{self.name} - {self.order}"
class Order(BaseAbstractModel):
    """
    Order Model
    """

    # All FKs use PROTECT so historical orders keep their related records.
    customer = models.ForeignKey(
        "customers.Customer", verbose_name=_("Customer"), on_delete=models.PROTECT
    )
    basket = models.ForeignKey(
        "baskets.Basket", verbose_name=_("Basket"), on_delete=models.PROTECT
    )
    # Lifecycle state driven by the OrderStatus enum; new orders start PENDING.
    status = models.CharField(
        choices=enums.OrderStatus.choices,
        default=enums.OrderStatus.PENDING,
        max_length=20,
        verbose_name=_("Status"),
    )
    billing_address = models.ForeignKey(
        BillingAddress, verbose_name=_("Billing Address"), on_delete=models.PROTECT
    )
    shipping_address = models.ForeignKey(
        ShippingAddress, verbose_name=_("Shipping Address"), on_delete=models.PROTECT
    )
    # Snapshot of the basket total at order time (2 decimal places).
    total_price = models.DecimalField(
        verbose_name=_("Total Price"), max_digits=10, decimal_places=2
    )

    class Meta:
        verbose_name = _("Order")
        verbose_name_plural = _("Orders")

    def __str__(self):
        return f"{self.customer} - {self.basket}"
class OrderItem(BaseAbstractModel):
    """
    Order Item Model
    """

    order = models.ForeignKey(
        "Order", verbose_name=_("Order"), on_delete=models.PROTECT
    )
    product = models.ForeignKey(
        Product, verbose_name=_("Product"), on_delete=models.PROTECT
    )
    # Unit price captured at purchase time, independent of later product changes.
    price = models.DecimalField(
        verbose_name=_("Price"), max_digits=10, decimal_places=2
    )

    class Meta:
        verbose_name = _("Order Item")
        verbose_name_plural = _("Order Items")

    def __str__(self):
        return f"{self.order} - {self.product} - {self.price}"
|
1673037
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def percentile(n):
    """Return an aggregation callable computing the n-th percentile of its input."""
    def _inner(x):
        return np.percentile(x, n)
    # Named so pandas .agg()/.apply() reports e.g. 'percentile_50', not '<lambda>'.
    _inner.__name__ = 'percentile_%s' % n
    return _inner
#determine unconditional mean, sum R in each bin. But then devide by master counts
def boxbin(x,y,xedge,yedge,c=None,figsize=(5,5),cmap='viridis',mincnt=10,vmin=None,vmax=None,edgecolor=None,powernorm=False,
ax=None,normed=False,method='mean',quantile=None,alpha=1.0,cbar=True,unconditional=False,master_count=np.array([])):
""" This function will grid data for you and provide the counts if no variable c is given, or the median if
a variable c is given. In the future I will add functionallity to do the median, and possibly quantiles.
x: 1-D array
y: 1-D array
xedge: 1-D array for xbins
yedge: 1-D array for ybins
c: 1-D array, same len as x and y
returns
axis handle
cbar handle
C matrix (counts or median values in bin)
"""
midpoints = np.empty(xedge.shape[0]-1)
for i in np.arange(1,xedge.shape[0]):
midpoints[i-1] = xedge[i-1] + (np.abs(xedge[i] - xedge[i-1]))/2.
#note on digitize. bin 0 is outside to the left of the bins, bin -1 is outside to the right
ind1 = np.digitize(x,bins = xedge) #inds of x in each bin
ind2 = np.digitize(y,bins = yedge) #inds of y in each bin
#drop points outside range
outsideleft = np.where(ind1 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind1 != len(xedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
outsideleft = np.where(ind2 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind2 != len(yedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
if c is None:
c = np.zeros(len(ind1))
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
df2 = df.groupby(["x","y"]).count()
df = df2.where(df2.values >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if normed:
n_samples = np.ma.sum(C)
C = C/n_samples
C = C*100
print('n_samples= {}'.format(n_samples))
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,edgecolor=edgecolor,norm=colors.PowerNorm(gamma=0.5),vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=edgecolor,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
elif unconditional:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].sum()
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = C/master_count.values
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].mean()
elif method=='std':
df2 = df.groupby(["x","y"])['c'].std()
elif method=='median':
df2 = df.groupby(["x","y"])['c'].median()
elif method=='qunatile':
if quantile is None:
print('No quantile given, defaulting to median')
quantile = 0.5
else:
pass
df2 = df.groupby(["x","y"])['c'].apply(percentile(quantile*100))
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
|
1673043
|
from idm.objects import dp, MySignalEvent
from idm.utils import find_mention_by_event
from microvk import VkApiResponseException
@dp.longpoll_event_register('+др', '+друг', '-др', '-друг')
@dp.my_signal_event_register('+др', '+друг', '-др', '-друг')
def change_friend_status(event: MySignalEvent) -> str:
    """Add ('+др'/'+друг') or remove ('-др'/'-друг') the mentioned user as a friend."""
    user_id = find_mention_by_event(event)
    if not user_id:
        event.msg_op(2, "❗ Необходимо пересланное сообщение или упоминание")
        return "ok"
    if event.command.startswith('-др'):
        # Removal: friends.delete reports which kind of relation was cleared.
        try:
            status = event.api('friends.delete', user_id=user_id)
            if status.get('friend_deleted'):
                msg = "💔 Пользователь удален из друзей"
            elif status.get('out_request_deleted'):
                msg = "✅ Отменена исходящая заявка"
            elif status.get('in_request_deleted'):
                msg = "✅ Отклонена входящая заявка"
            elif status.get('suggestion_deleted'):
                msg = "✅ Отклонена рекомендация друга"
            else:
                msg = "❗ Произошла ошибка"
        except VkApiResponseException as e:
            msg = f"❗ Произошла ошибка VK №{e.error_code} {e.error_msg}"
    else:
        # Addition: 1 = request sent, 2 = accepted, otherwise resent.
        try:
            status = event.api('friends.add', user_id=user_id)
            if status == 1:
                msg = "✅ Заявка отправлена"
            elif status == 2:
                msg = "✅ Пользователь добавлен"
            else:
                msg = "✅ Заявка отправлена повторно"
        except VkApiResponseException as e:
            special_errors = {
                174: "🤔 Ты себя добавить хочешь?",
                175: "❗ Ты в ЧС данного пользователя",
                176: "❗ Пользователь в ЧС",
            }
            msg = special_errors.get(e.error_code, f"❗ Ошибка: {e.error_msg}")
    event.msg_op(2, msg)
    return "ok"
@dp.longpoll_event_register('+чс', '-чс')
@dp.my_signal_event_register('+чс', '-чс')
def ban_user(event: MySignalEvent) -> str:
    """Add ('+чс') or remove ('-чс') a user from the account blacklist.

    The target user is resolved from a forwarded message or a mention.
    The triggering message is edited with the outcome; always returns "ok".
    """
    user_id = find_mention_by_event(event)
    if user_id:
        if event.command == '+чс':
            try:
                # account.ban returns 1 on success; the else branch keeps
                # `msg` bound when the API returns anything else (the
                # original raised UnboundLocalError in that case)
                if event.api('account.ban', owner_id=user_id) == 1:
                    msg = '😡 Забанено'
                else:
                    msg = '❗ Произошла ошибка'
            except VkApiResponseException as e:
                if e.error_msg.endswith('already blacklisted'):
                    msg = '❗ Пользователь уже забанен'
                else:
                    msg = f'❗ Ошиб_очка: {e.error_msg}'
        else:
            try:
                # same unbound-`msg` fix for the unban branch
                if event.api('account.unban', owner_id=user_id) == 1:
                    msg = '💚 Разбанено'
                else:
                    msg = '❗ Произошла ошибка'
            except VkApiResponseException as e:
                if e.error_msg.endswith('not blacklisted'):
                    msg = '👌🏻 Пользователь не забанен'
                else:
                    msg = f'❗ Ошиб_очка: {e.error_msg}'
    else:
        msg = "❗ Необходимо пересланное сообщение или упоминание"
    event.msg_op(2, msg)
    return "ok"
|
1673049
|
import pytest
from pycec.network import PhysicalAddress
def test_creation():
    """PhysicalAddress accepts colon strings, dotted strings, int lists and ints."""
    cases = [
        ("8F:65", 0x8F65),
        ("0F:60", 0x0F60),
        ("2.F.6.5", 0x2F65),
        ("0.F.6.0", 0x0F60),
        ([2, 15, 6, 4], 0x2F64),
        ([0, 15, 6, 0], 0x0F60),
        (0x0F60, 0x0F60),
    ]
    for raw, expected in cases:
        assert PhysicalAddress(raw).asint == expected
    # string form is lower-cased dotted nibbles
    assert PhysicalAddress("2.F.6.5").asstr == "2.f.6.5"
def test_aslist():
    """asattr exposes the address as its two raw bytes."""
    for raw, expected in [
        ("8f:ab", [0x8F, 0xAB]),
        ("00:00", [0x0, 0x0]),
        ("00:10", [0x0, 0x10]),
    ]:
        assert PhysicalAddress(raw).asattr == expected
def test_asint():
    """asint exposes the address as a single 16-bit integer."""
    for raw, expected in [
        ("8f:ab", 0x8FAB),
        ("00:00", 0x0000),
        ("00:10", 0x0010),
    ]:
        assert PhysicalAddress(raw).asint == expected
def test_ascmd():
    """ascmd round-trips the colon-separated CEC command form."""
    for raw in ("8f:ab", "00:00", "00:10"):
        assert PhysicalAddress(raw).ascmd == raw
def test_str():
    """str() renders the address as dotted lower-case nibbles."""
    for raw, expected in [
        ("8f:ab", "8.f.a.b"),
        ("00:00", "0.0.0.0"),
        ("00:10", "0.0.1.0"),
    ]:
        assert str(PhysicalAddress(raw)) == expected
def test_raises():
    """An over-long nibble list (8 entries) is rejected."""
    with pytest.raises(AttributeError):
        PhysicalAddress([0, 0, 0, 0, 0, 0, 0, 0])
|
1673101
|
from typing import Dict, List, Sequence, TypeVar, Union
from turf.bbox import bbox
from turf.explode import explode
from turf.nearest_point import nearest_point
from turf.helpers import (
all_geometry_types,
FeatureCollection,
MultiPolygon,
Point,
Polygon,
)
from turf.helpers import feature, feature_collection, geometry, point, polygon
from turf.helpers import Feature, FeatureCollection, Geometry
from turf.invariant import get_coords_from_features, get_geometry_type
from turf.utils.error_codes import error_code_messages
from turf.utils.exceptions import InvalidInput
GeoJson = TypeVar("GeoJson", Dict, Feature, FeatureCollection, Geometry)
PointFeature = TypeVar("PointFeature", Dict, Point, Sequence)
PolygonFeature = TypeVar("PolygonFeature", Dict, Polygon, MultiPolygon)
def polygon_tangents(
    start_point: PointFeature, polygon: PolygonFeature
) -> FeatureCollection:
    """ Finds the tangents of a Polygon or MultiPolygon from a Point.

    more: http://geomalgorithms.com/a15-_tangents.html

    :param start_point: point [lng, lat] or Point feature to calculate the
        tangent points from
    :param polygon: Polygon or MultiPolygon (feature or geometry) to get
        tangents from
    :return:
        FeatureCollection containing the two tangent points
        (rightmost first, leftmost second)
    """
    point_features = []
    point_coord = get_coords_from_features(start_point, ("Point",))
    polygon_coords = get_coords_from_features(polygon, ("Polygon", "MultiPolygon"))
    geometry_type = get_geometry_type(polygon)
    # Normalize the single-geometry case so the zip loop below can treat
    # Polygon and MultiPolygon inputs uniformly.
    if isinstance(geometry_type, str):
        geometry_type = [geometry_type]
        polygon_coords = [polygon_coords]
    box = bbox(polygon)
    near_point_index = 0
    near_point = False  # stays False unless the point falls inside the bbox
    # If the point lies inside the polygon bbox then it's a bit more complicated
    # points lying inside a polygon can reflex angles on concave polygons
    if (
        (point_coord[0] > box[0])
        and (point_coord[0] < box[2])
        and (point_coord[1] > box[1])
        and (point_coord[1] < box[3])
    ):
        near_point = nearest_point(start_point, explode(polygon))
        near_point_index = near_point["properties"]["featureIndex"]
    for geo_type, poly_coords in zip(geometry_type, polygon_coords):
        if geo_type == "Polygon":
            tangents = process_polygon(
                poly_coords, point_coord, near_point, near_point_index
            )
        # bruteforce approach:
        # calculate both tangents for each member polygon, then treat all of
        # those tangent points as one polygon and take its tangents
        elif geo_type == "MultiPolygon":
            multi_tangents = []
            for poly_coord in poly_coords:
                tangents = process_polygon(
                    poly_coord, point_coord, near_point, near_point_index
                )
                multi_tangents.extend(tangents)
            tangents = process_polygon(
                [multi_tangents], point_coord, near_point, near_point_index
            )
        r_tangents = tangents[0]
        l_tangents = tangents[1]
        point_features.extend([point(r_tangents), point(l_tangents)])
    return feature_collection(point_features)
def process_polygon(
    polygon_coords: Sequence,
    point_coord: Sequence,
    near_point: Union[Point, None],
    near_point_index: int,
) -> Sequence:
    """ Seeds and runs the tangent search for a single polygon.

    :param polygon_coords: polygon coordinates; only the outer ring
        (index 0) is used
    :param point_coord: view point [lng, lat]
    :param near_point: nearest point feature on the polygon when the view
        point lies inside the polygon's bbox, otherwise a falsy value
    :param near_point_index: vertex index of that nearest point (0 otherwise)
    :return:
        list with the [rightmost, leftmost] tangent coordinates
    """
    outer_ring = polygon_coords[0]
    right_seed = outer_ring[near_point_index]
    left_seed = outer_ring[0]
    # When the view point sits inside the bbox and below the nearest vertex,
    # seed the left tangent from that vertex as well.
    if near_point and near_point["geometry"]["coordinates"][1] < point_coord[1]:
        left_seed = outer_ring[near_point_index]
    return calculate_tangents(outer_ring, point_coord, right_seed, left_seed)
def calculate_tangents(
    poly_coords: Sequence,
    point_coord: Sequence,
    r_tangents: Sequence,
    l_tangents: Sequence,
) -> Sequence:
    """ Sweeps the polygon ring once, updating the two tangent candidates.

    A vertex is a tangent candidate when the orientation of its two
    incident edges (relative to the view point) flips sign.

    :param poly_coords: ring of polygon coordinates
    :param point_coord: view point [lng, lat]
    :param r_tangents: initial rightmost tangent candidate [lng, lat]
    :param l_tangents: initial leftmost tangent candidate [lng, lat]
    :return:
        list with the [rightmost, leftmost] tangent coordinates
    """
    ring_size = len(poly_coords)
    # Orientation of the closing edge (first vertex back to last vertex).
    prev_side = is_left(poly_coords[0], poly_coords[ring_size - 1], point_coord)
    for idx, vertex in enumerate(poly_coords):
        next_vertex = poly_coords[(idx + 1) % ring_size]
        next_side = is_left(vertex, next_vertex, point_coord)
        if prev_side <= 0 < next_side:
            # orientation flips right->left: candidate for the right tangent
            if not is_below(point_coord, vertex, r_tangents):
                r_tangents = vertex
        elif next_side <= 0 < prev_side:
            # orientation flips left->right: candidate for the left tangent
            if not is_above(point_coord, vertex, l_tangents):
                l_tangents = vertex
        prev_side = next_side
    return [r_tangents, l_tangents]
def is_above(point1: Sequence, point2: Sequence, point3: Sequence) -> bool:
    """ Orientation helper for the tangent sweep ("above" test).

    The previous docstring was copy-pasted from an unrelated function and
    described ``geojson`` parameters that do not exist here.

    :param point1: origin point [x, y] of the directed line
    :param point2: end point [x, y] of the directed line
    :param point3: point [x, y] being classified
    :return:
        True when point3 lies strictly to the left of the directed line
        point1 -> point2
    """
    return is_left(point1, point2, point3) > 0
def is_below(point1: Sequence, point2: Sequence, point3: Sequence) -> bool:
    """ Orientation helper for the tangent sweep ("below" test).

    The previous docstring was copy-pasted from an unrelated function and
    described ``geojson`` parameters that do not exist here.

    :param point1: origin point [x, y] of the directed line
    :param point2: end point [x, y] of the directed line
    :param point3: point [x, y] being classified
    :return:
        True when point3 lies strictly to the right of the directed line
        point1 -> point2
    """
    return is_left(point1, point2, point3) < 0
def is_left(point1: Sequence, point2: Sequence, point3: Sequence) -> float:
    """ Cross-product orientation test for three 2D points.

    Computes the z component of (point2 - point1) x (point3 - point1).
    The previous docstring was copy-pasted from an unrelated function and
    described ``geojson`` parameters that do not exist here.

    :param point1: origin point [x, y] of both vectors
    :param point2: end point [x, y] of the directed line
    :param point3: point [x, y] being classified
    :return:
        positive if point3 lies left of the directed line point1 -> point2,
        negative if it lies right, and 0 if the three points are collinear
    """
    return (point2[0] - point1[0]) * (point3[1] - point1[1]) - (
        point3[0] - point1[0]
    ) * (point2[1] - point1[1])
|
1673141
|
class Networks(object):
def __init__(self, session):
super(Networks, self).__init__()
self._session = session
def getNetwork(self, networkId: str):
"""
**Return a network**
https://developer.cisco.com/meraki/api-v1/#!get-network
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure'],
'operation': 'getNetwork'
}
resource = f'/networks/{networkId}'
return self._session.get(metadata, resource)
def updateNetwork(self, networkId: str, **kwargs):
"""
**Update a network**
https://developer.cisco.com/meraki/api-v1/#!update-network
- networkId (string): (required)
- name (string): The name of the network
- timeZone (string): The timezone of the network. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article.</a>
- tags (array): A list of tags to be applied to the network
- enrollmentString (string): A unique identifier which can be used for device enrollment or easy access through the Meraki SM Registration page or the Self Service Portal. Please note that changing this field may cause existing bookmarks to break.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure'],
'operation': 'updateNetwork'
}
resource = f'/networks/{networkId}'
body_params = ['name', 'timeZone', 'tags', 'enrollmentString', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetwork(self, networkId: str):
"""
**Delete a network**
https://developer.cisco.com/meraki/api-v1/#!delete-network
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure'],
'operation': 'deleteNetwork'
}
resource = f'/networks/{networkId}'
return self._session.delete(metadata, resource)
def getNetworkAlertsSettings(self, networkId: str):
"""
**Return the alert configuration for this network**
https://developer.cisco.com/meraki/api-v1/#!get-network-alerts-settings
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'alerts', 'settings'],
'operation': 'getNetworkAlertsSettings'
}
resource = f'/networks/{networkId}/alerts/settings'
return self._session.get(metadata, resource)
def updateNetworkAlertsSettings(self, networkId: str, **kwargs):
"""
**Update the alert configuration for this network**
https://developer.cisco.com/meraki/api-v1/#!update-network-alerts-settings
- networkId (string): (required)
- defaultDestinations (object): The network-wide destinations for all alerts on the network.
- alerts (array): Alert-specific configuration for each type. Only alerts that pertain to the network can be updated.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'alerts', 'settings'],
'operation': 'updateNetworkAlertsSettings'
}
resource = f'/networks/{networkId}/alerts/settings'
body_params = ['defaultDestinations', 'alerts', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def bindNetwork(self, networkId: str, configTemplateId: str, **kwargs):
"""
**Bind a network to a template.**
https://developer.cisco.com/meraki/api-v1/#!bind-network
- networkId (string): (required)
- configTemplateId (string): The ID of the template to which the network should be bound.
- autoBind (boolean): Optional boolean indicating whether the network's switches should automatically bind to profiles of the same model. Defaults to false if left unspecified. This option only affects switch networks and switch templates. Auto-bind is not valid unless the switch template has at least one profile and has at most one profile per switch model.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure'],
'operation': 'bindNetwork'
}
resource = f'/networks/{networkId}/bind'
body_params = ['configTemplateId', 'autoBind', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkBluetoothClients(self, networkId: str, total_pages=1, direction='next', **kwargs):
"""
**List the Bluetooth clients seen by APs in this network**
https://developer.cisco.com/meraki/api-v1/#!get-network-bluetooth-clients
- networkId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 7 days from today.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 7 days. The default is 1 day.
- perPage (integer): The number of entries per page returned. Acceptable range is 5 - 1000. Default is 10.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- includeConnectivityHistory (boolean): Include the connectivity history for this client
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'monitor', 'bluetoothClients'],
'operation': 'getNetworkBluetoothClients'
}
resource = f'/networks/{networkId}/bluetoothClients'
query_params = ['t0', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'includeConnectivityHistory', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getNetworkBluetoothClient(self, networkId: str, bluetoothClientId: str, **kwargs):
"""
**Return a Bluetooth client**
https://developer.cisco.com/meraki/api-v1/#!get-network-bluetooth-client
- networkId (string): (required)
- bluetoothClientId (string): (required)
- includeConnectivityHistory (boolean): Include the connectivity history for this client
- connectivityHistoryTimespan (integer): The timespan, in seconds, for the connectivityHistory data. By default 1 day, 86400, will be used.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'monitor', 'bluetoothClients'],
'operation': 'getNetworkBluetoothClient'
}
resource = f'/networks/{networkId}/bluetoothClients/{bluetoothClientId}'
query_params = ['includeConnectivityHistory', 'connectivityHistoryTimespan', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getNetworkClients(self, networkId: str, total_pages=1, direction='next', **kwargs):
"""
**List the clients that have used this network in the timespan**
https://developer.cisco.com/meraki/api-v1/#!get-network-clients
- networkId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 10.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'monitor', 'clients'],
'operation': 'getNetworkClients'
}
resource = f'/networks/{networkId}/clients'
query_params = ['t0', 'timespan', 'perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def provisionNetworkClients(self, networkId: str, clients: list, devicePolicy: str, **kwargs):
"""
**Provisions a client with a name and policy**
https://developer.cisco.com/meraki/api-v1/#!provision-network-clients
- networkId (string): (required)
- clients (array): The array of clients to provision
- devicePolicy (string): The policy to apply to the specified client. Can be 'Group policy', 'Allowed', 'Blocked', 'Per connection' or 'Normal'. Required.
- groupPolicyId (string): The ID of the desired group policy to apply to the client. Required if 'devicePolicy' is set to "Group policy". Otherwise this is ignored.
- policiesBySecurityAppliance (object): An object, describing what the policy-connection association is for the security appliance. (Only relevant if the security appliance is actually within the network)
- policiesBySsid (object): An object, describing the policy-connection associations for each active SSID within the network. Keys should be the number of enabled SSIDs, mapping to an object describing the client's policy
"""
kwargs.update(locals())
if 'devicePolicy' in kwargs:
options = ['Group policy', 'Allowed', 'Blocked', 'Per connection', 'Normal']
assert kwargs['devicePolicy'] in options, f'''"devicePolicy" cannot be "{kwargs['devicePolicy']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'clients'],
'operation': 'provisionNetworkClients'
}
resource = f'/networks/{networkId}/clients/provision'
body_params = ['clients', 'devicePolicy', 'groupPolicyId', 'policiesBySecurityAppliance', 'policiesBySsid', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkClient(self, networkId: str, clientId: str):
"""
**Return the client associated with the given identifier**
https://developer.cisco.com/meraki/api-v1/#!get-network-client
- networkId (string): (required)
- clientId (string): (required)
"""
metadata = {
'tags': ['networks', 'monitor', 'clients'],
'operation': 'getNetworkClient'
}
resource = f'/networks/{networkId}/clients/{clientId}'
return self._session.get(metadata, resource)
def getNetworkClientPolicy(self, networkId: str, clientId: str):
"""
**Return the policy assigned to a client on the network**
https://developer.cisco.com/meraki/api-v1/#!get-network-client-policy
- networkId (string): (required)
- clientId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'clients', 'policy'],
'operation': 'getNetworkClientPolicy'
}
resource = f'/networks/{networkId}/clients/{clientId}/policy'
return self._session.get(metadata, resource)
def updateNetworkClientPolicy(self, networkId: str, clientId: str, devicePolicy: str, **kwargs):
"""
**Update the policy assigned to a client on the network**
https://developer.cisco.com/meraki/api-v1/#!update-network-client-policy
- networkId (string): (required)
- clientId (string): (required)
- devicePolicy (string): The policy to assign. Can be 'Whitelisted', 'Blocked', 'Normal' or 'Group policy'. Required.
- groupPolicyId (string): [optional] If 'devicePolicy' is set to 'Group policy' this param is used to specify the group policy ID.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'clients', 'policy'],
'operation': 'updateNetworkClientPolicy'
}
resource = f'/networks/{networkId}/clients/{clientId}/policy'
body_params = ['devicePolicy', 'groupPolicyId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkClientSplashAuthorizationStatus(self, networkId: str, clientId: str):
"""
**Return the splash authorization for a client, for each SSID they've associated with through splash**
https://developer.cisco.com/meraki/api-v1/#!get-network-client-splash-authorization-status
- networkId (string): (required)
- clientId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'clients', 'splashAuthorizationStatus'],
'operation': 'getNetworkClientSplashAuthorizationStatus'
}
resource = f'/networks/{networkId}/clients/{clientId}/splashAuthorizationStatus'
return self._session.get(metadata, resource)
def updateNetworkClientSplashAuthorizationStatus(self, networkId: str, clientId: str, ssids: dict):
"""
**Update a client's splash authorization**
https://developer.cisco.com/meraki/api-v1/#!update-network-client-splash-authorization-status
- networkId (string): (required)
- clientId (string): (required)
- ssids (object): The target SSIDs. Each SSID must be enabled and must have Click-through splash enabled. For each SSID where isAuthorized is true, the expiration time will automatically be set according to the SSID's splash frequency. Not all networks support configuring all SSIDs
"""
kwargs = locals()
metadata = {
'tags': ['networks', 'configure', 'clients', 'splashAuthorizationStatus'],
'operation': 'updateNetworkClientSplashAuthorizationStatus'
}
resource = f'/networks/{networkId}/clients/{clientId}/splashAuthorizationStatus'
body_params = ['ssids', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkClientTrafficHistory(self, networkId: str, clientId: str, total_pages=1, direction='next', **kwargs):
"""
**Return the client's network traffic data over time**
https://developer.cisco.com/meraki/api-v1/#!get-network-client-traffic-history
- networkId (string): (required)
- clientId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'monitor', 'clients', 'trafficHistory'],
'operation': 'getNetworkClientTrafficHistory'
}
resource = f'/networks/{networkId}/clients/{clientId}/trafficHistory'
query_params = ['perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getNetworkClientUsageHistory(self, networkId: str, clientId: str):
"""
**Return the client's daily usage history**
https://developer.cisco.com/meraki/api-v1/#!get-network-client-usage-history
- networkId (string): (required)
- clientId (string): (required)
"""
metadata = {
'tags': ['networks', 'monitor', 'clients', 'usageHistory'],
'operation': 'getNetworkClientUsageHistory'
}
resource = f'/networks/{networkId}/clients/{clientId}/usageHistory'
return self._session.get(metadata, resource)
def getNetworkDevices(self, networkId: str):
"""
**List the devices in a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-devices
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'devices'],
'operation': 'getNetworkDevices'
}
resource = f'/networks/{networkId}/devices'
return self._session.get(metadata, resource)
def claimNetworkDevices(self, networkId: str, serials: list):
"""
**Claim devices into a network**
https://developer.cisco.com/meraki/api-v1/#!claim-network-devices
- networkId (string): (required)
- serials (array): A list of serials of devices to claim
"""
kwargs = locals()
metadata = {
'tags': ['networks', 'configure', 'devices'],
'operation': 'claimNetworkDevices'
}
resource = f'/networks/{networkId}/devices/claim'
body_params = ['serials', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def removeNetworkDevices(self, networkId: str, serial: str):
"""
**Remove a single device**
https://developer.cisco.com/meraki/api-v1/#!remove-network-devices
- networkId (string): (required)
- serial (string): The serial of a device
"""
kwargs = locals()
metadata = {
'tags': ['networks', 'configure', 'devices'],
'operation': 'removeNetworkDevices'
}
resource = f'/networks/{networkId}/devices/remove'
body_params = ['serial', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkEnvironmentalEvents(self, networkId: str, total_pages=1, direction='next', **kwargs):
"""
**List the environmental events for the network**
https://developer.cisco.com/meraki/api-v1/#!get-network-environmental-events
- networkId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- includedEventTypes (array): A list of event types. The returned events will be filtered to only include events with these types.
- excludedEventTypes (array): A list of event types. The returned events will be filtered to exclude events with these types.
- sensorSerial (string): The serial of the sensor device which the list of events will be filtered with
- gatewaySerial (string): The serial of the environmental gateway device which the list of events will be filtered with
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 10.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'monitor', 'environmental', 'events'],
'operation': 'getNetworkEnvironmentalEvents'
}
resource = f'/networks/{networkId}/environmental/events'
query_params = ['includedEventTypes', 'excludedEventTypes', 'sensorSerial', 'gatewaySerial', 'perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
array_params = ['includedEventTypes', 'excludedEventTypes', ]
for k, v in kwargs.items():
if k.strip() in array_params:
params[f'{k.strip()}[]'] = kwargs[f'{k}']
params.pop(k.strip())
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getNetworkEnvironmentalEventsEventTypes(self, networkId: str):
"""
**List the event type to human-readable description**
https://developer.cisco.com/meraki/api-v1/#!get-network-environmental-events-event-types
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'environmental', 'events', 'eventTypes'],
'operation': 'getNetworkEnvironmentalEventsEventTypes'
}
resource = f'/networks/{networkId}/environmental/events/eventTypes'
return self._session.get(metadata, resource)
def getNetworkEvents(self, networkId: str, total_pages=1, direction='prev', event_log_end_time=None, **kwargs):
"""
**List the events for the network**
https://developer.cisco.com/meraki/api-v1/#!get-network-events
- networkId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" or "prev" (default) page
- event_log_end_time (string): ISO8601 Zulu/UTC time, to use in conjunction with startingAfter, to retrieve events within a time window
- productType (string): The product type to fetch events for. This parameter is required for networks with multiple device types. Valid types are wireless, appliance, switch, systemsManager, camera, cellularGateway, and environmental
- includedEventTypes (array): A list of event types. The returned events will be filtered to only include events with these types.
- excludedEventTypes (array): A list of event types. The returned events will be filtered to exclude events with these types.
- deviceMac (string): The MAC address of the Meraki device which the list of events will be filtered with
- deviceSerial (string): The serial of the Meraki device which the list of events will be filtered with
- deviceName (string): The name of the Meraki device which the list of events will be filtered with
- clientIp (string): The IP of the client which the list of events will be filtered with. Only supported for track-by-IP networks.
- clientMac (string): The MAC address of the client which the list of events will be filtered with. Only supported for track-by-MAC networks.
- clientName (string): The name, or partial name, of the client which the list of events will be filtered with
- smDeviceMac (string): The MAC address of the Systems Manager device which the list of events will be filtered with
- smDeviceName (string): The name of the Systems Manager device which the list of events will be filtered with
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 10.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'monitor', 'events'],
'operation': 'getNetworkEvents'
}
resource = f'/networks/{networkId}/events'
query_params = ['productType', 'includedEventTypes', 'excludedEventTypes', 'deviceMac', 'deviceSerial', 'deviceName', 'clientIp', 'clientMac', 'clientName', 'smDeviceMac', 'smDeviceName', 'perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
array_params = ['includedEventTypes', 'excludedEventTypes', ]
for k, v in kwargs.items():
if k.strip() in array_params:
params[f'{k.strip()}[]'] = kwargs[f'{k}']
params.pop(k.strip())
return self._session.get_pages(metadata, resource, params, total_pages, direction, event_log_end_time)
def getNetworkEventsEventTypes(self, networkId: str):
"""
**List the event type to human-readable description**
https://developer.cisco.com/meraki/api-v1/#!get-network-events-event-types
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'monitor', 'events', 'eventTypes'],
'operation': 'getNetworkEventsEventTypes'
}
resource = f'/networks/{networkId}/events/eventTypes'
return self._session.get(metadata, resource)
def getNetworkFirmwareUpgrades(self, networkId: str):
"""
**Get current maintenance window for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-firmware-upgrades
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'firmwareUpgrades'],
'operation': 'getNetworkFirmwareUpgrades'
}
resource = f'/networks/{networkId}/firmwareUpgrades'
return self._session.get(metadata, resource)
def updateNetworkFirmwareUpgrades(self, networkId: str, **kwargs):
"""
**Update current maintenance window for a network**
https://developer.cisco.com/meraki/api-v1/#!update-network-firmware-upgrades
- networkId (string): (required)
- upgradeWindow (object): Upgrade window for devices in network
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'firmwareUpgrades'],
'operation': 'updateNetworkFirmwareUpgrades'
}
resource = f'/networks/{networkId}/firmwareUpgrades'
body_params = ['upgradeWindow', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkFloorPlans(self, networkId: str):
"""
**List the floor plans that belong to your network**
https://developer.cisco.com/meraki/api-v1/#!get-network-floor-plans
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'floorPlans'],
'operation': 'getNetworkFloorPlans'
}
resource = f'/networks/{networkId}/floorPlans'
return self._session.get(metadata, resource)
def createNetworkFloorPlan(self, networkId: str, name: str, imageContents: str, **kwargs):
"""
**Upload a floor plan**
https://developer.cisco.com/meraki/api-v1/#!create-network-floor-plan
- networkId (string): (required)
- name (string): The name of your floor plan.
- imageContents (string): The file contents (a base 64 encoded string) of your image. Supported formats are PNG, GIF, and JPG. Note that all images are saved as PNG files, regardless of the format they are uploaded in.
- center (object): The longitude and latitude of the center of your floor plan. The 'center' or two adjacent corners (e.g. 'topLeftCorner' and 'bottomLeftCorner') must be specified. If 'center' is specified, the floor plan is placed over that point with no rotation. If two adjacent corners are specified, the floor plan is rotated to line up with the two specified points. The aspect ratio of the floor plan's image is preserved regardless of which corners/center are specified. (This means if that more than two corners are specified, only two corners may be used to preserve the floor plan's aspect ratio.). No two points can have the same latitude, longitude pair.
- bottomLeftCorner (object): The longitude and latitude of the bottom left corner of your floor plan.
- bottomRightCorner (object): The longitude and latitude of the bottom right corner of your floor plan.
- topLeftCorner (object): The longitude and latitude of the top left corner of your floor plan.
- topRightCorner (object): The longitude and latitude of the top right corner of your floor plan.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'floorPlans'],
'operation': 'createNetworkFloorPlan'
}
resource = f'/networks/{networkId}/floorPlans'
body_params = ['name', 'center', 'bottomLeftCorner', 'bottomRightCorner', 'topLeftCorner', 'topRightCorner', 'imageContents', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkFloorPlan(self, networkId: str, floorPlanId: str):
"""
**Find a floor plan by ID**
https://developer.cisco.com/meraki/api-v1/#!get-network-floor-plan
- networkId (string): (required)
- floorPlanId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'floorPlans'],
'operation': 'getNetworkFloorPlan'
}
resource = f'/networks/{networkId}/floorPlans/{floorPlanId}'
return self._session.get(metadata, resource)
def updateNetworkFloorPlan(self, networkId: str, floorPlanId: str, **kwargs):
"""
**Update a floor plan's geolocation and other meta data**
https://developer.cisco.com/meraki/api-v1/#!update-network-floor-plan
- networkId (string): (required)
- floorPlanId (string): (required)
- name (string): The name of your floor plan.
- center (object): The longitude and latitude of the center of your floor plan. If you want to change the geolocation data of your floor plan, either the 'center' or two adjacent corners (e.g. 'topLeftCorner' and 'bottomLeftCorner') must be specified. If 'center' is specified, the floor plan is placed over that point with no rotation. If two adjacent corners are specified, the floor plan is rotated to line up with the two specified points. The aspect ratio of the floor plan's image is preserved regardless of which corners/center are specified. (This means if that more than two corners are specified, only two corners may be used to preserve the floor plan's aspect ratio.). No two points can have the same latitude, longitude pair.
- bottomLeftCorner (object): The longitude and latitude of the bottom left corner of your floor plan.
- bottomRightCorner (object): The longitude and latitude of the bottom right corner of your floor plan.
- topLeftCorner (object): The longitude and latitude of the top left corner of your floor plan.
- topRightCorner (object): The longitude and latitude of the top right corner of your floor plan.
- imageContents (string): The file contents (a base 64 encoded string) of your new image. Supported formats are PNG, GIF, and JPG. Note that all images are saved as PNG files, regardless of the format they are uploaded in. If you upload a new image, and you do NOT specify any new geolocation fields ('center, 'topLeftCorner', etc), the floor plan will be recentered with no rotation in order to maintain the aspect ratio of your new image.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'floorPlans'],
'operation': 'updateNetworkFloorPlan'
}
resource = f'/networks/{networkId}/floorPlans/{floorPlanId}'
body_params = ['name', 'center', 'bottomLeftCorner', 'bottomRightCorner', 'topLeftCorner', 'topRightCorner', 'imageContents', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkFloorPlan(self, networkId: str, floorPlanId: str):
"""
**Destroy a floor plan**
https://developer.cisco.com/meraki/api-v1/#!delete-network-floor-plan
- networkId (string): (required)
- floorPlanId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'floorPlans'],
'operation': 'deleteNetworkFloorPlan'
}
resource = f'/networks/{networkId}/floorPlans/{floorPlanId}'
return self._session.delete(metadata, resource)
def getNetworkGroupPolicies(self, networkId: str):
"""
**List the group policies in a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-group-policies
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'groupPolicies'],
'operation': 'getNetworkGroupPolicies'
}
resource = f'/networks/{networkId}/groupPolicies'
return self._session.get(metadata, resource)
def createNetworkGroupPolicy(self, networkId: str, name: str, **kwargs):
"""
**Create a group policy**
https://developer.cisco.com/meraki/api-v1/#!create-network-group-policy
- networkId (string): (required)
- name (string): The name for your group policy. Required.
- scheduling (object): The schedule for the group policy. Schedules are applied to days of the week.
- bandwidth (object): The bandwidth settings for clients bound to your group policy.
- firewallAndTrafficShaping (object): The firewall and traffic shaping rules and settings for your policy.
- contentFiltering (object): The content filtering settings for your group policy
- splashAuthSettings (string): Whether clients bound to your policy will bypass splash authorization or behave according to the network's rules. Can be one of 'network default' or 'bypass'. Only available if your network has a wireless configuration.
- vlanTagging (object): The VLAN tagging settings for your group policy. Only available if your network has a wireless configuration.
- bonjourForwarding (object): The Bonjour settings for your group policy. Only valid if your network has a wireless configuration.
"""
kwargs.update(locals())
if 'splashAuthSettings' in kwargs:
options = ['network default', 'bypass']
assert kwargs['splashAuthSettings'] in options, f'''"splashAuthSettings" cannot be "{kwargs['splashAuthSettings']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'groupPolicies'],
'operation': 'createNetworkGroupPolicy'
}
resource = f'/networks/{networkId}/groupPolicies'
body_params = ['name', 'scheduling', 'bandwidth', 'firewallAndTrafficShaping', 'contentFiltering', 'splashAuthSettings', 'vlanTagging', 'bonjourForwarding', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkGroupPolicy(self, networkId: str, groupPolicyId: str):
"""
**Display a group policy**
https://developer.cisco.com/meraki/api-v1/#!get-network-group-policy
- networkId (string): (required)
- groupPolicyId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'groupPolicies'],
'operation': 'getNetworkGroupPolicy'
}
resource = f'/networks/{networkId}/groupPolicies/{groupPolicyId}'
return self._session.get(metadata, resource)
def updateNetworkGroupPolicy(self, networkId: str, groupPolicyId: str, **kwargs):
"""
**Update a group policy**
https://developer.cisco.com/meraki/api-v1/#!update-network-group-policy
- networkId (string): (required)
- groupPolicyId (string): (required)
- name (string): The name for your group policy.
- scheduling (object): The schedule for the group policy. Schedules are applied to days of the week.
- bandwidth (object): The bandwidth settings for clients bound to your group policy.
- firewallAndTrafficShaping (object): The firewall and traffic shaping rules and settings for your policy.
- contentFiltering (object): The content filtering settings for your group policy
- splashAuthSettings (string): Whether clients bound to your policy will bypass splash authorization or behave according to the network's rules. Can be one of 'network default' or 'bypass'. Only available if your network has a wireless configuration.
- vlanTagging (object): The VLAN tagging settings for your group policy. Only available if your network has a wireless configuration.
- bonjourForwarding (object): The Bonjour settings for your group policy. Only valid if your network has a wireless configuration.
"""
kwargs.update(locals())
if 'splashAuthSettings' in kwargs:
options = ['network default', 'bypass']
assert kwargs['splashAuthSettings'] in options, f'''"splashAuthSettings" cannot be "{kwargs['splashAuthSettings']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'groupPolicies'],
'operation': 'updateNetworkGroupPolicy'
}
resource = f'/networks/{networkId}/groupPolicies/{groupPolicyId}'
body_params = ['name', 'scheduling', 'bandwidth', 'firewallAndTrafficShaping', 'contentFiltering', 'splashAuthSettings', 'vlanTagging', 'bonjourForwarding', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkGroupPolicy(self, networkId: str, groupPolicyId: str):
"""
**Delete a group policy**
https://developer.cisco.com/meraki/api-v1/#!delete-network-group-policy
- networkId (string): (required)
- groupPolicyId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'groupPolicies'],
'operation': 'deleteNetworkGroupPolicy'
}
resource = f'/networks/{networkId}/groupPolicies/{groupPolicyId}'
return self._session.delete(metadata, resource)
def getNetworkMerakiAuthUsers(self, networkId: str):
"""
**List the users configured under Meraki Authentication for a network (splash guest or RADIUS users for a wireless network, or client VPN users for a wired network)**
https://developer.cisco.com/meraki/api-v1/#!get-network-meraki-auth-users
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'merakiAuthUsers'],
'operation': 'getNetworkMerakiAuthUsers'
}
resource = f'/networks/{networkId}/merakiAuthUsers'
return self._session.get(metadata, resource)
def createNetworkMerakiAuthUser(self, networkId: str, email: str, name: str, password: str, authorizations: list, **kwargs):
"""
**Create a user configured with Meraki Authentication for a network (currently supports 802.1X, splash guest, and client VPN users, and currently, organizations have a 50,000 user cap)**
https://developer.cisco.com/meraki/api-v1/#!create-network-meraki-auth-user
- networkId (string): (required)
- email (string): Email address of the user
- name (string): Name of the user
- password (string): <PASSWORD> this <PASSWORD> account
- authorizations (array): Authorization zones and expiration dates for the user.
- accountType (string): Authorization type for user. Can be 'Guest' or '802.1X' for wireless networks, or 'Client VPN' for wired networks. Defaults to '802.1X'.
- emailPasswordToUser (boolean): Whether or not Meraki should email the password to user. Default is false.
"""
kwargs.update(locals())
if 'accountType' in kwargs:
options = ['Guest', '802.1X', 'Client VPN']
assert kwargs['accountType'] in options, f'''"accountType" cannot be "{kwargs['accountType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'merakiAuthUsers'],
'operation': 'createNetworkMerakiAuthUser'
}
resource = f'/networks/{networkId}/merakiAuthUsers'
body_params = ['email', 'name', 'password', 'accountType', 'emailPasswordToUser', 'authorizations', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkMerakiAuthUser(self, networkId: str, merakiAuthUserId: str):
"""
**Return the Meraki Auth splash guest, RADIUS, or client VPN user**
https://developer.cisco.com/meraki/api-v1/#!get-network-meraki-auth-user
- networkId (string): (required)
- merakiAuthUserId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'merakiAuthUsers'],
'operation': 'getNetworkMerakiAuthUser'
}
resource = f'/networks/{networkId}/merakiAuthUsers/{merakiAuthUserId}'
return self._session.get(metadata, resource)
def deleteNetworkMerakiAuthUser(self, networkId: str, merakiAuthUserId: str):
"""
**Delete a user configured with Meraki Authentication (currently, 802.1X RADIUS, splash guest, and client VPN users can be deleted)**
https://developer.cisco.com/meraki/api-v1/#!delete-network-meraki-auth-user
- networkId (string): (required)
- merakiAuthUserId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'merakiAuthUsers'],
'operation': 'deleteNetworkMerakiAuthUser'
}
resource = f'/networks/{networkId}/merakiAuthUsers/{merakiAuthUserId}'
return self._session.delete(metadata, resource)
def updateNetworkMerakiAuthUser(self, networkId: str, merakiAuthUserId: str, **kwargs):
"""
**Update a user configured with Meraki Authentication (currently, 802.1X RADIUS, splash guest, and client VPN users can be deleted)**
https://developer.cisco.com/meraki/api-v1/#!update-network-meraki-auth-user
- networkId (string): (required)
- merakiAuthUserId (string): (required)
- name (string): Name of the user
- password (string): <PASSWORD> this user account
- emailPasswordToUser (boolean): Whether or not Meraki should email the password to user. Default is false.
- authorizations (array): Authorization zones and expiration dates for the user.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'merakiAuthUsers'],
'operation': 'updateNetworkMerakiAuthUser'
}
resource = f'/networks/{networkId}/merakiAuthUsers/{merakiAuthUserId}'
body_params = ['name', 'password', 'emailPasswordToUser', 'authorizations', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkMqttBrokers(self, networkId: str):
"""
**List the MQTT brokers for this network**
https://developer.cisco.com/meraki/api-v1/#!get-network-mqtt-brokers
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'mqttBrokers'],
'operation': 'getNetworkMqttBrokers'
}
resource = f'/networks/{networkId}/mqttBrokers'
return self._session.get(metadata, resource)
def createNetworkMqttBroker(self, networkId: str, name: str, host: str, port: int):
"""
**Add an MQTT broker**
https://developer.cisco.com/meraki/api-v1/#!create-network-mqtt-broker
- networkId (string): (required)
- name (string): Name of the MQTT broker
- host (string): Host name/IP address where MQTT broker runs
- port (integer): Host port though which MQTT broker can be reached
"""
kwargs = locals()
metadata = {
'tags': ['networks', 'configure', 'mqttBrokers'],
'operation': 'createNetworkMqttBroker'
}
resource = f'/networks/{networkId}/mqttBrokers'
body_params = ['name', 'host', 'port', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkMqttBroker(self, networkId: str, mqttBrokerId: str):
"""
**Return an MQTT broker**
https://developer.cisco.com/meraki/api-v1/#!get-network-mqtt-broker
- networkId (string): (required)
- mqttBrokerId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'mqttBrokers'],
'operation': 'getNetworkMqttBroker'
}
resource = f'/networks/{networkId}/mqttBrokers/{mqttBrokerId}'
return self._session.get(metadata, resource)
def updateNetworkMqttBroker(self, networkId: str, mqttBrokerId: str, **kwargs):
"""
**Update an MQTT broker**
https://developer.cisco.com/meraki/api-v1/#!update-network-mqtt-broker
- networkId (string): (required)
- mqttBrokerId (string): (required)
- name (string): Name of the mqtt config
- host (string): Host name where mqtt broker runs
- port (integer): Host port though which mqtt broker can be reached
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'mqttBrokers'],
'operation': 'updateNetworkMqttBroker'
}
resource = f'/networks/{networkId}/mqttBrokers/{mqttBrokerId}'
body_params = ['name', 'host', 'port', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkMqttBroker(self, networkId: str, mqttBrokerId: str):
"""
**Delete an MQTT broker**
https://developer.cisco.com/meraki/api-v1/#!delete-network-mqtt-broker
- networkId (string): (required)
- mqttBrokerId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'mqttBrokers'],
'operation': 'deleteNetworkMqttBroker'
}
resource = f'/networks/{networkId}/mqttBrokers/{mqttBrokerId}'
return self._session.delete(metadata, resource)
def getNetworkNetflow(self, networkId: str):
"""
**Return the NetFlow traffic reporting settings for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-netflow
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'netflow'],
'operation': 'getNetworkNetflow'
}
resource = f'/networks/{networkId}/netflow'
return self._session.get(metadata, resource)
def updateNetworkNetflow(self, networkId: str, **kwargs):
"""
**Update the NetFlow traffic reporting settings for a network**
https://developer.cisco.com/meraki/api-v1/#!update-network-netflow
- networkId (string): (required)
- reportingEnabled (boolean): Boolean indicating whether NetFlow traffic reporting is enabled (true) or disabled (false).
- collectorIp (string): The IPv4 address of the NetFlow collector.
- collectorPort (integer): The port that the NetFlow collector will be listening on.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'netflow'],
'operation': 'updateNetworkNetflow'
}
resource = f'/networks/{networkId}/netflow'
body_params = ['reportingEnabled', 'collectorIp', 'collectorPort', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkNetworkHealthChannelUtilization(self, networkId: str, total_pages=1, direction='next', **kwargs):
"""
**Get the channel utilization over each radio for all APs in a network.**
https://developer.cisco.com/meraki/api-v1/#!get-network-network-health-channel-utilization
- networkId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
- resolution (integer): The time resolution in seconds for returned data. The valid resolutions are: 600. The default is 600.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 100. Default is 10.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'networkHealth', 'channelUtilization'],
'operation': 'getNetworkNetworkHealthChannelUtilization'
}
resource = f'/networks/{networkId}/networkHealth/channelUtilization'
query_params = ['t0', 't1', 'timespan', 'resolution', 'perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getNetworkPiiPiiKeys(self, networkId: str, **kwargs):
"""
**List the keys required to access Personally Identifiable Information (PII) for a given identifier**
https://developer.cisco.com/meraki/api-v1/#!get-network-pii-pii-keys
- networkId (string): (required)
- username (string): The username of a Systems Manager user
- email (string): The email of a network user account or a Systems Manager device
- mac (string): The MAC of a network client device or a Systems Manager device
- serial (string): The serial of a Systems Manager device
- imei (string): The IMEI of a Systems Manager device
- bluetoothMac (string): The MAC of a Bluetooth client
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'pii', 'piiKeys'],
'operation': 'getNetworkPiiPiiKeys'
}
resource = f'/networks/{networkId}/pii/piiKeys'
query_params = ['username', 'email', 'mac', 'serial', 'imei', 'bluetoothMac', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getNetworkPiiRequests(self, networkId: str):
"""
**List the PII requests for this network or organization**
https://developer.cisco.com/meraki/api-v1/#!get-network-pii-requests
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'pii', 'requests'],
'operation': 'getNetworkPiiRequests'
}
resource = f'/networks/{networkId}/pii/requests'
return self._session.get(metadata, resource)
def createNetworkPiiRequest(self, networkId: str, **kwargs):
"""
**Submit a new delete or restrict processing PII request**
https://developer.cisco.com/meraki/api-v1/#!create-network-pii-request
- networkId (string): (required)
- type (string): One of "delete" or "restrict processing"
- datasets (array): The datasets related to the provided key that should be deleted. Only applies to "delete" requests. The value "all" will be expanded to all datasets applicable to this type. The datasets by applicable to each type are: mac (usage, events, traffic), email (users, loginAttempts), username (users, loginAttempts), bluetoothMac (client, connectivity), smDeviceId (device), smUserId (user)
- username (string): The username of a network log in. Only applies to "delete" requests.
- email (string): The email of a network user account. Only applies to "delete" requests.
- mac (string): The MAC of a network client device. Applies to both "restrict processing" and "delete" requests.
- smDeviceId (string): The sm_device_id of a Systems Manager device. The only way to "restrict processing" or "delete" a Systems Manager device. Must include "device" in the dataset for a "delete" request to destroy the device.
- smUserId (string): The sm_user_id of a Systems Manager user. The only way to "restrict processing" or "delete" a Systems Manager user. Must include "user" in the dataset for a "delete" request to destroy the user.
"""
kwargs.update(locals())
if 'type' in kwargs:
options = ['delete', 'restrict processing']
assert kwargs['type'] in options, f'''"type" cannot be "{kwargs['type']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'pii', 'requests'],
'operation': 'createNetworkPiiRequest'
}
resource = f'/networks/{networkId}/pii/requests'
body_params = ['type', 'datasets', 'username', 'email', 'mac', 'smDeviceId', 'smUserId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkPiiRequest(self, networkId: str, requestId: str):
"""
**Return a PII request**
https://developer.cisco.com/meraki/api-v1/#!get-network-pii-request
- networkId (string): (required)
- requestId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'pii', 'requests'],
'operation': 'getNetworkPiiRequest'
}
resource = f'/networks/{networkId}/pii/requests/{requestId}'
return self._session.get(metadata, resource)
def deleteNetworkPiiRequest(self, networkId: str, requestId: str):
"""
**Delete a restrict processing PII request**
https://developer.cisco.com/meraki/api-v1/#!delete-network-pii-request
- networkId (string): (required)
- requestId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'pii', 'requests'],
'operation': 'deleteNetworkPiiRequest'
}
resource = f'/networks/{networkId}/pii/requests/{requestId}'
return self._session.delete(metadata, resource)
def getNetworkPiiSmDevicesForKey(self, networkId: str, **kwargs):
"""
**Given a piece of Personally Identifiable Information (PII), return the Systems Manager device ID(s) associated with that identifier**
https://developer.cisco.com/meraki/api-v1/#!get-network-pii-sm-devices-for-key
- networkId (string): (required)
- username (string): The username of a Systems Manager user
- email (string): The email of a network user account or a Systems Manager device
- mac (string): The MAC of a network client device or a Systems Manager device
- serial (string): The serial of a Systems Manager device
- imei (string): The IMEI of a Systems Manager device
- bluetoothMac (string): The MAC of a Bluetooth client
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'pii', 'smDevicesForKey'],
'operation': 'getNetworkPiiSmDevicesForKey'
}
resource = f'/networks/{networkId}/pii/smDevicesForKey'
query_params = ['username', 'email', 'mac', 'serial', 'imei', 'bluetoothMac', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getNetworkPiiSmOwnersForKey(self, networkId: str, **kwargs):
"""
**Given a piece of Personally Identifiable Information (PII), return the Systems Manager owner ID(s) associated with that identifier**
https://developer.cisco.com/meraki/api-v1/#!get-network-pii-sm-owners-for-key
- networkId (string): (required)
- username (string): The username of a Systems Manager user
- email (string): The email of a network user account or a Systems Manager device
- mac (string): The MAC of a network client device or a Systems Manager device
- serial (string): The serial of a Systems Manager device
- imei (string): The IMEI of a Systems Manager device
- bluetoothMac (string): The MAC of a Bluetooth client
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'pii', 'smOwnersForKey'],
'operation': 'getNetworkPiiSmOwnersForKey'
}
resource = f'/networks/{networkId}/pii/smOwnersForKey'
query_params = ['username', 'email', 'mac', 'serial', 'imei', 'bluetoothMac', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getNetworkSettings(self, networkId: str):
"""
**Return the settings for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-settings
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'settings'],
'operation': 'getNetworkSettings'
}
resource = f'/networks/{networkId}/settings'
return self._session.get(metadata, resource)
def updateNetworkSettings(self, networkId: str, **kwargs):
"""
**Update the settings for a network**
https://developer.cisco.com/meraki/api-v1/#!update-network-settings
- networkId (string): (required)
- localStatusPageEnabled (boolean): Enables / disables the local device status pages (<a target='_blank' href='http://my.meraki.com/'>my.meraki.com, </a><a target='_blank' href='http://ap.meraki.com/'>ap.meraki.com, </a><a target='_blank' href='http://switch.meraki.com/'>switch.meraki.com, </a><a target='_blank' href='http://wired.meraki.com/'>wired.meraki.com</a>). Optional (defaults to false)
- remoteStatusPageEnabled (boolean): Enables / disables access to the device status page (<a target='_blank'>http://[device's LAN IP])</a>. Optional. Can only be set if localStatusPageEnabled is set to true
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'settings'],
'operation': 'updateNetworkSettings'
}
resource = f'/networks/{networkId}/settings'
body_params = ['localStatusPageEnabled', 'remoteStatusPageEnabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSnmp(self, networkId: str):
"""
**Return the SNMP settings for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-snmp
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'snmp'],
'operation': 'getNetworkSnmp'
}
resource = f'/networks/{networkId}/snmp'
return self._session.get(metadata, resource)
def updateNetworkSnmp(self, networkId: str, **kwargs):
"""
**Update the SNMP settings for a network**
https://developer.cisco.com/meraki/api-v1/#!update-network-snmp
- networkId (string): (required)
- access (string): The type of SNMP access. Can be one of 'none' (disabled), 'community' (V1/V2c), or 'users' (V3).
- communityString (string): The SNMP community string. Only relevant if 'access' is set to 'community'.
- users (array): The list of SNMP users. Only relevant if 'access' is set to 'users'.
"""
kwargs.update(locals())
if 'access' in kwargs:
options = ['none', 'community', 'users']
assert kwargs['access'] in options, f'''"access" cannot be "{kwargs['access']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'snmp'],
'operation': 'updateNetworkSnmp'
}
resource = f'/networks/{networkId}/snmp'
body_params = ['access', 'communityString', 'users', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSplashLoginAttempts(self, networkId: str, **kwargs):
"""
**List the splash login attempts for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-splash-login-attempts
- networkId (string): (required)
- ssidNumber (integer): Only return the login attempts for the specified SSID
- loginIdentifier (string): The username, email, or phone number used during login
- timespan (integer): The timespan, in seconds, for the login attempts. The period will be from [timespan] seconds ago until now. The maximum timespan is 3 months
"""
kwargs.update(locals())
if 'ssidNumber' in kwargs:
options = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
assert kwargs['ssidNumber'] in options, f'''"ssidNumber" cannot be "{kwargs['ssidNumber']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'monitor', 'splashLoginAttempts'],
'operation': 'getNetworkSplashLoginAttempts'
}
resource = f'/networks/{networkId}/splashLoginAttempts'
query_params = ['ssidNumber', 'loginIdentifier', 'timespan', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def splitNetwork(self, networkId: str):
"""
**Split a combined network into individual networks for each type of device**
https://developer.cisco.com/meraki/api-v1/#!split-network
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure'],
'operation': 'splitNetwork'
}
resource = f'/networks/{networkId}/split'
return self._session.post(metadata, resource)
def getNetworkSyslogServers(self, networkId: str):
"""
**List the syslog servers for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-syslog-servers
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'syslogServers'],
'operation': 'getNetworkSyslogServers'
}
resource = f'/networks/{networkId}/syslogServers'
return self._session.get(metadata, resource)
def updateNetworkSyslogServers(self, networkId: str, servers: list):
"""
**Update the syslog servers for a network**
https://developer.cisco.com/meraki/api-v1/#!update-network-syslog-servers
- networkId (string): (required)
- servers (array): A list of the syslog servers for this network
"""
kwargs = locals()
metadata = {
'tags': ['networks', 'configure', 'syslogServers'],
'operation': 'updateNetworkSyslogServers'
}
resource = f'/networks/{networkId}/syslogServers'
body_params = ['servers', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkTraffic(self, networkId: str, **kwargs):
"""
**Return the traffic analysis data for this network**
https://developer.cisco.com/meraki/api-v1/#!get-network-traffic
- networkId (string): (required)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 30 days from today.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 30 days.
- deviceType (string): Filter the data by device type: 'combined', 'wireless', 'switch' or 'appliance'. Defaults to 'combined'. When using 'combined', for each rule the data will come from the device type with the most usage.
"""
kwargs.update(locals())
if 'deviceType' in kwargs:
options = ['combined', 'wireless', 'switch', 'appliance']
assert kwargs['deviceType'] in options, f'''"deviceType" cannot be "{kwargs['deviceType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'monitor', 'traffic'],
'operation': 'getNetworkTraffic'
}
resource = f'/networks/{networkId}/traffic'
query_params = ['t0', 'timespan', 'deviceType', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getNetworkTrafficAnalysis(self, networkId: str):
"""
**Return the traffic analysis settings for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-traffic-analysis
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'trafficAnalysis'],
'operation': 'getNetworkTrafficAnalysis'
}
resource = f'/networks/{networkId}/trafficAnalysis'
return self._session.get(metadata, resource)
def updateNetworkTrafficAnalysis(self, networkId: str, **kwargs):
"""
**Update the traffic analysis settings for a network**
https://developer.cisco.com/meraki/api-v1/#!update-network-traffic-analysis
- networkId (string): (required)
- mode (string): The traffic analysis mode for the network. Can be one of 'disabled' (do not collect traffic types),
'basic' (collect generic traffic categories), or 'detailed' (collect destination hostnames).
- customPieChartItems (array): The list of items that make up the custom pie chart for traffic reporting.
"""
kwargs.update(locals())
if 'mode' in kwargs:
options = ['disabled', 'basic', 'detailed']
assert kwargs['mode'] in options, f'''"mode" cannot be "{kwargs['mode']}", & must be set to one of: {options}'''
metadata = {
'tags': ['networks', 'configure', 'trafficAnalysis'],
'operation': 'updateNetworkTrafficAnalysis'
}
resource = f'/networks/{networkId}/trafficAnalysis'
body_params = ['mode', 'customPieChartItems', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkTrafficShapingApplicationCategories(self, networkId: str):
"""
**Returns the application categories for traffic shaping rules.**
https://developer.cisco.com/meraki/api-v1/#!get-network-traffic-shaping-application-categories
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'trafficShaping', 'applicationCategories'],
'operation': 'getNetworkTrafficShapingApplicationCategories'
}
resource = f'/networks/{networkId}/trafficShaping/applicationCategories'
return self._session.get(metadata, resource)
def getNetworkTrafficShapingDscpTaggingOptions(self, networkId: str):
"""
**Returns the available DSCP tagging options for your traffic shaping rules.**
https://developer.cisco.com/meraki/api-v1/#!get-network-traffic-shaping-dscp-tagging-options
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'trafficShaping', 'dscpTaggingOptions'],
'operation': 'getNetworkTrafficShapingDscpTaggingOptions'
}
resource = f'/networks/{networkId}/trafficShaping/dscpTaggingOptions'
return self._session.get(metadata, resource)
def unbindNetwork(self, networkId: str):
"""
**Unbind a network from a template.**
https://developer.cisco.com/meraki/api-v1/#!unbind-network
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure'],
'operation': 'unbindNetwork'
}
resource = f'/networks/{networkId}/unbind'
return self._session.post(metadata, resource)
def getNetworkWebhooksHttpServers(self, networkId: str):
"""
**List the HTTP servers for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-webhooks-http-servers
- networkId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'httpServers'],
'operation': 'getNetworkWebhooksHttpServers'
}
resource = f'/networks/{networkId}/webhooks/httpServers'
return self._session.get(metadata, resource)
def createNetworkWebhooksHttpServer(self, networkId: str, name: str, url: str, **kwargs):
"""
**Add an HTTP server to a network**
https://developer.cisco.com/meraki/api-v1/#!create-network-webhooks-http-server
- networkId (string): (required)
- name (string): A name for easy reference to the HTTP server
- url (string): The URL of the HTTP server
- sharedSecret (string): A shared secret that will be included in POSTs sent to the HTTP server. This secret can be used to verify that the request was sent by Meraki.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'httpServers'],
'operation': 'createNetworkWebhooksHttpServer'
}
resource = f'/networks/{networkId}/webhooks/httpServers'
body_params = ['name', 'url', 'sharedSecret', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkWebhooksHttpServer(self, networkId: str, httpServerId: str):
"""
**Return an HTTP server for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-webhooks-http-server
- networkId (string): (required)
- httpServerId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'httpServers'],
'operation': 'getNetworkWebhooksHttpServer'
}
resource = f'/networks/{networkId}/webhooks/httpServers/{httpServerId}'
return self._session.get(metadata, resource)
def updateNetworkWebhooksHttpServer(self, networkId: str, httpServerId: str, **kwargs):
"""
**Update an HTTP server**
https://developer.cisco.com/meraki/api-v1/#!update-network-webhooks-http-server
- networkId (string): (required)
- httpServerId (string): (required)
- name (string): A name for easy reference to the HTTP server
- url (string): The URL of the HTTP server
- sharedSecret (string): A shared secret that will be included in POSTs sent to the HTTP server. This secret can be used to verify that the request was sent by Meraki.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'httpServers'],
'operation': 'updateNetworkWebhooksHttpServer'
}
resource = f'/networks/{networkId}/webhooks/httpServers/{httpServerId}'
body_params = ['name', 'url', 'sharedSecret', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkWebhooksHttpServer(self, networkId: str, httpServerId: str):
"""
**Delete an HTTP server from a network**
https://developer.cisco.com/meraki/api-v1/#!delete-network-webhooks-http-server
- networkId (string): (required)
- httpServerId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'httpServers'],
'operation': 'deleteNetworkWebhooksHttpServer'
}
resource = f'/networks/{networkId}/webhooks/httpServers/{httpServerId}'
return self._session.delete(metadata, resource)
def createNetworkWebhooksWebhookTest(self, networkId: str, url: str, **kwargs):
"""
**Send a test webhook for a network**
https://developer.cisco.com/meraki/api-v1/#!create-network-webhooks-webhook-test
- networkId (string): (required)
- url (string): The URL where the test webhook will be sent
- sharedSecret (string): The shared secret the test webhook will send. Optional. Defaults to an empty string.
"""
kwargs.update(locals())
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'webhookTests'],
'operation': 'createNetworkWebhooksWebhookTest'
}
resource = f'/networks/{networkId}/webhooks/webhookTests'
body_params = ['url', 'sharedSecret', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkWebhooksWebhookTest(self, networkId: str, webhookTestId: str):
"""
**Return the status of a webhook test for a network**
https://developer.cisco.com/meraki/api-v1/#!get-network-webhooks-webhook-test
- networkId (string): (required)
- webhookTestId (string): (required)
"""
metadata = {
'tags': ['networks', 'configure', 'webhooks', 'webhookTests'],
'operation': 'getNetworkWebhooksWebhookTest'
}
resource = f'/networks/{networkId}/webhooks/webhookTests/{webhookTestId}'
return self._session.get(metadata, resource)
|
1673172
|
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
def getSVHN(batch_size, test_batch_size, img_size, **kwargs):
    """Build SVHN train/test DataLoaders with images resized to img_size.

    Args:
        batch_size: mini-batch size for the training loader.
        test_batch_size: mini-batch size for the test loader.
        img_size: target side length passed to transforms.Resize.
        **kwargs: forwarded to DataLoader (num_workers defaults to 1;
            'input_size' is dropped since DataLoader does not accept it).

    Returns:
        [train_loader, test_loader]
    """
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building SVHN data loader with {} workers".format(num_workers))

    def target_transform(target):
        # NOTE(review): remaps label 0 -> 9 and shifts everything else down
        # by one; this assumes 1-based digit labels — confirm against the
        # installed torchvision's SVHN label convention.
        new_target = target - 1
        if new_target == -1:
            new_target = 9
        return new_target

    ds = []
    train_loader = DataLoader(
        datasets.SVHN(
            root='../data/svhn', split='train', download=True,
            transform=transforms.Compose([
                transforms.Resize(img_size),
                transforms.ToTensor(),
            ]),
            target_transform=target_transform,
        ),
        batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    ds.append(train_loader)
    test_loader = DataLoader(
        datasets.SVHN(
            root='../data/svhn', split='test', download=True,
            transform=transforms.Compose([
                transforms.Resize(img_size),
                transforms.ToTensor(),
            ]),
            target_transform=target_transform,
        ),
        # Fix: honor test_batch_size (it was accepted but silently ignored).
        batch_size=test_batch_size, shuffle=False, drop_last=True, **kwargs)
    ds.append(test_loader)
    return ds
def getCIFAR10(batch_size, test_batch_size, img_size, **kwargs):
    """Build CIFAR-10 train/test DataLoaders (random crop + flip on train).

    Args:
        batch_size: mini-batch size for the training loader.
        test_batch_size: mini-batch size for the test loader.
        img_size: target side length passed to transforms.Resize.
        **kwargs: forwarded to DataLoader (num_workers defaults to 1;
            'input_size' is dropped since DataLoader does not accept it).

    Returns:
        [train_loader, test_loader]
    """
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building CIFAR-10 data loader with {} workers".format(num_workers))
    ds = []
    train_loader = DataLoader(
        datasets.CIFAR10(
            root='../data/cifar10', train=True, download=True,
            transform=transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.Resize(img_size),
                transforms.ToTensor(),
            ])),
        batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    ds.append(train_loader)
    test_loader = DataLoader(
        datasets.CIFAR10(
            root='../data/cifar10', train=False, download=True,
            transform=transforms.Compose([
                transforms.Resize(img_size),
                transforms.ToTensor(),
            ])),
        # Fix: honor test_batch_size (it was accepted but silently ignored).
        batch_size=test_batch_size, shuffle=False, drop_last=True, **kwargs)
    ds.append(test_loader)
    return ds
def getCIFAR100(batch_size, test_batch_size, img_size, **kwargs):
    """Build CIFAR-100 train/test DataLoaders (random crop + flip on train).

    Args:
        batch_size: mini-batch size for the training loader.
        test_batch_size: mini-batch size for the test loader.
        img_size: target side length passed to transforms.Resize.
        **kwargs: forwarded to DataLoader (num_workers defaults to 1;
            'input_size' is dropped since DataLoader does not accept it).

    Returns:
        [train_loader, test_loader]
    """
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building CIFAR-100 data loader with {} workers".format(num_workers))
    ds = []
    train_loader = DataLoader(
        datasets.CIFAR100(
            root='../data/cifar100', train=True, download=True,
            transform=transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.Resize(img_size),
                transforms.ToTensor(),
            ])),
        batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    ds.append(train_loader)
    test_loader = DataLoader(
        datasets.CIFAR100(
            root='../data/cifar100', train=False, download=True,
            transform=transforms.Compose([
                transforms.Resize(img_size),
                transforms.ToTensor(),
            ])),
        # Fix: honor test_batch_size (it was accepted but silently ignored).
        batch_size=test_batch_size, shuffle=False, drop_last=True, **kwargs)
    ds.append(test_loader)
    return ds
def getDataSet(data_type, batch_size, test_batch_size, imageSize):
    """Dispatch to the loader builder for *data_type*.

    Args:
        data_type: one of 'cifar10', 'svhn', 'cifar100'.
        batch_size / test_batch_size / imageSize: forwarded to the builder.

    Returns:
        (train_loader, test_loader)

    Raises:
        ValueError: for an unrecognized data_type (previously this fell
        through and raised a confusing UnboundLocalError).
    """
    if data_type == 'cifar10':
        train_loader, test_loader = getCIFAR10(batch_size, test_batch_size, imageSize)
    elif data_type == 'svhn':
        train_loader, test_loader = getSVHN(batch_size, test_batch_size, imageSize)
    elif data_type == 'cifar100':
        train_loader, test_loader = getCIFAR100(batch_size, test_batch_size, imageSize)
    else:
        raise ValueError('Unknown data_type: {!r}'.format(data_type))
    return train_loader, test_loader
if __name__ == '__main__':
    # Smoke test: iterate the CIFAR-10 test split and print batch shapes.
    train_loader, test_loader = getDataSet('cifar10', 256, 1000, 28)
    for inputs, targets in test_loader:
        print(inputs.shape)
        print(targets.shape)
|
1673188
|
import typing
import vel.api as api
from vel.callbacks.time_tracker import TimeTracker
class SimpleTrainCommand:
    """ Very simple training command - just run the supplied generators """

    def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModelFactory,
                 optimizer_factory: api.OptimizerFactory, scheduler_factory: typing.Optional[api.SchedulerFactory],
                 source: api.Source, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]],
                 max_grad_norm: typing.Optional[float]):
        # Total number of epochs to train for (epoch indices are 1-based).
        self.epochs = epochs
        self.model_config = model_config
        self.model_factory = model_factory
        self.optimizer_factory = optimizer_factory
        # Optional: a scheduler factory yields a callback attached to the optimizer.
        self.scheduler_factory = scheduler_factory
        self.source = source
        self.storage = storage
        # Normalize to a list so gather_callbacks() can always extend it.
        self.callbacks = callbacks if callbacks is not None else []
        # Optional gradient-clipping threshold forwarded to the Learner.
        self.max_grad_norm = max_grad_norm

    def run(self):
        """ Run the command with supplied configuration """
        device = self.model_config.torch_device()
        learner = api.Learner(device, self.model_factory.instantiate(), self.max_grad_norm)
        optimizer = self.optimizer_factory.instantiate(learner.model)
        # All callbacks used for learning
        callbacks = self.gather_callbacks(optimizer)
        # Metrics to track through this training
        metrics = learner.metrics()
        # Check if training was already started and potentially continue where we left off
        training_info = self.resume_training(learner, callbacks, metrics)
        training_info.on_train_begin()
        # When resuming, restore the optimizer state exactly where it left off.
        if training_info.optimizer_initial_state:
            optimizer.load_state_dict(training_info.optimizer_initial_state)
        # start_epoch_idx is the last finished epoch (0 for a fresh run),
        # so training continues from the next one.
        for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.epochs + 1):
            epoch_info = api.EpochInfo(
                training_info=training_info,
                global_epoch_idx=global_epoch_idx,
                batches_per_epoch=self.source.train_iterations_per_epoch(),
                optimizer=optimizer
            )
            # Execute learning
            learner.run_epoch(epoch_info, self.source)
            # Persist the model and bookkeeping after every epoch.
            self.storage.checkpoint(epoch_info, learner.model)
        training_info.on_train_end()
        return training_info

    def gather_callbacks(self, optimizer) -> list:
        """ Gather all the callbacks to be used in this training run """
        callbacks = [TimeTracker()]
        if self.scheduler_factory is not None:
            callbacks.append(self.scheduler_factory.instantiate(optimizer))
        callbacks.extend(self.callbacks)
        callbacks.extend(self.storage.streaming_callbacks())
        return callbacks

    def resume_training(self, learner, callbacks, metrics) -> api.TrainingInfo:
        """ Possibly resume training from a saved state from the storage """
        if self.model_config.continue_training:
            start_epoch = self.storage.last_epoch_idx()
        else:
            start_epoch = 0
        training_info = api.TrainingInfo(
            start_epoch_idx=start_epoch,
            run_name=self.model_config.run_name,
            metrics=metrics,
            callbacks=callbacks
        )
        if start_epoch == 0:
            # Fresh run: wipe any previous artifacts for this run name and
            # initialize the model from scratch.
            self.storage.reset(self.model_config.render_configuration())
            training_info.initialize()
            learner.initialize_training(training_info)
        else:
            # Resuming: load model weights and hidden training state from storage.
            model_state, hidden_state = self.storage.load(training_info)
            learner.initialize_training(training_info, model_state, hidden_state)
        return training_info
def create(model_config, epochs, optimizer, model, source, storage, scheduler=None, callbacks=None, max_grad_norm=None):
    """ Vel factory function """
    # Map the config-style argument names onto the command's factory kwargs.
    return SimpleTrainCommand(
        model_config=model_config,
        epochs=epochs,
        model_factory=model,
        optimizer_factory=optimizer,
        scheduler_factory=scheduler,
        source=source,
        storage=storage,
        callbacks=callbacks,
        max_grad_norm=max_grad_norm,
    )
|
1673227
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.AppServiceIdentityProviderEnabled import check
from checkov.common.models.enums import CheckResult
class TestAppServiceIdentityProviderEnabled(unittest.TestCase):
    """Exercise the AppServiceIdentityProviderEnabled check on HCL fixtures."""

    @staticmethod
    def _scan(hcl_src):
        # Parse the HCL snippet and run the check on its single app service.
        parsed = hcl2.loads(hcl_src)
        conf = parsed['resource'][0]['azurerm_app_service']['example']
        return check.scan_resource_conf(conf=conf)

    def test_failure(self):
        # No 'identity' block configured -> the check must fail.
        result = self._scan("""
        resource "azurerm_app_service" "example" {
          name                = "example-app-service"
          location            = azurerm_resource_group.example.location
          resource_group_name = azurerm_resource_group.example.name
          app_service_plan_id = azurerm_app_service_plan.example.id
          site_config {
            dotnet_framework_version = "v4.0"
            scm_type                 = "LocalGit"
          }
          app_settings = {
            "SOME_KEY" = "some-value"
          }
          connection_string {
            name  = "Database"
            type  = "SQLServer"
            value = "Server=some-server.mydomain.com;Integrated Security=SSPI"
          }
        }
        """)
        self.assertEqual(CheckResult.FAILED, result)

    def test_success(self):
        # 'identity' block present -> the check must pass.
        result = self._scan("""
        resource "azurerm_app_service" "example" {
          name                = "example-app-service"
          location            = azurerm_resource_group.example.location
          resource_group_name = azurerm_resource_group.example.name
          app_service_plan_id = azurerm_app_service_plan.example.id
          https_only          = true
          site_config {
            dotnet_framework_version = "v5.0"
            scm_type                 = "someValue"
          }
          identity {
            type = "SystemAssigned"
          }
        }
        """)
        self.assertEqual(CheckResult.PASSED, result)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
1673291
|
import tensorflow as tf
from abcnn import args
class Graph:
    """TF1 graph for (A)BCNN-style sentence-pair classification (2 classes).

    abcnn1: prepend an attention feature map to the first convolution's
    input (ABCNN-1 variant).
    abcnn2: reweight the first convolution's output using attention pooling
    (ABCNN-2 variant).
    Hyper-parameters (seq_length, embedding sizes, filters, ...) come from
    the `abcnn.args` module.
    """

    def __init__(self, abcnn1=False, abcnn2=False):
        # Integer token-id inputs for premise (p) and hypothesis (h),
        # both of shape (batch, seq_length).
        self.p = tf.placeholder(dtype=tf.int32, shape=(None, args.seq_length), name='p')
        self.h = tf.placeholder(dtype=tf.int32, shape=(None, args.seq_length), name='h')
        self.y = tf.placeholder(dtype=tf.int32, shape=None, name='y')
        # NOTE: despite the name 'drop_rate', this is a keep probability
        # (fed to tf.nn.dropout's keep_prob).
        self.keep_prob = tf.placeholder(dtype=tf.float32, name='drop_rate')
        # Trainable character/word embedding table.
        self.embedding = tf.get_variable(dtype=tf.float32, shape=(args.vocab_size, args.char_embedding_size),
                                         name='embedding')
        # Attention projection used by ABCNN-1; seq_length + 4 matches the
        # sequence length after the 2+2 zero padding applied below.
        self.W0 = tf.get_variable(name="aW",
                                  shape=(args.seq_length + 4, args.char_embedding_size),
                                  initializer=tf.contrib.layers.xavier_initializer(),
                                  regularizer=tf.contrib.layers.l2_regularizer(scale=0.0004))
        self.abcnn1 = abcnn1
        self.abcnn2 = abcnn2
        # Build the whole graph (forward pass + training ops) eagerly.
        self.forward()

    def dropout(self, x):
        # Dropout with the externally fed keep probability.
        return tf.nn.dropout(x, keep_prob=self.keep_prob)

    def cos_sim(self, v1, v2):
        # Cosine similarity along axis 1 (currently unused by forward()).
        norm1 = tf.sqrt(tf.reduce_sum(tf.square(v1), axis=1))
        norm2 = tf.sqrt(tf.reduce_sum(tf.square(v2), axis=1))
        dot_products = tf.reduce_sum(v1 * v2, axis=1, name="cos_sim")
        return dot_products / (norm1 * norm2)

    def forward(self):
        """Build the two-branch conv pipeline and hand logits to train()."""
        p_embedding = tf.nn.embedding_lookup(self.embedding, self.p)
        h_embedding = tf.nn.embedding_lookup(self.embedding, self.h)
        # Add a channel dimension for conv2d, then zero-pad 2 steps on each
        # side of the sequence axis (hence seq_length + 4 in W0).
        p_embedding = tf.expand_dims(p_embedding, axis=-1)
        h_embedding = tf.expand_dims(h_embedding, axis=-1)
        p_embedding = tf.pad(p_embedding, paddings=[[0, 0], [2, 2], [0, 0], [0, 0]])
        h_embedding = tf.pad(h_embedding, paddings=[[0, 0], [2, 2], [0, 0], [0, 0]])
        if self.abcnn1:
            # ABCNN-1: attention matrix from pairwise euclidean distances
            # between the two sequences; 1e-6 guards the sqrt gradient at 0.
            euclidean = tf.sqrt(tf.reduce_sum(
                tf.square(tf.transpose(p_embedding, perm=[0, 2, 1, 3]) - tf.transpose(h_embedding, perm=[0, 2, 3, 1])),
                axis=1) + 1e-6)
            attention_matrix = 1 / (euclidean + 1)
            # Project attention rows/columns through W0 and attach them as a
            # second input channel for each branch.
            p_attention = tf.expand_dims(tf.einsum("ijk,kl->ijl", attention_matrix, self.W0), -1)
            h_attention = tf.expand_dims(
                tf.einsum("ijk,kl->ijl", tf.transpose(attention_matrix, perm=[0, 2, 1]), self.W0), -1)
            p_embedding = tf.concat([p_embedding, p_attention], axis=-1)
            h_embedding = tf.concat([h_embedding, h_attention], axis=-1)
        # First convolution (weights NOT shared between branches here —
        # NOTE(review): the original BCNN shares them; confirm if intended).
        p = tf.layers.conv2d(p_embedding,
                             filters=args.cnn1_filters,
                             kernel_size=(args.filter_width, args.filter_height))
        h = tf.layers.conv2d(h_embedding,
                             filters=args.cnn1_filters,
                             kernel_size=(args.filter_width, args.filter_height))
        p = self.dropout(p)
        h = self.dropout(h)
        if self.abcnn2:
            # ABCNN-2: attention over conv outputs; row/column sums act as
            # per-position importance weights for each branch.
            attention_pool_euclidean = tf.sqrt(
                tf.reduce_sum(tf.square(tf.transpose(p, perm=[0, 3, 1, 2]) - tf.transpose(h, perm=[0, 3, 2, 1])),
                              axis=1))
            attention_pool_matrix = 1 / (attention_pool_euclidean + 1)
            p_sum = tf.reduce_sum(attention_pool_matrix, axis=2, keep_dims=True)
            h_sum = tf.reduce_sum(attention_pool_matrix, axis=1, keep_dims=True)
            # Flatten the (width, channels) tail before reweighting.
            p = tf.reshape(p, shape=(-1, p.shape[1], p.shape[2] * p.shape[3]))
            h = tf.reshape(h, shape=(-1, h.shape[1], h.shape[2] * h.shape[3]))
            p = tf.multiply(p, p_sum)
            h = tf.multiply(h, tf.matrix_transpose(h_sum))
        else:
            p = tf.reshape(p, shape=(-1, p.shape[1], p.shape[2] * p.shape[3]))
            h = tf.reshape(h, shape=(-1, h.shape[1], h.shape[2] * h.shape[3]))
        # Second convolution over the flattened feature maps.
        p = tf.expand_dims(p, axis=3)
        h = tf.expand_dims(h, axis=3)
        p = tf.layers.conv2d(p,
                             filters=args.cnn2_filters,
                             kernel_size=(args.filter_width, args.cnn1_filters))
        h = tf.layers.conv2d(h,
                             filters=args.cnn2_filters,
                             kernel_size=(args.filter_width, args.cnn1_filters))
        p = self.dropout(p)
        h = self.dropout(h)
        # Average-pool over the sequence axis, concatenate both branches,
        # then classify with a small MLP head.
        p_all = tf.reduce_mean(p, axis=1)
        h_all = tf.reduce_mean(h, axis=1)
        x = tf.concat((p_all, h_all), axis=2)
        x = tf.reshape(x, shape=(-1, x.shape[1] * x.shape[2]))
        out = tf.layers.dense(x, 50)
        logits = tf.layers.dense(out, 2)
        self.train(logits)

    def train(self, logits):
        """Attach loss, optimizer and accuracy ops for the given logits."""
        y = tf.one_hot(self.y, args.class_size)
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
        self.loss = tf.reduce_mean(loss)
        self.train_op = tf.train.AdamOptimizer(args.learning_rate).minimize(self.loss)
        prediction = tf.argmax(logits, axis=1)
        correct_prediction = tf.equal(tf.cast(prediction, tf.int32), self.y)
        self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
|
1673302
|
import pykd
import re
LOG_FILE = open(r'C:\local\tmp\ttt-common-bp-go.txt', 'w')
def pykdLog2File(logObj, fileObj):
    """Write str(logObj) to fileObj, one line at a time with '\\n' endings."""
    text = str(logObj)
    for chunk in text.split('\n'):
        fileObj.write(chunk + '\n')
def pykdLog(log):
    # Echo debugger output to the console and mirror it into the session log.
    print(log)
    pykdLog2File(log, LOG_FILE)
# Breakpoint setup: clear all existing breakpoints, then set the two code
# addresses of interest for this time-travel (TTT) trace.
pykd.dbgCommand(r'bc *')
pykd.dbgCommand(r'bp 08bca3d8')
pykd.dbgCommand(r'bp 2c6d2310')
#pykd.dbgCommand(r'bp 72debcf8')
ret = pykd.dbgCommand(r'bl')
pykdLog(ret)
# Rewind to the start of the trace with breakpoints disabled (so the rewind
# does not stop at every hit), then re-enable them.
pykd.dbgCommand(r'bd *')
pykd.dbgCommand(r'g-')
pykd.dbgCommand(r'be *')
# Forward pass: run to each breakpoint hit, logging the trace time position
# and the call stack, until the end of the trace is reached.
while True:
    ret = pykd.dbgCommand(r'g;.time;kL;')
    pykdLog(ret)
    if 'TTT Replay: End of trace reached.' in ret:
        break
    #for line in ret.split('\n'):
    #    if '0038294b' in line:
    #        pykdLog(line)
    #if '0038294b 00000080' in ret:
    #    pykdLog(ret)
    #    ret = pykd.dbgCommand(r'.time')
    #    pykdLog(ret)
    #    break
LOG_FILE.flush()
# Jump to the end of the trace with breakpoints disabled, re-enable them,
# then do the same sweep backwards until the start of the trace.
pykd.dbgCommand(r'bd *')
pykd.dbgCommand(r'g')
pykd.dbgCommand(r'be *')
while True:
    ret = pykd.dbgCommand(r'g-;.time;kL;')
    pykdLog(ret)
    if 'TTT Replay: Start of trace reached.' in ret:
        break
LOG_FILE.flush()
LOG_FILE.close()
|
1673317
|
import torch
from torch import Tensor
from typing import List, Union
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
# TODO add back the assert
# assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
    """Convert a per-image list of box tensors into RoI format.

    Each element of *boxes* holds one image's boxes as a [K_i, 4] tensor.
    The result is a single [sum(K_i), 5] tensor whose first column is the
    index of the image each box came from, followed by the 4 coordinates.
    """
    concat_boxes = _cat(list(boxes), dim=0)
    # One id column per image, filled with that image's index (same dtype
    # and device as the boxes thanks to full_like).
    ids = _cat(
        [torch.full_like(b[:, :1], i) for i, b in enumerate(boxes)],
        dim=0,
    )
    return torch.cat([ids, concat_boxes], dim=1)
def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
    """Validate RoI boxes: List[Tensor[K, 4]] or a batched Tensor[K, 5]."""
    if isinstance(boxes, (list, tuple)):
        # Per-image form: every tensor carries exactly 4 box coordinates.
        for per_image in boxes:
            assert per_image.size(1) == 4, \
                'The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]'
    elif isinstance(boxes, torch.Tensor):
        # Batched form: image-index column followed by 4 coordinates.
        assert boxes.size(1) == 5, 'The boxes tensor shape is not correct as Tensor[K, 5]'
    else:
        assert False, 'boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]'
    return
|
1673355
|
from _kratos.formal import remove_async_reset as _remove_async_reset
from .passes import verilog
import tempfile
import os
import shutil
import subprocess
def output_btor(generator, filename, remove_async_reset=True, yosys_path="",
                quite=False, **kargs):
    """Convert *generator* to BTOR2 via Yosys and write it to *filename*.

    - remove_async_reset: strip async resets first.
    - yosys_path: explicit yosys binary; if empty or invalid, search $PATH.
    - quite: pass -q to yosys. NOTE(review): presumably a typo for 'quiet';
      kept as-is for interface compatibility.
    - kargs: forwarded to the verilog() pass.
    """
    # Locate a usable yosys binary.
    if yosys_path == "" or not os.path.isfile(yosys_path):
        yosys_path = shutil.which("yosys")
    assert yosys_path, "Yosys not found"
    if remove_async_reset:
        _remove_async_reset(generator.internal_generator)
    # The temp dir vanishes on exit, so the output path must be absolute.
    filename = os.path.abspath(filename)
    # I think most of the kratos semantics are supported by yosys
    # if not, you can also turn on the flag to use sv2v to convert
    with tempfile.TemporaryDirectory() as temp:
        sv_file = os.path.join(temp, "verilog.sv")
        verilog(generator, filename=sv_file, **kargs)
        # One-shot yosys script: read -> prep -> flatten -> write_btor.
        script_file = os.path.join(temp, "convert.ys")
        with open(script_file, "w+") as f:
            f.write("read_verilog -formal -sv {0}\n".format(sv_file))
            f.write("prep -top {0}\n".format(generator.name))
            f.write("flatten\n")
            f.write("write_btor {0}\n".format(filename))
        cmd = [yosys_path]
        if quite:
            cmd.append("-q")
        cmd.extend(["-s", script_file])
        subprocess.check_call(cmd)
|
1673366
|
import pickle
from werkzeug.contrib.cache import (BaseCache, NullCache, SimpleCache, MemcachedCache,
GAEMemcachedCache, FileSystemCache)
from ._compat import range_type
class SASLMemcachedCache(MemcachedCache):
    """MemcachedCache variant that authenticates via SASL using pylibmc."""

    def __init__(self, servers=None, default_timeout=300, key_prefix=None,
                 username=None, password=None):
        # Deliberately skip MemcachedCache.__init__: it would construct an
        # unauthenticated client. Only initialize the base timeout.
        BaseCache.__init__(self, default_timeout)
        import pylibmc  # deferred: only needed when this backend is used
        self._client = pylibmc.Client(
            ['127.0.0.1:11211'] if servers is None else servers,
            username=username,
            password=password,
            binary=True)
        self.key_prefix = key_prefix
def null(app, config, args, kwargs):
    # Factory for the no-op cache backend; ignores all configuration.
    return NullCache()
def simple(app, config, args, kwargs):
    """Factory for SimpleCache, honoring CACHE_THRESHOLD from app config."""
    kwargs['threshold'] = config['CACHE_THRESHOLD']
    return SimpleCache(*args, **kwargs)
def memcached(app, config, args, kwargs):
    """Factory for MemcachedCache using CACHE_MEMCACHED_SERVERS / CACHE_KEY_PREFIX."""
    args.append(config['CACHE_MEMCACHED_SERVERS'])
    kwargs['key_prefix'] = config['CACHE_KEY_PREFIX']
    return MemcachedCache(*args, **kwargs)
def saslmemcached(app, config, args, kwargs):
    """Factory for SASLMemcachedCache with credentials taken from app config."""
    args.append(config['CACHE_MEMCACHED_SERVERS'])
    kwargs['username'] = config['CACHE_MEMCACHED_USERNAME']
    kwargs['password'] = config['CACHE_MEMCACHED_PASSWORD']
    kwargs['key_prefix'] = config['CACHE_KEY_PREFIX']
    return SASLMemcachedCache(*args, **kwargs)
def gaememcached(app, config, args, kwargs):
    """Factory for the App Engine memcached backend, honoring CACHE_KEY_PREFIX."""
    kwargs['key_prefix'] = config['CACHE_KEY_PREFIX']
    return GAEMemcachedCache(*args, **kwargs)
def filesystem(app, config, args, kwargs):
    """Factory for FileSystemCache rooted at CACHE_DIR with CACHE_THRESHOLD entries."""
    args.insert(0, config['CACHE_DIR'])
    kwargs['threshold'] = config['CACHE_THRESHOLD']
    return FileSystemCache(*args, **kwargs)
# RedisCache is supported since Werkzeug 0.7.
try:
    from werkzeug.contrib.cache import RedisCache
    from redis import from_url as redis_from_url
except ImportError:
    # Werkzeug is too old or redis-py is missing; the backend is simply
    # not registered in that case.
    pass
else:
    def redis(app, config, args, kwargs):
        # Factory for RedisCache, configured from the CACHE_REDIS_* settings.
        kwargs.update(dict(
            host=config.get('CACHE_REDIS_HOST', 'localhost'),
            port=config.get('CACHE_REDIS_PORT', 6379),
        ))
        # Optional settings are only forwarded when present/truthy.
        password = config.get('CACHE_REDIS_PASSWORD')
        if password:
            kwargs['password'] = password
        key_prefix = config.get('CACHE_KEY_PREFIX')
        if key_prefix:
            kwargs['key_prefix'] = key_prefix
        db_number = config.get('CACHE_REDIS_DB')
        if db_number:
            kwargs['db'] = db_number
        redis_url = config.get('CACHE_REDIS_URL')
        if redis_url:
            # A full URL overrides host/port/db: a ready client object is
            # passed as 'host' (presumably RedisCache accepts a client in
            # place of a hostname — verify against the Werkzeug version).
            kwargs['host'] = redis_from_url(
                redis_url,
                db=kwargs.pop('db', None),
            )
        return RedisCache(*args, **kwargs)
class SpreadSASLMemcachedCache(SASLMemcachedCache):
    """
    Subclass of the SASL memcached client that spreads values across
    multiple keys if they are bigger than a given threshold.

    Spreading requires using pickle to store the value, which can
    significantly impact performance.
    """

    def __init__(self, *args, **kwargs):
        """
        chunksize : (int) max size in bytes of a chunk stored in memcached
        maxchunk : (int) max number of chunks a single value may span
        """
        # Fix: pop our own options so they are not forwarded to the parent
        # constructor (previously get() left them in kwargs, which made
        # SASLMemcachedCache.__init__ raise TypeError on unexpected kwargs).
        self.chunksize = kwargs.pop('chunksize', 950000)
        self.maxchunk = kwargs.pop('maxchunk', 32)
        super(SpreadSASLMemcachedCache, self).__init__(*args, **kwargs)

    def delete(self, key):
        # Remove every chunk the value may have been spread across.
        for skey in self._genkeys(key):
            super(SpreadSASLMemcachedCache, self).delete(skey)

    def set(self, key, value, timeout=None, chunk=True):
        """Set a value in cache, potentially spreading it across multiple keys.

        chunk : (bool) if set to False, does not try to spread across
        multiple keys. This can be faster, but will fail if the value is
        bigger than one chunk, and requires you to get the object back by
        specifying that it is not spread.
        """
        if chunk:
            return self._set(key, value, timeout=timeout)
        else:
            return super(SpreadSASLMemcachedCache, self).set(key, value, timeout=timeout)

    def _set(self, key, value, timeout=None):
        # Pickling/unpickling adds overhead, but the serialized size cannot
        # be known without serializing first.
        serialized = pickle.dumps(value, 2)
        values = {}
        len_ser = len(serialized)
        chks = range_type(0, len_ser, self.chunksize)
        if len(chks) > self.maxchunk:
            raise ValueError('Cannot store value in less than %s keys'%(self.maxchunk))
        for i in chks:
            # Chunk keys are '<key>.0', '<key>.1', ...
            values['%s.%s' % (key, i//self.chunksize)] = serialized[i : i+self.chunksize]
        super(SpreadSASLMemcachedCache, self).set_many(values, timeout)

    def get(self, key, chunk=True):
        """Get a value from cache, potentially spread across multiple keys.

        chunk : (bool) if set to False, get a value set with set(..., chunk=False)
        """
        if chunk:
            return self._get(key)
        return super(SpreadSASLMemcachedCache, self).get(key)

    def _genkeys(self, key):
        # All candidate chunk keys for *key* (key.0 .. key.<maxchunk-1>).
        return ['%s.%s' % (key, i) for i in range_type(self.maxchunk)]

    def _get(self, key):
        to_get = ['%s.%s' % (key, i) for i in range_type(self.maxchunk)]
        result = super(SpreadSASLMemcachedCache, self).get_many(*to_get)
        # Fix: pickle.dumps returns bytes on Python 3, so join with a bytes
        # separator (''.join raised TypeError on Python 3; b''.join also
        # works on Python 2, where str is bytes).
        serialized = b''.join([v for v in result if v is not None])
        if not serialized:
            return None
        return pickle.loads(serialized)
def spreadsaslmemcachedcache(app, config, args, kwargs):
    """Factory wiring a SpreadSASLMemcachedCache from the app config."""
    args.append(config['CACHE_MEMCACHED_SERVERS'])
    kwargs.update(
        username=config.get('CACHE_MEMCACHED_USERNAME'),
        password=config.get('CACHE_MEMCACHED_PASSWORD'),
        key_prefix=config.get('CACHE_KEY_PREFIX'),
    )
    return SpreadSASLMemcachedCache(*args, **kwargs)
|
1673484
|
import pytest
import json
from collections import OrderedDict
import time
import tox
from redis import Redis
# from RLTest import Env
from pymongo import MongoClient
from tests import find_package
@pytest.mark.mongo
class TestMongoJSON:
    """Integration tests for RedisGears JSON write-behind/write-through
    replication into MongoDB (dockerised mongo described in tox.ini)."""

    def teardown_method(self):
        # Reset both stores so each test starts from a clean slate.
        self.dbconn.drop_database(self.DBNAME)
        self.env.flushall()

    @classmethod
    def setup_class(cls):
        cls.env = Redis(decode_responses=True)
        pkg = find_package()
        # connection info, read from the docker container config in tox.ini
        r = tox.config.parseconfig(open("tox.ini").read())
        docker = r._docker_container_configs["mongo"]["environment"]
        dbuser = docker["MONGO_INITDB_ROOT_USERNAME"]
        dbpasswd = docker["MONGO_INITDB_ROOT_PASSWORD"]
        db = docker["MONGO_DB"]
        # BUG FIX: an unexpanded <PASSWORD> placeholder was here; use the
        # password read from the container environment.
        con = "mongodb://{user}:{password}@172.17.0.1:27017/{db}?authSource=admin".format(
            user=dbuser,
            password=dbpasswd,
            db=db,
        )
        script = """
        from rgsync import RGJSONWriteBehind, RGJSONWriteThrough
        from rgsync.Connectors import MongoConnector, MongoConnection
        connection = MongoConnection('%s', '%s', '172.17.0.1:27017/%s')
        db = '%s'
        jConnector = MongoConnector(connection, db, 'persons', 'person_id')
        dataKey = 'gears'
        RGJSONWriteBehind(GB, keysPrefix='person',
                          connector=jConnector, name='PersonsWriteBehind',
                          version='99.99.99', dataKey=dataKey)
        RGJSONWriteThrough(GB, keysPrefix='__', connector=jConnector,
                           name='JSONWriteThrough', version='99.99.99', dataKey=dataKey)
        """ % (dbuser, dbpasswd, db, db)
        cls.env.execute_command('RG.PYEXECUTE', script, 'REQUIREMENTS', pkg, 'pymongo')
        e = MongoClient(con)
        # # tables are only created upon data use - so this is our equivalent
        # # for mongo
        assert 'version' in e.server_info().keys()
        cls.dbconn = e
        cls.DBNAME = db

    def _sampledata(self):
        # Fixture document replicated through the gears pipeline.
        d = {'some': 'value',
             'and another': ['set', 'of', 'values']
             }
        return d

    def _base_writebehind_validation(self):
        """Write a JSON key and wait (bounded) until it appears in mongo."""
        self.env.execute_command('json.set', 'person:1', '.', json.dumps(self._sampledata()))
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        # ROBUSTNESS: this wait used to be unbounded and could hang the
        # suite forever; cap it at ~10s.
        count = 0
        while len(result) == 0:
            time.sleep(0.1)
            result = list(self.dbconn[self.DBNAME]['persons'].find())
            count += 1
            assert count < 100, "Timed out waiting for mongo replication"
        assert 'gears' in result[0].keys()
        assert '1' == result[0]['person_id']
        assert 'value' == result[0]['gears']['some']
        assert ['set', 'of', 'values'] == result[0]['gears']['and another']

    def testSimpleWriteBehind(self):
        """json.del on the source key propagates a delete to mongo."""
        self._base_writebehind_validation()
        self.env.execute_command('json.del', 'person:1')
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        count = 0
        while len(result) != 0:
            time.sleep(0.1)
            result = list(self.dbconn[self.DBNAME]['persons'].find())
            # IDIOM: plain assert with message instead of `assert False == True`.
            assert count != 10, "Failed deleting data from mongo"
            count += 1

    def testStraightDelete(self):
        """A plain DEL (not json.del) also propagates a delete to mongo."""
        self._base_writebehind_validation()
        self.env.execute_command('del', 'person:1')
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        count = 0
        while len(result) != 0:
            time.sleep(0.1)
            result = list(self.dbconn[self.DBNAME]['persons'].find())
            assert count != 10, "Failed deleting data from mongo"
            count += 1

    def testSimpleWriteThroughPartialUpdate(self):
        """Overwriting a mapped field replicates the new value to mongo."""
        self._base_writebehind_validation()
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        ud = {'some': 'not a value!'}
        self.env.execute_command('json.set', 'person:1', '.', json.dumps(ud))
        # need replication time
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        count = 0
        while count != 10:
            time.sleep(0.1)
            result = list(self.dbconn[self.DBNAME]['persons'].find())
            if result[0]['gears']['some'] == 'not a value!':
                break
            else:
                count += 1
        assert count != 10, "Failed to update sub value!"
        assert result[0]['person_id'] == '1'

    def testUpdatingWithFieldsNotInMap(self):
        """Fields outside the configured map are still carried through."""
        self._base_writebehind_validation()
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        ud = {'somerandomthing': 'this too is random!'}
        self.env.execute_command('json.set', 'person:1', '.', json.dumps(ud))
        # need replication time
        result = list(self.dbconn[self.DBNAME]['persons'].find())
        count = 0
        while count != 10:
            time.sleep(0.1)
            result = list(self.dbconn[self.DBNAME]['persons'].find())
            if 'somerandomthing' not in result[0]['gears'].keys():
                count += 1
            else:
                assert result[0]['gears']['somerandomthing'] == 'this too is random!'
                break
        assert count != 10, "Failed to update sub value!"
        assert result[0]['person_id'] == '1'
|
1673577
|
from __future__ import print_function
from airflow.operators import SparkSubmitOperator
from airflow.models import DAG
from datetime import datetime, timedelta
import os
# DAG id derived from this file's name, with the .pyc/.py suffix stripped.
DAG_ID = os.path.basename(__file__).replace(".pyc", "").replace(".py", "")
# R script handed to spark-submit by the single task below.
APPLICATION_FILE_PATH = "~/spark-test/spark_test.R"
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'retries': 0,
}
# NOTE(review): a dynamic start_date (now - 1 min) is an Airflow anti-pattern
# for scheduled DAGs; tolerable here only because schedule_interval=None
# (manual/external triggers) — confirm before ever adding a schedule.
dag = DAG(DAG_ID, default_args=default_args, schedule_interval=None, start_date=(datetime.now() - timedelta(minutes=1)))
# Single task: submit the R application to Spark.
dummy = SparkSubmitOperator(
    task_id='spark-submit-r',
    application_file=APPLICATION_FILE_PATH,
    dag=dag)
|
1673608
|
import numpy as np
import h5py
import time
import logging
from utilities import calculate_scalar, scale
import config
class DataGenerator(object):
    """Mini-batch generator over a preloaded logmel HDF5 dataset.

    Loads the whole dataset into memory, splits clips into train /
    validation partitions by fold, and normalizes features with mean/std
    statistics computed on the training split only.
    """

    def __init__(self, hdf5_path, batch_size, holdout_fold, seed=1234):
        """
        Inputs:
          hdf5_path: str, path to the HDF5 file with 'audio_name',
            'mixture_logmel', 'target' and 'fold' datasets
          batch_size: int
          holdout_fold: int, the fold held out for validation
          seed: int, random seed for the training shuffles
        """
        self.batch_size = batch_size
        self.holdout_fold = holdout_fold
        # Separate RNGs so training shuffles never perturb the
        # deterministic (seed 0) validation shuffling.
        self.random_state = np.random.RandomState(seed)
        self.validate_random_state = np.random.RandomState(0)
        # Load data
        load_time = time.time()
        hf = h5py.File(hdf5_path, 'r')
        self.audio_names = np.array([s.decode() for s in hf['audio_name'][:]])
        self.x = hf['mixture_logmel'][:]
        self.y = hf['target'][:]
        self.folds = hf['fold'][:]
        hf.close()
        logging.info('Loading data time: {:.3f} s'.format(
            time.time() - load_time))
        # Split data to training and validation
        self.train_audio_indexes, self.validate_audio_indexes = \
            self.get_train_validate_audio_indexes()
        # Calculate scalar (normalization statistics from the training
        # split only, so validation data never leaks into them)
        (self.mean, self.std) = calculate_scalar(
            self.x[self.train_audio_indexes])

    def get_train_validate_audio_indexes(self):
        """Partition clip indexes: holdout fold -> validation, rest -> train."""
        audio_indexes = np.arange(len(self.audio_names))
        train_audio_indexes = audio_indexes[self.folds != self.holdout_fold]
        validate_audio_indexes = audio_indexes[self.folds == self.holdout_fold]
        return train_audio_indexes, validate_audio_indexes

    def generate_train(self):
        """Generate mini-batch data for training.

        Infinite generator; reshuffles the training indexes each epoch.

        Returns:
          batch_x: (batch_size, seq_len, freq_bins), normalized
          batch_y: (batch_size,)
        """
        batch_size = self.batch_size
        audio_indexes = np.array(self.train_audio_indexes)
        audios_num = len(audio_indexes)
        self.random_state.shuffle(audio_indexes)
        iteration = 0
        pointer = 0
        while True:
            # Reset pointer (epoch boundary: reshuffle and start over)
            if pointer >= audios_num:
                pointer = 0
                self.random_state.shuffle(audio_indexes)
            # Get batch indexes
            batch_audio_indexes = audio_indexes[pointer: pointer + batch_size]
            pointer += batch_size
            iteration += 1
            batch_x = self.x[batch_audio_indexes]
            batch_y = self.y[batch_audio_indexes]
            # Transform data
            batch_x = self.transform(batch_x)
            batch_y = batch_y.astype(np.float32)
            yield batch_x, batch_y

    def generate_validate(self, data_type, shuffle, max_iteration=None):
        """Generate mini-batch data for evaluation.

        Finite generator: stops after one pass over the chosen split (or
        after max_iteration batches, whichever comes first).

        Args:
          data_type: 'train' | 'validate'
          max_iteration: int, maximum iteration for validation
          shuffle: bool
        Returns:
          batch_x: (batch_size, seq_len, freq_bins)
          batch_y: (batch_size,)
          batch_audio_names: (batch_size,)
        """
        batch_size = self.batch_size
        if data_type == 'train':
            audio_indexes = np.array(self.train_audio_indexes)
        elif data_type == 'validate':
            audio_indexes = np.array(self.validate_audio_indexes)
        else:
            raise Exception('Invalid data_type!')
        if shuffle:
            self.validate_random_state.shuffle(audio_indexes)
        audios_num = len(audio_indexes)
        iteration = 0
        pointer = 0
        while True:
            if iteration == max_iteration:
                break
            # Reset pointer (end of the split: stop, do not wrap around)
            if pointer >= audios_num:
                break
            # Get batch indexes
            batch_audio_indexes = audio_indexes[
                pointer: pointer + batch_size]
            pointer += batch_size
            iteration += 1
            batch_x = self.x[batch_audio_indexes]
            batch_y = self.y[batch_audio_indexes]
            batch_audio_names = self.audio_names[batch_audio_indexes]
            # Transform data
            batch_x = self.transform(batch_x)
            batch_y = batch_y.astype(np.float32)
            yield batch_x, batch_y, batch_audio_names

    def transform(self, x):
        """Transform data.

        Normalizes with the training-split mean/std computed in __init__.

        Args:
          x: (batch_x, seq_len, freq_bins) | (seq_len, freq_bins)
        Returns:
          Transformed data.
        """
        return scale(x, self.mean, self.std)
class InferenceDataGenerator(DataGenerator):
    """DataGenerator variant for inference that also exposes raw STFTs."""

    def __init__(self, hdf5_path, batch_size, holdout_fold):
        """Data generator for test data.
        Inputs:
          dev_hdf5_path: str
          test_hdf5_path: str
          batch_size: int
        """
        super(InferenceDataGenerator, self).__init__(
            hdf5_path=hdf5_path,
            batch_size=batch_size,
            holdout_fold=holdout_fold)
        # Load stft data
        # NOTE(review): the file handle is kept open for the lazy STFT
        # reads in get_events_scene_mixture_stft(); it is never closed.
        load_time = time.time()
        hf = h5py.File(hdf5_path, 'r')
        self.hf = hf
        logging.info('Loading data time: {:.3f} s'.format(
            time.time() - load_time))

    def generate_test(self):
        # NOTE(review): self.test_x and self.test_audio_names are never
        # assigned in this class or its parent, so this method raises
        # AttributeError as written. Confirm the intended data source
        # (possibly self.x / self.audio_names) before using it.
        audios_num = len(self.test_x)
        audio_indexes = np.arange(audios_num)
        batch_size = self.batch_size
        pointer = 0
        while True:
            # Reset pointer (single pass: stop at the end of the data)
            if pointer >= audios_num:
                break
            # Get batch indexes
            batch_audio_indexes = audio_indexes[pointer: pointer + batch_size]
            pointer += batch_size
            batch_x = self.test_x[batch_audio_indexes]
            batch_audio_names = self.test_audio_names[batch_audio_indexes]
            # Transform data
            batch_x = self.transform(batch_x)
            yield batch_x, batch_audio_names

    def get_events_scene_mixture_stft(self, audio_name):
        """Return (events, scene, mixture) STFTs for one clip by name."""
        index = np.where(self.audio_names == audio_name)[0][0]
        events_stft = self.hf['events_stft'][index]
        scene_stft = self.hf['scene_stft'][index]
        mixture_stft = self.hf['mixture_stft'][index]
        return events_stft, scene_stft, mixture_stft
|
1673642
|
from __future__ import absolute_import
import logging
class Result:
    '''Holder for one set-association test result.

    The ``test`` attribute (assigned by the caller) is a dict shaped as:
    'lik0' : null likelihood
        'nLL'    : negative log-likelihood
        'sigma2' : the model variance sigma^2
        'beta'   : [D*1] array of fixed effects weights beta
        'h2'     : mixture weight between Covariance and noise
        'REML'   : True: REML was computed, False: ML was computed
        'a2'     : mixture weight between K0 and K1
    'lik1' : alternative likelihood (same keys as lik0)
    'nexclude' : array of the number of excluded snps from null
    'test' : "lrt", "sc_davies", sc_..."
    '''
    def __init__(self, setname, iset, iperm, ichrm, iposrange):
        self.setname = setname
        self.iset = iset
        self.iperm = iperm
        self.ichrm = ichrm
        self.iposrange = iposrange

    # computing observed lrt statistics and a2 parameters
    @property
    def stat(self):
        """Observed test statistic."""
        return self.test['stat']
        #return 2 * (self.lik0['nLL'] - self.lik1['nLL'])

    @property
    def a2(self):
        """K0/K1 mixture weight under the alternative."""
        return self.test['lik1']['a2']

    @property
    def h2(self):
        """Alternative-model h2; unwraps a length-1 array to a scalar."""
        try:
            return self.test['lik1']['h2'][0]
        except (TypeError, IndexError):
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt /
            # SystemExit; only subscript failures mean "already a scalar".
            logging.info("found a scalar h2")
            return self.test['lik1']['h2']

    @property
    def h2_1(self):
        """Alternative-model h2_1; unwraps a length-1 array to a scalar."""
        try:
            return self.test['lik1']['h2_1'][0]
        except (TypeError, IndexError):
            logging.info("found a scalar h2_1")
            return self.test['lik1']['h2_1']

    @property
    def type(self):
        """Test type string, e.g. "lrt"."""
        return self.test['type']

    @property
    def pv(self):
        """P-value of the test."""
        return self.test['pv']

    @property
    def alteqnull(self):
        # IDIOM: dict.get replaces the manual membership check; still
        # returns None when the key is absent.
        return self.test.get('alteqnull')

    @property
    def lik0Details(self):
        """Full null-model likelihood dict."""
        return self.test['lik0']

    @property
    def lik1Details(self):
        """Full alternative-model likelihood dict."""
        return self.test['lik1']
|
1673646
|
import numpy as np
from mayavi import mlab
def sectional2nodal(x):
    """Expand sectional values to nodes: endpoints are copied, interior
    nodes get the average of the two adjacent sections."""
    arr = np.asarray(x)
    midpoints = 0.5 * (arr[:-1] + arr[1:])
    return np.r_[arr[0], midpoints, arr[-1]]
def nodal2sectional(x):
    """Collapse nodal values to sections by averaging adjacent pairs."""
    left, right = x[:-1], x[1:]
    return 0.5 * (left + right)
def set_axes_equal(ax):
    """Make axes of 3D plot have equal scale so that spheres appear as
    spheres, cubes as cubes, etc.

    Workaround for Matplotlib's ax.set_aspect('equal') / ax.axis('equal')
    not working for 3D.

    Input
      ax: a matplotlib axis, e.g., as output from plt.gca().
    """
    limits = (ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d())
    centers = [np.mean(lim) for lim in limits]
    spans = [abs(lim[1] - lim[0]) for lim in limits]
    # The plot bounding box is a sphere in the sense of the infinity
    # norm; half the largest span is the common plot radius.
    radius = 0.5 * max(spans)
    ax.set_xlim3d([centers[0] - radius, centers[0] + radius])
    ax.set_ylim3d([centers[1] - radius, centers[1] + radius])
    ax.set_zlim3d([centers[2] - radius, centers[2] + radius])
class Visualize(object):
    """Mayavi renderer for floating platform (spar / semisubmersible)
    designs.

    Runs the supplied OpenMDAO problem once, then draws its geometry
    outputs (columns, ballast, pontoons, mooring, ocean surface) and
    saves a screenshot.
    """

    def __init__(self, prob):
        # Evaluate the model so every plotted output is up to date.
        prob.run_model()
        self.prob = prob
        self.fig = None

    def draw_spar(self, fname="spar.png"):
        """Render a single-column spar platform and save it to *fname*."""
        self.init_figure()
        self.draw_ocean()
        self.draw_mooring(self.prob["mooring_plot_matrix"])
        # Cut pontoon segments just above the main column freeboard.
        zcut = 1.0 + self.prob["main_freeboard"]
        self.draw_pontoons(self.prob["plot_matrix"], 0.5 * self.prob["fairlead_support_outer_diameter"], zcut)
        self.draw_column(
            [0.0, 0.0],
            self.prob["main_freeboard"],
            self.prob["main.section_height"],
            0.5 * self.prob["main.outer_diameter"],
            self.prob["main.stiffener_spacing"],
        )
        t_full = sectional2nodal(self.prob["main.wall_thickness"])
        # Ballast sits against the inner wall: outer radius minus thickness.
        self.draw_ballast(
            [0.0, 0.0],
            self.prob["main_freeboard"],
            self.prob["main.section_height"],
            0.5 * self.prob["main.outer_diameter"] - t_full,
            self.prob["main.permanent_ballast_height"],
            self.prob["variable_ballast_height"],
        )
        # Tower drawn as a plain grey column (no stiffener rings).
        self.draw_column(
            [0.0, 0.0],
            self.prob["hub_height"],
            self.prob["tow.tower_section_height"],
            0.5 * self.prob["tow.tower_outer_diameter"],
            None,
            (0.9,) * 3,
        )
        if self.prob["main.buoyancy_tank_mass"] > 0.0:
            self.draw_buoyancy_tank(
                [0.0, 0.0],
                self.prob["main_freeboard"],
                self.prob["main.section_height"],
                self.prob["main.buoyancy_tank_location"],
                0.5 * self.prob["main.buoyancy_tank_diameter"],
                self.prob["main.buoyancy_tank_height"],
            )
        self.set_figure(fname)

    def draw_semi(self, fname="semi.png"):
        """Render a multi-column semisubmersible and save it to *fname*."""
        self.init_figure()
        self.draw_ocean()
        self.draw_mooring(self.prob["mooring_plot_matrix"])
        pontoonMat = self.prob["plot_matrix"]
        zcut = 1.0 + np.maximum(self.prob["main_freeboard"], self.prob["offset_freeboard"])
        self.draw_pontoons(pontoonMat, 0.5 * self.prob["pontoon_outer_diameter"], zcut)
        # Central (main) column.
        self.draw_column(
            [0.0, 0.0],
            self.prob["main_freeboard"],
            self.prob["main.section_height"],
            0.5 * self.prob["main.outer_diameter"],
            self.prob["main.stiffener_spacing"],
        )
        t_full = sectional2nodal(self.prob["main.wall_thickness"])
        self.draw_ballast(
            [0.0, 0.0],
            self.prob["main_freeboard"],
            self.prob["main.section_height"],
            0.5 * self.prob["main.outer_diameter"] - t_full,
            self.prob["main.permanent_ballast_height"],
            self.prob["variable_ballast_height"],
        )
        if self.prob["main.buoyancy_tank_mass"] > 0.0:
            self.draw_buoyancy_tank(
                [0.0, 0.0],
                self.prob["main_freeboard"],
                self.prob["main.section_height"],
                self.prob["main.buoyancy_tank_location"],
                0.5 * self.prob["main.buoyancy_tank_diameter"],
                self.prob["main.buoyancy_tank_height"],
            )
        # Offset columns spaced evenly on a circle of radius R_semi.
        R_semi = self.prob["radius_to_offset_column"]
        ncolumn = int(self.prob["number_of_offset_columns"])
        angles = np.linspace(0, 2 * np.pi, ncolumn + 1)
        x = R_semi * np.cos(angles)
        y = R_semi * np.sin(angles)
        for k in range(ncolumn):
            self.draw_column(
                [x[k], y[k]],
                self.prob["offset_freeboard"],
                self.prob["off.section_height"],
                0.5 * self.prob["off.outer_diameter"],
                self.prob["off.stiffener_spacing"],
            )
            t_full = sectional2nodal(self.prob["off.wall_thickness"])
            # Offset columns carry permanent ballast only (no water ballast).
            self.draw_ballast(
                [x[k], y[k]],
                self.prob["offset_freeboard"],
                self.prob["off.section_height"],
                0.5 * self.prob["off.outer_diameter"] - t_full,
                self.prob["off.permanent_ballast_height"],
                0.0,
            )
            if self.prob["off.buoyancy_tank_mass"] > 0.0:
                self.draw_buoyancy_tank(
                    [x[k], y[k]],
                    self.prob["offset_freeboard"],
                    self.prob["off.section_height"],
                    self.prob["off.buoyancy_tank_location"],
                    0.5 * self.prob["off.buoyancy_tank_diameter"],
                    self.prob["off.buoyancy_tank_height"],
                )
        self.draw_column(
            [0.0, 0.0],
            self.prob["hub_height"],
            self.prob["tow.tower_section_height"],
            0.5 * self.prob["tow.tower_outer_diameter"],
            None,
            (0.9,) * 3,
        )
        self.set_figure(fname)

    def init_figure(self):
        """Open a fresh black-background mayavi figure."""
        mysky = np.array([135, 206, 250]) / 255.0
        mysky = tuple(mysky.tolist())
        # fig = plt.figure()
        # ax = fig.add_subplot(111, projection='3d')
        # fig = mlab.figure(bgcolor=(1,)*3, size=(1600,1100))
        # fig = mlab.figure(bgcolor=mysky, size=(1600,1100))
        self.fig = mlab.figure(bgcolor=(0,) * 3, size=(1600, 1100))

    def draw_ocean(self):
        """Draw a translucent rippled water plane around the origin."""
        if self.fig is None:
            self.init_figure()
        npts = 100
        # mybrown = np.array([244, 170, 66]) / 255.0
        # mybrown = tuple(mybrown.tolist())
        mywater = np.array([95, 158, 160]) / 255.0  # (0.0, 0.0, 0.8) [143, 188, 143]
        mywater = tuple(mywater.tolist())
        alpha = 0.3
        # Waterplane box
        x = y = 100 * np.linspace(-1, 1, npts)
        X, Y = np.meshgrid(x, y)
        Z = np.sin(100 * X * Y)  # np.zeros(X.shape)
        # ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
        mlab.mesh(X, Y, Z, opacity=alpha, color=mywater, figure=self.fig)
        # Sea floor
        Z = -self.prob["water_depth"] * np.ones(X.shape)
        # ax.plot_surface(10*X, 10*Y, Z, alpha=1.0, color=mybrown)
        # mlab.mesh(10*X,10*Y,Z, opacity=1.0, color=mybrown, figure=self.fig)
        # Sides
        # x = 500 * np.linspace(-1, 1, npts)
        # z = self.prob['water_depth'] * np.linspace(-1, 0, npts)
        # X,Z = np.meshgrid(x,z)
        # Y = x.max()*np.ones(Z.shape)
        ##ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
        # mlab.mesh(X,Y,Z, opacity=alpha, color=mywater, figure=self.fig)
        # mlab.mesh(X,-Y,Z, opacity=alpha, color=mywater, figure=self.fig)
        # mlab.mesh(Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
        ##mlab.mesh(-Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)

    def draw_mooring(self, mooring):
        """Draw the sea floor disc and every mooring line in *mooring*."""
        mybrown = np.array([244, 170, 66]) / 255.0
        mybrown = tuple(mybrown.tolist())
        npts = 100
        # Sea floor
        print(self.prob["anchor_radius"])
        r = np.linspace(0, self.prob["anchor_radius"], npts)
        th = np.linspace(0, 2 * np.pi, npts)
        R, TH = np.meshgrid(r, th)
        X = R * np.cos(TH)
        Y = R * np.sin(TH)
        Z = -self.prob["water_depth"] * np.ones(X.shape)
        # ax.plot_surface(X, Y, Z, alpha=1.0, color=mybrown)
        mlab.mesh(X, Y, Z, opacity=1.0, color=mybrown, figure=self.fig)
        cmoor = (0, 0.8, 0)
        nlines = int(self.prob["number_of_mooring_connections"] * self.prob["mooring_lines_per_connection"])
        for k in range(nlines):
            # ax.plot(mooring[k,:,0], mooring[k,:,1], mooring[k,:,2], 'k', lw=2)
            mlab.plot3d(
                mooring[k, :, 0],
                mooring[k, :, 1],
                mooring[k, :, 2],
                color=cmoor,
                tube_radius=0.5 * self.prob["mooring_diameter"],
                figure=self.fig,
            )

    def draw_pontoons(self, truss, R, freeboard):
        """Draw truss elements as tubes, skipping those above *freeboard*."""
        nE = truss.shape[0]
        c = (0.5, 0, 0)
        for k in range(nE):
            if np.any(truss[k, 2, :] > freeboard):
                continue
            mlab.plot3d(truss[k, 0, :], truss[k, 1, :], truss[k, 2, :], color=c, tube_radius=R, figure=self.fig)

    def draw_column(self, centerline, freeboard, h_section, r_nodes, spacingVec=None, ckIn=None):
        """Draw a tapered column section-by-section, with optional
        stiffener rings (spacingVec) and an optional fixed color (ckIn)."""
        npts = 20
        nsection = h_section.size
        # Node elevations, from keel up to the freeboard.
        z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
        th = np.linspace(0, 2 * np.pi, npts)
        for k in range(nsection):
            rk = np.linspace(r_nodes[k], r_nodes[k + 1], npts)
            z = np.linspace(z_nodes[k], z_nodes[k + 1], npts)
            R, TH = np.meshgrid(rk, th)
            Z, _ = np.meshgrid(z, th)
            X = R * np.cos(TH) + centerline[0]
            Y = R * np.sin(TH) + centerline[1]
            # Draw parameters (alternate greys per section unless forced)
            if ckIn is None:
                ck = (0.6,) * 3 if np.mod(k, 2) == 0 else (0.4,) * 3
            else:
                ck = ckIn
            # ax.plot_surface(X, Y, Z, alpha=0.5, color=ck)
            mlab.mesh(X, Y, Z, opacity=0.7, color=ck, figure=self.fig)
            if spacingVec is None:
                continue
            # Stiffener rings at regular spacing within this section.
            z = z_nodes[k] + spacingVec[k]
            while z < z_nodes[k + 1]:
                rk = np.interp(z, z_nodes[k:], r_nodes[k:])
                # ax.plot(rk*np.cos(th), rk*np.sin(th), z*np.ones(th.shape), 'r', lw=0.25)
                mlab.plot3d(
                    rk * np.cos(th) + centerline[0],
                    rk * np.sin(th) + centerline[1],
                    z * np.ones(th.shape),
                    color=(0.5, 0, 0),
                    figure=self.fig,
                )
                z += spacingVec[k]
        """
                # Web
                r   = np.linspace(rk - self.prob['stiffener_web_height'][k], rk, npts)
                R, TH = np.meshgrid(r, th)
                Z, _  = np.meshgrid(z, th)
                X = R*np.cos(TH)
                Y = R*np.sin(TH)
                ax.plot_surface(X, Y, Z, alpha=0.7, color='r')
                # Flange
                r = r[0]
                h = np.linspace(0, self.prob['stiffener_flange_width'][k], npts)
                zflange = z + h - 0.5*self.prob['stiffener_flange_width'][k]
                R, TH = np.meshgrid(r, th)
                Z, _  = np.meshgrid(zflange, th)
                X = R*np.cos(TH)
                Y = R*np.sin(TH)
                ax.plot_surface(X, Y, Z, alpha=0.7, color='r')
        """

    def draw_ballast(self, centerline, freeboard, h_section, r_nodes, h_perm, h_water):
        """Draw permanent (brown) and water (blue) ballast volumes."""
        npts = 40
        th = np.linspace(0, 2 * np.pi, npts)
        z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
        # Permanent ballast
        z_perm = z_nodes[0] + np.linspace(0, h_perm, npts)
        r_perm = np.interp(z_perm, z_nodes, r_nodes)
        R, TH = np.meshgrid(r_perm, th)
        Z, _ = np.meshgrid(z_perm, th)
        X = R * np.cos(TH) + centerline[0]
        Y = R * np.sin(TH) + centerline[1]
        ck = np.array([122, 85, 33]) / 255.0
        ck = tuple(ck.tolist())
        mlab.mesh(X, Y, Z, color=ck, figure=self.fig)
        # Water ballast (stacked directly on top of the permanent ballast)
        z_water = z_perm[-1] + np.linspace(0, h_water, npts)
        r_water = np.interp(z_water, z_nodes, r_nodes)
        R, TH = np.meshgrid(r_water, th)
        Z, _ = np.meshgrid(z_water, th)
        X = R * np.cos(TH) + centerline[0]
        Y = R * np.sin(TH) + centerline[1]
        ck = (0.0, 0.1, 0.8)  # Dark blue
        mlab.mesh(X, Y, Z, color=ck, figure=self.fig)

    def draw_buoyancy_tank(self, centerline, freeboard, h_section, loc, r_box, h_box):
        """Draw a heave-plate / buoyancy tank as a translucent cylinder."""
        npts = 20
        z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
        # loc is the fractional position along the column (0=top, 1=keel).
        z_lower = loc * (z_nodes[-1] - z_nodes[0]) + z_nodes[0]
        # Lower and Upper surfaces
        r = np.linspace(0, r_box, npts)
        th = np.linspace(0, 2 * np.pi, npts)
        R, TH = np.meshgrid(r, th)
        X = R * np.cos(TH) + centerline[0]
        Y = R * np.sin(TH) + centerline[1]
        Z = z_lower * np.ones(X.shape)
        ck = (0.9,) * 3
        mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
        Z += h_box
        mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
        # Cylinder part
        z = z_lower + np.linspace(0, h_box, npts)
        Z, TH = np.meshgrid(z, th)
        R = r_box * np.ones(Z.shape)
        X = R * np.cos(TH) + centerline[0]
        Y = R * np.sin(TH) + centerline[1]
        mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)

    def set_figure(self, fname=None):
        """Save the figure to *fname* (forcing a .png suffix if needed)
        and open the interactive viewer."""
        # ax.set_aspect('equal')
        # set_axes_equal(ax)
        # ax.autoscale_view(tight=True)
        # ax.set_xlim([-125, 125])
        # ax.set_ylim([-125, 125])
        # ax.set_zlim([-220, 30])
        # plt.axis('off')
        # plt.show()
        # mlab.move([-517.16728532, -87.0711504, 5.60826224], [1.35691603e+01, -2.84217094e-14, -1.06547500e+02])
        # mlab.view(-170.68320804213343, 78.220729198686854, 549.40101471336777, [1.35691603e+01, 0.0, -1.06547500e+02])
        if not fname is None:
            fpart = fname.split(".")
            if len(fpart) == 1 or not fpart[-1].lower() in ["jpg", "png", "bmp"]:
                fname += ".png"
            mlab.savefig(fname, figure=self.fig)
        mlab.show()
|
1673746
|
import sys
from requests import get
from core.colors import bad
def reverseLookup(inp):
    """Print the hackertarget reverse-IP-lookup result for *inp* to stdout.

    On any request failure (network error, timeout, bad response) an
    error marker is printed instead of crashing the caller.
    """
    lookup = 'https://api.hackertarget.com/reverseiplookup/?q=%s' % inp
    try:
        # ROBUSTNESS: bounded timeout so a stalled API call cannot hang
        # the tool forever.
        result = get(lookup, timeout=10).text
        sys.stdout.write(result)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        sys.stdout.write('%s Invalid IP address' % bad)
|
1673773
|
from contextlib import contextmanager
from StringIO import StringIO
import logging
from posix import rmdir
import unittest
import os
from time import time
from eventlet import GreenPool
from hashlib import md5
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from copy import copy
import math
import tarfile
from eventlet.wsgi import Input
from zerocloud import objectquery
from swift.common import utils
from test.unit import FakeLogger, create_random_numbers, get_sorted_numbers, \
create_tar
from test.unit import trim
from swift.common.swob import Request
from swift.common.utils import mkdirs, normalize_timestamp, get_logger
from swift.obj.server import ObjectController
from test_proxyquery import ZEROVM_DEFAULT_MOCK
from zerocloud.common import ACCESS_READABLE, ACCESS_WRITABLE, ACCESS_CDR, \
parse_location, ACCESS_RANDOM
from zerocloud import TAR_MIMES
from zerocloud.configparser import ZvmNode
from zerocloud.thread_pool import WaitPool, Zuid
def get_headers(self):
    """Return this tar entry's PAX headers as a Title-Cased dict.

    Python 2 code: unicode keys/values are UTF-8 encoded to byte strings
    before the keys are title-cased.
    """
    headers = {}
    for key, value in self.pax_headers.items():
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        headers[key.title()] = value
    return headers
# Monkey-patch: expose PAX headers on every TarInfo for the tests below.
tarfile.TarInfo.get_headers = get_headers
class FakeLoggingHandler(logging.Handler):
    """Logging handler that records emitted messages per level name."""

    def __init__(self, *args, **kwargs):
        # Initialize the buckets before the base handler starts routing.
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        # File each formatted message under its lower-cased level name.
        level = record.levelname.lower()
        self.messages[level].append(record.getMessage())

    def reset(self):
        # Fresh, empty list per supported level.
        self.messages = dict(
            (name, []) for name in
            ('debug', 'info', 'warning', 'error', 'critical'))
class FakeApp(ObjectController):
    """ObjectController test double with an injectable hard failure."""

    def __init__(self, conf):
        ObjectController.__init__(self, conf)
        self.bytes_per_sync = 1
        # When set True by a test, every request raises.
        self.fault = False

    def __call__(self, env, start_response):
        if self.fault:
            raise Exception
        # BUG FIX: propagate the WSGI response iterable; the original
        # discarded it and implicitly returned None.
        return ObjectController.__call__(self, env, start_response)
class OsMock():
    """Partial stand-in for the ``os`` module used by the middleware.

    ``close``/``unlink`` record that they were called and then raise
    OSError to exercise error paths; the remaining calls delegate to
    the real ``os``.
    """

    def __init__(self):
        self.closed = False
        self.unlinked = False
        self.path = os.path
        self.SEEK_SET = os.SEEK_SET

    def close(self, fd):
        # Record the attempt, then simulate failure.
        self.closed = True
        raise OSError

    def unlink(self, fd):
        # Record the attempt, then simulate failure.
        self.unlinked = True
        raise OSError

    def write(self, fd, str):
        return os.write(fd, str)

    def read(self, fd, bufsize):
        return os.read(fd, bufsize)

    def lseek(self, fd, pos, how):
        return os.lseek(fd, pos, how)
class TestObjectQuery(unittest.TestCase):
    def setUp(self):
        """Build an isolated object server wrapped in ObjectQuery middleware."""
        utils.HASH_PATH_SUFFIX = 'endcap'
        # Private scratch tree acting as the storage device root.
        self.testdir = \
            os.path.join(mkdtemp(), 'tmp_test_object_server_ObjectController')
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        self.conf = {
            'devices': self.testdir,
            'mount_check': 'false',
            'disable_fallocate': 'true',
            'zerovm_sysimage_devices': ('sysimage1 /opt/zerovm/sysimage1 '
                                        'sysimage2 /opt/zerovm/sysimage2')
        }
        self.obj_controller = FakeApp(self.conf)
        self.app = objectquery.ObjectQueryMiddleware(
            self.obj_controller, self.conf, logger=FakeLogger())
        self.app.zerovm_maxoutput = 1024 * 1024 * 10
        # Set by setup_zerovm_query when a mock executable is written.
        self.zerovm_mock = None
        self.uid_generator = Zuid()
    def tearDown(self):
        """ Tear down for testing swift.object_server.ObjectController """
        # Remove the whole scratch tree, then any zerovm mock script.
        rmtree(os.path.dirname(self.testdir))
        if self.zerovm_mock:
            os.unlink(self.zerovm_mock)
    def setup_zerovm_query(self, mock=None):
        """Install a (mock) zerovm executable and seed fixture objects.

        mock: optional str, python source for a custom zerovm mock; when
            None the default mock script is used.  Also precomputes the
            fixture payloads and their etags used by the assertions.
        """
        # ensure that python executable is used
        zerovm_mock = ZEROVM_DEFAULT_MOCK
        if mock:
            fd, zerovm_mock = mkstemp()
            os.write(fd, mock)
            os.close(fd)
            # Remembered so tearDown can unlink it.
            self.zerovm_mock = zerovm_mock
        self.app.zerovm_exename = ['python', zerovm_mock]
        # do not set it lower than 2 * BLOCKSIZE (2 * 512)
        # it will break tar RPC protocol
        self.app.app.network_chunk_size = 2 * 512
        randomnumbers = create_random_numbers(10)
        self.create_object(randomnumbers)
        # Python-2 style fixtures: md5 over byte strings.
        self._nexescript = 'return pickle.dumps(sorted(id))'
        self._sortednumbers = get_sorted_numbers()
        self._randomnumbers_etag = md5()
        self._randomnumbers_etag.update(randomnumbers)
        self._randomnumbers_etag = self._randomnumbers_etag.hexdigest()
        self._sortednumbers_etag = md5()
        self._sortednumbers_etag.update(self._sortednumbers)
        self._sortednumbers_etag = self._sortednumbers_etag.hexdigest()
        self._nexescript_etag = md5()
        self._nexescript_etag.update(self._nexescript)
        self._nexescript_etag = self._nexescript_etag.hexdigest()
        self._stderr = '\nfinished\n'
        # Pickled empty list: what an empty run writes to stdout.
        self._emptyresult = '(l.'
        self._emptyresult_etag = md5()
        self._emptyresult_etag.update(self._emptyresult)
        self._emptyresult_etag = self._emptyresult_etag.hexdigest()
def create_object(self, body, path='/sda1/p/a/c/o'):
timestamp = normalize_timestamp(time())
headers = {'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream'}
req = Request.blank(path,
environ={'REQUEST_METHOD': 'PUT'}, headers=headers)
req.body = body
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def zerovm_object_request(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/x-gtar',
'x-zerovm-execute': '1.0',
'x-account-name': 'a',
'x-zerovm-access': 'GET'})
req.headers['x-zerocloud-id'] = self.uid_generator.get()
return req
def zerovm_free_request(self):
req = Request.blank('/sda1/p/a',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/x-gtar',
'x-zerovm-execute': '1.0',
'x-account-name': 'a',
'x-zerovm-access': ''})
req.headers['x-zerocloud-id'] = self.uid_generator.get()
return req
def test_tmpdir_mkstemp_creates_dir(self):
tmpdir = os.path.join(self.testdir, 'sda1', 'tmp')
os.rmdir(tmpdir)
with objectquery.TmpDir(tmpdir, 'sda1').mkstemp():
self.assert_(os.path.exists(tmpdir))
    def __test_QUERY_realzvm(self):
        """Manual smoke test against a REAL zerovm binary.

        Deliberately disabled: the double-underscore name keeps unittest
        from collecting it, and it relies on hard-coded local paths
        (/opt/zerovm, /media/40G, /home/kit).  Python 2 only (print
        statements).
        """
        orig_exe = self.app.zerovm_exename
        orig_sysimages = self.app.zerovm_sysimage_devices
        try:
            self.app.zerovm_sysimage_devices['python-image'] = (
                '/media/40G/zerovm-samples/zshell/zpython2/python.tar'
            )
            self.setup_zerovm_query()
            self.app.zerovm_exename = ['/opt/zerovm/bin/zerovm']
            req = self.zerovm_free_request()
            req.headers['x-zerovm-daemon'] = 'asdf'
            conf = ZvmNode(1, 'python', parse_location(
                'file://python-image:python'), args='hello.py')
            conf.add_new_channel('stdout', ACCESS_WRITABLE)
            conf.add_new_channel(
                'python-image', ACCESS_READABLE | ACCESS_RANDOM)
            conf.add_new_channel('image', ACCESS_CDR, removable='yes')
            conf = conf.dumps()
            sysmap = StringIO(conf)
            image = open('/home/kit/python-script.tar', 'rb')
            # NOTE(review): self.create_tar does not exist on this class;
            # other tests use the module-level create_tar — confirm.
            with self.create_tar({'sysmap': sysmap, 'image': image}) as tar:
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                resp = req.get_response(self.app)
                print ['x-zerovm-daemon', resp.headers.get('x-zerovm-daemon',
                                                           '---')]
                print ['x-nexe-cdr-line', resp.headers['x-nexe-cdr-line']]
                if resp.content_type in TAR_MIMES:
                    fd, name = mkstemp()
                    for chunk in resp.app_iter:
                        os.write(fd, chunk)
                    os.close(fd)
                    tar = tarfile.open(name)
                    names = tar.getnames()
                    members = tar.getmembers()
                    for n, m in zip(names, members):
                        print [n, tar.extractfile(m).read()]
                else:
                    print resp.body
        finally:
            # Restore the patched app settings regardless of outcome.
            self.app.zerovm_exename = orig_exe
            self.app.zerovm_sysimage_devices = orig_sysimages
    def test_QUERY_sort(self):
        """Run the sort job and check the pickled stdout, headers, type."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        # Job config: read object /a/c/o on stdin, write result to stdout.
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            # Spool the response tar to disk for inspection.
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            self.assertEqual(members[-1].size, len(self._sortednumbers))
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), self._sortednumbers)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
            # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
            #     'Zerovm CDR: 0 0 0 0 1 46 2 56 0 0 0 0')
def test_QUERY_sort_textout(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO('return str(sorted(id))')
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members = tar.getmembers()
self.assertIn('stdout', names)
self.assertEqual(names[-1], 'stdout')
file = tar.extractfile(members[-1])
self.assertEqual(file.read(), '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
self.assertEqual(resp.headers['x-nexe-retcode'], '0')
self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
self.assertEqual(resp.headers['x-nexe-validation'], '0')
self.assertEqual(resp.headers['x-nexe-system'], 'sort')
timestamp = normalize_timestamp(time())
self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
math.floor(float(timestamp)))
self.assertEquals(
resp.headers['content-type'], 'application/x-gtar')
# self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
# 'Zerovm CDR: 0 0 0 0 1 46 2 40 0 0 0 0')
def test_QUERY_http_message(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel(
'stdout', ACCESS_WRITABLE, content_type='message/http')
conf = conf.dumps()
sysmap = StringIO(conf)
nexefile = StringIO(trim(r'''
resp = '\n'.join([
'HTTP/1.1 200 OK',
'Content-Type: application/json',
'X-Object-Meta-Key1: value1',
'X-Object-Meta-Key2: value2',
'', ''
])
out = str(sorted(id))
return resp + out
'''))
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members = tar.getmembers()
self.assertIn('stdout', names)
self.assertEqual(names[-1], 'stdout')
file = tar.extractfile(members[-1])
self.assertEqual(file.read(), '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
self.assertEqual(resp.headers['x-nexe-retcode'], '0')
self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
self.assertEqual(resp.headers['x-nexe-validation'], '0')
self.assertEqual(resp.headers['x-nexe-system'], 'sort')
self.assertEquals(
resp.headers['content-type'], 'application/x-gtar')
stdout_headers = members[-1].get_headers()
self.assertEqual(stdout_headers['Content-Type'],
'application/json')
self.assertEqual(stdout_headers['X-Object-Meta-Key1'],
'value1')
self.assertEqual(stdout_headers['X-Object-Meta-Key2'],
'value2')
def test_QUERY_cgi_message(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel(
'stdout', ACCESS_WRITABLE, content_type='message/cgi')
conf = conf.dumps()
sysmap = StringIO(conf)
nexefile = StringIO(trim(r'''
resp = '\n'.join([
'Content-Type: application/json',
'X-Object-Meta-Key1: value1',
'X-Object-Meta-Key2: value2',
'', ''
])
out = str(sorted(id))
return resp + out
'''))
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members = tar.getmembers()
self.assertIn('stdout', names)
self.assertEqual(names[-1], 'stdout')
file = tar.extractfile(members[-1])
self.assertEqual(file.read(), '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
self.assertEqual(resp.headers['x-nexe-retcode'], '0')
self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
self.assertEqual(resp.headers['x-nexe-validation'], '0')
self.assertEqual(resp.headers['x-nexe-system'], 'sort')
timestamp = normalize_timestamp(time())
self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
math.floor(float(timestamp)))
self.assertEquals(
resp.headers['content-type'], 'application/x-gtar')
stdout_headers = members[-1].get_headers()
self.assertEqual(stdout_headers['Content-Type'],
'application/json')
self.assertEqual(stdout_headers['X-Object-Meta-Key1'],
'value1')
self.assertEqual(stdout_headers['X-Object-Meta-Key2'],
'value2')
    def test_QUERY_invalid_http_message(self):
        """A message/http stdout whose payload lacks a proper HTTP status
        line must be passed through verbatim: the member keeps its raw
        bytes and the message/http content type instead of parsed headers.
        """
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel(
            'stdout', ACCESS_WRITABLE, content_type='message/http')
        conf = conf.dumps()
        sysmap = StringIO(conf)
        # 'Status:' is a CGI convention, not a valid HTTP/1.1 status line,
        # so the middleware must treat the whole output as opaque.
        nexefile = StringIO(trim('''
            resp = '\\n'.join(['Status: 200 OK',
            'Content-Type: application/json', '', ''])
            out = str(sorted(id))
            return resp + out
            '''))
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            # Spool the tar response so tarfile can seek in it.
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            file = tar.extractfile(members[-1])
            # Entire raw payload survives, envelope included.
            self.assertEqual(file.read(),
                             'Status: 200 OK\n'
                             'Content-Type: application/json\n\n'
                             '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEqual(
                resp.headers['content-type'], 'application/x-gtar')
            stdout_headers = members[-1].get_headers()
            self.assertEqual(stdout_headers['Content-Type'], 'message/http')
    def test_QUERY_invalid_nexe(self):
        """An executable that fails validation yields an empty stdout,
        x-nexe-validation=1 and a 'nexe is invalid' status, while the
        overall request still succeeds with a tar response.
        """
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO('INVALID')
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            # Spool the tar response so tarfile can seek in it.
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            # Nothing ran, so stdout is present but empty.
            self.assertEqual(members[-1].size, 0)
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), '')
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'nexe is invalid')
            self.assertEqual(resp.headers['x-nexe-validation'], '1')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEqual(
                resp.headers['content-type'], 'application/x-gtar')
        # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
        #                  'Zerovm CDR: 0 0 0 0 0 0 0 0 0 0 0 0')
    def test_QUERY_freenode(self):
        """Executing a node with no input object (stdout only) still runs
        and returns the script's empty-input result in the tar response.
        """
        # running code without input file
        self.setup_zerovm_query()
        # Remove the tmp dir to exercise its on-demand re-creation.
        rmdir(os.path.join(self.testdir, 'sda1', 'tmp'))
        req = self.zerovm_free_request()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            # Spool the tar response so tarfile can seek in it.
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            self.assertEqual(members[-1].size, len(self._emptyresult))
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), self._emptyresult)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEqual(
                resp.headers['content-type'], 'application/x-gtar')
        # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
        #                  'Zerovm CDR: 0 0 0 0 1 0 2 13 0 0 0 0')
    def test_QUERY_write_only(self):
        """POST with x-zerovm-access=PUT writes stdout straight into a new
        object (swift://a/c/out) with the configured content type and
        user metadata; the execute response itself carries no body.
        """
        # running the executable creates a new object in-place
        self.setup_zerovm_query()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        meta = {'key1': 'value1',
                'key2': 'value2'}
        content_type = 'application/x-pickle'
        conf.add_new_channel('stdout',
                             ACCESS_WRITABLE,
                             parse_location('swift://a/c/out'),
                             meta_data=meta,
                             content_type=content_type)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        timestamp = normalize_timestamp(time())
        req = Request.blank('/sda1/p/a/c/out',
                            environ={'REQUEST_METHOD': 'POST'},
                            headers={
                                'Content-Type': 'application/x-gtar',
                                'x-zerovm-execute': '1.0',
                                'x-zerocloud-id': self.uid_generator.get(),
                                'x-timestamp': timestamp,
                                'x-zerovm-access': 'PUT'
                            })
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 200)
            # Output went to the object, not the response body.
            self.assertEqual(resp.content_length, 0)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            # Fetch the created object directly and verify body + metadata.
            req = Request.blank('/sda1/p/a/c/out')
            resp = self.obj_controller.GET(req)
            self.assertEqual(resp.status_int, 200)
            self.assertEqual(resp.content_length, len(self._emptyresult))
            self.assertEqual(resp.body, self._emptyresult)
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEqual(resp.content_type, content_type)
            for k, v in meta.iteritems():
                self.assertEqual(resp.headers['x-object-meta-%s' % k], v)
    def test_QUERY_write_and_report(self):
        """stdout is written to a new object while stderr (no location)
        is returned to the caller inside the tar response.
        """
        # running the executable creates a new object from stdout
        # and sends stderr output to the user
        self.setup_zerovm_query()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        meta = {'key1': 'value1',
                'key2': 'value2'}
        content_type = 'application/x-pickle'
        conf.add_new_channel('stdout',
                             ACCESS_WRITABLE,
                             parse_location('swift://a/c/out'),
                             meta_data=meta,
                             content_type=content_type)
        # stderr has no swift location, so it comes back to the client.
        conf.add_new_channel('stderr', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        timestamp = normalize_timestamp(time())
        req = Request.blank('/sda1/p/a/c/out',
                            environ={'REQUEST_METHOD': 'POST'},
                            headers={
                                'Content-Type': 'application/x-gtar',
                                'x-zerovm-execute': '1.0',
                                'x-zerocloud-id': self.uid_generator.get(),
                                'x-timestamp': timestamp,
                                'x-zerovm-access': 'PUT'
                            })
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 200)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            # Spool the tar response so tarfile can seek in it.
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stderr', names)
            self.assertEqual(names[-1], 'stderr')
            f = tar.extractfile(members[-1])
            self.assertEqual(f.read(), self._stderr)
            # The stdout data landed in the object store.
            req = Request.blank('/sda1/p/a/c/out')
            resp = self.obj_controller.GET(req)
            self.assertEqual(resp.status_int, 200)
            self.assertEqual(resp.content_length, len(self._emptyresult))
            self.assertEqual(resp.body, self._emptyresult)
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEqual(resp.content_type, content_type)
            for k, v in meta.iteritems():
                self.assertEqual(resp.headers['x-object-meta-%s' % k], v)
def test_QUERY_OsErr(self):
def mock(*args):
raise Exception('Mock lseek failed')
self.app.os_interface = OsMock()
self.setup_zerovm_query()
req = self.zerovm_free_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members = tar.getmembers()
self.assertIn('stdout', names)
self.assertEqual(names[-1], 'stdout')
self.assertEqual(members[-1].size, len(self._emptyresult))
file = tar.extractfile(members[-1])
self.assertEqual(file.read(), self._emptyresult)
del self.app.parser_config['limits']['wbytes']
self.setup_zerovm_query()
req = self.zerovm_free_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
self.setup_zerovm_query()
req = self.zerovm_free_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', '/c/exe')
conf.add_new_channel('stderr', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_QUERY_nexe_environment(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf.args = 'aaa bbb'
conf.env = {'KEY_A': 'value_a', 'KEY_B': 'value_b'}
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_QUERY_multichannel(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'input', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel(
'output', ACCESS_WRITABLE, parse_location('swift://a/c/o2'))
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_QUERY_std_list(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel(
'stdout', ACCESS_WRITABLE, parse_location('swift://a/c/o2'))
conf.add_new_channel('stderr', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members = tar.getmembers()
self.assertIn('stderr', names)
self.assertEqual(names[-1], 'stderr')
self.assertEqual(members[-1].size, len(self._stderr))
file = tar.extractfile(members[-1])
self.assertEqual(file.read(), self._stderr)
self.assertIn('stdout', names)
self.assertEqual(names[0], 'stdout')
self.assertEqual(members[0].size, len(self._sortednumbers))
file = tar.extractfile(members[0])
self.assertEqual(file.read(), self._sortednumbers)
def test_QUERY_logger(self):
# check logger assignment
logger = get_logger({}, log_route='obj-query-test')
self.app = objectquery.ObjectQueryMiddleware(
self.obj_controller, self.conf, logger)
self.assertIs(logger, self.app.logger)
def test_QUERY_object_not_exists(self):
# check if querying non existent object
req = self.zerovm_object_request()
nexefile = StringIO('SCRIPT')
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 404)
def test_QUERY_invalid_path(self):
# check if just querying container fails
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'POST'},
headers={
'x-zerovm-execute': '1.0',
'x-zerocloud-id': self.uid_generator.get()
})
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
def test_QUERY_max_upload_time(self):
class SlowBody():
def __init__(self, body):
self.body = body
def read(self, size=-1):
return self.body.read(10)
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
fp = open(tar, 'rb')
length = os.path.getsize(tar)
req.body_file = Input(SlowBody(fp), length)
req.content_length = length
resp = req.get_response(self.app)
fp.close()
self.assertEquals(resp.status_int, 200)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar_out = tarfile.open(name)
names = tar_out.getnames()
members = tar_out.getmembers()
self.assertIn('stdout', names)
self.assertEqual(names[0], 'stdout')
self.assertEqual(members[0].size, len(self._sortednumbers))
file = tar_out.extractfile(members[0])
self.assertEqual(file.read(), self._sortednumbers)
orig_max_upload_time = self.app.max_upload_time
self.app.max_upload_time = 0.001
fp = open(tar, 'rb')
length = os.path.getsize(tar)
req.body_file = Input(SlowBody(fp), length)
req.content_length = length
resp = req.get_response(self.app)
fp.close()
self.app.max_upload_time = orig_max_upload_time
self.assertEquals(resp.status_int, 408)
def test_QUERY_no_content_type(self):
req = self.zerovm_object_request()
del req.headers['Content-Type']
req.body = 'SCRIPT'
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
self.assert_('No content type' in resp.body)
def test_QUERY_invalid_content_type(self):
req = self.zerovm_object_request()
req.headers['Content-Type'] = 'application/blah-blah-blah'
req.body = 'SCRIPT'
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
self.assert_('Invalid Content-Type' in resp.body)
def test_QUERY_invalid_path_encoding(self):
req = Request.blank('/sda1/p/a/c/o'.encode('utf-16'),
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/x-gtar',
'x-zerovm-execute': '1.0',
'x-account-name': 'a'})
req.body = 'SCRIPT'
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 412)
self.assert_('Invalid UTF8' in resp.body)
def test_QUERY_error_upstream(self):
self.obj_controller.fault = True
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/x-gtar'})
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 500)
self.assert_('Traceback' in resp.body)
    def __test_QUERY_script_invalid_etag(self):
        """Disabled test (double-underscore prefix keeps the runner from
        collecting it): a correct etag should pass (200) and a wrong one
        should yield 422.
        """
        # we cannot etag the tar stream because we mangle it while
        # transferring, on the fly
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', '/c/exe')
        conf.add_new_channel('stdin', ACCESS_READABLE, '/c/o')
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            # Matching etag: request should succeed.
            fp = open(tar, 'rb')
            etag = md5()
            etag.update(fp.read())
            fp.close()
            req.headers['etag'] = etag.hexdigest()
            req.body_file = open(tar, 'rb')
            resp = req.get_response(self.app)
            self.assertEquals(resp.status_int, 200)
            # Mismatched etag: expect 422 Unprocessable Entity.
            etag = md5()
            etag.update('blah-blah')
            req.headers['etag'] = etag.hexdigest()
            req.body_file = open(tar, 'rb')
            resp = req.get_response(self.app)
            self.assertEquals(resp.status_int, 422)
def test_QUERY_short_body(self):
# This test exercises a case where a request is submitted with a
# content length of X, but the actual amount of data sent in the body
# is _less_ than X.
# This is interpreted as a "499 Client Closed Request" (prematurely).
class ShortBody():
def __init__(self):
self.sent = False
def read(self, size=-1):
if not self.sent:
self.sent = True
return ' ' * 3
return ''
self.setup_zerovm_query()
req = Request.blank('/sda1/p/a/c/o',
environ={
'REQUEST_METHOD': 'POST',
'wsgi.input': Input(ShortBody(), 4)
},
headers={
'X-Timestamp': normalize_timestamp(time()),
'x-zerovm-execute': '1.0',
'x-zerocloud-id': self.uid_generator.get(),
'Content-Length': '4',
'Content-Type': 'application/x-gtar'
})
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 499)
def test_QUERY_long_body(self):
# This test exercises a case where a request is submitted with a
# content length of X, but the actual amount of data sent in the body
# is _greater_ than X.
# This would indicate that the `Content-Length` is wrong, and thus
# results in a "400 Bad Request".
class LongBody():
def __init__(self):
self.sent = False
def read(self, size=-1):
if not self.sent:
self.sent = True
return ' ' * 5
return ''
self.setup_zerovm_query()
req = Request.blank('/sda1/p/a/c/o',
environ={
'REQUEST_METHOD': 'POST',
'wsgi.input': Input(LongBody(), 4)
},
headers={
'X-Timestamp': normalize_timestamp(time()),
'x-zerovm-execute': '1.0',
'x-zerocloud-id': self.uid_generator.get(),
'Content-Length': '4',
'Content-Type': 'application/x-gtar'
})
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
def test_QUERY_zerovm_stderr(self):
self.setup_zerovm_query(trim(r'''
import sys
sys.stderr.write('some shit happened\n')
'''))
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 500)
self.assertIn('ERROR OBJ.QUERY retcode=OK, '
'zerovm_stdout=some shit happened',
resp.body)
self.setup_zerovm_query(trim(r'''
import sys
import time
sys.stdout.write('0\n\nok.\n')
for i in range(20):
time.sleep(0.1)
sys.stderr.write(''.zfill(4096))
'''))
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
self.assertIn('ERROR OBJ.QUERY retcode=Output too long', resp.body)
self.setup_zerovm_query(trim(r'''
import sys, time, signal
signal.signal(signal.SIGTERM, signal.SIG_IGN)
time.sleep(0.9)
sys.stdout.write('0\n\nok.\n')
sys.stderr.write(''.zfill(4096*20))
'''))
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
req.headers['x-zerovm-timeout'] = 1
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
self.assertIn(
'ERROR OBJ.QUERY retcode=Output too long', resp.body)
def test_QUERY_zerovm_term_timeouts(self):
self.setup_zerovm_query(trim(r'''
from time import sleep
sleep(10)
'''))
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
req.headers['x-zerovm-timeout'] = 1
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 500)
self.assertIn('ERROR OBJ.QUERY retcode=Timed out', resp.body)
def test_QUERY_zerovm_kill_timeouts(self):
self.setup_zerovm_query(trim(r'''
import signal, time
signal.signal(signal.SIGTERM, signal.SIG_IGN)
time.sleep(10)
'''))
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
orig_kill_timeout = self.app.zerovm_kill_timeout
try:
self.app.zerovm_kill_timeout = 0.1
req.headers['x-zerovm-timeout'] = 1
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 500)
self.assertIn('ERROR OBJ.QUERY retcode=Killed', resp.body)
finally:
self.app.zerovm_kill_timeout = orig_kill_timeout
    def test_QUERY_simulteneous_running_zerovm_limits(self):
        """Storm the middleware with concurrent requests under differently
        sized WaitPools: requests that fit in pool+queue get 200, the
        spill-over gets 503 'Slot not available'.

        NOTE(review): method name has a typo ('simulteneous'); renaming
        would change the discoverable test id, so it is left as-is.
        """
        self.setup_zerovm_query()
        nexefile = StringIO('return sleep(.2)')
        conf = ZvmNode(1, 'sleep', parse_location('swift://a/c/exe'))
        conf = conf.dumps()
        sysmap = StringIO(conf)
        maxreq_factor = 2
        r = range(0, maxreq_factor * 5)
        req = copy(r)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            orig_zerovm_threadpools = self.app.zerovm_thread_pools
            try:
                pool = GreenPool()
                t = copy(r)

                def make_requests_storm(queue_factor, pool_factor):
                    # Build one request per slot in `r`.
                    for i in r:
                        req[i] = self.zerovm_free_request()
                        req[i].body_file = Input(open(tar, 'rb'), length)
                        req[i].content_length = length
                        req[i].headers['x-zerovm-timeout'] = 5
                    # Size the pool and its wait queue from the factors.
                    size = int(maxreq_factor * pool_factor * 5)
                    queue = int(maxreq_factor * queue_factor * 5)
                    self.app.zerovm_thread_pools[
                        'default'] = WaitPool(size, queue)
                    spil_over = size + queue
                    for i in r:
                        t[i] = pool.spawn(req[i].get_response, self.app)
                    pool.waitall()
                    resp = copy(r)
                    # First `spil_over` requests fit and succeed ...
                    for i in r[:spil_over]:
                        resp[i] = t[i].wait()
                        # print 'expecting ok #%s: %s' % (i, resp[i])
                        self.assertEqual(resp[i].status_int, 200)
                    # ... the remainder are rejected with 503.
                    for i in r[spil_over:]:
                        resp[i] = t[i].wait()
                        # print 'expecting fail #%s: %s' % (i, resp[i])
                        self.assertEqual(resp[i].status_int, 503)
                        self.assertEqual(resp[i].body, 'Slot not available')

                make_requests_storm(0.2, 0.4)
                make_requests_storm(0, 1)
                make_requests_storm(0.4, 0.6)
                make_requests_storm(0, 0.1)
            finally:
                # Restore the shared pool config for subsequent tests.
                self.app.zerovm_thread_pools = orig_zerovm_threadpools
    def test_QUERY_max_input_size(self):
        """The 'rbytes' limit rejects both an oversized RPC request and an
        oversized input data object with 413, each with its own message.
        """
        self.setup_zerovm_query()
        orig_maxinput = self.app.parser_config['limits']['rbytes']
        try:
            # With rbytes=0 any request body is too large.
            self.app.parser_config['limits']['rbytes'] = 0
            req = self.zerovm_object_request()
            req.body = 'xxxxxxxxx'
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 413)
            self.assertEqual(resp.body, 'RPC request too large')
            nexefile = StringIO(self._nexescript)
            conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
            conf.add_new_channel(
                'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
            conf.add_new_channel('stdout', ACCESS_WRITABLE)
            conf = conf.dumps()
            sysmap = StringIO(conf)
            with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
                # Make the stored object one byte bigger than the limit.
                self.create_object(
                    create_random_numbers(os.path.getsize(tar) + 2))
                self.app.parser_config['limits'][
                    'rbytes'] = os.path.getsize(tar) + 1
                req = self.zerovm_object_request()
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                req.content_length = length
                resp = req.get_response(self.app)
                self.assertEqual(resp.status_int, 413)
                self.assertEqual(resp.body, 'Data object too large')
        finally:
            # Restore the fixture object and the original limit.
            self.create_object(create_random_numbers(10))
            self.app.parser_config['limits']['rbytes'] = orig_maxinput
def test_QUERY_max_nexe_size(self):
self.setup_zerovm_query()
orig_maxnexe = getattr(self.app, 'zerovm_maxnexe')
try:
setattr(self.app, 'zerovm_maxnexe', 0)
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
self.assertEqual(resp.headers['x-nexe-retcode'], '0')
self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
self.assertEqual(resp.headers['x-nexe-validation'], '0')
self.assertEqual(resp.headers['x-nexe-system'], 'sort')
finally:
setattr(self.app, 'zerovm_maxnexe', orig_maxnexe)
    def test_QUERY_bad_system_map(self):
        """An unparsable or missing system map must yield 400 with a
        descriptive error body."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        # '{""}' is not a parseable system map.
        conf = '{""}'
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 400)
            self.assertEqual(resp.body, 'Cannot parse system map')
        # A tar with no 'sysmap' entry at all is also rejected.
        with create_tar({'boot': nexefile}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 400)
            self.assertEqual(resp.body, 'No system map found in request')
    def test_QUERY_sysimage(self):
        """For every configured sysimage device, the device channel path
        and the matching nvram mount/channel lines appear in stdout."""
        self.setup_zerovm_query()
        req = self.zerovm_free_request()
        for dev, path in self.app.parser.sysimage_devices.items():
            # In-VM script echoes the device channel path plus the
            # generated nvram file contents for the assertions below.
            script = 'return mnfst.channels["/dev/%s"]["path"]'\
                     ' + "\\n" + ' \
                     'open(mnfst.channels["/dev/nvram"]["path"]).read()' \
                     % dev
            nexefile = StringIO(script)
            conf = ZvmNode(
                1, 'sysimage-test', parse_location('swift://a/c/exe'))
            conf.add_new_channel(dev, ACCESS_CDR)
            conf.add_new_channel('stdout', ACCESS_WRITABLE)
            conf = conf.dumps()
            sysmap = StringIO(conf)
            with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                req.content_length = length
                resp = req.get_response(self.app)
                fd, name = mkstemp()
                self.assertEqual(resp.status_int, 200)
                for chunk in resp.app_iter:
                    os.write(fd, chunk)
                os.close(fd)
                tar = tarfile.open(name)
                names = tar.getnames()
                members = tar.getmembers()
                self.assertIn('stdout', names)
                self.assertEqual(names[-1], 'stdout')
                data = tar.extractfile(members[-1]).read()
                self.assertTrue('%s\n' % path in data)
                self.assertTrue('channel=/dev/%s, mountpoint=/, access=ro, '
                                'removable=no\n' % dev in data)
                self.assertTrue('channel=/dev/%s, mode=file\n' % dev in data)
                self.assertEqual(resp.headers['x-nexe-retcode'], '0')
                self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
                self.assertEqual(resp.headers['x-nexe-validation'], '0')
                self.assertEqual(
                    resp.headers['x-nexe-system'], 'sysimage-test')
    def test_QUERY_use_image_file(self):
        """The executable can be loaded from an attached 'image' tarball
        via a file:// location instead of a swift object."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        # file:// path resolves inside the attached image tar.
        conf = ZvmNode(1, 'sort', 'file://usr/bin/sort')
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf.add_new_channel('image', ACCESS_CDR)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'usr/bin/sort': nexefile}) as image_tar:
            with create_tar({'image': open(image_tar, 'rb'),
                             'sysmap': sysmap}) as tar:
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                req.content_length = length
                resp = req.get_response(self.app)
                fd, name = mkstemp()
                self.assertEqual(resp.status_int, 200)
                for chunk in resp.app_iter:
                    os.write(fd, chunk)
                os.close(fd)
                self.assertEqual(os.path.getsize(name), resp.content_length)
                tar = tarfile.open(name)
                names = tar.getnames()
                members = tar.getmembers()
                self.assertIn('stdout', names)
                self.assertEqual(names[-1], 'stdout')
                self.assertEqual(members[-1].size, len(self._sortednumbers))
                file = tar.extractfile(members[-1])
                self.assertEqual(file.read(), self._sortednumbers)
                self.assertEqual(resp.headers['x-nexe-retcode'], '0')
                self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
                self.assertEqual(resp.headers['x-nexe-validation'], '0')
                self.assertEqual(resp.headers['x-nexe-system'], 'sort')
                timestamp = normalize_timestamp(time())
                self.assertEqual(
                    math.floor(float(resp.headers['X-Timestamp'])),
                    math.floor(float(timestamp))
                )
                self.assertEquals(
                    resp.headers['content-type'], 'application/x-gtar')
                # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
                #                  'Zerovm CDR: 0 0 0 0 1 46 2 56 0 0 0 0')
def test_QUERY_use_gzipped_image(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO(self._nexescript)
conf = ZvmNode(1, 'sort', 'file://usr/bin/sort')
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf.add_new_channel('image', ACCESS_CDR)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'usr/bin/sort': nexefile}) as image_tar:
import gzip
image_tar_gz = image_tar + '.gz'
try:
t = open(image_tar, 'rb')
gz = gzip.open(image_tar_gz, 'wb')
gz.writelines(t)
gz.close()
t.close()
with create_tar({'image.gz': open(image_tar_gz, 'rb'),
'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
fd, name = mkstemp()
self.assertEqual(resp.status_int, 200)
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(
os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members = tar.getmembers()
self.assertIn('stdout', names)
self.assertEqual(names[-1], 'stdout')
self.assertEqual(
members[-1].size, len(self._sortednumbers))
file = tar.extractfile(members[-1])
self.assertEqual(file.read(), self._sortednumbers)
self.assertEqual(resp.headers['x-nexe-retcode'], '0')
self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
self.assertEqual(resp.headers['x-nexe-validation'], '0')
self.assertEqual(resp.headers['x-nexe-system'], 'sort')
timestamp = normalize_timestamp(time())
self.assertEqual(
math.floor(float(resp.headers['X-Timestamp'])),
math.floor(float(timestamp))
)
self.assertEquals(
resp.headers['content-type'], 'application/x-gtar')
finally:
try:
os.unlink(image_tar_gz)
except OSError:
pass
    def test_QUERY_bypass_image_file(self):
        """An explicit 'boot' executable takes precedence over whatever
        the attached image tar contains."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf.add_new_channel('image', ACCESS_CDR)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        # The image holds junk; only 'boot' should be executed.
        with create_tar({'usr/bin/sort': StringIO('bla-bla')}) as image_tar:
            with create_tar({'image': open(image_tar, 'rb'),
                             'sysmap': sysmap, 'boot': nexefile}) as tar:
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                req.content_length = length
                resp = req.get_response(self.app)
                fd, name = mkstemp()
                self.assertEqual(resp.status_int, 200)
                for chunk in resp.app_iter:
                    os.write(fd, chunk)
                os.close(fd)
                self.assertEqual(os.path.getsize(name), resp.content_length)
                tar = tarfile.open(name)
                names = tar.getnames()
                members = tar.getmembers()
                self.assertIn('stdout', names)
                self.assertEqual(names[-1], 'stdout')
                self.assertEqual(members[-1].size, len(self._sortednumbers))
                file = tar.extractfile(members[-1])
                self.assertEqual(file.read(), self._sortednumbers)
                self.assertEqual(resp.headers['x-nexe-retcode'], '0')
                self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
                self.assertEqual(resp.headers['x-nexe-validation'], '0')
                self.assertEqual(resp.headers['x-nexe-system'], 'sort')
                timestamp = normalize_timestamp(time())
                self.assertEqual(
                    math.floor(float(resp.headers['X-Timestamp'])),
                    math.floor(float(timestamp))
                )
                self.assertEquals(
                    resp.headers['content-type'], 'application/x-gtar')
                # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
                #                  'Zerovm CDR: 0 0 0 0 1 46 1 46 0 0 0 0')
    def test_QUERY_bad_channel_path(self):
        """A channel path that is not a parseable location yields 400
        with the offending path and device named in the body."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        # 'bla-bla' is neither a swift:// nor a file:// location.
        conf.add_new_channel('stdin', ACCESS_READABLE, 'bla-bla')
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'sysmap': sysmap, 'boot': nexefile}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 400)
            self.assertEqual(
                resp.body, 'Could not resolve channel path "bla-bla" for '
                           'device: stdin')
def test_QUERY_filter_factory(self):
app = objectquery.filter_factory(self.conf)(FakeApp(self.conf))
self.assertIsInstance(app, objectquery.ObjectQueryMiddleware)
    def test_QUERY_prevalidate(self):
        """x-zerovm-validate on PUT pre-validates the executable, and the
        resulting x-zerovm-valid flag is echoed on later GETs.

        Scenarios: explicit validation, no validation, implicit
        validation via the application/x-nexe content type, and invalid
        executables with and without the validate header.
        """
        self.setup_zerovm_query()
        # PUT with explicit validation -> marked valid.
        req = Request.blank('/sda1/p/a/c/exe',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={
                                'X-Timestamp': normalize_timestamp(time()),
                                'x-zerovm-validate': 'true',
                                'Content-Type': 'application/octet-stream'
                            })
        req.body = self._nexescript
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 201)
        self.assertEquals(resp.headers['x-zerovm-valid'], 'true')
        # GET confirms the validity marker survived.
        req = Request.blank('/sda1/p/a/c/exe',
                            headers={'x-zerovm-valid': 'true'})
        req.body = self._nexescript
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['x-zerovm-valid'], 'true')
        # PUT without validation -> no validity marker, before or after.
        req = Request.blank('/sda1/p/a/c/exe',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={
                                'X-Timestamp': normalize_timestamp(time()),
                                'Content-Type': 'application/octet-stream'
                            })
        req.body = self._nexescript
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 201)
        self.assertNotIn('x-zerovm-valid', resp.headers)
        req = Request.blank('/sda1/p/a/c/exe',
                            headers={'x-zerovm-valid': 'true'})
        req.body = self._nexescript
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 200)
        self.assertNotIn('x-zerovm-valid', resp.headers)
        # Content type application/x-nexe triggers validation implicitly.
        req = Request.blank('/sda1/p/a/c/exe',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={
                                'X-Timestamp': normalize_timestamp(time()),
                                'Content-Type': 'application/x-nexe'
                            })
        req.body = self._nexescript
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 201)
        self.assertEquals(resp.headers['x-zerovm-valid'], 'true')
        # Invalid executable, no validation -> stored, unmarked.
        req = Request.blank('/sda1/p/a/c/exe',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={
                                'X-Timestamp': normalize_timestamp(time()),
                                'Content-Type': 'application/octet-stream'
                            })
        req.body = 'INVALID'
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 201)
        self.assertNotIn('x-zerovm-valid', resp.headers)
        # Invalid executable, validation requested -> stored, still
        # never marked valid.
        req = Request.blank('/sda1/p/a/c/exe',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={
                                'X-Timestamp': normalize_timestamp(time()),
                                'x-zerovm-validate': 'true',
                                'Content-Type': 'application/octet-stream'
                            })
        req.body = 'INVALID'
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 201)
        self.assertNotIn('x-zerovm-valid', resp.headers)
    def test_QUERY_execute_prevalidated(self):
        """x-zerovm-valid: 'true' on the request skips revalidation
        (reported as x-nexe-validation '2'); any other value forces
        revalidation (status '0')."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            # First run: claim the executable is already validated.
            req.headers['x-zerovm-valid'] = 'true'
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar_result = tarfile.open(name)
            names = tar_result.getnames()
            members = tar_result.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            self.assertEqual(members[-1].size, len(self._sortednumbers))
            file = tar_result.extractfile(members[-1])
            self.assertEqual(file.read(), self._sortednumbers)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            # '2' means validation was skipped (pre-validated).
            self.assertEqual(resp.headers['x-nexe-validation'], '2')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
            # Second run: a non-'true' value forces revalidation.
            req.headers['x-zerovm-valid'] = 'false'
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar_result = tarfile.open(name)
            names = tar_result.getnames()
            members = tar_result.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            self.assertEqual(members[-1].size, len(self._sortednumbers))
            file = tar_result.extractfile(members[-1])
            self.assertEqual(file.read(), self._sortednumbers)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
    def test_zerovm_bad_exit_code(self):
        """A zerovm binary exiting non-zero surfaces as a 500 whose body
        carries the captured zerovm stdout."""
        @contextmanager
        def save_zerovm_exename():
            # Restore the real executable path even if the test fails.
            exename = self.app.zerovm_exename
            try:
                yield True
            finally:
                self.app.zerovm_exename = exename
        self.setup_zerovm_query()
        with save_zerovm_exename():
            # Stub zerovm out with a script that always exits 255.
            (zfd, zerovm) = mkstemp()
            os.write(zfd, trim(r'''
            from sys import exit
            exit(255)
            '''))
            os.close(zfd)
            self.app.zerovm_exename = ['python', zerovm]
            req = self.zerovm_object_request()
            nexefile = StringIO(self._nexescript)
            conf = ZvmNode(1, 'exit', parse_location('swift://a/c/exe'))
            conf = conf.dumps()
            sysmap = StringIO(conf)
            with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                req.content_length = length
                resp = req.get_response(self.app)
                self.assertEquals(resp.status_int, 500)
                self.assertIn(
                    'ERROR OBJ.QUERY retcode=Error, zerovm_stdout=',
                    resp.body
                )
            os.unlink(zerovm)
    def test_zerovm_bad_retcode(self):
        """A nexe that sets a non-zero error_code still returns its
        output, but the response is flagged via x-nexe-error."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexe = trim(r'''
            global error_code
            error_code = 4
            return pickle.dumps(sorted(id))
            ''')
        nexefile = StringIO(nexe)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 200)
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            # The bad application-level return code is reported here.
            self.assertEqual(resp.headers['x-nexe-error'], 'bad return code')
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            fh = tar.extractfile(members[-1])
            self.assertEqual(fh.read(), self._sortednumbers)
            self.assertEqual(
                resp.headers['content-type'], 'application/x-gtar')
class TestUtils(unittest.TestCase):
    """
    Tests for misc. utilities in :mod:`zerocloud.objectquery`.
    """

    def test_get_zerovm_sysimage_devices(self):
        """The space-separated device/path list parses into a dict."""
        conf = dict(zerovm_sysimage_devices='image1 path1 image2 path2')
        expected = dict(image1='path1', image2='path2')
        self.assertEqual(expected,
                         objectquery.get_zerovm_sysimage_devices(conf))
        # A trailing device without a path (odd item count) is dropped.
        conf = dict(zerovm_sysimage_devices='image1 path1 image2')
        self.assertEqual(dict(image1='path1'),
                         objectquery.get_zerovm_sysimage_devices(conf))


if __name__ == '__main__':
    unittest.main()
|
1673790
|
from icolos.core.containers.generic import GenericData
import unittest
from icolos.core.workflow_steps.schrodinger.desmond_preprocessor import StepDesmondSetup
from icolos.utils.general.files_paths import attach_root_path
import os
from tests.tests_paths import PATHS_EXAMPLEDATA
from icolos.utils.enums.step_enums import StepBaseEnum, StepDesmondEnum
_SBE = StepBaseEnum
_SDE = StepDesmondEnum()
class Test_Desmond_Setup(unittest.TestCase):
    """Integration test for the Schrodinger Desmond preprocessing step.

    NOTE(review): requires the schrodinger module environment to be
    loadable; see the prefix_execution setting below.
    """
    @classmethod
    def setUpClass(cls):
        # Shared scratch directory for the step's output files.
        cls._test_dir = attach_root_path("tests/junk/schrodinger")
        if not os.path.isdir(cls._test_dir):
            os.makedirs(cls._test_dir)
    def setUp(self):
        # Input PDB structure used by the test below.
        with open(attach_root_path(PATHS_EXAMPLEDATA.DESMOND_SETUP_PDB), "r") as f:
            self.pdb = f.read()
    def test_desmond_preprocess(self):
        """Run desmond_preprocess on a PDB and check the written setup.cms
        has the expected size.

        NOTE(review): the exact byte-size assertion is likely brittle
        across Schrodinger versions -- confirm when it breaks.
        """
        step_conf = {
            _SBE.STEPID: "test_desmond_setup",
            _SBE.STEP_TYPE: "desmond_preprocess",
            _SBE.EXEC: {
                _SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws"
            },
            _SBE.SETTINGS: {
                _SBE.SETTINGS_ARGUMENTS: {},
                _SBE.SETTINGS_ADDITIONAL: {_SDE.MSJ_FIELDS: {}},
            },
        }
        step_desmond_preprocess = StepDesmondSetup(**step_conf)
        step_desmond_preprocess.data.generic.add_file(
            GenericData(file_name="structure.pdb", file_data=self.pdb, argument=True)
        )
        step_desmond_preprocess.execute()
        out_path = os.path.join(self._test_dir, "setup.cms")
        step_desmond_preprocess.data.generic.write_out_all_files(self._test_dir)
        stat_inf = os.stat(out_path)
        self.assertEqual(stat_inf.st_size, 22560500)
|
1673810
|
import pandas as pd
import valentine.metrics as valentine_metrics
import valentine.algorithms
import valentine.data_sources
class NotAValentineMatcher(Exception):
    """Raised when the supplied matcher is not a valentine BaseMatcher."""
    pass
def valentine_match(df1: pd.DataFrame,
                    df2: pd.DataFrame,
                    matcher: valentine.algorithms.BaseMatcher,
                    df1_name: str = 'table_1',
                    df2_name: str = 'table_2'):
    """Run a valentine schema matcher over two dataframes.

    Returns the matcher's matches as a dict ordered by descending
    similarity score. Raises NotAValentineMatcher for anything that is
    not a valentine BaseMatcher.
    """
    if not isinstance(matcher, valentine.algorithms.BaseMatcher):
        raise NotAValentineMatcher('The method that you selected is not supported by Valentine')
    source_table = valentine.data_sources.DataframeTable(df1, name=df1_name)
    target_table = valentine.data_sources.DataframeTable(df2, name=df2_name)
    raw_matches = matcher.get_matches(source_table, target_table)
    return dict(sorted(raw_matches.items(),
                       key=lambda pair: pair[1], reverse=True))
|
1673873
|
import datetime
from typing import List
from sqlalchemy import cast, String, or_
from sqlalchemy.dialects import postgresql
from backend.database.objects import Game, PlayerGame, GameVisibilitySetting, GameTag
from backend.utils.checks import is_admin
from backend.utils.safe_flask_globals import get_current_user_id
class QueryFilterBuilder:
    """
    Builds the filtered query for players or games.

    The builder is stateful: ``with_*`` methods accumulate filter state
    and return ``self`` for chaining, while :meth:`build_query` renders
    the current state into a SQLAlchemy query without mutating the
    builder.

    Fix over the original: the ``elif len(self.replay_ids) == 1`` branch
    in ``build_query`` was unreachable (the preceding ``len(...) > 0``
    test already covers it) and has been removed; ``handle_list`` already
    copes with both scalar and list replay ids.
    """

    def __init__(self):
        self.start_time: datetime.datetime = None
        self.end_time: datetime.datetime = None
        self.players: List[str] = None
        self.contains_all_players: List[str] = None
        self.tag_ids = None
        self.stats_query = None
        self.rank: int = None
        self.initial_query = None  # This is a query that is created with initial values
        self.team_size: int = None
        self.replay_ids: List[str] = None
        self.is_game: bool = False
        self.playlists: List[int] = None
        self.has_joined_game: bool = False  # used to see if this query has been joined with the Game database
        self.safe_checking: bool = False
        self.sticky_values: dict = dict()  # values that survive a clean()

    def reset(self):
        """Restore every filter attribute to its constructor default."""
        self.start_time = None
        self.end_time = None
        self.players = None
        self.contains_all_players = None
        self.tag_ids = None
        self.stats_query = None
        self.rank = None
        self.initial_query = None  # This is a query that is created with initial values
        self.team_size = None
        self.replay_ids = None
        self.is_game = False
        self.playlists = None
        self.has_joined_game = False  # used to see if this query has been joined with the Game database
        self.safe_checking = False  # checks to make sure the replay has good data for the player
        self.sticky_values = dict()

    def clean(self):
        """
        Clears the filter but maintains the initial query and other stateful values that are required.
        """
        query = self.initial_query
        has_joined = self.has_joined_game
        is_game = self.is_game
        sticky_values = self.sticky_values
        self.reset()
        self.has_joined_game = has_joined
        self.is_game = is_game
        self.sticky_values = sticky_values
        self.initial_query = query
        # Re-apply every value captured by sticky().
        for key, value in self.sticky_values.items():
            setattr(self, key, value)
        return self

    def with_relative_start_time(self, days_ago: float = 0, hours_ago: float = 0) -> 'QueryFilterBuilder':
        """Filter to games newer than the given offset from now."""
        ago = datetime.datetime.now() - datetime.timedelta(days=days_ago, hours=hours_ago)
        return self.with_timeframe(start_time=ago)

    def with_timeframe(self,
                       start_time: datetime.datetime = None,
                       end_time: datetime.datetime = None) -> 'QueryFilterBuilder':
        """Filter to games between start_time and end_time (inclusive)."""
        self.start_time = start_time
        self.end_time = end_time
        return self

    def with_players(self, player_ids: List[str]) -> 'QueryFilterBuilder':
        """Filter to rows matching any of the given player ids."""
        self.players = player_ids
        return self

    def with_all_players(self, player_ids: List[str]) -> 'QueryFilterBuilder':
        """Filter to games containing every one of the given players."""
        self.contains_all_players = player_ids
        return self

    def with_tags(self, tags) -> 'QueryFilterBuilder':
        """Filter to games carrying any of the given tag ids."""
        self.tag_ids = tags
        return self

    def with_stat_query(self, stats_query) -> 'QueryFilterBuilder':
        """Select the given columns/aggregates instead of whole entities."""
        self.stats_query = stats_query
        return self

    def with_rank(self, rank: int) -> 'QueryFilterBuilder':
        """Filter to player-games at the given rank."""
        self.rank = rank
        return self

    def with_replay_ids(self, replay_ids: List[str]) -> 'QueryFilterBuilder':
        """Filter to the given list of replay ids."""
        self.replay_ids = replay_ids
        return self

    def set_replay_id(self, replay_id: str) -> 'QueryFilterBuilder':
        """Filter to a single replay id (scalar, not a list)."""
        self.replay_ids = replay_id
        return self

    def with_team_size(self, team_size: int) -> 'QueryFilterBuilder':
        """Filter to games of the given team size."""
        self.team_size = team_size
        return self

    def with_safe_checking(self) -> 'QueryFilterBuilder':
        """Only keep player-games with usable data (hits and in-game time)."""
        self.safe_checking = True
        return self

    def as_game(self) -> 'QueryFilterBuilder':
        """Query Game rows instead of PlayerGame rows."""
        self.is_game = True
        return self

    def with_playlists(self, playlists: List[int]) -> 'QueryFilterBuilder':
        """Filter to games in any of the given playlists."""
        self.playlists = playlists
        return self

    def build_query(self, session):
        """
        Builds a query given the current state, returns the result.
        This method does not modify state of this object at all
        :return: A filtered query.
        """
        has_joined_game = False
        needs_pg = False
        if self.initial_query is None:
            if self.is_game and self.stats_query is None:
                filtered_query = session.query(Game)
            else:
                filtered_query = session.query(*self.stats_query)
        else:
            filtered_query = self.initial_query
        # Game-table filters require a join when we start from PlayerGame.
        if (self.start_time is not None or
                self.end_time is not None or
                self.team_size is not None):
            if not self.is_game:
                filtered_query = filtered_query.join(Game, Game.hash == PlayerGame.game)
                has_joined_game = True
        if self.tag_ids is not None:
            filtered_query = filtered_query.join(GameTag, Game.hash == GameTag.game_id)
        if self.is_game or has_joined_game:
            # Do visibility check: non-admins only see public games or
            # games they participated in.
            if not is_admin():
                filtered_query = filtered_query.filter(or_(Game.visibility != GameVisibilitySetting.PRIVATE,
                                                           Game.players.any(get_current_user_id())))
        if self.start_time is not None:
            filtered_query = filtered_query.filter(
                Game.match_date >= self.start_time)
        if self.end_time is not None:
            filtered_query = filtered_query.filter(
                Game.match_date <= self.end_time)
        if self.rank is not None:
            needs_pg = True
            filtered_query = filtered_query.filter(PlayerGame.rank == self.rank)
        if self.team_size is not None:
            filtered_query = filtered_query.filter(Game.teamsize == self.team_size)
        if self.playlists is not None:
            filtered_query = filtered_query.filter(Game.playlist.in_(self.playlists))
        if self.safe_checking:
            needs_pg = True
            filtered_query = filtered_query.filter(PlayerGame.total_hits > 0).filter(PlayerGame.time_in_game > 0)
        if self.players is not None and len(self.players) > 0:
            needs_pg = True
            filtered_query = filtered_query.filter(self.handle_list(PlayerGame.player, self.players))
        if self.contains_all_players is not None and len(self.contains_all_players) > 0:
            filtered_query = filtered_query.filter(self.handle_union(Game.players, self.contains_all_players))
        if self.replay_ids is not None and len(self.replay_ids) > 0:
            # handle_list() copes with both a scalar id (set_replay_id)
            # and a list of ids, so no separate single-id branch is
            # needed here.
            if self.is_game or has_joined_game:
                filtered_query = filtered_query.filter(self.handle_list(Game.hash, self.replay_ids))
            else:
                needs_pg = True
                filtered_query = filtered_query.filter(self.handle_list(PlayerGame.game, self.replay_ids))
        if self.tag_ids is not None:
            if len(self.tag_ids) == 1:
                filtered_query = filtered_query.filter(GameTag.tag_id == self.tag_ids[0])
            else:
                filtered_query = filtered_query.filter(self.handle_list(GameTag.tag_id, self.tag_ids))
        if needs_pg and self.is_game:
            filtered_query = filtered_query.join(PlayerGame, PlayerGame.game == Game.hash)
        return filtered_query

    def create_stored_query(self, session) -> 'QueryFilterBuilder':
        """
        Creates a query then stores it locally in this object.
        Useful if we want lots of different queries built off of a central object.
        This also clears anything currently stored in the object.
        """
        query = self.build_query(session)
        # maintain state
        has_joined = self.has_joined_game
        is_game = self.is_game
        self.reset()
        self.has_joined_game = has_joined
        self.is_game = is_game
        # reassign query
        self.initial_query = query
        return self

    def sticky(self) -> 'QueryFilterBuilder':
        """Creates a list of values that should be saved from a clean"""
        self.sticky_values = dict()
        for key, value in vars(self).items():
            if value is not None and key != "sticky_values":
                self.sticky_values[key] = value
        return self

    def clone(self) -> 'QueryFilterBuilder':
        """Returns a copy of this object"""
        copy = QueryFilterBuilder()
        copy.initial_query = self.initial_query
        copy.has_joined_game = self.has_joined_game
        copy.is_game = self.is_game
        copy.sticky_values = self.sticky_values.copy()
        return copy

    @staticmethod
    def handle_list(field, lst):
        """Return an equality filter for a scalar or an IN filter for a list."""
        if isinstance(lst, list):
            if len(lst) == 1:
                return field == lst[0]
            else:
                return field.in_(lst)
        else:
            return field == lst

    @staticmethod
    def handle_union(field, lst):
        """Return a containment filter for one or many values of a
        Postgres string-array column."""
        if isinstance(lst, list):
            return field.contains(cast(lst, postgresql.ARRAY(String)))
        else:
            return field.contains(cast([lst], postgresql.ARRAY(String)))

    def get_stored_query(self):
        """Return the query stored by create_stored_query(), if any."""
        return self.initial_query

    @staticmethod
    def apply_arguments_to_query(builder, args):
        """Apply a request-args dict onto an existing builder in place."""
        if 'rank' in args:
            builder.with_rank(args['rank'])
        if 'team_size' in args:
            builder.with_team_size(args['team_size'])
        if 'playlists' in args:
            builder.with_playlists(args['playlists'])
        if 'date_before' in args:
            if 'date_after' in args:
                builder.with_timeframe(end_time=args['date_before'],
                                       start_time=args['date_after'])
            else:
                builder.with_timeframe(end_time=args['date_before'])
        elif 'date_after' in args:
            builder.with_timeframe(start_time=args['date_after'])
        if 'player_ids' in args:
            builder.with_all_players(args['player_ids'])
|
1673899
|
import logging
import gluoncv as gcv
gcv.utils.check_version('0.8.0')
from gluoncv.auto.estimators import CenterNetEstimator
from gluoncv.auto.tasks.utils import config_to_nested
from d8.object_detection import Dataset
if __name__ == '__main__':
    # Hyperparameters for a CenterNet training run; 'estimator' is popped
    # below because CenterNetEstimator is instantiated explicitly.
    config = {
        'dataset': 'sheep',
        'gpus': [0, 1, 2, 3, 4, 5, 6, 7],
        'estimator': 'center_net',
        'base_network': 'resnet50_v1b',
        'batch_size': 64,  # range [8, 16, 32, 64]
        'epochs': 3
    }
    config = config_to_nested(config)
    config.pop('estimator')
    # specify dataset, with an 80/20 train/validation split
    dataset = Dataset.get('sheep')
    train_data, valid_data = dataset.split(0.8)
    # specify estimator
    estimator = CenterNetEstimator(config)
    # fit estimator
    estimator.fit(train_data, valid_data)
    # evaluate auto estimator; eval_map[-1][-1] holds the overall mAP
    eval_map = estimator.evaluate(valid_data)
    logging.info('evaluation: mAP={}'.format(eval_map[-1][-1]))
|
1673917
|
from functools import partial
from .config import BOROUGHS
from .error import GeosupportError
from .function_info import (
FUNCTIONS, AUXILIARY_SEGMENT_LENGTH, WORK_AREA_LAYOUTS
)
def list_of(length, callback, v):
    """Split *v* into fixed-width chunks of *length* characters, apply
    *callback* to each, and stop at the first all-blank chunk."""
    parsed = []
    offset = 0
    while True:
        chunk = v[offset:offset + length]
        if chunk.strip() == '':
            break
        parsed.append(callback(chunk))
        offset += length
    return parsed
def list_of_items(length):
    """Return a parser splitting a string into stripped fixed-width
    items of the given length."""
    def strip_item(v):
        return v.strip()
    return partial(list_of, length, strip_item)
def list_of_workareas(name, length):
    """Return a parser splitting a string into fixed-width work areas,
    each parsed with the named output layout."""
    def parse_one(v):
        # Layout is looked up lazily, per item, as in the original.
        return parse_workarea(WORK_AREA_LAYOUTS['output'][name], v)
    return partial(list_of, length, parse_one)
def list_of_nodes(v):
    """Parse a node-list work area: 160-character rows, each split into
    32-character groups of stripped 8-character ids."""
    def parse_row(row):
        return list_of(32, list_of_items(8), row)
    return list_of(160, parse_row, v)
def borough(v):
    """Normalize a borough value to its code string.

    Digits pass through; known borough names map via BOROUGHS; empty
    input yields ''; anything else raises GeosupportError.
    """
    if not v:
        return ''
    normalized = str(v).strip().upper()
    if normalized.isdigit():
        return str(normalized)
    if normalized in BOROUGHS:
        return str(BOROUGHS[normalized])
    raise GeosupportError("%s is not a valid borough" % v)
def function(v):
    """Normalize a function code, resolving aliases through FUNCTIONS."""
    code = str(v).upper().strip()
    if code in FUNCTIONS:
        return FUNCTIONS[code]['function']
    return code
def flag(true, false):
    """Build a formatter for a yes/no flag field.

    The returned callable maps a real bool to *true*/*false*, any other
    truthy value to its first stripped, uppercased character, and any
    falsy value to *false*.

    Fix over the original: ``type(v) == bool`` replaced with the
    idiomatic ``isinstance`` check (equivalent here, since bool cannot
    be subclassed).
    """
    def format_flag(v):
        if isinstance(v, bool):
            return true if v else false
        if v:
            return str(v).strip().upper()[:1]
        return false
    return format_flag
# Registry mapping field formatter names to callables; get_formatter()
# falls back to fixed-width list parsing for purely numeric names.
FORMATTERS = {
    # Format input
    'function': function,
    'borough': borough,
    # Single-character yes/no flags (true char, false char)
    'auxseg': flag('Y', 'N'),
    'cross_street_names': flag('E', ''),
    'long_work_area_2': flag('L', ''),
    'mode_switch': flag('X', ''),
    'real_streets_only': flag('R', ''),
    'roadbed_request_switch': flag('R', ''),
    'street_name_normalization': flag('C', ''),
    'tpad': flag('Y', 'N'),
    # Parse certain output differently (fixed-width work-area lists)
    'LGI': list_of_workareas('LGI', 53),
    'LGI-extended': list_of_workareas('LGI-extended', 116),
    'BINs': list_of_workareas('BINs', 7),
    'BINs-tpad': list_of_workareas('BINs-tpad', 8),
    'intersections': list_of_workareas('INTERSECTION', 55),
    'node_list': list_of_nodes,
    # Census Tract formatter: blanks inside the tract become zeros
    'CT': lambda v: '' if v is None else v.replace(' ', '0'),
    # Default formatter: strip and uppercase
    '': lambda v: '' if v is None else str(v).strip().upper()
}
def get_formatter(name):
    """Look up a formatter by name; purely numeric names mean a
    fixed-width list of that item length.

    Returns None for an unknown, non-numeric name (callers are expected
    to use registered names or digit strings).
    """
    if name in FORMATTERS:
        return FORMATTERS[name]
    if name.isdigit():
        return list_of_items(int(name))
    return None
def set_mode(mode):
    """Translate a mode string into the corresponding request flags."""
    flags = {}
    if not mode:
        return flags
    if mode == 'extended':
        flags['mode_switch'] = True
    if 'long' in mode:
        flags['long_work_area_2'] = True
    if 'tpad' in mode:
        flags['tpad'] = True
    return flags
def get_mode(flags):
    """Derive the mode name from parsed work-area flags."""
    if flags['mode_switch']:
        return 'extended'
    if flags['long_work_area_2']:
        # tpad only matters when the long work area 2 is requested.
        return 'long+tpad' if flags['tpad'] else 'long'
    return 'regular'
def get_flags(wa1):
    """Extract the request flags encoded in work area 1."""
    layout = WORK_AREA_LAYOUTS['input']['WA1']

    def field(name):
        return parse_field(layout[name], wa1)

    flags = {
        'function': field('function'),
        'mode_switch': field('mode_switch') == 'X',
        'long_work_area_2': field('long_work_area_2') == 'L',
        'tpad': field('tpad') == 'Y',
        'auxseg': field('auxseg') == 'Y'
    }
    flags['mode'] = get_mode(flags)
    return flags
def create_wa1(kwargs):
    """Render input kwargs into the fixed-width 1200-byte work area 1.

    Note: mutates *kwargs* by forcing work_area_format to 'C'.
    """
    kwargs['work_area_format'] = 'C'
    buf = bytearray(b' ' * 1200)
    view = memoryview(buf)
    layout = WORK_AREA_LAYOUTS['input']['WA1']
    for key, value in kwargs.items():
        spec = layout[key]
        formatter = get_formatter(spec['formatter'])
        text = '' if value is None else str(formatter(value))
        start, end = spec['i']
        width = end - start
        # Left-justify then clip so the field is exactly `width` wide.
        view[start:end] = text.ljust(width)[:width].encode()
    return str(buf.decode())
def create_wa2(flags):
    """Allocate a blank work area 2 sized for the function and mode,
    or None when the function/mode combination defines no WA2."""
    size = FUNCTIONS[flags['function']][flags['mode']]
    if size is None:
        return None
    if flags['auxseg']:
        size += AUXILIARY_SEGMENT_LENGTH
    return ' ' * size
def format_input(kwargs):
    """Build the (flags, wa1, wa2) triple for a geosupport call."""
    wa1 = create_wa1(kwargs)
    flags = get_flags(wa1)
    if flags['function'] not in FUNCTIONS:
        raise GeosupportError('INVALID FUNCTION CODE', {})
    return flags, wa1, create_wa2(flags)
def parse_field(field, wa):
    """Slice a field out of a work area and run it through its formatter."""
    start, end = field['i']
    formatter = get_formatter(field['formatter'])
    return formatter(wa[start:end])
def parse_workarea(layout, wa):
    """Parse a work area into a dict, recursing one level for grouped
    fields (layout entries without an 'i' slice)."""
    parsed = {}
    for key, spec in layout.items():
        if 'i' in spec:
            parsed[key] = parse_field(spec, wa)
        else:
            parsed[key] = {sub: parse_field(spec[sub], wa) for sub in spec}
    return parsed
def parse_output(flags, wa1, wa2):
    """Merge parsed WA1 output with function- and mode-specific WA2
    output, plus the auxiliary segment when requested."""
    layouts = WORK_AREA_LAYOUTS['output']
    output = parse_workarea(layouts['WA1'], wa1)
    function_name = flags['function']
    if function_name in layouts:
        output.update(parse_workarea(layouts[function_name], wa2))
    mode_key = function_name + '-' + flags['mode']
    if mode_key in layouts:
        output.update(parse_workarea(layouts[mode_key], wa2))
    if flags['auxseg']:
        # The auxiliary segment occupies the tail of WA2.
        output.update(parse_workarea(
            layouts['AUXSEG'], wa2[-AUXILIARY_SEGMENT_LENGTH:]))
    return output
|
1673928
|
import json
import hashlib
class ConfigBase:
    """Base class for slot-backed config objects.

    Provides dict-style item access over the declared slots, JSON
    serialization (nested ConfigBase values serialize via ``to_dict``),
    and a stable MD5 content hash of the key-sorted JSON form.
    """

    __slots__ = []

    def to_dict(self):
        """Map each declared slot name to its current value."""
        return {name: getattr(self, name) for name in self.__slots__}

    def to_json(self, **kwargs):
        """Serialize to a JSON string; extra kwargs pass through to json.dumps."""
        return json.dumps(self, default=lambda obj: obj.to_dict(), **kwargs)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def get_hash(self):
        """MD5 hex digest of the sorted-key JSON form (stable across runs)."""
        digest = hashlib.md5(self.to_json(sort_keys=True).encode())
        return digest.hexdigest()
|
1673985
|
import sys
# Emit a generated-function header and body on stdout.
print("@function from_script:hello")
print("say hello")
# Goes to stderr — presumably so consumers reading stdout as the function
# body do not capture it; confirm with the caller that parses this output.
print("This is not part of the function.", file=sys.stderr)
|
1673994
|
import h5py
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from copy import deepcopy
#group a set of img patches
def group_images(data, per_row):
    """Tile a batch of NCHW patches into one HWC mosaic, `per_row` per row."""
    assert data.shape[0] % per_row == 0
    assert data.shape[1] in (1, 3)
    # NCHW -> NHWC so patches concatenate along width/height directly.
    patches = np.transpose(data, (0, 2, 3, 1))
    n_rows = patches.shape[0] // per_row
    rows = [
        np.concatenate(patches[r * per_row:(r + 1) * per_row], axis=1)
        for r in range(n_rows)
    ]
    return np.concatenate(rows, axis=0)
# Prediction result splicing (original img, predicted probability, binary img, groundtruth)
def concat_result(ori_img, pred_res, gt):
    """Stitch (original | probability map | thresholded map | ground truth)
    side by side.

    Inputs are CHW; the result is HWC. When the original is RGB, the
    single-channel maps are scaled to uint8 and replicated to 3 channels so
    all four panels share a channel count.
    """
    ori_img = np.transpose(ori_img, (1, 2, 0))
    pred_res = np.transpose(pred_res, (1, 2, 0))
    gt = np.transpose(gt, (1, 2, 0))
    # Hard 0/1 mask at the 0.5 probability threshold.
    binary = deepcopy(pred_res)
    binary[binary >= 0.5] = 1
    binary[binary < 0.5] = 0
    if ori_img.shape[2] == 3:
        def to_rgb(m):
            return np.repeat((m * 255).astype(np.uint8), repeats=3, axis=2)
        pred_res, binary, gt = to_rgb(pred_res), to_rgb(binary), to_rgb(gt)
    return np.concatenate((ori_img, pred_res, binary, gt), axis=1)
#visualize image, save as PIL image
def save_img(data, filename):
    """Save an HWC array to `filename` via PIL and return the PIL image.

    Single-channel (H, W, 1) input is squeezed to 2-D, as PIL expects.
    NOTE(review): values are cast straight to uint8, so they are expected in
    0-255 here — the original "between 0-1" comment looks stale; confirm
    with callers.
    """
    assert len(data.shape) == 3  # height * width * channels
    if data.shape[2] == 1:
        data = np.reshape(data, (data.shape[0], data.shape[1]))
    image = Image.fromarray(data.astype(np.uint8))
    image.save(filename)
    return image
|
1674005
|
import csv
import os
import torch
from torch.optim import *
import torchvision
from torchvision.transforms import *
from scipy import stats
from sklearn import metrics
import numpy as np
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Logger(object):
def __init__(self, path, header):
self.log_file = open(path, 'w')
self.logger = csv.writer(self.log_file, delimiter='\t')
self.logger.writerow(header)
self.header = header
def __del(self):
self.log_file.close()
def log(self, values):
write_values = []
for col in self.header:
assert col in values
write_values.append(values[col])
self.logger.writerow(write_values)
self.log_file.flush()
def accuracy(output, target, topk=(1, 5)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values; each k must be <= number of classes.

    Returns:
        (res, pred): list of precision@k percentages (one tensor per k),
        and the (maxk, batch) tensor of top-k predicted class indices.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: use reshape, not view — the sliced mask is
        # non-contiguous and view() raises on recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res, pred
def reverseTransform(img):
    """Undo ImageNet normalization in place (x * std + mean per RGB channel).

    Accepts either a 4-D (N, C, H, W) or 5-D (N, C, T, H, W) tensor and
    returns the same (mutated) tensor.
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    for c in range(3):
        if len(img.shape) == 5:
            img[:, c, :, :, :] = img[:, c, :, :, :] * std[c] + mean[c]
        else:
            img[:, c, :, :] = img[:, c, :, :] * std[c] + mean[c]
    return img
def d_prime(auc):
    """Convert an AUC value into the equivalent d-prime sensitivity index."""
    return stats.norm().ppf(auc) * np.sqrt(2.0)
def calculate_stats(output, target):
    """Calculate statistics including mAP, AUC, etc.

    Args:
        output: 2d array, (samples_num, classes_num)
        target: 2d array, (samples_num, classes_num)

    Returns:
        stats: list of statistic of each class.
    """
    classes_num = target.shape[-1]
    save_every_steps = 1000  # subsample the curves to keep the result small
    stats = []
    # Class-wise statistics
    for k in range(classes_num):
        scores = output[:, k]
        labels = target[:, k]
        avg_precision = metrics.average_precision_score(
            labels, scores, average=None)
        auc = metrics.roc_auc_score(labels, scores, average=None)
        precisions, recalls, _ = metrics.precision_recall_curve(labels, scores)
        fpr, tpr, _ = metrics.roc_curve(labels, scores)
        stats.append({
            'precisions': precisions[0::save_every_steps],
            'recalls': recalls[0::save_every_steps],
            'AP': avg_precision,
            'fpr': fpr[0::save_every_steps],
            # False negative rate, sampled on the same grid as fpr.
            'fnr': 1. - tpr[0::save_every_steps],
            'auc': auc,
        })
    return stats
|
1674022
|
from vesper.tests.test_case import TestCase
from vesper.util.byte_buffer import ByteBuffer
class ByteBufferTests(TestCase):
    """Unit tests for ByteBuffer construction and positioned reads/writes."""

    def test_initializer(self):
        # An int size allocates a fresh buffer; an existing bytearray is
        # wrapped directly. Either way the offset starts at zero.
        for source, expected_len in ((10, 10), (bytearray(12), 12)):
            buf = ByteBuffer(source)
            self.assertEqual(len(buf.bytes), expected_len)
            self.assertEqual(buf.offset, 0)

    def test_reads_and_writes(self):
        for buf in (ByteBuffer(14), ByteBuffer(bytearray(14))):
            self._test_reads_and_writes(buf)

    def _test_reads_and_writes(self, buf):
        # Sequential writes advance the offset past what was written.
        buf.write_bytes(b'test')
        self.assertEqual(buf.offset, 4)
        buf.write_value(17, '<I')
        self.assertEqual(buf.offset, 8)
        # An explicit offset argument repositions before writing.
        buf.write_value(18, '<H', 12)
        self.assertEqual(buf.offset, 14)
        buf.write_value(1.5, '<f', 8)
        self.assertEqual(buf.offset, 12)
        # Rewind and read everything back in the same layout.
        buf.offset = 0
        self.assertEqual(buf.read_bytes(4), b'test')
        self.assertEqual(buf.offset, 4)
        self.assertEqual(buf.read_value('<I'), 17)
        self.assertEqual(buf.offset, 8)
        self.assertEqual(buf.read_value('<H', 12), 18)
        self.assertEqual(buf.offset, 14)
        self.assertEqual(buf.read_value('<f', 8), 1.5)
        self.assertEqual(buf.offset, 12)
|
1674048
|
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
    """
    All scenes + 1.0 reward for getting closer to the target
    Discrete communication with NO discourage failed coordination loss
    """
    # Env/episode config
    num_agents = 2
    screen_size = 84
    episode_class = FurnMoveEgocentricEpisode
    frame_type = "image"
    episode_sampler_class = FurnMoveEpisodeSampler
    visible_agents = True
    include_depth_frame = False
    include_move_obj_actions = True
    headless = False
    # Model config
    state_repr_length = 512
    talk_embed_length = 16
    reply_embed_length = 16
    agent_num_embed_length = 8
    num_talk_symbols = 2
    num_reply_symbols = 2
    coordinate_actions = False
    # Agent config
    agent_class = MultiAgent
    turn_off_communication = False
    # Training config
    max_ep_using_expert_actions = 0
    train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
    valid_scenes = constants.VALID_SCENE_NAMES[5:10]
    use_a3c_loss_when_not_expert_forcing = True
    # Misc (e.g. visualization)
    record_all_in_test = False
    save_talk_reply_probs_path = None
    # Only evaluate during testing when no GPU is available (CPU/dev runs).
    include_test_eval_results = not torch.cuda.is_available()
    return_likely_successfuly_move_actions = False
    # Balancing
    final_cnn_channels = 140
    @classmethod
    def get_init_train_params(cls):
        # Keyword arguments handed to the episode sampler for training.
        init_train_params = {
            "scenes": cls.train_scenes,
            "num_agents": cls.num_agents,
            "object_type": "Television",
            "to_object_type": "Dresser",
            "episode_class": cls.episode_class,
            "player_screen_height": cls.screen_size,
            "player_screen_width": cls.screen_size,
            "max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
            "visible_agents": cls.visible_agents,
            "include_depth_frame": cls.include_depth_frame,
            "object_initial_height": 1.3,
            "headless": cls.headless,
            "max_distance_from_object": 0.76,
            "max_episode_length": 500,
            # Reward/penalty shaping for the episode itself.
            "episode_args": {
                "include_move_obj_actions": cls.include_move_obj_actions,
                "first_correct_coord_reward": 0.0,
                "exploration_bonus": 0.0,
                "failed_action_penalty": -0.02,
                "step_penalty": -0.01,
                # "increasing_rotate_penalty": True,
                "joint_pass_penalty": -0.09,
                "moved_closer_reward": 1.0,
                # "moved_closer_reward": 0.50,
                "min_dist_to_to_object": 0.26,
                "frame_type": cls.frame_type,
                "reached_target_reward": 1.0,
                "return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
                "pass_conditioned_coordination": True,
            },
        }
        return init_train_params
    @classmethod
    def get_init_valid_params(cls):
        # Validation reuses the training params but swaps scenes and renders
        # at a higher resolution with a visible window.
        init_valid_params = {
            **cls.get_init_train_params(),
            "scenes": cls.valid_scenes,
            "player_screen_height": 224,
            "player_screen_width": 224,
            "headless": False,
        }
        if cls.save_talk_reply_probs_path is not None:
            init_valid_params[
                "save_talk_reply_probs_path"
            ] = cls.save_talk_reply_probs_path
        return init_valid_params
    def __init__(self):
        # Pre-built episode samplers for training and testing/validation.
        self._init_train_agent = self.episode_sampler_class(
            **self.get_init_train_params()
        )
        self._init_test_agent = self.episode_sampler_class(
            **self.get_init_valid_params()
        )
    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        # Defaults below may be overridden by anything passed in **kwargs.
        def _create_model(**kwargs):
            return A3CLSTMNStepComCoordinatedActionsEgoVision(
                **{
                    **dict(
                        # RGB (3) plus an optional depth channel.
                        num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
                        action_groups=cls.episode_class.class_available_action_groups(
                            include_move_obj_actions=cls.include_move_obj_actions
                        ),
                        num_agents=cls.num_agents,
                        state_repr_length=cls.state_repr_length,
                        talk_embed_length=cls.talk_embed_length,
                        agent_num_embed_length=cls.agent_num_embed_length,
                        reply_embed_length=cls.reply_embed_length,
                        num_talk_symbols=cls.num_talk_symbols,
                        num_reply_symbols=cls.num_reply_symbols,
                        turn_off_communication=cls.turn_off_communication,
                        coordinate_actions=cls.coordinate_actions,
                        coordinate_actions_dim=13 if cls.coordinate_actions else None,
                        separate_actor_weights=False,
                        final_cnn_channels=cls.final_cnn_channels,
                    ),
                    **kwargs,
                }
            )
        return _create_model(**kwargs)
    @classmethod
    def create_agent(cls, **kwargs) -> MultiAgent:
        # Wrap the model in the configured agent class; caller supplies
        # the model and gpu_id.
        return cls.agent_class(
            model=kwargs["model"],
            gpu_id=kwargs["gpu_id"],
            include_test_eval_results=cls.include_test_eval_results,
            use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
            record_all_in_test=cls.record_all_in_test,
            include_depth_frame=cls.include_depth_frame,
            resize_image_as=cls.screen_size,
        )
    @property
    def init_train_agent(self) -> Callable:
        return self._init_train_agent
    @property
    def init_test_agent(self) -> Callable:
        return self._init_test_agent
    @property
    def saved_model_path(self) -> Optional[str]:
        # No pretrained checkpoint for this experiment.
        return None
def get_experiment():
    """Entry point used by the training harness to obtain the experiment config."""
    config = FurnMoveExperimentConfig()
    return config
|
1674059
|
from paste.response import *
def test_replace_header():
    """replace_header appends a missing header and replaces an existing one."""
    headers = [('content-type', 'text/plain'),
               ('x-blah', 'foobar')]
    # Absent header: appended at the end of the list.
    replace_header(headers, 'content-length', '10')
    assert headers[-1] == ('content-length', '10')
    # Present header (queried with different case): replaced, old value gone.
    replace_header(headers, 'Content-Type', 'text/html')
    assert ('content-type', 'text/html') in headers
    assert ('content-type', 'text/plain') not in headers
|
1674066
|
import numpy as np

# Reference NumPy computation of a sigmoid-gated, gamma-blended activation:
#   z = (x - mean) / sqrt(var);  s = sigmoid(z)
#   y = x * (s + gamma * (1 - s))
# NOTE(review): matches item1 + item2 below; confirm against the op under test.
a = [[0.1, 0.1, 0.1],
     [0.1, 0.2, 0.3]]
gamma = [[0.01, 0.02, 0.03]]
mean = [[0.09, 0.08, 0.07]]
var = [[0.0001, 0.0003, 0.0004]]
# Normalize the input with the supplied statistics.
z = np.subtract(a, mean)
s = np.sqrt(var)
normed = np.divide(z, s)
# Elementwise logistic sigmoid of `normed`.
exp_normed = np.exp(np.multiply(-1, normed))
sigmoid_normed = np.divide(1, np.add(1, exp_normed))
# Blend the gamma-scaled path with the plain sigmoid-gated path.
item1 = np.subtract(1, sigmoid_normed)
item1 = np.multiply(item1, gamma)
item1 = np.multiply(item1, a)
item2 = np.multiply(sigmoid_normed, a)
y = np.add(item1, item2)
# BUG FIX: was a Python 2 `print y` statement (SyntaxError on Python 3).
# Also removed the dead `np.ones` placeholders that were immediately
# overwritten by the list literals above.
print(y)
|
1674089
|
from __future__ import unicode_literals
from django.apps import AppConfig
class VideokitConfig(AppConfig):
    """Django application config for the `videokit` app.

    The upper-case attributes are app-level default settings; presumably
    read elsewhere in the package (confirm against the settings lookup).
    """
    name = 'videokit'
    # Dotted path to the default cache backend implementation.
    DEFAULT_VIDEO_CACHE_BACKEND = 'videokit.cache.VideoCacheBackend'
    # Storage-relative directory for generated (cached) video files.
    VIDEOKIT_CACHEFILE_DIR = 'CACHE/videos'
    # Directory name used for in-progress temporary files.
    VIDEOKIT_TEMP_DIR = 'videokit-temp'
    # Output container formats the app will generate.
    VIDEOKIT_SUPPORTED_FORMATS = ['mp4', 'ogg', 'webm']
    VIDEOKIT_DEFAULT_FORMAT = 'mp4'
|
1674119
|
from pwnypack.shellcode.arm.thumb import ARMThumb
__all__ = ['ARMThumbMixed']
class ARMThumbMixed(ARMThumb):
    """
    Environment that targets a generic, unrestricted ARM architecture that
    switches to the Thumb instruction set.
    """
    # Assembled in ARM state first; the two-instruction trampoline switches
    # the CPU into Thumb state before the Thumb payload at __thumbcode runs.
    PREAMBLE = [
        '.global _start',
        '.arm',
        '_start:',
        # r0 = pc + 1: the odd (bit-0 set) target address makes BX switch
        # the processor into Thumb state when it branches.
        '\tadd r0, pc, #1',
        '\tbx r0',
        '.thumb',
        '__thumbcode:',
    ]
|
1674122
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from qanet.position_encoding import PositionEncoding
from qanet.layer_norm import LayerNorm1d
from qanet.depthwise_separable_conv import DepthwiseSeparableConv1d
from qanet.self_attention import SelfAttention
class EncoderBlock(nn.Module):
    """QANet-style encoder block: position encoding, `n_conv` convolutions,
    self-attention and a pointwise feed-forward layer, each wrapped in
    pre-layer-norm + residual with stochastic layer dropout.

    `start_index`/`total_layers` (see forward) scale the per-layer drop
    probability linearly with depth across the whole stack of blocks.
    """
    def __init__(self, n_conv, kernel_size=7, padding=3, n_filters=128, n_heads=8, conv_type='depthwise_separable'):
        super(EncoderBlock, self).__init__()
        self.n_conv = n_conv
        self.n_filters = n_filters
        self.position_encoding = PositionEncoding(n_filters=n_filters)
        # One layer norm per conv layer, plus one each for the attention
        # and feed-forward sublayers (hence n_conv + 2).
        # self.layer_norm = LayerNorm1d(n_features=n_filters)
        self.layer_norm = nn.ModuleList([LayerNorm1d(n_features=n_filters) for i in range(n_conv+2)])
        self.conv = nn.ModuleList([DepthwiseSeparableConv1d(n_filters,
                                                            kernel_size=kernel_size,
                                                            padding=padding) for i in range(n_conv)])
        self.self_attention = SelfAttention(n_heads, n_filters)
        # Pointwise (kernel_size=1) convolution acts as the FFN sublayer.
        self.fc = nn.Conv1d(n_filters, n_filters, kernel_size=1)
    def layer_dropout(self, inputs, residual, dropout):
        # Stochastic depth: during training, with probability `dropout` the
        # sublayer output is skipped entirely and only the residual is kept;
        # otherwise dropout(p=0.1) is applied before the residual add.
        if self.training:
            if torch.rand(1) > dropout:
                outputs = F.dropout(inputs, p=0.1, training=self.training)
                return outputs + residual
            else:
                return residual
        else:
            return inputs + residual
    def forward(self, x, mask, start_index, total_layers):
        # x: (batch, n_filters, seq_len) — TODO confirm against callers.
        outputs = self.position_encoding(x)
        # convolutional layers
        for i in range(self.n_conv):
            residual = outputs
            outputs = self.layer_norm[i](outputs)
            # Extra dropout on every other conv sublayer.
            if i % 2 == 0:
                outputs = F.dropout(outputs, p=0.1, training=self.training)
            outputs = F.relu(self.conv[i](outputs))
            # layer dropout; drop probability grows with overall layer index
            outputs = self.layer_dropout(outputs, residual, (0.1 * start_index / total_layers))
            start_index += 1
        # self attention (expects (batch, seq, features), hence the permutes)
        residual = outputs
        outputs = self.layer_norm[-2](outputs)
        outputs = F.dropout(outputs, p=0.1, training=self.training)
        outputs = outputs.permute(0, 2, 1)
        outputs = self.self_attention(outputs, mask)
        outputs = outputs.permute(0, 2, 1)
        outputs = self.layer_dropout(outputs, residual, 0.1 * start_index / total_layers)
        start_index += 1
        # fully connected layer
        residual = outputs
        outputs = self.layer_norm[-1](outputs)
        outputs = F.dropout(outputs, p=0.1, training=self.training)
        outputs = self.fc(outputs)
        outputs = self.layer_dropout(outputs, residual, 0.1 * start_index / total_layers)
        return outputs
|
1674148
|
import sys
import random
from dqn import Agent
import numpy as np
class MockEnv:
    """Stand-in for a gym-style environment: 10 actions, all-zero observations."""

    def __init__(self, env_name):
        self.action_space = MockActionSpace(10)
        self.observation_space = MockObservationSpace((1, 1, 1))

    def reset(self):
        return self.random_observation()

    def step(self, action):
        print("stepping")
        # Constant reward of 5; episode ends with probability 1/1001 per step.
        done = random.randint(0, 1000) == 555
        return self.random_observation(), 5, done, None

    def random_observation(self):
        # Deterministic all-zero "observation", despite the name.
        return np.zeros((1, 1, 1, 1))
class MockActionSpace:
    """Minimal action-space stub exposing only the action count `n`."""

    def __init__(self, n):
        self.n = n
class MockObservationSpace:
    """Minimal observation-space stub exposing only `shape`."""

    def __init__(self, shape):
        self.shape = shape
num_episodes = 20
# Env name from argv so the same harness can target any (mock or real) env.
env_name = sys.argv[1] if len(sys.argv) > 1 else "MsPacman-v0"
env = MockEnv(env_name)
agent = Agent(state_size=env.observation_space.shape,
              number_of_actions=env.action_space.n,
              save_name=env_name)
# Run episodes until the env reports done, tracking reward and training cost.
for e in range(num_episodes):
    observation = env.reset()
    done = False
    agent.new_episode()
    total_cost = 0.0
    total_reward = 0.0
    frame = 0
    while not done:
        frame += 1
        #env.render()
        action, values = agent.act(observation)
        print(action)
        #action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        # observe() returns this step's training cost.
        total_cost += agent.observe(reward)
        total_reward += reward
    print("total reward {}".format(total_reward))
    print("mean cost {}".format(total_cost/frame))
|
1674154
|
from mongokat import Document, Collection
class ShortNamesDocument(Document):
    """Document subclass that resolves a few long alias names to the short
    keys actually stored in MongoDB, as suggested in
    https://github.com/pricingassistant/mongokat/issues/13

    The aliases apply only to item access on the document itself — they
    don't work in queries, field name lists, or dict(doc). Further
    subclassing would be necessary for that to work. Pull Requests welcome,
    though we won't include that in MongoKat itself.
    """
    short_names = {
        "description": "d",
        "value": "v"
    }

    def __getitem__(self, key):
        # Translate the alias (if any) before the dict-style lookup.
        return self.get(self.short_names.get(key, key))

    def __setitem__(self, key, value):
        dict.__setitem__(self, self.short_names.get(key, key), value)
class ShortNamesCollection(Collection):
    # Collection bound to the alias-aware ShortNamesDocument class.
    document_class = ShortNamesDocument
def test_shortnames(db):
    """Aliased writes land under the short key in MongoDB and read back
    through both the alias and the short name."""
    db.test_shortnames.drop()
    SN = ShortNamesCollection(collection=db.test_shortnames)
    # Non-aliased fields pass through untouched.
    doc = SN({"regular": "1"})
    doc.save()
    docs = list(SN.find())
    print(docs)
    assert len(docs) == 1
    assert docs[0]["regular"] == "1"
    # Writing via the alias "value" must store under "v".
    docs[0]["value"] = "2"
    docs[0].save()
    docs = list(SN.find())
    print(docs)
    print(dict(docs[0]))
    assert len(docs) == 1
    assert docs[0]["value"] == "2"
    assert docs[0]["v"] == "2"
    # Bypass mongokat to see the real document
    raw_docs = list(db.test_shortnames.find())
    assert len(raw_docs) == 1
    assert "value" not in raw_docs[0]
    assert raw_docs[0]["v"] == "2"
|
1674184
|
import torch
class EstimateAffineParams(torch.nn.Module):
    """
    Layer to estimate the parameters of an affine transformation matrix,
    which would transform the ``mean_shape`` into a given shape
    """
    def __init__(self, mean_shape):
        """
        Parameters
        ----------
        mean_shape : :class:`torch.Tensor`
            The mean shape, of shape (num_points, 2)
        """
        super().__init__()
        self.register_buffer("mean_shape", mean_shape)

    def forward(self, transformed_shape):
        """
        actual parameter estimation

        Parameters
        ----------
        transformed_shape : :class:`torch.Tensor`
            the target shape, flattened to (batch, num_points * 2)

        Returns
        -------
        :class:`torch.Tensor`
            the estimated parameters, shape (batch, 6): the 2x2 similarity
            matrix in row-major order followed by the translation vector
        """
        source = transformed_shape.view((transformed_shape.size(0), -1, 2))
        batch_size = source.size(0)
        # Center both shapes on their respective centroids.
        dst_mean = self.mean_shape.mean(dim=0)
        src_mean = source.mean(dim=1)
        dst_mean = dst_mean.unsqueeze(dim=0)
        src_mean = src_mean.unsqueeze(dim=1)
        src_vec = (source - src_mean).view(batch_size, -1)
        dest_vec = (self.mean_shape - dst_mean).view(-1)
        dest_vec = dest_vec.expand(batch_size, *dest_vec.shape)
        # IMPROVED: vectorized per-sample norm (was an explicit Python loop);
        # the destination norm was computed but never used, so it is dropped.
        src_norm = src_vec.norm(p=2, dim=1)
        # Least-squares similarity parameters a (~scale*cos) and b (~scale*sin).
        a = torch.bmm(src_vec.view(batch_size, 1, -1),
                      dest_vec.view(batch_size, -1, 1)).squeeze()/src_norm**2
        b = 0
        for i in range(self.mean_shape.shape[0]):
            b += src_vec[:, 2*i] * dest_vec[:, 2*i+1] - \
                src_vec[:, 2*i+1] * dest_vec[:, 2*i]
        b = b / src_norm**2
        A = torch.zeros((batch_size, 2, 2), device=a.device)
        A[:, 0, 0] = a
        A[:, 0, 1] = b
        A[:, 1, 0] = -b
        A[:, 1, 1] = a
        # Translation maps the transformed source centroid onto the target's.
        src_mean = torch.bmm(src_mean.view(batch_size, 1, -1), A)
        out = torch.cat(
            (A.view(batch_size, -1), (dst_mean - src_mean).view(batch_size, -1)), 1)
        return out
|
1674215
|
from odoo import models, fields, api
from odoo import exceptions
import logging
_logger = logging.getLogger(__name__)
class TodoWizard(models.TransientModel):
    """Wizard to mass-update deadline and/or responsible user on To-do tasks."""
    _name = 'todo.wizard'
    _description = 'To-do Mass Assignment'
    task_ids = fields.Many2many('todo.task', string='Tasks')
    new_deadline = fields.Date('Set Deadline')
    new_user_id = fields.Many2one('res.users', string='Set Responsible')
    @api.multi
    def do_mass_update(self):
        """Write the chosen deadline/responsible onto all selected tasks."""
        self.ensure_one()
        if not self.new_deadline and not self.new_user_id:
            raise exceptions.ValidationError('No data to update!')
        _logger.debug('Mass update on Todo Tasks %s' % self.task_ids)
        # Values to Write
        vals = {}
        if self.new_deadline:
            vals['date_deadline'] = self.new_deadline
        if self.new_user_id:
            vals['user_id'] = self.new_user_id
        # Mass write values on all selected tasks
        if vals:
            self.task_ids.write(vals)
        return True
    @api.multi
    def do_count_tasks(self):
        """Show the count of open tasks (Warning used as a message box here)."""
        Task = self.env['todo.task']
        count = Task.search_count([('is_done', '=', False)])
        raise exceptions.Warning('Counted %d to-do tasks.' % count)
    @api.multi
    def _reopen_form(self):
        """Return a window action that reopens this same wizard record."""
        self.ensure_one()
        action = {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'res_id': self.id,
            'view_type': 'form',
            'view_mode': 'form',
            'target': 'new',
        }
        return action
    @api.multi
    def do_populate_tasks(self):
        """Fill task_ids with every open task, then reopen the wizard form."""
        # BUG FIX: removed leftover debugger breakpoint
        # (`import pudb; pudb.set_trace()`), which froze the server on call.
        self.ensure_one()
        Task = self.env['todo.task']
        open_tasks = Task.search([('is_done', '=', False)])
        self.task_ids = open_tasks
        # reopen wizard form on same wizard record
        return self._reopen_form()
|
1674226
|
from datetime import datetime, timedelta, timezone
import logging
import pickle
from unittest.mock import Mock
import pytest
import trio
from . import AsyncMock, asyncio_loop, fail_after
from starbelly.frontier import FrontierExhaustionError
from starbelly.job import (
PipelineTerminator,
RunState,
StatsTracker,
CrawlManager,
)
logger = logging.getLogger(__name__)
def make_policy_doc():
    """Build a fixture policy document shaped like a stored crawl policy."""
    timestamp = datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
    return {
        'id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
        'name': 'Test Policy',
        'created_at': timestamp,
        'updated_at': timestamp,
        'authentication': {'enabled': True},
        'limits': {'max_cost': 10, 'max_duration': 3600, 'max_items': 10_000},
        'mime_type_rules': [
            {'match': 'MATCHES', 'pattern': '^text/', 'save': True},
            {'save': False},
        ],
        'proxy_rules': [],
        'robots_txt': {'usage': 'IGNORE'},
        'url_normalization': {
            'enabled': True,
            'strip_parameters': ['PHPSESSID'],
        },
        'url_rules': [
            {'action': 'ADD', 'amount': 1, 'match': 'MATCHES',
             'pattern': '^https?://({SEED_DOMAINS})/'},
            {'action': 'MULTIPLY', 'amount': 0},
        ],
        'user_agents': [{'name': 'Test User Agent'}],
    }
@fail_after(3)
async def test_start_job(asyncio_loop, nursery):
    """A job started on an empty frontier runs and immediately completes."""
    # Set up fixtures
    job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    policy_id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
    rate_limiter = Mock()
    rate_limiter.remove_job = AsyncMock()
    stats_tracker = StatsTracker(timedelta(seconds=60))
    robots_txt_manager = Mock()
    # DB stubs: AsyncMock(x) resolves to x when awaited.
    manager_db = Mock()
    manager_db.clear_frontier = AsyncMock()
    manager_db.create_job = AsyncMock(job_id)
    manager_db.finish_job = AsyncMock()
    manager_db.get_max_sequence = AsyncMock(100)
    manager_db.get_policy = AsyncMock(make_policy_doc())
    manager_db.run_job = AsyncMock()
    frontier_db = Mock()
    frontier_db.any_in_flight = AsyncMock(False)
    # Empty frontier batch -> the job exhausts immediately.
    frontier_db.get_frontier_batch = AsyncMock({})
    frontier_db.get_frontier_size = AsyncMock(0)
    frontier_db.run = AsyncMock()
    extractor_db = Mock()
    storage_db = Mock()
    login_db = Mock()
    crawl_manager = CrawlManager(rate_limiter, stats_tracker,
        robots_txt_manager, manager_db, frontier_db, extractor_db, storage_db,
        login_db)
    # Run the crawl manager and start a new job
    await nursery.start(crawl_manager.run)
    await crawl_manager.start_job('Test Job', ['https://seed.example'],
        ['tag1'], policy_id)
    # Wait for the crawler to tell us that the job is running.
    recv_channel = crawl_manager.get_job_state_channel()
    state_event = await recv_channel.receive()
    assert state_event.run_state == RunState.RUNNING
    resources = crawl_manager.get_resource_usage()
    assert resources['maximum_downloads'] == 20
    assert resources['current_downloads'] == 0
    assert resources['jobs'][0]['id'] == job_id
    assert resources['jobs'][0]['name'] == 'Test Job'
    assert resources['jobs'][0]['current_downloads'] == 0
    # The job has an empty frontier, so it will quit immediately after starting.
    # Wait for the completed job state.
    state_event = await recv_channel.receive()
    assert state_event.run_state == RunState.COMPLETED
    # Make sure the manager interacted with other objects correctly.
    assert manager_db.clear_frontier.call_args[0] == job_id
    assert manager_db.finish_job.call_args[0] == job_id
    assert manager_db.finish_job.call_args[1] == RunState.COMPLETED
    assert manager_db.get_policy.call_args[0] == policy_id
    assert manager_db.run_job.call_args[0] == job_id
    assert frontier_db.get_frontier_batch.call_args[0] == job_id
    stats = stats_tracker.snapshot()
    assert stats[0]['id'] == job_id
    # NOTE(review): '<NAME>' looks like a scrubbed placeholder — the job was
    # started as 'Test Job'; confirm the intended expected value.
    assert stats[0]['name'] == '<NAME>'
    assert stats[0]['run_state'] == RunState.COMPLETED
    assert stats[0]['seeds'] == ['https://seed.example']
    assert stats[0]['tags'] == ['tag1']
@fail_after(3)
async def test_pause_resume_cancel(asyncio_loop, nursery):
    """Drive one job through pause -> resume -> cancel and verify DB calls."""
    # Set up fixtures
    job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    created_at = datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
    # Stored form of a paused job; `old_urls` is a pickled set of URL hashes.
    job_doc = {
        'id': job_id,
        'name': '<NAME>',
        'seeds': ['https://seed1.example', 'https://seed2.example'],
        'tags': [],
        'run_state': RunState.PAUSED,
        'old_urls': b'\x80\x03cbuiltins\nset\nq\x00]q\x01C\x10\xad\xb6\x93\x9b'
                    b'\xac\x92\xd8\xfd\xc0\x8dJ\x94^\x8d\xe5~q\x02a\x85q\x03Rq'
                    b'\x04.',
        'started_at': created_at,
        'completed_at': None,
        'duration': None,
        'item_count': 0,
        'http_success_count': 0,
        'http_error_count': 0,
        'exception_count': 0,
        'http_status_counts': {},
        'schedule_id': 'cccccccc-cccc-cccc-cccc-cccccccccccc',
        'policy': {
            'id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
            'name': 'Test Policy',
            'created_at': created_at,
            'updated_at': created_at,
            'authentication': {
                'enabled': False,
            },
            'captcha_solver_id': None,
            'limits': {
                'max_cost': 10,
                'max_duration': 3600,
                'max_items': 10_000,
            },
            'mime_type_rules': [
                {'match': 'MATCHES', 'pattern': '^text/', 'save': True},
                {'save': False},
            ],
            'proxy_rules': [],
            'robots_txt': {
                'usage': 'IGNORE',
            },
            'url_normalization': {
                'enabled': True,
                'strip_parameters': [],
            },
            'url_rules': [
                {'action': 'ADD', 'amount': 1, 'match': 'MATCHES',
                 'pattern': '^https?://({SEED_DOMAINS})/'},
                {'action': 'MULTIPLY', 'amount': 0},
            ],
            'user_agents': [
                {'name': 'Test User Agent'}
            ],
        },
    }
    rate_limiter = Mock()
    rate_limiter.remove_job = AsyncMock()
    stats_tracker = StatsTracker(timedelta(seconds=60))
    robots_txt_manager = Mock()
    # DB stubs: AsyncMock(x) resolves to x when awaited.
    manager_db = Mock()
    manager_db.clear_frontier = AsyncMock()
    manager_db.create_job = AsyncMock(job_id)
    manager_db.finish_job = AsyncMock()
    manager_db.get_max_sequence = AsyncMock(100)
    manager_db.get_policy = AsyncMock(make_policy_doc())
    manager_db.resume_job = AsyncMock(job_doc)
    manager_db.pause_job = AsyncMock()
    manager_db.run_job = AsyncMock()
    frontier_db = Mock()
    # In-flight requests keep the job alive long enough to pause it.
    frontier_db.any_in_flight = AsyncMock(True)
    frontier_db.get_frontier_batch = AsyncMock({})
    frontier_db.get_frontier_size = AsyncMock(0)
    frontier_db.run = AsyncMock()
    extractor_db = Mock()
    storage_db = Mock()
    login_db = Mock()
    crawl_manager = CrawlManager(rate_limiter, stats_tracker,
        robots_txt_manager, manager_db, frontier_db, extractor_db, storage_db,
        login_db)
    # Run the crawl manager and start a new job
    await nursery.start(crawl_manager.run)
    await crawl_manager.start_job(job_doc['name'], job_doc['seeds'],
        job_doc['tags'], job_doc['policy']['id'])
    # Wait for the crawler to tell us that the job is running.
    recv_channel = crawl_manager.get_job_state_channel()
    state_event = await recv_channel.receive()
    assert state_event.run_state == RunState.RUNNING
    # Now pause and wait for the paused event.
    await crawl_manager.pause_job(job_id)
    state_event = await recv_channel.receive()
    assert state_event.run_state == RunState.PAUSED
    assert manager_db.pause_job.call_args[0] == job_id
    # There are two "old URLs": the seed URLs.
    assert len(pickle.loads(manager_db.pause_job.call_args[1])) == 2
    assert stats_tracker.snapshot()[0]['run_state'] == RunState.PAUSED
    # Now resume and wait for the running event.
    await crawl_manager.resume_job(job_id)
    state_event = await recv_channel.receive()
    assert state_event.run_state == RunState.RUNNING
    assert manager_db.resume_job.call_args[0] == job_id
    # Now cancel and wait for the cancelled event
    await crawl_manager.cancel_job(job_id)
    state_event = await recv_channel.receive()
    assert state_event.run_state == RunState.CANCELLED
    assert manager_db.finish_job.call_args[0] == job_id
    assert manager_db.finish_job.call_args[1] == RunState.CANCELLED
|
1674256
|
from pygsti.processors import QubitProcessorSpec
from ..testutils import BaseTestCase
class ProcessorSpecCase(BaseTestCase):
    """Smoke tests for QubitProcessorSpec construction with standard gate names."""

    def test_processorspec(self):
        gate_names = ['Gh', 'Gp', 'Gxpi', 'Gypi', 'Gzpi', 'Gpdag', 'Gcphase', 'Gi']
        # Multi-qubit spec with an explicit line geometry.
        QubitProcessorSpec(3, gate_names=gate_names, geometry='line')
        # Single-qubit spec (a special case which could break);
        # no geometry is needed for 1-qubit specs.
        QubitProcessorSpec(1, gate_names=gate_names)
|
1674257
|
import pytest
pytest.importorskip("requests")
pytest.importorskip("requests.exceptions")
def test_load_module():
    # Smoke test: the module must at least be importable without error.
    __import__("modules.contrib.getcrypto")
|
1674312
|
from telium.constant import *
from telium.payment import TeliumAsk, TeliumResponse, LrcChecksumException, SequenceDoesNotMatchLengthException
from telium.manager import *
from telium.version import __version__, VERSION
|
1674326
|
from typing import Generator
from typing import Iterator
from typing import Tuple
import numpy
from ..Spectrum import Spectrum
def parse_msp_file(filename: str) -> Generator[dict, None, None]:
    """Read msp file and parse info in list of spectrum dictionaries.

    Yields one dict per record with keys 'params', 'm/z array' and
    'intensity array'. A record is considered complete once the number of
    accumulated peaks equals the record's 'num peaks' metadata value.
    """
    # Lists/dicts that will contain all params, masses and intensities of each molecule
    params = {}
    masses = []
    intensities = []
    # Peaks counter. Used to track and count the number of peaks
    peakscount = 0
    with open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            rline = line.rstrip()
            if len(rline) == 0:
                continue
            if contains_metadata(rline):
                parse_metadata(rline, params)
            else:
                # Obtaining the masses and intensities
                peak_pairs = get_peak_tuples(rline)
                for peak in peak_pairs:
                    mz, intensity = get_peak_values(peak)
                    peakscount += 1
                    masses.append(mz)
                    intensities.append(intensity)
                # Record complete: emit it and reset the accumulators for
                # the next molecule in the file.
                if int(params['num peaks']) == peakscount:
                    peakscount = 0
                    yield {
                        'params': (params),
                        'm/z array': numpy.array(masses),
                        'intensity array': numpy.array(intensities)
                    }
                    params = {}
                    masses = []
                    intensities = []
def get_peak_values(peak: str) -> Tuple[float, float]:
    """Split one 'mz intensity' peak entry into a pair of floats.

    Anything past the second whitespace-separated token is ignored.
    """
    parts = peak.split(maxsplit=2)
    return float(parts[0].strip()), float(parts[1].strip())
def get_peak_tuples(rline: str) -> Iterator[str]:
    """Split a peak line on ';', dropping empty chunks and trimming whitespace."""
    return (chunk.strip() for chunk in rline.split(";") if chunk)
def parse_metadata(rline: str, params: dict):
    """Parse one 'Key: value' metadata line into `params` (keys lower-cased).

    A 'Comments' line packs additional metadata as space-separated quoted
    '"key=value"' pairs; each pair is stored individually. A second SMILES
    entry is kept under 'smiles_2' instead of overwriting the first.
    """
    parts = rline.split(":", 1)
    field = parts[0].lower()
    if field != 'comments':
        params[field] = parts[1].strip()
        return
    # Strip the leading ' "' and trailing '"' then split the quoted pairs.
    for entry in parts[1][2:-1].split('" "'):
        kv = entry.split("=", 1)
        name = kv[0].lower()
        if name in params.keys() and name == 'smiles':
            params[name + "_2"] = kv[1].strip()
        else:
            params[name] = kv[1].strip()
def contains_metadata(rline: str) -> bool:
    """True when the line looks like metadata (has a ':') rather than peaks."""
    return rline.find(':') >= 0
def load_from_msp(filename: str) -> Generator[Spectrum, None, None]:
    """
    MSP file to a :py:class:`~matchms.Spectrum.Spectrum` objects

    Function that reads a .msp file and converts the info
    in :py:class:`~matchms.Spectrum.Spectrum` objects.

    Parameters
    ----------
    filename:
        Path of the msp file.

    Yields
    ------
    Yield a spectrum object with the data of the msp file


    Example:

    .. code-block:: python

        from matchms.importing import load_from_msp

        # Download msp file from MassBank of North America repository at https://mona.fiehnlab.ucdavis.edu/
        file_msp = "MoNA-export-GC-MS-first10.msp"
        spectrums = list(load_from_msp(file_msp))
    """
    for spectrum in parse_msp_file(filename):
        metadata = spectrum.get("params", None)
        mz = spectrum["m/z array"]
        intensities = spectrum["intensity array"]
        # Sort by mz (if not sorted already) — presumably Spectrum expects
        # ascending m/z; confirm against the Spectrum constructor.
        if not numpy.all(mz[:-1] <= mz[1:]):
            idx_sorted = numpy.argsort(mz)
            mz = mz[idx_sorted]
            intensities = intensities[idx_sorted]
        yield Spectrum(mz=mz, intensities=intensities, metadata=metadata)
|
1674379
|
from django.test import TestCase
from ringapp.models import Logic, PropertySide, Ring, RingProperty
from ringapp.LogicUtils import LogicEngine, LogicError
from model_mommy import mommy
# Module-level engine shared by every test below; tests call reinitialize()
# before processing a ring, so sharing one instance is intentional.
log_eng = LogicEngine()
# Meaning of the PropertySide.side codes used in setUp:
# [(0, ''),
# (1, 'left and right'),
# (2, 'left'),
# (3, 'right'),
# (4, 'left or right'), ]
class LogicTestCase(TestCase):
    """Exercise LogicEngine deduction over sided ring properties.

    Each test creates one Logic entry (hypotheses ``hyps`` implying
    conclusions ``concs``), seeds the database with a RingProperty fact,
    runs the shared ``log_eng`` over ``self.ring`` and asserts that the
    expected forward deduction (or backward contrapositive) was stored.
    ``variety=0`` appears to be a plain implication and ``variety=1`` an
    equivalence — TODO confirm against the Logic model definition.
    """
    def setUp(self):
        """Create a noncommutative ring (so left/right differ) and four property sides."""
        self.ring = Ring.objects.create(name='joe', is_commutative=False)
        self.ps1 = mommy.make(PropertySide, side=3)  # right
        self.ps2 = mommy.make(PropertySide, side=3)  # right
        self.ps3 = mommy.make(PropertySide, side=0)  # symmetric
        self.ps3.property.symmetric = True
        self.ps3.property.save()
        self.ps4 = mommy.make(PropertySide, side=4)  # left or right

    def test_merge_if_possible(self):
        """Compatible dicts merge to the same result as dict.update."""
        d1 = {1: True, 2: True, 3: False}
        d2 = {1: True, 2: True, 4: False, 5: True}
        d3 = log_eng.merge_if_possible(d1, d2)
        d1.update(d2)
        self.assertEqual(d1, d3)

    def test_merge_if_possible_neg(self):
        """Conflicting values for the same key raise LogicError."""
        d1 = {1: True, 2: True}
        d2 = {1: True, 2: False}
        with self.assertRaises(LogicError):
            log_eng.merge_if_possible(d1, d2)

    def test_simple_same_side_forward(self):
        """ps1 -> ps2: having ps1 on both sides deduces ps2 on both sides."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True,
                                    has_on_left=True)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps2.property,
                                            has_on_right=True,
                                            has_on_left=True)
        self.assertTrue(check.exists())

    def test_simple_same_side_backward(self):
        """ps1 -> ps2: lacking ps2 deduces (contrapositive) lacking ps1."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps2.property,
                                    has_on_right=False,
                                    has_on_left=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps1.property,
                                            has_on_right=False,
                                            has_on_left=False)
        self.assertTrue(check.exists())

    def test_simple_exception(self):
        """ps1 -> ps2 with ps1 present and ps2 absent is contradictory."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True)
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps2.property,
                                    has_on_right=False)
        log_eng.reinitialize()
        log_eng.load_ringproperties(self.ring)
        with self.assertRaises(LogicError):
            log_eng.apply_logic(log, self.ring, {})

    def test_simple_equivalence_forward(self):
        """ps1 <-> ps2: ps1's right/left status copies onto ps2."""
        log = Logic.objects.create(symmetric=False,
                                   variety=1)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True,
                                    has_on_left=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps2.property,
                                            has_on_right=True,
                                            has_on_left=False)
        self.assertTrue(check.exists())

    def test_simple_equivalence_backward(self):
        """ps1 <-> ps2: ps2's status copies back onto ps1."""
        log = Logic.objects.create(symmetric=False,
                                   variety=1)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps2.property,
                                    has_on_right=False,
                                    has_on_left=True)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps1.property,
                                            has_on_right=False,
                                            has_on_left=True)
        self.assertTrue(check.exists())

    def test_simple_equivalence_exception(self):
        """ps1 <-> ps2 with opposite stored truth values is contradictory."""
        log = Logic.objects.create(symmetric=False,
                                   variety=1)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=False)
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps2.property,
                                    has_on_right=True)
        log_eng.reinitialize()
        log_eng.load_ringproperties(self.ring)
        with self.assertRaises(LogicError):
            log_eng.apply_logic(log, self.ring, {})

    def test_one_implies_two_pos(self):
        """ps1 -> (ps2 and ps3): both conclusions are deduced; the symmetric ps3 lands on both sides."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, self.ps3])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True,
                                    has_on_left=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check1 = RingProperty.objects.filter(ring=self.ring,
                                             property=self.ps2.property,
                                             has_on_right=True)
        check2 = RingProperty.objects.filter(ring=self.ring,
                                             property=self.ps3.property,
                                             has_on_right=True,
                                             has_on_left=True)
        self.assertTrue(check1.exists() and check2.exists())

    def test_one_implies_two_neg1(self):
        """ps1 -> (ps2 and ps3): failing ps2 refutes ps1."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, self.ps3])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps2.property,
                                    has_on_right=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps1.property,
                                            has_on_right=False)
        self.assertTrue(check.exists())

    def test_one_implies_two_neg2(self):
        """ps1 -> (ps2 and ps3): failing ps3 also refutes ps1."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps2, self.ps3])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps3.property,
                                    has_on_right=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps1.property,
                                            has_on_right=False)
        self.assertTrue(check.exists())

    def test_two_implies_one_pos(self):
        """(ps1 and ps3) -> ps2: both hypotheses present deduces ps2."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, self.ps3])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps3.property,
                                    has_on_right=True)
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps2.property,
                                            has_on_right=True)
        self.assertTrue(check.exists())

    def test_two_implies_one_neg(self):
        """(ps1 and ps3) -> ps2: ps1 present and ps2 absent refutes ps3."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, self.ps3])
        log.concs.set([self.ps2, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps2.property,
                                    has_on_right=False)
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps3.property,
                                            has_on_right=False)
        self.assertTrue(check.exists())

    def test_or_implies_one(self):
        """(ps4 left-or-right) -> ps1: satisfying the 'or' on one side deduces ps1."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps4])
        log.concs.set([self.ps1, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps4.property,
                                    has_on_right=True,
                                    has_on_left=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps1.property,
                                            has_on_right=True)
        self.assertTrue(check.exists())

    def test_or_implies_one_neg(self):
        """(ps4 left-or-right) -> ps1: lacking ps1 refutes ps4 on both sides."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps4])
        log.concs.set([self.ps1, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps4.property,
                                            has_on_right=False,
                                            has_on_left=False)
        self.assertTrue(check.exists())

    def test_one_implies_or(self):
        """ps1 -> (ps4 left-or-right): with the right side ruled out, the left must hold."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps4, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps1.property,
                                    has_on_right=True)
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps4.property,
                                    has_on_right=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps4.property,
                                            has_on_right=False,
                                            has_on_left=True)
        self.assertTrue(check.exists())

    def test_one_implies_or_neg(self):
        """ps1 -> (ps4 left-or-right): ps4 failing on both sides refutes ps1 on both."""
        log = Logic.objects.create(symmetric=False,
                                   variety=0)
        log.hyps.set([self.ps1, ])
        log.concs.set([self.ps4, ])
        log.save()
        RingProperty.objects.create(ring=self.ring,
                                    property=self.ps4.property,
                                    has_on_right=False,
                                    has_on_left=False)
        log_eng.reinitialize()
        log_eng.process_ring(self.ring)
        check = RingProperty.objects.filter(ring=self.ring,
                                            property=self.ps1.property,
                                            has_on_right=False,
                                            has_on_left=False)
        self.assertTrue(check.exists())

    def tearDown(self):
        # Django's TestCase already rolls back per-test, but these deletes keep
        # the shared log_eng's source tables clean between tests as well.
        RingProperty.objects.all().delete()
        Logic.objects.all().delete()
|
1674440
|
from datetime import date
today = date.today()
def bthdycal(b_year, b_month, b_date, name):
    """Print a birthday greeting, or the age the person turns on their next birthday.

    Parameters
    ----------
    b_year, b_month, b_date : int or str convertible to int
        Birth date components.
    name : str
        Name used in the greeting.
    """
    # Read the clock at call time instead of relying on a module-level
    # snapshot, so long-running processes stay correct across midnight.
    today = date.today()
    age = today.year - int(b_year)
    if today.month == int(b_month) and today.day == int(b_date):
        print(f"Happy Birthday {name}")
        print(f"its your {age} birthday")
    else:
        # Bug fix: the original always printed age + 1.  If this year's
        # birthday is still ahead, the next birthday is the `age`-th.
        upcoming_this_year = (int(b_month), int(b_date)) > (today.month, today.day)
        next_age = age if upcoming_this_year else age + 1
        print(f"the coming birthday will be your {next_age} birthday")


bthdycal(2003, 6, 5, "Vinaya")
|
1674447
|
import logging
import graypy
from balebot.config import Config
class Logger:
    """Process-wide logger configured lazily from ``Config`` settings."""

    # Cached logger instance; populated on first init_logger/get_logger call.
    logger = None

    @staticmethod
    def init_logger():
        """Build the logger according to ``Config`` and cache it on the class.

        ``Config.use_graylog`` picks the sinks: "0" -> console only,
        "1" -> graylog only, "2" -> both.  Graylog handlers are attached
        only when host/source/port settings are all valid; otherwise the
        logger ends up with no handlers.
        """
        new_logger = logging.getLogger(Config.log_facility_name)
        new_logger.setLevel(Config.log_level)

        def make_console_handler():
            # Human-readable console format mirroring the GELF payload.
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(
                '%(asctime)s %(filename)s:%(lineno)d %(levelname)s:\n"%(message)s"'
            ))
            return handler

        mode = Config.use_graylog
        host = Config.graylog_host
        port = Config.graylog_port
        source = Config.source
        graylog_ok = bool(host) and bool(source) and port is not None and isinstance(port, int)

        handlers = []
        if mode == "0":
            handlers.append(make_console_handler())
        elif mode == "1" and graylog_ok:
            handlers.append(graypy.GELFHandler(host=host, port=port, localname=source))
        elif mode == "2" and graylog_ok:
            handlers.append(graypy.GELFHandler(host=host, port=port, localname=source))
            handlers.append(make_console_handler())

        for handler in handlers:
            new_logger.addHandler(handler)
        Logger.logger = new_logger
        return Logger.logger

    @staticmethod
    def get_logger():
        """Return the cached logger, initialising it on first use."""
        return Logger.logger if Logger.logger else Logger.init_logger()
|
1674452
|
from typing import Optional, Tuple
from overrides import overrides
import torch
from torch.nn import Conv1d, Linear
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn import Activation
@Seq2VecEncoder.register("cnn")
class CnnEncoder(Seq2VecEncoder):
    """
    A ``CnnEncoder`` is a combination of multiple convolution layers and max pooling layers. As a
    :class:`Seq2VecEncoder`, the input to this module is of shape ``(batch_size, num_tokens,
    input_dim)``, and the output is of shape ``(batch_size, output_dim)``.
    The CNN has one convolution layer for each ngram filter size. Each convolution operation gives
    out a vector of size num_filters. The number of times a convolution layer will be used
    is ``num_tokens - ngram_size + 1``. The corresponding maxpooling layer aggregates all these
    outputs from the convolution layer and outputs the max.
    This operation is repeated for every ngram size passed, and consequently the dimensionality of
    the output after maxpooling is ``len(ngram_filter_sizes) * num_filters``. This then gets
    (optionally) projected down to a lower dimensional output, specified by ``output_dim``.
    We then use a fully connected layer to project in back to the desired output_dim. For more
    details, refer to "A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural
    Networks for Sentence Classification", Zhang and Wallace 2016, particularly Figure 1.
    Parameters
    ----------
    embedding_dim : ``int``
        This is the input dimension to the encoder. We need this because we can't do shape
        inference in pytorch, and we need to know what size filters to construct in the CNN.
    num_filters: ``int``
        This is the output dim for each convolutional layer, which is the number of "filters"
        learned by that layer.
    ngram_filter_sizes: ``Tuple[int]``, optional (default=``(2, 3, 4, 5)``)
        This specifies both the number of convolutional layers we will create and their sizes. The
        default of ``(2, 3, 4, 5)`` will have four convolutional layers, corresponding to encoding
        ngrams of size 2 to 5 with some number of filters.
    conv_layer_activation: ``Activation``, optional (default=``torch.nn.ReLU``)
        Activation to use after the convolution layers.
    output_dim : ``Optional[int]``, optional (default=``None``)
        After doing convolutions and pooling, we'll project the collected features into a vector of
        this size. If this value is ``None``, we will just return the result of the max pooling,
        giving an output of shape ``len(ngram_filter_sizes) * num_filters``.
    """
    def __init__(self,
                 embedding_dim: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),  # pylint: disable=bad-whitespace
                 conv_layer_activation: Activation = None,
                 output_dim: Optional[int] = None) -> None:
        super(CnnEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        self._activation = conv_layer_activation or Activation.by_name('relu')()
        self._output_dim = output_dim
        self._convolution_layers = [Conv1d(in_channels=self._embedding_dim,
                                           out_channels=self._num_filters,
                                           kernel_size=ngram_size)
                                    for ngram_size in self._ngram_filter_sizes]
        # Register each conv layer explicitly so its parameters are tracked by
        # the module; the 'conv_layer_%d' names MUST match the getattr lookups
        # in forward() (they also become the state-dict keys).
        for i, conv_layer in enumerate(self._convolution_layers):
            self.add_module('conv_layer_%d' % i, conv_layer)
        maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
        if self._output_dim:
            self.projection_layer = Linear(maxpool_output_dim, self._output_dim)
        else:
            self.projection_layer = None
            self._output_dim = maxpool_output_dim
    @overrides
    def get_input_dim(self) -> int:
        # Input feature size expected on the last axis of `tokens`.
        return self._embedding_dim
    @overrides
    def get_output_dim(self) -> int:
        # Either the projection size or num_filters * len(ngram_filter_sizes).
        return self._output_dim
    def forward(self, tokens: torch.Tensor, mask: torch.Tensor):  # pylint: disable=arguments-differ
        """Convolve, activate, max-pool over time, concatenate, optionally project.

        ``tokens`` is ``(batch_size, num_tokens, embedding_dim)``; ``mask``
        (same leading shape) zeroes out padded positions before convolving.
        Returns ``(batch_size, output_dim)``.
        """
        if mask is not None:
            tokens = tokens * mask.unsqueeze(-1).float()
        # Our input is expected to have shape `(batch_size, num_tokens, embedding_dim)`.  The
        # convolution layers expect input of shape `(batch_size, in_channels, sequence_length)`,
        # where the conv layer `in_channels` is our `embedding_dim`.  We thus need to transpose the
        # tensor first.
        tokens = torch.transpose(tokens, 1, 2)
        # Each convolution layer returns output of size `(batch_size, num_filters, pool_length)`,
        # where `pool_length = num_tokens - ngram_size + 1`.  We then do an activation function,
        # then do max pooling over each filter for the whole input sequence.  Because our max
        # pooling is simple, we just use `torch.max`.  The resultant tensor of has shape
        # `(batch_size, num_conv_layers * num_filters)`, which then gets projected using the
        # projection layer, if requested.
        filter_outputs = []
        for i in range(len(self._convolution_layers)):
            convolution_layer = getattr(self, 'conv_layer_{}'.format(i))
            filter_outputs.append(
                    self._activation(convolution_layer(tokens)).max(dim=2)[0]
            )
        # Now we have a list of `num_conv_layers` tensors of shape `(batch_size, num_filters)`.
        # Concatenating them gives us a tensor of shape `(batch_size, num_filters * num_conv_layers)`.
        maxpool_output = torch.cat(filter_outputs, dim=1) if len(filter_outputs) > 1 else filter_outputs[0]
        if self.projection_layer:
            result = self.projection_layer(maxpool_output)
        else:
            result = maxpool_output
        return result
|
1674478
|
from .octopus_ml import plot_imp
from .octopus_ml import adjusted_classes
from .octopus_ml import cv
from .octopus_ml import cv_adv
from .octopus_ml import cv_plot
from .octopus_ml import roc_curve_plot
from .octopus_ml import confusion_matrix_plot
from .octopus_ml import hist_target
from .octopus_ml import target_pie
from .octopus_ml import preds_distribution
from .octopus_ml import target_corr
from .octopus_ml import label_dist
from .octopus_data import sampling_within_group
from .octopus_data import anomalies
from .octopus_data import correlations
from .octopus_data import recieve_fps
from .octopus_data import recieve_fns
from .octopus_data import sampling_by_group
from .octopus_data import sampling
from .octopus_data import data_leakage
from .octopus_data import cat_features_proccessing
from .octopus_data import convert_to_categorical
from .octopus_data import detect_categorical
from .octopus_data import diff_list
|
1674484
|
import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
from .fedbase import BaseFedarated
from flearn.utils.tf_utils import process_grad, cosine_sim, softmax, norm_grad
from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch, project
class Server(BaseFedarated):
    """Federated server implementing agnostic federated learning (non-stochastic).

    Maintains a per-client weight vector `latest_lambdas` alongside the model
    and updates both each round; the reported model `resulting_model` is the
    running average of `latest_model` over rounds.
    """
    def __init__(self, params, learner, dataset):
        print('Using agnostic flearn (non-stochastic version) to Train')
        self.inner_opt = tf.train.AdagradOptimizer(params['learning_rate'])
        super(Server, self).__init__(params, learner, dataset)
        # Uniform initial mixing weights over all clients.
        self.latest_lambdas = np.ones(len(self.clients)) * 1.0 / len(self.clients)
        self.resulting_model = self.client_model.get_params() # this is only for the agnostic flearn paper
    def train(self):
        """Run `num_rounds` of lambda-weighted gradient descent over all clients.

        Each round: every client takes one SGD step's gradient on its next
        batch; gradients are aggregated weighted by `latest_lambdas`; the
        model steps down that direction while the lambdas step up the client
        losses and are projected back onto the simplex.
        """
        print('Training with {} workers ---'.format(self.clients_per_round))
        num_clients = len(self.clients)
        pk = np.ones(num_clients) * 1.0 / num_clients
        # One epoch-batch generator per client, enough batches for every round.
        batches = {}
        for c in self.clients:
            batches[c] = gen_epoch(c.train_data, self.num_rounds+2)
        for i in trange(self.num_rounds+1, desc='Round: ', ncols=120):
            # test model
            if i % self.eval_every == 0:
                self.client_model.set_params(self.resulting_model)
                stats = self.test_resulting_model()
                tqdm.write('At round {} testing accuracy: {}'.format(i, np.sum(stats[3])*1.0/np.sum(stats[2])))
                test_accuracies = np.divide(np.asarray(stats[3]), np.asarray(stats[2]))
                for idx in range(len(self.clients)):
                    tqdm.write('Client {} testing accuracy: {}'.format(self.clients[idx].id, test_accuracies[idx]))
            solns = []
            losses = []
            for idx, c in enumerate(self.clients):
                c.set_params(self.latest_model)
                batch = next(batches[c])
                _, grads, loss = c.solve_sgd(batch)  # this gradient is with respect to w
                losses.append(loss)
                # Weight each client's gradient by its current lambda.
                solns.append((self.latest_lambdas[idx],grads[1]))
            avg_gradient = self.aggregate(solns)
            # In-place descent step on the model variables.
            for v,g in zip(self.latest_model, avg_gradient):
                v -= self.learning_rate * g
            # Ascent step on the lambdas, then project back onto the simplex.
            for idx in range(len(self.latest_lambdas)):
                self.latest_lambdas[idx] += self.learning_rate_lambda * losses[idx]
            self.latest_lambdas = project(self.latest_lambdas)
            # Running average of the iterates (the model actually reported).
            for k in range(len(self.resulting_model)):
                self.resulting_model[k] = (self.resulting_model[k] * i + self.latest_model[k]) * 1.0 / (i+1)
1674537
|
import dash
import dash_html_components as html
# Standard demo stylesheet used throughout the Dash documentation.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Custom renderer bootstrap string (default DashRenderer, no hooks).
app.renderer = 'var renderer = new DashRenderer();'
app.layout = html.Div('Simple Dash App')
if __name__ == '__main__':
    # debug=True enables hot reload and the in-browser error overlay.
    app.run_server(debug=True)
|
1674540
|
import pandas as pd
import math
import parser
# Worksheets with indemnification funds and temporary remuneration
# For active members there are spreadsheets as of July 2019
# Adjust existing spreadsheet variations
def format_value(element):
    """Normalise a raw spreadsheet cell into a numeric-friendly value.

    Placeholder/missing cells are coerced to 0.0: ``None``, ``NaN`` (what
    pandas' ``to_numpy()`` yields for blank cells), any string containing
    '-' (the sheets use '-' for "not applicable"), and '#N/DISP'.
    Every other value is returned unchanged.
    """
    if element is None:
        return 0.0
    # Blank cells arrive as float NaN from pandas; without this check NaN
    # would propagate through every round(... + ...) downstream.
    if isinstance(element, float) and math.isnan(element):
        return 0.0
    if isinstance(element, str) and "-" in element:
        return 0.0
    if element == "#N/DISP":
        return 0.0
    return element
# July and August 2019
def update_employee_indemnity_jul_aug_2019(file_name, employees):
    """Merge the Jul/Aug 2019 indemnity sheet into `employees` (keyed by registration id).

    Sheet layout: col 0 = registration, 4 = food allowance, 5 = vacation
    pecuniary, 6 = cumulative gratification, 7 = special-nature gratification.
    Only rows between the parser-detected begin/end rows are processed, and
    only for employees already present in the monthly remuneration sheet.
    Mutates and returns `employees`.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])
        ferias_pc = format_value(row[5])
        cumulativa = format_value(row[6])  # cumulative gratification
        grat_natureza = format_value(row[7])  # special-nature gratification
        if (
            matricula in employees.keys()
        ):  # Only update employees already present in the monthly remuneration sheet
            emp = employees[matricula]
            emp["income"].update(
                {
                    "perks": {
                        "total": round(ferias_pc + alimentacao, 2),
                        "food": alimentacao,
                        "vacation_pecuniary": ferias_pc,
                    }
                }
            )
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"] + cumulativa + grat_natureza, 2
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"] + cumulativa + grat_natureza, 2
            )
            emp["income"].update(
                {"total": round(emp["income"]["total"] + cumulativa + grat_natureza, 2)}
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# September to December 2019 / January and November 2020
def update_employee_indemnity_sept_2019_to_jan_and_nov_2020(file_name, employees):
    """Merge the Sep-Dec 2019 / Jan & Nov 2020 indemnity sheet into `employees`.

    Sheet layout: col 0 = registration, 4 = food, 5 = vacation pecuniary,
    6 = premium-license pecuniary, 7 = cumulative grat., 8 = special-nature
    grat., 9 = special-action-group grat.  Mutates and returns `employees`.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])
        ferias_pc = format_value(row[5])
        licensa_pc = format_value(row[6])
        cumulativa = format_value(row[7])  # cumulative gratification
        grat_natureza = format_value(row[8])  # special-nature gratification
        atuacao_especial = format_value(
            row[9]
        )  # special-action-group gratification
        if (
            matricula in employees.keys()
        ):  # Only update employees already present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            # NOTE(review): atuacao_especial is added to the gratification
            # totals above but not to income["total"] here, unlike the
            # feb/mar 2020 variant — confirm this is intentional.
            emp["income"].update(
                {"total": round(emp["income"]["total"] + cumulativa + grat_natureza, 2)}
            )
            emp["income"]["perks"].update(
                {
                    "total": round(ferias_pc + alimentacao + licensa_pc, 2),
                    "food": alimentacao,
                    "vacation_pecuniary": ferias_pc,
                    "premium_license_pecuniary": licensa_pc,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# February and March 2020
def update_employee_indemnity_feb_mar_2020(file_name, employees):
    """Merge the Feb/Mar 2020 indemnity sheet into `employees`.

    Sheet layout: col 0 = registration, 4 = food, 5 = compensatory leave
    (act 1124/18), 6 = vacation pecuniary, 7 = premium-license pecuniary,
    8 = cumulative grat., 9 = special-nature grat., 10 = special-action-group
    grat.  Mutates and returns `employees`.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])
        licensa_compensatoria = format_value(
            row[5]
        )  # compensatory leave, act 1124/18
        ferias_pc = format_value(row[6])
        licensa_pc = format_value(row[7])
        cumulativa = format_value(row[8])  # cumulative gratification
        grat_natureza = format_value(row[9])  # special-nature gratification
        atuacao_especial = format_value(
            row[10]
        )  # special-action-group gratification
        if (
            matricula in employees.keys()
        ):  # Only update employees already present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            emp["income"].update(
                {
                    "total": round(
                        emp["income"]["total"]
                        + cumulativa
                        + grat_natureza
                        + atuacao_especial,
                        2,
                    )
                }
            )
            # NOTE(review): licensa_pc is recorded below but NOT included in
            # the perks "total" here, unlike the sept-2019 and aug/sept-2020
            # variants — confirm against the original spreadsheet.
            emp["income"]["perks"].update(
                {
                    "total": round(ferias_pc + alimentacao + licensa_compensatoria, 2),
                    "food": alimentacao,
                    "compensatory_leave": licensa_compensatoria,
                    "vacation_pecuniary": ferias_pc,
                    "premium_license_pecuniary": licensa_pc,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# April to July 2020
def update_employee_indemnity_apr_to_july_2020(file_name, employees):
    """Merge the Apr-Jul 2020 indemnity sheet into `employees`.

    Sheet layout: col 0 = registration, 4 = food, 5 = compensatory leave
    (act 1124/18), 6 = vacation pecuniary, 7 = cumulative grat.,
    8 = special-nature grat., 9 = special-action-group grat.
    Mutates and returns `employees`.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])
        licensa_compensatoria = format_value(
            row[5]
        )  # compensatory leave, act 1124/18
        ferias_pc = format_value(row[6])
        cumulativa = format_value(row[7])  # cumulative gratification
        grat_natureza = format_value(row[8])  # special-nature gratification
        atuacao_especial = format_value(
            row[9]
        )  # special-action-group gratification
        if (
            matricula in employees.keys()
        ):  # Only update employees already present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            emp["income"].update(
                {
                    "total": round(
                        emp["income"]["total"]
                        + cumulativa
                        + grat_natureza
                        + atuacao_especial,
                        2,
                    )
                }
            )
            emp["income"]["perks"].update(
                {
                    "total": round(ferias_pc + alimentacao + licensa_compensatoria, 2),
                    "food": alimentacao,
                    "compensatory_leave": licensa_compensatoria,
                    "vacation_pecuniary": ferias_pc,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# August and September 2020
def update_employee_indemnity_aug_sept_2020(file_name, employees):
    """Merge the Aug/Sep 2020 indemnity sheet (widest layout) into `employees`.

    Sheet layout: col 0 = registration, 4 = food, 5 = transport, 6 = daycare,
    7 = vacation pecuniary, 8 = premium-license pecuniary, 9 = compensatory
    leave (act 1124/18), 10 = unhealthiness bonus, 11 = function substitution,
    12 = vehicle, 13 = cumulative grat., 14 = qualification grat.,
    15 = special-nature grat., 16 = special-action-group grat.
    Mutates and returns `employees`.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        alimentacao = format_value(row[4])
        transporte = format_value(row[5])  # transport allowance
        creche = format_value(row[6])  # daycare allowance
        ferias_pc = format_value(row[7])
        licensa_pc = format_value(row[8])  # premium license paid in cash
        licensa_compensatoria = format_value(
            row[9]
        )  # compensatory leave, act 1124/18
        insalubridade = format_value(row[10])  # unhealthiness bonus
        subs_funcao = format_value(row[11])  # function substitution
        viatura = format_value(row[12])  # vehicle
        cumulativa = format_value(row[13])  # cumulative gratification
        grat_qualificacao = format_value(row[14])
        grat_natureza = format_value(row[15])  # special-nature gratification
        atuacao_especial = format_value(
            row[16]
        )  # special-action-group gratification
        if (
            matricula in employees.keys()
        ):  # Only update employees already present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial
                + grat_qualificacao
                + viatura
                + insalubridade
                + subs_funcao,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial
                + grat_qualificacao
                + viatura
                + insalubridade
                + subs_funcao,
                2,
            )
            emp["income"].update(
                {
                    "total": round(
                        emp["income"]["total"]
                        + cumulativa
                        + grat_natureza
                        + atuacao_especial
                        + grat_qualificacao
                        + viatura
                        + insalubridade
                        + subs_funcao,
                        2,
                    )
                }
            )
            emp["income"]["perks"].update(
                {
                    "total": round(
                        ferias_pc
                        + alimentacao
                        + transporte
                        + creche
                        + licensa_compensatoria
                        + licensa_pc,
                        2,
                    ),
                    "food": alimentacao,
                    "transportation": transporte,
                    "pre_school": creche,
                    "vacation_pecuniary": ferias_pc,
                    "premium_license_pecuniary": licensa_pc,
                    "compensatory_leave": licensa_compensatoria,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "INSALUBRIDADE": insalubridade,
                    "SUBS. DE FUNÇÃO": subs_funcao,
                    "VIATURA": viatura,
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. DE QUALIFICAÇÃO": grat_qualificacao,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# October 2020
def update_employee_indemnity_oct_2020(file_name, employees):
    """Merge the Oct 2020 indemnity sheet into `employees`.

    Sheet layout: col 0 = registration, 4 = vacation pecuniary,
    5 = cumulative grat., 6 = special-nature grat., 7 = special-action-group
    grat.  Mutates and returns `employees`.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = str(int(row[0]))  # convert to string by removing the '.0'
        ferias_pc = format_value(row[4])
        cumulativa = format_value(row[5])  # cumulative gratification
        grat_natureza = format_value(row[6])  # special-nature gratification
        atuacao_especial = format_value(
            row[7]
        )  # special-action-group gratification
        if (
            matricula in employees.keys()
        ):  # Only update employees already present in the monthly remuneration sheet
            emp = employees[matricula]
            total_outras_gratificacoes = round(
                emp["income"]["other"]["others_total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            total_gratificacoes = round(
                emp["income"]["other"]["total"]
                + cumulativa
                + grat_natureza
                + atuacao_especial,
                2,
            )
            emp["income"].update(
                {
                    "total": round(
                        emp["income"]["total"]
                        + cumulativa
                        + grat_natureza
                        + atuacao_especial,
                        2,
                    )
                }
            )
            # NOTE(review): only vacation_pecuniary is stored; the perks
            # "total" is not recomputed here — presumably the October sheet
            # carries no other perk columns, but confirm.
            emp["income"]["perks"].update(
                {
                    "vacation_pecuniary": ferias_pc,
                }
            )
            emp["income"]["other"].update(
                {
                    "total": total_gratificacoes,
                    "others_total": total_outras_gratificacoes,
                }
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
            employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# December 2020
def update_employee_indemnity_dec_2020(file_name, employees):
    """Merge December 2020 indemnity data from `file_name` into `employees`.

    Only matriculas already present in `employees` (i.e. employees found in
    the monthly remuneration sheet) are updated, in place.

    :param file_name: path of the indemnity spreadsheet to parse.
    :param employees: dict keyed by matricula, as built from the monthly sheet.
    :return: the same `employees` dict, updated.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        # Convert to string by removing the trailing '.0'.
        matricula = str(int(row[0]))
        alimentação = format_value(row[4])  # Auxílio alimentação
        ferias_pc = format_value(row[5])  # Férias em pecúnia
        cumulativa = format_value(row[6])  # Gratificação Cumulativa
        grat_natureza = format_value(row[7])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(row[8])  # Grat. de Grupo de Atuação Especial
        # Idiomatic membership test (was `matricula in employees.keys()`).
        if matricula in employees:
            emp = employees[matricula]
            extra = cumulativa + grat_natureza + atuacao_especial
            # One round() per total (the original rounded the same value twice).
            emp["income"]["total"] = round(emp["income"]["total"] + extra, 2)
            emp["income"]["perks"].update(
                {
                    "food": alimentação,
                    "vacation_pecuniary": ferias_pc,
                }
            )
            emp["income"]["other"]["total"] = round(
                emp["income"]["other"]["total"] + extra, 2
            )
            emp["income"]["other"]["others_total"] = round(
                emp["income"]["other"]["others_total"] + extra, 2
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# January 2021
def update_employee_indemnity_jan_2021(file_name, employees):
    """Merge January 2021 indemnity data from `file_name` into `employees`.

    Only matriculas already present in `employees` (i.e. employees found in
    the monthly remuneration sheet) are updated, in place.

    :param file_name: path of the indemnity spreadsheet to parse.
    :param employees: dict keyed by matricula, as built from the monthly sheet.
    :return: the same `employees` dict, updated.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        # Convert to string by removing the trailing '.0'.
        matricula = str(int(row[0]))
        alimentação = format_value(row[4])  # Auxílio alimentação
        cumulativa = format_value(row[5])  # Gratificação Cumulativa
        grat_natureza = format_value(row[6])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(row[7])  # Grat. de Grupo de Atuação Especial
        # Idiomatic membership test (was `matricula in employees.keys()`).
        if matricula in employees:
            emp = employees[matricula]
            extra = cumulativa + grat_natureza + atuacao_especial
            # One round() per total (the original rounded the same value twice).
            emp["income"]["total"] = round(emp["income"]["total"] + extra, 2)
            emp["income"]["perks"].update(
                {
                    "food": alimentação,
                }
            )
            emp["income"]["other"]["total"] = round(
                emp["income"]["other"]["total"] + extra, 2
            )
            emp["income"]["other"]["others_total"] = round(
                emp["income"]["other"]["others_total"] + extra, 2
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# February 2021
def update_employee_indemnity_feb_2021(file_name, employees):
    """Merge February 2021 indemnity data from `file_name` into `employees`.

    Only matriculas already present in `employees` (i.e. employees found in
    the monthly remuneration sheet) are updated, in place.

    :param file_name: path of the indemnity spreadsheet to parse.
    :param employees: dict keyed by matricula, as built from the monthly sheet.
    :return: the same `employees` dict, updated.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        # Convert to string by removing the trailing '.0'.
        matricula = str(int(row[0]))
        alimentação = format_value(row[4])  # Auxílio alimentação
        ferias_pc = format_value(row[5])  # Férias em pecúnia
        cumulativa = format_value(row[6])  # Gratificação Cumulativa
        grat_natureza = format_value(row[7])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(row[8])  # Grat. de Grupo de Atuação Especial
        # Idiomatic membership test (was `matricula in employees.keys()`).
        if matricula in employees:
            emp = employees[matricula]
            extra = cumulativa + grat_natureza + atuacao_especial
            # One round() per total (the original rounded the same value twice).
            emp["income"]["total"] = round(emp["income"]["total"] + extra, 2)
            emp["income"]["perks"].update(
                {
                    "food": alimentação,
                    "vacation_pecuniary": ferias_pc,
                }
            )
            emp["income"]["other"]["total"] = round(
                emp["income"]["other"]["total"] + extra, 2
            )
            emp["income"]["other"]["others_total"] = round(
                emp["income"]["other"]["others_total"] + extra, 2
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# March and april 2021
def update_employee_indemnity_mar_apr_2021(file_name, employees):
    """Merge March/April 2021 indemnity data from `file_name` into `employees`.

    Only matriculas already present in `employees` (i.e. employees found in
    the monthly remuneration sheet) are updated, in place.

    :param file_name: path of the indemnity spreadsheet to parse.
    :param employees: dict keyed by matricula, as built from the monthly sheet.
    :return: the same `employees` dict, updated.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        # Convert to string by removing the trailing '.0'.
        matricula = str(int(row[0]))
        alimentação = format_value(row[4])  # Auxílio alimentação
        ferias_pc = format_value(row[5])  # Férias em pecúnia
        saude = format_value(row[6])  # Auxílio saúde
        cumulativa = format_value(row[7])  # Gratificação Cumulativa
        grat_natureza = format_value(row[8])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(row[9])  # Grat. de Grupo de Atuação Especial
        # Idiomatic membership test (was `matricula in employees.keys()`).
        if matricula in employees:
            emp = employees[matricula]
            extra = cumulativa + grat_natureza + atuacao_especial
            # One round() per total (the original rounded the same value twice).
            emp["income"]["total"] = round(emp["income"]["total"] + extra, 2)
            emp["income"]["perks"].update(
                {
                    "food": alimentação,
                    "vacation_pecuniary": ferias_pc,
                    "health": saude,
                }
            )
            emp["income"]["other"]["total"] = round(
                emp["income"]["other"]["total"] + extra, 2
            )
            emp["income"]["other"]["others_total"] = round(
                emp["income"]["other"]["others_total"] + extra, 2
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
# June 2021
def update_employee_indemnity_june_jul_2021(file_name, employees):
    """Merge June/July 2021 indemnity data from `file_name` into `employees`.

    Only matriculas already present in `employees` (i.e. employees found in
    the monthly remuneration sheet) are updated, in place.

    :param file_name: path of the indemnity spreadsheet to parse.
    :param employees: dict keyed by matricula, as built from the monthly sheet.
    :return: the same `employees` dict, updated.
    """
    rows = parser.read_data(file_name).to_numpy()
    begin_row = parser.get_begin_row(rows)
    end_row = parser.get_end_row(rows, begin_row, file_name)
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        # Convert to string by removing the trailing '.0'.
        matricula = str(int(row[0]))
        alimentação = format_value(row[4])  # Auxílio alimentação
        ferias_pc = format_value(row[5])  # Férias em pecúnia
        licensa_pc = format_value(row[6])  # Licença-prêmio em pecúnia
        saude = format_value(row[7])  # Auxílio saúde
        cumulativa = format_value(row[8])  # Gratificação Cumulativa
        grat_natureza = format_value(row[9])  # Gratificação de Natureza Especial
        atuacao_especial = format_value(row[10])  # Grat. de Grupo de Atuação Especial
        # Idiomatic membership test (was `matricula in employees.keys()`).
        if matricula in employees:
            emp = employees[matricula]
            extra = cumulativa + grat_natureza + atuacao_especial
            # One round() per total (the original rounded the same value twice).
            emp["income"]["total"] = round(emp["income"]["total"] + extra, 2)
            emp["income"]["perks"].update(
                {
                    "food": alimentação,
                    "vacation_pecuniary": ferias_pc,
                    "premium_license_pecuniary": licensa_pc,
                    "health": saude,
                }
            )
            emp["income"]["other"]["total"] = round(
                emp["income"]["other"]["total"] + extra, 2
            )
            emp["income"]["other"]["others_total"] = round(
                emp["income"]["other"]["others_total"] + extra, 2
            )
            emp["income"]["other"]["others"].update(
                {
                    "GRAT. CUMULATIVA": cumulativa,
                    "GRAT. NATUREZA ESPECIAL": grat_natureza,
                    "GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
                }
            )
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
|
1674556
|
import torch
import numpy as np
from functools import partial
class Optimizer():
    """Wrapper around a torch optimizer that also manages an optional
    learning-rate schedule and a teacher-forcing (scheduled sampling) rate."""
    def __init__(self, parameters, optimizer, lr, eps, lr_scheduler, tf_start=1, tf_end=1, tf_step=1, **kwargs):
        # Setup teacher forcing scheduler
        # tf_type is True when teacher forcing actually decays (tf_end != 1).
        self.tf_type = tf_end != 1
        # Linear decay from tf_start to tf_end over tf_step steps, clamped at tf_end.
        self.tf_rate = lambda step: max(
            tf_end, tf_start-(tf_start-tf_end)*step/tf_step)
        # Setup torch optimizer
        self.opt_type = optimizer
        self.init_lr = lr
        self.sch_type = lr_scheduler
        # `optimizer` is the class name of a torch.optim optimizer, e.g. 'Adam'.
        opt = getattr(torch.optim, optimizer)
        if lr_scheduler == 'warmup':
            # Transformer-style inverse-sqrt warmup; the scheduler fully drives
            # the LR from pre_step(), so the optimizer's base lr is set to 1.0.
            warmup_step = 4000.0
            init_lr = lr
            self.lr_scheduler = lambda step: init_lr * warmup_step ** 0.5 * \
                np.minimum((step+1)*warmup_step**-1.5, (step+1)**-0.5)
            self.opt = opt(parameters, lr=1.0)
        elif lr_scheduler == 'spec-aug-basic':
            # Scheduler from https://arxiv.org/pdf/1904.08779.pdf
            self.lr_scheduler = partial(speech_aug_scheduler, s_r=500,
                                        s_i=20000, s_f=80000, peak_lr=lr)
            self.opt = opt(parameters, lr=lr, eps=eps)
        elif lr_scheduler == 'spec-aug-double':
            # Scheduler from https://arxiv.org/pdf/1904.08779.pdf
            # Same shape as 'spec-aug-basic' with all breakpoints doubled.
            self.lr_scheduler = partial(speech_aug_scheduler, s_r=1000,
                                        s_i=40000, s_f=160000, peak_lr=lr)
            self.opt = opt(parameters, lr=lr, eps=eps)
        else:
            # Constant LR: no scheduler installed.
            self.lr_scheduler = None
            self.opt = opt(parameters, lr=lr, eps=eps)  # ToDo: 1e-8 better?
    def get_opt_state_dict(self):
        """Return the wrapped optimizer's state dict (for checkpointing)."""
        return self.opt.state_dict()
    def load_opt_state_dict(self, state_dict):
        """Restore the wrapped optimizer's state from a checkpoint."""
        self.opt.load_state_dict(state_dict)
    def pre_step(self, step):
        """Apply the scheduled LR for `step`, zero gradients, and return the
        teacher-forcing rate to use for this step."""
        if self.lr_scheduler is not None:
            cur_lr = self.lr_scheduler(step)
            # Write the scheduled LR into every param group before the update.
            for param_group in self.opt.param_groups:
                param_group['lr'] = cur_lr
        self.opt.zero_grad()
        return self.tf_rate(step)
    def step(self):
        """Perform one optimizer update (call after backward())."""
        self.opt.step()
    def create_msg(self):
        """Return a one-element list describing the optimizer setup for logging."""
        return ['Optim.spec.| Algo. = {}\t| Lr = {}\t (Scheduler = {})| Scheduled sampling = {}'
                .format(self.opt_type, self.init_lr, self.sch_type, self.tf_type)]
def speech_aug_scheduler(step, s_r, s_i, s_f, peak_lr):
    """Piecewise learning-rate schedule from the SpecAugment paper.

    Starting from 0, ramp up linearly to `peak_lr` over `s_r` steps, hold it
    until `s_i`, decay exponentially (base 10) down to 1% of peak at `s_f`,
    then stay converged at `peak_lr * 0.01`.
    """
    floor_ratio = 0.01  # final LR as a fraction of the peak
    step_one_based = step + 1
    if step_one_based < s_r:
        # Linear ramp-up toward the peak.
        return peak_lr * float(step_one_based) / s_r
    if step_one_based < s_i:
        # Hold at the peak.
        return peak_lr
    if step_one_based > s_f:
        # Converged floor.
        return peak_lr * floor_ratio
    # Exponential decay; rate chosen so lr(s_f) == peak_lr * floor_ratio.
    decay_rate = -np.log10(floor_ratio) / (s_f - s_i)
    return peak_lr * np.power(10, -decay_rate * (step_one_based - s_i))
|
1674582
|
import rospy
import smach
import smach_ros
from helpers import movement as m
from helpers import transforms as t
from helpers import gripper
class ToolChange(smach.State):
    """Smach state that lifts the arm to a safe height, moves to the tool-change
    position, and switches the end effector to the requested tool.

    Outcomes: 'succeeded' or 'failed'.
    """
    def __init__(self, tool=None, duration=1.0):
        """
        :param tool: 'gripper', 'sucker' or 'neutral'; anything else fails.
        :param duration: currently unused — NOTE(review): confirm whether it
            was meant to bound the motion time.
        """
        smach.State.__init__(self, outcomes=['succeeded','failed'])
        self.tool = tool

    # ==========================================================
    def execute(self, userdata):
        # BUG FIX: the original `gripper.open_gripper` only referenced the
        # function without calling it, so the gripper was never opened.
        gripper.open_gripper()
        rospy.loginfo('TOOL CHANGE')
        rospy.loginfo('\tMove up to safe height')
        curr = t.current_robot_pose('global_xyz_link', 'realsense_endpoint')
        res = m.move_to_global(curr.position.x, curr.position.y, 0.505, 'realsense')
        if not res:
            rospy.logerr('Failed to move to safe position.')
            return 'failed'

        rospy.loginfo('\tMove to tool change position')
        res = m.move_to_named_pose('realsense', 'tool_change_position')
        if not res:
            rospy.logerr('Failed to move to tool_change_position')
            return 'failed'

        # Map the requested tool onto its named pose.
        if self.tool == 'gripper':
            rospy.loginfo('\tSelect Gripper')
            tool_change_named_pose = 'tool_change_gripper'
        elif self.tool == 'sucker':
            rospy.loginfo('\tSelect Sucker')
            tool_change_named_pose = 'tool_change_sucker'
        elif self.tool == 'neutral':
            # Allow us to select neutral
            rospy.loginfo('\tSelect Neutral')
            tool_change_named_pose = 'tool_change_neutral'
        else:
            rospy.logerr('Incorrect tool name for tool change')
            return 'failed'

        res = m.move_to_named_pose('whole_arm', tool_change_named_pose)
        if not res:
            rospy.logerr('Failed to move to %s' % tool_change_named_pose)
            return 'failed'
        return 'succeeded'
|
1674583
|
from django.conf.urls import url
from .views import levels
urlpatterns = [
    # NOTE(review): this regex is unanchored (no ^ or $), so it matches any
    # path containing a digit run — confirm whether r'^(?P<level>\d+)$' was
    # intended before tightening it.
    url(r'(?P<level>\d+)', levels, name="level"),
]
|
1674592
|
from datetime import datetime, timedelta
import logging
import onedrivesdk
import onedrivesdk.error
from onedrive_client.od_tasks import base
from onedrive_client import od_api_helper
class UpdateSubscriptionTask(base.TaskBase):
    """Task that creates or renews the OneDrive webhook subscription for a
    repository's drive and registers it with the webhook worker."""
    def __init__(self, repo, task_pool, webhook_worker, subscription_id=None):
        """
        :param onedrive_client.od_repo.OneDriveLocalRepository repo:
        :param onedrive_client.od_task.TaskPool | None task_pool:
        :param onedrive_client.od_webhook.WebhookWorkerThread webhook_worker:
        :param str | None subscription_id: existing subscription to renew, or
            None to create a fresh one.
        """
        super().__init__(repo, task_pool)
        self.webhook_worker = webhook_worker
        self.subscription_id = subscription_id
    def handle(self):
        """Create or renew the subscription on the drive root.

        :return onedrivesdk.Subscription | None: the live subscription, or
            None if the API call failed.
        """
        logging.info('Updating webhook for Drive %s.', self.repo.drive.id)
        item_request = self.repo.authenticator.client.item(drive=self.repo.drive.id, path='/')
        # New expiration is "now + renew interval" so the subscription stays
        # alive until the next scheduled renewal.
        expiration_time = datetime.utcnow() + timedelta(seconds=self.repo.context.config['webhook_renew_interval_sec'])
        try:
            if self.subscription_id is None:
                # First-time registration: create a brand-new subscription.
                subscription = od_api_helper.create_subscription(
                    item_request, self.repo, self.webhook_worker.webhook_url, expiration_time)
            else:
                # Renewal: patch the existing subscription with a fresh URL
                # and expiration time.
                subscription = onedrivesdk.Subscription()
                subscription.id = self.subscription_id
                subscription.notification_url = self.webhook_worker.webhook_url
                subscription.expiration_date_time = expiration_time
                subscription = od_api_helper.item_request_call(
                    self.repo, item_request.subscriptions[self.subscription_id].update, subscription)
            self.webhook_worker.add_subscription(subscription, self.repo)
            logging.info('Webhook for Drive %s updated.', self.repo.drive.id)
            return subscription
        except onedrivesdk.error.OneDriveError as e:
            # Best effort: log and let the caller retry on the next cycle.
            logging.error('Error: %s', e)
            return None
|
1674596
|
from __future__ import with_statement
import random
from collections import defaultdict
from datetime import datetime
import pytest
from whoosh import analysis, fields, index, qparser, query
from whoosh.compat import b, u, xrange, text_type, PY3, permutations
from whoosh.filedb.filestore import RamStorage
from whoosh.writing import IndexingError
from whoosh.util.numeric import length_to_byte, byte_to_length
from whoosh.util.testing import TempIndex, TempStorage
def test_creation():
    """A schema with varied field types accepts documents and commits cleanly."""
    s = fields.Schema(content=fields.TEXT(phrase=True),
                      title=fields.TEXT(stored=True),
                      path=fields.ID(stored=True),
                      tags=fields.KEYWORD(stored=True),
                      quick=fields.NGRAM,
                      note=fields.STORED)
    st = RamStorage()
    ix = st.create_index(s)
    w = ix.writer()
    w.add_document(title=u("First"), content=u("This is the first document"),
                   path=u("/a"), tags=u("first second third"),
                   quick=u("First document"),
                   note=u("This is the first document"))
    w.add_document(content=u("Let's try this again"), title=u("Second"),
                   path=u("/b"), tags=u("Uno Dos Tres"),
                   quick=u("Second document"),
                   note=u("This is the second document"))
    w.commit()
def test_empty_commit():
    """Committing a writer with no pending changes must not raise."""
    s = fields.Schema(id=fields.ID(stored=True))
    with TempIndex(s, "emptycommit") as ix:
        w = ix.writer()
        w.add_document(id=u("1"))
        w.add_document(id=u("2"))
        w.add_document(id=u("3"))
        w.commit()
        w = ix.writer()
        w.commit()
def test_version_in():
    """index.version() reports the library version and the current TOC version."""
    from whoosh import __version__
    from whoosh import index
    with TempStorage("versionin") as st:
        assert not index.exists(st)
        schema = fields.Schema(text=fields.TEXT)
        ix = st.create_index(schema)
        assert index.exists(st)
        assert ix.is_empty()
        v = index.version(st)
        assert v[0] == __version__
        assert v[1] == index._CURRENT_TOC_VERSION
        with ix.writer() as w:
            w.add_document(text=u("alfa"))
        assert not ix.is_empty()
def test_simple_indexing():
    """Randomly generated documents are all retrievable by every term they contain."""
    schema = fields.Schema(text=fields.TEXT, id=fields.STORED)
    domain = (u("alfa"), u("bravo"), u("charlie"), u("delta"), u("echo"),
              u("foxtrot"), u("golf"), u("hotel"), u("india"), u("juliet"),
              u("kilo"), u("lima"), u("mike"), u("november"))
    docs = defaultdict(list)
    with TempIndex(schema, "simple") as ix:
        with ix.writer() as w:
            for i in xrange(100):
                smp = random.sample(domain, 5)
                for word in smp:
                    docs[word].append(i)
                w.add_document(text=u(" ").join(smp), id=i)
        with ix.searcher() as s:
            for word in domain:
                rset = sorted([hit["id"] for hit
                               in s.search(query.Term("text", word),
                                           limit=None)])
                assert rset == docs[word]
def test_integrity():
    """Terms from multiple commits all appear in the merged lexicon."""
    s = fields.Schema(name=fields.TEXT, value=fields.TEXT)
    st = RamStorage()
    ix = st.create_index(s)
    w = ix.writer()
    w.add_document(name=u("Yellow brown"), value=u("Blue red green purple?"))
    w.add_document(name=u("Alpha beta"), value=u("Gamma delta epsilon omega."))
    w.commit()
    w = ix.writer()
    w.add_document(name=u("One two"), value=u("Three four five."))
    w.commit()
    tr = ix.reader()
    assert ix.doc_count_all() == 3
    assert " ".join(tr.field_terms("name")) == "alpha beta brown one two yellow"
def test_lengths():
    """Field lengths are stored per document, compressed via byte encoding."""
    s = fields.Schema(f1=fields.KEYWORD(stored=True, scorable=True),
                      f2=fields.KEYWORD(stored=True, scorable=True))
    with TempIndex(s, "testlengths") as ix:
        w = ix.writer()
        items = u("ABCDEFG")
        from itertools import cycle, islice
        lengths = [10, 20, 2, 102, 45, 3, 420, 2]
        for length in lengths:
            w.add_document(f2=u(" ").join(islice(cycle(items), length)))
        w.commit()
        with ix.reader() as dr:
            ls1 = [dr.doc_field_length(i, "f1")
                   for i in xrange(0, len(lengths))]
            assert ls1 == [0] * len(lengths)
            ls2 = [dr.doc_field_length(i, "f2")
                   for i in xrange(0, len(lengths))]
            assert ls2 == [byte_to_length(length_to_byte(l)) for l in lengths]
def test_many_lengths():
    """Term min/max lengths round-trip through the byte length encoding."""
    domain = u("alfa bravo charlie delta echo").split()
    schema = fields.Schema(text=fields.TEXT)
    ix = RamStorage().create_index(schema)
    w = ix.writer()
    for i, word in enumerate(domain):
        length = (i + 1) ** 6
        w.add_document(text=" ".join(word for _ in xrange(length)))
    w.commit()
    s = ix.searcher()
    for i, word in enumerate(domain):
        target = byte_to_length(length_to_byte((i + 1) ** 6))
        ti = s.term_info("text", word)
        assert ti.min_length() == target
        assert ti.max_length() == target
def test_lengths_ram():
    """Per-doc, per-field and max field lengths are exact in a RAM index."""
    s = fields.Schema(f1=fields.KEYWORD(stored=True, scorable=True),
                      f2=fields.KEYWORD(stored=True, scorable=True))
    st = RamStorage()
    ix = st.create_index(s)
    w = ix.writer()
    w.add_document(f1=u("A B C D E"), f2=u("X Y Z"))
    w.add_document(f1=u("B B B B C D D Q"), f2=u("Q R S T"))
    w.add_document(f1=u("D E F"), f2=u("U V A B C D E"))
    w.commit()
    dr = ix.reader()
    assert dr.stored_fields(0)["f1"] == "A B C D E"
    assert dr.doc_field_length(0, "f1") == 5
    assert dr.doc_field_length(1, "f1") == 8
    assert dr.doc_field_length(2, "f1") == 3
    assert dr.doc_field_length(0, "f2") == 3
    assert dr.doc_field_length(1, "f2") == 4
    assert dr.doc_field_length(2, "f2") == 7
    assert dr.field_length("f1") == 16
    assert dr.field_length("f2") == 14
    assert dr.max_field_length("f1") == 8
    assert dr.max_field_length("f2") == 7
def test_merged_lengths():
    """Field lengths survive across unmerged segments (commit(merge=False))."""
    s = fields.Schema(f1=fields.KEYWORD(stored=True, scorable=True),
                      f2=fields.KEYWORD(stored=True, scorable=True))
    with TempIndex(s, "mergedlengths") as ix:
        w = ix.writer()
        w.add_document(f1=u("A B C"), f2=u("X"))
        w.add_document(f1=u("B C D E"), f2=u("Y Z"))
        w.commit()
        w = ix.writer()
        w.add_document(f1=u("A"), f2=u("B C D E X Y"))
        w.add_document(f1=u("B C"), f2=u("X"))
        w.commit(merge=False)
        w = ix.writer()
        w.add_document(f1=u("A B X Y Z"), f2=u("B C"))
        w.add_document(f1=u("Y X"), f2=u("A B"))
        w.commit(merge=False)
        with ix.reader() as dr:
            assert dr.stored_fields(0)["f1"] == u("A B C")
            assert dr.doc_field_length(0, "f1") == 3
            assert dr.doc_field_length(2, "f2") == 6
            assert dr.doc_field_length(4, "f1") == 5
def test_frequency_keyword():
    """doc_frequency/frequency and reader term stats are correct for KEYWORD fields."""
    s = fields.Schema(content=fields.KEYWORD)
    st = RamStorage()
    ix = st.create_index(s)
    w = ix.writer()
    w.add_document(content=u("A B C D E"))
    w.add_document(content=u("B B B B C D D"))
    w.add_document(content=u("D E F"))
    w.commit()
    with ix.reader() as tr:
        assert tr.doc_frequency("content", u("B")) == 2
        assert tr.frequency("content", u("B")) == 5
        assert tr.doc_frequency("content", u("E")) == 2
        assert tr.frequency("content", u("E")) == 2
        assert tr.doc_frequency("content", u("A")) == 1
        assert tr.frequency("content", u("A")) == 1
        assert tr.doc_frequency("content", u("D")) == 3
        assert tr.frequency("content", u("D")) == 4
        assert tr.doc_frequency("content", u("F")) == 1
        assert tr.frequency("content", u("F")) == 1
        assert tr.doc_frequency("content", u("Z")) == 0
        assert tr.frequency("content", u("Z")) == 0
        stats = [(fname, text, ti.doc_frequency(), ti.weight())
                 for (fname, text), ti in tr]
        assert stats == [("content", b("A"), 1, 1), ("content", b("B"), 2, 5),
                         ("content", b("C"), 2, 2), ("content", b("D"), 3, 4),
                         ("content", b("E"), 2, 2), ("content", b("F"), 1, 1)]
def test_frequency_text():
    """doc_frequency/frequency and reader term stats are correct for word terms."""
    s = fields.Schema(content=fields.KEYWORD)
    st = RamStorage()
    ix = st.create_index(s)
    w = ix.writer()
    w.add_document(content=u("alfa bravo charlie delta echo"))
    w.add_document(content=u("bravo bravo bravo bravo charlie delta delta"))
    w.add_document(content=u("delta echo foxtrot"))
    w.commit()
    with ix.reader() as tr:
        assert tr.doc_frequency("content", u("bravo")) == 2
        assert tr.frequency("content", u("bravo")) == 5
        assert tr.doc_frequency("content", u("echo")) == 2
        assert tr.frequency("content", u("echo")) == 2
        assert tr.doc_frequency("content", u("alfa")) == 1
        assert tr.frequency("content", u("alfa")) == 1
        assert tr.doc_frequency("content", u("delta")) == 3
        assert tr.frequency("content", u("delta")) == 4
        assert tr.doc_frequency("content", u("foxtrot")) == 1
        assert tr.frequency("content", u("foxtrot")) == 1
        assert tr.doc_frequency("content", u("zulu")) == 0
        assert tr.frequency("content", u("zulu")) == 0
        stats = [(fname, text, ti.doc_frequency(), ti.weight())
                 for (fname, text), ti in tr]
        assert stats == [("content", b("alfa"), 1, 1),
                         ("content", b("bravo"), 2, 5),
                         ("content", b("charlie"), 2, 2),
                         ("content", b("delta"), 3, 4),
                         ("content", b("echo"), 2, 2),
                         ("content", b("foxtrot"), 1, 1)]
def test_deletion():
    """delete_by_term marks documents deleted; optimize() purges them for real."""
    s = fields.Schema(key=fields.ID, name=fields.TEXT, value=fields.TEXT)
    with TempIndex(s, "deletion") as ix:
        w = ix.writer()
        w.add_document(key=u("A"), name=u("Yellow brown"),
                       value=u("Blue red green purple?"))
        w.add_document(key=u("B"), name=u("Alpha beta"),
                       value=u("Gamma delta epsilon omega."))
        w.add_document(key=u("C"), name=u("One two"),
                       value=u("Three four five."))
        w.commit()
        w = ix.writer()
        assert w.delete_by_term("key", u("B")) == 1
        w.commit(merge=False)
        assert ix.doc_count_all() == 3
        assert ix.doc_count() == 2
        w = ix.writer()
        w.add_document(key=u("A"), name=u("Yellow brown"),
                       value=u("Blue red green purple?"))
        w.add_document(key=u("B"), name=u("Alpha beta"),
                       value=u("Gamma delta epsilon omega."))
        w.add_document(key=u("C"), name=u("One two"),
                       value=u("Three four five."))
        w.commit()
        # This will match both documents with key == B, one of which is already
        # deleted. This should not raise an error.
        w = ix.writer()
        assert w.delete_by_term("key", u("B")) == 1
        w.commit()
        ix.optimize()
        assert ix.doc_count_all() == 4
        assert ix.doc_count() == 4
        with ix.reader() as tr:
            assert " ".join(tr.field_terms("name")) == "brown one two yellow"
def test_writer_reuse():
    """All writer operations raise IndexingError after the writer is committed."""
    s = fields.Schema(key=fields.ID)
    ix = RamStorage().create_index(s)
    w = ix.writer()
    w.add_document(key=u("A"))
    w.add_document(key=u("B"))
    w.add_document(key=u("C"))
    w.commit()
    # You can't re-use a commited/canceled writer
    pytest.raises(IndexingError, w.add_document, key=u("D"))
    pytest.raises(IndexingError, w.update_document, key=u("B"))
    pytest.raises(IndexingError, w.delete_document, 0)
    pytest.raises(IndexingError, w.add_reader, None)
    pytest.raises(IndexingError, w.add_field, "name", fields.ID)
    pytest.raises(IndexingError, w.remove_field, "key")
    pytest.raises(IndexingError, w.searcher)
def test_update():
    """update_document works when the schema has multiple unique keys."""
    # Test update with multiple unique keys
    SAMPLE_DOCS = [{"id": u("test1"), "path": u("/test/1"),
                    "text": u("Hello")},
                   {"id": u("test2"), "path": u("/test/2"),
                    "text": u("There")},
                   {"id": u("test3"), "path": u("/test/3"),
                    "text": u("Reader")},
                   ]
    schema = fields.Schema(id=fields.ID(unique=True, stored=True),
                           path=fields.ID(unique=True, stored=True),
                           text=fields.TEXT)
    with TempIndex(schema, "update") as ix:
        with ix.writer() as w:
            for doc in SAMPLE_DOCS:
                w.add_document(**doc)
        with ix.writer() as w:
            w.update_document(id=u("test2"), path=u("test/1"),
                              text=u("Replacement"))
def test_update2():
    """Repeated update_document leaves exactly one doc per unique key value."""
    schema = fields.Schema(key=fields.ID(unique=True, stored=True),
                           p=fields.ID(stored=True))
    with TempIndex(schema, "update2") as ix:
        nums = list(range(21))
        random.shuffle(nums)
        for i, n in enumerate(nums):
            w = ix.writer()
            w.update_document(key=text_type(n % 10), p=text_type(i))
            w.commit()
        with ix.searcher() as s:
            results = [d["key"] for _, d in s.iter_docs()]
            results = " ".join(sorted(results))
            assert results == "0 1 2 3 4 5 6 7 8 9"
def test_update_numeric():
    """update_document de-duplicates on a unique NUMERIC key."""
    schema = fields.Schema(num=fields.NUMERIC(unique=True, stored=True),
                           text=fields.ID(stored=True))
    with TempIndex(schema, "updatenum") as ix:
        nums = list(range(5)) * 3
        random.shuffle(nums)
        for num in nums:
            with ix.writer() as w:
                w.update_document(num=num, text=text_type(num))
        with ix.searcher() as s:
            results = [d["text"] for _, d in s.iter_docs()]
            results = " ".join(sorted(results))
            assert results == "0 1 2 3 4"
def test_reindex():
    """Re-running update_document over the same docs does not grow the index."""
    SAMPLE_DOCS = [
        {'id': u('test1'),
         'text': u('This is a document. Awesome, is it not?')},
        {'id': u('test2'), 'text': u('Another document. Astounding!')},
        {'id': u('test3'),
         'text': u('A fascinating article on the behavior of domestic '
                   'steak knives.')},
    ]
    schema = fields.Schema(text=fields.TEXT(stored=True),
                           id=fields.ID(unique=True, stored=True))
    with TempIndex(schema, "reindex") as ix:
        def reindex():
            writer = ix.writer()
            for doc in SAMPLE_DOCS:
                writer.update_document(**doc)
            writer.commit()
        reindex()
        assert ix.doc_count_all() == 3
        reindex()
        assert ix.doc_count_all() == 3
def test_noscorables1():
    """Searching non-scorable fields over many random docs does not raise."""
    values = [u("alfa"), u("bravo"), u("charlie"), u("delta"), u("echo"),
              u("foxtrot"), u("golf"), u("hotel"), u("india"), u("juliet"),
              u("kilo"), u("lima")]
    from random import choice, sample, randint
    times = 1000
    schema = fields.Schema(id=fields.ID, tags=fields.KEYWORD)
    with TempIndex(schema, "noscorables1") as ix:
        w = ix.writer()
        for _ in xrange(times):
            w.add_document(id=choice(values),
                           tags=u(" ").join(sample(values, randint(2, 7))))
        w.commit()
        with ix.searcher() as s:
            s.search(query.Term("id", "bravo"))
def test_noscorables2():
    """A single doc with only a non-scorable field commits cleanly."""
    schema = fields.Schema(field=fields.ID)
    with TempIndex(schema, "noscorables2") as ix:
        writer = ix.writer()
        writer.add_document(field=u('foo'))
        writer.commit()
def test_multi():
    """Prefix queries see a consistent view across unmerged segments with deletions."""
    schema = fields.Schema(id=fields.ID(stored=True),
                           content=fields.KEYWORD(stored=True))
    with TempIndex(schema, "multi") as ix:
        writer = ix.writer()
        # Deleted 1
        writer.add_document(id=u("1"), content=u("alfa bravo charlie"))
        # Deleted 1
        writer.add_document(id=u("2"), content=u("bravo charlie delta echo"))
        # Deleted 2
        writer.add_document(id=u("3"), content=u("charlie delta echo foxtrot"))
        writer.commit()
        writer = ix.writer()
        writer.delete_by_term("id", "1")
        writer.delete_by_term("id", "2")
        writer.add_document(id=u("4"), content=u("apple bear cherry donut"))
        writer.add_document(id=u("5"), content=u("bear cherry donut eggs"))
        # Deleted 2
        writer.add_document(id=u("6"), content=u("delta echo foxtrot golf"))
        # no d
        writer.add_document(id=u("7"), content=u("echo foxtrot golf hotel"))
        writer.commit(merge=False)
        writer = ix.writer()
        writer.delete_by_term("id", "3")
        writer.delete_by_term("id", "6")
        writer.add_document(id=u("8"), content=u("cherry donut eggs falafel"))
        writer.add_document(id=u("9"), content=u("donut eggs falafel grape"))
        writer.add_document(id=u("A"), content=u(" foxtrot golf hotel india"))
        writer.commit(merge=False)
        assert ix.doc_count() == 6
        with ix.searcher() as s:
            r = s.search(query.Prefix("content", u("d")), optimize=False)
            assert sorted([d["id"] for d in r]) == ["4", "5", "8", "9"]
            r = s.search(query.Prefix("content", u("d")))
            assert sorted([d["id"] for d in r]) == ["4", "5", "8", "9"]
            r = s.search(query.Prefix("content", u("d")), limit=None)
            assert sorted([d["id"] for d in r]) == ["4", "5", "8", "9"]
def test_deleteall():
    """Deleting every docnum then optimizing yields a truly empty index."""
    schema = fields.Schema(text=fields.TEXT)
    with TempIndex(schema, "deleteall") as ix:
        w = ix.writer()
        domain = u("alfa bravo charlie delta echo").split()
        for i, ls in enumerate(permutations(domain)):
            w.add_document(text=u(" ").join(ls))
            if not i % 10:
                w.commit()
                w = ix.writer()
        w.commit()
        # This is just a test, don't use this method to delete all docs IRL!
        doccount = ix.doc_count_all()
        w = ix.writer()
        for docnum in xrange(doccount):
            w.delete_document(docnum)
        w.commit()
        with ix.searcher() as s:
            r = s.search(query.Or([query.Term("text", u("alfa")),
                                   query.Term("text", u("bravo"))]))
            assert len(r) == 0
        ix.optimize()
        assert ix.doc_count_all() == 0
        with ix.reader() as r:
            assert list(r) == []
def test_simple_stored():
    """stored_fields returns only fields declared stored=True."""
    schema = fields.Schema(a=fields.ID(stored=True), b=fields.ID(stored=False))
    ix = RamStorage().create_index(schema)
    with ix.writer() as w:
        w.add_document(a=u("alfa"), b=u("bravo"))
    with ix.searcher() as s:
        sf = s.stored_fields(0)
        assert sf == {"a": "alfa"}
def test_single():
    """A one-document index supports term lookup and document retrieval."""
    schema = fields.Schema(id=fields.ID(stored=True), text=fields.TEXT)
    with TempIndex(schema, "single") as ix:
        w = ix.writer()
        w.add_document(id=u("1"), text=u("alfa"))
        w.commit()
        with ix.searcher() as s:
            assert ("text", u("alfa")) in s.reader()
            assert list(s.documents(id="1")) == [{"id": "1"}]
            assert list(s.documents(text="alfa")) == [{"id": "1"}]
            assert list(s.all_stored_fields()) == [{"id": "1"}]
def test_indentical_fields():
    """Identical content in three TEXT fields indexes independently per field.

    NOTE(review): the function name has a typo ("indentical"); kept because
    renaming would change which test pytest discovers/reports.
    """
    schema = fields.Schema(id=fields.STORED,
                           f1=fields.TEXT, f2=fields.TEXT, f3=fields.TEXT)
    with TempIndex(schema, "identifields") as ix:
        w = ix.writer()
        w.add_document(id=1, f1=u("alfa"), f2=u("alfa"), f3=u("alfa"))
        w.commit()
        with ix.searcher() as s:
            assert list(s.lexicon("f1")) == [b("alfa")]
            assert list(s.lexicon("f2")) == [b("alfa")]
            assert list(s.lexicon("f3")) == [b("alfa")]
            assert list(s.documents(f1="alfa")) == [{"id": 1}]
            assert list(s.documents(f2="alfa")) == [{"id": 1}]
            assert list(s.documents(f3="alfa")) == [{"id": 1}]
def test_multivalue():
    """Passing a list of values to a field indexes every element."""
    ana = analysis.StemmingAnalyzer()
    schema = fields.Schema(id=fields.STORED, date=fields.DATETIME,
                           num=fields.NUMERIC,
                           txt=fields.TEXT(analyzer=ana))
    ix = RamStorage().create_index(schema)
    with ix.writer() as w:
        w.add_document(id=1, date=datetime(2001, 1, 1), num=5)
        w.add_document(id=2, date=[datetime(2002, 2, 2), datetime(2003, 3, 3)],
                       num=[1, 2, 3, 12])
        w.add_document(txt=u("a b c").split())
    with ix.reader() as r:
        assert ("num", 3) in r
        assert ("date", datetime(2003, 3, 3)) in r
        assert " ".join(r.field_terms("txt")) == "a b c"
def test_multi_language():
    """Pre-analyzed indexing allows per-document analyzers in one field."""
    # Analyzer for English
    ana_eng = analysis.StemmingAnalyzer()
    # analyzer for Pig Latin
    def stem_piglatin(w):
        if w.endswith("ay"):
            w = w[:-2]
        return w
    ana_pig = analysis.StemmingAnalyzer(stoplist=["nday", "roay"],
                                        stemfn=stem_piglatin)
    # Dictionary mapping languages to analyzers
    analyzers = {"eng": ana_eng, "pig": ana_pig}
    # Fake documents
    corpus = [(u("eng"), u("Such stuff as dreams are made on")),
              (u("pig"), u("Otay ebay, roay otnay otay ebay"))]
    schema = fields.Schema(content=fields.TEXT(stored=True),
                           lang=fields.ID(stored=True))
    ix = RamStorage().create_index(schema)
    with ix.writer() as w:
        for doclang, content in corpus:
            ana = analyzers[doclang]
            # "Pre-analyze" the field into token strings
            words = [token.text for token in ana(content)]
            # Note we store the original value but index the pre-analyzed words
            w.add_document(lang=doclang, content=words,
                           _stored_content=content)
    with ix.searcher() as s:
        schema = s.schema
        # Modify the schema to fake the correct analyzer for the language
        # we're searching in
        schema["content"].analyzer = analyzers["eng"]
        qp = qparser.QueryParser("content", schema)
        q = qp.parse("dreaming")
        r = s.search(q)
        assert len(r) == 1
        assert r[0]["content"] == "Such stuff as dreams are made on"
        schema["content"].analyzer = analyzers["pig"]
        qp = qparser.QueryParser("content", schema)
        q = qp.parse("otnay")
        r = s.search(q)
        assert len(r) == 1
        assert r[0]["content"] == "Otay ebay, roay otnay otay ebay"
def test_doc_boost():
    """Per-field (_a_boost/_b_boost) and per-document (_boost) boosts should
    influence ranking order, including across an unmerged second segment."""
    schema = fields.Schema(id=fields.STORED, a=fields.TEXT, b=fields.TEXT)
    ix = RamStorage().create_index(schema)

    writer = ix.writer()
    writer.add_document(id=0, a=u("alfa alfa alfa"), b=u("bravo"))
    writer.add_document(id=1, a=u("alfa"), b=u("bear"), _a_boost=5.0)
    writer.add_document(id=2, a=u("alfa alfa alfa alfa"), _boost=0.5)
    writer.commit()

    with ix.searcher() as searcher:
        hits = searcher.search(query.Term("a", "alfa"))
        # Field boost on doc 1 beats term frequency; doc boost demotes doc 2.
        assert [hit["id"] for hit in hits] == [1, 0, 2]

    writer = ix.writer()
    writer.add_document(id=3, a=u("alfa"), b=u("bottle"))
    writer.add_document(id=4, b=u("bravo"), _b_boost=2.0)
    # Keep the new docs in a separate segment to exercise multi-segment
    # scoring.
    writer.commit(merge=False)

    with ix.searcher() as searcher:
        hits = searcher.search(query.Term("a", "alfa"))
        assert [hit["id"] for hit in hits] == [1, 0, 3, 2]
def test_globfield_length_merge():
    """Regression test for issue 343: field lengths of dynamic (glob) fields
    must survive a segment merge."""
    schema = fields.Schema(title=fields.TEXT(stored=True),
                           path=fields.ID(stored=True))
    schema.add("*_text", fields.TEXT, glob=True)

    docs = [
        (u("First document"), u("/a"),
         u("This is the first document we've added!")),
        (u("Second document"), u("/b"),
         u("The second document is even more interesting!")),
    ]

    with TempIndex(schema, "globlenmerge") as ix:
        # Two separate writer sessions so the second commit triggers a merge.
        for doc_title, doc_path, doc_text in docs:
            with ix.writer() as writer:
                writer.add_document(title=doc_title, path=doc_path,
                                    content_text=doc_text)

        with ix.searcher() as searcher:
            docnum = searcher.document_number(path="/a")
            # The glob field's length must still be recorded post-merge.
            assert searcher.doc_field_length(docnum, "content_text") is not None

            parser = qparser.QueryParser("content", schema)
            hits = searcher.search(parser.parse("content_text:document"))
            assert sorted(hit["path"] for hit in hits) == ["/a", "/b"]
def test_index_decimals():
    """NUMERIC(int) must reject Decimal values, while a Decimal-typed
    NUMERIC field must accept them."""
    from decimal import Decimal

    int_schema = fields.Schema(name=fields.KEYWORD(stored=True),
                               num=fields.NUMERIC(int))
    ix = RamStorage().create_index(int_schema)
    with ix.writer() as writer:
        # A Decimal in an int field is a type error, not a silent coercion.
        with pytest.raises(TypeError):
            writer.add_document(name=u("hello"), num=Decimal("3.2"))

    dec_schema = fields.Schema(name=fields.KEYWORD(stored=True),
                               num=fields.NUMERIC(Decimal, decimal_places=5))
    ix = RamStorage().create_index(dec_schema)
    with ix.writer() as writer:
        writer.add_document(name=u("hello"), num=Decimal("3.2"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.