max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
dags/pipe.py | richarms/docker-airflow-docker-sock | 0 | 6615751 | import logging
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
#from airflow.operators.docker_operator import DockerOperator
#from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from datetime import datetime
import docker
import docker.client
# Module-level logger shared by the task callables below.
log = logging.getLogger(__name__)
# Default arguments applied to every task in the DAG defined below.
default_args = {
    'owner': 'richarms',
    'start_date': datetime(2019, 8, 29),
}
def read_xcoms(**context):
    """Fetch the 'data' XCom pushed by each listed upstream task and log it.

    Requires ``data_to_read`` (an iterable of upstream task ids) and the
    Airflow ``task_instance`` object to be present in the keyword context
    (supplied via ``op_kwargs`` and ``provide_context=True``).
    """
    task_instance = context['task_instance']
    for position, upstream_id in enumerate(context['data_to_read']):
        payload = task_instance.xcom_pull(task_ids=upstream_id, key='data')
        logging.info(f'[{position}] I have received data: {payload} from task {upstream_id}')
def launch_docker_container(**context):
    """Run a docker image to completion, then push an identifying XCom.

    Expects ``image_name`` and ``my_id`` in the context (via ``op_kwargs``).
    Streams the container logs into the task log, and fails the task if the
    container exits with a non-zero status.
    """
    image_name = context['image_name']
    # Use the low-level API client: create_container/start/logs/inspect_container
    # live on docker.APIClient, not on the high-level client that
    # docker.from_env() returns. (The original also annotated this with an
    # undefined name `Client`.)
    client = docker.from_env().api
    log.info(f"Creating image {image_name}")
    container = client.create_container(image=image_name)
    container_id = container.get('Id')
    log.info(f"Running container with id {container_id}")
    client.start(container=container_id)
    logs = client.logs(container_id, follow=True, stderr=True, stdout=True,
                       stream=True, tail='all')
    try:
        while True:
            line = next(logs)
            log.info(f"Task log: {line}")
    except StopIteration:
        # The log stream is exhausted once the container stops.
        pass
    inspect = client.inspect_container(container)
    log.info(inspect)
    if inspect['State']['ExitCode'] != 0:
        raise Exception("Container has not finished with exit code 0")
    log.info("Task ends!")
    my_id = context['my_id']
    context['task_instance'].xcom_push('data', f'my name is {my_id}', context['execution_date'])
def do_test_docker():
    """Smoke-test connectivity to the docker daemon by listing containers."""
    client = docker.from_env()
    # DockerClient keeps containers behind a collection manager; it is not
    # callable, so the original `client.containers()` raised TypeError.
    for container in client.containers.list():
        logging.info(str(container))
# Task graph: print the date, smoke-test the docker daemon, run two
# containerised tasks in parallel, then read back the XComs they pushed.
with DAG('pipeline_docker', default_args=default_args) as dag:
    t1 = BashOperator(
        task_id='print_date1',
        bash_command='date')

    t2_1_id = 'do_task_one'
    t2_1 = PythonOperator(
        task_id=t2_1_id,
        provide_context=True,
        op_kwargs={
            'image_name': 'task1',
            'my_id': t2_1_id
        },
        python_callable=launch_docker_container
    )

    t2_2_id = 'do_task_four'
    t2_2 = PythonOperator(
        task_id=t2_2_id,
        provide_context=True,
        op_kwargs={
            'image_name': 'task4',
            'my_id': t2_2_id
        },
        python_callable=launch_docker_container
    )

    # Alternative operator implementations kept for reference:
    # t2_3_id = 'do_docker_container'
    # t2_3 = DockerOperator(
    #     task_id = t2_3_id,
    #     image="task4:latest",
    #     api_version='auto',
    #     auto_remove=True,
    #     command="/bin/sleep 30",
    #     docker_url="unix://var/run/docker.sock",
    #     network_mode="bridge"
    # )
    # t2_4_id = "do_kubernetes"
    # t2_4 = KubernetesPodOperator(
    #     namespace='default',
    #     image="python:3.7",
    #     cmds=["python","-c"],
    #     arguments=["print('hello world')"],
    #     labels={"foo": "bar"},
    #     name="kube_pass",
    #     task_id=t2_4_id,
    #     get_logs=True,
    # )

    t3 = PythonOperator(
        task_id='read_xcoms',
        provide_context=True,
        python_callable=read_xcoms,
        op_kwargs={
            'data_to_read': [t2_1_id, t2_2_id]
        }
    )

    t1_5 = PythonOperator(
        task_id="test_docker",
        python_callable=do_test_docker
    )

    # Dependency chain: the docker smoke test gates both container tasks.
    t1 >> t1_5 >> [t2_1, t2_2] >> t3
| import logging
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
#from airflow.operators.docker_operator import DockerOperator
#from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from datetime import datetime
import docker
import docker.client
log = logging.getLogger(__name__)
default_args = {
'owner': 'richarms',
'start_date': datetime(2019, 8, 29),
}
def read_xcoms(**context):
for idx, task_id in enumerate(context['data_to_read']):
data = context['task_instance'].xcom_pull(task_ids=task_id, key='data')
logging.info(f'[{idx}] I have received data: {data} from task {task_id}')
def launch_docker_container(**context):
# just a mock for now
#logging.info(context['ti'])
#logging.info(context['image_name'])
#my_id = context['my_id']
#context['task_instance'].xcom_push('data', f'my name is {my_id}', context['execution_date'])
image_name = context['image_name']
client: Client = docker.from_env()
log.info(f"Creating image {image_name}")
container = client.create_container(image=image_name)
container_id = container.get('Id')
log.info(f"Running container with id {container_id}")
client.start(container=container_id)
logs = client.logs(container_id, follow=True, stderr=True, stdout=True, stream=True, tail='all')
try:
while True:
l = next(logs)
log.info(f"Task log: {l}")
except StopIteration:
pass
inspect = client.inspect_container(container)
log.info(inspect)
if inspect['State']['ExitCode'] != 0:
raise Exception("Container has not finished with exit code 0")
log.info(f"Task ends!")
my_id = context['my_id']
context['task_instance'].xcom_push('data', f'my name is {my_id}', context['execution_date'])
def do_test_docker():
client = docker.from_env()
for container in client.containers():
logging.info(str(container))
with DAG('pipeline_docker', default_args=default_args) as dag:
t1 = BashOperator(
task_id='print_date1',
bash_command='date')
t2_1_id = 'do_task_one'
t2_1 = PythonOperator(
task_id=t2_1_id,
provide_context=True,
op_kwargs={
'image_name': 'task1',
'my_id': t2_1_id
},
python_callable=launch_docker_container
)
t2_2_id = 'do_task_four'
t2_2 = PythonOperator(
task_id=t2_2_id,
provide_context=True,
op_kwargs={
'image_name': 'task4',
'my_id': t2_2_id
},
python_callable=launch_docker_container
)
# t2_3_id = 'do_docker_container'
# t2_3 = DockerOperator(
# task_id = t2_3_id,
# image="task4:latest",
# api_version='auto',
# auto_remove=True,
# command="/bin/sleep 30",
# docker_url="unix://var/run/docker.sock",
# network_mode="bridge"
# )
# t2_4_id = "do_kubernetes"
# t2_4 = KubernetesPodOperator(
# namespace='default',
# image="python:3.7",
# cmds=["python","-c"],
# arguments=["print('hello world')"],
# labels={"foo": "bar"},
# name="kube_pass",
# task_id=t2_4_id,
# get_logs=True,
# )
t3 = PythonOperator(
task_id='read_xcoms',
provide_context=True,
python_callable=read_xcoms,
op_kwargs={
'data_to_read': [t2_1_id, t2_2_id]
}
)
t1_5 = PythonOperator(
task_id="test_docker",
python_callable=do_test_docker
)
t1 >> t1_5 >> [t2_1, t2_2] >> t3
| en | 0.218822 | #from airflow.operators.docker_operator import DockerOperator #from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator # just a mock for now #logging.info(context['ti']) #logging.info(context['image_name']) #my_id = context['my_id'] #context['task_instance'].xcom_push('data', f'my name is {my_id}', context['execution_date']) # t2_3_id = 'do_docker_container' # t2_3 = DockerOperator( # task_id = t2_3_id, # image="task4:latest", # api_version='auto', # auto_remove=True, # command="/bin/sleep 30", # docker_url="unix://var/run/docker.sock", # network_mode="bridge" # ) # t2_4_id = "do_kubernetes" # t2_4 = KubernetesPodOperator( # namespace='default', # image="python:3.7", # cmds=["python","-c"], # arguments=["print('hello world')"], # labels={"foo": "bar"}, # name="kube_pass", # task_id=t2_4_id, # get_logs=True, # ) | 2.17455 | 2 |
pygui/data/envelope/mpi.py | clark3493/pygui | 0 | 6615752 | <reponame>clark3493/pygui
import os
import multiprocessing as mp
from . import Envelope
# TODO: IMPLEMENT ABILITY TO HANDLE DIFFERENT XNAME/YNAME PER FILEPATH
# TODO: IMPLEMENT ABILITY TO KEEP ALL POINTS WITH MULTIPROCESSING
# TODO: IMPLEMENT MPI INTERFACE
# TODO: IMPLEMENT ABILITY TO PASS ARGS AND KWARGS INTO MPI INTERFACE
class MultiProcessEnvelopeGenerator(object):
    """Computes a combined Envelope from many files using a process pool."""

    def __init__(self, ncpus=None):
        # Default to one worker per available CPU core.
        self.ncpus = ncpus if ncpus is not None else mp.cpu_count()

    def compute_envelope(self, xname, yname, filepaths):
        """Build the envelope of the first file, then fold in the others.

        Each remaining file is processed in a worker process; its envelope
        points are merged into the first file's Envelope, which is returned.
        """
        env0 = Envelope.from_file(xname, yname, filepaths[0])
        inputs = [(xname, yname, filepath) for filepath in filepaths[1:]]
        # Context-manage the pool so the worker processes are always
        # terminated (the original leaked the pool and never joined it).
        with mp.Pool(processes=self.ncpus) as pool:
            data = pool.starmap(self.get_envelope_data, inputs)
        for x, y, parents, indices in data:
            env0.add_points(x, y, parents, indices)
        return env0

    @staticmethod
    def get_envelope_data(xname, yname, filepath):
        """Worker task: return the open (non-closed) envelope data of one file."""
        env = Envelope.from_file(xname, yname, filepath)
        return (env.envelope_x(closed=False),
                env.envelope_y(closed=False),
                env.envelope_runs(closed=False),
                env.envelope_indices(closed=False))
| import os
import multiprocessing as mp
from . import Envelope
# TODO: IMPLEMENT ABILITY TO HANDLE DIFFERENT XNAME/YNAME PER FILEPATH
# TODO: IMPLEMENT ABILITY TO KEEP ALL POINTS WITH MULTIPROCESSING
# TODO: IMPLEMENT MPI INTERFACE
# TODO: IMPLEMENT ABILITY TO PASS ARGS AND KWARGS INTO MPI INTERFACE
class MultiProcessEnvelopeGenerator(object):
def __init__(self, ncpus=None):
self.ncpus = ncpus if ncpus is not None else mp.cpu_count()
def compute_envelope(self, xname, yname, filepaths):
pool = mp.Pool(processes=self.ncpus)
env0 = Envelope.from_file(xname, yname, filepaths[0])
inputs = [(xname, yname, filepath) for filepath in filepaths[1:]]
data = pool.starmap(self.get_envelope_data, inputs)
for x, y, parents, indices in data:
env0.add_points(x, y, parents, indices)
return env0
@staticmethod
def get_envelope_data(xname, yname, filepath):
env = Envelope.from_file(xname, yname, filepath)
return (env.envelope_x(closed=False),
env.envelope_y(closed=False),
env.envelope_runs(closed=False),
env.envelope_indices(closed=False)) | en | 0.343557 | # TODO: IMPLEMENT ABILITY TO HANDLE DIFFERENT XNAME/YNAME PER FILEPATH # TODO: IMPLEMENT ABILITY TO KEEP ALL POINTS WITH MULTIPROCESSING # TODO: IMPLEMENT MPI INTERFACE # TODO: IMPLEMENT ABILITY TO PASS ARGS AND KWARGS INTO MPI INTERFACE | 2.850022 | 3 |
perceptron.py | JagerCox/general-neural-network | 0 | 6615753 | import math
class Perceptron:
    """A unit whose weight is (re)computed by an external code snippet.

    Each raw value added is appended to memory and the snippet file is
    executed with ``values`` bound to that memory; the snippet must assign a
    local ``result``, which becomes the current weight.
    """

    # Class-level defaults; __init__ gives every instance its own copies.
    snippet = ""
    memory_raw = []
    current_weight = -1
    correction_bia = 1  # Total sigma 0-1

    # Constructor
    def __init__(self, snippet_file="medium.snippet"):
        self.memory_raw = []
        self.correction_bia = 1
        self.current_weight = -1
        self.snippet = snippet_file

    # Private
    def _run_snippet_function(self, file_snippet, globals_=None, locals_=None):
        """Execute ``file_snippet`` with the given global/local namespaces.

        SECURITY: this exec()s arbitrary code read from disk -- only point it
        at trusted snippet files. (Parameters renamed so they no longer
        shadow the builtins ``globals``/``locals``.)
        """
        if globals_ is None:
            globals_ = {}
        globals_.update({
            "__file__": file_snippet,
            "__name__": "__main__",
        })
        with open(file_snippet, 'rb') as file:
            exec(compile(file.read(), file_snippet, 'exec'), globals_, locals_)

    # Public
    def add_value_raw(self, value):
        """Append ``value`` to memory and recompute the weight via the snippet."""
        namespace = {}
        self.memory_raw.append(value)
        self._run_snippet_function(self.snippet, {'values': self.memory_raw}, namespace)
        self.current_weight = namespace['result']

    def sigma_value(self, test_value):
        """Return the bias-scaled absolute distance of ``test_value`` from the weight."""
        return math.fabs((self.current_weight - test_value) * self.correction_bia)

    # Public properties
    def set_memory_raw(self, list_values):
        self.memory_raw = list_values

    def get_memory_raw(self):
        return self.memory_raw

    def set_weight(self, weight):
        self.current_weight = weight

    def get_weight(self):
        return self.current_weight

    def set_bias(self, value_bia):
        self.correction_bia = value_bia
def clear_memory(self):
self.memory_raw = [] | import math
class Perceptron:
snippet = ""
memory_raw = []
current_weight = -1
correction_bia = 1 # Total sigma 0-1
# Constructor
def __init__(self, snippet_file="medium.snippet"):
self.memory_raw = []
self.correction_bia = 1
self.current_weight = -1
self.snippet = snippet_file
# Private
def _run_snippet_function(self, file_snippet, globals=None, locals=None):
if globals is None:
globals = {}
globals.update({
"__file__": file_snippet,
"__name__": "__main__",
})
with open(file_snippet, 'rb') as file:
exec(compile(file.read(), file_snippet, 'exec'), globals, locals)
# Public
def add_value_raw(self, value):
locals_ = {}
self.memory_raw.append(value)
self._run_snippet_function(self.snippet, {'values': self.memory_raw}, locals_)
self.current_weight = locals_['result']
def sigma_value(self, test_value):
return math.fabs((self.current_weight - test_value) * self.correction_bia)
# Public properties
def set_memory_raw(self, list_values):
self.memory_raw = list_values
def get_memory_raw(self):
return self.memory_raw
def set_weight(self, weight):
self.current_weight = weight
def get_weight(self):
return self.current_weight
def set_bias(self, value_bia):
self.correction_bia = value_bia
def clear_memory(self):
self.memory_raw = [] | en | 0.640582 | # Total sigma 0-1 # Constructor # Private # Public # Public properties | 3.095997 | 3 |
nncore/nn/builder.py | yeliudev/nncore | 6 | 6615754 | <reponame>yeliudev/nncore
# Copyright (c) <NAME>. All rights reserved.
import torch.nn as nn
from nncore import Registry, build_object
from nncore.parallel import NNDataParallel, NNDistributedDataParallel
from .bundle import ModuleList, Sequential
# Root registry for all model components; the sub-registries below are
# children of MODELS so build_object() can resolve any registered component
# through a single namespace.
MODELS = Registry('model')
ACTIVATIONS = Registry('activation', parent=MODELS)
CONVS = Registry('conv', parent=MODELS)
MESSAGE_PASSINGS = Registry('message passing', parent=MODELS)
NORMS = Registry('norm', parent=MODELS)
LOSSES = Registry('loss', parent=MODELS)
MODULES = Registry('module', parent=MODELS)
def build_model(cfg, *args, bundler=None, dist=None, **kwargs):
    """
    Build a general model from a dict or str. This method searches for modules
    in :obj:`MODELS` first, and then falls back to :obj:`torch.nn`.

    Args:
        cfg (dict | str): The config or name of the model.
        bundler (str | None, optional): The type of bundler for multiple
            models. Expected values include ``'sequential'``, ``'modulelist'``,
            and ``None``. Default: ``None``.
        dist (bool | None, optional): Whether the model is distributed. If not
            specified, the model will not be wrapped. Default: ``None``.

    Returns:
        :obj:`nn.Module`: The constructed model.
    """
    assert bundler in ('sequential', 'modulelist', None)
    model = build_object(cfg, [MODELS, nn], args=args, **kwargs)
    if isinstance(model, (list, tuple)):
        # Drop entries whose config resolved to nothing.
        model = [m for m in model if m is not None]
        if bundler == 'sequential' and len(model) > 1:
            model = Sequential(model)
    elif model is None:
        # A single config that resolved to nothing: build nothing at all.
        return
    if bundler == 'modulelist':
        model = ModuleList(model)
    # dist=True -> DDP wrapper, dist=False -> DP wrapper, dist=None -> bare.
    if dist:
        model = NNDistributedDataParallel(model)
    elif dist is not None:
        model = NNDataParallel(model)
    return model
def build_act_layer(cfg, *args, **kwargs):
    """
    Construct an activation layer from a config dict or a layer name. Lookup
    is attempted in :obj:`ACTIVATIONS` first, falling back to :obj:`torch.nn`.

    Args:
        cfg (dict | str): The config or name of the layer.

    Returns:
        :obj:`nn.Module`: The constructed layer.
    """
    search_space = [ACTIVATIONS, nn]
    return build_object(cfg, search_space, args=args, **kwargs)
def build_conv_layer(cfg, *args, **kwargs):
    """
    Construct a convolution layer from a config dict or a layer name. Lookup
    is attempted in :obj:`CONVS` first, falling back to :obj:`torch.nn`.

    Args:
        cfg (dict | str): The config or name of the layer.

    Returns:
        :obj:`nn.Module`: The constructed layer.
    """
    search_space = [CONVS, nn]
    return build_object(cfg, search_space, args=args, **kwargs)
def build_msg_pass_layer(cfg, *args, **kwargs):
    """
    Construct a message passing layer from a config dict or a layer name.
    Lookup is attempted in :obj:`MESSAGE_PASSINGS` first, falling back to
    :obj:`torch.nn`.

    Args:
        cfg (dict | str): The config or name of the layer.

    Returns:
        :obj:`nn.Module`: The constructed layer.
    """
    search_space = [MESSAGE_PASSINGS, nn]
    return build_object(cfg, search_space, args=args, **kwargs)
def build_norm_layer(cfg, *args, dims=None, **kwargs):
    """
    Construct a normalization layer from a config dict or a layer name.
    Lookup is attempted in :obj:`NORMS` first, falling back to
    :obj:`torch.nn`.

    Args:
        cfg (dict | str): The config or name of the layer.
        dims (int | None, optional): The input dimensions of the layer.
            Default: ``None``.

    Returns:
        :obj:`nn.Module`: The constructed layer.
    """
    if isinstance(cfg, str):
        cfg = dict(type=cfg)
    elif not isinstance(cfg, dict):
        # Anything that is not a config is passed through untouched.
        return cfg
    norm_cfg = cfg.copy()
    if dims is not None and norm_cfg['type'] not in NORMS.group('drop'):
        # Each normalization family names its size argument differently.
        size_keys = {'LN': 'normalized_shape', 'GN': 'num_channels'}
        key = size_keys.get(norm_cfg['type'], 'num_features')
        norm_cfg.setdefault(key, dims)
    return build_object(norm_cfg, [NORMS, nn], args=args, **kwargs)
def build_loss(cfg, *args, **kwargs):
    """
    Construct a loss module from a config dict or a module name. Lookup is
    attempted in :obj:`LOSSES` first, falling back to :obj:`torch.nn`.

    Args:
        cfg (dict | str): The config or name of the module.

    Returns:
        :obj:`nn.Module`: The constructed module.
    """
    search_space = [LOSSES, nn]
    return build_object(cfg, search_space, args=args, **kwargs)
| # Copyright (c) <NAME>. All rights reserved.
import torch.nn as nn
from nncore import Registry, build_object
from nncore.parallel import NNDataParallel, NNDistributedDataParallel
from .bundle import ModuleList, Sequential
MODELS = Registry('model')
ACTIVATIONS = Registry('activation', parent=MODELS)
CONVS = Registry('conv', parent=MODELS)
MESSAGE_PASSINGS = Registry('message passing', parent=MODELS)
NORMS = Registry('norm', parent=MODELS)
LOSSES = Registry('loss', parent=MODELS)
MODULES = Registry('module', parent=MODELS)
def build_model(cfg, *args, bundler=None, dist=None, **kwargs):
"""
Build a general model from a dict or str. This method searches for modules
in :obj:`MODELS` first, and then fall back to :obj:`torch.nn`.
Args:
cfg (dict | str): The config or name of the model.
bundler (str | None, optional): The type of bundler for multiple
models. Expected values include ``'sequential'``, ``'modulelist'``,
and ``None``. Default: ``None``.
dist (bool | None, optional): Whether the model is distributed. If not
specified, the model will not be wrapped. Default: ``None``.
Returns:
:obj:`nn.Module`: The constructed model.
"""
assert bundler in ('sequential', 'modulelist', None)
model = build_object(cfg, [MODELS, nn], args=args, **kwargs)
if isinstance(model, (list, tuple)):
model = [m for m in model if m is not None]
if bundler == 'sequential' and len(model) > 1:
model = Sequential(model)
elif model is None:
return
if bundler == 'modulelist':
model = ModuleList(model)
if dist:
model = NNDistributedDataParallel(model)
elif dist is not None:
model = NNDataParallel(model)
return model
def build_act_layer(cfg, *args, **kwargs):
"""
Build an activation layer from a dict or str. This method searches for
layers in :obj:`ACTIVATIONS` first, and then fall back to :obj:`torch.nn`.
Args:
cfg (dict | str): The config or name of the layer.
Returns:
:obj:`nn.Module`: The constructed layer.
"""
return build_object(cfg, [ACTIVATIONS, nn], args=args, **kwargs)
def build_conv_layer(cfg, *args, **kwargs):
"""
Build a convolution layer from a dict or str. This method searches for
layers in :obj:`CONVS` first, and then fall back to :obj:`torch.nn`.
Args:
cfg (dict | str): The config or name of the layer.
Returns:
:obj:`nn.Module`: The constructed layer.
"""
return build_object(cfg, [CONVS, nn], args=args, **kwargs)
def build_msg_pass_layer(cfg, *args, **kwargs):
"""
Build a message passing layer from a dict or str. This method searches for
layers in :obj:`MESSAGE_PASSINGS` first, and then fall back to
:obj:`torch.nn`.
Args:
cfg (dict | str): The config or name of the layer.
Returns:
:obj:`nn.Module`: The constructed layer.
"""
return build_object(cfg, [MESSAGE_PASSINGS, nn], args=args, **kwargs)
def build_norm_layer(cfg, *args, dims=None, **kwargs):
"""
Build a normalization layer from a dict or str. This method searches for
layers in :obj:`NORMS` first, and then fall back to :obj:`torch.nn`.
Args:
cfg (dict | str): The config or name of the layer.
dims (int | None, optional): The input dimensions of the layer.
Default: ``None``.
Returns:
:obj:`nn.Module`: The constructed layer.
"""
if isinstance(cfg, str):
cfg = dict(type=cfg)
elif not isinstance(cfg, dict):
return cfg
_cfg = cfg.copy()
if dims is not None and _cfg['type'] not in NORMS.group('drop'):
if _cfg['type'] == 'LN':
key = 'normalized_shape'
elif _cfg['type'] == 'GN':
key = 'num_channels'
else:
key = 'num_features'
_cfg.setdefault(key, dims)
return build_object(_cfg, [NORMS, nn], args=args, **kwargs)
def build_loss(cfg, *args, **kwargs):
"""
Build a loss module from a dict or str. This method searches for modules in
:obj:`LOSSES` first, and then fall back to :obj:`torch.nn`.
Args:
cfg (dict | str): The config or name of the module.
Returns:
:obj:`nn.Module`: The constructed module.
"""
return build_object(cfg, [LOSSES, nn], args=args, **kwargs) | en | 0.581982 | # Copyright (c) <NAME>. All rights reserved. Build a general model from a dict or str. This method searches for modules in :obj:`MODELS` first, and then fall back to :obj:`torch.nn`. Args: cfg (dict | str): The config or name of the model. bundler (str | None, optional): The type of bundler for multiple models. Expected values include ``'sequential'``, ``'modulelist'``, and ``None``. Default: ``None``. dist (bool | None, optional): Whether the model is distributed. If not specified, the model will not be wrapped. Default: ``None``. Returns: :obj:`nn.Module`: The constructed model. Build an activation layer from a dict or str. This method searches for layers in :obj:`ACTIVATIONS` first, and then fall back to :obj:`torch.nn`. Args: cfg (dict | str): The config or name of the layer. Returns: :obj:`nn.Module`: The constructed layer. Build a convolution layer from a dict or str. This method searches for layers in :obj:`CONVS` first, and then fall back to :obj:`torch.nn`. Args: cfg (dict | str): The config or name of the layer. Returns: :obj:`nn.Module`: The constructed layer. Build a message passing layer from a dict or str. This method searches for layers in :obj:`MESSAGE_PASSINGS` first, and then fall back to :obj:`torch.nn`. Args: cfg (dict | str): The config or name of the layer. Returns: :obj:`nn.Module`: The constructed layer. Build a normalization layer from a dict or str. This method searches for layers in :obj:`NORMS` first, and then fall back to :obj:`torch.nn`. Args: cfg (dict | str): The config or name of the layer. dims (int | None, optional): The input dimensions of the layer. Default: ``None``. Returns: :obj:`nn.Module`: The constructed layer. Build a loss module from a dict or str. This method searches for modules in :obj:`LOSSES` first, and then fall back to :obj:`torch.nn`. Args: cfg (dict | str): The config or name of the module. 
Returns: :obj:`nn.Module`: The constructed module. | 2.28021 | 2 |
rest-service/manager_rest/test/endpoints/test_execution_schedules.py | ilan-WS/cloudify-manager | 124 | 6615755 | from datetime import datetime, timedelta
from manager_rest.test.attribute import attr
from manager_rest.test.base_test import BaseServerTestCase, LATEST_API_VERSION
from cloudify_rest_client.exceptions import CloudifyClientError
@attr(client_min_version=LATEST_API_VERSION,
      client_max_version=LATEST_API_VERSION)
class ExecutionSchedulesTestCase(BaseServerTestCase):
    """REST-endpoint tests for creating, listing, updating and deleting
    execution schedules on a deployment."""

    DEPLOYMENT_ID = 'deployment'
    # Timestamp format returned by the REST API for schedule times.
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    # NOTE(review): these timestamps are evaluated once at class-definition
    # (import) time, not per test -- confirm that is acceptable for long runs.
    an_hour_from_now = \
        datetime.utcnow().replace(microsecond=0) + timedelta(hours=1)
    two_hours_from_now = \
        datetime.utcnow().replace(microsecond=0) + timedelta(hours=2)
    three_hours_from_now = \
        datetime.utcnow().replace(microsecond=0) + timedelta(hours=3)
    three_weeks_from_now = \
        datetime.utcnow().replace(microsecond=0) + timedelta(weeks=3)
    deployment_id = None

    def setUp(self):
        # Every test runs against a freshly deployed blueprint.
        super(ExecutionSchedulesTestCase, self).setUp()
        _, self.deployment_id, _, _ = self.put_deployment(self.DEPLOYMENT_ID)

    def test_schedule_create(self):
        """Creating a schedule returns all the fields that were requested."""
        schedule_id = 'sched-1'
        workflow_id = 'install'
        schedule = self.client.execution_schedules.create(
            schedule_id, self.deployment_id, workflow_id,
            since=self.an_hour_from_now, recurrence='1 minutes', count=5)
        self.assertEqual(schedule.id, schedule_id)
        self.assertEqual(schedule.deployment_id, self.deployment_id)
        self.assertEqual(schedule.workflow_id, workflow_id)
        self.assertEqual(datetime.strptime(schedule.since, self.fmt),
                         self.an_hour_from_now)
        self.assertEqual(len(schedule['all_next_occurrences']), 5)
        self.assertEqual(
            datetime.strptime(schedule['next_occurrence'], self.fmt),
            self.an_hour_from_now)
        self.assertEqual(schedule['slip'], 0)
        self.assertEqual(schedule['stop_on_fail'], False)

    def test_schedule_create_weekdays(self):
        schedule = self.client.execution_schedules.create(
            'sched-weekdays', self.deployment_id, 'install',
            since=self.an_hour_from_now, until=self.three_weeks_from_now,
            recurrence='1 days', weekdays=['mo', 'tu', 'we', 'th'])
        self.assertEqual(len(schedule['all_next_occurrences']), 12)  # 3w * 4d

    def test_schedules_list(self):
        schedule_ids = ['sched-1', 'sched-2']
        for schedule_id in schedule_ids:
            self.client.execution_schedules.create(
                schedule_id, self.deployment_id, 'install',
                since=self.an_hour_from_now, recurrence='1 minutes', count=5)
        schedules = self.client.execution_schedules.list()
        self.assertEqual(len(schedules), 2)
        self.assertSetEqual({s.id for s in schedules}, set(schedule_ids))

    def test_schedule_delete(self):
        self.client.execution_schedules.create(
            'delete-me', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='1 minutes', count=5)
        self.assertEqual(len(self.client.execution_schedules.list()), 1)
        self.client.execution_schedules.delete('delete-me', self.deployment_id)
        self.assertEqual(len(self.client.execution_schedules.list()), 0)

    def test_schedule_update(self):
        schedule = self.client.execution_schedules.create(
            'update-me', self.deployment_id, 'install',
            since=self.an_hour_from_now, until=self.two_hours_from_now,
            recurrence='1 minutes')
        # `until` is inclusive
        self.assertEqual(len(schedule['all_next_occurrences']), 61)
        self.assertEqual(schedule['rule']['recurrence'], '1 minutes')
        self.assertEqual(schedule['slip'], 0)
        self.client.execution_schedules.update(
            'update-me', self.deployment_id, recurrence='5 minutes', slip=30)
        # get the schedule from the DB and not directly from .update endpoint
        schedule = self.client.execution_schedules.get('update-me',
                                                       self.deployment_id)
        self.assertEqual(len(schedule['all_next_occurrences']), 13)  # 60/5+1
        self.assertEqual(schedule['rule']['recurrence'], '5 minutes')
        self.assertEqual(schedule['slip'], 30)
        self.client.execution_schedules.update(
            'update-me', self.deployment_id, until=self.three_hours_from_now)
        schedule = self.client.execution_schedules.get('update-me',
                                                       self.deployment_id)
        self.assertEqual(len(schedule['all_next_occurrences']), 25)  # 2*60/5+1

    def test_schedule_get_invalid_id(self):
        self.assertRaisesRegex(
            CloudifyClientError,
            '404: Requested `ExecutionSchedule` .* was not found',
            self.client.execution_schedules.get,
            'nonsuch',
            self.deployment_id
        )

    def test_schedule_create_no_since(self):
        # `since` is validated client-side, hence AssertionError (not a 400).
        self.assertRaises(
            AssertionError,
            self.client.execution_schedules.create,
            'some_id', self.deployment_id, 'some_workflow',
            recurrence='1 minutes', count=5
        )

    def test_schedule_create_invalid_time_format(self):
        self.assertRaisesRegex(
            AttributeError,
            "'str' object has no attribute 'isoformat'",
            self.client.execution_schedules.create,
            'some_id', self.deployment_id, 'install',
            since='long ago', recurrence='1 minutes', count=5
        )

    def test_schedule_create_invalid_workflow(self):
        self.assertRaisesRegex(
            CloudifyClientError,
            '400: Workflow some_workflow does not exist',
            self.client.execution_schedules.create,
            'some_id', self.deployment_id, 'some_workflow',
            since=self.an_hour_from_now, recurrence='1 minutes', count=5,
        )

    def test_schedule_invalid_weekdays(self):
        # Invalid weekday names are rejected both on create and on update.
        self.assertRaisesRegex(
            CloudifyClientError,
            '400:.* invalid weekday',
            self.client.execution_schedules.create,
            'bad-weekdays', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours',
            weekdays=['oneday', 'someday']
        )
        self.client.execution_schedules.create(
            'good-weekdays', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours', count=6,
            weekdays=['mo', 'tu']
        )
        self.assertRaisesRegex(
            CloudifyClientError,
            '400:.* invalid weekday',
            self.client.execution_schedules.update,
            'good-weekdays', self.deployment_id, weekdays=['oneday', 'someday']
        )

    def test_schedule_create_invalid_complex_weekdays(self):
        self.assertRaisesRegex(
            CloudifyClientError,
            '400:.* invalid weekday',
            self.client.execution_schedules.create,
            'bad-complex-wd', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours',
            weekdays=['5tu']
        )

    def test_schedule_create_invalid_recurrence_with_complex_weekdays(self):
        # Complex weekday expressions (e.g. '2mo') require day-less recurrence.
        self.assertRaisesRegex(
            CloudifyClientError,
            '400:.* complex weekday expression',
            self.client.execution_schedules.create,
            'bad-complex-wd', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours',
            weekdays=['2mo', 'l-tu']
        )

    def test_schedule_invalid_repetition_without_recurrence(self):
        recurrence_error = \
            '400: recurrence must be specified for execution count ' \
            'larger than 1'
        self.assertRaisesRegex(
            CloudifyClientError,
            recurrence_error,
            self.client.execution_schedules.create,
            'no-recurrence-no-count', self.deployment_id, 'uninstall',
            since=self.an_hour_from_now, weekdays=['su', 'mo', 'tu'],
        )
        self.client.execution_schedules.create(
            'no-recurrence-count-1', self.deployment_id, 'install',
            since=self.an_hour_from_now, count=1,
        )
        self.assertRaisesRegex(
            CloudifyClientError,
            recurrence_error,
            self.client.execution_schedules.update,
            'no-recurrence-count-1', self.deployment_id, count=2
        )

    def test_schedule_create_invalid_recurrence(self):
        self.assertRaisesRegex(
            CloudifyClientError,
            '400: `10 doboshes` is not a legal recurrence expression.',
            self.client.execution_schedules.create,
            'bad-freq', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='10 doboshes'
        )
| from datetime import datetime, timedelta
from manager_rest.test.attribute import attr
from manager_rest.test.base_test import BaseServerTestCase, LATEST_API_VERSION
from cloudify_rest_client.exceptions import CloudifyClientError
@attr(client_min_version=LATEST_API_VERSION,
client_max_version=LATEST_API_VERSION)
class ExecutionSchedulesTestCase(BaseServerTestCase):
DEPLOYMENT_ID = 'deployment'
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
an_hour_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=1)
two_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=2)
three_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=3)
three_weeks_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(weeks=3)
deployment_id = None
def setUp(self):
super(ExecutionSchedulesTestCase, self).setUp()
_, self.deployment_id, _, _ = self.put_deployment(self.DEPLOYMENT_ID)
def test_schedule_create(self):
schedule_id = 'sched-1'
workflow_id = 'install'
schedule = self.client.execution_schedules.create(
schedule_id, self.deployment_id, workflow_id,
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(schedule.id, schedule_id)
self.assertEqual(schedule.deployment_id, self.deployment_id)
self.assertEqual(schedule.workflow_id, workflow_id)
self.assertEqual(datetime.strptime(schedule.since, self.fmt),
self.an_hour_from_now)
self.assertEqual(len(schedule['all_next_occurrences']), 5)
self.assertEqual(
datetime.strptime(schedule['next_occurrence'], self.fmt),
self.an_hour_from_now)
self.assertEqual(schedule['slip'], 0)
self.assertEqual(schedule['stop_on_fail'], False)
def test_schedule_create_weekdays(self):
schedule = self.client.execution_schedules.create(
'sched-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.three_weeks_from_now,
recurrence='1 days', weekdays=['mo', 'tu', 'we', 'th'])
self.assertEqual(len(schedule['all_next_occurrences']), 12) # 3w * 4d
def test_schedules_list(self):
schedule_ids = ['sched-1', 'sched-2']
for schedule_id in schedule_ids:
self.client.execution_schedules.create(
schedule_id, self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
schedules = self.client.execution_schedules.list()
self.assertEqual(len(schedules), 2)
self.assertSetEqual({s.id for s in schedules}, set(schedule_ids))
def test_schedule_delete(self):
    """Deleting a schedule removes it from the listing."""
    schedules_api = self.client.execution_schedules
    schedules_api.create(
        'delete-me', self.deployment_id, 'install',
        since=self.an_hour_from_now, recurrence='1 minutes', count=5)
    self.assertEqual(len(schedules_api.list()), 1)
    schedules_api.delete('delete-me', self.deployment_id)
    self.assertEqual(len(schedules_api.list()), 0)
def test_schedule_update(self):
    """update() changes recurrence/slip/until and recomputes occurrences."""
    schedules_api = self.client.execution_schedules
    schedule = schedules_api.create(
        'update-me', self.deployment_id, 'install',
        since=self.an_hour_from_now, until=self.two_hours_from_now,
        recurrence='1 minutes')
    # one occurrence per minute over an hour; `until` is inclusive
    self.assertEqual(len(schedule['all_next_occurrences']), 61)
    self.assertEqual(schedule['rule']['recurrence'], '1 minutes')
    self.assertEqual(schedule['slip'], 0)
    schedules_api.update(
        'update-me', self.deployment_id, recurrence='5 minutes', slip=30)
    # re-fetch from storage instead of trusting the .update response
    schedule = schedules_api.get('update-me', self.deployment_id)
    self.assertEqual(len(schedule['all_next_occurrences']), 13)  # 60/5+1
    self.assertEqual(schedule['rule']['recurrence'], '5 minutes')
    self.assertEqual(schedule['slip'], 30)
    schedules_api.update(
        'update-me', self.deployment_id, until=self.three_hours_from_now)
    schedule = schedules_api.get('update-me', self.deployment_id)
    self.assertEqual(len(schedule['all_next_occurrences']), 25)  # 2*60/5+1
def test_schedule_get_invalid_id(self):
    """Fetching a nonexistent schedule raises a 404 error."""
    with self.assertRaisesRegex(
            CloudifyClientError,
            '404: Requested `ExecutionSchedule` .* was not found'):
        self.client.execution_schedules.get('nonsuch', self.deployment_id)
def test_schedule_create_no_since(self):
    """Creating a schedule without `since` is rejected client-side."""
    with self.assertRaises(AssertionError):
        self.client.execution_schedules.create(
            'some_id', self.deployment_id, 'some_workflow',
            recurrence='1 minutes', count=5)
def test_schedule_create_invalid_time_format(self):
    """`since` must be a datetime object; a plain string blows up."""
    with self.assertRaisesRegex(
            AttributeError,
            "'str' object has no attribute 'isoformat'"):
        self.client.execution_schedules.create(
            'some_id', self.deployment_id, 'install',
            since='long ago', recurrence='1 minutes', count=5)
def test_schedule_create_invalid_workflow(self):
    """Scheduling a workflow the deployment lacks is a 400 error."""
    with self.assertRaisesRegex(
            CloudifyClientError,
            '400: Workflow some_workflow does not exist'):
        self.client.execution_schedules.create(
            'some_id', self.deployment_id, 'some_workflow',
            since=self.an_hour_from_now, recurrence='1 minutes', count=5)
def test_schedule_invalid_weekdays(self):
    """Unknown weekday names are rejected on both create and update."""
    with self.assertRaisesRegex(CloudifyClientError,
                                '400:.* invalid weekday'):
        self.client.execution_schedules.create(
            'bad-weekdays', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours',
            weekdays=['oneday', 'someday'])
    # valid weekdays are accepted...
    self.client.execution_schedules.create(
        'good-weekdays', self.deployment_id, 'install',
        since=self.an_hour_from_now, recurrence='4 hours', count=6,
        weekdays=['mo', 'tu'])
    # ...but updating them to invalid names is still rejected
    with self.assertRaisesRegex(CloudifyClientError,
                                '400:.* invalid weekday'):
        self.client.execution_schedules.update(
            'good-weekdays', self.deployment_id,
            weekdays=['oneday', 'someday'])
def test_schedule_create_invalid_complex_weekdays(self):
    """The weekday expression '5tu' is rejected as invalid."""
    with self.assertRaisesRegex(CloudifyClientError,
                                '400:.* invalid weekday'):
        self.client.execution_schedules.create(
            'bad-complex-wd', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours',
            weekdays=['5tu'])
def test_schedule_create_invalid_recurrence_with_complex_weekdays(self):
    """Complex weekday expressions need a compatible recurrence."""
    with self.assertRaisesRegex(CloudifyClientError,
                                '400:.* complex weekday expression'):
        self.client.execution_schedules.create(
            'bad-complex-wd', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='4 hours',
            weekdays=['2mo', 'l-tu'])
def test_schedule_invalid_repetition_without_recurrence(self):
    """count > 1 without a recurrence is rejected; count == 1 is fine."""
    recurrence_error = (
        '400: recurrence must be specified for execution count '
        'larger than 1')
    with self.assertRaisesRegex(CloudifyClientError, recurrence_error):
        self.client.execution_schedules.create(
            'no-recurrence-no-count', self.deployment_id, 'uninstall',
            since=self.an_hour_from_now, weekdays=['su', 'mo', 'tu'])
    # a single execution needs no recurrence...
    self.client.execution_schedules.create(
        'no-recurrence-count-1', self.deployment_id, 'install',
        since=self.an_hour_from_now, count=1)
    # ...but raising the count later without one is still rejected
    with self.assertRaisesRegex(CloudifyClientError, recurrence_error):
        self.client.execution_schedules.update(
            'no-recurrence-count-1', self.deployment_id, count=2)
def test_schedule_create_invalid_recurrence(self):
    """A recurrence expression the API cannot parse is a 400 error."""
    with self.assertRaisesRegex(
            CloudifyClientError,
            '400: `10 doboshes` is not a legal recurrence expression.'):
        self.client.execution_schedules.create(
            'bad-freq', self.deployment_id, 'install',
            since=self.an_hour_from_now, recurrence='10 doboshes')
| en | 0.727447 | # 3w * 4d # `until` is inclusive # get the schedule from the DB and not directly from .update endpoint # 60/5+1 # 2*60/5+1 | 1.952975 | 2 |
src/wrapper/decision_extractor_table_xy.py | cfrentze/datasheet-scrubber | 2 | 6615756 | #MIT License
#Copyright (c) 2018 The University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#removed broken comments
import sublist_equal_element
import clean_array_single
import copy
def decision_extractor_table_xy(all_row,x_row_counter,x_column_counter,x_type,y_row_counter,y_column_counter,y_kind, test_row, test_col):
# print(x_type)
# print("(attribute) y: ", x_row_counter, " (attribute) x: ", x_column_counter)
# print(test_row, " ", test_col)
result={}
No_title_count=0
title_count=0
clean_titles = [] #needed for some edge case... not sure why... AD7183
if len(y_row_counter)!=0:#CSV page includes spec?Yes
sorted_row=sublist_equal_element.sublist_equal_element(x_row_counter)
for s in range(0,len(sorted_row)):
clean_tiltes = []
if(sorted_row[s][0]-1 >= 0):
clean_titles=clean_array_single.clean_array_single(all_row[sorted_row[s][0]-1])##why we need this? add to pretify ///gives row above min, typ, max
if((len(clean_tiltes) == 0) and sorted_row[s][0]-2 >= 0 ):
clean_titles=clean_array_single.clean_array_single(all_row[sorted_row[s][0]-2])##why we need this? add to pretify ///gives row 2 above min, typ, max
nothing = True
for x in all_row[y_row_counter[0]][1:]:
if(x.replace("-","").replace("+","").replace(".","").replace("?","").replace("±","").isdigit()):
nothing = False
too_long = False #here in case the element is at the bottom of all_row to make sure the index wont go out of bounds
try:
garbage = all_row[y_row_counter[0]+1][y_column_counter[0]]
except:
too_long = True
pass
if (len(clean_titles)!=0): #added incase min/typ/max are in row 0
# print("Path: a")
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
# if s==len(sorted_row)-1: #last element
if sorted_row[s][j]<y_row_counter[k]: #Keyword under Max/Min/Typ
dic_title_accessory=[clean_titles[j],str(title_count)]
dic_title="_".join(dic_title_accessory)
try: #needed in case the csv is not rectangular
result[dic_title]=all_row[y_row_counter[k]][x_column_counter[j]]
title_count+=1
except:
pass
elif (not too_long and all_row[y_row_counter[0]+1][y_column_counter[0]] == "" and test_col and all_row[y_row_counter[0]][test_col[0]] != "" and all_row[y_row_counter[0]+1][test_col[0]] != ""): #LDO ADP7185
# print("Path: b")
iter = 0
try:
while((all_row[y_row_counter[0]+iter][y_column_counter[0]] == "" or iter == 0) and all_row[y_row_counter[0]+iter][test_col[0]] != ""):
useful = False
temp = copy.deepcopy(all_row[y_row_counter[0]+iter])
temp.pop(test_col[0])
# print(temp)
for r in temp:
if(r != ''):
useful = True
if(useful):
dic_title_accessory=[all_row[y_row_counter[0]+iter][test_col[0]],str(title_count)]
dic_title="_".join(dic_title_accessory)
result[dic_title]=all_row[y_row_counter[0]+iter][x_column_counter[0]]
title_count+=1
iter+=1
except:
pass
break
elif (not too_long and (all_row[y_row_counter[0]+1][y_column_counter[0]] == "") and nothing): #wierd case //might need to make broader //perfect split
# print("Path: c")
if (all_row[y_row_counter[0] - 1][y_column_counter[0]] == ""): #look above
temp = all_row[y_row_counter[0] - 1][y_column_counter[0] + 1]
if(temp != ""):
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
dic_title_accessory=[temp,str(title_count)]
dic_title="_".join(dic_title_accessory)
result[dic_title]=all_row[y_row_counter[k]-1][x_column_counter[j]]
title_count+=1
if (all_row[y_row_counter[0] + 1][y_column_counter[0]] == ""): #look below
temp = all_row[y_row_counter[0] + 1][y_column_counter[0] + 1]
if(temp != ""):
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
dic_title_accessory=[temp,str(title_count)]
dic_title="_".join(dic_title_accessory)
result[dic_title]=all_row[y_row_counter[k]+1][x_column_counter[j]]
title_count+=1
break
else: #basic no title
# print("Path: d")
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
# if s==len(sorted_row)-1:
if sorted_row[s][j]<y_row_counter[k]:
result[No_title_count]=all_row[y_row_counter[k]][x_column_counter[j]]
No_title_count+=1
else:
result["nothing"]="nothing"
return result
| #MIT License
#Copyright (c) 2018 The University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#removed broken comments
import sublist_equal_element
import clean_array_single
import copy
def decision_extractor_table_xy(all_row,x_row_counter,x_column_counter,x_type,y_row_counter,y_column_counter,y_kind, test_row, test_col):
# print(x_type)
# print("(attribute) y: ", x_row_counter, " (attribute) x: ", x_column_counter)
# print(test_row, " ", test_col)
result={}
No_title_count=0
title_count=0
clean_titles = [] #needed for some edge case... not sure why... AD7183
if len(y_row_counter)!=0:#CSV page includes spec?Yes
sorted_row=sublist_equal_element.sublist_equal_element(x_row_counter)
for s in range(0,len(sorted_row)):
clean_tiltes = []
if(sorted_row[s][0]-1 >= 0):
clean_titles=clean_array_single.clean_array_single(all_row[sorted_row[s][0]-1])##why we need this? add to pretify ///gives row above min, typ, max
if((len(clean_tiltes) == 0) and sorted_row[s][0]-2 >= 0 ):
clean_titles=clean_array_single.clean_array_single(all_row[sorted_row[s][0]-2])##why we need this? add to pretify ///gives row 2 above min, typ, max
nothing = True
for x in all_row[y_row_counter[0]][1:]:
if(x.replace("-","").replace("+","").replace(".","").replace("?","").replace("±","").isdigit()):
nothing = False
too_long = False #here in case the element is at the bottom of all_row to make sure the index wont go out of bounds
try:
garbage = all_row[y_row_counter[0]+1][y_column_counter[0]]
except:
too_long = True
pass
if (len(clean_titles)!=0): #added incase min/typ/max are in row 0
# print("Path: a")
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
# if s==len(sorted_row)-1: #last element
if sorted_row[s][j]<y_row_counter[k]: #Keyword under Max/Min/Typ
dic_title_accessory=[clean_titles[j],str(title_count)]
dic_title="_".join(dic_title_accessory)
try: #needed in case the csv is not rectangular
result[dic_title]=all_row[y_row_counter[k]][x_column_counter[j]]
title_count+=1
except:
pass
elif (not too_long and all_row[y_row_counter[0]+1][y_column_counter[0]] == "" and test_col and all_row[y_row_counter[0]][test_col[0]] != "" and all_row[y_row_counter[0]+1][test_col[0]] != ""): #LDO ADP7185
# print("Path: b")
iter = 0
try:
while((all_row[y_row_counter[0]+iter][y_column_counter[0]] == "" or iter == 0) and all_row[y_row_counter[0]+iter][test_col[0]] != ""):
useful = False
temp = copy.deepcopy(all_row[y_row_counter[0]+iter])
temp.pop(test_col[0])
# print(temp)
for r in temp:
if(r != ''):
useful = True
if(useful):
dic_title_accessory=[all_row[y_row_counter[0]+iter][test_col[0]],str(title_count)]
dic_title="_".join(dic_title_accessory)
result[dic_title]=all_row[y_row_counter[0]+iter][x_column_counter[0]]
title_count+=1
iter+=1
except:
pass
break
elif (not too_long and (all_row[y_row_counter[0]+1][y_column_counter[0]] == "") and nothing): #wierd case //might need to make broader //perfect split
# print("Path: c")
if (all_row[y_row_counter[0] - 1][y_column_counter[0]] == ""): #look above
temp = all_row[y_row_counter[0] - 1][y_column_counter[0] + 1]
if(temp != ""):
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
dic_title_accessory=[temp,str(title_count)]
dic_title="_".join(dic_title_accessory)
result[dic_title]=all_row[y_row_counter[k]-1][x_column_counter[j]]
title_count+=1
if (all_row[y_row_counter[0] + 1][y_column_counter[0]] == ""): #look below
temp = all_row[y_row_counter[0] + 1][y_column_counter[0] + 1]
if(temp != ""):
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
dic_title_accessory=[temp,str(title_count)]
dic_title="_".join(dic_title_accessory)
result[dic_title]=all_row[y_row_counter[k]+1][x_column_counter[j]]
title_count+=1
break
else: #basic no title
# print("Path: d")
for j in range(0,len(sorted_row[s])):
for k in range(0,len(y_row_counter)):
# if s==len(sorted_row)-1:
if sorted_row[s][j]<y_row_counter[k]:
result[No_title_count]=all_row[y_row_counter[k]][x_column_counter[j]]
No_title_count+=1
else:
result["nothing"]="nothing"
return result
| en | 0.689712 | #MIT License #Copyright (c) 2018 The University of Michigan #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. #removed broken comments # print(x_type) # print("(attribute) y: ", x_row_counter, " (attribute) x: ", x_column_counter) # print(test_row, " ", test_col) #needed for some edge case... not sure why... AD7183 #CSV page includes spec?Yes ##why we need this? add to pretify ///gives row above min, typ, max ##why we need this? add to pretify ///gives row 2 above min, typ, max #here in case the element is at the bottom of all_row to make sure the index wont go out of bounds #added incase min/typ/max are in row 0 # print("Path: a") # if s==len(sorted_row)-1: #last element #Keyword under Max/Min/Typ #needed in case the csv is not rectangular #LDO ADP7185 # print("Path: b") # print(temp) #wierd case //might need to make broader //perfect split # print("Path: c") #look above #look below #basic no title # print("Path: d") # if s==len(sorted_row)-1: | 1.547855 | 2 |
text2graph/tokenizer_text.py | Brayan2016Linux/text2graph | 0 | 6615757 | #!/usr/bin/python3
# =============================================================================
# Tokenizer of Text Tools and Utils
# =============================================================================
#
# Miscellaneous utility functions to be used with text to get tokens and
# source and target nodes for concepts networks
# @Author: <NAME> <<EMAIL>>
# @Organization: LIIT-UNED 2020
#TODO:
#Create a best tokenizer model and lematization for spanish or improve
#use of spycy and nltk.
#Maybe use a automaticed algorithm of learning to stemming and lematization.
#Convertidor de texto a tokens
#Import:
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords, treebank
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.porter import PorterStemmer
import nltk
import string
import re
from string import digits
import locale
import pandas as pd
#Experimental:
import spacy
__all__ = ['tokenize_text']
#Constantes:
PREPOSITIONS = ['a', 'ante', 'bajo', 'cabe', 'con', 'contra', 'de'
'desde', 'en', 'entre', 'para', 'por', 'segun', 'sin',
'so', 'sobre', 'tras']
NO_SEMANTIC_WORDS = ['mas', 'asi', 'menos', 'ser', 'estar', 'ello', 'mientras', 'despues',
'tanto', 'mismo', 'parecer', 'tambien', 'si', 'no', 'etcetera', 'hacia',
'durante', 'decir', 'desear', 'recitar', 'cerca', 'lejos', 'entonces',
'luego', 'hola', 'ningun', 'primer', 'primero', 'atras', 'delante', 'ademas']
ABBREVIATIONS = ['etc', 'sr', 'sres', 'sras', 'srta']
ENCLITIC_PRONOUNS = ['me', 'se', 'te', 'nos', 'le', 'la', 'lo', 'los', 'las']
PUNCTUATION_SIGN = [i for i in string.punctuation]
CURRENCIES_SYMB = ['$', '€', '¢', '¥']
OTHERS_SYMB = ['...', "\"", "`", "''", "``", "¿", "?", "º", "¡", "“", "*", "-","_", "”" ]
NOUNS_ES_FINISHED_IN_S = ['pais', 'virus', 'dios', 'coronavirus', 'viernes']
NOUNS_ES_FINISHED_IN_R = ['mar', 'par']
NOUNS_ES_FINISHED_IN_RIA = ['historia', 'histeria', 'alegria']
NOUNS_ES_FINISHED_IN_TO = ['manifiesto', 'movimiento']
NOUNS_ES_FINISHED_IN_RO = ['carnero', 'astillero']
NOUNS_OF_MONTH = ['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'setiembre', 'octubre', 'noviembre', 'diciembre']
NAMES=['figueres', 'chavarria', 'chaves', 'cespedes', 'maria', 'jose', 'carlos', 'luis', 'echeverria', 'arias']
class tokenize_text():
def __init__(self, text, language ='spanish', with_stopwords=False):
self.language = language
self.stemmer = self.stemmer()
self.text = text.rstrip('\n') #Elimina saltos de carro
self.text = self.remove_emoji(self.text) #Elimina emojis
self.text = self.text.replace(u'\u200d️', '') #Elimina simbolos
self.text = self.text.translate({ord(k): None for k in digits}) #Elimina números
if language == 'spanish':
self.text = self.normalize_spanish_text()
if with_stopwords:
self.token = self.tokenize()
else:
self.token = self.tokenize_without_stopwords()
def print(self):
print(self.text)
def remove_emoji(self, text):
try:
emoji_pattern = re.compile(u'['
u'\U0001F300-\U0001F64F'
u'\U0001F680-\U0001F6FF'
u'\u2600-\u26FF\u2700-\u27BF]+',
re.UNICODE)
return emoji_pattern.sub('', text) #no emoji text
except re.error:
# Narrow UCS-2 build
emoji_pattern = re.compile(
u"(\ud83d[\ude00-\ude4f])|" # emoticons
u"(\ud83c[\udf00-\uffff])|" # symbols & pictographs (1 of 2)
u"(\ud83d[\u0000-\uddff])|" # symbols & pictographs (2 of 2)
u"(\ud83d[\ude80-\udeff])|" # transport & map symbols
u"(\ud83c[\udde0-\uddff])" # flags (iOS)
"+", flags=re.UNICODE)
return emoji_pattern.sub('', text)
def tweet_tokenize(self):
tokenizer = TweetTokenizer()
text = self.lower()
return tokenizer.tokenize(text)
def tokenize(self):
punct_sign = CURRENCIES_SYMB + OTHERS_SYMB + PUNCTUATION_SIGN
stop_words = set(stopwords.words(self.language))
text = word_tokenize(self.normalize(self.lower()))
new_text = list()
for w in text:
#if (w not in stop_words) and (w not in punct_sign):
if (w not in punct_sign) and (w not in NO_SEMANTIC_WORDS) and (w not in ABBREVIATIONS):
new_text.append(w)
return new_text
def tokenize_without_stopwords(self):
punct_sign = CURRENCIES_SYMB + OTHERS_SYMB + PUNCTUATION_SIGN
stop_words = set (stopwords.words(self.language))
text = word_tokenize(self.normalize(self.lower()))
new_text = list()
for w in text:
if (w not in stop_words) and (w not in punct_sign) and (w not in PREPOSITIONS) and (w not in NO_SEMANTIC_WORDS) and (w not in ABBREVIATIONS):
new_text.append(w)
return new_text
def lower(self):
return self.text.lower()
#Limpieza:
def normalize(self, s):
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ñ", "_n"),
("ñ", "_n"),
("i±", "_n"),
("i±", "i"),
("ó", "o"),
("Ã ", "i "),
("á", "a"),
("é", "e"),
("ú", "u"),
("." , ""),
("!" , ""),
("¡" , ""),
("?" , ""),
("_*", ""),
("¿", ""),
("-*", ""),
("*", ""),
("--", ""),
("costa rica", "costarica")
)
for a, b in replacements:
s = s.replace(a, b)
return s
def normalize_spanish_text(self):
text = word_tokenize(self.lower())
lex = []
for word in text:
lex.append(word.lower())
return self.normalize(' '.join(lex))
def get_token_frequency_df(self, with_stopwords=False):
df = pd.DataFrame()
if with_stopwords:
token_list = self.tokenize()
else:
token_list = self.tokenize_without_stopwords()
if self.language=='spanish':
words, pos, lemma = self.filter_spanish_token_list_with_lemma(token_list)
else:
words, pos, lemma = self.lematizing_text(token_list)
stem = self.stemming_token(words)
count = [stem.count(x) for x in stem]
df['word']= words
df['stem'] = stem
df['pos'] = pos
df['labels'] = lemma
df['stem_count'] = count
return df
def get_source_target_graph(self, gap=2):
source = list()
target = list()
df = self.get_token_frequency_df()
token = df['labels'].tolist()
for i in range(len(token) - 1 - gap):
source.append(token[i])
target.append(token[i + 1 + gap])
return source, target
#TODO: Falta decidir si se va a trabajar con a documento de constantes b Radicacion o reemplazo de radicación con palabra más frecuente c verbos irregulares devolver el token.
def stemmer(self, stemmer_type='Snowball', ignore_stopwords='True'):
if stemmer_type == 'Porter':
stemmer = PorterStemmer()
elif stemmer_type == 'Snowball':
stemmer = SnowballStemmer(language=self.language, ignore_stopwords=ignore_stopwords)
return stemmer
def stemming_token(self, token_list):
token_stm = [self.stemmer.stem(i) for i in token_list]
return token_stm
def stemming_word(self, word):
return self.stemmer.stem(word)
def lematizing_text(self, token_list):
word = list()
lemma = list()
pos = list()
wnl = WordNetLemmatizer()
for w in token_list:
word.append(w)
lemma.append(wnl.lemmatize(w))
pos.append(nltk.pos_tag(w)[0][1])
return word, pos, lemma
def filter_spanish_token_list_with_lemma(self, token_list, model='es_core_news_md', p_e = ENCLITIC_PRONOUNS):
nlp = spacy.load(model)
tk_list = list()
pos_list = list()
lm_list = list()
for i in token_list:
i = self.normalize(i)
doc = nlp(i)
pos = doc[0].pos_
word_lemma = doc[0].lemma_
if i[-3:] == 'ria' and i not in NOUNS_ES_FINISHED_IN_RIA and i not in NAMES:
word_lemma = i[:-2]
pos = 'VERB'
if i[-2:] == 'ro' and nlp(self.stemming_word(i)+'ar')[0].pos_ == 'VERB' and i not in NOUNS_OF_MONTH and i not in NOUNS_ES_FINISHED_IN_RO:
word_lemma = self.stemming_word(i)+'ar'
pos = 'VERB'
if i[-2:] == 'to' and nlp(self.stemming_word(i)+'ar')[0].pos_ == 'VERB' and self.stemming_word(i)+'ar' not in NOUNS_ES_FINISHED_IN_R and i not in NOUNS_ES_FINISHED_IN_TO:
word_lemma = self.stemming_word(i)+'ar'
pos = 'VERB'
if i[-3:] == 'rio' and nlp(i[:-3]+'ir')[0].pos_ == 'VERB':
word_lemma = i[:-2]+'ir'
pos = 'VERB'
if pos == 'NOUN' or pos == 'PROPN' and word_lemma not in NO_SEMANTIC_WORDS and i not in NAMES:
if word_lemma[-1:] is 's' and word_lemma not in NOUNS_ES_FINISHED_IN_S:
word_lemma = word_lemma[:-1]
if word_lemma[-2:] is ('lo' or 'no') and nlp(word_lemma[:-2])[0].pos_=='VERB':
word_lemma = word_lemma[:-2]
pos = 'VERB'
if word_lemma[-1:] is 'r' and (i[-1:]=='o' or i[-1:]=='a' or i[-2:]=='as') and word_lemma not in NOUNS_ES_FINISHED_IN_R:
word_lemma = i
if word_lemma[-2:] is 'nt':
word_lemma += 'e'
if word_lemma[-2:] is 'j' and (i[-2:]=='je' or i[-3:]=='jes') :
word_lemma += 'e'
if pos == 'VERB' and i not in NAMES:
if word_lemma[-2:] in p_e:
word_lemma = word_lemma[:-2]
elif (word_lemma[-3:] in p_e):
word_lemma = word_lemma[:-3]
else: word_lemma = word_lemma
if word_lemma not in NO_SEMANTIC_WORDS and pos not in ['AUX','DET','INTJ','ADP', 'ADV', 'SCONJ', 'CCONJ', 'NUM', 'PUNCT']:
tk_list.append(i)
pos_list.append(pos)
lm_list.append(self.normalize(word_lemma))
return tk_list, pos_list, lm_list
if __name__=='__main__':
print("Tokenizer")
| #!/usr/bin/python3
# =============================================================================
# Tokenizer of Text Tools and Utils
# =============================================================================
#
# Miscellaneous utility functions to be used with text to get tokens and
# source and target nodes for concepts networks
# @Author: <NAME> <<EMAIL>>
# @Organization: LIIT-UNED 2020
#TODO:
#Create a best tokenizer model and lematization for spanish or improve
#use of spycy and nltk.
#Maybe use a automaticed algorithm of learning to stemming and lematization.
#Convertidor de texto a tokens
#Import:
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords, treebank
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.porter import PorterStemmer
import nltk
import string
import re
from string import digits
import locale
import pandas as pd
#Experimental:
import spacy
__all__ = ['tokenize_text']
#Constantes:
PREPOSITIONS = ['a', 'ante', 'bajo', 'cabe', 'con', 'contra', 'de'
'desde', 'en', 'entre', 'para', 'por', 'segun', 'sin',
'so', 'sobre', 'tras']
NO_SEMANTIC_WORDS = ['mas', 'asi', 'menos', 'ser', 'estar', 'ello', 'mientras', 'despues',
'tanto', 'mismo', 'parecer', 'tambien', 'si', 'no', 'etcetera', 'hacia',
'durante', 'decir', 'desear', 'recitar', 'cerca', 'lejos', 'entonces',
'luego', 'hola', 'ningun', 'primer', 'primero', 'atras', 'delante', 'ademas']
ABBREVIATIONS = ['etc', 'sr', 'sres', 'sras', 'srta']
ENCLITIC_PRONOUNS = ['me', 'se', 'te', 'nos', 'le', 'la', 'lo', 'los', 'las']
PUNCTUATION_SIGN = [i for i in string.punctuation]
CURRENCIES_SYMB = ['$', '€', '¢', '¥']
OTHERS_SYMB = ['...', "\"", "`", "''", "``", "¿", "?", "º", "¡", "“", "*", "-","_", "”" ]
NOUNS_ES_FINISHED_IN_S = ['pais', 'virus', 'dios', 'coronavirus', 'viernes']
NOUNS_ES_FINISHED_IN_R = ['mar', 'par']
NOUNS_ES_FINISHED_IN_RIA = ['historia', 'histeria', 'alegria']
NOUNS_ES_FINISHED_IN_TO = ['manifiesto', 'movimiento']
NOUNS_ES_FINISHED_IN_RO = ['carnero', 'astillero']
NOUNS_OF_MONTH = ['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'setiembre', 'octubre', 'noviembre', 'diciembre']
NAMES=['figueres', 'chavarria', 'chaves', 'cespedes', 'maria', 'jose', 'carlos', 'luis', 'echeverria', 'arias']
class tokenize_text():
def __init__(self, text, language ='spanish', with_stopwords=False):
self.language = language
self.stemmer = self.stemmer()
self.text = text.rstrip('\n') #Elimina saltos de carro
self.text = self.remove_emoji(self.text) #Elimina emojis
self.text = self.text.replace(u'\u200d️', '') #Elimina simbolos
self.text = self.text.translate({ord(k): None for k in digits}) #Elimina números
if language == 'spanish':
self.text = self.normalize_spanish_text()
if with_stopwords:
self.token = self.tokenize()
else:
self.token = self.tokenize_without_stopwords()
def print(self):
print(self.text)
def remove_emoji(self, text):
try:
emoji_pattern = re.compile(u'['
u'\U0001F300-\U0001F64F'
u'\U0001F680-\U0001F6FF'
u'\u2600-\u26FF\u2700-\u27BF]+',
re.UNICODE)
return emoji_pattern.sub('', text) #no emoji text
except re.error:
# Narrow UCS-2 build
emoji_pattern = re.compile(
u"(\ud83d[\ude00-\ude4f])|" # emoticons
u"(\ud83c[\udf00-\uffff])|" # symbols & pictographs (1 of 2)
u"(\ud83d[\u0000-\uddff])|" # symbols & pictographs (2 of 2)
u"(\ud83d[\ude80-\udeff])|" # transport & map symbols
u"(\ud83c[\udde0-\uddff])" # flags (iOS)
"+", flags=re.UNICODE)
return emoji_pattern.sub('', text)
def tweet_tokenize(self):
tokenizer = TweetTokenizer()
text = self.lower()
return tokenizer.tokenize(text)
def tokenize(self):
punct_sign = CURRENCIES_SYMB + OTHERS_SYMB + PUNCTUATION_SIGN
stop_words = set(stopwords.words(self.language))
text = word_tokenize(self.normalize(self.lower()))
new_text = list()
for w in text:
#if (w not in stop_words) and (w not in punct_sign):
if (w not in punct_sign) and (w not in NO_SEMANTIC_WORDS) and (w not in ABBREVIATIONS):
new_text.append(w)
return new_text
def tokenize_without_stopwords(self):
punct_sign = CURRENCIES_SYMB + OTHERS_SYMB + PUNCTUATION_SIGN
stop_words = set (stopwords.words(self.language))
text = word_tokenize(self.normalize(self.lower()))
new_text = list()
for w in text:
if (w not in stop_words) and (w not in punct_sign) and (w not in PREPOSITIONS) and (w not in NO_SEMANTIC_WORDS) and (w not in ABBREVIATIONS):
new_text.append(w)
return new_text
def lower(self):
return self.text.lower()
#Limpieza:
def normalize(self, s):
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ñ", "_n"),
("ñ", "_n"),
("i±", "_n"),
("i±", "i"),
("ó", "o"),
("Ã ", "i "),
("á", "a"),
("é", "e"),
("ú", "u"),
("." , ""),
("!" , ""),
("¡" , ""),
("?" , ""),
("_*", ""),
("¿", ""),
("-*", ""),
("*", ""),
("--", ""),
("costa rica", "costarica")
)
for a, b in replacements:
s = s.replace(a, b)
return s
def normalize_spanish_text(self):
text = word_tokenize(self.lower())
lex = []
for word in text:
lex.append(word.lower())
return self.normalize(' '.join(lex))
def get_token_frequency_df(self, with_stopwords=False):
df = pd.DataFrame()
if with_stopwords:
token_list = self.tokenize()
else:
token_list = self.tokenize_without_stopwords()
if self.language=='spanish':
words, pos, lemma = self.filter_spanish_token_list_with_lemma(token_list)
else:
words, pos, lemma = self.lematizing_text(token_list)
stem = self.stemming_token(words)
count = [stem.count(x) for x in stem]
df['word']= words
df['stem'] = stem
df['pos'] = pos
df['labels'] = lemma
df['stem_count'] = count
return df
def get_source_target_graph(self, gap=2):
source = list()
target = list()
df = self.get_token_frequency_df()
token = df['labels'].tolist()
for i in range(len(token) - 1 - gap):
source.append(token[i])
target.append(token[i + 1 + gap])
return source, target
#TODO: Falta decidir si se va a trabajar con a documento de constantes b Radicacion o reemplazo de radicación con palabra más frecuente c verbos irregulares devolver el token.
def stemmer(self, stemmer_type='Snowball', ignore_stopwords='True'):
if stemmer_type == 'Porter':
stemmer = PorterStemmer()
elif stemmer_type == 'Snowball':
stemmer = SnowballStemmer(language=self.language, ignore_stopwords=ignore_stopwords)
return stemmer
def stemming_token(self, token_list):
token_stm = [self.stemmer.stem(i) for i in token_list]
return token_stm
def stemming_word(self, word):
return self.stemmer.stem(word)
def lematizing_text(self, token_list):
word = list()
lemma = list()
pos = list()
wnl = WordNetLemmatizer()
for w in token_list:
word.append(w)
lemma.append(wnl.lemmatize(w))
pos.append(nltk.pos_tag(w)[0][1])
return word, pos, lemma
def filter_spanish_token_list_with_lemma(self, token_list, model='es_core_news_md', p_e = ENCLITIC_PRONOUNS):
nlp = spacy.load(model)
tk_list = list()
pos_list = list()
lm_list = list()
for i in token_list:
i = self.normalize(i)
doc = nlp(i)
pos = doc[0].pos_
word_lemma = doc[0].lemma_
if i[-3:] == 'ria' and i not in NOUNS_ES_FINISHED_IN_RIA and i not in NAMES:
word_lemma = i[:-2]
pos = 'VERB'
if i[-2:] == 'ro' and nlp(self.stemming_word(i)+'ar')[0].pos_ == 'VERB' and i not in NOUNS_OF_MONTH and i not in NOUNS_ES_FINISHED_IN_RO:
word_lemma = self.stemming_word(i)+'ar'
pos = 'VERB'
if i[-2:] == 'to' and nlp(self.stemming_word(i)+'ar')[0].pos_ == 'VERB' and self.stemming_word(i)+'ar' not in NOUNS_ES_FINISHED_IN_R and i not in NOUNS_ES_FINISHED_IN_TO:
word_lemma = self.stemming_word(i)+'ar'
pos = 'VERB'
if i[-3:] == 'rio' and nlp(i[:-3]+'ir')[0].pos_ == 'VERB':
word_lemma = i[:-2]+'ir'
pos = 'VERB'
if pos == 'NOUN' or pos == 'PROPN' and word_lemma not in NO_SEMANTIC_WORDS and i not in NAMES:
if word_lemma[-1:] is 's' and word_lemma not in NOUNS_ES_FINISHED_IN_S:
word_lemma = word_lemma[:-1]
if word_lemma[-2:] is ('lo' or 'no') and nlp(word_lemma[:-2])[0].pos_=='VERB':
word_lemma = word_lemma[:-2]
pos = 'VERB'
if word_lemma[-1:] is 'r' and (i[-1:]=='o' or i[-1:]=='a' or i[-2:]=='as') and word_lemma not in NOUNS_ES_FINISHED_IN_R:
word_lemma = i
if word_lemma[-2:] is 'nt':
word_lemma += 'e'
if word_lemma[-2:] is 'j' and (i[-2:]=='je' or i[-3:]=='jes') :
word_lemma += 'e'
if pos == 'VERB' and i not in NAMES:
if word_lemma[-2:] in p_e:
word_lemma = word_lemma[:-2]
elif (word_lemma[-3:] in p_e):
word_lemma = word_lemma[:-3]
else: word_lemma = word_lemma
if word_lemma not in NO_SEMANTIC_WORDS and pos not in ['AUX','DET','INTJ','ADP', 'ADV', 'SCONJ', 'CCONJ', 'NUM', 'PUNCT']:
tk_list.append(i)
pos_list.append(pos)
lm_list.append(self.normalize(word_lemma))
return tk_list, pos_list, lm_list
if __name__=='__main__':
    # Smoke-test entry point: just announces the module when run directly.
    print("Tokenizer")
| en | 0.362603 | #!/usr/bin/python3 # ============================================================================= # Tokenizer of Text Tools and Utils # ============================================================================= # # Miscellaneous utility functions to be used with text to get tokens and # source and target nodes for concepts networks # @Author: <NAME> <<EMAIL>> # @Organization: LIIT-UNED 2020 #TODO: #Create a best tokenizer model and lematization for spanish or improve #use of spycy and nltk. #Maybe use a automaticed algorithm of learning to stemming and lematization. #Convertidor de texto a tokens #Import: #Experimental: #Constantes: #Elimina saltos de carro #Elimina emojis #Elimina simbolos #Elimina números #no emoji text # Narrow UCS-2 build # emoticons # symbols & pictographs (1 of 2) # symbols & pictographs (2 of 2) # transport & map symbols # flags (iOS) #if (w not in stop_words) and (w not in punct_sign): #Limpieza: #TODO: Falta decidir si se va a trabajar con a documento de constantes b Radicacion o reemplazo de radicación con palabra más frecuente c verbos irregulares devolver el token. | 2.623351 | 3 |
Leetcode/res/Merge Two Sorted Lists/1.py | AllanNozomu/CompetitiveProgramming | 1 | 6615758 | # Author: allannozomu
# Runtime: 44 ms
# Memory: 13.1 MB
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
headNode = ListNode(-1)
actualNode = headNode
while l1 and l2:
if l1.val < l2.val:
actualNode.next = l1
l1 = l1.next
else:
actualNode.next = l2
l2 = l2.next
actualNode = actualNode.next
if l1:
actualNode.next = l1
if l2:
actualNode.next = l2
return headNode.next | # Author: allannozomu
# Runtime: 44 ms
# Memory: 13.1 MB
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
headNode = ListNode(-1)
actualNode = headNode
while l1 and l2:
if l1.val < l2.val:
actualNode.next = l1
l1 = l1.next
else:
actualNode.next = l2
l2 = l2.next
actualNode = actualNode.next
if l1:
actualNode.next = l1
if l2:
actualNode.next = l2
return headNode.next | en | 0.543744 | # Author: allannozomu # Runtime: 44 ms # Memory: 13.1 MB # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None | 3.872525 | 4 |
test/covfiles/exclusions.py | samtaufa/pry | 1 | 6615759 |
#begin nocover
#end nocover
# begin nocover
|
#begin nocover
#end nocover
# begin nocover
| sl | 0.145403 | #begin nocover #end nocover # begin nocover | 1.112624 | 1 |
artpalettes.py | klanita/pretty-plots | 1 | 6615760 | # this code will be taking an image and extracting best colors out of it
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
def get_palette(
n_colors,
palette_name='category20'
):
try:
palette = sns.color_palette(palette_name)
except:
print('Palette not found. Using default palette tab10')
palette = sns.color_palette('category20')
while len(palette) < n_colors:
palette += palette
return palette | # this code will be taking an image and extracting best colors out of it
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
def get_palette(
n_colors,
palette_name='category20'
):
try:
palette = sns.color_palette(palette_name)
except:
print('Palette not found. Using default palette tab10')
palette = sns.color_palette('category20')
while len(palette) < n_colors:
palette += palette
return palette | en | 0.910743 | # this code will be taking an image and extracting best colors out of it | 3.375173 | 3 |
shared_control/src/simple.py | Taemin-Choi/interactive_learning | 0 | 6615761 | #!/usr/bin/env python
#-*-coding: utf-8-*-
import interface
import rospy
import pygame
import copy
import os
import math
from sys import stdout
from std_msgs.msg import Int32, Header, Int32MultiArray, Float32, Time
from visualization_msgs.msg import MarkerArray, Marker
from sensor_msgs.msg import Image, Joy, JointState
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped, Point
from shared_control.msg import NavCue, CmdIntuit, CmdAssist, RobotMotion
from shared_control.srv import Nav2Cmd, Node
from reserved_words import *
class Interf(interface.Interface):
def __init__(self):
# 파라미터 설정
self.lin_vel_joy = rospy.get_param('~lin_vel_joy', 0.69)
self.ang_vel_joy = rospy.get_param('~ang_vel_joy', 3.67)
self.camera = rospy.get_param('~camera', 'camera/color/image_raw')
self.spin_cycle = rospy.Duration(rospy.get_param('~spin_cycle', 0.05))
self.scale_arrow = rospy.get_param('~scale_arrow', 50)
self.scale_cross = rospy.get_param('~scale_cross', 30)
# 화면 초기화
os.environ['SDL_VIDEO_WINDOW_POS'] = "0, 0"
pygame.init()
self.arrow_switch = False
self.cross_switch = False
self.monitor = pygame.display.Info()
self.width = rospy.get_param('~width', int(0.48*self.monitor.current_w))
self.height = rospy.get_param('~height', int(0.48*self.monitor.current_h))
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.mouse.set_visible(False)
pygame.display.set_caption("Shared control interface")
# 토픽 구독
print(C_YELLO + '\rInterfacer, BCI 서비스 준비중...' + C_END)
rospy.Subscriber(self.camera, Image, self.visualize)
self.color = {'data': [(255, 223, 36), # default
(255, 223, 36), # M_RIGHT
(255, 223, 36), # M_LEFT
(255, 223, 36), # M_FORWARD
(255, 223, 36),
(255, 223, 36),
(255, 223, 36),
(134, 229, 127)], # M_MOVE
'time': [rospy.get_time()]*8}
# 출력 설정
self.publisher_cmd_intuit = rospy.Publisher('interf/cmd/intuit', CmdIntuit, queue_size=1)
self.publisher_cmd_assist = rospy.Publisher('interf/cmd/assist', CmdAssist, queue_size=1)
self.publisher_nav_cue = rospy.Publisher('interf/nav_cue', NavCue, queue_size=1)
self.publisher_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.publisher_cmd_joint = rospy.Publisher('cmd_joint', JointState, queue_size=1)
# 토픽 구독
self.cmd = CmdIntuit()
self.switch_marker = [False, False, False]
rospy.Subscriber('interf/cmd/intuit', CmdIntuit, self.update_cmd_intuit)
rospy.Subscriber('interf/robot/motion', RobotMotion, self.update_marker_color)
rospy.Subscriber('interf/nav_cue', NavCue, self.update_marker_visibility)
rospy.Subscriber('joy', Joy, self.joystick)
self.path = []
rospy.Subscriber('robot/pose', PoseWithCovarianceStamped, self.update_robot_pose)
# 서비스 시작
self.publisher_time_start = rospy.Publisher('time/start', Time, queue_size=1)
self.publisher_time_end = rospy.Publisher('time/end', Time, queue_size=1)
self.time_start = rospy.Time.now()
# self.the_timer = rospy.Timer(rospy.Duration(0.1), self.timer)
self.path_publisher = rospy.Publisher('interf/path', MarkerArray, queue_size=1)
self.path_visualizer = rospy.Timer(rospy.Duration(0.3), self.visualize_path)
rospy.Service('interf/nav2cmd', Nav2Cmd, self.nav2cmd)
self.key_watcher = rospy.Timer(self.spin_cycle, self.keyboard)
print(C_YELLO + '\rInterfacer, BCI 서비스 시작' + C_END)
print(C_GREEN + '\rInterfacer, 초기화 완료' + C_END)
def visualize(self, data):
"""화면을 출력한다."""
# 영상을 획득한다.
cam = pygame.image.frombuffer(data.data, (data.width, data.height), 'RGB')
img = pygame.transform.smoothscale(cam, (self.width, self.height))
self.screen.blit(img, (0, 0))
# 영상 위에 화살표 마커를 덧붙여 출력한다.
if self.arrow_switch:
self.draw_arrow(M_RIGHT, 0.94*self.width, 0.5*self.height)
self.draw_arrow(M_LEFT, 0.06*self.width, 0.5*self.height)
self.draw_arrow(M_FORWARD, 0.5*self.width, 0.1*self.height)
if self.cross_switch:
self.draw_cross(0.5*self.width, 0.5*self.height)
pygame.display.flip()
def draw_arrow(self, type, x, y):
"""화살표를 그린다."""
if type == M_RIGHT:
arr = [[1, 0], [0, 1], [0, 0.5], [-1, 0.5], [-1, -0.5], [0, -0.5], [0, -1]]
elif type == M_LEFT:
arr = [[1, 0.5], [0, 0.5], [0, 1], [-1, 0], [0, -1], [0, -0.5], [1, -0.5]]
elif type == M_FORWARD:
arr = [[1, 0], [0.5, 0], [0.5, 1], [-0.5, 1], [-0.5, 0], [-1, 0], [0, -1]]
arr = [[self.scale_arrow*i+x, self.scale_arrow*j+y] for [i, j] in arr]
pygame.draw.polygon(self.screen, (255, 223, 36), arr)
class Simple:
def __init__(self):
self.interf = Interf()
# 파라미터 획득
self.move_dist = rospy.get_param('~move_dist', 0.5)
self.move_vel = rospy.get_param('~move_vel', 0.3)
self.move_time = rospy.get_param('~move_time', 2.0)
self.turn_vel = rospy.get_param('~turn_vel', 1.82)
self.wait_1 = rospy.get_param('~wait_1', 1.0)
self.wait_2 = rospy.get_param('~wait_2', 3.0)
# 변수 초기화
self.time_start = rospy.Time.now()
self.time_cmd = rospy.get_time()
# 신호 획득
self.get_cmd = rospy.ServiceProxy('interf/nav2cmd', Nav2Cmd)
rospy.Subscriber('interf/cmd/assist', CmdAssist, self.update_cmd)
rospy.Subscriber('time/start', Time, self.update_time)
# 발행 설정
rospy.wait_for_service('interf/nav2cmd')
self.publisher_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.publisher_robot_motion = rospy.Publisher('interf/robot/motion', RobotMotion, queue_size=1)
self.publisher_nav_cue = rospy.Publisher('interf/nav_cue', NavCue, queue_size=1)
self.publisher_simple = rospy.Publisher('simple/motion', RobotMotion, queue_size=1)
# 실행
self.interf.arrow_switch = False
self.interf.cross_switch = False
print(C_GREEN + '\rSimple, 초기화 완료' + C_END)
rospy.sleep(rospy.Duration(0.2))
while True:
self.task()
rospy.sleep(rospy.Duration(0.2))
def task(self):
'''질문과 이동을 반복한다.'''
print('\r휴면')
self.publisher_simple.publish(header=self.get_header(), motion=1)
now = rospy.get_time()
while self.time_cmd < now:
rospy.sleep(rospy.Duration(0.2))
print('\r화살표')
self.publisher_simple.publish( header=self.get_header(), motion=2)
self.interf.arrow_switch = True
now = rospy.get_time()
while rospy.get_time() < now+self.wait_1:
rospy.sleep(rospy.Duration(0.2))
print('\r픽스에이션')
self.publisher_simple.publish(header=self.get_header(), motion=3)
self.interf.cross_switch = True
now = rospy.get_time()
while rospy.get_time() < now+self.wait_2:
rospy.sleep(rospy.Duration(0.2))
print('\rStop cue')
self.publisher_simple.publish(header=self.get_header(), motion=4)
rospy.sleep(rospy.Duration(0.2))
print('\r로봇 이동')
self.move()
rospy.sleep(rospy.Duration(0.2))
print('\r화살표와 픽스에이션 제거')
self.publisher_simple.publish(header=self.get_header(), motion=6)
self.interf.arrow_switch = False
self.interf.cross_switch = False
rospy.sleep(rospy.Duration(0.2))
def move(self):
print('\r%6.1f[s]: Simple, 명령 요청'%(rospy.Time.now() - self.time_start).to_sec())
cmd = self.get_cmd(
header=self.get_header(),
dist=self.move_dist,
right=1,
left=1,
forward=1,
backward=1
)
if cmd.dir==M_FORWARD:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '앞' + C_END + ') 획득')
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(self.move_vel, self.move_time)
elif cmd.dir==M_BACKWARD:
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(-self.move_vel, self.move_time)
elif cmd.dir==M_LEFT:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '좌' + C_END + ') 획득')
self.turn(self.move_vel)
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(self.move_vel, self.move_time)
elif cmd.dir==M_RIGHT:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '우' + C_END + ') 획득')
self.turn(-self.move_vel)
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(self.move_vel, self.move_time)
else:
self.robot_state = S_INDIRECT_WAIT
def move_forward(self, vel, time):
'''주어진 시간동안 전진한다.'''
self.publisher_robot_motion.publish(
header=self.get_header(),
motion=M_FORWARD)
self.publisher_simple.publish(header=self.get_header(), motion=5)
v = Twist()
v.linear.x = vel
now = rospy.get_time()
while rospy.get_time() < now+time:
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
v.linear.x = 0
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
def turn(self, vel):
'''주어진 방향으로 회전한다.'''
if vel > 0:
self.publisher_robot_motion.publish(
header=self.get_header(),
motion=M_LEFT)
else:
self.publisher_robot_motion.publish(
header=self.get_header(),
motion=M_RIGHT)
v = Twist()
v.angular.z = vel
now = rospy.get_time()
while self.time_cmd < now:
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
v.angular.z = 0
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
def get_header(self):
'''헤더를 생성한다.'''
header = Header()
header.stamp = rospy.Time.now()
return header
def update_cmd(self, data):
'''이동시점 관련명령을 갱신한다.'''
if data.num==3:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '3' + C_END + ') 획득')
vel = Twist()
self.publisher_cmd_vel.publish(vel)
elif data.num==2:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '2' + C_END + ') 획득')
self.time_cmd = rospy.get_time()
def update_time(self, data):
'''시작시각을 갱신한다.'''
self.time_start = data.data
if __name__=='__main__':
rospy.init_node('simple')
s = Simple()
rospy.spin()
| #!/usr/bin/env python
#-*-coding: utf-8-*-
import interface
import rospy
import pygame
import copy
import os
import math
from sys import stdout
from std_msgs.msg import Int32, Header, Int32MultiArray, Float32, Time
from visualization_msgs.msg import MarkerArray, Marker
from sensor_msgs.msg import Image, Joy, JointState
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped, Point
from shared_control.msg import NavCue, CmdIntuit, CmdAssist, RobotMotion
from shared_control.srv import Nav2Cmd, Node
from reserved_words import *
class Interf(interface.Interface):
def __init__(self):
# 파라미터 설정
self.lin_vel_joy = rospy.get_param('~lin_vel_joy', 0.69)
self.ang_vel_joy = rospy.get_param('~ang_vel_joy', 3.67)
self.camera = rospy.get_param('~camera', 'camera/color/image_raw')
self.spin_cycle = rospy.Duration(rospy.get_param('~spin_cycle', 0.05))
self.scale_arrow = rospy.get_param('~scale_arrow', 50)
self.scale_cross = rospy.get_param('~scale_cross', 30)
# 화면 초기화
os.environ['SDL_VIDEO_WINDOW_POS'] = "0, 0"
pygame.init()
self.arrow_switch = False
self.cross_switch = False
self.monitor = pygame.display.Info()
self.width = rospy.get_param('~width', int(0.48*self.monitor.current_w))
self.height = rospy.get_param('~height', int(0.48*self.monitor.current_h))
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.mouse.set_visible(False)
pygame.display.set_caption("Shared control interface")
# 토픽 구독
print(C_YELLO + '\rInterfacer, BCI 서비스 준비중...' + C_END)
rospy.Subscriber(self.camera, Image, self.visualize)
self.color = {'data': [(255, 223, 36), # default
(255, 223, 36), # M_RIGHT
(255, 223, 36), # M_LEFT
(255, 223, 36), # M_FORWARD
(255, 223, 36),
(255, 223, 36),
(255, 223, 36),
(134, 229, 127)], # M_MOVE
'time': [rospy.get_time()]*8}
# 출력 설정
self.publisher_cmd_intuit = rospy.Publisher('interf/cmd/intuit', CmdIntuit, queue_size=1)
self.publisher_cmd_assist = rospy.Publisher('interf/cmd/assist', CmdAssist, queue_size=1)
self.publisher_nav_cue = rospy.Publisher('interf/nav_cue', NavCue, queue_size=1)
self.publisher_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.publisher_cmd_joint = rospy.Publisher('cmd_joint', JointState, queue_size=1)
# 토픽 구독
self.cmd = CmdIntuit()
self.switch_marker = [False, False, False]
rospy.Subscriber('interf/cmd/intuit', CmdIntuit, self.update_cmd_intuit)
rospy.Subscriber('interf/robot/motion', RobotMotion, self.update_marker_color)
rospy.Subscriber('interf/nav_cue', NavCue, self.update_marker_visibility)
rospy.Subscriber('joy', Joy, self.joystick)
self.path = []
rospy.Subscriber('robot/pose', PoseWithCovarianceStamped, self.update_robot_pose)
# 서비스 시작
self.publisher_time_start = rospy.Publisher('time/start', Time, queue_size=1)
self.publisher_time_end = rospy.Publisher('time/end', Time, queue_size=1)
self.time_start = rospy.Time.now()
# self.the_timer = rospy.Timer(rospy.Duration(0.1), self.timer)
self.path_publisher = rospy.Publisher('interf/path', MarkerArray, queue_size=1)
self.path_visualizer = rospy.Timer(rospy.Duration(0.3), self.visualize_path)
rospy.Service('interf/nav2cmd', Nav2Cmd, self.nav2cmd)
self.key_watcher = rospy.Timer(self.spin_cycle, self.keyboard)
print(C_YELLO + '\rInterfacer, BCI 서비스 시작' + C_END)
print(C_GREEN + '\rInterfacer, 초기화 완료' + C_END)
def visualize(self, data):
"""화면을 출력한다."""
# 영상을 획득한다.
cam = pygame.image.frombuffer(data.data, (data.width, data.height), 'RGB')
img = pygame.transform.smoothscale(cam, (self.width, self.height))
self.screen.blit(img, (0, 0))
# 영상 위에 화살표 마커를 덧붙여 출력한다.
if self.arrow_switch:
self.draw_arrow(M_RIGHT, 0.94*self.width, 0.5*self.height)
self.draw_arrow(M_LEFT, 0.06*self.width, 0.5*self.height)
self.draw_arrow(M_FORWARD, 0.5*self.width, 0.1*self.height)
if self.cross_switch:
self.draw_cross(0.5*self.width, 0.5*self.height)
pygame.display.flip()
def draw_arrow(self, type, x, y):
"""화살표를 그린다."""
if type == M_RIGHT:
arr = [[1, 0], [0, 1], [0, 0.5], [-1, 0.5], [-1, -0.5], [0, -0.5], [0, -1]]
elif type == M_LEFT:
arr = [[1, 0.5], [0, 0.5], [0, 1], [-1, 0], [0, -1], [0, -0.5], [1, -0.5]]
elif type == M_FORWARD:
arr = [[1, 0], [0.5, 0], [0.5, 1], [-0.5, 1], [-0.5, 0], [-1, 0], [0, -1]]
arr = [[self.scale_arrow*i+x, self.scale_arrow*j+y] for [i, j] in arr]
pygame.draw.polygon(self.screen, (255, 223, 36), arr)
class Simple:
def __init__(self):
self.interf = Interf()
# 파라미터 획득
self.move_dist = rospy.get_param('~move_dist', 0.5)
self.move_vel = rospy.get_param('~move_vel', 0.3)
self.move_time = rospy.get_param('~move_time', 2.0)
self.turn_vel = rospy.get_param('~turn_vel', 1.82)
self.wait_1 = rospy.get_param('~wait_1', 1.0)
self.wait_2 = rospy.get_param('~wait_2', 3.0)
# 변수 초기화
self.time_start = rospy.Time.now()
self.time_cmd = rospy.get_time()
# 신호 획득
self.get_cmd = rospy.ServiceProxy('interf/nav2cmd', Nav2Cmd)
rospy.Subscriber('interf/cmd/assist', CmdAssist, self.update_cmd)
rospy.Subscriber('time/start', Time, self.update_time)
# 발행 설정
rospy.wait_for_service('interf/nav2cmd')
self.publisher_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.publisher_robot_motion = rospy.Publisher('interf/robot/motion', RobotMotion, queue_size=1)
self.publisher_nav_cue = rospy.Publisher('interf/nav_cue', NavCue, queue_size=1)
self.publisher_simple = rospy.Publisher('simple/motion', RobotMotion, queue_size=1)
# 실행
self.interf.arrow_switch = False
self.interf.cross_switch = False
print(C_GREEN + '\rSimple, 초기화 완료' + C_END)
rospy.sleep(rospy.Duration(0.2))
while True:
self.task()
rospy.sleep(rospy.Duration(0.2))
def task(self):
'''질문과 이동을 반복한다.'''
print('\r휴면')
self.publisher_simple.publish(header=self.get_header(), motion=1)
now = rospy.get_time()
while self.time_cmd < now:
rospy.sleep(rospy.Duration(0.2))
print('\r화살표')
self.publisher_simple.publish( header=self.get_header(), motion=2)
self.interf.arrow_switch = True
now = rospy.get_time()
while rospy.get_time() < now+self.wait_1:
rospy.sleep(rospy.Duration(0.2))
print('\r픽스에이션')
self.publisher_simple.publish(header=self.get_header(), motion=3)
self.interf.cross_switch = True
now = rospy.get_time()
while rospy.get_time() < now+self.wait_2:
rospy.sleep(rospy.Duration(0.2))
print('\rStop cue')
self.publisher_simple.publish(header=self.get_header(), motion=4)
rospy.sleep(rospy.Duration(0.2))
print('\r로봇 이동')
self.move()
rospy.sleep(rospy.Duration(0.2))
print('\r화살표와 픽스에이션 제거')
self.publisher_simple.publish(header=self.get_header(), motion=6)
self.interf.arrow_switch = False
self.interf.cross_switch = False
rospy.sleep(rospy.Duration(0.2))
def move(self):
print('\r%6.1f[s]: Simple, 명령 요청'%(rospy.Time.now() - self.time_start).to_sec())
cmd = self.get_cmd(
header=self.get_header(),
dist=self.move_dist,
right=1,
left=1,
forward=1,
backward=1
)
if cmd.dir==M_FORWARD:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '앞' + C_END + ') 획득')
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(self.move_vel, self.move_time)
elif cmd.dir==M_BACKWARD:
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(-self.move_vel, self.move_time)
elif cmd.dir==M_LEFT:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '좌' + C_END + ') 획득')
self.turn(self.move_vel)
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(self.move_vel, self.move_time)
elif cmd.dir==M_RIGHT:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '우' + C_END + ') 획득')
self.turn(-self.move_vel)
print('\r%6.1f[s]: Simple, 다음 노드로 이동'%(rospy.Time.now() - self.time_start).to_sec())
self.move_forward(self.move_vel, self.move_time)
else:
self.robot_state = S_INDIRECT_WAIT
def move_forward(self, vel, time):
'''주어진 시간동안 전진한다.'''
self.publisher_robot_motion.publish(
header=self.get_header(),
motion=M_FORWARD)
self.publisher_simple.publish(header=self.get_header(), motion=5)
v = Twist()
v.linear.x = vel
now = rospy.get_time()
while rospy.get_time() < now+time:
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
v.linear.x = 0
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
def turn(self, vel):
'''주어진 방향으로 회전한다.'''
if vel > 0:
self.publisher_robot_motion.publish(
header=self.get_header(),
motion=M_LEFT)
else:
self.publisher_robot_motion.publish(
header=self.get_header(),
motion=M_RIGHT)
v = Twist()
v.angular.z = vel
now = rospy.get_time()
while self.time_cmd < now:
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
v.angular.z = 0
self.publisher_cmd_vel.publish(v)
rospy.sleep(rospy.Duration(0.2))
def get_header(self):
'''헤더를 생성한다.'''
header = Header()
header.stamp = rospy.Time.now()
return header
def update_cmd(self, data):
'''이동시점 관련명령을 갱신한다.'''
if data.num==3:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '3' + C_END + ') 획득')
vel = Twist()
self.publisher_cmd_vel.publish(vel)
elif data.num==2:
print('\r%6.1f[s]: Simple, 명령('%(rospy.Time.now() - self.time_start).to_sec() + C_YELLO + '2' + C_END + ') 획득')
self.time_cmd = rospy.get_time()
def update_time(self, data):
'''시작시각을 갱신한다.'''
self.time_start = data.data
if __name__=='__main__':
rospy.init_node('simple')
s = Simple()
rospy.spin()
| ko | 0.999811 | #!/usr/bin/env python #-*-coding: utf-8-*- # 파라미터 설정 # 화면 초기화 # 토픽 구독 # default # M_RIGHT # M_LEFT # M_FORWARD # M_MOVE # 출력 설정 # 토픽 구독 # 서비스 시작 # self.the_timer = rospy.Timer(rospy.Duration(0.1), self.timer) 화면을 출력한다. # 영상을 획득한다. # 영상 위에 화살표 마커를 덧붙여 출력한다. 화살표를 그린다. # 파라미터 획득 # 변수 초기화 # 신호 획득 # 발행 설정 # 실행 질문과 이동을 반복한다. 주어진 시간동안 전진한다. 주어진 방향으로 회전한다. 헤더를 생성한다. 이동시점 관련명령을 갱신한다. 시작시각을 갱신한다. | 2.239767 | 2 |
python/dirsig/lidarbin/readbin.py | pavdpr/DIRSIG | 1 | 6615762 | #!/usr/bin/env python
""" Reads a DIRSIG lidar "bin" file
Description:
This file provides code to read a DIRSIG bin file and provides basic
manipulation of that file.
Usage:
To read a bin file:
For most cases:
binfile = readDirsigBin(filename)
If dirsig was compiled on a 32 bit system and the bin file is version
0 or 1:
binfile = readDirsigBin(filename, True)
External Dependancies:
numpy
struct
sys
zlib
Warnings:
This code has not been tested on a version 0 bin file.
Author(s):
<NAME> par4249 at rit dot edu
Copyright:
(c) 2015 Rochester Institute of Technology
References:
[1] http://www.dirsig.org/docs/new/bin.html (Accessed 2013-02-09).
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Rochester Institute of Technology"
__credits__ = []
__license__ = "MIT"
#__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import sys # stderr and command-line arguments
import numpy # base data type for signals
import struct # for convertint data types
import zlib # for decompression
def readbin(filename, is32bit=False):
"""Reads a DIRSIG bin file.
Args:
filename (str): A string containing the file to read.
is32bit (bool, optional): Set to True if DIRSIG was compiled on a 32 bit
system. This tells the code to use 32 bit long longs for the pulse
data bytes field. In version 2 or later of the bin file, this was
guaranteed to be 64 bits in of the bin file and this flag will have
no effect on the data parsing. The default is False.
Returns:
A dictionary containing two keys: 'header' and 'tasks'. output['header']
is a dictionary containing the file header. For output['tasks'] is a list
containing dictionaries. Let task = output['tasks'][i], be the ith task.
task['header'] is a dictionary containing the task header. task['pulses']
is a list of dictionaries containing the pulses. Let task['pulses'][j] be
the jth pulse of the task. pulse['header'] is a dict containing the pulse
header. pulse['data'] is a numpy.array contating the return information.
The contents of the header files will depend on the version of the bin
file that is being read. See [1] for more details.
output = {'header': dict, 'tasks': list}
output['tasks'][i] = {'header': dict, 'pulses': list}
output['tasks'][i]['pulses'][j] = {'header': dict, 'data': numpy.array}
The 0th time bin of the pulse data is the passive term, the remaining
bands are the active time part of the signal.
"""
# define helper functions
def readpulse(fid, version, endian, xpixelct, ypixelct, is32bit):
"""Reads a pulse from a DIRSIG bin file.
Args:
fid (file): The file id to read a pulse from.
version (int): The version of the bin file.
endian (str): The endian of the data.
xpixelct (int): the number of pixels in the x direction
ypixelct (int): the number of pixels in the y direction
is32bit (bool): a bool if DIRSIG was compiled on a 32 bit system.
Returns:
A dictionary containing the pulse data. This has two keys: 'header',
a dictionary containing the pulse header; and 'data', a numpy.array
containing the data for the pulse.
"""
output = {}
header = {}
header['pulse time'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['time gate start'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['time gate stop'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['time gate bin count'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
if version > 0:
header['samples per time bin'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
else:
# Just make a guess
header['samples per time bin'] = 1
header['platform location'] = numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version < 2:
header['platform orientation angle order'] = fid.read(3)
header['platform rotation'] = numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version > 1:
# pylint: disable=E1103
header['transmitter to mount affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
else:
header['transmitter mount pointing offset'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
header['tranmitter orientation angle order'] = fid.read(3)
header['transmitter mount pointing rotation'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version > 1:
# pylint: disable=E1103
header['transmitter mount to platform affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
header['receiver to mount affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
else:
header['receiver mount pointing offset'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
header['receiver orientation angle order'] = fid.read(3)
header['receiver mount pointing rotation'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version > 1:
# pylint: disable=E1103
header['receiver mount to platform affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
header['pulse data type'] = struct.unpack(endian + 'I', \
fid.read(4))[0] # should always be 5 (double)
header['data compression type'] = struct.unpack(endian + 'B', \
fid.read(1))[0]
if version > 1:
header['pulse index'] = struct.unpack(endian + 'I', fid.read(4))[0]
else:
header['delta histogram flag'] = struct.unpack(endian + 'B', \
fid.read(1))[0]
# check for bug where a long may be 32 bits on some systems and 64 on others
if is32bit and (version < 2):
header['pulse data bytes'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
else:
header['pulse data bytes'] = struct.unpack(endian + 'Q', \
fid.read(8))[0]
if version > 1:
# pylint: disable=E1103
header['system transmit mueller matrix'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
header['system receive mueller matrix'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
output['header'] = header
# read the data
tmp = fid.read(header['pulse data bytes'])
if header['data compression type'] == 1:
tmp = zlib.decompress(tmp)
tmp = struct.unpack(xpixelct * ypixelct * header['samples per time bin'] * \
(header['time gate bin count'] + 1) * 'd', tmp)
# pylint: disable=E1103
output['data'] = numpy.reshape(numpy.array(tmp), (xpixelct, \
ypixelct, header['samples per time bin'] * \
(header['time gate bin count'] + 1)))
# pylint: enable=E1103
return output
def readtask(fid, version, endian, xpixelct, ypixelct, is32bit):
"""Reads a task from a DIRSIG bin file.
Args:
fid (file): The file id to read a pulse from.
version (int): The version of the bin file.
endian (str): The endian of the data.
xpixelct (int): the number of pixels in the x direction
ypixelct (int): the number of pixels in the y direction
is32bit (bool): a bool if DIRSIG was compiled on a 32 bit system.
Returns:
A dictionary containing the task data. This has two keys: 'header',
a dictionary containing the task header; and 'pulses', a list of
dictionaries, each containing a pulse.
"""
output = {}
output['pulses'] = []
header = {}
header['task description'] = fid.read(64).replace('\x00', '')
header['task start date time'] = fid.read(15)
header['task stop date time'] = fid.read(15)
header['focal length'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['pulse repition frequency'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['pulse duration'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['pulse energy'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['laser spectral center'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['laser spectral width'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['pulse count'] = struct.unpack(endian + 'I', fid.read(4))[0]
output['header'] = header
for dummypulse in range(header['pulse count']):
output['pulses'].append(readpulse(fid, version, endian, xpixelct, \
ypixelct, is32bit))
return output
# start reading the bin file
fid = open(filename, "rb")
output = {}
output['tasks'] = []
header = {}
try:
byte = fid.read(11)
if byte != "DIRSIGPROTO":
raise RuntimeError("'" + filename + \
"' is not valid DIRSIG bin file.")
header['file format revision'] = struct.unpack('B', fid.read(1))[0]
_version = header['file format revision']
header['byte ordering'] = struct.unpack('B', fid.read(1))[0]
if header['byte ordering'] == 0:
endian = '>'
else:
endian = '<'
header['file creation date time'] = fid.read(15)
header['dirsig version string'] = fid.read(32).replace('\x00', '')
header['simulation description'] = fid.read(256).replace('\x00', '')
header['scene origin latitude'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['scene origin longitude'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['scene origin height'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['transmitter mount type'] = fid.read(16).replace('\x00', '')
header['reciever mount type'] = fid.read(16).replace('\x00', '')
header['x pixel count'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
header['y pixel count'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
header['x pixel pitch'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['y pixel pitch'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
if _version > 0:
header['x array offset'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['y array offset'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['lens distortion k1'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['lens distortion k2'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['task count'] = struct.unpack(endian + 'I', fid.read(4))[0]
if _version > 1:
header['focal plane array id'] = struct.unpack(endian + 'H', \
fid.read(2))[0]
output['header'] = header
for dummytask in range(header['task count']):
output['tasks'].append(readtask(fid, _version, endian, \
header['x pixel count'], header['y pixel count'], is32bit))
except RuntimeError, error:
sys.stderr.write('ERROR: #s\n' % str(error))
finally:
fid.close()
return output
if __name__ == '__main__':
ARGS = sys.argv[1:]
if ARGS:
FILENAME = "\\ ".join(ARGS)
BINFILE = readbin(FILENAME)
TASKCT = 0
PULSECT = 0
for dummytask in BINFILE['tasks']:
TASKCT += 1
for dummypulse in dummytask['pulses']:
PULSECT += 1
print FILENAME + ' contains:'
print '\t' + str(TASKCT) + ' tasks'
print '\t' + str(PULSECT) + ' pulses'
| #!/usr/bin/env python
""" Reads a DIRSIG lidar "bin" file
Description:
This file provides code to read a DIRSIG bin file and provides basic
manipulation of that file.
Usage:
To read a bin file:
For most cases:
binfile = readDirsigBin(filename)
If dirsig was compiled on a 32 bit system and the bin file is version
0 or 1:
binfile = readDirsigBin(filename, True)
External Dependancies:
numpy
struct
sys
zlib
Warnings:
This code has not been tested on a version 0 bin file.
Author(s):
<NAME> par4249 at rit dot edu
Copyright:
(c) 2015 Rochester Institute of Technology
References:
[1] http://www.dirsig.org/docs/new/bin.html (Accessed 2013-02-09).
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Rochester Institute of Technology"
__credits__ = []
__license__ = "MIT"
#__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import sys # stderr and command-line arguments
import numpy # base data type for signals
import struct # for convertint data types
import zlib # for decompression
def readbin(filename, is32bit=False):
"""Reads a DIRSIG bin file.
Args:
filename (str): A string containing the file to read.
is32bit (bool, optional): Set to True if DIRSIG was compiled on a 32 bit
system. This tells the code to use 32 bit long longs for the pulse
data bytes field. In version 2 or later of the bin file, this was
guaranteed to be 64 bits in of the bin file and this flag will have
no effect on the data parsing. The default is False.
Returns:
A dictionary containing two keys: 'header' and 'tasks'. output['header']
is a dictionary containing the file header. For output['tasks'] is a list
containing dictionaries. Let task = output['tasks'][i], be the ith task.
task['header'] is a dictionary containing the task header. task['pulses']
is a list of dictionaries containing the pulses. Let task['pulses'][j] be
the jth pulse of the task. pulse['header'] is a dict containing the pulse
header. pulse['data'] is a numpy.array contating the return information.
The contents of the header files will depend on the version of the bin
file that is being read. See [1] for more details.
output = {'header': dict, 'tasks': list}
output['tasks'][i] = {'header': dict, 'pulses': list}
output['tasks'][i]['pulses'][j] = {'header': dict, 'data': numpy.array}
The 0th time bin of the pulse data is the passive term, the remaining
bands are the active time part of the signal.
"""
# define helper functions
def readpulse(fid, version, endian, xpixelct, ypixelct, is32bit):
"""Reads a pulse from a DIRSIG bin file.
Args:
fid (file): The file id to read a pulse from.
version (int): The version of the bin file.
endian (str): The endian of the data.
xpixelct (int): the number of pixels in the x direction
ypixelct (int): the number of pixels in the y direction
is32bit (bool): a bool if DIRSIG was compiled on a 32 bit system.
Returns:
A dictionary containing the pulse data. This has two keys: 'header',
a dictionary containing the pulse header; and 'data', a numpy.array
containing the data for the pulse.
"""
output = {}
header = {}
header['pulse time'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['time gate start'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['time gate stop'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['time gate bin count'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
if version > 0:
header['samples per time bin'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
else:
# Just make a guess
header['samples per time bin'] = 1
header['platform location'] = numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version < 2:
header['platform orientation angle order'] = fid.read(3)
header['platform rotation'] = numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version > 1:
# pylint: disable=E1103
header['transmitter to mount affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
else:
header['transmitter mount pointing offset'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
header['tranmitter orientation angle order'] = fid.read(3)
header['transmitter mount pointing rotation'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version > 1:
# pylint: disable=E1103
header['transmitter mount to platform affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
header['receiver to mount affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
else:
header['receiver mount pointing offset'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
header['receiver orientation angle order'] = fid.read(3)
header['receiver mount pointing rotation'] = \
numpy.mat(struct.unpack(endian + 3 * 'd', \
fid.read(24)))
if version > 1:
# pylint: disable=E1103
header['receiver mount to platform affine'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
header['pulse data type'] = struct.unpack(endian + 'I', \
fid.read(4))[0] # should always be 5 (double)
header['data compression type'] = struct.unpack(endian + 'B', \
fid.read(1))[0]
if version > 1:
header['pulse index'] = struct.unpack(endian + 'I', fid.read(4))[0]
else:
header['delta histogram flag'] = struct.unpack(endian + 'B', \
fid.read(1))[0]
# check for bug where a long may be 32 bits on some systems and 64 on others
if is32bit and (version < 2):
header['pulse data bytes'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
else:
header['pulse data bytes'] = struct.unpack(endian + 'Q', \
fid.read(8))[0]
if version > 1:
# pylint: disable=E1103
header['system transmit mueller matrix'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
header['system receive mueller matrix'] = \
numpy.mat(struct.unpack(endian + 16 * 'd', \
fid.read(128))).reshape((4, 4))
# pylint: enable=E1103
output['header'] = header
# read the data
tmp = fid.read(header['pulse data bytes'])
if header['data compression type'] == 1:
tmp = zlib.decompress(tmp)
tmp = struct.unpack(xpixelct * ypixelct * header['samples per time bin'] * \
(header['time gate bin count'] + 1) * 'd', tmp)
# pylint: disable=E1103
output['data'] = numpy.reshape(numpy.array(tmp), (xpixelct, \
ypixelct, header['samples per time bin'] * \
(header['time gate bin count'] + 1)))
# pylint: enable=E1103
return output
def readtask(fid, version, endian, xpixelct, ypixelct, is32bit):
"""Reads a task from a DIRSIG bin file.
Args:
fid (file): The file id to read a pulse from.
version (int): The version of the bin file.
endian (str): The endian of the data.
xpixelct (int): the number of pixels in the x direction
ypixelct (int): the number of pixels in the y direction
is32bit (bool): a bool if DIRSIG was compiled on a 32 bit system.
Returns:
A dictionary containing the task data. This has two keys: 'header',
a dictionary containing the task header; and 'pulses', a list of
dictionaries, each containing a pulse.
"""
output = {}
output['pulses'] = []
header = {}
header['task description'] = fid.read(64).replace('\x00', '')
header['task start date time'] = fid.read(15)
header['task stop date time'] = fid.read(15)
header['focal length'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['pulse repition frequency'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['pulse duration'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['pulse energy'] = struct.unpack(endian + 'd', fid.read(8))[0]
header['laser spectral center'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['laser spectral width'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['pulse count'] = struct.unpack(endian + 'I', fid.read(4))[0]
output['header'] = header
for dummypulse in range(header['pulse count']):
output['pulses'].append(readpulse(fid, version, endian, xpixelct, \
ypixelct, is32bit))
return output
# start reading the bin file
fid = open(filename, "rb")
output = {}
output['tasks'] = []
header = {}
try:
byte = fid.read(11)
if byte != "DIRSIGPROTO":
raise RuntimeError("'" + filename + \
"' is not valid DIRSIG bin file.")
header['file format revision'] = struct.unpack('B', fid.read(1))[0]
_version = header['file format revision']
header['byte ordering'] = struct.unpack('B', fid.read(1))[0]
if header['byte ordering'] == 0:
endian = '>'
else:
endian = '<'
header['file creation date time'] = fid.read(15)
header['dirsig version string'] = fid.read(32).replace('\x00', '')
header['simulation description'] = fid.read(256).replace('\x00', '')
header['scene origin latitude'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['scene origin longitude'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['scene origin height'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['transmitter mount type'] = fid.read(16).replace('\x00', '')
header['reciever mount type'] = fid.read(16).replace('\x00', '')
header['x pixel count'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
header['y pixel count'] = struct.unpack(endian + 'I', \
fid.read(4))[0]
header['x pixel pitch'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['y pixel pitch'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
if _version > 0:
header['x array offset'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['y array offset'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['lens distortion k1'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['lens distortion k2'] = struct.unpack(endian + 'd', \
fid.read(8))[0]
header['task count'] = struct.unpack(endian + 'I', fid.read(4))[0]
if _version > 1:
header['focal plane array id'] = struct.unpack(endian + 'H', \
fid.read(2))[0]
output['header'] = header
for dummytask in range(header['task count']):
output['tasks'].append(readtask(fid, _version, endian, \
header['x pixel count'], header['y pixel count'], is32bit))
except RuntimeError, error:
sys.stderr.write('ERROR: #s\n' % str(error))
finally:
fid.close()
return output
if __name__ == '__main__':
ARGS = sys.argv[1:]
if ARGS:
FILENAME = "\\ ".join(ARGS)
BINFILE = readbin(FILENAME)
TASKCT = 0
PULSECT = 0
for dummytask in BINFILE['tasks']:
TASKCT += 1
for dummypulse in dummytask['pulses']:
PULSECT += 1
print FILENAME + ' contains:'
print '\t' + str(TASKCT) + ' tasks'
print '\t' + str(PULSECT) + ' pulses'
| en | 0.733924 | #!/usr/bin/env python Reads a DIRSIG lidar "bin" file Description: This file provides code to read a DIRSIG bin file and provides basic manipulation of that file. Usage: To read a bin file: For most cases: binfile = readDirsigBin(filename) If dirsig was compiled on a 32 bit system and the bin file is version 0 or 1: binfile = readDirsigBin(filename, True) External Dependancies: numpy struct sys zlib Warnings: This code has not been tested on a version 0 bin file. Author(s): <NAME> par4249 at rit dot edu Copyright: (c) 2015 Rochester Institute of Technology References: [1] http://www.dirsig.org/docs/new/bin.html (Accessed 2013-02-09). #__version__ = "1.0.1" # stderr and command-line arguments # base data type for signals # for convertint data types # for decompression Reads a DIRSIG bin file. Args: filename (str): A string containing the file to read. is32bit (bool, optional): Set to True if DIRSIG was compiled on a 32 bit system. This tells the code to use 32 bit long longs for the pulse data bytes field. In version 2 or later of the bin file, this was guaranteed to be 64 bits in of the bin file and this flag will have no effect on the data parsing. The default is False. Returns: A dictionary containing two keys: 'header' and 'tasks'. output['header'] is a dictionary containing the file header. For output['tasks'] is a list containing dictionaries. Let task = output['tasks'][i], be the ith task. task['header'] is a dictionary containing the task header. task['pulses'] is a list of dictionaries containing the pulses. Let task['pulses'][j] be the jth pulse of the task. pulse['header'] is a dict containing the pulse header. pulse['data'] is a numpy.array contating the return information. The contents of the header files will depend on the version of the bin file that is being read. See [1] for more details. 
output = {'header': dict, 'tasks': list} output['tasks'][i] = {'header': dict, 'pulses': list} output['tasks'][i]['pulses'][j] = {'header': dict, 'data': numpy.array} The 0th time bin of the pulse data is the passive term, the remaining bands are the active time part of the signal. # define helper functions Reads a pulse from a DIRSIG bin file. Args: fid (file): The file id to read a pulse from. version (int): The version of the bin file. endian (str): The endian of the data. xpixelct (int): the number of pixels in the x direction ypixelct (int): the number of pixels in the y direction is32bit (bool): a bool if DIRSIG was compiled on a 32 bit system. Returns: A dictionary containing the pulse data. This has two keys: 'header', a dictionary containing the pulse header; and 'data', a numpy.array containing the data for the pulse. # Just make a guess # pylint: disable=E1103 # pylint: enable=E1103 # pylint: disable=E1103 # pylint: enable=E1103 # pylint: disable=E1103 # pylint: enable=E1103 # should always be 5 (double) # check for bug where a long may be 32 bits on some systems and 64 on others # pylint: disable=E1103 # pylint: enable=E1103 # read the data # pylint: disable=E1103 # pylint: enable=E1103 Reads a task from a DIRSIG bin file. Args: fid (file): The file id to read a pulse from. version (int): The version of the bin file. endian (str): The endian of the data. xpixelct (int): the number of pixels in the x direction ypixelct (int): the number of pixels in the y direction is32bit (bool): a bool if DIRSIG was compiled on a 32 bit system. Returns: A dictionary containing the task data. This has two keys: 'header', a dictionary containing the task header; and 'pulses', a list of dictionaries, each containing a pulse. # start reading the bin file #s\n' % str(error)) | 3.288702 | 3 |
tests/unit-test/memstress/package.py | HPCToolkit/hpctest | 1 | 6615763 | <filename>tests/unit-test/memstress/package.py
#========================#
# MEMSTRESS PACKAGE FILE #
#========================#
from spack import *
class Memstress(MakefilePackage):
version('1.0', 'hpctest/tests/unit-tests/memstress')
url = 'hpctest/tests/unit-tests/memstress'
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('memstress', prefix.bin)
| <filename>tests/unit-test/memstress/package.py
#========================#
# MEMSTRESS PACKAGE FILE #
#========================#
from spack import *
class Memstress(MakefilePackage):
version('1.0', 'hpctest/tests/unit-tests/memstress')
url = 'hpctest/tests/unit-tests/memstress'
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('memstress', prefix.bin)
| fr | 0.529144 | #========================# # MEMSTRESS PACKAGE FILE # #========================# | 1.758379 | 2 |
frameworks/elastic/tests/conftest.py | ankitcid/dcos-commons | 1 | 6615764 | from typing import Iterator
import pytest
import sdk_security
from tests import config
@pytest.fixture(scope="session")
def configure_security(configure_universe: None) -> Iterator[None]:
yield from sdk_security.security_session(config.SERVICE_NAME)
| from typing import Iterator
import pytest
import sdk_security
from tests import config
@pytest.fixture(scope="session")
def configure_security(configure_universe: None) -> Iterator[None]:
yield from sdk_security.security_session(config.SERVICE_NAME)
| none | 1 | 1.918859 | 2 | |
tests/test_construct_estimator.py | mathijsvdv/pylogit | 153 | 6615765 | """
Tests for the construct_estimator.py file.
"""
import unittest
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, eye
import pylogit.asym_logit as asym
import pylogit.conditional_logit as mnl
import pylogit.clog_log as clog
import pylogit.scobit as scobit
import pylogit.uneven_logit as uneven
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
import pylogit.nested_logit as nested_logit
import pylogit.construct_estimator as constructor
class ConstructorTests(unittest.TestCase):
def make_asym_model(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1])
# Create names for the intercept parameters
fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
natural_shapes = asym._convert_eta_to_c(fake_shapes,
fake_shape_ref_pos)
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionaryfor the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"shape_ref_pos": fake_shape_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
model_obj = asym.MNAL(*constructor_args, **constructor_kwargs)
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
model_obj.nests = None
model_obj.params =\
pd.concat([model_obj.shapes,
model_obj.intercepts,
model_obj.coefs],
axis=0, ignore_index=False)
return model_obj
def make_uneven_and_scobit_models(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1, 2])
# Create names for the intercept parameters
fake_shape_names = ["Shape 1", "Shape 2", "Shape 3"]
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the model constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the choice models.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize the various choice models
uneven_obj = uneven.MNUL(*constructor_args, **constructor_kwargs)
scobit_obj = scobit.MNSL(*constructor_args, **constructor_kwargs)
for model_obj in [uneven_obj, scobit_obj]:
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
model_obj.nests = None
model_obj.params =\
pd.concat([model_obj.shapes,
model_obj.intercepts,
model_obj.coefs],
axis=0, ignore_index=False)
return uneven_obj, scobit_obj
def make_clog_and_mnl_models(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_intercepts, fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the model constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionaryfor the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
mnl_spec = OrderedDict()
mnl_names = OrderedDict()
mnl_spec["intercept"] =[1, 2]
mnl_names["intercept"] = fake_intercept_names
mnl_spec["x"] = fake_specification["x"]
mnl_names["x"] = fake_names["x"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
clog_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
mnl_args = deepcopy(clog_args)
mnl_args[-1] = mnl_spec
# Create a variable for the kwargs being passed to the constructor
clog_kwargs = {"names": fake_names,
"intercept_ref_pos": fake_intercept_ref_pos,
"intercept_names": fake_intercept_names}
mnl_kwargs = {"names": mnl_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
clog_obj = clog.MNCL(*clog_args, **clog_kwargs)
mnl_obj = mnl.MNL(*mnl_args, **mnl_kwargs)
# Create the desired model attributes for the clog log model
clog_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
clog_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
clog_obj.shapes = None
clog_obj.nests = None
clog_obj.params =\
pd.concat([clog_obj.intercepts, clog_obj.coefs],
axis=0, ignore_index=False)
mnl_obj.params = clog_obj.params.copy()
mnl_obj.coefs = mnl_obj.params.copy()
mnl_obj.intercepts = None
mnl_obj.shapes = None
mnl_obj.nests = None
return clog_obj, mnl_obj
def make_mixed_model(self):
# Fake random draws where Row 1 is for observation 1 and row 2 is
# for observation 2. Column 1 is for draw 1 and column 2 is for draw 2
fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0]
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
fake_std = 1
fake_betas_ext = np.concatenate((fake_betas,
np.array([fake_std])),
axis=0)
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 1, 2.5],
[0, 0, 3.5],
[1, 0, 0.5],
[0, 1, 1.0],
[0, 0, 1.5]])
# Record what positions in the design matrix are being mixed over
mixing_pos = [2]
# Create the arrays that specify the choice situation, individual id
# and alternative ids
situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
# Create a fake array of choices
choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0])
# Create the 'rows_to_mixers' sparse array for this dataset
# Denote the rows that correspond to observation 1 and observation 2
obs_1_rows = np.ones(fake_design.shape[0])
# Make sure the rows for observation 2 are given a zero in obs_1_rows
obs_1_rows[-3:] = 0
obs_2_rows = 1 - obs_1_rows
# Create the row_to_mixers scipy.sparse matrix
fake_rows_to_mixers = csr_matrix(obs_1_rows[:, None] ==
np.array([1, 0])[None, :])
# Create the rows_to_obs scipy.sparse matrix
fake_rows_to_obs = csr_matrix(situation_ids[:, None] ==
np.arange(1, 4)[None, :])
# Create the design matrix that we should see for draw 1 and draw 2
arrays_to_join = (fake_design.copy(),
fake_design.copy()[:, -1][:, None])
fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1)
fake_design_draw_2 = fake_design_draw_1.copy()
# Multiply the 'random' coefficient draws by the corresponding variable
fake_design_draw_1[:, -1] *= (obs_1_rows *
fake_draws[0, 0] +
obs_2_rows *
fake_draws[1, 0])
fake_design_draw_2[:, -1] *= (obs_1_rows *
fake_draws[0, 1] +
obs_2_rows *
fake_draws[1, 1])
extended_design_draw_1 = fake_design_draw_1[:, None, :]
extended_design_draw_2 = fake_design_draw_2[:, None, :]
fake_design_3d = np.concatenate((extended_design_draw_1,
extended_design_draw_2),
axis=1)
# Create the fake systematic utility values
sys_utilities_draw_1 = fake_design_draw_1.dot(fake_betas_ext)
sys_utilities_draw_2 = fake_design_draw_2.dot(fake_betas_ext)
#####
# Calculate the probabilities of each alternatve in each choice
# situation
#####
long_exp_draw_1 = np.exp(sys_utilities_draw_1)
long_exp_draw_2 = np.exp(sys_utilities_draw_2)
ind_exp_sums_draw_1 = fake_rows_to_obs.T.dot(long_exp_draw_1)
ind_exp_sums_draw_2 = fake_rows_to_obs.T.dot(long_exp_draw_2)
long_exp_sum_draw_1 = fake_rows_to_obs.dot(ind_exp_sums_draw_1)
long_exp_sum_draw_2 = fake_rows_to_obs.dot(ind_exp_sums_draw_2)
long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1
long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2
prob_array = np.concatenate((long_probs_draw_1[:, None],
long_probs_draw_2[:, None]),
axis=1)
###########
# Create a mixed logit object for later use.
##########
# Create a fake old long format dataframe for mixed logit model object
alt_id_column = "alt_id"
situation_id_column = "situation_id"
obs_id_column = "observation_id"
choice_column = "choice"
data = {"x": fake_design[:, 2],
alt_id_column: alternative_ids,
situation_id_column: situation_ids,
obs_id_column: individual_ids,
choice_column: choice_array}
fake_old_df = pd.DataFrame(data)
fake_old_df["intercept"] = 1
# Create a fake specification
fake_spec = OrderedDict()
fake_names = OrderedDict()
fake_spec["intercept"] = [1, 2]
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_spec["x"] = [[1, 2, 3]]
fake_names["x"] = ["beta_x"]
# Specify the mixing variable
fake_mixing_vars = ["beta_x"]
# Create a fake version of a mixed logit model object
args = [fake_old_df,
alt_id_column,
situation_id_column,
choice_column,
fake_spec]
kwargs = {"names": fake_names,
"mixing_id_col": obs_id_column,
"mixing_vars": fake_mixing_vars}
mixl_obj = mixed_logit.MixedLogit(*args, **kwargs)
# Set all the necessary attributes for prediction:
# design_3d, coefs, intercepts, shapes, nests, mixing_pos
mixl_obj.design_3d = fake_design_3d
mixl_obj.ind_var_names += ["Sigma X"]
mixl_obj.coefs =\
pd.Series(fake_betas_ext, index=mixl_obj.ind_var_names)
mixl_obj.intercepts = None
mixl_obj.shapes = None
mixl_obj.nests = None
mixl_obj.params = mixl_obj.coefs.copy()
return mixl_obj
def make_nested_model(self):
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
# Create the fake nest coefficients to be used during the tests
# Note that these are the 'natural' nest coefficients, i.e. the
# inverse of the scale parameters for each nest. They should be bigger
# than or equal to 1.
natural_nest_coefs = np.array([1 - 1e-16, 0.5])
# Create an array of all model parameters
fake_all_params = np.concatenate((natural_nest_coefs,
fake_betas))
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two.
# The nest memberships of these alternatives are given below.
fake_rows_to_nests = csr_matrix(np.array([[1, 0],
[1, 0],
[0, 1],
[1, 0],
[0, 1]]))
# Create a sparse matrix that maps the rows of the design matrix to the
# observatins
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 0, 3.5]])
# Create fake versions of the needed arguments for the MNL constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": range(5),
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Store the choice array
choice_array = fake_df[choice_col].values
# Create a sparse matrix that maps the chosen rows of the design
# matrix to the observatins
fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0],
[0, 1]]))
# Create the index specification and name dictionaryfor the model
fake_specification = OrderedDict()
fake_specification["intercept"] = [1, 2]
fake_specification["x"] = [[1, 2, 3]]
fake_names = OrderedDict()
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_names["x"] = ["x (generic coefficient)"]
# Create the nesting specification
fake_nest_spec = OrderedDict()
fake_nest_spec["Nest 1"] = [1, 2]
fake_nest_spec["Nest 2"] = [3]
# Create a nested logit object
args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
kwargs = {"names": fake_names,
"nest_spec": fake_nest_spec}
model_obj = nested_logit.NestedLogit(*args, **kwargs)
model_obj.coefs = pd.Series(fake_betas, index=model_obj.ind_var_names)
model_obj.intercepts = None
model_obj.shapes = None
def logit(x):
return np.log(x / (1 - x))
model_obj.nests =\
pd.Series(logit(natural_nest_coefs), index=fake_nest_spec.keys())
model_obj.params =\
pd.concat([model_obj.nests, model_obj.coefs],
axis=0, ignore_index=False)
return model_obj
def setUp(self):
"""
Create the real model objects.
"""
self.asym_model = self.make_asym_model()
self.uneven_model, self.scobit_model =\
self.make_uneven_and_scobit_models()
self.clog_model, self.mnl_model = self.make_clog_and_mnl_models()
self.mixed_model = self.make_mixed_model()
self.nested_model = self.make_nested_model()
return None
def test_create_estimation_obj(self):
# Alias the function being tested
func = constructor.create_estimation_obj
# Take note of the models that are being used in this test
models = [self.mnl_model,
self.clog_model,
self.asym_model,
self.scobit_model,
self.uneven_model,
self.nested_model,
self.mixed_model]
# Perform the desired tests
for model_obj in models:
# Get the internal model name
internal_model_name =\
constructor.display_name_to_model_type[model_obj.model_type]
# Get the estimation object class
estimation_class = (constructor.model_type_to_resources
[internal_model_name]
['estimator'])
# Get the function results
args = [model_obj, model_obj.params.values]
kwargs = {"mappings": model_obj.get_mappings_for_fit(),
"ridge": 0.25,
"constrained_pos": [0],
"weights": np.ones(model_obj.data.shape[0])}
# Make sure the function result is of the correct class.
func_result = func(*args, **kwargs)
self.assertIsInstance(func_result, estimation_class)
for key in ['ridge', 'constrained_pos', 'weights']:
expected_value = kwargs[key]
self.assertTrue(hasattr(func_result, key))
func_value = getattr(func_result, key)
if isinstance(expected_value, np.ndarray):
npt.assert_allclose(expected_value, func_value)
else:
self.assertEqual(expected_value, func_value)
return None
| """
Tests for the construct_estimator.py file.
"""
import unittest
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, eye
import pylogit.asym_logit as asym
import pylogit.conditional_logit as mnl
import pylogit.clog_log as clog
import pylogit.scobit as scobit
import pylogit.uneven_logit as uneven
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
import pylogit.nested_logit as nested_logit
import pylogit.construct_estimator as constructor
class ConstructorTests(unittest.TestCase):
def make_asym_model(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1])
# Create names for the intercept parameters
fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
natural_shapes = asym._convert_eta_to_c(fake_shapes,
fake_shape_ref_pos)
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionaryfor the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"shape_ref_pos": fake_shape_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
model_obj = asym.MNAL(*constructor_args, **constructor_kwargs)
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
model_obj.nests = None
model_obj.params =\
pd.concat([model_obj.shapes,
model_obj.intercepts,
model_obj.coefs],
axis=0, ignore_index=False)
return model_obj
def make_uneven_and_scobit_models(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1, 2])
# Create names for the intercept parameters
fake_shape_names = ["Shape 1", "Shape 2", "Shape 3"]
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the model constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the choice models.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize the various choice models
uneven_obj = uneven.MNUL(*constructor_args, **constructor_kwargs)
scobit_obj = scobit.MNSL(*constructor_args, **constructor_kwargs)
for model_obj in [uneven_obj, scobit_obj]:
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
model_obj.nests = None
model_obj.params =\
pd.concat([model_obj.shapes,
model_obj.intercepts,
model_obj.coefs],
axis=0, ignore_index=False)
return uneven_obj, scobit_obj
def make_clog_and_mnl_models(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_intercepts, fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the model constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionaryfor the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
mnl_spec = OrderedDict()
mnl_names = OrderedDict()
mnl_spec["intercept"] =[1, 2]
mnl_names["intercept"] = fake_intercept_names
mnl_spec["x"] = fake_specification["x"]
mnl_names["x"] = fake_names["x"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
clog_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
mnl_args = deepcopy(clog_args)
mnl_args[-1] = mnl_spec
# Create a variable for the kwargs being passed to the constructor
clog_kwargs = {"names": fake_names,
"intercept_ref_pos": fake_intercept_ref_pos,
"intercept_names": fake_intercept_names}
mnl_kwargs = {"names": mnl_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
clog_obj = clog.MNCL(*clog_args, **clog_kwargs)
mnl_obj = mnl.MNL(*mnl_args, **mnl_kwargs)
# Create the desired model attributes for the clog log model
clog_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
clog_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
clog_obj.shapes = None
clog_obj.nests = None
clog_obj.params =\
pd.concat([clog_obj.intercepts, clog_obj.coefs],
axis=0, ignore_index=False)
mnl_obj.params = clog_obj.params.copy()
mnl_obj.coefs = mnl_obj.params.copy()
mnl_obj.intercepts = None
mnl_obj.shapes = None
mnl_obj.nests = None
return clog_obj, mnl_obj
def make_mixed_model(self):
# Fake random draws where Row 1 is for observation 1 and row 2 is
# for observation 2. Column 1 is for draw 1 and column 2 is for draw 2
fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0]
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
fake_std = 1
fake_betas_ext = np.concatenate((fake_betas,
np.array([fake_std])),
axis=0)
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 1, 2.5],
[0, 0, 3.5],
[1, 0, 0.5],
[0, 1, 1.0],
[0, 0, 1.5]])
# Record what positions in the design matrix are being mixed over
mixing_pos = [2]
# Create the arrays that specify the choice situation, individual id
# and alternative ids
situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
# Create a fake array of choices
choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0])
# Create the 'rows_to_mixers' sparse array for this dataset
# Denote the rows that correspond to observation 1 and observation 2
obs_1_rows = np.ones(fake_design.shape[0])
# Make sure the rows for observation 2 are given a zero in obs_1_rows
obs_1_rows[-3:] = 0
obs_2_rows = 1 - obs_1_rows
# Create the row_to_mixers scipy.sparse matrix
fake_rows_to_mixers = csr_matrix(obs_1_rows[:, None] ==
np.array([1, 0])[None, :])
# Create the rows_to_obs scipy.sparse matrix
fake_rows_to_obs = csr_matrix(situation_ids[:, None] ==
np.arange(1, 4)[None, :])
# Create the design matrix that we should see for draw 1 and draw 2
arrays_to_join = (fake_design.copy(),
fake_design.copy()[:, -1][:, None])
fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1)
fake_design_draw_2 = fake_design_draw_1.copy()
# Multiply the 'random' coefficient draws by the corresponding variable
fake_design_draw_1[:, -1] *= (obs_1_rows *
fake_draws[0, 0] +
obs_2_rows *
fake_draws[1, 0])
fake_design_draw_2[:, -1] *= (obs_1_rows *
fake_draws[0, 1] +
obs_2_rows *
fake_draws[1, 1])
extended_design_draw_1 = fake_design_draw_1[:, None, :]
extended_design_draw_2 = fake_design_draw_2[:, None, :]
fake_design_3d = np.concatenate((extended_design_draw_1,
extended_design_draw_2),
axis=1)
# Create the fake systematic utility values
sys_utilities_draw_1 = fake_design_draw_1.dot(fake_betas_ext)
sys_utilities_draw_2 = fake_design_draw_2.dot(fake_betas_ext)
#####
# Calculate the probabilities of each alternatve in each choice
# situation
#####
long_exp_draw_1 = np.exp(sys_utilities_draw_1)
long_exp_draw_2 = np.exp(sys_utilities_draw_2)
ind_exp_sums_draw_1 = fake_rows_to_obs.T.dot(long_exp_draw_1)
ind_exp_sums_draw_2 = fake_rows_to_obs.T.dot(long_exp_draw_2)
long_exp_sum_draw_1 = fake_rows_to_obs.dot(ind_exp_sums_draw_1)
long_exp_sum_draw_2 = fake_rows_to_obs.dot(ind_exp_sums_draw_2)
long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1
long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2
prob_array = np.concatenate((long_probs_draw_1[:, None],
long_probs_draw_2[:, None]),
axis=1)
###########
# Create a mixed logit object for later use.
##########
# Create a fake old long format dataframe for mixed logit model object
alt_id_column = "alt_id"
situation_id_column = "situation_id"
obs_id_column = "observation_id"
choice_column = "choice"
data = {"x": fake_design[:, 2],
alt_id_column: alternative_ids,
situation_id_column: situation_ids,
obs_id_column: individual_ids,
choice_column: choice_array}
fake_old_df = pd.DataFrame(data)
fake_old_df["intercept"] = 1
# Create a fake specification
fake_spec = OrderedDict()
fake_names = OrderedDict()
fake_spec["intercept"] = [1, 2]
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_spec["x"] = [[1, 2, 3]]
fake_names["x"] = ["beta_x"]
# Specify the mixing variable
fake_mixing_vars = ["beta_x"]
# Create a fake version of a mixed logit model object
args = [fake_old_df,
alt_id_column,
situation_id_column,
choice_column,
fake_spec]
kwargs = {"names": fake_names,
"mixing_id_col": obs_id_column,
"mixing_vars": fake_mixing_vars}
mixl_obj = mixed_logit.MixedLogit(*args, **kwargs)
# Set all the necessary attributes for prediction:
# design_3d, coefs, intercepts, shapes, nests, mixing_pos
mixl_obj.design_3d = fake_design_3d
mixl_obj.ind_var_names += ["Sigma X"]
mixl_obj.coefs =\
pd.Series(fake_betas_ext, index=mixl_obj.ind_var_names)
mixl_obj.intercepts = None
mixl_obj.shapes = None
mixl_obj.nests = None
mixl_obj.params = mixl_obj.coefs.copy()
return mixl_obj
def make_nested_model(self):
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
# Create the fake nest coefficients to be used during the tests
# Note that these are the 'natural' nest coefficients, i.e. the
# inverse of the scale parameters for each nest. They should be bigger
# than or equal to 1.
natural_nest_coefs = np.array([1 - 1e-16, 0.5])
# Create an array of all model parameters
fake_all_params = np.concatenate((natural_nest_coefs,
fake_betas))
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two.
# The nest memberships of these alternatives are given below.
fake_rows_to_nests = csr_matrix(np.array([[1, 0],
[1, 0],
[0, 1],
[1, 0],
[0, 1]]))
# Create a sparse matrix that maps the rows of the design matrix to the
# observatins
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 0, 3.5]])
# Create fake versions of the needed arguments for the MNL constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": range(5),
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Store the choice array
choice_array = fake_df[choice_col].values
# Create a sparse matrix that maps the chosen rows of the design
# matrix to the observatins
fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0],
[0, 1]]))
# Create the index specification and name dictionaryfor the model
fake_specification = OrderedDict()
fake_specification["intercept"] = [1, 2]
fake_specification["x"] = [[1, 2, 3]]
fake_names = OrderedDict()
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_names["x"] = ["x (generic coefficient)"]
# Create the nesting specification
fake_nest_spec = OrderedDict()
fake_nest_spec["Nest 1"] = [1, 2]
fake_nest_spec["Nest 2"] = [3]
# Create a nested logit object
args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
kwargs = {"names": fake_names,
"nest_spec": fake_nest_spec}
model_obj = nested_logit.NestedLogit(*args, **kwargs)
model_obj.coefs = pd.Series(fake_betas, index=model_obj.ind_var_names)
model_obj.intercepts = None
model_obj.shapes = None
def logit(x):
return np.log(x / (1 - x))
model_obj.nests =\
pd.Series(logit(natural_nest_coefs), index=fake_nest_spec.keys())
model_obj.params =\
pd.concat([model_obj.nests, model_obj.coefs],
axis=0, ignore_index=False)
return model_obj
def setUp(self):
"""
Create the real model objects.
"""
self.asym_model = self.make_asym_model()
self.uneven_model, self.scobit_model =\
self.make_uneven_and_scobit_models()
self.clog_model, self.mnl_model = self.make_clog_and_mnl_models()
self.mixed_model = self.make_mixed_model()
self.nested_model = self.make_nested_model()
return None
def test_create_estimation_obj(self):
# Alias the function being tested
func = constructor.create_estimation_obj
# Take note of the models that are being used in this test
models = [self.mnl_model,
self.clog_model,
self.asym_model,
self.scobit_model,
self.uneven_model,
self.nested_model,
self.mixed_model]
# Perform the desired tests
for model_obj in models:
# Get the internal model name
internal_model_name =\
constructor.display_name_to_model_type[model_obj.model_type]
# Get the estimation object class
estimation_class = (constructor.model_type_to_resources
[internal_model_name]
['estimator'])
# Get the function results
args = [model_obj, model_obj.params.values]
kwargs = {"mappings": model_obj.get_mappings_for_fit(),
"ridge": 0.25,
"constrained_pos": [0],
"weights": np.ones(model_obj.data.shape[0])}
# Make sure the function result is of the correct class.
func_result = func(*args, **kwargs)
self.assertIsInstance(func_result, estimation_class)
for key in ['ridge', 'constrained_pos', 'weights']:
expected_value = kwargs[key]
self.assertTrue(hasattr(func_result, key))
func_value = getattr(func_result, key)
if isinstance(expected_value, np.ndarray):
npt.assert_allclose(expected_value, func_value)
else:
self.assertEqual(expected_value, func_value)
return None
| en | 0.832532 | Tests for the construct_estimator.py file. # The set up being used is one where there are two choice situations, # The first having three alternatives, and the second having only two # alternatives. There is one generic variable. Two alternative # specific constants and all three shape parameters are used. # Create the betas to be used during the tests # Create the fake outside intercepts to be used during the tests # Create names for the intercept parameters # Record the position of the intercept that is not being estimated # Create the shape parameters to be used during the tests. Note that # these are the reparameterized shape parameters, thus they will be # exponentiated in the fit_mle process and various calculations. # Create names for the intercept parameters # Record the position of the shape parameter that is being constrained # Calculate the 'natural' shape parameters # Create an array of all model parameters # Get the mappping between rows and observations # Create the fake design matrix with columns denoting X # The intercepts are not included because they are kept outside the # index in the scobit model. # Create the index array for this set of choice situations # Create the needed dataframe for the Asymmetric Logit constructor # Record the various column names # Create the index specification and name dictionaryfor the model # Bundle args and kwargs used to construct the Asymmetric Logit model. # Create a variable for the kwargs being passed to the constructor # Initialize a basic Asymmetric Logit model whose coefficients will be # estimated. # The set up being used is one where there are two choice situations, # The first having three alternatives, and the second having only two # alternatives. There is one generic variable. Two alternative # specific constants and all three shape parameters are used. 
# Create the betas to be used during the tests # Create the fake outside intercepts to be used during the tests # Create names for the intercept parameters # Record the position of the intercept that is not being estimated # Create the shape parameters to be used during the tests. Note that # these are the reparameterized shape parameters, thus they will be # exponentiated in the fit_mle process and various calculations. # Create names for the intercept parameters # Create an array of all model parameters # Get the mappping between rows and observations # Create the fake design matrix with columns denoting X # The intercepts are not included because they are kept outside the # index in the scobit model. # Create the index array for this set of choice situations # Create the needed dataframe for the model constructor # Record the various column names # Create the index specification and name dictionary for the model # Bundle args and kwargs used to construct the choice models. # Create a variable for the kwargs being passed to the constructor # Initialize the various choice models # The set up being used is one where there are two choice situations, # The first having three alternatives, and the second having only two # alternatives. There is one generic variable. Two alternative # specific constants and all three shape parameters are used. # Create the betas to be used during the tests # Create the fake outside intercepts to be used during the tests # Create names for the intercept parameters # Record the position of the intercept that is not being estimated # Create an array of all model parameters # Get the mappping between rows and observations # Create the fake design matrix with columns denoting X # The intercepts are not included because they are kept outside the # index in the scobit model. 
# Create the index array for this set of choice situations # Create the needed dataframe for the model constructor # Record the various column names # Create the index specification and name dictionaryfor the model # Bundle args and kwargs used to construct the Asymmetric Logit model. # Create a variable for the kwargs being passed to the constructor # Initialize a basic Asymmetric Logit model whose coefficients will be # estimated. # Create the desired model attributes for the clog log model # Fake random draws where Row 1 is for observation 1 and row 2 is # for observation 2. Column 1 is for draw 1 and column 2 is for draw 2 # Create the betas to be used during the tests # Create the fake design matrix with columns denoting ASC_1, ASC_2, X # Record what positions in the design matrix are being mixed over # Create the arrays that specify the choice situation, individual id # and alternative ids # Create a fake array of choices # Create the 'rows_to_mixers' sparse array for this dataset # Denote the rows that correspond to observation 1 and observation 2 # Make sure the rows for observation 2 are given a zero in obs_1_rows # Create the row_to_mixers scipy.sparse matrix # Create the rows_to_obs scipy.sparse matrix # Create the design matrix that we should see for draw 1 and draw 2 # Multiply the 'random' coefficient draws by the corresponding variable # Create the fake systematic utility values ##### # Calculate the probabilities of each alternatve in each choice # situation ##### ########### # Create a mixed logit object for later use. 
########## # Create a fake old long format dataframe for mixed logit model object # Create a fake specification # Specify the mixing variable # Create a fake version of a mixed logit model object # Set all the necessary attributes for prediction: # design_3d, coefs, intercepts, shapes, nests, mixing_pos # Create the betas to be used during the tests # Create the fake nest coefficients to be used during the tests # Note that these are the 'natural' nest coefficients, i.e. the # inverse of the scale parameters for each nest. They should be bigger # than or equal to 1. # Create an array of all model parameters # The set up being used is one where there are two choice situations, # The first having three alternatives, and the second having only two. # The nest memberships of these alternatives are given below. # Create a sparse matrix that maps the rows of the design matrix to the # observatins # Create the fake design matrix with columns denoting ASC_1, ASC_2, X # Create fake versions of the needed arguments for the MNL constructor # Record the various column names # Store the choice array # Create a sparse matrix that maps the chosen rows of the design # matrix to the observatins # Create the index specification and name dictionaryfor the model # Create the nesting specification # Create a nested logit object Create the real model objects. # Alias the function being tested # Take note of the models that are being used in this test # Perform the desired tests # Get the internal model name # Get the estimation object class # Get the function results # Make sure the function result is of the correct class. | 2.629548 | 3 |
craftassist/agent/adtt.py | kandluis/droidlet | 626 | 6615766 | <filename>craftassist/agent/adtt.py
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from preprocess import word_tokenize
from ttad.generation_dialogues.generate_dialogue import action_type_map # Dict[str, Class]
from ttad.generation_dialogues.templates.templates import template_map # Dict[str, List[Template]]
from typing import Dict, List, Tuple, Sequence
def adtt(d: Dict) -> str:
"""Return a string that would produce the action dict `d`
d is post-process_span (i.e. its span values are replaced with strings)
d is pre-coref_resolve (i.e. its coref_resolve values are strings, not
memory objects or keywords)
"""
if d["dialogue_type"] != "HUMAN_GIVE_COMMAND":
raise NotImplementedError("can't handle {}".format(d["dialogue_type"]))
action_type = d["action"]["action_type"] # e.g. "MOVE"
action_type = action_type[0].upper() + action_type[1:].lower() # e.g. "Move"
for template in template_map[action_type]:
dialogue, gen_d = generate_from_template(action_type, template)
recurse_remove_keys(gen_d, ["has_attribute"])
if len(dialogue) != 1:
continue
if dicts_match(d, gen_d):
print(gen_d)
text = replace_spans(dialogue[0], gen_d, d)
print(dialogue[0])
return replace_relative_direction(text, gen_d, d)
raise ValueError("No matching template found for {}".format(d))
def replace_spans(text: str, gen_d: Dict, d: Dict) -> str:
"""Replace words in text with spans from d"""
words = word_tokenize(text).split()
# compile list of spans to replace via recursive search
replaces = []
to_consider = [(gen_d, d)]
while len(to_consider) > 0:
cur_gen_d, cur_d = to_consider.pop()
for k in cur_gen_d.keys():
if type(cur_d[k]) == dict:
to_consider.append((cur_gen_d[k], cur_d[k]))
elif type(cur_d[k]) == str and cur_d[k].upper() != cur_d[k]:
replaces.append((cur_gen_d[k], cur_d[k]))
# replace each span in words
replaces.sort(key=lambda r: r[0][1][0], reverse=True) # sort by L of span
for (sentence_idx, (L, R)), s in replaces:
assert sentence_idx == 0
words = words[:L] + word_tokenize(s).split() + words[(R + 1) :]
return " ".join(words)
def generate_from_template(action_type: str, template: List) -> Tuple[List[str], Dict]:
    """Instantiate `template` for `action_type`.

    Returns the generated dialogue (list of utterances) and the action
    dict it corresponds to.
    """
    node_class = action_type_map[action_type.lower()]
    generated = node_class.generate(template)
    return generated.generate_description(), generated.to_dict()
def dicts_match(
    d: Dict,
    e: Dict,
    ignore_values_for_keys: Sequence[str] = ("relative_direction",),
    ignore_keys: Sequence[str] = ("has_attribute",),
) -> bool:
    """Return True if the nested dicts `d` and `e` match structurally.

    Keys in `ignore_keys` are ignored entirely; keys in
    `ignore_values_for_keys` must exist in both but may hold different
    values.  Uppercase string values are fixed keywords and must be equal;
    lowercase strings are free-text spans and may differ.

    Fixes vs. the original: tuple defaults instead of mutable list
    defaults; ignored keys are skipped before indexing `e` (the old code
    raised KeyError when an ignored key held a dict missing from `e`);
    dict-vs-non-dict nesting is a mismatch instead of an AttributeError;
    the ignore lists are propagated through the recursion.
    """
    # Key sets must agree once the ignored keys are removed.
    if (set(d.keys()) - set(ignore_keys)) != (set(e.keys()) - set(ignore_keys)):
        return False
    for k, v in d.items():
        if k in ignore_keys:
            # Ignored keys may be absent from `e`; never index e[k] for them.
            continue
        if isinstance(v, dict):
            if not isinstance(e[k], dict) or not dicts_match(
                v, e[k], ignore_values_for_keys, ignore_keys
            ):
                return False
        elif (
            k not in ignore_values_for_keys
            and isinstance(v, str)
            and v == v.upper()
            and v != e[k]
        ):
            # Uppercase values are keywords and must match exactly.
            return False
    return True
def recurse_remove_keys(d: Dict, keys: Sequence[str]):
    """Delete every key in `keys` from `d` and all nested dicts, in place."""
    # Strip the keys at this level first.
    for key in keys:
        d.pop(key, None)
    # Then descend into any remaining dict values.
    for value in d.values():
        if type(value) == dict:
            recurse_remove_keys(value, keys)
def replace_relative_direction(text: str, gen_d: Dict, d: Dict) -> str:
    """Rewrite the direction phrase in `text` to match d's relative_direction.

    The generated template may have verbalized any direction; the first
    (longest) direction phrase found in `text` is replaced by the canonical
    phrase for the direction requested in `d`.  Phrasing differs when the
    reference location is the agent itself (AGENT_POS).  `gen_d` is unused
    but kept for signature compatibility.  On any missing/ill-typed key the
    text is returned unchanged.

    Fix vs. the original: the two bare `except:` clauses (which also
    swallowed SystemExit/KeyboardInterrupt) are narrowed to the key/type
    errors that dict navigation can actually raise.
    """
    try:
        rel_dir = d["action"]["location"]["relative_direction"]
    except (KeyError, TypeError):
        # No relative direction requested: nothing to rewrite.
        return text
    try:
        agent_pos = (
            d["action"]["location"]["reference_object"]["location"]["location_type"]
            == "AGENT_POS"
        )
    except (KeyError, TypeError):
        agent_pos = False
    # First entry of each list is the canonical replacement phrase.
    if not agent_pos:
        direction_dict = {
            "LEFT": ["to the left of", "towards the left of"],
            "RIGHT": ["to the right of", "towards the right of"],
            "UP": ["above", "on top of", "to the top of"],
            "DOWN": ["below", "under"],
            "FRONT": ["in front of"],
            "BACK": ["behind"],
            "AWAY": ["away from"],
            "INSIDE": ["inside"],
            "OUTSIDE": ["outside"],
            "NEAR": ["next to", "close to", "near"],
            "CLOCKWISE": ["clockwise"],
            "ANTICLOCKWISE": ["anticlockwise"],
        }
    else:
        direction_dict = {
            "LEFT": ["to the left", "to your left", "east", "left"],
            "RIGHT": ["to the right", "to your right", "right", "west"],
            "UP": ["up", "north"],
            "DOWN": ["down", "south"],
            "FRONT": ["front", "forward", "to the front"],
            "BACK": ["back", "backwards", "to the back"],
            "AWAY": ["away"],
            "CLOCKWISE": ["clockwise"],
            "ANTICLOCKWISE": ["anticlockwise"],
        }
    try:
        replacement = direction_dict[rel_dir][0]
    except (KeyError, TypeError):
        # Unknown direction for this mode (e.g. NEAR with AGENT_POS).
        return text
    # Longest phrases first so "towards the left of" wins over "left".
    direction_list: List[str] = sorted(
        (phrase for phrases in direction_dict.values() for phrase in phrases),
        key=len,
        reverse=True,
    )
    for dir_phrase in direction_list:
        if dir_phrase in text:
            return text.replace(dir_phrase, replacement)
    return text
if __name__ == "__main__":
    # Smoke test: round-trip a BUILD action dict back to a command string.
    d = {
        "dialogue_type": "HUMAN_GIVE_COMMAND",
        "action": {
            "action_type": "BUILD",
            "schematic": {"has_name": "barn"},
            "location": {
                "location_type": "REFERENCE_OBJECT",
                "relative_direction": "LEFT",
                "reference_object": {"has_name": "boat house"},
            },
        },
    }
    t = adtt(d)
    print(t)
| <filename>craftassist/agent/adtt.py
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from preprocess import word_tokenize
from ttad.generation_dialogues.generate_dialogue import action_type_map # Dict[str, Class]
from ttad.generation_dialogues.templates.templates import template_map # Dict[str, List[Template]]
from typing import Dict, List, Tuple, Sequence
def adtt(d: Dict) -> str:
    """Return a string that would produce the action dict `d`

    d is post-process_span (i.e. its span values are replaced with strings)
    d is pre-coref_resolve (i.e. its coref_resolve values are strings, not
    memory objects or keywords)

    Works by brute force: generate candidates from every template for the
    action type until one produces a dict matching `d`, then splice d's
    span strings back into the generated utterance.
    Raises NotImplementedError for non-command dialogues and ValueError
    when no template matches.
    """
    if d["dialogue_type"] != "HUMAN_GIVE_COMMAND":
        raise NotImplementedError("can't handle {}".format(d["dialogue_type"]))
    # template_map keys are capitalized class names, e.g. "Move"
    action_type = d["action"]["action_type"]  # e.g. "MOVE"
    action_type = action_type[0].upper() + action_type[1:].lower()  # e.g. "Move"
    for template in template_map[action_type]:
        dialogue, gen_d = generate_from_template(action_type, template)
        # "has_attribute" is generation-only noise; drop it before comparing.
        recurse_remove_keys(gen_d, ["has_attribute"])
        # Only single-utterance dialogues can be returned as one string.
        if len(dialogue) != 1:
            continue
        if dicts_match(d, gen_d):
            print(gen_d)
            # Swap the template's free-text spans for d's actual strings.
            text = replace_spans(dialogue[0], gen_d, d)
            print(dialogue[0])
            return replace_relative_direction(text, gen_d, d)
    raise ValueError("No matching template found for {}".format(d))
def replace_spans(text: str, gen_d: Dict, d: Dict) -> str:
    """Replace words in text with spans from d

    `gen_d` mirrors `d` in structure, but where `d` holds free-text strings
    (lowercase values) `gen_d` holds span tuples (sentence_idx, (L, R)).
    Each such span in `text` is replaced by the tokenized string from `d`.
    """
    words = word_tokenize(text).split()
    # compile list of spans to replace via recursive search
    replaces = []
    to_consider = [(gen_d, d)]
    while len(to_consider) > 0:
        cur_gen_d, cur_d = to_consider.pop()
        for k in cur_gen_d.keys():
            if type(cur_d[k]) == dict:
                # Descend into nested sub-dicts.
                to_consider.append((cur_gen_d[k], cur_d[k]))
            elif type(cur_d[k]) == str and cur_d[k].upper() != cur_d[k]:
                # Lowercase string == free-text span (uppercase == keyword).
                replaces.append((cur_gen_d[k], cur_d[k]))
    # replace each span in words
    # Right-to-left so earlier spans' indices stay valid after each splice.
    replaces.sort(key=lambda r: r[0][1][0], reverse=True)  # sort by L of span
    for (sentence_idx, (L, R)), s in replaces:
        # Only single-sentence dialogues are supported here.
        assert sentence_idx == 0
        words = words[:L] + word_tokenize(s).split() + words[(R + 1) :]
    return " ".join(words)
def generate_from_template(action_type: str, template: List) -> Tuple[List[str], Dict]:
    """Instantiate `template` for `action_type`.

    Returns the generated dialogue (list of utterances) and the action
    dict it corresponds to.
    """
    node_class = action_type_map[action_type.lower()]
    generated = node_class.generate(template)
    return generated.generate_description(), generated.to_dict()
def dicts_match(
    d: Dict,
    e: Dict,
    ignore_values_for_keys: Sequence[str] = ("relative_direction",),
    ignore_keys: Sequence[str] = ("has_attribute",),
) -> bool:
    """Return True if the nested dicts `d` and `e` match structurally.

    Keys in `ignore_keys` are ignored entirely; keys in
    `ignore_values_for_keys` must exist in both but may hold different
    values.  Uppercase string values are fixed keywords and must be equal;
    lowercase strings are free-text spans and may differ.

    Fixes vs. the original: tuple defaults instead of mutable list
    defaults; ignored keys are skipped before indexing `e` (the old code
    raised KeyError when an ignored key held a dict missing from `e`);
    dict-vs-non-dict nesting is a mismatch instead of an AttributeError;
    the ignore lists are propagated through the recursion.
    """
    # Key sets must agree once the ignored keys are removed.
    if (set(d.keys()) - set(ignore_keys)) != (set(e.keys()) - set(ignore_keys)):
        return False
    for k, v in d.items():
        if k in ignore_keys:
            # Ignored keys may be absent from `e`; never index e[k] for them.
            continue
        if isinstance(v, dict):
            if not isinstance(e[k], dict) or not dicts_match(
                v, e[k], ignore_values_for_keys, ignore_keys
            ):
                return False
        elif (
            k not in ignore_values_for_keys
            and isinstance(v, str)
            and v == v.upper()
            and v != e[k]
        ):
            # Uppercase values are keywords and must match exactly.
            return False
    return True
def recurse_remove_keys(d: Dict, keys: Sequence[str]):
    """Delete every key in `keys` from `d` and all nested dicts, in place."""
    # Strip the keys at this level first.
    for key in keys:
        d.pop(key, None)
    # Then descend into any remaining dict values.
    for value in d.values():
        if type(value) == dict:
            recurse_remove_keys(value, keys)
def replace_relative_direction(text: str, gen_d: Dict, d: Dict) -> str:
    """Rewrite the direction phrase in `text` to match d's relative_direction.

    The generated template may have verbalized any direction; the first
    (longest) direction phrase found in `text` is replaced by the canonical
    phrase for the direction requested in `d`.  Phrasing differs when the
    reference location is the agent itself (AGENT_POS).  `gen_d` is unused
    but kept for signature compatibility.  On any missing/ill-typed key the
    text is returned unchanged.

    Fix vs. the original: the two bare `except:` clauses (which also
    swallowed SystemExit/KeyboardInterrupt) are narrowed to the key/type
    errors that dict navigation can actually raise.
    """
    try:
        rel_dir = d["action"]["location"]["relative_direction"]
    except (KeyError, TypeError):
        # No relative direction requested: nothing to rewrite.
        return text
    try:
        agent_pos = (
            d["action"]["location"]["reference_object"]["location"]["location_type"]
            == "AGENT_POS"
        )
    except (KeyError, TypeError):
        agent_pos = False
    # First entry of each list is the canonical replacement phrase.
    if not agent_pos:
        direction_dict = {
            "LEFT": ["to the left of", "towards the left of"],
            "RIGHT": ["to the right of", "towards the right of"],
            "UP": ["above", "on top of", "to the top of"],
            "DOWN": ["below", "under"],
            "FRONT": ["in front of"],
            "BACK": ["behind"],
            "AWAY": ["away from"],
            "INSIDE": ["inside"],
            "OUTSIDE": ["outside"],
            "NEAR": ["next to", "close to", "near"],
            "CLOCKWISE": ["clockwise"],
            "ANTICLOCKWISE": ["anticlockwise"],
        }
    else:
        direction_dict = {
            "LEFT": ["to the left", "to your left", "east", "left"],
            "RIGHT": ["to the right", "to your right", "right", "west"],
            "UP": ["up", "north"],
            "DOWN": ["down", "south"],
            "FRONT": ["front", "forward", "to the front"],
            "BACK": ["back", "backwards", "to the back"],
            "AWAY": ["away"],
            "CLOCKWISE": ["clockwise"],
            "ANTICLOCKWISE": ["anticlockwise"],
        }
    try:
        replacement = direction_dict[rel_dir][0]
    except (KeyError, TypeError):
        # Unknown direction for this mode (e.g. NEAR with AGENT_POS).
        return text
    # Longest phrases first so "towards the left of" wins over "left".
    direction_list: List[str] = sorted(
        (phrase for phrases in direction_dict.values() for phrase in phrases),
        key=len,
        reverse=True,
    )
    for dir_phrase in direction_list:
        if dir_phrase in text:
            return text.replace(dir_phrase, replacement)
    return text
if __name__ == "__main__":
    # Smoke test: round-trip a BUILD action dict back to a command string.
    d = {
        "dialogue_type": "HUMAN_GIVE_COMMAND",
        "action": {
            "action_type": "BUILD",
            "schematic": {"has_name": "barn"},
            "location": {
                "location_type": "REFERENCE_OBJECT",
                "relative_direction": "LEFT",
                "reference_object": {"has_name": "boat house"},
            },
        },
    }
    t = adtt(d)
    print(t)
| en | 0.807762 | Copyright (c) Facebook, Inc. and its affiliates. # Dict[str, Class] # Dict[str, List[Template]] Return a string that would produce the action dict `d` d is post-process_span (i.e. its span values are replaced with strings) d is pre-coref_resolve (i.e. its coref_resolve values are strings, not memory objects or keywords) # e.g. "MOVE" # e.g. "Move" Replace words in text with spans from d # compile list of spans to replace via recursive search # replace each span in words # sort by L of span # allow values of certain keys to differ (e.g. relative_direction) # allow spans (lowercase strings) to differ # remove keys from dict # recurse # generate direction dict # generate a list of the direction phrases and sort by longest to shortest # look for direction phrase in the text to replace | 2.52209 | 3 |
tests/device/test_read_device_status.py | Sensirion/python-i2c-sen5x | 0 | 6615767 | <gh_stars>0
# -*- coding: utf-8 -*-
# (c) Copyright 2022 Sensirion AG, Switzerland
from sensirion_i2c_sen5x import Sen5xDeviceStatus
import pytest
@pytest.mark.needs_device
def test_no_args(device):
    """
    Test if read_device_status() without argument returns the expected value
    and does not clear the status.
    """
    # A fresh device reports no status flags at all.
    status = device.read_device_status()
    assert type(status) is Sen5xDeviceStatus
    assert status.value == 0
    # Fan cleaning can only run while measuring; it raises the flag we probe.
    device.start_measurement()
    device.start_fan_cleaning()
    assert device.read_device_status().fan_cleaning is True
    assert device.read_device_status().fan_cleaning is True  # Not cleared.


@pytest.mark.needs_device
def test_without_clear(device):
    """
    Test if read_device_status() with clear=False returns the expected value
    and does not clear the status.
    """
    device.start_measurement()
    device.start_fan_cleaning()
    # Explicit clear=False must behave like the no-argument call.
    assert device.read_device_status(False).fan_cleaning is True
    assert device.read_device_status().fan_cleaning is True  # Not cleared.


@pytest.mark.needs_device
def test_with_clear(device):
    """
    Test if read_device_status() with clear=True returns the expected value
    and clears the status.
    """
    device.start_measurement()
    device.start_fan_cleaning()
    # clear=True returns the flag once, then resets it on the device.
    assert device.read_device_status(True).fan_cleaning is True
    assert device.read_device_status().fan_cleaning is False  # Cleared.
| # -*- coding: utf-8 -*-
# (c) Copyright 2022 Sensirion AG, Switzerland
from sensirion_i2c_sen5x import Sen5xDeviceStatus
import pytest
@pytest.mark.needs_device
def test_no_args(device):
    """
    Test if read_device_status() without argument returns the expected value
    and does not clear the status.
    """
    # A fresh device reports no status flags at all.
    status = device.read_device_status()
    assert type(status) is Sen5xDeviceStatus
    assert status.value == 0
    # Fan cleaning can only run while measuring; it raises the flag we probe.
    device.start_measurement()
    device.start_fan_cleaning()
    assert device.read_device_status().fan_cleaning is True
    assert device.read_device_status().fan_cleaning is True  # Not cleared.


@pytest.mark.needs_device
def test_without_clear(device):
    """
    Test if read_device_status() with clear=False returns the expected value
    and does not clear the status.
    """
    device.start_measurement()
    device.start_fan_cleaning()
    # Explicit clear=False must behave like the no-argument call.
    assert device.read_device_status(False).fan_cleaning is True
    assert device.read_device_status().fan_cleaning is True  # Not cleared.
@pytest.mark.needs_device
def test_with_clear(device):
"""
Test if read_device_status() with clear=True returns the expected value
and clears the status.
"""
device.start_measurement()
device.start_fan_cleaning()
assert device.read_device_status(True).fan_cleaning is True
assert device.read_device_status().fan_cleaning is False # Cleared. | en | 0.697348 | # -*- coding: utf-8 -*- # (c) Copyright 2022 Sensirion AG, Switzerland Test if read_device_status() without argument returns the expected value and does not clear the status. # Not cleared. Test if read_device_status() with clear=False returns the expected value and does not clear the status. # Not cleared. Test if read_device_status() with clear=True returns the expected value and clears the status. # Cleared. | 2.536068 | 3 |
src/pylib/consts.py | rafelafrance/traiter_butterflynet | 0 | 6615768 | """Utilities and constants."""
from pathlib import Path
from traiter.terms.csv_ import Csv
# --- File-system locations (relative to the working directory) ---------------
DATA_DIR = Path.cwd() / 'data'
VOCAB_DIR = Path.cwd() / 'src' / 'vocabulary'
OUTPUT_DIR = Path.cwd() / 'output'
# --- Pipeline step names ------------------------------------------------------
GROUP_STEP = 'group'
TRAIT_STEP = 'traits'
MERGE_STEP = 'merge'
# --- Term catalog: shared traiter terms plus the butterfly vocabulary ---------
TERMS = Csv.shared('numerics units time animals')
TERMS += Csv.read_csv(VOCAB_DIR / 'lepidoptera.csv')
TERMS.drop('in', field='pattern')  # "in" is too ambiguous (inches vs. preposition)
# --- Pattern dictionaries derived from the term catalog -----------------------
REPLACE = TERMS.pattern_dicts('replace')
EXTREME = TERMS.pattern_dicts('extreme')
APPROX = TERMS.pattern_dicts('approx')
IMPLIED = TERMS.pattern_dicts('implied')
# Abbreviations that end with a period but do not end a sentence.
ABBREVS = """
Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
mm cm m
Am Anim Bio Biol Bull Bull Conserv DC Ecol Entomol Fig Hist IUCN Inst Int
Lond Me´m Mol Mus Nat Physiol Rep Sci Soc Syst Zool
"""
# --- Punctuation token sets used by the tokenizer/matchers --------------------
CLOSE = [')', ']']
COLON = [':']
COMMA = [',']
CROSS = ['x', '×', '⫻']  # ⫻ = U+2AFB (the original "0x3f" note was wrong)
DASH = ['–', '-', '––', '--']
DOT = ['.']
EQ = ['=', '¼']  # ¼ = 0xbc (U+00BC), an OCR stand-in for "="
FEET_QUOTE = ["'"]
INT_RE = r'^\d+([\d,]*\d|\d*)*$'
NUMBER_RE = r'^\d+(\.\d*)?$'
OPEN = ['(', '[']
PLUS = ['+']
QUOTE = ['"', "'"]
SEMICOLON = [';']
SLASH = ['/']
BREAK = DOT + SEMICOLON  # tokens that may terminate a sentence
| """Utilities and constants."""
from pathlib import Path
from traiter.terms.csv_ import Csv
# --- File-system locations (relative to the working directory) ---------------
DATA_DIR = Path.cwd() / 'data'
VOCAB_DIR = Path.cwd() / 'src' / 'vocabulary'
OUTPUT_DIR = Path.cwd() / 'output'
# --- Pipeline step names ------------------------------------------------------
GROUP_STEP = 'group'
TRAIT_STEP = 'traits'
MERGE_STEP = 'merge'
# --- Term catalog: shared traiter terms plus the butterfly vocabulary ---------
TERMS = Csv.shared('numerics units time animals')
TERMS += Csv.read_csv(VOCAB_DIR / 'lepidoptera.csv')
TERMS.drop('in', field='pattern')  # "in" is too ambiguous (inches vs. preposition)
# --- Pattern dictionaries derived from the term catalog -----------------------
REPLACE = TERMS.pattern_dicts('replace')
EXTREME = TERMS.pattern_dicts('extreme')
APPROX = TERMS.pattern_dicts('approx')
IMPLIED = TERMS.pattern_dicts('implied')
# Abbreviations that end with a period but do not end a sentence.
ABBREVS = """
Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
mm cm m
Am Anim Bio Biol Bull Bull Conserv DC Ecol Entomol Fig Hist IUCN Inst Int
Lond Me´m Mol Mus Nat Physiol Rep Sci Soc Syst Zool
"""
# --- Punctuation token sets used by the tokenizer/matchers --------------------
CLOSE = [')', ']']
COLON = [':']
COMMA = [',']
CROSS = ['x', '×', '⫻']  # ⫻ = U+2AFB (the original "0x3f" note was wrong)
DASH = ['–', '-', '––', '--']
DOT = ['.']
EQ = ['=', '¼']  # ¼ = 0xbc (U+00BC), an OCR stand-in for "="
FEET_QUOTE = ["'"]
INT_RE = r'^\d+([\d,]*\d|\d*)*$'
NUMBER_RE = r'^\d+(\.\d*)?$'
OPEN = ['(', '[']
PLUS = ['+']
QUOTE = ['"', "'"]
SEMICOLON = [';']
SLASH = ['/']
BREAK = DOT + SEMICOLON  # tokens that may terminate a sentence
| en | 0.733732 | Utilities and constants. Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec mm cm m Am Anim Bio Biol Bull Bull Conserv DC Ecol Entomol Fig Hist IUCN Inst Int Lond Me´m Mol Mus Nat Physiol Rep Sci Soc Syst Zool # ⫻ = 0x3f # ¼ = 0xbc | 2.353864 | 2 |
src/Bot.py | anondax/Discord-copy-bot | 3 | 6615769 | from commands.CopyDiscordCommand import *
from commands.PasteDiscordCommand import *
from file.ConfigFile import *
from Logger import *
import discord
import sys
class Bot(discord.Client):
    """Discord client that registers and dispatches the copy/paste commands.

    Idiom fixes vs. the original: `super().__init__()` instead of the
    explicit base-class call, and the C-style trailing semicolons and
    redundant parentheses are removed.  Behavior is unchanged.
    """

    def __init__(self):
        super().__init__()
        self.log = Logger()
        self.config = ConfigFile(self)
        # A broken config file makes the bot useless; bail out immediately.
        if self.config.configError:
            sys.exit()
        self.commands = [CopyDiscordCommand(self), PasteDiscordCommand(self)]

    async def on_ready(self):
        """Set the bot's name/presence once the gateway connection is up."""
        await self.user.edit(username="Discord Copy")
        await self.change_presence(activity=discord.Game(name=self.config.getDiscordCopyCommand()))
        self.log.info("Bot online!")

    async def on_message(self, msg):
        """Offer `msg` to each command; the first one that handles it wins."""
        for command in self.commands:
            if await command.processMessage(msg):
                break

    def runBot(self):
        """Start the (blocking) Discord event loop with the configured token."""
        self.run(self.config.getDiscordToken())
| from commands.CopyDiscordCommand import *
from commands.PasteDiscordCommand import *
from file.ConfigFile import *
from Logger import *
import discord
import sys
class Bot(discord.Client):
    """Discord client that registers and dispatches the copy/paste commands.

    Idiom fixes vs. the original: `super().__init__()` instead of the
    explicit base-class call, and the C-style trailing semicolons and
    redundant parentheses are removed.  Behavior is unchanged.
    """

    def __init__(self):
        super().__init__()
        self.log = Logger()
        self.config = ConfigFile(self)
        # A broken config file makes the bot useless; bail out immediately.
        if self.config.configError:
            sys.exit()
        self.commands = [CopyDiscordCommand(self), PasteDiscordCommand(self)]

    async def on_ready(self):
        """Set the bot's name/presence once the gateway connection is up."""
        await self.user.edit(username="Discord Copy")
        await self.change_presence(activity=discord.Game(name=self.config.getDiscordCopyCommand()))
        self.log.info("Bot online!")

    async def on_message(self, msg):
        """Offer `msg` to each command; the first one that handles it wins."""
        for command in self.commands:
            if await command.processMessage(msg):
                break

    def runBot(self):
        """Start the (blocking) Discord event loop with the configured token."""
        self.run(self.config.getDiscordToken())
| none | 1 | 2.708827 | 3 | |
Chapter02/simple_neural_network _tf_keras.py | hestrang1993/Hands-On-Computer-Vision-with-TensorFlow-2 | 0 | 6615770 | """
The :mod:`simple_neural_network_tf_keras` is here to demonstrate how to use TensorFlow to create a simple fully
connected neural network.
I will create a model to analyze the MNIST dataset.
By the end of the day, this model should be able to read hand-written digits with >95% accuracy.
This model was also created to test if the GPU accelerated the training. It did.
"""
import tensorflow as tf
# MNIST: 10 digit classes of 28x28 single-channel (grayscale) images.
number_of_classes = 10
"""
int: The number of items to classify the dataset items into.
"""
image_rows = 28
"""
int: The number of rows (in pixels) per item in the dataset.
"""
image_columns = 28
"""
int: The number of columns (in pixels) per item in the dataset.
"""
number_of_channels = 1
"""
int: The number of color channels in each item of the dataset.
"""
input_shape = (image_rows, image_columns, number_of_channels)
"""
tuple of int, int, int: The shape of each item to test and train on in the dataset.
"""
# Downloads MNIST on first use; arrays hold uint8 pixel values in [0, 255].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
def normalize_x_data(x_data):
    """Scale pixel values from the [0, 255] byte range into [0.0, 1.0].

    Parameters
    ----------
    x_data : ndarray
        The training and/or testing images.

    Returns
    -------
    numpy.ndarray
        A normalized training and/or testing image values.
    """
    # 255.0 is the maximum 8-bit pixel intensity.
    return x_data / 255.0
model = tf.keras.models.Sequential()
"""
tensorflow.python.keras.engine.sequential.Sequential: My simple fully connected neural network.
This will be built using TensorFlow.
"""
# NOTE(review): `input_shape` defined above is never passed to a layer; Keras
# infers the shape from the data on first fit — confirm this is intentional.
flattening_layer = tf.keras.layers.Flatten()
"""
tensorflow.python.keras.layers.core.Flatten: A layer to flatten my input data.
I'll add this to my ``model`` instance.
"""
dense_layer_1_units = 128
"""
int: The dimensionality of my processing dense layers.
"""
dense_layer_1_activation = 'relu'
"""
str: The activation function to use for my processing dense layers.
"""
dense_layer_2_activation = 'softmax'
"""
str: The activation function for the last layer in the model.
"""
dense_layer_1 = tf.keras.layers.Dense(units = dense_layer_1_units, activation = dense_layer_1_activation)
"""
tensorflow.python.keras.layers.core.Dense: The processing dense layer of my model.
"""
dense_layer_2 = tf.keras.layers.Dense(units = number_of_classes, activation = dense_layer_2_activation)
"""
tensorflow.python.keras.layers.core.Dense: The final processing dense layer of my model.
"""
# Assemble: flatten 28x28 -> 784, one hidden ReLU layer, softmax over 10 digits.
model.add(flattening_layer)
model.add(dense_layer_1)
model.add(dense_layer_2)
model_optimizer = 'sgd'
"""
str: A key for my model's optimizer.
Here, I'll use the stochastic gradient descent (SGD) optimizer.
"""
model_loss = 'sparse_categorical_crossentropy'
"""
str: A key for the loss calculation of my model.
"""
model_metrics = ['accuracy']
"""
list[str]: The metric to measure the model on.
"""
model_callbacks = [tf.keras.callbacks.TensorBoard('./keras')]
"""
tensorflow.python.keras.callbacks.TensorBoard: An instance to handle logging the results of the training.
"""
number_of_epochs = 25
"""
int: The number of epochs the model will go through.
"""
model_verbose_key = 1
"""
int: The key for how verbose the model training will be.
"""
# Normalize pixels to [0, 1] before training/evaluation.
x_train = normalize_x_data(x_train)
x_test = normalize_x_data(x_test)
model_validation_data = (x_test, y_test)
if __name__ == '__main__':
    model.compile(optimizer = model_optimizer, loss = model_loss, metrics = model_metrics)
    model.fit(
        x_train, y_train, epochs = number_of_epochs, verbose = model_verbose_key, validation_data =
        model_validation_data, callbacks = model_callbacks
    )
| """
The :mod:`simple_neural_network_tf_keras` is here to demonstrate how to use TensorFlow to create a simple fully
connected neural network.
I will create a model to analyze the MNIST dataset.
By the end of the day, this model should be able to read hand-written digits with >95% accuracy.
This model was also created to test if the GPU accelerated the training. It did.
"""
import tensorflow as tf
# MNIST: 10 digit classes of 28x28 single-channel (grayscale) images.
number_of_classes = 10
"""
int: The number of items to classify the dataset items into.
"""
image_rows = 28
"""
int: The number of rows (in pixels) per item in the dataset.
"""
image_columns = 28
"""
int: The number of columns (in pixels) per item in the dataset.
"""
number_of_channels = 1
"""
int: The number of color channels in each item of the dataset.
"""
input_shape = (image_rows, image_columns, number_of_channels)
"""
tuple of int, int, int: The shape of each item to test and train on in the dataset.
"""
# Downloads MNIST on first use; arrays hold uint8 pixel values in [0, 255].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
def normalize_x_data(x_data):
    """Scale pixel values from the [0, 255] byte range into [0.0, 1.0].

    Parameters
    ----------
    x_data : ndarray
        The training and/or testing images.

    Returns
    -------
    numpy.ndarray
        A normalized training and/or testing image values.
    """
    # 255.0 is the maximum 8-bit pixel intensity.
    return x_data / 255.0
model = tf.keras.models.Sequential()
"""
tensorflow.python.keras.engine.sequential.Sequential: My simple fully connected neural network.
This will be built using TensorFlow.
"""
# NOTE(review): `input_shape` defined above is never passed to a layer; Keras
# infers the shape from the data on first fit — confirm this is intentional.
flattening_layer = tf.keras.layers.Flatten()
"""
tensorflow.python.keras.layers.core.Flatten: A layer to flatten my input data.
I'll add this to my ``model`` instance.
"""
dense_layer_1_units = 128
"""
int: The dimensionality of my processing dense layers.
"""
dense_layer_1_activation = 'relu'
"""
str: The activation function to use for my processing dense layers.
"""
dense_layer_2_activation = 'softmax'
"""
str: The activation function for the last layer in the model.
"""
dense_layer_1 = tf.keras.layers.Dense(units = dense_layer_1_units, activation = dense_layer_1_activation)
"""
tensorflow.python.keras.layers.core.Dense: The processing dense layer of my model.
"""
dense_layer_2 = tf.keras.layers.Dense(units = number_of_classes, activation = dense_layer_2_activation)
"""
tensorflow.python.keras.layers.core.Dense: The final processing dense layer of my model.
"""
# Assemble: flatten 28x28 -> 784, one hidden ReLU layer, softmax over 10 digits.
model.add(flattening_layer)
model.add(dense_layer_1)
model.add(dense_layer_2)
model_optimizer = 'sgd'
"""
str: A key for my model's optimizer.
Here, I'll use the stochastic gradient descent (SGD) optimizer.
"""
model_loss = 'sparse_categorical_crossentropy'
"""
str: A key for the loss calculation of my model.
"""
model_metrics = ['accuracy']
"""
list[str]: The metric to measure the model on.
"""
model_callbacks = [tf.keras.callbacks.TensorBoard('./keras')]
"""
tensorflow.python.keras.callbacks.TensorBoard: An instance to handle logging the results of the training.
"""
number_of_epochs = 25
"""
int: The number of epochs the model will go through.
"""
model_verbose_key = 1
"""
int: The key for how verbose the model training will be.
"""
# Normalize pixels to [0, 1] before training/evaluation.
x_train = normalize_x_data(x_train)
x_test = normalize_x_data(x_test)
model_validation_data = (x_test, y_test)
if __name__ == '__main__':
    model.compile(optimizer = model_optimizer, loss = model_loss, metrics = model_metrics)
    model.fit(
        x_train, y_train, epochs = number_of_epochs, verbose = model_verbose_key, validation_data =
        model_validation_data, callbacks = model_callbacks
    )
| en | 0.745122 | The :mod:`simple_neural_network_tf_keras` is here to demonstrate how to use TensorFlow to create a simple fully connected neural network. I will create a model to analyze the MNIST dataset. By the end of the day, this model should be able to read hand-written digits with >95% accuracy. This model was also created to test if the GPU accelerated the training. It did. int: The number of items to classify the dataset items into. int: The number of rows (in pixels) per item in the dataset. int: The number of columns (in pixels) per item in the dataset. int: The number of color channels in each item of the dataset. tuple of int, int, int: The shape of each item to test and train on in the dataset. This function will normalize the training and testing images. Parameters ---------- x_data : ndarray The training and/or testing images. Returns ------- numpy.ndarray A normalized training and/or testing image values. tensorflow.python.keras.engine.sequential.Sequential: My simple fully connected neural network. This will be built using TensorFlow. tensorflow.python.keras.layers.core.Flatten: A layer to flatten my input data. I'll add this to my ``model`` instance. int: The dimensionality of my processing dense layers. str: The activation function to use for my processing dense layers. str: The activation function for the last layer in the model. tensorflow.python.keras.layers.core.Dense: The processing dense layer of my model. tensorflow.python.keras.layers.core.Dense: The final processing dense layer of my model. str: A key for my model's optimizer. Here, I'll use the stochastic gradient descent (SGD) optimizer. str: A key for the loss calculation of my model. list[str]: The metric to measure the model on. tensorflow.python.keras.callbacks.TensorBoard: An instance to handle logging the results of the training. int: The number of epochs the model will go through. int: The key for how verbose the model training will be. | 4.134584 | 4 |
backend/tests/test_epic_api.py | DaniilJSN/timeflow | 0 | 6615771 | <filename>backend/tests/test_epic_api.py
from fastapi.testclient import TestClient
import pytest
import os
from ..main import app, session
from sqlmodel import SQLModel, Session, create_engine
from sqlmodel.pool import StaticPool
from ..api.epic import get_session
@pytest.mark.order(1)
def test_post_epic(client):
    """POST /api/epics/ creates an epic and echoes it back with its new id."""
    response = client.post(
        "/api/epics/",
        json={
            "short_name": "dadmin",
            "name": "[dyvenia]admin",
            "team_id": 1,
            "sponsor_id": 1,
            "start_date": "2022-03-08",
            "is_active": True,
            "created_at": "2022-03-08T12:43:28.006Z",
            "updated_at": "2022-03-08T12:43:28.006Z",
        },
    )
    data = response.json()
    assert response.status_code == 200
    # Timestamps come back as naive ISO strings with microseconds (no "Z").
    assert data == {
        "id": 1,
        "short_name": "dadmin",
        "name": "[dyvenia]admin",
        "team_id": 1,
        "sponsor_id": 1,
        "start_date": "2022-03-08",
        "is_active": True,
        "created_at": "2022-03-08T12:43:28.006000",
        "updated_at": "2022-03-08T12:43:28.006000",
    }


def test_get_epics_list(client):
    """GET /api/epics/ lists all epics (only the one created above)."""
    response = client.get("/api/epics/")
    data = response.json()
    assert response.status_code == 200
    assert data == [
        {
            "sponsor_id": 1,
            "name": "[dyvenia]admin",
            "short_name": "dadmin",
            "is_active": True,
            "updated_at": "2022-03-08T12:43:28.006000",
            "id": 1,
            "team_id": 1,
            "start_date": "2022-03-08",
            "created_at": "2022-03-08T12:43:28.006000",
        }
    ]


def test_get_active_epics_list(client):
    """GET /api/epics/active lists only active epics."""
    response = client.get("/api/epics/active")
    data = response.json()
    assert response.status_code == 200
    assert data == [
        {
            "sponsor_id": 1,
            "name": "[dyvenia]admin",
            "short_name": "dadmin",
            "is_active": True,
            "updated_at": "2022-03-08T12:43:28.006000",
            "id": 1,
            "team_id": 1,
            "start_date": "2022-03-08",
            "created_at": "2022-03-08T12:43:28.006000",
        }
    ]
# def test_get_client_name_by_epic_id(client):
# response = client.get("api/epics/1/client-name")
# data = response.json()
# assert response.status_code == 200
# assert data == {"client_name": "dyvenia", "client_id": 1}
def test_deactivate_epic(client):
    """PUT .../deactivate flips the epic's is_active flag off."""
    response = client.put("api/epics/1/deactivate")
    assert response.status_code == 200


def test_activate_epic(client):
    """PUT .../activate flips the epic's is_active flag back on."""
    response = client.put("api/epics/1/activate")
    assert response.status_code == 200


def test_update_epic(client):
    """PUT with query parameters renames the epic."""
    response = client.put("api/epics/?epic_id=1&new_short_name=new_sn&new_name=new_n")
    assert response.status_code == 200
| <filename>backend/tests/test_epic_api.py
from fastapi.testclient import TestClient
import pytest
import os
from ..main import app, session
from sqlmodel import SQLModel, Session, create_engine
from sqlmodel.pool import StaticPool
from ..api.epic import get_session
@pytest.mark.order(1)
def test_post_epic(client):
    """POST /api/epics/ creates an epic and echoes it back with its new id."""
    response = client.post(
        "/api/epics/",
        json={
            "short_name": "dadmin",
            "name": "[dyvenia]admin",
            "team_id": 1,
            "sponsor_id": 1,
            "start_date": "2022-03-08",
            "is_active": True,
            "created_at": "2022-03-08T12:43:28.006Z",
            "updated_at": "2022-03-08T12:43:28.006Z",
        },
    )
    data = response.json()
    assert response.status_code == 200
    # Timestamps come back as naive ISO strings with microseconds (no "Z").
    assert data == {
        "id": 1,
        "short_name": "dadmin",
        "name": "[dyvenia]admin",
        "team_id": 1,
        "sponsor_id": 1,
        "start_date": "2022-03-08",
        "is_active": True,
        "created_at": "2022-03-08T12:43:28.006000",
        "updated_at": "2022-03-08T12:43:28.006000",
    }


def test_get_epics_list(client):
    """GET /api/epics/ lists all epics (only the one created above)."""
    response = client.get("/api/epics/")
    data = response.json()
    assert response.status_code == 200
    assert data == [
        {
            "sponsor_id": 1,
            "name": "[dyvenia]admin",
            "short_name": "dadmin",
            "is_active": True,
            "updated_at": "2022-03-08T12:43:28.006000",
            "id": 1,
            "team_id": 1,
            "start_date": "2022-03-08",
            "created_at": "2022-03-08T12:43:28.006000",
        }
    ]


def test_get_active_epics_list(client):
    """GET /api/epics/active lists only active epics."""
    response = client.get("/api/epics/active")
    data = response.json()
    assert response.status_code == 200
    assert data == [
        {
            "sponsor_id": 1,
            "name": "[dyvenia]admin",
            "short_name": "dadmin",
            "is_active": True,
            "updated_at": "2022-03-08T12:43:28.006000",
            "id": 1,
            "team_id": 1,
            "start_date": "2022-03-08",
            "created_at": "2022-03-08T12:43:28.006000",
        }
    ]
# def test_get_client_name_by_epic_id(client):
# response = client.get("api/epics/1/client-name")
# data = response.json()
# assert response.status_code == 200
# assert data == {"client_name": "dyvenia", "client_id": 1}
def test_deactivate_epic(client):
    """PUT .../deactivate flips the epic's is_active flag off."""
    response = client.put("api/epics/1/deactivate")
    assert response.status_code == 200


def test_activate_epic(client):
    """PUT .../activate flips the epic's is_active flag back on."""
    response = client.put("api/epics/1/activate")
    assert response.status_code == 200


def test_update_epic(client):
    """PUT with query parameters renames the epic."""
    response = client.put("api/epics/?epic_id=1&new_short_name=new_sn&new_name=new_n")
    assert response.status_code == 200
| en | 0.500754 | # def test_get_client_name_by_epic_id(client): # response = client.get("api/epics/1/client-name") # data = response.json() # assert response.status_code == 200 # assert data == {"client_name": "dyvenia", "client_id": 1} | 2.308726 | 2 |
app/__init__.py | alexanderdanson/professional_website | 0 | 6615772 | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from config import Config

# Module-level application object and extensions (no app factory).
app = Flask(__name__)
app.config.from_object(Config)
bootstrap = Bootstrap(app)
mail = Mail(app)

# Imported after `app` exists to avoid a circular import with app.main.
from app.main import bp as main_bp
app.register_blueprint(main_bp) | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from config import Config

# Module-level application object and extensions (no app factory).
app = Flask(__name__)
app.config.from_object(Config)
bootstrap = Bootstrap(app)
mail = Mail(app)

# Imported after `app` exists to avoid a circular import with app.main.
from app.main import bp as main_bp
app.register_blueprint(main_bp) | none | 1 | 2.020866 | 2 | |
tests/test_nlp_parser.py | KonnexionsGmbH/ocr_bench | 0 | 6615773 | <filename>tests/test_nlp_parser.py
# pylint: disable=unused-argument
"""Testing Module nlp.parser."""
import typing
import cfg.glob
import jellyfish
import pytest
import roman
import dcr
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Test Levenshtein - arabic.
# -----------------------------------------------------------------------------
def test_levenshtein_arabic():
"""Test Levenshtein - arabic."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
upper_limit: int = 1200
for prev in range(upper_limit):
text_curr = f"Page {prev+1} of {str(upper_limit)}"
text_prev = f"Page {prev} of {str(upper_limit)}"
distance: int = jellyfish.levenshtein_distance(
text_prev,
text_curr,
)
match distance:
case 1:
assert True
case 2:
assert (prev + 1) % 10 == 0, "prev=" + text_prev + " - curr=" + text_curr
case 3:
assert (prev + 1) % 100 == 0, "prev=" + text_prev + " - curr=" + text_curr
case 4:
assert (prev + 1) % 1000 == 0, "prev=" + text_prev + " - curr=" + text_curr
case _:
assert False, "distance=" + str(distance) + " prev=" + text_prev + " - curr=" + text_curr
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Levenshtein - roman.
# -----------------------------------------------------------------------------
def test_levenshtein_roman():
"""Test Levenshtein - roman."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
upper_limit: int = 1200
upper_limit_roman: str = roman.toRoman(upper_limit)
for prev in range(upper_limit):
text_curr = f"Page {roman.toRoman(prev + 1)} of {upper_limit_roman}"
text_prev = f"Page {roman.toRoman(prev)} of {upper_limit_roman}"
distance: int = jellyfish.levenshtein_distance(
text_prev,
text_curr,
)
match distance:
case 1 | 2 | 3 | 4 | 5 | 6 | 7:
assert True
case _:
assert False, "distance=" + str(distance) + " prev=" + text_prev + " - curr=" + text_curr
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - coverage.
# -----------------------------------------------------------------------------
@pytest.mark.parametrize("verbose_parser", ["all", "none", "text"])
def test_run_action_store_from_parser_coverage(verbose_parser: str, fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - coverage."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_mini", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.glob.setup._DCR_CFG_VERBOSE_PARSER, verbose_parser),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
(cfg.glob.setup._DCR_CFG_VERBOSE_LINE_TYPE, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_coverage <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"pdf_mini_1.pdf",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - coverage - LineType.
# -----------------------------------------------------------------------------
def test_run_action_store_from_parser_coverage_line_type(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - coverage - LineType."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("p_2_header_0_footer_2_text_0", "pdf"),
("p_2_header_2_footer_0_text_0", "pdf"),
("p_2_header_2_footer_2_text_0", "pdf"),
("p_3_header_0_footer_4", "pdf"),
("p_3_header_4_footer_4", "pdf"),
("p_5_header_2_footer_2_def_3_footer", "pdf"),
("p_5_header_2_footer_2_def_3_header", "pdf"),
("p_5_header_2_footer_2_man", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_coverage <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"p_2_header_0_footer_2_text_0_1.line.xml",
"p_2_header_0_footer_2_text_0_1.pdf",
"p_2_header_2_footer_0_text_0_3.line.xml",
"p_2_header_2_footer_0_text_0_3.pdf",
"p_2_header_2_footer_2_text_0_5.line.xml",
"p_2_header_2_footer_2_text_0_5.pdf",
"p_3_header_0_footer_4_7.line.xml",
"p_3_header_0_footer_4_7.pdf",
"p_3_header_4_footer_4_9.line.xml",
"p_3_header_4_footer_4_9.pdf",
"p_5_header_2_footer_2_def_3_footer_11.line.xml",
"p_5_header_2_footer_2_def_3_footer_11.pdf",
"p_5_header_2_footer_2_def_3_header_13.line.xml",
"p_5_header_2_footer_2_def_3_header_13.pdf",
"p_5_header_2_footer_2_man_15.line.xml",
"p_5_header_2_footer_2_man_15.pdf",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - normal.
# -----------------------------------------------------------------------------
def test_run_action_store_from_parser_normal(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - normal."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_mini", "pdf"),
("pdf_scanned_ok", "pdf"),
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_NON_PDF_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_normal <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"pdf_mini_1.pdf",
"pdf_scanned_ok_3.pdf",
"translating_sql_into_relational_algebra_p01_02_5.pdf",
]
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - normal - keep.
# -----------------------------------------------------------------------------
def test_run_action_store_from_parser_normal_keep(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - normal - keep."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_mini", "pdf"),
("pdf_scanned_ok", "pdf"),
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_NON_PDF_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_normal_keep <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"pdf_mini_1.pdf",
"pdf_mini_1.line.xml",
"pdf_mini_1.word.xml",
"pdf_scanned_ok_3.pdf",
"pdf_scanned_ok_3_1.jpeg",
"pdf_scanned_ok_3_1.pdf",
"pdf_scanned_ok_3_1.line.xml",
"pdf_scanned_ok_3_1.word.xml",
"translating_sql_into_relational_algebra_p01_02_5.pdf",
"translating_sql_into_relational_algebra_p01_02_5_0.pdf",
"translating_sql_into_relational_algebra_p01_02_5_0.line.xml",
"translating_sql_into_relational_algebra_p01_02_5_0.word.xml",
"translating_sql_into_relational_algebra_p01_02_5_1.jpeg",
"translating_sql_into_relational_algebra_p01_02_5_1.pdf",
"translating_sql_into_relational_algebra_p01_02_5_2.jpeg",
"translating_sql_into_relational_algebra_p01_02_5_2.pdf",
]
# TBD
# if platform.system() != "Windows":
# files_expected.append(
# "pdf_scanned_03_ok_11.pdf",
# )
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| <filename>tests/test_nlp_parser.py
# pylint: disable=unused-argument
"""Testing Module nlp.parser."""
import typing
import cfg.glob
import jellyfish
import pytest
import roman
import dcr
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Test Levenshtein - arabic.
# -----------------------------------------------------------------------------
def test_levenshtein_arabic():
"""Test Levenshtein - arabic."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
upper_limit: int = 1200
for prev in range(upper_limit):
text_curr = f"Page {prev+1} of {str(upper_limit)}"
text_prev = f"Page {prev} of {str(upper_limit)}"
distance: int = jellyfish.levenshtein_distance(
text_prev,
text_curr,
)
match distance:
case 1:
assert True
case 2:
assert (prev + 1) % 10 == 0, "prev=" + text_prev + " - curr=" + text_curr
case 3:
assert (prev + 1) % 100 == 0, "prev=" + text_prev + " - curr=" + text_curr
case 4:
assert (prev + 1) % 1000 == 0, "prev=" + text_prev + " - curr=" + text_curr
case _:
assert False, "distance=" + str(distance) + " prev=" + text_prev + " - curr=" + text_curr
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Levenshtein - roman.
# -----------------------------------------------------------------------------
def test_levenshtein_roman():
"""Test Levenshtein - roman."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
upper_limit: int = 1200
upper_limit_roman: str = roman.toRoman(upper_limit)
for prev in range(upper_limit):
text_curr = f"Page {roman.toRoman(prev + 1)} of {upper_limit_roman}"
text_prev = f"Page {roman.toRoman(prev)} of {upper_limit_roman}"
distance: int = jellyfish.levenshtein_distance(
text_prev,
text_curr,
)
match distance:
case 1 | 2 | 3 | 4 | 5 | 6 | 7:
assert True
case _:
assert False, "distance=" + str(distance) + " prev=" + text_prev + " - curr=" + text_curr
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - coverage.
# -----------------------------------------------------------------------------
@pytest.mark.parametrize("verbose_parser", ["all", "none", "text"])
def test_run_action_store_from_parser_coverage(verbose_parser: str, fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - coverage."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_mini", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.glob.setup._DCR_CFG_VERBOSE_PARSER, verbose_parser),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
(cfg.glob.setup._DCR_CFG_VERBOSE_LINE_TYPE, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_coverage <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"pdf_mini_1.pdf",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - coverage - LineType.
# -----------------------------------------------------------------------------
def test_run_action_store_from_parser_coverage_line_type(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - coverage - LineType."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("p_2_header_0_footer_2_text_0", "pdf"),
("p_2_header_2_footer_0_text_0", "pdf"),
("p_2_header_2_footer_2_text_0", "pdf"),
("p_3_header_0_footer_4", "pdf"),
("p_3_header_4_footer_4", "pdf"),
("p_5_header_2_footer_2_def_3_footer", "pdf"),
("p_5_header_2_footer_2_def_3_header", "pdf"),
("p_5_header_2_footer_2_man", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_coverage <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
"p_2_header_0_footer_2_text_0_1.line.xml",
"p_2_header_0_footer_2_text_0_1.pdf",
"p_2_header_2_footer_0_text_0_3.line.xml",
"p_2_header_2_footer_0_text_0_3.pdf",
"p_2_header_2_footer_2_text_0_5.line.xml",
"p_2_header_2_footer_2_text_0_5.pdf",
"p_3_header_0_footer_4_7.line.xml",
"p_3_header_0_footer_4_7.pdf",
"p_3_header_4_footer_4_9.line.xml",
"p_3_header_4_footer_4_9.pdf",
"p_5_header_2_footer_2_def_3_footer_11.line.xml",
"p_5_header_2_footer_2_def_3_footer_11.pdf",
"p_5_header_2_footer_2_def_3_header_13.line.xml",
"p_5_header_2_footer_2_def_3_header_13.pdf",
"p_5_header_2_footer_2_man_15.line.xml",
"p_5_header_2_footer_2_man_15.pdf",
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - normal.
# -----------------------------------------------------------------------------
def test_run_action_store_from_parser_normal(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - normal."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_mini", "pdf"),
("pdf_scanned_ok", "pdf"),
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_NON_PDF_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_normal <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"pdf_mini_1.pdf",
"pdf_scanned_ok_3.pdf",
"translating_sql_into_relational_algebra_p01_02_5.pdf",
]
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_STORE_FROM_PARSER - normal - keep.
# -----------------------------------------------------------------------------
def test_run_action_store_from_parser_normal_keep(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_STORE_FROM_PARSER - normal - keep."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_mini", "pdf"),
("pdf_scanned_ok", "pdf"),
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_NON_PDF_2_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_STORE_FROM_PARSER])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_store_from_parser_normal_keep <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"pdf_mini_1.pdf",
"pdf_mini_1.line.xml",
"pdf_mini_1.word.xml",
"pdf_scanned_ok_3.pdf",
"pdf_scanned_ok_3_1.jpeg",
"pdf_scanned_ok_3_1.pdf",
"pdf_scanned_ok_3_1.line.xml",
"pdf_scanned_ok_3_1.word.xml",
"translating_sql_into_relational_algebra_p01_02_5.pdf",
"translating_sql_into_relational_algebra_p01_02_5_0.pdf",
"translating_sql_into_relational_algebra_p01_02_5_0.line.xml",
"translating_sql_into_relational_algebra_p01_02_5_0.word.xml",
"translating_sql_into_relational_algebra_p01_02_5_1.jpeg",
"translating_sql_into_relational_algebra_p01_02_5_1.pdf",
"translating_sql_into_relational_algebra_p01_02_5_2.jpeg",
"translating_sql_into_relational_algebra_p01_02_5_2.pdf",
]
# TBD
# if platform.system() != "Windows":
# files_expected.append(
# "pdf_scanned_03_ok_11.pdf",
# )
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| en | 0.172329 | # pylint: disable=unused-argument Testing Module nlp.parser. # ----------------------------------------------------------------------------- # Constants & Globals. # ----------------------------------------------------------------------------- # pylint: disable=W0212 # @pytest.mark.issue # ----------------------------------------------------------------------------- # Test Levenshtein - arabic. # ----------------------------------------------------------------------------- Test Levenshtein - arabic. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Test Levenshtein - roman. # ----------------------------------------------------------------------------- Test Levenshtein - roman. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Test RUN_ACTION_STORE_FROM_PARSER - coverage. # ----------------------------------------------------------------------------- Test RUN_ACTION_STORE_FROM_PARSER - coverage. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Test RUN_ACTION_STORE_FROM_PARSER - coverage - LineType. # ----------------------------------------------------------------------------- Test RUN_ACTION_STORE_FROM_PARSER - coverage - LineType. 
# ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Test RUN_ACTION_STORE_FROM_PARSER - normal. # ----------------------------------------------------------------------------- Test RUN_ACTION_STORE_FROM_PARSER - normal. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Test RUN_ACTION_STORE_FROM_PARSER - normal - keep. # ----------------------------------------------------------------------------- Test RUN_ACTION_STORE_FROM_PARSER - normal - keep. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # TBD # if platform.system() != "Windows": # files_expected.append( # "pdf_scanned_03_ok_11.pdf", # ) # ------------------------------------------------------------------------- | 1.813962 | 2 |
escalate/core/views/user_views.py | darkreactions/ESCALATE | 11 | 6615774 | <reponame>darkreactions/ESCALATE
#from escalate.core.models.app_tables import OrganizationPassword
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.urls import reverse_lazy, reverse
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.hashers import make_password, check_password
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic.edit import FormView, CreateView, DeleteView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from core.forms.forms import (CustomUserCreationForm, PersonTableForm,
JoinOrganizationForm, PersonForm)
from core.models.view_tables import Actor, Person, Organization, Edocument
from core.models.app_tables import CustomUser, OrganizationPassword
from core.models.core_tables import TypeDef
from core.forms.forms import UploadEdocForm
from django.forms import modelformset_factory
class CreateUserView(View):
template_name = 'core/accounts/create_user.html'
def get(self, request, *args, **kwargs):
user_form = CustomUserCreationForm()
person_form = PersonTableForm()
context = {'person_form': person_form,
'user_form': user_form}
return render(request, self.template_name, context=context)
def post(self, request, *args, **kwargs):
person_form = PersonForm(request.POST)
user_form = CustomUserCreationForm(request.POST)
if person_form.is_valid() and user_form.is_valid():
person = person_form.save()
p = Person.objects.get(pk=person.pk)
user = user_form.save(commit=False)
user.person = p
user.save()
messages.success(request, 'Account created successfully')
return redirect('login')
else:
return render(request, self.template_name, {'person_form': person_form,
'user_form': user_form})
def change_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
messages.success(request, 'Your password was successfully updated!')
return redirect('change_password')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordChangeForm(request.user)
return render(request, 'core/accounts/change_password.html', {
'form': form
})
class UserProfileView(LoginRequiredMixin, View):
template_name = 'core/accounts/user_profile.html'
def get(self, request, *args, **kwargs):
org_form = JoinOrganizationForm()
vw_person = Person.objects.get(pk=request.user.person.pk)
# get edocuments (profile picture)
edocs_raw = Edocument.objects.filter(ref_edocument_uuid=request.user.person.pk, title=str(request.user.username)+"_avatar")
edocs = []
for edoc in edocs_raw:
filename = edoc.filename
# redirect to api link to download
download_url = reverse('edoc_download', args=(edoc.uuid,))
edocs.append({
'filename': filename,
'download_url': download_url
})
context = {'org_form': org_form, 'vw_person': vw_person}
if len(edocs)>0:
context['profile_pic_edoc'] = edocs[0]
else:
context['profile_pic_edoc'] = None
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
if request.POST.get("add_org"):
org_pwd = OrganizationPassword.objects.get(pk=request.POST['organization'])
if check_password(request.POST['password'], org_pwd.password):
person = Person.objects.get(pk=request.user.person.pk)
organization = Organization.objects.get(pk=org_pwd.organization.pk)
actor, created = Actor.objects.get_or_create(person=person, organization=organization)
if created:
messages.success(request, f'Added to {org_pwd.organization} successfully')
else:
messages.info(request, f'Already a member of {org_pwd.organization} no changes made')
else:
messages.error(request, f'Incorrect password for {org_pwd.organization}. Please contact admin for correct password')
return redirect('user_profile')
class UserProfileEdit(LoginRequiredMixin, View):
template_name = 'core/generic/edit.html'
EdocFormSet = modelformset_factory(Edocument, form=UploadEdocForm, can_delete=True)
def get(self, request, *args, **kwargs):
# context = super().get_context_data(**kwargs)
person_form = PersonTableForm(instance=request.user.person)
profile_image_edoc = Edocument.objects.filter(ref_edocument_uuid=request.user.person.pk, title=str(request.user.username)+"_avatar")
# if user already has a picture, load the edocUpload form for that specific edocument picture
# if not, create new form
if len(profile_image_edoc)>0:
edoc_form = UploadEdocForm(instance=profile_image_edoc[0])
else:
edoc_form = UploadEdocForm()
context = {'form':person_form}
# upload profile image
context['edoc_form'] = edoc_form
return render(request, self.template_name, context=context)
def post(self, request, *args, **kwargs):
form = PersonTableForm(request.POST, instance=request.user.person)
profile_image_edoc = Edocument.objects.get_or_create(ref_edocument_uuid=request.user.person.pk, title=str(request.user.username) + "_avatar")
edocumentForm = UploadEdocForm(request.POST, request.FILES, instance=profile_image_edoc[0])
if self.request.user.is_authenticated:
if form.is_valid() and edocumentForm.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
profile_form = form.save(commit=False)
profile_form.title = request.POST.getlist('title')[0]
profile_form.save()
edoc = edocumentForm.save(commit=False)
edoc.title = str(request.user.username) + "_avatar"
if edocumentForm.cleaned_data['file']:
#New edoc or update file of existing edoc
file = edocumentForm.cleaned_data['file']
# Hopefuly every file name is structed as <name>.<ext>
_file_name_detached, ext, *_ = file.name.split('.')
edoc.edocument = file.read()
edoc.filename = file.name
#file type that the user entered
file_type_user = edocumentForm.cleaned_data['file_type']
#try to get the file_type from db that is spelled the same as the file extension
try:
file_type_db = TypeDef.objects.get(category="file",description=ext)
except TypeDef.DoesNotExist:
file_type_db = None
if file_type_db:
#found find file type corresponding to file extension
#use that file type instead of what user entered
edoc.doc_type_uuid = file_type_db
else:
#did not find file type corresponding to file extension
#use file type user entered in form
edoc.doc_type_uuid = file_type_user
# Get the appropriate actor and then add it to the edoc
actor = Actor.objects.get(person=self.request.user.person.pk, organization=None)
edoc.actor = actor
# Get the appropriate uuid of the record being changed.
edoc.ref_edocument_uuid = self.request.user.person.pk
edoc.save()
return redirect('user_profile')
def form_valid(self, form):
self.object = form.save()
if self.EdocFormSet != None:
actor = Actor.objects.get(
person=self.request.user.person.pk, organization=None)
formset = self.EdocFormSet(self.request.POST, self.request.FILES, prefix='edoc')
# Loop through every edoc form
for form in formset:
# Only if the form has changed make an update, otherwise ignore
if form.has_changed() and form.is_valid():
if self.request.user.is_authenticated:
edoc = form.save(commit=False)
if form.cleaned_data['file']:
#New edoc or update file of existing edoc
file = form.cleaned_data['file']
# Hopefuly every file name is structed as <name>.<ext>
_file_name_detached, ext, *_ = file.name.split('.')
edoc.edocument = file.read()
edoc.filename = file.name
#file type that the user entered
file_type_user = form.cleaned_data['file_type']
#try to get the file_type from db that is spelled the same as the file extension
try:
file_type_db = TypeDef.objects.get(category="file",description=ext)
except TypeDef.DoesNotExist:
file_type_db = None
if file_type_db:
#found find file type corresponding to file extension
#use that file type instead of what user entered
edoc.doc_type_uuid = file_type_db
else:
#did not find file type corresponding to file extension
#use file type user entered in form
edoc.doc_type_uuid = file_type_user
# Get the appropriate actor and then add it to the edoc
edoc.actor = actor
# Get the appropriate uuid of the record being changed.
edoc.ref_edocument_uuid = self.object.pk
edoc.save()
# Delete each note we marked in the formset
formset.save(commit=False)
for form in formset.deleted_forms:
form.instance.delete()
# Choose which website we are redirected to
if self.request.POST.get('add_edoc'):
self.success_url = reverse_lazy(
f'{self.context_object_name}_update', kwargs={'pk': self.object.pk})
return redirect('user_profile')
def form_invalid(self, form):
context = self.get_context_data()
context['form'] = form
return render(self.request, self.template_name, context)
| #from escalate.core.models.app_tables import OrganizationPassword
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.urls import reverse_lazy, reverse
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.hashers import make_password, check_password
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic.edit import FormView, CreateView, DeleteView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from core.forms.forms import (CustomUserCreationForm, PersonTableForm,
JoinOrganizationForm, PersonForm)
from core.models.view_tables import Actor, Person, Organization, Edocument
from core.models.app_tables import CustomUser, OrganizationPassword
from core.models.core_tables import TypeDef
from core.forms.forms import UploadEdocForm
from django.forms import modelformset_factory
class CreateUserView(View):
template_name = 'core/accounts/create_user.html'
def get(self, request, *args, **kwargs):
user_form = CustomUserCreationForm()
person_form = PersonTableForm()
context = {'person_form': person_form,
'user_form': user_form}
return render(request, self.template_name, context=context)
def post(self, request, *args, **kwargs):
person_form = PersonForm(request.POST)
user_form = CustomUserCreationForm(request.POST)
if person_form.is_valid() and user_form.is_valid():
person = person_form.save()
p = Person.objects.get(pk=person.pk)
user = user_form.save(commit=False)
user.person = p
user.save()
messages.success(request, 'Account created successfully')
return redirect('login')
else:
return render(request, self.template_name, {'person_form': person_form,
'user_form': user_form})
def change_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
messages.success(request, 'Your password was successfully updated!')
return redirect('change_password')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordChangeForm(request.user)
return render(request, 'core/accounts/change_password.html', {
'form': form
})
class UserProfileView(LoginRequiredMixin, View):
template_name = 'core/accounts/user_profile.html'
def get(self, request, *args, **kwargs):
org_form = JoinOrganizationForm()
vw_person = Person.objects.get(pk=request.user.person.pk)
# get edocuments (profile picture)
edocs_raw = Edocument.objects.filter(ref_edocument_uuid=request.user.person.pk, title=str(request.user.username)+"_avatar")
edocs = []
for edoc in edocs_raw:
filename = edoc.filename
# redirect to api link to download
download_url = reverse('edoc_download', args=(edoc.uuid,))
edocs.append({
'filename': filename,
'download_url': download_url
})
context = {'org_form': org_form, 'vw_person': vw_person}
if len(edocs)>0:
context['profile_pic_edoc'] = edocs[0]
else:
context['profile_pic_edoc'] = None
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
if request.POST.get("add_org"):
org_pwd = OrganizationPassword.objects.get(pk=request.POST['organization'])
if check_password(request.POST['password'], org_pwd.password):
person = Person.objects.get(pk=request.user.person.pk)
organization = Organization.objects.get(pk=org_pwd.organization.pk)
actor, created = Actor.objects.get_or_create(person=person, organization=organization)
if created:
messages.success(request, f'Added to {org_pwd.organization} successfully')
else:
messages.info(request, f'Already a member of {org_pwd.organization} no changes made')
else:
messages.error(request, f'Incorrect password for {org_pwd.organization}. Please contact admin for correct password')
return redirect('user_profile')
class UserProfileEdit(LoginRequiredMixin, View):
template_name = 'core/generic/edit.html'
EdocFormSet = modelformset_factory(Edocument, form=UploadEdocForm, can_delete=True)
def get(self, request, *args, **kwargs):
# context = super().get_context_data(**kwargs)
person_form = PersonTableForm(instance=request.user.person)
profile_image_edoc = Edocument.objects.filter(ref_edocument_uuid=request.user.person.pk, title=str(request.user.username)+"_avatar")
# if user already has a picture, load the edocUpload form for that specific edocument picture
# if not, create new form
if len(profile_image_edoc)>0:
edoc_form = UploadEdocForm(instance=profile_image_edoc[0])
else:
edoc_form = UploadEdocForm()
context = {'form':person_form}
# upload profile image
context['edoc_form'] = edoc_form
return render(request, self.template_name, context=context)
def post(self, request, *args, **kwargs):
form = PersonTableForm(request.POST, instance=request.user.person)
profile_image_edoc = Edocument.objects.get_or_create(ref_edocument_uuid=request.user.person.pk, title=str(request.user.username) + "_avatar")
edocumentForm = UploadEdocForm(request.POST, request.FILES, instance=profile_image_edoc[0])
if self.request.user.is_authenticated:
if form.is_valid() and edocumentForm.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
profile_form = form.save(commit=False)
profile_form.title = request.POST.getlist('title')[0]
profile_form.save()
edoc = edocumentForm.save(commit=False)
edoc.title = str(request.user.username) + "_avatar"
if edocumentForm.cleaned_data['file']:
#New edoc or update file of existing edoc
file = edocumentForm.cleaned_data['file']
# Hopefuly every file name is structed as <name>.<ext>
_file_name_detached, ext, *_ = file.name.split('.')
edoc.edocument = file.read()
edoc.filename = file.name
#file type that the user entered
file_type_user = edocumentForm.cleaned_data['file_type']
#try to get the file_type from db that is spelled the same as the file extension
try:
file_type_db = TypeDef.objects.get(category="file",description=ext)
except TypeDef.DoesNotExist:
file_type_db = None
if file_type_db:
#found find file type corresponding to file extension
#use that file type instead of what user entered
edoc.doc_type_uuid = file_type_db
else:
#did not find file type corresponding to file extension
#use file type user entered in form
edoc.doc_type_uuid = file_type_user
# Get the appropriate actor and then add it to the edoc
actor = Actor.objects.get(person=self.request.user.person.pk, organization=None)
edoc.actor = actor
# Get the appropriate uuid of the record being changed.
edoc.ref_edocument_uuid = self.request.user.person.pk
edoc.save()
return redirect('user_profile')
def form_valid(self, form):
self.object = form.save()
if self.EdocFormSet != None:
actor = Actor.objects.get(
person=self.request.user.person.pk, organization=None)
formset = self.EdocFormSet(self.request.POST, self.request.FILES, prefix='edoc')
# Loop through every edoc form
for form in formset:
# Only if the form has changed make an update, otherwise ignore
if form.has_changed() and form.is_valid():
if self.request.user.is_authenticated:
edoc = form.save(commit=False)
if form.cleaned_data['file']:
#New edoc or update file of existing edoc
file = form.cleaned_data['file']
# Hopefuly every file name is structed as <name>.<ext>
_file_name_detached, ext, *_ = file.name.split('.')
edoc.edocument = file.read()
edoc.filename = file.name
#file type that the user entered
file_type_user = form.cleaned_data['file_type']
#try to get the file_type from db that is spelled the same as the file extension
try:
file_type_db = TypeDef.objects.get(category="file",description=ext)
except TypeDef.DoesNotExist:
file_type_db = None
if file_type_db:
#found find file type corresponding to file extension
#use that file type instead of what user entered
edoc.doc_type_uuid = file_type_db
else:
#did not find file type corresponding to file extension
#use file type user entered in form
edoc.doc_type_uuid = file_type_user
# Get the appropriate actor and then add it to the edoc
edoc.actor = actor
# Get the appropriate uuid of the record being changed.
edoc.ref_edocument_uuid = self.object.pk
edoc.save()
# Delete each note we marked in the formset
formset.save(commit=False)
for form in formset.deleted_forms:
form.instance.delete()
# Choose which website we are redirected to
if self.request.POST.get('add_edoc'):
self.success_url = reverse_lazy(
f'{self.context_object_name}_update', kwargs={'pk': self.object.pk})
return redirect('user_profile')
def form_invalid(self, form):
context = self.get_context_data()
context['form'] = form
return render(self.request, self.template_name, context) | en | 0.884133 | #from escalate.core.models.app_tables import OrganizationPassword # Important! # get edocuments (profile picture) # redirect to api link to download # context = super().get_context_data(**kwargs) # if user already has a picture, load the edocUpload form for that specific edocument picture # if not, create new form # upload profile image # process the data in form.cleaned_data as required (here we just write it to the model due_back field) #New edoc or update file of existing edoc # Hopefuly every file name is structed as <name>.<ext> #file type that the user entered #try to get the file_type from db that is spelled the same as the file extension #found find file type corresponding to file extension #use that file type instead of what user entered #did not find file type corresponding to file extension #use file type user entered in form # Get the appropriate actor and then add it to the edoc # Get the appropriate uuid of the record being changed. # Loop through every edoc form # Only if the form has changed make an update, otherwise ignore #New edoc or update file of existing edoc # Hopefuly every file name is structed as <name>.<ext> #file type that the user entered #try to get the file_type from db that is spelled the same as the file extension #found find file type corresponding to file extension #use that file type instead of what user entered #did not find file type corresponding to file extension #use file type user entered in form # Get the appropriate actor and then add it to the edoc # Get the appropriate uuid of the record being changed. # Delete each note we marked in the formset # Choose which website we are redirected to | 1.911679 | 2 |
3/dictionary.py | gdaPythonProjects/training2019-thursday | 5 | 6615775 | <gh_stars>1-10
x = {
"kot": ["Stefan", "Bubuś"],
"pies": {"rasa": "pitbull",
"name": "puszek"},
"rybka": "Złota"
}
print(x["kot"])
print(x.get("nie istnieje", []))
print(x.get("kot"))
print(x.get(":("))
if "rybka" in x:
del x["rybka"]
for i, j in x.items():
print(i, j)
for i in x.keys():
print(i)
for i in x.values():
print(i)
| x = {
"kot": ["Stefan", "Bubuś"],
"pies": {"rasa": "pitbull",
"name": "puszek"},
"rybka": "Złota"
}
print(x["kot"])
print(x.get("nie istnieje", []))
print(x.get("kot"))
print(x.get(":("))
if "rybka" in x:
del x["rybka"]
for i, j in x.items():
print(i, j)
for i in x.keys():
print(i)
for i in x.values():
print(i) | none | 1 | 3.443483 | 3 | |
ucsb/repository/user_repository.py | jasunchen/agmonitor_backend | 0 | 6615776 | from ucsb.models import user, user_asset
from rest_framework.response import Response
from django.forms.models import model_to_dict
from rest_framework.decorators import api_view
from ucsb.repository.helpers import *
from opt.optimization import *
from opt.base_load import *
from opt.utility.solar import *
from opt.utility.weather import *
from opt.utility.send_email import *
from opt.utility.scheduler import optimization
# from ucsb.repository.helpers import *
import smtplib, ssl
@api_view(['POST', 'DELETE'])
def update_user(request):
if request.method == 'POST':
params = ["email", "low_limit", "max_limit", "battery_size", "cost_or_shutoff", "hours_of_power", "longitude", "latitude", "phone_number"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.data, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.data.get('email')
low_limit = request.data.get('low_limit')
max_limit = request.data.get('max_limit')
battery_size = request.data.get('battery_size')
cost_or_shutoff = request.data.get('cost_or_shutoff')
hours_of_power = request.data.get('hours_of_power')
longitude = request.data.get('longitude')
latitude = request.data.get('latitude')
phone_number = request.data.get('phone_number')
tmp_user = user.objects.get(user_email=email)
tmp_user.low_limit = low_limit
tmp_user.max_limit = max_limit
tmp_user.battery_size = battery_size
tmp_user.cost_or_shutoff = cost_or_shutoff
tmp_user.hours_of_power = hours_of_power
tmp_user.longitude = longitude
tmp_user.latitude = latitude
tmp_user.phone_number = phone_number
tmp_user.save()
return Response({"detail": "User updated successfully"}, status=200)
elif request.method == 'DELETE':
email = request.data.get('email')
if email == '':
return Response({"detail": "Email cannot be empty"}, status=400)
tmp_user = user.objects.get(user_email=email)
tmp_user.delete()
return Response({"detail": "User deleted successfully"})
else:
return Response({"detail": "Error: Invalid request"}, status=400)
#test function
@api_view(['GET'])
def getAllUsers(request):
res = []
result = user.objects.all()
for r in result:
res.append(model_to_dict(r))
return Response(res)
@api_view(['GET'])
def get_user(request):
params = ["email"]
#Check for Required Fields
for p in params:
if request.query_params.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.query_params, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.query_params.get('email')
try:
tmp_user = user.objects.get(user_email=email)
return Response(model_to_dict(tmp_user))
except:
return Response({"detail": "Error: User does not exist"}, status=400)
@api_view(['POST'])
def register_user(request):
if request.method == 'POST':
email = request.data.get('email')
if email == '':
return Response({"detail": "Email cannot be empty"}, status=400)
try:
a_user = user.objects.get(user_email=email)
return Response({"detail": "User has already registered"})
except (user.DoesNotExist, user.MultipleObjectsReturned):
tmp_user = user(user_email=email)
tmp_user.save()
return Response({"detail": "User created successfully"}, status=200)
else:
return Response({"detail": "Error: Invalid request"}, status=400)
@api_view(['POST'])
def opt(request):
params = ["email"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.data, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.data.get('email')
result = optimization(email)
#return best_threshold, good_times, best_schedule, and should_charge
return Response({"detail": result}, status=200) | from ucsb.models import user, user_asset
from rest_framework.response import Response
from django.forms.models import model_to_dict
from rest_framework.decorators import api_view
from ucsb.repository.helpers import *
from opt.optimization import *
from opt.base_load import *
from opt.utility.solar import *
from opt.utility.weather import *
from opt.utility.send_email import *
from opt.utility.scheduler import optimization
# from ucsb.repository.helpers import *
import smtplib, ssl
@api_view(['POST', 'DELETE'])
def update_user(request):
if request.method == 'POST':
params = ["email", "low_limit", "max_limit", "battery_size", "cost_or_shutoff", "hours_of_power", "longitude", "latitude", "phone_number"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.data, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.data.get('email')
low_limit = request.data.get('low_limit')
max_limit = request.data.get('max_limit')
battery_size = request.data.get('battery_size')
cost_or_shutoff = request.data.get('cost_or_shutoff')
hours_of_power = request.data.get('hours_of_power')
longitude = request.data.get('longitude')
latitude = request.data.get('latitude')
phone_number = request.data.get('phone_number')
tmp_user = user.objects.get(user_email=email)
tmp_user.low_limit = low_limit
tmp_user.max_limit = max_limit
tmp_user.battery_size = battery_size
tmp_user.cost_or_shutoff = cost_or_shutoff
tmp_user.hours_of_power = hours_of_power
tmp_user.longitude = longitude
tmp_user.latitude = latitude
tmp_user.phone_number = phone_number
tmp_user.save()
return Response({"detail": "User updated successfully"}, status=200)
elif request.method == 'DELETE':
email = request.data.get('email')
if email == '':
return Response({"detail": "Email cannot be empty"}, status=400)
tmp_user = user.objects.get(user_email=email)
tmp_user.delete()
return Response({"detail": "User deleted successfully"})
else:
return Response({"detail": "Error: Invalid request"}, status=400)
#test function
@api_view(['GET'])
def getAllUsers(request):
res = []
result = user.objects.all()
for r in result:
res.append(model_to_dict(r))
return Response(res)
@api_view(['GET'])
def get_user(request):
params = ["email"]
#Check for Required Fields
for p in params:
if request.query_params.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.query_params, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.query_params.get('email')
try:
tmp_user = user.objects.get(user_email=email)
return Response(model_to_dict(tmp_user))
except:
return Response({"detail": "Error: User does not exist"}, status=400)
@api_view(['POST'])
def register_user(request):
if request.method == 'POST':
email = request.data.get('email')
if email == '':
return Response({"detail": "Email cannot be empty"}, status=400)
try:
a_user = user.objects.get(user_email=email)
return Response({"detail": "User has already registered"})
except (user.DoesNotExist, user.MultipleObjectsReturned):
tmp_user = user(user_email=email)
tmp_user.save()
return Response({"detail": "User created successfully"}, status=200)
else:
return Response({"detail": "Error: Invalid request"}, status=400)
@api_view(['POST'])
def opt(request):
params = ["email"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.data, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.data.get('email')
result = optimization(email)
#return best_threshold, good_times, best_schedule, and should_charge
return Response({"detail": result}, status=200) | en | 0.512639 | # from ucsb.repository.helpers import * #Check for Required Fields #Check for Invalid Parameters #test function #Check for Required Fields #Check for Invalid Parameters #Check for Required Fields #Check for Invalid Parameters #return best_threshold, good_times, best_schedule, and should_charge | 2.112 | 2 |
utils/converter.py | Frognar/Super-Resolution | 1 | 6615777 | <gh_stars>1-10
import PIL
from torchvision.transforms import Normalize, RandomCrop, Resize, ToTensor
class Converter:
def __init__(self, crop_size=224, upscale_factor=4, mean=None, std=None):
if mean is None:
mean = [0.4787, 0.4470, 0.3931]
if std is None:
std = [0.0301, 0.0310, 0.0261]
self.random_crop = RandomCrop(crop_size)
self.resize = Resize(crop_size // upscale_factor, PIL.Image.BICUBIC)
self.convert = ToTensor()
self.normalize = Normalize(mean, std)
def transform(self, image):
hr_image = self.random_crop(image)
lr_image = self.resize(hr_image)
return self.convert(hr_image), self.normalize(self.convert(lr_image))
| import PIL
from torchvision.transforms import Normalize, RandomCrop, Resize, ToTensor
class Converter:
def __init__(self, crop_size=224, upscale_factor=4, mean=None, std=None):
if mean is None:
mean = [0.4787, 0.4470, 0.3931]
if std is None:
std = [0.0301, 0.0310, 0.0261]
self.random_crop = RandomCrop(crop_size)
self.resize = Resize(crop_size // upscale_factor, PIL.Image.BICUBIC)
self.convert = ToTensor()
self.normalize = Normalize(mean, std)
def transform(self, image):
hr_image = self.random_crop(image)
lr_image = self.resize(hr_image)
return self.convert(hr_image), self.normalize(self.convert(lr_image)) | none | 1 | 2.861122 | 3 | |
mod/tools/httpserver.py | guymella/fips | 0 | 6615778 | <reponame>guymella/fips<filename>mod/tools/httpserver.py
"""
wrapper for node's http-server module, this is preferred over
python's SimpleHTTPServer module because it supports
HTTP range requests
"""
import subprocess
from mod import log,util
name = 'http-server'
platforms = ['osx', 'linux', 'win']
optional = True
not_found = "required for running emscripten targets (npm install http-server -g)"
#-------------------------------------------------------------------------------
def check_exists(fips_dir) :
try:
out = subprocess.check_output(['http-server', '-h'])
return True
except (OSError, subprocess.CalledProcessError):
return False
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, target_name, target_cwd):
if not check_exists(fips_dir):
log.error("http-server tool not found (npm install http-server -g)")
return
html_name = target_name + '.html'
if util.get_host_platform() == 'osx' :
try :
subprocess.call(
'open http://localhost:8080/{} ; http-server -c-1 -g'.format(html_name),
cwd = target_cwd, shell=True)
except KeyboardInterrupt :
return
elif util.get_host_platform() == 'win' :
try :
cmd = 'cmd /c start http://localhost:8080/{} && http-server -c-1 -g'.format(html_name)
subprocess.call(cmd, cwd = target_cwd, shell=True)
except KeyboardInterrupt :
return
elif util.get_host_platform() == 'linux' :
try :
subprocess.call(
'xdg-open http://localhost:8080/{}; http-server -c-1 -g'.format(html_name),
cwd = target_cwd, shell=True)
except KeyboardInterrupt :
return
else :
log.error("don't know how to start HTML app on this platform")
| """
wrapper for node's http-server module, this is preferred over
python's SimpleHTTPServer module because it supports
HTTP range requests
"""
import subprocess
from mod import log,util
name = 'http-server'
platforms = ['osx', 'linux', 'win']
optional = True
not_found = "required for running emscripten targets (npm install http-server -g)"
#-------------------------------------------------------------------------------
def check_exists(fips_dir) :
try:
out = subprocess.check_output(['http-server', '-h'])
return True
except (OSError, subprocess.CalledProcessError):
return False
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, target_name, target_cwd):
if not check_exists(fips_dir):
log.error("http-server tool not found (npm install http-server -g)")
return
html_name = target_name + '.html'
if util.get_host_platform() == 'osx' :
try :
subprocess.call(
'open http://localhost:8080/{} ; http-server -c-1 -g'.format(html_name),
cwd = target_cwd, shell=True)
except KeyboardInterrupt :
return
elif util.get_host_platform() == 'win' :
try :
cmd = 'cmd /c start http://localhost:8080/{} && http-server -c-1 -g'.format(html_name)
subprocess.call(cmd, cwd = target_cwd, shell=True)
except KeyboardInterrupt :
return
elif util.get_host_platform() == 'linux' :
try :
subprocess.call(
'xdg-open http://localhost:8080/{}; http-server -c-1 -g'.format(html_name),
cwd = target_cwd, shell=True)
except KeyboardInterrupt :
return
else :
log.error("don't know how to start HTML app on this platform") | en | 0.213146 | wrapper for node's http-server module, this is preferred over python's SimpleHTTPServer module because it supports HTTP range requests #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- | 2.418134 | 2 |
venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/modules/test_ad_auth_provider.py | usegalaxy-no/usegalaxy | 1 | 6615779 | <reponame>usegalaxy-no/usegalaxy
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import pytest
from ansible_collections.sensu.sensu_go.plugins.module_utils import (
errors,
utils,
)
from ansible_collections.sensu.sensu_go.plugins.modules import ad_auth_provider
from .common.utils import (
AnsibleExitJson,
AnsibleFailJson,
ModuleTestCase,
set_module_args,
)
pytestmark = pytest.mark.skipif(
sys.version_info < (2, 7), reason="requires python2.7 or higher"
)
class TestDoDiffer:
def test_no_changes(self):
desired = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is False
def test_changes_are_detected(self):
desired = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
port=636,
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is True
def test_changes_are_detected_diff_servers_len(self):
desired = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
),
dict(
host="127.0.0.2",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
),
],
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is True
def test_changes_are_other_params(self):
desired = dict(
spec=dict(
servers=[],
groups_prefix="ad",
username_prefix="ad",
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is True
class TestADAutProvider(ModuleTestCase):
def test_minimal_provider_parameters(self, mocker):
sync_v1_mock = mocker.patch.object(utils, "sync_v1")
sync_v1_mock.return_value = True, {}
set_module_args(
state="present",
name="activedirectory",
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
)
with pytest.raises(AnsibleExitJson):
ad_auth_provider.main()
state, _client, path, payload, check_mode, _do_differ = sync_v1_mock.call_args[
0
]
assert state == "present"
assert path == "/api/enterprise/authentication/v2/authproviders/activedirectory"
assert payload == dict(
type="ad",
api_version="authentication/v2",
metadata=dict(name="activedirectory"),
spec=dict(
servers=[
dict(
host="127.0.0.1",
port=None,
insecure=False,
security="tls",
trusted_ca_file=None,
client_cert_file=None,
client_key_file=None,
default_upn_domain=None,
include_nested_groups=None,
binding=None,
group_search=dict(
base_dn="dc=acme,dc=org",
attribute="member",
name_attribute="cn",
object_class="group",
),
user_search=dict(
base_dn="dc=acme,dc=org",
attribute="sAMAccountName",
name_attribute="displayName",
object_class="person",
),
)
]
),
)
assert check_mode is False
def test_all_provider_parameters(self, mocker):
sync_v1_mock = mocker.patch.object(utils, "sync_v1")
sync_v1_mock.return_value = True, {}
set_module_args(
state="present",
name="activedirectory",
servers=[
dict(
host="127.0.0.1",
port=636,
insecure=False,
security="tls",
trusted_ca_file="/path/to/trusted-certificate-authorities.pem",
client_cert_file="/path/to/ssl/cert.pem",
client_key_file="/path/to/ssl/key.pem",
default_upn_domain="example.org",
include_nested_groups=True,
binding=dict(
user_dn="cn=binder,dc=acme,dc=org",
password="<PASSWORD>",
),
group_search=dict(
base_dn="dc=acme,dc=org",
attribute="member",
name_attribute="cn",
object_class="group",
),
user_search=dict(
base_dn="dc=acme,dc=org",
attribute="sAMAccountName",
name_attribute="displayName",
object_class="person",
),
)
],
groups_prefix="ad",
username_prefix="ad",
)
with pytest.raises(AnsibleExitJson):
ad_auth_provider.main()
state, _client, path, payload, check_mode, _do_differ = sync_v1_mock.call_args[
0
]
assert state == "present"
assert path == "/api/enterprise/authentication/v2/authproviders/activedirectory"
assert payload == dict(
type="ad",
api_version="authentication/v2",
metadata=dict(name="activedirectory"),
spec=dict(
servers=[
dict(
host="127.0.0.1",
port=636,
insecure=False,
security="tls",
trusted_ca_file="/path/to/trusted-certificate-authorities.pem",
client_cert_file="/path/to/ssl/cert.pem",
client_key_file="/path/to/ssl/key.pem",
default_upn_domain="example.org",
include_nested_groups=True,
binding=dict(
user_dn="cn=binder,dc=acme,dc=org",
password="<PASSWORD>_PASSWORD",
),
group_search=dict(
base_dn="dc=acme,dc=org",
attribute="member",
name_attribute="cn",
object_class="group",
),
user_search=dict(
base_dn="dc=acme,dc=org",
attribute="sAMAccountName",
name_attribute="displayName",
object_class="person",
),
)
],
groups_prefix="ad",
username_prefix="ad",
),
)
assert check_mode is False
def test_failure(self, mocker):
sync_mock = mocker.patch.object(utils, "sync_v1")
sync_mock.side_effect = errors.Error("Bad error")
set_module_args()
with pytest.raises(AnsibleFailJson):
ad_auth_provider.main()
| from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import pytest
from ansible_collections.sensu.sensu_go.plugins.module_utils import (
errors,
utils,
)
from ansible_collections.sensu.sensu_go.plugins.modules import ad_auth_provider
from .common.utils import (
AnsibleExitJson,
AnsibleFailJson,
ModuleTestCase,
set_module_args,
)
pytestmark = pytest.mark.skipif(
sys.version_info < (2, 7), reason="requires python2.7 or higher"
)
class TestDoDiffer:
def test_no_changes(self):
desired = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is False
def test_changes_are_detected(self):
desired = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
port=636,
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is True
def test_changes_are_detected_diff_servers_len(self):
desired = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
),
dict(
host="127.0.0.2",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
),
],
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is True
def test_changes_are_other_params(self):
desired = dict(
spec=dict(
servers=[],
groups_prefix="ad",
username_prefix="ad",
),
metadata=dict(name="activedirectory"),
)
current = dict(
spec=dict(
servers=[],
),
metadata=dict(
name="activedirectory",
created_by="me",
),
)
assert ad_auth_provider.do_differ(current, desired) is True
class TestADAutProvider(ModuleTestCase):
def test_minimal_provider_parameters(self, mocker):
sync_v1_mock = mocker.patch.object(utils, "sync_v1")
sync_v1_mock.return_value = True, {}
set_module_args(
state="present",
name="activedirectory",
servers=[
dict(
host="127.0.0.1",
group_search=dict(
base_dn="dc=acme,dc=org",
),
user_search=dict(
base_dn="dc=acme,dc=org",
),
)
],
)
with pytest.raises(AnsibleExitJson):
ad_auth_provider.main()
state, _client, path, payload, check_mode, _do_differ = sync_v1_mock.call_args[
0
]
assert state == "present"
assert path == "/api/enterprise/authentication/v2/authproviders/activedirectory"
assert payload == dict(
type="ad",
api_version="authentication/v2",
metadata=dict(name="activedirectory"),
spec=dict(
servers=[
dict(
host="127.0.0.1",
port=None,
insecure=False,
security="tls",
trusted_ca_file=None,
client_cert_file=None,
client_key_file=None,
default_upn_domain=None,
include_nested_groups=None,
binding=None,
group_search=dict(
base_dn="dc=acme,dc=org",
attribute="member",
name_attribute="cn",
object_class="group",
),
user_search=dict(
base_dn="dc=acme,dc=org",
attribute="sAMAccountName",
name_attribute="displayName",
object_class="person",
),
)
]
),
)
assert check_mode is False
def test_all_provider_parameters(self, mocker):
sync_v1_mock = mocker.patch.object(utils, "sync_v1")
sync_v1_mock.return_value = True, {}
set_module_args(
state="present",
name="activedirectory",
servers=[
dict(
host="127.0.0.1",
port=636,
insecure=False,
security="tls",
trusted_ca_file="/path/to/trusted-certificate-authorities.pem",
client_cert_file="/path/to/ssl/cert.pem",
client_key_file="/path/to/ssl/key.pem",
default_upn_domain="example.org",
include_nested_groups=True,
binding=dict(
user_dn="cn=binder,dc=acme,dc=org",
password="<PASSWORD>",
),
group_search=dict(
base_dn="dc=acme,dc=org",
attribute="member",
name_attribute="cn",
object_class="group",
),
user_search=dict(
base_dn="dc=acme,dc=org",
attribute="sAMAccountName",
name_attribute="displayName",
object_class="person",
),
)
],
groups_prefix="ad",
username_prefix="ad",
)
with pytest.raises(AnsibleExitJson):
ad_auth_provider.main()
state, _client, path, payload, check_mode, _do_differ = sync_v1_mock.call_args[
0
]
assert state == "present"
assert path == "/api/enterprise/authentication/v2/authproviders/activedirectory"
assert payload == dict(
type="ad",
api_version="authentication/v2",
metadata=dict(name="activedirectory"),
spec=dict(
servers=[
dict(
host="127.0.0.1",
port=636,
insecure=False,
security="tls",
trusted_ca_file="/path/to/trusted-certificate-authorities.pem",
client_cert_file="/path/to/ssl/cert.pem",
client_key_file="/path/to/ssl/key.pem",
default_upn_domain="example.org",
include_nested_groups=True,
binding=dict(
user_dn="cn=binder,dc=acme,dc=org",
password="<PASSWORD>_PASSWORD",
),
group_search=dict(
base_dn="dc=acme,dc=org",
attribute="member",
name_attribute="cn",
object_class="group",
),
user_search=dict(
base_dn="dc=acme,dc=org",
attribute="sAMAccountName",
name_attribute="displayName",
object_class="person",
),
)
],
groups_prefix="ad",
username_prefix="ad",
),
)
assert check_mode is False
def test_failure(self, mocker):
sync_mock = mocker.patch.object(utils, "sync_v1")
sync_mock.side_effect = errors.Error("Bad error")
set_module_args()
with pytest.raises(AnsibleFailJson):
ad_auth_provider.main() | none | 1 | 1.906377 | 2 | |
test/integration/validations/validation.py | MxBromelia/SQL-Judge | 2 | 6615780 | # pylint: disable=missing-module-docstring
# pylint: disable=missing-function-docstring
from sql_judge import validates
def is_invalid(entity):
if not entity.valid:
return 'Invalid'
@validates('invalid_entity')
def validate_invalid_entity(_invalid_entity):
return None
def not_a_validation():
return None
@validates('table')
def val_table(table):
return is_invalid(table)
@validates('column')
def val_column(column):
return is_invalid(column)
| # pylint: disable=missing-module-docstring
# pylint: disable=missing-function-docstring
from sql_judge import validates
def is_invalid(entity):
if not entity.valid:
return 'Invalid'
@validates('invalid_entity')
def validate_invalid_entity(_invalid_entity):
return None
def not_a_validation():
return None
@validates('table')
def val_table(table):
return is_invalid(table)
@validates('column')
def val_column(column):
return is_invalid(column)
| en | 0.458821 | # pylint: disable=missing-module-docstring # pylint: disable=missing-function-docstring | 2.102674 | 2 |
website/urls.py | Deepanjalkumar/eframe | 1 | 6615781 | """hunterio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from website import views
urlpatterns = [
path('', views.index, name="home"),
path('domain_search', views.domain_search, name="domain_search"),
path('email_finder', views.email_finder, name="email_finder"),
path('email_verifier', views.email_verifier, name="email_verifier"),
path('resources', views.resources, name="resources"),
path('about_us', views.about_us, name="about_us"),
path('our_data', views.our_data, name="our_data"),
path('loginuser', views.loginuser, name="loginuser"),
path('logoutuser', views.logoutuser, name="logoutuser"),
path('signupuser', views.signupuser, name="signupuser"),
]
| """hunterio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from website import views
urlpatterns = [
path('', views.index, name="home"),
path('domain_search', views.domain_search, name="domain_search"),
path('email_finder', views.email_finder, name="email_finder"),
path('email_verifier', views.email_verifier, name="email_verifier"),
path('resources', views.resources, name="resources"),
path('about_us', views.about_us, name="about_us"),
path('our_data', views.our_data, name="our_data"),
path('loginuser', views.loginuser, name="loginuser"),
path('logoutuser', views.logoutuser, name="logoutuser"),
path('signupuser', views.signupuser, name="signupuser"),
]
| en | 0.553413 | hunterio URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 2.818883 | 3 |
tests/ml/features_test.py | aliechoes/iflai | 0 | 6615782 |
from iflai.ml.features import *
def test_features():
pass |
from iflai.ml.features import *
def test_features():
pass | none | 1 | 0.737731 | 1 | |
src/experiments/evaluator.py | roberthoenig/VQ-VAE-Speech | 241 | 6615783 | #####################################################################################
# MIT License #
# #
# Copyright (C) 2019 <NAME> #
# #
# This file is part of VQ-VAE-Speech. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
import os
import pickle
from textwrap import wrap

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import textgrid
import torch
import torch.nn.functional as F
from tqdm import tqdm

from dataset.spectrogram_parser import SpectrogramParser
from dataset.vctk import VCTK
from error_handling.console_logger import ConsoleLogger
from evaluation.alignment_stats import AlignmentStats
from evaluation.embedding_space_stats import EmbeddingSpaceStats
class Evaluator(object):
def __init__(self, device, model, data_stream, configuration, results_path, experiment_name):
self._device = device
self._model = model
self._data_stream = data_stream
self._configuration = configuration
self._vctk = VCTK(self._configuration['data_root'], ratio=self._configuration['train_val_split'])
self._results_path = results_path
self._experiment_name = experiment_name
def evaluate(self, evaluation_options):
self._model.eval()
if evaluation_options['plot_comparaison_plot'] or \
evaluation_options['plot_quantized_embedding_spaces'] or \
evaluation_options['plot_distances_histogram']:
evaluation_entry = self._evaluate_once()
if evaluation_options['plot_comparaison_plot']:
self._compute_comparaison_plot(evaluation_entry)
if evaluation_options['plot_quantized_embedding_spaces']:
EmbeddingSpaceStats.compute_and_plot_quantized_embedding_space_projections(
self._results_path, self._experiment_name, evaluation_entry,
self._model.vq.embedding, self._data_stream.validation_batch_size
)
if evaluation_options['plot_distances_histogram']:
self._plot_distances_histogram(evaluation_entry)
#self._test_denormalization(evaluation_entry)
if evaluation_options['compute_many_to_one_mapping']:
self._many_to_one_mapping()
if evaluation_options['compute_alignments'] or \
evaluation_options['compute_clustering_metrics'] or \
evaluation_options['compute_groundtruth_average_phonemes_number']:
alignment_stats = AlignmentStats(
self._data_stream,
self._vctk,
self._configuration,
self._device,
self._model,
self._results_path,
self._experiment_name,
evaluation_options['alignment_subset']
)
if evaluation_options['compute_alignments']:
groundtruth_alignments_path = self._results_path + os.sep + \
'vctk_{}_groundtruth_alignments.pickle'.format(evaluation_options['alignment_subset'])
if not os.path.isfile(groundtruth_alignments_path):
alignment_stats.compute_groundtruth_alignments()
alignment_stats.compute_groundtruth_bigrams_matrix(wo_diag=True)
alignment_stats.compute_groundtruth_bigrams_matrix(wo_diag=False)
alignment_stats.compute_groundtruth_phonemes_frequency()
else:
ConsoleLogger.status('Groundtruth alignments already exist')
empirical_alignments_path = self._results_path + os.sep + self._experiment_name + \
'_vctk_{}_empirical_alignments.pickle'.format(evaluation_options['alignment_subset'])
if not os.path.isfile(empirical_alignments_path):
alignment_stats.compute_empirical_alignments()
alignment_stats.compute_empirical_bigrams_matrix(wo_diag=True)
alignment_stats.compute_empirical_bigrams_matrix(wo_diag=False)
alignment_stats.comupte_empirical_encodings_frequency()
else:
ConsoleLogger.status('Empirical alignments already exist')
if evaluation_options['compute_clustering_metrics']:
alignment_stats.compute_clustering_metrics()
if evaluation_options['compute_groundtruth_average_phonemes_number']:
alignment_stats.compute_groundtruth_average_phonemes_number()
def _evaluate_once(self):
self._model.eval()
data = next(iter(self._data_stream.validation_loader))
preprocessed_audio = data['preprocessed_audio'].to(self._device)
valid_originals = data['input_features'].to(self._device)
speaker_ids = data['speaker_id'].to(self._device)
target = data['output_features'].to(self._device)
wav_filename = data['wav_filename']
shifting_time = data['shifting_time'].to(self._device)
preprocessed_length = data['preprocessed_length'].to(self._device)
valid_originals = valid_originals.permute(0, 2, 1).contiguous().float()
batch_size = valid_originals.size(0)
target = target.permute(0, 2, 1).contiguous().float()
wav_filename = wav_filename[0][0]
z = self._model.encoder(valid_originals)
z = self._model.pre_vq_conv(z)
_, quantized, _, encodings, distances, encoding_indices, _, \
encoding_distances, embedding_distances, frames_vs_embedding_distances, \
concatenated_quantized = self._model.vq(z)
valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)[0]
return {
'preprocessed_audio': preprocessed_audio,
'valid_originals': valid_originals,
'speaker_ids': speaker_ids,
'target': target,
'wav_filename': wav_filename,
'shifting_time': shifting_time,
'preprocessed_length': preprocessed_length,
'batch_size': batch_size,
'quantized': quantized,
'encodings': encodings,
'distances': distances,
'encoding_indices': encoding_indices,
'encoding_distances': encoding_distances,
'embedding_distances': embedding_distances,
'frames_vs_embedding_distances': frames_vs_embedding_distances,
'concatenated_quantized': concatenated_quantized,
'valid_reconstructions': valid_reconstructions
}
    def _compute_comparaison_plot(self, evaluation_entry):
        """Render a six-panel comparison figure for a single evaluated utterence.

        Panels, top to bottom: raw waveform, spectrogram, augmented MFCC input
        features, softmax over the VQ distances, one-hot encodings, and the
        decoded reconstruction. The figure is saved as
        '<experiment_name>_evaluation-comparaison-plot.png' under the results path.

        Args:
            evaluation_entry: dict produced by `_evaluate_once()`.
        """
        # Derive the utterence key (wav basename without extension) to look up
        # its transcription and build the phoneme alignment TextGrid path.
        utterence_key = evaluation_entry['wav_filename'].split('/')[-1].replace('.wav', '')
        utterence = self._vctk.utterences[utterence_key].replace('\n', '')
        phonemes_alignment_path = os.sep.join(evaluation_entry['wav_filename'].split('/')[:-3]) \
            + os.sep + 'phonemes' + os.sep + utterence_key.split('_')[0] + os.sep \
            + utterence_key + '.TextGrid'
        # NOTE(review): `phonemes_alignment_path` is computed but never used;
        # the TextGrid reading below was left disabled.
        #tg = textgrid.TextGrid()
        #tg.read(phonemes_alignment_path)
        #for interval in tg.tiers[0]:
        ConsoleLogger.status('Original utterence: {}'.format(utterence))
        if self._configuration['verbose']:
            ConsoleLogger.status('utterence: {}'.format(utterence))
        spectrogram_parser = SpectrogramParser()
        # First (and, per the batch handling above, only inspected) batch item.
        preprocessed_audio = evaluation_entry['preprocessed_audio'].detach().cpu()[0].numpy().squeeze()
        spectrogram = spectrogram_parser.parse_audio(preprocessed_audio).contiguous()
        spectrogram = spectrogram.detach().cpu().numpy()
        valid_originals = evaluation_entry['valid_originals'].detach().cpu()[0].numpy()
        # Softmax over the negated distances: the nearest codebook vector gets
        # the largest probability mass.
        probs = F.softmax(-evaluation_entry['distances'][0], dim=1).detach().cpu().transpose(0, 1).contiguous()
        #target = self._target.detach().cpu()[0].numpy()
        valid_reconstructions = evaluation_entry['valid_reconstructions'].detach().cpu().numpy()
        fig, axs = plt.subplots(6, 1, figsize=(35, 30), sharex=True)
        # Waveform of the original speech signal
        axs[0].set_title('Waveform of the original speech signal')
        axs[0].plot(np.arange(len(preprocessed_audio)) / float(self._configuration['sampling_rate']), preprocessed_audio)
        # TODO: Add number of encoding indices at the same rate of the tokens with _compute_unified_time_scale()
        """
        # Example of vertical red lines
        xposition = [0.3, 0.4, 0.45]
        for xc in xposition:
            plt.axvline(x=xc, color='r', linestyle='-', linewidth=1)
        """
        # Spectrogram of the original speech signal
        axs[1].set_title('Spectrogram of the original speech signal')
        self._plot_pcolormesh(spectrogram, fig, x=self._compute_unified_time_scale(spectrogram.shape[1]), axis=axs[1])
        # MFCC + d + a of the original speech signal
        axs[2].set_title('Augmented MFCC + d + a #filters=13+13+13 of the original speech signal')
        self._plot_pcolormesh(valid_originals, fig, x=self._compute_unified_time_scale(valid_originals.shape[1]), axis=axs[2])
        # Softmax of distances computed in VQ; downsampling_factor=2 matches the
        # coarser time resolution of the VQ outputs relative to the input frames.
        axs[3].set_title('Softmax of distances computed in VQ\n($||z_e(x) - e_i||^2_2$ with $z_e(x)$ the output of the encoder prior to quantization)')
        self._plot_pcolormesh(probs, fig, x=self._compute_unified_time_scale(probs.shape[1], downsampling_factor=2), axis=axs[3])
        encodings = evaluation_entry['encodings'].detach().cpu().numpy()
        axs[4].set_title('Encodings')
        self._plot_pcolormesh(encodings[0].transpose(), fig, x=self._compute_unified_time_scale(encodings[0].transpose().shape[1],
            downsampling_factor=2), axis=axs[4])
        # Actual reconstruction
        axs[5].set_title('Actual reconstruction')
        self._plot_pcolormesh(valid_reconstructions, fig, x=self._compute_unified_time_scale(valid_reconstructions.shape[1]), axis=axs[5])
        output_path = self._results_path + os.sep + self._experiment_name + '_evaluation-comparaison-plot.png'
        plt.savefig(output_path, bbox_inches='tight', pad_inches=0)
        plt.close()
def _plot_pcolormesh(self, data, fig, x=None, y=None, axis=None):
axis = plt.gca() if axis is None else axis # default axis if None
x = np.arange(data.shape[1]) if x is None else x # default x shape if None
y = np.arange(data.shape[0]) if y is None else y # default y shape if None
c = axis.pcolormesh(x, y, data)
fig.colorbar(c, ax=axis)
def _compute_unified_time_scale(self, shape, winstep=0.01, downsampling_factor=1):
return np.arange(shape) * winstep * downsampling_factor
def _plot_distances_histogram(self, evaluation_entry):
encoding_distances = evaluation_entry['encoding_distances'][0].detach().cpu().numpy()
embedding_distances = evaluation_entry['embedding_distances'].detach().cpu().numpy()
frames_vs_embedding_distances = evaluation_entry['frames_vs_embedding_distances'].detach()[0].cpu().transpose(0, 1).numpy().ravel()
if self._configuration['verbose']:
ConsoleLogger.status('encoding_distances[0].size(): {}'.format(encoding_distances.shape))
ConsoleLogger.status('embedding_distances.size(): {}'.format(embedding_distances.shape))
ConsoleLogger.status('frames_vs_embedding_distances[0].shape: {}'.format(frames_vs_embedding_distances.shape))
fig, axs = plt.subplots(3, 1, figsize=(30, 20), sharex=True)
axs[0].set_title('\n'.join(wrap('Histogram of the distances between the'
' encodings vectors', 60)))
sns.distplot(encoding_distances, hist=True, kde=False, ax=axs[0], norm_hist=True)
axs[1].set_title('\n'.join(wrap('Histogram of the distances between the'
' embeddings vectors', 60)))
sns.distplot(embedding_distances, hist=True, kde=False, ax=axs[1], norm_hist=True)
axs[2].set_title(
'Histogram of the distances computed in'
' VQ\n($||z_e(x) - e_i||^2_2$ with $z_e(x)$ the output of the encoder'
' prior to quantization)'
)
sns.distplot(frames_vs_embedding_distances, hist=True, kde=False, ax=axs[2], norm_hist=True)
output_path = self._results_path + os.sep + self._experiment_name + '_distances-histogram-plot.png'
fig.savefig(output_path, bbox_inches='tight', pad_inches=0)
plt.close(fig)
    def _test_denormalization(self, evaluation_entry):
        """Plot normalized vs. denormalized features to sanity-check the normalizer.

        Reverses the training-set standardization (x * std + mean) on both the
        input features and the reconstruction, then draws all four matrices for
        visual comparison. Saved as
        '<experiment_name>_test-denormalization-plot.png' in the results path.

        Args:
            evaluation_entry: dict produced by `_evaluate_once()`.
        """
        valid_originals = evaluation_entry['valid_originals'].detach().cpu()[0].numpy()
        valid_reconstructions = evaluation_entry['valid_reconstructions'].detach().cpu().numpy()
        normalizer = self._data_stream.normalizer
        # Undo the (x - mean) / std standardization; the transposes put the
        # feature dimension last so the broadcast matches the stats vectors.
        denormalized_valid_originals = (normalizer['train_std'] * valid_originals.transpose() + normalizer['train_mean']).transpose()
        denormalized_valid_reconstructions = (normalizer['train_std'] * valid_reconstructions.transpose() + normalizer['train_mean']).transpose()
        # TODO: Remove the deltas and the accelerations, remove the zeros because it's the
        # energy, and compute the distance between the two
        fig, axs = plt.subplots(4, 1, figsize=(30, 20), sharex=True)
        # MFCC + d + a of the original speech signal
        axs[0].set_title('Augmented MFCC + d + a #filters=13+13+13 of the original speech signal')
        self._plot_pcolormesh(valid_originals, fig, x=self._compute_unified_time_scale(valid_originals.shape[1]), axis=axs[0])
        # Actual reconstruction
        axs[1].set_title('Actual reconstruction')
        self._plot_pcolormesh(valid_reconstructions, fig, x=self._compute_unified_time_scale(valid_reconstructions.shape[1]), axis=axs[1])
        # Denormalization of the original speech signal
        axs[2].set_title('Denormalized target')
        self._plot_pcolormesh(denormalized_valid_originals, fig, x=self._compute_unified_time_scale(denormalized_valid_originals.shape[1]), axis=axs[2])
        # Denormalization of the reconstruction
        axs[3].set_title('Denormalized reconstruction')
        self._plot_pcolormesh(denormalized_valid_reconstructions, fig, x=self._compute_unified_time_scale(denormalized_valid_reconstructions.shape[1]), axis=axs[3])
        output_path = self._results_path + os.sep + self._experiment_name + '_test-denormalization-plot.png'
        plt.savefig(output_path, bbox_inches='tight', pad_inches=0)
        plt.close()
    def _many_to_one_mapping(self):
        """Map each VQ codebook token to the phonemes it is selected for.

        Runs the model over the validation set, aligns every selected token
        index with the groundtruth phoneme interval (from the TextGrid files)
        that covers its timestamp, and aggregates two structures:
        - phonemes_mapping: phoneme -> list of token indices selected within it;
        - tokens_mapping: token index -> [(phoneme, probability), ...], sorted
          by descending probability.
        Both are pickled into the results path as
        '<experiment_name>_phonemes_mapping.pickle' and
        '<experiment_name>_tokens_mapping.pickle'.
        """
        # TODO: fix it for batch size greater than one
        tokens_selections = list()
        val_speaker_ids = set()
        with tqdm(self._data_stream.validation_loader) as bar:
            for data in bar:
                valid_originals = data['input_features'].to(self._device).permute(0, 2, 1).contiguous().float()
                speaker_ids = data['speaker_id'].to(self._device)
                shifting_times = data['shifting_time'].to(self._device)
                wav_filenames = data['wav_filename']
                # NOTE(review): only the first wav filename of the batch is
                # used to pick the speaker — consistent with the batch-size-one
                # TODO above.
                speaker_id = wav_filenames[0][0].split(os.sep)[-2]
                val_speaker_ids.add(speaker_id)
                # Skip speakers that have no phoneme alignment folder.
                if speaker_id not in os.listdir(self._vctk.raw_folder + os.sep + 'VCTK-Corpus' + os.sep + 'phonemes'):
                    # TODO: log the missing folders
                    continue
                z = self._model.encoder(valid_originals)
                z = self._model.pre_vq_conv(z)
                _, quantized, _, encodings, _, encoding_indices, _, \
                    _, _, _, _ = self._model.vq(z)
                valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)
                B = valid_reconstructions.size(0)
                # One token index per (downsampled) time step, per batch item.
                encoding_indices = encoding_indices.view(B, -1, 1)
                for i in range(len(valid_reconstructions)):
                    wav_filename = wav_filenames[0][i]
                    utterence_key = wav_filename.split('/')[-1].replace('.wav', '')
                    phonemes_alignment_path = os.sep.join(wav_filename.split('/')[:-3]) + os.sep + 'phonemes' + os.sep + utterence_key.split('_')[0] + os.sep \
                        + utterence_key + '.TextGrid'
                    tg = textgrid.TextGrid()
                    tg.read(phonemes_alignment_path)
                    # NOTE(review): tiers[1] is assumed to be the phoneme tier
                    # of the TextGrid — confirm against the alignment files.
                    entry = {
                        'encoding_indices': encoding_indices[i].detach().cpu().numpy(),
                        'groundtruth': tg.tiers[1],
                        'shifting_time': shifting_times[i].detach().cpu().item()
                    }
                    tokens_selections.append(entry)
        ConsoleLogger.status(val_speaker_ids)
        ConsoleLogger.status('{} tokens selections retreived'.format(len(tokens_selections)))
        phonemes_mapping = dict()
        # For each tokens selections (i.e. the number of valuations)
        for entry in tokens_selections:
            encoding_indices = entry['encoding_indices']
            unified_encoding_indices_time_scale = self._compute_unified_time_scale(
                encoding_indices.shape[0], downsampling_factor=2) # Compute the time scale array for each token
            """
            Search the grountruth phoneme where the selected token index time scale
            is within the groundtruth interval.
            Then, it adds the selected token index in the list of indices selected for
            the a specific token in the tokens mapping dictionnary.
            """
            for i in range(len(unified_encoding_indices_time_scale)):
                # Shift the token timestamp back to the original recording's
                # time axis before searching the groundtruth intervals.
                index_time_scale = unified_encoding_indices_time_scale[i] + entry['shifting_time']
                corresponding_phoneme = None
                for interval in entry['groundtruth']:
                    # TODO: replace that by nearest interpolation
                    if index_time_scale >= interval.minTime and index_time_scale <= interval.maxTime:
                        corresponding_phoneme = interval.mark
                        break
                if not corresponding_phoneme:
                    ConsoleLogger.warn("Corresponding phoneme not found. unified_encoding_indices_time_scale[{}]: {}"
                        "entry['shifting_time']: {} index_time_scale: {}".format(i, unified_encoding_indices_time_scale[i],
                        entry['shifting_time'], index_time_scale))
                # NOTE(review): when no phoneme is found, the token is still
                # recorded under the key None.
                if corresponding_phoneme not in phonemes_mapping:
                    phonemes_mapping[corresponding_phoneme] = list()
                phonemes_mapping[corresponding_phoneme].append(encoding_indices[i][0])
        ConsoleLogger.status('phonemes_mapping: {}'.format(phonemes_mapping))
        tokens_mapping = dict() # dictionnary that will contain the distribution for each token to fits with a certain phoneme
        """
        Fill the tokens_mapping such that for each token index (key)
        it contains the list of tuple of (phoneme, prob) where prob
        is the probability that the token fits this phoneme.
        """
        for phoneme, indices in phonemes_mapping.items():
            for index in list(set(indices)):
                if index not in tokens_mapping:
                    tokens_mapping[index] = list()
                tokens_mapping[index].append((phoneme, indices.count(index) / len(indices)))
        # Sort the probabilities for each token
        for index, distribution in tokens_mapping.items():
            tokens_mapping[index] = list(sorted(distribution, key = lambda x: x[1], reverse=True))
        ConsoleLogger.status('tokens_mapping: {}'.format(tokens_mapping))
        with open(self._results_path + os.sep + self._experiment_name + '_phonemes_mapping.pickle', 'wb') as f:
            pickle.dump(phonemes_mapping, f)
        with open(self._results_path + os.sep + self._experiment_name + '_tokens_mapping.pickle', 'wb') as f:
            pickle.dump(tokens_mapping, f)
    def _compute_speaker_dependency_stats(self):
        """Investigate whether the supposed phonemes stored in the embedding
        space are speaker-independent.

        The intended algorithm:
        - Evaluate the model on the validation dataset, saving each resulting
          embedding with the corresponding speaker;
        - Group the embeddings by speaker;
        - Compute the distribution of each embedding;
        - Compute the distances between all possible distribution couples,
          using a distribution distance (e.g. entropy), and plot them.

        NOTE(review): this method is unfinished — only the speaker ids are
        collected; the embedding accumulation and all statistics below are
        still commented out. Also requires `torch` to be imported at module
        level (`import torch.nn.functional as F` does not bind the name
        `torch`).
        """
        all_speaker_ids = list()
        # Accumulator for the embeddings; currently never filled (see FIXME).
        all_embeddings = torch.tensor([]).to(self._device)
        with tqdm(self._data_stream.validation_loader) as bar:
            for data in bar:
                valid_originals = data['input_features'].to(self._device).permute(0, 2, 1).contiguous().float()
                speaker_ids = data['speaker_id'].to(self._device)
                wav_filenames = data['wav_filename']
                z = self._model.encoder(valid_originals)
                z = self._model.pre_vq_conv(z)
                _, quantized, _, _, _, _, _, \
                    _, _, _, _ = self._model.vq(z)
                valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)
                B = valid_reconstructions.size(0)
                all_speaker_ids.append(speaker_ids.detach().cpu().numpy().tolist())
                #torch.cat(all_embeddings, self._model.vq.embedding.weight.data) # FIXME
        # Remaining steps, not implemented yet:
        # - Group the embeddings by speaker: create a tensor/numpy per speaker id from all_embeddings
        # - Compute the distribution of each embedding (seaborn histogram, softmax)
        # - Compute all the distances between all possible distribution couples, using
        #   a distribution distance (e.g. entropy) and plot them (seaborn histogram?)
        # Snippet
        #_embedding_distances = [torch.dist(items[0], items[1], 2).to(self._device) for items in combinations(self._embedding.weight, r=2)]
        #embedding_distances = torch.tensor(_embedding_distances).to(self._device)
def _compute_entropy_distributions(self):
original_distribution = list()
quantized_distribution = list()
reconstruction_distribution = list()
with tqdm(self._data_stream.validation_loader) as bar:
for data in bar:
valid_originals = data['input_features'].to(self._device).permute(0, 2, 1).contiguous().float()
speaker_ids = data['speaker_id'].to(self._device)
original_probs = F.softmax(valid_originals[0], dim=0).detach().cpu()
original_entropy = -torch.sum(original_probs * torch.log(original_probs + 1e-10))
z = self._model.encoder(valid_originals)
z = self._model.pre_vq_conv(z)
_, quantized, _, _, _, _, _, \
_, _, _, _ = self._model.vq(z)
valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)
quantized_probs = F.softmax(quantized[0], dim=1).detach().cpu()
quantized_entropy = -torch.sum(quantized_probs * torch.log(quantized_probs + 1e-10))
reconstruction_probs = F.softmax(valid_reconstructions[0], dim=0).detach().cpu()
reconstruction_entropy = -torch.sum(reconstruction_probs * torch.log(reconstruction_probs + 1e-10))
original_distribution.append(original_entropy.detach().cpu().numpy())
quantized_distribution.append(quantized_entropy.detach().cpu().numpy())
reconstruction_distribution.append(reconstruction_entropy.detach().cpu().numpy())
fig, axs = plt.subplots(3, 1, figsize=(30, 20), sharex=True)
original_distribution = np.asarray(original_distribution).ravel()
quantized_distribution = np.asarray(quantized_distribution).ravel()
reconstruction_distribution = np.asarray(reconstruction_distribution).ravel()
def dump_distribution(results_path, experiment_name, distribution_name, distribution):
with open(results_path + os.sep + experiment_name + '_' + distribution_name + '.pickle', 'wb') as f:
pickle.dump(distribution_name, f)
dump_distribution(self._results_path, self._experiment_name, 'original_distribution', original_distribution)
dump_distribution(self._results_path, self._experiment_name, 'quantized_distribution', quantized_distribution)
dump_distribution(self._results_path, self._experiment_name, 'reconstruction_distribution', reconstruction_distribution)
sns.distplot(original_distribution, hist=True, kde=False, ax=axs[0], norm_hist=True)
axs[0].set_title('Entropy distribution of validation dataset')
sns.distplot(quantized_distribution, hist=True, kde=False, ax=axs[1], norm_hist=True)
axs[1].set_title('Entropy distribution of quantized validation dataset')
sns.distplot(reconstruction_distribution, hist=True, kde=False, ax=axs[2], norm_hist=True)
axs[2].set_title('Entropy distribution of reconstructed validation dataset')
output_path = self._results_path + os.sep + self._experiment_name + '_entropy-stats-plot.png'
fig.savefig(output_path, bbox_inches='tight', pad_inches=0)
plt.close(fig)
| #####################################################################################
# MIT License #
# #
# Copyright (C) 2019 <NAME> #
# #
# This file is part of VQ-VAE-Speech. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from dataset.spectrogram_parser import SpectrogramParser
from dataset.vctk import VCTK
from error_handling.console_logger import ConsoleLogger
from evaluation.alignment_stats import AlignmentStats
from evaluation.embedding_space_stats import EmbeddingSpaceStats
import matplotlib.pyplot as plt
import torch.nn.functional as F
import os
import numpy as np
from textwrap import wrap
import seaborn as sns
import textgrid
from tqdm import tqdm
import pickle
class Evaluator(object):
    """Post-training evaluation of a VQ-VAE speech model: plots and statistics
    written under a results directory, prefixed by the experiment name."""

    def __init__(self, device, model, data_stream, configuration, results_path, experiment_name):
        """
        Args:
            device: torch device the model and batches live on.
            model: trained VQ-VAE model (encoder / pre_vq_conv / vq / decoder).
            data_stream: provides validation_loader, speaker_dic, normalizer, etc.
            configuration: dict of experiment settings (data_root, verbose, ...).
            results_path: directory where plots and pickles are saved.
            experiment_name: prefix for every output file.
        """
        self._device = device
        self._model = model
        self._data_stream = data_stream
        self._configuration = configuration
        # VCTK wrapper used for utterance texts and phoneme alignment paths.
        self._vctk = VCTK(self._configuration['data_root'], ratio=self._configuration['train_val_split'])
        self._results_path = results_path
        self._experiment_name = experiment_name
    def evaluate(self, evaluation_options):
        """
        Run the evaluations selected in `evaluation_options` (a dict of boolean
        flags plus 'alignment_subset') and write every result under
        self._results_path.
        """
        self._model.eval()
        # These three plot evaluations share one forward pass (_evaluate_once).
        if evaluation_options['plot_comparaison_plot'] or \
            evaluation_options['plot_quantized_embedding_spaces'] or \
            evaluation_options['plot_distances_histogram']:
            evaluation_entry = self._evaluate_once()
        if evaluation_options['plot_comparaison_plot']:
            self._compute_comparaison_plot(evaluation_entry)
        if evaluation_options['plot_quantized_embedding_spaces']:
            EmbeddingSpaceStats.compute_and_plot_quantized_embedding_space_projections(
                self._results_path, self._experiment_name, evaluation_entry,
                self._model.vq.embedding, self._data_stream.validation_batch_size
            )
        if evaluation_options['plot_distances_histogram']:
            self._plot_distances_histogram(evaluation_entry)
        #self._test_denormalization(evaluation_entry)
        if evaluation_options['compute_many_to_one_mapping']:
            self._many_to_one_mapping()
        # The alignment-based evaluations share one AlignmentStats instance.
        if evaluation_options['compute_alignments'] or \
            evaluation_options['compute_clustering_metrics'] or \
            evaluation_options['compute_groundtruth_average_phonemes_number']:
            alignment_stats = AlignmentStats(
                self._data_stream,
                self._vctk,
                self._configuration,
                self._device,
                self._model,
                self._results_path,
                self._experiment_name,
                evaluation_options['alignment_subset']
            )
        if evaluation_options['compute_alignments']:
            # Groundtruth alignments are computed once and cached as a pickle.
            groundtruth_alignments_path = self._results_path + os.sep + \
                'vctk_{}_groundtruth_alignments.pickle'.format(evaluation_options['alignment_subset'])
            if not os.path.isfile(groundtruth_alignments_path):
                alignment_stats.compute_groundtruth_alignments()
                alignment_stats.compute_groundtruth_bigrams_matrix(wo_diag=True)
                alignment_stats.compute_groundtruth_bigrams_matrix(wo_diag=False)
                alignment_stats.compute_groundtruth_phonemes_frequency()
            else:
                ConsoleLogger.status('Groundtruth alignments already exist')
            # Same caching scheme for the model's empirical alignments.
            empirical_alignments_path = self._results_path + os.sep + self._experiment_name + \
                '_vctk_{}_empirical_alignments.pickle'.format(evaluation_options['alignment_subset'])
            if not os.path.isfile(empirical_alignments_path):
                alignment_stats.compute_empirical_alignments()
                alignment_stats.compute_empirical_bigrams_matrix(wo_diag=True)
                alignment_stats.compute_empirical_bigrams_matrix(wo_diag=False)
                alignment_stats.comupte_empirical_encodings_frequency()
            else:
                ConsoleLogger.status('Empirical alignments already exist')
        if evaluation_options['compute_clustering_metrics']:
            alignment_stats.compute_clustering_metrics()
        if evaluation_options['compute_groundtruth_average_phonemes_number']:
            alignment_stats.compute_groundtruth_average_phonemes_number()
    def _evaluate_once(self):
        """
        Run one forward pass on a single validation batch and return every
        intermediate tensor (inputs, VQ outputs, distances, reconstruction)
        in a dict keyed by name, for use by the plotting helpers.
        """
        self._model.eval()
        data = next(iter(self._data_stream.validation_loader))
        preprocessed_audio = data['preprocessed_audio'].to(self._device)
        valid_originals = data['input_features'].to(self._device)
        speaker_ids = data['speaker_id'].to(self._device)
        target = data['output_features'].to(self._device)
        wav_filename = data['wav_filename']
        shifting_time = data['shifting_time'].to(self._device)
        preprocessed_length = data['preprocessed_length'].to(self._device)
        # (B, T, C) -> (B, C, T) as expected by the convolutional encoder.
        valid_originals = valid_originals.permute(0, 2, 1).contiguous().float()
        batch_size = valid_originals.size(0)
        target = target.permute(0, 2, 1).contiguous().float()
        wav_filename = wav_filename[0][0]
        z = self._model.encoder(valid_originals)
        z = self._model.pre_vq_conv(z)
        _, quantized, _, encodings, distances, encoding_indices, _, \
            encoding_distances, embedding_distances, frames_vs_embedding_distances, \
            concatenated_quantized = self._model.vq(z)
        # [0]: keep only the first reconstruction of the batch.
        valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)[0]
        return {
            'preprocessed_audio': preprocessed_audio,
            'valid_originals': valid_originals,
            'speaker_ids': speaker_ids,
            'target': target,
            'wav_filename': wav_filename,
            'shifting_time': shifting_time,
            'preprocessed_length': preprocessed_length,
            'batch_size': batch_size,
            'quantized': quantized,
            'encodings': encodings,
            'distances': distances,
            'encoding_indices': encoding_indices,
            'encoding_distances': encoding_distances,
            'embedding_distances': embedding_distances,
            'frames_vs_embedding_distances': frames_vs_embedding_distances,
            'concatenated_quantized': concatenated_quantized,
            'valid_reconstructions': valid_reconstructions
        }
    def _compute_comparaison_plot(self, evaluation_entry):
        """
        Plot, on a shared time axis: waveform, spectrogram, input MFCCs,
        softmax of VQ distances, encodings and the reconstruction, and save
        the figure as '<experiment>_evaluation-comparaison-plot.png'.
        """
        utterence_key = evaluation_entry['wav_filename'].split('/')[-1].replace('.wav', '')
        utterence = self._vctk.utterences[utterence_key].replace('\n', '')
        phonemes_alignment_path = os.sep.join(evaluation_entry['wav_filename'].split('/')[:-3]) \
            + os.sep + 'phonemes' + os.sep + utterence_key.split('_')[0] + os.sep \
            + utterence_key + '.TextGrid'
        #tg = textgrid.TextGrid()
        #tg.read(phonemes_alignment_path)
        #for interval in tg.tiers[0]:
        ConsoleLogger.status('Original utterence: {}'.format(utterence))
        if self._configuration['verbose']:
            ConsoleLogger.status('utterence: {}'.format(utterence))
        spectrogram_parser = SpectrogramParser()
        preprocessed_audio = evaluation_entry['preprocessed_audio'].detach().cpu()[0].numpy().squeeze()
        spectrogram = spectrogram_parser.parse_audio(preprocessed_audio).contiguous()
        spectrogram = spectrogram.detach().cpu().numpy()
        valid_originals = evaluation_entry['valid_originals'].detach().cpu()[0].numpy()
        # Negated distances: smaller distance -> larger softmax probability.
        probs = F.softmax(-evaluation_entry['distances'][0], dim=1).detach().cpu().transpose(0, 1).contiguous()
        #target = self._target.detach().cpu()[0].numpy()
        valid_reconstructions = evaluation_entry['valid_reconstructions'].detach().cpu().numpy()
        fig, axs = plt.subplots(6, 1, figsize=(35, 30), sharex=True)
        # Waveform of the original speech signal
        axs[0].set_title('Waveform of the original speech signal')
        axs[0].plot(np.arange(len(preprocessed_audio)) / float(self._configuration['sampling_rate']), preprocessed_audio)
        # TODO: Add number of encoding indices at the same rate of the tokens with _compute_unified_time_scale()
        """
        # Example of vertical red lines
        xposition = [0.3, 0.4, 0.45]
        for xc in xposition:
            plt.axvline(x=xc, color='r', linestyle='-', linewidth=1)
        """
        # Spectrogram of the original speech signal
        axs[1].set_title('Spectrogram of the original speech signal')
        self._plot_pcolormesh(spectrogram, fig, x=self._compute_unified_time_scale(spectrogram.shape[1]), axis=axs[1])
        # MFCC + d + a of the original speech signal
        axs[2].set_title('Augmented MFCC + d + a #filters=13+13+13 of the original speech signal')
        self._plot_pcolormesh(valid_originals, fig, x=self._compute_unified_time_scale(valid_originals.shape[1]), axis=axs[2])
        # Softmax of distances computed in VQ
        axs[3].set_title('Softmax of distances computed in VQ\n($||z_e(x) - e_i||^2_2$ with $z_e(x)$ the output of the encoder prior to quantization)')
        self._plot_pcolormesh(probs, fig, x=self._compute_unified_time_scale(probs.shape[1], downsampling_factor=2), axis=axs[3])
        encodings = evaluation_entry['encodings'].detach().cpu().numpy()
        axs[4].set_title('Encodings')
        self._plot_pcolormesh(encodings[0].transpose(), fig, x=self._compute_unified_time_scale(encodings[0].transpose().shape[1],
            downsampling_factor=2), axis=axs[4])
        # Actual reconstruction
        axs[5].set_title('Actual reconstruction')
        self._plot_pcolormesh(valid_reconstructions, fig, x=self._compute_unified_time_scale(valid_reconstructions.shape[1]), axis=axs[5])
        output_path = self._results_path + os.sep + self._experiment_name + '_evaluation-comparaison-plot.png'
        plt.savefig(output_path, bbox_inches='tight', pad_inches=0)
        plt.close()
def _plot_pcolormesh(self, data, fig, x=None, y=None, axis=None):
axis = plt.gca() if axis is None else axis # default axis if None
x = np.arange(data.shape[1]) if x is None else x # default x shape if None
y = np.arange(data.shape[0]) if y is None else y # default y shape if None
c = axis.pcolormesh(x, y, data)
fig.colorbar(c, ax=axis)
def _compute_unified_time_scale(self, shape, winstep=0.01, downsampling_factor=1):
return np.arange(shape) * winstep * downsampling_factor
    def _plot_distances_histogram(self, evaluation_entry):
        """
        Plot three stacked histograms — encoding/encoding, embedding/embedding
        and frame/embedding distances — and save the figure as
        '<experiment>_distances-histogram-plot.png'.
        """
        encoding_distances = evaluation_entry['encoding_distances'][0].detach().cpu().numpy()
        embedding_distances = evaluation_entry['embedding_distances'].detach().cpu().numpy()
        frames_vs_embedding_distances = evaluation_entry['frames_vs_embedding_distances'].detach()[0].cpu().transpose(0, 1).numpy().ravel()
        if self._configuration['verbose']:
            ConsoleLogger.status('encoding_distances[0].size(): {}'.format(encoding_distances.shape))
            ConsoleLogger.status('embedding_distances.size(): {}'.format(embedding_distances.shape))
            ConsoleLogger.status('frames_vs_embedding_distances[0].shape: {}'.format(frames_vs_embedding_distances.shape))
        fig, axs = plt.subplots(3, 1, figsize=(30, 20), sharex=True)
        axs[0].set_title('\n'.join(wrap('Histogram of the distances between the'
            ' encodings vectors', 60)))
        sns.distplot(encoding_distances, hist=True, kde=False, ax=axs[0], norm_hist=True)
        axs[1].set_title('\n'.join(wrap('Histogram of the distances between the'
            ' embeddings vectors', 60)))
        sns.distplot(embedding_distances, hist=True, kde=False, ax=axs[1], norm_hist=True)
        axs[2].set_title(
            'Histogram of the distances computed in'
            ' VQ\n($||z_e(x) - e_i||^2_2$ with $z_e(x)$ the output of the encoder'
            ' prior to quantization)'
        )
        sns.distplot(frames_vs_embedding_distances, hist=True, kde=False, ax=axs[2], norm_hist=True)
        output_path = self._results_path + os.sep + self._experiment_name + '_distances-histogram-plot.png'
        fig.savefig(output_path, bbox_inches='tight', pad_inches=0)
        plt.close(fig)
    def _test_denormalization(self, evaluation_entry):
        """
        Sanity-check plot: original and reconstructed features next to their
        denormalized versions (undoing the train-set mean/std normalization),
        saved as '<experiment>_test-denormalization-plot.png'.
        """
        valid_originals = evaluation_entry['valid_originals'].detach().cpu()[0].numpy()
        valid_reconstructions = evaluation_entry['valid_reconstructions'].detach().cpu().numpy()
        normalizer = self._data_stream.normalizer
        # Invert z-score normalization: x = std * x_norm + mean (per feature).
        denormalized_valid_originals = (normalizer['train_std'] * valid_originals.transpose() + normalizer['train_mean']).transpose()
        denormalized_valid_reconstructions = (normalizer['train_std'] * valid_reconstructions.transpose() + normalizer['train_mean']).transpose()
        # TODO: Remove the deltas and the accelerations, remove the zeros because it's the
        # energy, and compute the distance between the two
        fig, axs = plt.subplots(4, 1, figsize=(30, 20), sharex=True)
        # MFCC + d + a of the original speech signal
        axs[0].set_title('Augmented MFCC + d + a #filters=13+13+13 of the original speech signal')
        self._plot_pcolormesh(valid_originals, fig, x=self._compute_unified_time_scale(valid_originals.shape[1]), axis=axs[0])
        # Actual reconstruction
        axs[1].set_title('Actual reconstruction')
        self._plot_pcolormesh(valid_reconstructions, fig, x=self._compute_unified_time_scale(valid_reconstructions.shape[1]), axis=axs[1])
        # Denormalization of the original speech signal
        axs[2].set_title('Denormalized target')
        self._plot_pcolormesh(denormalized_valid_originals, fig, x=self._compute_unified_time_scale(denormalized_valid_originals.shape[1]), axis=axs[2])
        # Denormalization of the original speech signal
        axs[3].set_title('Denormalized reconstruction')
        self._plot_pcolormesh(denormalized_valid_reconstructions, fig, x=self._compute_unified_time_scale(denormalized_valid_reconstructions.shape[1]), axis=axs[3])
        output_path = self._results_path + os.sep + self._experiment_name + '_test-denormalization-plot.png'
        plt.savefig(output_path, bbox_inches='tight', pad_inches=0)
        plt.close()
    def _many_to_one_mapping(self):
        """
        Map VQ token indices to groundtruth phonemes over the validation set.

        For each validation utterance, aligns the model's encoding indices with
        the VCTK TextGrid phoneme intervals, builds `phonemes_mapping`
        (phoneme -> list of token indices) and `tokens_mapping`
        (token index -> sorted list of (phoneme, probability)), then pickles
        both into the results directory.
        """
        # TODO: fix it for batch size greater than one
        tokens_selections = list()
        val_speaker_ids = set()
        with tqdm(self._data_stream.validation_loader) as bar:
            for data in bar:
                valid_originals = data['input_features'].to(self._device).permute(0, 2, 1).contiguous().float()
                speaker_ids = data['speaker_id'].to(self._device)
                shifting_times = data['shifting_time'].to(self._device)
                wav_filenames = data['wav_filename']
                # Speaker id is encoded in the wav file's parent directory name.
                speaker_id = wav_filenames[0][0].split(os.sep)[-2]
                val_speaker_ids.add(speaker_id)
                # Skip speakers for which no phoneme alignments exist.
                if speaker_id not in os.listdir(self._vctk.raw_folder + os.sep + 'VCTK-Corpus' + os.sep + 'phonemes'):
                    # TODO: log the missing folders
                    continue
                z = self._model.encoder(valid_originals)
                z = self._model.pre_vq_conv(z)
                _, quantized, _, encodings, _, encoding_indices, _, \
                    _, _, _, _ = self._model.vq(z)
                valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)
                B = valid_reconstructions.size(0)
                encoding_indices = encoding_indices.view(B, -1, 1)
                for i in range(len(valid_reconstructions)):
                    wav_filename = wav_filenames[0][i]
                    utterence_key = wav_filename.split('/')[-1].replace('.wav', '')
                    phonemes_alignment_path = os.sep.join(wav_filename.split('/')[:-3]) + os.sep + 'phonemes' + os.sep + utterence_key.split('_')[0] + os.sep \
                        + utterence_key + '.TextGrid'
                    tg = textgrid.TextGrid()
                    tg.read(phonemes_alignment_path)
                    entry = {
                        'encoding_indices': encoding_indices[i].detach().cpu().numpy(),
                        'groundtruth': tg.tiers[1],
                        'shifting_time': shifting_times[i].detach().cpu().item()
                    }
                    tokens_selections.append(entry)
        ConsoleLogger.status(val_speaker_ids)
        ConsoleLogger.status('{} tokens selections retreived'.format(len(tokens_selections)))
        phonemes_mapping = dict()
        # For each tokens selections (i.e. the number of valuations)
        for entry in tokens_selections:
            encoding_indices = entry['encoding_indices']
            unified_encoding_indices_time_scale = self._compute_unified_time_scale(
                encoding_indices.shape[0], downsampling_factor=2) # Compute the time scale array for each token
            """
            Search the grountruth phoneme where the selected token index time scale
            is within the groundtruth interval.
            Then, it adds the selected token index in the list of indices selected for
            the a specific token in the tokens mapping dictionnary.
            """
            for i in range(len(unified_encoding_indices_time_scale)):
                index_time_scale = unified_encoding_indices_time_scale[i] + entry['shifting_time']
                corresponding_phoneme = None
                for interval in entry['groundtruth']:
                    # TODO: replace that by nearest interpolation
                    if index_time_scale >= interval.minTime and index_time_scale <= interval.maxTime:
                        corresponding_phoneme = interval.mark
                        break
                if not corresponding_phoneme:
                    ConsoleLogger.warn("Corresponding phoneme not found. unified_encoding_indices_time_scale[{}]: {}"
                        "entry['shifting_time']: {} index_time_scale: {}".format(i, unified_encoding_indices_time_scale[i],
                        entry['shifting_time'], index_time_scale))
                if corresponding_phoneme not in phonemes_mapping:
                    phonemes_mapping[corresponding_phoneme] = list()
                phonemes_mapping[corresponding_phoneme].append(encoding_indices[i][0])
        ConsoleLogger.status('phonemes_mapping: {}'.format(phonemes_mapping))
        tokens_mapping = dict() # dictionnary that will contain the distribution for each token to fits with a certain phoneme
        """
        Fill the tokens_mapping such that for each token index (key)
        it contains the list of tuple of (phoneme, prob) where prob
        is the probability that the token fits this phoneme.
        """
        for phoneme, indices in phonemes_mapping.items():
            for index in list(set(indices)):
                if index not in tokens_mapping:
                    tokens_mapping[index] = list()
                tokens_mapping[index].append((phoneme, indices.count(index) / len(indices)))
        # Sort the probabilities for each token
        for index, distribution in tokens_mapping.items():
            tokens_mapping[index] = list(sorted(distribution, key = lambda x: x[1], reverse=True))
        ConsoleLogger.status('tokens_mapping: {}'.format(tokens_mapping))
        with open(self._results_path + os.sep + self._experiment_name + '_phonemes_mapping.pickle', 'wb') as f:
            pickle.dump(phonemes_mapping, f)
        with open(self._results_path + os.sep + self._experiment_name + '_tokens_mapping.pickle', 'wb') as f:
            pickle.dump(tokens_mapping, f)
def _compute_speaker_dependency_stats(self):
"""
The goal of this function is to investiguate wether or not the supposed
phonemes stored in the embeddings space are speaker independents.
The algorithm is as follow:
- Evaluate the model using the val dataset. Save each resulting
embedding, with the corresponding speaker;
- Group the embeddings by speaker;
- Compute the distribution of each embedding;
- Compute all the distances between all possible distribution couples, using
a distribution distance (e.g. entropy) and plot them.
"""
all_speaker_ids = list()
all_embeddings = torch.tensor([]).to(self._device)
with tqdm(self._data_stream.validation_loader) as bar:
for data in bar:
valid_originals = data['input_features'].to(self._device).permute(0, 2, 1).contiguous().float()
speaker_ids = data['speaker_id'].to(self._device)
wav_filenames = data['wav_filename']
z = self._model.encoder(valid_originals)
z = self._model.pre_vq_conv(z)
_, quantized, _, _, _, _, _, \
_, _, _, _ = self._model.vq(z)
valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)
B = valid_reconstructions.size(0)
all_speaker_ids.append(speaker_ids.detach().cpu().numpy().tolist())
#torch.cat(all_embeddings, self._model.vq.embedding.weight.data) # FIXME
# - Group the embeddings by speaker: create a tensor/numpy per speaker id from all_embeddings
# - Compute the distribution of each embedding (seaborn histogram, softmax)
# - Compute all the distances between all possible distribution couples, using
# a distribution distance (e.g. entropy) and plot them (seaborn histogram?)
# Snippet
#_embedding_distances = [torch.dist(items[0], items[1], 2).to(self._device) for items in combinations(self._embedding.weight, r=2)]
#embedding_distances = torch.tensor(_embedding_distances).to(self._device)
def _compute_entropy_distributions(self):
original_distribution = list()
quantized_distribution = list()
reconstruction_distribution = list()
with tqdm(self._data_stream.validation_loader) as bar:
for data in bar:
valid_originals = data['input_features'].to(self._device).permute(0, 2, 1).contiguous().float()
speaker_ids = data['speaker_id'].to(self._device)
original_probs = F.softmax(valid_originals[0], dim=0).detach().cpu()
original_entropy = -torch.sum(original_probs * torch.log(original_probs + 1e-10))
z = self._model.encoder(valid_originals)
z = self._model.pre_vq_conv(z)
_, quantized, _, _, _, _, _, \
_, _, _, _ = self._model.vq(z)
valid_reconstructions = self._model.decoder(quantized, self._data_stream.speaker_dic, speaker_ids)
quantized_probs = F.softmax(quantized[0], dim=1).detach().cpu()
quantized_entropy = -torch.sum(quantized_probs * torch.log(quantized_probs + 1e-10))
reconstruction_probs = F.softmax(valid_reconstructions[0], dim=0).detach().cpu()
reconstruction_entropy = -torch.sum(reconstruction_probs * torch.log(reconstruction_probs + 1e-10))
original_distribution.append(original_entropy.detach().cpu().numpy())
quantized_distribution.append(quantized_entropy.detach().cpu().numpy())
reconstruction_distribution.append(reconstruction_entropy.detach().cpu().numpy())
fig, axs = plt.subplots(3, 1, figsize=(30, 20), sharex=True)
original_distribution = np.asarray(original_distribution).ravel()
quantized_distribution = np.asarray(quantized_distribution).ravel()
reconstruction_distribution = np.asarray(reconstruction_distribution).ravel()
def dump_distribution(results_path, experiment_name, distribution_name, distribution):
with open(results_path + os.sep + experiment_name + '_' + distribution_name + '.pickle', 'wb') as f:
pickle.dump(distribution_name, f)
dump_distribution(self._results_path, self._experiment_name, 'original_distribution', original_distribution)
dump_distribution(self._results_path, self._experiment_name, 'quantized_distribution', quantized_distribution)
dump_distribution(self._results_path, self._experiment_name, 'reconstruction_distribution', reconstruction_distribution)
sns.distplot(original_distribution, hist=True, kde=False, ax=axs[0], norm_hist=True)
axs[0].set_title('Entropy distribution of validation dataset')
sns.distplot(quantized_distribution, hist=True, kde=False, ax=axs[1], norm_hist=True)
axs[1].set_title('Entropy distribution of quantized validation dataset')
sns.distplot(reconstruction_distribution, hist=True, kde=False, ax=axs[2], norm_hist=True)
axs[2].set_title('Entropy distribution of reconstructed validation dataset')
output_path = self._results_path + os.sep + self._experiment_name + '_entropy-stats-plot.png'
fig.savefig(output_path, bbox_inches='tight', pad_inches=0)
plt.close(fig)
| en | 0.696 | ##################################################################################### # MIT License # # # # Copyright (C) 2019 <NAME> # # # # This file is part of VQ-VAE-Speech. # # # # Permission is hereby granted, free of charge, to any person obtaining a copy # # of this software and associated documentation files (the "Software"), to deal # # in the Software without restriction, including without limitation the rights # # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # # copies of the Software, and to permit persons to whom the Software is # # furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in all # # copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # # SOFTWARE. 
# ##################################################################################### #self._test_denormalization(evaluation_entry) #tg = textgrid.TextGrid() #tg.read(phonemes_alignment_path) #for interval in tg.tiers[0]: #target = self._target.detach().cpu()[0].numpy() # Waveform of the original speech signal # TODO: Add number of encoding indices at the same rate of the tokens with _compute_unified_time_scale() # Example of vertical red lines xposition = [0.3, 0.4, 0.45] for xc in xposition: plt.axvline(x=xc, color='r', linestyle='-', linewidth=1) # Spectrogram of the original speech signal # MFCC + d + a of the original speech signal #filters=13+13+13 of the original speech signal') # Softmax of distances computed in VQ # Actual reconstruction # default axis if None # default x shape if None # default y shape if None # TODO: Remove the deltas and the accelerations, remove the zeros because it's the # energy, and compute the distance between the two # MFCC + d + a of the original speech signal #filters=13+13+13 of the original speech signal') # Actual reconstruction # Denormalization of the original speech signal # Denormalization of the original speech signal # TODO: fix it for batch size greater than one # TODO: log the missing folders # For each tokens selections (i.e. the number of valuations) # Compute the time scale array for each token Search the grountruth phoneme where the selected token index time scale is within the groundtruth interval. Then, it adds the selected token index in the list of indices selected for the a specific token in the tokens mapping dictionnary. # TODO: replace that by nearest interpolation # dictionnary that will contain the distribution for each token to fits with a certain phoneme Fill the tokens_mapping such that for each token index (key) it contains the list of tuple of (phoneme, prob) where prob is the probability that the token fits this phoneme. 
# Sort the probabilities for each token The goal of this function is to investiguate wether or not the supposed phonemes stored in the embeddings space are speaker independents. The algorithm is as follow: - Evaluate the model using the val dataset. Save each resulting embedding, with the corresponding speaker; - Group the embeddings by speaker; - Compute the distribution of each embedding; - Compute all the distances between all possible distribution couples, using a distribution distance (e.g. entropy) and plot them. #torch.cat(all_embeddings, self._model.vq.embedding.weight.data) # FIXME # - Group the embeddings by speaker: create a tensor/numpy per speaker id from all_embeddings # - Compute the distribution of each embedding (seaborn histogram, softmax) # - Compute all the distances between all possible distribution couples, using # a distribution distance (e.g. entropy) and plot them (seaborn histogram?) # Snippet #_embedding_distances = [torch.dist(items[0], items[1], 2).to(self._device) for items in combinations(self._embedding.weight, r=2)] #embedding_distances = torch.tensor(_embedding_distances).to(self._device) | 1.282597 | 1 |
# api/data_refinery_api/test/test_stats.py — AlexsLemonade/refinebio
from unittest.mock import patch
from django.test import override_settings
from django.urls import reverse
from rest_framework.test import APITestCase
from data_refinery_api.test.test_api_general import API_VERSION
from data_refinery_api.views.stats import get_batch_jobs_breakdown
# Fake AWS Batch queue names, injected via override_settings and served by the
# mocked get_jobs_in_queue below.
QUEUE_NAMES = [
    "data-refinery-batch-compendia-queue-tests-dev",
    "data-refinery-batch-smasher-queue-tests-dev",
    "data-refinery-batch-workers-queue-tests-dev-0",
]
def dummy_get_jobs_in_queue(queue):
    """Test stand-in for get_jobs_in_queue that returns a canned job list per queue.

    Raises ValueError for any queue name not in QUEUE_NAMES.
    """
    if queue not in QUEUE_NAMES:
        # Fixed typo in the error message ("unrecognzied" -> "unrecognized").
        raise ValueError(f"Tried to get jobs for unrecognized job queue {queue}")

    return {
        # The queues are defined at the bottom of the file because they're pretty long
        "data-refinery-batch-compendia-queue-tests-dev": COMPENDIA_QUEUE,
        "data-refinery-batch-smasher-queue-tests-dev": SMASHER_QUEUE,
        "data-refinery-batch-workers-queue-tests-dev-0": WORKER_QUEUE,
    }[queue]
class StatsTestCases(APITestCase):
    """API tests for the /stats endpoint and the Batch jobs breakdown helper."""

    def test_stats_empty(self):
        # The stats endpoint must respond 200 even with an empty database.
        response = self.client.get(reverse("stats", kwargs={"version": API_VERSION}))
        self.assertEqual(response.status_code, 200)

    def test_stats(self):
        response = self.client.get(reverse("stats", kwargs={"version": API_VERSION}))
        self.assertEqual(response.status_code, 200)

    @patch("data_refinery_api.views.stats.get_jobs_in_queue")
    @override_settings(AWS_BATCH_QUEUE_ALL_NAMES=QUEUE_NAMES)
    def test_stats_get_batch_breakdown(self, mock_get_jobs_in_queue):
        """Make sure that the batch breakdown has the right stats"""
        mock_get_jobs_in_queue.side_effect = dummy_get_jobs_in_queue
        # force=True bypasses any caching so the mocked queues are read.
        breakdown = get_batch_jobs_breakdown(force=True)
        # The breakdown exposes exactly these six aggregation keys.
        self.assertEqual(
            set(breakdown.keys()),
            {
                "pending_jobs",
                "running_jobs",
                "pending_jobs_by_type",
                "running_jobs_by_type",
                "pending_jobs_by_queue",
                "running_jobs_by_queue",
            },
        )
        # Totals are the sums over all job types across all queues.
        self.assertEqual(
            breakdown["pending_jobs"],
            PENDING_COMPENDIA_JOBS
            + PENDING_SMASHER_JOBS
            + PENDING_DOWNLOADER_JOBS
            + PENDING_SALMON_JOBS
            + PENDING_AFFY_JOBS,
        )
        self.assertEqual(
            breakdown["running_jobs"],
            RUNNING_COMPENDIA_JOBS
            + RUNNING_SMASHER_JOBS
            + RUNNING_DOWNLOADER_JOBS
            + RUNNING_SALMON_JOBS
            + RUNNING_AFFY_JOBS,
        )
        # Per-type counts match the canned queue contents.
        self.assertEqual(
            breakdown["pending_jobs_by_type"],
            {
                "CREATE_COMPENDIA": PENDING_COMPENDIA_JOBS,
                "SMASHER": PENDING_SMASHER_JOBS,
                "DOWNLOADER": PENDING_DOWNLOADER_JOBS,
                "SALMON": PENDING_SALMON_JOBS,
                "AFFY_TO_PCL": PENDING_AFFY_JOBS,
            },
        )
        self.assertEqual(
            breakdown["running_jobs_by_type"],
            {
                "CREATE_COMPENDIA": RUNNING_COMPENDIA_JOBS,
                "SMASHER": RUNNING_SMASHER_JOBS,
                "DOWNLOADER": RUNNING_DOWNLOADER_JOBS,
                "SALMON": RUNNING_SALMON_JOBS,
                "AFFY_TO_PCL": RUNNING_AFFY_JOBS,
            },
        )
        # Per-queue counts: the worker queue holds downloader + salmon + affy jobs.
        self.assertEqual(
            breakdown["pending_jobs_by_queue"],
            {
                "data-refinery-batch-compendia-queue-tests-dev": PENDING_COMPENDIA_JOBS,
                "data-refinery-batch-smasher-queue-tests-dev": PENDING_SMASHER_JOBS,
                "data-refinery-batch-workers-queue-tests-dev-0": PENDING_DOWNLOADER_JOBS
                + PENDING_SALMON_JOBS
                + PENDING_AFFY_JOBS,
            },
        )
        self.assertEqual(
            breakdown["running_jobs_by_queue"],
            {
                "data-refinery-batch-compendia-queue-tests-dev": RUNNING_COMPENDIA_JOBS,
                "data-refinery-batch-smasher-queue-tests-dev": RUNNING_SMASHER_JOBS,
                "data-refinery-batch-workers-queue-tests-dev-0": RUNNING_DOWNLOADER_JOBS
                + RUNNING_SALMON_JOBS
                + RUNNING_AFFY_JOBS,
            },
        )
# ---------------------------------------------------------------------------
# Canned AWS Batch queue fixtures served by dummy_get_jobs_in_queue.
# The assertions above show that non-RUNNING statuses used here (PENDING,
# RUNNABLE, STARTING, SUBMITTED) are all counted as "pending" by the code
# under test, while SUCCEEDED jobs are ignored entirely.
# ---------------------------------------------------------------------------
PENDING_COMPENDIA_JOBS = 10
RUNNING_COMPENDIA_JOBS = 1
COMPENDIA_QUEUE = [
    *[
        {"jobName": f"tests_dev_CREATE_COMPENDIA_{i}", "status": "PENDING"}
        for i in range(PENDING_COMPENDIA_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_CREATE_COMPENDIA_{i}", "status": "RUNNING"}
        for i in range(RUNNING_COMPENDIA_JOBS)
    ],
]
PENDING_SMASHER_JOBS = 27
RUNNING_SMASHER_JOBS = 5
# Create some finished jobs that should get ignored
FINISHED_SMASHER_JOBS = 10
SMASHER_QUEUE = [
    *[
        {"jobName": f"tests_dev_SMASHER_{i}", "status": "RUNNABLE"}
        for i in range(PENDING_SMASHER_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_SMASHER_{i}", "status": "RUNNING"}
        for i in range(RUNNING_SMASHER_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_SMASHER_{i}", "status": "SUCCEEDED"}
        for i in range(FINISHED_SMASHER_JOBS)
    ],
]
# The worker queue mixes three job types (DOWNLOADER, SALMON, AFFY_TO_PCL).
PENDING_DOWNLOADER_JOBS = 14
RUNNING_DOWNLOADER_JOBS = 10
PENDING_SALMON_JOBS = 2
RUNNING_SALMON_JOBS = 8
PENDING_AFFY_JOBS = 9
RUNNING_AFFY_JOBS = 1
WORKER_QUEUE = [
    *[
        {"jobName": f"tests_dev_DOWNLOADER_1024_{i}", "status": "STARTING"}
        for i in range(PENDING_DOWNLOADER_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_DOWNLOADER_1024_{i}", "status": "RUNNING"}
        for i in range(RUNNING_DOWNLOADER_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_SALMON_1024_{i}", "status": "SUBMITTED"}
        for i in range(PENDING_SALMON_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_SALMON_1024_{i}", "status": "RUNNING"}
        for i in range(RUNNING_SALMON_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_AFFY_TO_PCL_1024_{i}", "status": "PENDING"}
        for i in range(PENDING_AFFY_JOBS)
    ],
    *[
        {"jobName": f"tests_dev_AFFY_TO_PCL_1024_{i}", "status": "RUNNING"}
        for i in range(RUNNING_AFFY_JOBS)
    ],
]
| from unittest.mock import patch
from django.test import override_settings
from django.urls import reverse
from rest_framework.test import APITestCase
from data_refinery_api.test.test_api_general import API_VERSION
from data_refinery_api.views.stats import get_batch_jobs_breakdown
QUEUE_NAMES = [
"data-refinery-batch-compendia-queue-tests-dev",
"data-refinery-batch-smasher-queue-tests-dev",
"data-refinery-batch-workers-queue-tests-dev-0",
]
def dummy_get_jobs_in_queue(queue):
if queue not in QUEUE_NAMES:
raise ValueError(f"Tried to get jobs for unrecognzied job queue {queue}")
return {
# The queues are defined at the bottom of the file because they're pretty long
"data-refinery-batch-compendia-queue-tests-dev": COMPENDIA_QUEUE,
"data-refinery-batch-smasher-queue-tests-dev": SMASHER_QUEUE,
"data-refinery-batch-workers-queue-tests-dev-0": WORKER_QUEUE,
}[queue]
class StatsTestCases(APITestCase):
def test_stats_empty(self):
response = self.client.get(reverse("stats", kwargs={"version": API_VERSION}))
self.assertEqual(response.status_code, 200)
def test_stats(self):
response = self.client.get(reverse("stats", kwargs={"version": API_VERSION}))
self.assertEqual(response.status_code, 200)
@patch("data_refinery_api.views.stats.get_jobs_in_queue")
@override_settings(AWS_BATCH_QUEUE_ALL_NAMES=QUEUE_NAMES)
def test_stats_get_batch_breakdown(self, mock_get_jobs_in_queue):
"""Make sure that the batch breakdown has the right stats"""
mock_get_jobs_in_queue.side_effect = dummy_get_jobs_in_queue
breakdown = get_batch_jobs_breakdown(force=True)
self.assertEqual(
set(breakdown.keys()),
{
"pending_jobs",
"running_jobs",
"pending_jobs_by_type",
"running_jobs_by_type",
"pending_jobs_by_queue",
"running_jobs_by_queue",
},
)
self.assertEqual(
breakdown["pending_jobs"],
PENDING_COMPENDIA_JOBS
+ PENDING_SMASHER_JOBS
+ PENDING_DOWNLOADER_JOBS
+ PENDING_SALMON_JOBS
+ PENDING_AFFY_JOBS,
)
self.assertEqual(
breakdown["running_jobs"],
RUNNING_COMPENDIA_JOBS
+ RUNNING_SMASHER_JOBS
+ RUNNING_DOWNLOADER_JOBS
+ RUNNING_SALMON_JOBS
+ RUNNING_AFFY_JOBS,
)
self.assertEqual(
breakdown["pending_jobs_by_type"],
{
"CREATE_COMPENDIA": PENDING_COMPENDIA_JOBS,
"SMASHER": PENDING_SMASHER_JOBS,
"DOWNLOADER": PENDING_DOWNLOADER_JOBS,
"SALMON": PENDING_SALMON_JOBS,
"AFFY_TO_PCL": PENDING_AFFY_JOBS,
},
)
self.assertEqual(
breakdown["running_jobs_by_type"],
{
"CREATE_COMPENDIA": RUNNING_COMPENDIA_JOBS,
"SMASHER": RUNNING_SMASHER_JOBS,
"DOWNLOADER": RUNNING_DOWNLOADER_JOBS,
"SALMON": RUNNING_SALMON_JOBS,
"AFFY_TO_PCL": RUNNING_AFFY_JOBS,
},
)
self.assertEqual(
breakdown["pending_jobs_by_queue"],
{
"data-refinery-batch-compendia-queue-tests-dev": PENDING_COMPENDIA_JOBS,
"data-refinery-batch-smasher-queue-tests-dev": PENDING_SMASHER_JOBS,
"data-refinery-batch-workers-queue-tests-dev-0": PENDING_DOWNLOADER_JOBS
+ PENDING_SALMON_JOBS
+ PENDING_AFFY_JOBS,
},
)
self.assertEqual(
breakdown["running_jobs_by_queue"],
{
"data-refinery-batch-compendia-queue-tests-dev": RUNNING_COMPENDIA_JOBS,
"data-refinery-batch-smasher-queue-tests-dev": RUNNING_SMASHER_JOBS,
"data-refinery-batch-workers-queue-tests-dev-0": RUNNING_DOWNLOADER_JOBS
+ RUNNING_SALMON_JOBS
+ RUNNING_AFFY_JOBS,
},
)
PENDING_COMPENDIA_JOBS = 10
RUNNING_COMPENDIA_JOBS = 1
COMPENDIA_QUEUE = [
*[
{"jobName": f"tests_dev_CREATE_COMPENDIA_{i}", "status": "PENDING"}
for i in range(PENDING_COMPENDIA_JOBS)
],
*[
{"jobName": f"tests_dev_CREATE_COMPENDIA_{i}", "status": "RUNNING"}
for i in range(RUNNING_COMPENDIA_JOBS)
],
]
PENDING_SMASHER_JOBS = 27
RUNNING_SMASHER_JOBS = 5
# Create some finished jobs that should get ignored
FINISHED_SMASHER_JOBS = 10
SMASHER_QUEUE = [
*[
{"jobName": f"tests_dev_SMASHER_{i}", "status": "RUNNABLE"}
for i in range(PENDING_SMASHER_JOBS)
],
*[
{"jobName": f"tests_dev_SMASHER_{i}", "status": "RUNNING"}
for i in range(RUNNING_SMASHER_JOBS)
],
*[
{"jobName": f"tests_dev_SMASHER_{i}", "status": "SUCCEEDED"}
for i in range(FINISHED_SMASHER_JOBS)
],
]
PENDING_DOWNLOADER_JOBS = 14
RUNNING_DOWNLOADER_JOBS = 10
PENDING_SALMON_JOBS = 2
RUNNING_SALMON_JOBS = 8
PENDING_AFFY_JOBS = 9
RUNNING_AFFY_JOBS = 1
WORKER_QUEUE = [
*[
{"jobName": f"tests_dev_DOWNLOADER_1024_{i}", "status": "STARTING"}
for i in range(PENDING_DOWNLOADER_JOBS)
],
*[
{"jobName": f"tests_dev_DOWNLOADER_1024_{i}", "status": "RUNNING"}
for i in range(RUNNING_DOWNLOADER_JOBS)
],
*[
{"jobName": f"tests_dev_SALMON_1024_{i}", "status": "SUBMITTED"}
for i in range(PENDING_SALMON_JOBS)
],
*[
{"jobName": f"tests_dev_SALMON_1024_{i}", "status": "RUNNING"}
for i in range(RUNNING_SALMON_JOBS)
],
*[
{"jobName": f"tests_dev_AFFY_TO_PCL_1024_{i}", "status": "PENDING"}
for i in range(PENDING_AFFY_JOBS)
],
*[
{"jobName": f"tests_dev_AFFY_TO_PCL_1024_{i}", "status": "RUNNING"}
for i in range(RUNNING_AFFY_JOBS)
],
] | en | 0.963576 | # The queues are defined at the bottom of the file because they're pretty long Make sure that the batch breakdown has the right stats # Create some finished jobs that should get ignored | 2.232027 | 2 |
accounts/tests.py | kilinger/marathon-rocketchat-hubot | 1 | 6615785 | # -*- coding: utf-8 -*-
from django.test import TestCase
from accounts.factories import CustomUserFactory
from rest_framework.authtoken.models import Token
from accounts.models import CustomUser
class CustomUserTest(TestCase):
    """Every CustomUser should automatically receive exactly one auth token."""

    def setUp(self):
        self.user = CustomUserFactory()

    def test_create_token(self):
        """Token exists for the first user, and counts stay in lockstep."""
        first_token = Token.objects.all()[0]
        self.assertEqual(self.user.auth_token, first_token)
        # Create ten more users; a token should be minted for each of them.
        CustomUserFactory.create_batch(10)
        n_users = CustomUser.objects.all().count()
        n_tokens = Token.objects.all().count()
        self.assertEqual(n_users, n_tokens)
        self.assertEqual(n_users, 11)
| # -*- coding: utf-8 -*-
from django.test import TestCase
from accounts.factories import CustomUserFactory
from rest_framework.authtoken.models import Token
from accounts.models import CustomUser
class CustomUserTest(TestCase):
def setUp(self):
self.user = CustomUserFactory()
def test_create_token(self):
token = Token.objects.all()[0]
self.assertEqual(self.user.auth_token, token)
CustomUserFactory.create_batch(10)
user_count = CustomUser.objects.all().count()
token_count = Token.objects.all().count()
self.assertEqual(user_count, token_count)
self.assertEqual(user_count, 11)
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.285428 | 2 |
upload.py | campanulamediuml/Anduin | 3 | 6615786 | <reponame>campanulamediuml/Anduin
import os
import time
from anduin.Scheduler import time_to_str
# Files rewritten by the release helper functions below.
SETUP_PATH = './setup.py'
INIT_PATH = './anduin/__init__.py'
COPY_RIGHT_FILE = './COPYING.txt'
# Version stamped into both __init__.py and setup.py on release.
ANDUIN_VER = '7.0.29'
# 6.x: stable release line
# 7.x: async framework test line
def add_copy_right_and_version():
    """Embed the license text and current version into the package files.

    Rewrites ``anduin/__init__.py`` so it starts with the COPYING.txt text as
    a module docstring, separated from the code by a marker line, and updates
    its ``__version__`` assignment; then updates the ``VER = `` assignment in
    ``setup.py``.

    Fixes over the original: every file is opened with an explicit UTF-8
    encoding (the original read with the platform default while writing
    UTF-8, which corrupts non-ASCII text on e.g. Windows) and all handles
    are closed promptly via ``with`` blocks instead of being leaked.
    """
    print('写入版权数据,生成版本信息')
    split_line = '\n# <=========>\n'
    with open(COPY_RIGHT_FILE, encoding='utf-8') as f:
        cpright = '"""' + f.read() + '"""'
    with open(INIT_PATH, encoding='utf-8') as f:
        code_content = f.read().split(split_line)
    # The file may or may not already carry a license header before the
    # marker; the code proper is the part after the marker when present.
    if len(code_content) == 1:
        code = code_content[0]
    else:
        code = code_content[1]
    code_data = ''
    for line in code.split('\n'):
        if '__version__' in line:
            # Keep the "__version__ = " prefix (14 chars), replace the value.
            line = line[:14] + '"%s"' % ANDUIN_VER
        code_data = code_data + line + '\n'
    with open(INIT_PATH, 'w', encoding='utf-8') as f:
        f.write(cpright + split_line + code_data[:-1])
    with open(SETUP_PATH, encoding='utf-8') as f:
        setup_file = f.read().split('\n')
    code_data = ''
    for line in setup_file:
        if 'VER = ' in line:
            # Keep the "VER = " prefix (6 chars), replace the value.
            line = line[:6] + '"%s"' % ANDUIN_VER
        code_data = code_data + line + '\n'
    with open(SETUP_PATH, 'w', encoding='utf-8') as f:
        f.write(code_data[:-1])
def clean_package():
    """Remove previously built distribution archives from dist/."""
    print('清理打包缓存')
    try:
        os.system('rm dist/*')
    except Exception as exc:
        # Best effort: report and continue (e.g. dist/ may not exist yet).
        print(str(exc))
def packageandupload():
    """Build the sdist, upload it to PyPI via twine, then log the release."""
    print('打包...')
    os.system('python setup.py sdist')
    print('打包完成~')
    print('上传中...')
    os.system('python -m twine upload --repository pypi dist/*')
    release_stamp = time_to_str(int(time.time()))
    print(release_stamp, '更新版本 v%s' % ANDUIN_VER)
if __name__ == "__main__":
    # Release pipeline: purge old artifacts, stamp version/license, build & upload.
    clean_package()
    add_copy_right_and_version()
    packageandupload()
| import os
import time
from anduin.Scheduler import time_to_str
SETUP_PATH = './setup.py'
INIT_PATH = './anduin/__init__.py'
COPY_RIGHT_FILE = './COPYING.txt'
ANDUIN_VER = '7.0.29'
# 6.x :正式版
# 7.x :异步框架测试
def add_copy_right_and_version():
# pass
print('写入版权数据,生成版本信息')
split_line = '\n# <=========>\n'
cpright = '"""' + open(COPY_RIGHT_FILE).read() + '"""'
code_content = open(INIT_PATH).read().split(split_line)
if len(code_content) == 1:
code = code_content[0]
else:
code = code_content[1]
code_list = code.split('\n')
code_data = ''
for line in code_list:
if '__version__' in line:
line = line[:14] + '"%s"'%ANDUIN_VER
code_data = code_data+line+'\n'
open(INIT_PATH, 'w',encoding='utf-8').write(cpright + split_line + code_data[:-1])
setup_file = open(SETUP_PATH,encoding='utf-8').read().split('\n')
code_data = ''
for line in setup_file:
if 'VER = ' in line:
line = line[:6] + '"%s"' % ANDUIN_VER
code_data = code_data + line + '\n'
open(SETUP_PATH, 'w',encoding='utf-8').write(code_data[:-1])
def clean_package():
print('清理打包缓存')
try:
query ='rm dist/*'
os.system(query)
except Exception as e:
print(str(e))
def packageandupload():
print('打包...')
query = 'python setup.py sdist'
os.system(query)
print('打包完成~')
print('上传中...')
query = 'python -m twine upload --repository pypi dist/*'
os.system(query)
print(time_to_str(int(time.time())),'更新版本 v%s'%ANDUIN_VER)
if __name__ == "__main__":
clean_package()
add_copy_right_and_version()
packageandupload() | en | 0.237837 | # 6.x :正式版 # 7.x :异步框架测试 # pass # <=========>\n' ' + open(COPY_RIGHT_FILE).read() + ' | 2.15902 | 2 |
src/random_choice.py | kemingy/daily-coding-problem | 3 | 6615787 | <filename>src/random_choice.py
# You are given n numbers as well as n probabilities that sum up to 1. Write a
# function to generate one of the numbers with its corresponding probability.
# For example, given the numbers [1, 2, 3, 4] and probabilities
# [0.1, 0.5, 0.2, 0.2], your function should return 1 10% of the time, 2 50%
# of the time, and 3 and 4 20% of the time.
# You can generate random numbers between 0 and 1 uniformly.
from random import random
def random_choice(nums, prob):
    """Return one element of ``nums`` chosen with the matching probability.

    Uses inverse-CDF sampling: draw a uniform r in [0, 1) and walk the
    probability list, subtracting each weight until the draw is exhausted.

    Fixes over the original:
    - the sanity check used ``sum(prob) - 1 <= 1e-6``, which accepted any
      list summing to *less* than 1 (the difference is negative); it now
      checks the absolute deviation.
    - floating-point drift could leave r marginally positive after the
      loop, silently returning None; we now fall back to the last element.
    """
    assert len(nums) == len(prob)
    assert abs(sum(prob) - 1) <= 1e-6
    r = random()
    for i, p in enumerate(prob):
        r -= p
        if r <= 0:
            return nums[i]
    return nums[-1]
if __name__ == "__main__":
    # Empirical sanity check: sample 100k draws and print how often each
    # number was chosen; counts should approximate prob[i] * 100000.
    numbers = [1, 2, 3, 4]
    prob = [0.1, 0.5, 0.2, 0.2]
    count = [0] * len(numbers)
    for _ in range(100000):
        count[random_choice(numbers, prob) - 1] += 1
    print(count)
| <filename>src/random_choice.py
# You are given n numbers as well as n probabilities that sum up to 1. Write a
# function to generate one of the numbers with its corresponding probability.
# For example, given the numbers [1, 2, 3, 4] and probabilities
# [0.1, 0.5, 0.2, 0.2], your function should return 1 10% of the time, 2 50%
# of the time, and 3 and 4 20% of the time.
# You can generate random numbers between 0 and 1 uniformly.
from random import random
def random_choice(nums, prob):
assert len(nums) == len(prob)
assert sum(prob) - 1 <= 1e-6
r = random()
for i, p in enumerate(prob):
r -= p
if r <= 0:
return nums[i]
if __name__ == "__main__":
numbers = [1, 2, 3, 4]
prob = [0.1, 0.5, 0.2, 0.2]
count = [0] * len(numbers)
for _ in range(100000):
count[random_choice(numbers, prob) - 1] += 1
print(count)
| en | 0.93067 | # You are given n numbers as well as n probabilities that sum up to 1. Write a # function to generate one of the numbers with its corresponding probability. # For example, given the numbers [1, 2, 3, 4] and probabilities # [0.1, 0.5, 0.2, 0.2], your function should return 1 10% of the time, 2 50% # of the time, and 3 and 4 20% of the time. # You can generate random numbers between 0 and 1 uniformly. | 4.071863 | 4 |
bert-ranker/experiment/qa/data/wikipassageqa/reader.py | UKPLab/emnlp2020-multicqa | 14 | 6615788 | import json
from collections import OrderedDict
from os import path
import numpy as np
from experiment.qa.data import QAData
from experiment.qa.data.models import TextItem, QAPool, Data, Archive
from experiment.qa.data.reader import TSVArchiveReader
def _get_text_item(text, id):
    """Wrap *text* in a TextItem and record *id* in its metadata."""
    item = TextItem(text)
    item.metadata['id'] = id
    return item
class WikiPassageQAReader(TSVArchiveReader):
    """Reader for the WikiPassageQA archive (TSV splits + passage JSON)."""

    def read_split(self, name, answers):
        """Read one split file ('<name>.tsv') into a Data object.

        Each TSV row (after the header, skipped via next(f)) carries:
        question id, question text, document id, an unused column, and a
        comma-separated list of relevant passage ids.  The candidate pool
        for a question is every passage of its document, shuffled
        in place (np.random.shuffle — nondeterministic unless seeded).
        """
        datapoints = []
        split_answers = []
        with open(path.join(self.archive_path, '{}.tsv'.format(name)), 'r') as f:
            next(f)
            for l in f:
                qid, question, doc_id, _, relevant_passages = l.strip().split('\t')
                question_ti = TextItem(question.lower() if self.lowercased else question)
                question_ti.metadata['id'] = 'question-{}'.format(qid)
                # All passages of this document, keyed "<doc_id>_<passage_id>".
                pool = [a for (k, a) in answers.items() if k.startswith('{}_'.format(doc_id))]
                np.random.shuffle(pool)
                ground_truth = [answers[doc_id + '_' + a_id] for a_id in relevant_passages.split(',')]
                datapoints.append(QAPool(question_ti, pool, ground_truth))
                split_answers += pool
        return Data('wikipassageqa / {}'.format(name), datapoints, split_answers)
    def read(self):
        """Load all passages, then the train/dev/test splits, into an Archive.

        document_passages.json maps document id -> {passage id -> text};
        every passage becomes a TextItem answer candidate.
        """
        answers = OrderedDict()
        with open(path.join(self.archive_path, 'document_passages.json'), 'r') as f:
            for document_id, passages in json.loads(f.read()).items():
                for passage_id, passage_text in passages.items():
                    answer_ti = TextItem(passage_text.lower() if self.lowercased else passage_text)
                    answer_ti.metadata['id'] = 'answer-{}-{}'.format(document_id, passage_id)
                    answers['{}_{}'.format(document_id, passage_id)] = answer_ti
        train = self.read_split('train', answers)
        valid = self.read_split('dev', answers)
        test = self.read_split('test', answers)
        questions = [qa.question for qa in (train.qa + valid.qa + test.qa)]
        # Rebind: from here on, `answers` is the flat list of pooled answers.
        answers = train.answers + valid.answers + test.answers
        return Archive(self.name, train, valid, [test], questions, answers)
class WikiPassageQAData(QAData):
    """QAData variant backed by the WikiPassageQA archive reader."""

    def _get_train_readers(self):
        reader = WikiPassageQAReader(self.config['wikipassageqa'], self.lowercased, self.logger)
        return [reader]

# Entry point used by the experiment framework to locate this data component.
component = WikiPassageQAData
| import json
from collections import OrderedDict
from os import path
import numpy as np
from experiment.qa.data import QAData
from experiment.qa.data.models import TextItem, QAPool, Data, Archive
from experiment.qa.data.reader import TSVArchiveReader
def _get_text_item(text, id):
ti = TextItem(text)
ti.metadata['id'] = id
return ti
class WikiPassageQAReader(TSVArchiveReader):
def read_split(self, name, answers):
datapoints = []
split_answers = []
with open(path.join(self.archive_path, '{}.tsv'.format(name)), 'r') as f:
next(f)
for l in f:
qid, question, doc_id, _, relevant_passages = l.strip().split('\t')
question_ti = TextItem(question.lower() if self.lowercased else question)
question_ti.metadata['id'] = 'question-{}'.format(qid)
pool = [a for (k, a) in answers.items() if k.startswith('{}_'.format(doc_id))]
np.random.shuffle(pool)
ground_truth = [answers[doc_id + '_' + a_id] for a_id in relevant_passages.split(',')]
datapoints.append(QAPool(question_ti, pool, ground_truth))
split_answers += pool
return Data('wikipassageqa / {}'.format(name), datapoints, split_answers)
def read(self):
answers = OrderedDict()
with open(path.join(self.archive_path, 'document_passages.json'), 'r') as f:
for document_id, passages in json.loads(f.read()).items():
for passage_id, passage_text in passages.items():
answer_ti = TextItem(passage_text.lower() if self.lowercased else passage_text)
answer_ti.metadata['id'] = 'answer-{}-{}'.format(document_id, passage_id)
answers['{}_{}'.format(document_id, passage_id)] = answer_ti
train = self.read_split('train', answers)
valid = self.read_split('dev', answers)
test = self.read_split('test', answers)
questions = [qa.question for qa in (train.qa + valid.qa + test.qa)]
answers = train.answers + valid.answers + test.answers
return Archive(self.name, train, valid, [test], questions, answers)
class WikiPassageQAData(QAData):
def _get_train_readers(self):
return [WikiPassageQAReader(self.config['wikipassageqa'], self.lowercased, self.logger)]
component = WikiPassageQAData
| none | 1 | 2.490631 | 2 | |
Blob Rage App/random/Base_ball.py | povellesto/blobydouche | 1 | 6615789 | <gh_stars>1-10
import random
from Tkinter import*
def drawCell(canvas, row, col):
    """Paint the white background square for board cell (row, col)."""
    MARGIN, CELL = 5, 15
    x0 = MARGIN + col * CELL
    y0 = MARGIN + row * CELL
    canvas.create_rectangle(x0, y0, x0 + CELL, y0 + CELL, fill="white")
def drawTetrisPiece(canvas):
    """Render the board contents stored in canvas.data["tetrisBoard"].

    The original made five nearly identical full-board passes (green for
    any value > 0, then brown for 0, white for 1, purple for 2, yellow
    for 6), each later pass overpainting the earlier ones.  This version
    draws each cell once in the colour those layered passes produced:

        0 -> brown, 1 -> white, 2 -> purple, 6 -> yellow,
        any other positive value (3/4/5) -> green;
        negative values are not drawn at all (matching the original).

    When canvas.data["inDebugMode"] is truthy the cell's numeric value is
    drawn on top of it, as before.
    """
    tetrisBoard = canvas.data["tetrisBoard"]
    rows = len(tetrisBoard)
    cols = len(tetrisBoard[0])
    margin = 5
    cellSize = 15
    # One extra white square just past the bottom-right corner of the board
    # (kept from the original implementation).
    left = margin + cols * cellSize
    top = margin + rows * cellSize
    canvas.create_rectangle(left, top, left + cellSize, top + cellSize, fill="white")
    colors = {0: "brown", 1: "white", 2: "purple", 6: "yellow"}
    for row in range(rows):
        for col in range(cols):
            value = tetrisBoard[row][col]
            left = margin + col * cellSize
            right = left + cellSize
            top = margin + row * cellSize
            bottom = top + cellSize
            fill = colors.get(value, "green" if value > 0 else None)
            if fill is not None:
                canvas.create_rectangle(left, top, right, bottom, fill=fill)
            # for debugging, draw the number in the cell
            if (canvas.data["inDebugMode"] == True):
                canvas.create_text(left+cellSize/2, top+cellSize/2, text=str(tetrisBoard[row][col]))
def loadTetrisBoard(canvas):
    """Install the initial board layout on the canvas.

    Cell codes (see drawTetrisPiece): 0 = brown/empty, 1 = white row,
    2 = purple terrain, 6 = yellow row, other positive values (3/4) are
    drawn green.

    Fix: the original also read canvas.data["rows"] and ["cols"] into
    locals that were never used (and would KeyError if absent); those
    dead reads are removed.
    """
    canvas.data["tetrisBoard"] = [
        [6, 6, 6, 6, 6, 6, 6, 6, 6],
        [1, 1, 1, 1, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 3, 0, 0, 0, 0, 0, 4, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [2, 0, 0, 0, 0, 0, 0, 0, 2],
        [2, 2, 0, 0, 0, 0, 0, 2, 2],
        [2, 2, 2, 0, 4, 0, 2, 2, 2],
        [2, 2, 2, 2, 2, 2, 2, 2, 2],
    ]
def drawTetrisBoard(canvas):
    """Paint the white background square for every cell of the stored board."""
    board = canvas.data["tetrisBoard"]
    n_rows, n_cols = len(board), len(board[0])
    for r in range(n_rows):
        for c in range(n_cols):
            drawCell(canvas, r, c)
def gameOver(canvas):
    """Flag the game as finished in the shared canvas state."""
    canvas.data.update(isGameOver=True)
def timerFired(canvas):
    # Periodic tick: repaint everything, then re-arm the timer via Tk's
    # after() so this function keeps calling itself.
    redrawAll(canvas)
    delay = 1250 # milliseconds
    #if canvas.data["isGameOver"] is False:
    # NOTE(review): the guard above is commented out, so the timer keeps
    # rescheduling itself even after gameOver() sets the flag.
    canvas.after(delay, timerFired, canvas)
def redrawAll(canvas):
    """Clear the canvas, then repaint the background grid and the pieces."""
    canvas.delete(ALL)
    for painter in (drawTetrisBoard, drawTetrisPiece):
        painter(canvas)
def init(canvas, rows, cols):
    """Reset game state: load the board layout and clear the flags."""
    loadTetrisBoard(canvas)
    canvas.data.update(inDebugMode=False, isGameOver=False)
def run (rows, cols):
    # Build the Tk window sized to the board, wire up shared state on the
    # canvas, draw the initial frame, start the repaint timer, and enter
    # the Tk event loop (blocks until the window is closed).
    margin = 5
    cellSize = 15
    canvasWidth = 2*margin + cols*cellSize
    canvasHeight = 2*margin + rows*cellSize
    root = Tk()
    canvas = Canvas(root, width=canvasWidth, height=canvasHeight)
    canvas.pack()
    root.resizable(width=0, height=0)
    # Cross-references so handlers can reach the canvas from either object.
    root.canvas = canvas.canvas = canvas
    canvas.data = { }
    canvas.data ["rows"] = rows
    canvas.data ["cols"] = cols
    init(canvas, rows, cols)
    tetrisBoard = canvas.data ["tetrisBoard"]
    drawTetrisBoard(canvas)
    drawTetrisPiece(canvas)
    timerFired(canvas)
    root.mainloop()
# Bases occupied as [first, second, third]; 1 = runner on that base
# (batter() prints "First, Second, Third" followed by this list).
history = [0,0,0]
print("HI, to hit the ball you enter a number from 1 to 3")
def strike(strikes=0, outs=0):
    """Return the updated out count after checking a strike tally.

    Fixes over the original, which was unrunnable as written: it compared
    the function object itself to 3 (``strike == 3`` — always False) and
    incremented an undefined local ``out`` (UnboundLocalError had the
    branch ever run).  The counters are now explicit parameters with
    defaults, so the original no-argument call form still works.
    """
    if strikes == 3:
        print("Out")
        outs += 1
    return outs
# Team state.  Lions/Deer double as both the "which team is batting" flag
# (set to True/False by the game loop below) and per-inning run tallies
# (batter() does `Lions += 1` while the flag is True — bool/int mixing).
Lions = 0
Deer = 0
# Cumulative score per team, printed by scoreboard().
Lionscore = 0
Deerscore = 0
def scoreboard():
    """Print the current cumulative score for both teams."""
    for team, score in (("Lions", Lionscore), ("Deer", Deerscore)):
        print(team + " " + str(score))
def batter ():
    """Run one team's turn of three pitches.

    The player types a guess ("1"-"3"); matching the random pitch produces
    a random landing spot that advances runners in ``history`` ([first,
    second, third]) and credits runs to the active team's counter.

    NOTE(review): this file is Python 2 (raw_input); random.randrange is
    called with float bounds, which Python 3 rejects.  Landing values in
    (7.22, 8.22] and (5.0, 5.01] fall through every branch and print
    nothing; the Short Stop / outfield branches print but do not move
    runners.
    """
    global Lions
    global Deer
    global Lionscore
    global Deerscore
    global history
    strike = 0
    out = 0
    for i in range (3):
        batters = raw_input("Hit The Ball!: ")
        Hit = random.randrange(1,4)
        if batters == str(Hit):
            ballanding = random.randrange(1.0,20.0)
            if ballanding >=1.0 and ballanding <=5.0:
                if strike != 3:
                    out += 1
                    print("\033[31m" + "Strike" + "\033[0m")
                print('\033[94m' + "Foul!" + '\033[0m')
            # Single: advance runners one base according to occupancy.
            if ballanding >5.01 and ballanding <=6.11:
                print ('\033[93m' + "First Base!" + '\033[0m')
                if history == [1,0,0]:
                    history = [1,1,0]
                elif history == [0,0,0]:
                    history = [1,0,0]
                elif history == [1,1,1]:
                    history = [0,1,1]
                    if Lions == True:
                        Lions += 1
                    if Deer == True:
                        Deer += 1
                elif history == [0,0,1]:
                    history = [1,0,1]
                elif history == [0,1,1]:
                    history = [1,1,1]
                elif history == [0,1,0]:
                    history = [1,0,1]
                elif history == [1,0,1]:
                    history = [1,1,1]
            # Double: advance runners two bases; runners past third score.
            if ballanding >6.11 and ballanding <=7.22:
                print('\033[93m' + "Second Base" + '\033[0m')
                if history == [1,0,0]:
                    history = [0,1,1]
                elif history == [0,0,0]:
                    history = [0,1,0]
                elif history == [1,1,1]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 2
                    if Deer == True:
                        Deer += 2
                elif history == [0,0,1]:
                    history = [0,1,1]
                elif history == [0,1,1]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 1
                    if Deer == True:
                        Deer += 1
                elif history == [0,1,0]:
                    history = [0,1,0]
                    if Lions == True:
                        Lions += 2
                    if Deer == True:
                        Deer += 2
                elif history == [1,0,1]:
                    history = [0,0,1]
            if ballanding >8.22 and ballanding <=9.33:
                print('\033[93m' + "Short Stop" + '\033[0m') #Third Base
            # Triple: batter reaches third; everyone already on base scores.
            if ballanding >9.33 and ballanding <=10.44:
                print('\033[93m' + "Third Base" + '\033[0m')
                if history == [1,0,0]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 1
                    if Deer == True:
                        Deer += 1
                elif history == [0,0,0]:
                    history = [0,0,1]
                elif history == [1,1,1]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 2
                    if Deer == True:
                        Deer += 2
                elif history == [0,0,1]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 1
                    if Deer == True:
                        Deer += 1
                elif history == [0,1,1]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 1
                    if Deer == True:
                        Deer += 1
                elif history == [0,1,0]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 1
                    if Deer == True:
                        Deer += 1
                elif history == [1,0,1]:
                    history = [0,0,1]
                    if Lions == True:
                        Lions += 2
                    if Deer == True:
                        Deer += 2
            if ballanding >10.44 and ballanding <=11.55:
                print('\033[93m' + "Left Field" + '\033[0m') #Second Base
            if ballanding >11.55 and ballanding <=12.66:
                print('\033[93m' + "Right Field" + '\033[0m') #Second Base
            if ballanding >12.66 and ballanding <=13.77:
                print('\033[93m' + "Center Field" + '\033[0m') #First Base
            if ballanding >13.77 and ballanding <=15.88:
                print('"\033[31m"' + "Coach says Out!" + '\033[0m')
                out += 1
            # Home run: one run for the batting team (runners left as-is).
            if ballanding >15.88 and ballanding <=20.0:
                print('\033[93m' + "Home Run!" + '\033[0m')
                print (Lions, Deer)
                if Lions == True:
                    Lionscore += 1
                if Deer == True:
                    Deerscore += 1
        if batters == "ScoreBoard":
            # scoreboard() prints and returns None, so this also prints "None".
            print(scoreboard())
        if batters != str(Hit) and batters != "ScoreBoard":
            print('\033[31m' + "Strike" + '\033[0m')
            strike += 1
        print("First, Second, Third")
        print(history)
        if out == 3:
            print("Switch")
        if strike == 3:
            print('\033[32m' + "Coach says Out!" +'\033[0m')
            out += 1
        ballanding = random.randrange (1.0,20.0)
# Top of the inning: Lions bat for three turns.
for i in range (3):
    Lions = True
    Deer = False
    batter()
    print ('\033[36m' + "Next batter." + '\033[0m')
# NOTE(review): stray ']' inside the escape sequence below is printed literally.
print('\033[32m' + "Next Team" + '\033[0m]' )
# Bottom of the inning: Deer bat for three turns.
for i in range (3):
    Lions = False
    Deer = True
    batter()
    print ('\033[36m' + "Next batter." + '\033[0m')
print("")
print("The Final Score is ")
scoreboard()
#run(9, 9)
#if strike == 3:
#print("Out")
#out += 1
def homerun ():
    # Stub: apparently intended to track runs scored from home plate;
    # never called and the counter below is unused.
    homebasescore = 0
    #homebasescore =
| import random
from Tkinter import*
def drawCell(canvas, row, col):
margin = 5
cellSize = 15
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
canvas.create_rectangle(left, top, right, bottom, fill="white")
def drawTetrisPiece(canvas):
tetrisBoard = canvas.data["tetrisBoard"]
rows = len(tetrisBoard)
cols = len(tetrisBoard[0])
margin = 5
cellSize = 15
left = margin + cols * cellSize
right = left + cellSize
top = margin + rows * cellSize
bottom = top + cellSize
canvas.create_rectangle(left, top, right, bottom, fill="white")
for row in range(rows):
for col in range(cols):
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
if (tetrisBoard[row][col] > 0):
# draw part of the snake body
canvas.create_rectangle(left, top, right, bottom, fill="green")
for row in range(rows):
for col in range(cols):
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
if (tetrisBoard[row][col] == 0):
# draw part of the snake body
canvas.create_rectangle(left, top, right, bottom, fill="brown")
for row in range(rows):
for col in range(cols):
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
if (tetrisBoard[row][col] == 1):
# draw part of the snake body
canvas.create_rectangle(left, top, right, bottom, fill="white")
for row in range(rows):
for col in range(cols):
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
if (tetrisBoard[row][col] == 2):
# draw part of the snake body
canvas.create_rectangle(left, top, right, bottom, fill="purple")
for row in range(rows):
for col in range(cols):
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
if (tetrisBoard[row][col] == 6):
# draw part of the snake body
canvas.create_rectangle(left, top, right, bottom, fill="yellow")
# for debugging, draw the number in the cell
if (canvas.data["inDebugMode"] == True):
canvas.create_text(left+cellSize/2,top+cellSize/2,text=str(tetrisBoard[row][col]))
def loadTetrisBoard(canvas):
tetrisBoard = [ [6,6,6,6,6,6,6,6,6],
[1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0],
[0,3,0,0,0,0,0,4,0],
[0,0,0,0,0,0,0,0,0],
[2,0,0,0,0,0,0,0,2],
[2,2,0,0,0,0,0,2,2],
[2,2,2,0,4,0,2,2,2],
[2,2,2,2,2,2,2,2,2],
]
rows = canvas.data["rows"]
cols = canvas.data["cols"]
canvas.data["tetrisBoard"] = tetrisBoard
def drawTetrisBoard(canvas):
tetrisBoard = canvas.data ["tetrisBoard"]
rows = len(tetrisBoard)
cols = len(tetrisBoard[0])
for row in range(rows):
for col in range(cols):
drawCell(canvas, row, col)
def gameOver(canvas):
canvas.data["isGameOver"] = True
def timerFired(canvas):
redrawAll(canvas)
delay = 1250 # milliseconds
#if canvas.data["isGameOver"] is False:
canvas.after(delay, timerFired, canvas)
def redrawAll(canvas):
canvas.delete(ALL)
drawTetrisBoard(canvas)
drawTetrisPiece(canvas)
def init (canvas, rows, cols):
loadTetrisBoard(canvas)
canvas.data["inDebugMode"] = False
canvas.data["isGameOver"] = False
def run (rows, cols):
margin = 5
cellSize = 15
canvasWidth = 2*margin + cols*cellSize
canvasHeight = 2*margin + rows*cellSize
root = Tk()
canvas = Canvas(root, width=canvasWidth, height=canvasHeight)
canvas.pack()
root.resizable(width=0, height=0)
root.canvas = canvas.canvas = canvas
canvas.data = { }
canvas.data ["rows"] = rows
canvas.data ["cols"] = cols
init(canvas, rows, cols)
tetrisBoard = canvas.data ["tetrisBoard"]
drawTetrisBoard(canvas)
drawTetrisPiece(canvas)
timerFired(canvas)
root.mainloop()
history = [0,0,0]
print("HI, to hit the ball you enter a number from 1 to 3")
def strike():
if strike == 3:
print("Out")
out += 1
Lions = 0
Deer = 0
Lionscore = 0
Deerscore = 0
def scoreboard():
print("Lions " + str(Lionscore))
print("Deer " + str(Deerscore))
def batter ():
global Lions
global Deer
global Lionscore
global Deerscore
global history
strike = 0
out = 0
for i in range (3):
batters = raw_input("Hit The Ball!: ")
Hit = random.randrange(1,4)
if batters == str(Hit):
ballanding = random.randrange(1.0,20.0)
if ballanding >=1.0 and ballanding <=5.0:
if strike != 3:
out += 1
print("\033[31m" + "Strike" + "\033[0m")
print('\033[94m' + "Foul!" + '\033[0m')
if ballanding >5.01 and ballanding <=6.11:
print ('\033[93m' + "First Base!" + '\033[0m')
if history == [1,0,0]:
history = [1,1,0]
elif history == [0,0,0]:
history = [1,0,0]
elif history == [1,1,1]:
history = [0,1,1]
if Lions == True:
Lions += 1
if Deer == True:
Deer += 1
elif history == [0,0,1]:
history = [1,0,1]
elif history == [0,1,1]:
history = [1,1,1]
elif history == [0,1,0]:
history = [1,0,1]
elif history == [1,0,1]:
history = [1,1,1]
if ballanding >6.11 and ballanding <=7.22:
print('\033[93m' + "Second Base" + '\033[0m')
if history == [1,0,0]:
history = [0,1,1]
elif history == [0,0,0]:
history = [0,1,0]
elif history == [1,1,1]:
history = [0,0,1]
if Lions == True:
Lions += 2
if Deer == True:
Deer += 2
elif history == [0,0,1]:
history = [0,1,1]
elif history == [0,1,1]:
history = [0,0,1]
if Lions == True:
Lions += 1
if Deer == True:
Deer += 1
elif history == [0,1,0]:
history = [0,1,0]
if Lions == True:
Lions += 2
if Deer == True:
Deer += 2
elif history == [1,0,1]:
history = [0,0,1]
if ballanding >8.22 and ballanding <=9.33:
print('\033[93m' + "Short Stop" + '\033[0m') #Third Base
if ballanding >9.33 and ballanding <=10.44:
print('\033[93m' + "Third Base" + '\033[0m')
if history == [1,0,0]:
history = [0,0,1]
if Lions == True:
Lions += 1
if Deer == True:
Deer += 1
elif history == [0,0,0]:
history = [0,0,1]
elif history == [1,1,1]:
history = [0,0,1]
if Lions == True:
Lions += 2
if Deer == True:
Deer += 2
elif history == [0,0,1]:
history = [0,0,1]
if Lions == True:
Lions += 1
if Deer == True:
Deer += 1
elif history == [0,1,1]:
history = [0,0,1]
if Lions == True:
Lions += 1
if Deer == True:
Deer += 1
elif history == [0,1,0]:
history = [0,0,1]
if Lions == True:
Lions += 1
if Deer == True:
Deer += 1
elif history == [1,0,1]:
history = [0,0,1]
if Lions == True:
Lions += 2
if Deer == True:
Deer += 2
if ballanding >10.44 and ballanding <=11.55:
print('\033[93m' + "Left Field" + '\033[0m') #Second Base
if ballanding >11.55 and ballanding <=12.66:
print('\033[93m' + "Right Field" + '\033[0m') #Second Base
if ballanding >12.66 and ballanding <=13.77:
print('\033[93m' + "Center Field" + '\033[0m') #First Base
if ballanding >13.77 and ballanding <=15.88:
print('"\033[31m"' + "Coach says Out!" + '\033[0m')
out += 1
if ballanding >15.88 and ballanding <=20.0:
print('\033[93m' + "Home Run!" + '\033[0m')
print (Lions, Deer)
if Lions == True:
Lionscore += 1
if Deer == True:
Deerscore += 1
if batters == "ScoreBoard":
print(scoreboard())
if batters != str(Hit) and batters != "ScoreBoard":
print('\033[31m' + "Strike" + '\033[0m')
strike += 1
print("First, Second, Third")
print(history)
if out == 3:
print("Switch")
if strike == 3:
print('\033[32m' + "Coach says Out!" +'\033[0m')
out += 1
ballanding = random.randrange (1.0,20.0)
for i in range (3):
Lions = True
Deer = False
batter()
print ('\033[36m' + "Next batter." + '\033[0m')
print('\033[32m' + "Next Team" + '\033[0m]' )
for i in range (3):
Lions = False
Deer = True
batter()
print ('\033[36m' + "Next batter." + '\033[0m')
print("")
print("The Final Score is ")
scoreboard()
#run(9, 9)
#if strike == 3:
#print("Out")
#out += 1
def homerun ():
homebasescore = 0
#homebasescore = | en | 0.791436 | # draw part of the snake body # draw part of the snake body # draw part of the snake body # draw part of the snake body # draw part of the snake body # for debugging, draw the number in the cell # milliseconds #if canvas.data["isGameOver"] is False: #Third Base #Second Base #Second Base #First Base #run(9, 9) #if strike == 3: #print("Out") #out += 1 #homebasescore = | 3.62474 | 4 |
inoutlogger/decorators.py | PyOneers/in-out-logger | 1 | 6615790 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
decorators.py
~~~~~~~~~~~~~~~~~~~~~
Author : <NAME>
"""
import datetime
from .utils import InOutLogger
def in_out_log(*args, **kwargs):
# Name of Log Handler
handler_name = None
if "handler_name" in kwargs.keys():
handler_name = kwargs["handler_name"]
def decorator(func):
def inner(*args, **kwargs):
log_handler, entry_identifier, exit_identifier, log_time = None, None, None, False
pass_handeler_name_check = False
# Get Log Handlers
handlers = InOutLogger.getResources().LOGGERS
if handler_name is None and len(handlers) > 1:
raise Exception(
"Multiple Logger(inoutlogger.utils.Logger) found. Specify handler_name in decorator arguments to select which logger to be used")
elif handler_name is None and len(handlers) == 1:
pass_handeler_name_check = True
for hndlr in handlers:
if pass_handeler_name_check:
log_handler = hndlr.log_handler
entry_identifier = hndlr.entry_identifier
exit_identifier = hndlr.exit_identifier
log_time = hndlr.log_time
elif hndlr.name == handler_name:
log_handler = hndlr.log_handler
entry_identifier = hndlr.entry_identifier
exit_identifier = hndlr.exit_identifier
log_time = hndlr.log_time
if log_handler is None:
raise Exception("No Logger(inoutlogger.utils.Logger) found with name [ {} ]".format(handler_name))
# Entry
start_time = datetime.datetime.now()
start_time_microsec = datetime.datetime.now().microsecond
log_handler.info(
"{} Entered [ {} ] method with args [ {} ] and kwargs [ {} ] at [ {} ] time".format(
entry_identifier,
func.__name__,
args,
kwargs,
str(start_time)))
# Executing Method
return_value = func(*args, **kwargs)
# End Method
end_time = datetime.datetime.now()
end_time_microsec = datetime.datetime.now().microsecond
# Log execution time
if log_time:
log_handler.info(
"Time taken to execute method is [ {} ]".format(str(end_time_microsec - start_time_microsec)))
# Exit
log_handler.info(
"{} Exited [ {} ] method with return value [ {} ] at [ {} ] time".format(
exit_identifier,
func.__name__,
return_value,
str(end_time)))
return return_value
return inner
return decorator
| # -*- coding: utf-8 -*-
"""
decorators.py
~~~~~~~~~~~~~~~~~~~~~
Author : <NAME>
"""
import datetime
from .utils import InOutLogger
def in_out_log(*args, **kwargs):
# Name of Log Handler
handler_name = None
if "handler_name" in kwargs.keys():
handler_name = kwargs["handler_name"]
def decorator(func):
def inner(*args, **kwargs):
log_handler, entry_identifier, exit_identifier, log_time = None, None, None, False
pass_handeler_name_check = False
# Get Log Handlers
handlers = InOutLogger.getResources().LOGGERS
if handler_name is None and len(handlers) > 1:
raise Exception(
"Multiple Logger(inoutlogger.utils.Logger) found. Specify handler_name in decorator arguments to select which logger to be used")
elif handler_name is None and len(handlers) == 1:
pass_handeler_name_check = True
for hndlr in handlers:
if pass_handeler_name_check:
log_handler = hndlr.log_handler
entry_identifier = hndlr.entry_identifier
exit_identifier = hndlr.exit_identifier
log_time = hndlr.log_time
elif hndlr.name == handler_name:
log_handler = hndlr.log_handler
entry_identifier = hndlr.entry_identifier
exit_identifier = hndlr.exit_identifier
log_time = hndlr.log_time
if log_handler is None:
raise Exception("No Logger(inoutlogger.utils.Logger) found with name [ {} ]".format(handler_name))
# Entry
start_time = datetime.datetime.now()
start_time_microsec = datetime.datetime.now().microsecond
log_handler.info(
"{} Entered [ {} ] method with args [ {} ] and kwargs [ {} ] at [ {} ] time".format(
entry_identifier,
func.__name__,
args,
kwargs,
str(start_time)))
# Executing Method
return_value = func(*args, **kwargs)
# End Method
end_time = datetime.datetime.now()
end_time_microsec = datetime.datetime.now().microsecond
# Log execution time
if log_time:
log_handler.info(
"Time taken to execute method is [ {} ]".format(str(end_time_microsec - start_time_microsec)))
# Exit
log_handler.info(
"{} Exited [ {} ] method with return value [ {} ] at [ {} ] time".format(
exit_identifier,
func.__name__,
return_value,
str(end_time)))
return return_value
return inner
return decorator | en | 0.61081 | # -*- coding: utf-8 -*- decorators.py ~~~~~~~~~~~~~~~~~~~~~ Author : <NAME> # Name of Log Handler # Get Log Handlers # Entry # Executing Method # End Method # Log execution time # Exit | 2.605501 | 3 |
tests/test_parser.py | xeddmc/twtxt | 1,903 | 6615791 | from datetime import datetime, timezone
import pytest
from dateutil.tz import tzoffset
from twtxt.parser import make_aware, parse_iso8601
from twtxt.parser import parse_tweet, parse_tweets
from twtxt.models import Source
def test_make_aware():
"""Test making unaware datetime objects tzinfo aware."""
aware = datetime.now(timezone.utc)
unaware = aware.replace(tzinfo=None)
assert make_aware(unaware) >= aware
assert make_aware(aware) == aware
def test_parse_iso8601():
"""Test parsing ISO-8601 date/time strings."""
as_string = "2016-02-05T02:52:15.030474+01:00"
as_datetime = datetime(2016, 2, 5, 2, 52, 15, 30474, tzinfo=tzoffset(None, 3600))
assert parse_iso8601(as_string) == as_datetime
as_string = "2016-02-05T02:52:15"
as_datetime = datetime(2016, 2, 5, 2, 52, 15, tzinfo=timezone.utc)
assert parse_iso8601(as_string) == as_datetime
with pytest.raises(ValueError) as e:
parse_iso8601("foobar")
assert "Unknown string format" in str(e.value)
def test_parse_tweet():
"""Test parsing single tweet line."""
source = Source("foo", "bar")
raw_line = "2016-02-08T00:00:00\tHallo"
tweet = parse_tweet(raw_line, source)
assert tweet.text == "Hallo"
assert tweet.created_at == datetime(year=2016, month=2, day=8, tzinfo=timezone.utc)
with pytest.raises(ValueError) as e:
raw_line = "3000-02-08T00:00:00\tHallo"
parse_tweet(raw_line, source)
assert "Tweet is from the future" in str(e.value)
def test_parse_tweets():
"""Test parsing multiple tweet lines"""
source = Source("foo", "bar")
raw_tweets = [
"2016-02-08T00:00:00\tHallo",
"2016-02-08T00:00:00\tBar\n",
"2016-02-08T00:00:00\tFoo\n",
"3000-02-08T00:00:00\tHallo\n",
]
tweets = parse_tweets(raw_tweets, source)
assert len(tweets) == 3
| from datetime import datetime, timezone
import pytest
from dateutil.tz import tzoffset
from twtxt.parser import make_aware, parse_iso8601
from twtxt.parser import parse_tweet, parse_tweets
from twtxt.models import Source
def test_make_aware():
"""Test making unaware datetime objects tzinfo aware."""
aware = datetime.now(timezone.utc)
unaware = aware.replace(tzinfo=None)
assert make_aware(unaware) >= aware
assert make_aware(aware) == aware
def test_parse_iso8601():
"""Test parsing ISO-8601 date/time strings."""
as_string = "2016-02-05T02:52:15.030474+01:00"
as_datetime = datetime(2016, 2, 5, 2, 52, 15, 30474, tzinfo=tzoffset(None, 3600))
assert parse_iso8601(as_string) == as_datetime
as_string = "2016-02-05T02:52:15"
as_datetime = datetime(2016, 2, 5, 2, 52, 15, tzinfo=timezone.utc)
assert parse_iso8601(as_string) == as_datetime
with pytest.raises(ValueError) as e:
parse_iso8601("foobar")
assert "Unknown string format" in str(e.value)
def test_parse_tweet():
"""Test parsing single tweet line."""
source = Source("foo", "bar")
raw_line = "2016-02-08T00:00:00\tHallo"
tweet = parse_tweet(raw_line, source)
assert tweet.text == "Hallo"
assert tweet.created_at == datetime(year=2016, month=2, day=8, tzinfo=timezone.utc)
with pytest.raises(ValueError) as e:
raw_line = "3000-02-08T00:00:00\tHallo"
parse_tweet(raw_line, source)
assert "Tweet is from the future" in str(e.value)
def test_parse_tweets():
"""Test parsing multiple tweet lines"""
source = Source("foo", "bar")
raw_tweets = [
"2016-02-08T00:00:00\tHallo",
"2016-02-08T00:00:00\tBar\n",
"2016-02-08T00:00:00\tFoo\n",
"3000-02-08T00:00:00\tHallo\n",
]
tweets = parse_tweets(raw_tweets, source)
assert len(tweets) == 3
| en | 0.458152 | Test making unaware datetime objects tzinfo aware. Test parsing ISO-8601 date/time strings. Test parsing single tweet line. Test parsing multiple tweet lines | 2.806073 | 3 |
backend/api/__init__.py | wanghaiqing2015/fastapi-vue-cms | 7 | 6615792 | <gh_stars>1-10
from fastapi import APIRouter
from . import article
from . import user
from . import login
api_router = APIRouter()
api_router.include_router(article.router, prefix="/articles", tags=["articles"])
api_router.include_router(user.router, prefix="/users", tags=["users"])
api_router.include_router(login.router, tags=["login"])
| from fastapi import APIRouter
from . import article
from . import user
from . import login
api_router = APIRouter()
api_router.include_router(article.router, prefix="/articles", tags=["articles"])
api_router.include_router(user.router, prefix="/users", tags=["users"])
api_router.include_router(login.router, tags=["login"]) | none | 1 | 2.085603 | 2 | |
orion/primitives/estimators.py | PSFC-HEDP/Orion | 543 | 6615793 | import numpy as np
class MeanEstimator:
"""Mean Estimator.
This is a dummy estimator that always returns a constant value,
which consist on the mean value from the given input.
This estimator is here only to serve as reference of what
an estimator primitive looks like, and is not intended to be
used in real scenarios.
"""
def __init__(self, value_column='value'):
self._value_column = value_column
def fit(self, X):
values = X[self._value_column]
self._mean = np.mean(values)
def predict(self, X):
return np.full(len(X), self._mean)
| import numpy as np
class MeanEstimator:
"""Mean Estimator.
This is a dummy estimator that always returns a constant value,
which consist on the mean value from the given input.
This estimator is here only to serve as reference of what
an estimator primitive looks like, and is not intended to be
used in real scenarios.
"""
def __init__(self, value_column='value'):
self._value_column = value_column
def fit(self, X):
values = X[self._value_column]
self._mean = np.mean(values)
def predict(self, X):
return np.full(len(X), self._mean)
| en | 0.88293 | Mean Estimator. This is a dummy estimator that always returns a constant value, which consist on the mean value from the given input. This estimator is here only to serve as reference of what an estimator primitive looks like, and is not intended to be used in real scenarios. | 3.822956 | 4 |
Bins.py | HillaPeter/FinalProject | 0 | 6615794 | import pandas as pd
def bins(row,col):
if row[col] < 100:
val = 1
elif row[col]>= 100 and row[col]< 150 :
val = 2
elif row[col] >= 150 and row[col] < 200 :
val = 3
elif row[col] >=200 and row[col] < 300 :
val = 4
elif row[col] >= 300 and row[col] < 449:
val = 5
else:
val = 6
return val
def bins_reop(row,col):
if row[col] < 10:
val = 1
elif row[col]>= 10 and row[col]< 20 :
val = 2
elif row[col] >= 20 and row[col] < 30 :
val = 3
else:
val = 4
return val
df_hosp = pd.read_csv("Draft/hospid_allyears_expec_hospid_STSRCHOSPD.csv")
df_hosp['bin_total_cardiac'] = df_hosp.apply(bins, col='total surgery count', axis=1)
df_hosp['bin_total_CABG'] = df_hosp.apply(bins, col='total', axis=1)
df_hosp['bin_Reop_CABG'] = df_hosp.apply(bins_reop, col='Reop', axis=1)
df_hosp.to_csv("hospid_allyears_expec_hospid_STSRCHOSPD_div.csv")
df_surg = pd.read_csv("Draft/surgid_allyears_expec_surgid_STSRCHOSPD.csv")
df_surg["bin_total_cardiac"] = df_surg.apply(bins, col="total cardiac surgery", axis=1)
df_surg['bin_total_CABG'] = df_surg.apply(bins, col='total', axis=1)
df_surg['bin_Reop_CABG'] = df_surg.apply(bins_reop, col='Reop', axis=1)
df_surg.to_csv("surgid_allyears_expec_surgid_STSRCHOSPD_div.csv")
df_hosp = pd.read_csv("Draft/hospid_allyears_expec_hospid_STSRCOM.csv")
df_hosp['bin_total_cardiac'] = df_hosp.apply(bins, col='total surgery count', axis=1)
df_hosp['bin_total_CABG'] = df_hosp.apply(bins, col='total', axis=1)
df_hosp['bin_Reop_CABG'] = df_hosp.apply(bins_reop, col='Reop', axis=1)
df_hosp.to_csv("hospid_allyears_expec_hospid_STSRCOM_div.csv")
df_surg = pd.read_csv("Draft/surgid_allyears_expec_surgid_STSRCOM.csv")
df_surg["bin_total_cardiac"] = df_surg.apply(bins, col="total cardiac surgery", axis=1)
df_surg['bin_total_CABG'] = df_surg.apply(bins, col='total', axis=1)
df_surg['bin_Reop_CABG'] = df_surg.apply(bins_reop, col='Reop', axis=1)
df_surg.to_csv("surgid_allyears_expec_surgid_STSRCOM_div.csv")
df_hosp = pd.read_csv("hospid_allyears_expec_hospid_STSRCMM.csv")
df_hosp["hospbin_total_cardiac"] = df_hosp.apply(bins, col="total surgery count", axis=1)
df_hosp['bin_total_CABG'] = df_hosp.apply(bins, col='total', axis=1)
df_hosp['bin_Reop_CABG'] = df_hosp.apply(bins_reop, col='Reop', axis=1)
df_hosp.to_csv("hospid_allyears_expec_hospid_STSRCMM_div.csv")
df_surg = pd.read_csv("surgid_allyears_expec_surgid_STSRCMM.csv")
df_surg["bin_total_cardiac"] = df_surg.apply(bins, col="total cardiac surgery", axis=1)
df_surg['bin_total_CABG'] = df_surg.apply(bins, col='total', axis=1)
df_surg['bin_Reop_CABG'] = df_surg.apply(bins_reop, col='Reop', axis=1)
df_surg.to_csv("surgid_allyears_expec_surgid_STSRCMM_div.csv")
| import pandas as pd
def bins(row,col):
if row[col] < 100:
val = 1
elif row[col]>= 100 and row[col]< 150 :
val = 2
elif row[col] >= 150 and row[col] < 200 :
val = 3
elif row[col] >=200 and row[col] < 300 :
val = 4
elif row[col] >= 300 and row[col] < 449:
val = 5
else:
val = 6
return val
def bins_reop(row,col):
if row[col] < 10:
val = 1
elif row[col]>= 10 and row[col]< 20 :
val = 2
elif row[col] >= 20 and row[col] < 30 :
val = 3
else:
val = 4
return val
df_hosp = pd.read_csv("Draft/hospid_allyears_expec_hospid_STSRCHOSPD.csv")
df_hosp['bin_total_cardiac'] = df_hosp.apply(bins, col='total surgery count', axis=1)
df_hosp['bin_total_CABG'] = df_hosp.apply(bins, col='total', axis=1)
df_hosp['bin_Reop_CABG'] = df_hosp.apply(bins_reop, col='Reop', axis=1)
df_hosp.to_csv("hospid_allyears_expec_hospid_STSRCHOSPD_div.csv")
df_surg = pd.read_csv("Draft/surgid_allyears_expec_surgid_STSRCHOSPD.csv")
df_surg["bin_total_cardiac"] = df_surg.apply(bins, col="total cardiac surgery", axis=1)
df_surg['bin_total_CABG'] = df_surg.apply(bins, col='total', axis=1)
df_surg['bin_Reop_CABG'] = df_surg.apply(bins_reop, col='Reop', axis=1)
df_surg.to_csv("surgid_allyears_expec_surgid_STSRCHOSPD_div.csv")
df_hosp = pd.read_csv("Draft/hospid_allyears_expec_hospid_STSRCOM.csv")
df_hosp['bin_total_cardiac'] = df_hosp.apply(bins, col='total surgery count', axis=1)
df_hosp['bin_total_CABG'] = df_hosp.apply(bins, col='total', axis=1)
df_hosp['bin_Reop_CABG'] = df_hosp.apply(bins_reop, col='Reop', axis=1)
df_hosp.to_csv("hospid_allyears_expec_hospid_STSRCOM_div.csv")
df_surg = pd.read_csv("Draft/surgid_allyears_expec_surgid_STSRCOM.csv")
df_surg["bin_total_cardiac"] = df_surg.apply(bins, col="total cardiac surgery", axis=1)
df_surg['bin_total_CABG'] = df_surg.apply(bins, col='total', axis=1)
df_surg['bin_Reop_CABG'] = df_surg.apply(bins_reop, col='Reop', axis=1)
df_surg.to_csv("surgid_allyears_expec_surgid_STSRCOM_div.csv")
df_hosp = pd.read_csv("hospid_allyears_expec_hospid_STSRCMM.csv")
df_hosp["hospbin_total_cardiac"] = df_hosp.apply(bins, col="total surgery count", axis=1)
df_hosp['bin_total_CABG'] = df_hosp.apply(bins, col='total', axis=1)
df_hosp['bin_Reop_CABG'] = df_hosp.apply(bins_reop, col='Reop', axis=1)
df_hosp.to_csv("hospid_allyears_expec_hospid_STSRCMM_div.csv")
df_surg = pd.read_csv("surgid_allyears_expec_surgid_STSRCMM.csv")
df_surg["bin_total_cardiac"] = df_surg.apply(bins, col="total cardiac surgery", axis=1)
df_surg['bin_total_CABG'] = df_surg.apply(bins, col='total', axis=1)
df_surg['bin_Reop_CABG'] = df_surg.apply(bins_reop, col='Reop', axis=1)
df_surg.to_csv("surgid_allyears_expec_surgid_STSRCMM_div.csv")
| none | 1 | 2.90556 | 3 | |
earthpy/__init__.py | Dineshchandrasekar/earthpy | 0 | 6615795 | <gh_stars>0
"""Utility functions for the working with spatial data."""
from .io import EarthlabData # , list_files
from download import download
import json
import os.path as op
from . import utils, spatial
data = EarthlabData()
# This EPSG mapping converted from:
# https://github.com/jswhit/pyproj/blob/master/lib/pyproj/data/epsg
from pkg_resources import resource_string
epsg = json.loads(
resource_string("earthpy", "example-data/epsg.json").decode("utf-8")
)
| """Utility functions for the working with spatial data."""
from .io import EarthlabData # , list_files
from download import download
import json
import os.path as op
from . import utils, spatial
data = EarthlabData()
# This EPSG mapping converted from:
# https://github.com/jswhit/pyproj/blob/master/lib/pyproj/data/epsg
from pkg_resources import resource_string
epsg = json.loads(
resource_string("earthpy", "example-data/epsg.json").decode("utf-8")
) | en | 0.692447 | Utility functions for the working with spatial data. # , list_files # This EPSG mapping converted from: # https://github.com/jswhit/pyproj/blob/master/lib/pyproj/data/epsg | 2.854571 | 3 |
code/python/tools/_dataset_export_scenes.py | mikeroberts3000/ml-hypersim | 10 | 6615796 | import inspect
import itertools
import fnmatch
import os
from pymxs import runtime as rt
#
# can't import files from current dir by default, so duplicate path_utils here
#
import os, sys, inspect
def add_path_to_sys_path(path, mode, frame):
assert mode == "unchanged" or mode == "relative_to_cwd" or mode == "relative_to_current_source_dir"
if mode == "unchanged":
if path not in sys.path:
sys.path.insert(0,path)
if mode == "relative_to_cwd":
realpath = os.path.realpath(os.path.abspath(path))
if realpath not in sys.path:
sys.path.insert(0,realpath)
if mode == "relative_to_current_source_dir":
realpath = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(frame))[0],path)))
if realpath not in sys.path:
sys.path.insert(0,realpath)
def get_current_source_file_path(frame):
return os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(frame))[0])))
#
# define some useful utility functions
#
import MaxPlus
def _print(obj):
string = str(obj)
MaxPlus.Core.EvalMAXScript('logsystem.logEntry "' + string.replace('\\', '\\\\').replace('"', '\\"') + '" broadcast:true')
def _eval(string, silent=False):
if not silent:
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Executing MAXScript: " + string)
return MaxPlus.Core.EvalMAXScript(string)
#
# parse command-line args
#
for k in rt.maxops.mxsCmdLineArgs.keys:
_print('rt.maxops.mxsCmdLineArgs["' + k + '"] = ' + rt.maxops.mxsCmdLineArgs[k])
args_dataset_dir = rt.maxops.mxsCmdLineArgs[rt.name("dataset_dir")]
if rt.name("scene_names") in rt.maxops.mxsCmdLineArgs.keys:
args_scene_names = rt.maxops.mxsCmdLineArgs[rt.name("scene_names")]
else:
args_scene_names = None
assert os.path.exists(args_dataset_dir)
#
# parse dataset config
#
add_path_to_sys_path(args_dataset_dir, mode="relative_to_cwd", frame=inspect.currentframe())
import _dataset_config
_print("")
_print("")
_print("")
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Begin...")
_print("")
_print("")
_print("")
dataset_scenes_dir = os.path.join(args_dataset_dir, "scenes")
if args_scene_names is not None:
scenes = [ s for s in _dataset_config.scenes if fnmatch.fnmatch(s["name"], args_scene_names) ]
else:
scenes = _dataset_config.scenes
#
# disable VRay prompts
#
_eval('setVRaySilentMode()')
#
# export scenes
#
for s in scenes:
# generate file names
scene_name = s["name"]
scene_max_file = s["asset_file"] + ".max"
scene_dir = os.path.join(dataset_scenes_dir, scene_name)
max_dir = os.path.join(scene_dir, "_asset")
max_export_dir = os.path.join(scene_dir, "_asset_export")
max_file = os.path.join(max_dir, scene_max_file)
metadata_cameras_max_export_csv_file = os.path.join(max_export_dir, "metadata_cameras_asset_export.csv")
obj_file = os.path.join(max_export_dir, "scene.obj")
vrscene_file = os.path.join(max_export_dir, "scene.vrscene")
# create output dirs
if not os.path.exists(max_export_dir): os.makedirs(max_export_dir)
# loadMaxFile
retval = _eval('loadMaxFile @"' + max_file + '" useFileUnits:true')
if not retval.Get():
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Failed to load " + max_file)
assert False
# export cameras
with open(metadata_cameras_max_export_csv_file, "w") as f_cameras:
f_cameras.write("camera_name\n")
for ci in range(len(rt.cameras)):
camera = rt.cameras[ci]
if not "target" in camera.name.lower() and not "terget" in camera.name.lower():
camera_name = "cam_" + camera.name
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Exporting camera: " + camera_name)
f_cameras.write(camera_name + "\n")
camera_file = os.path.join(max_export_dir, camera_name + ".csv")
with open(camera_file, "w") as f_camera:
f_camera.write(
"rotation_world_from_obj_00,rotation_world_from_obj_01,rotation_world_from_obj_02," + \
"rotation_world_from_obj_10,rotation_world_from_obj_11,rotation_world_from_obj_12," + \
"rotation_world_from_obj_20,rotation_world_from_obj_21,rotation_world_from_obj_22," + \
"translation_world_from_obj_x,translation_world_from_obj_y,translation_world_from_obj_z\n")
for ti in range(rt.animationRange.start, rt.animationRange.end+1):
# Note that we iterate in column-major order because 3ds Max returns the transpose of the R_world_from_cam matrix,
# where R_world_from_cam satisfies the equation: p_world == R_world_from_cam*p_cam for the camera space point p_cam
# and the world space point p_world.
for c,r in itertools.product(range(1,4),range(1,4)):
retval = _eval("at time " + str(ti) + " cameras[" + str(ci+1) + "].transform[" + str(r) + "][" + str(c) + "]", silent=True).Get()
f_camera.write("%.20f,"%retval)
for c in range(1,4):
retval = _eval("at time " + str(ti) + " cameras[" + str(ci+1) + "].transform[" + str(4) + "][" + str(c) + "]", silent=True).Get()
if c in range(1,3):
sep = ","
else:
sep = "\n"
f_camera.write("%.20f%s"%(retval,sep))
# exportFile
_eval('exportFile @"' + obj_file + '" #noprompt')
# vrayExportRTScene
_eval('vrayExportRTScene @"' + vrscene_file + '"')
_print("")
_print("")
_print("")
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Finished.")
_print("")
_print("")
_print("")
| import inspect
import itertools
import fnmatch
import os
from pymxs import runtime as rt
#
# can't import files from current dir by default, so duplicate path_utils here
#
import os, sys, inspect
def add_path_to_sys_path(path, mode, frame):
assert mode == "unchanged" or mode == "relative_to_cwd" or mode == "relative_to_current_source_dir"
if mode == "unchanged":
if path not in sys.path:
sys.path.insert(0,path)
if mode == "relative_to_cwd":
realpath = os.path.realpath(os.path.abspath(path))
if realpath not in sys.path:
sys.path.insert(0,realpath)
if mode == "relative_to_current_source_dir":
realpath = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(frame))[0],path)))
if realpath not in sys.path:
sys.path.insert(0,realpath)
def get_current_source_file_path(frame):
return os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(frame))[0])))
#
# define some useful utility functions
#
import MaxPlus
def _print(obj):
string = str(obj)
MaxPlus.Core.EvalMAXScript('logsystem.logEntry "' + string.replace('\\', '\\\\').replace('"', '\\"') + '" broadcast:true')
def _eval(string, silent=False):
if not silent:
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Executing MAXScript: " + string)
return MaxPlus.Core.EvalMAXScript(string)
#
# parse command-line args
#
for k in rt.maxops.mxsCmdLineArgs.keys:
_print('rt.maxops.mxsCmdLineArgs["' + k + '"] = ' + rt.maxops.mxsCmdLineArgs[k])
args_dataset_dir = rt.maxops.mxsCmdLineArgs[rt.name("dataset_dir")]
if rt.name("scene_names") in rt.maxops.mxsCmdLineArgs.keys:
args_scene_names = rt.maxops.mxsCmdLineArgs[rt.name("scene_names")]
else:
args_scene_names = None
assert os.path.exists(args_dataset_dir)
#
# parse dataset config
#
add_path_to_sys_path(args_dataset_dir, mode="relative_to_cwd", frame=inspect.currentframe())
import _dataset_config
_print("")
_print("")
_print("")
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Begin...")
_print("")
_print("")
_print("")
dataset_scenes_dir = os.path.join(args_dataset_dir, "scenes")
if args_scene_names is not None:
scenes = [ s for s in _dataset_config.scenes if fnmatch.fnmatch(s["name"], args_scene_names) ]
else:
scenes = _dataset_config.scenes
#
# disable VRay prompts
#
_eval('setVRaySilentMode()')
#
# export scenes
#
for s in scenes:
# generate file names
scene_name = s["name"]
scene_max_file = s["asset_file"] + ".max"
scene_dir = os.path.join(dataset_scenes_dir, scene_name)
max_dir = os.path.join(scene_dir, "_asset")
max_export_dir = os.path.join(scene_dir, "_asset_export")
max_file = os.path.join(max_dir, scene_max_file)
metadata_cameras_max_export_csv_file = os.path.join(max_export_dir, "metadata_cameras_asset_export.csv")
obj_file = os.path.join(max_export_dir, "scene.obj")
vrscene_file = os.path.join(max_export_dir, "scene.vrscene")
# create output dirs
if not os.path.exists(max_export_dir): os.makedirs(max_export_dir)
# loadMaxFile
retval = _eval('loadMaxFile @"' + max_file + '" useFileUnits:true')
if not retval.Get():
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Failed to load " + max_file)
assert False
# export cameras
with open(metadata_cameras_max_export_csv_file, "w") as f_cameras:
f_cameras.write("camera_name\n")
for ci in range(len(rt.cameras)):
camera = rt.cameras[ci]
if not "target" in camera.name.lower() and not "terget" in camera.name.lower():
camera_name = "cam_" + camera.name
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Exporting camera: " + camera_name)
f_cameras.write(camera_name + "\n")
camera_file = os.path.join(max_export_dir, camera_name + ".csv")
with open(camera_file, "w") as f_camera:
f_camera.write(
"rotation_world_from_obj_00,rotation_world_from_obj_01,rotation_world_from_obj_02," + \
"rotation_world_from_obj_10,rotation_world_from_obj_11,rotation_world_from_obj_12," + \
"rotation_world_from_obj_20,rotation_world_from_obj_21,rotation_world_from_obj_22," + \
"translation_world_from_obj_x,translation_world_from_obj_y,translation_world_from_obj_z\n")
for ti in range(rt.animationRange.start, rt.animationRange.end+1):
# Note that we iterate in column-major order because 3ds Max returns the transpose of the R_world_from_cam matrix,
# where R_world_from_cam satisfies the equation: p_world == R_world_from_cam*p_cam for the camera space point p_cam
# and the world space point p_world.
for c,r in itertools.product(range(1,4),range(1,4)):
retval = _eval("at time " + str(ti) + " cameras[" + str(ci+1) + "].transform[" + str(r) + "][" + str(c) + "]", silent=True).Get()
f_camera.write("%.20f,"%retval)
for c in range(1,4):
retval = _eval("at time " + str(ti) + " cameras[" + str(ci+1) + "].transform[" + str(4) + "][" + str(c) + "]", silent=True).Get()
if c in range(1,3):
sep = ","
else:
sep = "\n"
f_camera.write("%.20f%s"%(retval,sep))
# exportFile
_eval('exportFile @"' + obj_file + '" #noprompt')
# vrayExportRTScene
_eval('vrayExportRTScene @"' + vrscene_file + '"')
_print("")
_print("")
_print("")
_print("[HYPERSIM: _DATASET_EXPORT_SCENES] Finished.")
_print("")
_print("")
_print("")
| en | 0.570812 | # # can't import files from current dir by default, so duplicate path_utils here # # # define some useful utility functions # # # parse command-line args # # # parse dataset config # # # disable VRay prompts # # # export scenes # # generate file names # create output dirs # loadMaxFile # export cameras # Note that we iterate in column-major order because 3ds Max returns the transpose of the R_world_from_cam matrix, # where R_world_from_cam satisfies the equation: p_world == R_world_from_cam*p_cam for the camera space point p_cam # and the world space point p_world. # exportFile #noprompt') # vrayExportRTScene | 2.211126 | 2 |
external/lemonade/dist/lemonade/struct.py | almartin82/bayeslite | 964 | 6615797 | '''
Principal data structures for the LEMON parser generator.
'''
from ccruft import struct
# Symbols (terminals and nonterminals) of the grammar are stored in
# the following:
( # type
TERMINAL,
NONTERMINAL,
MULTITERMINAL,
) = range(3)
( # assoc
LEFT,
RIGHT,
NONE,
UNK,
) = range(4)
symbol = struct(
'symbol',
(
'name', # Name of the symbol
'index', # Index number for this symbol
'type', # Symbols are all either TERMINALS or NTs
'rule', # Linked list of rules of this (if an NT)
'fallback', # fallback token in case this token doesn't parse
'prec', # Precedence if defined (-1 otherwise)
'assoc', # Associativity if predecence is defined
'firstset', # First-set for all rules of this symbol
'_lambda', # True if NT and can generate an empty string
'useCnt', # Number of times used
# The following fields are used by MULTITERMINALs only
'nsubsym', # Number of constituent symbols in the MULTI
'subsym', # Array of constituent symbols
)
)
# Each production rule in the grammar is stored in the following
# structure.
rule = struct(
'rule',
(
'lhs', # Left-hand side of the rule
'lhsalias', # Alias for the LHS (NULL if none)
'lhsStart', # True if left-hand side is the start symbol
'ruleline', # Line number for the rule
'nrhs', # Number of RHS symbols
'rhs', # The RHS symbols
'rhsalias', # An alias for each RHS symbol (NULL if none)
'line', # Line number at which code begins
'code', # The code executed when this rule is reduced
'precsym', # Precedence symbol for this rule
'index', # An index number for this rule
'canReduce', # True if this rule is ever reduced
'nextlhs', # Next rule with the same LHS
'next', # Next rule in the global list
)
)
# A configuration is a production rule of the grammar together with a
# mark (dot) showing how much of that rule has been processed so far.
# Configurations also contain a follow-set which is a list of terminal
# symbols which are allowed to immediately follow the end of the rule.
# Every configuration is recorded as an instance of the following:
( # status
COMPLETE,
INCOMPLETE
) = range(2)
config = struct(
'config',
(
'rp', # The rule upon which the configuration is based
'dot', # The parse point
'fws', # Follow-set for this configuration only
'fplp', # Follow-set forward propagation links
'bplp', # Follow-set backwards propagation links
'stp', # Pointer to state which contains this
'status', # The status is used during followset and shift computations
'next', # Next configuration in the state
'bp', # The next basis configuration
)
)
# Every shift or reduce operation is stored as one of the following
( # type
SHIFT,
ACCEPT,
REDUCE,
ERROR,
SSCONFLICT, # A shift/shift conflict
SRCONFLICT, # Was a reduce, but part of a conflict
RRCONFLICT, # Was a reduce, but part of a conflict
SH_RESOLVED, # Was a shift. Precedence resolved conflict
RD_RESOLVED, # Was reduce. Precedence resolved conflict
NOT_USED, # Deleted by compression
) = range(10)
action = struct(
'action',
(
'sp', # The look-ahead symbol
'type',
'stp', # The new state, if a shift
'rp', # The rule, if a reduce
'next', # Next action for this state
'collide', # Next action with the same hash
)
)
action.x = property(lambda self: self) # union
# Each state of the generated parser's finite state machine is encoded
# as an instance of the following structure.
state = struct(
'state',
(
'bp', # The basis configurations for this state
'cfp', # All configurations in this set
'statenum', # Sequencial number for this state
'ap', # Array of actions for this state
'nTknAct', 'nNtAct', # Number of actions on terminals and nonterminals
'iTknOfst', 'iNtOfst', # yy_action[] offset for terminals and nonterms
'iDflt', # Default action
)
)
NO_OFFSET = -2147483647
# A followset propagation link indicates that the contents of one
# configuration followset should be propagated to another whenever the
# first changes.
plink = struct(
'plink',
(
'cfp', # The configuration to which linked
'next', # The next propagate link
)
)
# The state vector for the entire parser generator is recorded as
# follows. (LEMON uses no global variables and makes little use of
# static variables. Fields in the following structure can be thought
# of as begin global variables in the program.)
lemon = struct(
'lemon',
(
'sorted', # Table of states sorted by state number
'rule', # List of all rules
'nstate', # Number of states
'nrule', # Number of rules
'nsymbol', # Number of terminal and nonterminal symbols
'nterminal', # Number of terminal symbols
'symbols', # Sorted array of pointers to symbols
'errorcnt', # Number of errors
'errsym', # The error symbol
'wildcard', # Token that matches anything
'name', # Name of the generated parser
'start', # Name of the start symbol for the grammar
'filename', # Name of the input file
'outname', # Name of the current output file
'tokenprefix', # A prefix added to token names in the .h file
'nconflict', # Number of parsing conflicts
'tablesize', # Size of the parse tables
'basisflag', # Pr'only basis configurations
'has_fallback', # True if any %fallback is seen in the grammer
'argv0', # Name of the program
)
)
| '''
Principal data structures for the LEMON parser generator.
'''
from ccruft import struct
# Symbols (terminals and nonterminals) of the grammar are stored in
# the following:
( # type
TERMINAL,
NONTERMINAL,
MULTITERMINAL,
) = range(3)
( # assoc
LEFT,
RIGHT,
NONE,
UNK,
) = range(4)
symbol = struct(
'symbol',
(
'name', # Name of the symbol
'index', # Index number for this symbol
'type', # Symbols are all either TERMINALS or NTs
'rule', # Linked list of rules of this (if an NT)
'fallback', # fallback token in case this token doesn't parse
'prec', # Precedence if defined (-1 otherwise)
'assoc', # Associativity if predecence is defined
'firstset', # First-set for all rules of this symbol
'_lambda', # True if NT and can generate an empty string
'useCnt', # Number of times used
# The following fields are used by MULTITERMINALs only
'nsubsym', # Number of constituent symbols in the MULTI
'subsym', # Array of constituent symbols
)
)
# Each production rule in the grammar is stored in the following
# structure.
rule = struct(
'rule',
(
'lhs', # Left-hand side of the rule
'lhsalias', # Alias for the LHS (NULL if none)
'lhsStart', # True if left-hand side is the start symbol
'ruleline', # Line number for the rule
'nrhs', # Number of RHS symbols
'rhs', # The RHS symbols
'rhsalias', # An alias for each RHS symbol (NULL if none)
'line', # Line number at which code begins
'code', # The code executed when this rule is reduced
'precsym', # Precedence symbol for this rule
'index', # An index number for this rule
'canReduce', # True if this rule is ever reduced
'nextlhs', # Next rule with the same LHS
'next', # Next rule in the global list
)
)
# A configuration is a production rule of the grammar together with a
# mark (dot) showing how much of that rule has been processed so far.
# Configurations also contain a follow-set which is a list of terminal
# symbols which are allowed to immediately follow the end of the rule.
# Every configuration is recorded as an instance of the following:
( # status
COMPLETE,
INCOMPLETE
) = range(2)
config = struct(
'config',
(
'rp', # The rule upon which the configuration is based
'dot', # The parse point
'fws', # Follow-set for this configuration only
'fplp', # Follow-set forward propagation links
'bplp', # Follow-set backwards propagation links
'stp', # Pointer to state which contains this
'status', # The status is used during followset and shift computations
'next', # Next configuration in the state
'bp', # The next basis configuration
)
)
# Every shift or reduce operation is stored as one of the following
( # type
SHIFT,
ACCEPT,
REDUCE,
ERROR,
SSCONFLICT, # A shift/shift conflict
SRCONFLICT, # Was a reduce, but part of a conflict
RRCONFLICT, # Was a reduce, but part of a conflict
SH_RESOLVED, # Was a shift. Precedence resolved conflict
RD_RESOLVED, # Was reduce. Precedence resolved conflict
NOT_USED, # Deleted by compression
) = range(10)
action = struct(
'action',
(
'sp', # The look-ahead symbol
'type',
'stp', # The new state, if a shift
'rp', # The rule, if a reduce
'next', # Next action for this state
'collide', # Next action with the same hash
)
)
action.x = property(lambda self: self) # union
# Each state of the generated parser's finite state machine is encoded
# as an instance of the following structure.
state = struct(
'state',
(
'bp', # The basis configurations for this state
'cfp', # All configurations in this set
'statenum', # Sequencial number for this state
'ap', # Array of actions for this state
'nTknAct', 'nNtAct', # Number of actions on terminals and nonterminals
'iTknOfst', 'iNtOfst', # yy_action[] offset for terminals and nonterms
'iDflt', # Default action
)
)
NO_OFFSET = -2147483647
# A followset propagation link indicates that the contents of one
# configuration followset should be propagated to another whenever the
# first changes.
plink = struct(
'plink',
(
'cfp', # The configuration to which linked
'next', # The next propagate link
)
)
# The state vector for the entire parser generator is recorded as
# follows. (LEMON uses no global variables and makes little use of
# static variables. Fields in the following structure can be thought
# of as begin global variables in the program.)
lemon = struct(
'lemon',
(
'sorted', # Table of states sorted by state number
'rule', # List of all rules
'nstate', # Number of states
'nrule', # Number of rules
'nsymbol', # Number of terminal and nonterminal symbols
'nterminal', # Number of terminal symbols
'symbols', # Sorted array of pointers to symbols
'errorcnt', # Number of errors
'errsym', # The error symbol
'wildcard', # Token that matches anything
'name', # Name of the generated parser
'start', # Name of the start symbol for the grammar
'filename', # Name of the input file
'outname', # Name of the current output file
'tokenprefix', # A prefix added to token names in the .h file
'nconflict', # Number of parsing conflicts
'tablesize', # Size of the parse tables
'basisflag', # Pr'only basis configurations
'has_fallback', # True if any %fallback is seen in the grammer
'argv0', # Name of the program
)
)
| en | 0.872026 | Principal data structures for the LEMON parser generator. # Symbols (terminals and nonterminals) of the grammar are stored in # the following: # type # assoc # Name of the symbol # Index number for this symbol # Symbols are all either TERMINALS or NTs # Linked list of rules of this (if an NT) # fallback token in case this token doesn't parse # Precedence if defined (-1 otherwise) # Associativity if predecence is defined # First-set for all rules of this symbol # True if NT and can generate an empty string # Number of times used # The following fields are used by MULTITERMINALs only # Number of constituent symbols in the MULTI # Array of constituent symbols # Each production rule in the grammar is stored in the following # structure. # Left-hand side of the rule # Alias for the LHS (NULL if none) # True if left-hand side is the start symbol # Line number for the rule # Number of RHS symbols # The RHS symbols # An alias for each RHS symbol (NULL if none) # Line number at which code begins # The code executed when this rule is reduced # Precedence symbol for this rule # An index number for this rule # True if this rule is ever reduced # Next rule with the same LHS # Next rule in the global list # A configuration is a production rule of the grammar together with a # mark (dot) showing how much of that rule has been processed so far. # Configurations also contain a follow-set which is a list of terminal # symbols which are allowed to immediately follow the end of the rule. 
# Every configuration is recorded as an instance of the following: # status # The rule upon which the configuration is based # The parse point # Follow-set for this configuration only # Follow-set forward propagation links # Follow-set backwards propagation links # Pointer to state which contains this # The status is used during followset and shift computations # Next configuration in the state # The next basis configuration # Every shift or reduce operation is stored as one of the following # type # A shift/shift conflict # Was a reduce, but part of a conflict # Was a reduce, but part of a conflict # Was a shift. Precedence resolved conflict # Was reduce. Precedence resolved conflict # Deleted by compression # The look-ahead symbol # The new state, if a shift # The rule, if a reduce # Next action for this state # Next action with the same hash # union # Each state of the generated parser's finite state machine is encoded # as an instance of the following structure. # The basis configurations for this state # All configurations in this set # Sequencial number for this state # Array of actions for this state # Number of actions on terminals and nonterminals # yy_action[] offset for terminals and nonterms # Default action # A followset propagation link indicates that the contents of one # configuration followset should be propagated to another whenever the # first changes. # The configuration to which linked # The next propagate link # The state vector for the entire parser generator is recorded as # follows. (LEMON uses no global variables and makes little use of # static variables. Fields in the following structure can be thought # of as begin global variables in the program.) 
# Table of states sorted by state number # List of all rules # Number of states # Number of rules # Number of terminal and nonterminal symbols # Number of terminal symbols # Sorted array of pointers to symbols # Number of errors # The error symbol # Token that matches anything # Name of the generated parser # Name of the start symbol for the grammar # Name of the input file # Name of the current output file # A prefix added to token names in the .h file # Number of parsing conflicts # Size of the parse tables # Pr'only basis configurations # True if any %fallback is seen in the grammer # Name of the program | 2.872163 | 3 |
projects/src/main/python/CodeJam/Y12R5P1/EvgeniSergeev/generated_py_36e0654140044b8ab71cef0a1176fe9e.py | DynamicCodeSearch/CodeSeer | 5 | 6615798 | import sys
sys.path.append('/home/george2/Raise/ProgramRepair/CodeSeer/projects/src/main/python')
from CodeJam.Y12R5P1.EvgeniSergeev.game import *
def func_be94cf6f94394e8087c938b8745b1bc0(multiplier, pr):
re += multiplier * running
running *= pr
return re
def func_0fe080b18459436d80aea9a4ef31d0c7(multiplier, pr):
re += multiplier * running
running *= pr
return running
def func_e871aeffa1a74f8d803223add8f1beb7(pr):
running *= pr
multiplier += 1
return running
def func_2fb17cc1afc14af482c887f417b6f927(pr):
running *= pr
multiplier += 1
return multiplier
def func_e09e8bb111514b029eeaa884f67b4b47(pr):
re += multiplier * running
running *= pr
multiplier += 1
return re
def func_19396efc6be14a019fd124d583029f2d(pr):
re += multiplier * running
running *= pr
multiplier += 1
return running
def func_945ce5f8755a43b09bd83576de576a34(pr):
re += multiplier * running
running *= pr
multiplier += 1
return multiplier
def func_6bb1eec3b71a4a19b8910cfaa36eebab(p):
pr = p / 100.0
re = 1
return pr
def func_3cb212d154a640a4adf014959a662ceb(p):
pr = p / 100.0
re = 1
return re
def func_7fe6e233b0bb48f881d84b04ea618511(pr):
re = 1
running = pr
return running
def func_1c03b783ccaf4bb180ef5a7d00cb6366(pr):
re = 1
running = pr
return re
def func_0c600e5d7dbb4732a60099732603afa7(pr):
running = pr
multiplier = 2
return multiplier
def func_f9a66213f5944e38982acebe98cd461e(pr):
running = pr
multiplier = 2
return running
def func_b2ce794c66f248ef88dc6236f47bd12f(pr):
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return multiplier
def func_a10211f34b1342f0a413920349db467c(pr):
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return running
def func_5f57a1c38e5d4ccd8790b635a2ca39b8(pr):
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return re
def func_8fc0629e68064d30b3dab423bd284b1f(pr):
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return (1 - pr) * re
def func_b7fefc9e29e74c9f9a5f9240aaaf9f0e(p):
pr = p / 100.0
re = 1
running = pr
return re
def func_307d36495a62413cb9c2f92ab5768db3(p):
pr = p / 100.0
re = 1
running = pr
return running
def func_45d839ec0e7245d59ed6359beb420907(p):
pr = p / 100.0
re = 1
running = pr
return pr
def func_bf71df369cd4409abc7f6c28888f5227(pr):
re = 1
running = pr
multiplier = 2
return multiplier
def func_1f66b0c3efb94d2ca3b573f631d97413(pr):
re = 1
running = pr
multiplier = 2
return running
def func_724f37fc53154ad1aa0ec206c4f1ce62(pr):
re = 1
running = pr
multiplier = 2
return re
def func_3dccec556f874d25bd76be56be698661(pr):
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return re
def func_f4e2acfbf84d40c08c60693b5c7aed70(pr):
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return multiplier
def func_b1429485e2084d4e9bb0636af527b939(pr):
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return running
def func_45b7f6c6d63f46b39d3a7865fec49381(pr):
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return (1 - pr) * re
def func_8a76a3e3589f40bba7387d951b9dbdf5(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
return pr
def func_41000a635d0a48e3baeaf1c7b7ae7042(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
return running
def func_8cf3c1f259674f6db1f5cb61a9b6ec61(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
return re
def func_92365e002e4f4ba4a93fd3ad5af77fdf(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
return multiplier
def func_e85446a1626f449cbd36c7073457de45(pr):
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return re
def func_36416facc37543b4bddda5c01e6256a1(pr):
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return running
def func_6ae73895c6ed4ef194fe5aa350dbd6e5(pr):
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return multiplier
def func_faddd0bd29024f9fac1f4dfcde5dfc8c(pr):
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return (1 - pr) * re
def func_e81a7558c635454a8ea4469f29f7cb1d(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return re
def func_2e4d86e7e96347a995480ec66a16f80d(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return running
def func_c2ff0c24ae524c7291f873124cdd91a7(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return multiplier
def func_8b11fc9be15e441a9963fa36c9d8aa16(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return pr
def func_2961f6f24dd9441385c8778e441b856d(pr):
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return (1 - pr) * re
def func_6382c41eaadb44b9b3792f95fdf68fc5(p):
pr = p / 100.0
re = 1
running = pr
multiplier = 2
while multiplier < 10000 or multiplier * running > 1e-09:
re += multiplier * running
running *= pr
multiplier += 1
return (1 - pr) * re
def func_5190849f12b94a7697663ff9f0a2f90e(seq, best):
in1 = seq[best][0]
L1 = seq[best][1]
return in1
def func_7ad34969be9f45a6b145a5bb5dd3b7cd(seq, best):
in1 = seq[best][0]
L1 = seq[best][1]
return L1
def func_4c2d27b869e746c1a7abe70865899cff(seq, best):
L1 = seq[best][1]
E1 = seq[best][2]
return E1
def func_7bc36c0ef861416c87e78f209f6de137(seq, best):
L1 = seq[best][1]
E1 = seq[best][2]
return L1
def func_8a399102e0da4e45ae211fca8ce840ea(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
return E1
def func_294a2ed0a9bc4f7faa03a1bc763c00cb(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
return in2
def func_40fa675929b14ea8ad764991041b6059(seq, s):
in2 = seq[s][0]
L2 = seq[s][1]
return L2
def func_51edaf01ac7d4a1c93a6401230abb177(seq, s):
in2 = seq[s][0]
L2 = seq[s][1]
return in2
def func_6d15ded96670402cacd1bafb71283ea0(seq, s):
L2 = seq[s][1]
E2 = seq[s][2]
return L2
def func_06b5df6a9bdb4d2b939cbb28b02cc628(seq, s):
L2 = seq[s][1]
E2 = seq[s][2]
return E2
def func_b5af1a59a31d44eaad4adc90b1630e2f(L1, seq, E1, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return extra_if_1_goes_first
def func_526659655a17404a90234229f1a3ceac(L1, seq, E1, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return E2
def func_01e8dc1e5a924a258ea5c6e40bf4b08d(L1, E1, E2, L2):
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_2_goes
def func_14bba4c592c84203a10154655d8d5acc(L1, E1, E2, L2):
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_1_goes_first
def func_54f3d8194f9b4bdd9652cf8729812004(in2, E1, E2,
extra_if_1_goes_first, L2, in1, s):
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_fefb3b6ede9e4457980fa08f71880e35(in2, E1, E2,
extra_if_1_goes_first, L2, in1, s):
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return best
def func_41eb282a305d48ac93b5197d546d808a(seq, best):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
return E1
def func_cb5bf7244dd4431581eb817e4441ddb3(seq, best):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
return L1
def func_a9ca2f941fb643ea8a6c630bce274390(seq, best):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
return in1
def func_7f8ddf43a1de4772a8fecd243da74c38(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return E1
def func_45832876482b4db79a1372b88f84dc1a(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return in2
def func_4b4d35ecc0654d0e8e7f9307064d71da(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return L1
def func_6b7c0da84a27407e80214c334d6ebd1d(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return in2
def func_71fa2a4b56b34b8680bd281461c82243(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return E1
def func_64080b5f3a0e47078d1668c962094b0c(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return L2
def func_5cc357461091433db8ccf379a68b9a22(seq, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return L2
def func_3bb2147e6c53426daded3d90af060b15(seq, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E2
def func_6172212f978a4485bee47f222021393c(seq, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return in2
def func_8c2b91bc287c474e9e88ea0bf2345f41(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return L2
def func_e2655fef0a2c4d97a98991d60b45fb78(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return extra_if_1_goes_first
def func_db170ee292db42899ac62782d9a3d7a4(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return E2
def func_d25470dc81ea4c038890e1bc0333b2f3(L1, seq, E1, L2, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_2_goes
def func_93a00e6abd354d42b52c2d921bf65a56(L1, seq, E1, L2, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_1_goes_first
def func_4736f46267b141feb2ac58a439ce6198(L1, seq, E1, L2, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E2
def func_64d9a02764d544c9960c645ef4786821(L1, in2, E1, E2, L2, in1, s):
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_747aaf06f553434e9c96fb1e31d8c3cd(L1, in2, E1, E2, L2, in1, s):
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return best
def func_db95ae82d9db4886be299a5391fa8d28(L1, in2, E1, E2, L2, in1, s):
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_1_goes_first
def func_9ffb30ddd1414e5ea28f632391042c29(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return E1
def func_4f8aa81a51d64e748260e627ab9adb18(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return in2
def func_000c80b47d234b31af08c99ba52fea89(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return in1
def func_8f23da798fb84a438179aa446e49ede6(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
return L1
def func_3a548939a4fb4cd69e0a035b582d92b7(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return in2
def func_4da40f33b9c8454f8b0f7f9617469700(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return L2
def func_2a62dc7596cf4c04bbfb32df1315590f(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return E1
def func_499db0ecba754b34b6cb0dcf888cd935(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return L1
def func_25e4b2d682aa45e3a881cd14b71dc31a(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E2
def func_d6ee283d7e964f12a3f4caa4f4d475de(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E1
def func_b9a5badb8de44052ba02dcd82e8704c9(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return L2
def func_0c24cb8d4449422cadd5f0908946de9c(seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return in2
def func_eb8b4d79f6ec41cdb23ce6ce1e298321(L1, seq, E1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return extra_if_1_goes_first
def func_cf9433b02353492db959dfcba9ad326f(L1, seq, E1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return in2
def func_19441914f04b42c9a4a467f51ba900f3(L1, seq, E1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return E2
def func_97c0c7df32db4c57963585edd71f4254(L1, seq, E1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return L2
def func_9cc4f5132e604888b25a9c88cea71536(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_2_goes
def func_75bc30a278144eb4bc9a4f2fe0656153(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E2
def func_7467b5b40cb945cab9c1dbdf83d96d66(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return L2
def func_60f19fd1fa184e04ab269425d9e0c4b6(L1, seq, E1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_1_goes_first
def func_306a3b1594b64af7a86ac671e9cd135b(L1, seq, in2, E1, L2, in1, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_61ad1ba4c1a647d7b975342527c23020(L1, seq, in2, E1, L2, in1, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_1_goes_first
def func_82369c162d6747d3a233e89d9df9a4e7(L1, seq, in2, E1, L2, in1, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return best
def func_2af22ebbcd264940a6d73317b72510e7(L1, seq, in2, E1, L2, in1, s):
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E2
def func_26662626a5584d1fa344f71fdbc59841(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return L2
def func_42ff382c43d64534ad7bec2ac37c88d1(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return E1
def func_780ec5d57c9f4336bcac192dc7a3f325(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return in2
def func_b954a1e9ddda4e0789469ae0298a1d30(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return in1
def func_401c2ac7a24e4227b248aa660653de09(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
return L1
def func_beefbad562264f0e8e7521f6cb2648e1(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return in2
def func_e7e88e50f81842778b3e2c691e8a9a88(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E2
def func_a6745a0d177f401ba4bdcf9e64a5325f(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return L2
def func_fb87b0374dd747a8a6e8acfb2d1e5574(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E1
def func_7017fb4e399c43bbbc267eed23f8c874(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return L1
def func_0ec8388b703740f38ddc70f576052fbe(L1, seq, best, s):
    """Probe both entries, compute (E2 - 1) * E1 * L1; return seq[s][0]."""
    e_best = seq[best][2]
    _s0, _s1, e_cand = seq[s][0], seq[s][1], seq[s][2]
    _cost_first = (e_cand - 1) * e_best * L1
    return _s0
def func_6d8056da808d4db1a20c03c5e2b85b84(L1, seq, best, s):
    """Same probing and cost computation; return seq[best][2]."""
    e_best = seq[best][2]
    _s0, _s1, e_cand = seq[s][0], seq[s][1], seq[s][2]
    _cost_first = (e_cand - 1) * e_best * L1
    return e_best
def func_c7a88a4518b3463c954381baec02ae72(L1, seq, best, s):
    """Same probing and cost computation; return seq[s][2]."""
    e_best = seq[best][2]
    _s0, _s1, e_cand = seq[s][0], seq[s][1], seq[s][2]
    _cost_first = (e_cand - 1) * e_best * L1
    return e_cand
def func_beff2c329f85478db41b1cecaeda0130(L1, seq, best, s):
    """Same probing; return the cost (E2 - 1) * E1 * L1 of letting entry best go first."""
    e_best = seq[best][2]
    _s0, _s1, e_cand = seq[s][0], seq[s][1], seq[s][2]
    return (e_cand - 1) * e_best * L1
def func_e1a9fb19f3aa4fe7b40f0afbca2623ca(L1, seq, best, s):
    """Same probing and cost computation; return seq[s][1]."""
    e_best = seq[best][2]
    _s0, _s1, e_cand = seq[s][0], seq[s][1], seq[s][2]
    _cost_first = (e_cand - 1) * e_best * L1
    return _s1
def func_a462165d26584dfd919389ace5ff3bd7(L1, seq, E1, s):
    """Probe seq[s][0:3], compute both ordering costs; return seq[s][0]."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (cand_E - 1) * E1 * L1
    _cost2 = (E1 - 1) * cand_E * cand_L
    return cand_in
def func_551ec3d6423548adb1a10fe8caa6667b(L1, seq, E1, s):
    """Same probing; return the first ordering cost (E2 - 1) * E1 * L1."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    _cost2 = (E1 - 1) * cand_E * cand_L
    return cost1
def func_4ae7d9b8ba9c48f59a064dff066ee487(L1, seq, E1, s):
    """Same probing and cost computation; return seq[s][1]."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (cand_E - 1) * E1 * L1
    _cost2 = (E1 - 1) * cand_E * cand_L
    return cand_L
def func_b32592bec5e24343a812278ce51aee52(L1, seq, E1, s):
    """Same probing and cost computation; return seq[s][2]."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (cand_E - 1) * E1 * L1
    _cost2 = (E1 - 1) * cand_E * cand_L
    return cand_E
def func_633d6fda53294db1b2963fa993027b98(L1, seq, E1, s):
    """Same probing; return the second ordering cost (E1 - 1) * E2 * L2."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (cand_E - 1) * E1 * L1
    return (E1 - 1) * cand_E * cand_L
def func_8bc346670d6d4c0da556c58fe38a26c2(L1, seq, in2, E1, in1, s):
    """Evaluate both ordering costs and the tie-break comparison; return the second cost.

    The tie-break selection (`best = s`) is a dead store kept from the original.
    """
    cand_L, cand_E = seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if in2 < in1:
            best = s
    elif cost2 < cost1:
        best = s
    return cost2
def func_74bd1f2178db4480a53bab2cbdce6b85(L1, seq, in2, E1, in1, s):
    """Compare the two ordering costs for seq[s] and return the selected index.

    NOTE(review): `best` is assigned only inside the conditional branches; when
    neither branch fires, the final `return best` raises UnboundLocalError.
    This looks like an auto-generated slice of a larger selection loop where
    `best` pre-existed; left unchanged because the intended fallback value
    cannot be inferred from this view.
    """
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_7a23c47b8849436dbb5b29d10ebf5047(L1, seq, in2, E1, in1, s):
    """Evaluate both ordering costs and the tie-break (selection discarded); return seq[s][1]."""
    cand_L, cand_E = seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if in2 < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cand_L
def func_08963a8e39b0464dbbca6eb198f537a7(L1, seq, in2, E1, in1, s):
    """Same evaluation; return seq[s][2]."""
    cand_L, cand_E = seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if in2 < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cand_E
def func_b30eb9004d214ba39dd34b37c400b833(L1, seq, in2, E1, in1, s):
    """Same evaluation; return the first ordering cost (E2 - 1) * E1 * L1."""
    cand_L, cand_E = seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if in2 < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cost1
def func_03d967dc08f24482af096d8c76504f30(seq, best, s):
    """Probe all six fields of seq[best] and seq[s]; return seq[s][1]."""
    _b = seq[best][0], seq[best][1], seq[best][2]
    _c = seq[s][0], seq[s][1], seq[s][2]
    return _c[1]
def func_1cb6c636ed8a4bc59ff16248b5e81f70(seq, best, s):
    """Probe the same fields; return seq[best][2]."""
    _b = seq[best][0], seq[best][1], seq[best][2]
    _c = seq[s][0], seq[s][1], seq[s][2]
    return _b[2]
def func_18eb1dc434c748098917acc592b4380f(seq, best, s):
    """Probe the same fields; return seq[s][0]."""
    _b = seq[best][0], seq[best][1], seq[best][2]
    _c = seq[s][0], seq[s][1], seq[s][2]
    return _c[0]
def func_78feaee1545a4c2c837b2c3338cd53ca(seq, best, s):
    """Probe the same fields; return seq[best][1]."""
    _b = seq[best][0], seq[best][1], seq[best][2]
    _c = seq[s][0], seq[s][1], seq[s][2]
    return _b[1]
def func_5721438a758d4b6d977606195ecaa654(seq, best, s):
    """Probe the same fields; return seq[best][0]."""
    _b = seq[best][0], seq[best][1], seq[best][2]
    _c = seq[s][0], seq[s][1], seq[s][2]
    return _b[0]
def func_ed263f70f8ec41fd91da23fc12fd6b90(seq, best, s):
    """Probe the same fields; return seq[s][2]."""
    _b = seq[best][0], seq[best][1], seq[best][2]
    _c = seq[s][0], seq[s][1], seq[s][2]
    return _c[2]
def func_0c2335400863405da6593c8e4efc8e98(seq, best, s):
    """Probe both entries and compute (E2 - 1) * E1 * L1; return seq[s][2]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return c_E
def func_d03a3f0ed76a4598b67f0ebad074761d(seq, best, s):
    """Same probing and cost computation; return seq[best][1]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return b_L
def func_f9b1010f664c46169e95ce0c3553c05a(seq, best, s):
    """Same probing and cost computation; return seq[best][2]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return b_E
def func_775bd3c9a27d4ff98580dd0d11cadde5(seq, best, s):
    """Same probing; return the cost (E2 - 1) * E1 * L1."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    return (c_E - 1) * b_E * b_L
def func_cba28b78cdb24ee9806cc031ed172552(seq, best, s):
    """Same probing and cost computation; return seq[s][1]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return c_L
def func_676b1a64f7174b53beb60562ef1e6e45(seq, best, s):
    """Same probing and cost computation; return seq[s][0]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return c_in
def func_e0caa257f46046fb8e4fb24888e83539(L1, seq, best, s):
    """Probe both entries, compute both ordering costs; return seq[best][2]."""
    b_E = seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * L1
    _cost2 = (b_E - 1) * c_E * c_L
    return b_E
def func_ca98bf527a954be7b11def3e268570e6(L1, seq, best, s):
    """Same probing; return the second ordering cost (E1 - 1) * E2 * L2."""
    b_E = seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * L1
    return (b_E - 1) * c_E * c_L
def func_fad10c0470b34aa7bfe4c9fcadd08438(L1, seq, best, s):
    """Same probing and cost computation; return seq[s][0]."""
    b_E = seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * L1
    _cost2 = (b_E - 1) * c_E * c_L
    return c_in
def func_d4ed34f0fea343beb0f94e82b78ab428(L1, seq, best, s):
    """Same probing and cost computation; return seq[s][2]."""
    b_E = seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * L1
    _cost2 = (b_E - 1) * c_E * c_L
    return c_E
def func_baae37bebdd943e48e0c073c6f5bdcde(L1, seq, best, s):
    """Same probing and cost computation; return seq[s][1]."""
    b_E = seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * L1
    _cost2 = (b_E - 1) * c_E * c_L
    return c_L
def func_40e1a5f9c2b44936bfd31548367e4b15(L1, seq, best, s):
    """Same probing; return the first ordering cost (E2 - 1) * E1 * L1."""
    b_E = seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (c_E - 1) * b_E * L1
    _cost2 = (b_E - 1) * c_E * c_L
    return cost1
def func_2b6343bf37a846669e903838a27527fb(L1, seq, E1, in1, s):
    """Evaluate both ordering costs and the tie-break (selection discarded); return seq[s][2]."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if cand_in < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cand_E
def func_c24918d6b4904cc48a0c7d64823143f0(L1, seq, E1, in1, s):
    """Same evaluation; return seq[s][0]."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if cand_in < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cand_in
def func_434575e2f676436bbb13a6396eefd999(L1, seq, E1, in1, s):
    """Same evaluation; return seq[s][1]."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if cand_in < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cand_L
def func_2f9820ce353546228d9a4c82934a2ed1(L1, seq, E1, in1, s):
    """Same evaluation; return the first ordering cost (E2 - 1) * E1 * L1."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if cand_in < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cost1
def func_b0aecc598af04f3395f5ed883ff50f2d(L1, seq, E1, in1, s):
    """Same evaluation; return the second ordering cost (E1 - 1) * E2 * L2."""
    cand_in, cand_L, cand_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (cand_E - 1) * E1 * L1
    cost2 = (E1 - 1) * cand_E * cand_L
    if abs(cost1 - cost2) < 1e-09:
        if cand_in < in1:
            best = s  # dead store, kept from the original
    elif cost2 < cost1:
        best = s
    return cost2
def func_c709e7a26873426ab7a509abb73d5962(L1, seq, E1, in1, s):
    """Compare the two ordering costs for seq[s] and return the selected index.

    NOTE(review): `best` is assigned only inside the conditional branches; when
    neither branch fires, `return best` raises UnboundLocalError.  Looks like
    an auto-generated slice of a selection loop where `best` pre-existed; left
    unchanged because the intended fallback value cannot be inferred here.
    """
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_276365d95a0942de848ec47239aac9f3(seq, best, s):
    """Probe all six fields; return the cost (E2 - 1) * E1 * L1."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    return (c_E - 1) * b_E * b_L
def func_31b4bb651bd44a198cd507ba0162691b(seq, best, s):
    """Probe all six fields and the cost; return seq[s][0]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return c_in
def func_dd32d80551fe4b84a555324952b881bf(seq, best, s):
    """Probe all six fields and the cost; return seq[best][1]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return b_L
def func_e52679317d3746cbba41fb4abb0cc7a5(seq, best, s):
    """Probe all six fields and the cost; return seq[best][0]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return b_in
def func_0a90a4ecaabe489293e0f9f633ee1a4b(seq, best, s):
    """Probe all six fields and the cost; return seq[best][2]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return b_E
def func_05a8a0c06acf4aa5a0ed9f7ed58d3157(seq, best, s):
    """Probe all six fields and the cost; return seq[s][1]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return c_L
def func_d20be7e881044963a450d0bc92a293d4(seq, best, s):
    """Probe all six fields and the cost; return seq[s][2]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return c_E
def func_b07d445cfa784c638be4aa4d63e89835(seq, best, s):
    """Probe both entries, compute both ordering costs; return seq[s][0]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return c_in
def func_bce732822e334ec888da4d09d8a77532(seq, best, s):
    """Same probing and cost computation; return seq[best][1]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return b_L
def func_603bcf919bb348b19a460e7b037a446d(seq, best, s):
    """Same probing and cost computation; return seq[best][2]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return b_E
def func_3cd208cc36d74b829ac8740f7e3f0e00(seq, best, s):
    """Same probing and cost computation; return seq[s][2]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return c_E
def func_a3cb3eff22db4d77888b39bdc7fc99c6(seq, best, s):
    """Same probing and cost computation; return seq[s][1]."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return c_L
def func_74494249e563475687ae1754aa405565(seq, best, s):
    """Same probing; return the second ordering cost (E1 - 1) * E2 * L2."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return (b_E - 1) * c_E * c_L
def func_dc9a9681f87a4070b00fa2efa87cc6c1(seq, best, s):
    """Same probing; return the first ordering cost (E2 - 1) * E1 * L1."""
    b_L, b_E = seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return cost1
def func_8f757dc928804bc28204e902824240a3(L1, seq, in1, s):
    """Return extra_if_1_goes_first for candidate s.

    NOTE(review): `best` is assigned inside the branches below, which makes it
    local to this function; the read `seq[best]` on the first line therefore
    raises UnboundLocalError on every call.  These look like auto-generated
    slices of the (seq, N, k) selection loop found elsewhere in this file,
    where `best` was initialised before this code ran.  Code left
    byte-identical because the intended initial value cannot be inferred here.
    """
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_1_goes_first
def func_ba21fd8c4d004f368c58df23d98074b5(L1, seq, in1, s):
    """Same UnboundLocalError defect as func_8f757dc9... above; returns E1."""
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return E1
def func_8754b9b0e49d40fb8fa684a0557c02de(L1, seq, in1, s):
    """Same UnboundLocalError defect as func_8f757dc9... above; returns E2."""
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return E2
def func_d419ec5c85474a6dbee6810c7373db52(L1, seq, in1, s):
    """Same UnboundLocalError defect as func_8f757dc9... above; returns extra_if_2_goes."""
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_2_goes
def func_5ad6e516312548a196872f01b2759c9e(L1, seq, in1, s):
    """Same UnboundLocalError defect as func_8f757dc9... above; returns in2."""
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return in2
def func_63928484963e440cbc39bb40612c0238(L1, seq, in1, s):
    """Same UnboundLocalError defect as func_8f757dc9... above; returns best."""
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_690b9990450841f484b01f29ff52426d(L1, seq, in1, s):
    """Same UnboundLocalError defect as func_8f757dc9... above; returns L2."""
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return L2
def func_c9db3e364c6a4516a7e5bd9cbd723934(seq, best, s):
    """Probe all six fields, compute both ordering costs; return (E2 - 1) * E1 * L1."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return cost1
def func_769474dd72394ff38007f8ebc4813679(seq, best, s):
    """Same probing and cost computation; return seq[best][0]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return b_in
def func_aba6d50056d1478087b015aa0e7c4e95(seq, best, s):
    """Same probing; return the second ordering cost (E1 - 1) * E2 * L2."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    return (b_E - 1) * c_E * c_L
def func_b4c87e5d06ef4f07a3aaae28bdadbec2(seq, best, s):
    """Same probing and cost computation; return seq[s][1]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return c_L
def func_7721bbb36df04368896f12c9e3c61ae2(seq, best, s):
    """Same probing and cost computation; return seq[s][2]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return c_E
def func_1df32fc3dc3346c8bd90d9eaaff83871(seq, best, s):
    """Same probing and cost computation; return seq[best][2]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return b_E
def func_212129b862f84dd287e35750d77cff00(seq, best, s):
    """Same probing and cost computation; return seq[best][1]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return b_L
def func_d1201522f1824d12b819240532a6b3e0(seq, best, s):
    """Same probing and cost computation; return seq[s][0]."""
    b_in, b_L, b_E = seq[best][0], seq[best][1], seq[best][2]
    c_in, c_L, c_E = seq[s][0], seq[s][1], seq[s][2]
    _cost1 = (c_E - 1) * b_E * b_L
    _cost2 = (b_E - 1) * c_E * c_L
    return c_in
def func_bf4fb5c181b74cafb912eafe40c48108(seq, in1, s):
    """Return in2 for candidate s.

    NOTE(review): `best` is assigned inside the branches below, making it local
    to this function; the read `seq[best]` on the first line therefore raises
    UnboundLocalError on every call.  Auto-generated slice of the (seq, N, k)
    selection loop elsewhere in this file; code left byte-identical.
    """
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return in2
def func_1886e344e4c844c4a3db49dfee49cbc8(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns E1."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return E1
def func_904dcc12369f41b5ad8b6b76c146a34e(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns extra_if_2_goes."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_2_goes
def func_13fb6fe60bd54ea4bf24a57a67efc007(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns E2."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return E2
def func_a243aec6c6614d1980e0a323fc06dbb7(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns L2."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return L2
def func_b22d7b5ff209488a9be6166b67433846(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns extra_if_1_goes_first."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_1_goes_first
def func_7ea3c21b8cbf49fbb595c4a967178632(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns L1."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return L1
def func_dd6224bd72d94d869ee4dbbd12fb24f1(seq, in1, s):
    """Same UnboundLocalError defect as func_bf4fb5c1... above; returns best."""
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_07043727f6f948a7a7a1490b8a0968a6(seq, s):
    """Return extra_if_2_goes for candidate s.

    NOTE(review): `best` is assigned inside the branches below, making it local
    to this function; the read `seq[best]` on the first line therefore raises
    UnboundLocalError on every call.  Auto-generated slice of the (seq, N, k)
    selection loop elsewhere in this file; code left byte-identical.
    """
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_2_goes
def func_b9ca9923382c40c9b35f6d44526b8896(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns L2."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return L2
def func_5a8a5ee4ec724918899e1a6c6c1362e1(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns in2."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return in2
def func_a3eedcc3ab6c46b39cf2df44898310fc(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns L1."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return L1
def func_76de85a4b1a447038f4b7c73eb2d4aab(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns E2."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return E2
def func_9c72fdaae5f14d9eafd8fe6276dc08bd(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns best."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_ef32b7bbd3174599ba8798674d905b9e(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns in1."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return in1
def func_ad7e1b369b0b436fb363603fb16c1d37(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns extra_if_1_goes_first."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_1_goes_first
def func_46739286d7904cbba1b56c8c0bd0ec7b(seq, s):
    """Same UnboundLocalError defect as func_07043727... above; returns E1."""
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return E1
def func_73de7073405f42009af5465731a73768(seq, N, k):
    """Run the selection scan over seq[k+1:N]; return cost1 from the final iteration.

    NOTE: like the original, raises NameError when the range is empty (N <= k + 1).
    """
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return cost_keep
def func_b3b2ada0bcaa4c008db1cd1bd6785357(seq, N, k):
    """Same scan; return in2 (candidate index field) from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return new_in
def func_32a6ae8bbff643b889e4baaebb1abebc(seq, N, k):
    """Same scan; return in1 (current best's index field) from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return cur_in
def func_25f66e33518e43cabb798fe0997f2c77(seq, N, k):
    """Same scan; return the final loop position (N - 1 when the range is non-empty)."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return cand
def func_7ee323bb675047f2a14343d9d16bd326(seq, N, k):
    """Same scan; return L1 from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return cur_L
def func_6e36f2ce8e3c4b7c9222b1456bfd605f(seq, N, k):
    """Same scan; return E1 from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return cur_E
def func_8d4065eae50c484f87adfb303a7b2934(seq, N, k):
    """Same scan; return L2 from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return new_L
def func_137d28c93f044646930563931cf75235(seq, N, k):
    """Same scan; return E2 from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return new_E
def func_80078cad413d48db8f51c7ba603269e3(seq, N, k):
    """Same scan; return cost2 from the final iteration."""
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return cost_swap
def func_8c7364c107ba406e9ba978704e8bf711(seq, N, k):
    """Selection step: among positions k..N-1 pick the one minimising the added cost.

    Ties (within 1e-9) are broken towards the entry with the smaller index field.
    """
    best = k
    for cand in range(k + 1, N):
        cur_in, cur_L, cur_E = seq[best][0], seq[best][1], seq[best][2]
        new_in, new_L, new_E = seq[cand][0], seq[cand][1], seq[cand][2]
        cost_keep = (new_E - 1) * cur_E * cur_L
        cost_swap = (cur_E - 1) * new_E * new_L
        if abs(cost_keep - cost_swap) < 1e-09:
            if new_in < cur_in:
                best = cand
        elif cost_swap < cost_keep:
            best = cand
    return best
def func_ab0239ee4dac411ab04ebb0730226cbe(infile):
    """Read the count line, then return the mapped integer L values from the next line."""
    int(infile.readline())
    return map(int, infile.readline().split())
def func_3e4bd242dfd2451b8f389343baa9d94a(infile):
    """Read and return N; the following L line is also read (consumed) as in the original."""
    item_count = int(infile.readline())
    map(int, infile.readline().split())  # L line consumed, values unused
    return item_count
def func_cde448ee01334e60a65fd6b6c858d2a0(infile):
    """Read the L line then the P line; return the mapped L values."""
    lengths = map(int, infile.readline().split())
    map(int, infile.readline().split())  # P line consumed, values unused
    return lengths
def func_9639a71df776492790c8c3a6ac0da181(infile):
    """Read the L line then the P line; return the mapped P values."""
    map(int, infile.readline().split())  # L line consumed, values unused
    return map(int, infile.readline().split())
def func_d3b6a650f5564e5b90a032b7a3e2bb08(infile, ets):
    """Read P indices and look them up in ets; intended to return the last index.

    NOTE(review): `return p` relies on the list-comprehension variable leaking
    into the enclosing scope — Python 2 behaviour only; on Python 3 this raises
    NameError (and on either version it fails when Ps is empty).  Left as-is.
    """
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    return p
def func_27c0a508c9d948f8b92647df98a38242(infile, ets):
    """Read P indices, look each up in ets; return the Ps object itself.

    NOTE(review): on Python 3 the lookup comprehension exhausts the returned
    map iterator (on Python 2 map() returned a list).  Behaviour preserved.
    """
    perm = map(int, infile.readline().split())
    [ets[idx] for idx in perm]  # lookups performed for their side effects only
    return perm
def func_ab879c2452ad43c78a2f38f9640c60ac(infile, ets):
    """Read P indices from infile and return their ets lookups."""
    return [ets[idx] for idx in map(int, infile.readline().split())]
def func_077bc562c8394eaf99d51cd8fed06d89(Ls, Ps, ets, N):
    """Look up each P in ets and return the lookups; seq is built and discarded as in the original."""
    looked_up = [ets[p_idx] for p_idx in Ps]
    [(pos, Ls[pos], looked_up[pos]) for pos in range(N)]
    return looked_up
def func_0fa80cbdf6fa4516bca1c5d961065bd7(Ls, Ps, ets, N):
    """Build etries and seq; intended to return the last comprehension index.

    NOTE(review): `return index` depends on Python 2 list-comprehension
    variable leakage; raises NameError on Python 3 (or when N == 0).  Left as-is.
    """
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return index
def func_da4bec54e658440d8999a6e7a976eade(Ls, Ps, ets, N):
    """Same Python 2-only leakage defect as above; returns the last p from the etries comprehension."""
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return p
def func_ced487153e7e4b6795ae2c3440234a41(Ls, Ps, ets, N):
    """Build and return seq: (position, L value, ets lookup for the P at that position)."""
    looked_up = [ets[p_idx] for p_idx in Ps]
    return [(pos, Ls[pos], looked_up[pos]) for pos in range(N)]
def func_52b07e3f61f548628c8d9cbd919cca40(infile):
    """Read N and the L line (both consumed); return the mapped P values."""
    int(infile.readline())
    map(int, infile.readline().split())  # L line consumed, values unused
    return map(int, infile.readline().split())
def func_106798b0a5cb4e11ab3137a73c86bcf0(infile):
    """Read N, the L line and the P line; return the mapped L values."""
    int(infile.readline())
    lengths = map(int, infile.readline().split())
    map(int, infile.readline().split())  # P line consumed, values unused
    return lengths
def func_f4faed0dfc55450893722923a5980d38(infile):
    """Read and return N; the L and P lines are also read (consumed)."""
    item_count = int(infile.readline())
    map(int, infile.readline().split())
    map(int, infile.readline().split())
    return item_count
def func_3cd8022712a6482aa81af48ef5850bc1(infile, ets):
    """Read L and P lines, look Ps up in ets; intended to return the last index.

    NOTE(review): `return p` depends on Python 2 list-comprehension variable
    leakage; raises NameError on Python 3 (or when Ps is empty).  Left as-is.
    """
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    return p
def func_7f84f6becef14e58ad1fcd51da4131fc(infile, ets):
    """Read L and P lines, perform the ets lookups; return the (unconsumed) L iterator."""
    lengths = map(int, infile.readline().split())
    [ets[idx] for idx in map(int, infile.readline().split())]  # lookups only
    return lengths
def func_b39202cbed004cdb81d99629cf298acc(infile, ets):
    """Read L and P lines; return the Ps object (exhausted by the lookups on Python 3)."""
    map(int, infile.readline().split())  # L line consumed, values unused
    perm = map(int, infile.readline().split())
    [ets[idx] for idx in perm]  # lookups only
    return perm
def func_31c9d0ba9c494a31865a5769bfcf3059(infile, ets):
    """Read L and P lines; return the ets lookups for the P indices."""
    map(int, infile.readline().split())  # L line consumed, values unused
    return [ets[idx] for idx in map(int, infile.readline().split())]
def func_955f3a3d2fb445849ae6d6c7981b3351(infile, Ls, ets, N):
    """Read P indices, look them up, build seq; intended to return the last p.

    NOTE(review): `return p` depends on Python 2 list-comprehension variable
    leakage; raises NameError on Python 3 (or when Ps is empty).  Left as-is.
    """
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return p
def func_729abf6a759047e3827a87ab92f27dd3(infile, Ls, ets, N):
    """Same Python 2-only leakage defect as above; returns the last seq-comprehension index."""
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return index
def func_85fd21ea065849618478e191881db288(infile, Ls, ets, N):
    """Read P indices, look them up, build seq; return the Ps object (exhausted on Python 3)."""
    perm = map(int, infile.readline().split())
    looked_up = [ets[idx] for idx in perm]
    [(pos, Ls[pos], looked_up[pos]) for pos in range(N)]  # built and discarded
    return perm
def func_e8f2bfdc5c334191bb47ec6b822b3bcb(infile, Ls, ets, N):
    """Read P indices and return their ets lookups; seq is built and discarded as in the original."""
    looked_up = [ets[idx] for idx in map(int, infile.readline().split())]
    [(pos, Ls[pos], looked_up[pos]) for pos in range(N)]
    return looked_up
def func_91db7761d8c24b88a484f65bc78ef0cb(infile, Ls, ets, N):
    """Read P indices and return seq: (position, L value, ets lookup) for each position."""
    looked_up = [ets[idx] for idx in map(int, infile.readline().split())]
    return [(pos, Ls[pos], looked_up[pos]) for pos in range(N)]
def func_c08bc7ba09d04bf38d80c2bcf448f01a(infile, ets):
    """Read N, L and P lines, look Ps up in ets; intended to return the last index.

    NOTE(review): `return p` depends on Python 2 list-comprehension variable
    leakage; raises NameError on Python 3 (or when Ps is empty).  Left as-is.
    """
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    return p
def func_8bc1f8b697e241f694c4a222e3a37465(infile, ets):
    """Read N, L and P lines, perform the lookups; return the Ps object (exhausted on Python 3)."""
    int(infile.readline())
    map(int, infile.readline().split())  # L line consumed, values unused
    perm = map(int, infile.readline().split())
    [ets[idx] for idx in perm]  # lookups only
    return perm
def func_4e7bf5783882457481831962dc408335(infile, ets):
    """Read N, L and P lines; return the ets lookups for the P indices."""
    int(infile.readline())
    map(int, infile.readline().split())  # L line consumed, values unused
    return [ets[idx] for idx in map(int, infile.readline().split())]
def func_1f91512e7f414973a285a50b0bbffa4a(infile, ets):
    """Read and return N; the L and P lines are consumed and the lookups performed."""
    item_count = int(infile.readline())
    map(int, infile.readline().split())  # L line consumed, values unused
    [ets[idx] for idx in map(int, infile.readline().split())]  # lookups only
    return item_count
def func_d9e00f6b96354522b06e87d60664a7e5(infile, ets):
    """Read N, L and P lines, perform the lookups; return the (unconsumed) L iterator."""
    int(infile.readline())
    lengths = map(int, infile.readline().split())
    [ets[idx] for idx in map(int, infile.readline().split())]  # lookups only
    return lengths
def func_e35e61f7e77b490c812d97b58735c18c(infile, ets, N):
    """Read lengths + priorities lines, build seq, and return index.

    NOTE(review): `index` exists only via Python 2 comprehension-variable
    leakage -- it is N - 1 after the seq comprehension (NameError when
    N == 0, and always on Python 3).
    """
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return index
def func_9375d76e03774388959ebb3a9ac6fe86(infile, ets, N):
    """Same two-line read; return the priority list Ps."""
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return Ps
def func_378d331e7abf41e98d3712e84752cd04(infile, ets, N):
    """Same two-line read; return etries (ets[p] per priority)."""
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return etries
def func_6846222b9d114f9ebd8579063fdeeb13(infile, ets, N):
    """Same two-line read; return the length list Ls."""
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return Ls
def func_f0bd6b989400494ca911620a3afb0100(infile, ets, N):
    """Same two-line read; return seq: (index, Ls[index], etries[index])."""
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return seq
def func_ecdfdd332c46456bb6c2ad62c3774cb5(infile, ets, N):
    """Same two-line read; return p (Python 2 comprehension leakage --
    the last priority in Ps; NameError when Ps is empty)."""
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return p
def func_9a46fa26703846e0aa1c3c18d49e1930(infile, ets):
    """Consume a full 3-line case record (N, lengths, priorities), build
    seq, and return p.

    NOTE(review): `p` exists only via Python 2 list-comprehension variable
    leakage -- the last priority in Ps (NameError when Ps is empty, and
    always on Python 3).
    """
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return p
def func_4eeb744a2c5144aa8b7e8789b5966467(infile, ets):
    """Same 3-line read; return seq: (index, Ls[index], etries[index])."""
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return seq
def func_0c3a6c61cc824b0b8fd4571c4dff0b96(infile, ets):
    """Same 3-line read; return index (Python 2 leakage -- N - 1 after the
    seq comprehension; NameError when N == 0)."""
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return index
def func_70b619a471a74a5d87326c2801faff09(infile, ets):
    """Same 3-line read; return etries (ets[p] per priority)."""
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return etries
def func_a734e4b0348840cf922386525d3b8d7e(infile, ets):
    """Same 3-line read; return the length list Ls."""
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return N
def func_b152e58705c0487ab6b6c58f4336ec42(infile, ets):
    """Same 3-line read; return the case size N."""
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return N
def func_52f4696459424bafbb948019eab79396(infile, ets):
    """Same 3-line read; return the priority list Ps."""
    N = int(infile.readline())
    Ls = map(int, infile.readline().split())
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return Ps
def func_946182c5c46c4b27a7e2ff7b791610ef():
    """Build the ets table and open the input file; return p.

    NOTE(review): `p` exists only via Python 2 comprehension leakage --
    always 99 here.  The opened file handle is leaked (never closed).
    `computeEtries` is defined elsewhere in this file.
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    return p
def func_df57f5c322bc4bbf8b547e9a8d26d453():
    """Build and return the ets table; also opens (and leaks) the input."""
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    return ets
def func_0b12d1ed8c4440ccb21403d9983a4160():
    """Build the ets table; return the freshly opened input file handle."""
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    return infile
def func_5e2fd955e30341c5b5b9fa681d13d3dd():
    """Open the Code Jam input file and return its first line as int (T).

    The file handle is intentionally left open, matching the original
    generated code's behavior.
    """
    handle = open('codejam/test_files/Y12R5P1/A.in')
    first_line = handle.readline()
    return int(first_line)
def func_e8ad28292c584451bccfa1c636c818d8():
    """Open the input file, consume its first line (case count T), and
    return the open file handle positioned at the first case."""
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return infile
# ---------------------------------------------------------------------------
# NOTE(review): the 19 functions below are machine-generated variants of one
# Code Jam solver (Y12R5P1/A).  Each reads T test cases from *infile*,
# selection-sorts the case's task tuples by expected extra cost, prints each
# case's task order (Python 2 print statements), and differs ONLY in which
# local variable it returns.  Several return values (p, index, s, in2, ...)
# exist only through Python 2 loop/comprehension variable leakage and hold
# whatever the last iteration left behind -- TODO confirm callers rely on
# that before any Python 3 port.  Each function also ends with a
# dead-looking tail (`if in2 < in1: best = s`) that re-checks the leaked
# inner-loop variables after all cases are printed; it raises NameError when
# the loops never ran (T == 0 or N < 2).
# ---------------------------------------------------------------------------
def func_7bbf3a29f3514bc5b4a2fb057f324aca(infile, ets, T):
    """Solve and print all T cases; return the last case's etries list."""
    for tcase in range(T):
        N = int(infile.readline())                  # tasks in this case
        Ls = map(int, infile.readline().split())    # task lengths
        Ps = map(int, infile.readline().split())    # indices into ets
        etries = [ets[p] for p in Ps]               # expected tries per task
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: put first the task whose going first adds the
        # least expected extra cost; near-ties broken by original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    # Generated dead tail: re-tests leaked loop variables (see banner note).
    if in2 < in1:
        best = s
    return etries
def func_60a9e5a69dd8489eb195b45ac2807e61(infile, ets, T):
    """Solver variant; returns leaked inner-loop local L2."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return L2
def func_a4a27d35cce54e2d8e8b999ebdb4c837(infile, ets, T):
    """Solver variant; returns leaked inner-loop local in2."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return in2
def func_5e4b007911514ed2872d70ed44321176(infile, ets, T):
    """Solver variant; returns leaked loop index k (N - 1 of last case)."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return k
def func_ce871fc9b71a4e6ab837d400e63b48dd(infile, ets, T):
    """Solver variant; returns leaked inner-loop local E2."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return E2
def func_d7cf97b1463841168ce6a2d4f9736ca5(infile, ets, T):
    """Solver variant; returns leaked local extra_if_2_goes."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return extra_if_2_goes
def func_0d0ae20f4fb44b8da5da5e24ba50b9ad(infile, ets, T):
    """Solver variant; returns leaked local best (last selection index)."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return best
def func_907244e2a9394d5499b482f2cebe8eb2(infile, ets, T):
    """Solver variant; returns leaked inner-loop index s."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return s
def func_7735a52ea424434f8f78609b7310d17c(infile, ets, T):
    """Solver variant; returns the last case's fully sorted seq."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return seq
def func_ffd1fc3e28d24efa89494d08edf56095(infile, ets, T):
    """Solver variant; returns leaked local extra_if_1_goes_first."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return N
def func_17798f329f0441d0907e878cabebda21(infile, ets, T):
    """Solver variant; returns the last case's size N."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return N
def func_ddc9bf543d374680b710afccfc4d2c3a(infile, ets, T):
    """Solver variant; returns leaked loop index tcase (T - 1)."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return tcase
def func_2f3daf0e750847bca748d1fb6e827a39(infile, ets, T):
    """Solver variant; returns leaked inner-loop local in1."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return in1
def func_9ad6a0caecfe434b9cec770ae8534f61(infile, ets, T):
    """Solver variant; returns the last case's priority list Ps."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return Ps
def func_c52233971aa547ffb4c7dcfd75efb8b3(infile, ets, T):
    """Solver variant; returns leaked comprehension variable index."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return index
def func_df97638f69df4c02aba7d120897b1166(infile, ets, T):
    """Solver variant; returns the last case's length list Ls."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return Ls
def func_6b8ea3a1aa6f46fd8a2354e0513c75a4(infile, ets, T):
    """Solver variant; returns leaked inner-loop local E1."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return E1
def func_c3de0d3ff2c84f4e90bbe49178985516(infile, ets, T):
    """Solver variant; returns leaked inner-loop local L1."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return L1
def func_4563c0ee033b421f8863109cb48daeda(infile, ets, T):
    """Solver variant; returns leaked comprehension variable p."""
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())
        Ps = map(int, infile.readline().split())
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
    if in2 < in1:
        best = s
    return p
def func_24324fdf885f4939a94d171b7536065b(infile, in2, best, in1, s):
    """Close *infile* and return *s* when in2 < in1, otherwise *best*.

    The comparison happens before the close, as in the original generated
    code, so an uncomparable pair leaves the file open.
    """
    chosen = s if in2 < in1 else best
    infile.close()
    return chosen
def func_7b7d9d55155048fe8bb67088c629f456():
    """Build the ets table, open the input, consume the case count line,
    and return the open file handle positioned at the first case.
    `computeEtries` is defined elsewhere in this file."""
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return infile
def func_669c054bd1b044d7b8de14a4ec77c747():
    """Same setup as above, but return the precomputed ets table."""
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return ets
def func_135ba30eec61462b828462bf2a62c397():
    """Same setup; return p (Python 2 comprehension leakage -- always 99).
    The opened file handle is leaked."""
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return p
def func_2b328366d3374952bd576c73c19c0201():
    """Same setup; return the test-case count T from the input's first line."""
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return T
def func_89240182cd234172bb577afce79cd2af(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return etries
def func_606a8cad57a842f688849a95e2dfb5f7(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return N
def func_e88d3a822ce54ddf84f9dcd61a66c487(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return index
def func_cefdcb91adf0468186c3d8b4445b0e4d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return p
def func_5d2adbbf437f4376b16b0e53f0a493d1(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in2
def func_0a1e20ec3b5042cea485b28da84692fd(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in1
def func_1ab3f9a95f5d4433ad5bc0ce6896057d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return k
def func_2d872ed0fd2142cc995fe189455d7271(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L1
def func_b4b71910f07c4fc994168a099189ac35(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return tcase
def func_ad1a6add18a34d5484842f054512a610(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ls
def func_7e8f35a6c0324e918e3880f4b8465113(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return best
def func_e55a33d55dc3457cb12dfd3b4f2e0ba3(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return s
def func_55af51f0392e47909a652582a67f0413(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return T
def func_f7c058576adf42e4b5826aad3a5d914d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E1
def func_8908ba15402e4dda8a2a0ddf9099bc97(infile, ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Reads T cases from *infile*: each case gives N, N task lengths ``Ls`` and
    N indices ``Ps`` into the expected-tries table *ets*.  A selection sort
    places in every slot the task whose going first adds the least expected
    extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2; ties broken by the lower
    original index) and prints the order as "Case #k: ...".
    Returns the last test case's Ps list.
    """
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return Ps
def func_b836853a0b5545be84bb60b52dbbc33a(infile, ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Reads T cases from *infile*: each case gives N, N task lengths ``Ls`` and
    N indices ``Ps`` into the expected-tries table *ets*.  A selection sort
    places in every slot the task whose going first adds the least expected
    extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2; ties broken by the lower
    original index) and prints the order as "Case #k: ...".
    Returns the last computed extra_if_1_goes_first (leaked loop value).
    """
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return extra_if_1_goes_first
def func_a81028678613485f9ba9d090ba5ad21d(infile, ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Reads T cases from *infile*: each case gives N, N task lengths ``Ls`` and
    N indices ``Ps`` into the expected-tries table *ets*.  A selection sort
    places in every slot the task whose going first adds the least expected
    extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2; ties broken by the lower
    original index) and prints the order as "Case #k: ...".
    Returns the last computed extra_if_2_goes (leaked loop value).
    """
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return extra_if_2_goes
def func_02550088718b4a7ca15e55dcc4d2e548(infile, ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Reads T cases from *infile*: each case gives N, N task lengths ``Ls`` and
    N indices ``Ps`` into the expected-tries table *ets*.  A selection sort
    places in every slot the task whose going first adds the least expected
    extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2; ties broken by the lower
    original index) and prints the order as "Case #k: ...".
    Returns the last test case's sorted seq of (index, length, tries) tuples.
    """
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return seq
def func_d9a583dcf77c412385b4ad362d41cc6b(infile, ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Reads T cases from *infile*: each case gives N, N task lengths ``Ls`` and
    N indices ``Ps`` into the expected-tries table *ets*.  A selection sort
    places in every slot the task whose going first adds the least expected
    extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2; ties broken by the lower
    original index) and prints the order as "Case #k: ...".
    Returns the last examined E2 (leaked loop value, kept from the original).
    """
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return E2
def func_2dbafd8341bd4c7c878b9099aaed4062(infile, ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Reads T cases from *infile*: each case gives N, N task lengths ``Ls`` and
    N indices ``Ps`` into the expected-tries table *ets*.  A selection sort
    places in every slot the task whose going first adds the least expected
    extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2; ties broken by the lower
    original index) and prints the order as "Case #k: ...".
    Returns the last examined L2 (leaked loop value, kept from the original).
    """
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return L2
def func_4f42d5288708422394bf384f142862bf(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the still-open input file (the caller owns closing it).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    return infile
def func_349a24733e5e4b21aaed8390c0bbfd19(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last computed extra_if_2_goes (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return extra_if_2_goes
def func_ab4efed1e790487c8ad25a95750c9391(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last ``index`` used building seq (N - 1 of the final case).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        # Explicit loop (not a comprehension): `index` must stay bound for
        # the return below, which relied on Python 2's comprehension leak.
        seq = []
        for index in range(N):
            seq.append((index, Ls[index], etries[index]))
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return index
def func_474b8e6e0e16487487f2f621d5157a32(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last examined in2 (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return in2
def func_d518370f1e894f7c96c1b4b0ce157682(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last examined L1 (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return L1
def func_298882a2019c4ed4807baa14a5322931(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last test case's Ps list.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return Ps
def func_0dc56dcdcc8b4455978aa0586fa777be(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last test case's N.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return N
def func_b3494619a5de49cb906c5590cca1198b(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last value of ``best`` (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return best
def func_775dd034affc4544886fef0d4cd42666(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last tcase index, T - 1 (NameError if T == 0, as originally).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return tcase
def func_b432f9f81fec4444989992c564c49ad0(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns T, the number of test cases.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return T
def func_e9c6e87ef9e74a76bca1adfa1d997efe(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last test case's etries list.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return etries
def func_2f775a07e82847098ea1b21efd6a4576(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last test case's Ls list.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return Ls
def func_83e436c076e54b99b5b9fe428c5ae45f(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last examined in1 (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return in1
def func_9ccebdf4e16f4d20971d4707bb263334(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last examined E1 (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return E1
def func_6eafa7e5fd3f41e488a55e2cfed0c4b4(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last priority index ``p`` of the final case.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        # Explicit loop (not a comprehension): `p` must stay bound for the
        # return below, which relied on Python 2's comprehension leak.
        etries = []
        for p in Ps:
            etries.append(ets[p])
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return p
def func_c79e67e18b51455e815ddfe33fc61e05(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last examined E2 (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return E2
def func_dcd53c4fcab74e879dcfc4764ba66ce5(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last computed extra_if_1_goes_first (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return extra_if_1_goes_first
def func_8cf7f31eee2149f0a554c9299ca93099(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last sort-slot index ``k`` (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return k
def func_e3520b77881c48e0bfe30b06eb10980d(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last examined L2 (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return L2
def func_39c5f879552e47ec934b8f07646bd9c4(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last inner-loop index ``s`` (leaked loop value).
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return s
def func_2ecb734f247943ba945937e3f44dc1b2(ets):
    """Sort each test case's tasks by expected extra time and print the order.

    Opens codejam/test_files/Y12R5P1/A.in and reads T cases: each case gives
    N, N task lengths ``Ls`` and N indices ``Ps`` into the expected-tries
    table *ets*.  A selection sort places in every slot the task whose going
    first adds the least expected extra time ((E2 - 1)*E1*L1 vs
    (E1 - 1)*E2*L2; ties broken by the lower original index) and prints the
    order as "Case #k: ...".
    Returns the last test case's sorted seq of (index, length, tries) tuples.
    """
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return seq
def func_82f7cb4853894b909a299beef45fa74a():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the expected-tries table ``ets``.
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return ets
def func_ccffcdc160594851be797fb5376ad794():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last examined in1 (leaked loop value).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return in1
def func_fa559a7ef73144e4b39ca7e2f57bbe7c():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last sort-slot index ``k`` (leaked loop value).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return k
def func_716267b36d9a43a18f1cdaf4ac34be03():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last examined L2 (leaked loop value).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return L2
def func_d1710f63634f408faef12b86cc66e3e7():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last value of ``best`` (leaked loop value).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return best
def func_e57c7b4d814c426da80046f0317360b0():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns T, the number of test cases.
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return T
def func_099583d4c9a443568bf21eeace702030():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last tcase index, T - 1 (NameError if T == 0, as originally).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return tcase
def func_daab5199ffab451da4c42b5857266ac9():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last inner-loop index ``s`` (leaked loop value).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return s
def func_398c79f837814d7b82eadf54ae23f10c():
    """Sort each test case's tasks by expected extra time and print the order.

    Builds the expected-tries table via computeEtries (defined elsewhere in
    this file), opens codejam/test_files/Y12R5P1/A.in and reads T cases:
    each case gives N, N task lengths ``Ls`` and N indices ``Ps`` into that
    table.  A selection sort places in every slot the task whose going first
    adds the least expected extra time ((E2 - 1)*E1*L1 vs (E1 - 1)*E2*L2;
    ties broken by the lower original index) and prints "Case #k: ...".
    Returns the last examined in2 (leaked loop value).
    """
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        # list(...) keeps the values subscriptable even where map() is lazy.
        Ls = list(map(int, infile.readline().split()))
        Ps = list(map(int, infile.readline().split()))
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1, L1, E1 = seq[best]
                in2, L2, E2 = seq[s]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:  # tie -> prefer the lower original index
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            seq[k], seq[best] = seq[best], seq[k]
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Case #%d: %s' % (tcase + 1, ' '.join(str(t[0]) for t in seq)))
    infile.close()  # the original leaked this handle
    return in2
def func_3d2373c3faa1429a92ab599f39f80350():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns N, the last case's
    level count (left in scope by Python 2 loop-variable leakage).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return N
def func_1cb5c69110ee49c992a25bf4346fb84f():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the comprehension
    variable index (leaked by Python 2 comprehension scoping).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return index
def func_b03ab3c331d64147b6f1f4cba2e981d8():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the leaked field E2
    (left in scope by Python 2 loop-variable leakage).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return E2
def func_5343f66d0c3d43dda2c4e9ea17c2183b():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the last case's
    sorted seq list.
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return seq
def func_1d910e25479941e096a7e6f28296cb84():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the still-open input
    file handle (never closed here).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return infile
def func_d0e0497e337249ccbeba931dfccfe33d():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the leaked field L1
    (left in scope by Python 2 loop-variable leakage).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return L1
def func_772ef3879b724987af8f4a672bd466cb():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the comprehension
    variable p (leaked by Python 2 comprehension scoping).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return p
def func_ac7d38a970104ec0ad14e14114963d5b():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the last computed
    extra_if_2_goes (leaked from the inner loop scope).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return extra_if_2_goes
def func_65022ba6f5a14df590c5ba98a8c99f3d():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the last case's Ps
    list.
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return Ps
def func_32ebd92069a14fdb8d58dd336958ff9b():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the last case's Ls
    list.
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return Ls
def func_ef13852658fe4eb08c3b8171deb00ad3():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the last case's
    etries list.
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return etries
def func_c2d96ab8fffc4fb884843fb76bb9ab72():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the last computed
    extra_if_1_goes_first (leaked from the inner loop scope).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return extra_if_1_goes_first
def func_4aa381869e2d4e518bbbb93eace0a4a5():
    """Auto-generated probe of the Code Jam Y12R5P1/A solver.

    Python 2 code (print statement, list-returning map). Reads all test
    cases from codejam/test_files/Y12R5P1/A.in, selection-sorts each
    case's (index, length, expected-tries) triples by expected extra
    cost, prints the resulting order, and returns the leaked field E1
    (left in scope by Python 2 loop-variable leakage).
    """
    # Expected tries for each success percentage 0..99 (project helper).
    ets = [computeEtries(p) for p in range(100)]
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    for tcase in range(T):
        N = int(infile.readline())
        Ls = map(int, infile.readline().split())  # presumably level lengths — TODO confirm
        Ps = map(int, infile.readline().split())  # presumably success percentages — TODO confirm
        etries = [ets[p] for p in Ps]
        seq = [(index, Ls[index], etries[index]) for index in range(N)]
        # Selection sort: prefer the entry whose going first adds less
        # expected cost; near-ties fall back to the lower original index.
        for k in range(N):
            best = k
            for s in range(k + 1, N):
                in1 = seq[best][0]
                L1 = seq[best][1]
                E1 = seq[best][2]
                in2 = seq[s][0]
                L2 = seq[s][1]
                E2 = seq[s][2]
                extra_if_1_goes_first = (E2 - 1) * E1 * L1
                extra_if_2_goes = (E1 - 1) * E2 * L2
                if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
                    if in2 < in1:
                        best = s
                elif extra_if_2_goes < extra_if_1_goes_first:
                    best = s
            swap = seq[best]
            seq[best] = seq[k]
            seq[k] = swap
        print 'Case #%d:' % (tcase + 1),
        print ' '.join(map(str, [seq[i][0] for i in range(N)]))
        # NOTE(review): dead generator residue — raises NameError when
        # the inner loop never ran (N < 2).
        if in2 < in1:
            best = s
    return E1
# Fix: stray "| " prefix (dataset column separator) before "import sys"
# was a syntax error that broke the whole module at parse time.
import sys
# Make the project sources importable, then pull in the solver under test.
sys.path.append('/home/george2/Raise/ProgramRepair/CodeSeer/projects/src/main/python')
from CodeJam.Y12R5P1.EvgeniSergeev.game import *
def func_be94cf6f94394e8087c938b8745b1bc0(multiplier, pr):
    """Auto-extracted series-accumulation fragment.

    NOTE(review): `re` and `running` are augmented before any assignment
    in this scope, so calling this raises UnboundLocalError — it is an
    extracted fragment, not a runnable function.
    """
    re += multiplier * running
    running *= pr
    return re
def func_0fe080b18459436d80aea9a4ef31d0c7(multiplier, pr):
    """Auto-extracted series-accumulation fragment.

    NOTE(review): `re` and `running` are augmented before any assignment
    in this scope, so calling this raises UnboundLocalError.
    """
    re += multiplier * running
    running *= pr
    return running
def func_e871aeffa1a74f8d803223add8f1beb7(pr):
    """Auto-extracted fragment: advance the series term and counter.

    NOTE(review): `running` and `multiplier` are augmented before any
    assignment here, so a call raises UnboundLocalError.
    """
    running *= pr
    multiplier += 1
    return running
def func_2fb17cc1afc14af482c887f417b6f927(pr):
    """Auto-extracted fragment: advance the series term and counter.

    NOTE(review): `running` and `multiplier` are augmented before any
    assignment here, so a call raises UnboundLocalError.
    """
    running *= pr
    multiplier += 1
    return multiplier
def func_e09e8bb111514b029eeaa884f67b4b47(pr):
    """Auto-extracted loop-body fragment of the series accumulation.

    NOTE(review): `re`, `running` and `multiplier` are all augmented
    before any assignment in this scope — a call raises
    UnboundLocalError.
    """
    re += multiplier * running
    running *= pr
    multiplier += 1
    return re
def func_19396efc6be14a019fd124d583029f2d(pr):
    """Auto-extracted loop-body fragment of the series accumulation.

    NOTE(review): `re`, `running` and `multiplier` are all augmented
    before any assignment in this scope — a call raises
    UnboundLocalError.
    """
    re += multiplier * running
    running *= pr
    multiplier += 1
    return running
def func_945ce5f8755a43b09bd83576de576a34(pr):
    """Auto-extracted loop-body fragment of the series accumulation.

    NOTE(review): `re`, `running` and `multiplier` are all augmented
    before any assignment in this scope — a call raises
    UnboundLocalError.
    """
    re += multiplier * running
    running *= pr
    multiplier += 1
    return multiplier
def func_6bb1eec3b71a4a19b8910cfaa36eebab(p):
    """Convert percentage *p* to a probability and return it; the series
    accumulator initialised here is discarded."""
    probability = p / 100.0
    accumulator = 1
    return probability
def func_3cb212d154a640a4adf014959a662ceb(p):
    """Compute probability p/100 (discarded) and return the freshly
    initialised series accumulator, which is always 1."""
    probability = p / 100.0
    accumulator = 1
    return accumulator
def func_7fe6e233b0bb48f881d84b04ea618511(pr):
    """Initialise the series state and return the first term, i.e. *pr*."""
    accumulator = 1
    term = pr
    return term
def func_1c03b783ccaf4bb180ef5a7d00cb6366(pr):
    """Initialise the series state and return the accumulator (always 1)."""
    accumulator = 1
    term = pr
    return accumulator
def func_0c600e5d7dbb4732a60099732603afa7(pr):
    """Initialise the term and counter and return the counter (always 2)."""
    term = pr
    counter = 2
    return counter
def func_f9a66213f5944e38982acebe98cd461e(pr):
    """Initialise the term and counter and return the term, i.e. *pr*."""
    term = pr
    counter = 2
    return term
def func_b2ce794c66f248ef88dc6236f47bd12f(pr):
    """Auto-extracted fragment of the series loop.

    NOTE(review): `re` and `running` are never initialised in this
    scope; the first loop iteration (always entered, multiplier starts
    at 2) raises UnboundLocalError.
    """
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return multiplier
def func_a10211f34b1342f0a413920349db467c(pr):
    """Auto-extracted fragment of the series loop.

    NOTE(review): `re` and `running` are never initialised in this
    scope; the first loop iteration raises UnboundLocalError.
    """
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return running
def func_5f57a1c38e5d4ccd8790b635a2ca39b8(pr):
    """Auto-extracted fragment of the series loop.

    NOTE(review): `re` and `running` are never initialised in this
    scope; the first loop iteration raises UnboundLocalError.
    """
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return re
def func_8fc0629e68064d30b3dab423bd284b1f(pr):
    """Auto-extracted fragment of the series loop plus final scaling.

    NOTE(review): `multiplier`, `running` and `re` are all read before
    any assignment in this scope — the loop condition itself raises
    UnboundLocalError.
    """
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return (1 - pr) * re
def func_b7fefc9e29e74c9f9a5f9240aaaf9f0e(p):
    """Set up probability p/100 and series state; return the accumulator
    (always 1)."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    return accumulator
def func_307d36495a62413cb9c2f92ab5768db3(p):
    """Set up series state for probability p/100 and return the first
    term, i.e. p / 100.0."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    return term
def func_45d839ec0e7245d59ed6359beb420907(p):
    """Set up series state and return the probability p / 100.0."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    return probability
def func_bf71df369cd4409abc7f6c28888f5227(pr):
    """Initialise the full series state and return the counter (always 2)."""
    accumulator = 1
    term = pr
    counter = 2
    return counter
def func_1f66b0c3efb94d2ca3b573f631d97413(pr):
    """Initialise the full series state and return the term, i.e. *pr*."""
    accumulator = 1
    term = pr
    counter = 2
    return term
def func_724f37fc53154ad1aa0ec206c4f1ce62(pr):
    """Initialise the full series state and return the accumulator (1)."""
    accumulator = 1
    term = pr
    counter = 2
    return accumulator
def func_3dccec556f874d25bd76be56be698661(pr):
    """Auto-extracted fragment of the series loop.

    NOTE(review): `re` is never initialised in this scope; the first
    loop iteration (always entered) raises UnboundLocalError.
    """
    running = pr
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return re
def func_f4e2acfbf84d40c08c60693b5c7aed70(pr):
    """Auto-extracted fragment of the series loop.

    NOTE(review): `re` is never initialised in this scope; the first
    loop iteration (always entered) raises UnboundLocalError.
    """
    running = pr
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return multiplier
def func_b1429485e2084d4e9bb0636af527b939(pr):
    """Auto-extracted fragment of the series loop.

    NOTE(review): `re` is never initialised in this scope; the first
    loop iteration (always entered) raises UnboundLocalError.
    """
    running = pr
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return running
def func_45b7f6c6d63f46b39d3a7865fec49381(pr):
    """Auto-extracted fragment of the series loop plus final scaling.

    NOTE(review): `re` and `running` are never initialised in this
    scope; the first loop iteration raises UnboundLocalError.
    """
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return (1 - pr) * re
def func_8a76a3e3589f40bba7387d951b9dbdf5(p):
    """Set up the full series state and return the probability p / 100.0."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    counter = 2
    return probability
def func_41000a635d0a48e3baeaf1c7b7ae7042(p):
    """Set up the full series state and return the first term p / 100.0."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    counter = 2
    return term
def func_8cf3c1f259674f6db1f5cb61a9b6ec61(p):
    """Set up the full series state and return the accumulator (always 1)."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    counter = 2
    return accumulator
def func_92365e002e4f4ba4a93fd3ad5af77fdf(p):
    """Set up the full series state and return the counter (always 2)."""
    probability = p / 100.0
    accumulator = 1
    term = probability
    counter = 2
    return counter
def func_e85446a1626f449cbd36c7073457de45(pr):
    """Sum the series 1 + sum over m >= 2 of m * pr**(m-1).

    Iterates at least until the counter reaches 10000, then continues
    while the current term m * pr**(m-1) exceeds 1e-09 (so it does not
    terminate for pr >= 1). Returns the accumulated total.
    """
    total = 1
    term = pr
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= pr
        m += 1
    return total
def func_36416facc37543b4bddda5c01e6256a1(pr):
    """Run the series loop (at least until the counter reaches 10000,
    then while the term exceeds 1e-09) and return the final term
    pr**(m-1); does not terminate for pr >= 1."""
    total = 1
    term = pr
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= pr
        m += 1
    return term
def func_6ae73895c6ed4ef194fe5aa350dbd6e5(pr):
    """Run the series loop and return the final counter value (10000
    whenever the trailing term has decayed below 1e-09 by then); does
    not terminate for pr >= 1."""
    total = 1
    term = pr
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= pr
        m += 1
    return m
def func_faddd0bd29024f9fac1f4dfcde5dfc8c(pr):
    """Auto-extracted fragment of the series loop plus final scaling.

    NOTE(review): `re` is never initialised in this scope; the first
    loop iteration (always entered) raises UnboundLocalError.
    """
    running = pr
    multiplier = 2
    while multiplier < 10000 or multiplier * running > 1e-09:
        re += multiplier * running
        running *= pr
        multiplier += 1
    return (1 - pr) * re
def func_e81a7558c635454a8ea4469f29f7cb1d(p):
    """Sum 1 + sum over m >= 2 of m * (p/100)**(m-1) and return the total.

    Loops at least until the counter reaches 10000, then while the
    current term exceeds 1e-09; terminates for p < 100.
    """
    prob = p / 100.0
    total = 1
    term = prob
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= prob
        m += 1
    return total
def func_2e4d86e7e96347a995480ec66a16f80d(p):
    """Run the p/100 series loop and return the final (fully decayed)
    term; terminates for p < 100."""
    prob = p / 100.0
    total = 1
    term = prob
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= prob
        m += 1
    return term
def func_c2ff0c24ae524c7291f873124cdd91a7(p):
    """Run the p/100 series loop and return the final counter (10000 for
    any p < 100 whose term has decayed below 1e-09 by then)."""
    prob = p / 100.0
    total = 1
    term = prob
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= prob
        m += 1
    return m
def func_8b11fc9be15e441a9963fa36c9d8aa16(p):
    """Run the p/100 series loop (result discarded) and return the
    probability p / 100.0; terminates for p < 100."""
    prob = p / 100.0
    total = 1
    term = prob
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= prob
        m += 1
    return prob
def func_2961f6f24dd9441385c8778e441b856d(pr):
    """Sum the series 1 + sum over m >= 2 of m * pr**(m-1), then scale
    by (1 - pr); does not terminate for pr >= 1."""
    total = 1
    term = pr
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= pr
        m += 1
    return (1 - pr) * total
def func_6382c41eaadb44b9b3792f95fdf68fc5(p):
    """Sum 1 + sum over m >= 2 of m * (p/100)**(m-1) and scale the total
    by (1 - p/100); terminates for p < 100."""
    prob = p / 100.0
    total = 1
    term = prob
    m = 2
    while not (m >= 10000 and m * term <= 1e-09):
        total += m * term
        term *= prob
        m += 1
    return (1 - prob) * total
def func_5190849f12b94a7697663ff9f0a2f90e(seq, best):
    """Read fields 0 and 1 of seq[best]; return field 0."""
    first_field = seq[best][0]
    second_field = seq[best][1]
    return first_field
def func_7ad34969be9f45a6b145a5bb5dd3b7cd(seq, best):
    """Read fields 0 and 1 of seq[best]; return field 1."""
    first_field = seq[best][0]
    second_field = seq[best][1]
    return second_field
def func_4c2d27b869e746c1a7abe70865899cff(seq, best):
    """Read fields 1 and 2 of seq[best]; return field 2."""
    length_field = seq[best][1]
    tries_field = seq[best][2]
    return tries_field
def func_7bc36c0ef861416c87e78f209f6de137(seq, best):
    """Read fields 1 and 2 of seq[best]; return field 1."""
    length_field = seq[best][1]
    tries_field = seq[best][2]
    return length_field
def func_8a399102e0da4e45ae211fca8ce840ea(seq, best, s):
    """Read field 2 of seq[best] and field 0 of seq[s]; return the former."""
    best_tries = seq[best][2]
    cand_index = seq[s][0]
    return best_tries
def func_294a2ed0a9bc4f7faa03a1bc763c00cb(seq, best, s):
    """Read field 2 of seq[best] and field 0 of seq[s]; return the latter."""
    best_tries = seq[best][2]
    cand_index = seq[s][0]
    return cand_index
def func_40fa675929b14ea8ad764991041b6059(seq, s):
    """Read fields 0 and 1 of seq[s]; return field 1."""
    cand_index = seq[s][0]
    cand_length = seq[s][1]
    return cand_length
def func_51edaf01ac7d4a1c93a6401230abb177(seq, s):
    """Read fields 0 and 1 of seq[s]; return field 0."""
    cand_index = seq[s][0]
    cand_length = seq[s][1]
    return cand_index
def func_6d15ded96670402cacd1bafb71283ea0(seq, s):
    """Read fields 1 and 2 of seq[s]; return field 1."""
    cand_length = seq[s][1]
    cand_tries = seq[s][2]
    return cand_length
def func_06b5df6a9bdb4d2b939cbb28b02cc628(seq, s):
    """Read fields 1 and 2 of seq[s]; return field 2."""
    cand_length = seq[s][1]
    cand_tries = seq[s][2]
    return cand_tries
def func_b5af1a59a31d44eaad4adc90b1630e2f(L1, seq, E1, s):
    """Return (E2 - 1) * E1 * L1, where E2 is field 2 of seq[s]."""
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    return cost_one_first
def func_526659655a17404a90234229f1a3ceac(L1, seq, E1, s):
    """Compute (E2 - 1) * E1 * L1 (discarded) and return E2, i.e. field 2
    of seq[s]."""
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    return cand_tries
def func_01e8dc1e5a924a258ea5c6e40bf4b08d(L1, E1, E2, L2):
    """Return (E1 - 1) * E2 * L2 (the cost if item 2 goes first)."""
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2
    return cost_two_first
def func_14bba4c592c84203a10154655d8d5acc(L1, E1, E2, L2):
    """Return (E2 - 1) * E1 * L1 (the cost if item 1 goes first)."""
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2
    return cost_one_first
def func_54f3d8194f9b4bdd9652cf8729812004(in2, E1, E2,
    extra_if_1_goes_first, L2, in1, s):
    """Compute the cost if item 2 goes first, run the tie-break
    comparison (the chosen index is discarded), and return that cost."""
    cost_two_first = (E1 - 1) * E2 * L2
    tie = abs(extra_if_1_goes_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < extra_if_1_goes_first:
        best = s
    return cost_two_first
def func_fefb3b6ede9e4457980fa08f71880e35(in2, E1, E2,
    extra_if_1_goes_first, L2, in1, s):
    """Return *s* when item 2 wins the tie-break (near-tie with lower
    index, or strictly cheaper); raises UnboundLocalError otherwise,
    matching the original."""
    cost_two_first = (E1 - 1) * E2 * L2
    tie = abs(extra_if_1_goes_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < extra_if_1_goes_first:
        best = s
    return best
def func_41eb282a305d48ac93b5197d546d808a(seq, best):
    """Read fields 0..2 of seq[best]; return field 2."""
    idx_field = seq[best][0]
    len_field = seq[best][1]
    tries_field = seq[best][2]
    return tries_field
def func_cb5bf7244dd4431581eb817e4441ddb3(seq, best):
    """Read fields 0..2 of seq[best]; return field 1."""
    idx_field = seq[best][0]
    len_field = seq[best][1]
    tries_field = seq[best][2]
    return len_field
def func_a9ca2f941fb643ea8a6c630bce274390(seq, best):
    """Read fields 0..2 of seq[best]; return field 0."""
    idx_field = seq[best][0]
    len_field = seq[best][1]
    tries_field = seq[best][2]
    return idx_field
def func_7f8ddf43a1de4772a8fecd243da74c38(seq, best, s):
    """Read fields 1-2 of seq[best] and field 0 of seq[s]; return field 2
    of seq[best]."""
    best_len = seq[best][1]
    best_tries = seq[best][2]
    cand_idx = seq[s][0]
    return best_tries
def func_45832876482b4db79a1372b88f84dc1a(seq, best, s):
    """Read fields 1-2 of seq[best] and field 0 of seq[s]; return field 0
    of seq[s]."""
    best_len = seq[best][1]
    best_tries = seq[best][2]
    cand_idx = seq[s][0]
    return cand_idx
def func_4b4d35ecc0654d0e8e7f9307064d71da(seq, best, s):
    """Read fields 1-2 of seq[best] and field 0 of seq[s]; return field 1
    of seq[best]."""
    best_len = seq[best][1]
    best_tries = seq[best][2]
    cand_idx = seq[s][0]
    return best_len
def func_6b7c0da84a27407e80214c334d6ebd1d(seq, best, s):
    """Read field 2 of seq[best] and fields 0-1 of seq[s]; return field 0
    of seq[s]."""
    best_tries = seq[best][2]
    cand_idx = seq[s][0]
    cand_len = seq[s][1]
    return cand_idx
def func_71fa2a4b56b34b8680bd281461c82243(seq, best, s):
    """Read field 2 of seq[best] and fields 0-1 of seq[s]; return field 2
    of seq[best]."""
    best_tries = seq[best][2]
    cand_idx = seq[s][0]
    cand_len = seq[s][1]
    return best_tries
def func_64080b5f3a0e47078d1668c962094b0c(seq, best, s):
    """Read field 2 of seq[best] and fields 0-1 of seq[s]; return field 1
    of seq[s]."""
    best_tries = seq[best][2]
    cand_idx = seq[s][0]
    cand_len = seq[s][1]
    return cand_len
def func_5cc357461091433db8ccf379a68b9a22(seq, s):
    """Read fields 0..2 of seq[s]; return field 1."""
    cand_idx = seq[s][0]
    cand_len = seq[s][1]
    cand_tries = seq[s][2]
    return cand_len
def func_3bb2147e6c53426daded3d90af060b15(seq, s):
    """Read fields 0..2 of seq[s]; return field 2."""
    cand_idx = seq[s][0]
    cand_len = seq[s][1]
    cand_tries = seq[s][2]
    return cand_tries
def func_6172212f978a4485bee47f222021393c(seq, s):
    """Read fields 0..2 of seq[s]; return field 0."""
    cand_idx = seq[s][0]
    cand_len = seq[s][1]
    cand_tries = seq[s][2]
    return cand_idx
def func_8c2b91bc287c474e9e88ea0bf2345f41(L1, seq, E1, s):
    """Read fields 1-2 of seq[s], compute (E2 - 1) * E1 * L1 (discarded),
    and return field 1 of seq[s]."""
    cand_len = seq[s][1]
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    return cand_len
def func_e2655fef0a2c4d97a98991d60b45fb78(L1, seq, E1, s):
    """Return (E2 - 1) * E1 * L1, where E2 is field 2 of seq[s]; field 1
    is read and discarded."""
    cand_len = seq[s][1]
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    return cost_one_first
def func_db170ee292db42899ac62782d9a3d7a4(L1, seq, E1, s):
    """Compute (E2 - 1) * E1 * L1 (discarded) and return E2, i.e. field 2
    of seq[s]."""
    cand_len = seq[s][1]
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    return cand_tries
def func_d25470dc81ea4c038890e1bc0333b2f3(L1, seq, E1, L2, s):
    """Return (E1 - 1) * E2 * L2 with E2 taken from field 2 of seq[s]."""
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * cand_tries * L2
    return cost_two_first
def func_93a00e6abd354d42b52c2d921bf65a56(L1, seq, E1, L2, s):
    """Return (E2 - 1) * E1 * L1 with E2 taken from field 2 of seq[s]."""
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * cand_tries * L2
    return cost_one_first
def func_4736f46267b141feb2ac58a439ce6198(L1, seq, E1, L2, s):
    """Compute both ordering costs (discarded) and return E2, i.e.
    field 2 of seq[s]."""
    cand_tries = seq[s][2]
    cost_one_first = (cand_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * cand_tries * L2
    return cand_tries
def func_64d9a02764d544c9960c645ef4786821(L1, in2, E1, E2, L2, in1, s):
    """Compute both ordering costs, run the tie-break comparison (chosen
    index discarded), and return the cost if item 2 goes first."""
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return cost_two_first
def func_747aaf06f553434e9c96fb1e31d8c3cd(L1, in2, E1, E2, L2, in1, s):
    """Return *s* when item 2 wins the tie-break; raises
    UnboundLocalError when neither branch fires, matching the original."""
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return best
def func_db95ae82d9db4886be299a5391fa8d28(L1, in2, E1, E2, L2, in1, s):
    """Compute both ordering costs, run the tie-break comparison (chosen
    index discarded), and return the cost if item 1 goes first."""
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return cost_one_first
def func_9ffb30ddd1414e5ea28f632391042c29(seq, best, s):
    """Read fields 0..2 of seq[best] and field 0 of seq[s]; return
    field 2 of seq[best]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    return b_tries
def func_4f8aa81a51d64e748260e627ab9adb18(seq, best, s):
    """Read fields 0..2 of seq[best] and field 0 of seq[s]; return
    field 0 of seq[s]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    return c_idx
def func_000c80b47d234b31af08c99ba52fea89(seq, best, s):
    """Read fields 0..2 of seq[best] and field 0 of seq[s]; return
    field 0 of seq[best]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    return b_idx
def func_8f23da798fb84a438179aa446e49ede6(seq, best, s):
    """Read fields 0..2 of seq[best] and field 0 of seq[s]; return
    field 1 of seq[best]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    return b_len
def func_3a548939a4fb4cd69e0a035b582d92b7(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0-1 of seq[s]; return
    field 0 of seq[s]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return c_idx
def func_4da40f33b9c8454f8b0f7f9617469700(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0-1 of seq[s]; return
    field 1 of seq[s]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return c_len
def func_2a62dc7596cf4c04bbfb32df1315590f(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0-1 of seq[s]; return
    field 2 of seq[best]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return b_tries
def func_499db0ecba754b34b6cb0dcf888cd935(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0-1 of seq[s]; return
    field 1 of seq[best]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return b_len
def func_25e4b2d682aa45e3a881cd14b71dc31a(seq, best, s):
    """Read field 2 of seq[best] and fields 0..2 of seq[s]; return
    field 2 of seq[s]."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return c_tries
def func_d6ee283d7e964f12a3f4caa4f4d475de(seq, best, s):
    """Read field 2 of seq[best] and fields 0..2 of seq[s]; return
    field 2 of seq[best]."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return b_tries
def func_b9a5badb8de44052ba02dcd82e8704c9(seq, best, s):
    """Read field 2 of seq[best] and fields 0..2 of seq[s]; return
    field 1 of seq[s]."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return c_len
def func_0c24cb8d4449422cadd5f0908946de9c(seq, best, s):
    """Read field 2 of seq[best] and fields 0..2 of seq[s]; return
    field 0 of seq[s]."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return c_idx
def func_eb8b4d79f6ec41cdb23ce6ce1e298321(L1, seq, E1, s):
    """Read fields 0..2 of seq[s] and return (E2 - 1) * E1 * L1 where E2
    is field 2."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    return cost_one_first
def func_cf9433b02353492db959dfcba9ad326f(L1, seq, E1, s):
    """Read fields 0..2 of seq[s], compute the ordering cost (discarded),
    and return field 0 of seq[s]."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    return c_idx
def func_19441914f04b42c9a4a467f51ba900f3(L1, seq, E1, s):
    """Read fields 0..2 of seq[s], compute the ordering cost (discarded),
    and return field 2 of seq[s]."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    return c_tries
def func_97c0c7df32db4c57963585edd71f4254(L1, seq, E1, s):
    """Read fields 0..2 of seq[s], compute the ordering cost (discarded),
    and return field 1 of seq[s]."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    return c_len
def func_9cc4f5132e604888b25a9c88cea71536(L1, seq, E1, s):
    """Return (E1 - 1) * E2 * L2 with L2 and E2 taken from fields 1-2 of
    seq[s]."""
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return cost_two_first
def func_75bc30a278144eb4bc9a4f2fe0656153(L1, seq, E1, s):
    """Compute both ordering costs (discarded) and return E2, i.e.
    field 2 of seq[s]."""
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return c_tries
def func_7467b5b40cb945cab9c1dbdf83d96d66(L1, seq, E1, s):
    """Compute both ordering costs (discarded) and return L2, i.e.
    field 1 of seq[s]."""
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return c_len
def func_60f19fd1fa184e04ab269425d9e0c4b6(L1, seq, E1, s):
    """Return (E2 - 1) * E1 * L1 with E2 from field 2 of seq[s]; the
    opposite-order cost is computed and discarded."""
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return cost_one_first
def func_306a3b1594b64af7a86ac671e9cd135b(L1, seq, in2, E1, L2, in1, s):
    """Compute both ordering costs (E2 from field 2 of seq[s]), run the
    tie-break (chosen index discarded), and return the item-2-first cost."""
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return cost_two_first
def func_61ad1ba4c1a647d7b975342527c23020(L1, seq, in2, E1, L2, in1, s):
    """Compute both ordering costs (E2 from field 2 of seq[s]), run the
    tie-break (chosen index discarded), and return the item-1-first cost."""
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return cost_one_first
def func_82369c162d6747d3a233e89d9df9a4e7(L1, seq, in2, E1, L2, in1, s):
    """Return *s* when item 2 wins the tie-break (E2 from field 2 of
    seq[s]); raises UnboundLocalError when neither branch fires,
    matching the original."""
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return best
def func_2af22ebbcd264940a6d73317b72510e7(L1, seq, in2, E1, L2, in1, s):
    """Compute both ordering costs and the tie-break (both discarded);
    return E2, i.e. field 2 of seq[s]."""
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * L2
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return c_tries
def func_26662626a5584d1fa344f71fdbc59841(seq, best, s):
    """Read fields 0..2 of seq[best] and fields 0-1 of seq[s]; return
    field 1 of seq[s]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return c_len
def func_42ff382c43d64534ad7bec2ac37c88d1(seq, best, s):
    """Read fields 0..2 of seq[best] and fields 0-1 of seq[s]; return
    field 2 of seq[best]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return b_tries
def func_780ec5d57c9f4336bcac192dc7a3f325(seq, best, s):
    """Read fields 0..2 of seq[best] and fields 0-1 of seq[s]; return
    field 0 of seq[s]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return c_idx
def func_b954a1e9ddda4e0789469ae0298a1d30(seq, best, s):
    """Read fields 0..2 of seq[best] and fields 0-1 of seq[s]; return
    field 0 of seq[best]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return b_idx
def func_401c2ac7a24e4227b248aa660653de09(seq, best, s):
    """Read fields 0..2 of seq[best] and fields 0-1 of seq[s]; return
    field 1 of seq[best]."""
    b_idx = seq[best][0]
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    return b_len
def func_beefbad562264f0e8e7521f6cb2648e1(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0..2 of seq[s]; return
    field 0 of seq[s]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return c_idx
def func_e7e88e50f81842778b3e2c691e8a9a88(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0..2 of seq[s]; return
    field 2 of seq[s]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return c_tries
def func_a6745a0d177f401ba4bdcf9e64a5325f(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0..2 of seq[s]; return
    field 1 of seq[s]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return c_len
def func_fb87b0374dd747a8a6e8acfb2d1e5574(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0..2 of seq[s]; return
    field 2 of seq[best]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return b_tries
def func_7017fb4e399c43bbbc267eed23f8c874(seq, best, s):
    """Read fields 1-2 of seq[best] and fields 0..2 of seq[s]; return
    field 1 of seq[best]."""
    b_len = seq[best][1]
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    return b_len
def func_0ec8388b703740f38ddc70f576052fbe(L1, seq, best, s):
    """Read E1 from seq[best], fields 0..2 of seq[s], compute the
    ordering cost (discarded), and return field 0 of seq[s]."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * b_tries * L1
    return c_idx
def func_6d8056da808d4db1a20c03c5e2b85b84(L1, seq, best, s):
    """Read E1 from seq[best], fields 0..2 of seq[s], compute the
    ordering cost (discarded), and return E1 (field 2 of seq[best])."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * b_tries * L1
    return b_tries
def func_c7a88a4518b3463c954381baec02ae72(L1, seq, best, s):
    """Read E1 from seq[best], fields 0..2 of seq[s], compute the
    ordering cost (discarded), and return E2 (field 2 of seq[s])."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * b_tries * L1
    return c_tries
def func_beff2c329f85478db41b1cecaeda0130(L1, seq, best, s):
    """Return (E2 - 1) * E1 * L1 with E1 from field 2 of seq[best] and
    E2 from field 2 of seq[s]."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * b_tries * L1
    return cost_one_first
def func_e1a9fb19f3aa4fe7b40f0afbca2623ca(L1, seq, best, s):
    """Read E1 from seq[best], fields 0..2 of seq[s], compute the
    ordering cost (discarded), and return L2 (field 1 of seq[s])."""
    b_tries = seq[best][2]
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * b_tries * L1
    return c_len
def func_a462165d26584dfd919389ace5ff3bd7(L1, seq, E1, s):
    """Read fields 0..2 of seq[s], compute both ordering costs
    (discarded), and return field 0 of seq[s]."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return c_idx
def func_551ec3d6423548adb1a10fe8caa6667b(L1, seq, E1, s):
    """Return (E2 - 1) * E1 * L1 with E2 from field 2 of seq[s]; the
    opposite-order cost is computed and discarded."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return cost_one_first
def func_4ae7d9b8ba9c48f59a064dff066ee487(L1, seq, E1, s):
    """Compute both ordering costs (discarded) and return L2, i.e.
    field 1 of seq[s]."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return c_len
def func_b32592bec5e24343a812278ce51aee52(L1, seq, E1, s):
    """Compute both ordering costs (discarded) and return E2, i.e.
    field 2 of seq[s]."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return c_tries
def func_633d6fda53294db1b2963fa993027b98(L1, seq, E1, s):
    """Return (E1 - 1) * E2 * L2 with L2 and E2 from fields 1-2 of
    seq[s]; the opposite-order cost is computed and discarded."""
    c_idx = seq[s][0]
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    return cost_two_first
def func_8bc346670d6d4c0da556c58fe38a26c2(L1, seq, in2, E1, in1, s):
    """Compute both ordering costs (L2 and E2 from fields 1-2 of seq[s]),
    run the tie-break (chosen index discarded), and return the
    item-2-first cost."""
    c_len = seq[s][1]
    c_tries = seq[s][2]
    cost_one_first = (c_tries - 1) * E1 * L1
    cost_two_first = (E1 - 1) * c_tries * c_len
    tie = abs(cost_one_first - cost_two_first) < 1e-09
    if tie and in2 < in1:
        best = s
    elif not tie and cost_two_first < cost_one_first:
        best = s
    return cost_two_first
def func_74bd1f2178db4480a53bab2cbdce6b85(L1, seq, in2, E1, in1, s):
    """Compare the two pairwise scheduling costs and return the chosen index.

    NOTE(review): `best` is only bound when one of the branches below
    fires; when neither does, the final `return best` raises
    UnboundLocalError. Sibling variants receive `best` as a parameter —
    presumably that was intended here too.
    """
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    # Tie within 1e-9: fall back to input order; otherwise pick the
    # ordering with the lower extra cost.
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_7a23c47b8849436dbb5b29d10ebf5047(L1, seq, in2, E1, in1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L2
def func_08963a8e39b0464dbbca6eb198f537a7(L1, seq, in2, E1, in1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E2
def func_b30eb9004d214ba39dd34b37c400b833(L1, seq, in2, E1, in1, s):
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_1_goes_first
def func_03d967dc08f24482af096d8c76504f30(seq, best, s):
    """Return L2, the second field of seq[s].

    The fields of seq[best] are unpacked for parity with the sibling
    accessor variants but are otherwise unused.
    """
    in1, L1, E1 = seq[best][0], seq[best][1], seq[best][2]
    in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
    return L2
def func_1cb6c636ed8a4bc59ff16248b5e81f70(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E1
def func_18eb1dc434c748098917acc592b4380f(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return in2
def func_78feaee1545a4c2c837b2c3338cd53ca(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return L1
def func_5721438a758d4b6d977606195ecaa654(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return in1
def func_ed263f70f8ec41fd91da23fc12fd6b90(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
return E2
def func_0c2335400863405da6593c8e4efc8e98(seq, best, s):
    """Return E2, the third field of seq[s].

    The cost product is evaluated (as in the sibling variants) but its
    result is discarded.
    """
    L1, E1 = seq[best][1], seq[best][2]
    in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
    cost_one_first = (E2 - 1) * E1 * L1  # computed but unused
    return E2
def func_d03a3f0ed76a4598b67f0ebad074761d(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return L1
def func_f9b1010f664c46169e95ce0c3553c05a(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return E1
def func_775bd3c9a27d4ff98580dd0d11cadde5(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return extra_if_1_goes_first
def func_cba28b78cdb24ee9806cc031ed172552(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return L2
def func_676b1a64f7174b53beb60562ef1e6e45(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return in2
def func_e0caa257f46046fb8e4fb24888e83539(L1, seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E1
def func_ca98bf527a954be7b11def3e268570e6(L1, seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_2_goes
def func_fad10c0470b34aa7bfe4c9fcadd08438(L1, seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return in2
def func_d4ed34f0fea343beb0f94e82b78ab428(L1, seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E2
def func_baae37bebdd943e48e0c073c6f5bdcde(L1, seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return L2
def func_40e1a5f9c2b44936bfd31548367e4b15(L1, seq, best, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_1_goes_first
def func_2b6343bf37a846669e903838a27527fb(L1, seq, E1, in1, s):
    """Return E2, the third field of seq[s].

    The comparison below selects a `best` index exactly as the sibling
    selection helpers do, but the selection result is discarded.
    """
    in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2
    if abs(cost_one_first - cost_two_first) < 1e-09:
        if in2 < in1:
            best = s
    elif cost_two_first < cost_one_first:
        best = s
    return E2
def func_c24918d6b4904cc48a0c7d64823143f0(L1, seq, E1, in1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return in2
def func_434575e2f676436bbb13a6396eefd999(L1, seq, E1, in1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L2
def func_2f9820ce353546228d9a4c82934a2ed1(L1, seq, E1, in1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_1_goes_first
def func_b0aecc598af04f3395f5ed883ff50f2d(L1, seq, E1, in1, s):
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_c709e7a26873426ab7a509abb73d5962(L1, seq, E1, in1, s):
    """Compare the two pairwise scheduling costs and return the chosen index.

    NOTE(review): `best` is only bound inside the branches; when neither
    condition fires, `return best` raises UnboundLocalError. Sibling
    variants take `best` as a parameter — presumably intended here too.
    """
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    # Tie within 1e-9: fall back to input order; else prefer the cheaper order.
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_276365d95a0942de848ec47239aac9f3(seq, best, s):
    """Return (E2 - 1) * E1 * L1: the extra cost of running `best` first.

    The unused fields are unpacked for parity with sibling variants.
    """
    in1, L1, E1 = seq[best][0], seq[best][1], seq[best][2]
    in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
    return (E2 - 1) * E1 * L1
def func_31b4bb651bd44a198cd507ba0162691b(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return in2
def func_dd32d80551fe4b84a555324952b881bf(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return L1
def func_e52679317d3746cbba41fb4abb0cc7a5(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return in1
def func_0a90a4ecaabe489293e0f9f633ee1a4b(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return E1
def func_05a8a0c06acf4aa5a0ed9f7ed58d3157(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return L2
def func_d20be7e881044963a450d0bc92a293d4(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
return E2
def func_b07d445cfa784c638be4aa4d63e89835(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return in2
def func_bce732822e334ec888da4d09d8a77532(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return L1
def func_603bcf919bb348b19a460e7b037a446d(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E1
def func_3cd208cc36d74b829ac8740f7e3f0e00(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E2
def func_a3cb3eff22db4d77888b39bdc7fc99c6(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return L2
def func_74494249e563475687ae1754aa405565(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_2_goes
def func_dc9a9681f87a4070b00fa2efa87cc6c1(seq, best, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_1_goes_first
def func_8f757dc928804bc28204e902824240a3(L1, seq, in1, s):
    """Compute the extra cost of running the current best entry first.

    BUG(review): `best` is assigned in the branches below, which makes it
    a local variable throughout this function; the very first line then
    raises UnboundLocalError on every call. Sibling variants take `best`
    as a parameter — presumably this one should too.
    """
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_1_goes_first
def func_ba21fd8c4d004f368c58df23d98074b5(L1, seq, in1, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E1
def func_8754b9b0e49d40fb8fa684a0557c02de(L1, seq, in1, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E2
def func_d419ec5c85474a6dbee6810c7373db52(L1, seq, in1, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_5ad6e516312548a196872f01b2759c9e(L1, seq, in1, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return in2
def func_63928484963e440cbc39bb40612c0238(L1, seq, in1, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return best
def func_690b9990450841f484b01f29ff52426d(L1, seq, in1, s):
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L2
def func_c9db3e364c6a4516a7e5bd9cbd723934(seq, best, s):
    """Return the extra cost of running entry `best` before entry `s`.

    The opposite ordering's cost is also evaluated (for parity with the
    sibling variants) but discarded.
    """
    in1, L1, E1 = seq[best][0], seq[best][1], seq[best][2]
    in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
    cost_one_first = (E2 - 1) * E1 * L1
    cost_two_first = (E1 - 1) * E2 * L2  # computed but unused
    return cost_one_first
def func_769474dd72394ff38007f8ebc4813679(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return in1
def func_aba6d50056d1478087b015aa0e7c4e95(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return extra_if_2_goes
def func_b4c87e5d06ef4f07a3aaae28bdadbec2(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return L2
def func_7721bbb36df04368896f12c9e3c61ae2(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E2
def func_1df32fc3dc3346c8bd90d9eaaff83871(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return E1
def func_212129b862f84dd287e35750d77cff00(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return L1
def func_d1201522f1824d12b819240532a6b3e0(seq, best, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
return in2
def func_bf4fb5c181b74cafb912eafe40c48108(seq, in1, s):
    """Return in2, the first field of seq[s].

    BUG(review): `best` is assigned in the branches below, making it a
    local variable for the whole function; the first two lines therefore
    raise UnboundLocalError on every call. Sibling variants take `best`
    as a parameter.
    """
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return in2
def func_1886e344e4c844c4a3db49dfee49cbc8(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E1
def func_904dcc12369f41b5ad8b6b76c146a34e(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_13fb6fe60bd54ea4bf24a57a67efc007(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E2
def func_a243aec6c6614d1980e0a323fc06dbb7(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L2
def func_b22d7b5ff209488a9be6166b67433846(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_1_goes_first
def func_7ea3c21b8cbf49fbb595c4a967178632(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L1
def func_dd6224bd72d94d869ee4dbbd12fb24f1(seq, in1, s):
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return best
def func_07043727f6f948a7a7a1490b8a0968a6(seq, s):
    """Return the extra cost of running entry s before the current best.

    BUG(review): `best` is assigned inside the branches, so it is a local
    name throughout; reading `seq[best]` on the first lines raises
    UnboundLocalError on every call. Sibling variants take `best` as a
    parameter.
    """
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return extra_if_2_goes
def func_b9ca9923382c40c9b35f6d44526b8896(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L2
def func_5a8a5ee4ec724918899e1a6c6c1362e1(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return in2
def func_a3eedcc3ab6c46b39cf2df44898310fc(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L1
def func_76de85a4b1a447038f4b7c73eb2d4aab(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E2
def func_9c72fdaae5f14d9eafd8fe6276dc08bd(seq, s):
    """Select between the current best entry and entry s and return the index.

    BUG(review): `best` is assigned inside the branches, so it is local
    for the whole function; reading `seq[best]` on the first lines raises
    UnboundLocalError on every call. Sibling variants initialise or
    receive `best` before using it.
    """
    in1 = seq[best][0]
    L1 = seq[best][1]
    E1 = seq[best][2]
    in2 = seq[s][0]
    L2 = seq[s][1]
    E2 = seq[s][2]
    extra_if_1_goes_first = (E2 - 1) * E1 * L1
    extra_if_2_goes = (E1 - 1) * E2 * L2
    if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
        if in2 < in1:
            best = s
    elif extra_if_2_goes < extra_if_1_goes_first:
        best = s
    return best
def func_ef32b7bbd3174599ba8798674d905b9e(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return in1
def func_ad7e1b369b0b436fb363603fb16c1d37(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_1_goes_first
def func_46739286d7904cbba1b56c8c0bd0ec7b(seq, s):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E1
def func_73de7073405f42009af5465731a73768(seq, N, k):
    """Run the selection scan over entries k+1..N-1 and return the last
    computed cost of running the then-current best entry first.

    Each seq entry is a triple (input_order, L, E). NOTE(review): when
    N <= k + 1 the loop body never executes and the final return raises
    UnboundLocalError — callers presumably guarantee a challenger exists.
    """
    best = k
    for s in range(k + 1, N):
        in1, L1, E1 = seq[best][0], seq[best][1], seq[best][2]
        in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
        cost_best_first = (E2 - 1) * E1 * L1
        cost_s_first = (E1 - 1) * E2 * L2
        if abs(cost_best_first - cost_s_first) < 1e-09:
            if in2 < in1:
                best = s
        elif cost_s_first < cost_best_first:
            best = s
    return cost_best_first
def func_b3b2ada0bcaa4c008db1cd1bd6785357(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return in2
def func_32a6ae8bbff643b889e4baaebb1abebc(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return in1
def func_25f66e33518e43cabb798fe0997f2c77(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return s
def func_7ee323bb675047f2a14343d9d16bd326(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L1
def func_6e36f2ce8e3c4b7c9222b1456bfd605f(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E1
def func_8d4065eae50c484f87adfb303a7b2934(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return L2
def func_137d28c93f044646930563931cf75235(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return E2
def func_80078cad413d48db8f51c7ba603269e3(seq, N, k):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
return extra_if_2_goes
def func_8c7364c107ba406e9ba978704e8bf711(seq, N, k):
    """Selection step: return the index in [k, N) of the entry that should
    run first, comparing pairwise expected extra cost.

    Each seq entry is a triple (input_order, L, E). For the current best
    and each challenger s, the extra cost of each ordering is compared;
    near-ties (within 1e-9) fall back to input order.
    """
    best = k
    for s in range(k + 1, N):
        in1, L1, E1 = seq[best][0], seq[best][1], seq[best][2]
        in2, L2, E2 = seq[s][0], seq[s][1], seq[s][2]
        cost_best_first = (E2 - 1) * E1 * L1
        cost_s_first = (E1 - 1) * E2 * L2
        if abs(cost_best_first - cost_s_first) < 1e-09:
            if in2 < in1:
                best = s
        elif cost_s_first < cost_best_first:
            best = s
    return best
def func_ab0239ee4dac411ab04ebb0730226cbe(infile):
    """Consume two lines of infile (a count, then whitespace-separated
    ints) and return the ints of the second line as a map object; the
    count line is read only to advance the stream.
    """
    _count = int(infile.readline())
    return map(int, infile.readline().split())
def func_3e4bd242dfd2451b8f389343baa9d94a(infile):
    """Return N, the integer on the first line of infile; the second
    line is also consumed (to advance the stream) but discarded.
    """
    first_line = infile.readline()
    _values = map(int, infile.readline().split())
    return int(first_line)
def func_cde448ee01334e60a65fd6b6c858d2a0(infile):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
return Ls
def func_9639a71df776492790c8c3a6ac0da181(infile):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
return Ps
def func_d3b6a650f5564e5b90a032b7a3e2bb08(infile, ets):
    """Read a line of indices from infile, look them up in ets, return `p`.

    NOTE(review): `p` is the list-comprehension variable. Returning it
    only works under Python 2, where comprehension variables leak into
    the enclosing scope; under Python 3 this raises NameError.
    """
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    return p
def func_27c0a508c9d948f8b92647df98a38242(infile, ets):
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return Ps
def func_ab879c2452ad43c78a2f38f9640c60ac(infile, ets):
    """Read a line of integer indices from infile and return the list of
    ets entries at those indices, in input order.
    """
    positions = map(int, infile.readline().split())
    return [ets[p] for p in positions]
def func_077bc562c8394eaf99d51cd8fed06d89(Ls, Ps, ets, N):
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return etries
def func_0fa80cbdf6fa4516bca1c5d961065bd7(Ls, Ps, ets, N):
    """Build the (index, L, E) sequence, then return `index`.

    NOTE(review): `index` is the list-comprehension variable. Returning
    it only works under Python 2, where comprehension variables leak
    into the enclosing scope; under Python 3 this raises NameError.
    """
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return index
def func_da4bec54e658440d8999a6e7a976eade(Ls, Ps, ets, N):
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return p
def func_ced487153e7e4b6795ae2c3440234a41(Ls, Ps, ets, N):
    """Return the list of N triples (index, Ls[index], ets[Ps[index]]).

    Ls and Ps must be indexable of length >= N; ets maps each position
    in Ps to its entry value.
    """
    lookup = [ets[p] for p in Ps]
    return [(i, Ls[i], lookup[i]) for i in range(N)]
def func_52b07e3f61f548628c8d9cbd919cca40(infile):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
return Ps
def func_106798b0a5cb4e11ab3137a73c86bcf0(infile):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
return Ls
def func_f4faed0dfc55450893722923a5980d38(infile):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
return N
def func_3cd8022712a6482aa81af48ef5850bc1(infile, ets):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return p
def func_7f84f6becef14e58ad1fcd51da4131fc(infile, ets):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return Ls
def func_b39202cbed004cdb81d99629cf298acc(infile, ets):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return Ps
def func_31c9d0ba9c494a31865a5769bfcf3059(infile, ets):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return etries
def func_955f3a3d2fb445849ae6d6c7981b3351(infile, Ls, ets, N):
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return p
def func_729abf6a759047e3827a87ab92f27dd3(infile, Ls, ets, N):
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return index
def func_85fd21ea065849618478e191881db288(infile, Ls, ets, N):
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return Ps
def func_e8f2bfdc5c334191bb47ec6b822b3bcb(infile, Ls, ets, N):
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return etries
def func_91db7761d8c24b88a484f65bc78ef0cb(infile, Ls, ets, N):
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return seq
def func_c08bc7ba09d04bf38d80c2bcf448f01a(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return p
def func_8bc1f8b697e241f694c4a222e3a37465(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return Ps
def func_4e7bf5783882457481831962dc408335(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return etries
def func_1f91512e7f414973a285a50b0bbffa4a(infile, ets):
    """Consume one test case header (count, lengths, positions) from
    infile and return the count N; the ets lookups are performed for
    parity with the sibling parsers but their result is discarded.
    """
    count = int(infile.readline())
    _lengths = map(int, infile.readline().split())
    positions = map(int, infile.readline().split())
    _entries = [ets[p] for p in positions]
    return count
def func_d9e00f6b96354522b06e87d60664a7e5(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
return Ls
def func_e35e61f7e77b490c812d97b58735c18c(infile, ets, N):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return index
def func_9375d76e03774388959ebb3a9ac6fe86(infile, ets, N):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return Ps
def func_378d331e7abf41e98d3712e84752cd04(infile, ets, N):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return etries
def func_6846222b9d114f9ebd8579063fdeeb13(infile, ets, N):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return Ls
def func_f0bd6b989400494ca911620a3afb0100(infile, ets, N):
    """Parse one case body (lengths line, positions line) from infile and
    return the list of N triples (index, L, E).

    Fix: `Ls` is materialized as a list — under Python 3 `map()` returns
    an iterator, and the original `Ls[index]` subscript raised TypeError.
    """
    Ls = list(map(int, infile.readline().split()))
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return seq
def func_ecdfdd332c46456bb6c2ad62c3774cb5(infile, ets, N):
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return p
def func_9a46fa26703846e0aa1c3c18d49e1930(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return p
def func_4eeb744a2c5144aa8b7e8789b5966467(infile, ets):
    """Parse one full test case (count, lengths, positions) from infile
    and return the list of N triples (index, L, E).

    Fix: `Ls` is materialized as a list — under Python 3 `map()` returns
    an iterator, and the original `Ls[index]` subscript raised TypeError.
    """
    N = int(infile.readline())
    Ls = list(map(int, infile.readline().split()))
    Ps = map(int, infile.readline().split())
    etries = [ets[p] for p in Ps]
    seq = [(index, Ls[index], etries[index]) for index in range(N)]
    return seq
def func_0c3a6c61cc824b0b8fd4571c4dff0b96(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return index
def func_70b619a471a74a5d87326c2801faff09(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return etries
def func_a734e4b0348840cf922386525d3b8d7e(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return Ls
def func_b152e58705c0487ab6b6c58f4336ec42(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return N
def func_52f4696459424bafbb948019eab79396(infile, ets):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
return Ps
def func_946182c5c46c4b27a7e2ff7b791610ef():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
return p
def func_df57f5c322bc4bbf8b547e9a8d26d453():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
return ets
def func_0b12d1ed8c4440ccb21403d9983a4160():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
return infile
def func_5e2fd955e30341c5b5b9fa681d13d3dd():
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
return T
def func_e8ad28292c584451bccfa1c636c818d8():
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
return infile
def func_7bbf3a29f3514bc5b4a2fb057f324aca(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return etries
def func_60a9e5a69dd8489eb195b45ac2807e61(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L2
def func_a4a27d35cce54e2d8e8b999ebdb4c837(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in2
def func_5e4b007911514ed2872d70ed44321176(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return k
def func_ce871fc9b71a4e6ab837d400e63b48dd(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E2
def func_d7cf97b1463841168ce6a2d4f9736ca5(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_2_goes
def func_0d0ae20f4fb44b8da5da5e24ba50b9ad(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return best
def func_907244e2a9394d5499b482f2cebe8eb2(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return s
def func_7735a52ea424434f8f78609b7310d17c(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return seq
def func_ffd1fc3e28d24efa89494d08edf56095(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_1_goes_first
def func_17798f329f0441d0907e878cabebda21(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return N
def func_ddc9bf543d374680b710afccfc4d2c3a(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return tcase
def func_2f3daf0e750847bca748d1fb6e827a39(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in1
def func_9ad6a0caecfe434b9cec770ae8534f61(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ps
def func_c52233971aa547ffb4c7dcfd75efb8b3(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return index
def func_df97638f69df4c02aba7d120897b1166(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ls
def func_6b8ea3a1aa6f46fd8a2354e0513c75a4(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E1
def func_c3de0d3ff2c84f4e90bbe49178985516(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L1
def func_4563c0ee033b421f8863109cb48daeda(infile, ets, T):
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return p
def func_24324fdf885f4939a94d171b7536065b(infile, in2, best, in1, s):
if in2 < in1:
best = s
infile.close()
return best
def func_7b7d9d55155048fe8bb67088c629f456():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
return infile
def func_669c054bd1b044d7b8de14a4ec77c747():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
return ets
def func_135ba30eec61462b828462bf2a62c397():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
return p
def func_2b328366d3374952bd576c73c19c0201():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
return T
def func_89240182cd234172bb577afce79cd2af(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return etries
def func_606a8cad57a842f688849a95e2dfb5f7(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return N
def func_e88d3a822ce54ddf84f9dcd61a66c487(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return index
def func_cefdcb91adf0468186c3d8b4445b0e4d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return p
def func_5d2adbbf437f4376b16b0e53f0a493d1(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in2
def func_0a1e20ec3b5042cea485b28da84692fd(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in1
def func_1ab3f9a95f5d4433ad5bc0ce6896057d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return k
def func_2d872ed0fd2142cc995fe189455d7271(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L1
def func_b4b71910f07c4fc994168a099189ac35(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return tcase
def func_ad1a6add18a34d5484842f054512a610(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ls
def func_7e8f35a6c0324e918e3880f4b8465113(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return best
def func_e55a33d55dc3457cb12dfd3b4f2e0ba3(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return s
def func_55af51f0392e47909a652582a67f0413(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return T
def func_f7c058576adf42e4b5826aad3a5d914d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E1
def func_8908ba15402e4dda8a2a0ddf9099bc97(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ps
def func_b836853a0b5545be84bb60b52dbbc33a(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_1_goes_first
def func_a81028678613485f9ba9d090ba5ad21d(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_2_goes
def func_02550088718b4a7ca15e55dcc4d2e548(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return seq
def func_d9a583dcf77c412385b4ad362d41cc6b(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E2
def func_2dbafd8341bd4c7c878b9099aaed4062(infile, ets):
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L2
def func_4f42d5288708422394bf384f142862bf(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return infile
def func_349a24733e5e4b21aaed8390c0bbfd19(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_2_goes
def func_ab4efed1e790487c8ad25a95750c9391(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return index
def func_474b8e6e0e16487487f2f621d5157a32(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in2
def func_d518370f1e894f7c96c1b4b0ce157682(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L1
def func_298882a2019c4ed4807baa14a5322931(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ps
def func_0dc56dcdcc8b4455978aa0586fa777be(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return N
def func_b3494619a5de49cb906c5590cca1198b(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return best
def func_775dd034affc4544886fef0d4cd42666(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return tcase
def func_b432f9f81fec4444989992c564c49ad0(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return T
def func_e9c6e87ef9e74a76bca1adfa1d997efe(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return etries
def func_2f775a07e82847098ea1b21efd6a4576(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ls
def func_83e436c076e54b99b5b9fe428c5ae45f(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in1
def func_9ccebdf4e16f4d20971d4707bb263334(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E1
def func_6eafa7e5fd3f41e488a55e2cfed0c4b4(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return p
def func_c79e67e18b51455e815ddfe33fc61e05(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E2
def func_dcd53c4fcab74e879dcfc4764ba66ce5(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_1_goes_first
def func_8cf7f31eee2149f0a554c9299ca93099(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return k
def func_e3520b77881c48e0bfe30b06eb10980d(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L2
def func_39c5f879552e47ec934b8f07646bd9c4(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return s
def func_2ecb734f247943ba945937e3f44dc1b2(ets):
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return seq
def func_82f7cb4853894b909a299beef45fa74a():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return ets
def func_ccffcdc160594851be797fb5376ad794():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in1
def func_fa559a7ef73144e4b39ca7e2f57bbe7c():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return k
def func_716267b36d9a43a18f1cdaf4ac34be03():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L2
def func_d1710f63634f408faef12b86cc66e3e7():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return best
def func_e57c7b4d814c426da80046f0317360b0():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return T
def func_099583d4c9a443568bf21eeace702030():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return tcase
def func_daab5199ffab451da4c42b5857266ac9():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return s
def func_398c79f837814d7b82eadf54ae23f10c():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return in2
def func_3d2373c3faa1429a92ab599f39f80350():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return N
def func_1cb5c69110ee49c992a25bf4346fb84f():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return index
def func_b03ab3c331d64147b6f1f4cba2e981d8():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E2
def func_5343f66d0c3d43dda2c4e9ea17c2183b():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return seq
def func_1d910e25479941e096a7e6f28296cb84():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return infile
def func_d0e0497e337249ccbeba931dfccfe33d():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return L1
def func_772ef3879b724987af8f4a672bd466cb():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return p
def func_ac7d38a970104ec0ad14e14114963d5b():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_2_goes
def func_65022ba6f5a14df590c5ba98a8c99f3d():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ps
def func_32ebd92069a14fdb8d58dd336958ff9b():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return Ls
def func_ef13852658fe4eb08c3b8171deb00ad3():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return etries
def func_c2d96ab8fffc4fb884843fb76bb9ab72():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return extra_if_1_goes_first
def func_4aa381869e2d4e518bbbb93eace0a4a5():
ets = [computeEtries(p) for p in range(100)]
infile = open('codejam/test_files/Y12R5P1/A.in')
T = int(infile.readline())
for tcase in range(T):
N = int(infile.readline())
Ls = map(int, infile.readline().split())
Ps = map(int, infile.readline().split())
etries = [ets[p] for p in Ps]
seq = [(index, Ls[index], etries[index]) for index in range(N)]
for k in range(N):
best = k
for s in range(k + 1, N):
in1 = seq[best][0]
L1 = seq[best][1]
E1 = seq[best][2]
in2 = seq[s][0]
L2 = seq[s][1]
E2 = seq[s][2]
extra_if_1_goes_first = (E2 - 1) * E1 * L1
extra_if_2_goes = (E1 - 1) * E2 * L2
if abs(extra_if_1_goes_first - extra_if_2_goes) < 1e-09:
if in2 < in1:
best = s
elif extra_if_2_goes < extra_if_1_goes_first:
best = s
swap = seq[best]
seq[best] = seq[k]
seq[k] = swap
print 'Case #%d:' % (tcase + 1),
print ' '.join(map(str, [seq[i][0] for i in range(N)]))
if in2 < in1:
best = s
return E1
| nl | 0.122193 | #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), #%d:' % (tcase + 1), | 1.984145 | 2 |
bot/reviewbot/utils/text.py | reviewboard/ReviewBot | 91 | 6615799 | """Utility functions for working with text."""
from __future__ import division, unicode_literals
import re
_BASE62_CHARS = \
'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_SPLIT_RE = re.compile(r'\s*,+\s*')
def base62_encode(value):
"""Return a base62-encoded string representing a numeric value.
Args:
value (int):
The number to encode. This must be a positive number.
Returns:
bytes:
The base62-encoded string.
"""
if value == 0:
return b'0'
assert value > 0
encoded = []
while value > 0:
value, remainder = divmod(value, 62)
encoded.append(_BASE62_CHARS[remainder])
encoded.reverse()
return ''.join(encoded).encode('ascii')
def split_comma_separated(s):
"""Return a list of values from a comma-separated string.
Any blank values will be filtered out.
Args:
s (unicode):
The string to split.
Returns:
list of unicode:
The list of values.
"""
return [
item
for item in _SPLIT_RE.split(s)
if item
]
| """Utility functions for working with text."""
from __future__ import division, unicode_literals
import re
_BASE62_CHARS = \
'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_SPLIT_RE = re.compile(r'\s*,+\s*')
def base62_encode(value):
"""Return a base62-encoded string representing a numeric value.
Args:
value (int):
The number to encode. This must be a positive number.
Returns:
bytes:
The base62-encoded string.
"""
if value == 0:
return b'0'
assert value > 0
encoded = []
while value > 0:
value, remainder = divmod(value, 62)
encoded.append(_BASE62_CHARS[remainder])
encoded.reverse()
return ''.join(encoded).encode('ascii')
def split_comma_separated(s):
"""Return a list of values from a comma-separated string.
Any blank values will be filtered out.
Args:
s (unicode):
The string to split.
Returns:
list of unicode:
The list of values.
"""
return [
item
for item in _SPLIT_RE.split(s)
if item
]
| en | 0.591956 | Utility functions for working with text. Return a base62-encoded string representing a numeric value. Args: value (int): The number to encode. This must be a positive number. Returns: bytes: The base62-encoded string. Return a list of values from a comma-separated string. Any blank values will be filtered out. Args: s (unicode): The string to split. Returns: list of unicode: The list of values. | 3.71226 | 4 |
oauth_example/as/aatest/parse_cnf.py | StudienprojektUniTrier/Authorization-Server | 0 | 6615800 | <reponame>StudienprojektUniTrier/Authorization-Server<filename>oauth_example/as/aatest/parse_cnf.py
import json
import yaml
from aatest import Unknown
from aatest.func import factory as aafactory
__author__ = 'roland'
class MissingParent(Exception):
pass
def _get_cls(name, factories, use=''):
if use:
try:
cls = factories[use](name)
except Unknown:
pass
else:
return cls
try:
cls = factories[''](name)
except Unknown:
raise Exception("Unknown Class: '{}'".format(name))
return cls
def _get_func(dic, func_factory):
"""
Convert function names into function references
:param dic: A key, value dictionary where keys are function names
:param func_factory: Factory function used to find functions
:return: A dictionary with the keys replace with references to functions
"""
res = {}
for fname, val in dic.items():
func = func_factory(fname)
if func is None:
func = aafactory(fname)
if func is None:
raise Exception("Unknown function: '{}'".format(fname))
res[func] = val
return res
def parse_yaml_conf(cnf_file, cls_factories, func_factory, use=''):
"""
:param cnf_file:
:param use:
:return:
"""
stream = open(cnf_file, 'r')
yc = yaml.safe_load(stream)
stream.close()
for tid, spec in yc['Flows'].items():
seq = []
for oper in spec["sequence"]:
if isinstance(oper, dict): # Must be only one key, value item
if len(oper) > 1:
raise SyntaxError(tid)
key, val = list(oper.items())[0]
try:
seq.append((_get_cls(key, cls_factories, use),
_get_func(val, func_factory)))
except Exception:
print('tid:{}'.format(tid))
raise
else:
try:
seq.append(_get_cls(oper, cls_factories, use))
except Exception:
print('tid:{}'.format(tid))
raise
spec["sequence"] = seq
return yc
def parse_json_conf(cnf_file, cls_factories, func_factory, use=''):
"""
:param cnf_file:
:param use:
:return:
"""
stream = open(cnf_file, 'r')
js = json.load(stream)
stream.close()
for tid, spec in js['Flows'].items():
seq = []
for oper in spec["sequence"]:
if isinstance(oper, dict): # Must be only one key, value item
if len(oper) > 1:
raise SyntaxError(tid)
key, val = list(oper.items())[0]
try:
seq.append((_get_cls(key, cls_factories, use),
_get_func(val, func_factory)))
except Exception:
print('tid:{}'.format(tid))
raise
else:
try:
seq.append(_get_cls(oper, cls_factories, use))
except Exception:
print('tid:{}'.format(tid))
raise
spec["sequence"] = seq
return js
class Item(object):
def __init__(self, parent, name, desc):
self.parent = parent
self.desc = desc
self.name = name
self.child = []
def build_hierarchy(flows):
items = {}
for id, desc in flows.items():
items[id] = Item('', id, desc)
for item in items.values():
try:
_pre = item.desc['super']
except KeyError:
continue
else:
try:
_parent = items[_pre]
_parent.child.append(item)
item.parent = _parent
except KeyError:
raise MissingParent(item.desc['super'])
return items
def flatten(interim):
res = []
for f in interim:
res.append(f)
if f.child:
res.extend(flatten(sorted(f.child, key=lambda x: x.name)))
return res
def sort(display_order, flows):
items = build_hierarchy(flows)
# toplevel
f_names = [f for f in items.values() if not f.parent]
interim = []
for k in display_order:
k += '-'
l = [z for z in f_names if z.name.startswith(k)]
interim.extend(sorted(l, key=lambda x: x.name))
return flatten(interim)
| import json
import yaml
from aatest import Unknown
from aatest.func import factory as aafactory
__author__ = 'roland'
class MissingParent(Exception):
pass
def _get_cls(name, factories, use=''):
if use:
try:
cls = factories[use](name)
except Unknown:
pass
else:
return cls
try:
cls = factories[''](name)
except Unknown:
raise Exception("Unknown Class: '{}'".format(name))
return cls
def _get_func(dic, func_factory):
"""
Convert function names into function references
:param dic: A key, value dictionary where keys are function names
:param func_factory: Factory function used to find functions
:return: A dictionary with the keys replace with references to functions
"""
res = {}
for fname, val in dic.items():
func = func_factory(fname)
if func is None:
func = aafactory(fname)
if func is None:
raise Exception("Unknown function: '{}'".format(fname))
res[func] = val
return res
def parse_yaml_conf(cnf_file, cls_factories, func_factory, use=''):
"""
:param cnf_file:
:param use:
:return:
"""
stream = open(cnf_file, 'r')
yc = yaml.safe_load(stream)
stream.close()
for tid, spec in yc['Flows'].items():
seq = []
for oper in spec["sequence"]:
if isinstance(oper, dict): # Must be only one key, value item
if len(oper) > 1:
raise SyntaxError(tid)
key, val = list(oper.items())[0]
try:
seq.append((_get_cls(key, cls_factories, use),
_get_func(val, func_factory)))
except Exception:
print('tid:{}'.format(tid))
raise
else:
try:
seq.append(_get_cls(oper, cls_factories, use))
except Exception:
print('tid:{}'.format(tid))
raise
spec["sequence"] = seq
return yc
def parse_json_conf(cnf_file, cls_factories, func_factory, use=''):
"""
:param cnf_file:
:param use:
:return:
"""
stream = open(cnf_file, 'r')
js = json.load(stream)
stream.close()
for tid, spec in js['Flows'].items():
seq = []
for oper in spec["sequence"]:
if isinstance(oper, dict): # Must be only one key, value item
if len(oper) > 1:
raise SyntaxError(tid)
key, val = list(oper.items())[0]
try:
seq.append((_get_cls(key, cls_factories, use),
_get_func(val, func_factory)))
except Exception:
print('tid:{}'.format(tid))
raise
else:
try:
seq.append(_get_cls(oper, cls_factories, use))
except Exception:
print('tid:{}'.format(tid))
raise
spec["sequence"] = seq
return js
class Item(object):
def __init__(self, parent, name, desc):
self.parent = parent
self.desc = desc
self.name = name
self.child = []
def build_hierarchy(flows):
items = {}
for id, desc in flows.items():
items[id] = Item('', id, desc)
for item in items.values():
try:
_pre = item.desc['super']
except KeyError:
continue
else:
try:
_parent = items[_pre]
_parent.child.append(item)
item.parent = _parent
except KeyError:
raise MissingParent(item.desc['super'])
return items
def flatten(interim):
res = []
for f in interim:
res.append(f)
if f.child:
res.extend(flatten(sorted(f.child, key=lambda x: x.name)))
return res
def sort(display_order, flows):
items = build_hierarchy(flows)
# toplevel
f_names = [f for f in items.values() if not f.parent]
interim = []
for k in display_order:
k += '-'
l = [z for z in f_names if z.name.startswith(k)]
interim.extend(sorted(l, key=lambda x: x.name))
return flatten(interim) | en | 0.527877 | Convert function names into function references :param dic: A key, value dictionary where keys are function names :param func_factory: Factory function used to find functions :return: A dictionary with the keys replace with references to functions :param cnf_file: :param use: :return: # Must be only one key, value item :param cnf_file: :param use: :return: # Must be only one key, value item # toplevel | 2.383164 | 2 |
home_seller_app/properties/urls.py | seifgh/home-seller-web-app | 0 | 6615801 | from django.urls import path, include
from .views import *
urlpatterns = [
# React routers
path('', PropertiesPageView.as_view()),
path('property/<int:property_id>', PropertiesPageView.as_view()),
path('bookmarks', PropertiesPageView.as_view()),
path('search', PropertiesPageView.as_view()),
# api
path('api/', include('home_seller_app.properties.api.urls'))
]
| from django.urls import path, include
from .views import *
urlpatterns = [
# React routers
path('', PropertiesPageView.as_view()),
path('property/<int:property_id>', PropertiesPageView.as_view()),
path('bookmarks', PropertiesPageView.as_view()),
path('search', PropertiesPageView.as_view()),
# api
path('api/', include('home_seller_app.properties.api.urls'))
]
| en | 0.456699 | # React routers # api | 1.773566 | 2 |
tests/test_get_profiles_sorting.py | razzius/hms-weave | 4 | 6615802 | import datetime
import http
from .utils import create_test_profile, create_test_verification_token
def test_sort_profiles_by_date_updated(client, auth):
own_profile = create_test_profile(
email="<EMAIL>",
name="<NAME>",
date_updated=datetime.datetime(2018, 1, 1),
available_for_mentoring=True,
)
token = create_test_verification_token(
verification_email=own_profile.verification_email
)
recently_updated_profile = create_test_profile(
email="<EMAIL>",
name="Z",
date_updated=datetime.datetime(2019, 10, 1),
available_for_mentoring=True,
)
not_recently_updated_profile = create_test_profile(
email="<EMAIL>",
name="A",
date_updated=datetime.datetime(2017, 10, 1),
available_for_mentoring=True,
)
auth.login(token.token)
response = client.get("/api/profiles?sorting=date_updated")
assert response.status_code == http.HTTPStatus.OK.value
assert response.json["profile_count"] == 3
results = response.json["profiles"]
assert results[0]["id"] == own_profile.id
assert results[1]["id"] == recently_updated_profile.id
assert results[2]["id"] == not_recently_updated_profile.id
| import datetime
import http
from .utils import create_test_profile, create_test_verification_token
def test_sort_profiles_by_date_updated(client, auth):
own_profile = create_test_profile(
email="<EMAIL>",
name="<NAME>",
date_updated=datetime.datetime(2018, 1, 1),
available_for_mentoring=True,
)
token = create_test_verification_token(
verification_email=own_profile.verification_email
)
recently_updated_profile = create_test_profile(
email="<EMAIL>",
name="Z",
date_updated=datetime.datetime(2019, 10, 1),
available_for_mentoring=True,
)
not_recently_updated_profile = create_test_profile(
email="<EMAIL>",
name="A",
date_updated=datetime.datetime(2017, 10, 1),
available_for_mentoring=True,
)
auth.login(token.token)
response = client.get("/api/profiles?sorting=date_updated")
assert response.status_code == http.HTTPStatus.OK.value
assert response.json["profile_count"] == 3
results = response.json["profiles"]
assert results[0]["id"] == own_profile.id
assert results[1]["id"] == recently_updated_profile.id
assert results[2]["id"] == not_recently_updated_profile.id
| none | 1 | 2.498577 | 2 | |
jai_benchmark/utils/params_base.py | LaudateCorpus1/edgeai-benchmark | 21 | 6615803 | # Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ParamsBase:
def __init__(self):
self.is_initialized = False
def initialize(self):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
self.is_initialized = True
def get_param(self, param_name):
assert self.is_initialized, 'initialize must be called before get_param() can be done'
return self.peek_param(param_name)
def set_param(self, param_name, value):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
if hasattr(self, param_name):
setattr(self, param_name, value)
elif param_name in self.kwargs:
self.kwargs[param_name] = value
else:
assert False, f'param {param_name} could not be found in object {self.__class__.__name__}'
#
def peek_param(self, param_name):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
# param may not be final yet - use get_param instead to be sure
if hasattr(self, param_name):
return getattr(self, param_name)
elif param_name in self.kwargs:
return self.kwargs[param_name]
else:
assert False, f'param {param_name} could not be found in object {self.__class__.__name__}'
#
def get_params(self):
assert self.is_initialized, 'initialize must be called before get_param() can be done'
return self.kwargs
def peek_params(self):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
return self.kwargs | # Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ParamsBase:
def __init__(self):
self.is_initialized = False
def initialize(self):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
self.is_initialized = True
def get_param(self, param_name):
assert self.is_initialized, 'initialize must be called before get_param() can be done'
return self.peek_param(param_name)
def set_param(self, param_name, value):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
if hasattr(self, param_name):
setattr(self, param_name, value)
elif param_name in self.kwargs:
self.kwargs[param_name] = value
else:
assert False, f'param {param_name} could not be found in object {self.__class__.__name__}'
#
def peek_param(self, param_name):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
# param may not be final yet - use get_param instead to be sure
if hasattr(self, param_name):
return getattr(self, param_name)
elif param_name in self.kwargs:
return self.kwargs[param_name]
else:
assert False, f'param {param_name} could not be found in object {self.__class__.__name__}'
#
def get_params(self):
assert self.is_initialized, 'initialize must be called before get_param() can be done'
return self.kwargs
def peek_params(self):
assert hasattr(self, 'kwargs') and isinstance(self.kwargs, dict), \
'the child class must have a dict called kwargs'
return self.kwargs | en | 0.717183 | # Copyright (c) 2018-2021, Texas Instruments # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # param may not be final yet - use get_param instead to be sure # | 1.712767 | 2 |
microphone_match_gui.py | piotrwicijowski/whistler | 0 | 6615804 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
from sys import platform
import os
import datetime
from PyQt5.QtWidgets import (
QApplication,
QWidget,
QPushButton,
QVBoxLayout,
QHBoxLayout,
QFormLayout,
QStackedWidget,
QLabel,
QSizePolicy,
QCheckBox,
QListWidget,
QListWidgetItem,
QLineEdit,
QMainWindow,
QAction,
QProgressBar,
QDialog,
QTableWidget,
QTableWidgetItem,
QFileDialog,
QDialogButtonBox,
QGraphicsScene,
QGraphicsView,
QGraphicsPixmapItem
)
from PyQt5.QtGui import (
QIcon,
QPixmap,
QImage,
QPalette
)
from PyQt5.QtCore import (
QCoreApplication,
QThread,
QBasicTimer,
QUrl,
pyqtProperty,
pyqtSlot,
pyqtSignal,
Qt,
QT_VERSION_STR
)
from PyQt5.QtQml import (qmlRegisterType, QQmlComponent, QQmlEngine)
from PyQt5.QtQuick import (QQuickView)
from PyQt5.QtQuickWidgets import (QQuickWidget)
import locale
os_encoding = locale.getpreferredencoding()
import microphone_match
import scannerSettingsDialog
import matcherSettingsDialog
import audioSettingsDialog
import uiSettingsDialog
import re
# Parse the running Qt version; the QML fullscreen view needs Qt >= 5.8 and
# does not work on Windows.
major, minor, bugfix = QT_VERSION_STR.split('.')
major = int(major)
minor = int(minor)
bugfix = int(bugfix)
# NOTE: the previous check `major < 5 or minor < 8` wrongly disabled the
# feature on any X.y build with y < 8 (e.g. Qt 6.2, where minor == 2).
# Comparing (major, minor) as a tuple implements ">= 5.8" correctly.
enableQmlFullscreen = platform != "win32" and (major, minor) >= (5, 8)
def main(argv):
    """Create the Qt application, show the main window and run the event loop."""
    application = QApplication(argv)
    # Keep a reference to the window so it is not garbage collected while
    # the event loop runs.
    window = MainWindow()
    sys.exit(application.exec_())
class RecorderMatcherThread(QThread):
    """Worker thread that runs one record-and-match cycle off the GUI thread.

    The match outcome is stored on ``self.result`` when ``run`` returns;
    consumers should connect to the thread's ``finished`` signal and read
    ``result`` from there.
    """

    def __init__(self, matcher):
        # `super(self.__class__, ...)` would recurse infinitely if this class
        # were ever subclassed; name the class explicitly instead.
        super(RecorderMatcherThread, self).__init__()
        self.matcher = matcher

    def __del__(self):
        # Block until the worker has finished before the QThread object is
        # torn down, otherwise Qt may abort the process.
        self.wait()

    def run(self):
        # Executed in the worker thread context.
        self.result = self.matcher.recordAndMatch2()
class MainWindow(QMainWindow):
    """Main application window.

    Hosts the classic widget UI (record button, result label, picture,
    progress bar) and, when ``enableQmlFullscreen`` is set, a QML-based
    fullscreen page in a stacked widget.
    """

    def __init__(self):
        super(MainWindow,self).__init__()
        # Apparently unused in this file — the fullscreen page lives in the
        # stacked widget (see setupFullscreenView); candidate for removal,
        # verify before deleting.
        self.fullscreenWindow = None
        self.initUI()
    def initUI(self):
        """Build all widgets, wire the matcher thread, and show the window."""
        self.setWindowTitle('Whistler')
        # A stacked widget holds the normal page (index 0) and, optionally,
        # the QML fullscreen page (index 1).
        self.stackedWidget = QStackedWidget(self)
        self.centralWidget = QWidget(self.stackedWidget)
        self.stackedWidget.addWidget(self.centralWidget)
        # self.continuousMatching = True
        self.continuousMatching = False
        # Shared mutable flag polled by the worker to abort a recording.
        self.threadInterrupter = {'interrupted':False}
        self.continuousMatcher = microphone_match.ContinuousMatcher(self.threadInterrupter)
        self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
        self.matcherThread.finished.connect(self.recordingFinished)
        # Record button; its clicked slot is swapped between recordAndMatch
        # and interruptRecording while a recording is in progress.
        self.recordButton = QPushButton(u'Nagrywaj')
        self.recordButton.resize(self.recordButton.sizeHint())
        self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.recordButton.clicked.connect(self.recordAndMatch)
        self.resultLabel = QLabel()
        if self.continuousMatcher.ready:
            self.resultLabel.setText(u'Gotowy')
        else:
            self.resultLabel.setText(u'Proszę wybrać katalog z bazą danych')
        # Placeholder cover image shown until a match provides one.
        self.pictureLabel = QLabel();
        self.pictureImage = QImage("image.jpg")
        self.pictureImage = self.pictureImage.scaled(200,200,Qt.IgnoreAspectRatio,Qt.FastTransformation)
        self.pictureLabel.setAlignment( Qt.AlignRight | Qt.AlignVCenter );
        self.pictureLabel.setPixmap(QPixmap.fromImage(self.pictureImage))
        # self.continuousCheckBox = QCheckBox()
        # self.continuousCheckBox.setText('Continuous')
        # self.continuousCheckBox.setChecked(self.continuousMatching)
        # self.continuousCheckBox.stateChanged.connect(self.toggleContinuous)
        # Progress state driven by timerEvent while recording.
        self.progress = 0.0
        self.progressBar = QProgressBar()
        self.progressTimer = QBasicTimer()
        self.recentList = []
        self.recentListWidget = QListWidget()
        self.optionsHBox = QHBoxLayout()
        # self.optionsHBox.addWidget(self.continuousCheckBox)
        self.recResHBox = QHBoxLayout()
        self.recResHBox.addWidget(self.recordButton)
        self.recResHBox.addWidget(self.resultLabel)
        self.recResHBox.addWidget(self.pictureLabel)
        self.mainVBox = QVBoxLayout()
        self.mainVBox.addLayout(self.recResHBox)
        self.mainVBox.addLayout(self.optionsHBox)
        # self.mainVBox.addWidget(self.recentListWidget)
        self.mainVBox.addWidget(self.progressBar)
        # self.mainVBox.addStretch(1)
        self.centralWidget.setLayout(self.mainVBox)
        self.setCentralWidget(self.stackedWidget)
        self.runningInFullscreen = False
        # Fallbacks used by recordingFinished when a match has no media files.
        self.defaultImagePath = os.path.join(self.continuousMatcher.applicationPath,'default.png')
        self.defaultText = u'Niestety, nie znaleziono pasującego nagrania,<br>Spróbuj ponownie'
        if enableQmlFullscreen:
            self.setupFullscreenView()
            if(self.continuousMatcher.startFullscreen):
                self.runFullscreen()
        self.setupMenuBar()
        self.show()
def setupMenuBar(self):
menubar = self.menuBar()
fileMenu = menubar.addMenu('&Plik')
settingsMenu = menubar.addMenu('&Ustawienia')
if enableQmlFullscreen:
runFullscreenAction = QAction(QIcon.fromTheme('fullscreen'), u'&Pełny ekran', self)
runFullscreenAction.setShortcut('F11')
runFullscreenAction.setStatusTip(u'Uruchom widok pełnoekranowy')
runFullscreenAction.triggered.connect(self.runFullscreen)
databaseManagementAction = QAction(QIcon.fromTheme('database'), u'&Baza danych', self)
databaseManagementAction.setShortcut('Ctrl+B')
databaseManagementAction.setStatusTip(u'Zarządzaj bazą danych')
databaseManagementAction.triggered.connect(self.openDatabaseManagement)
chooseDatabaseAction = QAction(QIcon.fromTheme('fileopen'), u'&Otwórz bazę danych', self)
chooseDatabaseAction.setShortcut('Ctrl+O')
chooseDatabaseAction.setStatusTip('Otwórz katalog zawierający bazę danych')
chooseDatabaseAction.triggered.connect(self.chooseDatabaseDirectory)
exitAction = QAction(QIcon.fromTheme('application-exit'), u'&Wyjście', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Zamknij program')
exitAction.triggered.connect(QApplication.quit)
uiSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &interfejsu', self)
uiSettingsAction.setShortcut('Ctrl+Shift+U')
uiSettingsAction.setStatusTip(u'Zmień ustawienia interfejsu')
uiSettingsAction.triggered.connect(self.openUiSettings)
audioSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &nagrywania', self)
audioSettingsAction.setShortcut('Ctrl+Shift+R')
audioSettingsAction.setStatusTip(u'Zmień ustawienia nagrywania')
audioSettingsAction.triggered.connect(self.openAudioSettings)
matcherSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &dopasowywania', self)
matcherSettingsAction.setShortcut('Ctrl+Shift+M')
matcherSettingsAction.setStatusTip(u'Zmień ustawienia dopasowywania')
matcherSettingsAction.triggered.connect(self.openMatcherSettings)
scannerSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &skanowania', self)
scannerSettingsAction.setShortcut('Ctrl+Shift+S')
scannerSettingsAction.setStatusTip(u'Zmień ustawienia skanowania')
scannerSettingsAction.triggered.connect(self.openScannerSettings)
if enableQmlFullscreen:
fileMenu.addAction(runFullscreenAction)
fileMenu.addAction(chooseDatabaseAction)
fileMenu.addAction(databaseManagementAction)
fileMenu.addAction(exitAction)
settingsMenu.addAction(uiSettingsAction)
settingsMenu.addAction(audioSettingsAction)
settingsMenu.addAction(matcherSettingsAction)
settingsMenu.addAction(scannerSettingsAction)
    def setupFullscreenView(self):
        """Create the QML fullscreen page and wire its signals to this window."""
        self.fullscreenWidget = QQuickWidget(self)
        self.fullscreenWidget.setResizeMode(QQuickWidget.SizeRootObjectToView)
        self.fullscreenWidget.setSource(QUrl(os.path.join(self.continuousMatcher.applicationPath,'fullscreen.qml')))
        mainRootObject = self.fullscreenWidget.rootObject()
        # QML -> Python: user actions performed on the fullscreen page.
        mainRootObject.startRecording.connect(self.recordAndMatch)
        mainRootObject.stopRecording.connect(self.interruptRecording)
        mainRootObject.closeWindow.connect(self.closeFullscreenWindow)
        # Python -> QML: state/progress updates pushed to the page.
        self.recordingStartedSignal.connect(mainRootObject.stateRecording)
        self.recordingFinishedSignal.connect(mainRootObject.stateReady)
        self.progressChangedSignal.connect(mainRootObject.setProgress)
        self.enablePlaybackSignal.connect(mainRootObject.enablePlayback)
        self.enableAutoPlaybackSignal.connect(mainRootObject.enableAutoPlayback)
        # Push the initial playback configuration to the QML side.
        self.enablePlaybackSignal.emit(self.continuousMatcher.enablePlayback)
        self.enableAutoPlaybackSignal.emit(self.continuousMatcher.autoPlayback)
        # The page becomes index 1 of the stacked widget (index 0 = normal UI).
        self.stackedWidget.addWidget(self.fullscreenWidget)
def runFullscreen(self):
if enableQmlFullscreen:
if not self.runningInFullscreen:
self.runningInFullscreen = True
self.stackedWidget.setCurrentIndex(1)
self.menuBar().setVisible(False)
self.showFullScreen()
else:
self.runningInFullscreen = False
self.stackedWidget.setCurrentIndex(0)
self.menuBar().setVisible(True)
self.showNormal()
def closeFullscreenWindow(self):
if enableQmlFullscreen:
self.runningInFullscreen = False
self.stackedWidget.setCurrentIndex(0)
self.menuBar().setVisible(True)
self.showNormal()
def openDatabaseManagement(self, newValue):
databaseDialog = QDialog(self)
databaseTable = QTableWidget()
self.fillDatabaseManagementTable(databaseTable)
rescanButton = QPushButton(u'Skanuj ponownie')
rescanButton.clicked.connect(lambda: self.rescanDatabaseAndFillTable(databaseTable))
dialogButtons = QDialogButtonBox(QDialogButtonBox.Close)
dialogButtons.rejected.connect(databaseDialog.accept)
databaseLayout = QVBoxLayout()
databaseLayout.addWidget(databaseTable)
databaseLayout.addWidget(rescanButton)
databaseLayout.addWidget(dialogButtons)
databaseDialog.setLayout(databaseLayout)
databaseDialog.exec_()
    def rescanDatabaseAndFillTable(self,table):
        """Rescan the database directory, then refresh the management table."""
        self.continuousMatcher.scanDirectory()
        self.fillDatabaseManagementTable(table)
def fillDatabaseManagementTable(self, table):
tableHeaders = [u'Obraz',u'Artysta',u'Tytuł',u'Audio']
table.setRowCount(0)
table.setRowCount(len(self.continuousMatcher.hash_tab.metadata))
table.setColumnCount(len(tableHeaders))
table.setHorizontalHeaderLabels(tableHeaders)
for i, val in enumerate(self.continuousMatcher.hash_tab.metadata):
artistItem = QTableWidgetItem(val.get("artist",""))
titleItem = QTableWidgetItem(val.get("title",""))
audioItem = QTableWidgetItem(self.continuousMatcher.hash_tab.names[i])
table.setItem(i,1,artistItem)
table.setItem(i,2,titleItem)
table.setItem(i,3,audioItem)
table.resizeColumnsToContents()
table.resizeRowsToContents()
    def openScannerSettings(self, newValue):
        """Open the scanner settings dialog (`newValue` from QAction.triggered is ignored)."""
        settingsDialog = scannerSettingsDialog.ScannerSettingsDialog(self, self.continuousMatcher)
        settingsDialog.run()
    def openMatcherSettings(self, newValue):
        """Open the matcher settings dialog (`newValue` from QAction.triggered is ignored)."""
        settingsDialog = matcherSettingsDialog.MatcherSettingsDialog(self, self.continuousMatcher)
        settingsDialog.run()
    def openUiSettings(self, newValue):
        """Open the UI settings dialog and re-send playback options to QML."""
        settingsDialog = uiSettingsDialog.UiSettingDialog(self,self.continuousMatcher)
        settingsDialog.run()
        # The dialog may have changed these settings; propagate them to the
        # fullscreen page (connected in setupFullscreenView).
        self.enablePlaybackSignal.emit(self.continuousMatcher.enablePlayback)
        self.enableAutoPlaybackSignal.emit(self.continuousMatcher.autoPlayback)
    def openAudioSettings(self, newValue):
        """Open the audio/recording settings dialog (`newValue` is ignored)."""
        settingsDialog = audioSettingsDialog.AudioSettingDialog(self,self.continuousMatcher)
        settingsDialog.run()
def chooseDatabaseDirectory(self):
prevDirPath = os.path.join(self.continuousMatcher.applicationPath, self.continuousMatcher.databaseDirectoryPath)
prevDirPath = os.path.normpath(prevDirPath)
dirPath = QFileDialog.getExistingDirectory(self, u'Wybierz katalog z bazą danych', prevDirPath, QFileDialog.ShowDirsOnly )
if dirPath:
self.continuousMatcher.changeDatabaseDirectory(dirPath)
self.continuousMatcher.openDatabaseDirectory()
if self.continuousMatcher.ready:
self.resultLabel.setText(u'Gotowy')
else:
self.resultLabel.setText(u'Proszę wybrać katalog z bazą danych')
    def interruptRecording(self):
        """Ask the worker thread to stop; it polls this shared flag."""
        self.threadInterrupter['interrupted'] = True

    # Signals consumed by the QML fullscreen page (see setupFullscreenView).
    enableAutoPlaybackSignal = pyqtSignal(bool)
    enablePlaybackSignal = pyqtSignal(bool)
    recordingStartedSignal = pyqtSignal()
    def recordAndMatch(self):
        """Kick off one record-and-match cycle on the worker thread.

        Completion is handled by recordingFinished via the thread's
        `finished` signal.
        """
        self.threadInterrupter['interrupted'] = False
        self.recordButton.setText(u'Nagrywanie')
        # Reset and start the progress animation (advanced in timerEvent
        # every 100 ms).
        self.progress = 0.0
        self.progressBar.setValue(0)
        self.progressTimer.start(100,self)
        self.progressChangedSignal.emit(self.progress)
        self.matcherThread.start()
        # While recording, the record button acts as a stop button.
        self.recordButton.clicked.disconnect()
        self.recordButton.clicked.connect(self.interruptRecording)
        self.recordingStartedSignal.emit()
recordingFinishedSignal = pyqtSignal(str, str, str, str)
def recordingFinished(self):
currentResult = self.resultTextFormatter(self.matcherThread.result)
rawFilenameWithoutExtension = os.path.splitext(self.matcherThread.result["filename"])[0]
filenameWithoutExtension = re.sub(r"\[.*\]","",rawFilenameWithoutExtension)
resultAudioPath = self.matcherThread.result["filename"];
videoExtensions = ['AVI', 'avi', 'MOV', 'mov']
possibleVideoPaths = [os.path.normpath(os.path.join(self.continuousMatcher.databaseDirectoryPath, filenameWithoutExtension + "." + ext)) for ext in videoExtensions]
videoPaths = [path for path in possibleVideoPaths if os.path.exists(path)]
if len(videoPaths) > 0:
resultVideoPath = videoPaths[0]
else:
resultVideoPath = ""
imageExtensions = ['png', 'jpg', 'jpeg', 'bmp']
possibleImagePaths = [os.path.normpath(os.path.join(self.continuousMatcher.databaseDirectoryPath, filenameWithoutExtension + "." + ext)) for ext in imageExtensions]
imagePaths = [path for path in possibleImagePaths if os.path.exists(path)]
if len(imagePaths) > 0:
resultImagePath = imagePaths[0]
else:
resultImagePath = self.defaultImagePath
textExtensions = ['html', 'txt']
possibleTextPaths = [os.path.normpath(os.path.join(self.continuousMatcher.databaseDirectoryPath, filenameWithoutExtension + "." + ext)) for ext in textExtensions]
textPaths = [path for path in possibleTextPaths if os.path.exists(path)]
if len(textPaths) > 0:
resultText = self.parseResultTextFile(textPaths[0])
resultText = re.sub(r"(\n)+$","",resultText)
resultText = re.sub(r"^(\n)+","",resultText)
else:
resultText = self.defaultText
self.resultLabel.setText(resultText)
self.pictureImage = QImage(resultImagePath)
self.pictureImage = self.pictureImage.scaled(200,200,Qt.IgnoreAspectRatio,Qt.FastTransformation)
self.pictureLabel.setAlignment( Qt.AlignRight | Qt.AlignVCenter );
self.pictureLabel.setPixmap(QPixmap.fromImage(self.pictureImage))
# if(len(self.recentList) == 0 or self.recentList[-1] != resultText):
# self.recentList.append(resultText)
# self.recentListWidget.addItem(QListWidgetItem(resultText))
self.progressBar.setValue(100)
self.progress = 100.0
self.progressChangedSignal.emit(self.progress)
self.progressTimer.stop()
if(self.continuousMatching and not self.threadInterrupter['interrupted']):
self.recordAndMatch()
else:
self.recordButton.setText(u'Nagrywaj')
self.recordButton.clicked.disconnect()
self.recordButton.clicked.connect(self.recordAndMatch)
self.recordingFinishedSignal.emit(resultText,resultImagePath,resultAudioPath,resultVideoPath)
def parseResultTextFile(self, textPath):
with open(textPath) as file:
result = file.read()
return result
def resultTextFormatter(self, result):
matchedStringFormat = '{artist} - {title}'
formatedResult = ""
artist = result['metadata'].get("artist","")
title = result['metadata'].get("title","")
msg = result['msg']
filename = result['filename']
if artist and title:
formatedResult = matchedStringFormat.format(**{'artist':artist,'title':title})
elif filename:
formatedResult = filename
elif msg:
formatedResult = msg
else:
formatedResult = u'Coś poszło nie tak...'
return formatedResult
progressChangedSignal = pyqtSignal(float)
def timerEvent(self, e):
if self.progress >= 100:
self.progressTimer.stop()
return
self.progress = self.progress + 10.0 * 1.0/10.0
self.progressBar.setValue(self.progress)
self.progressChangedSignal.emit(self.progress)
def toggleContinuous(self):
self.continuousMatching = self.continuousCheckBox.isChecked()
self.continuousCheckBox.setChecked(self.continuousMatching)
# Script entry point.
if __name__ == '__main__':
    main(sys.argv)
| #!/usr/bin/python2
# -*- coding: utf-8 -*-
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
from sys import platform
import os
import datetime
from PyQt5.QtWidgets import (
QApplication,
QWidget,
QPushButton,
QVBoxLayout,
QHBoxLayout,
QFormLayout,
QStackedWidget,
QLabel,
QSizePolicy,
QCheckBox,
QListWidget,
QListWidgetItem,
QLineEdit,
QMainWindow,
QAction,
QProgressBar,
QDialog,
QTableWidget,
QTableWidgetItem,
QFileDialog,
QDialogButtonBox,
QGraphicsScene,
QGraphicsView,
QGraphicsPixmapItem
)
from PyQt5.QtGui import (
QIcon,
QPixmap,
QImage,
QPalette
)
from PyQt5.QtCore import (
QCoreApplication,
QThread,
QBasicTimer,
QUrl,
pyqtProperty,
pyqtSlot,
pyqtSignal,
Qt,
QT_VERSION_STR
)
from PyQt5.QtQml import (qmlRegisterType, QQmlComponent, QQmlEngine)
from PyQt5.QtQuick import (QQuickView)
from PyQt5.QtQuickWidgets import (QQuickWidget)
import locale
os_encoding = locale.getpreferredencoding()
import microphone_match
import scannerSettingsDialog
import matcherSettingsDialog
import audioSettingsDialog
import uiSettingsDialog
import re
major, minor, bugfix = QT_VERSION_STR.split('.')
major = int(major)
minor = int(minor)
bugfix = int(bugfix)
if platform == "win32" or major<5 or minor<8:
enableQmlFullscreen = False
else:
enableQmlFullscreen = True
def main(argv):
app = QApplication(argv)
w = MainWindow()
sys.exit(app.exec_())
class RecorderMatcherThread(QThread):
def __init__(self, matcher):
super(self.__class__, self).__init__()
self.matcher = matcher
def __del__(self):
self.wait()
def run(self):
# database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz')
# microphone_match.recordAndMatch(database_file_path)
# self.recordButton.setText('Record')
self.result = self.matcher.recordAndMatch2()
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow,self).__init__()
self.fullscreenWindow = None
self.initUI()
def initUI(self):
self.setWindowTitle('Whistler')
self.stackedWidget = QStackedWidget(self)
self.centralWidget = QWidget(self.stackedWidget)
self.stackedWidget.addWidget(self.centralWidget)
# self.continuousMatching = True
self.continuousMatching = False
self.threadInterrupter = {'interrupted':False}
self.continuousMatcher = microphone_match.ContinuousMatcher(self.threadInterrupter)
self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
self.matcherThread.finished.connect(self.recordingFinished)
self.recordButton = QPushButton(u'Nagrywaj')
self.recordButton.resize(self.recordButton.sizeHint())
self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.recordButton.clicked.connect(self.recordAndMatch)
self.resultLabel = QLabel()
if self.continuousMatcher.ready:
self.resultLabel.setText(u'Gotowy')
else:
self.resultLabel.setText(u'Proszę wybrać katalog z bazą danych')
self.pictureLabel = QLabel();
self.pictureImage = QImage("image.jpg")
self.pictureImage = self.pictureImage.scaled(200,200,Qt.IgnoreAspectRatio,Qt.FastTransformation)
self.pictureLabel.setAlignment( Qt.AlignRight | Qt.AlignVCenter );
self.pictureLabel.setPixmap(QPixmap.fromImage(self.pictureImage))
# self.continuousCheckBox = QCheckBox()
# self.continuousCheckBox.setText('Continuous')
# self.continuousCheckBox.setChecked(self.continuousMatching)
# self.continuousCheckBox.stateChanged.connect(self.toggleContinuous)
self.progress = 0.0
self.progressBar = QProgressBar()
self.progressTimer = QBasicTimer()
self.recentList = []
self.recentListWidget = QListWidget()
self.optionsHBox = QHBoxLayout()
# self.optionsHBox.addWidget(self.continuousCheckBox)
self.recResHBox = QHBoxLayout()
self.recResHBox.addWidget(self.recordButton)
self.recResHBox.addWidget(self.resultLabel)
self.recResHBox.addWidget(self.pictureLabel)
self.mainVBox = QVBoxLayout()
self.mainVBox.addLayout(self.recResHBox)
self.mainVBox.addLayout(self.optionsHBox)
# self.mainVBox.addWidget(self.recentListWidget)
self.mainVBox.addWidget(self.progressBar)
# self.mainVBox.addStretch(1)
self.centralWidget.setLayout(self.mainVBox)
self.setCentralWidget(self.stackedWidget)
self.runningInFullscreen = False
self.defaultImagePath = os.path.join(self.continuousMatcher.applicationPath,'default.png')
self.defaultText = u'Niestety, nie znaleziono pasującego nagrania,<br>Spróbuj ponownie'
if enableQmlFullscreen:
self.setupFullscreenView()
if(self.continuousMatcher.startFullscreen):
self.runFullscreen()
self.setupMenuBar()
self.show()
def setupMenuBar(self):
menubar = self.menuBar()
fileMenu = menubar.addMenu('&Plik')
settingsMenu = menubar.addMenu('&Ustawienia')
if enableQmlFullscreen:
runFullscreenAction = QAction(QIcon.fromTheme('fullscreen'), u'&Pełny ekran', self)
runFullscreenAction.setShortcut('F11')
runFullscreenAction.setStatusTip(u'Uruchom widok pełnoekranowy')
runFullscreenAction.triggered.connect(self.runFullscreen)
databaseManagementAction = QAction(QIcon.fromTheme('database'), u'&Baza danych', self)
databaseManagementAction.setShortcut('Ctrl+B')
databaseManagementAction.setStatusTip(u'Zarządzaj bazą danych')
databaseManagementAction.triggered.connect(self.openDatabaseManagement)
chooseDatabaseAction = QAction(QIcon.fromTheme('fileopen'), u'&Otwórz bazę danych', self)
chooseDatabaseAction.setShortcut('Ctrl+O')
chooseDatabaseAction.setStatusTip('Otwórz katalog zawierający bazę danych')
chooseDatabaseAction.triggered.connect(self.chooseDatabaseDirectory)
exitAction = QAction(QIcon.fromTheme('application-exit'), u'&Wyjście', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Zamknij program')
exitAction.triggered.connect(QApplication.quit)
uiSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &interfejsu', self)
uiSettingsAction.setShortcut('Ctrl+Shift+U')
uiSettingsAction.setStatusTip(u'Zmień ustawienia interfejsu')
uiSettingsAction.triggered.connect(self.openUiSettings)
audioSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &nagrywania', self)
audioSettingsAction.setShortcut('Ctrl+Shift+R')
audioSettingsAction.setStatusTip(u'Zmień ustawienia nagrywania')
audioSettingsAction.triggered.connect(self.openAudioSettings)
matcherSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &dopasowywania', self)
matcherSettingsAction.setShortcut('Ctrl+Shift+M')
matcherSettingsAction.setStatusTip(u'Zmień ustawienia dopasowywania')
matcherSettingsAction.triggered.connect(self.openMatcherSettings)
scannerSettingsAction = QAction(QIcon.fromTheme('gnome-settings'), u'Ustawienia &skanowania', self)
scannerSettingsAction.setShortcut('Ctrl+Shift+S')
scannerSettingsAction.setStatusTip(u'Zmień ustawienia skanowania')
scannerSettingsAction.triggered.connect(self.openScannerSettings)
if enableQmlFullscreen:
fileMenu.addAction(runFullscreenAction)
fileMenu.addAction(chooseDatabaseAction)
fileMenu.addAction(databaseManagementAction)
fileMenu.addAction(exitAction)
settingsMenu.addAction(uiSettingsAction)
settingsMenu.addAction(audioSettingsAction)
settingsMenu.addAction(matcherSettingsAction)
settingsMenu.addAction(scannerSettingsAction)
def setupFullscreenView(self):
self.fullscreenWidget = QQuickWidget(self)
self.fullscreenWidget.setResizeMode(QQuickWidget.SizeRootObjectToView)
self.fullscreenWidget.setSource(QUrl(os.path.join(self.continuousMatcher.applicationPath,'fullscreen.qml')))
mainRootObject = self.fullscreenWidget.rootObject()
mainRootObject.startRecording.connect(self.recordAndMatch)
mainRootObject.stopRecording.connect(self.interruptRecording)
mainRootObject.closeWindow.connect(self.closeFullscreenWindow)
self.recordingStartedSignal.connect(mainRootObject.stateRecording)
self.recordingFinishedSignal.connect(mainRootObject.stateReady)
self.progressChangedSignal.connect(mainRootObject.setProgress)
self.enablePlaybackSignal.connect(mainRootObject.enablePlayback)
self.enableAutoPlaybackSignal.connect(mainRootObject.enableAutoPlayback)
self.enablePlaybackSignal.emit(self.continuousMatcher.enablePlayback)
self.enableAutoPlaybackSignal.emit(self.continuousMatcher.autoPlayback)
self.stackedWidget.addWidget(self.fullscreenWidget)
def runFullscreen(self):
if enableQmlFullscreen:
if not self.runningInFullscreen:
self.runningInFullscreen = True
self.stackedWidget.setCurrentIndex(1)
self.menuBar().setVisible(False)
self.showFullScreen()
else:
self.runningInFullscreen = False
self.stackedWidget.setCurrentIndex(0)
self.menuBar().setVisible(True)
self.showNormal()
def closeFullscreenWindow(self):
if enableQmlFullscreen:
self.runningInFullscreen = False
self.stackedWidget.setCurrentIndex(0)
self.menuBar().setVisible(True)
self.showNormal()
def openDatabaseManagement(self, newValue):
databaseDialog = QDialog(self)
databaseTable = QTableWidget()
self.fillDatabaseManagementTable(databaseTable)
rescanButton = QPushButton(u'Skanuj ponownie')
rescanButton.clicked.connect(lambda: self.rescanDatabaseAndFillTable(databaseTable))
dialogButtons = QDialogButtonBox(QDialogButtonBox.Close)
dialogButtons.rejected.connect(databaseDialog.accept)
databaseLayout = QVBoxLayout()
databaseLayout.addWidget(databaseTable)
databaseLayout.addWidget(rescanButton)
databaseLayout.addWidget(dialogButtons)
databaseDialog.setLayout(databaseLayout)
databaseDialog.exec_()
def rescanDatabaseAndFillTable(self,table):
self.continuousMatcher.scanDirectory()
self.fillDatabaseManagementTable(table)
def fillDatabaseManagementTable(self, table):
tableHeaders = [u'Obraz',u'Artysta',u'Tytuł',u'Audio']
table.setRowCount(0)
table.setRowCount(len(self.continuousMatcher.hash_tab.metadata))
table.setColumnCount(len(tableHeaders))
table.setHorizontalHeaderLabels(tableHeaders)
for i, val in enumerate(self.continuousMatcher.hash_tab.metadata):
artistItem = QTableWidgetItem(val.get("artist",""))
titleItem = QTableWidgetItem(val.get("title",""))
audioItem = QTableWidgetItem(self.continuousMatcher.hash_tab.names[i])
table.setItem(i,1,artistItem)
table.setItem(i,2,titleItem)
table.setItem(i,3,audioItem)
table.resizeColumnsToContents()
table.resizeRowsToContents()
def openScannerSettings(self, newValue):
settingsDialog = scannerSettingsDialog.ScannerSettingsDialog(self, self.continuousMatcher)
settingsDialog.run()
def openMatcherSettings(self, newValue):
settingsDialog = matcherSettingsDialog.MatcherSettingsDialog(self, self.continuousMatcher)
settingsDialog.run()
def openUiSettings(self, newValue):
settingsDialog = uiSettingsDialog.UiSettingDialog(self,self.continuousMatcher)
settingsDialog.run()
self.enablePlaybackSignal.emit(self.continuousMatcher.enablePlayback)
self.enableAutoPlaybackSignal.emit(self.continuousMatcher.autoPlayback)
def openAudioSettings(self, newValue):
settingsDialog = audioSettingsDialog.AudioSettingDialog(self,self.continuousMatcher)
settingsDialog.run()
def chooseDatabaseDirectory(self):
prevDirPath = os.path.join(self.continuousMatcher.applicationPath, self.continuousMatcher.databaseDirectoryPath)
prevDirPath = os.path.normpath(prevDirPath)
dirPath = QFileDialog.getExistingDirectory(self, u'Wybierz katalog z bazą danych', prevDirPath, QFileDialog.ShowDirsOnly )
if dirPath:
self.continuousMatcher.changeDatabaseDirectory(dirPath)
self.continuousMatcher.openDatabaseDirectory()
if self.continuousMatcher.ready:
self.resultLabel.setText(u'Gotowy')
else:
self.resultLabel.setText(u'Proszę wybrać katalog z bazą danych')
def interruptRecording(self):
self.threadInterrupter['interrupted'] = True
enableAutoPlaybackSignal = pyqtSignal(bool)
enablePlaybackSignal = pyqtSignal(bool)
recordingStartedSignal = pyqtSignal()
def recordAndMatch(self):
self.threadInterrupter['interrupted'] = False
self.recordButton.setText(u'Nagrywanie')
self.progress = 0.0
self.progressBar.setValue(0)
self.progressTimer.start(100,self)
self.progressChangedSignal.emit(self.progress)
self.matcherThread.start()
self.recordButton.clicked.disconnect()
self.recordButton.clicked.connect(self.interruptRecording)
self.recordingStartedSignal.emit()
recordingFinishedSignal = pyqtSignal(str, str, str, str)
def recordingFinished(self):
currentResult = self.resultTextFormatter(self.matcherThread.result)
rawFilenameWithoutExtension = os.path.splitext(self.matcherThread.result["filename"])[0]
filenameWithoutExtension = re.sub(r"\[.*\]","",rawFilenameWithoutExtension)
resultAudioPath = self.matcherThread.result["filename"];
videoExtensions = ['AVI', 'avi', 'MOV', 'mov']
possibleVideoPaths = [os.path.normpath(os.path.join(self.continuousMatcher.databaseDirectoryPath, filenameWithoutExtension + "." + ext)) for ext in videoExtensions]
videoPaths = [path for path in possibleVideoPaths if os.path.exists(path)]
if len(videoPaths) > 0:
resultVideoPath = videoPaths[0]
else:
resultVideoPath = ""
imageExtensions = ['png', 'jpg', 'jpeg', 'bmp']
possibleImagePaths = [os.path.normpath(os.path.join(self.continuousMatcher.databaseDirectoryPath, filenameWithoutExtension + "." + ext)) for ext in imageExtensions]
imagePaths = [path for path in possibleImagePaths if os.path.exists(path)]
if len(imagePaths) > 0:
resultImagePath = imagePaths[0]
else:
resultImagePath = self.defaultImagePath
textExtensions = ['html', 'txt']
possibleTextPaths = [os.path.normpath(os.path.join(self.continuousMatcher.databaseDirectoryPath, filenameWithoutExtension + "." + ext)) for ext in textExtensions]
textPaths = [path for path in possibleTextPaths if os.path.exists(path)]
if len(textPaths) > 0:
resultText = self.parseResultTextFile(textPaths[0])
resultText = re.sub(r"(\n)+$","",resultText)
resultText = re.sub(r"^(\n)+","",resultText)
else:
resultText = self.defaultText
self.resultLabel.setText(resultText)
self.pictureImage = QImage(resultImagePath)
self.pictureImage = self.pictureImage.scaled(200,200,Qt.IgnoreAspectRatio,Qt.FastTransformation)
self.pictureLabel.setAlignment( Qt.AlignRight | Qt.AlignVCenter );
self.pictureLabel.setPixmap(QPixmap.fromImage(self.pictureImage))
# if(len(self.recentList) == 0 or self.recentList[-1] != resultText):
# self.recentList.append(resultText)
# self.recentListWidget.addItem(QListWidgetItem(resultText))
self.progressBar.setValue(100)
self.progress = 100.0
self.progressChangedSignal.emit(self.progress)
self.progressTimer.stop()
if(self.continuousMatching and not self.threadInterrupter['interrupted']):
self.recordAndMatch()
else:
self.recordButton.setText(u'Nagrywaj')
self.recordButton.clicked.disconnect()
self.recordButton.clicked.connect(self.recordAndMatch)
self.recordingFinishedSignal.emit(resultText,resultImagePath,resultAudioPath,resultVideoPath)
def parseResultTextFile(self, textPath):
with open(textPath) as file:
result = file.read()
return result
def resultTextFormatter(self, result):
matchedStringFormat = '{artist} - {title}'
formatedResult = ""
artist = result['metadata'].get("artist","")
title = result['metadata'].get("title","")
msg = result['msg']
filename = result['filename']
if artist and title:
formatedResult = matchedStringFormat.format(**{'artist':artist,'title':title})
elif filename:
formatedResult = filename
elif msg:
formatedResult = msg
else:
formatedResult = u'Coś poszło nie tak...'
return formatedResult
progressChangedSignal = pyqtSignal(float)
def timerEvent(self, e):
if self.progress >= 100:
self.progressTimer.stop()
return
self.progress = self.progress + 10.0 * 1.0/10.0
self.progressBar.setValue(self.progress)
self.progressChangedSignal.emit(self.progress)
def toggleContinuous(self):
self.continuousMatching = self.continuousCheckBox.isChecked()
self.continuousCheckBox.setChecked(self.continuousMatching)
if __name__ == '__main__':
main(sys.argv)
| en | 0.188116 | #!/usr/bin/python2 # -*- coding: utf-8 -*- # database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz') # microphone_match.recordAndMatch(database_file_path) # self.recordButton.setText('Record') # self.continuousMatching = True # self.continuousCheckBox = QCheckBox() # self.continuousCheckBox.setText('Continuous') # self.continuousCheckBox.setChecked(self.continuousMatching) # self.continuousCheckBox.stateChanged.connect(self.toggleContinuous) # self.optionsHBox.addWidget(self.continuousCheckBox) # self.mainVBox.addWidget(self.recentListWidget) # self.mainVBox.addStretch(1) # if(len(self.recentList) == 0 or self.recentList[-1] != resultText): # self.recentList.append(resultText) # self.recentListWidget.addItem(QListWidgetItem(resultText)) | 1.813765 | 2 |
controller.py | magnuswenzer/mini_inventory | 0 | 6615805 |
from db import Database
class Item:
    """Wrapper around one inventory row; setters write straight to the DB.

    Keys of the underlying `data` dict are normalized to lower case in
    __init__; the `name`/`quantity`/... properties expose display-ready
    values.
    """

    # Shared database handle for all Item instances, created at class
    # definition time — verify Database() is cheap/safe to construct at
    # import.
    db = Database()

    def __init__(self, **kwargs):
        # Normalize all column names to lower case.
        self.data = {key.lower(): value for (key, value) in kwargs.items()}

    @property
    def name(self):
        # `or ''` guards against a row without an item_name: the original
        # called .capitalize() on the raw .get() result and crashed with
        # AttributeError when the key was missing.
        return (self.data.get('item_name') or '').capitalize()

    @property
    def quantity(self):
        # Missing/None/0 all display as "0".
        value = self.data.get('quantity')
        if not value:
            value = 0
        return str(value)

    @quantity.setter
    def quantity(self, quantity):
        quantity = int(quantity) if quantity else 0
        # NOTE(review): the DB is keyed by the capitalized display name here
        # while add_item stores lower-cased values — verify the lookup is
        # case-insensitive.
        self.db.update_item(self.name, quantity=quantity)
        self.data['quantity'] = quantity

    @property
    def amount(self):
        return str(self.data.get('amount', 0))

    @amount.setter
    def amount(self, amount):
        amount = int(amount) if amount else 0
        self.db.update_item(self.name, amount=amount)
        self.data['amount'] = amount

    @property
    def category(self):
        # `or ''` also maps a stored None to the empty string.
        return self.data.get('category_name', '') or ''

    @category.setter
    def category(self, category):
        # (debug print removed)
        self.db.update_item(self.name, category=category)
        self.data['category_name'] = category

    @property
    def common(self):
        return bool(self.data.get('common', 0))

    @common.setter
    def common(self, common):
        # Stored as 0/1 in the database.
        common = int(common)
        self.db.update_item(self.name, common=common)
        self.data['common'] = common
class Controller:
db = Database()
def __init__(self):
pass
# self._init_database()
# def _init_database(self):
# self.db = Database()
def add_unit(self, name):
self.db.add_unit(name)
def add_category(self, name):
self.db.add_category(name)
def add_item(self, **kwargs):
print('- ADDING ITEMS')
data = {}
for key, value in kwargs.items():
if type(value) == str:
value = value.lower()
data[key] = value
self.db.add_item(**data)
def delete_unit(self, name):
self.db.delete_unit(name)
def delete_category(self, name):
self.db.delete_category(name)
def delete_item(self, name):
self.db.delete_item(name)
def get_unit_list(self):
return self._capitalized_list(self.db.get_unit_list())
def get_category_list(self):
return self._capitalized_list(self.db.get_category_list())
def get_item_list(self, unit):
return self._capitalized_list(self.db.get_item_list(unit))
def get_items(self, unit_name=None, category_name=None):
if unit_name:
unit_name = unit_name.lower()
if category_name:
category_name = category_name.lower()
data = self.db.get_item_data(unit_name=unit_name,
category_name=category_name)
return_dict = {}
print(data)
for item in data:
# key = f'{unit}_{item["item_name"]}'
key = f'{item["unit_name"]}_{item["item_name"]}'
return_dict[key] = Item(**item)
return return_dict
def update_item(self, name, **kwargs):
self.db.update_item(name, **kwargs)
def _capitalized_list(self, lst):
return [item.capitalize() for item in lst]
if __name__ == '__main__':
c = Controller()
print(c.get_item_list('skap1')) |
from db import Database
class Item:
db = Database()
def __init__(self, **kwargs):
self.data = {key.lower(): value for (key, value) in kwargs.items()}
@property
def name(self):
return self.data.get('item_name').capitalize()
@property
def quantity(self):
value = self.data.get('quantity')
if not value:
value = 0
return str(value)
@quantity.setter
def quantity(self, quantity):
if not quantity:
quantity = 0
else:
quantity = int(quantity)
self.db.update_item(self.name, quantity=quantity)
self.data['quantity'] = quantity
@property
def amount(self):
return str(self.data.get('amount', 0))
@amount.setter
def amount(self, amount):
if not amount:
amount = 0
else:
amount = int(amount)
self.db.update_item(self.name, amount=amount)
self.data['amount'] = amount
@property
def category(self):
return self.data.get('category_name', '') or ''
@category.setter
def category(self, category):
print('UPDATING CATEGORY IN CONTROLLER:', category)
self.db.update_item(self.name, category=category)
self.data['category_name'] = category
@property
def common(self):
return bool(self.data.get('common', 0))
@common.setter
def common(self, common):
common = int(common)
self.db.update_item(self.name, common=common)
self.data['common'] = common
class Controller:
db = Database()
def __init__(self):
pass
# self._init_database()
# def _init_database(self):
# self.db = Database()
def add_unit(self, name):
self.db.add_unit(name)
def add_category(self, name):
self.db.add_category(name)
def add_item(self, **kwargs):
print('- ADDING ITEMS')
data = {}
for key, value in kwargs.items():
if type(value) == str:
value = value.lower()
data[key] = value
self.db.add_item(**data)
def delete_unit(self, name):
self.db.delete_unit(name)
def delete_category(self, name):
self.db.delete_category(name)
def delete_item(self, name):
self.db.delete_item(name)
def get_unit_list(self):
return self._capitalized_list(self.db.get_unit_list())
def get_category_list(self):
return self._capitalized_list(self.db.get_category_list())
def get_item_list(self, unit):
return self._capitalized_list(self.db.get_item_list(unit))
def get_items(self, unit_name=None, category_name=None):
if unit_name:
unit_name = unit_name.lower()
if category_name:
category_name = category_name.lower()
data = self.db.get_item_data(unit_name=unit_name,
category_name=category_name)
return_dict = {}
print(data)
for item in data:
# key = f'{unit}_{item["item_name"]}'
key = f'{item["unit_name"]}_{item["item_name"]}'
return_dict[key] = Item(**item)
return return_dict
def update_item(self, name, **kwargs):
self.db.update_item(name, **kwargs)
def _capitalized_list(self, lst):
return [item.capitalize() for item in lst]
if __name__ == '__main__':
c = Controller()
print(c.get_item_list('skap1')) | en | 0.172374 | # self._init_database() # def _init_database(self): # self.db = Database() # key = f'{unit}_{item["item_name"]}' | 3.227951 | 3 |
topology/Topology.py | nicmcd/fabcalc | 0 | 6615806 | <filename>topology/Topology.py<gh_stars>0
"""
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior
* written permission.
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
class Topology(object):
"""
This is an abstract class that represents a fabric technology
The 'topology' defines structure and makes connections
"""
@staticmethod
def using_options():
"""
Tells the base class which option keys it uses
"""
return []
def __init__(self, **kwargs):
"""
Constructs a Topology object
"""
pass
def structure(self):
"""
This returns the structure of the system in terms of:
nodes : total number of nodes
chassis : number of chassis per rack
total_racks : total number of racks
Returns:
tuple (int, int, int, int) : nodes, chassis, racks, rows
"""
raise NotImplementedError('subclasses must override this')
def routers(self):
"""
This is a generator that generates (radix, count) tuples
'radix' and 'count' are of type int
"""
raise NotImplementedError('subclasses must override this')
def cables(self):
"""
This is a generator that generates (source, destination, count) tuples
'source' and 'destination' are of type layout.Coordinate
'count' is of type int
"""
raise NotImplementedError('subclasses must override this')
def notify_length(self, length, count):
"""
This notifies the topology module of the length of cables generated. This
can be used by the topology module to generate topology specific cable
length statistics
length : length of cable
count : count of cables
"""
pass # this is only used when desired by the topology module
def info_file(self, filename):
"""
This writes topology specific information to a file
filename : the file to be written
"""
raise NotImplementedError('subclasses must override this')
| <filename>topology/Topology.py<gh_stars>0
"""
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior
* written permission.
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
class Topology(object):
"""
This is an abstract class that represents a fabric technology
The 'topology' defines structure and makes connections
"""
@staticmethod
def using_options():
"""
Tells the base class which option keys it uses
"""
return []
def __init__(self, **kwargs):
"""
Constructs a Topology object
"""
pass
def structure(self):
"""
This returns the structure of the system in terms of:
nodes : total number of nodes
chassis : number of chassis per rack
total_racks : total number of racks
Returns:
tuple (int, int, int, int) : nodes, chassis, racks, rows
"""
raise NotImplementedError('subclasses must override this')
def routers(self):
"""
This is a generator that generates (radix, count) tuples
'radix' and 'count' are of type int
"""
raise NotImplementedError('subclasses must override this')
def cables(self):
"""
This is a generator that generates (source, destination, count) tuples
'source' and 'destination' are of type layout.Coordinate
'count' is of type int
"""
raise NotImplementedError('subclasses must override this')
def notify_length(self, length, count):
"""
This notifies the topology module of the length of cables generated. This
can be used by the topology module to generate topology specific cable
length statistics
length : length of cable
count : count of cables
"""
pass # this is only used when desired by the topology module
def info_file(self, filename):
"""
This writes topology specific information to a file
filename : the file to be written
"""
raise NotImplementedError('subclasses must override this')
| en | 0.765127 | * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * - Neither the name of prim nor the names of its contributors may be used to * endorse or promote products derived from this software without specific prior * written permission. * * See the NOTICE file distributed with this work for additional information * regarding copyright ownership. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
This is an abstract class that represents a fabric technology The 'topology' defines structure and makes connections Tells the base class which option keys it uses Constructs a Topology object This returns the structure of the system in terms of: nodes : total number of nodes chassis : number of chassis per rack total_racks : total number of racks Returns: tuple (int, int, int, int) : nodes, chassis, racks, rows This is a generator that generates (radix, count) tuples 'radix' and 'count' are of type int This is a generator that generates (source, destination, count) tuples 'source' and 'destination' are of type layout.Coordinate 'count' is of type int This notifies the topology module of the length of cables generated. This can be used by the topology module to generate topology specific cable length statistics length : length of cable count : count of cables # this is only used when desired by the topology module This writes topology specific information to a file filename : the file to be written | 1.847787 | 2 |
codenames/preprocessing/utils.py | vladimir-tikhonov/codenames_ai | 0 | 6615807 | <filename>codenames/preprocessing/utils.py
from pathlib import Path
from typing import List
from itertools import chain
def get_all_images_in(directory: Path) -> List[Path]:
return list(chain(directory.glob('*.jpg'), directory.glob('*.jpeg'), directory.glob('*.png')))
| <filename>codenames/preprocessing/utils.py
from pathlib import Path
from typing import List
from itertools import chain
def get_all_images_in(directory: Path) -> List[Path]:
return list(chain(directory.glob('*.jpg'), directory.glob('*.jpeg'), directory.glob('*.png')))
| none | 1 | 2.682205 | 3 | |
accelbyte_py_sdk/api/platform/operations/item/__init__.py | AccelByte/accelbyte-python-sdk | 0 | 6615808 | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-platform-service."""
__version__ = "4.10.0"
__author__ = "AccelByte"
__email__ = "<EMAIL>"
# pylint: disable=line-too-long
from .acquire_item import AcquireItem
from .bulk_get_locale_items import BulkGetLocaleItems
from .create_item import CreateItem
from .defeature_item import DefeatureItem
from .delete_item import DeleteItem
from .disable_item import DisableItem
from .enable_item import EnableItem
from .feature_item import FeatureItem
from .get_app import GetApp
from .get_bulk_item_id_by_skus import GetBulkItemIdBySkus
from .get_item import GetItem
from .get_item_by_app_id import GetItemByAppId
from .get_item_by_sku import GetItemBySku
from .get_item_dynamic_data import GetItemDynamicData
from .get_item_id_by_sku import GetItemIdBySku
from .get_locale_item import GetLocaleItem
from .get_locale_item_by_sku import GetLocaleItemBySku
from .list_basic_items_by_features import ListBasicItemsByFeatures
from .public_bulk_get_items import PublicBulkGetItems
from .public_get_app import PublicGetApp
from .public_get_item import PublicGetItem
from .public_get_item_by_app_id import PublicGetItemByAppId
from .public_get_item_by_sku import PublicGetItemBySku
from .public_get_item_dynamic_data import PublicGetItemDynamicData
from .public_query_items import PublicQueryItems
from .public_query_items import AppTypeEnum as PublicQueryItemsAppTypeEnum, ItemTypeEnum as PublicQueryItemsItemTypeEnum, SortByEnum as PublicQueryItemsSortByEnum
from .public_search_items import PublicSearchItems
from .query_items import QueryItems
from .query_items import AppTypeEnum as QueryItemsAppTypeEnum, ItemTypeEnum as QueryItemsItemTypeEnum, SortByEnum as QueryItemsSortByEnum
from .query_uncategorized_items import QueryUncategorizedItems
from .query_uncategorized_items import SortByEnum as QueryUncategorizedItemsSortByEnum
from .return_item import ReturnItem
from .search_items import SearchItems
from .sync_in_game_item import SyncInGameItem
from .update_app import UpdateApp
from .update_item import UpdateItem
| # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-platform-service."""
__version__ = "4.10.0"
__author__ = "AccelByte"
__email__ = "<EMAIL>"
# pylint: disable=line-too-long
from .acquire_item import AcquireItem
from .bulk_get_locale_items import BulkGetLocaleItems
from .create_item import CreateItem
from .defeature_item import DefeatureItem
from .delete_item import DeleteItem
from .disable_item import DisableItem
from .enable_item import EnableItem
from .feature_item import FeatureItem
from .get_app import GetApp
from .get_bulk_item_id_by_skus import GetBulkItemIdBySkus
from .get_item import GetItem
from .get_item_by_app_id import GetItemByAppId
from .get_item_by_sku import GetItemBySku
from .get_item_dynamic_data import GetItemDynamicData
from .get_item_id_by_sku import GetItemIdBySku
from .get_locale_item import GetLocaleItem
from .get_locale_item_by_sku import GetLocaleItemBySku
from .list_basic_items_by_features import ListBasicItemsByFeatures
from .public_bulk_get_items import PublicBulkGetItems
from .public_get_app import PublicGetApp
from .public_get_item import PublicGetItem
from .public_get_item_by_app_id import PublicGetItemByAppId
from .public_get_item_by_sku import PublicGetItemBySku
from .public_get_item_dynamic_data import PublicGetItemDynamicData
from .public_query_items import PublicQueryItems
from .public_query_items import AppTypeEnum as PublicQueryItemsAppTypeEnum, ItemTypeEnum as PublicQueryItemsItemTypeEnum, SortByEnum as PublicQueryItemsSortByEnum
from .public_search_items import PublicSearchItems
from .query_items import QueryItems
from .query_items import AppTypeEnum as QueryItemsAppTypeEnum, ItemTypeEnum as QueryItemsItemTypeEnum, SortByEnum as QueryItemsSortByEnum
from .query_uncategorized_items import QueryUncategorizedItems
from .query_uncategorized_items import SortByEnum as QueryUncategorizedItemsSortByEnum
from .return_item import ReturnItem
from .search_items import SearchItems
from .sync_in_game_item import SyncInGameItem
from .update_app import UpdateApp
from .update_item import UpdateItem
| en | 0.816722 | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved. # This is licensed software from AccelByte Inc, for limitations # and restrictions contact your company contract manager. # # Code generated. DO NOT EDIT! # template file: justice_py_sdk_codegen/__main__.py Auto-generated package that contains models used by the justice-platform-service. # pylint: disable=line-too-long | 1.191781 | 1 |
06.py | flurincoretti/adventofcode | 0 | 6615809 | import itertools
def get_line(line, orbits):
last = line.split(')', 1)[0]
try:
line = orbits[last] + ")" + line
return get_line(line, orbits)
except KeyError:
return line
def get_lines(orbits):
objects = set(list(itertools.chain.from_iterable(
[orbit.split(')') for orbit in orbits]
)))
vals = [orbit.split(')')[0] for orbit in orbits]
keys = [orbit.split(')')[1] for orbit in orbits]
orbits = dict(zip(keys, vals))
lines = [get_line(obj, orbits) for obj in objects]
return lines
def verify_map(orbits):
lines = get_lines(orbits)
count_direct = len(lines) - 1
count_indirect = 0
for objects in [line.split(')') for line in lines]:
if len(objects) > 2:
count_indirect += len(objects) - 2
return count_direct + count_indirect
def get_transfers(orbits):
lines = get_lines(orbits)
try:
you_line = [l for l in lines if 'YOU' in l][0].split(')')
san_line = [l for l in lines if 'SAN' in l][0].split(')')
common_object = ''
for i in range(min(len(you_line), len(san_line) - 1)):
if you_line[i] == san_line[i]:
if you_line[i+1] != san_line[i+1]:
common_object = you_line[i]
break
transfers = you_line[you_line.index(common_object)+1:-1][::-1]
transfers += san_line[san_line.index(common_object):-1]
return len(transfers) - 1
except IndexError:
print('Error')
if __name__ == "__main__":
inputs = open('inputs/06.txt', 'r')
orbit_map = inputs.read().split('\n')
print("Total number of direct and indirect orbits: {}".format(
verify_map(orbit_map)))
print("Minimum number of orbital transfers: {}".format(
get_transfers(orbit_map))) | import itertools
def get_line(line, orbits):
last = line.split(')', 1)[0]
try:
line = orbits[last] + ")" + line
return get_line(line, orbits)
except KeyError:
return line
def get_lines(orbits):
objects = set(list(itertools.chain.from_iterable(
[orbit.split(')') for orbit in orbits]
)))
vals = [orbit.split(')')[0] for orbit in orbits]
keys = [orbit.split(')')[1] for orbit in orbits]
orbits = dict(zip(keys, vals))
lines = [get_line(obj, orbits) for obj in objects]
return lines
def verify_map(orbits):
lines = get_lines(orbits)
count_direct = len(lines) - 1
count_indirect = 0
for objects in [line.split(')') for line in lines]:
if len(objects) > 2:
count_indirect += len(objects) - 2
return count_direct + count_indirect
def get_transfers(orbits):
lines = get_lines(orbits)
try:
you_line = [l for l in lines if 'YOU' in l][0].split(')')
san_line = [l for l in lines if 'SAN' in l][0].split(')')
common_object = ''
for i in range(min(len(you_line), len(san_line) - 1)):
if you_line[i] == san_line[i]:
if you_line[i+1] != san_line[i+1]:
common_object = you_line[i]
break
transfers = you_line[you_line.index(common_object)+1:-1][::-1]
transfers += san_line[san_line.index(common_object):-1]
return len(transfers) - 1
except IndexError:
print('Error')
if __name__ == "__main__":
inputs = open('inputs/06.txt', 'r')
orbit_map = inputs.read().split('\n')
print("Total number of direct and indirect orbits: {}".format(
verify_map(orbit_map)))
print("Minimum number of orbital transfers: {}".format(
get_transfers(orbit_map))) | none | 1 | 3.13576 | 3 | |
tools/scripts/clang-format-all.py | wangshankun/Tengine_Atlas | 4,697 | 6615810 | <filename>tools/scripts/clang-format-all.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
def format_files(path):
for root, dirs, files in os.walk(path):
fname = []
for file in files:
if root.find("sysroot")>=0:
continue
if root.find("install")>=0:
continue
if root.find ("CMakeFiles")>=0:
continue
if os.path.splitext(file)[1] == '.cpp' or os.path.splitext(file)[1] == '.c' or \
os.path.splitext(file)[1] == '.hpp' or os.path.splitext(file)[1] == '.h' :
fname = os.path.join(root, file)
if fname.find("include/any.hpp")>=0:
continue
print("dos2unix %s" %(fname))
os.system("dos2unix %s" %(fname))
print("clang-format -style=file -i %s" %(fname))
os.system("clang-format -style=file -i %s" %(fname))
if __name__ == '__main__':
path = './'
format_files(path)
| <filename>tools/scripts/clang-format-all.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
def format_files(path):
for root, dirs, files in os.walk(path):
fname = []
for file in files:
if root.find("sysroot")>=0:
continue
if root.find("install")>=0:
continue
if root.find ("CMakeFiles")>=0:
continue
if os.path.splitext(file)[1] == '.cpp' or os.path.splitext(file)[1] == '.c' or \
os.path.splitext(file)[1] == '.hpp' or os.path.splitext(file)[1] == '.h' :
fname = os.path.join(root, file)
if fname.find("include/any.hpp")>=0:
continue
print("dos2unix %s" %(fname))
os.system("dos2unix %s" %(fname))
print("clang-format -style=file -i %s" %(fname))
os.system("clang-format -style=file -i %s" %(fname))
if __name__ == '__main__':
path = './'
format_files(path)
| en | 0.43794 | #! /usr/bin/python # -*- coding: utf-8 -*- | 2.749732 | 3 |
jadielinsta/views.py | jadielmwangi/photo-app-clone | 0 | 6615811 | <gh_stars>0
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
import datetime as dt
from .models import Post
from .forms import PostForm
from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/accounts/register/')
def new_post(request):
date = dt.date.today()
posts = Post.objects.all()
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
caption = form.cleaned_data['caption']
image = form.cleaned_data['Upload image']
recipient = PostRecipients(caption = caption,image=image)
recipient.save()
send_welcome_email(caption,image)
HttpResponseRedirect('newpost')
print('valid')
else:
form = PostForm()
return render(request, 'all-insta/post.html', {"date": date,'posts': posts,"postForm":form})
## view function that will handle the logic for displaying the search results
def search_results(request):
if 'post' in request.GET and request.GET["post"]:
search_term = request.GET.get("post")
searched_posts = Post.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'all-insta/search.html',{"message":message,"posts": searched_posts})
else:
message = "You haven't searched for any term"
return render(request, 'all-insta/search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def post(request):
try:
post = Post.objects.get(id = post_id)
except DoesNotExist:
raise Http404()
return render(request,"all-insta/single_post.html", {"post":post})
@login_required(login_url='/accounts/login/')
def profile(request):
current_user = request.user
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.profile = current_user
post.save()
return redirect('newpost')
else:
form = PostForm()
return render(request, 'profile.html', {"form": form})
| from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
import datetime as dt
from .models import Post
from .forms import PostForm
from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/accounts/register/')
def new_post(request):
date = dt.date.today()
posts = Post.objects.all()
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
caption = form.cleaned_data['caption']
image = form.cleaned_data['Upload image']
recipient = PostRecipients(caption = caption,image=image)
recipient.save()
send_welcome_email(caption,image)
HttpResponseRedirect('newpost')
print('valid')
else:
form = PostForm()
return render(request, 'all-insta/post.html', {"date": date,'posts': posts,"postForm":form})
## view function that will handle the logic for displaying the search results
def search_results(request):
if 'post' in request.GET and request.GET["post"]:
search_term = request.GET.get("post")
searched_posts = Post.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'all-insta/search.html',{"message":message,"posts": searched_posts})
else:
message = "You haven't searched for any term"
return render(request, 'all-insta/search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def post(request):
try:
post = Post.objects.get(id = post_id)
except DoesNotExist:
raise Http404()
return render(request,"all-insta/single_post.html", {"post":post})
@login_required(login_url='/accounts/login/')
def profile(request):
current_user = request.user
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.profile = current_user
post.save()
return redirect('newpost')
else:
form = PostForm()
return render(request, 'profile.html', {"form": form}) | en | 0.85244 | # Create your views here. ## view function that will handle the logic for displaying the search results | 2.279675 | 2 |
label_ListPage/migrations/0003_auto_20200125_1304.py | hewimetall/django_Help_Desk | 0 | 6615812 | <filename>label_ListPage/migrations/0003_auto_20200125_1304.py
# Generated by Django 3.0.2 on 2020-01-25 13:04
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('label_ListPage', '0002_auto_20200124_1150'),
]
operations = [
migrations.AddField(
model_name='dashbourdbd',
name='manager_a',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING,
related_name='manager_aCr', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='dashbourdbd',
name='priority',
field=models.IntegerField(blank=True, choices=[(1, 'Низкий'), (2, 'Нормальный'), (3, 'Срочный')], default=1,
null=True),
),
migrations.AlterField(
model_name='dashbourdbd',
name='status',
field=models.IntegerField(
choices=[(1, 'В обработке'), (2, 'Отправленно на доработку'), (3, 'В работе'), (4, 'Выполнена'),
(5, 'Закрыта')], default=1),
),
]
| <filename>label_ListPage/migrations/0003_auto_20200125_1304.py
# Generated by Django 3.0.2 on 2020-01-25 13:04
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('label_ListPage', '0002_auto_20200124_1150'),
]
operations = [
migrations.AddField(
model_name='dashbourdbd',
name='manager_a',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING,
related_name='manager_aCr', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='dashbourdbd',
name='priority',
field=models.IntegerField(blank=True, choices=[(1, 'Низкий'), (2, 'Нормальный'), (3, 'Срочный')], default=1,
null=True),
),
migrations.AlterField(
model_name='dashbourdbd',
name='status',
field=models.IntegerField(
choices=[(1, 'В обработке'), (2, 'Отправленно на доработку'), (3, 'В работе'), (4, 'Выполнена'),
(5, 'Закрыта')], default=1),
),
]
| en | 0.817068 | # Generated by Django 3.0.2 on 2020-01-25 13:04 | 1.415516 | 1 |
control/migrations/0001_initial.py | SocialGouv/ecollecte | 9 | 6615813 | # Generated by Django 2.1.3 on 2018-11-19 15:40
from django.db import migrations, models
import django.db.models.deletion
import control.upload_path
class Migration(migrations.Migration):
    """Initial schema for the ``control`` app.

    Creates the Control, Question, QuestionFile, Questionnaire and Theme
    models.  The Question -> Theme foreign key is added in a trailing
    AddField because Theme is declared after Question in this migration,
    so the FK cannot be part of Question's own CreateModel.
    """

    # First migration of the app.
    initial = True

    operations = [
        migrations.CreateModel(
            name='Control',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
            ],
            options={
                'verbose_name': 'Controle',
                'verbose_name_plural': 'Controles',
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'order' is maintained by ordering machinery, not edited by users.
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('description', models.TextField(max_length=255, verbose_name='description')),
            ],
            options={
                'verbose_name': 'Question',
                'verbose_name_plural': 'Questions',
                'ordering': ('theme', 'order'),
            },
        ),
        migrations.CreateModel(
            name='QuestionFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('file', models.FileField(default='', upload_to=control.upload_path.question_file_path, verbose_name='fichier')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='control.Question', verbose_name='question')),
            ],
            options={
                'verbose_name': 'Fichier Attaché',
                'verbose_name_plural': 'Fichiers Attachés',
            },
        ),
        migrations.CreateModel(
            name='Questionnaire',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('title', models.CharField(max_length=255, verbose_name='titre')),
                ('end_date', models.DateField(blank=True, null=True, verbose_name='échéance')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('control', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questionnaires', to='control.Control', verbose_name='controle')),
            ],
            options={
                'verbose_name': 'Questionnaire',
                'verbose_name_plural': 'Questionnaires',
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='Theme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='titre')),
                # lft/rght/tree_id/level look like django-mptt tree bookkeeping
                # columns -- TODO confirm against the app's models.py.
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='control.Theme')),
                ('questionnaire', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='themes', to='control.Questionnaire', verbose_name='questionnaire')),
            ],
            options={
                'verbose_name': 'Thème',
                'verbose_name_plural': 'Thèmes',
            },
        ),
        # Added last because Theme is created after Question above.
        migrations.AddField(
            model_name='question',
            name='theme',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='control.Theme', verbose_name='thème'),
        ),
    ]
| # Generated by Django 2.1.3 on 2018-11-19 15:40
from django.db import migrations, models
import django.db.models.deletion
import control.upload_path
class Migration(migrations.Migration):
    """Initial schema for the ``control`` app.

    Creates the Control, Question, QuestionFile, Questionnaire and Theme
    models.  The Question -> Theme foreign key is added in a trailing
    AddField because Theme is declared after Question in this migration,
    so the FK cannot be part of Question's own CreateModel.
    """

    # First migration of the app.
    initial = True

    operations = [
        migrations.CreateModel(
            name='Control',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
            ],
            options={
                'verbose_name': 'Controle',
                'verbose_name_plural': 'Controles',
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'order' is maintained by ordering machinery, not edited by users.
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('description', models.TextField(max_length=255, verbose_name='description')),
            ],
            options={
                'verbose_name': 'Question',
                'verbose_name_plural': 'Questions',
                'ordering': ('theme', 'order'),
            },
        ),
        migrations.CreateModel(
            name='QuestionFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('file', models.FileField(default='', upload_to=control.upload_path.question_file_path, verbose_name='fichier')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='control.Question', verbose_name='question')),
            ],
            options={
                'verbose_name': 'Fichier Attaché',
                'verbose_name_plural': 'Fichiers Attachés',
            },
        ),
        migrations.CreateModel(
            name='Questionnaire',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('title', models.CharField(max_length=255, verbose_name='titre')),
                ('end_date', models.DateField(blank=True, null=True, verbose_name='échéance')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('control', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questionnaires', to='control.Control', verbose_name='controle')),
            ],
            options={
                'verbose_name': 'Questionnaire',
                'verbose_name_plural': 'Questionnaires',
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='Theme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='titre')),
                # lft/rght/tree_id/level look like django-mptt tree bookkeeping
                # columns -- TODO confirm against the app's models.py.
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='control.Theme')),
                ('questionnaire', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='themes', to='control.Questionnaire', verbose_name='questionnaire')),
            ],
            options={
                'verbose_name': 'Thème',
                'verbose_name_plural': 'Thèmes',
            },
        ),
        # Added last because Theme is created after Question above.
        migrations.AddField(
            model_name='question',
            name='theme',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='control.Theme', verbose_name='thème'),
        ),
    ]
| en | 0.748042 | # Generated by Django 2.1.3 on 2018-11-19 15:40 | 1.838044 | 2 |
thawra/tests/test_hero.py | joehakimrahme/thawra | 0 | 6615814 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from thawra import hero
class HeroTest(unittest.TestCase):
    """Unit tests for hero.Hero construction, derived stats and HP clamping."""

    def setUp(self):
        # A minimal hero: strength=8, intelligence=8, agility=3, with the
        # built-in random-attack macro as its battle gambit.
        self.hero = hero.Hero(name="",
                              skillmap="",
                              attributes=[8, 8, 3],
                              element="fire",
                              macros=hero.randattack)

    def test_attributes(self):
        # The attributes list maps to (strength, intelligence, agility).
        self.assertEqual(self.hero.strength, 8)
        self.assertEqual(self.hero.intelligence, 8)
        self.assertEqual(self.hero.agility, 3)

    def test_level(self):
        # Freshly created heroes start at level 1.
        self.assertEqual(self.hero.level, 1)

    def test_hero_maxHP(self):
        # NOTE(review): max HP appears to scale with intelligence (not
        # strength) -- confirm against the hero.Hero implementation.
        return self.assertEqual(self.hero.hp, self.hero.intelligence * 100)

    def test_hero_maxMP(self):
        return self.assertEqual(self.hero.mp, self.hero.intelligence * 100)

    def test_hero_stats(self):
        # Derived combat stats are fixed multiples of the base attributes.
        return self.assertEqual(self.hero.stats, {
            'ATK': self.hero.strength * 10,
            'DEF': self.hero.strength * 2,
            'MAG': self.hero.intelligence * 7,
            'MDE': self.hero.intelligence * 2,
            'SPD': self.hero.agility * 30})

    def test_hero_hp(self):
        # HP is clamped to the [0, maxHP] range on both underflow and overflow.
        self.assertEqual(self.hero.hp, self.hero.maxHP)
        self.hero.hp -= self.hero.maxHP + 1
        self.assertEqual(self.hero.hp, 0)
        self.hero.hp += self.hero.maxHP * 2
        self.assertEqual(self.hero.hp, self.hero.maxHP)

    def test_invalid_attributes(self):
        # Too few attribute values must raise InvalidHero.
        self.assertRaises(hero.InvalidHero, hero.Hero,
                          "", "", [10], "", None)

    def test_choice(self):
        """The randattack gambit picks a basic attack on the given target.

        (Original note kept: this test should arguably be renamed to
        test_randattack_gambit.)
        """
        choice, target = self.hero.choice([self.hero], [self.hero])
        self.assertEqual(choice, "ATK")
        self.assertEqual(target, [self.hero])
if __name__ == "__main__":
unittest.main()
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from thawra import hero
class HeroTest(unittest.TestCase):
    """Unit tests for hero.Hero construction, derived stats and HP clamping."""

    def setUp(self):
        # A minimal hero: strength=8, intelligence=8, agility=3, with the
        # built-in random-attack macro as its battle gambit.
        self.hero = hero.Hero(name="",
                              skillmap="",
                              attributes=[8, 8, 3],
                              element="fire",
                              macros=hero.randattack)

    def test_attributes(self):
        # The attributes list maps to (strength, intelligence, agility).
        self.assertEqual(self.hero.strength, 8)
        self.assertEqual(self.hero.intelligence, 8)
        self.assertEqual(self.hero.agility, 3)

    def test_level(self):
        # Freshly created heroes start at level 1.
        self.assertEqual(self.hero.level, 1)

    def test_hero_maxHP(self):
        # NOTE(review): max HP appears to scale with intelligence (not
        # strength) -- confirm against the hero.Hero implementation.
        return self.assertEqual(self.hero.hp, self.hero.intelligence * 100)

    def test_hero_maxMP(self):
        return self.assertEqual(self.hero.mp, self.hero.intelligence * 100)

    def test_hero_stats(self):
        # Derived combat stats are fixed multiples of the base attributes.
        return self.assertEqual(self.hero.stats, {
            'ATK': self.hero.strength * 10,
            'DEF': self.hero.strength * 2,
            'MAG': self.hero.intelligence * 7,
            'MDE': self.hero.intelligence * 2,
            'SPD': self.hero.agility * 30})

    def test_hero_hp(self):
        # HP is clamped to the [0, maxHP] range on both underflow and overflow.
        self.assertEqual(self.hero.hp, self.hero.maxHP)
        self.hero.hp -= self.hero.maxHP + 1
        self.assertEqual(self.hero.hp, 0)
        self.hero.hp += self.hero.maxHP * 2
        self.assertEqual(self.hero.hp, self.hero.maxHP)

    def test_invalid_attributes(self):
        # Too few attribute values must raise InvalidHero.
        self.assertRaises(hero.InvalidHero, hero.Hero,
                          "", "", [10], "", None)

    def test_choice(self):
        """The randattack gambit picks a basic attack on the given target.

        (Original note kept: this test should arguably be renamed to
        test_randattack_gambit.)
        """
        choice, target = self.hero.choice([self.hero], [self.hero])
        self.assertEqual(choice, "ATK")
        self.assertEqual(target, [self.hero])
if __name__ == "__main__":
unittest.main() | en | 0.858115 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. This test should be renamed test_randattack gambit. Or something. | 2.613365 | 3 |
python/learn/base/data/tuple.py | qrsforever/workspace | 2 | 6615815 | #!/usr/bin/python2.7
#coding:utf-8
# Small Python 2 demo of tuple behaviour.
print dir(tuple)    # list the methods available on the tuple type
t = (1, 3, 5, 1)
tt = t, 5, 3        # tuple packing: tt == ((1, 3, 5, 1), 5, 3)
print list(t)       # convert a tuple to a list: [1, 3, 5, 1]
print tt
print len(tt)       # 3 -- the inner tuple t counts as a single element
print t.count(1)    # 2
print tt.count(1)   # 0 -- the 1s are nested inside t, not top-level elements
# t[0] = 2 # tuples are immutable
| #!/usr/bin/python2.7
#coding:utf-8
# Small Python 2 demo of tuple behaviour.
print dir(tuple)    # list the methods available on the tuple type
t = (1, 3, 5, 1)
tt = t, 5, 3        # tuple packing: tt == ((1, 3, 5, 1), 5, 3)
print list(t)       # convert a tuple to a list: [1, 3, 5, 1]
print tt
print len(tt)       # 3 -- the inner tuple t counts as a single element
print t.count(1)    # 2
print tt.count(1)   # 0 -- the 1s are nested inside t, not top-level elements
# t[0] = 2 # tuples are immutable
| zh | 0.27043 | #!/usr/bin/python2.7 #coding:utf-8 # t[0] = 2 # 元组是不可变的 | 3.480876 | 3 |
problems/compare-version-numbers.py | sailikhithk/tech-interview-prep | 0 | 6615816 | """
i1 and i2 are the pointers marking the starting positions of the next components.
Characters before i1 and i2 have already been processed.
When they are set to -1, the whole string are already processed.
#for-loop can't find the '.' any more. [0]
after two version is fully processed and couldn't find which is larger, return 0. [1]
Time Complexity is O(N).
N is the length of the version strings, because we potentially loop through each of them once.
Space Complexity is O(1).
Because we only store two pointers and two integers.
"""
class Solution(object):
    """Compare two dotted version strings using O(1) extra space.

    Components are parsed one at a time with index pointers instead of
    splitting the strings up front.
    """

    def compareVersion(self, version1, version2):
        """Return 1 if version1 > version2, -1 if smaller, 0 if equal.

        Exhausted strings yield 0 components, so "1.0" compares equal to "1".
        """
        def getVersion(version, start):
            # Parse the component beginning at `start`; return (value, next
            # start index).  next index is -1 when the string is exhausted,
            # and an exhausted string keeps yielding 0 (zero-padding).
            if start == -1:
                return 0, -1
            # range, not xrange: xrange is Python-2-only (NameError on
            # Python 3); range has identical iteration semantics here.
            for i in range(start, len(version)):
                if version[i] == '.':
                    return int(version[start:i]), i + 1
            return int(version[start:]), -1  # last component: no trailing '.'

        i1 = i2 = 0
        while True:
            sub_version1, i1 = getVersion(version1, i1)
            sub_version2, i2 = getVersion(version2, i2)
            if sub_version1 > sub_version2:
                return 1
            elif sub_version1 < sub_version2:
                return -1
            elif i1 == -1 and i2 == -1:
                # Both strings fully consumed with every component equal.
                return 0
i1 and i2 are the pointers marking the starting positions of the next components.
Characters before i1 and i2 have already been processed.
When they are set to -1, the whole string are already processed.
#for-loop can't find the '.' any more. [0]
after two version is fully processed and couldn't find which is larger, return 0. [1]
Time Complexity is O(N).
N is the length of the version strings, because we potentially loop through each of them once.
Space Complexity is O(1).
Because we only store two pointers and two integers.
"""
class Solution(object):
    """Compare two dotted version strings using O(1) extra space.

    Components are parsed one at a time with index pointers instead of
    splitting the strings up front.
    """

    def compareVersion(self, version1, version2):
        """Return 1 if version1 > version2, -1 if smaller, 0 if equal.

        Exhausted strings yield 0 components, so "1.0" compares equal to "1".
        """
        def getVersion(version, start):
            # Parse the component beginning at `start`; return (value, next
            # start index).  next index is -1 when the string is exhausted,
            # and an exhausted string keeps yielding 0 (zero-padding).
            if start == -1:
                return 0, -1
            # range, not xrange: xrange is Python-2-only (NameError on
            # Python 3); range has identical iteration semantics here.
            for i in range(start, len(version)):
                if version[i] == '.':
                    return int(version[start:i]), i + 1
            return int(version[start:]), -1  # last component: no trailing '.'

        i1 = i2 = 0
        while True:
            sub_version1, i1 = getVersion(version1, i1)
            sub_version2, i2 = getVersion(version2, i2)
            if sub_version1 > sub_version2:
                return 1
            elif sub_version1 < sub_version2:
                return -1
            elif i1 == -1 and i2 == -1:
                # Both strings fully consumed with every component equal.
                return 0
streampy/units/socket/pickleReceiver.py | 69kosh/streamPy | 0 | 6615817 | <filename>streampy/units/socket/pickleReceiver.py
'''
Набор общих юнитов
@author: Kosh
'''
from streampy.units.base.socketServer import Pool, Worker as Base
import pickle
from io import BytesIO
class Worker(Base):
def init(self):
self.buffer = BytesIO()
def prepare(self):
'''
Реализация работы с сокетом для получения pickle-пакетов
'''
result = None
# цель - получить полный пакет, который может
# быть передан в несколько пакетов передачи данных
while True:
try:
# пытаемся достать пакет
picklePos = self.buffer.tell()
result = pickle.load(self.buffer)
# print(('data!', len(result)))
self.buffer = BytesIO(self.buffer.read())
# если получилось, то пытаемся обнулить буфер,
# в случае если это последний пакет в буфере
# pos = self.buffer.tell()
# size = self.buffer.seek(0, 2)
# if pos == size:
# self.buffer.close()
# self.buffer = BytesIO()
# else:
# print((pos, size))
# self.buffer.seek(pos, 0)
break
except:
# восстанавливаем позицию
self.buffer.seek(picklePos, 0)
# если не удалось достать пакет, то пробуем
# добавить информации в буфер из сокета
try:
received = self.connect.recv(self.config.get('bufferSize', 128*1024))
except:
break
# print(('received!', len(received)))
if not received:
break
# если получили данные - добавляем их в буфер, восстанавливая позицию
pos = self.buffer.tell()
# print(('pos!', pos))
self.buffer.seek(0, 2)
pos2 = self.buffer.tell()
# print(('pos2!', pos2))
self.buffer.write(received)
self.buffer.seek(pos, 0)
pos = self.buffer.tell()
# print(('pos2!', pos))
return result
| <filename>streampy/units/socket/pickleReceiver.py
'''
Набор общих юнитов
@author: Kosh
'''
from streampy.units.base.socketServer import Pool, Worker as Base
import pickle
from io import BytesIO
class Worker(Base):
    def init(self):
        # Accumulates raw socket bytes until at least one complete pickle
        # document is available for decoding.
        self.buffer = BytesIO()
    def prepare(self):
        '''
        Socket handling for receiving pickle packets: keep reading from
        the connection until one complete pickled object can be decoded,
        then return it (or None if the connection yields no further data).
        '''
        result = None
        # Goal: obtain one full packet, which may arrive split across
        # several transport-level reads.
        while True:
            try:
                # Try to decode one packet from the buffered bytes.
                picklePos = self.buffer.tell()
                result = pickle.load(self.buffer)
                # print(('data!', len(result)))
                # On success, drop the consumed prefix: rebuild the buffer
                # from whatever bytes follow the decoded packet.
                self.buffer = BytesIO(self.buffer.read())
                # (Earlier approach, kept for reference: reset the buffer
                # only when this was the last packet in it.)
                # pos = self.buffer.tell()
                # size = self.buffer.seek(0, 2)
                # if pos == size:
                #     self.buffer.close()
                #     self.buffer = BytesIO()
                # else:
                #     print((pos, size))
                #     self.buffer.seek(pos, 0)
                break
            except:
                # NOTE(review): bare `except:` treats *any* failure as
                # "incomplete data", including pickle corruption and
                # programming errors -- consider narrowing, e.g. to
                # (EOFError, pickle.UnpicklingError, ValueError).
                # Restore the read position to the start of the partial packet.
                self.buffer.seek(picklePos, 0)
                # Decoding failed: try to pull more bytes from the socket.
                try:
                    received = self.connect.recv(self.config.get('bufferSize', 128*1024))
                except:
                    break
                # print(('received!', len(received)))
                if not received:
                    break
                # Got data: append it at the end of the buffer, then restore
                # the original read position.
                pos = self.buffer.tell()
                # print(('pos!', pos))
                self.buffer.seek(0, 2)
                pos2 = self.buffer.tell()  # NOTE(review): only used by the debug print below
                # print(('pos2!', pos2))
                self.buffer.write(received)
                self.buffer.seek(pos, 0)
                pos = self.buffer.tell()
                # print(('pos2!', pos))
        return result
| ru | 0.913962 | Набор общих юнитов
@author: Kosh Реализация работы с сокетом для получения pickle-пакетов # цель - получить полный пакет, который может # быть передан в несколько пакетов передачи данных # пытаемся достать пакет # print(('data!', len(result))) # если получилось, то пытаемся обнулить буфер, # в случае если это последний пакет в буфере # pos = self.buffer.tell() # size = self.buffer.seek(0, 2) # if pos == size: # self.buffer.close() # self.buffer = BytesIO() # else: # print((pos, size)) # self.buffer.seek(pos, 0) # восстанавливаем позицию # если не удалось достать пакет, то пробуем # добавить информации в буфер из сокета # print(('received!', len(received))) # если получили данные - добавляем их в буфер, восстанавливая позицию # print(('pos!', pos)) # print(('pos2!', pos2)) # print(('pos2!', pos)) | 2.627215 | 3 |
test/services_tests.py | jeritgeorge/ros_robodk_post_processors | 0 | 6615818 | <reponame>jeritgeorge/ros_robodk_post_processors
#!/usr/bin/env python
package = 'ros_robodk_post_processors'
service_base_name = "/robodk_post_processors/"
from ros_robodk_post_processors.srv import *
import geometry_msgs.msg
import rospy
import unittest
def checkService(service_name):
    """Return True if the ROS service becomes available within 1 second.

    Logs an error and returns False on timeout or any other failure.
    """
    service_available = False
    try:
        rospy.wait_for_service(service_name, 1)
        service_available = True
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` still covers the
        # rospy.ROSException raised on timeout.
        rospy.logerr("Could not connect to service %s" % service_name)
    return service_available
class ServicesTests(unittest.TestCase):
    def testWaitForServices(self):
        # Every service the post-processor node is expected to advertise.
        services = ["move_c", "move_j", "move_l", "pause", "prog_finish", "prog_save", "prog_send_robot", "prog_start", "run_code", "run_message", "set_do", "set_go", "set_frame", "set_speed", "set_speed_joints", "set_tool", "set_zone_data", "wait_di"]
        for name in services:
            service = service_base_name + name
            # checkService waits up to 1 second per service before giving up.
            self.assertEquals(checkService(service), True, "Service %s is not available!" % service)
def testFanucProgram(self):
rospy.wait_for_service(service_base_name + "prog_start")
rospy.wait_for_service(service_base_name + "set_tool")
rospy.wait_for_service(service_base_name + "set_frame")
rospy.wait_for_service(service_base_name + "move_c")
rospy.wait_for_service(service_base_name + "set_speed_joints")
rospy.wait_for_service(service_base_name + "move_j")
rospy.wait_for_service(service_base_name + "set_zone_data")
rospy.wait_for_service(service_base_name + "set_speed")
rospy.wait_for_service(service_base_name + "move_l")
rospy.wait_for_service(service_base_name + "run_message")
rospy.wait_for_service(service_base_name + "pause")
rospy.wait_for_service(service_base_name + "set_do")
rospy.wait_for_service(service_base_name + "set_go")
rospy.wait_for_service(service_base_name + "run_message")
rospy.wait_for_service(service_base_name + "wait_di")
rospy.wait_for_service(service_base_name + "run_code")
rospy.wait_for_service(service_base_name + "prog_finish")
rospy.wait_for_service(service_base_name + "prog_save")
#------prog_start-----
service = service_base_name + "prog_start"
srv = rospy.ServiceProxy(service, ProgStart)
success = False
try:
resp = srv("Fanuc_R30iA", "test", "")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_tool-----
service = service_base_name + "set_tool"
srv = rospy.ServiceProxy(service, SetTool)
success = False
try:
resp = srv(0, "tool", geometry_msgs.msg.Pose(geometry_msgs.msg.Point(0, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)))
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_frame-----
service = service_base_name + "set_frame"
srv = rospy.ServiceProxy(service, SetFrame)
success = False
try:
resp = srv(1, "frame", geometry_msgs.msg.Pose(geometry_msgs.msg.Point(0, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)))
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_c-----
# service = service_base_name + "move_c"
# srv = rospy.ServiceProxy(service, MoveC)
# success = False
# try:
# resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0],
# geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1.5, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0])
# success = True
# except rospy.ServiceException as exc:
# rospy.logerr("Service did not process request: " + str(exc))
# self.assertEquals(success, True, "Failed to call service %s" % srv)
# self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_speed_joints-----
service = service_base_name + "set_speed_joints"
srv = rospy.ServiceProxy(service, SetSpeedJoints)
success = False
try:
resp = srv(20.0) #takes in degrees/sec, inserts % speed for joint moves
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_j-----
service = service_base_name + "move_j"
srv = rospy.ServiceProxy(service, MoveJ)
success = False
try:
resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_zone_data-----
service = service_base_name + "set_zone_data"
srv = rospy.ServiceProxy(service, SetZoneData)
success = False
try:
resp = srv(2.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_speed-----
service = service_base_name + "set_speed"
srv = rospy.ServiceProxy(service, SetSpeed)
success = False
try:
resp = srv(20.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_l-----
service = service_base_name + "move_l"
srv = rospy.ServiceProxy(service, MoveL)
success = False
try:
resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0.5, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_l-----joint position
service = service_base_name + "move_l"
srv = rospy.ServiceProxy(service, MoveL)
success = False
try:
resp = srv(None,
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------run_message-----
service = service_base_name + "run_message"
srv = rospy.ServiceProxy(service, RunMessage)
success = False
try:
resp = srv("A run message")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------pause-----
service = service_base_name + "pause"
srv = rospy.ServiceProxy(service, Pause)
success = False
try:
resp = srv(1.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_do-----
service = service_base_name + "set_do"
srv = rospy.ServiceProxy(service, SetDO)
success = False
try:
resp = srv('1', True)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_go-----
service = service_base_name + "set_go"
srv = rospy.ServiceProxy(service, SetGO)
success = False
try:
resp = srv('12', '1')
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------wait_di-----
service = service_base_name + "wait_di"
srv = rospy.ServiceProxy(service, WaitDI)
success = False
try:
resp = srv('2', True, 0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------run_code-----
service = service_base_name + "run_code"
srv = rospy.ServiceProxy(service, RunCode)
success = False
try:
resp = srv("MY_FUNC", False)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------prog_finish-----
service = service_base_name + "prog_finish"
srv = rospy.ServiceProxy(service, ProgFinish)
success = False
try:
resp = srv("test")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------prog_save-----
service = service_base_name + "prog_save"
srv = rospy.ServiceProxy(service, ProgSave)
success = False
try:
resp = srv("test", "/home/controls/catkin_ws/src/ros_robodk_post_processors")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
if __name__ == '__main__':
import rostest
rostest.rosrun(package, "services_tests", ServicesTests, sys.argv)
| #!/usr/bin/env python
package = 'ros_robodk_post_processors'
service_base_name = "/robodk_post_processors/"
from ros_robodk_post_processors.srv import *
import geometry_msgs.msg
import rospy
import unittest
def checkService(service_name):
    """Return True if the ROS service becomes available within 1 second.

    Logs an error and returns False on timeout or any other failure.
    """
    service_available = False
    try:
        rospy.wait_for_service(service_name, 1)
        service_available = True
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` still covers the
        # rospy.ROSException raised on timeout.
        rospy.logerr("Could not connect to service %s" % service_name)
    return service_available
class ServicesTests(unittest.TestCase):
    def testWaitForServices(self):
        # Every service the post-processor node is expected to advertise.
        services = ["move_c", "move_j", "move_l", "pause", "prog_finish", "prog_save", "prog_send_robot", "prog_start", "run_code", "run_message", "set_do", "set_go", "set_frame", "set_speed", "set_speed_joints", "set_tool", "set_zone_data", "wait_di"]
        for name in services:
            service = service_base_name + name
            # checkService waits up to 1 second per service before giving up.
            self.assertEquals(checkService(service), True, "Service %s is not available!" % service)
def testFanucProgram(self):
rospy.wait_for_service(service_base_name + "prog_start")
rospy.wait_for_service(service_base_name + "set_tool")
rospy.wait_for_service(service_base_name + "set_frame")
rospy.wait_for_service(service_base_name + "move_c")
rospy.wait_for_service(service_base_name + "set_speed_joints")
rospy.wait_for_service(service_base_name + "move_j")
rospy.wait_for_service(service_base_name + "set_zone_data")
rospy.wait_for_service(service_base_name + "set_speed")
rospy.wait_for_service(service_base_name + "move_l")
rospy.wait_for_service(service_base_name + "run_message")
rospy.wait_for_service(service_base_name + "pause")
rospy.wait_for_service(service_base_name + "set_do")
rospy.wait_for_service(service_base_name + "set_go")
rospy.wait_for_service(service_base_name + "run_message")
rospy.wait_for_service(service_base_name + "wait_di")
rospy.wait_for_service(service_base_name + "run_code")
rospy.wait_for_service(service_base_name + "prog_finish")
rospy.wait_for_service(service_base_name + "prog_save")
#------prog_start-----
service = service_base_name + "prog_start"
srv = rospy.ServiceProxy(service, ProgStart)
success = False
try:
resp = srv("Fanuc_R30iA", "test", "")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_tool-----
service = service_base_name + "set_tool"
srv = rospy.ServiceProxy(service, SetTool)
success = False
try:
resp = srv(0, "tool", geometry_msgs.msg.Pose(geometry_msgs.msg.Point(0, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)))
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_frame-----
service = service_base_name + "set_frame"
srv = rospy.ServiceProxy(service, SetFrame)
success = False
try:
resp = srv(1, "frame", geometry_msgs.msg.Pose(geometry_msgs.msg.Point(0, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)))
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_c-----
# service = service_base_name + "move_c"
# srv = rospy.ServiceProxy(service, MoveC)
# success = False
# try:
# resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0],
# geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1.5, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0])
# success = True
# except rospy.ServiceException as exc:
# rospy.logerr("Service did not process request: " + str(exc))
# self.assertEquals(success, True, "Failed to call service %s" % srv)
# self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_speed_joints-----
service = service_base_name + "set_speed_joints"
srv = rospy.ServiceProxy(service, SetSpeedJoints)
success = False
try:
resp = srv(20.0) #takes in degrees/sec, inserts % speed for joint moves
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_j-----
service = service_base_name + "move_j"
srv = rospy.ServiceProxy(service, MoveJ)
success = False
try:
resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_zone_data-----
service = service_base_name + "set_zone_data"
srv = rospy.ServiceProxy(service, SetZoneData)
success = False
try:
resp = srv(2.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_speed-----
service = service_base_name + "set_speed"
srv = rospy.ServiceProxy(service, SetSpeed)
success = False
try:
resp = srv(20.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_l-----
service = service_base_name + "move_l"
srv = rospy.ServiceProxy(service, MoveL)
success = False
try:
resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0.5, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_l-----joint position
service = service_base_name + "move_l"
srv = rospy.ServiceProxy(service, MoveL)
success = False
try:
resp = srv(None,
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------run_message-----
service = service_base_name + "run_message"
srv = rospy.ServiceProxy(service, RunMessage)
success = False
try:
resp = srv("A run message")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------pause-----
service = service_base_name + "pause"
srv = rospy.ServiceProxy(service, Pause)
success = False
try:
resp = srv(1.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_do-----
service = service_base_name + "set_do"
srv = rospy.ServiceProxy(service, SetDO)
success = False
try:
resp = srv('1', True)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_go-----
service = service_base_name + "set_go"
srv = rospy.ServiceProxy(service, SetGO)
success = False
try:
resp = srv('12', '1')
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------wait_di-----
service = service_base_name + "wait_di"
srv = rospy.ServiceProxy(service, WaitDI)
success = False
try:
resp = srv('2', True, 0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------run_code-----
service = service_base_name + "run_code"
srv = rospy.ServiceProxy(service, RunCode)
success = False
try:
resp = srv("MY_FUNC", False)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------prog_finish-----
service = service_base_name + "prog_finish"
srv = rospy.ServiceProxy(service, ProgFinish)
success = False
try:
resp = srv("test")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------prog_save-----
service = service_base_name + "prog_save"
srv = rospy.ServiceProxy(service, ProgSave)
success = False
try:
resp = srv("test", "/home/controls/catkin_ws/src/ros_robodk_post_processors")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
if __name__ == '__main__':
import rostest
rostest.rosrun(package, "services_tests", ServicesTests, sys.argv) | en | 0.16137 | #!/usr/bin/env python #------prog_start----- #------set_tool----- #------set_frame----- #------move_c----- # service = service_base_name + "move_c" # srv = rospy.ServiceProxy(service, MoveC) # success = False # try: # resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)), # [0, 0, 0, 0, 0, 0], # [0, 0, 0], # geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1.5, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)), # [0, 0, 0, 0, 0, 0], # [0, 0, 0]) # success = True # except rospy.ServiceException as exc: # rospy.logerr("Service did not process request: " + str(exc)) # self.assertEquals(success, True, "Failed to call service %s" % srv) # self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error)) #------set_speed_joints----- #takes in degrees/sec, inserts % speed for joint moves #------move_j----- #------set_zone_data----- #------set_speed----- #------move_l----- #------move_l-----joint position #------run_message----- #------pause----- #------set_do----- #------set_go----- #------wait_di----- #------run_code----- #------prog_finish----- #------prog_save----- | 2.329605 | 2 |
dotfiles/spectrwm/purp/.config/sublime-text-3/Packages/LaTeXTools/change_environment.py | jturne19/jordans_things | 0 | 6615819 | import sublime
import sublime_plugin
import re
class LatexChangeEnvironmentCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
new_regions = _find_env_regions(view)
if not new_regions:
return
view.sel().clear()
for r in new_regions:
view.sel().add(r)
class LatexToggleEnvironmentStarCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
new_regions = _find_env_regions(view)
if not new_regions:
return
# replace '*' with '' or vice versa for each region
for r in reversed(new_regions):
if view.substr(r).endswith('*'):
view.replace(edit, r, view.substr(r)[:-1])
else:
view.replace(edit, r, view.substr(r) + "*")
def _find_env_regions(view):
"""returns the regions corresponding to nearest matching environments"""
begin_re = r"\\begin(?:\[[^\]]*\])?\{([^\}]*)\}"
end_re = r"\\end\{([^\}]*)\}"
begins = view.find_all(begin_re, sublime.IGNORECASE)
ends = view.find_all(end_re, sublime.IGNORECASE)
# compile the begin_re (findall does not work if its compiled)
begin_re = re.compile(begin_re)
comment_line_re = re.compile(r"\s*%.*")
def is_comment(reg):
line_str = view.substr(view.line(reg))
return comment_line_re.match(line_str) is not None
begins = [b for b in begins if not is_comment(b)]
ends = [e for e in ends if not is_comment(e)]
def extract_begin_region(region):
"""creates a sublime.Region: \\begin{|text|}"""
s = view.substr(region)
boffset = len("\\begin{")
m = begin_re.search(s)
if m:
boffset = m.regs[1][0]
return sublime.Region(region.begin() + boffset, region.end() - 1)
def extract_end_region(region):
"""creates a sublime.Region: \\end{|text|}"""
boffset = len("\\end{")
return sublime.Region(region.begin() + boffset, region.end() - 1)
new_regions = []
one_sel = len(view.sel()) == 1
for sel in view.sel():
# partition the open and closed environments
begin_before, begin_after =\
_partition(begins, lambda b: b.begin() <= sel.begin())
end_before, end_after =\
_partition(ends, lambda e: e.end() < sel.begin())
# get the nearest open environments
try:
begin = _get_closest_begin(begin_before, end_before)
end = _get_closest_end(end_after, begin_after)
except NoEnvError as e:
if one_sel:
sublime.status_message(e.args[0])
return []
else:
continue
# extract the regions for the environments
begin_region = extract_begin_region(begin)
end_region = extract_end_region(end)
# validity check: matching env name
if view.substr(begin_region) == view.substr(end_region):
new_regions.append(begin_region)
new_regions.append(end_region)
elif one_sel:
sublime.status_message(
"The environment begin and end does not match:"
"'{0}' and '{1}'"
.format(view.substr(begin_region), view.substr(end_region))
)
if not new_regions:
sublime.status_message("Environment detection failed")
return new_regions
def _partition(env_list, is_before):
"""partition the list in the list items before and after the sel"""
before, after = [], []
iterator = iter(env_list)
while True:
try:
item = next(iterator)
except:
break
if is_before(item):
before.append(item)
else:
after.append(item)
after.extend(iterator)
break
return before, after
class NoEnvError(Exception):
pass
def _get_closest_begin(begin_before, end_before):
"""returns the closest \\begin, that is open"""
end_iter = reversed(end_before)
begin_iter = reversed(begin_before)
while True:
try:
b = next(begin_iter)
except:
raise NoEnvError("No open environment detected")
try:
e = next(end_iter)
except:
break
if not b.begin() < e.begin():
break
return b
def _get_closest_end(end_after, begin_after):
"""returns the closest \\end, that is open"""
end_iter = iter(end_after)
begin_iter = iter(begin_after)
while True:
try:
e = next(end_iter)
except:
raise NoEnvError("No closing environment detected")
try:
b = next(begin_iter)
except:
break
if not e.begin() > b.begin():
break
return e
| import sublime
import sublime_plugin
import re
class LatexChangeEnvironmentCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
new_regions = _find_env_regions(view)
if not new_regions:
return
view.sel().clear()
for r in new_regions:
view.sel().add(r)
class LatexToggleEnvironmentStarCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
new_regions = _find_env_regions(view)
if not new_regions:
return
# replace '*' with '' or vice versa for each region
for r in reversed(new_regions):
if view.substr(r).endswith('*'):
view.replace(edit, r, view.substr(r)[:-1])
else:
view.replace(edit, r, view.substr(r) + "*")
def _find_env_regions(view):
"""returns the regions corresponding to nearest matching environments"""
begin_re = r"\\begin(?:\[[^\]]*\])?\{([^\}]*)\}"
end_re = r"\\end\{([^\}]*)\}"
begins = view.find_all(begin_re, sublime.IGNORECASE)
ends = view.find_all(end_re, sublime.IGNORECASE)
# compile the begin_re (findall does not work if its compiled)
begin_re = re.compile(begin_re)
comment_line_re = re.compile(r"\s*%.*")
def is_comment(reg):
line_str = view.substr(view.line(reg))
return comment_line_re.match(line_str) is not None
begins = [b for b in begins if not is_comment(b)]
ends = [e for e in ends if not is_comment(e)]
def extract_begin_region(region):
"""creates a sublime.Region: \\begin{|text|}"""
s = view.substr(region)
boffset = len("\\begin{")
m = begin_re.search(s)
if m:
boffset = m.regs[1][0]
return sublime.Region(region.begin() + boffset, region.end() - 1)
def extract_end_region(region):
"""creates a sublime.Region: \\end{|text|}"""
boffset = len("\\end{")
return sublime.Region(region.begin() + boffset, region.end() - 1)
new_regions = []
one_sel = len(view.sel()) == 1
for sel in view.sel():
# partition the open and closed environments
begin_before, begin_after =\
_partition(begins, lambda b: b.begin() <= sel.begin())
end_before, end_after =\
_partition(ends, lambda e: e.end() < sel.begin())
# get the nearest open environments
try:
begin = _get_closest_begin(begin_before, end_before)
end = _get_closest_end(end_after, begin_after)
except NoEnvError as e:
if one_sel:
sublime.status_message(e.args[0])
return []
else:
continue
# extract the regions for the environments
begin_region = extract_begin_region(begin)
end_region = extract_end_region(end)
# validity check: matching env name
if view.substr(begin_region) == view.substr(end_region):
new_regions.append(begin_region)
new_regions.append(end_region)
elif one_sel:
sublime.status_message(
"The environment begin and end does not match:"
"'{0}' and '{1}'"
.format(view.substr(begin_region), view.substr(end_region))
)
if not new_regions:
sublime.status_message("Environment detection failed")
return new_regions
def _partition(env_list, is_before):
"""partition the list in the list items before and after the sel"""
before, after = [], []
iterator = iter(env_list)
while True:
try:
item = next(iterator)
except:
break
if is_before(item):
before.append(item)
else:
after.append(item)
after.extend(iterator)
break
return before, after
class NoEnvError(Exception):
pass
def _get_closest_begin(begin_before, end_before):
"""returns the closest \\begin, that is open"""
end_iter = reversed(end_before)
begin_iter = reversed(begin_before)
while True:
try:
b = next(begin_iter)
except:
raise NoEnvError("No open environment detected")
try:
e = next(end_iter)
except:
break
if not b.begin() < e.begin():
break
return b
def _get_closest_end(end_after, begin_after):
"""returns the closest \\end, that is open"""
end_iter = iter(end_after)
begin_iter = iter(begin_after)
while True:
try:
e = next(end_iter)
except:
raise NoEnvError("No closing environment detected")
try:
b = next(begin_iter)
except:
break
if not e.begin() > b.begin():
break
return e
| en | 0.760069 | # replace '*' with '' or vice versa for each region returns the regions corresponding to nearest matching environments # compile the begin_re (findall does not work if its compiled) creates a sublime.Region: \\begin{|text|} creates a sublime.Region: \\end{|text|} # partition the open and closed environments # get the nearest open environments # extract the regions for the environments # validity check: matching env name partition the list in the list items before and after the sel returns the closest \\begin, that is open returns the closest \\end, that is open | 2.648805 | 3 |
ibm/urls.py | arianmotti/story-contest | 3 | 6615820 | from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^convert_post_to_speech/(?P<pk>\d{1,})/$' , views.convert_post_to_speech , name = 'convert_post_to_speech'),
re_path(r'^convert_comment_to_speech/(?P<pk>\d{1,})/$' , views.convert_comment_to_speech , name = 'convert_comment_to_speech'),
] | from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^convert_post_to_speech/(?P<pk>\d{1,})/$' , views.convert_post_to_speech , name = 'convert_post_to_speech'),
re_path(r'^convert_comment_to_speech/(?P<pk>\d{1,})/$' , views.convert_comment_to_speech , name = 'convert_comment_to_speech'),
] | none | 1 | 1.739433 | 2 | |
utils/rmsd/wrapper.py | ruixingw/rxcclib | 1 | 6615821 | from .calculate_rmsd import *
from rxcclib.utils.cclib.utils import PeriodicTable
def getrmsd(xyz1, xyz2, nohydrogen=False):
p_atoms = [PeriodicTable.element[x] for x in xyz1.fchk.atomnos]
p_all = np.array(xyz1.fchk.atomcoords[-1])
q_atoms = [PeriodicTable.element[x] for x in xyz2.fchk.atomnos]
q_all = np.array(xyz2.fchk.atomcoords[-1])
if np.count_nonzero(p_atoms != q_atoms):
exit("Atoms not in the same order")
P = p_all
Q = q_all
if nohydrogen:
not_hydrogens = np.where(p_atoms != 'H')
P = p_all[not_hydrogens]
Q = q_all[not_hydrogens]
# elif args.remove_idx:
# N, = p_atoms.shape
# index = list(range(N))
# index = set(index) - set(args.remove_idx)
# index = list(index)
# P = p_all[index]
# Q = q_all[index]
# elif args.add_idx:
# P = p_all[args.add_idx]
# Q = q_all[args.add_idx]
# Calculate 'dumb' RMSD
normal_rmsd = rmsd(P, Q)
# Create the centroid of P and Q which is the geometric center of a
# N-dimensional region and translate P and Q onto that center.
# http://en.wikipedia.org/wiki/Centroid
Pc = centroid(P)
Qc = centroid(Q)
P -= Pc
Q -= Qc
# if args.output:
# U = kabsch(P, Q)
# p_all -= Pc
# p_all = np.dot(p_all, U)
# write_coordinates(p_atoms, p_all, title="{} translated".format(args.structure_a))
# quit()
return quaternion_rmsd(P, Q)
| from .calculate_rmsd import *
from rxcclib.utils.cclib.utils import PeriodicTable
def getrmsd(xyz1, xyz2, nohydrogen=False):
p_atoms = [PeriodicTable.element[x] for x in xyz1.fchk.atomnos]
p_all = np.array(xyz1.fchk.atomcoords[-1])
q_atoms = [PeriodicTable.element[x] for x in xyz2.fchk.atomnos]
q_all = np.array(xyz2.fchk.atomcoords[-1])
if np.count_nonzero(p_atoms != q_atoms):
exit("Atoms not in the same order")
P = p_all
Q = q_all
if nohydrogen:
not_hydrogens = np.where(p_atoms != 'H')
P = p_all[not_hydrogens]
Q = q_all[not_hydrogens]
# elif args.remove_idx:
# N, = p_atoms.shape
# index = list(range(N))
# index = set(index) - set(args.remove_idx)
# index = list(index)
# P = p_all[index]
# Q = q_all[index]
# elif args.add_idx:
# P = p_all[args.add_idx]
# Q = q_all[args.add_idx]
# Calculate 'dumb' RMSD
normal_rmsd = rmsd(P, Q)
# Create the centroid of P and Q which is the geometric center of a
# N-dimensional region and translate P and Q onto that center.
# http://en.wikipedia.org/wiki/Centroid
Pc = centroid(P)
Qc = centroid(Q)
P -= Pc
Q -= Qc
# if args.output:
# U = kabsch(P, Q)
# p_all -= Pc
# p_all = np.dot(p_all, U)
# write_coordinates(p_atoms, p_all, title="{} translated".format(args.structure_a))
# quit()
return quaternion_rmsd(P, Q)
| en | 0.331466 | # elif args.remove_idx: # N, = p_atoms.shape # index = list(range(N)) # index = set(index) - set(args.remove_idx) # index = list(index) # P = p_all[index] # Q = q_all[index] # elif args.add_idx: # P = p_all[args.add_idx] # Q = q_all[args.add_idx] # Calculate 'dumb' RMSD # Create the centroid of P and Q which is the geometric center of a # N-dimensional region and translate P and Q onto that center. # http://en.wikipedia.org/wiki/Centroid # if args.output: # U = kabsch(P, Q) # p_all -= Pc # p_all = np.dot(p_all, U) # write_coordinates(p_atoms, p_all, title="{} translated".format(args.structure_a)) # quit() | 2.560722 | 3 |
data_preprocessor.py | jlhbaseball15/nmt_chinese_to_english | 3 | 6615822 | import numpy as np
import os
import gzip
import pickle
from IPython import embed
import xml.etree.ElementTree as ET
class CorpusFileMapping:
def __init__(self, english_filename, chinese_filename, sentence_mappings):
self.english_filename = english_filename
self.chinese_filename = chinese_filename
self.sentence_mappings = sentence_mappings
class Sentence:
def __init__(self, sentence, tag):
self.tag = tag
self.sentence = sentence
class DatasetProcessor:
def __init__(self):
self.ChineseDictionary = {}
self.EnglishDictionary = {}
self.EnglishDataset = []
self.ChineseDataset = []
def CreateDataset(self, filename, saveDictionary=True, saveDataset=True):
sentence_mappings = self.read_sentence_mapping(filename)
self.ProcessSentenceMappings(sentence_mappings)
if saveDictionary:
self.save_dictionaries()
def LoadCorpusFiles(self, filename):
english_corpus_files = []
chinese_corpus_files = []
return english_corpus_files, chinese_corpus_files
def CloseCorpusFiles(self, files):
for f in files:
f.close()
def ProcessSentenceMappings(self, file_mappings, saveDatasets=True):
dataset_count = 0
for i, fm in enumerate(file_mappings):
print "Processing " + fm.english_filename + " and " + fm.chinese_filename
english_data = self.ProcessCorpusFile(fm.english_filename, 'English')
chinese_data = self.ProcessCorpusFile(fm.chinese_filename, 'Chinese')
english_data, chinese_data = self.AlignDatasets(english_data, chinese_data, fm.sentence_mappings)
print "Aligned " + fm.english_filename + " and " + fm.chinese_filename
self.EnglishDataset.extend(english_data)
self.ChineseDataset.extend(chinese_data)
if i % 25 == 24:
if saveDatasets:
print "Saving Dataset" + str(dataset_count)
self.saveDatasets(dataset_count)
dataset_count += 1
self.EnglishDataset = []
self.ChineseDataset = []
self.saveDatasets(dataset_count)
def read_sentence_mapping(self, xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
file_maps = []
for linkGroup in root:
english_file = linkGroup.attrib['fromDoc']
chinese_file = linkGroup.attrib['toDoc']
sentence_mappings = []
for link in linkGroup:
mapping = self.processXMLMapping(link.attrib['xtargets'])
sentence_mappings.append(mapping)
file_map = CorpusFileMapping(english_file, chinese_file, sentence_mappings)
file_maps.append(file_map)
return file_maps
def AlignDatasets(self, english_data, chinese_data, sentence_mappings):
edata = []
cdata = []
for sm in sentence_mappings:
english = []
for i in sm[0]:
try:
english.extend(english_data[i - 1])
except:
print len(english_data)
print i
chinese = []
for i in sm[1]:
chinese.extend(chinese_data[i - 1])
edata.append(english)
cdata.append(chinese)
return edata, cdata
def processXMLMapping(self, link_attrib):
english_chinese_split = link_attrib.split(';')
for s in range(len(english_chinese_split)):
if english_chinese_split[s] is '':
english_chinese_split[s] = '-1'
english_chinese_split[0] = map(int, english_chinese_split[0].split(' '))
english_chinese_split[1] = map(int, english_chinese_split[1].split(' '))
return english_chinese_split
# this will need to change based on different xml structures, but for our data set, this splits and tokenizes the sentences
def ProcessCorpusFile(self, filename, language):
with gzip.open(filename, 'rb') as f:
tree = ET.parse(f)
data = []
root = tree.getroot()
f.close()
for child in root:
sentence = []
for token in child:
if (token.tag == 'w'):
text = token.text
if language is 'English':
text = self.fix_lower_l(text)
self.add_to_dictionary(text, language)
sentence.append(text)
sentence.append("</s>")
data.append(sentence)
return data
def fix_lower_l(self, text):
if 'l' in text:
if text.replace('l', '') == text.replace('l', '').upper():
text = text.replace('l', 'I')
return text
def add_to_dictionary(self, word, language):
d = None
if language is 'English':
d = self.EnglishDictionary
elif language is 'Chinese':
d = self.ChineseDictionary
if word not in d.keys():
d[word] = len(d.keys())
def save_dictionaries(self):
with open('Chinese_Dictionary.pkl', 'wb') as f:
pickle.dump(self.ChineseDictionary, f, pickle.HIGHEST_PROTOCOL)
f.close()
with open('English_Dictionary.pkl', 'wb') as f:
pickle.dump(self.EnglishDictionary, f, pickle.HIGHEST_PROTOCOL)
f.close()
def saveDatasets(self, dataset_count):
e_filename = "pickle/english_dataset_" + str(dataset_count) + ".pkl"
c_filename = "pickle/chinese_dataset_" + str(dataset_count) + ".pkl"
e_file = open(e_filename, 'wb')
c_file = open(c_filename, 'wb')
pickle.dump(self.EnglishDataset, e_file)
pickle.dump(self.ChineseDataset, c_file)
e_file.close()
c_file.close()
def main():
dp = DatasetProcessor()
dp.CreateDataset('en-zh_cn.xml')
embed()
if __name__ == '__main__':
main()
| import numpy as np
import os
import gzip
import pickle
from IPython import embed
import xml.etree.ElementTree as ET
class CorpusFileMapping:
def __init__(self, english_filename, chinese_filename, sentence_mappings):
self.english_filename = english_filename
self.chinese_filename = chinese_filename
self.sentence_mappings = sentence_mappings
class Sentence:
def __init__(self, sentence, tag):
self.tag = tag
self.sentence = sentence
class DatasetProcessor:
def __init__(self):
self.ChineseDictionary = {}
self.EnglishDictionary = {}
self.EnglishDataset = []
self.ChineseDataset = []
def CreateDataset(self, filename, saveDictionary=True, saveDataset=True):
sentence_mappings = self.read_sentence_mapping(filename)
self.ProcessSentenceMappings(sentence_mappings)
if saveDictionary:
self.save_dictionaries()
def LoadCorpusFiles(self, filename):
english_corpus_files = []
chinese_corpus_files = []
return english_corpus_files, chinese_corpus_files
def CloseCorpusFiles(self, files):
for f in files:
f.close()
def ProcessSentenceMappings(self, file_mappings, saveDatasets=True):
dataset_count = 0
for i, fm in enumerate(file_mappings):
print "Processing " + fm.english_filename + " and " + fm.chinese_filename
english_data = self.ProcessCorpusFile(fm.english_filename, 'English')
chinese_data = self.ProcessCorpusFile(fm.chinese_filename, 'Chinese')
english_data, chinese_data = self.AlignDatasets(english_data, chinese_data, fm.sentence_mappings)
print "Aligned " + fm.english_filename + " and " + fm.chinese_filename
self.EnglishDataset.extend(english_data)
self.ChineseDataset.extend(chinese_data)
if i % 25 == 24:
if saveDatasets:
print "Saving Dataset" + str(dataset_count)
self.saveDatasets(dataset_count)
dataset_count += 1
self.EnglishDataset = []
self.ChineseDataset = []
self.saveDatasets(dataset_count)
def read_sentence_mapping(self, xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
file_maps = []
for linkGroup in root:
english_file = linkGroup.attrib['fromDoc']
chinese_file = linkGroup.attrib['toDoc']
sentence_mappings = []
for link in linkGroup:
mapping = self.processXMLMapping(link.attrib['xtargets'])
sentence_mappings.append(mapping)
file_map = CorpusFileMapping(english_file, chinese_file, sentence_mappings)
file_maps.append(file_map)
return file_maps
def AlignDatasets(self, english_data, chinese_data, sentence_mappings):
edata = []
cdata = []
for sm in sentence_mappings:
english = []
for i in sm[0]:
try:
english.extend(english_data[i - 1])
except:
print len(english_data)
print i
chinese = []
for i in sm[1]:
chinese.extend(chinese_data[i - 1])
edata.append(english)
cdata.append(chinese)
return edata, cdata
def processXMLMapping(self, link_attrib):
english_chinese_split = link_attrib.split(';')
for s in range(len(english_chinese_split)):
if english_chinese_split[s] is '':
english_chinese_split[s] = '-1'
english_chinese_split[0] = map(int, english_chinese_split[0].split(' '))
english_chinese_split[1] = map(int, english_chinese_split[1].split(' '))
return english_chinese_split
# this will need to change based on different xml structures, but for our data set, this splits and tokenizes the sentences
def ProcessCorpusFile(self, filename, language):
with gzip.open(filename, 'rb') as f:
tree = ET.parse(f)
data = []
root = tree.getroot()
f.close()
for child in root:
sentence = []
for token in child:
if (token.tag == 'w'):
text = token.text
if language is 'English':
text = self.fix_lower_l(text)
self.add_to_dictionary(text, language)
sentence.append(text)
sentence.append("</s>")
data.append(sentence)
return data
def fix_lower_l(self, text):
if 'l' in text:
if text.replace('l', '') == text.replace('l', '').upper():
text = text.replace('l', 'I')
return text
def add_to_dictionary(self, word, language):
d = None
if language is 'English':
d = self.EnglishDictionary
elif language is 'Chinese':
d = self.ChineseDictionary
if word not in d.keys():
d[word] = len(d.keys())
def save_dictionaries(self):
with open('Chinese_Dictionary.pkl', 'wb') as f:
pickle.dump(self.ChineseDictionary, f, pickle.HIGHEST_PROTOCOL)
f.close()
with open('English_Dictionary.pkl', 'wb') as f:
pickle.dump(self.EnglishDictionary, f, pickle.HIGHEST_PROTOCOL)
f.close()
def saveDatasets(self, dataset_count):
    """Pickle the current English/Chinese datasets as numbered files under pickle/.

    Args:
        dataset_count: integer suffix used in the output file names.
    """
    import os
    # The original raised IOError when the pickle/ directory did not exist.
    if not os.path.isdir('pickle'):
        os.makedirs('pickle')
    e_filename = "pickle/english_dataset_" + str(dataset_count) + ".pkl"
    c_filename = "pickle/chinese_dataset_" + str(dataset_count) + ".pkl"
    # with-statements replace the manual open/close pairs of the original.
    with open(e_filename, 'wb') as e_file:
        pickle.dump(self.EnglishDataset, e_file)
    with open(c_filename, 'wb') as c_file:
        pickle.dump(self.ChineseDataset, c_file)
def main():
    # Build the aligned dataset from the OPUS alignment file, then drop into
    # an interactive IPython shell (embed) so the result can be inspected.
    # NOTE(review): DatasetProcessor and embed are defined/imported elsewhere
    # in this file.
    dp = DatasetProcessor()
    dp.CreateDataset('en-zh_cn.xml')
    embed()
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| en | 0.927854 | # this will need to change based on different xml structures, but for our data set, this splits and tokenizes the sentences | 2.510254 | 3 |
game/game_play_window.py | ProgrammingGym/Simple-AI-Boggle-Game | 0 | 6615823 | <reponame>ProgrammingGym/Simple-AI-Boggle-Game<gh_stars>0
from models import *
from mutual import *
from solution_window import *
from boggle_game import introduction
def start_game():
    """Start (or restart) one round: build a solvable grid, create the UI
    widgets and run the frame/event loop until the round ends."""
    # Read the word list once and upper-case it to match the grid letters.
    # (The original opened the file without ever closing it.)
    with open("words_alpha.txt") as words_file:
        dictionary = [word.strip().upper() for word in words_file]
    # Reject grids with too many solutions: the side panel can only show
    # about 180 words.
    grid = Grid()
    n = get_number_all_possible_solution(grid, dictionary)  # possible words on this grid
    while n > 180:
        grid = Grid()
        n = get_number_all_possible_solution(grid, dictionary)
    player = Player()
    # Record which letters carry a score bonus on this grid.
    update_bonus_letters(grid)
    # UI widgets: the input box and the four buttons.
    text_box = TextBox(14, grid_off_set_y+4, grid_off_set_x-100, 50, notes_color, gray, 16, black, 16)
    enter_button = Button(grid_off_set_x-75, grid_off_set_y+4, 60, 50, beer, orange, 18, black, white, "Enter")
    main_menu_button = Button(grid_off_set_x, 6, 130, grid_off_set_y-15, beer, orange, 16, black, white, "Main Menu")
    show_solution_button = Button((width+grid_off_set_x)//2 - 130//2, 6, 130, grid_off_set_y-15, beer, orange, 16, black, white, "Show Solution")
    restart_button = Button(width-130-6, 6, 130, grid_off_set_y-15, beer, orange, 16, black, white, "Restart")
    flashing_index = 0  # frame counter driving the blinking cursor/cautions
    player.start_time = pygame.time.get_ticks()  # countdown reference point
    while True:
        if flashing_index > 60:
            flashing_index = 0
        flashing_index += 1
        manage_game(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button)
        manage_events(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button)
        clock.tick(60)
def get_number_all_possible_solution(grid, dictionary):
    """Count how many dictionary words can be traced on *grid*.

    Only words of 3..16 letters are considered.

    Args:
        grid (Grid): the main grid.
        dictionary ([str]): all valid words, upper-cased.

    Returns:
        int: number of traceable words.
    """
    # Invisible probe textbox reused for every candidate word.
    probe = TextBox(14, grid_off_set_y+4, grid_off_set_x-100, 50, notes_color, gray, 16, black, 16)
    count = 0
    for candidate in dictionary:
        if not 3 <= len(candidate) <= 16:
            continue
        probe.clear_textbox()
        probe.append_text(candidate)
        if are_all_letter_in_grid(grid, probe) and is_pattern_valid(grid, probe, []):
            count += 1
    return count
def manage_game(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button):
    """
    Redraw one complete frame of the round.

    Drawing order matters: background first, then toolbar, cubes, borders,
    the note panel and finally the widgets, before a single display update.

    Args:
        grid (Grid): the main grid
        player (Player): the player/user
        dictionary ([str]): all valid words, upper-cased
        text_box (TextBox): the input box the player types into
        flashing_index (int): frame counter used to blink the cursor/cautions
        enter_button (Button): submits the typed word
        main_menu_button (Button): returns to the main menu
        show_solution_button (Button): shows every possible word
        restart_button (Button): restarts with a fresh grid and timer
    """
    # background
    DISPLAY_SCREEN.fill(gray)
    # toolbar and its buttons
    draw_toolbar(main_menu_button, restart_button, show_solution_button)
    # letter cubes
    draw_cubes(grid)
    # grid external borders
    pygame.draw.rect(DISPLAY_SCREEN , black, (grid_off_set_x-2, grid_off_set_y-2, grid_width, grid_height), 6)
    draw_paper_screen()
    # countdown clock (may end the round when it hits zero)
    draw_timer(grid, player, dictionary, enter_button, main_menu_button)
    draw_correctly_guessed_words(grid, player, dictionary)
    draw_player_score(player)
    draw_text_box(text_box, flashing_index, enter_button)
    draw_input_match_grid(grid, text_box, flashing_index)
    # flip everything drawn this frame onto the screen
    pygame.display.update()
def draw_timer(grid, player, dictionary, enter_button, main_menu_button):
    """Render the countdown clock; end the round when it reaches zero.

    Args:
        grid (Grid): the main grid.
        player (Player): holds start/end times in milliseconds.
        dictionary ([str]): all valid words (forwarded to the solution screen).
        enter_button (Button): forwarded to the solution screen.
        main_menu_button (Button): forwarded to the solution screen.
    """
    player.time_since_start_in_milli_sec = pygame.time.get_ticks() - player.start_time
    remaining_time_in_milli_sec = player.end_time_in_milli_sec - player.time_since_start_in_milli_sec
    # Same arithmetic as minutes = t//1000//60, seconds = (t//1000) % 60.
    remaining_minutes, remaining_seconds = divmod(remaining_time_in_milli_sec // 1000, 60)
    font = pygame.font.Font("freesansbold.ttf", 24)
    rendered_text = font.render(f"Time Remaining = {remaining_minutes} : {remaining_seconds}", True, black)
    DISPLAY_SCREEN.blit(rendered_text, (50, 17))
    DISPLAY_SCREEN.blit(sand_timer_icon, (10, 12))  # hour-glass icon
    if is_time_over(remaining_time_in_milli_sec):
        # Time is up: jump straight to the solution screen.
        show_best_solution(grid, player, dictionary, enter_button, main_menu_button)
def draw_input_match_grid(grid, text_box, flashing_index):
    """Validate the typed word against the grid: highlight a valid path, or
    flash a caution message explaining what is wrong.

    Args:
        grid (Grid): the main grid.
        text_box (TextBox): the input box the player types into.
        flashing_index (int): frame counter used to blink the caution text.
    """
    if not text_box.text:
        # Nothing typed: clear any previous highlighting.
        for row in grid.cubes:
            for cube in row:
                cube.is_being_guessed = False
        return
    if not are_all_letter_in_grid(grid, text_box):
        draw_caution(flashing_index, "letters are not in grid!")
        return
    path = []  # filled by is_pattern_valid with the cubes of a valid trace
    if is_pattern_valid(grid, text_box, path):
        draw_path(grid, path)
    else:
        draw_caution(flashing_index, "path/pattern is not valid!")
def draw_caution(flashing_index, caution_message: str):
    """Flash *caution_message* in red on the note panel.

    The message is only blitted during half of every 30-frame cycle, which
    makes it blink.

    Args:
        flashing_index (int): frame counter driving the blink.
        caution_message (str): text describing what the player did wrong.
    """
    if flashing_index % 30 >= 15:
        return
    font = pygame.font.Font("freesansbold.ttf", 18)
    DISPLAY_SCREEN.blit(font.render(caution_message, True, ryb_red), (grid_off_set_x-270, 120))
    clock.tick(60)
def draw_text_box(text_box, flashing_index, enter_button):
    """Draw the input box, its blinking cursor, the character counter and
    the Enter button (highlighted while the mouse hovers over it).

    Args:
        text_box (TextBox): the input box the player types into.
        flashing_index (int): frame counter driving the cursor blink.
        enter_button (Button): submits the typed word.
    """
    text_box.blit(DISPLAY_SCREEN)
    if flashing_index % 30 < 15:  # cursor visible for half of each cycle
        text_box.blit_cursor(DISPLAY_SCREEN)
        clock.tick(60)
    text_box.blit_number_of_chars(DISPLAY_SCREEN)
    if enter_button.is_hovered_over(pygame.mouse.get_pos()):
        enter_button.blit_hovered_over(DISPLAY_SCREEN)
    else:
        enter_button.blit(DISPLAY_SCREEN, gray)
def draw_paper_screen():
    """
    draw the yellow screen/notebook/paper on the left.
    """
    # Grey border frame first, then the yellow "paper" fill inset inside it.
    pygame.draw.rect(DISPLAY_SCREEN , gray, (2, 2, grid_off_set_x-8, height-4), 6)
    pygame.draw.rect(DISPLAY_SCREEN , notes_color, (6, 6, grid_off_set_x-14, height-11))
def update_correctly_guessed_words(grid, player, dictionary):
    """Promote valid guesses into the player's list of correct words.

    A guess counts when it traces a valid path on the grid, is a real
    dictionary word, and has not been credited already; its score is added.

    Args:
        grid (Grid): the main grid.
        player (Player): the player/user.
        dictionary ([str]): all valid words, upper-cased.
    """
    # Invisible probe textbox reused to validate each guessed word.
    probe = TextBox(14, grid_off_set_y+4, grid_off_set_x-100, 50, notes_color, gray, 16, black, 16)
    for word in player.get_all_guessed_words():
        probe.text = word
        if (is_pattern_valid(grid, probe, [])
                and word in dictionary
                and word not in player.get_correctly_guessed_words()):
            player.add_to_correctly_guessed_words(word)
            update_score(player, word)
def is_time_over(remaining_time_in_milli_sec):
    """Return True when the countdown has expired.

    Args:
        remaining_time_in_milli_sec (int): remaining round time in milliseconds.

    Returns:
        bool: True if no time is left, otherwise False.
    """
    # `True if cond else False` was redundant: the comparison is already a bool.
    return remaining_time_in_milli_sec <= 0
def update_bonus_letters(grid):
    """Rebuild the module-level ``bonus_letters`` list from the cubes of
    *grid* that currently carry a bonus.

    Args:
        grid (Grid): the main grid.
    """
    global bonus_letters
    bonus_letters.clear()
    bonus_letters.extend(
        cube.text_str
        for row in grid.cubes
        for cube in row
        if cube.has_bonus
    )
def is_input_valid(player, text_box):
    """Check whether the typed word may be submitted.

    A word is accepted when it has at least 3 letters and has not been
    entered before; otherwise the matching caution message is flashed for
    one second and False is returned.

    Args:
        player (Player): the player/user.
        text_box (TextBox): the input box the player types into.

    Returns:
        bool: True if the word may be submitted, otherwise False.
    """
    word = text_box.text
    if len(word) > 2 and word not in player.get_all_guessed_words():
        return True
    # Both rejection reasons can apply; each shows its own message,
    # matching the original behaviour.
    if len(word) <= 2:
        _flash_rejection("go for 3 letters at least!")
    if word in player.get_all_guessed_words():
        _flash_rejection("word was entered before!")
    return False

def _flash_rejection(message):
    """Show a rejection *message* in the caution area for one second."""
    # Blank the caution area first so messages do not overdraw each other.
    pygame.draw.rect(DISPLAY_SCREEN, notes_color, (grid_off_set_x-270, 120, 240, 30))
    draw_caution(0, message)
    pygame.display.update()
    time.sleep(1)
def manage_events(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button):
    """
    Poll and handle every pending pygame event for the current frame.

    Args:
        grid (Grid): the main grid
        player (Player): the player/user
        dictionary ([str]): all valid words, upper-cased
        text_box (TextBox): the input box the player types into
        flashing_index (int): frame counter (kept for interface compatibility)
        enter_button (Button): submits the typed word
        main_menu_button (Button): returns to the main menu
        show_solution_button (Button): shows every possible word
        restart_button (Button): restarts with a fresh grid and timer
    """
    mouse_position = pygame.mouse.get_pos()
    for event in pygame.event.get():
        # Window close box: quit the whole program.
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        # Mouse: the four buttons.
        if event.type == pygame.MOUSEBUTTONDOWN:
            if enter_button.is_clicked(mouse_position, event) and is_input_valid(player, text_box):
                player.add_to_guessed_words(text_box.text)
                text_box.clear_textbox()
            elif main_menu_button.is_clicked(mouse_position, event):
                introduction()
            elif show_solution_button.is_clicked(mouse_position, event):
                show_best_solution(grid, player, dictionary, enter_button, main_menu_button)
            elif restart_button.is_clicked(mouse_position, event):
                start_game()
        # Keyboard input.
        if event.type == pygame.KEYDOWN:
            if pygame.K_a <= event.key <= pygame.K_z:
                # pygame.K_a..K_z equal the ASCII codes of 'a'..'z', so the
                # original's 26-branch elif chain collapses to one line.
                text_box.append_text(chr(event.key).upper())
            elif event.key == pygame.K_BACKSPACE:
                text_box.backspace()
            elif event.key == pygame.K_RETURN and is_input_valid(player, text_box):
                # Briefly show the Enter button as pressed, then store the word.
                enter_button.blit_hovered_over(DISPLAY_SCREEN)
                pygame.display.update()
                clock.tick(60)
                player.add_to_guessed_words(text_box.text)
                text_box.clear_textbox()
| from models import *
from mutual import *
from solution_window import *
from boggle_game import introduction
def start_game():
"""start the game
"""
# open the dictionary file and store all these words in a list and convert all words to upper case
words_file = open("words_alpha.txt")
dictionary = list([word.strip().upper() for word in words_file])
# make sure that the grid doesn't have too many possible words/solution, because the screen won't be big/wide enough to display all of them.
grid = Grid()
n = get_number_all_possible_solution(grid, dictionary) # n = number of all possible solutions/words
while n > 180:
grid = Grid()
n = get_number_all_possible_solution(grid, dictionary)
player = Player()
# check the bonus letters.
update_bonus_letters(grid)
# define the buttons and the text box.
text_box = TextBox(14, grid_off_set_y+4, grid_off_set_x-100, 50, notes_color, gray , 16, black, 16)
enter_button = Button(grid_off_set_x-75, grid_off_set_y+4, 60, 50, beer, orange, 18, black, white, "Enter")
main_menu_button = Button( grid_off_set_x, 6, 130, grid_off_set_y-15, beer, orange, 16, black, white, "Main Menu")
show_solution_button = Button( (width+grid_off_set_x)//2 - 130//2, 6, 130, grid_off_set_y-15, beer, orange, 16, black, white, "Show Solution")
restart_button = Button(width-130-6, 6, 130, grid_off_set_y-15, beer, orange, 16, black, white, "Restart")
flashing_index = 0 # to flash the cursor of the textbox and the caution messages intermittently
player.start_time = pygame.time.get_ticks() # to start the timer exactly from 3 minutes
while True:
if flashing_index > 60:
flashing_index = 0
flashing_index += 1
manage_game(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button)
manage_events(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button)
clock.tick(60)
def get_number_all_possible_solution(grid, dictionary):
"""
get number of all the possible solutions/words
Args:
grid (Grid): the main grid
dictionary ([str]): dictionary as a list of strings that has all the words
Returns:
int: number of all the possible solutions/words
"""
tmp_invisible_textbox = TextBox(14, grid_off_set_y+4, grid_off_set_x-100, 50, notes_color, gray , 16, black, 16)
n = 0
for word in dictionary:
if 3 <= len(word) <= 16:
tmp_invisible_textbox.clear_textbox()
tmp_invisible_textbox.append_text(word)
if are_all_letter_in_grid(grid, tmp_invisible_textbox) and is_pattern_valid(grid, tmp_invisible_textbox, []):
n += 1
return n
def manage_game(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button):
"""
main method of the game. To manage the whole game
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
enter_button (Button): button to enter/save the typed word in the text box
main_menu_button (Button): button to go to the main menu
show_solution_button (Button): button to display all the possible words/solutions. Defaults to None.
restart_button (Button): button to get a new grid and restart the time and the whole game
"""
# background
DISPLAY_SCREEN.fill(gray)
# toolbar and buttons:
draw_toolbar(main_menu_button, restart_button, show_solution_button)
# draw cubes
draw_cubes(grid)
# grid external borders
pygame.draw.rect(DISPLAY_SCREEN , black, (grid_off_set_x-2, grid_off_set_y-2, grid_width, grid_height), 6)
draw_paper_screen()
# draw time
draw_timer(grid, player, dictionary, enter_button, main_menu_button)
draw_correctly_guessed_words(grid, player, dictionary)
draw_player_score(player)
draw_text_box(text_box, flashing_index, enter_button)
draw_input_match_grid(grid, text_box, flashing_index)
pygame.display.update()
def draw_timer(grid, player, dictionary, enter_button, main_menu_button):
"""
calculate the remaining time and to draw it.
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
enter_button (Button): button to enter/save the typed word in the text box
main_menu_button (Button): button to go to the main menu
"""
player.time_since_start_in_milli_sec = pygame.time.get_ticks() - player.start_time
remaining_time_in_milli_sec = player.end_time_in_milli_sec-player.time_since_start_in_milli_sec
remaining_minutes = remaining_time_in_milli_sec//1000 //60
remaining_seconds = (remaining_time_in_milli_sec//1000) % 60
font = pygame.font.Font("freesansbold.ttf", 24)
rendered_text = font.render(f"Time Remaining = {remaining_minutes} : {remaining_seconds}" , True, black)
DISPLAY_SCREEN.blit(rendered_text, (50, 17))
DISPLAY_SCREEN.blit(sand_timer_icon, (10,12)) # sand timer icon]]
if is_time_over(remaining_time_in_milli_sec): # check if time is over.
show_best_solution(grid, player, dictionary, enter_button, main_menu_button)
def draw_input_match_grid(grid, text_box, flashing_index):
"""
manage the input and draw the cubes of the valid path with different colors
Args:
grid (Grid): the main grid
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
"""
if len(text_box.text) == 0:
for row in grid.cubes:
for cube in row:
cube.is_being_guessed = False
return
if not are_all_letter_in_grid(grid, text_box): # if there is any letter that doesn't exist in the grid, tell the player.
draw_caution(flashing_index, "letters are not in grid!")
return
found_path = [] # just to draw the cubes in different color if they are being guessed
if is_pattern_valid(grid, text_box, found_path):
draw_path(grid, found_path)
else: # if the pattern/ path is wrong, tell the player
draw_caution(flashing_index, "path/pattern is not valid!")
def draw_caution(flashing_index, caution_message:str):
"""
draw caution messages to tell the player what he has done wrong.
Args:
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
caution_message (str): a caution message to tell the player what he has done wrong.
"""
if flashing_index % 30 < 15:
font = pygame.font.Font("freesansbold.ttf", 18)
rendered_text = font.render(caution_message , True, ryb_red)
DISPLAY_SCREEN.blit(rendered_text, (grid_off_set_x-270, 120))
clock.tick(60)
def draw_text_box(text_box, flashing_index, enter_button):
"""
draw the text box, the cursor and the numbers of characters under it.
Args:
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
enter_button (Button): button to enter/save the typed word in the text box
"""
text_box.blit(DISPLAY_SCREEN)
if flashing_index % 30 < 15:
text_box.blit_cursor(DISPLAY_SCREEN)
clock.tick(60)
text_box.blit_number_of_chars(DISPLAY_SCREEN)
mouse_position = pygame.mouse.get_pos() # get the position of the mouse
if enter_button.is_hovered_over(mouse_position):
enter_button.blit_hovered_over(DISPLAY_SCREEN)
else:
enter_button.blit(DISPLAY_SCREEN, gray)
def draw_paper_screen():
"""
draw the yellow screen/notebook/paper on the left.
"""
pygame.draw.rect(DISPLAY_SCREEN , gray, (2, 2, grid_off_set_x-8, height-4), 6)
pygame.draw.rect(DISPLAY_SCREEN , notes_color, (6, 6, grid_off_set_x-14, height-11))
def update_correctly_guessed_words(grid, player, dictionary):
"""
update the list of the correctly typed words for the player
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
"""
tmp_invisible_textbox = TextBox(14, grid_off_set_y+4, grid_off_set_x-100, 50, notes_color, gray , 16, black, 16)
for word in player.get_all_guessed_words():
tmp_invisible_textbox.text = word
if (is_pattern_valid(grid, tmp_invisible_textbox, [])) and (word in dictionary) and (word not in player.get_correctly_guessed_words()):
player.add_to_correctly_guessed_words(word)
update_score(player, word)
def is_time_over(remaining_time_in_milli_sec):
"""
check whether the time is over or not.
Args:
remaining_time_in_milli_sec (int): the remaining time in milliseconds.
Returns:
boolean: True if remaining time is over. Otherwise, False.
"""
return True if remaining_time_in_milli_sec <= 0 else False
def update_bonus_letters(grid):
"""
iterate over the grid and if any bonus letter will be found, it will be added to the list of bonus letters.
Args:
grid (Grid): the main grid
"""
global bonus_letters
bonus_letters.clear()
for row in grid.cubes:
for cube in row:
if cube.has_bonus:
bonus_letters.append(cube.text_str)
def is_input_valid(player, text_box):
"""
check whether the word/characters that the player is trying to enter/save are valid or not.
Args:
player (Player): The Player/user
text_box (TextBox): the text box where the player/user types the words/characters
Returns:
boolean: True the word/characters that the player is trying to enter/save are valid. Otherwise, False.
"""
if len(text_box.text) > 2 and text_box.text not in player.get_all_guessed_words():
return True
else:
if len(text_box.text) <= 2:
pygame.draw.rect(DISPLAY_SCREEN, notes_color, (grid_off_set_x-270, 120, 240, 30))
draw_caution(0, "go for 3 letters at least!")
pygame.display.update()
time.sleep(1)
if text_box.text in player.get_all_guessed_words():
pygame.draw.rect(DISPLAY_SCREEN, notes_color, (grid_off_set_x-270, 120, 240, 30))
draw_caution(0, "word was entered before!")
pygame.display.update()
time.sleep(1)
return False
def manage_events(grid, player, dictionary, text_box, flashing_index, enter_button, main_menu_button, show_solution_button, restart_button):
"""
manage all possible events of the game.
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
enter_button (Button): button to enter/save the typed word in the text box
main_menu_button (Button): button to go to the main menu
show_solution_button (Button): button to display all the possible words/solutions. Defaults to None.
restart_button (Button): button to get a new grid and restart the time and the whole game
"""
mouse_position = pygame.mouse.get_pos() # get the position of the mouse
# check for events
for event in pygame.event.get():
# exit game when user/player clicks on the X icon of the displaying windows.
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# manage/handel the events of the mouse and the buttons of the game
if event.type == pygame.MOUSEBUTTONDOWN:
if enter_button.is_clicked(mouse_position, event) and is_input_valid(player, text_box):
player.add_to_guessed_words(text_box.text)
text_box.clear_textbox()
elif main_menu_button.is_clicked(mouse_position, event):
introduction()
elif show_solution_button.is_clicked(mouse_position, event):
show_best_solution(grid, player, dictionary, enter_button, main_menu_button)
elif restart_button.is_clicked(mouse_position, event):
start_game()
# manage/handel the events of the keys of the keyboard
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
text_box.append_text("A")
elif event.key == pygame.K_b:
text_box.append_text("B")
elif event.key == pygame.K_c:
text_box.append_text("C")
elif event.key == pygame.K_d:
text_box.append_text("D")
elif event.key == pygame.K_e:
text_box.append_text("E")
elif event.key == pygame.K_f:
text_box.append_text("F")
elif event.key == pygame.K_g:
text_box.append_text("G")
elif event.key == pygame.K_h:
text_box.append_text("H")
elif event.key == pygame.K_i:
text_box.append_text("I")
elif event.key == pygame.K_j:
text_box.append_text("J")
elif event.key == pygame.K_k:
text_box.append_text("K")
elif event.key == pygame.K_l:
text_box.append_text("L")
elif event.key == pygame.K_m:
text_box.append_text("M")
elif event.key == pygame.K_n:
text_box.append_text("N")
elif event.key == pygame.K_o:
text_box.append_text("O")
elif event.key == pygame.K_p:
text_box.append_text("P")
elif event.key == pygame.K_q:
text_box.append_text("Q")
elif event.key == pygame.K_r:
text_box.append_text("R")
elif event.key == pygame.K_s:
text_box.append_text("S")
elif event.key == pygame.K_t:
text_box.append_text("T")
elif event.key == pygame.K_u:
text_box.append_text("U")
elif event.key == pygame.K_v:
text_box.append_text("V")
elif event.key == pygame.K_w:
text_box.append_text("W")
elif event.key == pygame.K_x:
text_box.append_text("X")
elif event.key == pygame.K_y:
text_box.append_text("Y")
elif event.key == pygame.K_z:
text_box.append_text("Z")
# if the player pressed on the backspace key
elif event.key == pygame.K_BACKSPACE:
text_box.backspace()
# if the player pressed on the enter key
elif event.key == pygame.K_RETURN and is_input_valid(player, text_box):
enter_button.blit_hovered_over(DISPLAY_SCREEN)
pygame.display.update()
clock.tick(60)
player.add_to_guessed_words(text_box.text)
text_box.clear_textbox() | en | 0.834667 | start the game # open the dictionary file and store all these words in a list and convert all words to upper case # make sure that the grid doesn't have too many possible words/solution, because the screen won't be big/wide enough to display all of them. # n = number of all possible solutions/words # check the bonus letters. # define the buttons and the text box. # to flash the cursor of the textbox and the caution messages intermittently # to start the timer exactly from 3 minutes get number of all the possible solutions/words
Args:
grid (Grid): the main grid
dictionary ([str]): dictionary as a list of strings that has all the words
Returns:
int: number of all the possible solutions/words main method of the game. To manage the whole game
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
enter_button (Button): button to enter/save the typed word in the text box
main_menu_button (Button): button to go to the main menu
show_solution_button (Button): button to display all the possible words/solutions. Defaults to None.
restart_button (Button): button to get a new grid and restart the time and the whole game # background # toolbar and buttons: # draw cubes # grid external borders # draw time calculate the remaining time and to draw it.
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
enter_button (Button): button to enter/save the typed word in the text box
main_menu_button (Button): button to go to the main menu # sand timer icon]] # check if time is over. manage the input and draw the cubes of the valid path with different colors
Args:
grid (Grid): the main grid
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash # if there is any letter that doesn't exist in the grid, tell the player. # just to draw the cubes in different color if they are being guessed # if the pattern/ path is wrong, tell the player draw caution messages to tell the player what he has done wrong.
Args:
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
caution_message (str): a caution message to tell the player what he has done wrong. draw the text box, the cursor and the numbers of characters under it.
Args:
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
enter_button (Button): button to enter/save the typed word in the text box # get the position of the mouse draw the yellow screen/notebook/paper on the left. update the list of the correctly typed words for the player
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words check whether the time is over or not.
Args:
remaining_time_in_milli_sec (int): the remaining time in milliseconds.
Returns:
boolean: True if remaining time is over. Otherwise, False. iterate over the grid and if any bonus letter will be found, it will be added to the list of bonus letters.
Args:
grid (Grid): the main grid check whether the word/characters that the player is trying to enter/save are valid or not.
Args:
player (Player): The Player/user
text_box (TextBox): the text box where the player/user types the words/characters
Returns:
boolean: True the word/characters that the player is trying to enter/save are valid. Otherwise, False. manage all possible events of the game.
Args:
grid (Grid): the main grid
player (Player): The Player/user
dictionary ([str]): dictionary as a list of strings that has all the words
text_box (TextBox): the text box where the player/user types the words/characters
flashing_index (index): to draw the cautions messages in an intermittent wat => to make the cautions messages flash
enter_button (Button): button to enter/save the typed word in the text box
main_menu_button (Button): button to go to the main menu
show_solution_button (Button): button to display all the possible words/solutions. Defaults to None.
restart_button (Button): button to get a new grid and restart the time and the whole game # get the position of the mouse # check for events # exit game when user/player clicks on the X icon of the displaying windows. # manage/handel the events of the mouse and the buttons of the game # manage/handel the events of the keys of the keyboard # if the player pressed on the backspace key # if the player pressed on the enter key | 3.017366 | 3 |
Project_Euler/Problem 17 script.py | JasPass/Projects | 0 | 6615824 | # Project Euler: Problem 17
#
#
# If all the numbers from 1 to 1000 (one thousand)
# inclusive were written out in words, how many letters would be used?
import time
# Sets starting time of program
startTime = time.time()
# List containing the necessary numbers in words
words = [
[
'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'
], [
'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety'
]
]
# Variable to hold answer
ans = 0
# Loops through all numbers from 1 to 999 as words
for i in range(0, 10):
for j in range(0, 9):
for k in range(0, 20):
# Checks if we have [1-9] hundreds
if i:
# Checks if we have [20-90] tens
if j:
# Checks if we have [1-19] ones
if k:
# Only [1-9] ones are allowed here
if (k - 1) <= 8:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred' + 'and' + words[1][j - 1] + words[0][k - 1])
else:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred' + 'and' + words[1][j - 1])
else:
# Checks if we have [1-19] ones
if k:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred' + 'and' + words[0][k - 1])
else:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred')
else:
# Checks if we have [20-90] tens
if j:
# Checks if we have [1-19] ones
if k:
# Only [1-9] ones are allowed here
if (k - 1) <= 8:
# Adds length of word to (ans)
ans += len(words[1][j - 1] + words[0][k - 1])
else:
# Adds length of word to (ans)
ans += len(words[1][j - 1])
else:
# Checks if we have [1-19] ones
if k:
# Adds length of word to (ans)
ans += len(words[0][k - 1])
# Adds the last word to (ans)
ans += len('one' + 'thousand')
# Prints out the answer
print('The answer to Project Euler problem 17 is:', ans)
# Sets finishing time of program
stopTime = time.time()
# Prints the time it took the program to execute
print('The computation took', '%.2g' % (stopTime - startTime), 'seconds')
| # Project Euler: Problem 17
#
#
# If all the numbers from 1 to 1000 (one thousand)
# inclusive were written out in words, how many letters would be used?
import time
# Sets starting time of program
startTime = time.time()
# List containing the necessary numbers in words
words = [
[
'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'
], [
'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety'
]
]
# Variable to hold answer
ans = 0
# Loops through all numbers from 1 to 999 as words
for i in range(0, 10):
for j in range(0, 9):
for k in range(0, 20):
# Checks if we have [1-9] hundreds
if i:
# Checks if we have [20-90] tens
if j:
# Checks if we have [1-19] ones
if k:
# Only [1-9] ones are allowed here
if (k - 1) <= 8:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred' + 'and' + words[1][j - 1] + words[0][k - 1])
else:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred' + 'and' + words[1][j - 1])
else:
# Checks if we have [1-19] ones
if k:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred' + 'and' + words[0][k - 1])
else:
# Adds length of word to (ans)
ans += len(words[0][i - 1] + 'hundred')
else:
# Checks if we have [20-90] tens
if j:
# Checks if we have [1-19] ones
if k:
# Only [1-9] ones are allowed here
if (k - 1) <= 8:
# Adds length of word to (ans)
ans += len(words[1][j - 1] + words[0][k - 1])
else:
# Adds length of word to (ans)
ans += len(words[1][j - 1])
else:
# Checks if we have [1-19] ones
if k:
# Adds length of word to (ans)
ans += len(words[0][k - 1])
# Adds the last word to (ans)
ans += len('one' + 'thousand')
# Prints out the answer
print('The answer to Project Euler problem 17 is:', ans)
# Sets finishing time of program
stopTime = time.time()
# Prints the time it took the program to execute
print('The computation took', '%.2g' % (stopTime - startTime), 'seconds')
| en | 0.89954 | # Project Euler: Problem 17 # # # If all the numbers from 1 to 1000 (one thousand) # inclusive were written out in words, how many letters would be used? # Sets starting time of program # List containing the necessary numbers in words # Variable to hold answer # Loops through all numbers from 1 to 999 as words # Checks if we have [1-9] hundreds # Checks if we have [20-90] tens # Checks if we have [1-19] ones # Only [1-9] ones are allowed here # Adds length of word to (ans) # Adds length of word to (ans) # Checks if we have [1-19] ones # Adds length of word to (ans) # Adds length of word to (ans) # Checks if we have [20-90] tens # Checks if we have [1-19] ones # Only [1-9] ones are allowed here # Adds length of word to (ans) # Adds length of word to (ans) # Checks if we have [1-19] ones # Adds length of word to (ans) # Adds the last word to (ans) # Prints out the answer # Sets finishing time of program # Prints the time it took the program to execute | 3.627244 | 4 |
scholarly_citation_finder/apps/parser/AuthorParser.py | citationfinder/scholarly_citation_finder | 1 | 6615825 | <filename>scholarly_citation_finder/apps/parser/AuthorParser.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from django.db.utils import DataError
from scholarly_citation_finder.tools.nameparser.AuthorNameParser import AuthorNameParser
from scholarly_citation_finder.apps.core.models import Author, AuthorNameBlock, AuthorNameVariation
from scholarly_citation_finder.apps.parser.Exceptions import ParserDataError
logger = logging.getLogger(__name__)
class AuthorParser:
'''
Parse an author.
'''
def __init__(self, database):
'''
Create object.
:param database: Database name
'''
self.database = database
def parse(self, name):
'''
Parse an author.
:param name: Author name as string
'''
name = AuthorNameParser(name, normalize=True)
if name.title and not name.first:
name.first = name.title
#name.title = ''
name_middle = name.middle if name.middle else None
name_suffix = name.suffix if name.suffix else None
name_nickname = name.nickname if name.nickname else None
if name.last and name.first:
try:
# Get block
block, _ = AuthorNameBlock.objects.using(self.database).get_or_create(name='%s,%s' % (name.last, name.first[0]))
# Get or create name variation
variation = AuthorNameVariation.objects.using(self.database).filter(block_id=block.id,
first=name.first,
middle=name_middle,
last=name.last,
suffix=name_suffix,
nickname=name_nickname)[:1]
if variation:
return variation[0].author_id
else:
variation_short = AuthorNameVariation.objects.using(self.database).filter(block_id=block.id,
first=name.first[0],
middle=name_middle[0] if name_middle else None,
last=name.last)[:1]
if variation_short:
author_id = variation_short[0].author_id
else:
#name.capitalize()
author = Author.objects.using(self.database).create(name=str(name).title())
author_id = author.id
if len(name.first) > 1: # Otherwise this version was already stored above
self.__store_shortname_variation(block.id, author_id, name.first, name_middle, name.last)
AuthorNameVariation.objects.using(self.database).create(block_id=block.id,
author_id=author_id,
first=name.first,
middle=name_middle,
last=name.last,
suffix=name_suffix,
nickname=name_nickname)
return author_id
except(DataError) as e:
raise ParserDataError('Author name is invalid: %s' % str(e))
else:
raise ParserDataError('Author name has no last or first name: %s' % name)
def __store_shortname_variation(self, block_id, author_id, first, middle, last):
'''
Store the short version of the name variation.
:param block_id: ID of the block
:param author_id: ID of the author
:param first: First name
:param middle: Middle name
:param last: Last name
'''
middle = middle[0] if middle else None
AuthorNameVariation.objects.using(self.database).get_or_create(block_id=block_id,
author_id=author_id,
first=first[0],
middle=middle,
last=last) | <filename>scholarly_citation_finder/apps/parser/AuthorParser.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from django.db.utils import DataError
from scholarly_citation_finder.tools.nameparser.AuthorNameParser import AuthorNameParser
from scholarly_citation_finder.apps.core.models import Author, AuthorNameBlock, AuthorNameVariation
from scholarly_citation_finder.apps.parser.Exceptions import ParserDataError
logger = logging.getLogger(__name__)
class AuthorParser:
'''
Parse an author.
'''
def __init__(self, database):
'''
Create object.
:param database: Database name
'''
self.database = database
def parse(self, name):
'''
Parse an author.
:param name: Author name as string
'''
name = AuthorNameParser(name, normalize=True)
if name.title and not name.first:
name.first = name.title
#name.title = ''
name_middle = name.middle if name.middle else None
name_suffix = name.suffix if name.suffix else None
name_nickname = name.nickname if name.nickname else None
if name.last and name.first:
try:
# Get block
block, _ = AuthorNameBlock.objects.using(self.database).get_or_create(name='%s,%s' % (name.last, name.first[0]))
# Get or create name variation
variation = AuthorNameVariation.objects.using(self.database).filter(block_id=block.id,
first=name.first,
middle=name_middle,
last=name.last,
suffix=name_suffix,
nickname=name_nickname)[:1]
if variation:
return variation[0].author_id
else:
variation_short = AuthorNameVariation.objects.using(self.database).filter(block_id=block.id,
first=name.first[0],
middle=name_middle[0] if name_middle else None,
last=name.last)[:1]
if variation_short:
author_id = variation_short[0].author_id
else:
#name.capitalize()
author = Author.objects.using(self.database).create(name=str(name).title())
author_id = author.id
if len(name.first) > 1: # Otherwise this version was already stored above
self.__store_shortname_variation(block.id, author_id, name.first, name_middle, name.last)
AuthorNameVariation.objects.using(self.database).create(block_id=block.id,
author_id=author_id,
first=name.first,
middle=name_middle,
last=name.last,
suffix=name_suffix,
nickname=name_nickname)
return author_id
except(DataError) as e:
raise ParserDataError('Author name is invalid: %s' % str(e))
else:
raise ParserDataError('Author name has no last or first name: %s' % name)
def __store_shortname_variation(self, block_id, author_id, first, middle, last):
'''
Store the short version of the name variation.
:param block_id: ID of the block
:param author_id: ID of the author
:param first: First name
:param middle: Middle name
:param last: Last name
'''
middle = middle[0] if middle else None
AuthorNameVariation.objects.using(self.database).get_or_create(block_id=block_id,
author_id=author_id,
first=first[0],
middle=middle,
last=last) | en | 0.706455 | #!/usr/bin/python # -*- coding: utf-8 -*- Parse an author. Create object. :param database: Database name Parse an author. :param name: Author name as string #name.title = '' # Get block # Get or create name variation #name.capitalize() # Otherwise this version was already stored above Store the short version of the name variation. :param block_id: ID of the block :param author_id: ID of the author :param first: First name :param middle: Middle name :param last: Last name | 2.414461 | 2 |
openGaussBase/testcase/SQL/DDL/alter_function/Opengauss_Function_DDL_Alter_Function_Case0009.py | opengauss-mirror/Yat | 0 | 6615826 | <filename>openGaussBase/testcase/SQL/DDL/alter_function/Opengauss_Function_DDL_Alter_Function_Case0009.py<gh_stars>0
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type :
Case Name : 修改函数的新所有者,新所有者已存在
Description :
1.创建函数
2.查看函数的proowner
3.创建新用户
4.修改函数的所有者为test_u1
5.修改函数的所有者为test_u1
6.查看函数的所有者
7.删除函数和用户
Expect :
1.创建函数成功
2.查看函数的proowner成功
3.创建新用户成功
4.修改函数的所有者为test_u1成功
5.修改函数的所有者为test_u1成功
6.函数的所有者为test_u1
7.删除函数和用户成功
History :添加marco文件
"""
import sys
import unittest
from yat.test import macro
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Privategrant(unittest.TestCase):
def setUp(self):
logger.info('------------------------Opengauss_Function_DDL_Alter_Function_Case0009开始执行-----------------------------')
def test_sysadmin_user_permission(self):
sql_cmd1 = commonsh.execut_db_sql('''drop FUNCTION if EXISTS u_testfun60(c_int int);''')
logger.info(sql_cmd1)
self.assertIn(constant.DROP_FUNCTION_SUCCESS_MSG, sql_cmd1)
sql_cmd2 = commonsh.execut_db_sql('''
CREATE FUNCTION u_testfun60 (INOUT c_int int) RETURNS int AS \$\$
BEGIN
RETURN (c_int);
END;
\$\$ LANGUAGE plpgsql;''')
logger.info(sql_cmd2)
self.assertIn(constant.CREATE_FUNCTION_SUCCESS_MSG, sql_cmd2)
sql_cmd3 = commonsh.execut_db_sql('''select proowner,proname from pg_proc where proname='u_testfun60';''')
logger.info(sql_cmd3)
self.assertIn('u_testfun60', sql_cmd3)
sql = f'''drop user if exists test_u1 cascade;
create user test_u1 with password '{<PASSWORD>}';'''
sql_cmd4 = commonsh.execut_db_sql(sql)
logger.info(sql_cmd4)
self.assertIn(constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd4)
sql_cmd5 = commonsh.execut_db_sql('''ALTER FUNCTION u_testfun60 ( INOUT c_int int) owner to test_u1; ''')
logger.info(sql_cmd5)
self.assertIn(constant.ALTER_FUNCTION_SUCCESS_MSG, sql_cmd5)
sql_cmd6 = commonsh.execut_db_sql('''select proowner,proname from pg_proc where proname='u_testfun60';''')
logger.info(sql_cmd6)
self.assertIn('u_testfun60', sql_cmd6)
# 清理环境
def tearDown(self):
logger.info('----------this is teardown-------')
sql_cmd7 = commonsh.execut_db_sql(''' drop FUNCTION u_testfun60;
drop user if exists test_u1 cascade;''')
logger.info(sql_cmd7)
logger.info('------------------------Opengauss_Function_DDL_Alter_Function_Case0009执行结束--------------------------')
| <filename>openGaussBase/testcase/SQL/DDL/alter_function/Opengauss_Function_DDL_Alter_Function_Case0009.py<gh_stars>0
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type :
Case Name : 修改函数的新所有者,新所有者已存在
Description :
1.创建函数
2.查看函数的proowner
3.创建新用户
4.修改函数的所有者为test_u1
5.修改函数的所有者为test_u1
6.查看函数的所有者
7.删除函数和用户
Expect :
1.创建函数成功
2.查看函数的proowner成功
3.创建新用户成功
4.修改函数的所有者为test_u1成功
5.修改函数的所有者为test_u1成功
6.函数的所有者为test_u1
7.删除函数和用户成功
History :添加marco文件
"""
import sys
import unittest
from yat.test import macro
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Privategrant(unittest.TestCase):
def setUp(self):
logger.info('------------------------Opengauss_Function_DDL_Alter_Function_Case0009开始执行-----------------------------')
def test_sysadmin_user_permission(self):
sql_cmd1 = commonsh.execut_db_sql('''drop FUNCTION if EXISTS u_testfun60(c_int int);''')
logger.info(sql_cmd1)
self.assertIn(constant.DROP_FUNCTION_SUCCESS_MSG, sql_cmd1)
sql_cmd2 = commonsh.execut_db_sql('''
CREATE FUNCTION u_testfun60 (INOUT c_int int) RETURNS int AS \$\$
BEGIN
RETURN (c_int);
END;
\$\$ LANGUAGE plpgsql;''')
logger.info(sql_cmd2)
self.assertIn(constant.CREATE_FUNCTION_SUCCESS_MSG, sql_cmd2)
sql_cmd3 = commonsh.execut_db_sql('''select proowner,proname from pg_proc where proname='u_testfun60';''')
logger.info(sql_cmd3)
self.assertIn('u_testfun60', sql_cmd3)
sql = f'''drop user if exists test_u1 cascade;
create user test_u1 with password '{<PASSWORD>}';'''
sql_cmd4 = commonsh.execut_db_sql(sql)
logger.info(sql_cmd4)
self.assertIn(constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd4)
sql_cmd5 = commonsh.execut_db_sql('''ALTER FUNCTION u_testfun60 ( INOUT c_int int) owner to test_u1; ''')
logger.info(sql_cmd5)
self.assertIn(constant.ALTER_FUNCTION_SUCCESS_MSG, sql_cmd5)
sql_cmd6 = commonsh.execut_db_sql('''select proowner,proname from pg_proc where proname='u_testfun60';''')
logger.info(sql_cmd6)
self.assertIn('u_testfun60', sql_cmd6)
# 清理环境
def tearDown(self):
logger.info('----------this is teardown-------')
sql_cmd7 = commonsh.execut_db_sql(''' drop FUNCTION u_testfun60;
drop user if exists test_u1 cascade;''')
logger.info(sql_cmd7)
logger.info('------------------------Opengauss_Function_DDL_Alter_Function_Case0009执行结束--------------------------')
| en | 0.280916 | Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. Case Type : Case Name : 修改函数的新所有者,新所有者已存在 Description : 1.创建函数 2.查看函数的proowner 3.创建新用户 4.修改函数的所有者为test_u1 5.修改函数的所有者为test_u1 6.查看函数的所有者 7.删除函数和用户 Expect : 1.创建函数成功 2.查看函数的proowner成功 3.创建新用户成功 4.修改函数的所有者为test_u1成功 5.修改函数的所有者为test_u1成功 6.函数的所有者为test_u1 7.删除函数和用户成功 History :添加marco文件 drop FUNCTION if EXISTS u_testfun60(c_int int); CREATE FUNCTION u_testfun60 (INOUT c_int int) RETURNS int AS \$\$ BEGIN RETURN (c_int); END; \$\$ LANGUAGE plpgsql; select proowner,proname from pg_proc where proname='u_testfun60'; drop user if exists test_u1 cascade; create user test_u1 with password '{<PASSWORD>}'; ALTER FUNCTION u_testfun60 ( INOUT c_int int) owner to test_u1; select proowner,proname from pg_proc where proname='u_testfun60'; # 清理环境 drop FUNCTION u_testfun60; drop user if exists test_u1 cascade; | 1.955641 | 2 |
meiduo_mall/meiduo_mall/apps/meiduo_admin/views/statistical.py | Yingguang-93/web | 0 | 6615827 | <reponame>Yingguang-93/web
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAdminUser
from rest_framework import mixins
from django.utils import timezone
from goods.models import GoodsVisitCount
from meiduo_admin.serializers.statistical import GoodsVisitSerializer
from users.models import User
# GET /meiduo_admin/statistical/total_count/
class TotalCountView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
获取网站用户的总数:
1. 查询数据统计网站用户的总数
2. 返回响应
"""
# 1. 查询数据统计网站用户的总数
count = User.objects.count()
# 2. 返回响应
now_date = timezone.now() # 年-月-日 时:分:秒
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/day_increment/
class DayIncrementView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站日增用户数量:
1. 查询数据库统计网站日增用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站日增用户数量
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(date_joined__gte=now_date).count()
# 2. 返回响应
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/day_active/
class DayActiveView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站日活跃用户数量:
1. 查询数据库统计网站日活跃用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站日活跃用户数量
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(last_login__gte=now_date).count()
# 2. 返回响应
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/day_orders/
class DayOrdersView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站日下单用户数量:
1. 查询数据库统计网站日下单用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站日下单用户数量
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(orders__create_time__gte=now_date).distinct().count()
# 2. 返回响应
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/month_increment/
class MonthIncrementView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站最近30天每天新增用户数量:
1. 查询数据库统计网站最近30天每天新增用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站最近30天每天新增用户数量
# 时间范围:当天时间-29天 <-> 当天时间
# 结束时间
end_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
# 起始时间
begin_date = end_date - timezone.timedelta(days=29)
# 数据统计
cur_date = begin_date
month_li = []
while cur_date <= end_date:
# 统计cur_date这一天新增用户数据
next_date = cur_date + timezone.timedelta(days=1)
count = User.objects.filter(date_joined__gte=cur_date, date_joined__lt=next_date).count()
# 添加数据
month_li.append({
'count': count,
'date': cur_date.date()
})
# 当前日期向后加1天
cur_date = next_date
# 2. 返回响应
return Response(month_li)
# GET /meiduo_admin/statistical/goods_day_views/
class GoodsDayViewsView(ListAPIView):
permission_classes = [IsAdminUser]
# 指定视图使用的序列化器类
serializer_class = GoodsVisitSerializer
# queryset = GoodsVisitCount.objects.filter(date=now_date)
def get_queryset(self):
now_date = timezone.now().date()
return GoodsVisitCount.objects.filter(date=now_date) # QuerySet
# 关闭分页
pagination_class = None
# def get(self, request):
# return self.list(request)
# def get(self, request):
# """
# 获取日分类商品的访问量:
# 1. 查询数据库获取当天日分类商品访问的数据
# 2. 将数据序列化并返回
# """
# # 1. 查询数据库获取当天日分类商品访问的数据
# g_visits = self.get_queryset()
#
# # 2. 将数据序列化并返回
# serializer = self.get_serializer(g_visits, many=True)
# return Response(serializer.data)
| from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAdminUser
from rest_framework import mixins
from django.utils import timezone
from goods.models import GoodsVisitCount
from meiduo_admin.serializers.statistical import GoodsVisitSerializer
from users.models import User
# GET /meiduo_admin/statistical/total_count/
class TotalCountView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
获取网站用户的总数:
1. 查询数据统计网站用户的总数
2. 返回响应
"""
# 1. 查询数据统计网站用户的总数
count = User.objects.count()
# 2. 返回响应
now_date = timezone.now() # 年-月-日 时:分:秒
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/day_increment/
class DayIncrementView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站日增用户数量:
1. 查询数据库统计网站日增用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站日增用户数量
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(date_joined__gte=now_date).count()
# 2. 返回响应
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/day_active/
class DayActiveView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站日活跃用户数量:
1. 查询数据库统计网站日活跃用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站日活跃用户数量
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(last_login__gte=now_date).count()
# 2. 返回响应
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/day_orders/
class DayOrdersView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站日下单用户数量:
1. 查询数据库统计网站日下单用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站日下单用户数量
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(orders__create_time__gte=now_date).distinct().count()
# 2. 返回响应
response_data = {
'count': count,
'date': now_date.date()
}
return Response(response_data)
# GET /meiduo_admin/statistical/month_increment/
class MonthIncrementView(APIView):
# 仅管理员才能进行访问
permission_classes = [IsAdminUser]
def get(self, request):
"""
统计网站最近30天每天新增用户数量:
1. 查询数据库统计网站最近30天每天新增用户数量
2. 返回响应
"""
# 1. 查询数据库统计网站最近30天每天新增用户数量
# 时间范围:当天时间-29天 <-> 当天时间
# 结束时间
end_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
# 起始时间
begin_date = end_date - timezone.timedelta(days=29)
# 数据统计
cur_date = begin_date
month_li = []
while cur_date <= end_date:
# 统计cur_date这一天新增用户数据
next_date = cur_date + timezone.timedelta(days=1)
count = User.objects.filter(date_joined__gte=cur_date, date_joined__lt=next_date).count()
# 添加数据
month_li.append({
'count': count,
'date': cur_date.date()
})
# 当前日期向后加1天
cur_date = next_date
# 2. 返回响应
return Response(month_li)
# GET /meiduo_admin/statistical/goods_day_views/
class GoodsDayViewsView(ListAPIView):
permission_classes = [IsAdminUser]
# 指定视图使用的序列化器类
serializer_class = GoodsVisitSerializer
# queryset = GoodsVisitCount.objects.filter(date=now_date)
def get_queryset(self):
now_date = timezone.now().date()
return GoodsVisitCount.objects.filter(date=now_date) # QuerySet
# 关闭分页
pagination_class = None
# def get(self, request):
# return self.list(request)
# def get(self, request):
# """
# 获取日分类商品的访问量:
# 1. 查询数据库获取当天日分类商品访问的数据
# 2. 将数据序列化并返回
# """
# # 1. 查询数据库获取当天日分类商品访问的数据
# g_visits = self.get_queryset()
#
# # 2. 将数据序列化并返回
# serializer = self.get_serializer(g_visits, many=True)
# return Response(serializer.data) | zh | 0.883933 | # GET /meiduo_admin/statistical/total_count/ # 仅管理员才能进行访问 获取网站用户的总数: 1. 查询数据统计网站用户的总数 2. 返回响应 # 1. 查询数据统计网站用户的总数 # 2. 返回响应 # 年-月-日 时:分:秒 # GET /meiduo_admin/statistical/day_increment/ # 仅管理员才能进行访问 统计网站日增用户数量: 1. 查询数据库统计网站日增用户数量 2. 返回响应 # 1. 查询数据库统计网站日增用户数量 # 2. 返回响应 # GET /meiduo_admin/statistical/day_active/ # 仅管理员才能进行访问 统计网站日活跃用户数量: 1. 查询数据库统计网站日活跃用户数量 2. 返回响应 # 1. 查询数据库统计网站日活跃用户数量 # 2. 返回响应 # GET /meiduo_admin/statistical/day_orders/ # 仅管理员才能进行访问 统计网站日下单用户数量: 1. 查询数据库统计网站日下单用户数量 2. 返回响应 # 1. 查询数据库统计网站日下单用户数量 # 2. 返回响应 # GET /meiduo_admin/statistical/month_increment/ # 仅管理员才能进行访问 统计网站最近30天每天新增用户数量: 1. 查询数据库统计网站最近30天每天新增用户数量 2. 返回响应 # 1. 查询数据库统计网站最近30天每天新增用户数量 # 时间范围:当天时间-29天 <-> 当天时间 # 结束时间 # 起始时间 # 数据统计 # 统计cur_date这一天新增用户数据 # 添加数据 # 当前日期向后加1天 # 2. 返回响应 # GET /meiduo_admin/statistical/goods_day_views/ # 指定视图使用的序列化器类 # queryset = GoodsVisitCount.objects.filter(date=now_date) # QuerySet # 关闭分页 # def get(self, request): # return self.list(request) # def get(self, request): # """ # 获取日分类商品的访问量: # 1. 查询数据库获取当天日分类商品访问的数据 # 2. 将数据序列化并返回 # """ # # 1. 查询数据库获取当天日分类商品访问的数据 # g_visits = self.get_queryset() # # # 2. 将数据序列化并返回 # serializer = self.get_serializer(g_visits, many=True) # return Response(serializer.data) | 1.977655 | 2 |
10/01/02/5.py | pylangstudy/201707 | 0 | 6615828 | import time
class MyClass:
def __del__(self): print('MyClass.__del__')
c1 = MyClass()
del c1
time.sleep(2)
c2 = MyClass()
| import time
class MyClass:
def __del__(self): print('MyClass.__del__')
c1 = MyClass()
del c1
time.sleep(2)
c2 = MyClass()
| none | 1 | 3.14854 | 3 | |
COVID Admin Level1 Mozambique/Mozambique_Admin1.py | vrautenbach/wazimap-adh-data | 0 | 6615829 | #!/usr/bin/env python
# coding: utf-8
# In[27]:
import pandas as pd
import glob
import numpy as np
from pandas_profiling import ProfileReport
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# Loading Mozambique Admin Level 1 boundaries
data = pd.read_excel ('mozambique-covid-19-cases.xlsx', engine='openpyxl')
df = pd.DataFrame(data)
df = df.iloc[1:]
# Loading Africa Admin Level 1 boundaries
FILE_LOCATION = "africa_admin1.csv"
admin = pd.read_csv(FILE_LOCATION, delimiter=",")
admin = admin.rename(columns={'parent_cod': 'ISO_3'})
# In[28]:
# Convert text to GADM code
df['ISO_3'] = 'MOZ'
merged_df = df.merge(admin, on='ISO_3')
merged_df['fuzzy_ratio'] = merged_df.apply(lambda row: fuzz.ratio(row['Province'], row['name']), axis=1)
mask = (merged_df['fuzzy_ratio']>80)
moz = merged_df[mask]
moz = moz.drop("ISO_3", 1)
moz = moz.drop("Province", 1)
moz = moz.drop("name", 1)
moz = moz.drop("area", 1)
moz = moz.drop("fuzzy_ratio", 1)
moz = moz.rename(columns={'Positive Cases': 'Cases', 'Recovered': 'Recoveries', 'code' : 'Geography'})
# In[15]:
# Calculate totals per monthly and transform data into wazi format
covid_monthly = (moz.groupby([pd.Grouper(key='Date', freq='MS'), 'Geography'])['Cases', 'Deaths', 'Recoveries']
.sum()
.reset_index())
covid_monthly = covid_monthly.melt(id_vars=["Geography", "Date"],
var_name="Indicator",
value_name="Count")
covid_monthly['Date']= covid_monthly['Date'].dt.strftime('%b %Y')
# covid_monthly = covid_monthly.astype(object).replace(np.nan, 'Null')
cases_monthly = covid_monthly[covid_monthly["Indicator"].isin(['Cases'])]
deaths_monthly = covid_monthly[covid_monthly["Indicator"].isin(['Deaths'])]
recoveries_monthly = covid_monthly[covid_monthly["Indicator"].isin(['Recoveries'])]
cases_monthly = cases_monthly.drop("Indicator", 1)
deaths_monthly = deaths_monthly.drop("Indicator", 1)
recoveries_monthly = recoveries_monthly.drop("Indicator", 1)
cases_monthly = cases_monthly[cases_monthly['Count'].notna()]
deaths_monthly = deaths_monthly[deaths_monthly['Count'].notna()]
recoveries_monthly = recoveries_monthly[recoveries_monthly['Count'].notna()]
cases_monthly.to_csv(r'./output/moz_cases_monthly.csv', index = False, sep=',')
deaths_monthly.to_csv(r'./output/moz_deaths_monthly.csv', index = False, sep=',')
recoveries_monthly.to_csv(r'./output/moz_recoveries_monthly.csv', index = False, sep=',')
# In[30]:
moz['Date'] = pd.to_datetime(moz['Date'], errors='coerce')
moz['Cases'] = pd.to_numeric(moz['Cases'],errors='coerce')
moz['Deaths'] = pd.to_numeric(moz['Deaths'],errors='coerce')
moz['Recoveries'] = pd.to_numeric(moz['Recoveries'],errors='coerce')
# Calculate average per week from January 2021 and transform data into wazi format
start_date = '2021-01-01'
end_date = '2021-04-08'
mask = (moz['Date'] > start_date) & (moz['Date'] <= end_date)
covid_weekly = moz.loc[mask]
cases_weekly = covid_weekly[["Date", "Geography", "Cases"]]
cases_weekly = cases_weekly[cases_weekly['Cases'].notna()]
cases_weekly = (cases_weekly.groupby([pd.Grouper(key='Date', freq='W'), 'Geography'])['Cases']
.sum()
.reset_index())
cases_weekly = cases_weekly.melt(id_vars=["Geography", "Date"],
var_name="Indicator",
value_name="Count")
cases_weekly['Date']= cases_weekly['Date'].dt.strftime('2021-WN%U')
cases_weekly = cases_weekly.drop("Indicator", 1)
cases_weekly.to_csv(r'./output/moz_cases_weekly.csv', index = False, sep=',')
covid_weekly = (covid_weekly.groupby([pd.Grouper(key='Date', freq='W'), 'Geography'])['Cases', 'Deaths', 'Recoveries']
.mean().round(0)
.reset_index())
covid_weekly = covid_weekly.melt(id_vars=["Geography", "Date"],
var_name="Indicator",
value_name="Count")
covid_weekly['Date']= covid_weekly['Date'].dt.strftime('2021-WN%U')
# covid_weekly = covid_weekly.astype(object).replace(np.nan, 'Null')
cases_weekly = covid_weekly[covid_weekly["Indicator"].isin(['Cases'])]
deaths_weekly = covid_weekly[covid_weekly["Indicator"].isin(['Deaths'])]
recoveries_weekly = covid_weekly[covid_weekly["Indicator"].isin(['Recoveries'])]
cases_weekly = cases_weekly.drop("Indicator", 1)
deaths_weekly = deaths_weekly.drop("Indicator", 1)
recoveries_weekly = recoveries_weekly.drop("Indicator", 1)
cases_weekly = cases_weekly[cases_weekly['Count'].notna()]
deaths_weekly = deaths_weekly[deaths_weekly['Count'].notna()]
recoveries_weekly = recoveries_weekly[recoveries_weekly['Count'].notna()]
cases_weekly.to_csv(r'./output/moz_cases_weekly.csv', index = False, sep=',')
deaths_weekly.to_csv(r'./output/moz_deaths_weekly.csv', index = False, sep=',')
recoveries_weekly.to_csv(r'./output/moz_recoveries_weekly.csv', index = False, sep=',')
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# In[27]:
import pandas as pd
import glob
import numpy as np
from pandas_profiling import ProfileReport
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# Loading Mozambique Admin Level 1 boundaries
data = pd.read_excel ('mozambique-covid-19-cases.xlsx', engine='openpyxl')
df = pd.DataFrame(data)
df = df.iloc[1:]
# Loading Africa Admin Level 1 boundaries
FILE_LOCATION = "africa_admin1.csv"
admin = pd.read_csv(FILE_LOCATION, delimiter=",")
admin = admin.rename(columns={'parent_cod': 'ISO_3'})
# In[28]:
# Convert text to GADM code
df['ISO_3'] = 'MOZ'
merged_df = df.merge(admin, on='ISO_3')
merged_df['fuzzy_ratio'] = merged_df.apply(lambda row: fuzz.ratio(row['Province'], row['name']), axis=1)
mask = (merged_df['fuzzy_ratio']>80)
moz = merged_df[mask]
moz = moz.drop("ISO_3", 1)
moz = moz.drop("Province", 1)
moz = moz.drop("name", 1)
moz = moz.drop("area", 1)
moz = moz.drop("fuzzy_ratio", 1)
moz = moz.rename(columns={'Positive Cases': 'Cases', 'Recovered': 'Recoveries', 'code' : 'Geography'})
# In[15]:
# Calculate totals per monthly and transform data into wazi format
covid_monthly = (moz.groupby([pd.Grouper(key='Date', freq='MS'), 'Geography'])['Cases', 'Deaths', 'Recoveries']
.sum()
.reset_index())
covid_monthly = covid_monthly.melt(id_vars=["Geography", "Date"],
var_name="Indicator",
value_name="Count")
covid_monthly['Date']= covid_monthly['Date'].dt.strftime('%b %Y')
# covid_monthly = covid_monthly.astype(object).replace(np.nan, 'Null')
cases_monthly = covid_monthly[covid_monthly["Indicator"].isin(['Cases'])]
deaths_monthly = covid_monthly[covid_monthly["Indicator"].isin(['Deaths'])]
recoveries_monthly = covid_monthly[covid_monthly["Indicator"].isin(['Recoveries'])]
cases_monthly = cases_monthly.drop("Indicator", 1)
deaths_monthly = deaths_monthly.drop("Indicator", 1)
recoveries_monthly = recoveries_monthly.drop("Indicator", 1)
cases_monthly = cases_monthly[cases_monthly['Count'].notna()]
deaths_monthly = deaths_monthly[deaths_monthly['Count'].notna()]
recoveries_monthly = recoveries_monthly[recoveries_monthly['Count'].notna()]
cases_monthly.to_csv(r'./output/moz_cases_monthly.csv', index = False, sep=',')
deaths_monthly.to_csv(r'./output/moz_deaths_monthly.csv', index = False, sep=',')
recoveries_monthly.to_csv(r'./output/moz_recoveries_monthly.csv', index = False, sep=',')
# In[30]:
moz['Date'] = pd.to_datetime(moz['Date'], errors='coerce')
moz['Cases'] = pd.to_numeric(moz['Cases'],errors='coerce')
moz['Deaths'] = pd.to_numeric(moz['Deaths'],errors='coerce')
moz['Recoveries'] = pd.to_numeric(moz['Recoveries'],errors='coerce')
# Calculate average per week from January 2021 and transform data into wazi format
start_date = '2021-01-01'
end_date = '2021-04-08'
mask = (moz['Date'] > start_date) & (moz['Date'] <= end_date)
covid_weekly = moz.loc[mask]
cases_weekly = covid_weekly[["Date", "Geography", "Cases"]]
cases_weekly = cases_weekly[cases_weekly['Cases'].notna()]
cases_weekly = (cases_weekly.groupby([pd.Grouper(key='Date', freq='W'), 'Geography'])['Cases']
.sum()
.reset_index())
cases_weekly = cases_weekly.melt(id_vars=["Geography", "Date"],
var_name="Indicator",
value_name="Count")
cases_weekly['Date']= cases_weekly['Date'].dt.strftime('2021-WN%U')
cases_weekly = cases_weekly.drop("Indicator", 1)
cases_weekly.to_csv(r'./output/moz_cases_weekly.csv', index = False, sep=',')
covid_weekly = (covid_weekly.groupby([pd.Grouper(key='Date', freq='W'), 'Geography'])['Cases', 'Deaths', 'Recoveries']
.mean().round(0)
.reset_index())
covid_weekly = covid_weekly.melt(id_vars=["Geography", "Date"],
var_name="Indicator",
value_name="Count")
covid_weekly['Date']= covid_weekly['Date'].dt.strftime('2021-WN%U')
# covid_weekly = covid_weekly.astype(object).replace(np.nan, 'Null')
cases_weekly = covid_weekly[covid_weekly["Indicator"].isin(['Cases'])]
deaths_weekly = covid_weekly[covid_weekly["Indicator"].isin(['Deaths'])]
recoveries_weekly = covid_weekly[covid_weekly["Indicator"].isin(['Recoveries'])]
cases_weekly = cases_weekly.drop("Indicator", 1)
deaths_weekly = deaths_weekly.drop("Indicator", 1)
recoveries_weekly = recoveries_weekly.drop("Indicator", 1)
cases_weekly = cases_weekly[cases_weekly['Count'].notna()]
deaths_weekly = deaths_weekly[deaths_weekly['Count'].notna()]
recoveries_weekly = recoveries_weekly[recoveries_weekly['Count'].notna()]
cases_weekly.to_csv(r'./output/moz_cases_weekly.csv', index = False, sep=',')
deaths_weekly.to_csv(r'./output/moz_deaths_weekly.csv', index = False, sep=',')
recoveries_weekly.to_csv(r'./output/moz_recoveries_weekly.csv', index = False, sep=',')
# In[ ]:
| en | 0.689655 | #!/usr/bin/env python # coding: utf-8 # In[27]: # Loading Mozambique Admin Level 1 boundaries # Loading Africa Admin Level 1 boundaries # In[28]: # Convert text to GADM code # In[15]: # Calculate totals per monthly and transform data into wazi format # covid_monthly = covid_monthly.astype(object).replace(np.nan, 'Null') # In[30]: # Calculate average per week from January 2021 and transform data into wazi format # covid_weekly = covid_weekly.astype(object).replace(np.nan, 'Null') # In[ ]: | 2.51247 | 3 |
check_md5/cli/check_md5.py | Smithsonian/MassDigi-scripts | 2 | 6615830 | #!/usr/bin/env python3
#
# Check MD5 hashes
# Version 0.1
#
# 19 Dec 2019
#
# Digitization Program Office,
# Office of the Chief Information Officer,
# Smithsonian Institution
# https://dpo.si.edu
#
# Import modules
import urllib.request
from time import localtime, strftime
import pandas as pd
import locale, logging, os, glob, glob, sys, shutil, hashlib
from pathlib import Path
from functools import partial
from timeit import default_timer as timer
from pathlib import Path
from tqdm import tqdm
from pyfiglet import Figlet
# Script variables
script_title = "Check MD5 Tool"
subtitle = "Digitization Program Office\nOffice of the Chief Information Officer\nSmithsonian Institution\nhttps://dpo.si.edu"
ver = "0.1"
vercheck = "https://raw.githubusercontent.com/Smithsonian/MassDigi-tools/master/check_md5/toolversion.txt"
repo = "https://github.com/Smithsonian/MassDigi-tools/"
lic = "Available under the Apache 2.0 License"
# Set locale to UTF-8
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
# Get current time
current_time = strftime("%Y%m%d_%H%M%S", localtime())
# Check args
if len(sys.argv) == 1:
sys.exit("Missing path")
if len(sys.argv) > 2:
sys.exit("Script takes a single argument")
# Check for updates to the script
try:
with urllib.request.urlopen(vercheck) as response:
current_ver = response.read()
cur_ver = current_ver.decode('ascii').replace('\n', '')
if cur_ver != ver:
msg_text = "{subtitle}\n\n{repo}\n{lic}\n\nver. {ver}\nThis version is outdated. Current version is {cur_ver}.\nPlease download the updated version at: {repo}"
else:
msg_text = "{subtitle}\n\n{repo}\n{lic}\n\nver. {ver}"
except:
msg_text = "{subtitle}\n\n{repo}\n{lic}\n\nver. {ver}"
cur_ver = ver
f = Figlet(font='slant')
print("\n")
print(f.renderText(script_title))
# print(script_title)
print(msg_text.format(subtitle=subtitle, ver=ver, repo=repo, lic=lic, cur_ver=cur_ver))
folder_to_check = sys.argv[1]
print("\nChecking path {}".format(folder_to_check))
# Logging
if os.path.isdir('logs') == False:
os.mkdir('logs')
logfile_name = 'logs/{}.log'.format(current_time)
# from http://stackoverflow.com/a/9321890
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=logfile_name,
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logger1 = logging.getLogger("check_md5")
logger1.info("folder_to_check: {}".format(folder_to_check))
if os.path.isdir(folder_to_check) == False:
logger1.error("Path not found: {}".format(folder_to_check))
sys.exit(1)
md5_file = glob.glob("{}/*.md5".format(folder_to_check))
if len(md5_file) == 0:
exit_msg = "ERROR: md5 file not found"
print(exit_msg)
logger1.error(exit_msg)
sys.exit(1)
if len(md5_file) > 1:
exit_msg = "ERROR: Multiple md5 files found"
print(exit_msg)
logger1.error(exit_msg)
sys.exit(2)
else:
# read md5 file
md5_hashes = pd.read_csv(md5_file[0], sep=' ', header=None, names=['md5', 'file'])
def check_md5(files):
    """Hash every file in *files* and compare against the global
    ``md5_hashes`` dataframe (columns: 'md5', 'file').

    Returns:
        0 when every file matches its recorded hash, otherwise a list of
        'filename|computed_md5|expected_md5' strings describing each
        mismatch (callers test against the 0 sentinel).
    """
    bad_files = []
    for file in tqdm(files):
        filename = Path(file).name
        md5_hash = hashlib.md5()
        with open(file, "rb") as f:
            # Read and update hash in chunks of 4K to bound memory use
            for byte_block in iter(lambda: f.read(4096), b""):
                md5_hash.update(byte_block)
        file_md5 = md5_hash.hexdigest()
        # Look up the expected hash explicitly: the previous
        # to_string(index=False) approach produced the misleading text
        # 'Series([], ...)' when the filename was missing from the .md5
        # manifest, and was ambiguous for duplicate entries.
        expected = md5_hashes.loc[md5_hashes.file == filename, 'md5']
        if expected.empty:
            bad_files.append("{}|{}|{}".format(filename, file_md5, 'NOT IN MD5 FILE'))
            continue
        md5_from_file = str(expected.iloc[0]).strip()
        if file_md5 != md5_from_file:
            bad_files.append("{}|{}|{}".format(filename, file_md5, md5_from_file))
    if len(bad_files) > 0:
        return bad_files
    else:
        return 0
print("\nWorking...\n")
# get list of files
files = glob.glob("{}/*".format(folder_to_check))
files = [x for x in files if '.md5' not in x]
if len(files) != md5_hashes.shape[0]:
logger1.error("The number of files ({}) does not match the number of lines in the md5 file ({})".format(len(files),
md5_hashes.shape[
0]))
sys.exit(99)
res = check_md5(files)
if res == 0:
exit_msg = "SUCCESS: Files match md5"
print(exit_msg)
logger1.info(exit_msg)
sys.exit(0)
else:
exit_msg = "ERROR: {} files do not match md5:".format(len(res))
print(exit_msg)
for file in res:
print(file)
sys.exit(9)
| #!/usr/bin/env python3
#
# Check MD5 hashes
# Version 0.1
#
# 19 Dec 2019
#
# Digitization Program Office,
# Office of the Chief Information Officer,
# Smithsonian Institution
# https://dpo.si.edu
#
# Import modules
import urllib.request
from time import localtime, strftime
import pandas as pd
import locale, logging, os, glob, glob, sys, shutil, hashlib
from pathlib import Path
from functools import partial
from timeit import default_timer as timer
from pathlib import Path
from tqdm import tqdm
from pyfiglet import Figlet
# Script variables
script_title = "Check MD5 Tool"
subtitle = "Digitization Program Office\nOffice of the Chief Information Officer\nSmithsonian Institution\nhttps://dpo.si.edu"
ver = "0.1"
vercheck = "https://raw.githubusercontent.com/Smithsonian/MassDigi-tools/master/check_md5/toolversion.txt"
repo = "https://github.com/Smithsonian/MassDigi-tools/"
lic = "Available under the Apache 2.0 License"
# Set locale to UTF-8
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
# Get current time
current_time = strftime("%Y%m%d_%H%M%S", localtime())
# Check args
if len(sys.argv) == 1:
sys.exit("Missing path")
if len(sys.argv) > 2:
sys.exit("Script takes a single argument")
# Check for updates to the script
try:
with urllib.request.urlopen(vercheck) as response:
current_ver = response.read()
cur_ver = current_ver.decode('ascii').replace('\n', '')
if cur_ver != ver:
msg_text = "{subtitle}\n\n{repo}\n{lic}\n\nver. {ver}\nThis version is outdated. Current version is {cur_ver}.\nPlease download the updated version at: {repo}"
else:
msg_text = "{subtitle}\n\n{repo}\n{lic}\n\nver. {ver}"
except:
msg_text = "{subtitle}\n\n{repo}\n{lic}\n\nver. {ver}"
cur_ver = ver
f = Figlet(font='slant')
print("\n")
print(f.renderText(script_title))
# print(script_title)
print(msg_text.format(subtitle=subtitle, ver=ver, repo=repo, lic=lic, cur_ver=cur_ver))
folder_to_check = sys.argv[1]
print("\nChecking path {}".format(folder_to_check))
# Logging
if os.path.isdir('logs') == False:
os.mkdir('logs')
logfile_name = 'logs/{}.log'.format(current_time)
# from http://stackoverflow.com/a/9321890
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=logfile_name,
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logger1 = logging.getLogger("check_md5")
logger1.info("folder_to_check: {}".format(folder_to_check))
if os.path.isdir(folder_to_check) == False:
logger1.error("Path not found: {}".format(folder_to_check))
sys.exit(1)
md5_file = glob.glob("{}/*.md5".format(folder_to_check))
if len(md5_file) == 0:
exit_msg = "ERROR: md5 file not found"
print(exit_msg)
logger1.error(exit_msg)
sys.exit(1)
if len(md5_file) > 1:
exit_msg = "ERROR: Multiple md5 files found"
print(exit_msg)
logger1.error(exit_msg)
sys.exit(2)
else:
# read md5 file
md5_hashes = pd.read_csv(md5_file[0], sep=' ', header=None, names=['md5', 'file'])
def check_md5(files):
bad_files = []
for file in tqdm(files):
filename = Path(file).name
md5_hash = hashlib.md5()
with open(file, "rb") as f:
# Read and update hash in chunks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
md5_hash.update(byte_block)
file_md5 = md5_hash.hexdigest()
md5_from_file = md5_hashes[md5_hashes.file == filename]['md5'].to_string(index=False).strip()
if file_md5 == md5_from_file:
continue
else:
bad_files.append("{}|{}|{}".format(filename, file_md5, md5_from_file))
if len(bad_files) > 0:
return bad_files
else:
return 0
print("\nWorking...\n")
# get list of files
files = glob.glob("{}/*".format(folder_to_check))
files = [x for x in files if '.md5' not in x]
if len(files) != md5_hashes.shape[0]:
logger1.error("The number of files ({}) does not match the number of lines in the md5 file ({})".format(len(files),
md5_hashes.shape[
0]))
sys.exit(99)
res = check_md5(files)
if res == 0:
exit_msg = "SUCCESS: Files match md5"
print(exit_msg)
logger1.info(exit_msg)
sys.exit(0)
else:
exit_msg = "ERROR: {} files do not match md5:".format(len(res))
print(exit_msg)
for file in res:
print(file)
sys.exit(9)
| en | 0.60301 | #!/usr/bin/env python3 # # Check MD5 hashes # Version 0.1 # # 19 Dec 2019 # # Digitization Program Office, # Office of the Chief Information Officer, # Smithsonian Institution # https://dpo.si.edu # # Import modules # Script variables # Set locale to UTF-8 # Get current time # Check args # Check for updates to the script # print(script_title) # Logging # from http://stackoverflow.com/a/9321890 # read md5 file # Read and update hash in chunks of 4K # get list of files | 2.421894 | 2 |
tpDcc/tools/renamer/dccs/max/server.py | tpRigToolkit/tpRigToolkit-tools-renamer | 3 | 6615831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains tpDcc-tools-renamer server implementation for 3ds Max
"""
from __future__ import print_function, division, absolute_import
from tpDcc import dcc
from tpDcc.core import server
class RenamerServer(server.DccServer, object):
    """Renamer command server for 3ds Max.

    Receives rename requests from the tpDcc renamer tool over the DCC
    server bridge and applies them through the ``dcc`` abstraction layer.
    """

    PORT = 16231

    def simple_rename(self, data, reply):
        """Rename the requested nodes (or the current selection when no
        nodes are given) to ``data['new_name']``; reports through *reply*."""
        target_name = data.get('new_name', '')
        if not target_name:
            reply['msg'] = 'Please type a new name and try the operation again!'
            reply['success'] = False
            return
        target_nodes = data.get('nodes', list()) or dcc.selected_nodes()
        for target_node in target_nodes:
            dcc.rename_node(target_node, target_name)
        reply['success'] = True

    def add_prefix(self, data, reply):
        """Prepend ``data['prefix_text']`` to node names, limited to the
        current selection when ``data['only_selection']`` is truthy."""
        prefix = data.get('prefix_text', '')
        if not prefix:
            reply['success'] = False
            reply['msg'] = 'No prefix to add defined.'
            return
        only_selected = data.get('only_selection', True)
        dcc.add_name_prefix(prefix=prefix, selection_only=only_selected)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains tpDcc-tools-renamer server implementation for 3ds Max
"""
from __future__ import print_function, division, absolute_import
from tpDcc import dcc
from tpDcc.core import server
class RenamerServer(server.DccServer, object):
PORT = 16231
def simple_rename(self, data, reply):
new_name = data.get('new_name', '')
if not new_name:
reply['msg'] = 'Please type a new name and try the operation again!'
reply['success'] = False
return
nodes = data.get('nodes', list())
if not nodes:
nodes = dcc.selected_nodes()
for node in nodes:
dcc.rename_node(node, new_name)
reply['success'] = True
def add_prefix(self, data, reply):
prefix_text = data.get('prefix_text', '')
if not prefix_text:
reply['success'] = False
reply['msg'] = 'No prefix to add defined.'
return
selection_only = data.get('only_selection', True)
dcc.add_name_prefix(prefix=prefix_text, selection_only=selection_only)
| en | 0.55156 | #!/usr/bin/env python # -*- coding: utf-8 -*- Module that contains tpDcc-tools-renamer server implementation for 3ds Max | 2.477572 | 2 |
luibeal/input.py | shirangi/decline_curves_2pointOooh | 1 | 6615832 | class Input:
"""
Describes the input to the model in terms of sequence lenght
and number of metasequences.
"""
def __init__(self, lseq, meta_sequence_names=[]):
"""
Args:
lseq: Data values per production curve, length of a single sequence.
meta_sequence_names: Names of addditional data streams per production curve, ie stages of sorts,
like perforation stage, additional wells stage etc...also referred to as meta sequences.
"""
self.lseq = lseq
self.meta_sequence_names = meta_sequence_names
| class Input:
"""
Describes the input to the model in terms of sequence lenght
and number of metasequences.
"""
def __init__(self, lseq, meta_sequence_names=[]):
"""
Args:
lseq: Data values per production curve, length of a single sequence.
meta_sequence_names: Names of addditional data streams per production curve, ie stages of sorts,
like perforation stage, additional wells stage etc...also referred to as meta sequences.
"""
self.lseq = lseq
self.meta_sequence_names = meta_sequence_names
| en | 0.862982 | Describes the input to the model in terms of sequence lenght and number of metasequences. Args: lseq: Data values per production curve, length of a single sequence. meta_sequence_names: Names of addditional data streams per production curve, ie stages of sorts, like perforation stage, additional wells stage etc...also referred to as meta sequences. | 2.293101 | 2 |
va_saas/webhooks.py | VapourApps/billing_backend | 2 | 6615833 | <filename>va_saas/webhooks.py
import json, requests, threading
from django.db import models
from rest_hooks.models import AbstractHook
from silver.models import Subscription
#This is the very specific case where we have a subscription hook which creates a vm
#We should definitely try and get this to be a general case but for the moment I guess not
#This gets called if the webhook procced on subscription.added.new_vm. Then, if the subscription.metadata contains a "default_data" field, it generates proper default_data payload.
def subscription_vm_handler(hook, target, payload):
    """Specific-case handler for 'subscription.updated' hooks.

    Builds the VM default_data payload for the subscription and, when the
    subscription qualifies (see subscription_should_create_vm), fires the
    VM-creation HTTP request in a background thread.

    Args:
        hook: VAHook instance describing how to call the target.
        target: URL the VM-creation request is sent to.
        payload: Serialized subscription data; ``payload['pk']`` is the
            subscription primary key.

    Returns:
        The default_data dict used for the VM-creation request.
    """
    # Hard-coded VM spec for now; the commented block below shows the
    # intended path of deriving it from the subscription metadata instead.
    default_data = {'server_name' : 'hook-test', u'username': u'root', u'network': u'eth0', u'image': u'va-master-img', u'storage': u'500', u'provider_name': u'lxc', u'size': u'va-small', 'subscription_id' : payload['pk'], 'role' : 'va-master'}
    print ('Payload', payload)
    # default_data = payload['fields']['meta']
    # default_data = json.loads(default_data)
    # novobox_name = default_data.get('novobox_name', '')
    # default_data = default_data.get('default_data', {})
    # default_data['server_name'] = novobox_name
    print ('Headers : ', hook.headers)
    # hook.headers is stored as a JSON string; fall back to a plain JSON
    # content type when it decodes to something falsy.
    headers = json.loads(hook.headers) or {'Content-type' : "application/json"}
    print ('Calling ', hook.method.lower(), ' on ', target,' with headers ', headers, ' and data ', default_data)
    subscription = Subscription.objects.get(pk = payload['pk'])
    if subscription_should_create_vm(subscription):
        print ('Starting creating task')
        # Run the HTTP call in a worker thread so hook delivery is not blocked.
        vm_creation_task = threading.Thread(target = subscription_handle_vm_creation, args = [hook.method.lower(), target, headers, default_data])
        vm_creation_task.start()
    # print ('Starting (eventually) checking task')
    # vm_check_status = threading.Thread(target = subscription_vm_check_status, args = [target, headers])
    return default_data
def subscription_should_create_vm(subscription):
    """Return True when *subscription* is active and carries VM default_data
    that has not yet been given a status (i.e. no VM was created for it);
    otherwise return None implicitly."""
    print ('Sub state ', subscription.state, ' vm data ', subscription.meta)
    if subscription.state != 'active':
        return
    vm_data = subscription.meta.get('default_data')
    if not vm_data or vm_data.get('status'):
        return
    print ('Starting vm!')
    return True
def subscription_handle_vm_creation(method, target, headers, default_data):
    """Fire the VM-creation HTTP request; intended to run in a worker thread
    started by subscription_vm_handler."""
    print ('In creation!, calling data')
    request_func = getattr(requests, method)
    response = request_func(target, verify=False, headers=headers, data=json.dumps(default_data))
    print ('Finished!', response.text)
def subscription_vm_check_status(target, headers):
    # Stub: intended to poll the created VM's status at ``target``; the
    # thread that would run it is currently commented out in
    # subscription_vm_handler.
    pass
specific_cases = {
'subscription.updated' : subscription_vm_handler,
}
class VAHook(AbstractHook):
    """Webhook model extending django-rest-hooks' AbstractHook with the
    extra request details needed to deliver the hook ourselves."""
    # JSON-encoded HTTP headers to send with the hook request.
    headers = models.CharField(max_length = 200, default = '{}')
    # HTTP verb used when delivering the hook (e.g. 'get', 'post').
    method = models.CharField(max_length = 6, default = 'get')
    def __str__(self):
        # The target URL doubles as the human-readable identifier.
        return self.target
    def __unicode__(self):
        # Python 2 compatibility: delegate to __str__.
        return self.__str__()
def rest_hook_handler(target, payload, instance, hook):
    """Generic delivery handler for rest_hooks events.

    Looks up our stored VAHook for the incoming hook's target URL,
    dispatches to a registered specific-case handler when one exists for
    the event, and otherwise forwards the payload data to the target using
    the stored HTTP method and headers.

    Returns:
        The specific handler's result, the ``requests`` response for the
        generic path, or None when no VAHook matches the target.
    """
    print ("I have ", target, payload, instance, hook.__dict__, hook.target)
    # Re-resolve the hook as our VAHook subclass (the one passed in is the
    # plain rest_hooks model); note this rebinds the 'hook' parameter.
    hook = VAHook.objects.filter(target = hook.target)
    if not hook: return
    hook = hook[0]
    url_data = payload['data']
    print ('Pure data is : ', url_data)
    event = payload['hook']['event']
    # We currently use extra_data to support creating VMs when creating subscriptions for a handful of specific cases.
    # We should probably find a generic way to add custom extra data, but for the moment, this will have to do.
    # We use a global dict with specific cases for this. Hopefully this never grows out of control.
    if event in specific_cases:
        specific_handler = specific_cases[event]
        return specific_handler(hook, target, url_data)
    # Generic path: send the event data to the target with the stored verb.
    headers = json.loads(hook.headers) or {'Content-type' : "application/json"}
    print ('Calling ', hook.method.lower(), ' on ', target,' with headers ', headers, ' and data ', url_data)
    data = getattr(requests, hook.method.lower())(target, verify = False, headers = headers, data = json.dumps(url_data))
    print (data.text)
    return data
#This was a test to see if the custom hook firing works but for some reason it doesn't. Oh well, I managed to hack together a solution.
def find_and_fire_hook(event_name, instance, **kwargs):
    """Deliver *instance* to every stored VAHook registered for *event_name*."""
    for matching_hook in VAHook.objects.filter(event=event_name):
        matching_hook.deliver_hook(instance)
| <filename>va_saas/webhooks.py
import json, requests, threading
from django.db import models
from rest_hooks.models import AbstractHook
from silver.models import Subscription
#This is the very specific case where we have a subscription hook which creates a vm
#We should definitely try and get this to be a general case but for the moment I guess not
#This gets called if the webhook procced on subscription.added.new_vm. Then, if the subscription.metadata contains a "default_data" field, it generates proper default_data payload.
def subscription_vm_handler(hook, target, payload):
default_data = {'server_name' : 'hook-test', u'username': u'root', u'network': u'eth0', u'image': u'va-master-img', u'storage': u'500', u'provider_name': u'lxc', u'size': u'va-small', 'subscription_id' : payload['pk'], 'role' : 'va-master'}
print ('Payload', payload)
# default_data = payload['fields']['meta']
# default_data = json.loads(default_data)
# novobox_name = default_data.get('novobox_name', '')
# default_data = default_data.get('default_data', {})
# default_data['server_name'] = novobox_name
print ('Headers : ', hook.headers)
headers = json.loads(hook.headers) or {'Content-type' : "application/json"}
print ('Calling ', hook.method.lower(), ' on ', target,' with headers ', headers, ' and data ', default_data)
subscription = Subscription.objects.get(pk = payload['pk'])
if subscription_should_create_vm(subscription):
print ('Starting creating task')
vm_creation_task = threading.Thread(target = subscription_handle_vm_creation, args = [hook.method.lower(), target, headers, default_data])
vm_creation_task.start()
# print ('Starting (eventually) checking task')
# vm_check_status = threading.Thread(target = subscription_vm_check_status, args = [target, headers])
return default_data
def subscription_should_create_vm(subscription):
print ('Sub state ', subscription.state, ' vm data ', subscription.meta)
if subscription.state == 'active' and subscription.meta.get('default_data') and not subscription.meta.get('default_data', {}).get('status'):
print ('Starting vm!')
return True
def subscription_handle_vm_creation(method, target, headers, default_data):
print ('In creation!, calling data')
data = getattr(requests, method)(target, verify = False, headers = headers, data = json.dumps(default_data))
print ('Finished!', data.text)
def subscription_vm_check_status(target, headers):
pass
specific_cases = {
'subscription.updated' : subscription_vm_handler,
}
class VAHook(AbstractHook):
headers = models.CharField(max_length = 200, default = '{}')
method = models.CharField(max_length = 6, default = 'get')
def __str__(self):
return self.target
def __unicode__(self):
return self.__str__()
def rest_hook_handler(target, payload, instance, hook):
print ("I have ", target, payload, instance, hook.__dict__, hook.target)
hook = VAHook.objects.filter(target = hook.target)
if not hook: return
hook = hook[0]
url_data = payload['data']
print ('Pure data is : ', url_data)
event = payload['hook']['event']
#We currently use extra_data to support creating VMs when creating subscrtiptions for a handful of specific cases.
#We should probably find a generic way to add custom extra data, but for the moment, this will have to do.
#We use a global dict with specific cases for this. Hopefully this never grows out of control.
if event in specific_cases:
specific_handler = specific_cases[event]
return specific_handler(hook, target, url_data)
headers = json.loads(hook.headers) or {'Content-type' : "application/json"}
print ('Calling ', hook.method.lower(), ' on ', target,' with headers ', headers, ' and data ', url_data)
data = getattr(requests, hook.method.lower())(target, verify = False, headers = headers, data = json.dumps(url_data))
print (data.text)
return data
#This was a test to see if the custom hook firing works but for some reason it doesn't. Oh well, I managed to hack together a solution.
def find_and_fire_hook(event_name, instance, **kwargs):
filters = {
'event': event_name,
}
hooks = VAHook.objects.filter(**filters)
for hook in hooks:
hook.deliver_hook(instance)
| en | 0.7325 | #This is the very specific case where we have a subscription hook which creates a vm #We should definitely try and get this to be a general case but for the moment I guess not #This gets called if the webhook procced on subscription.added.new_vm. Then, if the subscription.metadata contains a "default_data" field, it generates proper default_data payload. # default_data = payload['fields']['meta'] # default_data = json.loads(default_data) # novobox_name = default_data.get('novobox_name', '') # default_data = default_data.get('default_data', {}) # default_data['server_name'] = novobox_name # print ('Starting (eventually) checking task') # vm_check_status = threading.Thread(target = subscription_vm_check_status, args = [target, headers]) #We currently use extra_data to support creating VMs when creating subscrtiptions for a handful of specific cases. #We should probably find a generic way to add custom extra data, but for the moment, this will have to do. #We use a global dict with specific cases for this. Hopefully this never grows out of control. #This was a test to see if the custom hook firing works but for some reason it doesn't. Oh well, I managed to hack together a solution. | 1.97392 | 2 |
report.py | toanalien/binance-toolkit | 0 | 6615834 | <reponame>toanalien/binance-toolkit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ref: https://www.nuomiphp.com/eplan/en/107216.html
from __future__ import print_function
import logging
import os
import os.path
import pickle
import pprint
import sys
import time
from datetime import datetime, timedelta
from functools import reduce
from operator import itemgetter
import ccxt
from dotenv import load_dotenv
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import Flow
from googleapiclient.discovery import build
pp = pprint.PrettyPrinter(indent=4)
load_dotenv()
local_tz = os.environ.get("local_tz", "UTC")
os.environ["TZ"] = local_tz
time.tzset()
work_dir = os.path.dirname(os.path.abspath(__file__))
# logging.basicConfig(level=logging.DEBUG)
api_key = os.environ.get("apiKey")
secret_key = os.environ.get("secretKey")
if not (api_key and secret_key):
logging.error("api_key or secret_key is empty")
exit(1)
exchange = ccxt.binance({
"apiKey": api_key,
"secret": secret_key,
"enableRateLimit": True
})
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SAMPLE_SPREADSHEET_ID = os.environ.get('SAMPLE_SPREADSHEET_ID')
def main():
    """Snapshot the Binance margin-account balance and append one row
    (timestamp, BTC net asset, USDT net asset, open-order count) to the
    configured Google Sheet."""
    # ---- Google OAuth: load cached credentials or run the console flow ----
    creds = None
    if os.path.exists(os.path.join(work_dir, 'token.pickle')):
        with open(os.path.join(work_dir, 'token.pickle'), 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: refresh silently.
            creds.refresh(Request())
        else:
            # First run (or unrecoverable creds): interactive consent flow.
            flow = Flow.from_client_secrets_file(
                os.path.join(work_dir, 'credentials.json'),
                SCOPES,
                redirect_uri='http://localhost')
            auth_url, _ = flow.authorization_url(prompt='consent')
            print('Please go to this URL: {}'.format(auth_url))
            code = input('Enter the authorization code: ')
            flow.fetch_token(code=code)
            creds = flow.credentials
        # Cache the (new or refreshed) credentials for the next run.
        with open(os.path.join(work_dir, 'token.pickle'), 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    sheet = service.spreadsheets()
    # ---- Binance: ticker prices and margin account snapshot ----
    dict_ticker_price = exchange.fetchTickers()
    # Normalize ccxt symbols like 'BTC/USDT' to Binance-style 'BTCUSDT' keys.
    dict_ticker_price = {
        k.replace("/", ""): v
        for k, v in dict_ticker_price.items()
    }
    margin_cro = exchange.sapi_get_margin_account()
    # Assets with a non-zero free or borrowed balance (currently unused below).
    cro_symbol_has_asset = list(
        filter(
            lambda x: x["free"] != "0" or x["borrowed"] != "0",
            margin_cro["userAssets"],
        ))
    # Convert the BTC-denominated net asset to USDT at the current bid price.
    margin_cro["totalNetAssetOfUSDT"] = float(
        margin_cro["totalNetAssetOfBtc"]) * float(
            dict_ticker_price["BTCUSDT"]["bid"])
    margin_openorders = exchange.sapi_get_margin_openorders()
    # One spreadsheet row: [unix timestamp, BTC net, USDT net, open orders].
    values = [
        [
            # Sep 15, 2020, 6:10:59:59 PM
            datetime.now().timestamp(),
            margin_cro["totalNetAssetOfBtc"],
            margin_cro["totalNetAssetOfUSDT"],
            len(margin_openorders)
        ],
    ]
    body = {'values': values}
    result = service.spreadsheets().values().append(
        spreadsheetId=SAMPLE_SPREADSHEET_ID,
        body=body,
        valueInputOption='USER_ENTERED',
        range='A1').execute()
    print('{0} cells appended.'.format(result \
        .get('updates') \
        .get('updatedCells')))
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ref: https://www.nuomiphp.com/eplan/en/107216.html
from __future__ import print_function
import logging
import os
import os.path
import pickle
import pprint
import sys
import time
from datetime import datetime, timedelta
from functools import reduce
from operator import itemgetter
import ccxt
from dotenv import load_dotenv
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import Flow
from googleapiclient.discovery import build
pp = pprint.PrettyPrinter(indent=4)
load_dotenv()
local_tz = os.environ.get("local_tz", "UTC")
os.environ["TZ"] = local_tz
time.tzset()
work_dir = os.path.dirname(os.path.abspath(__file__))
# logging.basicConfig(level=logging.DEBUG)
api_key = os.environ.get("apiKey")
secret_key = os.environ.get("secretKey")
if not (api_key and secret_key):
logging.error("api_key or secret_key is empty")
exit(1)
exchange = ccxt.binance({
"apiKey": api_key,
"secret": secret_key,
"enableRateLimit": True
})
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SAMPLE_SPREADSHEET_ID = os.environ.get('SAMPLE_SPREADSHEET_ID')
def main():
creds = None
if os.path.exists(os.path.join(work_dir, 'token.pickle')):
with open(os.path.join(work_dir, 'token.pickle'), 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = Flow.from_client_secrets_file(
os.path.join(work_dir, 'credentials.json'),
SCOPES,
redirect_uri='http://localhost')
auth_url, _ = flow.authorization_url(prompt='consent')
print('Please go to this URL: {}'.format(auth_url))
code = input('Enter the authorization code: ')
flow.fetch_token(code=code)
creds = flow.credentials
with open(os.path.join(work_dir, 'token.pickle'), 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
dict_ticker_price = exchange.fetchTickers()
dict_ticker_price = {
k.replace("/", ""): v
for k, v in dict_ticker_price.items()
}
margin_cro = exchange.sapi_get_margin_account()
cro_symbol_has_asset = list(
filter(
lambda x: x["free"] != "0" or x["borrowed"] != "0",
margin_cro["userAssets"],
))
margin_cro["totalNetAssetOfUSDT"] = float(
margin_cro["totalNetAssetOfBtc"]) * float(
dict_ticker_price["BTCUSDT"]["bid"])
margin_openorders = exchange.sapi_get_margin_openorders()
values = [
[
# Sep 15, 2020, 6:10:59:59 PM
datetime.now().timestamp(),
margin_cro["totalNetAssetOfBtc"],
margin_cro["totalNetAssetOfUSDT"],
len(margin_openorders)
],
]
body = {'values': values}
result = service.spreadsheets().values().append(
spreadsheetId=SAMPLE_SPREADSHEET_ID,
body=body,
valueInputOption='USER_ENTERED',
range='A1').execute()
print('{0} cells appended.'.format(result \
.get('updates') \
.get('updatedCells')))
if __name__ == '__main__':
main() | en | 0.701794 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # ref: https://www.nuomiphp.com/eplan/en/107216.html # logging.basicConfig(level=logging.DEBUG) # If modifying these scopes, delete the file token.pickle. # Sep 15, 2020, 6:10:59:59 PM | 2.064386 | 2 |
web/urls.py | Sritterbush/arxcode | 0 | 6615835 | <gh_stars>0
"""
Url definition file to redistribute incoming URL requests to django
views. Search the Django documentation for "URL dispatcher" for more
help.
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.views.generic import RedirectView
from django.views.static import serve
def include_app(url_path, namespace):
return include((url_path, namespace), namespace=namespace)
urlpatterns = [
# User Authentication
url(r'^accounts/login', auth_views.LoginView.as_view(template_name="login.html"), name='login'),
url(r'^accounts/logout', auth_views.LogoutView.as_view(), name="logout"),
# Front page
url(r'^', include('web.website.urls')),
# News stuff
url(r'^news/', include('web.news.urls')),
# Admin interface
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^webclient/', include_app('web.website.webclient_urls', "webclient")),
# favicon
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/images/favicon.ico', permanent=False)),
url(r'^character/', include_app('web.character.urls', 'character')),
url(r'^topics/', include_app('web.help_topics.urls', namespace='help_topics')),
url(r'^dom/', include_app('world.dominion.urls', namespace='dominion')),
url(r'^comms/', include_app('world.msgs.urls', namespace='msgs')),
url(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT}),
url(r'^support/', include('web.helpdesk.urls')),
url(r'^admintools/', include_app('web.admintools.urls', namespace='admintools')),
url(r'^explore/', include_app('world.exploration.urls', namespace='exploration')),
]
# This sets up the server if the user want to run the Django
# test server (this should normally not be needed).
if settings.SERVE_MEDIA:
urlpatterns += [
(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
]
if settings.DEBUG:
try:
# noinspection PyPackageRequirements
import debug_toolbar
except ImportError:
debug_toolbar = None
if debug_toolbar:
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
handler500 = 'web.website.views.arx_500_view'
| """
Url definition file to redistribute incoming URL requests to django
views. Search the Django documentation for "URL dispatcher" for more
help.
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.views.generic import RedirectView
from django.views.static import serve
def include_app(url_path, namespace):
return include((url_path, namespace), namespace=namespace)
urlpatterns = [
# User Authentication
url(r'^accounts/login', auth_views.LoginView.as_view(template_name="login.html"), name='login'),
url(r'^accounts/logout', auth_views.LogoutView.as_view(), name="logout"),
# Front page
url(r'^', include('web.website.urls')),
# News stuff
url(r'^news/', include('web.news.urls')),
# Admin interface
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^webclient/', include_app('web.website.webclient_urls', "webclient")),
# favicon
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/images/favicon.ico', permanent=False)),
url(r'^character/', include_app('web.character.urls', 'character')),
url(r'^topics/', include_app('web.help_topics.urls', namespace='help_topics')),
url(r'^dom/', include_app('world.dominion.urls', namespace='dominion')),
url(r'^comms/', include_app('world.msgs.urls', namespace='msgs')),
url(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT}),
url(r'^support/', include('web.helpdesk.urls')),
url(r'^admintools/', include_app('web.admintools.urls', namespace='admintools')),
url(r'^explore/', include_app('world.exploration.urls', namespace='exploration')),
]
# This sets up the server if the user want to run the Django
# test server (this should normally not be needed).
if settings.SERVE_MEDIA:
urlpatterns += [
(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
]
if settings.DEBUG:
try:
# noinspection PyPackageRequirements
import debug_toolbar
except ImportError:
debug_toolbar = None
if debug_toolbar:
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
handler500 = 'web.website.views.arx_500_view' | en | 0.677888 | Url definition file to redistribute incoming URL requests to django views. Search the Django documentation for "URL dispatcher" for more help. # User Authentication # Front page # News stuff # Admin interface # favicon # This sets up the server if the user want to run the Django # test server (this should normally not be needed). # noinspection PyPackageRequirements | 2.091831 | 2 |
485hub/crc_test.py | tsengapola/teensy_pid_motor_control | 0 | 6615836 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import crcmod
# this is a standard CCITT CRC even if it does not look like
# (crcmod applies xorOut to initCrc, so initCrc is in reality 0xffff, not 0)
_CRC_FUNC = crcmod.mkCrcFun(poly=0x18005, initCrc=0xffff, rev=0x4b37, xorOut=0x0000)
data = bytearray.fromhex("010320ab0002")
crc = _CRC_FUNC(data)
data.append(crc & 0xff)
data.append(((crc >> 8) & 0xff))
print (data.hex())
| # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import crcmod
# this is a standard CCITT CRC even if it does not look like
# (crcmod applies xorOut to initCrc, so initCrc is in reality 0xffff, not 0)
_CRC_FUNC = crcmod.mkCrcFun(poly=0x18005, initCrc=0xffff, rev=0x4b37, xorOut=0x0000)
data = bytearray.fromhex("010320ab0002")
crc = _CRC_FUNC(data)
data.append(crc & 0xff)
data.append(((crc >> 8) & 0xff))
print (data.hex()) | en | 0.782939 | # -*- coding: utf-8 -*- Spyder Editor This is a temporary script file. # this is a standard CCITT CRC even if it does not look like # (crcmod applies xorOut to initCrc, so initCrc is in reality 0xffff, not 0) | 2.35221 | 2 |
src/day1/solution.py | justinhsg/AoC2020 | 0 | 6615837 | <reponame>justinhsg/AoC2020<gh_stars>0
import sys
import os
day_number = sys.path[0].split('\\')[-1]
if len(sys.argv)==1:
path_to_source = os.path.join("\\".join(sys.path[0].split("\\")[:-2]), f"input\\{day_number}")
else:
path_to_source = os.path.join("\\".join(sys.path[0].split("\\")[:-2]), f"sample\\{day_number}")
with open(path_to_source, "r") as infile:
numbers = list(map(int, infile.read().split("\n")))
part1 = None
part2 = None
unique = set(numbers)
if(len(unique) == len(numbers)):
for n in unique:
if (2020-n) in unique:
part1 = n*(2020-n)
break
part2_found = False
for n in unique:
for m in unique:
if(n != m and 2020-n-m in unique and 2020-n-m != n and 2020-n-m != m):
part2 = n*m*(2020-n-m)
break
if(part2 is not None):
break
print(part1)
print(part2)
| import sys
import os
day_number = sys.path[0].split('\\')[-1]
if len(sys.argv)==1:
path_to_source = os.path.join("\\".join(sys.path[0].split("\\")[:-2]), f"input\\{day_number}")
else:
path_to_source = os.path.join("\\".join(sys.path[0].split("\\")[:-2]), f"sample\\{day_number}")
with open(path_to_source, "r") as infile:
numbers = list(map(int, infile.read().split("\n")))
part1 = None
part2 = None
unique = set(numbers)
if(len(unique) == len(numbers)):
for n in unique:
if (2020-n) in unique:
part1 = n*(2020-n)
break
part2_found = False
for n in unique:
for m in unique:
if(n != m and 2020-n-m in unique and 2020-n-m != n and 2020-n-m != m):
part2 = n*m*(2020-n-m)
break
if(part2 is not None):
break
print(part1)
print(part2) | none | 1 | 3.101339 | 3 | |
src/dataset.py | lRomul/argus-tgs-salt | 74 | 6615838 | import cv2
import os
from os.path import join
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from src.transforms import SimpleDepthTransform
from src.config import TEST_DIR
def get_samples(train_folds_path, folds):
images_lst = []
target_lst = []
depth_lst = []
train_folds_df = pd.read_csv(train_folds_path)
for i, row in train_folds_df.iterrows():
if row.fold not in folds:
continue
image = cv2.imread(row.image_path, cv2.IMREAD_GRAYSCALE)
if image is None:
raise FileNotFoundError(f"Image not found {row.image_path}")
mask = cv2.imread(row.mask_path, cv2.IMREAD_GRAYSCALE)
if mask is None:
raise FileNotFoundError(f"Mask not found {row.mask_path}")
images_lst.append(image)
target_lst.append(mask)
depth_lst.append(row.z)
return images_lst, target_lst, depth_lst
class SaltDataset(Dataset):
def __init__(self, train_folds_path, folds,
transform=None,
depth_transform=None):
super().__init__()
self.train_folds_path = train_folds_path
self.folds = folds
self.transform = transform
if depth_transform is None:
self.depth_transform = SimpleDepthTransform()
else:
self.depth_transform = depth_transform
self.images_lst, self.target_lst, self.depth_lst = \
get_samples(train_folds_path, folds)
def __len__(self):
return len(self.images_lst)
def __getitem__(self, idx):
image = self.images_lst[idx]
depth = self.depth_lst[idx]
target = self.target_lst[idx]
input = self.depth_transform(image, depth)
if self.transform is not None:
input, target = self.transform(input, target)
return input, target
def get_test_samples(test_images_dir):
images_lst = []
depth_lst = []
for image_name in os.listdir(test_images_dir):
image_path = join(test_images_dir, image_name)
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
if np.sum(image): # skip black images
images_lst.append(image)
depth_lst.append(0) # TODO: load depth
return images_lst, depth_lst
class SaltTestDataset(Dataset):
def __init__(self, test_dir,
transform=None,
depth_transform=None):
super().__init__()
self.test_dir = test_dir
self.transform = transform
if depth_transform is None:
self.depth_transform = SimpleDepthTransform()
else:
self.depth_transform = depth_transform
self.images_lst, self.depth_lst = \
get_test_samples(test_dir)
def __len__(self):
return len(self.images_lst)
def __getitem__(self, idx):
image = self.images_lst[idx]
depth = self.depth_lst[idx]
input = self.depth_transform(image, depth)
if self.transform is not None:
input = self.transform(input)
return input
| import cv2
import os
from os.path import join
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from src.transforms import SimpleDepthTransform
from src.config import TEST_DIR
def get_samples(train_folds_path, folds):
images_lst = []
target_lst = []
depth_lst = []
train_folds_df = pd.read_csv(train_folds_path)
for i, row in train_folds_df.iterrows():
if row.fold not in folds:
continue
image = cv2.imread(row.image_path, cv2.IMREAD_GRAYSCALE)
if image is None:
raise FileNotFoundError(f"Image not found {row.image_path}")
mask = cv2.imread(row.mask_path, cv2.IMREAD_GRAYSCALE)
if mask is None:
raise FileNotFoundError(f"Mask not found {row.mask_path}")
images_lst.append(image)
target_lst.append(mask)
depth_lst.append(row.z)
return images_lst, target_lst, depth_lst
class SaltDataset(Dataset):
def __init__(self, train_folds_path, folds,
transform=None,
depth_transform=None):
super().__init__()
self.train_folds_path = train_folds_path
self.folds = folds
self.transform = transform
if depth_transform is None:
self.depth_transform = SimpleDepthTransform()
else:
self.depth_transform = depth_transform
self.images_lst, self.target_lst, self.depth_lst = \
get_samples(train_folds_path, folds)
def __len__(self):
return len(self.images_lst)
def __getitem__(self, idx):
image = self.images_lst[idx]
depth = self.depth_lst[idx]
target = self.target_lst[idx]
input = self.depth_transform(image, depth)
if self.transform is not None:
input, target = self.transform(input, target)
return input, target
def get_test_samples(test_images_dir):
images_lst = []
depth_lst = []
for image_name in os.listdir(test_images_dir):
image_path = join(test_images_dir, image_name)
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
if np.sum(image): # skip black images
images_lst.append(image)
depth_lst.append(0) # TODO: load depth
return images_lst, depth_lst
class SaltTestDataset(Dataset):
def __init__(self, test_dir,
transform=None,
depth_transform=None):
super().__init__()
self.test_dir = test_dir
self.transform = transform
if depth_transform is None:
self.depth_transform = SimpleDepthTransform()
else:
self.depth_transform = depth_transform
self.images_lst, self.depth_lst = \
get_test_samples(test_dir)
def __len__(self):
return len(self.images_lst)
def __getitem__(self, idx):
image = self.images_lst[idx]
depth = self.depth_lst[idx]
input = self.depth_transform(image, depth)
if self.transform is not None:
input = self.transform(input)
return input
| en | 0.165506 | # skip black images # TODO: load depth | 2.541727 | 3 |
Smart User Targeted Advertising/MinorPro/FINALPROJECT/Resources/test.py | saransh808/Projects | 0 | 6615839 | import sqlite3
conncreate = sqlite3.connect('Survey.db')
conncreate.execute('''CREATE TABLE IF NOT EXISTS data (
att_age VARCHAR(3),
att_gender VARCHAR(1),
att_marr_status VARCHAR(3),
att_stud VARCHAR(10),
att_game VARCHAR(3),
att_tv VARCHAR(3),
att_shop VARCHAR(3),
att_trav VARCHAR(3),
att_invest VARCHAR(3),
att_gadget VARCHAR(3),
att_food VARCHAR(3),
att_movie VARCHAR(3),
att_decor VARCHAR(3)
);''');
conncreate.commit();
conncreate.close();
| import sqlite3
conncreate = sqlite3.connect('Survey.db')
conncreate.execute('''CREATE TABLE IF NOT EXISTS data (
att_age VARCHAR(3),
att_gender VARCHAR(1),
att_marr_status VARCHAR(3),
att_stud VARCHAR(10),
att_game VARCHAR(3),
att_tv VARCHAR(3),
att_shop VARCHAR(3),
att_trav VARCHAR(3),
att_invest VARCHAR(3),
att_gadget VARCHAR(3),
att_food VARCHAR(3),
att_movie VARCHAR(3),
att_decor VARCHAR(3)
);''');
conncreate.commit();
conncreate.close();
| en | 0.311868 | CREATE TABLE IF NOT EXISTS data ( att_age VARCHAR(3), att_gender VARCHAR(1), att_marr_status VARCHAR(3), att_stud VARCHAR(10), att_game VARCHAR(3), att_tv VARCHAR(3), att_shop VARCHAR(3), att_trav VARCHAR(3), att_invest VARCHAR(3), att_gadget VARCHAR(3), att_food VARCHAR(3), att_movie VARCHAR(3), att_decor VARCHAR(3) ); | 3.501723 | 4 |
Calculator/Proportion.py | jerilj/stats_calculator | 0 | 6615840 | import math
class Proportion:
def proportion(CSValues):
try:
ans = []
total = sum(CSValues)
for i in CSValues:
temp = i/total
ans.append('{:.4f}'.format(temp))
return ans
except:
return 'Pay attion, also I can not divide by zero :('
#if __name__=="__main__":
# print(Proportion.p roportion([5,9,10,12,6,3,4]))
| import math
class Proportion:
def proportion(CSValues):
try:
ans = []
total = sum(CSValues)
for i in CSValues:
temp = i/total
ans.append('{:.4f}'.format(temp))
return ans
except:
return 'Pay attion, also I can not divide by zero :('
#if __name__=="__main__":
# print(Proportion.p roportion([5,9,10,12,6,3,4]))
| en | 0.301225 | #if __name__=="__main__": # print(Proportion.p roportion([5,9,10,12,6,3,4])) | 3.38008 | 3 |
brim/test/unit/test_wsgi_echo.py | gholt/python-brim | 0 | 6615841 | """Tests for brim.wsgi_echo."""
"""Copyright and License.
Copyright 2012-2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from StringIO import StringIO
from unittest import main, TestCase
from brim import wsgi_echo
from brim.conf import Conf
class FakeStats(object):
def __init__(self):
self.stats = {}
def get(self, name):
return self.stats.get(name, 0)
def set(self, name, value):
self.stats[name] = value
def incr(self, name):
self.stats[name] = self.stats.get(name, 0) + 1
class TestWSGIEcho(TestCase):
def setUp(self):
self.next_app_calls = []
self.start_response_calls = []
def _next_app(env, start_response):
self.next_app_calls.append((env, start_response))
start_response('204 No Content', ('Content-Length', '0'))
return []
def _start_response(*args):
self.start_response_calls.append(args)
self.next_app = _next_app
self.start_response = _start_response
self.env = {'PATH_INFO': '/testpath', 'brim.stats': FakeStats(),
'wsgi.input': StringIO('testbody')}
self.parsed_conf = {'path': '/testpath', 'max_echo': 10}
def test_call_ignores_non_path(self):
self.env['PATH_INFO'] = '/'
wsgi_echo.WSGIEcho('test', self.parsed_conf,
self.next_app)(self.env, self.start_response)
self.assertEqual(
self.next_app_calls, [(self.env, self.start_response)])
self.assertEqual(self.start_response_calls, [
('204 No Content', ('Content-Length', '0'))])
def test_call_non_path_no_stat_incr(self):
self.env['PATH_INFO'] = '/'
wsgi_echo.WSGIEcho('test', self.parsed_conf, self.next_app)(
self.env, self.start_response)
self.assertEqual(self.env['brim.stats'].get('test.requests'), 0)
def test_call_stat_incr(self):
wsgi_echo.WSGIEcho('test', self.parsed_conf, self.next_app)(
self.env, self.start_response)
self.assertEqual(self.env['brim.stats'].get('test.requests'), 1)
def test_call_echo(self):
body = ''.join(wsgi_echo.WSGIEcho(
'test', self.parsed_conf,
self.next_app)(self.env, self.start_response))
self.assertEqual(self.start_response_calls, [
('200 OK', [('Content-Length', '8')])])
self.assertEqual(body, 'testbody')
def test_call_echo_capped(self):
self.env['wsgi.input'] = StringIO('1234567890123')
body = ''.join(wsgi_echo.WSGIEcho(
'test', self.parsed_conf,
self.next_app)(self.env, self.start_response))
self.assertEqual(self.start_response_calls, [
('200 OK', [('Content-Length', '10')])])
self.assertEqual(body, '1234567890')
def test_call_echo_exception_on_read(self):
del self.env['wsgi.input']
body = ''.join(wsgi_echo.WSGIEcho(
'test', self.parsed_conf,
self.next_app)(self.env, self.start_response))
self.assertEqual(self.start_response_calls, [
('200 OK', [('Content-Length', '0')])])
self.assertEqual(body, '')
def test_parse_conf(self):
c = wsgi_echo.WSGIEcho.parse_conf('test', Conf({}))
self.assertEqual(c, {'path': '/echo', 'max_echo': 65536})
c = wsgi_echo.WSGIEcho.parse_conf(
'test', Conf({'test': {'path': '/blah', 'max_echo': 1}}))
self.assertEqual(c, {'path': '/blah', 'max_echo': 1})
c = wsgi_echo.WSGIEcho.parse_conf(
'test', Conf({'test2': {'path': '/blah', 'max_echo': 1}}))
self.assertEqual(c, {'path': '/echo', 'max_echo': 65536})
def test_stats_conf(self):
self.assertEqual(wsgi_echo.WSGIEcho.stats_conf(
'test', self.parsed_conf), [('test.requests', 'sum')])
if __name__ == '__main__':
main()
| """Tests for brim.wsgi_echo."""
"""Copyright and License.
Copyright 2012-2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from StringIO import StringIO
from unittest import main, TestCase
from brim import wsgi_echo
from brim.conf import Conf
class FakeStats(object):
def __init__(self):
self.stats = {}
def get(self, name):
return self.stats.get(name, 0)
def set(self, name, value):
self.stats[name] = value
def incr(self, name):
self.stats[name] = self.stats.get(name, 0) + 1
class TestWSGIEcho(TestCase):
def setUp(self):
self.next_app_calls = []
self.start_response_calls = []
def _next_app(env, start_response):
self.next_app_calls.append((env, start_response))
start_response('204 No Content', ('Content-Length', '0'))
return []
def _start_response(*args):
self.start_response_calls.append(args)
self.next_app = _next_app
self.start_response = _start_response
self.env = {'PATH_INFO': '/testpath', 'brim.stats': FakeStats(),
'wsgi.input': StringIO('testbody')}
self.parsed_conf = {'path': '/testpath', 'max_echo': 10}
def test_call_ignores_non_path(self):
self.env['PATH_INFO'] = '/'
wsgi_echo.WSGIEcho('test', self.parsed_conf,
self.next_app)(self.env, self.start_response)
self.assertEqual(
self.next_app_calls, [(self.env, self.start_response)])
self.assertEqual(self.start_response_calls, [
('204 No Content', ('Content-Length', '0'))])
def test_call_non_path_no_stat_incr(self):
self.env['PATH_INFO'] = '/'
wsgi_echo.WSGIEcho('test', self.parsed_conf, self.next_app)(
self.env, self.start_response)
self.assertEqual(self.env['brim.stats'].get('test.requests'), 0)
def test_call_stat_incr(self):
wsgi_echo.WSGIEcho('test', self.parsed_conf, self.next_app)(
self.env, self.start_response)
self.assertEqual(self.env['brim.stats'].get('test.requests'), 1)
def test_call_echo(self):
body = ''.join(wsgi_echo.WSGIEcho(
'test', self.parsed_conf,
self.next_app)(self.env, self.start_response))
self.assertEqual(self.start_response_calls, [
('200 OK', [('Content-Length', '8')])])
self.assertEqual(body, 'testbody')
def test_call_echo_capped(self):
self.env['wsgi.input'] = StringIO('1234567890123')
body = ''.join(wsgi_echo.WSGIEcho(
'test', self.parsed_conf,
self.next_app)(self.env, self.start_response))
self.assertEqual(self.start_response_calls, [
('200 OK', [('Content-Length', '10')])])
self.assertEqual(body, '1234567890')
def test_call_echo_exception_on_read(self):
del self.env['wsgi.input']
body = ''.join(wsgi_echo.WSGIEcho(
'test', self.parsed_conf,
self.next_app)(self.env, self.start_response))
self.assertEqual(self.start_response_calls, [
('200 OK', [('Content-Length', '0')])])
self.assertEqual(body, '')
def test_parse_conf(self):
c = wsgi_echo.WSGIEcho.parse_conf('test', Conf({}))
self.assertEqual(c, {'path': '/echo', 'max_echo': 65536})
c = wsgi_echo.WSGIEcho.parse_conf(
'test', Conf({'test': {'path': '/blah', 'max_echo': 1}}))
self.assertEqual(c, {'path': '/blah', 'max_echo': 1})
c = wsgi_echo.WSGIEcho.parse_conf(
'test', Conf({'test2': {'path': '/blah', 'max_echo': 1}}))
self.assertEqual(c, {'path': '/echo', 'max_echo': 65536})
def test_stats_conf(self):
self.assertEqual(wsgi_echo.WSGIEcho.stats_conf(
'test', self.parsed_conf), [('test.requests', 'sum')])
if __name__ == '__main__':
main()
| en | 0.831206 | Tests for brim.wsgi_echo. Copyright and License. Copyright 2012-2014 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 2.205815 | 2 |
masked_lm/utils.py | dom-s/dark-terms | 0 | 6615842 | import os
import pandas as pd
from nltk import word_tokenize
from typing import List, Tuple, Union
def clean_str(s: str) -> str:
s = s.lower()
for pattern in ['\\n', '\\r', '\\t', '\n', '\r', '\t']:
s = s.replace(pattern, '')
s.strip()
return s
def word_window(sequence: str, target: str, size: int) -> Union[Tuple[List[str], List[str]], None]:
"""
Retrieves word windows of 'size' to the left and 'size' to the right.
If size == 0: Take the entire sequence as window
"""
assert size >= 0
tokens = word_tokenize(sequence.lower())
if target not in tokens:
return None
else:
target_idx = tokens.index(target)
left_idx = max(target_idx-size, 0) if size > 0 else 0
right_idx = target_idx+size+1 if size > 0 else len(tokens)
return tokens[left_idx:target_idx], tokens[target_idx+1:right_idx]
def get_dark_term_list(config: dict) -> List[str]:
input_dir = config['data']['resource_dir']
dark_term_csv = config['data']['dark_term_csv']
dark_terms = set(pd.read_csv(os.path.join(input_dir, dark_term_csv))['dark term'])
for kl_div_file in config['data']['kl_div_files']:
dark_terms.update(set(pd.read_csv(os.path.join(input_dir, kl_div_file))['dark_word']))
return list(dark_terms)
| import os
import pandas as pd
from nltk import word_tokenize
from typing import List, Tuple, Union
def clean_str(s: str) -> str:
s = s.lower()
for pattern in ['\\n', '\\r', '\\t', '\n', '\r', '\t']:
s = s.replace(pattern, '')
s.strip()
return s
def word_window(sequence: str, target: str, size: int) -> Union[Tuple[List[str], List[str]], None]:
"""
Retrieves word windows of 'size' to the left and 'size' to the right.
If size == 0: Take the entire sequence as window
"""
assert size >= 0
tokens = word_tokenize(sequence.lower())
if target not in tokens:
return None
else:
target_idx = tokens.index(target)
left_idx = max(target_idx-size, 0) if size > 0 else 0
right_idx = target_idx+size+1 if size > 0 else len(tokens)
return tokens[left_idx:target_idx], tokens[target_idx+1:right_idx]
def get_dark_term_list(config: dict) -> List[str]:
input_dir = config['data']['resource_dir']
dark_term_csv = config['data']['dark_term_csv']
dark_terms = set(pd.read_csv(os.path.join(input_dir, dark_term_csv))['dark term'])
for kl_div_file in config['data']['kl_div_files']:
dark_terms.update(set(pd.read_csv(os.path.join(input_dir, kl_div_file))['dark_word']))
return list(dark_terms)
| en | 0.8933 | Retrieves word windows of 'size' to the left and 'size' to the right. If size == 0: Take the entire sequence as window | 2.888066 | 3 |
experiments/utils/runner.py | ravirahman/sancus | 2 | 6615843 | <gh_stars>1-10
import json
import logging
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import uuid
from contextlib import contextmanager
from csv import writer
from dataclasses import dataclass
from datetime import datetime, timedelta
from decimal import Decimal
from queue import Queue
from types import TracebackType
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
TypeVar,
cast,
)
import bitcoin
import bitcoin.core
import bitcoin.rpc
import dotenv
import grpc
import jwt
import web3
import web3.types
from common.config import GRPCConfig
from common.utils.grpc_channel import make_grpc_channel
from common.utils.soft_webauthn_client import SoftWebauthnClient
from eth_account.account import Account as ETHAccount
from grpc_health.v1.health_pb2 import HealthCheckRequest, HealthCheckResponse
from grpc_health.v1.health_pb2_grpc import HealthStub
from hexbytes.main import HexBytes
from protobufs.institution.account_pb2 import (
AccountResponse,
ListAccountsRequest,
MakeAccountRequest,
)
from protobufs.institution.account_pb2_grpc import AccountStub
from protobufs.institution.auth_pb2 import (
MakeRegistrationChallengeRequest,
RegisterRequest,
)
from protobufs.institution.auth_pb2_grpc import AuthStub
from protobufs.institution.deposit_pb2 import MakeDepositKeyRequest
from protobufs.institution.deposit_pb2_grpc import DepositStub
from protobufs.institution.exchange_pb2 import (
InitiateExchangeRequest,
ProcessExchangeRequest,
)
from protobufs.institution.exchange_pb2_grpc import ExchangeStub
from protobufs.institution.marketdata_pb2 import (
GetLatestProcessedBlockNumberRequest,
GetMarketExchangeRateRequest,
)
from protobufs.institution.marketdata_pb2_grpc import MarketdataStub
from protobufs.institution.withdrawal_pb2 import (
InitiateWithdrawalRequest,
ProcessWithdrawalRequest,
)
from protobufs.institution.withdrawal_pb2_grpc import WithdrawalStub
from protobufs.validator.auditor_pb2 import GetLatestAuditVersionRequest
from protobufs.validator.auditor_pb2_grpc import AuditorStub
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database
from web3.middleware.geth_poa import geth_poa_middleware
from utils.constants import BTC_HOST, MAX_BTC_WORKERS, WEBAUTHN_ORIGIN, get_w3_provider
from utils.contract_deployer import ContractDeployer
from utils.experiment_processor import ExperimentProcessor
from utils.list_rpc import list_rpc_yield
from utils.wait_for_it import wait_for_it
if TYPE_CHECKING:
from protobufs.account_pb2 import AccountType # pylint: disable=ungrouped-imports
# Whether this runner spawns/tears down the docker infra itself.
MANAGE_INFRA = False
# Whether to attach a py-spy sampling profiler to spawned processes.
ENABLE_PY_SPY = False
# Root of the experiments/ package (one level above utils/).
EXPERIMENTS_DIRECTORY = os.path.join(os.path.dirname(__file__), "..")

LOGGER = logging.getLogger(__name__)

# Dotenv file produced by the infra setup with Ethereum connection details.
ETH_DOTENV_PATH = os.path.join(os.path.dirname(__file__), "..", "..", "infra", "output", "eth.env")

# Generic gRPC response type used by typed helper signatures below.
TResponse = TypeVar("TResponse")

if ENABLE_PY_SPY:
    # shutil.which returns Optional[str]; assert-then-rebind narrows the
    # public PY_SPY constant to a plain str for type checkers.
    _PY_SPY = shutil.which("py-spy")
    assert _PY_SPY is not None
    PY_SPY = _PY_SPY

# Module-import side effect: point python-bitcoinlib at the regtest network.
bitcoin.SelectParams("regtest")
@dataclass
class Account:
    """A single account a user holds at the institution."""

    account_id: bytes  # opaque account identifier returned by the institution
    account_type: int  # presumably a protobufs AccountType enum value (see TYPE_CHECKING import) -- confirm
    currency: str  # currency code for the account (e.g. "BTC", "ETH")
    deposit_addresses: List[str]  # on-chain deposit addresses issued for this account
@dataclass
class User:
    """A registered test user plus the gRPC stubs bound to its session."""

    user_id: bytes  # opaque user identifier assigned at registration
    # Accounts indexed by (currency, account_type) for quick selection.
    currency_and_account_type_to_accounts: Dict[Tuple[str, int], List[Account]]
    # Same accounts indexed by their account_id.
    account_id_to_account: Dict[bytes, Account]
    grpc_channel: grpc.Channel  # authenticated channel shared by the stubs below
    deposit_stub: DepositStub
    account_stub: AccountStub
    exchange_stub: ExchangeStub
    marketdata_stub: MarketdataStub
    withdrawal_stub: WithdrawalStub
    username: str  # human-readable username chosen at registration
def _get_erc20_abi() -> str:
    """Load the ERC-20 contract ABI bundled next to this module.

    Returns:
        The raw JSON text of ``erc20abi.json`` (kept as a string because
        web3 accepts a JSON string for the ``abi`` argument).
    """
    # Explicit encoding: the default would otherwise depend on the
    # platform locale, which can mis-decode the JSON on some systems.
    with open(os.path.join(os.path.dirname(__file__), "erc20abi.json"), "r", encoding="utf-8") as f:
        return f.read()


# Read once at import time; the ABI file never changes during a run.
ERC20_ABI = _get_erc20_abi()
class Runner:
    """Drives one end-to-end experiment.

    Spins up (or attaches to) the docker-compose infra, creates fresh
    databases, deploys contracts, launches the backend and auditor as
    subprocesses over unix-socket gRPC, and records logs and profiling output
    under a per-run results directory. Intended for use as a context manager;
    close() / __exit__ performs an orderly shutdown.
    """

    def __init__(
        self, experiment_name: str, *, account_anonymity_set_size: int, deposit_key_decoy_set_size: int
    ) -> None:
        self.experiment_name = experiment_name
        current_time = datetime.now().isoformat()
        # Tag reused in MySQL database names; ':' and '.' are unsafe there.
        self.experiment_tag = f"{self.experiment_name}-{current_time.replace(':','-').replace('.','-')}"
        self.output_dir = os.path.join(os.path.dirname(__file__), "..", "results", self.experiment_name, current_time)
        os.makedirs(self.output_dir)
        logging.getLogger("__main__").setLevel(logging.DEBUG)
        logging.getLogger("experiments").setLevel(logging.DEBUG)
        logging.getLogger("utils").setLevel(logging.DEBUG)
        logging.basicConfig(
            filename=os.path.join(self.output_dir, "experiment.log"),
            filemode="x",  # "x": fail loudly if the log file already exists
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        )
        # One output subdirectory per component.
        self.backend_output_dir = os.path.join(self.output_dir, "backend")
        os.makedirs(self.backend_output_dir)
        self.auditgen_output_dir = os.path.join(self.output_dir, "auditgen")
        os.makedirs(self.auditgen_output_dir)
        self.auditor_output_dir = os.path.join(self.output_dir, "auditor")
        os.makedirs(self.auditor_output_dir)
        self.infra_output_dir = os.path.join(self.output_dir, "infra")
        os.makedirs(self.infra_output_dir)
        self.profile_output_dir = os.path.join(self.output_dir, "profile")
        os.makedirs(self.profile_output_dir)
        self.audit_counter = 0
        # Counting semaphore limiting concurrent bitcoin RPC proxies (see get_btc).
        self.btc_proxy_queue: "Queue[None]" = Queue(MAX_BTC_WORKERS)
        for _ in range(MAX_BTC_WORKERS):
            self.btc_proxy_queue.put_nowait(None)
        self.infra_stdout_file = open(os.path.join(self.infra_output_dir, "stdout.log"), "x")
        self.infra_stderr_file = open(os.path.join(self.infra_output_dir, "stderr.log"), "x")
        LOGGER.info("Waiting for the infra to spin up")
        self.infra_proc = self.create_infra()
        # Wait for MySQL, bitcoind (regtest), IPFS, and geth respectively.
        wait_for_it("localhost", 3306, timedelta(seconds=15))
        wait_for_it("localhost", 18444, timedelta(seconds=15))
        wait_for_it("localhost", 5001, timedelta(seconds=15))
        wait_for_it("localhost", 8545, timedelta(seconds=15))
        self.w3 = web3.Web3(provider=get_w3_provider(), middlewares=(geth_poa_middleware,))
        # ensure that we are actually connected to the ethereum node
        LOGGER.info("Attempting to get the eth block number to ensure we are connected to w3")
        self.get_latest_eth_block_number()
        self.auditor_db = f"mysql+pymysql://root:password@127.0.0.1:3306/auditor-{self.experiment_tag}"
        self.backend_db = f"mysql+pymysql://root:password@127.0.0.1:3306/backend-{self.experiment_tag}"

        def create_auditor_database() -> None:
            create_database(self.auditor_db)

        def create_backend_database() -> None:
            create_database(self.backend_db)

        # ensure that we have the databases. it takes 2-3 minutes for the docker mysql to start up
        LOGGER.info("Attempting to create the auditor db")
        self.try_repeat_timeout(create_auditor_database, timedelta(seconds=240))
        LOGGER.info("Attempting to create the backend db")
        self.try_repeat_timeout(create_backend_database, timedelta(seconds=30))
        # TODO ensure that we have the bitcoin node and ipfs node
        assert os.path.exists(ETH_DOTENV_PATH)
        dotenv.load_dotenv(ETH_DOTENV_PATH)
        self.eth_main_address = os.environ["ETH_MAIN_ADDRESS"]
        LOGGER.info("Deploying contracts")
        self.contract_deployer = ContractDeployer(self.eth_main_address)
        gusd_contract_address, audit_publisher_contract_address = self.contract_deployer.deploy_contracts()
        self.gusd_contract = self.w3.eth.contract(address=gusd_contract_address, abi=ERC20_ABI)
        # Record the chain heads so the backend only scans blocks from this run onward.
        eth_latest_block_number = self.get_latest_eth_block_number()
        LOGGER.info("eth start block number: %d", eth_latest_block_number)
        btc_latest_block_number = self.get_latest_btc_block_number()
        LOGGER.info("btc start block number: %d", btc_latest_block_number)
        # Unix sockets for backend/auditor gRPC live in a temp dir cleaned up in loop().
        self.sock_folder = tempfile.TemporaryDirectory()
        self.backend_sock_abspath = os.path.abspath(os.path.join(self.sock_folder.name, "backend.sock"))
        self.backend_grpc_socket = "unix://" + self.backend_sock_abspath
        LOGGER.info("running backend grpc at %s", self.backend_grpc_socket)
        self.auditor_sock_abspath = os.path.abspath(os.path.join(self.sock_folder.name, "auditor.sock"))
        self.auditor_grpc_socket = "unix://" + self.auditor_sock_abspath
        LOGGER.info("running auditor grpc at %s", self.auditor_grpc_socket)
        auditor_folder = os.path.join(self.auditor_output_dir, "audits")
        os.makedirs(auditor_folder)
        self.experiment_processor = ExperimentProcessor(
            outfile=os.path.join(self.profile_output_dir, "aggregate_data.csv"),
            btc_outfile=os.path.join(self.profile_output_dir, "btc_data.csv"),
            eth_outfile=os.path.join(self.profile_output_dir, "eth_data.csv"),
            experiment_name=self.experiment_name,
            current_time=current_time,
            w3=self.w3,
        )
        # Environment shared by the backend, auditor and auditgen subprocesses.
        self.env_vars: Dict[str, str] = {
            "BACKEND_DB": self.backend_db,
            "BACKEND_LOG_FILE": os.path.join(self.backend_output_dir, "backend.log"),
            "BACKEND_GRPC_SOCKFILE": self.backend_grpc_socket,
            "AUDITOR_GRPC_SOCKFILE": self.auditor_grpc_socket,
            "ETH_START_BLOCK_NUMBER": str(eth_latest_block_number),
            "GUSD_CONTRACT_ADDRESS": gusd_contract_address,
            "BTC_START_BLOCK_NUMBER": str(btc_latest_block_number),
            "AUDIT_PUBLISHER_CONTRACT_ADDRESS": audit_publisher_contract_address,
            "ETH_CONTRACTS_OWNER": self.eth_main_address,
            "ETH_MAIN_ADDRESS": self.eth_main_address,
            # "GRPC_TRACE": "api,call_error,p_failure",
            "GRPC_VERBOSITY": "INFO",
            # "GRPC_STACKTRACE_MINLOGLEVEL": "INFO",
            "AUDITOR_LOG_FILE": os.path.join(self.auditor_output_dir, "auditor.log"),
            "AUDITOR_DB": self.auditor_db,
            "AUDITOR_FOLDER": auditor_folder,
            "PROFILE_DATA_FOLDER": self.profile_output_dir,
            "ACCOUNT_ANONYMITY_SET_SIZE": str(account_anonymity_set_size),
            "DEPOSIT_KEY_DECOY_SET_SIZE": str(deposit_key_decoy_set_size),
            "EXCHANGE_RATE_EPSILON": "1000000",  # effectively disable exchange rate validation
        }
        self.stopped = False
        self.users: List[User] = []
        self.soft_webauthn = SoftWebauthnClient(WEBAUTHN_ORIGIN)
        # Watchdog thread (see loop()); started at the end of __init__.
        self.background_job = threading.Thread(target=self.loop)
        # let's start the docker compose
        backend_pstats = os.path.join(self.backend_output_dir, "backend-profile.svg")
        # Optionally pin the backend to specific CPUs via taskset.
        command_prefix = ["taskset", "-ac", os.environ["BACKEND_CPUS"]] if "BACKEND_CPUS" in os.environ else []
        if ENABLE_PY_SPY:
            command_prefix.extend(
                [
                    PY_SPY,
                    "record",
                    "-o",
                    backend_pstats,
                    "--rate",
                    "20",
                    "--nonblocking",
                    "--",
                ]
            )
        command = [
            *command_prefix,
            sys.executable,
            "-m",
            "utils.backend",
        ]
        LOGGER.info(
            "Starting backend with command: cd %s; %s %s",
            EXPERIMENTS_DIRECTORY,
            " ".join([f"{name}={value}" for (name, value) in self.env_vars.items()]),
            " ".join(command),
        )
        self.backend_stdout_file = open(os.path.join(self.backend_output_dir, "stdout.log"), "x")
        self.backend_stderr_file = open(os.path.join(self.backend_output_dir, "stderr.log"), "x")
        self.backend_proc = subprocess.Popen(
            command,
            cwd=EXPERIMENTS_DIRECTORY,
            stdout=self.backend_stdout_file,
            stderr=self.backend_stderr_file,
            env=self.env_vars,
            universal_newlines=True,
        )
        self.auditor_stdout_file = open(os.path.join(self.auditor_output_dir, "stdout.log"), "x")
        self.auditor_stderr_file = open(os.path.join(self.auditor_output_dir, "stderr.log"), "x")
        command_prefix = ["taskset", "-ac", os.environ["AUDITOR_CPUS"]] if "AUDITOR_CPUS" in os.environ else []
        auditor_pstats = os.path.join(self.auditor_output_dir, "auditor-profile.svg")
        # NOTE(review): auditor_cwd is computed but the Popen below uses
        # EXPERIMENTS_DIRECTORY — confirm which cwd is intended.
        auditor_cwd = os.path.join(os.path.dirname(__file__), "..", "..", "auditor")
        if ENABLE_PY_SPY:
            command_prefix.extend(
                [
                    PY_SPY,
                    "record",
                    "-o",
                    auditor_pstats,
                    "--rate",
                    "20",
                    "--nonblocking",
                    "--",
                ]
            )
        command = [
            *command_prefix,
            sys.executable,
            "-m",
            "utils.auditor",
        ]
        LOGGER.info(
            "Starting auditor with command: cd %s; %s %s",
            auditor_cwd,
            " ".join([f"{name}={value}" for (name, value) in self.env_vars.items()]),
            " ".join(command),
        )
        self.auditor_proc = subprocess.Popen(
            command,
            cwd=EXPERIMENTS_DIRECTORY,
            stdout=self.auditor_stdout_file,
            stderr=self.auditor_stderr_file,
            env=self.env_vars,
            universal_newlines=True,
        )
        LOGGER.info("Checking for backend sockfile")

        def check_for_backend_sockfile() -> None:
            # The backend creates its unix socket once its gRPC server is up.
            if not os.path.exists(self.backend_sock_abspath):
                LOGGER.info("Waiting for backend sockfile")
                raise Exception("Waiting for backend sockfile")

        self.try_repeat_timeout(check_for_backend_sockfile, timedelta(minutes=5))
        LOGGER.info("backend sockfile exists")
        self.backend_grpc_config = GRPCConfig(
            host=self.backend_grpc_socket,
            max_workers=10,
        )
        self.unauthenticated_channel = make_grpc_channel(self.backend_grpc_config)
        self.auth_stub = AuthStub(self.unauthenticated_channel)
        self.backend_health_stub = HealthStub(self.unauthenticated_channel)
        self.marketdata_stub = MarketdataStub(self.unauthenticated_channel)

        def health_check_backend() -> None:
            request = HealthCheckRequest(service="sancus.institution.Auth")
            resp = self.backend_health_stub.Check(request)
            if resp.status != HealthCheckResponse.SERVING:
                LOGGER.info("Not yet serving backend")
                raise Exception("Not yet serving")

        self.try_repeat_timeout(health_check_backend, timedelta(seconds=10))
        LOGGER.info("Serving backend")

        def check_for_auditor_sockfile() -> None:
            if not os.path.exists(self.auditor_sock_abspath):
                LOGGER.info("Waiting for auditor sockfile")
                raise Exception("Waiting for auditor sockfile")

        self.try_repeat_timeout(check_for_auditor_sockfile, timedelta(minutes=5))
        LOGGER.info("Found auditor sockfile")
        self.auditor_grpc_config = GRPCConfig(
            host=self.auditor_grpc_socket,
            max_workers=10,
        )
        self.auditor_channel = make_grpc_channel(self.auditor_grpc_config)
        self.auditor_stub = AuditorStub(self.auditor_channel)
        self.auditor_health_stub = HealthStub(self.auditor_channel)

        def health_check_auditor() -> None:
            request = HealthCheckRequest(service="sancus.validator.Auditor")
            resp = self.auditor_health_stub.Check(request)
            if resp.status != HealthCheckResponse.SERVING:
                LOGGER.info("Not yet serving auditor")
                raise Exception("Not yet serving auditor")

        self.try_repeat_timeout(health_check_auditor, timedelta(minutes=5))
        LOGGER.info("Serving auditor")
        # Start the watchdog only after everything is healthy.
        self.background_job.start()
def deposit(self, address: str, currency: str, amount: Decimal) -> HexBytes:
LOGGER.info("Depositing %s %s into %s", amount, currency, address)
if currency == "GUSD":
tx_params = self.gusd_contract.functions.transfer(address, int(amount * 100)).buildTransaction(
{
"from": self.eth_main_address,
}
)
txn_hash = self.w3.eth.send_transaction(tx_params)
return HexBytes(txn_hash)
if currency == "ETH":
txn_hash = self.w3.eth.send_transaction(
{
"from": self.eth_main_address,
"to": address,
"value": int(amount * 10 ** 18),
}
)
return HexBytes(txn_hash)
if currency == "BTC":
with self.get_btc() as proxy:
txn_hash = proxy.sendtoaddress(address, int(amount * bitcoin.core.COIN))
return HexBytes(txn_hash)
raise ValueError("Invalid currency")
    def wait_for_tx(self, currency: str, transaction_id: HexBytes) -> int:
        """Poll (up to 5 minutes) until *transaction_id* is mined on *currency*'s chain."""
        # returns the block number containing the transaction
        LOGGER.info("waiting for %s %s", currency, transaction_id.hex())
        if currency == "BTC":

            def check_for_transaction() -> int:
                with self.get_btc() as proxy:
                    tx = proxy.getrawtransaction(transaction_id, verbose=True)
                    if tx["blockhash"] is None:
                        # Not mined yet; try_repeat_timeout retries on exception.
                        raise Exception(f"tx {transaction_id.hex()} not in chain")
                    block_header = proxy.getblockheader(tx["blockhash"], verbose=True)
                    block_number: int = block_header["height"]
                    return block_number

        elif currency in ("GUSD", "ETH"):

            def check_for_transaction() -> int:
                # Raises (and is retried) until the receipt is available.
                tx_receipt = cast(web3.types.TxReceipt, self.w3.eth.getTransactionReceipt(transaction_id))
                block_number: int = tx_receipt.blockNumber
                return block_number

        else:
            raise ValueError(f"Unknown currency {currency}")
        block_number = self.try_repeat_timeout(check_for_transaction, timedelta(minutes=5))
        LOGGER.info("transaction %s %s has block number %d", currency, transaction_id.hex(), block_number)
        return block_number
@staticmethod
def make_deposit_key(user: User, account_id: bytes) -> None:
LOGGER.info("Making deposit key for user(%s), account(%s)", user.username, account_id.hex())
deposit_key_request = MakeDepositKeyRequest(accountId=account_id)
deposit_key_response = user.deposit_stub.MakeDepositKey(deposit_key_request)
user.account_id_to_account[account_id].deposit_addresses.append(deposit_key_response.depositKey.address)
def deposit_into_account(self, account: Account, amount: Decimal) -> HexBytes:
address = account.deposit_addresses[0]
currency = account.currency
return self.deposit(address, currency, amount)
    def create_admin_user(self) -> User:
        """Register the well-known "admin" user and return its handle."""
        return self._create_user("admin")
    def create_user(self) -> User:
        """Register a fresh user with a unique random username and return its handle."""
        username = f"user_{uuid.uuid4()}"
        return self._create_user(username)
    def _create_user(self, username: str) -> User:
        """Register *username* via the webauthn flow and build a fully-stubbed User.

        Performs challenge/attestation registration, opens an authenticated
        channel with the returned JWT, and indexes the user's default accounts.
        """
        LOGGER.info("Creating user %s", username)
        assert self.auth_stub is not None
        # Webauthn registration: challenge -> soft-authenticator attestation -> register.
        registration_challenge_response = self.auth_stub.MakeRegistrationChallenge(
            MakeRegistrationChallengeRequest(username=username),
        )
        attestation = self.soft_webauthn.create_credential(registration_challenge_response.credentialRequest)
        register_response = self.auth_stub.Register(
            RegisterRequest(
                challengeNonce=registration_challenge_response.challengeRequest.nonce,
                attestation=attestation,
            )
        )
        user_jwt = register_response.jwt
        user_channel = make_grpc_channel(self.backend_grpc_config, user_jwt)
        account_stub = AccountStub(user_channel)
        # Index the accounts the backend auto-created for this user.
        account_id_to_account: Dict[bytes, Account] = {}
        currency_and_account_type_to_accounts: Dict[Tuple[str, int], List[Account]] = {}
        for account_response in list_rpc_yield(ListAccountsRequest(), account_stub.ListAccounts):
            assert isinstance(account_response, AccountResponse)
            account_id = account_response.id
            currency = account_response.currency
            account_type = account_response.accountType
            account = Account(account_id=account_id, account_type=account_type, currency=currency, deposit_addresses=[])
            if (currency, account_type) not in currency_and_account_type_to_accounts:
                currency_and_account_type_to_accounts[currency, account_type] = []
            currency_and_account_type_to_accounts[currency, account_type].append(account)
            account_id_to_account[account_id] = account
        user = User(
            # The JWT subject carries the hex-encoded user id; signature not re-verified here.
            user_id=bytes.fromhex(jwt.decode(user_jwt, options={"verify_signature": False})["sub"]),
            currency_and_account_type_to_accounts=currency_and_account_type_to_accounts,
            account_id_to_account=account_id_to_account,
            grpc_channel=user_channel,
            deposit_stub=DepositStub(user_channel),
            account_stub=account_stub,
            exchange_stub=ExchangeStub(user_channel),
            marketdata_stub=MarketdataStub(user_channel),
            withdrawal_stub=WithdrawalStub(user_channel),
            username=username,
        )
        # Tracked so close() can shut the channel down.
        self.users.append(user)
        return user
def get_latest_block_processed(self, currency: str) -> int:
if currency in ("GUSD", "ETH"):
return self.marketdata_stub.GetLatestProcessedBlockNumber(
GetLatestProcessedBlockNumberRequest(blockchain="ETH")
).blockNumber
if currency == "BTC":
return self.marketdata_stub.GetLatestProcessedBlockNumber(
GetLatestProcessedBlockNumberRequest(blockchain="BTC")
).blockNumber
raise ValueError(f"Invalid currency: {currency}")
    def ensure_block_processed(
        self,
        currency: str,
        timeout: timedelta,
        minimum_block_number: Optional[int] = None,
    ) -> None:
        """Block until the backend has processed *minimum_block_number* on *currency*'s chain.

        When *minimum_block_number* is None, waits for one block beyond the
        currently processed one. Raises if *timeout* elapses first.
        """
        # ensures that at least one block for both bitcoin and ethereum are processed
        deadline = datetime.now() + timeout

        def get_currency_block_processed() -> int:
            return self.get_latest_block_processed(currency)

        if minimum_block_number is None:
            # Default target: one block past whatever is processed right now.
            start_block_number = self.try_repeat_timeout(get_currency_block_processed, timeout)
            minimum_block_number = start_block_number + 1
        LOGGER.info("Waiting for backend to process block %s for currency %s", minimum_block_number, currency)
        # Poll once per second until the target block is reached or the deadline passes.
        while datetime.now() < deadline:
            new_block_number = get_currency_block_processed()
            if new_block_number >= minimum_block_number:
                LOGGER.info(
                    "Backend finished processing block %s >= %s for currency %s",
                    new_block_number,
                    minimum_block_number,
                    currency,
                )
                return
            LOGGER.info(
                "Backend finished processing block %s < %s for currency %s; sleeping 1 second",
                new_block_number,
                minimum_block_number,
                currency,
            )
            time.sleep(1)
        raise Exception("Failed to process blocks before timeout")
    def exchange(self, user: User, from_account_id: bytes, to_account_id: bytes, amount: Decimal) -> None:
        """Exchange *amount* between two of *user*'s accounts at the current market rate.

        Flow: fetch a signed exchange-rate JWT, initiate the exchange, sign the
        returned challenge with the soft webauthn authenticator, then process it.
        """
        LOGGER.info(
            "Exchanging %s from account %s to account %s for user %s",
            amount,
            from_account_id.hex(),
            to_account_id.hex(),
            user.username,
        )
        from_currency = user.account_id_to_account[from_account_id].currency
        to_currency = user.account_id_to_account[to_account_id].currency
        exchange_rate_request = GetMarketExchangeRateRequest(fromCurrency=from_currency, toCurrency=to_currency)
        exchange_rate_response = user.marketdata_stub.GetMarketExchangeRate(exchange_rate_request)
        initiate_exchange_request = InitiateExchangeRequest(
            exchangeRateJWT=exchange_rate_response.exchangeRateJWT,
            amount=str(amount),
            fromAccountId=from_account_id,
            toAccountId=to_account_id,
        )
        initiate_exchange_response = user.exchange_stub.InitiateExchange(initiate_exchange_request)
        # Second factor: user signs the exchange challenge before it is processed.
        exchange_assertion = self.soft_webauthn.request_assertion(
            initiate_exchange_response.challengeRequest, initiate_exchange_response.credentialRequest
        )
        process_exchange_request = ProcessExchangeRequest(
            id=initiate_exchange_response.id, assertion=exchange_assertion
        )
        user.exchange_stub.ProcessExchange(process_exchange_request)
    def withdraw(self, user: User, from_account_id: bytes, amount: Decimal) -> str:
        """Withdraw *amount* from *from_account_id* to a freshly generated external address.

        Returns the destination address so callers can poll for the on-chain arrival
        (see wait_for_withdrawal).
        """
        LOGGER.info("Withdrawing %s from account %s for user %s", amount, from_account_id.hex(), user.username)
        currency = user.account_id_to_account[from_account_id].currency
        if currency in ("ETH", "GUSD"):
            account = ETHAccount.create()  # pylint: disable=no-value-for-parameter
            destination_address = str(account.address)
        elif currency == "BTC":

            def get_address() -> str:
                # need to use the proxy, rather than doing it locally, so the address is in the wallet
                # and we can get the balance
                with self.get_btc() as proxy:
                    return str(proxy.getnewaddress())

            destination_address = self.try_repeat_timeout(get_address, timeout=timedelta(minutes=5))
        else:
            raise ValueError("invalid account currency")
        initiate_request = InitiateWithdrawalRequest(
            amount=str(amount),
            fromAccountId=from_account_id,
            destinationAddress=destination_address,
        )
        initiate_response = user.withdrawal_stub.InitiateWithdrawal(initiate_request)
        # Second factor: user signs the withdrawal challenge before it is processed.
        withdrawal_assertion = self.soft_webauthn.request_assertion(
            initiate_response.challengeRequest, initiate_response.credentialRequest
        )
        process_request = ProcessWithdrawalRequest(id=initiate_response.id, assertion=withdrawal_assertion)
        user.withdrawal_stub.ProcessWithdrawal(process_request)
        return destination_address
def get_chain_balance(self, currency: str, address: str) -> Decimal:
if currency == "ETH":
def get_bal() -> Decimal:
return Decimal(self.w3.eth.get_balance(address, "latest")) / Decimal(10 ** 18)
elif currency == "GUSD":
def get_bal() -> Decimal:
return Decimal(
self.gusd_contract.functions.balanceOf(address).call(block_identifier="latest")
) / Decimal(10 ** 2)
elif currency == "BTC":
def get_bal() -> Decimal:
with self.get_btc() as proxy:
return Decimal(proxy.getreceivedbyaddress(address)) / Decimal(10 ** 9)
else:
raise ValueError(f"Unknown currency: {currency}")
return self.try_repeat_timeout(get_bal, timeout=timedelta(minutes=5))
def wait_for_withdrawal(
self,
currency: str,
address: str,
amount: Decimal,
timeout: timedelta,
) -> None:
def check() -> None:
chain_amount = self.get_chain_balance(currency, address)
if chain_amount < amount:
raise Exception(f"Chain amount {chain_amount} < expected amount {amount}")
self.try_repeat_timeout(check, timeout)
def audit(self, timeout: timedelta = timedelta(minutes=30)) -> None:
self.audit_counter += 1 # audit versions are 1-indexed
auditgen_output_dir = os.path.join(self.auditgen_output_dir, f"audit_{self.audit_counter}")
auditgen_pstats = os.path.join(auditgen_output_dir, "auditgen-profile.svg")
command_prefix = ["taskset", "-ac", os.environ["AUDITGEN_CPUS"]] if "AUDITGEN_CPUS" in os.environ else []
check = True
if ENABLE_PY_SPY:
command_prefix.extend(
[
PY_SPY,
"record",
"-o",
auditgen_pstats,
"--",
]
)
check = False # there's a bug with check py-spy -- the return code isn't properly set
command = [
*command_prefix,
sys.executable,
"-m",
"utils.auditgen",
f"--output_directory={auditgen_output_dir}",
]
LOGGER.info(
"Auditing with command cd %s; %s %s",
EXPERIMENTS_DIRECTORY,
" ".join([f"{name}={value}" for (name, value) in self.env_vars.items()]),
" ".join(command),
)
# call auditgen via subprocess
# Generate, publish, and validate an audit
os.makedirs(auditgen_output_dir)
with open(os.path.join(auditgen_output_dir, "stdout.log"), "x") as stdout_file:
with open(os.path.join(auditgen_output_dir, "stderr.log"), "x") as stderr_file:
# there's an issue where py-spy changes the exit code, so currently ignoring it
subprocess.run(
command,
cwd=EXPERIMENTS_DIRECTORY,
stdout=stdout_file,
stderr=stderr_file,
env=self.env_vars,
universal_newlines=True,
check=check,
)
audit_version = self.audit_counter
# wait for the audit to finish
def check_audit_version() -> None:
resp = self.auditor_stub.GetLatestAuditVersion(GetLatestAuditVersionRequest())
if resp.version < audit_version:
raise Exception(f"audit version {resp.version} < desired audit version audit_version")
self.try_repeat_timeout(check_audit_version, timeout)
# record the size of the auditor DB
def record_db_size() -> None:
db_names = {"auditor": self.auditor_db, "backend": self.backend_db}
for name in db_names:
statement = "SELECT table_schema, table_name, data_length, index_length FROM information_schema.tables"
profile_data_folder = self.env_vars["PROFILE_DATA_FOLDER"]
output_dir = os.path.join(profile_data_folder, f"{name}_db")
engine = create_engine(db_names[name])
with engine.connect() as con:
res = con.execute(statement)
all_tables = res.fetchall()
key = f"{name}-{self.experiment_tag}"
filtered_res = [t for t in all_tables if t[0] == key]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
with open(f"{output_dir}/{name}_db_size.csv", "a") as f:
writer_object = writer(f)
if self.audit_counter == 1:
writer_object.writerow(
["audit_version", "table_schema", "table_name", "data_length", "index_length"]
)
for t in filtered_res:
writer_object.writerow([self.audit_counter] + list(t))
self.try_repeat_timeout(record_db_size, timeout)
LOGGER.info("Auditing %d finished", audit_version)
@staticmethod
def try_repeat_timeout(func: Callable[[], TResponse], timeout: timedelta) -> TResponse:
deadline = datetime.now() + timeout
while True:
try:
return func()
except Exception as e:
if datetime.now() < deadline:
# LOGGER.info("Check failed; sleeping 1 second and trying again")
time.sleep(1)
continue
LOGGER.error("Try-repeat-timeout failed", exc_info=True)
raise Exception("Try-repeat-timeout failed") from e
@staticmethod
def make_account(user: User, currency: str, account_type: "AccountType.V") -> None:
request = MakeAccountRequest(accountType=account_type, currency=currency)
response = user.account_stub.MakeAccount(request)
account_id = response.accountId
account = Account(account_id=account_id, account_type=account_type, currency=currency, deposit_addresses=[])
user.account_id_to_account[account_id] = account
if (currency, account_type) not in user.currency_and_account_type_to_accounts:
user.currency_and_account_type_to_accounts[currency, account_type] = []
user.currency_and_account_type_to_accounts[currency, account_type].append(account)
    def create_infra(self) -> "Optional[subprocess.Popen[str]]":
        """When MANAGE_INFRA is set, tear down and relaunch the docker-compose infra.

        Returns the running `docker-compose up` process, or None when infra
        management is disabled (the infra is then assumed to be running).
        """
        if MANAGE_INFRA:
            infra_env_vars: Dict[str, str] = {}

            def conditional_merge(key: str) -> None:
                # Forward CPU-pinning variables only when the caller set them.
                if key in os.environ:
                    infra_env_vars[key] = os.environ[key]

            conditional_merge("MYSQL_CPUS")
            conditional_merge("GETH_CPUS")
            conditional_merge("IPFS_CPUS")
            conditional_merge("BITCOIN_CORE_CPUS")
            conditional_merge("BITCOIN_MINER_CPUS")
            infra_file = os.environ.get("INFRA_COMPOSE_FILE", "docker-compose.yml")
            # stop the existing infra
            LOGGER.info("Stopping the existing infra")
            subprocess.check_call(
                [
                    "/usr/local/bin/docker-compose",
                    "-f",
                    infra_file,
                    "down",
                    "-v",  # also remove volumes so each run starts clean
                ],
                cwd=os.path.join(os.path.dirname(__file__), "..", "..", "infra"),
                env=infra_env_vars,
                universal_newlines=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            LOGGER.info("Stopped the existing infra")
            command_prefix = ["taskset", "-ac", os.environ["INFRA_CPUS"]] if "INFRA_CPUS" in os.environ else []
            command = [
                *command_prefix,
                "/usr/local/bin/docker-compose",
                "-f",
                infra_file,
                "up",
                "--build",
            ]
            LOGGER.info("Starting infra with command %s", " ".join(command))
            return subprocess.Popen(
                command,
                cwd=os.path.join(os.path.dirname(__file__), "..", "..", "infra"),
                stdout=self.infra_stdout_file,
                stderr=self.infra_stderr_file,
                env=infra_env_vars,
                universal_newlines=True,
            )
        return None
def get_latest_eth_block_number(self) -> int:
def get_eth_block() -> int:
block_number: int = self.w3.eth.block_number
return block_number
return self.try_repeat_timeout(get_eth_block, timedelta(seconds=30))
def get_latest_btc_block_number(self) -> int:
def btc_block_count() -> int:
with self.get_btc() as proxy:
block_count = proxy.getblockcount()
assert isinstance(block_count, int)
return block_count
return self.try_repeat_timeout(btc_block_count, timedelta(seconds=60))
    @contextmanager
    def get_btc(self) -> Generator[bitcoin.rpc.Proxy, None, None]:  # type: ignore[misc]
        """Yield a bitcoin RPC proxy, bounding concurrency to MAX_BTC_WORKERS.

        The queue acts as a counting semaphore: a token is taken before the
        proxy is created and returned after it is closed. Raises queue.Empty
        if no token becomes available within 30 seconds.
        """
        self.btc_proxy_queue.get(timeout=30)  # get a "Lock" for a proxy from the pool
        try:
            # A fresh proxy per use: the underlying connection is not reusable
            # across calls, so pooling tokens (not proxies) is deliberate.
            proxy = bitcoin.rpc.Proxy(BTC_HOST, timeout=60)
            try:
                yield proxy
            finally:
                proxy.close()
        finally:
            # Always return the token, even if proxy construction failed.
            self.btc_proxy_queue.put_nowait(None)
def dump_env_vars(self) -> None:
with open(os.path.join(self.profile_output_dir, "env_vars.json"), "w") as env_vars_file:
json.dump(self.env_vars, env_vars_file)
    def __enter__(self) -> "Runner":
        """Context-manager entry; all setup already happened in __init__."""
        return self
    def close(self) -> None:
        """Orderly shutdown: flush profile output, close channels, stop the watchdog.

        Setting self.stopped makes loop() exit its poll loop; loop() then
        SIGINTs the child processes and closes the log files, so joining the
        background thread completes the teardown.
        """
        LOGGER.info("Attempting an orderly shutdown")
        LOGGER.info("Dumping Envionment Variables")
        self.dump_env_vars()
        LOGGER.info("Processing Profile Data")
        self.experiment_processor.execute_script()
        LOGGER.info("Closing user channels")
        for user in self.users:
            user.grpc_channel.close()
        self.users.clear()
        LOGGER.info("Marking backend as stopped")
        self.stopped = True
        self.unauthenticated_channel.close()
        self.auditor_channel.close()
        LOGGER.info("Joining the background job")
        self.background_job.join()
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Context-manager exit: always close; log success only when no exception occurred."""
        self.close()
        if exc_type is None:
            LOGGER.info("Experiment %s finished", self.experiment_name)
def loop(self) -> None:
error = False
while not self.stopped:
if self.backend_proc.poll() is not None:
LOGGER.error("Backend crashed")
error = True
break
if self.auditor_proc.poll() is not None:
LOGGER.error("Auditor crashed")
error = True
break
if self.infra_proc is not None and self.infra_proc.poll() is not None:
LOGGER.error("Infra crashed")
error = True
break
time.sleep(1)
LOGGER.info("Sending sigint to background process")
self.backend_proc.send_signal(signal.SIGINT)
LOGGER.info("Sending sigint to auditor process")
self.auditor_proc.send_signal(signal.SIGINT)
try:
LOGGER.info("Waiting 60 seconds for background process to respond to SIGINT")
self.backend_proc.wait(60)
except subprocess.TimeoutExpired:
self.backend_proc.kill()
try:
LOGGER.info("Waiting 60 seconds for auditor process to terminate")
self.auditor_proc.wait()
except subprocess.TimeoutExpired:
self.auditor_proc.kill()
if self.infra_proc is not None:
LOGGER.info("Killing the infra proc")
self.infra_proc.kill()
if not self.backend_stderr_file.closed:
self.backend_stderr_file.close()
if not self.backend_stdout_file.closed:
self.backend_stdout_file.close()
if not self.auditor_stdout_file.closed:
self.auditor_stdout_file.close()
if not self.auditor_stderr_file.closed:
self.auditor_stderr_file.close()
if not self.infra_stderr_file.closed:
self.infra_stderr_file.close()
if not self.infra_stdout_file.closed:
self.infra_stdout_file.close()
self.sock_folder.cleanup()
if error:
# need to terminate the current process if error, since this is in the background loop
os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
import json
import logging
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import uuid
from contextlib import contextmanager
from csv import writer
from dataclasses import dataclass
from datetime import datetime, timedelta
from decimal import Decimal
from queue import Queue
from types import TracebackType
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
TypeVar,
cast,
)
import bitcoin
import bitcoin.core
import bitcoin.rpc
import dotenv
import grpc
import jwt
import web3
import web3.types
from common.config import GRPCConfig
from common.utils.grpc_channel import make_grpc_channel
from common.utils.soft_webauthn_client import SoftWebauthnClient
from eth_account.account import Account as ETHAccount
from grpc_health.v1.health_pb2 import HealthCheckRequest, HealthCheckResponse
from grpc_health.v1.health_pb2_grpc import HealthStub
from hexbytes.main import HexBytes
from protobufs.institution.account_pb2 import (
AccountResponse,
ListAccountsRequest,
MakeAccountRequest,
)
from protobufs.institution.account_pb2_grpc import AccountStub
from protobufs.institution.auth_pb2 import (
MakeRegistrationChallengeRequest,
RegisterRequest,
)
from protobufs.institution.auth_pb2_grpc import AuthStub
from protobufs.institution.deposit_pb2 import MakeDepositKeyRequest
from protobufs.institution.deposit_pb2_grpc import DepositStub
from protobufs.institution.exchange_pb2 import (
InitiateExchangeRequest,
ProcessExchangeRequest,
)
from protobufs.institution.exchange_pb2_grpc import ExchangeStub
from protobufs.institution.marketdata_pb2 import (
GetLatestProcessedBlockNumberRequest,
GetMarketExchangeRateRequest,
)
from protobufs.institution.marketdata_pb2_grpc import MarketdataStub
from protobufs.institution.withdrawal_pb2 import (
InitiateWithdrawalRequest,
ProcessWithdrawalRequest,
)
from protobufs.institution.withdrawal_pb2_grpc import WithdrawalStub
from protobufs.validator.auditor_pb2 import GetLatestAuditVersionRequest
from protobufs.validator.auditor_pb2_grpc import AuditorStub
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database
from web3.middleware.geth_poa import geth_poa_middleware
from utils.constants import BTC_HOST, MAX_BTC_WORKERS, WEBAUTHN_ORIGIN, get_w3_provider
from utils.contract_deployer import ContractDeployer
from utils.experiment_processor import ExperimentProcessor
from utils.list_rpc import list_rpc_yield
from utils.wait_for_it import wait_for_it
if TYPE_CHECKING:
from protobufs.account_pb2 import AccountType # pylint: disable=ungrouped-imports
MANAGE_INFRA = False
ENABLE_PY_SPY = False
EXPERIMENTS_DIRECTORY = os.path.join(os.path.dirname(__file__), "..")
LOGGER = logging.getLogger(__name__)
ETH_DOTENV_PATH = os.path.join(os.path.dirname(__file__), "..", "..", "infra", "output", "eth.env")
TResponse = TypeVar("TResponse")
if ENABLE_PY_SPY:
_PY_SPY = shutil.which("py-spy")
assert _PY_SPY is not None
PY_SPY = _PY_SPY
bitcoin.SelectParams("regtest")
@dataclass
class Account:
    """A single-currency account owned by a user, as tracked by the experiment driver."""

    account_id: bytes  # opaque account identifier returned by the backend
    account_type: int  # AccountType enum value (protobuf)
    currency: str  # e.g. "BTC", "ETH" or "GUSD"
    deposit_addresses: List[str]  # on-chain deposit addresses minted via MakeDepositKey
@dataclass
class User:
    """A registered user together with its authenticated gRPC channel and service stubs."""

    user_id: bytes  # subject id decoded from the user's JWT
    currency_and_account_type_to_accounts: Dict[Tuple[str, int], List[Account]]  # indexed by (currency, account_type)
    account_id_to_account: Dict[bytes, Account]  # indexed by account id
    grpc_channel: grpc.Channel  # authenticated channel; closed by Runner.close()
    deposit_stub: DepositStub
    account_stub: AccountStub
    exchange_stub: ExchangeStub
    marketdata_stub: MarketdataStub
    withdrawal_stub: WithdrawalStub
    username: str
def _get_erc20_abi() -> str:
    """Load the ERC-20 ABI JSON (as a raw string) bundled next to this module.

    The file is read with an explicit UTF-8 encoding so the result does not
    depend on the platform's default locale encoding.
    """
    with open(os.path.join(os.path.dirname(__file__), "erc20abi.json"), "r", encoding="utf-8") as f:
        return f.read()


# Loaded once at import time; shared by every Runner instance.
ERC20_ABI = _get_erc20_abi()
class Runner:
def __init__(
self, experiment_name: str, *, account_anonymity_set_size: int, deposit_key_decoy_set_size: int
) -> None:
self.experiment_name = experiment_name
current_time = datetime.now().isoformat()
self.experiment_tag = f"{self.experiment_name}-{current_time.replace(':','-').replace('.','-')}"
self.output_dir = os.path.join(os.path.dirname(__file__), "..", "results", self.experiment_name, current_time)
os.makedirs(self.output_dir)
logging.getLogger("__main__").setLevel(logging.DEBUG)
logging.getLogger("experiments").setLevel(logging.DEBUG)
logging.getLogger("utils").setLevel(logging.DEBUG)
logging.basicConfig(
filename=os.path.join(self.output_dir, "experiment.log"),
filemode="x",
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
self.backend_output_dir = os.path.join(self.output_dir, "backend")
os.makedirs(self.backend_output_dir)
self.auditgen_output_dir = os.path.join(self.output_dir, "auditgen")
os.makedirs(self.auditgen_output_dir)
self.auditor_output_dir = os.path.join(self.output_dir, "auditor")
os.makedirs(self.auditor_output_dir)
self.infra_output_dir = os.path.join(self.output_dir, "infra")
os.makedirs(self.infra_output_dir)
self.profile_output_dir = os.path.join(self.output_dir, "profile")
os.makedirs(self.profile_output_dir)
self.audit_counter = 0
self.btc_proxy_queue: "Queue[None]" = Queue(MAX_BTC_WORKERS)
for _ in range(MAX_BTC_WORKERS):
self.btc_proxy_queue.put_nowait(None)
self.infra_stdout_file = open(os.path.join(self.infra_output_dir, "stdout.log"), "x")
self.infra_stderr_file = open(os.path.join(self.infra_output_dir, "stderr.log"), "x")
LOGGER.info("Waiting for the infra to spin up")
self.infra_proc = self.create_infra()
wait_for_it("localhost", 3306, timedelta(seconds=15))
wait_for_it("localhost", 18444, timedelta(seconds=15))
wait_for_it("localhost", 5001, timedelta(seconds=15))
wait_for_it("localhost", 8545, timedelta(seconds=15))
self.w3 = web3.Web3(provider=get_w3_provider(), middlewares=(geth_poa_middleware,))
# ensure that we are actually connected to the ethereum node
LOGGER.info("Attempting to get the eth block number to ensure we are connected to w3")
self.get_latest_eth_block_number()
self.auditor_db = f"mysql+pymysql://root:password@127.0.0.1:3306/auditor-{self.experiment_tag}"
self.backend_db = f"mysql+pymysql://root:password@127.0.0.1:3306/backend-{self.experiment_tag}"
def create_auditor_database() -> None:
create_database(self.auditor_db)
def create_backend_database() -> None:
create_database(self.backend_db)
# ensure that we have the databases. it takes 2-3 minutes for the docker mysql to start up
LOGGER.info("Attempting to create the auditor db")
self.try_repeat_timeout(create_auditor_database, timedelta(seconds=240))
LOGGER.info("Attempting to create the backend db")
self.try_repeat_timeout(create_backend_database, timedelta(seconds=30))
# TODO ensure that we have the bitcoin node and ipfs node
assert os.path.exists(ETH_DOTENV_PATH)
dotenv.load_dotenv(ETH_DOTENV_PATH)
self.eth_main_address = os.environ["ETH_MAIN_ADDRESS"]
LOGGER.info("Deploying contracts")
self.contract_deployer = ContractDeployer(self.eth_main_address)
gusd_contract_address, audit_publisher_contract_address = self.contract_deployer.deploy_contracts()
self.gusd_contract = self.w3.eth.contract(address=gusd_contract_address, abi=ERC20_ABI)
eth_latest_block_number = self.get_latest_eth_block_number()
LOGGER.info("eth start block number: %d", eth_latest_block_number)
btc_latest_block_number = self.get_latest_btc_block_number()
LOGGER.info("btc start block number: %d", btc_latest_block_number)
self.sock_folder = tempfile.TemporaryDirectory()
self.backend_sock_abspath = os.path.abspath(os.path.join(self.sock_folder.name, "backend.sock"))
self.backend_grpc_socket = "unix://" + self.backend_sock_abspath
LOGGER.info("running backend grpc at %s", self.backend_grpc_socket)
self.auditor_sock_abspath = os.path.abspath(os.path.join(self.sock_folder.name, "auditor.sock"))
self.auditor_grpc_socket = "unix://" + self.auditor_sock_abspath
LOGGER.info("running auditor grpc at %s", self.auditor_grpc_socket)
auditor_folder = os.path.join(self.auditor_output_dir, "audits")
os.makedirs(auditor_folder)
self.experiment_processor = ExperimentProcessor(
outfile=os.path.join(self.profile_output_dir, "aggregate_data.csv"),
btc_outfile=os.path.join(self.profile_output_dir, "btc_data.csv"),
eth_outfile=os.path.join(self.profile_output_dir, "eth_data.csv"),
experiment_name=self.experiment_name,
current_time=current_time,
w3=self.w3,
)
self.env_vars: Dict[str, str] = {
"BACKEND_DB": self.backend_db,
"BACKEND_LOG_FILE": os.path.join(self.backend_output_dir, "backend.log"),
"BACKEND_GRPC_SOCKFILE": self.backend_grpc_socket,
"AUDITOR_GRPC_SOCKFILE": self.auditor_grpc_socket,
"ETH_START_BLOCK_NUMBER": str(eth_latest_block_number),
"GUSD_CONTRACT_ADDRESS": gusd_contract_address,
"BTC_START_BLOCK_NUMBER": str(btc_latest_block_number),
"AUDIT_PUBLISHER_CONTRACT_ADDRESS": audit_publisher_contract_address,
"ETH_CONTRACTS_OWNER": self.eth_main_address,
"ETH_MAIN_ADDRESS": self.eth_main_address,
# "GRPC_TRACE": "api,call_error,p_failure",
"GRPC_VERBOSITY": "INFO",
# "GRPC_STACKTRACE_MINLOGLEVEL": "INFO",
"AUDITOR_LOG_FILE": os.path.join(self.auditor_output_dir, "auditor.log"),
"AUDITOR_DB": self.auditor_db,
"AUDITOR_FOLDER": auditor_folder,
"PROFILE_DATA_FOLDER": self.profile_output_dir,
"ACCOUNT_ANONYMITY_SET_SIZE": str(account_anonymity_set_size),
"DEPOSIT_KEY_DECOY_SET_SIZE": str(deposit_key_decoy_set_size),
"EXCHANGE_RATE_EPSILON": "1000000", # effectively disable exchange rate validation
}
self.stopped = False
self.users: List[User] = []
self.soft_webauthn = SoftWebauthnClient(WEBAUTHN_ORIGIN)
self.background_job = threading.Thread(target=self.loop)
# let's start the docker compose
backend_pstats = os.path.join(self.backend_output_dir, "backend-profile.svg")
command_prefix = ["taskset", "-ac", os.environ["BACKEND_CPUS"]] if "BACKEND_CPUS" in os.environ else []
if ENABLE_PY_SPY:
command_prefix.extend(
[
PY_SPY,
"record",
"-o",
backend_pstats,
"--rate",
"20",
"--nonblocking",
"--",
]
)
command = [
*command_prefix,
sys.executable,
"-m",
"utils.backend",
]
LOGGER.info(
"Starting backend with command: cd %s; %s %s",
EXPERIMENTS_DIRECTORY,
" ".join([f"{name}={value}" for (name, value) in self.env_vars.items()]),
" ".join(command),
)
self.backend_stdout_file = open(os.path.join(self.backend_output_dir, "stdout.log"), "x")
self.backend_stderr_file = open(os.path.join(self.backend_output_dir, "stderr.log"), "x")
self.backend_proc = subprocess.Popen(
command,
cwd=EXPERIMENTS_DIRECTORY,
stdout=self.backend_stdout_file,
stderr=self.backend_stderr_file,
env=self.env_vars,
universal_newlines=True,
)
self.auditor_stdout_file = open(os.path.join(self.auditor_output_dir, "stdout.log"), "x")
self.auditor_stderr_file = open(os.path.join(self.auditor_output_dir, "stderr.log"), "x")
command_prefix = ["taskset", "-ac", os.environ["AUDITOR_CPUS"]] if "AUDITOR_CPUS" in os.environ else []
auditor_pstats = os.path.join(self.auditor_output_dir, "auditor-profile.svg")
auditor_cwd = os.path.join(os.path.dirname(__file__), "..", "..", "auditor")
if ENABLE_PY_SPY:
command_prefix.extend(
[
PY_SPY,
"record",
"-o",
auditor_pstats,
"--rate",
"20",
"--nonblocking",
"--",
]
)
command = [
*command_prefix,
sys.executable,
"-m",
"utils.auditor",
]
LOGGER.info(
"Starting auditor with command: cd %s; %s %s",
auditor_cwd,
" ".join([f"{name}={value}" for (name, value) in self.env_vars.items()]),
" ".join(command),
)
self.auditor_proc = subprocess.Popen(
command,
cwd=EXPERIMENTS_DIRECTORY,
stdout=self.auditor_stdout_file,
stderr=self.auditor_stderr_file,
env=self.env_vars,
universal_newlines=True,
)
LOGGER.info("Checking for backend sockfile")
def check_for_backend_sockfile() -> None:
if not os.path.exists(self.backend_sock_abspath):
LOGGER.info("Waiting for backend sockfile")
raise Exception("Waiting for backend sockfile")
self.try_repeat_timeout(check_for_backend_sockfile, timedelta(minutes=5))
LOGGER.info("backend sockfile exists")
self.backend_grpc_config = GRPCConfig(
host=self.backend_grpc_socket,
max_workers=10,
)
self.unauthenticated_channel = make_grpc_channel(self.backend_grpc_config)
self.auth_stub = AuthStub(self.unauthenticated_channel)
self.backend_health_stub = HealthStub(self.unauthenticated_channel)
self.marketdata_stub = MarketdataStub(self.unauthenticated_channel)
def health_check_backend() -> None:
request = HealthCheckRequest(service="sancus.institution.Auth")
resp = self.backend_health_stub.Check(request)
if resp.status != HealthCheckResponse.SERVING:
LOGGER.info("Not yet serving backend")
raise Exception("Not yet serving")
self.try_repeat_timeout(health_check_backend, timedelta(seconds=10))
LOGGER.info("Serving backend")
def check_for_auditor_sockfile() -> None:
if not os.path.exists(self.auditor_sock_abspath):
LOGGER.info("Waiting for auditor sockfile")
raise Exception("Waiting for auditor sockfile")
self.try_repeat_timeout(check_for_auditor_sockfile, timedelta(minutes=5))
LOGGER.info("Found auditor sockfile")
self.auditor_grpc_config = GRPCConfig(
host=self.auditor_grpc_socket,
max_workers=10,
)
self.auditor_channel = make_grpc_channel(self.auditor_grpc_config)
self.auditor_stub = AuditorStub(self.auditor_channel)
self.auditor_health_stub = HealthStub(self.auditor_channel)
def health_check_auditor() -> None:
request = HealthCheckRequest(service="sancus.validator.Auditor")
resp = self.auditor_health_stub.Check(request)
if resp.status != HealthCheckResponse.SERVING:
LOGGER.info("Not yet serving auditor")
raise Exception("Not yet serving auditor")
self.try_repeat_timeout(health_check_auditor, timedelta(minutes=5))
LOGGER.info("Serving auditor")
self.background_job.start()
def deposit(self, address: str, currency: str, amount: Decimal) -> HexBytes:
LOGGER.info("Depositing %s %s into %s", amount, currency, address)
if currency == "GUSD":
tx_params = self.gusd_contract.functions.transfer(address, int(amount * 100)).buildTransaction(
{
"from": self.eth_main_address,
}
)
txn_hash = self.w3.eth.send_transaction(tx_params)
return HexBytes(txn_hash)
if currency == "ETH":
txn_hash = self.w3.eth.send_transaction(
{
"from": self.eth_main_address,
"to": address,
"value": int(amount * 10 ** 18),
}
)
return HexBytes(txn_hash)
if currency == "BTC":
with self.get_btc() as proxy:
txn_hash = proxy.sendtoaddress(address, int(amount * bitcoin.core.COIN))
return HexBytes(txn_hash)
raise ValueError("Invalid currency")
def wait_for_tx(self, currency: str, transaction_id: HexBytes) -> int:
# returns the block number containing the transaction
LOGGER.info("waiting for %s %s", currency, transaction_id.hex())
if currency == "BTC":
def check_for_transaction() -> int:
with self.get_btc() as proxy:
tx = proxy.getrawtransaction(transaction_id, verbose=True)
if tx["blockhash"] is None:
raise Exception(f"tx {transaction_id.hex()} not in chain")
block_header = proxy.getblockheader(tx["blockhash"], verbose=True)
block_number: int = block_header["height"]
return block_number
elif currency in ("GUSD", "ETH"):
def check_for_transaction() -> int:
tx_receipt = cast(web3.types.TxReceipt, self.w3.eth.getTransactionReceipt(transaction_id))
block_number: int = tx_receipt.blockNumber
return block_number
else:
raise ValueError(f"Unknown currency {currency}")
block_number = self.try_repeat_timeout(check_for_transaction, timedelta(minutes=5))
LOGGER.info("transaction %s %s has block number %d", currency, transaction_id.hex(), block_number)
return block_number
@staticmethod
def make_deposit_key(user: User, account_id: bytes) -> None:
LOGGER.info("Making deposit key for user(%s), account(%s)", user.username, account_id.hex())
deposit_key_request = MakeDepositKeyRequest(accountId=account_id)
deposit_key_response = user.deposit_stub.MakeDepositKey(deposit_key_request)
user.account_id_to_account[account_id].deposit_addresses.append(deposit_key_response.depositKey.address)
def deposit_into_account(self, account: Account, amount: Decimal) -> HexBytes:
address = account.deposit_addresses[0]
currency = account.currency
return self.deposit(address, currency, amount)
def create_admin_user(self) -> User:
return self._create_user("admin")
def create_user(self) -> User:
username = f"user_{uuid.uuid4()}"
return self._create_user(username)
def _create_user(self, username: str) -> User:
LOGGER.info("Creating user %s", username)
assert self.auth_stub is not None
registration_challenge_response = self.auth_stub.MakeRegistrationChallenge(
MakeRegistrationChallengeRequest(username=username),
)
attestation = self.soft_webauthn.create_credential(registration_challenge_response.credentialRequest)
register_response = self.auth_stub.Register(
RegisterRequest(
challengeNonce=registration_challenge_response.challengeRequest.nonce,
attestation=attestation,
)
)
user_jwt = register_response.jwt
user_channel = make_grpc_channel(self.backend_grpc_config, user_jwt)
account_stub = AccountStub(user_channel)
account_id_to_account: Dict[bytes, Account] = {}
currency_and_account_type_to_accounts: Dict[Tuple[str, int], List[Account]] = {}
for account_response in list_rpc_yield(ListAccountsRequest(), account_stub.ListAccounts):
assert isinstance(account_response, AccountResponse)
account_id = account_response.id
currency = account_response.currency
account_type = account_response.accountType
account = Account(account_id=account_id, account_type=account_type, currency=currency, deposit_addresses=[])
if (currency, account_type) not in currency_and_account_type_to_accounts:
currency_and_account_type_to_accounts[currency, account_type] = []
currency_and_account_type_to_accounts[currency, account_type].append(account)
account_id_to_account[account_id] = account
user = User(
user_id=bytes.fromhex(jwt.decode(user_jwt, options={"verify_signature": False})["sub"]),
currency_and_account_type_to_accounts=currency_and_account_type_to_accounts,
account_id_to_account=account_id_to_account,
grpc_channel=user_channel,
deposit_stub=DepositStub(user_channel),
account_stub=account_stub,
exchange_stub=ExchangeStub(user_channel),
marketdata_stub=MarketdataStub(user_channel),
withdrawal_stub=WithdrawalStub(user_channel),
username=username,
)
self.users.append(user)
return user
def get_latest_block_processed(self, currency: str) -> int:
if currency in ("GUSD", "ETH"):
return self.marketdata_stub.GetLatestProcessedBlockNumber(
GetLatestProcessedBlockNumberRequest(blockchain="ETH")
).blockNumber
if currency == "BTC":
return self.marketdata_stub.GetLatestProcessedBlockNumber(
GetLatestProcessedBlockNumberRequest(blockchain="BTC")
).blockNumber
raise ValueError(f"Invalid currency: {currency}")
def ensure_block_processed(
self,
currency: str,
timeout: timedelta,
minimum_block_number: Optional[int] = None,
) -> None:
# ensures that at least one block for both bitcoin and ethereum are processed
deadline = datetime.now() + timeout
def get_currency_block_processed() -> int:
return self.get_latest_block_processed(currency)
if minimum_block_number is None:
start_block_number = self.try_repeat_timeout(get_currency_block_processed, timeout)
minimum_block_number = start_block_number + 1
LOGGER.info("Waiting for backend to process block %s for currency %s", minimum_block_number, currency)
while datetime.now() < deadline:
new_block_number = get_currency_block_processed()
if new_block_number >= minimum_block_number:
LOGGER.info(
"Backend finished processing block %s >= %s for currency %s",
new_block_number,
minimum_block_number,
currency,
)
return
LOGGER.info(
"Backend finished processing block %s < %s for currency %s; sleeping 1 second",
new_block_number,
minimum_block_number,
currency,
)
time.sleep(1)
raise Exception("Failed to process blocks before timeout")
def exchange(self, user: User, from_account_id: bytes, to_account_id: bytes, amount: Decimal) -> None:
LOGGER.info(
"Exchanging %s from account %s to account %s for user %s",
amount,
from_account_id.hex(),
to_account_id.hex(),
user.username,
)
from_currency = user.account_id_to_account[from_account_id].currency
to_currency = user.account_id_to_account[to_account_id].currency
exchange_rate_request = GetMarketExchangeRateRequest(fromCurrency=from_currency, toCurrency=to_currency)
exchange_rate_response = user.marketdata_stub.GetMarketExchangeRate(exchange_rate_request)
initiate_exchange_request = InitiateExchangeRequest(
exchangeRateJWT=exchange_rate_response.exchangeRateJWT,
amount=str(amount),
fromAccountId=from_account_id,
toAccountId=to_account_id,
)
initiate_exchange_response = user.exchange_stub.InitiateExchange(initiate_exchange_request)
exchange_assertion = self.soft_webauthn.request_assertion(
initiate_exchange_response.challengeRequest, initiate_exchange_response.credentialRequest
)
process_exchange_request = ProcessExchangeRequest(
id=initiate_exchange_response.id, assertion=exchange_assertion
)
user.exchange_stub.ProcessExchange(process_exchange_request)
def withdraw(self, user: User, from_account_id: bytes, amount: Decimal) -> str:
LOGGER.info("Withdrawing %s from account %s for user %s", amount, from_account_id.hex(), user.username)
currency = user.account_id_to_account[from_account_id].currency
if currency in ("ETH", "GUSD"):
account = ETHAccount.create() # pylint: disable=no-value-for-parameter
destination_address = str(account.address)
elif currency == "BTC":
def get_address() -> str:
# need to use the proxy, rather than doing it locally, so the address is in the wallet
# and we can get the balance
with self.get_btc() as proxy:
return str(proxy.getnewaddress())
destination_address = self.try_repeat_timeout(get_address, timeout=timedelta(minutes=5))
else:
raise ValueError("invalid account currency")
initiate_request = InitiateWithdrawalRequest(
amount=str(amount),
fromAccountId=from_account_id,
destinationAddress=destination_address,
)
initiate_response = user.withdrawal_stub.InitiateWithdrawal(initiate_request)
withdrawal_assertion = self.soft_webauthn.request_assertion(
initiate_response.challengeRequest, initiate_response.credentialRequest
)
process_request = ProcessWithdrawalRequest(id=initiate_response.id, assertion=withdrawal_assertion)
user.withdrawal_stub.ProcessWithdrawal(process_request)
return destination_address
def get_chain_balance(self, currency: str, address: str) -> Decimal:
if currency == "ETH":
def get_bal() -> Decimal:
return Decimal(self.w3.eth.get_balance(address, "latest")) / Decimal(10 ** 18)
elif currency == "GUSD":
def get_bal() -> Decimal:
return Decimal(
self.gusd_contract.functions.balanceOf(address).call(block_identifier="latest")
) / Decimal(10 ** 2)
elif currency == "BTC":
def get_bal() -> Decimal:
with self.get_btc() as proxy:
return Decimal(proxy.getreceivedbyaddress(address)) / Decimal(10 ** 9)
else:
raise ValueError(f"Unknown currency: {currency}")
return self.try_repeat_timeout(get_bal, timeout=timedelta(minutes=5))
def wait_for_withdrawal(
self,
currency: str,
address: str,
amount: Decimal,
timeout: timedelta,
) -> None:
def check() -> None:
chain_amount = self.get_chain_balance(currency, address)
if chain_amount < amount:
raise Exception(f"Chain amount {chain_amount} < expected amount {amount}")
self.try_repeat_timeout(check, timeout)
def audit(self, timeout: timedelta = timedelta(minutes=30)) -> None:
self.audit_counter += 1 # audit versions are 1-indexed
auditgen_output_dir = os.path.join(self.auditgen_output_dir, f"audit_{self.audit_counter}")
auditgen_pstats = os.path.join(auditgen_output_dir, "auditgen-profile.svg")
command_prefix = ["taskset", "-ac", os.environ["AUDITGEN_CPUS"]] if "AUDITGEN_CPUS" in os.environ else []
check = True
if ENABLE_PY_SPY:
command_prefix.extend(
[
PY_SPY,
"record",
"-o",
auditgen_pstats,
"--",
]
)
check = False # there's a bug with check py-spy -- the return code isn't properly set
command = [
*command_prefix,
sys.executable,
"-m",
"utils.auditgen",
f"--output_directory={auditgen_output_dir}",
]
LOGGER.info(
"Auditing with command cd %s; %s %s",
EXPERIMENTS_DIRECTORY,
" ".join([f"{name}={value}" for (name, value) in self.env_vars.items()]),
" ".join(command),
)
# call auditgen via subprocess
# Generate, publish, and validate an audit
os.makedirs(auditgen_output_dir)
with open(os.path.join(auditgen_output_dir, "stdout.log"), "x") as stdout_file:
with open(os.path.join(auditgen_output_dir, "stderr.log"), "x") as stderr_file:
# there's an issue where py-spy changes the exit code, so currently ignoring it
subprocess.run(
command,
cwd=EXPERIMENTS_DIRECTORY,
stdout=stdout_file,
stderr=stderr_file,
env=self.env_vars,
universal_newlines=True,
check=check,
)
audit_version = self.audit_counter
# wait for the audit to finish
def check_audit_version() -> None:
resp = self.auditor_stub.GetLatestAuditVersion(GetLatestAuditVersionRequest())
if resp.version < audit_version:
raise Exception(f"audit version {resp.version} < desired audit version audit_version")
self.try_repeat_timeout(check_audit_version, timeout)
# record the size of the auditor DB
def record_db_size() -> None:
db_names = {"auditor": self.auditor_db, "backend": self.backend_db}
for name in db_names:
statement = "SELECT table_schema, table_name, data_length, index_length FROM information_schema.tables"
profile_data_folder = self.env_vars["PROFILE_DATA_FOLDER"]
output_dir = os.path.join(profile_data_folder, f"{name}_db")
engine = create_engine(db_names[name])
with engine.connect() as con:
res = con.execute(statement)
all_tables = res.fetchall()
key = f"{name}-{self.experiment_tag}"
filtered_res = [t for t in all_tables if t[0] == key]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
with open(f"{output_dir}/{name}_db_size.csv", "a") as f:
writer_object = writer(f)
if self.audit_counter == 1:
writer_object.writerow(
["audit_version", "table_schema", "table_name", "data_length", "index_length"]
)
for t in filtered_res:
writer_object.writerow([self.audit_counter] + list(t))
self.try_repeat_timeout(record_db_size, timeout)
LOGGER.info("Auditing %d finished", audit_version)
@staticmethod
def try_repeat_timeout(func: Callable[[], TResponse], timeout: timedelta) -> TResponse:
deadline = datetime.now() + timeout
while True:
try:
return func()
except Exception as e:
if datetime.now() < deadline:
# LOGGER.info("Check failed; sleeping 1 second and trying again")
time.sleep(1)
continue
LOGGER.error("Try-repeat-timeout failed", exc_info=True)
raise Exception("Try-repeat-timeout failed") from e
@staticmethod
def make_account(user: User, currency: str, account_type: "AccountType.V") -> None:
request = MakeAccountRequest(accountType=account_type, currency=currency)
response = user.account_stub.MakeAccount(request)
account_id = response.accountId
account = Account(account_id=account_id, account_type=account_type, currency=currency, deposit_addresses=[])
user.account_id_to_account[account_id] = account
if (currency, account_type) not in user.currency_and_account_type_to_accounts:
user.currency_and_account_type_to_accounts[currency, account_type] = []
user.currency_and_account_type_to_accounts[currency, account_type].append(account)
def create_infra(self) -> "Optional[subprocess.Popen[str]]":
if MANAGE_INFRA:
infra_env_vars: Dict[str, str] = {}
def conditional_merge(key: str) -> None:
if key in os.environ:
infra_env_vars[key] = os.environ[key]
conditional_merge("MYSQL_CPUS")
conditional_merge("GETH_CPUS")
conditional_merge("IPFS_CPUS")
conditional_merge("BITCOIN_CORE_CPUS")
conditional_merge("BITCOIN_MINER_CPUS")
infra_file = os.environ.get("INFRA_COMPOSE_FILE", "docker-compose.yml")
# stop the existing infra
LOGGER.info("Stopping the existing infra")
subprocess.check_call(
[
"/usr/local/bin/docker-compose",
"-f",
infra_file,
"down",
"-v",
],
cwd=os.path.join(os.path.dirname(__file__), "..", "..", "infra"),
env=infra_env_vars,
universal_newlines=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
LOGGER.info("Stopped the existing infra")
command_prefix = ["taskset", "-ac", os.environ["INFRA_CPUS"]] if "INFRA_CPUS" in os.environ else []
command = [
*command_prefix,
"/usr/local/bin/docker-compose",
"-f",
infra_file,
"up",
"--build",
]
LOGGER.info("Starting infra with command %s", " ".join(command))
return subprocess.Popen(
command,
cwd=os.path.join(os.path.dirname(__file__), "..", "..", "infra"),
stdout=self.infra_stdout_file,
stderr=self.infra_stderr_file,
env=infra_env_vars,
universal_newlines=True,
)
return None
def get_latest_eth_block_number(self) -> int:
def get_eth_block() -> int:
block_number: int = self.w3.eth.block_number
return block_number
return self.try_repeat_timeout(get_eth_block, timedelta(seconds=30))
def get_latest_btc_block_number(self) -> int:
def btc_block_count() -> int:
with self.get_btc() as proxy:
block_count = proxy.getblockcount()
assert isinstance(block_count, int)
return block_count
return self.try_repeat_timeout(btc_block_count, timedelta(seconds=60))
@contextmanager
def get_btc(self) -> Generator[bitcoin.rpc.Proxy, None, None]: # type: ignore[misc]
self.btc_proxy_queue.get(timeout=30) # get a "Lock" for a proxy from the pool
try:
proxy = bitcoin.rpc.Proxy(BTC_HOST, timeout=60)
try:
yield proxy
finally:
proxy.close()
finally:
self.btc_proxy_queue.put_nowait(None)
def dump_env_vars(self) -> None:
with open(os.path.join(self.profile_output_dir, "env_vars.json"), "w") as env_vars_file:
json.dump(self.env_vars, env_vars_file)
def __enter__(self) -> "Runner":
return self
def close(self) -> None:
LOGGER.info("Attempting an orderly shutdown")
LOGGER.info("Dumping Envionment Variables")
self.dump_env_vars()
LOGGER.info("Processing Profile Data")
self.experiment_processor.execute_script()
LOGGER.info("Closing user channels")
for user in self.users:
user.grpc_channel.close()
self.users.clear()
LOGGER.info("Marking backend as stopped")
self.stopped = True
self.unauthenticated_channel.close()
self.auditor_channel.close()
LOGGER.info("Joining the background job")
self.background_job.join()
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.close()
if exc_type is None:
LOGGER.info("Experiment %s finished", self.experiment_name)
def loop(self) -> None:
error = False
while not self.stopped:
if self.backend_proc.poll() is not None:
LOGGER.error("Backend crashed")
error = True
break
if self.auditor_proc.poll() is not None:
LOGGER.error("Auditor crashed")
error = True
break
if self.infra_proc is not None and self.infra_proc.poll() is not None:
LOGGER.error("Infra crashed")
error = True
break
time.sleep(1)
LOGGER.info("Sending sigint to background process")
self.backend_proc.send_signal(signal.SIGINT)
LOGGER.info("Sending sigint to auditor process")
self.auditor_proc.send_signal(signal.SIGINT)
try:
LOGGER.info("Waiting 60 seconds for background process to respond to SIGINT")
self.backend_proc.wait(60)
except subprocess.TimeoutExpired:
self.backend_proc.kill()
try:
LOGGER.info("Waiting 60 seconds for auditor process to terminate")
self.auditor_proc.wait()
except subprocess.TimeoutExpired:
self.auditor_proc.kill()
if self.infra_proc is not None:
LOGGER.info("Killing the infra proc")
self.infra_proc.kill()
if not self.backend_stderr_file.closed:
self.backend_stderr_file.close()
if not self.backend_stdout_file.closed:
self.backend_stdout_file.close()
if not self.auditor_stdout_file.closed:
self.auditor_stdout_file.close()
if not self.auditor_stderr_file.closed:
self.auditor_stderr_file.close()
if not self.infra_stderr_file.closed:
self.infra_stderr_file.close()
if not self.infra_stdout_file.closed:
self.infra_stdout_file.close()
self.sock_folder.cleanup()
if error:
# need to terminate the current process if error, since this is in the background loop
os.killpg(os.getpgid(os.getpid()), signal.SIGTERM) | en | 0.852383 | # pylint: disable=ungrouped-imports # ensure that we are actually connected to the ethereum node # ensure that we have the databases. it takes 2-3 minutes for the docker mysql to start up # TODO ensure that we have the bitcoin node and ipfs node # "GRPC_TRACE": "api,call_error,p_failure", # "GRPC_STACKTRACE_MINLOGLEVEL": "INFO", # effectively disable exchange rate validation # let's start the docker compose # returns the block number containing the transaction # ensures that at least one block for both bitcoin and ethereum are processed # pylint: disable=no-value-for-parameter # need to use the proxy, rather than doing it locally, so the address is in the wallet # and we can get the balance # audit versions are 1-indexed # there's a bug with check py-spy -- the return code isn't properly set # call auditgen via subprocess # Generate, publish, and validate an audit # there's an issue where py-spy changes the exit code, so currently ignoring it # wait for the audit to finish # record the size of the auditor DB # LOGGER.info("Check failed; sleeping 1 second and trying again") # stop the existing infra # type: ignore[misc] # get a "Lock" for a proxy from the pool # need to terminate the current process if error, since this is in the background loop | 1.395715 | 1 |
tests/test_base.py | dogmatic69/nordigen-python | 2 | 6615844 | <filename>tests/test_base.py
import unittest
from apiclient import HeaderAuthentication
from nordigen.client import NordigenClient, next_page_by_url
# Shared fixture: token-scheme header authentication used by the client tests.
header_auth = HeaderAuthentication(scheme="Token", token="<PASSWORD>")
class TestBaseAuth(unittest.TestCase):
    """Default-header generation from the configured authentication."""

    def test_token_auth(self):
        # The client should expose the scheme + token as a single
        # Authorization header.
        client = NordigenClient(auth=header_auth)
        headers = client.get_default_headers()
        expected = {"Authorization": "Token <PASSWORD>"}
        self.assertEqual(headers, expected)
class TestBasePagination(unittest.TestCase):
    """Behaviour of the ``next_page_by_url`` pagination helper."""

    def test_pagination(self):
        # The helper should pick the "next" URL out of the response body.
        next_url = next_page_by_url({"next": "http://example.com/page/2"}, None)
        self.assertEqual(next_url, "http://example.com/page/2")
class TestBaseUrl(unittest.TestCase):
    """URL construction for the Nordigen API client."""

    def test_url_host(self):
        # A custom host is substituted while scheme and base stay default.
        client = NordigenClient(auth=None, host="localhost")
        self.assertEqual(client.url("foo"), "https://localhost/api/v2/foo/")

    def test_url_scheme(self):
        # A custom scheme replaces the default "https".
        client = NordigenClient(auth=None, scheme="sftp")
        self.assertEqual(client.url("foo"), "sftp://ob.nordigen.com/api/v2/foo/")

    def test_url_base(self):
        # An empty base drops the "/api" path prefix entirely.
        client = NordigenClient(auth=None, base="")
        self.assertEqual(client.url("foo"), "https://ob.nordigen.com/v2/foo/")
        # A custom base path is inserted before the API version segment.
        client = NordigenClient(auth=None, base="/some/thing/here")
        self.assertEqual(
            client.url("foo"),
            "https://ob.nordigen.com/some/thing/here/v2/foo/",
        )

    def test_url_basic(self):
        # Default configuration, single and multi-segment endpoints.
        client = NordigenClient(auth=None)
        self.assertEqual(client.url("foo"), "https://ob.nordigen.com/api/v2/foo/")
        self.assertEqual(client.url("foo/bar"), "https://ob.nordigen.com/api/v2/foo/bar/")

    def test_url_args(self):
        client = NordigenClient(auth=None)
        # Empty args: no query string is appended.
        self.assertEqual(
            client.url("foo", url_args={}),
            "https://ob.nordigen.com/api/v2/foo/",
        )
        # Args are serialised into the query string.
        self.assertEqual(
            client.url("foo", url_args={"fizz": "buzz"}),
            "https://ob.nordigen.com/api/v2/foo/?fizz=buzz",
        )
| <filename>tests/test_base.py
import unittest
from apiclient import HeaderAuthentication
from nordigen.client import NordigenClient, next_page_by_url
# Shared fixture: token-scheme header authentication used by the client tests.
header_auth = HeaderAuthentication(scheme="Token", token="<PASSWORD>")
class TestBaseAuth(unittest.TestCase):
    """Default-header generation from the configured authentication."""

    def test_token_auth(self):
        # The client should expose the scheme + token as a single
        # Authorization header.
        client = NordigenClient(auth=header_auth)
        headers = client.get_default_headers()
        expected = {"Authorization": "Token <PASSWORD>"}
        self.assertEqual(headers, expected)
class TestBasePagination(unittest.TestCase):
    """Behaviour of the ``next_page_by_url`` pagination helper."""

    def test_pagination(self):
        # The helper should pick the "next" URL out of the response body.
        next_url = next_page_by_url({"next": "http://example.com/page/2"}, None)
        self.assertEqual(next_url, "http://example.com/page/2")
class TestBaseUrl(unittest.TestCase):
    """URL construction for the Nordigen API client."""

    def test_url_host(self):
        # A custom host is substituted while scheme and base stay default.
        client = NordigenClient(auth=None, host="localhost")
        self.assertEqual(client.url("foo"), "https://localhost/api/v2/foo/")

    def test_url_scheme(self):
        # A custom scheme replaces the default "https".
        client = NordigenClient(auth=None, scheme="sftp")
        self.assertEqual(client.url("foo"), "sftp://ob.nordigen.com/api/v2/foo/")

    def test_url_base(self):
        # An empty base drops the "/api" path prefix entirely.
        client = NordigenClient(auth=None, base="")
        self.assertEqual(client.url("foo"), "https://ob.nordigen.com/v2/foo/")
        # A custom base path is inserted before the API version segment.
        client = NordigenClient(auth=None, base="/some/thing/here")
        self.assertEqual(
            client.url("foo"),
            "https://ob.nordigen.com/some/thing/here/v2/foo/",
        )

    def test_url_basic(self):
        # Default configuration, single and multi-segment endpoints.
        client = NordigenClient(auth=None)
        self.assertEqual(client.url("foo"), "https://ob.nordigen.com/api/v2/foo/")
        self.assertEqual(client.url("foo/bar"), "https://ob.nordigen.com/api/v2/foo/bar/")

    def test_url_args(self):
        client = NordigenClient(auth=None)
        # Empty args: no query string is appended.
        self.assertEqual(
            client.url("foo", url_args={}),
            "https://ob.nordigen.com/api/v2/foo/",
        )
        # Args are serialised into the query string.
        self.assertEqual(
            client.url("foo", url_args={"fizz": "buzz"}),
            "https://ob.nordigen.com/api/v2/foo/?fizz=buzz",
        )
| none | 1 | 2.642722 | 3 | |
src/charts/posttraffic.py | ucipass/www | 0 | 6615845 | import requests
from time import sleep
from random import uniform
from time import gmtime, strftime
url = "http://127.0.0.1:3000/charts"
def post(value):
    """POST a single reading to the charts endpoint (best effort).

    Sends a JSON:API-style body that logs *value* under the "random" log
    name. Failures are reported but never raised, so the caller's loop
    keeps running.
    """
    try:
        data = {
            "data":
                {
                    "id": "test",
                    "type": "charts",
                    "attributes": {
                        "cmd": "log",
                        "data": str(value),
                        "logname": "random"
                    }
                }
        }
        print("Data:", value, "To:", url)
        r = requests.post(url, json=data)
        print("Response", r)
    except Exception as exc:
        # Fixed: the original swallowed the exception entirely, making
        # transient network errors impossible to diagnose from the log.
        print("Post Failure", strftime("%Y-%m-%d %H:%M:%S", gmtime()), exc)
while True:
val = uniform(0,10)
post(val)
sleep(1) | import requests
from time import sleep
from random import uniform
from time import gmtime, strftime
url = "http://127.0.0.1:3000/charts"
def post(value):
    """Send one data point to the charts service, swallowing any error."""
    # Build the JSON:API-style payload up front; only the network call
    # inside the try-block can actually fail.
    payload = {
        "data": {
            "id": "test",
            "type": "charts",
            "attributes": {
                "cmd": "log",
                "data": str(value),
                "logname": "random"
            }
        }
    }
    try:
        print("Data:", value, "To:", url)
        response = requests.post(url, json=payload)
        print("Response", response)
    except Exception:
        print("Post Failure", strftime("%Y-%m-%d %H:%M:%S", gmtime()))
while True:
val = uniform(0,10)
post(val)
sleep(1) | none | 1 | 2.871164 | 3 | |
Simulation/CustomLibraries/equacoes.py | Zakonildo/FEI-Projectile-Motion-Simulation | 1 | 6615846 | <filename>Simulation/CustomLibraries/equacoes.py
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from math import *
import numpy as np
class Equacoes:
    """Projectile-motion equations (names are Portuguese).

    Three method families:
      * ``*Def``      -- ideal (drag-free) motion,
      * plain names   -- analytic solution with linear drag coefficient ``beta``,
      * ``*Numerico`` -- RK4 numeric steps with linear (``b``) and quadratic
                         (``c``) drag terms.
    Angles (``angulo``) are given in degrees and converted internally.
    """
    def __init__(self):
        # Flag only; no other state is kept on the instance.
        self.criou = True
    # --- ideal motion (no drag) ---
    def aXDef(self):
        return 0
    def aYDef(self, gravidade):
        return -gravidade
    def vXDef(self, angulo, v0, tempo):
        # horizontal velocity is constant; ``tempo`` is unused by design
        return ( v0*cos(radians(angulo)) )
    def vYDef(self, angulo, v0, tempo, gravidade):
        return ( ( v0*sin(radians(angulo)) - gravidade*tempo ) )
    def xDef(self, angulo, v0, tempo):
        return ( 0 + v0*cos(radians(angulo)) * tempo )
    def yDef(self, angulo, v0, tempo, gravidade):
        return ( (v0*sin(radians(angulo)) * tempo - ((gravidade*(tempo**2))/2)) )
    def trajetoriaDef(self, angulo, v0, x, gravidade):
        # y as a function of horizontal distance x (parabola)
        return ( ( x*tan(radians(angulo)) - 0.5*gravidade * (x/(v0*cos(radians(angulo))))**2 ) )
    # --- analytic solution with linear drag (coefficient beta) ---
    def aX(self, beta, angulo, v0, tempo):
        return (-beta*self.vX(beta, angulo, v0, tempo))
    def aY(self, beta, angulo, v0, tempo, gravidade):
        return (-beta*(self.vY(beta, angulo, v0, tempo, gravidade) + (gravidade/beta)))
    def vX(self, beta, angulo, v0, tempo):
        return ( ( v0*cos(radians(angulo)) ) * exp( -beta *tempo ) )
    def vY(self, beta, angulo, v0, tempo, gravidade):
        return ( ( v0*sin(radians(angulo)) + (gravidade/beta) ) * exp( -beta*tempo ) - (gravidade/beta) )
    def x(self, beta, angulo, v0, tempo):
        return ( v0*cos(radians(angulo)) * ( (1 - exp(-beta*tempo))/beta ) )
    def y(self, beta, angulo, v0, tempo, gravidade):
        return ( (v0*sin(radians(angulo)) + (gravidade/beta)) * ( (1 - exp(-beta*tempo))/beta ) - (gravidade/beta)*tempo )
    def trajetoria(self, beta, angulo, v0, x, gravidade):
        # log argument becomes <= 0 past the asymptotic range -> domain error there
        a = (1 - ((beta*x)/(v0*cos(radians(angulo))) ))
        return ( (tan(radians(angulo)) + (gravidade/(beta*v0*cos(radians(angulo)))))*x + (gravidade/(beta**2)) * np.log(a) )
    # --- RK4 numeric integration with linear (b) + quadratic (c) drag ---
    def xNumerico(self, s0, vx, ax, t):
        return(s0 + vx*t + 0.5*ax*(t**2))
    def yNumerico(self, s0, vy, ay, t):
        return(s0 + vy*t + 0.5*ay*(t**2))
    def vXNumerico(self, c, b, angulo, vx, vy, passo):
        # NOTE(review): k1 (the vx slope) is added to BOTH vx and vy in the
        # intermediate stages -- confirm this is the intended RK4 coupling.
        k1 = -b*vx - c*sqrt(vx**2 + vy**2)*vx
        k2 = -b*(vx + k1*(passo/2)) - c*sqrt((vx + k1*(passo/2))**2 + (vy + k1*(passo/2))**2)*(vx + k1*(passo/2))
        k3 = -b*(vx + k2*(passo/2)) - c*sqrt((vx + k2*(passo/2))**2 + (vy + k2*(passo/2))**2)*(vx + k2*(passo/2))
        k4 = -b*(vx + k3*passo) - c*sqrt((vx + k3*passo)**2 + (vy + k3*passo)**2)*(vx + k3*passo)
        return (vx + ((k1 + 2*k2 + 2*k3 + k4)*passo)/6)
    def vYNumerico(self, c, b, angulo, vx, vy, g, passo):
        k1 = -g - b*vy - c*sqrt(vx**2 + vy**2)*vy
        k2 = -g - b*(vy + k1*(passo/2)) - c*sqrt((vx + k1*(passo/2))**2 + (vy + k1*(passo/2))**2)*(vy + k1*(passo/2))
        k3 = -g - b*(vy + k2*(passo/2)) - c*sqrt((vx + k2*(passo/2))**2 + (vy + k2*(passo/2))**2)*(vy + k2*(passo/2))
        k4 = -g - b*(vy + k3*passo) - c*sqrt((vx + k3*passo)**2 + (vy + k3*passo)**2)*(vy + k3*passo)
        return (vy + ((k1 + 2*k2 + 2*k3 + k4)*passo)/6)
    def aXNumerico(self, c, b, vx, vy):
        return (-b*vx - c*sqrt(vx**2 + vy**2)*vx)
    def aYNumerico(self, c, b, vx, vy, g):
        return (-g - b*vy - c*sqrt(vx**2 + vy**2)*vy)
    def cCalculator(self, C, p, r, m):
        # quadratic-drag coefficient per unit mass: C*rho*pi*r^2 / (2m)
        return ((1/2)*C*p*pi*r*r)/m
def bCalculator(self, n, r, m):
return (6*pi*n*r)/m | <filename>Simulation/CustomLibraries/equacoes.py
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from math import *
import numpy as np
class Equacoes:
    """Projectile-motion equations (names are Portuguese).

    Method families:
      * ``*Def``      -- ideal (drag-free) motion,
      * plain names   -- analytic solution with linear drag ``beta``,
      * ``*Numerico`` -- RK4 numeric steps with linear (``b``) and
                         quadratic (``c``) drag terms.
    Angles (``angulo``) are in degrees and converted internally.
    """

    def __init__(self):
        # Flag only; the instance carries no other state.
        self.criou = True

    # ---- ideal motion (no drag) -----------------------------------------
    def aXDef(self):
        """Horizontal acceleration without drag (always zero)."""
        return 0

    def aYDef(self, gravidade):
        """Vertical acceleration without drag."""
        return -gravidade

    def vXDef(self, angulo, v0, tempo):
        """Horizontal velocity without drag; constant, so ``tempo`` is unused."""
        return v0 * cos(radians(angulo))

    def vYDef(self, angulo, v0, tempo, gravidade):
        """Vertical velocity without drag at instant ``tempo``."""
        return v0 * sin(radians(angulo)) - gravidade * tempo

    def xDef(self, angulo, v0, tempo):
        """Horizontal position without drag (launch at the origin)."""
        return v0 * cos(radians(angulo)) * tempo

    def yDef(self, angulo, v0, tempo, gravidade):
        """Vertical position without drag (launch at the origin)."""
        return v0 * sin(radians(angulo)) * tempo - (gravidade * (tempo ** 2)) / 2

    def trajetoriaDef(self, angulo, v0, x, gravidade):
        """Height as a function of horizontal distance ``x`` (parabola)."""
        rad = radians(angulo)
        tempo_voo = x / (v0 * cos(rad))
        return x * tan(rad) - 0.5 * gravidade * tempo_voo ** 2

    # ---- analytic solution with linear drag (coefficient beta) ----------
    def aX(self, beta, angulo, v0, tempo):
        """Horizontal acceleration under linear drag."""
        return -beta * self.vX(beta, angulo, v0, tempo)

    def aY(self, beta, angulo, v0, tempo, gravidade):
        """Vertical acceleration under linear drag."""
        return -beta * (self.vY(beta, angulo, v0, tempo, gravidade) + (gravidade / beta))

    def vX(self, beta, angulo, v0, tempo):
        """Horizontal velocity under linear drag (exponential decay)."""
        return (v0 * cos(radians(angulo))) * exp(-beta * tempo)

    def vY(self, beta, angulo, v0, tempo, gravidade):
        """Vertical velocity under linear drag (tends to terminal velocity)."""
        return (v0 * sin(radians(angulo)) + (gravidade / beta)) * exp(-beta * tempo) - (gravidade / beta)

    def x(self, beta, angulo, v0, tempo):
        """Horizontal position under linear drag."""
        return v0 * cos(radians(angulo)) * ((1 - exp(-beta * tempo)) / beta)

    def y(self, beta, angulo, v0, tempo, gravidade):
        """Vertical position under linear drag."""
        alcance = (1 - exp(-beta * tempo)) / beta
        return (v0 * sin(radians(angulo)) + (gravidade / beta)) * alcance - (gravidade / beta) * tempo

    def trajetoria(self, beta, angulo, v0, x, gravidade):
        """Height as a function of distance under linear drag.

        The log argument goes non-positive past the asymptotic range,
        which raises a domain error there.
        """
        rad = radians(angulo)
        argumento = 1 - ((beta * x) / (v0 * cos(rad)))
        return (tan(rad) + (gravidade / (beta * v0 * cos(rad)))) * x + (gravidade / (beta ** 2)) * np.log(argumento)

    # ---- numeric integration with linear (b) + quadratic (c) drag -------
    def xNumerico(self, s0, vx, ax, t):
        """Constant-acceleration position update for x."""
        return s0 + vx * t + 0.5 * ax * (t ** 2)

    def yNumerico(self, s0, vy, ay, t):
        """Constant-acceleration position update for y."""
        return s0 + vy * t + 0.5 * ay * (t ** 2)

    def vXNumerico(self, c, b, angulo, vx, vy, passo):
        """One RK4 step for vx; the k-increment is applied to both components."""
        def derivada(u, w):
            # dvx/dt at velocity components (u, w)
            return -b * u - c * sqrt(u ** 2 + w ** 2) * u
        meio = passo / 2
        k1 = derivada(vx, vy)
        k2 = derivada(vx + k1 * meio, vy + k1 * meio)
        k3 = derivada(vx + k2 * meio, vy + k2 * meio)
        k4 = derivada(vx + k3 * passo, vy + k3 * passo)
        return vx + ((k1 + 2 * k2 + 2 * k3 + k4) * passo) / 6

    def vYNumerico(self, c, b, angulo, vx, vy, g, passo):
        """One RK4 step for vy; the k-increment is applied to both components."""
        def derivada(u, w):
            # dvy/dt at velocity components (u, w)
            return -g - b * w - c * sqrt(u ** 2 + w ** 2) * w
        meio = passo / 2
        k1 = derivada(vx, vy)
        k2 = derivada(vx + k1 * meio, vy + k1 * meio)
        k3 = derivada(vx + k2 * meio, vy + k2 * meio)
        k4 = derivada(vx + k3 * passo, vy + k3 * passo)
        return vy + ((k1 + 2 * k2 + 2 * k3 + k4) * passo) / 6

    def aXNumerico(self, c, b, vx, vy):
        """Horizontal acceleration with linear plus quadratic drag."""
        return -b * vx - c * sqrt(vx ** 2 + vy ** 2) * vx

    def aYNumerico(self, c, b, vx, vy, g):
        """Vertical acceleration with gravity plus linear and quadratic drag."""
        return -g - b * vy - c * sqrt(vx ** 2 + vy ** 2) * vy

    def cCalculator(self, C, p, r, m):
        """Quadratic-drag coefficient per unit mass: C*rho*pi*r^2 / (2m)."""
        return ((1 / 2) * C * p * pi * r * r) / m
def bCalculator(self, n, r, m):
return (6*pi*n*r)/m | en | 0.326665 | #!/usr/bin/env python # coding: utf-8 # In[ ]: | 2.985462 | 3 |
intermediate_example_1.py | AshwinRJ/Variational-Inference | 10 | 6615847 | import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
from gm_generate import *
class IntermediateExample1(object):
    """
    Class implementing variational inference for 1D gaussian data by using monte-carlo approximation to ELBO
    """
    # NOTE(review): uses TensorFlow 1.x graph APIs (placeholder, random_normal,
    # InteractiveSession); requires tensorflow<2 or tf.compat.v1 shims.

    def __init__(self, mu_p=0.0, sigma_p=1.0, mu_var_p=0.0, sigma_var_p=1.0, learning_rate=1e-4, K=1, N=100):
        """
        __init__(IntermediateExample1, float, float, float, float, float, int, int) -> None
        mu_p: Hyperparamer for p(mu)
        sigma_p: Hyperparameter for p(mu)
        mu_var_p: Hyperparameter for p(sigma)
        sigma_var_p: Hyperparameter for p(sigma)
        learning_rate: Learning rate for optimizer
        K: Number of samples used for Monte-Carlo approximation of ELBO
        N: Number of examples per batch
        """
        self.N = N
        # placeholder for one batch of N scalar observations
        self.x = tf.placeholder(tf.float32, (self.N,))
        self.mu_p = mu_p
        self.sigma_p = sigma_p
        self.mu_var_p = mu_var_p
        self.sigma_var_p = sigma_var_p
        self.learning_rate = learning_rate
        self.K = K
        # Initialize the variables
        self.sigma = tf.Variable(random.random())
        self.mu = tf.Variable(random.random())
        self.sigma_var = tf.Variable(random.random())
        self.mu_var = tf.Variable(random.random())
        # Generate K samples of z
        eps_samples = tf.random_normal(shape=(K,)) # Reparameterisation trick for normal
        mu_samples = self.mu + eps_samples*self.sigma**2
        eps_samples2 = tf.random_uniform(shape=(K,)) # Reparameterisation trick for exponential
        # NOTE(review): sigma_samples is a squared affine transform of uniform
        # noise, so the variance samples are non-negative by construction.
        sigma_samples = (self.mu_var + eps_samples2*self.sigma_var**2) ** 2
        # Approximate ELBO (without constant term)
        self.ELBO = tf.reduce_mean(-(N/2.0) * tf.log(sigma_samples) - \
            0.5 * tf.reduce_sum((self.x - tf.stack([mu_samples]*N, axis=1))**2, axis=1) / sigma_samples - \
            0.5 * (mu_samples - self.mu_p)**2 / self.sigma_p**2 - \
            0.5 * (sigma_samples - self.mu_var_p)**2 / self.sigma_var_p**2 - \
            0.5 * tf.log(self.sigma**2) + \
            0.5 * (mu_samples - self.mu)**2 / self.sigma**2 + \
            0.5 * tf.log(self.sigma_var**2) + \
            0.5 * (sigma_samples - self.mu_var)**2 / self.sigma_var**2)
        # Negated so that the minimizer below maximizes the ELBO.
        self.ELBO = -self.ELBO
        # Maximize ELBO
        self.train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.ELBO)
        # Set up the session
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

    def train_step(self, x):
        """
        train_step(IntermediateExample1, ndarray) -> (float, float, float, float, float)
        x: Input data, vector of real numbers
        Returns:
            cost: -ELBO after a step of optimization
            mu, sigma, mu_var, sigma_var
        """
        # Variances are returned squared because sigma/sigma_var hold
        # standard-deviation-like parameters.
        _, cost = self.sess.run([self.train, self.ELBO], feed_dict={self.x: x})
        mu, sigma, mu_var, sigma_var = self.sess.run([self.mu, self.sigma, self.mu_var, self.sigma_var])
        return (-cost, mu, sigma**2, mu_var, sigma_var**2)
def run_intermediate_example_1(num_examples=100, data_mu=0.0, data_sigma=1.0, mu_p=0.0, sigma_p=1.0, mu_var_p=0.0, sigma_var_p=1.0, learning_rate=1e-4, num_iter=1000, K=1, N=100):
    """
    run_intermediate_example_1(int, float, float, float, float, float, float, float, float, int, int) -> None
    Runs the demo shown in the notebook for different settings of hyperparameters.
    """
    # Generate the data from a single-component 1D GMM
    pz = [1]
    mu = [np.asarray([data_mu])]
    sigma = [np.asarray([data_sigma]).reshape((1, 1))]
    gmm = GMM(pz=pz, muks=mu, sigmaks=sigma, k=1, d=1)
    x, _ = gmm.generate_points(n=num_examples)
    x = x.reshape((-1,))
    # Compute variational inference estimate for the parameters
    costs = []
    example = IntermediateExample1(mu_p, sigma_p, mu_var_p, sigma_var_p, learning_rate, K, N)
    for i in range(num_iter):
        cost, mu_ex, sigma_ex, mu_var, sigma_var = example.train_step(x)
        costs.append(cost)
    # Compute mu_expected, sigma_expected
    mu_expected = mu_ex  # Expected value of mu using q_1
    sigma_expected = mu_var**2  # Expected value of sigma using q_2
    # Print results.
    # Fixed: these were Python 2 print statements, a SyntaxError under
    # Python 3 (the rest of the file uses Python 3-compatible syntax).
    print('Optimal m:', mu_ex)
    print('Optimal s^2:', sigma_ex)
    print('Optimal m var:', mu_var)
    print('Optimal s^2 var:', sigma_var)
    print('Expected Value for mu:', mu_expected)
    print('Optimal Value for sigma^2:', sigma_expected)
    # Plot cost vs iterations
    plt.plot(costs)
    plt.title('Iteration vs ELBO')
    plt.xlabel('Iterations')
    plt.ylabel('ELBO')
    plt.show()
    # Show the histogram, true distribution and estimated distribution
    # NOTE(review): matplotlib >= 3.1 removed 'normed'; switch to
    # density=True when upgrading matplotlib.
    plt.hist(x, normed=True, color='#cccccc')
    def true_dist(x):
        # density of the generating Gaussian
        return (1.0/np.sqrt(2*np.pi*sigma[0])*np.exp(-0.5*((x-mu[0])/sigma[0])**2))
    def estimated_dist(x):
        # density implied by the variational estimates
        return (1.0/np.sqrt(2*np.pi*sigma_expected)*np.exp(-0.5*((x-mu_expected)/sigma_expected)**2))
    x_axis = np.arange(np.min(x)-0.5, np.max(x)+0.5, 0.01)
    plt.plot(x_axis, true_dist(x_axis).reshape((-1,)), 'ro', label='True Distribution')
    plt.plot(x_axis, estimated_dist(x_axis).reshape((-1,)), 'co', label='Estimated Distribution')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()
"""
if __name__ == '__main__':
ex = IntermediateExample1(learning_rate=0.05)
for i in range(1000):
cost, mu, sigma, alpha, beta = ex.train_step(np.asarray([1.0, 2.0, 3.0]))
print (cost, mu, sigma, alpha, beta)
"""
| import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
from gm_generate import *
class IntermediateExample1(object):
    """
    Class implementing variational inference for 1D gaussian data by using monte-carlo approximation to ELBO
    """
    # NOTE(review): uses TensorFlow 1.x graph APIs (placeholder, random_normal,
    # InteractiveSession); requires tensorflow<2 or tf.compat.v1 shims.

    def __init__(self, mu_p=0.0, sigma_p=1.0, mu_var_p=0.0, sigma_var_p=1.0, learning_rate=1e-4, K=1, N=100):
        """
        __init__(IntermediateExample1, float, float, float, float, float, int, int) -> None
        mu_p: Hyperparamer for p(mu)
        sigma_p: Hyperparameter for p(mu)
        mu_var_p: Hyperparameter for p(sigma)
        sigma_var_p: Hyperparameter for p(sigma)
        learning_rate: Learning rate for optimizer
        K: Number of samples used for Monte-Carlo approximation of ELBO
        N: Number of examples per batch
        """
        self.N = N
        # placeholder for one batch of N scalar observations
        self.x = tf.placeholder(tf.float32, (self.N,))
        self.mu_p = mu_p
        self.sigma_p = sigma_p
        self.mu_var_p = mu_var_p
        self.sigma_var_p = sigma_var_p
        self.learning_rate = learning_rate
        self.K = K
        # Initialize the variables
        self.sigma = tf.Variable(random.random())
        self.mu = tf.Variable(random.random())
        self.sigma_var = tf.Variable(random.random())
        self.mu_var = tf.Variable(random.random())
        # Generate K samples of z
        eps_samples = tf.random_normal(shape=(K,)) # Reparameterisation trick for normal
        mu_samples = self.mu + eps_samples*self.sigma**2
        eps_samples2 = tf.random_uniform(shape=(K,)) # Reparameterisation trick for exponential
        # NOTE(review): sigma_samples is a squared affine transform of uniform
        # noise, so the variance samples are non-negative by construction.
        sigma_samples = (self.mu_var + eps_samples2*self.sigma_var**2) ** 2
        # Approximate ELBO (without constant term)
        self.ELBO = tf.reduce_mean(-(N/2.0) * tf.log(sigma_samples) - \
            0.5 * tf.reduce_sum((self.x - tf.stack([mu_samples]*N, axis=1))**2, axis=1) / sigma_samples - \
            0.5 * (mu_samples - self.mu_p)**2 / self.sigma_p**2 - \
            0.5 * (sigma_samples - self.mu_var_p)**2 / self.sigma_var_p**2 - \
            0.5 * tf.log(self.sigma**2) + \
            0.5 * (mu_samples - self.mu)**2 / self.sigma**2 + \
            0.5 * tf.log(self.sigma_var**2) + \
            0.5 * (sigma_samples - self.mu_var)**2 / self.sigma_var**2)
        # Negated so that the minimizer below maximizes the ELBO.
        self.ELBO = -self.ELBO
        # Maximize ELBO
        self.train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.ELBO)
        # Set up the session
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

    def train_step(self, x):
        """
        train_step(IntermediateExample1, ndarray) -> (float, float, float, float, float)
        x: Input data, vector of real numbers
        Returns:
            cost: -ELBO after a step of optimization
            mu, sigma, mu_var, sigma_var
        """
        # Variances are returned squared because sigma/sigma_var hold
        # standard-deviation-like parameters.
        _, cost = self.sess.run([self.train, self.ELBO], feed_dict={self.x: x})
        mu, sigma, mu_var, sigma_var = self.sess.run([self.mu, self.sigma, self.mu_var, self.sigma_var])
        return (-cost, mu, sigma**2, mu_var, sigma_var**2)
def run_intermediate_example_1(num_examples=100, data_mu=0.0, data_sigma=1.0, mu_p=0.0, sigma_p=1.0, mu_var_p=0.0, sigma_var_p=1.0, learning_rate=1e-4, num_iter=1000, K=1, N=100):
    """
    run_intermediate_example_1(int, float, float, float, float, float, float, float, float, int, int) -> None
    Runs the demo shown in the notebook for different settings of hyperparameters.
    """
    # Generate the data from a single-component 1D GMM
    pz = [1]
    mu = [np.asarray([data_mu])]
    sigma = [np.asarray([data_sigma]).reshape((1, 1))]
    gmm = GMM(pz=pz, muks=mu, sigmaks=sigma, k=1, d=1)
    x, _ = gmm.generate_points(n=num_examples)
    x = x.reshape((-1,))
    # Compute variational inference estimate for the parameters
    costs = []
    example = IntermediateExample1(mu_p, sigma_p, mu_var_p, sigma_var_p, learning_rate, K, N)
    for i in range(num_iter):
        cost, mu_ex, sigma_ex, mu_var, sigma_var = example.train_step(x)
        costs.append(cost)
    # Compute mu_expected, sigma_expected
    mu_expected = mu_ex  # Expected value of mu using q_1
    sigma_expected = mu_var**2  # Expected value of sigma using q_2
    # Print results.
    # Fixed: these were Python 2 print statements, a SyntaxError under
    # Python 3 (the rest of the file uses Python 3-compatible syntax).
    print('Optimal m:', mu_ex)
    print('Optimal s^2:', sigma_ex)
    print('Optimal m var:', mu_var)
    print('Optimal s^2 var:', sigma_var)
    print('Expected Value for mu:', mu_expected)
    print('Optimal Value for sigma^2:', sigma_expected)
    # Plot cost vs iterations
    plt.plot(costs)
    plt.title('Iteration vs ELBO')
    plt.xlabel('Iterations')
    plt.ylabel('ELBO')
    plt.show()
    # Show the histogram, true distribution and estimated distribution
    # NOTE(review): matplotlib >= 3.1 removed 'normed'; switch to
    # density=True when upgrading matplotlib.
    plt.hist(x, normed=True, color='#cccccc')
    def true_dist(x):
        # density of the generating Gaussian
        return (1.0/np.sqrt(2*np.pi*sigma[0])*np.exp(-0.5*((x-mu[0])/sigma[0])**2))
    def estimated_dist(x):
        # density implied by the variational estimates
        return (1.0/np.sqrt(2*np.pi*sigma_expected)*np.exp(-0.5*((x-mu_expected)/sigma_expected)**2))
    x_axis = np.arange(np.min(x)-0.5, np.max(x)+0.5, 0.01)
    plt.plot(x_axis, true_dist(x_axis).reshape((-1,)), 'ro', label='True Distribution')
    plt.plot(x_axis, estimated_dist(x_axis).reshape((-1,)), 'co', label='Estimated Distribution')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()
"""
if __name__ == '__main__':
ex = IntermediateExample1(learning_rate=0.05)
for i in range(1000):
cost, mu, sigma, alpha, beta = ex.train_step(np.asarray([1.0, 2.0, 3.0]))
print (cost, mu, sigma, alpha, beta)
"""
| en | 0.538486 | Class implementing variational inference for 1D gaussian data by using monte-carlo approximation to ELBO __init__(IntermediateExample1, float, float, float, float, float, int, int) -> None mu_p: Hyperparamer for p(mu) sigma_p: Hyperparameter for p(mu) mu_var_p: Hyperparameter for p(sigma) sigma_var_p: Hyperparameter for p(sigma) learning_rate: Learning rate for optimizer K: Number of samples used for Monte-Carlo approximation of ELBO N: Number of examples per batch # Initialize the variables # Generate K samples of z # Reparameterisation trick for normal # Reparameterisation trick for exponential # Approximate ELBO (without constant term) # Maximize ELBO # Set up the session train_step(IntermediateExample1, ndarray) -> (float, float, float, float, float) x: Input data, vector of real numbers Returns: cost: -ELBO after a step of optimization mu, sigma, mu_var, sigma_var run_intermediate_example_1(int, float, float, float, float, float, float, float, float, int, int) -> None Runs the demo shown in the notebook for different settings of hyperparameters. # Generate the data # Compute variational inference estimate for the parameters # Compute mu_expected, sigma_expected # Expected value of mu using q_1 # Expected value of sigma using q_2 # Print results # Plot cost vs iterations # Show the histogram, true distribution and estimated distribution if __name__ == '__main__': ex = IntermediateExample1(learning_rate=0.05) for i in range(1000): cost, mu, sigma, alpha, beta = ex.train_step(np.asarray([1.0, 2.0, 3.0])) print (cost, mu, sigma, alpha, beta) | 2.979053 | 3 |
data_ingestion_example.py | SAP-samples/epd-connected-products-iot-connectivity | 0 | 6615848 | # prerequisite: pip install paho-mqtt
# see documentation https://www.eclipse.org/paho/clients/python/docs/
# run with: python data_ingestion_example.py
import paho.mqtt.client as mqtt
import ssl
import time
import logging
import json
import time
logging.basicConfig(format='[{asctime},{msecs:03.0f}] {levelname} {name}.{lineno}| {message}',
datefmt='%H:%M:%S',
level=logging.DEBUG,
style='{')
LOGGER = logging.getLogger()
# insert here your MQTT host, the device/sensor IDs, the path to device certificate (as part of the
# IoT service key), and the password for that certificate file
#
# Mapping of entity IDs:
# IoT deviceAlternateId -> AC equipment external object ID
# IoT sensorAlternateId -> AC model template ID
# IoT capabilityAlternateId -> AC indicator group ID
host = "sample.cp.iot.sap"
deviceAlternateId = "BB61BF0AED"
sensorAlternateId = "pipe_right"
capabilityAlternateId = "strains_raw"
certfile = "PATH/TO/CERTIFICATE.pem"
# !! Do not put this password into a SCM (e.e., Git)!!
# Instead, read it from an environment variable at runtime
certfilePassword = "PASSWORD"
# Example payload - each 'measure' item has to match exactly to the capability generated in IoT!
# Note the special requirements for transferring the time stamp, with property "_time":
# - needs not to be modeled in SAP Asset Central
# - content is EPOCH-milliseconds
def get_payload():
    """Build one MQTT measures message with two strain-gauge readings.

    Timestamps go in the reserved ``_time`` property as EPOCH milliseconds;
    the measure keys must match the IoT capability definition exactly.
    """
    now_ms = int(round(time.time() * 1000))
    first_reading = {
        "_time": now_ms - 500,
        "strain01": 100,
        "strain02": 101,
        "strain03": 102,
        "strain04": 103
    }
    second_reading = {
        "_time": now_ms,
        "strain01": 200,
        "strain02": 201,
        "strain03": 202,
        "strain04": 203
    }
    return {
        "measures": [first_reading, second_reading],
        "sensorAlternateId": sensorAlternateId,
        "capabilityAlternateId": capabilityAlternateId
    }
###############################################################################
# Human-readable descriptions for MQTT CONNACK return codes (used by on_connect).
return_codes = {
    0: "Connection successful",
    1: "Connection refused – incorrect protocol version",
    2: "Connection refused – invalid client identifier",
    3: "Connection refused – server unavailable",
    4: "Connection refused – bad username or password",
    5: "Connection refused – not authorised",
}
def on_connect(mqttc, obj, flags, rc):
    # Log the CONNACK result together with its human-readable meaning.
    reason = return_codes[rc]
    LOGGER.info(f"==on_connect== connect return code {str(rc)}: {reason}")
def on_message(mqttc, obj, msg):
    # Called for every message received on a subscribed topic.
    LOGGER.info(f"==on_message== {msg.topic} {msg.qos} {msg.payload}")
def on_publish(mqttc, obj, mid):
    # Confirms broker acceptance of an outgoing message (by message ID).
    LOGGER.info(f"==on_publish== Message ID: {mid}")
def on_subscribe(mqttc, obj, mid, granted_qos):
    # Reports the QoS level(s) the broker granted for a subscription.
    LOGGER.info(f"==on_subscribe== {mid} {granted_qos}")
# the client ID is essential here! It must equal the device alternate ID.
mqttc = mqtt.Client(client_id=deviceAlternateId)
port = 8883
mqttc.enable_logger()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
keepalive = 60
topic = f"measures/{deviceAlternateId}"
qos = 2
# Fixed: TLS must be configured BEFORE connect() — paho applies the SSL
# context when the socket is created, so the original order (connect first,
# tls_set_context after) never used the client certificate.
ssl_context = ssl.create_default_context()
# Fixed corrupted token: the certificate password variable is certfilePassword.
ssl_context.load_cert_chain(certfile, password=certfilePassword)
mqttc.tls_set_context(ssl_context)
LOGGER.debug(f"Connecting to {host} port: {str(port)}, keepalive {keepalive}")
mqttc.connect(host, port, keepalive)
LOGGER.debug("Loop starting...")
mqttc.loop_start()  # network loop runs in a background thread
for x in range(0, 10):
    message = json.dumps(get_payload())
    LOGGER.debug(f"Publishing # {x}: topic {topic}, qos {qos}, message {message}")
    # here comes now the data transfer:
    infot = mqttc.publish(topic, message, qos=qos)
    LOGGER.debug(f"Return code: {infot.rc}")
    infot.wait_for_publish()  # block until the QoS-2 handshake completes
    LOGGER.debug("Publishing DONE")
    time.sleep(1)
mqttc.loop_stop()
mqttc.disconnect()
| # prerequisite: pip install paho-mqtt
# see documentation https://www.eclipse.org/paho/clients/python/docs/
# run with: python data_ingestion_example.py
import paho.mqtt.client as mqtt
import ssl
import time
import logging
import json
import time
logging.basicConfig(format='[{asctime},{msecs:03.0f}] {levelname} {name}.{lineno}| {message}',
datefmt='%H:%M:%S',
level=logging.DEBUG,
style='{')
LOGGER = logging.getLogger()
# insert here your MQTT host, the device/sensor IDs, the path to device certificate (as part of the
# IoT service key), and the password for that certificate file
#
# Mapping of entity IDs:
# IoT deviceAlternateId -> AC equipment external object ID
# IoT sensorAlternateId -> AC model template ID
# IoT capabilityAlternateId -> AC indicator group ID
host = "sample.cp.iot.sap"
deviceAlternateId = "BB61BF0AED"
sensorAlternateId = "pipe_right"
capabilityAlternateId = "strains_raw"
certfile = "PATH/TO/CERTIFICATE.pem"
# !! Do not put this password into a SCM (e.e., Git)!!
# Instead, read it from an environment variable at runtime
certfilePassword = "PASSWORD"
# Example payload - each 'measure' item has to match exactly to the capability generated in IoT!
# Note the special requirements for transferring the time stamp, with property "_time":
# - needs not to be modeled in SAP Asset Central
# - content is EPOCH-milliseconds
def get_payload():
    """Build one MQTT measures message for the configured sensor/capability.

    ``_time`` is EPOCH milliseconds; each measure item must match the IoT
    capability definition exactly (see the header comment of this script).
    """
    millis = int(round(time.time() * 1000))
    payload = {
        "measures": [
            {
                "_time": millis - 500,
                "strain01": 100,
                "strain02": 101,
                "strain03": 102,
                "strain04": 103
            }, {
                "_time": millis,
                "strain01": 200,
                "strain02": 201,
                "strain03": 202,
                "strain04": 203
            }
        ],
        "sensorAlternateId": sensorAlternateId,
        "capabilityAlternateId": capabilityAlternateId
    }
    return payload
###############################################################################
# Human-readable descriptions for MQTT CONNACK return codes (used by on_connect).
return_codes = {
    0: "Connection successful",
    1: "Connection refused – incorrect protocol version",
    2: "Connection refused – invalid client identifier",
    3: "Connection refused – server unavailable",
    4: "Connection refused – bad username or password",
    5: "Connection refused – not authorised",
}
def on_connect(mqttc, obj, flags, rc):
    # CONNACK callback; rc indexes the return_codes mapping defined above.
    LOGGER.info(f"==on_connect== connect return code {str(rc)}: {return_codes[rc]}")
def on_message(mqttc, obj, msg):
    # Called for every message received on a subscribed topic.
    LOGGER.info("==on_message== " + msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
def on_publish(mqttc, obj, mid):
    # Confirms broker acceptance of an outgoing message (by message ID).
    LOGGER.info("==on_publish== Message ID: " + str(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
    # Reports the QoS level(s) the broker granted for a subscription.
    LOGGER.info("==on_subscribe== " + str(mid) + " " + str(granted_qos))
# the client ID is essential here! It must equal the device alternate ID.
mqttc = mqtt.Client(client_id=deviceAlternateId)
port = 8883
mqttc.enable_logger()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
keepalive = 60
topic = f"measures/{deviceAlternateId}"
qos = 2
# Fixed: TLS must be configured BEFORE connect() — paho applies the SSL
# context when the socket is created, so the original order (connect first,
# tls_set_context after) never used the client certificate.
ssl_context = ssl.create_default_context()
# Fixed corrupted token: the certificate password variable is certfilePassword.
ssl_context.load_cert_chain(certfile, password=certfilePassword)
mqttc.tls_set_context(ssl_context)
LOGGER.debug(f"Connecting to {host} port: {str(port)}, keepalive {keepalive}")
mqttc.connect(host, port, keepalive)
LOGGER.debug("Loop starting...")
mqttc.loop_start()  # network loop runs in a background thread
for x in range(0, 10):
    message = json.dumps(get_payload())
    LOGGER.debug(f"Publishing # {x}: topic {topic}, qos {qos}, message {message}")
    # here comes now the data transfer:
    infot = mqttc.publish(topic, message, qos=qos)
    LOGGER.debug(f"Return code: {infot.rc}")
    infot.wait_for_publish()  # block until the QoS-2 handshake completes
    LOGGER.debug("Publishing DONE")
    time.sleep(1)
mqttc.loop_stop()
mqttc.disconnect()
| en | 0.706025 | # prerequisite: pip install paho-mqtt # see documentation https://www.eclipse.org/paho/clients/python/docs/ # run with: python data_ingestion_example.py # insert here your MQTT host, the device/sensor IDs, the path to device certificate (as part of the # IoT service key), and the password for that certificate file # # Mapping of entity IDs: # IoT deviceAlternateId -> AC equipment external object ID # IoT sensorAlternateId -> AC model template ID # IoT capabilityAlternateId -> AC indicator group ID # !! Do not put this password into a SCM (e.e., Git)!! # Instead, read it from an environment variable at runtime # Example payload - each 'measure' item has to match exactly to the capability generated in IoT! # Note the special requirements for transferring the time stamp, with property "_time": # - needs not to be modeled in SAP Asset Central # - content is EPOCH-milliseconds ############################################################################### # the client ID is essential here! # {x}: topic {topic}, qos {qos}, message {message}") # here comes now the data transfer: | 2.672127 | 3 |
app/utils/__init__.py | LukeSamkharadze/wallet-management | 0 | 6615849 | from app.root_file import get_app_path
def get_root_path() -> str:
    """Return the application's root directory path."""
    return get_app_path()
| from app.root_file import get_app_path
def get_root_path() -> str:
root_dir = get_app_path()
return root_dir
| none | 1 | 2.060935 | 2 | |
Data Scientist Career Path/7. Summary Statistics/3. Variance n Standard Deviation/1. Variance/1. intro.py | myarist/Codecademy | 23 | 6615850 | <reponame>myarist/Codecademy
# Exercise script: two grade distributions with similar means but very
# different spread, motivating variance as a summary statistic.
import numpy as np
import matplotlib.pyplot as plt
import codecademylib3_seaborn
# 30 grades per teacher.
teacher_one_grades = [83.42, 88.04, 82.12, 85.02, 82.52, 87.47, 84.69, 85.18, 86.29, 85.53, 81.29, 82.54, 83.47, 83.91, 86.83, 88.5, 84.95, 83.79, 84.74, 84.03, 87.62, 81.15, 83.45, 80.24, 82.76, 83.98, 84.95, 83.37, 84.89, 87.29]
teacher_two_grades = [85.15, 95.64, 84.73, 71.46, 95.99, 81.61, 86.55, 79.81, 77.06, 92.86, 83.67, 73.63, 90.12, 80.64, 78.46, 76.86, 104.4, 88.53, 74.62, 91.27, 76.53, 94.37, 84.74, 81.84, 97.69, 70.77, 84.44, 88.06, 91.62, 65.82]
# The means are close; the histograms below show the spread differs.
print("Teacher One mean: " + str(np.mean(teacher_one_grades)))
print("Teacher Two mean: " + str(np.mean(teacher_two_grades)))
# Top panel of a 2x1 grid.
plt.subplot(211)
plt.title("Teacher One Grades")
plt.xlabel("Grades")
plt.hist(teacher_one_grades)
plt.xlim(65, 105)
# Bottom panel; shared x-limits make the spreads comparable.
plt.subplot(212)
plt.title("Teacher Two Grades")
plt.xlabel("Grades")
plt.hist(teacher_two_grades, bins = 20)
plt.xlim(65, 105)
plt.tight_layout()
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import codecademylib3_seaborn
teacher_one_grades = [83.42, 88.04, 82.12, 85.02, 82.52, 87.47, 84.69, 85.18, 86.29, 85.53, 81.29, 82.54, 83.47, 83.91, 86.83, 88.5, 84.95, 83.79, 84.74, 84.03, 87.62, 81.15, 83.45, 80.24, 82.76, 83.98, 84.95, 83.37, 84.89, 87.29]
teacher_two_grades = [85.15, 95.64, 84.73, 71.46, 95.99, 81.61, 86.55, 79.81, 77.06, 92.86, 83.67, 73.63, 90.12, 80.64, 78.46, 76.86, 104.4, 88.53, 74.62, 91.27, 76.53, 94.37, 84.74, 81.84, 97.69, 70.77, 84.44, 88.06, 91.62, 65.82]
print("Teacher One mean: " + str(np.mean(teacher_one_grades)))
print("Teacher Two mean: " + str(np.mean(teacher_two_grades)))
plt.subplot(211)
plt.title("Teacher One Grades")
plt.xlabel("Grades")
plt.hist(teacher_one_grades)
plt.xlim(65, 105)
plt.subplot(212)
plt.title("Teacher Two Grades")
plt.xlabel("Grades")
plt.hist(teacher_two_grades, bins = 20)
plt.xlim(65, 105)
plt.tight_layout()
plt.show() | none | 1 | 3.321654 | 3 |