code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from itertools import product
from LAMARCK_ML.data_util import IOLabel
from LAMARCK_ML.reproduction.methods import Mutation, Recombination, RandomStep
from LAMARCK_ML.architectures.losses import Reduce, LossInterface
from LAMARCK_ML.architectures.losses import SoftmaxCrossEntropyWithLogits, MeanSquaredError
from LAMARCK_ML.individuals.implementations.networkIndividualInterface import NetworkIndividualInterface
from LAMARCK_ML.individuals.implementations.NetworkIndividual_pb2 import NetworkIndividualProto
from LAMARCK_ML.architectures.weightAgnosticNN import WeightAgnosticNeuralNetwork
from LAMARCK_ML.data_util.attribute import attr2pb, pb2attr
from LAMARCK_ML.data_util import TypeShape, Shape, DimNames
from LAMARCK_ML.architectures.functions import Perceptron
from LAMARCK_ML.metrics import Accuracy
class WeightAgnosticIndividual(NetworkIndividualInterface,
Recombination.Interface,
Mutation.Interface,
RandomStep.Interface,
Accuracy.Interface,
):
arg_WEIGHTS = 'test_weights'
arg_NODES = 'nodes'
arg_INITIAL_DEPTH = 'initial_depth'
def __init__(self, **kwargs):
super(WeightAgnosticIndividual, self).__init__(**kwargs)
if len(self._networks) > 1:
raise Exception('Expected 1 or 0 networks got: ' + str(len(self._networks)))
elif len(self._networks) == 1:
self.network = self._networks[0]
else:
_input = self._data_nts[IOLabel.DATA]
_output = self._data_nts[IOLabel.TARGET]
in_name = _input[1]
shapes = list()
batch = _input[0].shape[DimNames.BATCH]
has_batch = False
dtype = _input[0].dtype
for dim in _input[0].shape.dim:
if dim.name != DimNames.BATCH:
shapes.append(list(range(dim.size)))
else:
has_batch = True
_input = dict()
for p in product(*shapes):
key = ':'.join([str(i) for i in p])
_input[key] = (IOLabel.DATA,
TypeShape(dtype, Shape((DimNames.UNITS, 1) if not has_batch else
(DimNames.BATCH, batch), (DimNames.UNITS, 1))),
in_name + '_' + key)
shapes = list()
batch = _output[0].shape[DimNames.BATCH]
has_batch = False
dtype = _output[0].dtype
for dim in _output[0].shape.dim:
if dim.name != DimNames.BATCH:
shapes.append(list(range(dim.size)))
else:
has_batch = True
_output = dict()
for p in product(*shapes):
_output[':'.join([str(i) for i in p])] = \
TypeShape(dtype, Shape((DimNames.UNITS, 1) if not has_batch else
(DimNames.BATCH, batch), (DimNames.UNITS, 1)))
self.network = WeightAgnosticNeuralNetwork(**{
WeightAgnosticNeuralNetwork.arg_INPUTS: _input,
WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: _output,
WeightAgnosticNeuralNetwork.arg_FUNCTIONS: kwargs.get(self.arg_WEIGHTS, [Perceptron]),
WeightAgnosticNeuralNetwork.arg_INITIAL_DEPTH: kwargs.get(self.arg_INITIAL_DEPTH, 1),
})
self._networks.append(self.network)
weights = kwargs.get(self.arg_WEIGHTS)
if weights is None or not (isinstance(weights, list) and all([isinstance(w, float) for w in weights])):
weights = [i - 2 for i in range(5)]
self.attr[self.arg_WEIGHTS] = weights
if len(self._losses) != 0:
raise Exception('Expected no loss!')
_output = self._data_nts[IOLabel.TARGET][0]
_output_units = _output.shape[DimNames.UNITS]
if _output_units == 1:
self.loss = MeanSquaredError(**{
LossInterface.arg_REDUCE: Reduce.MEAN,
})
else:
self.loss = SoftmaxCrossEntropyWithLogits(**{
LossInterface.arg_REDUCE: Reduce.MEAN
})
self._losses.append(self.loss)
def __sub__(self, other):
if not isinstance(other, self.__class__):
return -1
return self.network - other.network
def _cls_setstate(self, state):
if isinstance(state, str) or isinstance(state, bytes):
_individual = NetworkIndividualProto()
_individual.ParseFromString(state)
elif isinstance(state, NetworkIndividualProto):
_individual = state
else:
return
self._networks = list()
for network in _individual.networks:
_obj = WeightAgnosticNeuralNetwork.__new__(WeightAgnosticNeuralNetwork)
_obj.__setstate__(network)
self._networks.append(_obj)
self._data_nts = dict([(d.label, (TypeShape.from_pb(d.tsp), d.id_name)) for d in _individual.data_sources])
self._losses = list()
for loss in _individual.losses:
_obj = LossInterface.__new__(LossInterface)
_obj.__setstate__(loss)
self._losses.append(_obj)
super(NetworkIndividualInterface, self)._cls_setstate(_individual.baseIndividual)
if len(self._networks) != 1:
raise Exception('Restored individual has an invalid number of networks: ' + str(len(self._networks)))
self.network = self._networks[0]
if len(self._losses) != 1:
raise Exception('Restored individual has an invalid number of losses: ' + str(len(self._losses)))
self.loss = self._losses[0]
def __eq__(self, other):
if (super(WeightAgnosticIndividual, self).__eq__(other)
and self.loss == other.loss
and self.network == other.network
):
return True
return False
def norm(self, other):
if not isinstance(other, self.__class__):
return 0
return self.network.norm(other.network)
def update_state(self, *args, **kwargs):
self.network.update_state(*args, **kwargs)
def mutate(self, prob):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.mutate(prob=prob)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def step(self, step_size):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.step(step_size=step_size)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def recombine(self, other):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.recombine(other.network)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def build_instance(self, nn_framework):
nn_framework.init_model()
for f in self.network.functions:
nn_framework.add_function(f)
nn_framework.set_train_parameters(**{
nn_framework.arg_LOSS: self.loss.__class__,
})
nn_framework.finalize_model(output_ids=self.network.output_mapping.values())
# nn_framework.train() # This individual doesn't need to be trained
def train_instance(self, nn_framework):
return dict()
def accuracy(self, nn_framework):
acc = 0
weights = self.attr.get(self.arg_WEIGHTS, [])
for w in weights:
nn_framework.set_weights(**{
f.id_name: w for f in self.network.functions
})
acc += nn_framework.accuracy(self)
return acc / len(weights) | LAMARCK_ML/individuals/implementations/weightAgnosticIndividual.py | from itertools import product
from LAMARCK_ML.data_util import IOLabel
from LAMARCK_ML.reproduction.methods import Mutation, Recombination, RandomStep
from LAMARCK_ML.architectures.losses import Reduce, LossInterface
from LAMARCK_ML.architectures.losses import SoftmaxCrossEntropyWithLogits, MeanSquaredError
from LAMARCK_ML.individuals.implementations.networkIndividualInterface import NetworkIndividualInterface
from LAMARCK_ML.individuals.implementations.NetworkIndividual_pb2 import NetworkIndividualProto
from LAMARCK_ML.architectures.weightAgnosticNN import WeightAgnosticNeuralNetwork
from LAMARCK_ML.data_util.attribute import attr2pb, pb2attr
from LAMARCK_ML.data_util import TypeShape, Shape, DimNames
from LAMARCK_ML.architectures.functions import Perceptron
from LAMARCK_ML.metrics import Accuracy
class WeightAgnosticIndividual(NetworkIndividualInterface,
Recombination.Interface,
Mutation.Interface,
RandomStep.Interface,
Accuracy.Interface,
):
arg_WEIGHTS = 'test_weights'
arg_NODES = 'nodes'
arg_INITIAL_DEPTH = 'initial_depth'
def __init__(self, **kwargs):
super(WeightAgnosticIndividual, self).__init__(**kwargs)
if len(self._networks) > 1:
raise Exception('Expected 1 or 0 networks got: ' + str(len(self._networks)))
elif len(self._networks) == 1:
self.network = self._networks[0]
else:
_input = self._data_nts[IOLabel.DATA]
_output = self._data_nts[IOLabel.TARGET]
in_name = _input[1]
shapes = list()
batch = _input[0].shape[DimNames.BATCH]
has_batch = False
dtype = _input[0].dtype
for dim in _input[0].shape.dim:
if dim.name != DimNames.BATCH:
shapes.append(list(range(dim.size)))
else:
has_batch = True
_input = dict()
for p in product(*shapes):
key = ':'.join([str(i) for i in p])
_input[key] = (IOLabel.DATA,
TypeShape(dtype, Shape((DimNames.UNITS, 1) if not has_batch else
(DimNames.BATCH, batch), (DimNames.UNITS, 1))),
in_name + '_' + key)
shapes = list()
batch = _output[0].shape[DimNames.BATCH]
has_batch = False
dtype = _output[0].dtype
for dim in _output[0].shape.dim:
if dim.name != DimNames.BATCH:
shapes.append(list(range(dim.size)))
else:
has_batch = True
_output = dict()
for p in product(*shapes):
_output[':'.join([str(i) for i in p])] = \
TypeShape(dtype, Shape((DimNames.UNITS, 1) if not has_batch else
(DimNames.BATCH, batch), (DimNames.UNITS, 1)))
self.network = WeightAgnosticNeuralNetwork(**{
WeightAgnosticNeuralNetwork.arg_INPUTS: _input,
WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: _output,
WeightAgnosticNeuralNetwork.arg_FUNCTIONS: kwargs.get(self.arg_WEIGHTS, [Perceptron]),
WeightAgnosticNeuralNetwork.arg_INITIAL_DEPTH: kwargs.get(self.arg_INITIAL_DEPTH, 1),
})
self._networks.append(self.network)
weights = kwargs.get(self.arg_WEIGHTS)
if weights is None or not (isinstance(weights, list) and all([isinstance(w, float) for w in weights])):
weights = [i - 2 for i in range(5)]
self.attr[self.arg_WEIGHTS] = weights
if len(self._losses) != 0:
raise Exception('Expected no loss!')
_output = self._data_nts[IOLabel.TARGET][0]
_output_units = _output.shape[DimNames.UNITS]
if _output_units == 1:
self.loss = MeanSquaredError(**{
LossInterface.arg_REDUCE: Reduce.MEAN,
})
else:
self.loss = SoftmaxCrossEntropyWithLogits(**{
LossInterface.arg_REDUCE: Reduce.MEAN
})
self._losses.append(self.loss)
def __sub__(self, other):
if not isinstance(other, self.__class__):
return -1
return self.network - other.network
def _cls_setstate(self, state):
if isinstance(state, str) or isinstance(state, bytes):
_individual = NetworkIndividualProto()
_individual.ParseFromString(state)
elif isinstance(state, NetworkIndividualProto):
_individual = state
else:
return
self._networks = list()
for network in _individual.networks:
_obj = WeightAgnosticNeuralNetwork.__new__(WeightAgnosticNeuralNetwork)
_obj.__setstate__(network)
self._networks.append(_obj)
self._data_nts = dict([(d.label, (TypeShape.from_pb(d.tsp), d.id_name)) for d in _individual.data_sources])
self._losses = list()
for loss in _individual.losses:
_obj = LossInterface.__new__(LossInterface)
_obj.__setstate__(loss)
self._losses.append(_obj)
super(NetworkIndividualInterface, self)._cls_setstate(_individual.baseIndividual)
if len(self._networks) != 1:
raise Exception('Restored individual has an invalid number of networks: ' + str(len(self._networks)))
self.network = self._networks[0]
if len(self._losses) != 1:
raise Exception('Restored individual has an invalid number of losses: ' + str(len(self._losses)))
self.loss = self._losses[0]
def __eq__(self, other):
if (super(WeightAgnosticIndividual, self).__eq__(other)
and self.loss == other.loss
and self.network == other.network
):
return True
return False
def norm(self, other):
if not isinstance(other, self.__class__):
return 0
return self.network.norm(other.network)
def update_state(self, *args, **kwargs):
self.network.update_state(*args, **kwargs)
def mutate(self, prob):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.mutate(prob=prob)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def step(self, step_size):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.step(step_size=step_size)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def recombine(self, other):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.recombine(other.network)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def build_instance(self, nn_framework):
nn_framework.init_model()
for f in self.network.functions:
nn_framework.add_function(f)
nn_framework.set_train_parameters(**{
nn_framework.arg_LOSS: self.loss.__class__,
})
nn_framework.finalize_model(output_ids=self.network.output_mapping.values())
# nn_framework.train() # This individual doesn't need to be trained
def train_instance(self, nn_framework):
return dict()
def accuracy(self, nn_framework):
acc = 0
weights = self.attr.get(self.arg_WEIGHTS, [])
for w in weights:
nn_framework.set_weights(**{
f.id_name: w for f in self.network.functions
})
acc += nn_framework.accuracy(self)
return acc / len(weights) | 0.735926 | 0.377541 |
import pytest
from grunt.db import get_db
def test_character_index(client, auth):
response = client.get('/')
assert b"Log In" in response.data
assert b"Register" in response.data
auth.login()
response = client.get('/')
assert b'Log Out' in response.data
assert b'test title' in response.data
assert b'by test on 2018-01-01' in response.data
assert b'test\nbody' in response.data
assert b'href="/1/update"' in response.data
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
'/1/delete',
))
def test_login_required(client, path):
response = client.post(path)
assert response.headers['Location'] == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
# change the creator to another user
with app.app_context():
db = get_db()
db.execute('UPDATE character SET user_id = 2 WHERE id = 1')
db.commit()
auth.login()
# current user can't modify other user's character
assert client.post('/1/update').status_code == 403
assert client.post('/1/delete').status_code == 403
# current user doesn't see edit link
assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
'/2/update',
'/2/delete',
))
def test_exists_required(client, auth, path):
auth.login()
assert client.post(path).status_code == 404
def test_create(client, auth, app):
auth.login()
assert client.get('/create').status_code == 200
client.post('/create', data={'character_name': 'created'})
with app.app_context():
db = get_db()
count = db.execute('SELECT COUNT(id) FROM character').fetchone()[0]
assert count == 2
def test_update(client, auth, app):
auth.login()
assert client.get('/1/update').status_code == 200
client.post('/1/update', data={'character_name': 'updated'})
with app.app_context():
db = get_db()
character = db.execute('SELECT * FROM character WHERE id = 1').fetchone()
assert character['character_name'] == 'updated'
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
))
def test_create_update_validate(client, auth, path):
auth.login()
response = client.post(path, data={'character_name': ''})
assert b'Character name is required.' in response.data | tests/test_character.py | import pytest
from grunt.db import get_db
def test_character_index(client, auth):
response = client.get('/')
assert b"Log In" in response.data
assert b"Register" in response.data
auth.login()
response = client.get('/')
assert b'Log Out' in response.data
assert b'test title' in response.data
assert b'by test on 2018-01-01' in response.data
assert b'test\nbody' in response.data
assert b'href="/1/update"' in response.data
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
'/1/delete',
))
def test_login_required(client, path):
response = client.post(path)
assert response.headers['Location'] == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
# change the creator to another user
with app.app_context():
db = get_db()
db.execute('UPDATE character SET user_id = 2 WHERE id = 1')
db.commit()
auth.login()
# current user can't modify other user's character
assert client.post('/1/update').status_code == 403
assert client.post('/1/delete').status_code == 403
# current user doesn't see edit link
assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
'/2/update',
'/2/delete',
))
def test_exists_required(client, auth, path):
auth.login()
assert client.post(path).status_code == 404
def test_create(client, auth, app):
auth.login()
assert client.get('/create').status_code == 200
client.post('/create', data={'character_name': 'created'})
with app.app_context():
db = get_db()
count = db.execute('SELECT COUNT(id) FROM character').fetchone()[0]
assert count == 2
def test_update(client, auth, app):
auth.login()
assert client.get('/1/update').status_code == 200
client.post('/1/update', data={'character_name': 'updated'})
with app.app_context():
db = get_db()
character = db.execute('SELECT * FROM character WHERE id = 1').fetchone()
assert character['character_name'] == 'updated'
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
))
def test_create_update_validate(client, auth, path):
auth.login()
response = client.post(path, data={'character_name': ''})
assert b'Character name is required.' in response.data | 0.450601 | 0.306281 |
from django.test import TestCase
from dcim.models import Platform
from netbox_onboarding.onboard import NetdevKeeper, OnboardException
class NetdevKeeperTestCase(TestCase):
"""Test the NetdevKeeper Class."""
def setUp(self):
"""Create a superuser and token for API calls."""
self.platform1 = Platform.objects.create(name="JunOS", slug="junos", napalm_driver="junos")
self.platform2 = Platform.objects.create(name="Cisco NX-OS", slug="cisco-nx-os")
def test_get_platform_object_from_netbox(self):
"""Test of platform object from netbox."""
# Test assigning platform
platform = NetdevKeeper.get_platform_object_from_netbox("junos", create_platform_if_missing=False)
self.assertIsInstance(platform, Platform)
# Test creation of missing platform object
platform = NetdevKeeper.get_platform_object_from_netbox("arista_eos", create_platform_if_missing=True)
self.assertIsInstance(platform, Platform)
self.assertEqual(platform.napalm_driver, "eos")
# Test failed unable to find the device and not part of the NETMIKO TO NAPALM keys
with self.assertRaises(OnboardException) as exc_info:
platform = NetdevKeeper.get_platform_object_from_netbox("notthere", create_platform_if_missing=True)
self.assertEqual(
exc_info.exception.message,
"ERROR platform not found in NetBox and it's eligible for auto-creation: notthere",
)
self.assertEqual(exc_info.exception.reason, "fail-general")
# Test searching for an object, does not exist, but create_platform is false
with self.assertRaises(OnboardException) as exc_info:
platform = NetdevKeeper.get_platform_object_from_netbox("cisco_ios", create_platform_if_missing=False)
self.assertEqual(exc_info.exception.message, "ERROR platform not found in NetBox: cisco_ios")
self.assertEqual(exc_info.exception.reason, "fail-general")
# Test NAPALM Driver not defined in NetBox
with self.assertRaises(OnboardException) as exc_info:
platform = NetdevKeeper.get_platform_object_from_netbox("cisco-nx-os", create_platform_if_missing=False)
self.assertEqual(exc_info.exception.message, "ERROR platform is missing the NAPALM Driver: cisco-nx-os")
self.assertEqual(exc_info.exception.reason, "fail-general") | netbox_onboarding/tests/test_netdev_keeper.py | from django.test import TestCase
from dcim.models import Platform
from netbox_onboarding.onboard import NetdevKeeper, OnboardException
class NetdevKeeperTestCase(TestCase):
"""Test the NetdevKeeper Class."""
def setUp(self):
"""Create a superuser and token for API calls."""
self.platform1 = Platform.objects.create(name="JunOS", slug="junos", napalm_driver="junos")
self.platform2 = Platform.objects.create(name="Cisco NX-OS", slug="cisco-nx-os")
def test_get_platform_object_from_netbox(self):
"""Test of platform object from netbox."""
# Test assigning platform
platform = NetdevKeeper.get_platform_object_from_netbox("junos", create_platform_if_missing=False)
self.assertIsInstance(platform, Platform)
# Test creation of missing platform object
platform = NetdevKeeper.get_platform_object_from_netbox("arista_eos", create_platform_if_missing=True)
self.assertIsInstance(platform, Platform)
self.assertEqual(platform.napalm_driver, "eos")
# Test failed unable to find the device and not part of the NETMIKO TO NAPALM keys
with self.assertRaises(OnboardException) as exc_info:
platform = NetdevKeeper.get_platform_object_from_netbox("notthere", create_platform_if_missing=True)
self.assertEqual(
exc_info.exception.message,
"ERROR platform not found in NetBox and it's eligible for auto-creation: notthere",
)
self.assertEqual(exc_info.exception.reason, "fail-general")
# Test searching for an object, does not exist, but create_platform is false
with self.assertRaises(OnboardException) as exc_info:
platform = NetdevKeeper.get_platform_object_from_netbox("cisco_ios", create_platform_if_missing=False)
self.assertEqual(exc_info.exception.message, "ERROR platform not found in NetBox: cisco_ios")
self.assertEqual(exc_info.exception.reason, "fail-general")
# Test NAPALM Driver not defined in NetBox
with self.assertRaises(OnboardException) as exc_info:
platform = NetdevKeeper.get_platform_object_from_netbox("cisco-nx-os", create_platform_if_missing=False)
self.assertEqual(exc_info.exception.message, "ERROR platform is missing the NAPALM Driver: cisco-nx-os")
self.assertEqual(exc_info.exception.reason, "fail-general") | 0.704973 | 0.293835 |
"""Tests for xls.tools.delay_info_main."""
import subprocess
from xls.common import runfiles
from xls.common import test_base
DELAY_INFO_MAIN_PATH = runfiles.get_path('xls/tools/delay_info_main')
NOT_ADD_IR = """package not_add
fn not_add(x: bits[32], y: bits[32]) -> bits[32] {
sum: bits[32] = add(x, y)
ret not_sum: bits[32] = not(sum)
}
"""
NOT_ADD_SCHEDULE = """
stages {
stage: 0
nodes: "x"
nodes: "y"
}
stages {
stage: 1
nodes: "sum"
nodes: "not_sum"
}
"""
class DelayInfoMainTest(test_base.TestCase):
def test_without_schedule(self):
"""Test tool without specifying --schedule_path."""
ir_file = self.create_tempfile(content=NOT_ADD_IR)
optimized_ir = subprocess.check_output(
[DELAY_INFO_MAIN_PATH, '--delay_model=unit',
ir_file.full_path]).decode('utf-8')
self.assertEqual(
optimized_ir, """# Critical path:
3ps (+ 1ps): not_sum: bits[32] = not(sum: bits[32], id=4)
2ps (+ 1ps): sum: bits[32] = add(x: bits[32], y: bits[32], id=3)
1ps (+ 1ps): x: bits[32] = param(x, id=1)
# Delay of all nodes:
x : 1ps
y : 1ps
sum : 1ps
not_sum : 1ps
""")
def test_with_schedule(self):
"""Test tool with specifying --schedule_path."""
ir_file = self.create_tempfile(content=NOT_ADD_IR)
schedule_file = self.create_tempfile(content=NOT_ADD_SCHEDULE)
optimized_ir = subprocess.check_output([
DELAY_INFO_MAIN_PATH, '--delay_model=unit', '--alsologtostderr',
f'--schedule_path={schedule_file.full_path}', ir_file.full_path
]).decode('utf-8')
self.assertEqual(
optimized_ir, """# Critical path for stage 0:
2ps (+ 1ps): tuple.7: (bits[32], bits[32]) = tuple(x: bits[32], y: bits[32], id=7)
1ps (+ 1ps): x: bits[32] = param(x, id=5)
# Critical path for stage 1:
3ps (+ 1ps): not_sum: bits[32] = not(sum: bits[32], id=11)
2ps (+ 1ps): sum: bits[32] = add(x: bits[32], y: bits[32], id=10)
1ps (+ 1ps): x: bits[32] = param(x, id=8)
# Delay of all nodes:
x : 1ps
y : 1ps
sum : 1ps
not_sum : 1ps
""")
if __name__ == '__main__':
test_base.main() | xls/tools/delay_info_main_test.py | """Tests for xls.tools.delay_info_main."""
import subprocess
from xls.common import runfiles
from xls.common import test_base
DELAY_INFO_MAIN_PATH = runfiles.get_path('xls/tools/delay_info_main')
NOT_ADD_IR = """package not_add
fn not_add(x: bits[32], y: bits[32]) -> bits[32] {
sum: bits[32] = add(x, y)
ret not_sum: bits[32] = not(sum)
}
"""
NOT_ADD_SCHEDULE = """
stages {
stage: 0
nodes: "x"
nodes: "y"
}
stages {
stage: 1
nodes: "sum"
nodes: "not_sum"
}
"""
class DelayInfoMainTest(test_base.TestCase):
def test_without_schedule(self):
"""Test tool without specifying --schedule_path."""
ir_file = self.create_tempfile(content=NOT_ADD_IR)
optimized_ir = subprocess.check_output(
[DELAY_INFO_MAIN_PATH, '--delay_model=unit',
ir_file.full_path]).decode('utf-8')
self.assertEqual(
optimized_ir, """# Critical path:
3ps (+ 1ps): not_sum: bits[32] = not(sum: bits[32], id=4)
2ps (+ 1ps): sum: bits[32] = add(x: bits[32], y: bits[32], id=3)
1ps (+ 1ps): x: bits[32] = param(x, id=1)
# Delay of all nodes:
x : 1ps
y : 1ps
sum : 1ps
not_sum : 1ps
""")
def test_with_schedule(self):
"""Test tool with specifying --schedule_path."""
ir_file = self.create_tempfile(content=NOT_ADD_IR)
schedule_file = self.create_tempfile(content=NOT_ADD_SCHEDULE)
optimized_ir = subprocess.check_output([
DELAY_INFO_MAIN_PATH, '--delay_model=unit', '--alsologtostderr',
f'--schedule_path={schedule_file.full_path}', ir_file.full_path
]).decode('utf-8')
self.assertEqual(
optimized_ir, """# Critical path for stage 0:
2ps (+ 1ps): tuple.7: (bits[32], bits[32]) = tuple(x: bits[32], y: bits[32], id=7)
1ps (+ 1ps): x: bits[32] = param(x, id=5)
# Critical path for stage 1:
3ps (+ 1ps): not_sum: bits[32] = not(sum: bits[32], id=11)
2ps (+ 1ps): sum: bits[32] = add(x: bits[32], y: bits[32], id=10)
1ps (+ 1ps): x: bits[32] = param(x, id=8)
# Delay of all nodes:
x : 1ps
y : 1ps
sum : 1ps
not_sum : 1ps
""")
if __name__ == '__main__':
test_base.main() | 0.620162 | 0.282976 |
from optparse import OptionParser
import simplejson
import structlog
from kafka import KafkaConsumer
import pickle
import struct
import socket
import sys
import time
from kafka.consumer.fetcher import ConsumerRecord
from kafka.errors import KafkaError
from common.utils.consulhelpers import get_endpoint_from_consul
log = structlog.get_logger()
class Graphite:
def __init__(self, host='localhost', port=2004, retry=5, delay=3,
backoff=2, timeout=10):
self.host = host
self.port = port
self.retry = retry
self.delay = delay
self.backoff = backoff
self.timeout = timeout
# Create initial socket
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.settimeout(self.timeout)
# Initiate connection
self.connect()
def _backoff(self, retry, delay, backoff):
"""Exponential backoff."""
retry -= 1
if retry == 0:
raise Exception('Timeout')
time.sleep(delay)
delay *= backoff
return retry, delay, backoff
def _retry(self, exception, func, *args):
"""
Retry calling the func catching a tuple of exceptions with backoff.
"""
retry = self.retry
delay = self.delay
backoff = self.backoff
while retry > 0:
try:
return func(*args)
except exception, e:
retry, delay, backoff = self._backoff(retry, delay, backoff)
def connect(self):
"""Connect to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
while retry > 0:
try:
# Attempt to connect to Graphite, break if success
self.conn.connect((self.host, self.port))
break
except socket.error, e:
# Ditch this socket. Create a new one
self.conn.close()
self.conn.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
def close(self):
"""Close connection go Graphite."""
self.conn.close()
def send(self, data, retry=3):
"""Send data to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
# Attempt to send any data in the queue
while retry > 0:
# Check socket
if not self.conn:
# Attempt to restablish connection
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
try:
# Send data to socket
self.conn.sendall(data)
break
except socket.error, e:
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
def _pickle(batch):
"""Pickle metrics into graphite format."""
payload = pickle.dumps(batch)
header = struct.pack("!L", len(payload))
message = header + payload
return message
def _convert(msg):
"""Convert a graphite key value string to pickle."""
def extract_slice(ts, prefixes):
for object_path, metrics in prefixes.iteritems():
for metric_name, value in metrics['metrics'].iteritems():
path = '.'.join((object_path, metric_name))
yield (path, ts, value)
assert isinstance(msg, dict)
type = msg.get('type')
if type == 'slice':
extractor, kw = extract_slice, dict(ts=msg['ts'],
prefixes=msg['prefixes'])
else:
raise Exception('Unknown format')
batch = []
for path, timestamp, value in extractor(**kw):
batch.append((path, (timestamp, value)))
return batch
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-K", "--kafka", dest="kafka",
default="localhost:9092", help="Kafka bootstrap server")
parser.add_option("-c", "--consul", dest="consul",
default="localhost:8500",
help="Consul server (needed if kafak server is specifed"
"with '@kafka' value)")
parser.add_option("-t", "--topic", dest="topic", help="Kafka topic")
parser.add_option("-H", "--host", dest="graphite_host",
default="localhost", help="Graphite host")
parser.add_option("-p", "--port", dest="graphite_port", type=int,
default=2004, help="Graphite port")
(options, args) = parser.parse_args()
# Assign OptParse variables
kafka = options.kafka
consul = options.consul
topic = options.topic
host = options.graphite_host
port = options.graphite_port
# Connect to Graphite
try:
graphite = Graphite(host, port)
except socket.error, e:
print "Could not connect to graphite host %s:%s" % (host, port)
sys.exit(1)
except socket.gaierror, e:
print "Invalid hostname for graphite host %s" % (host)
sys.exit(1)
log.info('Connected to graphite at {}:{}'.format(host, port))
# Resolve Kafka value if it is based on consul lookup
if kafka.startswith('@'):
kafka = get_endpoint_from_consul(consul, kafka[1:])
# Connect to Kafka
try:
log.info('connect-to-kafka', kafka=kafka)
consumer = KafkaConsumer(topic, bootstrap_servers=kafka)
except KafkaError, e:
log.error('failed-to-connect-to-kafka', kafka=kafka, e=e)
sys.exit(1)
# Consume Kafka topic
log.info('start-loop', topic=topic)
for record in consumer:
assert isinstance(record, ConsumerRecord)
msg = record.value
try:
batch = _convert(simplejson.loads(msg))
except Exception, e:
log.warn('unknown-format', msg=msg)
continue
pickled = _pickle(batch)
graphite.send(pickled)
log.debug('sent', batch_len=len(batch))
log.info('exited') | shovel/main.py | from optparse import OptionParser
import simplejson
import structlog
from kafka import KafkaConsumer
import pickle
import struct
import socket
import sys
import time
from kafka.consumer.fetcher import ConsumerRecord
from kafka.errors import KafkaError
from common.utils.consulhelpers import get_endpoint_from_consul
log = structlog.get_logger()
class Graphite:
def __init__(self, host='localhost', port=2004, retry=5, delay=3,
backoff=2, timeout=10):
self.host = host
self.port = port
self.retry = retry
self.delay = delay
self.backoff = backoff
self.timeout = timeout
# Create initial socket
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.settimeout(self.timeout)
# Initiate connection
self.connect()
def _backoff(self, retry, delay, backoff):
"""Exponential backoff."""
retry -= 1
if retry == 0:
raise Exception('Timeout')
time.sleep(delay)
delay *= backoff
return retry, delay, backoff
def _retry(self, exception, func, *args):
"""
Retry calling the func catching a tuple of exceptions with backoff.
"""
retry = self.retry
delay = self.delay
backoff = self.backoff
while retry > 0:
try:
return func(*args)
except exception, e:
retry, delay, backoff = self._backoff(retry, delay, backoff)
def connect(self):
"""Connect to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
while retry > 0:
try:
# Attempt to connect to Graphite, break if success
self.conn.connect((self.host, self.port))
break
except socket.error, e:
# Ditch this socket. Create a new one
self.conn.close()
self.conn.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
def close(self):
"""Close connection go Graphite."""
self.conn.close()
def send(self, data, retry=3):
"""Send data to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
# Attempt to send any data in the queue
while retry > 0:
# Check socket
if not self.conn:
# Attempt to restablish connection
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
try:
# Send data to socket
self.conn.sendall(data)
break
except socket.error, e:
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
def _pickle(batch):
"""Pickle metrics into graphite format."""
payload = pickle.dumps(batch)
header = struct.pack("!L", len(payload))
message = header + payload
return message
def _convert(msg):
"""Convert a graphite key value string to pickle."""
def extract_slice(ts, prefixes):
for object_path, metrics in prefixes.iteritems():
for metric_name, value in metrics['metrics'].iteritems():
path = '.'.join((object_path, metric_name))
yield (path, ts, value)
assert isinstance(msg, dict)
type = msg.get('type')
if type == 'slice':
extractor, kw = extract_slice, dict(ts=msg['ts'],
prefixes=msg['prefixes'])
else:
raise Exception('Unknown format')
batch = []
for path, timestamp, value in extractor(**kw):
batch.append((path, (timestamp, value)))
return batch
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-K", "--kafka", dest="kafka",
default="localhost:9092", help="Kafka bootstrap server")
parser.add_option("-c", "--consul", dest="consul",
default="localhost:8500",
help="Consul server (needed if kafak server is specifed"
"with '@kafka' value)")
parser.add_option("-t", "--topic", dest="topic", help="Kafka topic")
parser.add_option("-H", "--host", dest="graphite_host",
default="localhost", help="Graphite host")
parser.add_option("-p", "--port", dest="graphite_port", type=int,
default=2004, help="Graphite port")
(options, args) = parser.parse_args()
# Assign OptParse variables
kafka = options.kafka
consul = options.consul
topic = options.topic
host = options.graphite_host
port = options.graphite_port
# Connect to Graphite
try:
graphite = Graphite(host, port)
except socket.error, e:
print "Could not connect to graphite host %s:%s" % (host, port)
sys.exit(1)
except socket.gaierror, e:
print "Invalid hostname for graphite host %s" % (host)
sys.exit(1)
log.info('Connected to graphite at {}:{}'.format(host, port))
# Resolve Kafka value if it is based on consul lookup
if kafka.startswith('@'):
kafka = get_endpoint_from_consul(consul, kafka[1:])
# Connect to Kafka
try:
log.info('connect-to-kafka', kafka=kafka)
consumer = KafkaConsumer(topic, bootstrap_servers=kafka)
except KafkaError, e:
log.error('failed-to-connect-to-kafka', kafka=kafka, e=e)
sys.exit(1)
# Consume Kafka topic
log.info('start-loop', topic=topic)
for record in consumer:
assert isinstance(record, ConsumerRecord)
msg = record.value
try:
batch = _convert(simplejson.loads(msg))
except Exception, e:
log.warn('unknown-format', msg=msg)
continue
pickled = _pickle(batch)
graphite.send(pickled)
log.debug('sent', batch_len=len(batch))
log.info('exited') | 0.444565 | 0.087994 |
from __future__ import print_function
import os
import sys
from command import Command
from git_config import IsImmutable
from git_command import git
import gitc_utils
from progress import Progress
from project import SyncBuffer
class Start(Command):
common = True
helpSummary = "Start a new branch for development"
helpUsage = """
%prog <newbranchname> [--all | <project>...]
"""
helpDescription = """
'%prog' begins a new branch of development, starting from the
revision specified in the manifest.
"""
def _Options(self, p):
p.add_option('--all',
dest='all', action='store_true',
help='begin branch in all projects')
def ValidateOptions(self, opt, args):
if not args:
self.Usage()
nb = args[0]
if not git.check_ref_format('heads/%s' % nb):
self.OptionParser.error("'%s' is not a valid name" % nb)
def Execute(self, opt, args):
nb = args[0]
err = []
projects = []
if not opt.all:
projects = args[1:]
if len(projects) < 1:
projects = ['.',] # start it in the local project by default
all_projects = self.GetProjects(projects,
missing_ok=bool(self.gitc_manifest))
# This must happen after we find all_projects, since GetProjects may need
# the local directory, which will disappear once we save the GITC manifest.
if self.gitc_manifest:
gitc_projects = self.GetProjects(projects, manifest=self.gitc_manifest,
missing_ok=True)
for project in gitc_projects:
if project.old_revision:
project.already_synced = True
else:
project.already_synced = False
project.old_revision = project.revisionExpr
project.revisionExpr = None
# Save the GITC manifest.
gitc_utils.save_manifest(self.gitc_manifest)
# Make sure we have a valid CWD
if not os.path.exists(os.getcwd()):
os.chdir(self.manifest.topdir)
pm = Progress('Starting %s' % nb, len(all_projects))
for project in all_projects:
pm.update()
if self.gitc_manifest:
gitc_project = self.gitc_manifest.paths[project.relpath]
# Sync projects that have not been opened.
if not gitc_project.already_synced:
proj_localdir = os.path.join(self.gitc_manifest.gitc_client_dir,
project.relpath)
project.worktree = proj_localdir
if not os.path.exists(proj_localdir):
os.makedirs(proj_localdir)
project.Sync_NetworkHalf()
sync_buf = SyncBuffer(self.manifest.manifestProject.config)
project.Sync_LocalHalf(sync_buf)
project.revisionId = gitc_project.old_revision
# If the current revision is immutable, such as a SHA1, a tag or
# a change, then we can't push back to it. Substitute with
# dest_branch, if defined; or with manifest default revision instead.
branch_merge = ''
if IsImmutable(project.revisionExpr):
if project.dest_branch:
branch_merge = project.dest_branch
else:
branch_merge = self.manifest.default.revisionExpr
if not project.StartBranch(nb, branch_merge=branch_merge):
err.append(project)
pm.end()
if err:
for p in err:
print("error: %s/: cannot start %s" % (p.relpath, nb),
file=sys.stderr)
sys.exit(1) | subcmds/start.py |
from __future__ import print_function
import os
import sys
from command import Command
from git_config import IsImmutable
from git_command import git
import gitc_utils
from progress import Progress
from project import SyncBuffer
class Start(Command):
common = True
helpSummary = "Start a new branch for development"
helpUsage = """
%prog <newbranchname> [--all | <project>...]
"""
helpDescription = """
'%prog' begins a new branch of development, starting from the
revision specified in the manifest.
"""
def _Options(self, p):
p.add_option('--all',
dest='all', action='store_true',
help='begin branch in all projects')
def ValidateOptions(self, opt, args):
if not args:
self.Usage()
nb = args[0]
if not git.check_ref_format('heads/%s' % nb):
self.OptionParser.error("'%s' is not a valid name" % nb)
def Execute(self, opt, args):
nb = args[0]
err = []
projects = []
if not opt.all:
projects = args[1:]
if len(projects) < 1:
projects = ['.',] # start it in the local project by default
all_projects = self.GetProjects(projects,
missing_ok=bool(self.gitc_manifest))
# This must happen after we find all_projects, since GetProjects may need
# the local directory, which will disappear once we save the GITC manifest.
if self.gitc_manifest:
gitc_projects = self.GetProjects(projects, manifest=self.gitc_manifest,
missing_ok=True)
for project in gitc_projects:
if project.old_revision:
project.already_synced = True
else:
project.already_synced = False
project.old_revision = project.revisionExpr
project.revisionExpr = None
# Save the GITC manifest.
gitc_utils.save_manifest(self.gitc_manifest)
# Make sure we have a valid CWD
if not os.path.exists(os.getcwd()):
os.chdir(self.manifest.topdir)
pm = Progress('Starting %s' % nb, len(all_projects))
for project in all_projects:
pm.update()
if self.gitc_manifest:
gitc_project = self.gitc_manifest.paths[project.relpath]
# Sync projects that have not been opened.
if not gitc_project.already_synced:
proj_localdir = os.path.join(self.gitc_manifest.gitc_client_dir,
project.relpath)
project.worktree = proj_localdir
if not os.path.exists(proj_localdir):
os.makedirs(proj_localdir)
project.Sync_NetworkHalf()
sync_buf = SyncBuffer(self.manifest.manifestProject.config)
project.Sync_LocalHalf(sync_buf)
project.revisionId = gitc_project.old_revision
# If the current revision is immutable, such as a SHA1, a tag or
# a change, then we can't push back to it. Substitute with
# dest_branch, if defined; or with manifest default revision instead.
branch_merge = ''
if IsImmutable(project.revisionExpr):
if project.dest_branch:
branch_merge = project.dest_branch
else:
branch_merge = self.manifest.default.revisionExpr
if not project.StartBranch(nb, branch_merge=branch_merge):
err.append(project)
pm.end()
if err:
for p in err:
print("error: %s/: cannot start %s" % (p.relpath, nb),
file=sys.stderr)
sys.exit(1) | 0.325735 | 0.076408 |
from tasks.tags import SubsectionTags, SectionTags, UnitTags
from tasks.meta import (GEOM_REF, current_session, GEOM_NAME, OBSColumn)
from tasks.util import ColumnsTask, TableTask, Carto2TempTableTask, MetaWrapper
from collections import OrderedDict
class ImportThai(Carto2TempTableTask):
subdomain = 'solutions'
table = 'thai_districts'
class ThaiColumns(ColumnsTask):
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'units': UnitTags(),
}
def version(self):
return 5
def columns(self):
inputs = self.input()
age_gender = inputs['subsections']['age_gender']
boundaries = inputs['subsections']['boundary']
thailand = inputs['sections']['th']
people = inputs['units']['people']
names = inputs['subsections']['names']
the_geom = OBSColumn(
name='District',
description='Districts in Thailand, also known as amphoes, are '
'administrative regions analogous to counties that make up the provinces. '
'There are 878 amphoes in Thailand and '
'50 urban districts of Bangkok known as khets.',
type='Geometry',
weight=5,
tags=[thailand, boundaries],
)
id_2 = OBSColumn(
type='Text',
weight=0,
tags=[],
targets={the_geom: GEOM_REF},
)
pop = OBSColumn(
name='Population in 2010',
type='Numeric',
aggregate='sum',
weight=5,
tags=[thailand, age_gender, people],
)
name = OBSColumn(
name='Name of District',
type='Text',
weight=5,
tags=[thailand, names],
targets={the_geom: GEOM_NAME},
)
return OrderedDict([
('the_geom', the_geom),
('id_2', id_2),
('pop', pop),
('name', name),
])
class ThaiDistricts(TableTask):
def requires(self):
return {
'meta': ThaiColumns(),
'data': ImportThai(),
}
def version(self):
return 4
def timespan(self):
return '2010'
def columns(self):
return self.input()['meta']
def populate(self):
session = current_session()
session.execute(' INSERT INTO {output} '
' SELECT the_geom, id_2, pop2010, name_2 '
' FROM {input} '.format(
output=self.output().table,
input=self.input()['data'].table
))
class ThaiMetaWrapper(MetaWrapper):
def tables(self):
yield ThaiDistricts() | tasks/th/thaipop.py | from tasks.tags import SubsectionTags, SectionTags, UnitTags
from tasks.meta import (GEOM_REF, current_session, GEOM_NAME, OBSColumn)
from tasks.util import ColumnsTask, TableTask, Carto2TempTableTask, MetaWrapper
from collections import OrderedDict
class ImportThai(Carto2TempTableTask):
subdomain = 'solutions'
table = 'thai_districts'
class ThaiColumns(ColumnsTask):
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'units': UnitTags(),
}
def version(self):
return 5
def columns(self):
inputs = self.input()
age_gender = inputs['subsections']['age_gender']
boundaries = inputs['subsections']['boundary']
thailand = inputs['sections']['th']
people = inputs['units']['people']
names = inputs['subsections']['names']
the_geom = OBSColumn(
name='District',
description='Districts in Thailand, also known as amphoes, are '
'administrative regions analogous to counties that make up the provinces. '
'There are 878 amphoes in Thailand and '
'50 urban districts of Bangkok known as khets.',
type='Geometry',
weight=5,
tags=[thailand, boundaries],
)
id_2 = OBSColumn(
type='Text',
weight=0,
tags=[],
targets={the_geom: GEOM_REF},
)
pop = OBSColumn(
name='Population in 2010',
type='Numeric',
aggregate='sum',
weight=5,
tags=[thailand, age_gender, people],
)
name = OBSColumn(
name='Name of District',
type='Text',
weight=5,
tags=[thailand, names],
targets={the_geom: GEOM_NAME},
)
return OrderedDict([
('the_geom', the_geom),
('id_2', id_2),
('pop', pop),
('name', name),
])
class ThaiDistricts(TableTask):
def requires(self):
return {
'meta': ThaiColumns(),
'data': ImportThai(),
}
def version(self):
return 4
def timespan(self):
return '2010'
def columns(self):
return self.input()['meta']
def populate(self):
session = current_session()
session.execute(' INSERT INTO {output} '
' SELECT the_geom, id_2, pop2010, name_2 '
' FROM {input} '.format(
output=self.output().table,
input=self.input()['data'].table
))
class ThaiMetaWrapper(MetaWrapper):
def tables(self):
yield ThaiDistricts() | 0.659953 | 0.349477 |
import os,sys
import datetime
import time
from schainpy.controller import Project
path = '/home/alex/Downloads/NEW_WR2/spc16removeDC'
figpath = path
desc = "Simulator Test"
controllerObj = Project()
controllerObj.setup(id='10',name='Test Simulator',description=desc)
readUnitConfObj = controllerObj.addReadUnit(datatype='SimulatorReader',
frequency=9.345e9,
FixRCP_IPP= 60,
Tau_0 = 30,
AcqH0_0=0,
samples=330,
AcqDH_0=0.15,
FixRCP_TXA=0.15,
FixRCP_TXB=0.15,
Fdoppler=600.0,
Hdoppler=36,
Adoppler=300,
delay=0,
online=0,
walk=0,
nTotalReadFiles=3)
opObj11 = readUnitConfObj.addOperation(name='printInfo')
procUnitConfObjA = controllerObj.addProcUnit(datatype='VoltageProc', inputId=readUnitConfObj.getId())
opObj10 = procUnitConfObjA.addOperation(name='selectChannels')
opObj10.addParameter(name='channelList', value=[0])
procUnitConfObjB = controllerObj.addProcUnit(datatype='SpectraProc', inputId=procUnitConfObjA.getId())
procUnitConfObjB.addParameter(name='nFFTPoints', value=300, format='int')
procUnitConfObjB.addParameter(name='nProfiles', value=300, format='int')
opObj11 = procUnitConfObjB.addOperation(name='removeDC')
opObj11.addParameter(name='mode', value=2)
#opObj11 = procUnitConfObjB.addOperation(name='IncohInt', optype='other')
#opObj11.addParameter(name='n', value='10', format='int')
#opObj11 = procUnitConfObjB.addOperation(name='SpectraPlot')
#opObj11 = procUnitConfObjB.addOperation(name='PowerProfilePlot')
#opObj11.addParameter(name='xmin', value=13)
#opObj11.addParameter(name='xmax', value=.4)
#opObj11 = procUnitConfObjB.addOperation(name='NoisePlot')
#opObj11.addParameter(name='xmin', value=13)
#opObj11.addParameter(name='xmax', value=14)
procUnitConfObjC = controllerObj.addProcUnit(datatype='ParametersProc', inputId=procUnitConfObjB.getId())
procUnitConfObjC.addOperation(name='SpectralMoments')
opObj11 = procUnitConfObjC.addOperation(name='SpectralMomentsPlot')
#opObj11.addParameter(name='xmin', value=14)
#opObj11.addParameter(name='xmax', value=15)
#opObj11.addParameter(name='save', value=figpath)
opObj11.addParameter(name='showprofile', value=1)
#opObj11.addParameter(name='save_period', value=10)
'''
opObj11 = procUnitConfObjC.addOperation(name='SnrPlot')
opObj11.addParameter(name='zmin', value=-10)
opObj11.addParameter(name='zmax', value=40)
#opObj11.addParameter(name='save', value=figpath)
#opObj11.addParameter(name='showprofile', value=1)
#opObj11.addParameter(name='save_period', value=10)
'''
opObj11 = procUnitConfObjC.addOperation(name='SpectralWidthPlot')
opObj11.addParameter(name='xmin', value=5)
opObj11.addParameter(name='xmax', value=6)
#opObj11.addParameter(name='save', value=figpath)
#opObj11.addParameter(name='showprofile', value=1)
#opObj11.addParameter(name='save_period', value=10)
controllerObj.start() | schainpy/scripts/test_sim0008.py | import os,sys
import datetime
import time
from schainpy.controller import Project
path = '/home/alex/Downloads/NEW_WR2/spc16removeDC'
figpath = path
desc = "Simulator Test"
controllerObj = Project()
controllerObj.setup(id='10',name='Test Simulator',description=desc)
readUnitConfObj = controllerObj.addReadUnit(datatype='SimulatorReader',
frequency=9.345e9,
FixRCP_IPP= 60,
Tau_0 = 30,
AcqH0_0=0,
samples=330,
AcqDH_0=0.15,
FixRCP_TXA=0.15,
FixRCP_TXB=0.15,
Fdoppler=600.0,
Hdoppler=36,
Adoppler=300,
delay=0,
online=0,
walk=0,
nTotalReadFiles=3)
opObj11 = readUnitConfObj.addOperation(name='printInfo')
procUnitConfObjA = controllerObj.addProcUnit(datatype='VoltageProc', inputId=readUnitConfObj.getId())
opObj10 = procUnitConfObjA.addOperation(name='selectChannels')
opObj10.addParameter(name='channelList', value=[0])
procUnitConfObjB = controllerObj.addProcUnit(datatype='SpectraProc', inputId=procUnitConfObjA.getId())
procUnitConfObjB.addParameter(name='nFFTPoints', value=300, format='int')
procUnitConfObjB.addParameter(name='nProfiles', value=300, format='int')
opObj11 = procUnitConfObjB.addOperation(name='removeDC')
opObj11.addParameter(name='mode', value=2)
#opObj11 = procUnitConfObjB.addOperation(name='IncohInt', optype='other')
#opObj11.addParameter(name='n', value='10', format='int')
#opObj11 = procUnitConfObjB.addOperation(name='SpectraPlot')
#opObj11 = procUnitConfObjB.addOperation(name='PowerProfilePlot')
#opObj11.addParameter(name='xmin', value=13)
#opObj11.addParameter(name='xmax', value=.4)
#opObj11 = procUnitConfObjB.addOperation(name='NoisePlot')
#opObj11.addParameter(name='xmin', value=13)
#opObj11.addParameter(name='xmax', value=14)
procUnitConfObjC = controllerObj.addProcUnit(datatype='ParametersProc', inputId=procUnitConfObjB.getId())
procUnitConfObjC.addOperation(name='SpectralMoments')
opObj11 = procUnitConfObjC.addOperation(name='SpectralMomentsPlot')
#opObj11.addParameter(name='xmin', value=14)
#opObj11.addParameter(name='xmax', value=15)
#opObj11.addParameter(name='save', value=figpath)
opObj11.addParameter(name='showprofile', value=1)
#opObj11.addParameter(name='save_period', value=10)
'''
opObj11 = procUnitConfObjC.addOperation(name='SnrPlot')
opObj11.addParameter(name='zmin', value=-10)
opObj11.addParameter(name='zmax', value=40)
#opObj11.addParameter(name='save', value=figpath)
#opObj11.addParameter(name='showprofile', value=1)
#opObj11.addParameter(name='save_period', value=10)
'''
opObj11 = procUnitConfObjC.addOperation(name='SpectralWidthPlot')
opObj11.addParameter(name='xmin', value=5)
opObj11.addParameter(name='xmax', value=6)
#opObj11.addParameter(name='save', value=figpath)
#opObj11.addParameter(name='showprofile', value=1)
#opObj11.addParameter(name='save_period', value=10)
controllerObj.start() | 0.28577 | 0.048699 |
from PIL import Image, ImageOps
import numpy as np
import skimage.io as io
from src.models.class_patcher import patcher
from src.utils.imgproc import *
from skimage.color import rgb2hsv, hsv2rgb
class patcher(patcher):
def __init__(self, body='./body/body_kyoko.png', **options):
super().__init__('京狐', body=body, pantie_position=[718, 1464], **options)
self.mask = io.imread('./mask/mask_kyoko.png')
try:
self.with_garter = self.options['with_garter']
except:
self.with_garter = self.ask(question='With garter belt?', default=True)
if self.with_garter:
self.garter_position = [701, 1272]
self.garter = np.float32(io.imread('./material/garter_kyoko.png') / 255)
self.garter_shade = np.float32(io.imread('./material/garter_kyoko_shade.png') / 255)
self.garter_shade_alpha = self.garter_shade[:, :, -1]
self.bra_position = [700, 1008]
self.bra = np.float32(io.imread('./mask/bra_kyoko.png') / 255)
self.bra_center = np.float32(io.imread('./mask/bra_kyoko_center.png') / 255)
self.bra_shade = np.float32(io.imread('./material/bra_kyoko_shade.png') / 255)
self.bra_lace = np.float32(io.imread('./material/bra_kyoko_lace.png') / 255)
self.bra_shade_alpha = self.bra_shade[:, :, -1]
self.bra_lace_mask = self.bra_lace[:, :, -1] > 0.3
self.pantie_ribbon_position = [745, 1528]
self.bra_ribbon_position = [800, 1173]
self.ribbon = np.float32(io.imread('./material/ribbon_kyoko.png') / 255)
self.ribbon_shade = np.float32(io.imread('./material/ribbon_kyoko_shade.png') / 255)
self.ribbon_shade_alpha = self.ribbon_shade[:, :, -1]
def pick_color(self, arr):
return np.mean(np.mean(arr, axis=0), axis=0)
def extract_base_color(self, pantie):
front = pantie[20:100, 30:80, :3] / 255.0
front_shade = pantie[130:150, 0:40, :3] / 255.0
front_color = self.pick_color(front)
front_shade_color = self.pick_color(front_shade)
front_shade_color = rgb2hsv(front_shade_color[None, None])
front_shade_color[0, 0, 1] *= front_shade_color[0, 0, 2] / 0.3
if front_shade_color[0, 0, 1] > 0.7:
front_shade_color[0, 0, 1] *= 0.7
front_shade_color[0, 0, 2] *= front_shade_color[0, 0, 2] / 0.4
front_shade_color = np.clip(hsv2rgb(front_shade_color)[0, 0], 0, 1)
return front_color, front_shade_color
def gen_ribbon(self, image):
pantie = np.array(image)
ribbon = pantie[24:32, 15:27, :3] / 255.0
ribbon_color = self.pick_color(ribbon)
ribbon_shade = pantie[26:30, 12:15, :3] / 255.0
ribbon_shade_color = self.pick_color(ribbon_shade)
ribbon = self.ribbon[:, :, :3] * ribbon_color
ribbon_shade = (self.ribbon_shade[:, :, -1])[:, :, None] * ribbon_shade_color
ribbon = alpha_brend(ribbon_shade, ribbon, self.ribbon_shade_alpha)
ribbon = np.dstack((ribbon, self.ribbon[:, :, -1] > 0.5))
return Image.fromarray(np.uint8(np.clip(ribbon, 0, 1) * 255))
def gen_garter(self, image):
pantie = np.array(image)
front_color, front_shade_color = self.extract_base_color(pantie)
garter = self.garter[:, :, :3] * front_color
garter_shade = (self.garter_shade[:, :, -1])[:, :, None] * front_shade_color
garter = alpha_brend(garter_shade, garter, self.garter_shade_alpha)
garter = np.dstack((garter, self.garter[:, :, -1] > 0.5))
return Image.fromarray(np.uint8(np.clip(garter, 0, 1) * 255))
def gen_bra(self, image):
pantie = np.array(image)
front_color, front_shade_color = self.extract_base_color(pantie)
ribbon = pantie[24:32, 15:27, :3] / 255.0
ribbon_color = self.pick_color(ribbon)
center = np.float32(pantie[20:170, -200:-15, :3][:, ::-1]) / 255
bra_center = np.copy(self.bra_center)
bra_center[80:80 + center.shape[0], 30:30 + center.shape[1], :3] = center * np.float32(bra_center[80:80 + center.shape[0], 30:30 + center.shape[1], :3] > 0)
bra = self.bra[:, :, :3] * front_color
bra_shade = (self.bra_shade[:, :, -1])[:, :, None] * front_shade_color
bra_lace = self.bra_lace[:, :, :3] * ribbon_color
bra = alpha_brend(bra_center[:, :, :3], bra[:, :, :3], bra_center[:, :, 0] > 0.1)
bra = alpha_brend(bra_lace, bra, self.bra_lace_mask)
bra = alpha_brend(bra_shade, bra, self.bra_shade_alpha)
bra = np.dstack((bra, self.bra[:, :, 0] > 0.8))
return Image.fromarray(np.uint8(np.clip(bra, 0, 1) * 255))
def convert(self, image):
pantie = np.array(image)
pantie = ribbon_inpaint(pantie)
patch = np.copy(pantie[-140:-5, 546:, :])
[pr, pc, d] = patch.shape
pantie[125:125 + pr, :pc, :] = patch
pantie[-140:, 546:, :] = 0
pantie = np.uint8(resize(pantie, [0.7, 0.7]) * 255)[:170]
pantie = np.bitwise_and(pantie, self.mask)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", (4096, 4096))
else:
patched = self.body.copy()
patched = self.paste(patched, pantie, self.pantie_position)
patched = self.paste(patched, self.gen_bra(image), self.bra_position)
ribbon = self.gen_ribbon(image)
patched = self.paste(patched, ribbon, self.pantie_ribbon_position)
patched = self.paste(patched, ribbon.resize((int(ribbon.width * 0.62), int(ribbon.height * 0.62))), self.bra_ribbon_position)
if self.with_garter:
patched = self.paste(patched, self.gen_garter(image), self.garter_position)
return patched | src/models/kyoko.py | from PIL import Image, ImageOps
import numpy as np
import skimage.io as io
from src.models.class_patcher import patcher
from src.utils.imgproc import *
from skimage.color import rgb2hsv, hsv2rgb
class patcher(patcher):
def __init__(self, body='./body/body_kyoko.png', **options):
super().__init__('京狐', body=body, pantie_position=[718, 1464], **options)
self.mask = io.imread('./mask/mask_kyoko.png')
try:
self.with_garter = self.options['with_garter']
except:
self.with_garter = self.ask(question='With garter belt?', default=True)
if self.with_garter:
self.garter_position = [701, 1272]
self.garter = np.float32(io.imread('./material/garter_kyoko.png') / 255)
self.garter_shade = np.float32(io.imread('./material/garter_kyoko_shade.png') / 255)
self.garter_shade_alpha = self.garter_shade[:, :, -1]
self.bra_position = [700, 1008]
self.bra = np.float32(io.imread('./mask/bra_kyoko.png') / 255)
self.bra_center = np.float32(io.imread('./mask/bra_kyoko_center.png') / 255)
self.bra_shade = np.float32(io.imread('./material/bra_kyoko_shade.png') / 255)
self.bra_lace = np.float32(io.imread('./material/bra_kyoko_lace.png') / 255)
self.bra_shade_alpha = self.bra_shade[:, :, -1]
self.bra_lace_mask = self.bra_lace[:, :, -1] > 0.3
self.pantie_ribbon_position = [745, 1528]
self.bra_ribbon_position = [800, 1173]
self.ribbon = np.float32(io.imread('./material/ribbon_kyoko.png') / 255)
self.ribbon_shade = np.float32(io.imread('./material/ribbon_kyoko_shade.png') / 255)
self.ribbon_shade_alpha = self.ribbon_shade[:, :, -1]
def pick_color(self, arr):
return np.mean(np.mean(arr, axis=0), axis=0)
def extract_base_color(self, pantie):
front = pantie[20:100, 30:80, :3] / 255.0
front_shade = pantie[130:150, 0:40, :3] / 255.0
front_color = self.pick_color(front)
front_shade_color = self.pick_color(front_shade)
front_shade_color = rgb2hsv(front_shade_color[None, None])
front_shade_color[0, 0, 1] *= front_shade_color[0, 0, 2] / 0.3
if front_shade_color[0, 0, 1] > 0.7:
front_shade_color[0, 0, 1] *= 0.7
front_shade_color[0, 0, 2] *= front_shade_color[0, 0, 2] / 0.4
front_shade_color = np.clip(hsv2rgb(front_shade_color)[0, 0], 0, 1)
return front_color, front_shade_color
def gen_ribbon(self, image):
pantie = np.array(image)
ribbon = pantie[24:32, 15:27, :3] / 255.0
ribbon_color = self.pick_color(ribbon)
ribbon_shade = pantie[26:30, 12:15, :3] / 255.0
ribbon_shade_color = self.pick_color(ribbon_shade)
ribbon = self.ribbon[:, :, :3] * ribbon_color
ribbon_shade = (self.ribbon_shade[:, :, -1])[:, :, None] * ribbon_shade_color
ribbon = alpha_brend(ribbon_shade, ribbon, self.ribbon_shade_alpha)
ribbon = np.dstack((ribbon, self.ribbon[:, :, -1] > 0.5))
return Image.fromarray(np.uint8(np.clip(ribbon, 0, 1) * 255))
def gen_garter(self, image):
pantie = np.array(image)
front_color, front_shade_color = self.extract_base_color(pantie)
garter = self.garter[:, :, :3] * front_color
garter_shade = (self.garter_shade[:, :, -1])[:, :, None] * front_shade_color
garter = alpha_brend(garter_shade, garter, self.garter_shade_alpha)
garter = np.dstack((garter, self.garter[:, :, -1] > 0.5))
return Image.fromarray(np.uint8(np.clip(garter, 0, 1) * 255))
def gen_bra(self, image):
pantie = np.array(image)
front_color, front_shade_color = self.extract_base_color(pantie)
ribbon = pantie[24:32, 15:27, :3] / 255.0
ribbon_color = self.pick_color(ribbon)
center = np.float32(pantie[20:170, -200:-15, :3][:, ::-1]) / 255
bra_center = np.copy(self.bra_center)
bra_center[80:80 + center.shape[0], 30:30 + center.shape[1], :3] = center * np.float32(bra_center[80:80 + center.shape[0], 30:30 + center.shape[1], :3] > 0)
bra = self.bra[:, :, :3] * front_color
bra_shade = (self.bra_shade[:, :, -1])[:, :, None] * front_shade_color
bra_lace = self.bra_lace[:, :, :3] * ribbon_color
bra = alpha_brend(bra_center[:, :, :3], bra[:, :, :3], bra_center[:, :, 0] > 0.1)
bra = alpha_brend(bra_lace, bra, self.bra_lace_mask)
bra = alpha_brend(bra_shade, bra, self.bra_shade_alpha)
bra = np.dstack((bra, self.bra[:, :, 0] > 0.8))
return Image.fromarray(np.uint8(np.clip(bra, 0, 1) * 255))
def convert(self, image):
pantie = np.array(image)
pantie = ribbon_inpaint(pantie)
patch = np.copy(pantie[-140:-5, 546:, :])
[pr, pc, d] = patch.shape
pantie[125:125 + pr, :pc, :] = patch
pantie[-140:, 546:, :] = 0
pantie = np.uint8(resize(pantie, [0.7, 0.7]) * 255)[:170]
pantie = np.bitwise_and(pantie, self.mask)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", (4096, 4096))
else:
patched = self.body.copy()
patched = self.paste(patched, pantie, self.pantie_position)
patched = self.paste(patched, self.gen_bra(image), self.bra_position)
ribbon = self.gen_ribbon(image)
patched = self.paste(patched, ribbon, self.pantie_ribbon_position)
patched = self.paste(patched, ribbon.resize((int(ribbon.width * 0.62), int(ribbon.height * 0.62))), self.bra_ribbon_position)
if self.with_garter:
patched = self.paste(patched, self.gen_garter(image), self.garter_position)
return patched | 0.383295 | 0.234056 |
from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import utils
import warnings
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0, moment=1.0):
    """
    Vertical magnetic field Hz of a harmonic vertical magnetic dipole in a
    homogeneous whole-space, evaluated in the plane of the dipole at radial
    offset ``r``.

    The analytical expression is given in Equation 4.56 in Ward and Hohmann,
    1988, and the example reproduces their Figure 4.2.

    Parameters
    ----------
    r : float or numpy.ndarray
        Radial offset(s) from the dipole (m). Only the magnitude is used.
    freq : float or numpy.ndarray
        Frequency or frequencies (Hz).
    sigma : float
        Whole-space conductivity (S/m).
    secondary : bool, optional
        If True (default), subtract the free-space (primary) dipole field and
        return only the secondary field.
    mu : float, optional
        Magnetic permeability (H/m). Defaults to the permeability of free space.
    moment : float, optional
        Dipole moment (A m^2). Defaults to 1.0, which reproduces the
        historical behavior of this function.

    Returns
    -------
    numpy.ndarray or complex
        Complex Hz (A/m). A 1D result is returned as a column vector.

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        from SimPEG import electromagnetics as EM
        freq = np.logspace(-1, 5, 301)
        test = EM.analytics.hzAnalyticDipoleF(
            100, freq, 0.01, secondary=False)
        plt.loglog(freq, test.real, 'C0-', label='Real')
        plt.loglog(freq, -test.real, 'C0--')
        plt.loglog(freq, test.imag, 'C1-', label='Imaginary')
        plt.loglog(freq, -test.imag, 'C1--')
        plt.title('Response at $r=100$ m')
        plt.xlim([1e-1, 1e5])
        plt.ylim([1e-12, 1e-6])
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('$H_z$ (A/m)')
        plt.legend(loc=6)
        plt.show()

    **Reference**

    - Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for
      geophysical applications, Chapter 4 of Electromagnetic Methods in Applied
      Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:
      `10.1190/1.9781560802631.ch4
      <https://doi.org/10.1190/1.9781560802631.ch4>`_.
    """
    # The whole-space solution depends only on the radial distance.
    r = np.abs(r)
    # Complex wavenumber; no displacement-current term, i.e. the
    # quasi-static approximation used by Ward and Hohmann Eq. 4.56.
    k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)

    front = moment / (2.0 * np.pi * (k ** 2) * (r ** 5))
    back = 9 - (
        9 + 9j * k * r - 4 * (k ** 2) * (r ** 2) - 1j * (k ** 3) * (r ** 3)
    ) * np.exp(-1j * k * r)
    hz = front * back

    if secondary:
        # Subtract the free-space (primary) dipole field on the dipole plane,
        # scaled by the same moment so secondary/total stay consistent.
        hp = -moment / (4 * np.pi * r ** 3)
        hz = hz - hp

    if hz.ndim == 1:
        # Return 1D results as a column vector for downstream SimPEG code.
        hz = utils.mkvc(hz, 2)
    return hz
def MagneticDipoleWholeSpace(
    XYZ, srcLoc, sig, f, moment, fieldType="b", mu_r=1, eps_r=1, **kwargs
):
    """
    Analytical solution for a magnetic dipole in a whole-space.

    The analytical expression is given in Equation 2.57 in Ward and Hohmann,
    1988, and the example reproduces their Figure 2.2.

    Parameters
    ----------
    XYZ : array_like
        Receiver locations as an (n, 3) array.
    srcLoc : array_like
        Source location (x, y, z).
    sig : float
        Conductivity (S/m).
    f : float or numpy.ndarray
        Frequency (Hz).
    moment : str or sequence
        'X', 'Y' or 'Z' for a unit dipole along that axis, or a
        three-component moment vector.
    fieldType : {'b', 'h', 'e'}, optional
        Field to return (flux density, magnetic field, or electric field).
    mu_r : float, optional
        Relative magnetic permeability.
    eps_r : float, optional
        Relative electric permittivity.

    Returns
    -------
    Fx, Fy, Fz : complex numpy.ndarray
        Cartesian components of the requested field at each location.

    TODOs:
        - set it up to instead take a mesh & survey
        - add divide by zero safety

    **Reference**

    - Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for
      geophysical applications, Chapter 4 of Electromagnetic Methods in Applied
      Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:
      `10.1190/1.9781560802631.ch4
      <https://doi.org/10.1190/1.9781560802631.ch4>`_.
    """
    # Validate up front: an unrecognized fieldType previously fell through
    # both branches below and died with a NameError at the return statement.
    if fieldType not in ("b", "h", "e"):
        raise ValueError(
            "fieldType must be 'b', 'h' or 'e', got %r" % (fieldType,)
        )
    # Backwards compatibility for the deprecated 'orientation' kwarg: the
    # old signature carried the magnitude in `moment`.
    orient = kwargs.pop("orientation", None)
    if orient is not None:
        warnings.warn(
            "orientation kwarg has been deprecated and will be removed"
            " in SimPEG version 0.16.0, please use the moment argument",
            FutureWarning,
        )
        magnitude = moment
        moment = orient
    else:
        magnitude = 1
    mu = kwargs.pop("mu", None)
    if mu is not None:
        warnings.warn(
            "mu kwarg has been deprecated and will be removed"
            " in SimPEG version 0.16.0, please use the mu_r argument.",
            FutureWarning,
        )
        mu_r = mu / mu_0
    mu = mu_0 * mu_r
    eps = epsilon_0 * eps_r
    w = 2 * np.pi * f  # angular frequency
    if isinstance(moment, str):
        if moment == "X":
            mx, my, mz = 1.0, 0.0, 0.0
        elif moment == "Y":
            mx, my, mz = 0.0, 1.0, 0.0
        elif moment == "Z":
            mx, my, mz = 0.0, 0.0, 1.0
        else:
            raise NotImplementedError("String type for moment not recognized")
        mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
    else:
        mx, my, mz = moment[0], moment[1], moment[2]
    XYZ = utils.asArray_N_x_Dim(XYZ, 3)
    # Source-receiver offsets and distances.
    dx = XYZ[:, 0] - srcLoc[0]
    dy = XYZ[:, 1] - srcLoc[1]
    dz = XYZ[:, 2] - srcLoc[2]
    r = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 2.0)
    # Full (displacement currents included) complex wavenumber.
    k = np.sqrt(-1j * w * mu * sig + w ** 2 * mu * eps)
    kr = k * r
    if fieldType in ["h", "b"]:
        front = 1 / (4.0 * pi * r ** 3.0) * np.exp(-1j * kr)
        mid = -(kr ** 2.0) + 3.0 * 1j * kr + 3.0
        Fx = front * (
            mx * ((dx / r) ** 2.0 * mid + (kr ** 2.0 - 1j * kr - 1.0))
            + my * ((dy * dx / r ** 2.0) * mid)
            + mz * ((dx * dz / r ** 2.0) * mid)
        )
        Fy = front * (
            mx * ((dx * dy / r ** 2.0) * mid)
            + my * ((dy / r) ** 2.0 * mid + (kr ** 2.0 - 1j * kr - 1.0))
            + mz * ((dy * dz / r ** 2.0) * mid)
        )
        Fz = front * (
            mx * ((dx * dz / r ** 2.0) * mid)
            + my * ((dy * dz / r ** 2.0) * mid)
            + mz * ((dz / r) ** 2.0 * mid + (kr ** 2.0 - 1j * kr - 1.0))
        )
        if fieldType == "b":
            # b = mu * h
            Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
    elif fieldType == "e":
        front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r ** 3.0) * np.exp(-1j * kr)
        Fx = front * (my * (dz / r) + mz * (-dy / r))
        Fy = front * (mx * (-dz / r) + mz * (dx / r))
        Fz = front * (mx * (dy / r) + my * (-dx / r))
    return Fx, Fy, Fz
def ElectricDipoleWholeSpace(
    XYZ, srcLoc, sig, f, moment="X", fieldType="e", mu_r=1, eps_r=1, **kwargs
):
    """
    Analytical solution for an electric dipole in a whole-space.

    Parameters
    ----------
    XYZ : array_like
        Receiver locations as an (n, 3) array.
    srcLoc : array_like
        Source location (x, y, z).
    sig : float
        Conductivity (S/m).
    f : float or numpy.ndarray
        Frequency (Hz).
    moment : str or sequence, optional
        'X', 'Y' or 'Z' (case-insensitive) for a unit dipole along that
        axis, or a three-component moment vector.
    fieldType : {'e', 'h', 'b'}, optional
        Field to return (electric field, magnetic field, or flux density).
    mu_r : float, optional
        Relative magnetic permeability.
    eps_r : float, optional
        Relative electric permittivity.

    Returns
    -------
    Fx, Fy, Fz : complex numpy.ndarray
        Cartesian components of the requested field at each location.
    """
    # Validate up front: an unrecognized fieldType previously fell through
    # both branches below and died with a NameError at the return statement.
    if fieldType not in ("e", "h", "b"):
        raise ValueError(
            "fieldType must be 'e', 'h' or 'b', got %r" % (fieldType,)
        )
    orient = kwargs.pop("orientation", None)
    if orient is not None:
        warnings.warn(
            "orientation kwarg has been deprecated and will be removed"
            " in SimPEG version 0.16.0, please use the moment argument.",
            FutureWarning,
        )
        moment = orient
    mu = kwargs.pop("mu", None)
    if mu is not None:
        warnings.warn(
            "mu kwarg has been deprecated and will be removed"
            " in SimPEG version 0.16.0, please use the mu_r argument.",
            FutureWarning,
        )
        mu_r = mu / mu_0
    # Deprecated current/length kwargs fold into the overall dipole magnitude.
    cur = kwargs.pop("current", None)
    if cur is not None:
        warnings.warn(
            "current kwarg has been deprecated and will be removed"
            " in SimPEG version 0.16.0, please use the moment argument.",
            FutureWarning,
        )
        magnitude = cur
    else:
        magnitude = 1
    length = kwargs.pop("length", None)
    if length is not None:
        warnings.warn(
            "length kwarg has been deprecated and will be removed"
            " in SimPEG version 0.16.0, please use the moment argument.",
            FutureWarning,
        )
        magnitude *= length
    mu = mu_0 * mu_r
    eps = epsilon_0 * eps_r
    w = 2 * np.pi * f  # angular frequency
    if isinstance(moment, str):
        if moment.upper() == "X":
            mx, my, mz = 1.0, 0.0, 0.0
        elif moment.upper() == "Y":
            mx, my, mz = 0.0, 1.0, 0.0
        elif moment.upper() == "Z":
            mx, my, mz = 0.0, 0.0, 1.0
        else:
            raise NotImplementedError("String type for moment not recognized")
        mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
    else:
        mx, my, mz = moment[0], moment[1], moment[2]
    XYZ = utils.asArray_N_x_Dim(XYZ, 3)
    # Source-receiver offsets and distances.
    dx = XYZ[:, 0] - srcLoc[0]
    dy = XYZ[:, 1] - srcLoc[1]
    dz = XYZ[:, 2] - srcLoc[2]
    r = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 2.0)
    # Full (displacement currents included) complex wavenumber.
    k = np.sqrt(-1j * w * mu * sig + w ** 2 * mu * eps)
    kr = k * r
    if fieldType == "e":
        front = 1 / (4.0 * np.pi * sig * r ** 3) * np.exp(-1j * k * r)
        mid = -(k ** 2) * r ** 2 + 3 * 1j * k * r + 3
        Fx = front * (
            mx * ((dx ** 2 / r ** 2) * mid + (k ** 2 * r ** 2 - 1j * k * r - 1.0))
            + my * (dy * dx / r ** 2) * mid
            + mz * (dz * dx / r ** 2) * mid
        )
        Fy = front * (
            mx * (dx * dy / r ** 2) * mid
            + my * ((dy ** 2 / r ** 2) * mid + (k ** 2 * r ** 2 - 1j * k * r - 1.0))
            + mz * (dz * dy / r ** 2) * mid
        )
        Fz = front * (
            mx * (dx * dz / r ** 2) * mid
            + my * (dy * dz / r ** 2) * mid
            + mz * ((dz ** 2 / r ** 2) * mid + (k ** 2 * r ** 2 - 1j * k * r - 1.0))
        )
    elif fieldType in ["h", "b"]:
        front = (1 + 1j * kr) / (4.0 * np.pi * r ** 2) * np.exp(-1j * k * r)
        Fx = front * (my * (dz / r) + mz * (-dy / r))
        Fy = front * (mx * (-dz / r) + mz * (dx / r))
        Fz = front * (mx * (dy / r) + my * (-dx / r))
        if fieldType == "b":
            # b = mu * h
            Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
return Fx, Fy, Fz | SimPEG/electromagnetics/analytics/FDEM.py | from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import utils
import warnings
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):
"""
The analytical expression is given in Equation 4.56 in Ward and Hohmann,
1988, and the example reproduces their Figure 4.2.
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from SimPEG import electromagnetics as EM
freq = np.logspace(-1, 5, 301)
test = EM.analytics.hzAnalyticDipoleF(
100, freq, 0.01, secondary=False)
plt.loglog(freq, test.real, 'C0-', label='Real')
plt.loglog(freq, -test.real, 'C0--')
plt.loglog(freq, test.imag, 'C1-', label='Imaginary')
plt.loglog(freq, -test.imag, 'C1--')
plt.title('Response at $r=100$ m')
plt.xlim([1e-1, 1e5])
plt.ylim([1e-12, 1e-6])
plt.xlabel('Frequency (Hz)')
plt.ylabel('$H_z$ (A/m)')
plt.legend(loc=6)
plt.show()
**Reference**
- <NAME>., and <NAME>, 1988, Electromagnetic theory for
geophysical applications, Chapter 4 of Electromagnetic Methods in Applied
Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:
`10.1190/1.9781560802631.ch4
<https://doi.org/10.1190/1.9781560802631.ch4>`_.
"""
r = np.abs(r)
k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)
m = 1
front = m / (2.0 * np.pi * (k ** 2) * (r ** 5))
back = 9 - (
9 + 9j * k * r - 4 * (k ** 2) * (r ** 2) - 1j * (k ** 3) * (r ** 3)
) * np.exp(-1j * k * r)
hz = front * back
if secondary:
hp = -1 / (4 * np.pi * r ** 3)
hz = hz - hp
if hz.ndim == 1:
hz = utils.mkvc(hz, 2)
return hz
def MagneticDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment, fieldType="b", mu_r=1, eps_r=1, **kwargs
):
"""
Analytical solution for a dipole in a whole-space.
The analytical expression is given in Equation 2.57 in Ward and Hohmann,
1988, and the example reproduces their Figure 2.2.
TODOs:
- set it up to instead take a mesh & survey
- add divide by zero safety
.. plot::
import numpy as np
from SimPEG import electromagnetics as EM
import matplotlib.pyplot as plt
from scipy.constants import mu_0
freqs = np.logspace(-2, 5, 301)
Bx, By, Bz = EM.analytics.FDEM.MagneticDipoleWholeSpace(
[0, 100, 0], [0, 0, 0], 1e-2, freqs, moment='Z')
plt.figure()
plt.loglog(freqs, Bz.real/mu_0, 'C0', label='Real')
plt.loglog(freqs, -Bz.real/mu_0, 'C0--')
plt.loglog(freqs, Bz.imag/mu_0, 'C1', label='Imaginary')
plt.loglog(freqs, -Bz.imag/mu_0, 'C1--')
plt.legend()
plt.xlim([1e-2, 1e5])
plt.ylim([1e-13, 1e-6])
plt.show()
**Reference**
- <NAME>., and <NAME>, 1988, Electromagnetic theory for
geophysical applications, Chapter 4 of Electromagnetic Methods in Applied
Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:
`10.1190/1.9781560802631.ch4
<https://doi.org/10.1190/1.9781560802631.ch4>`_.
"""
orient = kwargs.pop("orientation", None)
if orient is not None:
warnings.warn(
"orientation kwarg has been deprecated and will be removed"
" in SimPEG version 0.16.0, please use the moment argument",
FutureWarning,
)
magnitude = moment
moment = orient
else:
magnitude = 1
mu = kwargs.pop("mu", None)
if mu is not None:
warnings.warn(
"mu kwarg has been deprecated and will be removed"
" in SimPEG version 0.16.0, please use the mu_r argument.",
FutureWarning,
)
mu_r = mu / mu_0
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 2.0)
k = np.sqrt(-1j * w * mu * sig + w ** 2 * mu * eps)
kr = k * r
if fieldType in ["h", "b"]:
front = 1 / (4.0 * pi * r ** 3.0) * np.exp(-1j * kr)
mid = -(kr ** 2.0) + 3.0 * 1j * kr + 3.0
Fx = front * (
mx * ((dx / r) ** 2.0 * mid + (kr ** 2.0 - 1j * kr - 1.0))
+ my * ((dy * dx / r ** 2.0) * mid)
+ mz * ((dx * dz / r ** 2.0) * mid)
)
Fy = front * (
mx * ((dx * dy / r ** 2.0) * mid)
+ my * ((dy / r) ** 2.0 * mid + (kr ** 2.0 - 1j * kr - 1.0))
+ mz * ((dy * dz / r ** 2.0) * mid)
)
Fz = front * (
mx * ((dx * dz / r ** 2.0) * mid)
+ my * ((dy * dz / r ** 2.0) * mid)
+ mz * ((dz / r) ** 2.0 * mid + (kr ** 2.0 - 1j * kr - 1.0))
)
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
elif fieldType == "e":
front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r ** 3.0) * np.exp(-1j * kr)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
return Fx, Fy, Fz
def ElectricDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment="X", fieldType="e", mu_r=1, eps_r=1, **kwargs
):
orient = kwargs.pop("orientation", None)
if orient is not None:
warnings.warn(
"orientation kwarg has been deprecated and will be removed"
" in SimPEG version 0.16.0, please use the moment argument.",
FutureWarning,
)
moment = orient
mu = kwargs.pop("mu", None)
if mu is not None:
warnings.warn(
"mu kwarg has been deprecated and will be removed"
" in SimPEG version 0.16.0, please use the mu_r argument.",
FutureWarning,
)
mu_r = mu / mu_0
cur = kwargs.pop("current", None)
if cur is not None:
warnings.warn(
"current kwarg has been deprecated and will be removed"
" in SimPEG version 0.16.0, please use the moment argument.",
FutureWarning,
)
magnitude = cur
else:
magnitude = 1
length = kwargs.pop("length", None)
if length is not None:
warnings.warn(
"length kwarg has been deprecated and will be removed"
" in SimPEG version 0.16.0, please use the moment argument.",
FutureWarning,
)
magnitude *= length
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment.upper() == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment.upper() == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment.upper() == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 2.0)
k = np.sqrt(-1j * w * mu * sig + w ** 2 * mu * eps)
kr = k * r
if fieldType == "e":
front = 1 / (4.0 * np.pi * sig * r ** 3) * np.exp(-1j * k * r)
mid = -(k ** 2) * r ** 2 + 3 * 1j * k * r + 3
Fx = front * (
mx * ((dx ** 2 / r ** 2) * mid + (k ** 2 * r ** 2 - 1j * k * r - 1.0))
+ my * (dy * dx / r ** 2) * mid
+ mz * (dz * dx / r ** 2) * mid
)
Fy = front * (
mx * (dx * dy / r ** 2) * mid
+ my * ((dy ** 2 / r ** 2) * mid + (k ** 2 * r ** 2 - 1j * k * r - 1.0))
+ mz * (dz * dy / r ** 2) * mid
)
Fz = front * (
mx * (dx * dz / r ** 2) * mid
+ my * (dy * dz / r ** 2) * mid
+ mz * ((dz ** 2 / r ** 2) * mid + (k ** 2 * r ** 2 - 1j * k * r - 1.0))
)
elif fieldType in ["h", "b"]:
front = (1 + 1j * kr) / (4.0 * np.pi * r ** 2) * np.exp(-1j * k * r)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
return Fx, Fy, Fz | 0.648021 | 0.535098 |
import numpy, cdtime, vcs
from vcs.testing.common import test_values_setting
x=vcs.init()
x.drawlogooff()
p=x.createprojection()
assert(p.type == "linear")
assert(vcs.queries.isprojection(p))
test_values_setting(p, "type", [-1,-2,-3,'linear', 'albers equal area', 'lambert', 'mercator', 'polar', 'polyconic', 'equid conic a', 'transverse mercator', 'stereographic', 'lambert azimuthal', 'azimuthal', 'gnomonic', 'orthographic', 'gen. vert. near per', 'sinusoidal', 'equirectangular', 'miller', 'van der grinten', 'hotin', 'robinson', 'space oblique', 'alaska', 'interrupted goode', 'mollweide', 'interrupted mollweide', 'hammer', 'wagner iv', 'wagner vii', 'oblated', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,p,"POLAR","leac"," POlyConic ",],["utm","state plane","foo",-4,31,256,[],{},])
b = x.createprojection("test_b_ok",p.name)
assert(b.name == "test_b_ok")
assert(b.type == "polyconic")
## From vcs validation
for t in range(31):
good = []
bad =[]
pos = []
for param,val in vcs.VCS_validation_functions.proj_ok_parameters.iteritems():
if t in val[0]:
good.append(param)
pos.append(val[1])
else:
bad.append(param)
b.type=t
b._parameters = [1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20]
for i,att in enumerate(good):
if (att in ['azimuthalangle','azimuthallongitude','satellite','path',] and (b.parameters[12]==0. or b.parameters[12]==1.e20)) \
or \
( att=='standardparallel' and b.parameters[8]==1) \
or \
( att in ['standardparallel1','standardparallel2'] and (b.parameters[8]==0 or b.parameters[8]==1.e20) and t==8)\
:
continue
test_values_setting(b,att,[0.,])
if b.type == "equid conic" and att=="subtype":
ipos = 8
else:
ipos = pos[i]
assert(b.parameters[ipos]==0.)
for att in bad:
try:
setattr(b,att,[],[0.,])
success = True
except:
success = False
else:
if success:
raise ValueError, "Shouldn't have been able to set '%s' on projection of type %s" % (att,b.type) | testing/vcs/test_vcs_verify_proj_basics.py | import numpy, cdtime, vcs
from vcs.testing.common import test_values_setting
x=vcs.init()
x.drawlogooff()
p=x.createprojection()
assert(p.type == "linear")
assert(vcs.queries.isprojection(p))
test_values_setting(p, "type", [-1,-2,-3,'linear', 'albers equal area', 'lambert', 'mercator', 'polar', 'polyconic', 'equid conic a', 'transverse mercator', 'stereographic', 'lambert azimuthal', 'azimuthal', 'gnomonic', 'orthographic', 'gen. vert. near per', 'sinusoidal', 'equirectangular', 'miller', 'van der grinten', 'hotin', 'robinson', 'space oblique', 'alaska', 'interrupted goode', 'mollweide', 'interrupted mollweide', 'hammer', 'wagner iv', 'wagner vii', 'oblated', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,p,"POLAR","leac"," POlyConic ",],["utm","state plane","foo",-4,31,256,[],{},])
b = x.createprojection("test_b_ok",p.name)
assert(b.name == "test_b_ok")
assert(b.type == "polyconic")
## From vcs validation
for t in range(31):
good = []
bad =[]
pos = []
for param,val in vcs.VCS_validation_functions.proj_ok_parameters.iteritems():
if t in val[0]:
good.append(param)
pos.append(val[1])
else:
bad.append(param)
b.type=t
b._parameters = [1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20, 1e+20]
for i,att in enumerate(good):
if (att in ['azimuthalangle','azimuthallongitude','satellite','path',] and (b.parameters[12]==0. or b.parameters[12]==1.e20)) \
or \
( att=='standardparallel' and b.parameters[8]==1) \
or \
( att in ['standardparallel1','standardparallel2'] and (b.parameters[8]==0 or b.parameters[8]==1.e20) and t==8)\
:
continue
test_values_setting(b,att,[0.,])
if b.type == "equid conic" and att=="subtype":
ipos = 8
else:
ipos = pos[i]
assert(b.parameters[ipos]==0.)
for att in bad:
try:
setattr(b,att,[],[0.,])
success = True
except:
success = False
else:
if success:
raise ValueError, "Shouldn't have been able to set '%s' on projection of type %s" % (att,b.type) | 0.282493 | 0.557665 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.toonbase import ToontownGlobals
from toontown.catalog import CatalogItem
from toontown.catalog.CatalogItemList import CatalogItemList
from toontown.catalog.CatalogPoleItem import CatalogPoleItem
from toontown.catalog.CatalogBeanItem import CatalogBeanItem
from toontown.catalog.CatalogChatItem import CatalogChatItem
from toontown.catalog.CatalogClothingItem import CatalogClothingItem, getAllClothes
from toontown.catalog.CatalogAccessoryItem import CatalogAccessoryItem
from toontown.catalog.CatalogRentalItem import CatalogRentalItem
from toontown.catalog.CatalogInvalidItem import CatalogInvalidItem
import time
class TTCodeRedemptionMgrAI(DistributedObjectAI):
    """AI-side manager that validates and redeems promotional codes.

    Redeemed codes are recorded on the avatar; granted items are queued on
    the avatar's delivery schedule and arrive through the mailbox.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('TTCodeRedemptionMgrAI')

    # Result codes sent back to the client in redeemCodeResult.
    Success = 0
    InvalidCode = 1
    ExpiredCode = 2
    Ineligible = 3
    AwardError = 4
    TooManyFails = 5
    ServiceUnavailable = 6

    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        self.air = air

    def announceGenerate(self):
        DistributedObjectAI.announceGenerate(self)

    def delete(self):
        DistributedObjectAI.delete(self)

    def giveAwardToToonResult(self, todo0, todo1):
        # Not implemented; kept for distributed-class compatibility.
        pass

    def redeemCode(self, context, code):
        """Validate *code* for the calling avatar and deliver its items.

        Replies via redeemCodeResult(context, status, 0).  The code is only
        recorded as redeemed after its items were queued successfully, so a
        failed delivery (e.g. a full mailbox) can be retried.
        """
        avId = self.air.getAvatarIdFromSender()
        if not avId:
            self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to redeem a code from an invalid avId')
            return
        av = self.air.doId2do.get(avId)
        if not av:
            self.air.writeServerEvent('suspicious', avId=avId, issue='Invalid avatar tried to redeem a code')
            return
        codes = av.getRedeemedCodes() or []
        self.notify.debug('Avatar %s previously redeemed codes: %s' % (avId, codes))
        if code in codes:
            # Each code may only be redeemed once per avatar.
            self.air.writeServerEvent('code-redeemed', avId=avId, issue='Invalid code: %s' % code)
            self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.InvalidCode, 0])
            return
        # Placeholders: expiry and eligibility checks are not implemented yet.
        expired = False
        eligible = True
        if expired:
            self.air.writeServerEvent('code-redeemed', avId=avId, issue='Expired code: %s' % code)
            self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.ExpiredCode, 0])
            return
        if not eligible:
            self.air.writeServerEvent('code-redeemed', avId=avId, issue='Ineligible for code: %s' % code)
            self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.Ineligible, 0])
            return
        items = self.getItemsForCode(code)
        delivered = False
        for item in items:
            if isinstance(item, CatalogInvalidItem):
                self.air.writeServerEvent('suspicious', avId=avId, issue="Invalid CatalogItem's for code: %s" % code)
                delivered = False
                break
            if len(av.mailboxContents) + len(av.onGiftOrder) >= ToontownGlobals.MaxMailboxContents:
                # No room left; abort so the avatar can retry later.
                delivered = False
                break
            item.deliveryDate = int(time.time() / 60) + 1  # deliver in ~1 minute
            av.onOrder.append(item)
            av.b_setDeliverySchedule(av.onOrder)
            delivered = True
        if not delivered:
            # Unknown codes (empty item list) and delivery failures both
            # land here; the code is NOT consumed.
            self.air.writeServerEvent('code-redeemed', avId=avId, issue='Could not deliver items for code: %s' % code)
            self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.InvalidCode, 0])
            return
        # Only now mark the code as used.
        codes.append(code)
        av.setRedeemedCodes(codes)
        self.air.writeServerEvent('code-redeemed', avId=avId, issue='Successfuly redeemed code: %s' % code)
        self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.Success, 0])

    def getItemsForCode(self, code):
        """Return the list of CatalogItems granted by *code*.

        Codes are matched case-insensitively; several codes accept spelling
        variants.  Unknown codes yield an empty list; an unresolvable
        sender yields None.
        """
        avId = self.air.getAvatarIdFromSender()
        if not avId:
            self.air.writeServerEvent('suspicious', avId=avId, issue='Could not parse the gender of an invalid avId')
            return
        av = self.air.doId2do.get(avId)
        if not av:
            self.air.writeServerEvent('suspicious', avId=avId, issue='Could not parse the gender of an invalid avatar')
            return
        code = code.lower()
        # Codes granting a single shirt: alias -> clothing item id.
        shirtCodes = {
            'flip-for-flippy': 2001,
            'dont-be-wacky': 2002,
            'gadzooks': 1807,
            'sillymeter': 1753, 'silly meter': 1753, 'silly-meter': 1753,
            'gc-sbfo': 1788, 'gc sbfo': 1788, 'gcsbfo': 1788,
            'getconnected': 1752, 'get connected': 1752, 'get_connected': 1752,
            'summer': 1709,
            'brrrgh': 1800,
            'toontastic': 1820,
            'sunburst': 1809,
            'doomsday-inventor': 2012,
        }
        if code in shirtCodes:
            return [CatalogClothingItem(shirtCodes[code], 0)]
        # Codes granting jellybeans: alias -> bean amount.
        beanCodes = {'bdisanerd': 420, 'sweet': 12000, 'schweet': 12000}
        if code in beanCodes:
            return [CatalogBeanItem(beanCodes[code], tagCode=2)]
        if code in ('winter', 'cannons'):
            # 48-hour (2880 minute) cannon rental.
            return [CatalogRentalItem(ToontownGlobals.RentalCannon, 2880, 0)]
        # Codes granting a shirt plus gender-specific bottoms:
        # alias -> (shirt id, bottoms id for male, bottoms id for female).
        outfitCodes = {
            'toonfest2014': (2003, 2004, 2005),
            'toonfest': (2003, 2004, 2005),
            'beta-bughunt': (2006, 2007, 2008),
            'patience-pays': (2009, 2010, 2011),
        }
        if code in outfitCodes:
            shirtId, maleBot, femaleBot = outfitCodes[code]
            if av.getStyle().getGender() == 'm':
                bot = CatalogClothingItem(maleBot, 0)
            else:
                bot = CatalogClothingItem(femaleBot, 0)
            return [CatalogClothingItem(shirtId, 0), bot]
        return []

    def requestCodeRedeem(self, todo0, todo1):
        # Not implemented; kept for distributed-class compatibility.
        pass
def redeemCodeResult(self, todo0, todo1, todo2):
pass | v2.5.7/toontown/coderedemption/TTCodeRedemptionMgrAI.py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.toonbase import ToontownGlobals
from toontown.catalog import CatalogItem
from toontown.catalog.CatalogItemList import CatalogItemList
from toontown.catalog.CatalogPoleItem import CatalogPoleItem
from toontown.catalog.CatalogBeanItem import CatalogBeanItem
from toontown.catalog.CatalogChatItem import CatalogChatItem
from toontown.catalog.CatalogClothingItem import CatalogClothingItem, getAllClothes
from toontown.catalog.CatalogAccessoryItem import CatalogAccessoryItem
from toontown.catalog.CatalogRentalItem import CatalogRentalItem
from toontown.catalog.CatalogInvalidItem import CatalogInvalidItem
import time
class TTCodeRedemptionMgrAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('TTCodeRedemptionMgrAI')
Success = 0
InvalidCode = 1
ExpiredCode = 2
Ineligible = 3
AwardError = 4
TooManyFails = 5
ServiceUnavailable = 6
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
self.air = air
def announceGenerate(self):
DistributedObjectAI.announceGenerate(self)
def delete(self):
DistributedObjectAI.delete(self)
def giveAwardToToonResult(self, todo0, todo1):
pass
def redeemCode(self, context, code):
avId = self.air.getAvatarIdFromSender()
if not avId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to redeem a code from an invalid avId')
return
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId=avId, issue='Invalid avatar tried to redeem a code')
return
valid = True
eligible = True
expired = False
delivered = False
codes = av.getRedeemedCodes()
print codes
if not codes:
codes = [
code]
av.setRedeemedCodes(codes)
else:
if code not in codes:
codes.append(code)
av.setRedeemedCodes(codes)
valid = True
else:
valid = False
if not valid:
self.air.writeServerEvent('code-redeemed', avId=avId, issue='Invalid code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.InvalidCode, 0])
return
if expired:
self.air.writeServerEvent('code-redeemed', avId=avId, issue='Expired code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.ExpiredCode, 0])
return
if not eligible:
self.air.writeServerEvent('code-redeemed', avId=avId, issue='Ineligible for code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.Ineligible, 0])
return
items = self.getItemsForCode(code)
for item in items:
if isinstance(item, CatalogInvalidItem):
self.air.writeServerEvent('suspicious', avId=avId, issue="Invalid CatalogItem's for code: %s" % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.InvalidCode, 0])
break
if len(av.mailboxContents) + len(av.onGiftOrder) >= ToontownGlobals.MaxMailboxContents:
delivered = False
break
item.deliveryDate = int(time.time() / 60) + 1
av.onOrder.append(item)
av.b_setDeliverySchedule(av.onOrder)
delivered = True
if not delivered:
self.air.writeServerEvent('code-redeemed', avId=avId, issue='Could not deliver items for code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.InvalidCode, 0])
return
self.air.writeServerEvent('code-redeemed', avId=avId, issue='Successfuly redeemed code: %s' % code)
self.sendUpdateToAvatarId(avId, 'redeemCodeResult', [context, self.Success, 0])
def getItemsForCode(self, code):
avId = self.air.getAvatarIdFromSender()
if not avId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Could not parse the gender of an invalid avId')
return
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId=avId, issue='Could not parse the gender of an invalid avatar')
return
code = code.lower()
if code == 'bdisanerd':
beans = CatalogBeanItem(420, tagCode=2)
return [
beans]
if code == 'flip-for-flippy':
shirt = CatalogClothingItem(2001, 0)
return [
shirt]
if code == 'dont-be-wacky':
shirt = CatalogClothingItem(2002, 0)
return [
shirt]
if code == 'gadzooks':
shirt = CatalogClothingItem(1807, 0)
return [
shirt]
if code == 'sillymeter' or code == 'silly meter' or code == 'silly-meter':
shirt = CatalogClothingItem(1753, 0)
return [
shirt]
if code == 'gc-sbfo' or code == 'gc sbfo' or code == 'gcsbfo':
shirt = CatalogClothingItem(1788, 0)
return [
shirt]
if code == 'getconnected' or code == 'get connected' or code == 'get_connected':
shirt = CatalogClothingItem(1752, 0)
return [
shirt]
if code == 'summer':
shirt = CatalogClothingItem(1709, 0)
return [
shirt]
if code == 'brrrgh':
shirt = CatalogClothingItem(1800, 0)
return [
shirt]
if code == 'toontastic':
shirt = CatalogClothingItem(1820, 0)
return [
shirt]
if code == 'sunburst':
shirt = CatalogClothingItem(1809, 0)
return [
shirt]
if code == 'sweet' or code == 'schweet':
beans = CatalogBeanItem(12000, tagCode=2)
return [
beans]
if code == 'winter' or code == 'cannons':
rent = CatalogRentalItem(ToontownGlobals.RentalCannon, 2880, 0)
return [
rent]
if code == 'toonfest2014' or code == 'toonfest':
shirt = CatalogClothingItem(2003, 0)
if av.getStyle().getGender() == 'm':
bot = CatalogClothingItem(2004, 0)
else:
bot = CatalogClothingItem(2005, 0)
return [shirt, bot]
if code == 'beta-bughunt':
shirt = CatalogClothingItem(2006, 0)
if av.getStyle().getGender() == 'm':
bot = CatalogClothingItem(2007, 0)
else:
bot = CatalogClothingItem(2008, 0)
return [shirt, bot]
if code == 'patience-pays':
shirt = CatalogClothingItem(2009, 0)
if av.getStyle().getGender() == 'm':
bot = CatalogClothingItem(2010, 0)
else:
bot = CatalogClothingItem(2011, 0)
return [shirt, bot]
if code == 'doomsday-inventor':
shirt = CatalogClothingItem(2012, 0)
return [
shirt]
return []
def requestCodeRedeem(self, todo0, todo1):
pass
def redeemCodeResult(self, todo0, todo1, todo2):
pass | 0.427636 | 0.104295 |
from ghpythonlib.componentbase import dotnetcompiledcomponent as component
import Grasshopper, GhPython
import System
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
__author__ = "<NAME>"
__laboratory__ = "IBOIS, Laboratory for Timber Construction"
__university__ = "EPFL, Ecole Polytechnique Federale de Lausanne"
__funding__ = "NCCR Digital Fabrication, ETH Zurich"
__version__ = "2021.09"
class MyComponent(component):
    """Grasshopper (GhPython) component exposing plate-pair contact zones."""

    def __new__(cls):
        # Component identity as shown in Grasshopper's "Manis" tab,
        # "Adjacency" panel.
        instance = Grasshopper.Kernel.GH_Component.__new__(cls,
            "Contact Zones", "Contact", """Get contact zones between each pair of plates.""", "Manis", "Adjacency")
        return instance

    def get_ComponentGuid(self):
        # Stable GUID identifying this component to Grasshopper.
        return System.Guid("b45d962c-61e9-4ff8-ab7e-e6e61ca16bf5")

    def SetUpParam(self, p, name, nickname, description):
        # Shared setup for input/output parameters; all are marked optional.
        p.Name = name
        p.NickName = nickname
        p.Description = description
        p.Optional = True

    def RegisterInputParams(self, pManager):
        # Single input: the plate model, received as a generic .NET object.
        p = Grasshopper.Kernel.Parameters.Param_GenericObject()
        self.SetUpParam(p, "model", "model", "Plate model.")
        p.Access = Grasshopper.Kernel.GH_ParamAccess.item
        self.Params.Input.Add(p)

    def RegisterOutputParams(self, pManager):
        # Two outputs: the contact surfaces and their centered planes.
        p = Grasshopper.Kernel.Parameters.Param_Surface()
        self.SetUpParam(p, "zones", "zones", "Contact zone as surface.")
        self.Params.Output.Add(p)
        p = Grasshopper.Kernel.Parameters.Param_Plane()
        self.SetUpParam(p, "planes", "planes", "Centered plane of the contact zone.")
        self.Params.Output.Add(p)

    def SolveInstance(self, DA):
        # Marshal the single input, run the script, marshal both outputs.
        p0 = self.marshal.GetInput(DA, 0)
        result = self.RunScript(p0)
        if result is not None:
            if not hasattr(result, '__getitem__'):
                self.marshal.SetOutput(result, DA, 0, True)
            else:
                self.marshal.SetOutput(result[0], DA, 0, True)
                self.marshal.SetOutput(result[1], DA, 1, True)

    def get_Internal_Icon_24x24(self):
        # NOTE(review): "<KEY>" looks like a redacted base64 icon payload --
        # restore the original string before shipping, or this will raise
        # a FormatException at icon load time.
        o = "<KEY>"
        return System.Drawing.Bitmap(System.IO.MemoryStream(System.Convert.FromBase64String(o)))

    def RunScript(self, model):
        # Convert the model's per-pair python lists into Grasshopper
        # DataTrees (one branch per plate pair).
        def list_to_datatree(raggedList):
            # Nested python list -> DataTree with one branch per sublist.
            rl = raggedList
            result = DataTree[object]()
            for i in range(len(rl)):
                temp = []
                for j in range(len(rl[i])):
                    temp.append(rl[i][j])
                path = GH_Path(i)
                result.AddRange(temp, path)
            return result

        zones = None
        planes = None
        if model:
            zones = list_to_datatree(model.contact_zones)
            planes = list_to_datatree(model.contact_planes)
        else:
            self.AddRuntimeMessage(Grasshopper.Kernel.GH_RuntimeMessageLevel.Warning, 'Waiting to get a model as input.')
        return (zones, planes)
class AssemblyInfo(GhPython.Assemblies.PythonAssemblyInfo):
    """Assembly metadata GhPython uses when compiling this component."""

    def get_AssemblyName(self):
        return "Contact Zones"

    def get_AssemblyDescription(self):
        return """"""

    def get_AssemblyVersion(self):
        return "0.1"

    def get_AuthorName(self):
        # NOTE(review): "<NAME>" is a redacted placeholder -- restore the
        # author's name before shipping.
        return "<NAME>"
def get_Id(self):
return System.Guid("2fae44fc-6e63-4d9b-9ce2-caf8c5a3bdc9") | Gh compilation files/contact.py |
from ghpythonlib.componentbase import dotnetcompiledcomponent as component
import Grasshopper, GhPython
import System
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
__author__ = "<NAME>"
__laboratory__ = "IBOIS, Laboratory for Timber Construction"
__university__ = "EPFL, Ecole Polytechnique Federale de Lausanne"
__funding__ = "NCCR Digital Fabrication, ETH Zurich"
__version__ = "2021.09"
class MyComponent(component):
def __new__(cls):
instance = Grasshopper.Kernel.GH_Component.__new__(cls,
"Contact Zones", "Contact", """Get contact zones between each pair of plates.""", "Manis", "Adjacency")
return instance
def get_ComponentGuid(self):
return System.Guid("b45d962c-61e9-4ff8-ab7e-e6e61ca16bf5")
def SetUpParam(self, p, name, nickname, description):
p.Name = name
p.NickName = nickname
p.Description = description
p.Optional = True
def RegisterInputParams(self, pManager):
p = Grasshopper.Kernel.Parameters.Param_GenericObject()
self.SetUpParam(p, "model", "model", "Plate model.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
def RegisterOutputParams(self, pManager):
p = Grasshopper.Kernel.Parameters.Param_Surface()
self.SetUpParam(p, "zones", "zones", "Contact zone as surface.")
self.Params.Output.Add(p)
p = Grasshopper.Kernel.Parameters.Param_Plane()
self.SetUpParam(p, "planes", "planes", "Centered plane of the contact zone.")
self.Params.Output.Add(p)
def SolveInstance(self, DA):
p0 = self.marshal.GetInput(DA, 0)
result = self.RunScript(p0)
if result is not None:
if not hasattr(result, '__getitem__'):
self.marshal.SetOutput(result, DA, 0, True)
else:
self.marshal.SetOutput(result[0], DA, 0, True)
self.marshal.SetOutput(result[1], DA, 1, True)
def get_Internal_Icon_24x24(self):
o = "<KEY>"
return System.Drawing.Bitmap(System.IO.MemoryStream(System.Convert.FromBase64String(o)))
def RunScript(self, model):
def list_to_datatree(raggedList):
rl = raggedList
result = DataTree[object]()
for i in range(len(rl)):
temp = []
for j in range(len(rl[i])):
temp.append(rl[i][j])
path = GH_Path(i)
result.AddRange(temp, path)
return result
zones = None
planes = None
if model:
zones = list_to_datatree(model.contact_zones)
planes = list_to_datatree(model.contact_planes)
else:
self.AddRuntimeMessage(Grasshopper.Kernel.GH_RuntimeMessageLevel.Warning, 'Waiting to get a model as input.')
return (zones, planes)
class AssemblyInfo(GhPython.Assemblies.PythonAssemblyInfo):
def get_AssemblyName(self):
return "Contact Zones"
def get_AssemblyDescription(self):
return """"""
def get_AssemblyVersion(self):
return "0.1"
def get_AuthorName(self):
return "<NAME>"
def get_Id(self):
return System.Guid("2fae44fc-6e63-4d9b-9ce2-caf8c5a3bdc9") | 0.397938 | 0.10004 |
import json
import os
from decouple import config
DEBUG = False
TEMPLATE_DEBUG = False
secrets_dir = "/tmp/secrets"
db_creds_file = open(f"{secrets_dir}/db.json")
env_file = open(f"{secrets_dir}/env.json")
db_creds = json.load(db_creds_file)["data"]
env = json.load(env_file)["data"]["data"]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": config("OW4_DATABASE_NAME", default="ow4dev"),
"USER": db_creds["username"],
"PASSWORD": db_creds["password"],
"HOST": env["POSTGRES_HOST"],
"PORT": "5432",
},
}
SECRET_KEY = env["SECRET_KEY"]
DATAPORTEN = {
"STUDY": {
"ENABLED": config("OW4_DP_STUDY_ENABLED", cast=bool, default=False),
"TESTING": config("OW4_DP_STUDY_TESTING", cast=bool, default=True),
"CLIENT_ID": env["DP_STUDY_CLIENT_ID"],
"CLIENT_SECRET": env["DP_STUDY_CLIENT_SECRET"],
"REDIRECT_URI": config("OW4_DP_STUDY_REDIRECT_URI", default=""),
"PROVIDER_URL": "https://auth.dataporten.no/oauth/token",
"SCOPES": ["openid", "userid-feide", "profile", "groups", "email"],
}
}
VIMEO_API_TOKEN = env["VIMEO_API_TOKEN"]
WEB_PUSH_PRIVATE_KEY = env["WEB_PUSH_PRIVATE_KEY"]
RECAPTCHA_PUBLIC_KEY = env["RECAPTCHA_PUBLIC_KEY"]
RECAPTCHA_PRIVATE_KEY = env["RECAPTCHA_PRIVATE_KEY"]
NOCAPTCHA = True
RECAPTCHA_USE_SSL = True
STRIPE_PUBLIC_KEYS = {
"arrkom": env["STRIPE_PUBKEY_ARRKOM"],
"prokom": env["STRIPE_PUBKEY_PROKOM"],
"trikom": env["STRIPE_PUBKEY_TRIKOM"],
"fagkom": env["STRIPE_PUBKEY_FAGKOM"],
}
STRIPE_PRIVATE_KEYS = {
"arrkom": env["STRIPE_PRIVKEY_ARRKOM"],
"prokom": env["STRIPE_PRIVKEY_PROKOM"],
"trikom": env["STRIPE_PRIVKEY_TRIKOM"],
"fagkom": env["STRIPE_PRIVKEY_FAGKOM"],
}
SLACK_INVITER = {"team_name": "onlinentnu", "token": env["SLACK_TOKEN"]}
APPROVAL_SETTINGS = {
"SEND_APPLICANT_NOTIFICATION_EMAIL": True,
"SEND_APPROVER_NOTIFICATION_EMAIL": True,
}
AWS_SES_REGION_NAME = "eu-north-1"
AWS_SES_REGION_ENDPOINT = f"email.{AWS_SES_REGION_NAME}.amazonaws.com"
SESSION_COOKIE_SAMESITE = None
ADMINS = (("dotKom", "<EMAIL>"),)
# Override "spam-settings" for django-wiki
WIKI_REVISIONS_PER_HOUR = 1000
WIKI_REVISIONS_PER_MINUTES = 500
WIKI_ATTACHMENTS_EXTENSIONS = [
"pdf",
"doc",
"odt",
"docx",
"txt",
"xlsx",
"xls",
"png",
"psd",
"ai",
"ods",
"zip",
"jpg",
"jpeg",
"gif",
"patch",
]
WIKI_MARKDOWN_HTML_WHITELIST = [
"br",
"hr",
]
BEDKOM_GROUP_ID = 3
FAGKOM_GROUP_ID = 6
COMMON_GROUP_ID = 17
WIKI_OPEN_EDIT_ACCESS = [
12, # Komiteer
14, # Eldstesaadet
]
WIKI_OPEN_EDIT_ACCESS_GROUP_ID = 22
GROUP_SYNCER = [
{
"name": "Komite-enkeltgrupper til gruppen Komiteer",
"source": [
1, # arrKom
2, # banKom
3, # bedKom
4, # dotKom
5, # eksKom
6, # fagKom
7, # proKom
8, # triKom
33, # Realfagskjelleren
18, # seniorKom
10, # pangKom
11, # Hovedstyret
16, # appKom
9, # velKom
24, # itex
36, # Online IL
],
"destination": [12], # Komiteer
},
{
"name": "bedKom og fagKom til felles gruppe (bed&fagKom)",
"source": [3, 6], # bedKom # fagKom
"destination": [17], # bed&fagKom
},
{
"name": "Komiteer som kan redigere Online public wiki",
"source": [12, 14], # Komiteer # Eldsteraadet
"destination": [22], # Wiki - Online edit permissions
},
{
"name": "Komiteer som kan redigere Online Komiteer wiki",
"source": [12, 18], # Komiteer # SeniorKom
"destination": [23], # Wiki - Komiteer access permissions
},
{
"name": "Buddyssystemet for tilgang til wiki",
"source": [
27, # Riddere
18, # Seniorkom
31, # Ex-Hovedstyre
11, # Hovedstyret
],
"destination": [30], # Buddy
},
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"class": "logging.StreamHandler",
},
},
"root": {
"handlers": ["console"],
"level": "INFO",
},
} | onlineweb4/settings/zappa.py | import json
import os
from decouple import config
DEBUG = False
TEMPLATE_DEBUG = False
secrets_dir = "/tmp/secrets"
db_creds_file = open(f"{secrets_dir}/db.json")
env_file = open(f"{secrets_dir}/env.json")
db_creds = json.load(db_creds_file)["data"]
env = json.load(env_file)["data"]["data"]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": config("OW4_DATABASE_NAME", default="ow4dev"),
"USER": db_creds["username"],
"PASSWORD": db_creds["password"],
"HOST": env["POSTGRES_HOST"],
"PORT": "5432",
},
}
SECRET_KEY = env["SECRET_KEY"]
DATAPORTEN = {
"STUDY": {
"ENABLED": config("OW4_DP_STUDY_ENABLED", cast=bool, default=False),
"TESTING": config("OW4_DP_STUDY_TESTING", cast=bool, default=True),
"CLIENT_ID": env["DP_STUDY_CLIENT_ID"],
"CLIENT_SECRET": env["DP_STUDY_CLIENT_SECRET"],
"REDIRECT_URI": config("OW4_DP_STUDY_REDIRECT_URI", default=""),
"PROVIDER_URL": "https://auth.dataporten.no/oauth/token",
"SCOPES": ["openid", "userid-feide", "profile", "groups", "email"],
}
}
VIMEO_API_TOKEN = env["VIMEO_API_TOKEN"]
WEB_PUSH_PRIVATE_KEY = env["WEB_PUSH_PRIVATE_KEY"]
RECAPTCHA_PUBLIC_KEY = env["RECAPTCHA_PUBLIC_KEY"]
RECAPTCHA_PRIVATE_KEY = env["RECAPTCHA_PRIVATE_KEY"]
NOCAPTCHA = True
RECAPTCHA_USE_SSL = True
STRIPE_PUBLIC_KEYS = {
"arrkom": env["STRIPE_PUBKEY_ARRKOM"],
"prokom": env["STRIPE_PUBKEY_PROKOM"],
"trikom": env["STRIPE_PUBKEY_TRIKOM"],
"fagkom": env["STRIPE_PUBKEY_FAGKOM"],
}
STRIPE_PRIVATE_KEYS = {
"arrkom": env["STRIPE_PRIVKEY_ARRKOM"],
"prokom": env["STRIPE_PRIVKEY_PROKOM"],
"trikom": env["STRIPE_PRIVKEY_TRIKOM"],
"fagkom": env["STRIPE_PRIVKEY_FAGKOM"],
}
SLACK_INVITER = {"team_name": "onlinentnu", "token": env["SLACK_TOKEN"]}
APPROVAL_SETTINGS = {
"SEND_APPLICANT_NOTIFICATION_EMAIL": True,
"SEND_APPROVER_NOTIFICATION_EMAIL": True,
}
AWS_SES_REGION_NAME = "eu-north-1"
AWS_SES_REGION_ENDPOINT = f"email.{AWS_SES_REGION_NAME}.amazonaws.com"
SESSION_COOKIE_SAMESITE = None
ADMINS = (("dotKom", "<EMAIL>"),)
# Override "spam-settings" for django-wiki
WIKI_REVISIONS_PER_HOUR = 1000
WIKI_REVISIONS_PER_MINUTES = 500
WIKI_ATTACHMENTS_EXTENSIONS = [
"pdf",
"doc",
"odt",
"docx",
"txt",
"xlsx",
"xls",
"png",
"psd",
"ai",
"ods",
"zip",
"jpg",
"jpeg",
"gif",
"patch",
]
WIKI_MARKDOWN_HTML_WHITELIST = [
"br",
"hr",
]
BEDKOM_GROUP_ID = 3
FAGKOM_GROUP_ID = 6
COMMON_GROUP_ID = 17
WIKI_OPEN_EDIT_ACCESS = [
12, # Komiteer
14, # Eldstesaadet
]
WIKI_OPEN_EDIT_ACCESS_GROUP_ID = 22
GROUP_SYNCER = [
{
"name": "Komite-enkeltgrupper til gruppen Komiteer",
"source": [
1, # arrKom
2, # banKom
3, # bedKom
4, # dotKom
5, # eksKom
6, # fagKom
7, # proKom
8, # triKom
33, # Realfagskjelleren
18, # seniorKom
10, # pangKom
11, # Hovedstyret
16, # appKom
9, # velKom
24, # itex
36, # Online IL
],
"destination": [12], # Komiteer
},
{
"name": "bedKom og fagKom til felles gruppe (bed&fagKom)",
"source": [3, 6], # bedKom # fagKom
"destination": [17], # bed&fagKom
},
{
"name": "Komiteer som kan redigere Online public wiki",
"source": [12, 14], # Komiteer # Eldsteraadet
"destination": [22], # Wiki - Online edit permissions
},
{
"name": "Komiteer som kan redigere Online Komiteer wiki",
"source": [12, 18], # Komiteer # SeniorKom
"destination": [23], # Wiki - Komiteer access permissions
},
{
"name": "Buddyssystemet for tilgang til wiki",
"source": [
27, # Riddere
18, # Seniorkom
31, # Ex-Hovedstyre
11, # Hovedstyret
],
"destination": [30], # Buddy
},
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"class": "logging.StreamHandler",
},
},
"root": {
"handlers": ["console"],
"level": "INFO",
},
} | 0.137475 | 0.099426 |
import io
import random
from tempest.api.compute import base
from tempest.common import image as common_image
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class FlavorsV2NegativeTest(base.BaseV2ComputeTest):
@decorators.attr(type=['negative'])
@utils.services('image')
@decorators.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
def test_boot_with_low_ram(self):
"""Try boot a vm with lower than min ram
Create an image with min_ram value
Try to create server with flavor of insufficient ram size from
that image
"""
flavor = self.flavors_client.show_flavor(
CONF.compute.flavor_ref)['flavor']
min_img_ram = flavor['ram'] + 1
size = random.randint(1024, 4096)
image_file = io.BytesIO(data_utils.random_bytes(size))
params = {
'name': data_utils.rand_name('image'),
'container_format': CONF.image.container_formats[0],
'disk_format': CONF.image.disk_formats[0],
'min_ram': min_img_ram
}
if CONF.image_feature_enabled.api_v1:
params.update({'is_public': False})
params = {'headers': common_image.image_meta_to_headers(**params)}
else:
params.update({'visibility': 'private'})
image = self.images_client.create_image(**params)
image = image['image'] if 'image' in image else image
self.addCleanup(self.images_client.delete_image, image['id'])
if CONF.image_feature_enabled.api_v1:
self.images_client.update_image(image['id'], data=image_file)
else:
self.images_client.store_image_file(image['id'], data=image_file)
self.assertEqual(min_img_ram, image['min_ram'])
# Try to create server with flavor of insufficient ram size
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=image['id'],
flavor=flavor['id']) | tempest/api/compute/flavors/test_flavors_negative.py |
import io
import random
from tempest.api.compute import base
from tempest.common import image as common_image
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class FlavorsV2NegativeTest(base.BaseV2ComputeTest):
@decorators.attr(type=['negative'])
@utils.services('image')
@decorators.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
def test_boot_with_low_ram(self):
"""Try boot a vm with lower than min ram
Create an image with min_ram value
Try to create server with flavor of insufficient ram size from
that image
"""
flavor = self.flavors_client.show_flavor(
CONF.compute.flavor_ref)['flavor']
min_img_ram = flavor['ram'] + 1
size = random.randint(1024, 4096)
image_file = io.BytesIO(data_utils.random_bytes(size))
params = {
'name': data_utils.rand_name('image'),
'container_format': CONF.image.container_formats[0],
'disk_format': CONF.image.disk_formats[0],
'min_ram': min_img_ram
}
if CONF.image_feature_enabled.api_v1:
params.update({'is_public': False})
params = {'headers': common_image.image_meta_to_headers(**params)}
else:
params.update({'visibility': 'private'})
image = self.images_client.create_image(**params)
image = image['image'] if 'image' in image else image
self.addCleanup(self.images_client.delete_image, image['id'])
if CONF.image_feature_enabled.api_v1:
self.images_client.update_image(image['id'], data=image_file)
else:
self.images_client.store_image_file(image['id'], data=image_file)
self.assertEqual(min_img_ram, image['min_ram'])
# Try to create server with flavor of insufficient ram size
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=image['id'],
flavor=flavor['id']) | 0.403449 | 0.284576 |
import random
import string
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
from typing import List, Tuple
from feast import cli
from feast.feature_store import FeatureStore
def get_example_repo(example_repo_py) -> str:
parent = Path(__file__).parent
traversal_limit = 5
while traversal_limit > 0 and parent.parts[-1] != "tests":
traversal_limit -= 1
parent = parent.parent
if parent.parts[-1] != "tests":
raise ValueError(f"Unable to find where repo {example_repo_py} is located")
return (parent / "example_repos" / example_repo_py).read_text()
class CliRunner:
"""
NB. We can't use test runner helper from click here, since it doesn't start a new Python
interpreter. And we need a new interpreter for each test since we dynamically import
modules from the feature repo, and it is hard to clean up that state otherwise.
"""
def run(self, args: List[str], cwd: Path) -> subprocess.CompletedProcess:
return subprocess.run([sys.executable, cli.__file__] + args, cwd=cwd)
def run_with_output(self, args: List[str], cwd: Path) -> Tuple[int, bytes]:
try:
return (
0,
subprocess.check_output(
[sys.executable, cli.__file__] + args,
cwd=cwd,
stderr=subprocess.STDOUT,
),
)
except subprocess.CalledProcessError as e:
return e.returncode, e.output
@contextmanager
def local_repo(self, example_repo_py: str, offline_store: str):
"""
Convenience method to set up all the boilerplate for a local feature repo.
"""
project_id = "test" + "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name:
repo_path = Path(repo_dir_name)
data_path = Path(data_dir_name)
repo_config = repo_path / "feature_store.yaml"
repo_config.write_text(
dedent(
f"""
project: {project_id}
registry: {data_path / "registry.db"}
provider: local
online_store:
path: {data_path / "online_store.db"}
offline_store:
type: {offline_store}
"""
)
)
repo_example = repo_path / "example.py"
repo_example.write_text(example_repo_py)
result = self.run(["apply"], cwd=repo_path)
assert result.returncode == 0
yield FeatureStore(repo_path=str(repo_path), config=None)
result = self.run(["teardown"], cwd=repo_path)
assert result.returncode == 0 | sdk/python/tests/utils/cli_utils.py | import random
import string
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
from typing import List, Tuple
from feast import cli
from feast.feature_store import FeatureStore
def get_example_repo(example_repo_py) -> str:
parent = Path(__file__).parent
traversal_limit = 5
while traversal_limit > 0 and parent.parts[-1] != "tests":
traversal_limit -= 1
parent = parent.parent
if parent.parts[-1] != "tests":
raise ValueError(f"Unable to find where repo {example_repo_py} is located")
return (parent / "example_repos" / example_repo_py).read_text()
class CliRunner:
"""
NB. We can't use test runner helper from click here, since it doesn't start a new Python
interpreter. And we need a new interpreter for each test since we dynamically import
modules from the feature repo, and it is hard to clean up that state otherwise.
"""
def run(self, args: List[str], cwd: Path) -> subprocess.CompletedProcess:
return subprocess.run([sys.executable, cli.__file__] + args, cwd=cwd)
def run_with_output(self, args: List[str], cwd: Path) -> Tuple[int, bytes]:
try:
return (
0,
subprocess.check_output(
[sys.executable, cli.__file__] + args,
cwd=cwd,
stderr=subprocess.STDOUT,
),
)
except subprocess.CalledProcessError as e:
return e.returncode, e.output
@contextmanager
def local_repo(self, example_repo_py: str, offline_store: str):
"""
Convenience method to set up all the boilerplate for a local feature repo.
"""
project_id = "test" + "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name:
repo_path = Path(repo_dir_name)
data_path = Path(data_dir_name)
repo_config = repo_path / "feature_store.yaml"
repo_config.write_text(
dedent(
f"""
project: {project_id}
registry: {data_path / "registry.db"}
provider: local
online_store:
path: {data_path / "online_store.db"}
offline_store:
type: {offline_store}
"""
)
)
repo_example = repo_path / "example.py"
repo_example.write_text(example_repo_py)
result = self.run(["apply"], cwd=repo_path)
assert result.returncode == 0
yield FeatureStore(repo_path=str(repo_path), config=None)
result = self.run(["teardown"], cwd=repo_path)
assert result.returncode == 0 | 0.511961 | 0.226912 |
import scrapy
from crawlerbot.items import TgItem
import json
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
from crawlerbot.district_names import district_map
from selenium.common.exceptions import NoSuchElementException, TimeoutException
class threntSpider(scrapy.Spider):
name = 'threntspider'
output_name = 'thrent'
custom_settings = {
'ITEM_PIPELINES': {
'crawlerbot.pipelines.JsonPipeline': 300,
'crawlerbot.pipelines.MongoPipeline': 400
}
# 'LOG_FILE': 'crawlerbot/logs/demospider.log',
# 'LOG_LEVEL': 'DEBUG'
}
curDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dirName = os.path.join(curDir, 'json')
try:
with open(os.path.join(dirName, 'thlinkrent.json'), 'r') as f:
data = json.load(f)
urls = [d['link'] for d in data]
# start_urls = urls
start_urls = urls[:5000]
# start_urls = ['https://www.thaihometown.com/home/1398887', 'https://www.thaihometown.com/condo/1475282', 'https://www.thaihometown.com/condo/1591028']
except FileNotFoundError:
pass
def parse(self, response):
item = TgItem()
item['pid'] = response.request.url.split('/')[4]
item['ptype'] = response.request.url.split('/')[3].capitalize()
if item['ptype'] == 'Home':
item['ptype'] = 'House'
item['name'] = response.xpath('//div[@class="namedesw9"]/h1/text()').extract_first()
rooms = response.xpath('//td[contains(text(),"จำนวนห้อง")]/../td[@class="table_set3"]/a/text()').extract()
if len(rooms[0].split()) > 1:
item['bed'] = rooms[0].split()[0]
else:
item['bed'] = rooms[0]
item['bath'] = rooms[1].split()[0]
district = response.xpath('//td[contains(text(),"เขตที่ตั้ง")]/../td[@class="table_set3"]/a/text()').extract_first()
item['location'] = district_map[district]
area = response.xpath('//div[@class="sqm_right"]/a/text()').extract_first().split()
if area[1] == 'ตารางวา':
item['size'] = float(area[0]) * 4
else:
item['size'] = float(area[0])
price = response.xpath('//a[contains(text(),"บาท/เดือน")]/text()').extract()[1].split()
item['price'] = price[1].replace(',','')
item['daypost'] = response.xpath('//div[@class="datedetail"]/text()').extract_first()
map_url = response.xpath('//iframe[@id="GMap"]/@src').extract_first()
ggmap_url = response.xpath('//div[@class="maps_google2"]/a/@href').extract_first()
if map_url:
request = scrapy.Request(map_url, callback=self.parse_latlng)
request.meta['item'] = item
return request
elif ggmap_url:
browser = webdriver.Chrome()
browser.get(ggmap_url)
wait = WebDriverWait(browser, 30)
# browser.find_element_by_xpath("//div[@id='pclose']").click()
browser.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
# browser.implicitly_wait(30) # seconds
browser.switch_to.frame(browser.find_element_by_xpath('//div[@id="divMapFull"]/iframe'))
try:
print("11111111 --------")
nav1 = wait.until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"ดูแผนที่ขนาดใหญ่")]')))
# nav1 = browser.find_element_by_xpath("//a[contains(text(),'ดูแผนที่ขนาดใหญ่')]")
map_url = nav1.get_attribute('href')
print(map_url + " --- 11111111")
except TimeoutException:
print("22222222 --------")
nav2 = wait.until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"View larger map")]')))
# nav2 = browser.find_element_by_xpath("//a[contains(text(),'View larger map')]")
map_url = nav2.get_attribute('href')
print(map_url + " --- 22222222")
# nav = browser.find_element_by_xpath("//div[@class='google-maps-link']/a")
# nav = wait.until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"ดูแผนที่ขนาดใหญ่")]')))
browser.close()
try:
item['latlng'] = re.search('=.+&z', map_url).group(0)[1:-2].split(',')
except AttributeError:
item['latlng'] = ','.split(',')
else:
item['latlng'] = ','.split(',')
return item
def parse_latlng(self, response):
item = response.meta['item']
text = response.xpath('//script[@type="text/javascript"]/text()').extract_first()
try:
item['latlng'] = re.search('\(.+\)', text).group(0)[1:-1].split(',')
except AttributeError:
item['latlng'] = ','.split(',')
return item | crawlerbot/spiders/thrent.py | import scrapy
from crawlerbot.items import TgItem
import json
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
from crawlerbot.district_names import district_map
from selenium.common.exceptions import NoSuchElementException, TimeoutException
class threntSpider(scrapy.Spider):
name = 'threntspider'
output_name = 'thrent'
custom_settings = {
'ITEM_PIPELINES': {
'crawlerbot.pipelines.JsonPipeline': 300,
'crawlerbot.pipelines.MongoPipeline': 400
}
# 'LOG_FILE': 'crawlerbot/logs/demospider.log',
# 'LOG_LEVEL': 'DEBUG'
}
curDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dirName = os.path.join(curDir, 'json')
try:
with open(os.path.join(dirName, 'thlinkrent.json'), 'r') as f:
data = json.load(f)
urls = [d['link'] for d in data]
# start_urls = urls
start_urls = urls[:5000]
# start_urls = ['https://www.thaihometown.com/home/1398887', 'https://www.thaihometown.com/condo/1475282', 'https://www.thaihometown.com/condo/1591028']
except FileNotFoundError:
pass
def parse(self, response):
item = TgItem()
item['pid'] = response.request.url.split('/')[4]
item['ptype'] = response.request.url.split('/')[3].capitalize()
if item['ptype'] == 'Home':
item['ptype'] = 'House'
item['name'] = response.xpath('//div[@class="namedesw9"]/h1/text()').extract_first()
rooms = response.xpath('//td[contains(text(),"จำนวนห้อง")]/../td[@class="table_set3"]/a/text()').extract()
if len(rooms[0].split()) > 1:
item['bed'] = rooms[0].split()[0]
else:
item['bed'] = rooms[0]
item['bath'] = rooms[1].split()[0]
district = response.xpath('//td[contains(text(),"เขตที่ตั้ง")]/../td[@class="table_set3"]/a/text()').extract_first()
item['location'] = district_map[district]
area = response.xpath('//div[@class="sqm_right"]/a/text()').extract_first().split()
if area[1] == 'ตารางวา':
item['size'] = float(area[0]) * 4
else:
item['size'] = float(area[0])
price = response.xpath('//a[contains(text(),"บาท/เดือน")]/text()').extract()[1].split()
item['price'] = price[1].replace(',','')
item['daypost'] = response.xpath('//div[@class="datedetail"]/text()').extract_first()
map_url = response.xpath('//iframe[@id="GMap"]/@src').extract_first()
ggmap_url = response.xpath('//div[@class="maps_google2"]/a/@href').extract_first()
if map_url:
request = scrapy.Request(map_url, callback=self.parse_latlng)
request.meta['item'] = item
return request
elif ggmap_url:
browser = webdriver.Chrome()
browser.get(ggmap_url)
wait = WebDriverWait(browser, 30)
# browser.find_element_by_xpath("//div[@id='pclose']").click()
browser.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
# browser.implicitly_wait(30) # seconds
browser.switch_to.frame(browser.find_element_by_xpath('//div[@id="divMapFull"]/iframe'))
try:
print("11111111 --------")
nav1 = wait.until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"ดูแผนที่ขนาดใหญ่")]')))
# nav1 = browser.find_element_by_xpath("//a[contains(text(),'ดูแผนที่ขนาดใหญ่')]")
map_url = nav1.get_attribute('href')
print(map_url + " --- 11111111")
except TimeoutException:
print("22222222 --------")
nav2 = wait.until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"View larger map")]')))
# nav2 = browser.find_element_by_xpath("//a[contains(text(),'View larger map')]")
map_url = nav2.get_attribute('href')
print(map_url + " --- 22222222")
# nav = browser.find_element_by_xpath("//div[@class='google-maps-link']/a")
# nav = wait.until(EC.presence_of_element_located((By.XPATH, '//a[contains(text(),"ดูแผนที่ขนาดใหญ่")]')))
browser.close()
try:
item['latlng'] = re.search('=.+&z', map_url).group(0)[1:-2].split(',')
except AttributeError:
item['latlng'] = ','.split(',')
else:
item['latlng'] = ','.split(',')
return item
def parse_latlng(self, response):
item = response.meta['item']
text = response.xpath('//script[@type="text/javascript"]/text()').extract_first()
try:
item['latlng'] = re.search('\(.+\)', text).group(0)[1:-1].split(',')
except AttributeError:
item['latlng'] = ','.split(',')
return item | 0.124559 | 0.088269 |
import numpy as np
from .core import SpectrumBasedEstimatorBase, ensure_covariance_size
def f_bartlett(A, R):
r"""Computes the spectrum output of the Bartlett beamformer.
.. math::
P_{\mathrm{Bartlett}}(\theta)
= \mathbf{a}(\theta)^H \mathbf{R} \mathbf{a}(\theta)
Args:
A: m x k steering matrix of candidate direction-of-arrivals, where
m is the number of sensors and k is the number of candidate
direction-of-arrivals.
R: m x m covariance matrix.
"""
return np.sum(A.conj() * (R @ A), axis=0).real
def f_mvdr(A, R):
r"""Compute the spectrum output of the Bartlett beamformer.
.. math::
P_{\mathrm{MVDR}}(\theta)
= \frac{1}{\mathbf{a}(\theta)^H \mathbf{R}^{-1} \mathbf{a}(\theta)}
Args:
A: m x k steering matrix of candidate direction-of-arrivals, where
m is the number of sensors and k is the number of candidate
direction-of-arrivals.
R: m x m covariance matrix.
"""
return 1.0 / np.sum(A.conj() * np.linalg.lstsq(R, A, None)[0], axis=0).real
class BartlettBeamformer(SpectrumBasedEstimatorBase):
"""Creates a Barlett-beamformer based estimator.
This estimator is also named beamscan based estimator.
The spectrum is computed on a predefined-grid using
:meth:`~doatools.estimation.beamforming.f_bartlett`, and the source
locations are estimated by identifying the peaks.
Args:
array (~doatools.model.arrays.ArrayDesign): Array design.
wavelength (float): Wavelength of the carrier wave.
search_grid (~doatools.estimation.grid.SearchGrid): The search grid
used to locate the sources.
**kwargs: Other keyword arguments supported by
:class:`~doatools.estimation.core.SpectrumBasedEstimatorBase`.
References:
[1] <NAME>, Optimum array processing. New York: Wiley, 2002.
"""
def __init__(self, array, wavelength, search_grid, **kwargs):
super().__init__(array, wavelength, search_grid, **kwargs)
def estimate(self, R, k, **kwargs):
"""Estimates the source locations from the given covariance matrix.
Args:
R (~numpy.ndarray): Covariance matrix input. The size of R must
match that of the array design used when creating this
estimator.
k (int): Expected number of sources.
return_spectrum (bool): Set to ``True`` to also output the spectrum
for visualization. Default value if ``False``.
refine_estimates (bool): Set to True to enable grid refinement to
obtain potentially more accurate estimates.
refinement_density (int): Density of the refinement grids. Higher
density values lead to denser refinement grids and increased
computational complexity. Default value is 10.
refinement_iters (int): Number of refinement iterations. More
iterations generally lead to better results, at the cost of
increased computational complexity. Default value is 3.
Returns:
A tuple with the following elements.
* resolved (:class:`bool`): A boolean indicating if the desired
number of sources are found. This flag does **not** guarantee that
the estimated source locations are correct. The estimated source
locations may be completely wrong!
If resolved is False, both ``estimates`` and ``spectrum`` will be
``None``.
* estimates (:class:`~doatools.model.sources.SourcePlacement`):
A :class:`~doatools.model.sources.SourcePlacement` instance of the
same type as the one used in the search grid, represeting the
estimated source locations. Will be ``None`` if resolved is
``False``.
* spectrum (:class:`~numpy.ndarray`): An numpy array of the same
shape of the specified search grid, consisting of values evaluated
at the grid points. Only present if ``return_spectrum`` is
``True``.
"""
ensure_covariance_size(R, self._array)
return self._estimate(lambda A: f_bartlett(A, R), k, **kwargs)
class MVDRBeamformer(SpectrumBasedEstimatorBase):
"""Creates a MVDR-beamformer based estimator.
The spectrum is computed on a predefined-grid using
:meth:`~doatools.estimation.beamforming.f_mvdr`, and the source locations
are estimated by identifying the peaks.
Args:
array (~doatools.model.arrays.ArrayDesign): Array design.
wavelength (float): Wavelength of the carrier wave.
search_grid (~doatools.estimation.grid.SearchGrid): The search grid
used to locate the sources.
**kwargs: Other keyword arguments supported by
:class:`~doatools.estimation.core.SpectrumBasedEstimatorBase`.
References:
[1] <NAME>, Optimum array processing. New York: Wiley, 2002.
"""
def __init__(self, array, wavelength, search_grid, **kwargs):
super().__init__(array, wavelength, search_grid, **kwargs)
def estimate(self, R, k, **kwargs):
"""
Estimates the source locations from the given covariance matrix.
Args:
R (~numpy.ndarray): Covariance matrix input. The size of R must
match that of the array design used when creating this
estimator.
k (int): Expected number of sources.
return_spectrum (bool): Set to ``True`` to also output the spectrum
for visualization. Default value if ``False``.
refine_estimates (bool): Set to True to enable grid refinement to
obtain potentially more accurate estimates.
refinement_density (int): Density of the refinement grids. Higher
density values lead to denser refinement grids and increased
computational complexity. Default value is 10.
refinement_iters (int): Number of refinement iterations. More
iterations generally lead to better results, at the cost of
increased computational complexity. Default value is 3.
Returns:
A tuple with the following elements.
* resolved (:class:`bool`): A boolean indicating if the desired
number of sources are found. This flag does **not** guarantee that
the estimated source locations are correct. The estimated source
locations may be completely wrong!
If resolved is False, both ``estimates`` and ``spectrum`` will be
``None``.
* estimates (:class:`~doatools.model.sources.SourcePlacement`):
A :class:`~doatools.model.sources.SourcePlacement` instance of the
same type as the one used in the search grid, represeting the
estimated source locations. Will be ``None`` if resolved is
``False``.
* spectrum (:class:`~numpy.ndarray`): An numpy array of the same
shape of the specified search grid, consisting of values evaluated
at the grid points. Only present if ``return_spectrum`` is
``True``.
"""
ensure_covariance_size(R, self._array)
return self._estimate(lambda A: f_mvdr(A, R), k, **kwargs) | doatools/estimation/beamforming.py | import numpy as np
from .core import SpectrumBasedEstimatorBase, ensure_covariance_size
def f_bartlett(A, R):
    r"""Evaluate the Bartlett (conventional) beamformer spectrum.

    .. math::
        P_{\mathrm{Bartlett}}(\theta)
        = \mathbf{a}(\theta)^H \mathbf{R} \mathbf{a}(\theta)

    Args:
        A: m x k steering matrix of candidate direction-of-arrivals, where
            m is the number of sensors and k is the number of candidate
            direction-of-arrivals.
        R: m x m covariance matrix.

    Returns:
        A length-k real vector of spectrum values, one per candidate DOA.
    """
    # a^H R a for every column a of A, computed column-wise without an
    # explicit Python loop: elementwise conj(A) * (R A), summed over sensors.
    projected = R @ A
    return (A.conj() * projected).sum(axis=0).real
def f_mvdr(A, R):
    r"""Computes the spectrum output of the MVDR (Capon) beamformer.

    .. math::
        P_{\mathrm{MVDR}}(\theta)
        = \frac{1}{\mathbf{a}(\theta)^H \mathbf{R}^{-1} \mathbf{a}(\theta)}

    Args:
        A: m x k steering matrix of candidate direction-of-arrivals, where
            m is the number of sensors and k is the number of candidate
            direction-of-arrivals.
        R: m x m covariance matrix.

    Returns:
        A length-k real vector of MVDR spectrum values.
    """
    # lstsq computes R^{-1} A in the least-squares sense, which also
    # tolerates an ill-conditioned (near-singular) covariance estimate,
    # unlike a plain solve. rcond=None selects the machine-precision cutoff.
    return 1.0 / np.sum(A.conj() * np.linalg.lstsq(R, A, rcond=None)[0], axis=0).real
class BartlettBeamformer(SpectrumBasedEstimatorBase):
    """Bartlett-beamformer (beamscan) based DOA estimator.

    The spatial spectrum is evaluated over a predefined search grid with
    :meth:`~doatools.estimation.beamforming.f_bartlett`, and source
    locations are estimated by identifying the spectrum peaks.

    Args:
        array (~doatools.model.arrays.ArrayDesign): Array design.
        wavelength (float): Wavelength of the carrier wave.
        search_grid (~doatools.estimation.grid.SearchGrid): The search grid
            used to locate the sources.
        **kwargs: Other keyword arguments supported by
            :class:`~doatools.estimation.core.SpectrumBasedEstimatorBase`.

    References:
        [1] H. L. Van Trees, Optimum array processing. New York: Wiley, 2002.
    """

    def __init__(self, array, wavelength, search_grid, **kwargs):
        super().__init__(array, wavelength, search_grid, **kwargs)

    def estimate(self, R, k, **kwargs):
        """Estimates the source locations from the given covariance matrix.

        Args:
            R (~numpy.ndarray): Covariance matrix input. Its size must match
                the array design used when creating this estimator.
            k (int): Expected number of sources.
            return_spectrum (bool): Set to ``True`` to also output the
                spectrum for visualization. Default value is ``False``.
            refine_estimates (bool): Set to ``True`` to enable grid
                refinement to obtain potentially more accurate estimates.
            refinement_density (int): Density of the refinement grids.
                Higher values lead to denser refinement grids and increased
                computational complexity. Default value is 10.
            refinement_iters (int): Number of refinement iterations. More
                iterations generally lead to better results, at the cost of
                increased computational complexity. Default value is 3.

        Returns:
            A tuple with the following elements.

            * resolved (:class:`bool`): ``True`` if the desired number of
              sources was found. This flag does **not** guarantee that the
              estimated source locations are correct — they may be
              completely wrong. When ``False``, both ``estimates`` and
              ``spectrum`` are ``None``.
            * estimates (:class:`~doatools.model.sources.SourcePlacement`):
              Estimated source locations, of the same type as the one used
              in the search grid. ``None`` if ``resolved`` is ``False``.
            * spectrum (:class:`~numpy.ndarray`): Array of the same shape as
              the search grid with the spectrum values at the grid points.
              Only present when ``return_spectrum`` is ``True``.
        """
        ensure_covariance_size(R, self._array)

        def compute_spectrum(A):
            # Close over R so the base-class search loop only needs to
            # supply steering matrices.
            return f_bartlett(A, R)

        return self._estimate(compute_spectrum, k, **kwargs)
class MVDRBeamformer(SpectrumBasedEstimatorBase):
"""Creates a MVDR-beamformer based estimator.
The spectrum is computed on a predefined-grid using
:meth:`~doatools.estimation.beamforming.f_mvdr`, and the source locations
are estimated by identifying the peaks.
Args:
array (~doatools.model.arrays.ArrayDesign): Array design.
wavelength (float): Wavelength of the carrier wave.
search_grid (~doatools.estimation.grid.SearchGrid): The search grid
used to locate the sources.
**kwargs: Other keyword arguments supported by
:class:`~doatools.estimation.core.SpectrumBasedEstimatorBase`.
References:
[1] <NAME>, Optimum array processing. New York: Wiley, 2002.
"""
def __init__(self, array, wavelength, search_grid, **kwargs):
super().__init__(array, wavelength, search_grid, **kwargs)
def estimate(self, R, k, **kwargs):
"""
Estimates the source locations from the given covariance matrix.
Args:
R (~numpy.ndarray): Covariance matrix input. The size of R must
match that of the array design used when creating this
estimator.
k (int): Expected number of sources.
return_spectrum (bool): Set to ``True`` to also output the spectrum
for visualization. Default value if ``False``.
refine_estimates (bool): Set to True to enable grid refinement to
obtain potentially more accurate estimates.
refinement_density (int): Density of the refinement grids. Higher
density values lead to denser refinement grids and increased
computational complexity. Default value is 10.
refinement_iters (int): Number of refinement iterations. More
iterations generally lead to better results, at the cost of
increased computational complexity. Default value is 3.
Returns:
A tuple with the following elements.
* resolved (:class:`bool`): A boolean indicating if the desired
number of sources are found. This flag does **not** guarantee that
the estimated source locations are correct. The estimated source
locations may be completely wrong!
If resolved is False, both ``estimates`` and ``spectrum`` will be
``None``.
* estimates (:class:`~doatools.model.sources.SourcePlacement`):
A :class:`~doatools.model.sources.SourcePlacement` instance of the
same type as the one used in the search grid, represeting the
estimated source locations. Will be ``None`` if resolved is
``False``.
* spectrum (:class:`~numpy.ndarray`): An numpy array of the same
shape of the specified search grid, consisting of values evaluated
at the grid points. Only present if ``return_spectrum`` is
``True``.
"""
ensure_covariance_size(R, self._array)
return self._estimate(lambda A: f_mvdr(A, R), k, **kwargs) | 0.959639 | 0.824956 |
import csv
import gzip
import casanova
import pytest
import time
import sys
from io import StringIO
from collections import defaultdict
from quenouille import imap_unordered
from test.utils import collect_csv
from casanova.resuming import (
LastCellComparisonResumer,
RowCountResumer,
ThreadSafeResumer
)
from casanova.exceptions import (
EmptyFileError
)
class TestEnricher(object):
def test_exceptions(self, tmpdir):
    """Enricher construction fails loudly on invalid inputs."""
    # An empty input CSV has no header row to enrich from.
    with pytest.raises(EmptyFileError):
        casanova.enricher(StringIO(''), StringIO(''))
    output_path = str(tmpdir.join('./wrong-resumer.csv'))
    # A plain (non thread-safe) enricher is expected to reject a
    # ThreadSafeResumer as its output target.
    with pytest.raises(TypeError):
        resumer = ThreadSafeResumer(output_path)
        with open('./test/resources/people.csv') as f, resumer:
            casanova.enricher(f, resumer)
def test_basics(self, tmpdir):
    """Enriching a plain CSV appends the added column's values row by row."""
    destination = str(tmpdir.join('./enriched.csv'))
    with open('./test/resources/people.csv') as source, \
            open(destination, 'w', newline='') as sink:
        enricher = casanova.enricher(source, sink, add=('line',))
        for index, record in enumerate(enricher):
            enricher.writerow(record, [index])
    expected = [
        ['name', 'surname', 'line'],
        ['John', 'Matthews', '0'],
        ['Mary', 'Sue', '1'],
        ['Julia', 'Stone', '2']
    ]
    assert collect_csv(destination) == expected
def test_dialect(self, tmpdir):
    """CSV dialect options (here a ';' delimiter) are forwarded to the reader."""
    output_path = str(tmpdir.join('./enriched.csv'))
    with open('./test/resources/semicolons.csv') as f, \
            open(output_path, 'w', newline='') as of:
        enricher = casanova.enricher(f, of, add=('line',), delimiter=';')
        for i, row in enumerate(enricher):
            enricher.writerow(row, [i])
    assert collect_csv(output_path) == [
        ['name', 'surname', 'line'],
        ['Rose', 'Philips', '0'],
        ['Luke', 'Atman', '1']
    ]
def test_gzip(self, tmpdir):
    """Gzipped input opened in text mode ('rt') enriches like a plain file."""
    output_path = str(tmpdir.join('./enriched.csv'))
    with gzip.open('./test/resources/people.csv.gz', 'rt') as f, \
            open(output_path, 'w', newline='') as of:
        enricher = casanova.enricher(f, of, add=('line',))
        for i, row in enumerate(enricher):
            enricher.writerow(row, [i])
    assert collect_csv(output_path) == [
        ['name', 'surname', 'line'],
        ['John', 'Matthews', '0'],
        ['Mary', 'Sue', '1'],
        ['Julia', 'Stone', '2']
    ]
def test_keep(self, tmpdir):
    """keep= restricts which original columns survive in the output."""
    output_path = str(tmpdir.join('./enriched-keep.csv'))
    with open('./test/resources/people.csv') as f, \
            open(output_path, 'w', newline='') as of:
        enricher = casanova.enricher(f, of, keep=('name',), add=('line',))
        for i, row in enumerate(enricher):
            enricher.writerow(row, [i])
    # 'surname' is dropped; only the kept column plus the added one remain.
    assert collect_csv(output_path) == [
        ['name', 'line'],
        ['John', '0'],
        ['Mary', '1'],
        ['Julia', '2']
    ]
def test_padding(self, tmpdir):
    """writerow without added values pads the added columns with empty cells."""
    output_path = str(tmpdir.join('./enriched-padding.csv'))
    with open('./test/resources/people.csv') as f, \
            open(output_path, 'w', newline='') as of:
        enricher = casanova.enricher(f, of, keep=('name',), add=('line',))
        for i, row in enumerate(enricher):
            # No second argument: the 'line' column should come out empty.
            enricher.writerow(row)
    assert collect_csv(output_path) == [
        ['name', 'line'],
        ['John', ''],
        ['Mary', ''],
        ['Julia', '']
    ]
def test_resumable(self, tmpdir):
    """RowCountResumer: an interrupted enrichment resumes where it stopped."""
    log = defaultdict(list)

    def listener(name, row):
        # Record every resume event (event name -> rows) for inspection.
        log[name].append(list(row))

    output_path = str(tmpdir.join('./enriched-resumable.csv'))
    resumer = RowCountResumer(output_path, listener=listener)
    # First pass: write a single row, then stop (simulated interruption).
    with open('./test/resources/people.csv') as f, resumer:
        enricher = casanova.enricher(
            f, resumer,
            add=('x2',),
            keep=('name',)
        )
        row = next(iter(enricher))
        enricher.writerow(row, [2])
    assert collect_csv(output_path) == [
        ['name', 'x2'],
        ['John', '2']
    ]
    # Second pass: iteration restarts at the first unprocessed input row,
    # hence the (i + 2) * 2 values for the two remaining rows.
    with open('./test/resources/people.csv') as f, resumer:
        enricher = casanova.enricher(
            f, resumer,
            add=('x2',),
            keep=('name',)
        )
        for i, row in enumerate(enricher):
            enricher.writerow(row, [(i + 2) * 2])
    assert collect_csv(output_path) == [
        ['name', 'x2'],
        ['John', '2'],
        ['Mary', '4'],
        ['Julia', '6']
    ]
    # The listener saw the already-written output row and the matching
    # input row that was skipped on resume.
    assert log == {
        'output.row': [['John', '2']],
        'input.row': [['John', 'Matthews']]
    }
def test_resumable_last_cell_comparison(self, tmpdir):
    """LastCellComparisonResumer: resume by matching the last written cell
    (column 0) against the input, instead of counting rows."""
    log = defaultdict(list)

    def listener(name, row):
        log[name].append(list(row))

    output_path = str(tmpdir.join('./enriched-resumable.csv'))
    resumer = LastCellComparisonResumer(output_path, value_column=0, listener=listener)
    # First pass: write one row then stop (simulated interruption).
    with open('./test/resources/people.csv') as f, resumer:
        enricher = casanova.enricher(
            f, resumer,
            add=('x2',),
            keep=('name',)
        )
        row = next(iter(enricher))
        enricher.writerow(row, [2])
    assert collect_csv(output_path) == [
        ['name', 'x2'],
        ['John', '2']
    ]
    # Second pass resumes past 'John' and processes the remaining rows.
    with open('./test/resources/people.csv') as f, resumer:
        enricher = casanova.enricher(
            f, resumer,
            add=('x2',),
            keep=('name',)
        )
        for i, row in enumerate(enricher):
            enricher.writerow(row, [(i + 2) * 2])
    assert collect_csv(output_path) == [
        ['name', 'x2'],
        ['John', '2'],
        ['Mary', '4'],
        ['Julia', '6']
    ]
    # Unlike RowCountResumer, only the skipped input row is reported here.
    assert log == {'input.row': [['John', 'Matthews']]}
def test_threadsafe(self, tmpdir):
    """threadsafe_enricher: rows written out of order keep their index column."""

    def job(payload):
        i, row = payload
        # Sleep proportionally to the third column so completion order is
        # scrambled relative to input order.
        s = int(row[2])
        time.sleep(s * .01)
        return i, row

    output_path = str(tmpdir.join('./enriched-resumable-threadsafe.csv'))
    with open('./test/resources/people_unordered.csv') as f, \
            open(output_path, 'w', newline='') as of:
        enricher = casanova.threadsafe_enricher(
            f, of,
            add=('x2',),
            keep=('name',)
        )
        for i, row in imap_unordered(enricher, job, 3):
            enricher.writerow(i, row, [(i + 1) * 2])

    def sort_output(o):
        # Output order is nondeterministic; compare as sorted tuples.
        return sorted(tuple(i) for i in o)

    # The threadsafe enricher adds an 'index' column tracking input order.
    assert sort_output(collect_csv(output_path)) == sort_output([
        ['name', 'index', 'x2'],
        ['Mary', '1', '4'],
        ['Julia', '2', '6'],
        ['John', '0', '2']
    ])
def test_threadsafe_cells(self, tmpdir):
    """threadsafe cells() yields (index, value) pairs, optionally with rows."""
    output_path = str(tmpdir.join('./enriched-resumable-threadsafe.csv'))
    with open('./test/resources/people_unordered.csv') as f, \
            open(output_path, 'a+') as of:
        enricher = casanova.threadsafe_enricher(
            f, of,
            add=('x2',),
            keep=('name',)
        )
        names = [t for t in enricher.cells('name')]
    assert sorted(names) == sorted([(0, 'John'), (1, 'Mary'), (2, 'Julia')])
    # with_rows=True additionally yields the full row between index and value.
    with open('./test/resources/people_unordered.csv') as f, \
            open(output_path, 'a+') as of:
        enricher = casanova.threadsafe_enricher(
            f, of,
            add=('x2',),
            keep=('name',)
        )
        names = [(i, v) for i, row, v in enricher.cells('name', with_rows=True)]
    assert names == [(0, 'John'), (1, 'Mary'), (2, 'Julia')]
def test_threadsafe_resumable(self, tmpdir):
    """ThreadSafeResumer: resuming completes only the rows missing from the
    interrupted, out-of-order first run."""
    log = defaultdict(list)

    def listener(name, row):
        log[name].append(list(row))

    def job(payload):
        i, row = payload
        # Sleep proportionally to the third column to scramble completion order.
        s = int(row[2])
        time.sleep(s * .1)
        return i, row

    output_path = str(tmpdir.join('./enriched-resumable-threadsafe.csv'))
    resumer = ThreadSafeResumer(output_path, listener=listener)
    # First pass: stop after two rows have been written (simulated crash).
    with open('./test/resources/people_unordered.csv') as f, resumer:
        enricher = casanova.threadsafe_enricher(
            f, resumer,
            add=('x2',),
            keep=('name',)
        )
        for j, (i, row) in enumerate(imap_unordered(enricher, job, 3)):
            enricher.writerow(i, row, [(i + 1) * 2])
            if j == 1:
                break

    def sort_output(o):
        # Output order is nondeterministic; compare as sorted tuples.
        return sorted(tuple(i) for i in o)

    assert sort_output(collect_csv(output_path)) == sort_output([
        ['name', 'index', 'x2'],
        ['Mary', '1', '4'],
        ['Julia', '2', '6']
    ])
    # Second pass only processes the one row ('John') missing from the output.
    with open('./test/resources/people_unordered.csv') as f, resumer:
        enricher = casanova.threadsafe_enricher(
            f, resumer,
            add=('x2',),
            keep=('name',)
        )
        for j, (i, row) in enumerate(imap_unordered(enricher, job, 3)):
            enricher.writerow(i, row, [(i + 1) * 2])
    assert sort_output(collect_csv(output_path)) == sort_output([
        ['name', 'index', 'x2'],
        ['Mary', '1', '4'],
        ['Julia', '2', '6'],
        ['John', '0', '2']
    ])
    # The listener saw the two recovered output rows and, on resume, the
    # filtered (already-done) input rows as (index, row) pairs.
    assert sort_output(log['output.row']) == sort_output([['Mary', '1', '4'], ['Julia', '2', '6']])
    assert sort_output(log['filter.row']) == sort_output([[1, ['Mary', 'Sue', '1']], [2, ['Julia', 'Stone', '2']]])
def test_stdout(self, capsys):
    """Enriching to sys.stdout appends after whatever was already printed."""
    sys.stdout.write('this,should,happen\n')
    with open('./test/resources/people.csv') as f:
        enricher = casanova.enricher(f, sys.stdout, add=('line',))
        for i, row in enumerate(enricher):
            enricher.writerow(row, [i])
    # capsys captured both the pre-existing line and the enriched output.
    result = list(csv.reader(StringIO(capsys.readouterr().out)))
    assert result == [
        ['this', 'should', 'happen'],
        ['name', 'surname', 'line'],
        ['John', 'Matthews', '0'],
        ['Mary', 'Sue', '1'],
        ['Julia', 'Stone', '2']
    ]
def test_combined_pos(self, tmpdir):
    """output_headers exposes positions over kept + added columns combined."""
    output_path = str(tmpdir.join('./enriched.csv'))
    with open('./test/resources/people.csv') as f, \
            open(output_path, 'w', newline='') as of:
        enricher = casanova.enricher(f, of, add=('line',), keep=('surname',))
        # Kept column first, added column after it.
        assert len(enricher.output_headers) == 2
        assert enricher.output_headers.surname == 0
        assert enricher.output_headers.line == 1
def test_batch_enricher(self, tmpdir):
output_path = str(tmpdir.join('./enriched.csv'))
with open('./test/resources/people.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.batch_enricher(f, of, add=('color',), keep=('surname',))
for row in enricher:
enricher.writebatch(row, [['blue'], ['red']], cursor='next')
enricher.writebatch(row, [['purple'], ['cyan']])
assert collect_csv(output_path) == [
['surname', 'cursor', 'color'],
['Matthews', '', 'blue'],
['Matthews', 'next', 'red'],
['Matthews', '', 'purple'],
['Matthews', 'end', 'cyan'],
['Sue', '', 'blue'],
['Sue', 'next', 'red'],
['Sue', '', 'purple'],
['Sue', 'end', 'cyan'],
['Stone', '', 'blue'],
['Stone', 'next', 'red'],
['Stone', '', 'purple'],
['Stone', 'end', 'cyan']
] | test/enricher_test.py | import csv
import gzip
import casanova
import pytest
import time
import sys
from io import StringIO
from collections import defaultdict
from quenouille import imap_unordered
from test.utils import collect_csv
from casanova.resuming import (
LastCellComparisonResumer,
RowCountResumer,
ThreadSafeResumer
)
from casanova.exceptions import (
EmptyFileError
)
class TestEnricher(object):
def test_exceptions(self, tmpdir):
with pytest.raises(EmptyFileError):
casanova.enricher(StringIO(''), StringIO(''))
output_path = str(tmpdir.join('./wrong-resumer.csv'))
with pytest.raises(TypeError):
resumer = ThreadSafeResumer(output_path)
with open('./test/resources/people.csv') as f, resumer:
casanova.enricher(f, resumer)
def test_basics(self, tmpdir):
output_path = str(tmpdir.join('./enriched.csv'))
with open('./test/resources/people.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.enricher(f, of, add=('line',))
for i, row in enumerate(enricher):
enricher.writerow(row, [i])
assert collect_csv(output_path) == [
['name', 'surname', 'line'],
['John', 'Matthews', '0'],
['Mary', 'Sue', '1'],
['Julia', 'Stone', '2']
]
def test_dialect(self, tmpdir):
output_path = str(tmpdir.join('./enriched.csv'))
with open('./test/resources/semicolons.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.enricher(f, of, add=('line',), delimiter=';')
for i, row in enumerate(enricher):
enricher.writerow(row, [i])
assert collect_csv(output_path) == [
['name', 'surname', 'line'],
['Rose', 'Philips', '0'],
['Luke', 'Atman', '1']
]
def test_gzip(self, tmpdir):
output_path = str(tmpdir.join('./enriched.csv'))
with gzip.open('./test/resources/people.csv.gz', 'rt') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.enricher(f, of, add=('line',))
for i, row in enumerate(enricher):
enricher.writerow(row, [i])
assert collect_csv(output_path) == [
['name', 'surname', 'line'],
['John', 'Matthews', '0'],
['Mary', 'Sue', '1'],
['Julia', 'Stone', '2']
]
def test_keep(self, tmpdir):
output_path = str(tmpdir.join('./enriched-keep.csv'))
with open('./test/resources/people.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.enricher(f, of, keep=('name',), add=('line',))
for i, row in enumerate(enricher):
enricher.writerow(row, [i])
assert collect_csv(output_path) == [
['name', 'line'],
['John', '0'],
['Mary', '1'],
['Julia', '2']
]
def test_padding(self, tmpdir):
output_path = str(tmpdir.join('./enriched-padding.csv'))
with open('./test/resources/people.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.enricher(f, of, keep=('name',), add=('line',))
for i, row in enumerate(enricher):
enricher.writerow(row)
assert collect_csv(output_path) == [
['name', 'line'],
['John', ''],
['Mary', ''],
['Julia', '']
]
def test_resumable(self, tmpdir):
log = defaultdict(list)
def listener(name, row):
log[name].append(list(row))
output_path = str(tmpdir.join('./enriched-resumable.csv'))
resumer = RowCountResumer(output_path, listener=listener)
with open('./test/resources/people.csv') as f, resumer:
enricher = casanova.enricher(
f, resumer,
add=('x2',),
keep=('name',)
)
row = next(iter(enricher))
enricher.writerow(row, [2])
assert collect_csv(output_path) == [
['name', 'x2'],
['John', '2']
]
with open('./test/resources/people.csv') as f, resumer:
enricher = casanova.enricher(
f, resumer,
add=('x2',),
keep=('name',)
)
for i, row in enumerate(enricher):
enricher.writerow(row, [(i + 2) * 2])
assert collect_csv(output_path) == [
['name', 'x2'],
['John', '2'],
['Mary', '4'],
['Julia', '6']
]
assert log == {
'output.row': [['John', '2']],
'input.row': [['John', 'Matthews']]
}
def test_resumable_last_cell_comparison(self, tmpdir):
log = defaultdict(list)
def listener(name, row):
log[name].append(list(row))
output_path = str(tmpdir.join('./enriched-resumable.csv'))
resumer = LastCellComparisonResumer(output_path, value_column=0, listener=listener)
with open('./test/resources/people.csv') as f, resumer:
enricher = casanova.enricher(
f, resumer,
add=('x2',),
keep=('name',)
)
row = next(iter(enricher))
enricher.writerow(row, [2])
assert collect_csv(output_path) == [
['name', 'x2'],
['John', '2']
]
with open('./test/resources/people.csv') as f, resumer:
enricher = casanova.enricher(
f, resumer,
add=('x2',),
keep=('name',)
)
for i, row in enumerate(enricher):
enricher.writerow(row, [(i + 2) * 2])
assert collect_csv(output_path) == [
['name', 'x2'],
['John', '2'],
['Mary', '4'],
['Julia', '6']
]
assert log == {'input.row': [['John', 'Matthews']]}
def test_threadsafe(self, tmpdir):
def job(payload):
i, row = payload
s = int(row[2])
time.sleep(s * .01)
return i, row
output_path = str(tmpdir.join('./enriched-resumable-threadsafe.csv'))
with open('./test/resources/people_unordered.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.threadsafe_enricher(
f, of,
add=('x2',),
keep=('name',)
)
for i, row in imap_unordered(enricher, job, 3):
enricher.writerow(i, row, [(i + 1) * 2])
def sort_output(o):
return sorted(tuple(i) for i in o)
assert sort_output(collect_csv(output_path)) == sort_output([
['name', 'index', 'x2'],
['Mary', '1', '4'],
['Julia', '2', '6'],
['John', '0', '2']
])
def test_threadsafe_cells(self, tmpdir):
output_path = str(tmpdir.join('./enriched-resumable-threadsafe.csv'))
with open('./test/resources/people_unordered.csv') as f, \
open(output_path, 'a+') as of:
enricher = casanova.threadsafe_enricher(
f, of,
add=('x2',),
keep=('name',)
)
names = [t for t in enricher.cells('name')]
assert sorted(names) == sorted([(0, 'John'), (1, 'Mary'), (2, 'Julia')])
with open('./test/resources/people_unordered.csv') as f, \
open(output_path, 'a+') as of:
enricher = casanova.threadsafe_enricher(
f, of,
add=('x2',),
keep=('name',)
)
names = [(i, v) for i, row, v in enricher.cells('name', with_rows=True)]
assert names == [(0, 'John'), (1, 'Mary'), (2, 'Julia')]
def test_threadsafe_resumable(self, tmpdir):
log = defaultdict(list)
def listener(name, row):
log[name].append(list(row))
def job(payload):
i, row = payload
s = int(row[2])
time.sleep(s * .1)
return i, row
output_path = str(tmpdir.join('./enriched-resumable-threadsafe.csv'))
resumer = ThreadSafeResumer(output_path, listener=listener)
with open('./test/resources/people_unordered.csv') as f, resumer:
enricher = casanova.threadsafe_enricher(
f, resumer,
add=('x2',),
keep=('name',)
)
for j, (i, row) in enumerate(imap_unordered(enricher, job, 3)):
enricher.writerow(i, row, [(i + 1) * 2])
if j == 1:
break
def sort_output(o):
return sorted(tuple(i) for i in o)
assert sort_output(collect_csv(output_path)) == sort_output([
['name', 'index', 'x2'],
['Mary', '1', '4'],
['Julia', '2', '6']
])
with open('./test/resources/people_unordered.csv') as f, resumer:
enricher = casanova.threadsafe_enricher(
f, resumer,
add=('x2',),
keep=('name',)
)
for j, (i, row) in enumerate(imap_unordered(enricher, job, 3)):
enricher.writerow(i, row, [(i + 1) * 2])
assert sort_output(collect_csv(output_path)) == sort_output([
['name', 'index', 'x2'],
['Mary', '1', '4'],
['Julia', '2', '6'],
['John', '0', '2']
])
assert sort_output(log['output.row']) == sort_output([['Mary', '1', '4'], ['Julia', '2', '6']])
assert sort_output(log['filter.row']) == sort_output([[1, ['Mary', 'Sue', '1']], [2, ['Julia', 'Stone', '2']]])
def test_stdout(self, capsys):
sys.stdout.write('this,should,happen\n')
with open('./test/resources/people.csv') as f:
enricher = casanova.enricher(f, sys.stdout, add=('line',))
for i, row in enumerate(enricher):
enricher.writerow(row, [i])
result = list(csv.reader(StringIO(capsys.readouterr().out)))
assert result == [
['this', 'should', 'happen'],
['name', 'surname', 'line'],
['John', 'Matthews', '0'],
['Mary', 'Sue', '1'],
['Julia', 'Stone', '2']
]
def test_combined_pos(self, tmpdir):
output_path = str(tmpdir.join('./enriched.csv'))
with open('./test/resources/people.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.enricher(f, of, add=('line',), keep=('surname',))
assert len(enricher.output_headers) == 2
assert enricher.output_headers.surname == 0
assert enricher.output_headers.line == 1
def test_batch_enricher(self, tmpdir):
output_path = str(tmpdir.join('./enriched.csv'))
with open('./test/resources/people.csv') as f, \
open(output_path, 'w', newline='') as of:
enricher = casanova.batch_enricher(f, of, add=('color',), keep=('surname',))
for row in enricher:
enricher.writebatch(row, [['blue'], ['red']], cursor='next')
enricher.writebatch(row, [['purple'], ['cyan']])
assert collect_csv(output_path) == [
['surname', 'cursor', 'color'],
['Matthews', '', 'blue'],
['Matthews', 'next', 'red'],
['Matthews', '', 'purple'],
['Matthews', 'end', 'cyan'],
['Sue', '', 'blue'],
['Sue', 'next', 'red'],
['Sue', '', 'purple'],
['Sue', 'end', 'cyan'],
['Stone', '', 'blue'],
['Stone', 'next', 'red'],
['Stone', '', 'purple'],
['Stone', 'end', 'cyan']
] | 0.238728 | 0.25363 |
import mock
from git_stacktrace.tests import base
from git_stacktrace import api
from git_stacktrace import git
class TestApi(base.TestCase):
@mock.patch('git_stacktrace.git.convert_since')
def test_convert_since(self, mocked_command):
    """api.convert_since forwards git.convert_since's result unchanged."""
    expected = "HASH1..HASH2"
    mocked_command.return_value = expected
    self.assertEqual(expected, api.convert_since('1.day'))
@mock.patch('git_stacktrace.git.valid_range')
def test_valid_range(self, mocked_command):
    """api.valid_range mirrors git.valid_range for both True and False."""
    expected = True
    mocked_command.return_value = expected
    self.assertEqual(expected, api.valid_range('hash1..hash2'))
    expected = False
    mocked_command.return_value = expected
    self.assertEqual(expected, api.valid_range('hash1..hash2'))
def get_traceback(self, java=False):
    """Parse and return one of the example trace fixtures (Java or Python)."""
    if java:
        trace_path = 'git_stacktrace/tests/examples/java1.trace'
    else:
        trace_path = 'git_stacktrace/tests/examples/python3.trace'
    with open(trace_path) as handle:
        return api.parse_trace(handle.readlines())
def setup_mocks(self, mock_files, mock_files_touched):
    """Point the git-layer mocks at a single commit touching one file."""
    mock_files_touched.return_value = {'hash2': [git.GitFile('common/utils/geo_utils.py', 'M')]}
    mock_files.return_value = ['common/utils/geo_utils.py']
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_python(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
    """Python trace without line matches: zero matched line numbers, but
    pickaxe still runs once per trace file."""
    # Note: the former `mock_files_touched.return_value = True` priming was
    # dead code — setup_mocks() overwrites return_value before any call.
    mock_line_match.return_value = False
    traceback = self.get_traceback()
    self.setup_mocks(mock_files, mock_files_touched)
    self.assertEqual(0, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
                     get_sorted_results()[0]._line_numbers_matched)
    self.assertEqual(3, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_java(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
    """Java trace with line matches: two matched line numbers and no
    pickaxe fallback."""
    # Note: the former `mock_files_touched.return_value = True` priming was
    # dead code — it was overwritten by the dict assignment below before use.
    mock_line_match.return_value = True
    traceback = self.get_traceback(java=True)
    mock_files.return_value = ['devdaily/src/main/java/com/devdaily/tests/ExceptionTest.java']
    mock_files_touched.return_value = {
        'hash2':
            [git.GitFile('devdaily/src/main/java/com/devdaily/tests/ExceptionTest.java', 'M')]}
    self.assertEqual(2, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
                     get_sorted_results()[0]._line_numbers_matched)
    self.assertEqual(0, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_fast(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
    """fast=True: only one pickaxe call is made instead of one per trace file."""
    # Note: the former `mock_files_touched.return_value = True` priming was
    # dead code — setup_mocks() overwrites return_value before any call.
    traceback = self.get_traceback()
    self.setup_mocks(mock_files, mock_files_touched)
    api.lookup_stacktrace(traceback, "hash1..hash3", fast=True)
    self.assertEqual(1, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_line_match(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = True
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
self.assertEqual(1, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(3, mock_pickaxe.call_count) | git_stacktrace/tests/test_api.py | import mock
from git_stacktrace.tests import base
from git_stacktrace import api
from git_stacktrace import git
class TestApi(base.TestCase):
@mock.patch('git_stacktrace.git.convert_since')
def test_convert_since(self, mocked_command):
expected = "HASH1..HASH2"
mocked_command.return_value = expected
self.assertEqual(expected, api.convert_since('1.day'))
@mock.patch('git_stacktrace.git.valid_range')
def test_valid_range(self, mocked_command):
expected = True
mocked_command.return_value = expected
self.assertEqual(expected, api.valid_range('hash1..hash2'))
expected = False
mocked_command.return_value = expected
self.assertEqual(expected, api.valid_range('hash1..hash2'))
def get_traceback(self, java=False):
if java:
with open('git_stacktrace/tests/examples/java1.trace') as f:
traceback = api.parse_trace(f.readlines())
else:
with open('git_stacktrace/tests/examples/python3.trace') as f:
traceback = api.parse_trace(f.readlines())
return traceback
def setup_mocks(self, mock_files, mock_files_touched):
mock_files_touched.return_value = {'hash2': [git.GitFile('common/utils/geo_utils.py', 'M')]}
mock_files.return_value = ['common/utils/geo_utils.py']
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_python(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = False
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
self.assertEqual(0, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(3, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_java(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = True
traceback = self.get_traceback(java=True)
mock_files.return_value = ['devdaily/src/main/java/com/devdaily/tests/ExceptionTest.java']
mock_files_touched.return_value = {
'hash2':
[git.GitFile('devdaily/src/main/java/com/devdaily/tests/ExceptionTest.java', 'M')]}
self.assertEqual(2, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(0, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_fast(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
api.lookup_stacktrace(traceback, "hash1..hash3", fast=True)
self.assertEqual(1, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_line_match(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = True
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
self.assertEqual(1, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(3, mock_pickaxe.call_count) | 0.596551 | 0.404449 |
import random
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.modules.distance import CosineSimilarity
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from model.attention import Attention
from model.embedding import GloveEmbedding
from data_preparation import get_stopword_ids
class CosineCoherence(nn.Module):
    """Baseline coherence scorer: mean cosine similarity between the
    length-normalized bag-of-words (summed GloVe) embeddings of consecutive
    utterances in a dialogue. Ignores dialogue acts entirely."""

    def __init__(self, args, device):
        super(CosineCoherence, self).__init__()
        self.seed = args.seed  # stored but not used in this module
        self.cos = CosineSimilarity(dim=-1)
        self.emb = GloveEmbedding(args)
        self.device = device

    def forward(self, x_dialogues, x_acts, lengths):
        """Returns (coherence scores, None); x_acts is unused, kept only for
        interface parity with the other models in this file."""
        # x_lengths: per-utterance token counts; presumably shaped
        # (batch, num_utterances) given the .view() below — TODO confirm
        # against the data loader.
        x_lengths = lengths[0]
        x = self.emb(x_dialogues)
        # x = x.mean(-2) #TODO: use lengths to get the mean, due to padding we'd otherwise get wrong values
        # Sum token embeddings per utterance, then divide by the true token
        # count so padded positions do not bias the mean.
        x = torch.sum(x, dim=-2)
        # NOTE(review): .type(torch.FloatTensor) produces a CPU float tensor;
        # this may conflict with GPU inputs — confirm against self.device usage.
        x = torch.div(x, x_lengths.view(x_lengths.size(0), x_lengths.size(1), 1).type(torch.FloatTensor))
        # Align each utterance with its successor: y = utterances[1:],
        # x = utterances[:-1], then score adjacent pairs.
        y = torch.narrow(x, dim=1, start=1, length=x.size(1)-1)
        x = torch.narrow(x, dim=1, start=0, length=x.size(1)-1)
        # Mean cosine similarity over adjacent pairs; no dialog-act logits,
        # hence the None second return value.
        scores = self.cos(x,y).mean(-1)
        return scores, None

    def __str__(self):
        return "cosine"
class MTL_Model3(nn.Module):
def __init__(self, args, device, collect_da_predictions=True):
    """Hierarchical bi-LSTM for multi-task dialogue-act tagging + coherence.

    Token level: ``bilstm_u`` + attention produce one vector per utterance;
    utterance level: ``bilstm_d`` + attention produce one vector per dialogue.
    ``ff_u`` classifies dialogue acts, ``ff_d`` scores coherence.

    :param args: hyper-parameter namespace (embedding_dim, lstm_sent_size,
        lstm_utt_size, lstm_layers, num_classes, loss, dropout_prob, ...)
    :param device: torch device used for hidden-state tensors in forward()
    :param collect_da_predictions: flag stored for external use
    """
    super(MTL_Model3, self).__init__()
    self.input_size = args.embedding_dim
    self.hidden_size_u = args.lstm_sent_size
    self.hidden_size_d = args.lstm_utt_size
    self.num_layers = args.lstm_layers
    self.num_dialogacts = args.num_classes
    self.device = device
    self.emb = GloveEmbedding(args)
    # 'da' loss means dialogue-act training only; coherence head is skipped.
    self.only_da = True if args.loss == 'da' else False
    # Token-level encoder (runs over every utterance of the batch at once).
    self.bilstm_u = nn.LSTM(self.input_size, self.hidden_size_u, self.num_layers, bidirectional=True, batch_first=True)
    # Orthogonal init for weight matrices, normal init for bias vectors.
    for param in self.bilstm_u.parameters():
        if len(param.shape) >= 2:
            init.orthogonal_(param.data)
        else:
            init.normal_(param.data)
    # Utterance-level encoder; input is the 2*H_u attention output below.
    self.bilstm_d = nn.LSTM(2*self.hidden_size_u, self.hidden_size_d, self.num_layers, bidirectional=True, batch_first=True)
    for param in self.bilstm_d.parameters():
        if len(param.shape) >= 2:
            init.orthogonal_(param.data)
        else:
            init.normal_(param.data)
    # Attention pooling over tokens (attn_u) and over utterances (attn_d).
    self.attn_u = Attention(2*self.hidden_size_u)
    self.attn_d = Attention(2*self.hidden_size_d)
    # Heads: dialogue-act classifier and scalar coherence scorer.
    self.ff_u = nn.Linear(2*self.hidden_size_u, self.num_dialogacts)
    self.ff_d = nn.Linear(2*self.hidden_size_d, 1)
    nn.init.normal_(self.ff_d.weight, mean=0, std=1)
    nn.init.normal_(self.ff_u.weight, mean=0, std=1)
    self.dropout_u = nn.Dropout(args.dropout_prob)
    self.collect_da_predictions = collect_da_predictions
    self.da_predictions = []
    # Add weights to the loss function to account for the distribution of
    # dialogue acts in DailyDialog; class 0 (padding) is weighted out.
    #nll_class_weights = torch.tensor([0.0, 2.1861911569232313, 3.4904300472491396, 6.120629125122877, 10.787031308006435]).to(device)
    if args.num_classes == 5:
        nll_class_weights = torch.tensor([0.0, 1.0, 1.0, 1.0, 1.0]).to(device)
        # self.nll = nn.NLLLoss(weight=nll_class_weights, reduction='none')
        self.nll = nn.CrossEntropyLoss(weight=nll_class_weights, reduction='mean')
    else:
        self.nll = nn.CrossEntropyLoss( reduction='mean')
def forward(self, x_dialogues, x_acts, lengths):
    """Run both task heads over a batch of dialogues.

    :param x_dialogues: token-id tensor; embedding output is 4-D
        (assumes (batch, n_utts, n_tokens[, emb_dim]) — TODO confirm)
    :param x_acts: per-utterance dialogue-act labels, shape (batch, n_utts)
    :param lengths: (token lengths per utterance, utterance counts per dialogue)
    :return: (coherence_scores, (da_predictions, da_loss))
    """
    s_lengths = lengths[0]
    d_lengths = lengths[1]
    x = self.emb(x_dialogues)
    old_size = (x.size(0), x.size(1), x.size(2), x.size(3))
    # Flatten (batch, n_utts) so every utterance is one LSTM sequence.
    ten_sents = x.view(old_size[0]*old_size[1], old_size[2], old_size[3])
    ten_acts = x_acts.view(old_size[0]*old_size[1])
    loss_da = torch.zeros(ten_acts.size(0)).to(self.device)
    h0 = torch.zeros(self.num_layers*2, ten_sents.size(0), self.hidden_size_u).to(self.device)# 2 for bidirection
    c0 = torch.zeros(self.num_layers*2, ten_sents.size(0), self.hidden_size_u).to(self.device)
    # Pack so padded token positions are skipped by the LSTM.
    ten_sents = pack_padded_sequence(ten_sents, s_lengths.view(s_lengths.size(0)*s_lengths.size(1)), batch_first=True, enforce_sorted=False)
    out, _ = self.bilstm_u(ten_sents, (h0, c0))
    out, _ = pad_packed_sequence(out, batch_first=True)
    # Attention-pool token states into one vector per utterance.
    H = self.attn_u(out)
    # view_size1 = int(H.size(0)/old_size[1])
    # Restore the (batch, n_utts, 2*H_u) layout for the dialogue encoder.
    H1 = H.view(old_size[0], old_size[1], H.size(1))
    H_u = self.dropout_u(H1)
    # Dialogue-act head: logits over all utterances of the batch.
    m = self.ff_u(H_u)
    m = m.view(m.size(0)* m.size(1), m.size(2))
    loss_da = self.nll(m, ten_acts)
    pda = F.log_softmax(m, 1)
    _, da_pred = torch.max(pda, 1)
    da_pred = da_pred.view(old_size[0], old_size[1])
    # loss_da = self.nll(pda.view(old_size[0] * old_size[1], pda.size(2)), ten_acts)
    # loss2 = torch.sum(loss_da.view(old_size[0], old_size[1]), dim=1)
    # H = H.unsqueeze(0)
    if not self.only_da:
        # Coherence head: encode the utterance sequence of each dialogue.
        h0 = torch.zeros(self.num_layers*2, H1.size(0), self.hidden_size_d).to(self.device)# 2 for bidirection
        c0 = torch.zeros(self.num_layers*2, H1.size(0), self.hidden_size_d).to(self.device)
        H1 = pack_padded_sequence(H1, d_lengths, batch_first=True, enforce_sorted=False)
        out, _ = self.bilstm_d(H1, (h0, c0))
        out, _ = pad_packed_sequence(out, batch_first=True)
        hd = self.attn_d(out)
        s_coh = self.ff_d(hd).squeeze(1)
    else:
        # DA-only training: coherence output is deliberately random noise.
        s_coh = torch.randn(old_size[0]).to(self.device)
    return (s_coh, (da_pred, loss_da))
def __str__(self):
return "model-3" | model/mtl_models.py | import random
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.modules.distance import CosineSimilarity
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from model.attention import Attention
from model.embedding import GloveEmbedding
from data_preparation import get_stopword_ids
class CosineCoherence(nn.Module):
def __init__(self, args, device):
super(CosineCoherence, self).__init__()
self.seed = args.seed
self.cos = CosineSimilarity(dim=-1)
self.emb = GloveEmbedding(args)
self.device = device
def forward(self, x_dialogues, x_acts, lengths):
x_lengths = lengths[0]
x = self.emb(x_dialogues)
# x = x.mean(-2) #TODO: use lengths to get the mean, due to padding we'd otherwise get wrong values
x = torch.sum(x, dim=-2)
x = torch.div(x, x_lengths.view(x_lengths.size(0), x_lengths.size(1), 1).type(torch.FloatTensor))
y = torch.narrow(x, dim=1, start=1, length=x.size(1)-1)
x = torch.narrow(x, dim=1, start=0, length=x.size(1)-1)
scores = self.cos(x,y).mean(-1)
return scores, None
def __str__(self):
return "cosine"
class MTL_Model3(nn.Module):
def __init__(self, args, device, collect_da_predictions=True):
super(MTL_Model3, self).__init__()
self.input_size = args.embedding_dim
self.hidden_size_u = args.lstm_sent_size
self.hidden_size_d = args.lstm_utt_size
self.num_layers = args.lstm_layers
self.num_dialogacts = args.num_classes
self.device = device
self.emb = GloveEmbedding(args)
self.only_da = True if args.loss == 'da' else False
self.bilstm_u = nn.LSTM(self.input_size, self.hidden_size_u, self.num_layers, bidirectional=True, batch_first=True)
for param in self.bilstm_u.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
self.bilstm_d = nn.LSTM(2*self.hidden_size_u, self.hidden_size_d, self.num_layers, bidirectional=True, batch_first=True)
for param in self.bilstm_d.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
self.attn_u = Attention(2*self.hidden_size_u)
self.attn_d = Attention(2*self.hidden_size_d)
self.ff_u = nn.Linear(2*self.hidden_size_u, self.num_dialogacts)
self.ff_d = nn.Linear(2*self.hidden_size_d, 1)
nn.init.normal_(self.ff_d.weight, mean=0, std=1)
nn.init.normal_(self.ff_u.weight, mean=0, std=1)
self.dropout_u = nn.Dropout(args.dropout_prob)
self.collect_da_predictions = collect_da_predictions
self.da_predictions = []
#add weights to the loss function to account for the distribution of dialog acts in daily dialog
#nll_class_weights = torch.tensor([0.0, 2.1861911569232313, 3.4904300472491396, 6.120629125122877, 10.787031308006435]).to(device)
if args.num_classes == 5:
nll_class_weights = torch.tensor([0.0, 1.0, 1.0, 1.0, 1.0]).to(device)
# self.nll = nn.NLLLoss(weight=nll_class_weights, reduction='none')
self.nll = nn.CrossEntropyLoss(weight=nll_class_weights, reduction='mean')
else:
self.nll = nn.CrossEntropyLoss( reduction='mean')
def forward(self, x_dialogues, x_acts, lengths):
s_lengths = lengths[0]
d_lengths = lengths[1]
x = self.emb(x_dialogues)
old_size = (x.size(0), x.size(1), x.size(2), x.size(3))
ten_sents = x.view(old_size[0]*old_size[1], old_size[2], old_size[3])
ten_acts = x_acts.view(old_size[0]*old_size[1])
loss_da = torch.zeros(ten_acts.size(0)).to(self.device)
h0 = torch.zeros(self.num_layers*2, ten_sents.size(0), self.hidden_size_u).to(self.device)# 2 for bidirection
c0 = torch.zeros(self.num_layers*2, ten_sents.size(0), self.hidden_size_u).to(self.device)
ten_sents = pack_padded_sequence(ten_sents, s_lengths.view(s_lengths.size(0)*s_lengths.size(1)), batch_first=True, enforce_sorted=False)
out, _ = self.bilstm_u(ten_sents, (h0, c0))
out, _ = pad_packed_sequence(out, batch_first=True)
H = self.attn_u(out)
# view_size1 = int(H.size(0)/old_size[1])
H1 = H.view(old_size[0], old_size[1], H.size(1))
H_u = self.dropout_u(H1)
m = self.ff_u(H_u)
m = m.view(m.size(0)* m.size(1), m.size(2))
loss_da = self.nll(m, ten_acts)
pda = F.log_softmax(m, 1)
_, da_pred = torch.max(pda, 1)
da_pred = da_pred.view(old_size[0], old_size[1])
# loss_da = self.nll(pda.view(old_size[0] * old_size[1], pda.size(2)), ten_acts)
# loss2 = torch.sum(loss_da.view(old_size[0], old_size[1]), dim=1)
# H = H.unsqueeze(0)
if not self.only_da:
h0 = torch.zeros(self.num_layers*2, H1.size(0), self.hidden_size_d).to(self.device)# 2 for bidirection
c0 = torch.zeros(self.num_layers*2, H1.size(0), self.hidden_size_d).to(self.device)
H1 = pack_padded_sequence(H1, d_lengths, batch_first=True, enforce_sorted=False)
out, _ = self.bilstm_d(H1, (h0, c0))
out, _ = pad_packed_sequence(out, batch_first=True)
hd = self.attn_d(out)
s_coh = self.ff_d(hd).squeeze(1)
else:
s_coh = torch.randn(old_size[0]).to(self.device)
return (s_coh, (da_pred, loss_da))
def __str__(self):
return "model-3" | 0.826887 | 0.380759 |
import re
import os
import json
import inflect
import nltk
import numpy as np
inflect_eng = inflect.engine()
def get_data_set_info_path(data_folder):
    """
    :param data_folder: name of data folder, e.g. 'dataset1'
    :return: absolute path to JSON file containing information about data set
    """
    here = os.path.dirname(__file__)
    relative = '../../data/%s/external/data_info.json' % data_folder
    return os.path.join(here, relative)
def get_external_data_path(data_folder):
    """
    :param data_folder: name of data folder, e.g. 'dataset1'
    :return: absolute path to external data set file for this folder name
    """
    here = os.path.dirname(__file__)
    relative = '../../data/%s/external/training_set.txt' % data_folder
    return os.path.join(here, relative)
def get_processed_data_path(data_folder):
    """
    :param data_folder: name of data folder, e.g. 'dataset1'
    :type data_folder: string (path to a folder)
    :return: absolute path to processed data set file for this folder name
    """
    here = os.path.dirname(__file__)
    relative = '../../data/%s/processed/training_set.txt' % data_folder
    return os.path.join(here, relative)
def preprocess_sentence(sentence):
    """Normalize a raw (already lower-cased) sentence into a token list.

    Mentions (@...), entities (&...) and links (http.../www...) are dropped,
    every non-alphanumeric character becomes a separator, and small numbers
    (<= 21) are spelled out as words via ``inflect``.

    :param sentence: lower-cased input string
    :return: list of cleaned word tokens
    """
    # Twitter users often forget the space before these special tokens,
    # so force a separator in front of each of them.
    sentence = sentence.replace('http', ' http').replace('www', ' www').replace('@', ' @').replace('#', ' #')
    # Drop mentions, entities and URLs entirely.
    sentence = ' '.join(filter(lambda w: not (w.startswith('@') or w.startswith('&') or
                                              w.startswith('http') or w.startswith('www')), sentence.split()))
    alpha_numeric = re.compile("[^a-z0-9]")
    new_sentence = []
    # NOTE: str.split() never yields whitespace-only tokens, so the old
    # ``if w.isspace(): continue`` guard was dead code and has been removed.
    for w in alpha_numeric.sub(' ', sentence).split():
        if w.isdigit() and int(w) <= 21:  # convert small numbers to words using inflect package
            new_sentence.append(inflect_eng.number_to_words(int(w)))
        else:
            new_sentence.append(w)
    return new_sentence
def string_to_words_list(sentence):
    """Strip and lower-case *sentence*, then tokenize it via preprocess_sentence."""
    normalized = sentence.strip().lower()
    return preprocess_sentence(normalized)
def make(data_file_path, output_file_path):
    """Generate a processed data-set file.

    Each input line of the form "<label> <sentence>" is rewritten as
    "<label> <w1,w2,...>" with the sentence tokenized by
    ``string_to_words_list``.

    :param data_file_path: relative path to file with data set
    :param output_file_path: relative path to which processed data should be written
    :type data_file_path: string (path to data file)
    :type output_file_path: string (path to output file)
    """
    output_dir = os.path.dirname(output_file_path)
    # Guard against '' (output in the cwd): os.makedirs('') would raise.
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Context managers close both files even on error; the input handle
    # previously leaked.
    with open(output_file_path, 'w') as output_data_file:
        with open(data_file_path, 'r') as input_data_file:
            for line in input_data_file:
                category, sentence = line.split(' ', 1)
                keywords = string_to_words_list(sentence)
                output_data_file.write("{0} {1}\n".format(category, ','.join(keywords)))
    # Parenthesized so this prints identically under Python 2 and 3.
    print("Processed data written to " + output_file_path)
def read(data_file_path, data_info):
    """Read a raw data-set file into parallel numpy arrays.

    :param data_file_path: path to a file with "<label> <sentence>" lines
    :param data_info: dict with at least a "Size" entry bounding the line count
    :return: (labels, sentences) — read-only numpy arrays trimmed to the
        number of lines whose sentence survived preprocessing
    """
    data_set_size = data_info["Size"]
    labels = np.empty(data_set_size, dtype=np.uint8)
    sentences = np.empty(data_set_size, dtype=object)
    count = 0
    # Context manager: the file handle previously leaked.
    with open(data_file_path, 'r') as data_file:
        for line in data_file:
            label, rest = line.split(' ', 1)
            sentence = string_to_words_list(rest)
            if len(sentence) > 0:  # skip lines that filtered to nothing
                sentences[count] = sentence
                labels[count] = int(label)
                count += 1
    labels = labels[:count]
    sentences = sentences[:count]
    # Freeze both arrays so callers cannot mutate the shared data set.
    labels.flags.writeable = False
    sentences.flags.writeable = False
    return labels, sentences
def get_unique_words(data_file_path):
    """Return the set of distinct keywords in a processed data-set file.

    Each line has the form "<label> <w1,w2,...>" as produced by ``make``.

    :param data_file_path: path to a processed data file
    :return: set of unique keyword strings
    """
    words = set()
    # Context manager: the file handle previously leaked.
    with open(data_file_path, 'r') as data_file:
        for line in data_file:
            # BUGFIX: the trailing newline used to stay attached to the last
            # keyword, so "b" and "b\n" were counted as different words.
            line_words = line.rstrip('\n').split(' ')[1].split(',')
            words.update(line_words)
    return words
def read_data_info(data_set_info_path):
    """Load the data-set description JSON (e.g. {"Size": ...}) as a dict."""
    with open(data_set_info_path) as info_file:
        contents = info_file.read()
    return json.loads(contents)
def run_interactive_processed_data_generation():
    """Console loop: repeatedly ask for a data-set folder name and regenerate
    its processed file, until the user types 'quit' or 'exit'.

    NOTE(review): Python 2 only (``raw_input`` and the print statement below).
    """
    while True:
        command = raw_input("Type data set folder name to generate data set or 'quit' to quit script: ")
        if command.lower() == "quit" or command.lower() == "exit":
            break
        # Map the folder name to its external (input) and processed (output) files.
        input_file_path = get_external_data_path(command)
        output_file_path = get_processed_data_path(command)
        if not os.path.isfile(input_file_path):
            print "Path {0} does not exist".format(input_file_path)
        else:
            make(input_file_path, output_file_path)
if __name__ == "__main__":
"""
Main method allows to generate processed data sets in interactive mode.
"""
run_interactive_processed_data_generation() | src/data/dataset.py | import re
import os
import json
import inflect
import nltk
import numpy as np
inflect_eng = inflect.engine()
def get_data_set_info_path(data_folder):
"""
:param data_folder: name of data folder, e.g. 'dataset1'
:return: absolute path to JSON file containing information about data set
"""
return os.path.join(os.path.dirname(__file__), '../../data/{0}/external/data_info.json'.format(data_folder))
def get_external_data_path(data_folder):
"""
:param data_folder: name of data folder, e.g. 'dataset1'
:return: absolute path to external data set file for this folder name
"""
return os.path.join(os.path.dirname(__file__), '../../data/{0}/external/training_set.txt'.format(data_folder))
def get_processed_data_path(data_folder):
"""
:param data_folder: name of data folder, e.g. 'dataset1'
:type data_folder: string (path to a folder)
:return: absolute path to processed data set file for this folder name
"""
return os.path.join(os.path.dirname(__file__), '../../data/{0}/processed/training_set.txt'.format(data_folder))
def preprocess_sentence(sentence):
"""Adjusts sentence by filtering words and correcting some common issues """
# twitter users often forget to put space before special words
sentence = sentence.replace('http', ' http').replace('www', ' www').replace('@', ' @').replace('#', ' #')
new_sentence = []
alpha_numeric = re.compile("[^a-z0-9]")
sentence = ' '.join(filter(lambda w: not (w.startswith('@') or w.startswith('&') or
w.startswith('http') or w.startswith('www')), sentence.split()))
for w in alpha_numeric.sub(' ', sentence).split():
if w.isspace():
continue
if w.isdigit() and int(w) <= 21: # convert small numbers to words using inflect package
new_sentence.append(inflect_eng.number_to_words(int(w)))
else:
new_sentence.append(w)
return new_sentence
def string_to_words_list(sentence):
words = preprocess_sentence(sentence.strip().lower()) # filter words and correct some common issues in sentence
return words
def make(data_file_path, output_file_path):
"""
Generates files with data represented as vectors of words of fixed length.
Words shorter than required length will be extended by empty words.
Words that are too long will be trimmed.
:param data_file_path: relative path to file with data set
:param output_file_path: relative path to which processed data should be written
:type data_file_path: string (path to data file)
:type output_file_path: int (non-negative)
"""
if not os.path.exists(os.path.dirname(output_file_path)):
os.makedirs(os.path.dirname(output_file_path))
with open(output_file_path, 'w') as output_data_file:
for line in open(data_file_path, 'r'):
category, sentence = line.split(' ', 1)
keywords = string_to_words_list(sentence)
output_data_file.write("{0} {1}\n".format(category, ','.join(keywords)))
print "Processed data written to " + output_file_path
def read(data_file_path, data_info):
data_set_size = data_info["Size"]
labels = np.empty(data_set_size, dtype=np.uint8)
sentences = np.empty(data_set_size, dtype=object)
count = 0
for line in open(data_file_path, 'r'):
label, rest = line.split(' ', 1)
sentence = string_to_words_list(rest)
if len(sentence) > 0:
sentences[count] = sentence
labels[count] = int(label)
count += 1
labels = labels[:count]
sentences = sentences[:count]
labels.flags.writeable = False
sentences.flags.writeable = False
return labels, sentences
def get_unique_words(data_file_path):
words = set()
for line in open(data_file_path, 'r'):
line_words = line.split(' ')[1].split(',')
for word in line_words:
words.add(word)
return words
def read_data_info(data_set_info_path):
with open(data_set_info_path) as data_file:
return json.load(data_file)
def run_interactive_processed_data_generation():
while True:
command = raw_input("Type data set folder name to generate data set or 'quit' to quit script: ")
if command.lower() == "quit" or command.lower() == "exit":
break
input_file_path = get_external_data_path(command)
output_file_path = get_processed_data_path(command)
if not os.path.isfile(input_file_path):
print "Path {0} does not exist".format(input_file_path)
else:
make(input_file_path, output_file_path)
if __name__ == "__main__":
"""
Main method allows to generate processed data sets in interactive mode.
"""
run_interactive_processed_data_generation() | 0.385722 | 0.309806 |
from __future__ import absolute_import
from six.moves import range
import os
import numpy as np
from replay_memory import ReplayMemory
from sampler import Sampler, ObsSampler
from learner import QLearner, q_cnn
from explorer import LinearDecayEGreedyExplorer
from trainer import Trainer
from validator import Validator
from output_path import OutputPath
from nnabla.ext_utils import get_extension_context
import nnabla as nn
from nnabla.monitor import Monitor
from tensorboardX import SummaryWriter
def get_args():
    """Parse the command-line options of the Atari DQN training script.

    :return: argparse.Namespace with environment, schedule, rendering and
        device settings (defaults reproduce the standard DQN Atari setup).
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--gym-env', '-g', default='BreakoutNoFrameskip-v4')
    parser.add_argument('--num_epochs', '-E', type=int, default=1000000)
    parser.add_argument('--num_episodes', '-T', type=int, default=10)
    parser.add_argument('--num_val_episodes', '-V', type=int, default=1)
    parser.add_argument('--num_eval_steps', '-S', type=int, default=125000*4)
    parser.add_argument('--inter_eval_steps', '-i', type=int, default=250000*4)
    parser.add_argument('--num_frames', '-f', type=int, default=4)
    parser.add_argument('--render-train', '-r', action='store_true')
    parser.add_argument('--render-val', '-v', action='store_true')
    parser.add_argument('--extension', '-e', default='cpu')
    parser.add_argument('--device-id', '-d', default='0')
    return parser.parse_args()
def main():
    """Entry point: wire up the Atari environment, replay memories, learner,
    explorer, sampler and trainer, then run the training loop."""
    args = get_args()
    # Select the compute backend (cpu/cudnn/...) before any graph is built.
    nn.set_default_context(get_extension_context(
        args.extension, device_id=args.device_id))
    if args.log_path:
        output_path = OutputPath(args.log_path)
    else:
        output_path = OutputPath()
    # Both nnabla's Monitor and TensorBoard write into the same directory.
    monitor = Monitor(output_path.path)
    tbw = SummaryWriter(output_path.path)
    # Create an atari env. (Deferred import: atari_utils pulls in gym.)
    from atari_utils import make_atari_deepmind
    env = make_atari_deepmind(args.gym_env, valid=False)
    env_val = make_atari_deepmind(args.gym_env, valid=True)
    print('Observation:', env.observation_space)
    print('Action:', env.action_space)
    # 10000 * 4 frames
    # NOTE(review): comment above looks stale — max_memory is num_frames for
    # validation and 40000 for training; confirm intended sizes.
    val_replay_memory = ReplayMemory(
        env.observation_space.shape, env.action_space.shape, max_memory=args.num_frames)
    replay_memory = ReplayMemory(
        env.observation_space.shape, env.action_space.shape, max_memory=40000)
    learner = QLearner(q_cnn, env.action_space.n, sync_freq=1000, save_freq=250000,
                       gamma=0.99, learning_rate=1e-4, name_q='q', save_path=output_path)
    # Epsilon-greedy exploration annealed linearly from 1.0 to 0.01 over 1e6 steps.
    explorer = LinearDecayEGreedyExplorer(
        env.action_space.n, eps_start=1.0, eps_end=0.01, eps_steps=1e6,
        q_builder=q_cnn, name='q')
    sampler = Sampler(args.num_frames)
    obs_sampler = ObsSampler(args.num_frames)
    validator = Validator(env_val, val_replay_memory, explorer, obs_sampler,
                          num_episodes=args.num_val_episodes, num_eval_steps=args.num_eval_steps,
                          render=args.render_val, monitor=monitor, tbw=tbw)
    trainer_with_validator = Trainer(env, replay_memory, learner, sampler, explorer, obs_sampler, inter_eval_steps=args.inter_eval_steps,
                                     num_episodes=args.num_episodes, train_start=10000, batch_size=32,
                                     render=args.render_train, validator=validator, monitor=monitor, tbw=tbw)
    for e in range(args.num_epochs):
        trainer_with_validator.step()
if __name__ == '__main__':
main() | reinforcement_learning/dqn/train_atari.py |
from __future__ import absolute_import
from six.moves import range
import os
import numpy as np
from replay_memory import ReplayMemory
from sampler import Sampler, ObsSampler
from learner import QLearner, q_cnn
from explorer import LinearDecayEGreedyExplorer
from trainer import Trainer
from validator import Validator
from output_path import OutputPath
from nnabla.ext_utils import get_extension_context
import nnabla as nn
from nnabla.monitor import Monitor
from tensorboardX import SummaryWriter
def get_args():
import argparse
p = argparse.ArgumentParser()
p.add_argument('--gym-env', '-g', default='BreakoutNoFrameskip-v4')
p.add_argument('--num_epochs', '-E', type=int, default=1000000)
p.add_argument('--num_episodes', '-T', type=int, default=10)
p.add_argument('--num_val_episodes', '-V', type=int, default=1)
p.add_argument('--num_eval_steps', '-S', type=int, default=125000*4)
p.add_argument('--inter_eval_steps', '-i', type=int, default=250000*4)
p.add_argument('--num_frames', '-f', type=int, default=4)
p.add_argument('--render-train', '-r', action='store_true')
p.add_argument('--render-val', '-v', action='store_true')
p.add_argument('--extension', '-e', default='cpu')
p.add_argument('--device-id', '-d', default='0')
p.add_argument('--log_path', '-l', default='./tmp.output')
return p.parse_args()
def main():
args = get_args()
nn.set_default_context(get_extension_context(
args.extension, device_id=args.device_id))
if args.log_path:
output_path = OutputPath(args.log_path)
else:
output_path = OutputPath()
monitor = Monitor(output_path.path)
tbw = SummaryWriter(output_path.path)
# Create an atari env.
from atari_utils import make_atari_deepmind
env = make_atari_deepmind(args.gym_env, valid=False)
env_val = make_atari_deepmind(args.gym_env, valid=True)
print('Observation:', env.observation_space)
print('Action:', env.action_space)
# 10000 * 4 frames
val_replay_memory = ReplayMemory(
env.observation_space.shape, env.action_space.shape, max_memory=args.num_frames)
replay_memory = ReplayMemory(
env.observation_space.shape, env.action_space.shape, max_memory=40000)
learner = QLearner(q_cnn, env.action_space.n, sync_freq=1000, save_freq=250000,
gamma=0.99, learning_rate=1e-4, name_q='q', save_path=output_path)
explorer = LinearDecayEGreedyExplorer(
env.action_space.n, eps_start=1.0, eps_end=0.01, eps_steps=1e6,
q_builder=q_cnn, name='q')
sampler = Sampler(args.num_frames)
obs_sampler = ObsSampler(args.num_frames)
validator = Validator(env_val, val_replay_memory, explorer, obs_sampler,
num_episodes=args.num_val_episodes, num_eval_steps=args.num_eval_steps,
render=args.render_val, monitor=monitor, tbw=tbw)
trainer_with_validator = Trainer(env, replay_memory, learner, sampler, explorer, obs_sampler, inter_eval_steps=args.inter_eval_steps,
num_episodes=args.num_episodes, train_start=10000, batch_size=32,
render=args.render_train, validator=validator, monitor=monitor, tbw=tbw)
for e in range(args.num_epochs):
trainer_with_validator.step()
if __name__ == '__main__':
main() | 0.650911 | 0.126353 |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
from copy import deepcopy
from indra.preassembler.hierarchy_manager import hierarchies, HierarchyManager
from indra.statements import get_valid_location, InvalidLocationError, Agent
from indra.util import unicode_strs
ent_hierarchy = hierarchies['entity']
mod_hierarchy = hierarchies['modification']
act_hierarchy = hierarchies['activity']
comp_hierarchy = hierarchies['cellular_component']
def test_hierarchy_unicode():
    # Test all the hierarchies except the comp_hierarchy, which is an
    # RDF graph; the closures of the others must contain only unicode strings.
    assert unicode_strs((ent_hierarchy.isa_closure,
                         ent_hierarchy.partof_closure))
    assert unicode_strs((mod_hierarchy.isa_closure,
                         mod_hierarchy.partof_closure))
    assert unicode_strs((act_hierarchy.isa_closure,
                         act_hierarchy.partof_closure))
def test_isa_entity():
    # A gene (BRAF) is-a member of its FamPlex family (RAF).
    assert(ent_hierarchy.isa('HGNC', 'BRAF', 'FPLX', 'RAF'))

def test_isa_entity2():
    # Sibling genes are not related by isa.
    assert(not ent_hierarchy.isa('HGNC', 'BRAF', 'HGNC', 'ARAF'))

def test_isa_entity3():
    # isa is directional: the family is not an instance of a member.
    assert(not ent_hierarchy.isa('FPLX', 'RAF', 'HGNC', 'BRAF'))

def test_partof_entity():
    assert ent_hierarchy.partof('FPLX', 'HIF_alpha', 'FPLX', 'HIF')

def test_isa_or_partof_entity():
    # Combined relation covers membership through complexes and families.
    assert ent_hierarchy.isa_or_partof('HGNC', 'PRKAG1', 'FPLX', 'AMPK')

def test_partof_entity_not():
    assert not ent_hierarchy.partof('FPLX', 'HIF1', 'FPLX', 'HIF_alpha')

def test_isa_mod():
    # Every specific modification is-a generic 'modification'.
    assert(mod_hierarchy.isa('INDRA_MODS', 'phosphorylation',
                             'INDRA_MODS', 'modification'))

def test_isa_mod_not():
    assert(not mod_hierarchy.isa('INDRA_MODS', 'phosphorylation',
                                 'INDRA_MODS', 'ubiquitination'))

def test_isa_activity():
    assert act_hierarchy.isa('INDRA_ACTIVITIES', 'kinase',
                             'INDRA_ACTIVITIES', 'activity')

def test_isa_activity_not():
    assert not act_hierarchy.isa('INDRA_ACTIVITIES', 'kinase',
                                 'INDRA_ACTIVITIES', 'phosphatase')

def test_partof_comp():
    assert comp_hierarchy.partof('INDRA_LOCATIONS', 'cytoplasm',
                                 'INDRA_LOCATIONS', 'cell')

def test_partof_comp_not():
    assert not comp_hierarchy.partof('INDRA_LOCATIONS', 'cell',
                                     'INDRA_LOCATIONS', 'cytoplasm')

def test_partof_comp_none():
    # A None location acts as the universal parent.
    assert comp_hierarchy.partof('INDRA_LOCATIONS', 'cytoplasm',
                                 'INDRA_LOCATIONS', None)

def test_partof_comp_none_none():
    assert comp_hierarchy.partof('INDRA_LOCATIONS', None,
                                 'INDRA_LOCATIONS', None)

def test_partof_comp_none_not():
    # ...but None is not part of any specific location.
    assert not comp_hierarchy.partof('INDRA_LOCATIONS', None,
                                     'INDRA_LOCATIONS', 'cytoplasm')
def test_get_children():
    raf = 'http://identifiers.org/fplx/RAF'
    braf = 'http://identifiers.org/hgnc.symbol/BRAF'
    mapk = 'http://identifiers.org/fplx/MAPK'
    ampk = 'http://identifiers.org/fplx/AMPK'
    # Look up RAF
    rafs = ent_hierarchy.get_children(raf)
    # Should get three family members
    assert isinstance(rafs, list)
    assert len(rafs) == 3
    assert unicode_strs(rafs)
    # The lookup of a gene-level entity should not return any additional
    # entities
    brafs = ent_hierarchy.get_children(braf)
    assert isinstance(brafs, list)
    assert len(brafs) == 0
    assert unicode_strs(brafs)
    mapks = ent_hierarchy.get_children(mapk)
    assert len(mapks) == 12
    assert unicode_strs(mapks)
    # Make sure we can also do this in a case involving both family and complex
    # relationships
    ampks = ent_hierarchy.get_children(ampk)
    assert len(ampks) == 22
    ag_none = ''
    # An empty URI must yield an empty child list, not an error.
    none_children = ent_hierarchy.get_children('')
    assert isinstance(none_children, list)
    assert len(none_children) == 0
def test_mtorc_children():
    # The two mTOR complexes must not leak each other's specific subunits.
    mtorc1 = 'http://identifiers.org/fplx/mTORC1'
    mtorc2 = 'http://identifiers.org/fplx/mTORC2'
    ch1 = ent_hierarchy.get_children(mtorc1)
    ch2 = ent_hierarchy.get_children(mtorc2)
    assert('http://identifiers.org/hgnc.symbol/RICTOR' not in ch1)
    assert('http://identifiers.org/hgnc.symbol/RPTOR' not in ch2)

def test_mtorc_get_parents():
    # RICTOR belongs to mTORC2 only.
    rictor = 'http://identifiers.org/hgnc.symbol/RICTOR'
    p = ent_hierarchy.get_parents(rictor, 'all')
    assert(len(p) == 1)
    assert(list(p)[0] == 'http://identifiers.org/fplx/mTORC2')

def test_mtorc_transitive_closure():
    # The precomputed partof closure must agree with get_parents.
    rictor = 'http://identifiers.org/hgnc.symbol/RICTOR'
    p = ent_hierarchy.partof_closure.get(rictor)
    assert(len(p) == 1)
    assert(p[0] == 'http://identifiers.org/fplx/mTORC2')

def test_mtorc_partof_no_tc():
    # With the closures wiped, partof must still resolve via graph traversal.
    ent_hierarchy_no_tc = deepcopy(ent_hierarchy)
    ent_hierarchy_no_tc.isa_closure = {}
    ent_hierarchy_no_tc.partof_closure = {}
    assert(ent_hierarchy_no_tc.partof('HGNC', 'RPTOR', 'FPLX', 'mTORC1'))
    assert(not ent_hierarchy_no_tc.partof('HGNC', 'RPTOR', 'FPLX', 'mTORC2'))

def test_erk_isa_no_tc():
    # Same fallback check for isa without precomputed closures.
    ent_hierarchy_no_tc = deepcopy(ent_hierarchy)
    ent_hierarchy_no_tc.isa_closure = {}
    ent_hierarchy_no_tc.partof_closure = {}
    assert(ent_hierarchy_no_tc.isa('HGNC', 'MAPK1', 'FPLX', 'MAPK'))
    assert(not ent_hierarchy_no_tc.isa('HGNC', 'MAPK1', 'FPLX', 'JNK'))

def test_get_parents():
    # 'all' / 'immediate' / 'top' select different slices of the ancestry.
    prkaa1 = 'http://identifiers.org/hgnc.symbol/PRKAA1'
    ampk = 'http://identifiers.org/fplx/AMPK'
    p1 = ent_hierarchy.get_parents(prkaa1, 'all')
    assert(len(p1) == 8)
    assert(ampk in p1)
    p2 = ent_hierarchy.get_parents(prkaa1, 'immediate')
    assert(len(p2) == 7)
    # AMPK is a top-level parent, so it is absent from the immediate set.
    assert (ampk not in p2)
    p3 = ent_hierarchy.get_parents(prkaa1, 'top')
    assert(len(p3) == 1)
    assert (ampk in p3)
def test_load_eidos_hierarchy():
    # Load the Eidos ontology RDF shipped with the source tree and spot-check
    # a few isa relations in both directions.
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    eidos_ns = 'https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#'
    hm = HierarchyManager(eidos_ont, True, True)
    assert hm.isa_closure
    eidos_isa = lambda a, b: hm.isa('EIDOS', a, 'EIDOS', b)
    assert eidos_isa('events/human/conflict/war',
                     'events/human/conflict')
    assert not eidos_isa('events/human/conflict/war',
                         'events/human/human_migration/migration')
    assert eidos_isa('entities/measurement/distance/meter',
                     'entities/measurement')
    assert eidos_isa('events/natural/weather/storm/tornado',
                     'events')
    # isa must not hold upward-to-downward.
    assert not eidos_isa('events',
                         'events/natural/weather/storm/tornado')

def test_load_trips_hierarchy():
    # Same check for the TRIPS ontology used by the CWMS reader.
    trips_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/cwms/trips_ontology.rdf')
    hm = HierarchyManager(trips_ont, True, True)
    assert hm.isa_closure
    trips_isa = lambda a, b: hm.isa('CWMS', a, 'CWMS', b)
    assert trips_isa('ONT::TRUCK', 'ONT::VEHICLE')
    assert not trips_isa('ONT::VEHICLE', 'ONT::TRUCK')
    assert trips_isa('ONT::MONEY', 'ONT::PHYS-OBJECT')
    assert trips_isa('ONT::TABLE', 'ONT::MANUFACTURED-OBJECT')
def test_same_components():
    # A gene and the family it belongs to must land in the same connected
    # component of the entity hierarchy.
    uri_prkag1 = ent_hierarchy.get_uri('HGNC', 'PRKAG1')
    uri_ampk = ent_hierarchy.get_uri('FPLX', 'AMPK')
    c1 = ent_hierarchy.components[uri_prkag1]
    c2 = ent_hierarchy.components[uri_ampk]
    assert(c1 == c2)
from builtins import dict, str
import os
from copy import deepcopy
from indra.preassembler.hierarchy_manager import hierarchies, HierarchyManager
from indra.statements import get_valid_location, InvalidLocationError, Agent
from indra.util import unicode_strs
ent_hierarchy = hierarchies['entity']
mod_hierarchy = hierarchies['modification']
act_hierarchy = hierarchies['activity']
comp_hierarchy = hierarchies['cellular_component']
def test_hierarchy_unicode():
# Test all the hierarchies except the comp_hierarchy, which is an
# RDF graph
assert unicode_strs((ent_hierarchy.isa_closure,
ent_hierarchy.partof_closure))
assert unicode_strs((mod_hierarchy.isa_closure,
mod_hierarchy.partof_closure))
assert unicode_strs((act_hierarchy.isa_closure,
act_hierarchy.partof_closure))
def test_isa_entity():
assert(ent_hierarchy.isa('HGNC', 'BRAF', 'FPLX', 'RAF'))
def test_isa_entity2():
assert(not ent_hierarchy.isa('HGNC', 'BRAF', 'HGNC', 'ARAF'))
def test_isa_entity3():
assert(not ent_hierarchy.isa('FPLX', 'RAF', 'HGNC', 'BRAF'))
def test_partof_entity():
assert ent_hierarchy.partof('FPLX', 'HIF_alpha', 'FPLX', 'HIF')
def test_isa_or_partof_entity():
assert ent_hierarchy.isa_or_partof('HGNC', 'PRKAG1', 'FPLX', 'AMPK')
def test_partof_entity_not():
assert not ent_hierarchy.partof('FPLX', 'HIF1', 'FPLX', 'HIF_alpha')
def test_isa_mod():
assert(mod_hierarchy.isa('INDRA_MODS', 'phosphorylation',
'INDRA_MODS', 'modification'))
def test_isa_mod_not():
assert(not mod_hierarchy.isa('INDRA_MODS', 'phosphorylation',
'INDRA_MODS', 'ubiquitination'))
def test_isa_activity():
assert act_hierarchy.isa('INDRA_ACTIVITIES', 'kinase',
'INDRA_ACTIVITIES', 'activity')
def test_isa_activity_not():
assert not act_hierarchy.isa('INDRA_ACTIVITIES', 'kinase',
'INDRA_ACTIVITIES', 'phosphatase')
def test_partof_comp():
assert comp_hierarchy.partof('INDRA_LOCATIONS', 'cytoplasm',
'INDRA_LOCATIONS', 'cell')
def test_partof_comp_not():
assert not comp_hierarchy.partof('INDRA_LOCATIONS', 'cell',
'INDRA_LOCATIONS', 'cytoplasm')
def test_partof_comp_none():
assert comp_hierarchy.partof('INDRA_LOCATIONS', 'cytoplasm',
'INDRA_LOCATIONS', None)
def test_partof_comp_none_none():
assert comp_hierarchy.partof('INDRA_LOCATIONS', None,
'INDRA_LOCATIONS', None)
def test_partof_comp_none_not():
assert not comp_hierarchy.partof('INDRA_LOCATIONS', None,
'INDRA_LOCATIONS', 'cytoplasm')
def test_get_children():
raf = 'http://identifiers.org/fplx/RAF'
braf = 'http://identifiers.org/hgnc.symbol/BRAF'
mapk = 'http://identifiers.org/fplx/MAPK'
ampk = 'http://identifiers.org/fplx/AMPK'
# Look up RAF
rafs = ent_hierarchy.get_children(raf)
# Should get three family members
assert isinstance(rafs, list)
assert len(rafs) == 3
assert unicode_strs(rafs)
# The lookup of a gene-level entity should not return any additional
# entities
brafs = ent_hierarchy.get_children(braf)
assert isinstance(brafs, list)
assert len(brafs) == 0
assert unicode_strs(brafs)
mapks = ent_hierarchy.get_children(mapk)
assert len(mapks) == 12
assert unicode_strs(mapks)
# Make sure we can also do this in a case involving both family and complex
# relationships
ampks = ent_hierarchy.get_children(ampk)
assert len(ampks) == 22
ag_none = ''
none_children = ent_hierarchy.get_children('')
assert isinstance(none_children, list)
assert len(none_children) == 0
def test_mtorc_children():
mtorc1 = 'http://identifiers.org/fplx/mTORC1'
mtorc2 = 'http://identifiers.org/fplx/mTORC2'
ch1 = ent_hierarchy.get_children(mtorc1)
ch2 = ent_hierarchy.get_children(mtorc2)
assert('http://identifiers.org/hgnc.symbol/RICTOR' not in ch1)
assert('http://identifiers.org/hgnc.symbol/RPTOR' not in ch2)
def test_mtorc_get_parents():
rictor = 'http://identifiers.org/hgnc.symbol/RICTOR'
p = ent_hierarchy.get_parents(rictor, 'all')
assert(len(p) == 1)
assert(list(p)[0] == 'http://identifiers.org/fplx/mTORC2')
def test_mtorc_transitive_closure():
rictor = 'http://identifiers.org/hgnc.symbol/RICTOR'
p = ent_hierarchy.partof_closure.get(rictor)
assert(len(p) == 1)
assert(p[0] == 'http://identifiers.org/fplx/mTORC2')
def test_mtorc_partof_no_tc():
ent_hierarchy_no_tc = deepcopy(ent_hierarchy)
ent_hierarchy_no_tc.isa_closure = {}
ent_hierarchy_no_tc.partof_closure = {}
assert(ent_hierarchy_no_tc.partof('HGNC', 'RPTOR', 'FPLX', 'mTORC1'))
assert(not ent_hierarchy_no_tc.partof('HGNC', 'RPTOR', 'FPLX', 'mTORC2'))
def test_erk_isa_no_tc():
ent_hierarchy_no_tc = deepcopy(ent_hierarchy)
ent_hierarchy_no_tc.isa_closure = {}
ent_hierarchy_no_tc.partof_closure = {}
assert(ent_hierarchy_no_tc.isa('HGNC', 'MAPK1', 'FPLX', 'MAPK'))
assert(not ent_hierarchy_no_tc.isa('HGNC', 'MAPK1', 'FPLX', 'JNK'))
def test_get_parents():
prkaa1 = 'http://identifiers.org/hgnc.symbol/PRKAA1'
ampk = 'http://identifiers.org/fplx/AMPK'
p1 = ent_hierarchy.get_parents(prkaa1, 'all')
assert(len(p1) == 8)
assert(ampk in p1)
p2 = ent_hierarchy.get_parents(prkaa1, 'immediate')
assert(len(p2) == 7)
assert (ampk not in p2)
p3 = ent_hierarchy.get_parents(prkaa1, 'top')
assert(len(p3) == 1)
assert (ampk in p3)
def test_load_eidos_hierarchy():
eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../sources/eidos/eidos_ontology.rdf')
eidos_ns = 'https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#'
hm = HierarchyManager(eidos_ont, True, True)
assert hm.isa_closure
eidos_isa = lambda a, b: hm.isa('EIDOS', a, 'EIDOS', b)
assert eidos_isa('events/human/conflict/war',
'events/human/conflict')
assert not eidos_isa('events/human/conflict/war',
'events/human/human_migration/migration')
assert eidos_isa('entities/measurement/distance/meter',
'entities/measurement')
assert eidos_isa('events/natural/weather/storm/tornado',
'events')
assert not eidos_isa('events',
'events/natural/weather/storm/tornado')
def test_load_trips_hierarchy():
trips_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../sources/cwms/trips_ontology.rdf')
hm = HierarchyManager(trips_ont, True, True)
assert hm.isa_closure
trips_isa = lambda a, b: hm.isa('CWMS', a, 'CWMS', b)
assert trips_isa('ONT::TRUCK', 'ONT::VEHICLE')
assert not trips_isa('ONT::VEHICLE', 'ONT::TRUCK')
assert trips_isa('ONT::MONEY', 'ONT::PHYS-OBJECT')
assert trips_isa('ONT::TABLE', 'ONT::MANUFACTURED-OBJECT')
def test_same_components():
uri_prkag1 = ent_hierarchy.get_uri('HGNC', 'PRKAG1')
uri_ampk = ent_hierarchy.get_uri('FPLX', 'AMPK')
c1 = ent_hierarchy.components[uri_prkag1]
c2 = ent_hierarchy.components[uri_ampk]
assert(c1 == c2) | 0.584153 | 0.478041 |
import string
import numpy as np
import re
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
def preprocess(X):
X = X.lower()
# use regex to get rid of mentions (e.g., @tomhanks)
pattern = f'(@[a-zA-Z0-9-]*)|[{string.punctuation[1:]}]*'
p = re.compile(pattern)
X = p.sub('', X)
X = word_tokenize(X)
stopwords_list = stopwords.words('english') + ['sxsw']
stopwords_list += list(string.punctuation[1:])
X = [x for x in X if x not in stopwords_list]
lemmatizer = WordNetLemmatizer()
X = [lemmatizer.lemmatize(x) for x in X]
X = ' '.join(X)
return X;
def split(df, percent):
X = df['tweet']
y = df['emotion']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=percent, random_state=42)
return (X_train, X_test, y_train, y_test)
def tfidfVectorize(X_train, *X_test):
vectorizer = TfidfVectorizer()
results=[]
results.append(vectorizer.fit_transform(X_train))
for test in X_test:
results.append(vectorizer.transform(test))
return tuple(results)
def w2v_vectorize(wv, docs):
w2v_docs = []
for doc in docs:
doc_vec = np.zeros(100)
count=0
for word in doc:
if word not in wv:
continue
else:
doc_vec+=wv[word]
count+=1
doc_vec/=count
w2v_docs.append(doc_vec)
return w2v_docs
number_to_sentiment = {0: 'Negative emotion', 1: 'No emotion toward brand or product', 2: 'Positive emotion'}
sentiment_to_number = {'Negative emotion': 0, 'No emotion toward brand or product':1, 'Positive emotion':2}
def sentiment_encoder(Y):
return [sentiment_to_number[y] for y in Y]
def sentiment_decoder(Y):
return [number_to_sentiment[y] for y in Y]
def ngrams(X, size):
vectorizer = TfidfVectorizer(ngram_range=size)
grams = vectorizer.fit_transform(X)
sums = grams.sum(axis = 0)
features = vectorizer.get_feature_names()
data = []
for col, term in enumerate(features):
data.append( (term, sums[0,col] ))
ranking = pd.DataFrame(data, columns = ['term','rank'])
words = (ranking.sort_values('rank', ascending = False))
return words | notebooks/preprocessing.py | import string
import numpy as np
import re
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
def preprocess(X):
X = X.lower()
# use regex to get rid of mentions (e.g., @tomhanks)
pattern = f'(@[a-zA-Z0-9-]*)|[{string.punctuation[1:]}]*'
p = re.compile(pattern)
X = p.sub('', X)
X = word_tokenize(X)
stopwords_list = stopwords.words('english') + ['sxsw']
stopwords_list += list(string.punctuation[1:])
X = [x for x in X if x not in stopwords_list]
lemmatizer = WordNetLemmatizer()
X = [lemmatizer.lemmatize(x) for x in X]
X = ' '.join(X)
return X;
def split(df, percent):
X = df['tweet']
y = df['emotion']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=percent, random_state=42)
return (X_train, X_test, y_train, y_test)
def tfidfVectorize(X_train, *X_test):
vectorizer = TfidfVectorizer()
results=[]
results.append(vectorizer.fit_transform(X_train))
for test in X_test:
results.append(vectorizer.transform(test))
return tuple(results)
def w2v_vectorize(wv, docs):
w2v_docs = []
for doc in docs:
doc_vec = np.zeros(100)
count=0
for word in doc:
if word not in wv:
continue
else:
doc_vec+=wv[word]
count+=1
doc_vec/=count
w2v_docs.append(doc_vec)
return w2v_docs
number_to_sentiment = {0: 'Negative emotion', 1: 'No emotion toward brand or product', 2: 'Positive emotion'}
sentiment_to_number = {'Negative emotion': 0, 'No emotion toward brand or product':1, 'Positive emotion':2}
def sentiment_encoder(Y):
return [sentiment_to_number[y] for y in Y]
def sentiment_decoder(Y):
return [number_to_sentiment[y] for y in Y]
def ngrams(X, size):
vectorizer = TfidfVectorizer(ngram_range=size)
grams = vectorizer.fit_transform(X)
sums = grams.sum(axis = 0)
features = vectorizer.get_feature_names()
data = []
for col, term in enumerate(features):
data.append( (term, sums[0,col] ))
ranking = pd.DataFrame(data, columns = ['term','rank'])
words = (ranking.sort_values('rank', ascending = False))
return words | 0.269999 | 0.385606 |
import logging
from rest_framework import exceptions
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import get_user_model
from galaxy.api import serializers
from galaxy.api.views import base_views
from galaxy.main import models
__all__ = [
'UserList',
'UserDetail',
'ActiveUserView',
'UserNotificationSecretList',
'UserRepositoriesList',
'UserRolesList',
'UserStarredList',
'UserSubscriptionList',
]
logger = logging.getLogger(__name__)
User = get_user_model()
class UserDetail(base_views.RetrieveUpdateAPIView):
model = User
serializer_class = serializers.UserSerializer
def get_object(self, qs=None):
obj = super(UserDetail, self).get_object()
if not obj.is_active:
raise exceptions.PermissionDenied()
return obj
class UserList(base_views.ListAPIView):
model = User
serializer_class = serializers.UserSerializer
def get_queryset(self):
qs = super(UserList, self).get_queryset()
return qs.filter(is_active=True)
class ActiveUserView(base_views.RetrieveAPIView):
model = User
serializer_class = serializers.ActiveUserSerializer
view_name = 'Me'
def get_object(self):
try:
obj = self.model.objects.get(pk=self.request.user.pk)
except ObjectDoesNotExist:
obj = AnonymousUser()
return obj
class UserRepositoriesList(base_views.SubListAPIView):
model = models.Repository
serializer_class = serializers.RepositorySerializer
parent_model = User
relationship = 'repositories'
class UserRolesList(base_views.SubListAPIView):
model = models.Content
serializer_class = serializers.RoleDetailSerializer
parent_model = User
relationship = 'roles'
def get_queryset(self):
qs = super(UserRolesList, self).get_queryset()
return qs.filter(active=True, is_valid=True)
class UserSubscriptionList(base_views.SubListAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
parent_model = User
relationship = 'subscriptions'
class UserStarredList(base_views.SubListAPIView):
model = models.Stargazer
serializer_class = serializers.StargazerSerializer
parent_model = User
relationship = 'starred'
class UserNotificationSecretList(base_views.SubListAPIView):
model = models.NotificationSecret
serializer_class = serializers.NotificationSecretSerializer
parent_model = User
relationship = 'notification_secrets' | galaxy/api/views/users.py | import logging
from rest_framework import exceptions
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import get_user_model
from galaxy.api import serializers
from galaxy.api.views import base_views
from galaxy.main import models
__all__ = [
'UserList',
'UserDetail',
'ActiveUserView',
'UserNotificationSecretList',
'UserRepositoriesList',
'UserRolesList',
'UserStarredList',
'UserSubscriptionList',
]
logger = logging.getLogger(__name__)
User = get_user_model()
class UserDetail(base_views.RetrieveUpdateAPIView):
model = User
serializer_class = serializers.UserSerializer
def get_object(self, qs=None):
obj = super(UserDetail, self).get_object()
if not obj.is_active:
raise exceptions.PermissionDenied()
return obj
class UserList(base_views.ListAPIView):
model = User
serializer_class = serializers.UserSerializer
def get_queryset(self):
qs = super(UserList, self).get_queryset()
return qs.filter(is_active=True)
class ActiveUserView(base_views.RetrieveAPIView):
model = User
serializer_class = serializers.ActiveUserSerializer
view_name = 'Me'
def get_object(self):
try:
obj = self.model.objects.get(pk=self.request.user.pk)
except ObjectDoesNotExist:
obj = AnonymousUser()
return obj
class UserRepositoriesList(base_views.SubListAPIView):
model = models.Repository
serializer_class = serializers.RepositorySerializer
parent_model = User
relationship = 'repositories'
class UserRolesList(base_views.SubListAPIView):
model = models.Content
serializer_class = serializers.RoleDetailSerializer
parent_model = User
relationship = 'roles'
def get_queryset(self):
qs = super(UserRolesList, self).get_queryset()
return qs.filter(active=True, is_valid=True)
class UserSubscriptionList(base_views.SubListAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
parent_model = User
relationship = 'subscriptions'
class UserStarredList(base_views.SubListAPIView):
model = models.Stargazer
serializer_class = serializers.StargazerSerializer
parent_model = User
relationship = 'starred'
class UserNotificationSecretList(base_views.SubListAPIView):
model = models.NotificationSecret
serializer_class = serializers.NotificationSecretSerializer
parent_model = User
relationship = 'notification_secrets' | 0.481698 | 0.03605 |
import json
from loaders.base import Loader
"""
Dump everything in the format:
type 0 ascii str - "a:string"
type 1 float num - 123
type 2 utf16 str - "u:string"
ex:
{
"some_key": ["u:äöõö", 123, "a:hello", ...],
"some_other_key": [956, "a:halo", ...],
...
}
"""
class CSVLoader(Loader):
def read(self, reader):
reader.seek(self.entry.location)
file_length, entry_count, entry_offset, when = reader.read_fmt('IIII')
reader.seek(self.entry.location + entry_offset)
entries = [reader.read_fmt('IIHHI') for i in range(entry_count)]
out_dict = {}
for key_loc, key_len, val_count, idx, val_loc in entries:
reader.seek(self.entry.location + key_loc)
key = reader.read_str(key_len)
values = []
reader.seek(self.entry.location + val_loc)
for i in range(val_count):
val_type = reader.read_fmt('I')[0]
if val_type == 0: # string
str_loc, str_len = reader.read_fmt('II')
tmp_pos = reader.pos
reader.seek(self.entry.location + str_loc)
str_val = reader.read_str(str_len)
reader.seek(tmp_pos)
values.append(str_val)
elif val_type == 1: # number(float)
float_val, _ = reader.read_fmt('fI')
values.append(float("{0:.5f}".format(float_val)))
elif val_type == 2: # utf16 string
str_loc, str_len = reader.read_fmt('II')
tmp_pos = reader.pos
reader.seek(self.entry.location + str_loc)
if reader.little_endian:
str_val = reader.handle.read(str_len * 2).decode('utf-16le')
else:
str_val = reader.handle.read(str_len * 2).decode('utf-16be')
reader.seek(tmp_pos)
values.append(str_val)
else:
raise Exception('malformed CSV')
out_dict[key] = values
self.data = out_dict
def save(self, handle):
for key in self.data.keys():
handle.write(key.encode('ascii', 'ignore'))
handle.write(",".encode())
for item in self.data[key]:
if isinstance(item, str):
handle.write(item.encode('ascii', "ignore"))
handle.write(",".encode())
else:
handle.write(str(item).encode('ascii', 'ignore'))
handle.write(",".encode())
handle.write("\n".encode())
def reimport(self, handle):
self.data = json.loads(handle.read().decode()) | loaders/csv.py | import json
from loaders.base import Loader
"""
Dump everything in the format:
type 0 ascii str - "a:string"
type 1 float num - 123
type 2 utf16 str - "u:string"
ex:
{
"some_key": ["u:äöõö", 123, "a:hello", ...],
"some_other_key": [956, "a:halo", ...],
...
}
"""
class CSVLoader(Loader):
def read(self, reader):
reader.seek(self.entry.location)
file_length, entry_count, entry_offset, when = reader.read_fmt('IIII')
reader.seek(self.entry.location + entry_offset)
entries = [reader.read_fmt('IIHHI') for i in range(entry_count)]
out_dict = {}
for key_loc, key_len, val_count, idx, val_loc in entries:
reader.seek(self.entry.location + key_loc)
key = reader.read_str(key_len)
values = []
reader.seek(self.entry.location + val_loc)
for i in range(val_count):
val_type = reader.read_fmt('I')[0]
if val_type == 0: # string
str_loc, str_len = reader.read_fmt('II')
tmp_pos = reader.pos
reader.seek(self.entry.location + str_loc)
str_val = reader.read_str(str_len)
reader.seek(tmp_pos)
values.append(str_val)
elif val_type == 1: # number(float)
float_val, _ = reader.read_fmt('fI')
values.append(float("{0:.5f}".format(float_val)))
elif val_type == 2: # utf16 string
str_loc, str_len = reader.read_fmt('II')
tmp_pos = reader.pos
reader.seek(self.entry.location + str_loc)
if reader.little_endian:
str_val = reader.handle.read(str_len * 2).decode('utf-16le')
else:
str_val = reader.handle.read(str_len * 2).decode('utf-16be')
reader.seek(tmp_pos)
values.append(str_val)
else:
raise Exception('malformed CSV')
out_dict[key] = values
self.data = out_dict
def save(self, handle):
for key in self.data.keys():
handle.write(key.encode('ascii', 'ignore'))
handle.write(",".encode())
for item in self.data[key]:
if isinstance(item, str):
handle.write(item.encode('ascii', "ignore"))
handle.write(",".encode())
else:
handle.write(str(item).encode('ascii', 'ignore'))
handle.write(",".encode())
handle.write("\n".encode())
def reimport(self, handle):
self.data = json.loads(handle.read().decode()) | 0.219505 | 0.300779 |
from fastapi.testclient import TestClient
import json
import pytest
from openapi_server.models.user import User
def test_create_user(client: TestClient):
"""Test case for create_user
Create user
"""
user = {"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}
headers = {
'api_key': 'special-key',
}
response = client.request(
'POST',
'/user',
headers=headers,
json=user,
)
assert response.status_code == 200
def test_create_users_with_array_input(client: TestClient):
"""Test case for create_users_with_array_input
Creates list of users with given input array
"""
user = [{"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}]
headers = {
'api_key': 'special-key',
}
response = client.request(
'POST',
'/user/createWithArray',
headers=headers,
json=user,
)
assert response.status_code == 200
def test_create_users_with_list_input(client: TestClient):
"""Test case for create_users_with_list_input
Creates list of users with given input array
"""
user = [{"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}]
headers = {
'api_key': 'special-key',
}
response = client.request(
'POST',
'/user/createWithList',
headers=headers,
json=user,
)
assert response.status_code == 200
def test_delete_user(client: TestClient):
"""Test case for delete_user
Delete user
"""
headers = {
'api_key': 'special-key',
}
response = client.request(
'DELETE',
'/user/{username}'.format(username='username_example'),
headers=headers,
)
assert response.status_code == 200
def test_get_user_by_name(client: TestClient):
"""Test case for get_user_by_name
Get user by user name
"""
headers = {
}
response = client.request(
'GET',
'/user/{username}'.format(username='username_example'),
headers=headers,
)
assert response.status_code == 200
def test_login_user(client: TestClient):
"""Test case for login_user
Logs user into the system
"""
params = [("username", 'username_example'),
("password", '<PASSWORD>')]
headers = {
}
response = client.request(
'GET',
'/user/login',
headers=headers,
params=params,
)
assert response.status_code == 200
def test_logout_user(client: TestClient):
"""Test case for logout_user
Logs out current logged in user session
"""
headers = {
'api_key': 'special-key',
}
response = client.request(
'GET',
'/user/logout',
headers=headers,
)
assert response.status_code == 200
def test_update_user(client: TestClient):
"""Test case for update_user
Updated user
"""
user = {"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}
headers = {
'api_key': 'special-key',
}
response = client.request(
'PUT',
'/user/{username}'.format(username='username_example'),
headers=headers,
json=user,
)
assert response.status_code == 200 | samples/server/petstore/python-fastapi/tests/test_user_api.py |
from fastapi.testclient import TestClient
import json
import pytest
from openapi_server.models.user import User
def test_create_user(client: TestClient):
"""Test case for create_user
Create user
"""
user = {"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}
headers = {
'api_key': 'special-key',
}
response = client.request(
'POST',
'/user',
headers=headers,
json=user,
)
assert response.status_code == 200
def test_create_users_with_array_input(client: TestClient):
"""Test case for create_users_with_array_input
Creates list of users with given input array
"""
user = [{"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}]
headers = {
'api_key': 'special-key',
}
response = client.request(
'POST',
'/user/createWithArray',
headers=headers,
json=user,
)
assert response.status_code == 200
def test_create_users_with_list_input(client: TestClient):
"""Test case for create_users_with_list_input
Creates list of users with given input array
"""
user = [{"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}]
headers = {
'api_key': 'special-key',
}
response = client.request(
'POST',
'/user/createWithList',
headers=headers,
json=user,
)
assert response.status_code == 200
def test_delete_user(client: TestClient):
"""Test case for delete_user
Delete user
"""
headers = {
'api_key': 'special-key',
}
response = client.request(
'DELETE',
'/user/{username}'.format(username='username_example'),
headers=headers,
)
assert response.status_code == 200
def test_get_user_by_name(client: TestClient):
"""Test case for get_user_by_name
Get user by user name
"""
headers = {
}
response = client.request(
'GET',
'/user/{username}'.format(username='username_example'),
headers=headers,
)
assert response.status_code == 200
def test_login_user(client: TestClient):
"""Test case for login_user
Logs user into the system
"""
params = [("username", 'username_example'),
("password", '<PASSWORD>')]
headers = {
}
response = client.request(
'GET',
'/user/login',
headers=headers,
params=params,
)
assert response.status_code == 200
def test_logout_user(client: TestClient):
"""Test case for logout_user
Logs out current logged in user session
"""
headers = {
'api_key': 'special-key',
}
response = client.request(
'GET',
'/user/logout',
headers=headers,
)
assert response.status_code == 200
def test_update_user(client: TestClient):
"""Test case for update_user
Updated user
"""
user = {"first_name":"firstName","last_name":"lastName","password":"password","user_status":6,"phone":"phone","id":0,"email":"email","username":"username"}
headers = {
'api_key': 'special-key',
}
response = client.request(
'PUT',
'/user/{username}'.format(username='username_example'),
headers=headers,
json=user,
)
assert response.status_code == 200 | 0.497803 | 0.376652 |
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2012, SvartalF'
__license__ = 'BSD 3-Clause License'
import opuslib.api.decoder
import opuslib.api.encoder
import opuslib.api.ctl
import opuslib.constants
class Decoder(object):
def __init__(self, fs, channels):
"""
Parameters:
fs : sampling rate
channels : number of channels
"""
self._fs = fs
self._channels = channels
self._state = opuslib.api.decoder.create(fs, channels)
def __del__(self):
if hasattr(self, '_state'):
# Destroying state only if __init__ completed successfully
opuslib.api.decoder.destroy(self._state)
def reset_state(self):
"""
Resets the codec state to be equivalent to a freshly initialized state
"""
opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.reset_state)
def decode(self, data, frame_size, decode_fec=False):
return opuslib.api.decoder.decode(
self._state, data, len(data), frame_size, decode_fec,
channels=self._channels)
def decode_float(self, data, frame_size, decode_fec=False):
return opuslib.api.decoder.decode_float(
self._state, data, len(data), frame_size, decode_fec,
channels=self._channels)
# CTL interfaces
_get_final_range = lambda self: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.get_final_range)
final_range = property(_get_final_range)
_get_bandwidth = lambda self: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.get_bandwidth)
bandwidth = property(_get_bandwidth)
_get_pitch = lambda self: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.get_pitch)
pitch = property(_get_pitch)
_get_lsb_depth = lambda self: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.get_lsb_depth)
_set_lsb_depth = lambda self, x: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.set_lsb_depth, x)
lsb_depth = property(_get_lsb_depth, _set_lsb_depth)
_get_gain = lambda self: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.get_gain)
_set_gain = lambda self, x: opuslib.api.decoder.opuslib.api.ctl(
self._state, opuslib.api.ctl.set_gain, x)
gain = property(_get_gain, _set_gain)
class Encoder(object):
def __init__(self, fs, channels, application):
"""
Parameters:
fs : sampling rate
channels : number of channels
"""
if application in opuslib.constants.APPLICATION_TYPES_MAP.keys():
application = opuslib.constants.APPLICATION_TYPES_MAP[application]
elif application in opuslib.constants.APPLICATION_TYPES_MAP.values():
pass # Nothing to do here
else:
raise ValueError(
"`application` value must be in 'voip', 'audio' or "
"'restricted_lowdelay'")
self._fs = fs
self._channels = channels
self._application = application
self._state = opuslib.api.encoder.create(fs, channels, application)
def __del__(self):
if hasattr(self, '_state'):
# Destroying state only if __init__ completed successfully
opuslib.api.encoder.destroy(self._state)
def reset_state(self):
"""
Resets the codec state to be equivalent to a freshly initialized state
"""
opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.reset_state)
def encode(self, data, frame_size):
return opuslib.api.encoder.encode(
self._state, data, frame_size, len(data))
def encode_float(self, data, frame_size, decode_fec=False):
return opuslib.api.encoder.encode_float(
self._state, data, frame_size, len(data))
# CTL interfaces
_get_final_range = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_final_range)
final_range = property(_get_final_range)
_get_bandwidth = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_bandwidth)
bandwidth = property(_get_bandwidth)
_get_pitch = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_pitch)
pitch = property(_get_pitch)
_get_lsb_depth = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_lsb_depth)
_set_lsb_depth = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_lsb_depth, x)
lsb_depth = property(_get_lsb_depth, _set_lsb_depth)
_get_complexity = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_complexity)
_set_complexity = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_complexity, x)
complexity = property(_get_complexity, _set_complexity)
_get_bitrate = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_bitrate)
_set_bitrate = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_bitrate, x)
bitrate = property(_get_bitrate, _set_bitrate)
_get_vbr = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_vbr)
_set_vbr = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_vbr, x)
vbr = property(_get_vbr, _set_vbr)
_get_vbr_constraint = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_vbr_constraint)
_set_vbr_constraint = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_vbr_constraint, x)
vbr_constraint = property(_get_vbr_constraint, _set_vbr_constraint)
_get_force_channels = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_force_channels)
_set_force_channels = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_force_channels, x)
force_channels = property(_get_force_channels, _set_force_channels)
_get_max_bandwidth = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_max_bandwidth)
_set_max_bandwidth = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_max_bandwidth, x)
max_bandwidth = property(_get_max_bandwidth, _set_max_bandwidth)
_set_bandwidth = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_bandwidth, x)
bandwidth = property(None, _set_bandwidth)
_get_signal = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_signal)
_set_signal = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_signal, x)
signal = property(_get_signal, _set_signal)
_get_application = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_application)
_set_application = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_application, x)
application = property(_get_application, _set_application)
_get_sample_rate = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_sample_rate)
sample_rate = property(_get_sample_rate)
_get_lookahead = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_lookahead)
lookahead = property(_get_lookahead)
_get_inband_fec = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_inband_fec)
_set_inband_fec = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_inband_fec)
inband_fec = property(_get_inband_fec, _set_inband_fec)
_get_packet_loss_perc = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_packet_loss_perc)
_set_packet_loss_perc = \
lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.set_packet_loss_perc, x)
packet_loss_perc = property(_get_packet_loss_perc, _set_packet_loss_perc)
_get_dtx = lambda self: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_dtx)
_set_dtx = lambda self, x: opuslib.api.encoder.ctl(
self._state, opuslib.api.ctl.get_dtx, x) | opuslib/classes.py | __author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2012, SvartalF'
__license__ = 'BSD 3-Clause License'
import opuslib.api.decoder
import opuslib.api.encoder
import opuslib.api.ctl
import opuslib.constants
class Decoder(object):
    """Wrapper around an Opus decoder state."""
    def __init__(self, fs, channels):
        """
        Parameters:
        fs : sampling rate
        channels : number of channels
        """
        self._fs = fs
        self._channels = channels
        self._state = opuslib.api.decoder.create(fs, channels)
    def __del__(self):
        if hasattr(self, '_state'):
            # Destroying state only if __init__ completed successfully
            opuslib.api.decoder.destroy(self._state)
    def reset_state(self):
        """
        Resets the codec state to be equivalent to a freshly initialized state
        """
        # Fixed: 'opuslib.api.decoder.opuslib.api.ctl' was an invalid
        # attribute chain (AttributeError at call time).  Use the decoder
        # module's ctl entry point, mirroring Encoder's use of
        # opuslib.api.encoder.ctl.
        opuslib.api.decoder.ctl(
            self._state, opuslib.api.ctl.reset_state)
    def decode(self, data, frame_size, decode_fec=False):
        """Decode an Opus frame; channel count is taken from __init__."""
        return opuslib.api.decoder.decode(
            self._state, data, len(data), frame_size, decode_fec,
            channels=self._channels)
    def decode_float(self, data, frame_size, decode_fec=False):
        """Decode an Opus frame to floating-point samples."""
        return opuslib.api.decoder.decode_float(
            self._state, data, len(data), frame_size, decode_fec,
            channels=self._channels)
    # CTL interfaces (read/write codec parameters via decoder ctl calls)
    _get_final_range = lambda self: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.get_final_range)
    final_range = property(_get_final_range)
    _get_bandwidth = lambda self: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.get_bandwidth)
    bandwidth = property(_get_bandwidth)
    _get_pitch = lambda self: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.get_pitch)
    pitch = property(_get_pitch)
    _get_lsb_depth = lambda self: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.get_lsb_depth)
    _set_lsb_depth = lambda self, x: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.set_lsb_depth, x)
    lsb_depth = property(_get_lsb_depth, _set_lsb_depth)
    _get_gain = lambda self: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.get_gain)
    _set_gain = lambda self, x: opuslib.api.decoder.ctl(
        self._state, opuslib.api.ctl.set_gain, x)
    gain = property(_get_gain, _set_gain)
class Encoder(object):
    """Wrapper around an Opus encoder state."""
    def __init__(self, fs, channels, application):
        """
        Parameters:
        fs : sampling rate
        channels : number of channels
        application : 'voip', 'audio', 'restricted_lowdelay' or the
            corresponding numeric constant
        """
        if application in opuslib.constants.APPLICATION_TYPES_MAP.keys():
            application = opuslib.constants.APPLICATION_TYPES_MAP[application]
        elif application in opuslib.constants.APPLICATION_TYPES_MAP.values():
            pass  # Nothing to do here
        else:
            raise ValueError(
                "`application` value must be in 'voip', 'audio' or "
                "'restricted_lowdelay'")
        self._fs = fs
        self._channels = channels
        self._application = application
        self._state = opuslib.api.encoder.create(fs, channels, application)
    def __del__(self):
        if hasattr(self, '_state'):
            # Destroying state only if __init__ completed successfully
            opuslib.api.encoder.destroy(self._state)
    def reset_state(self):
        """
        Resets the codec state to be equivalent to a freshly initialized state
        """
        opuslib.api.encoder.ctl(
            self._state, opuslib.api.ctl.reset_state)
    def encode(self, data, frame_size):
        """Encode one frame of PCM data."""
        return opuslib.api.encoder.encode(
            self._state, data, frame_size, len(data))
    def encode_float(self, data, frame_size, decode_fec=False):
        """Encode one frame of floating-point PCM data.

        ``decode_fec`` is accepted for signature compatibility but is
        currently unused.
        """
        return opuslib.api.encoder.encode_float(
            self._state, data, frame_size, len(data))
    # CTL interfaces (read/write codec parameters)
    _get_final_range = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_final_range)
    final_range = property(_get_final_range)
    _get_bandwidth = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_bandwidth)
    bandwidth = property(_get_bandwidth)
    _get_pitch = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_pitch)
    pitch = property(_get_pitch)
    _get_lsb_depth = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_lsb_depth)
    _set_lsb_depth = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_lsb_depth, x)
    lsb_depth = property(_get_lsb_depth, _set_lsb_depth)
    _get_complexity = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_complexity)
    _set_complexity = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_complexity, x)
    complexity = property(_get_complexity, _set_complexity)
    _get_bitrate = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_bitrate)
    _set_bitrate = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_bitrate, x)
    bitrate = property(_get_bitrate, _set_bitrate)
    _get_vbr = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_vbr)
    _set_vbr = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_vbr, x)
    vbr = property(_get_vbr, _set_vbr)
    _get_vbr_constraint = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_vbr_constraint)
    _set_vbr_constraint = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_vbr_constraint, x)
    vbr_constraint = property(_get_vbr_constraint, _set_vbr_constraint)
    _get_force_channels = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_force_channels)
    _set_force_channels = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_force_channels, x)
    force_channels = property(_get_force_channels, _set_force_channels)
    _get_max_bandwidth = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_max_bandwidth)
    _set_max_bandwidth = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_max_bandwidth, x)
    max_bandwidth = property(_get_max_bandwidth, _set_max_bandwidth)
    _set_bandwidth = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_bandwidth, x)
    # Fixed: this rebinding used property(None, _set_bandwidth), which
    # silently discarded the getter defined above and made the attribute
    # write-only.  Combine getter and setter instead.
    bandwidth = property(_get_bandwidth, _set_bandwidth)
    _get_signal = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_signal)
    _set_signal = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_signal, x)
    signal = property(_get_signal, _set_signal)
    _get_application = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_application)
    _set_application = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_application, x)
    application = property(_get_application, _set_application)
    _get_sample_rate = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_sample_rate)
    sample_rate = property(_get_sample_rate)
    _get_lookahead = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_lookahead)
    lookahead = property(_get_lookahead)
    _get_inband_fec = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_inband_fec)
    # Fixed: the setter dropped its argument, so assigning inband_fec
    # never forwarded the value to the ctl call.
    _set_inband_fec = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_inband_fec, x)
    inband_fec = property(_get_inband_fec, _set_inband_fec)
    _get_packet_loss_perc = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_packet_loss_perc)
    _set_packet_loss_perc = \
        lambda self, x: opuslib.api.encoder.ctl(
            self._state, opuslib.api.ctl.set_packet_loss_perc, x)
    packet_loss_perc = property(_get_packet_loss_perc, _set_packet_loss_perc)
    _get_dtx = lambda self: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.get_dtx)
    # Fixed: the setter called get_dtx (copy-paste error); use set_dtx.
    _set_dtx = lambda self, x: opuslib.api.encoder.ctl(
        self._state, opuslib.api.ctl.set_dtx, x)
    # The dtx property assignment was missing (chunk appears truncated
    # here); added for symmetry with the other get/set pairs.
    dtx = property(_get_dtx, _set_dtx)
import os
import re
__docformat__ = "restructredtext en"
# map eland_result.txt sense
# 'F'orward / 'R'everse flags mapped to BED strand symbols and itemRgb colors.
sense_map = {'F': '+', 'R': '-'}
sense_color = {'F': '0,0,255', 'R': '255,255,0'}
def create_bed_header(name, description):
    """Return the BED track header line, terminated with os.linesep."""
    # Fall back to generic defaults when the caller supplied nothing.
    name = "track" if name is None else name
    description = "eland result file" if description is None else description
    header = 'track name="%s" description="%s" visibility=4 itemRgb="ON"' % (
        name, description)
    return header + os.linesep
def make_bed_from_eland_stream(instream, outstream, name, description, chromosome_prefix='chr'):
    """
    read an eland result file from instream and write a bedfile to outstream
    :Parameters:
      - `instream`: stream containing the output from eland
      - `outstream`: stream to write the bed file too
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chromosome_prefix`: restrict output lines to fasta records that start with this pattern
    """
    bed_lines = make_bed_from_eland_generator(
        instream, name, description, chromosome_prefix)
    for bed_line in bed_lines:
        outstream.write(bed_line)
def make_bed_from_eland_generator(instream, name, description, chromosome_prefix='chr'):
    """
    read an eland result file from instream and yield bedfile lines
    :Parameters:
      - `instream`: stream containing the output from eland
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chromosome_prefix`: restrict output lines to fasta records that start with this pattern
    :Return: generator which yields lines of bedfile
    """
    # Column offsets within an eland_result.txt record.
    SEQ, CHR, START, SENSE = 1, 6, 7, 8
    yield create_bed_header(name, description)
    prefix_len = len(chromosome_prefix)
    for line in instream:
        fields = line.split()
        # need at least the CHR field, and it must match a chromosome
        if len(fields) <= CHR:
            continue
        if fields[CHR][:prefix_len] != chromosome_prefix:
            continue
        start = fields[START]
        stop = int(start) + len(fields[SEQ])
        # strip off filename extension
        chromosome = fields[CHR].split('.')[0]
        sense = fields[SENSE]
        yield '%s %s %d read 0 %s - - %s%s' % (
            chromosome, start, stop,
            sense_map[sense], sense_color[sense], os.linesep)
def make_bed_from_multi_eland_stream(
        instream,
        outstream,
        name,
        description,
        chr_prefix='chr',
        max_reads=255):
    """
    read a multi eland result file from instream and write the bedfile to outstream
    :Parameters:
      - `instream`: stream containing the output from eland
      - `outstream`: stream to write the bed file too
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chr_prefix`: restrict output lines to fasta records that start with this pattern
      - `max_reads`: maximum number of reads to write to bed stream
    """
    bed_lines = make_bed_from_multi_eland_generator(
        instream, name, description, chr_prefix, max_reads)
    for bed_line in bed_lines:
        outstream.write(bed_line)
def make_bed_from_multi_eland_generator(instream, name, description, chr_prefix, max_reads=255):
    """Yield bedfile lines for a multi-eland result stream.

    :Parameters:
      - `instream`: stream containing the multi-eland output
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chr_prefix`: restrict output to chromosomes starting with this
        pattern (None means no restriction)
      - `max_reads`: stop reporting once this many reads would be emitted
    """
    loc_pattern = '(?P<fullloc>(?P<start>[0-9]+)(?P<dir>[FR])(?P<count>[0-9AGCT]+))'
    other_pattern = '(?P<chr>[^:,]+)'
    split_re = re.compile('(%s|%s)' % (loc_pattern, other_pattern))
    yield create_bed_header(name, description)
    for line in instream:
        rec = line.split()
        if len(rec) <= 3:
            continue
        # rec[0] is the colony id, rec[2] the 0/1/2-mismatch match counts.
        seq = rec[1]
        compressed_reads = rec[3]
        cur_chr = ""
        reads = {0: [], 1: [], 2: []}
        for token in split_re.finditer(compressed_reads):
            if token.group('chr') is not None:
                # strip off extension if present
                cur_chr = os.path.splitext(token.group('chr'))[0]
            elif token.group('fullloc') is not None:
                # NOTE(review): 'count' may contain AGCT substitution
                # letters, in which case int() raises ValueError, and a
                # count > 2 would KeyError on `reads` -- preserved from
                # the original; confirm input never triggers this.
                matches = int(token.group('count'))
                # only collect a read if the current chromosome matches
                if chr_prefix is None or cur_chr.startswith(chr_prefix):
                    start = int(token.group('start'))
                    stop = start + len(seq)
                    orientation = token.group('dir')
                    reads[matches].append((cur_chr, start, stop,
                                           sense_map[orientation],
                                           sense_color[orientation]))
        # report up to max_reads, fewer-mismatch classes first.
        # Fixed: the yield previously used the stale loop variable
        # `orientation` (NameError when no location token matched, wrong
        # strand/color for mixed-strand records) instead of the per-read
        # strand/color, and reported_reads was incremented both per list
        # and per read, double-counting against the threshold.
        reported_reads = 0
        for mismatch in (0, 1, 2):
            read_list = reads[mismatch]
            reported_reads += len(read_list)
            if reported_reads <= max_reads:
                for cur_chr, start, stop, strand, color in read_list:
                    yield '%s %d %d read 0 %s - - %s%s' % (
                        cur_chr, start, stop, strand, color, os.linesep)
def make_description(flowcell_id, lane):
    """
    compute a bedfile name and description from the django database
    """
    from htsworkflow.experiments import models as experiments
    lane = int(lane)
    if not 1 <= lane <= 8:
        raise RuntimeError("flowcells only have lanes 1-8")
    cell = experiments.FlowCell.objects.get(flowcell_id=flowcell_id)
    library = getattr(cell, 'lane_%d_library' % (lane,))
    name = "%s-%s" % (flowcell_id, lane)
    description = "%s-%s" % (library.library_name, library.library_id)
    return name, description
import re
__docformat__ = "restructredtext en"
# map eland_result.txt sense
sense_map = {'F': '+', 'R': '-'}
sense_color = {'F': '0,0,255', 'R': '255,255,0'}
def create_bed_header(name, description):
    """Return the BED track header line, terminated with os.linesep."""
    # Fall back to generic defaults when the caller supplied nothing.
    name = "track" if name is None else name
    description = "eland result file" if description is None else description
    header = 'track name="%s" description="%s" visibility=4 itemRgb="ON"' % (
        name, description)
    return header + os.linesep
def make_bed_from_eland_stream(instream, outstream, name, description, chromosome_prefix='chr'):
    """
    read an eland result file from instream and write a bedfile to outstream
    :Parameters:
      - `instream`: stream containing the output from eland
      - `outstream`: stream to write the bed file too
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chromosome_prefix`: restrict output lines to fasta records that start with this pattern
    """
    bed_lines = make_bed_from_eland_generator(
        instream, name, description, chromosome_prefix)
    for bed_line in bed_lines:
        outstream.write(bed_line)
def make_bed_from_eland_generator(instream, name, description, chromosome_prefix='chr'):
    """
    read an eland result file from instream and yield bedfile lines
    :Parameters:
      - `instream`: stream containing the output from eland
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chromosome_prefix`: restrict output lines to fasta records that start with this pattern
    :Return: generator which yields lines of bedfile
    """
    # Column offsets within an eland_result.txt record.
    SEQ, CHR, START, SENSE = 1, 6, 7, 8
    yield create_bed_header(name, description)
    prefix_len = len(chromosome_prefix)
    for line in instream:
        fields = line.split()
        # need at least the CHR field, and it must match a chromosome
        if len(fields) <= CHR:
            continue
        if fields[CHR][:prefix_len] != chromosome_prefix:
            continue
        start = fields[START]
        stop = int(start) + len(fields[SEQ])
        # strip off filename extension
        chromosome = fields[CHR].split('.')[0]
        sense = fields[SENSE]
        yield '%s %s %d read 0 %s - - %s%s' % (
            chromosome, start, stop,
            sense_map[sense], sense_color[sense], os.linesep)
def make_bed_from_multi_eland_stream(
        instream,
        outstream,
        name,
        description,
        chr_prefix='chr',
        max_reads=255):
    """
    read a multi eland result file from instream and write the bedfile to outstream
    :Parameters:
      - `instream`: stream containing the output from eland
      - `outstream`: stream to write the bed file too
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chr_prefix`: restrict output lines to fasta records that start with this pattern
      - `max_reads`: maximum number of reads to write to bed stream
    """
    bed_lines = make_bed_from_multi_eland_generator(
        instream, name, description, chr_prefix, max_reads)
    for bed_line in bed_lines:
        outstream.write(bed_line)
def make_bed_from_multi_eland_generator(instream, name, description, chr_prefix, max_reads=255):
    """Yield bedfile lines for a multi-eland result stream.

    :Parameters:
      - `instream`: stream containing the multi-eland output
      - `name`: name of bed-file (must be unique)
      - `description`: longer description of the bed file
      - `chr_prefix`: restrict output to chromosomes starting with this
        pattern (None means no restriction)
      - `max_reads`: stop reporting once this many reads would be emitted
    """
    loc_pattern = '(?P<fullloc>(?P<start>[0-9]+)(?P<dir>[FR])(?P<count>[0-9AGCT]+))'
    other_pattern = '(?P<chr>[^:,]+)'
    split_re = re.compile('(%s|%s)' % (loc_pattern, other_pattern))
    yield create_bed_header(name, description)
    for line in instream:
        rec = line.split()
        if len(rec) <= 3:
            continue
        # rec[0] is the colony id, rec[2] the 0/1/2-mismatch match counts.
        seq = rec[1]
        compressed_reads = rec[3]
        cur_chr = ""
        reads = {0: [], 1: [], 2: []}
        for token in split_re.finditer(compressed_reads):
            if token.group('chr') is not None:
                # strip off extension if present
                cur_chr = os.path.splitext(token.group('chr'))[0]
            elif token.group('fullloc') is not None:
                # NOTE(review): 'count' may contain AGCT substitution
                # letters, in which case int() raises ValueError, and a
                # count > 2 would KeyError on `reads` -- preserved from
                # the original; confirm input never triggers this.
                matches = int(token.group('count'))
                # only collect a read if the current chromosome matches
                if chr_prefix is None or cur_chr.startswith(chr_prefix):
                    start = int(token.group('start'))
                    stop = start + len(seq)
                    orientation = token.group('dir')
                    reads[matches].append((cur_chr, start, stop,
                                           sense_map[orientation],
                                           sense_color[orientation]))
        # report up to max_reads, fewer-mismatch classes first.
        # Fixed: the yield previously used the stale loop variable
        # `orientation` (NameError when no location token matched, wrong
        # strand/color for mixed-strand records) instead of the per-read
        # strand/color, and reported_reads was incremented both per list
        # and per read, double-counting against the threshold.
        reported_reads = 0
        for mismatch in (0, 1, 2):
            read_list = reads[mismatch]
            reported_reads += len(read_list)
            if reported_reads <= max_reads:
                for cur_chr, start, stop, strand, color in read_list:
                    yield '%s %d %d read 0 %s - - %s%s' % (
                        cur_chr, start, stop, strand, color, os.linesep)
def make_description(flowcell_id, lane):
    """
    compute a bedfile name and description from the django database
    """
    from htsworkflow.experiments import models as experiments
    lane = int(lane)
    if not 1 <= lane <= 8:
        raise RuntimeError("flowcells only have lanes 1-8")
    cell = experiments.FlowCell.objects.get(flowcell_id=flowcell_id)
    library = getattr(cell, 'lane_%d_library' % (lane,))
    name = "%s-%s" % (flowcell_id, lane)
    description = "%s-%s" % (library.library_name, library.library_id)
    return name, description
the way."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import json
import os
import re
import shutil
from timeit import default_timer as timer
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from model import Model
from pgd_attack import LinfPGDAttack
os.environ["CUDA_VISIBLE_DEVICES"]="0"
config_file_path = 'config.json'
with open(config_file_path) as config_file:
config = json.load(config_file)
# Setting up training parameters
tf.set_random_seed(config['random_seed'])
np.random.seed(config['random_seed'])
max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
training_objective = config['training_objective']
m = config["m"]
lamb = config["lambda"]
approx_factor = config["approx_factor"]
continue_train = config["continue_train"]
batch_size = config['training_batch_size']
# Setting up the data and the model
fashion_mnist = input_data.read_data_sets('data/fashion', source_url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/', one_hot=False)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model('train', m, lamb, approx_factor)
# Setting up the optimizer
if training_objective == 'ar':
re_term = model.IG_regularization_term
train_step = tf.train.AdamOptimizer(1e-4).minimize(model.loss_with_IG_regularizer,
global_step=global_step)
elif training_objective == 'adv_ar':
re_term = model.IG_regularization_term
train_step = tf.train.AdamOptimizer(1e-4).minimize(model.adv_loss_with_IG_regularizer,
global_step=global_step)
else:
assert False, ('Unknown training objective.')
# Setting up the Tensorboard and checkpoint outputs
model_dir = config['model_dir']
print('model_dir: {}'.format(model_dir))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# We add accuracy and xent twice so we can easily make three types of
# comparisons in Tensorboard:
# - train vs eval (for a single run)
# - train of different runs
# - eval of different runs
saver = tf.train.Saver(max_to_keep=3)
tf.summary.scalar('accuracy adv train', model.accuracy)
tf.summary.scalar('accuracy adv', model.accuracy)
tf.summary.scalar('xent adv train', model.xent / batch_size)
tf.summary.scalar('xent adv', model.xent / batch_size)
merged_summaries = tf.summary.merge_all()
shutil.copy(config_file_path, model_dir)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.Session(config = tf_config) as sess:
# Initialize the summary writer, global variables, and our time counter.
# Set up adversary
attack = LinfPGDAttack(sess, model, config['epsilon'], config['k'], config['a'], config['random_start'], config['loss_func'])
summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
if continue_train:
checkpoint = tf.train.latest_checkpoint(model_dir)
saver.restore(sess, checkpoint)
curr_step = int(checkpoint.split('-')[1])
sess.run(global_step.assign(curr_step))
else:
curr_step = 0
sess.run(tf.global_variables_initializer())
training_time = 0.0
# Main training loop
for ii in range(curr_step, max_num_training_steps):
x_batch, y_batch = fashion_mnist.train.next_batch(batch_size)
x_batch = x_batch.reshape((-1,28,28,1))
# Compute Adversarial Perturbations
start = timer()
x_batch_adv = attack.perturb(x_batch, y_batch)
end = timer()
training_time += end - start
nat_dict = {model.input: x_batch, model.label: y_batch}
adv_dict = {model.input: x_batch_adv, model.label: y_batch}
# Output to stdout
if ii % num_output_steps == 0:
nat_acc, nat_loss = sess.run([model.accuracy, model.xent], feed_dict=nat_dict)
adv_acc, adv_loss = sess.run([model.accuracy, model.xent], feed_dict=adv_dict)
IG_re = sess.run(re_term, feed_dict={model.input: x_batch, model.adv_input: x_batch_adv, model.label: y_batch})
print('Step {}: ({})'.format(ii, datetime.now()), flush=True)
print(' training nat accuracy {:.4}%, loss {:.4}'.format(nat_acc * 100,nat_loss), flush=True)
print(' training adv accuracy {:.4}%, loss {:.4}'.format(adv_acc * 100,adv_loss), flush=True)
print(' training IG term {:.4}'.format(IG_re), flush=True)
if ii != 0:
print(' {} examples per second'.format(
num_output_steps * batch_size / training_time), flush=True)
training_time = 0.0
# Tensorboard summaries
if ii % num_summary_steps == 0:
summary = sess.run(merged_summaries, feed_dict=adv_dict)
summary_writer.add_summary(summary, global_step.eval(sess))
# Write a checkpoint
if ii % num_checkpoint_steps == 0:
saver.save(sess,
os.path.join(model_dir, 'checkpoint'),
global_step=global_step)
# Actual training step
start = timer()
sess.run(train_step, feed_dict={model.input: x_batch, model.adv_input: x_batch_adv, model.label: y_batch})
end = timer()
training_time += end - start | Fashion-MNIST/train_attribution.py | the way."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import json
import os
import re
import shutil
from timeit import default_timer as timer
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from model import Model
from pgd_attack import LinfPGDAttack
os.environ["CUDA_VISIBLE_DEVICES"]="0"
config_file_path = 'config.json'
with open(config_file_path) as config_file:
config = json.load(config_file)
# Setting up training parameters
tf.set_random_seed(config['random_seed'])
np.random.seed(config['random_seed'])
max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
training_objective = config['training_objective']
m = config["m"]
lamb = config["lambda"]
approx_factor = config["approx_factor"]
continue_train = config["continue_train"]
batch_size = config['training_batch_size']
# Setting up the data and the model
fashion_mnist = input_data.read_data_sets('data/fashion', source_url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/', one_hot=False)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model('train', m, lamb, approx_factor)
# Setting up the optimizer
if training_objective == 'ar':
re_term = model.IG_regularization_term
train_step = tf.train.AdamOptimizer(1e-4).minimize(model.loss_with_IG_regularizer,
global_step=global_step)
elif training_objective == 'adv_ar':
re_term = model.IG_regularization_term
train_step = tf.train.AdamOptimizer(1e-4).minimize(model.adv_loss_with_IG_regularizer,
global_step=global_step)
else:
assert False, ('Unknown training objective.')
# Setting up the Tensorboard and checkpoint outputs
model_dir = config['model_dir']
print('model_dir: {}'.format(model_dir))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# We add accuracy and xent twice so we can easily make three types of
# comparisons in Tensorboard:
# - train vs eval (for a single run)
# - train of different runs
# - eval of different runs
saver = tf.train.Saver(max_to_keep=3)
tf.summary.scalar('accuracy adv train', model.accuracy)
tf.summary.scalar('accuracy adv', model.accuracy)
tf.summary.scalar('xent adv train', model.xent / batch_size)
tf.summary.scalar('xent adv', model.xent / batch_size)
merged_summaries = tf.summary.merge_all()
shutil.copy(config_file_path, model_dir)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.Session(config = tf_config) as sess:
# Initialize the summary writer, global variables, and our time counter.
# Set up adversary
attack = LinfPGDAttack(sess, model, config['epsilon'], config['k'], config['a'], config['random_start'], config['loss_func'])
summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
if continue_train:
checkpoint = tf.train.latest_checkpoint(model_dir)
saver.restore(sess, checkpoint)
curr_step = int(checkpoint.split('-')[1])
sess.run(global_step.assign(curr_step))
else:
curr_step = 0
sess.run(tf.global_variables_initializer())
training_time = 0.0
# Main training loop
for ii in range(curr_step, max_num_training_steps):
x_batch, y_batch = fashion_mnist.train.next_batch(batch_size)
x_batch = x_batch.reshape((-1,28,28,1))
# Compute Adversarial Perturbations
start = timer()
x_batch_adv = attack.perturb(x_batch, y_batch)
end = timer()
training_time += end - start
nat_dict = {model.input: x_batch, model.label: y_batch}
adv_dict = {model.input: x_batch_adv, model.label: y_batch}
# Output to stdout
if ii % num_output_steps == 0:
nat_acc, nat_loss = sess.run([model.accuracy, model.xent], feed_dict=nat_dict)
adv_acc, adv_loss = sess.run([model.accuracy, model.xent], feed_dict=adv_dict)
IG_re = sess.run(re_term, feed_dict={model.input: x_batch, model.adv_input: x_batch_adv, model.label: y_batch})
print('Step {}: ({})'.format(ii, datetime.now()), flush=True)
print(' training nat accuracy {:.4}%, loss {:.4}'.format(nat_acc * 100,nat_loss), flush=True)
print(' training adv accuracy {:.4}%, loss {:.4}'.format(adv_acc * 100,adv_loss), flush=True)
print(' training IG term {:.4}'.format(IG_re), flush=True)
if ii != 0:
print(' {} examples per second'.format(
num_output_steps * batch_size / training_time), flush=True)
training_time = 0.0
# Tensorboard summaries
if ii % num_summary_steps == 0:
summary = sess.run(merged_summaries, feed_dict=adv_dict)
summary_writer.add_summary(summary, global_step.eval(sess))
# Write a checkpoint
if ii % num_checkpoint_steps == 0:
saver.save(sess,
os.path.join(model_dir, 'checkpoint'),
global_step=global_step)
# Actual training step
start = timer()
sess.run(train_step, feed_dict={model.input: x_batch, model.adv_input: x_batch_adv, model.label: y_batch})
end = timer()
training_time += end - start | 0.82425 | 0.332161 |
import re
import abc
import asyncio
import logging
import websockets
from .enums import ReplyCode, EventType
from .errors import *
from .events import Event
from .utils import insort, LineBuffer, SortedHandler, IRCPrefix
__all__ = ("Server", "DefaultServer")
log = logging.getLogger("airc.server")
# CAP-negotiation subcommands defined by the protocol; clients may use all
# of them except NAK (which only servers send).
_cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split())
_client_subcommands = set(_cap_subcommands) - {'NAK'}
# RFC 1459/2812 message grammar, extended with an optional IRCv3 '@tags'
# section.  'argument' captures everything after the command, untokenized.
_rfc_pattern = r"^(@(?P<tags>[^ ]*) )?(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?"
_regexp_rfc = re.compile(_rfc_pattern)
# TODO: Move some of this to utils
def _handle_tags(tags):
if tags is None:
return {}
tags = tags.lstrip("@")
raw_tags = tags.split(";")
tags = {}
for raw_tag in raw_tags:
name, val = raw_tag.split("=")
if val == "":
val = None
tags[name] = val
return tags
def _handle_args(args):
args = args.lstrip()
out_args = []
rest = False
tmp = ""
for char in args:
if rest:
tmp += char
elif char == " ":
out_args.append(tmp)
tmp = ""
elif char == ":" and tmp == "":
rest = True
else:
tmp += char
if tmp:
out_args.append(tmp)
return out_args
def _handle_command(command):
    """Classify a raw command token into an (EventType, name) pair."""
    if not command.isnumeric():
        return EventType.PROTOCOL, command
    try:
        numeric = int(command)
        code = ReplyCode(numeric)
    except ValueError:
        # Numeric token we cannot convert or have no ReplyCode name for.
        return EventType.UNKNOWN, command
    if numeric <= 399:
        kind = EventType.REPLY
    elif numeric <= 599:
        kind = EventType.ERROR
    else:
        kind = EventType.UNKNOWN
    return kind, code.name
def _handle_prefix(prefix):
    """Wrap a raw prefix string in IRCPrefix; None passes through."""
    return None if prefix is None else IRCPrefix(prefix)
class Server:
    """
    Generic IRC connection. Subclassed by specific kinds of servers.
    """
    __slots__ = ("loop", "master", "handlers", "socket", "connected", "_uri")
    def __init__(self, uri, master=None, *, loop=None):
        # master, when given, receives a copy of every dispatched event.
        self.loop = loop or asyncio.get_event_loop()
        self.master = master
        self._uri = uri
        self.handlers = {}
        self.socket = None
        self.connected = False
    def add_global_handler(self, event, handler, priority=0):
        """Register *handler* on the master dispatcher (no-op without one)."""
        if self.master:
            self.master.add_global_handler(event, handler, priority)
    def remove_global_handler(self, event, handler):
        """Remove *handler* from the master dispatcher (no-op without one)."""
        if self.master:
            self.master.remove_global_handler(event, handler)
    def add_handler(self, event, handler, priority=0):
        """Register a per-server handler, kept ordered by priority."""
        handler = SortedHandler(handler, priority)
        li = self.handlers.setdefault(event, [])
        insort(li, handler)
    def remove_handler(self, event, handler):
        """Remove the first registered wrapper around *handler*, if any."""
        handlers = self.handlers.get(event, [])
        for h in handlers:
            if h.handler == handler:
                handlers.remove(h)
                break
    async def _dispatch(self, event):
        # Fan the event out: master first (as a task, not awaited), then
        # 'all_events' handlers, then command-specific handlers.  A
        # handler exception is wrapped in HandlerError, which aborts
        # dispatch of the remaining handlers.
        if self.master:
            self.loop.create_task(self.master._dispatch(event))
        for handler in self.handlers.get("all_events", ()):
            try:
                await handler(event)
            except Exception as e:
                raise HandlerError(e)
        for handler in self.handlers.get(event.command, ()):
            try:
                await handler(event)
            except Exception as e:
                raise HandlerError(e)
    # NOTE(review): without an abc.ABCMeta metaclass these abstractmethods
    # are not enforced at instantiation time; they only raise when called.
    @abc.abstractmethod
    async def connect(self, name, password=""):
        raise NotImplementedError
    @abc.abstractmethod
    async def disconnect(self):
        raise NotImplementedError
    @abc.abstractmethod
    async def process_data(self):
        raise NotImplementedError
    @abc.abstractmethod
    async def send_raw(self, data):
        raise NotImplementedError
class DefaultServer(Server):
    """
    The default server is a 'standard' IRC server, complying with the
    IRC specification
    """
    # NOTE(review): "_uri" is already a slot on Server; repeating it here
    # creates a second, shadowing descriptor -- likely unintended.
    __slots__ = ("buffer", "_uri", "username", "password")
def __init__(self, uri, master=None, *, loop=None):
super().__init__(uri, master, loop=loop)
self.buffer = LineBuffer()
self.username = None
self.password = None
    async def connect(self, username, password=""):
        """Open the websocket and register with PASS/NICK/USER.

        PASS is sent first and only when *password* is non-empty; NICK
        and USER follow, both derived from *username*.
        """
        self.username = username
        self.password = password
        # ssl=None leaves TLS configuration to the library -- TODO confirm
        # this behaves as intended for wss:// URIs.
        self.socket = await websockets.connect(self._uri, ssl=None)
        self.connected = True
        if self.password:
            await self.pass_(self.password)
        await self.nick(self.username)
        await self.user(self.username, self.username)
async def disconnect(self):
await self.socket.close()
self.socket = None
self.connected = False
# Methods for receiving data
async def process_data(self):
try:
data = await self.socket.recv()
except websockets.ConnectionClosed:
await self.disconnect()
raise
if isinstance(data, str):
data = bytes(data, 'utf-8')
if chr(data[-1]) != "\n":
data += b'\n'
self.buffer.feed(data)
for line in self.buffer:
if not line:
continue
await self._process_line(line)
return self
    async def _process_line(self, line):
        """Parse one raw IRC line and dispatch the resulting events.

        A raw 'all_raw_events' event is dispatched first, then the fully
        parsed, command-specific event.
        """
        event = Event(self, EventType.CLIENT, "all_raw_events", [None, line])
        await self._dispatch(event)
        match = _regexp_rfc.match(line)
        type, command = _handle_command(match.group('command'))
        command = command.lower()
        # NOTE(review): group('argument') is None for messages without
        # parameters -- make sure _handle_args accepts None.
        args = _handle_args(match.group('argument'))
        tags = _handle_tags(match.group('tags'))
        prefix = _handle_prefix(match.group('prefix'))
        # Dispatch the actual specific event
        event = Event(self, type, command, args, prefix, tags)
        log.debug(event)
        await self._dispatch(event)
# Methods for sending data
    async def send_raw(self, data):
        """Send *data* over the websocket verbatim."""
        await self.socket.send(data)
async def send_items(self, *items):
await self.send_raw(' '.join(filter(None, items)))
# Handlers to send individual commands
# Server management
    async def pass_(self, password):
        """PASS -- supply the connection password (trailing underscore
        because ``pass`` is a Python keyword)."""
        await self.send_items("PASS", password)
    async def nick(self, nick):
        """NICK -- set or change the client's nickname."""
        await self.send_items("NICK", nick)
async def user(self, user, realname, mode=None):
if mode is None:
mode = "0"
await self.send_items("USER", user, mode, "*", f":{realname}")
    async def oper(self, name, password):
        """OPER -- authenticate as an IRC operator."""
        await self.send_items("OPER", name, password)
    async def mode(self, nick, mode, param=None):
        """MODE -- query or change user/channel modes."""
        await self.send_items("MODE", nick, mode, param)
    async def service(self, nick, distribution, type, info):
        """SERVICE -- register a new network service."""
        await self.send_items("SERVICE", nick, "*", distribution, type, "*", f":{info}")
async def quit(self, message=None):
if message is not None:
message = f":{message}"
await self.send_items("QUIT", message)
await self.disconnect()
async def squit(self, server, comment=None):
if comment is not None:
comment = f":{comment}"
await self.send_items("SQUIT", server, comment)
# Channel management
async def join(self, channel, key=None):
if isinstance(channel, list):
if key is not None and not isinstance(key, list):
raise TypeError("List of channels must use list of keys, if keys are provided")
channel = ",".join(channel)
if key is not None:
key = ",".join(key)
await self.send_items("JOIN", channel, key)
async def part(self, channel, message=None):
if isinstance(channel, list):
channel = ",".join(channel)
if message is not None:
message = f":{message}"
await self.send_items("PART", channel, message)
async def topic(self, channel, topic=None):
if topic is not None:
topic = f":{topic}"
await self.send_items("TOPIC", channel, topic)
async def names(self, channel=None, target=None):
if isinstance(channel, list):
channel = ",".join(channel)
await self.send_items("NAMES", channel, target)
async def list(self, channel=None, target=None):
if isinstance(channel, list):
channel = ",".join(channel)
await self.send_items("LIST", channel, target)
async def invite(self, nick, channel):
await self.send_items("INVITE", nick, channel)
async def kick(self, channel, user, comment=None):
if isinstance(channel, list):
channel = ",".join(channel)
if isinstance(user, list):
user = ",".join(user)
if comment is not None:
comment = f":{comment}"
await self.send_items("KICK", channel, user, comment)
# Sending messages
async def privmsg(self, target, text):
text = f":{text}"
await self.send_items("PRIVMSG", target, text)
async def notice(self, target, text):
text = f":{text}"
await self.send_items("NOTICE", target, text)
# Server queries
async def motd(self, target=None):
await self.send_items("MOTD", target)
async def lusers(self, mask=None, target=None):
await self.send_items("LUSERS", mask, target)
async def version(self, target=None):
await self.send_items("VERSION", target)
async def stats(self, query=None, target=None):
await self.send_items("STATS", query, target)
async def links(self, mask=None, remote=None):
await self.send_items("LINKS", remote, mask)
async def time(self, target=None):
await self.send_items("TIME", target)
async def connect_(self, target, port, remote=None):
await self.send_items("CONNECT", target, port, remote)
async def trace(self, target=None):
await self.send_items("TRACE", target)
async def admin(self, target=None):
await self.send_items("ADMIN", target)
async def info(self, target=None):
await self.send_items("INFO", target)
# Service queries
async def servlist(self, mask=None, type=None):
await self.send_items("SERVLIST", mask, type)
async def squery(self, name, text):
text = f":{text}"
await self.send_items(name, text)
# User queries
async def who(self, mask=None, ops_only=False):
if mask is None:
mask = "0"
if ops_only is True:
mask += " o"
await self.send_items("WHO", mask)
async def whois(self, mask, target=None):
if isinstance(mask, list):
mask = ",".join(mask)
await self.send_items("WHOIS", target, mask)
async def whowas(self, nick, count=None, target=None):
if isinstance(nick, list):
nick = ",".join(nick)
await self.send_items("WHOWAS", nick, count, target)
# Miscellaneous messages
async def kill(self, nick, comment):
comment = f":{comment}"
await self.send_items("KILL", nick, comment)
async def ping(self, serv1, serv2=None):
await self.send_items("PING", serv1, serv2)
async def pong(self, serv1, serv2=None):
await self.send_items("PONG", serv1, serv2)
async def error(self, message):
message = f":{message}"
await self.send_items("ERROR", message)
# Optional messages bellow
async def away(self, message=None):
if message is not None:
message = f":{message}"
await self.send_items("AWAY", message)
async def rehash(self):
await self.send_items("REHASH")
async def die(self):
await self.send_items("DIE")
async def restart(self):
await self.send_items("RESTART")
async def summon(self, user, target=None, channel=None):
await self.send_items("SUMMON", user, target, channel)
async def users(self, target=None):
await self.send_items("USERS", target)
async def wallops(self, message=None):
if message is not None:
message = f":{message}"
await self.send_items("WALLOPS", message)
async def userhost(self, nick):
if isinstance(nick, list):
if len(nick) > 5:
raise AttributeError("Userhost command can only get up to 5 users at once")
nick = " ".join(nick)
await self.send_items("USERHOST", nick)
async def ison(self, nick):
if isinstance(nick, list):
nick = " ".join(nick)
await self.send_items("ISON", nick)
# IRC v3 addons
async def cap(self, subcom, args):
if subcom not in _cap_subcommands:
raise AttributeError
if isinstance(args, list):
args = " ".join(args)
args = f":{args}"
await self.send_items("CAP", subcom, args) | airc/server.py | import re
import abc
import asyncio
import logging
import websockets
from .enums import ReplyCode, EventType
from .errors import *
from .events import Event
from .utils import insort, LineBuffer, SortedHandler, IRCPrefix
__all__ = ("Server", "DefaultServer")
log = logging.getLogger("airc.server")
_cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split())
_client_subcommands = set(_cap_subcommands) - {'NAK'}
_rfc_pattern = r"^(@(?P<tags>[^ ]*) )?(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?"
_regexp_rfc = re.compile(_rfc_pattern)
# TODO: Move some of this to utils
def _handle_tags(tags):
if tags is None:
return {}
tags = tags.lstrip("@")
raw_tags = tags.split(";")
tags = {}
for raw_tag in raw_tags:
name, val = raw_tag.split("=")
if val == "":
val = None
tags[name] = val
return tags
def _handle_args(args):
args = args.lstrip()
out_args = []
rest = False
tmp = ""
for char in args:
if rest:
tmp += char
elif char == " ":
out_args.append(tmp)
tmp = ""
elif char == ":" and tmp == "":
rest = True
else:
tmp += char
if tmp:
out_args.append(tmp)
return out_args
def _handle_command(command):
if not command.isnumeric():
return EventType.PROTOCOL, command
try:
com = int(command)
code = ReplyCode(com)
if 0 <= com <= 399:
type = EventType.REPLY
elif 400 <= com <= 599:
type = EventType.ERROR
else:
type = EventType.UNKNOWN
except ValueError:
return EventType.UNKNOWN, command
return type, code.name
def _handle_prefix(prefix):
if prefix is None:
return None
return IRCPrefix(prefix)
class Server:
"""
Generic IRC connection. Subclassed by specific kinds of servers.
"""
__slots__ = ("loop", "master", "handlers", "socket", "connected", "_uri")
def __init__(self, uri, master=None, *, loop=None):
self.loop = loop or asyncio.get_event_loop()
self.master = master
self._uri = uri
self.handlers = {}
self.socket = None
self.connected = False
def add_global_handler(self, event, handler, priority=0):
if self.master:
self.master.add_global_handler(event, handler, priority)
def remove_global_handler(self, event, handler):
if self.master:
self.master.remove_global_handler(event, handler)
def add_handler(self, event, handler, priority=0):
handler = SortedHandler(handler, priority)
li = self.handlers.setdefault(event, [])
insort(li, handler)
def remove_handler(self, event, handler):
handlers = self.handlers.get(event, [])
for h in handlers:
if h.handler == handler:
handlers.remove(h)
break
async def _dispatch(self, event):
if self.master:
self.loop.create_task(self.master._dispatch(event))
for handler in self.handlers.get("all_events", ()):
try:
await handler(event)
except Exception as e:
raise HandlerError(e)
for handler in self.handlers.get(event.command, ()):
try:
await handler(event)
except Exception as e:
raise HandlerError(e)
@abc.abstractmethod
async def connect(self, name, password=""):
raise NotImplementedError
@abc.abstractmethod
async def disconnect(self):
raise NotImplementedError
@abc.abstractmethod
async def process_data(self):
raise NotImplementedError
@abc.abstractmethod
async def send_raw(self, data):
raise NotImplementedError
class DefaultServer(Server):
"""
The default server is a 'standard' IRC server, complying with the
IRC specification
"""
__slots__ = ("buffer", "_uri", "username", "password")
def __init__(self, uri, master=None, *, loop=None):
super().__init__(uri, master, loop=loop)
self.buffer = LineBuffer()
self.username = None
self.password = None
async def connect(self, username, password=""):
self.username = username
self.password = password
self.socket = await websockets.connect(self._uri, ssl=None)
self.connected = True
if self.password:
await self.pass_(self.password)
await self.nick(self.username)
await self.user(self.username, self.username)
async def disconnect(self):
await self.socket.close()
self.socket = None
self.connected = False
# Methods for receiving data
async def process_data(self):
try:
data = await self.socket.recv()
except websockets.ConnectionClosed:
await self.disconnect()
raise
if isinstance(data, str):
data = bytes(data, 'utf-8')
if chr(data[-1]) != "\n":
data += b'\n'
self.buffer.feed(data)
for line in self.buffer:
if not line:
continue
await self._process_line(line)
return self
async def _process_line(self, line):
event = Event(self, EventType.CLIENT, "all_raw_events", [None, line])
await self._dispatch(event)
match = _regexp_rfc.match(line)
type, command = _handle_command(match.group('command'))
command = command.lower()
args = _handle_args(match.group('argument'))
tags = _handle_tags(match.group('tags'))
prefix = _handle_prefix(match.group('prefix'))
# Dispatch the actual specific event
event = Event(self, type, command, args, prefix, tags)
log.debug(event)
await self._dispatch(event)
# Methods for sending data
async def send_raw(self, data):
await self.socket.send(data)
async def send_items(self, *items):
await self.send_raw(' '.join(filter(None, items)))
# Handlers to send individual commands
# Server management
async def pass_(self, password):
await self.send_items("PASS", password)
async def nick(self, nick):
await self.send_items("NICK", nick)
async def user(self, user, realname, mode=None):
if mode is None:
mode = "0"
await self.send_items("USER", user, mode, "*", f":{realname}")
async def oper(self, name, password):
await self.send_items("OPER", name, password)
async def mode(self, nick, mode, param=None):
await self.send_items("MODE", nick, mode, param)
async def service(self, nick, distribution, type, info):
await self.send_items("SERVICE", nick, "*", distribution, type, "*", f":{info}")
async def quit(self, message=None):
if message is not None:
message = f":{message}"
await self.send_items("QUIT", message)
await self.disconnect()
async def squit(self, server, comment=None):
if comment is not None:
comment = f":{comment}"
await self.send_items("SQUIT", server, comment)
# Channel management
async def join(self, channel, key=None):
if isinstance(channel, list):
if key is not None and not isinstance(key, list):
raise TypeError("List of channels must use list of keys, if keys are provided")
channel = ",".join(channel)
if key is not None:
key = ",".join(key)
await self.send_items("JOIN", channel, key)
async def part(self, channel, message=None):
if isinstance(channel, list):
channel = ",".join(channel)
if message is not None:
message = f":{message}"
await self.send_items("PART", channel, message)
async def topic(self, channel, topic=None):
if topic is not None:
topic = f":{topic}"
await self.send_items("TOPIC", channel, topic)
async def names(self, channel=None, target=None):
if isinstance(channel, list):
channel = ",".join(channel)
await self.send_items("NAMES", channel, target)
async def list(self, channel=None, target=None):
if isinstance(channel, list):
channel = ",".join(channel)
await self.send_items("LIST", channel, target)
async def invite(self, nick, channel):
await self.send_items("INVITE", nick, channel)
async def kick(self, channel, user, comment=None):
if isinstance(channel, list):
channel = ",".join(channel)
if isinstance(user, list):
user = ",".join(user)
if comment is not None:
comment = f":{comment}"
await self.send_items("KICK", channel, user, comment)
# Sending messages
async def privmsg(self, target, text):
text = f":{text}"
await self.send_items("PRIVMSG", target, text)
async def notice(self, target, text):
text = f":{text}"
await self.send_items("NOTICE", target, text)
# Server queries
async def motd(self, target=None):
await self.send_items("MOTD", target)
async def lusers(self, mask=None, target=None):
await self.send_items("LUSERS", mask, target)
async def version(self, target=None):
await self.send_items("VERSION", target)
async def stats(self, query=None, target=None):
await self.send_items("STATS", query, target)
async def links(self, mask=None, remote=None):
await self.send_items("LINKS", remote, mask)
async def time(self, target=None):
await self.send_items("TIME", target)
async def connect_(self, target, port, remote=None):
await self.send_items("CONNECT", target, port, remote)
async def trace(self, target=None):
await self.send_items("TRACE", target)
async def admin(self, target=None):
await self.send_items("ADMIN", target)
async def info(self, target=None):
await self.send_items("INFO", target)
# Service queries
async def servlist(self, mask=None, type=None):
await self.send_items("SERVLIST", mask, type)
async def squery(self, name, text):
text = f":{text}"
await self.send_items(name, text)
# User queries
async def who(self, mask=None, ops_only=False):
if mask is None:
mask = "0"
if ops_only is True:
mask += " o"
await self.send_items("WHO", mask)
async def whois(self, mask, target=None):
if isinstance(mask, list):
mask = ",".join(mask)
await self.send_items("WHOIS", target, mask)
async def whowas(self, nick, count=None, target=None):
if isinstance(nick, list):
nick = ",".join(nick)
await self.send_items("WHOWAS", nick, count, target)
# Miscellaneous messages
async def kill(self, nick, comment):
comment = f":{comment}"
await self.send_items("KILL", nick, comment)
async def ping(self, serv1, serv2=None):
await self.send_items("PING", serv1, serv2)
async def pong(self, serv1, serv2=None):
await self.send_items("PONG", serv1, serv2)
async def error(self, message):
message = f":{message}"
await self.send_items("ERROR", message)
# Optional messages bellow
async def away(self, message=None):
if message is not None:
message = f":{message}"
await self.send_items("AWAY", message)
async def rehash(self):
await self.send_items("REHASH")
async def die(self):
await self.send_items("DIE")
async def restart(self):
await self.send_items("RESTART")
async def summon(self, user, target=None, channel=None):
await self.send_items("SUMMON", user, target, channel)
async def users(self, target=None):
await self.send_items("USERS", target)
async def wallops(self, message=None):
if message is not None:
message = f":{message}"
await self.send_items("WALLOPS", message)
async def userhost(self, nick):
if isinstance(nick, list):
if len(nick) > 5:
raise AttributeError("Userhost command can only get up to 5 users at once")
nick = " ".join(nick)
await self.send_items("USERHOST", nick)
async def ison(self, nick):
if isinstance(nick, list):
nick = " ".join(nick)
await self.send_items("ISON", nick)
# IRC v3 addons
async def cap(self, subcom, args):
if subcom not in _cap_subcommands:
raise AttributeError
if isinstance(args, list):
args = " ".join(args)
args = f":{args}"
await self.send_items("CAP", subcom, args) | 0.283881 | 0.101322 |
import unittest
import collections
from runtime import env, ast, lib
INT_VALUE = env.Value(lib.INTEGER, 1, "x")
FLOAT_VALUE = env.Value(lib.FLOAT, 1.0, "y")
STRING_VALUE = env.Value(lib.STRING, "Hello", "identifier")
LIST_VALUE = env.Value(lib.LIST, ["H", "e", "l", "l", "o"])
SET_VALUE = env.Value(lib.SET, set(LIST_VALUE.data))
BOOL_VALUE = env.Value(lib.BOOLEAN, True, "b")
TRUE_STRING_VALUE = env.Value(lib.STRING, "true")
MISSING_INT_VALUE = env.Value(lib.INTEGER, 0, "missing")
NULL_VALUE = env.Value(env.NULL)
USELESS_OPERATOR = env.Operator(None, "+")
ANOTHER_USELESS_OPERATOR = env.Operator(None, "?")
USELESS_FUNCTION = env.Value(lib.FUNCTION, None)
OBJECT_VALUE = env.Value(lib.OBJECT, None)
LIBRARY = collections.namedtuple("Library", "EXPORTS")
class TestEnv(unittest.TestCase):
"""Test cases for the runtime environment."""
def test_namespace(self):
"""Test the Namespace class."""
namespace = env.Namespace(None)
self.assertEqual(namespace.parent, None)
self.assertRaises(env.NamespaceException, namespace.find, "id", "identifier")
self.assertRaises(env.NamespaceException, namespace.find, "op", "operator")
# store example operator
namespace.store(STRING_VALUE)
namespace.store(USELESS_OPERATOR)
self.assertEqual(namespace.find("id", STRING_VALUE.name), STRING_VALUE)
self.assertEqual(
namespace.find("op", USELESS_OPERATOR.symbol), USELESS_OPERATOR)
# test upper namspace
sub = namespace.child()
self.assertEqual(namespace.find("id", STRING_VALUE.name), STRING_VALUE)
self.assertEqual(
namespace.find("op", USELESS_OPERATOR.symbol), USELESS_OPERATOR)
# check independence
sub.store(MISSING_INT_VALUE)
sub.store(ANOTHER_USELESS_OPERATOR)
self.assertRaises(env.NamespaceException, namespace.find,
"id", MISSING_INT_VALUE.name)
self.assertRaises(env.NamespaceException, namespace.find, "op",
ANOTHER_USELESS_OPERATOR.symbol)
def test_datatype(self):
"""Test the Datatype class."""
self.assertTrue(lib.INTEGER.kind_of(lib.NUMBER))
self.assertTrue(lib.FLOAT.kind_of(lib.NUMBER))
self.assertTrue(lib.INTEGER.kind_of(env.ANY))
def test_context(self):
"""Test the Context class."""
namespace = env.Namespace(None)
context = env.Context(namespace)
context.store(INT_VALUE)
self.assertEqual(context.find("id", INT_VALUE.name), INT_VALUE)
self.assertRaises(env.NamespaceException, context.find, "id", STRING_VALUE.name)
custom_library = LIBRARY(EXPORTS=[STRING_VALUE])
context.load(custom_library)
self.assertEqual(context.find("id", STRING_VALUE.name), STRING_VALUE)
self.assertIs(context.substitute(), namespace)
self.assertIsNot(context.namespace, namespace)
def test_value(self):
"""Test the Value class."""
self.assertNotEqual(INT_VALUE, FLOAT_VALUE)
self.assertEqual(INT_VALUE, INT_VALUE)
self.assertEqual(str(NULL_VALUE), "<Value ? <T null> *(None)>")
def test_signature(self):
"""Test the signature class."""
expected_values = [
env.Value(lib.NUMBER, None, "x"),
env.Value(lib.NUMBER, None, "delta"),
env.Value(lib.FLOAT, -1.0, "phi"),
]
sign = env.Signature(expected_values, "works!")
# Case 1: Too many arguments
first_case = [
env.Value(lib.INTEGER, 3),
env.Value(lib.FLOAT, 3.0),
env.Value(lib.FLOAT, -3.0),
env.Value(lib.INTEGER, 3.0),
]
self.assertRaises(env.ArgumentException, sign.match, first_case)
# Case 2: Too less arguments
second_case = [
env.Value(lib.INTEGER, 3),
]
self.assertRaises(env.ArgumentException, sign.match, second_case)
# Case 3: Fitting arguments
third_case = [
env.Value(lib.INTEGER, 3),
env.Value(lib.INTEGER, 0),
env.Value(lib.FLOAT, 0.0),
]
third_case_result = [
env.Value(lib.INTEGER, 3, "x"),
env.Value(lib.INTEGER, 0, "delta"),
env.Value(lib.FLOAT, 0.0, "phi"),
]
self.assertEqual(sign.match(third_case), (third_case_result, "works!"))
# Case 4: default values
fourth_case = [
env.Value(lib.INTEGER, 3),
env.Value(lib.INTEGER, 0),
]
fourth_case_result = [
env.Value(lib.INTEGER, 3, "x"),
env.Value(lib.INTEGER, 0, "delta"),
env.Value(lib.FLOAT, -1.0, "phi"),
]
self.assertEqual(sign.match(fourth_case),
(fourth_case_result, "works!"))
def test_function(self):
"""Test the function class."""
context = env.empty_context()
# FUNCTIONtion without signatures
func = env.Function([])
self.assertRaises(env.FunctionException, func.eval, [], context)
# FUNCTIONtion with one signature, perfect match
identifier_literal = ast.Identifier("str")
func = env.Function([
env.Signature([env.Value(lib.STRING, None, "str")],
identifier_literal),
])
args = [
STRING_VALUE,
]
self.assertEqual(func.eval(args, context), STRING_VALUE)
# FUNCTIONtion with one signature, optional argument
func = env.Function([
env.Signature(
[env.Value(lib.STRING, STRING_VALUE.data, "str")], identifier_literal),
])
self.assertEqual(func.eval(args, context), STRING_VALUE)
# FUNCTIONtion with two signatures, second perfect match
func = env.Function([
env.Signature([env.Value(lib.INTEGER, None, "i")], None),
env.Signature([env.Value(lib.STRING, None, "str")],
identifier_literal),
])
self.assertEqual(func.eval(args, context), STRING_VALUE)
# Check function sandboxing
class CustomNode(ast.Node):
"""A custom node."""
name = "custom"
def __init__(self):
super().__init__()
@classmethod
def eval(cls, context):
"""Stores a an INTEGER at x."""
context.store(env.Value(lib.INTEGER, 1, "x"))
return env.Value(env.NULL)
func = env.Function([
env.Signature([], CustomNode()),
])
self.assertRaises(Exception, context.find, "id", "x")
def test_operator(self):
"""Test the operator class."""
context = env.empty_context()
int_literal = ast.Literal(INT_VALUE)
# Test forwarding
func = env.Function([
env.Signature([], int_literal)
])
operator = env.Operator(func, "+")
self.assertEqual(str(operator), "<Operator (+)>")
self.assertEqual(operator.eval([], context), INT_VALUE) | runtime/test_env.py | import unittest
import collections
from runtime import env, ast, lib
INT_VALUE = env.Value(lib.INTEGER, 1, "x")
FLOAT_VALUE = env.Value(lib.FLOAT, 1.0, "y")
STRING_VALUE = env.Value(lib.STRING, "Hello", "identifier")
LIST_VALUE = env.Value(lib.LIST, ["H", "e", "l", "l", "o"])
SET_VALUE = env.Value(lib.SET, set(LIST_VALUE.data))
BOOL_VALUE = env.Value(lib.BOOLEAN, True, "b")
TRUE_STRING_VALUE = env.Value(lib.STRING, "true")
MISSING_INT_VALUE = env.Value(lib.INTEGER, 0, "missing")
NULL_VALUE = env.Value(env.NULL)
USELESS_OPERATOR = env.Operator(None, "+")
ANOTHER_USELESS_OPERATOR = env.Operator(None, "?")
USELESS_FUNCTION = env.Value(lib.FUNCTION, None)
OBJECT_VALUE = env.Value(lib.OBJECT, None)
LIBRARY = collections.namedtuple("Library", "EXPORTS")
class TestEnv(unittest.TestCase):
"""Test cases for the runtime environment."""
def test_namespace(self):
"""Test the Namespace class."""
namespace = env.Namespace(None)
self.assertEqual(namespace.parent, None)
self.assertRaises(env.NamespaceException, namespace.find, "id", "identifier")
self.assertRaises(env.NamespaceException, namespace.find, "op", "operator")
# store example operator
namespace.store(STRING_VALUE)
namespace.store(USELESS_OPERATOR)
self.assertEqual(namespace.find("id", STRING_VALUE.name), STRING_VALUE)
self.assertEqual(
namespace.find("op", USELESS_OPERATOR.symbol), USELESS_OPERATOR)
# test upper namspace
sub = namespace.child()
self.assertEqual(namespace.find("id", STRING_VALUE.name), STRING_VALUE)
self.assertEqual(
namespace.find("op", USELESS_OPERATOR.symbol), USELESS_OPERATOR)
# check independence
sub.store(MISSING_INT_VALUE)
sub.store(ANOTHER_USELESS_OPERATOR)
self.assertRaises(env.NamespaceException, namespace.find,
"id", MISSING_INT_VALUE.name)
self.assertRaises(env.NamespaceException, namespace.find, "op",
ANOTHER_USELESS_OPERATOR.symbol)
def test_datatype(self):
"""Test the Datatype class."""
self.assertTrue(lib.INTEGER.kind_of(lib.NUMBER))
self.assertTrue(lib.FLOAT.kind_of(lib.NUMBER))
self.assertTrue(lib.INTEGER.kind_of(env.ANY))
def test_context(self):
"""Test the Context class."""
namespace = env.Namespace(None)
context = env.Context(namespace)
context.store(INT_VALUE)
self.assertEqual(context.find("id", INT_VALUE.name), INT_VALUE)
self.assertRaises(env.NamespaceException, context.find, "id", STRING_VALUE.name)
custom_library = LIBRARY(EXPORTS=[STRING_VALUE])
context.load(custom_library)
self.assertEqual(context.find("id", STRING_VALUE.name), STRING_VALUE)
self.assertIs(context.substitute(), namespace)
self.assertIsNot(context.namespace, namespace)
def test_value(self):
"""Test the Value class."""
self.assertNotEqual(INT_VALUE, FLOAT_VALUE)
self.assertEqual(INT_VALUE, INT_VALUE)
self.assertEqual(str(NULL_VALUE), "<Value ? <T null> *(None)>")
def test_signature(self):
"""Test the signature class."""
expected_values = [
env.Value(lib.NUMBER, None, "x"),
env.Value(lib.NUMBER, None, "delta"),
env.Value(lib.FLOAT, -1.0, "phi"),
]
sign = env.Signature(expected_values, "works!")
# Case 1: Too many arguments
first_case = [
env.Value(lib.INTEGER, 3),
env.Value(lib.FLOAT, 3.0),
env.Value(lib.FLOAT, -3.0),
env.Value(lib.INTEGER, 3.0),
]
self.assertRaises(env.ArgumentException, sign.match, first_case)
# Case 2: Too less arguments
second_case = [
env.Value(lib.INTEGER, 3),
]
self.assertRaises(env.ArgumentException, sign.match, second_case)
# Case 3: Fitting arguments
third_case = [
env.Value(lib.INTEGER, 3),
env.Value(lib.INTEGER, 0),
env.Value(lib.FLOAT, 0.0),
]
third_case_result = [
env.Value(lib.INTEGER, 3, "x"),
env.Value(lib.INTEGER, 0, "delta"),
env.Value(lib.FLOAT, 0.0, "phi"),
]
self.assertEqual(sign.match(third_case), (third_case_result, "works!"))
# Case 4: default values
fourth_case = [
env.Value(lib.INTEGER, 3),
env.Value(lib.INTEGER, 0),
]
fourth_case_result = [
env.Value(lib.INTEGER, 3, "x"),
env.Value(lib.INTEGER, 0, "delta"),
env.Value(lib.FLOAT, -1.0, "phi"),
]
self.assertEqual(sign.match(fourth_case),
(fourth_case_result, "works!"))
def test_function(self):
"""Test the function class."""
context = env.empty_context()
# FUNCTIONtion without signatures
func = env.Function([])
self.assertRaises(env.FunctionException, func.eval, [], context)
# FUNCTIONtion with one signature, perfect match
identifier_literal = ast.Identifier("str")
func = env.Function([
env.Signature([env.Value(lib.STRING, None, "str")],
identifier_literal),
])
args = [
STRING_VALUE,
]
self.assertEqual(func.eval(args, context), STRING_VALUE)
# FUNCTIONtion with one signature, optional argument
func = env.Function([
env.Signature(
[env.Value(lib.STRING, STRING_VALUE.data, "str")], identifier_literal),
])
self.assertEqual(func.eval(args, context), STRING_VALUE)
# FUNCTIONtion with two signatures, second perfect match
func = env.Function([
env.Signature([env.Value(lib.INTEGER, None, "i")], None),
env.Signature([env.Value(lib.STRING, None, "str")],
identifier_literal),
])
self.assertEqual(func.eval(args, context), STRING_VALUE)
# Check function sandboxing
class CustomNode(ast.Node):
"""A custom node."""
name = "custom"
def __init__(self):
super().__init__()
@classmethod
def eval(cls, context):
"""Stores a an INTEGER at x."""
context.store(env.Value(lib.INTEGER, 1, "x"))
return env.Value(env.NULL)
func = env.Function([
env.Signature([], CustomNode()),
])
self.assertRaises(Exception, context.find, "id", "x")
def test_operator(self):
"""Test the operator class."""
context = env.empty_context()
int_literal = ast.Literal(INT_VALUE)
# Test forwarding
func = env.Function([
env.Signature([], int_literal)
])
operator = env.Operator(func, "+")
self.assertEqual(str(operator), "<Operator (+)>")
self.assertEqual(operator.eval([], context), INT_VALUE) | 0.691185 | 0.393269 |
def solve_part_1(inputlist : list) -> int:
#Current position of the program counter
pc = 0
while (pc < len(inputlist) and pc != -1):
pc = execute_opcode(inputlist, pc)
return inputlist[0]
def solve_part_2(inputlist: list) -> int:
desired_output = 19690720
for noun in range(100):
for verb in range(100):
newlist = inputlist.copy()
newlist[1] = noun
newlist[2] = verb
output = solve_part_1(newlist)
if output == desired_output:
return 100*noun + verb
def execute_opcode(inputlist: list, position: int) -> int:
"Executes the OPCode on position p"
op = inputlist[position]
print(op)
parameters = []
if len(op) >2 :
# Parameter mode 1
# We have to make a distincion because of leading zeros!
mode = "1"
get_parameter = {}
get_parameter[1] =lambda p: int(inputlist[p])
get_parameter[0] = lambda p : int(inputlist[int(inputlist[p])])
input_parameters = inputlist[position][:-2]
op = int(inputlist[position][-2:])
# Extract the parameters
# We first need to add leading zeroes
# We expect a length of 3 for if opcode 1,2 and length 1 for opcode 3,4
if op in (1,2,7,8):
# Expect length 3
input_parameters = input_parameters.zfill(3)
if op in (3,4):
# Expect length 1
input_parameters = input_parameters.zfill(1)
if op in (5,6):
# Expect lenght 2
input_parameters = input_parameters.zfill(2)
input_parameters = input_parameters[::-1]
for i in range(len(input_parameters)):
parameters.append(get_parameter[int(input_parameters[i])](position+i+1))
else:
# Parameter mode 0
mode = "0"
get_parameter = lambda p : int(inputlist[int(inputlist[p])])
op = int(inputlist[position])
if op in (1,2,7,8):
parameters.append(get_parameter(position+1))
parameters.append(get_parameter(position+2))
parameters.append(get_parameter(position+3))
if op in (3,4):
parameters.append(get_parameter(position+1))
if op in (5,6):
parameters.append(get_parameter(position+1))
parameters.append(get_parameter(position+2))
# Fill a list with parameters
def set_result(p, value):
if p <1:
raise Exception("Trying to write to wrong field")
if mode == "0":
inputlist[int(inputlist[position + p])] = str(value)
return
if int(input_parameters[p-1]) == 0:
inputlist[int(inputlist[position + p])] = str(value)
if int(input_parameters[p-1]) == 1:
inputlist[position + p] = str(value)
# Here we execute the actual operation
if(op == 1):
# Addition
left, right, goal = parameters
result = left + right
set_result(3,result)
return position + 4
if(op == 2):
# Multiplikation
left,right,goal = parameters
result = left * right
set_result(3,result)
return position + 4
if(op == 3):
# Save input
set_result(1, input_p)
return position + 2
if(op == 4):
# Print output
parameter = parameters[0]
print(f"Output: {parameter}")
return position + 2
if(op == 5):
# Jump if true
parameter = parameters[0]
if parameter != 0:
return parameters[1]
else:
return position + 3
if(op == 6):
# Jump if false
parameter = parameters[0]
if parameter == 0:
return parameters[1]
else:
return position + 3
if(op == 7):
# Less than
left, right, goal = parameters
result = (0,1 ) [left < right]
set_result(3, result)
return position + 4
if(op == 8):
# equals
left, right, goal = parameters
result = (0,1 ) [left == right]
set_result(3, result)
return position + 4
if(op==99):
#Program End
return -1
raise Exception("OP Code not recognized")
if __name__ == "__main__":
f = open("input.txt",'r')
positions = f.read().split(',')
global input_p
input_p = 5
solution = solve_part_1(positions)
print(solution) | door_05/solution.py | def solve_part_1(inputlist : list) -> int:
#Current position of the program counter
pc = 0
while (pc < len(inputlist) and pc != -1):
pc = execute_opcode(inputlist, pc)
return inputlist[0]
def solve_part_2(inputlist: list) -> int:
desired_output = 19690720
for noun in range(100):
for verb in range(100):
newlist = inputlist.copy()
newlist[1] = noun
newlist[2] = verb
output = solve_part_1(newlist)
if output == desired_output:
return 100*noun + verb
def execute_opcode(inputlist: list, position: int) -> int:
    """Execute the Intcode instruction at *position* and return the next
    program counter (-1 signals halt, opcode 99).

    *inputlist* holds the program as strings and is mutated in place by
    write instructions.  Instructions with a parameter-mode prefix
    (e.g. "1002") take the first branch; bare opcodes the second.
    Opcode 3 reads the module-level global ``input_p``.
    """
    op = inputlist[position]
    print(op)  # debug trace of the raw instruction token
    parameters = []
    if len(op) >2 :
        # Parameter mode 1
        # We have to make a distincion because of leading zeros!
        mode = "1"
        # Dispatch table: mode 1 = immediate value, mode 0 = positional
        # (value at the address held in the cell).
        get_parameter = {}
        get_parameter[1] =lambda p: int(inputlist[p])
        get_parameter[0] = lambda p : int(inputlist[int(inputlist[p])])
        input_parameters = inputlist[position][:-2]
        op = int(inputlist[position][-2:])
        # Extract the parameters
        # We first need to add leading zeroes
        # We expect a length of 3 for if opcode 1,2 and length 1 for opcode 3,4
        if op in (1,2,7,8):
            # Expect length 3
            input_parameters = input_parameters.zfill(3)
        if op in (3,4):
            # Expect length 1
            input_parameters = input_parameters.zfill(1)
        if op in (5,6):
            # Expect lenght 2
            input_parameters = input_parameters.zfill(2)
        # Reverse so that index i holds the mode digit of parameter i+1.
        input_parameters = input_parameters[::-1]
        for i in range(len(input_parameters)):
            parameters.append(get_parameter[int(input_parameters[i])](position+i+1))
    else:
        # Parameter mode 0
        mode = "0"
        get_parameter = lambda p : int(inputlist[int(inputlist[p])])
        op = int(inputlist[position])
        if op in (1,2,7,8):
            parameters.append(get_parameter(position+1))
            parameters.append(get_parameter(position+2))
            parameters.append(get_parameter(position+3))
        if op in (3,4):
            parameters.append(get_parameter(position+1))
        if op in (5,6):
            parameters.append(get_parameter(position+1))
            parameters.append(get_parameter(position+2))
    # Fill a list with parameters
    def set_result(p, value):
        # Write str(value) through the p-th parameter of the current
        # instruction, honouring that parameter's mode (closure over
        # `mode`, `input_parameters` and `position`).
        if p <1:
            raise Exception("Trying to write to wrong field")
        if mode == "0":
            inputlist[int(inputlist[position + p])] = str(value)
            return
        if int(input_parameters[p-1]) == 0:
            inputlist[int(inputlist[position + p])] = str(value)
        if int(input_parameters[p-1]) == 1:
            inputlist[position + p] = str(value)
    # Here we execute the actual operation
    if(op == 1):
        # Addition
        left, right, goal = parameters
        result = left + right
        set_result(3,result)
        return position + 4
    if(op == 2):
        # Multiplikation
        left,right,goal = parameters
        result = left * right
        set_result(3,result)
        return position + 4
    if(op == 3):
        # Save input (reads the module-level global `input_p`)
        set_result(1, input_p)
        return position + 2
    if(op == 4):
        # Print output
        parameter = parameters[0]
        print(f"Output: {parameter}")
        return position + 2
    if(op == 5):
        # Jump if true
        parameter = parameters[0]
        if parameter != 0:
            return parameters[1]
        else:
            return position + 3
    if(op == 6):
        # Jump if false
        parameter = parameters[0]
        if parameter == 0:
            return parameters[1]
        else:
            return position + 3
    if(op == 7):
        # Less than
        left, right, goal = parameters
        result = (0,1 ) [left < right]
        set_result(3, result)
        return position + 4
    if(op == 8):
        # equals
        left, right, goal = parameters
        result = (0,1 ) [left == right]
        set_result(3, result)
        return position + 4
    if(op==99):
        #Program End
        return -1
    raise Exception("OP Code not recognized")
if __name__ == "__main__":
    # NOTE(review): the file handle is never closed; a `with open(...)`
    # context manager would be safer.
    f = open("input.txt",'r')
    # Program cells stay strings; execute_opcode converts as needed.
    positions = f.read().split(',')
    # `global` at module level is a no-op; input_p is already global here.
    global input_p
    input_p = 5
    solution = solve_part_1(positions)
print(solution) | 0.373876 | 0.508849 |
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import fsm_state
expected_verilog = """
module test;
reg CLK;
reg RST;
wire valid;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.valid(valid)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg valid
);
reg [8-1:0] counter;
reg [32-1:0] fsm;
localparam fsm_init = 0;
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
valid <= 0;
counter <= 0;
end else begin
if(counter <= 255) begin
counter <= counter + 1;
end else begin
counter <= 0;
end
case(fsm)
fsm_init: begin
if(counter == 10) begin
valid <= 0;
end else begin
valid <= 1;
end
if(counter == 40) begin
valid <= 0;
end else begin
valid <= 1;
end
if(valid) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(counter == 20) begin
valid <= 0;
end else begin
valid <= 1;
end
if(valid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
if(counter == 30) begin
valid <= 0;
end else begin
valid <= 1;
end
if(counter[0] == 0) begin
fsm <= fsm_3;
end
if(!(counter[0] == 0) && (counter[1] == 1)) begin
fsm <= fsm_1;
end
if(!(counter[0] == 0) && !(counter[1] == 1)) begin
fsm <= fsm_2;
end
end
fsm_3: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
    """Generate Verilog from fsm_state.mkTest() and compare it against the
    golden `expected_verilog` source after an AST round-trip."""
    veriloggen.reset()
    test_module = fsm_state.mkTest()
    code = test_module.to_verilog()
    # Parse and regenerate the expected source so both sides are
    # normalised by pyverilog's code generator before comparison.
    from pyverilog.vparser.parser import VerilogParser
    from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
    parser = VerilogParser()
    expected_ast = parser.parse(expected_verilog)
    codegen = ASTCodeGenerator()
    expected_code = codegen.visit(expected_ast)
assert(expected_code == code) | tests/extension/fsm_/state/test_fsm_state.py | from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import fsm_state
expected_verilog = """
module test;
reg CLK;
reg RST;
wire valid;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.valid(valid)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg valid
);
reg [8-1:0] counter;
reg [32-1:0] fsm;
localparam fsm_init = 0;
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
valid <= 0;
counter <= 0;
end else begin
if(counter <= 255) begin
counter <= counter + 1;
end else begin
counter <= 0;
end
case(fsm)
fsm_init: begin
if(counter == 10) begin
valid <= 0;
end else begin
valid <= 1;
end
if(counter == 40) begin
valid <= 0;
end else begin
valid <= 1;
end
if(valid) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(counter == 20) begin
valid <= 0;
end else begin
valid <= 1;
end
if(valid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
if(counter == 30) begin
valid <= 0;
end else begin
valid <= 1;
end
if(counter[0] == 0) begin
fsm <= fsm_3;
end
if(!(counter[0] == 0) && (counter[1] == 1)) begin
fsm <= fsm_1;
end
if(!(counter[0] == 0) && !(counter[1] == 1)) begin
fsm <= fsm_2;
end
end
fsm_3: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
    """Generate Verilog from fsm_state.mkTest() and compare it against the
    golden `expected_verilog` source after an AST round-trip."""
    veriloggen.reset()
    test_module = fsm_state.mkTest()
    code = test_module.to_verilog()
    # Parse and regenerate the expected source so both sides are
    # normalised by pyverilog's code generator before comparison.
    from pyverilog.vparser.parser import VerilogParser
    from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
    parser = VerilogParser()
    expected_ast = parser.parse(expected_verilog)
    codegen = ASTCodeGenerator()
    expected_code = codegen.visit(expected_ast)
assert(expected_code == code) | 0.344885 | 0.278459 |
from __future__ import print_function
import os
import sys
import threading
import time
from argparse import ArgumentParser
from DBSAPI.dbsApi import DbsApi
from WMCore.WMSpec.StdSpecs.Harvesting import harvestingWorkload, getTestArguments
from WMCore.DataStructs.Run import Run
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMInit import connectToDB
from WMCore.WMSpec.Makers.TaskMaker import TaskMaker
from WMCore.WorkQueue.WMBSHelper import WMBSHelper
def check_list(option, opt, value):
    """optparse-style callback helper: split *value* on commas."""
    pieces = value.split(",")
    return pieces
def comma_separated_list(string):
    """argparse ``type=`` converter: turn "a,b,c" into ["a", "b", "c"]."""
    result = string.split(',')
    return result
usage = "usage: %prog [options]"
parser = ArgumentParser(usage=usage)
parser.add_argument("-d", "--dataset", dest="InputDataset",
action="store", help="Dataset to harvest",
metavar="DATASET")
parser.add_argument("-R", "--run", dest="RunWhitelist", type=comma_separated_list,
action="store", help="Comma separated list of runs",
metavar="RUN1,RUN2", default=[])
parser.add_argument("-r", "--release", dest="CMSSWVersion",
action="store", help="CMSSW version to use for harvesting",
metavar="CMSSW_X_Y_Z")
parser.add_argument("-s", "--scenario", dest="Scenario",
action="store", help="Configuration/DataProcessing scenario",
metavar="SCENARIO")
parser.add_argument("-t", "--global-tag", dest="GlobalTag",
action="store", help="Conditions global tag",
metavar="GLOBALTAG")
parser.add_argument("-f", "--reference", dest="RefHistogram",
action="store", help="Reference histogram",
metavar="LFN")
options = parser.parse_args()
missing = []
mandatory = ["InputDataset", "CMSSWVersion", "Scenario", "GlobalTag"]
for option in options.__dict__:
if getattr(options, option) is None and option in mandatory:
missing.append(option)
if missing:
print("Error: The following mandatory options are missing:")
print("\n".join(missing))
sys.exit(1)
# The default arguments are set in:
# WMCORE/src/python/WMCore/WMSpec/StdSpecs/Harvesting.py
arguments = getTestArguments()
arguments.update(options.__dict__)
connectToDB()
req_time = "%.2f" % time.time()
workloadName = "Harvesting%s--%s" % (arguments["InputDataset"].replace("/", "__"), req_time)
workloadFile = "Harvesting%s--%s.pkl" % (arguments["InputDataset"].replace("/", "__"), req_time)
os.mkdir(workloadName)
workload = harvestingWorkload(workloadName, arguments)
workloadPath = os.path.join(workloadName, workloadFile)
workload.setOwner("<EMAIL>")
workload.setSpecUrl(workloadPath)
# Build a sandbox using TaskMaker
taskMaker = TaskMaker(workload, os.path.join(os.getcwd(), workloadName))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
workload.save(workloadPath)
def injectFilesFromDBS(inputFileset, datasetPath, runsWhiteList=None):
    """
    _injectFilesFromDBS_

    Look up every file of *datasetPath* in DBS and inject it, with its
    run/lumi information, into *inputFileset* (a WMBS Fileset).

    runsWhiteList: optional list of run numbers as strings; when given,
    files whose first run is not listed are skipped.

    Raises if no file was injected; commits and closes the fileset on
    success.
    """
    # BUG FIX: the default was the mutable `[]`, which Python shares
    # across calls; use None and normalise instead.
    if runsWhiteList is None:
        runsWhiteList = []
    print("injecting files from %s into %s, please wait..." % (datasetPath, inputFileset.name))
    args = {}
    args["url"] = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader"
    args["version"] = "DBS_2_1_1"
    args["mode"] = "GET"
    dbsApi = DbsApi(args)
    dbsResults = dbsApi.listFileArray(path=datasetPath, retriveList=["retrive_lumi", "retrive_run"])
    print(" found %d files, inserting into wmbs..." % (len(dbsResults)))
    for dbsResult in dbsResults:
        # Whitelist filtering keys on the run number of the first lumi entry.
        if runsWhiteList and str(dbsResult["LumiList"][0]["RunNumber"]) not in runsWhiteList:
            continue
        myFile = File(lfn=dbsResult["LogicalFileName"], size=dbsResult["FileSize"],
                      events=dbsResult["NumberOfEvents"], checksums={"cksum": dbsResult["Checksum"]},
                      locations="cmssrm.fnal.gov", merged=True)
        myRun = Run(runNumber=dbsResult["LumiList"][0]["RunNumber"])
        for lumi in dbsResult["LumiList"]:
            myRun.appendLumi(lumi["LumiSectionNumber"])
        myFile.addRun(myRun)
        myFile.create()
        inputFileset.addFile(myFile)
    if len(inputFileset) < 1:
        raise Exception("No files were selected!")
    inputFileset.commit()
    inputFileset.markOpen(False)
    return
myThread = threading.currentThread()
myThread.transaction.begin()
for workloadTask in workload.taskIterator():
    # One WMBS fileset + subscription per top-level task of the workload.
    inputFileset = Fileset(name=workloadTask.getPathName())
    inputFileset.create()
    inputDataset = workloadTask.inputDataset()
    inputDatasetPath = "/%s/%s/%s" % (inputDataset.primary,
                                      inputDataset.processed,
                                      inputDataset.tier)
    injectFilesFromDBS(inputFileset, inputDatasetPath, options.RunWhitelist)
    myWMBSHelper = WMBSHelper(workload)
    # BUG FIX: was `workloadTash` — a NameError at runtime; the loop
    # variable is `workloadTask`.
    myWMBSHelper._createSubscriptionsInWMBS(workloadTask.getPathName())
myThread.transaction.commit() | etc/harvestingInjector.py | from __future__ import print_function
import os
import sys
import threading
import time
from argparse import ArgumentParser
from DBSAPI.dbsApi import DbsApi
from WMCore.WMSpec.StdSpecs.Harvesting import harvestingWorkload, getTestArguments
from WMCore.DataStructs.Run import Run
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMInit import connectToDB
from WMCore.WMSpec.Makers.TaskMaker import TaskMaker
from WMCore.WorkQueue.WMBSHelper import WMBSHelper
def check_list(option, opt, value):
    """optparse-style callback helper: split *value* on commas."""
    pieces = value.split(",")
    return pieces
def comma_separated_list(string):
    """argparse ``type=`` converter: turn "a,b,c" into ["a", "b", "c"]."""
    result = string.split(',')
    return result
usage = "usage: %prog [options]"
parser = ArgumentParser(usage=usage)
parser.add_argument("-d", "--dataset", dest="InputDataset",
action="store", help="Dataset to harvest",
metavar="DATASET")
parser.add_argument("-R", "--run", dest="RunWhitelist", type=comma_separated_list,
action="store", help="Comma separated list of runs",
metavar="RUN1,RUN2", default=[])
parser.add_argument("-r", "--release", dest="CMSSWVersion",
action="store", help="CMSSW version to use for harvesting",
metavar="CMSSW_X_Y_Z")
parser.add_argument("-s", "--scenario", dest="Scenario",
action="store", help="Configuration/DataProcessing scenario",
metavar="SCENARIO")
parser.add_argument("-t", "--global-tag", dest="GlobalTag",
action="store", help="Conditions global tag",
metavar="GLOBALTAG")
parser.add_argument("-f", "--reference", dest="RefHistogram",
action="store", help="Reference histogram",
metavar="LFN")
options = parser.parse_args()
missing = []
mandatory = ["InputDataset", "CMSSWVersion", "Scenario", "GlobalTag"]
for option in options.__dict__:
if getattr(options, option) is None and option in mandatory:
missing.append(option)
if missing:
print("Error: The following mandatory options are missing:")
print("\n".join(missing))
sys.exit(1)
# The default arguments are set in:
# WMCORE/src/python/WMCore/WMSpec/StdSpecs/Harvesting.py
arguments = getTestArguments()
arguments.update(options.__dict__)
connectToDB()
req_time = "%.2f" % time.time()
workloadName = "Harvesting%s--%s" % (arguments["InputDataset"].replace("/", "__"), req_time)
workloadFile = "Harvesting%s--%s.pkl" % (arguments["InputDataset"].replace("/", "__"), req_time)
os.mkdir(workloadName)
workload = harvestingWorkload(workloadName, arguments)
workloadPath = os.path.join(workloadName, workloadFile)
workload.setOwner("<EMAIL>")
workload.setSpecUrl(workloadPath)
# Build a sandbox using TaskMaker
taskMaker = TaskMaker(workload, os.path.join(os.getcwd(), workloadName))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
workload.save(workloadPath)
def injectFilesFromDBS(inputFileset, datasetPath, runsWhiteList=None):
    """
    _injectFilesFromDBS_

    Look up every file of *datasetPath* in DBS and inject it, with its
    run/lumi information, into *inputFileset* (a WMBS Fileset).

    runsWhiteList: optional list of run numbers as strings; when given,
    files whose first run is not listed are skipped.

    Raises if no file was injected; commits and closes the fileset on
    success.
    """
    # BUG FIX: the default was the mutable `[]`, which Python shares
    # across calls; use None and normalise instead.
    if runsWhiteList is None:
        runsWhiteList = []
    print("injecting files from %s into %s, please wait..." % (datasetPath, inputFileset.name))
    args = {}
    args["url"] = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader"
    args["version"] = "DBS_2_1_1"
    args["mode"] = "GET"
    dbsApi = DbsApi(args)
    dbsResults = dbsApi.listFileArray(path=datasetPath, retriveList=["retrive_lumi", "retrive_run"])
    print(" found %d files, inserting into wmbs..." % (len(dbsResults)))
    for dbsResult in dbsResults:
        # Whitelist filtering keys on the run number of the first lumi entry.
        if runsWhiteList and str(dbsResult["LumiList"][0]["RunNumber"]) not in runsWhiteList:
            continue
        myFile = File(lfn=dbsResult["LogicalFileName"], size=dbsResult["FileSize"],
                      events=dbsResult["NumberOfEvents"], checksums={"cksum": dbsResult["Checksum"]},
                      locations="cmssrm.fnal.gov", merged=True)
        myRun = Run(runNumber=dbsResult["LumiList"][0]["RunNumber"])
        for lumi in dbsResult["LumiList"]:
            myRun.appendLumi(lumi["LumiSectionNumber"])
        myFile.addRun(myRun)
        myFile.create()
        inputFileset.addFile(myFile)
    if len(inputFileset) < 1:
        raise Exception("No files were selected!")
    inputFileset.commit()
    inputFileset.markOpen(False)
    return
myThread = threading.currentThread()
myThread.transaction.begin()
for workloadTask in workload.taskIterator():
    # One WMBS fileset + subscription per top-level task of the workload.
    inputFileset = Fileset(name=workloadTask.getPathName())
    inputFileset.create()
    inputDataset = workloadTask.inputDataset()
    inputDatasetPath = "/%s/%s/%s" % (inputDataset.primary,
                                      inputDataset.processed,
                                      inputDataset.tier)
    injectFilesFromDBS(inputFileset, inputDatasetPath, options.RunWhitelist)
    myWMBSHelper = WMBSHelper(workload)
    # BUG FIX: was `workloadTash` — a NameError at runtime; the loop
    # variable is `workloadTask`.
    myWMBSHelper._createSubscriptionsInWMBS(workloadTask.getPathName())
myThread.transaction.commit() | 0.396886 | 0.070656 |
import os
import sys
import argparse
import joblib
import pandas as pd
from azureml.core import Run
from azureml.core.run import Run
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
def getRuntimeArgs():
    """Parse command-line options; only --data_path is recognised."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str)
    return parser.parse_args()
def main():
    """Entry point: load the credit data, train the model, publish it.

    Expects german_credit_data.csv under --data_path.  The fitted
    pipeline is dumped to ./outputs/model.pkl (auto-uploaded by Azure ML)
    and additionally uploaded onto the parent run.
    """
    args = getRuntimeArgs()
    run = Run.get_context()
    credit_data_df = pd.read_csv(os.path.join(args.data_path, 'german_credit_data.csv'))
    #credit_data_df = pd.read_csv(os.path.join(run.input_datasets['data'], 'german_credit_data.csv'))
    clf = model_train(credit_data_df, run)
    #copying to "outputs" directory, automatically uploads it to Azure ML
    output_dir = './outputs/'
    os.makedirs(output_dir, exist_ok=True)
    joblib.dump(value=clf, filename=os.path.join(output_dir, 'model.pkl'))
    run.parent.upload_file(name='outputs/model.pkl',path_or_stream="./outputs/model.pkl")
def model_train(ds_df, run):
    """Train a LogisticRegression pipeline on the German-credit dataframe.

    NOTE: ds_df is modified in place (the "Sno" column — presumably a
    serial number — is dropped).  Train/test accuracies are printed and
    logged to the Azure ML *run*; the fitted sklearn Pipeline is returned.
    """
    ds_df.drop("Sno", axis=1, inplace=True)
    y_raw = ds_df['Risk']
    X_raw = ds_df.drop('Risk', axis=1)
    # Column-wise preprocessing: impute + one-hot for object columns,
    # standard scaling for numeric ones; any other column is dropped.
    categorical_features = X_raw.select_dtypes(include=['object']).columns
    numeric_features = X_raw.select_dtypes(include=['int64', 'float']).columns
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value="missing")),
        ('onehotencoder', OneHotEncoder(categories='auto', sparse=False))])
    numeric_transformer = Pipeline(steps=[
        ('scaler', StandardScaler())])
    feature_engineering_pipeline = ColumnTransformer(
        transformers=[
            ('numeric', numeric_transformer, numeric_features),
            ('categorical', categorical_transformer, categorical_features)
        ], remainder="drop")
    # Encode Labels
    le = LabelEncoder()
    encoded_y = le.fit_transform(y_raw)
    # Train test split (stratified so class balance is preserved)
    X_train, X_test, y_train, y_test = train_test_split(X_raw, encoded_y, test_size=0.20, stratify=encoded_y, random_state=42)
    # Create sklearn pipeline
    lr_clf = Pipeline(steps=[('preprocessor', feature_engineering_pipeline),
                             ('classifier', LogisticRegression(solver="lbfgs"))])
    # Train the model
    lr_clf.fit(X_train, y_train)
    # Capture metrics
    train_acc = lr_clf.score(X_train, y_train)
    test_acc = lr_clf.score(X_test, y_test)
    print("Training accuracy: %.3f" % train_acc)
    print("Test data accuracy: %.3f" % test_acc)
    # Log to Azure ML
    run.log('Train accuracy', train_acc)
    run.log('Test accuracy', test_acc)
    return lr_clf
if __name__ == "__main__":
main() | src/model1/train_register.py | import os
import sys
import argparse
import joblib
import pandas as pd
from azureml.core import Run
from azureml.core.run import Run
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
def getRuntimeArgs():
    """Parse command-line options; only --data_path is recognised."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str)
    return parser.parse_args()
def main():
args = getRuntimeArgs()
run = Run.get_context()
credit_data_df = pd.read_csv(os.path.join(args.data_path, 'german_credit_data.csv'))
#credit_data_df = pd.read_csv(os.path.join(run.input_datasets['data'], 'german_credit_data.csv'))
clf = model_train(credit_data_df, run)
#copying to "outputs" directory, automatically uploads it to Azure ML
output_dir = './outputs/'
os.makedirs(output_dir, exist_ok=True)
joblib.dump(value=clf, filename=os.path.join(output_dir, 'model.pkl'))
run.parent.upload_file(name='outputs/model.pkl',path_or_stream="./outputs/model.pkl")
def model_train(ds_df, run):
ds_df.drop("Sno", axis=1, inplace=True)
y_raw = ds_df['Risk']
X_raw = ds_df.drop('Risk', axis=1)
categorical_features = X_raw.select_dtypes(include=['object']).columns
numeric_features = X_raw.select_dtypes(include=['int64', 'float']).columns
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value="missing")),
('onehotencoder', OneHotEncoder(categories='auto', sparse=False))])
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
feature_engineering_pipeline = ColumnTransformer(
transformers=[
('numeric', numeric_transformer, numeric_features),
('categorical', categorical_transformer, categorical_features)
], remainder="drop")
# Encode Labels
le = LabelEncoder()
encoded_y = le.fit_transform(y_raw)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X_raw, encoded_y, test_size=0.20, stratify=encoded_y, random_state=42)
# Create sklearn pipeline
lr_clf = Pipeline(steps=[('preprocessor', feature_engineering_pipeline),
('classifier', LogisticRegression(solver="lbfgs"))])
# Train the model
lr_clf.fit(X_train, y_train)
# Capture metrics
train_acc = lr_clf.score(X_train, y_train)
test_acc = lr_clf.score(X_test, y_test)
print("Training accuracy: %.3f" % train_acc)
print("Test data accuracy: %.3f" % test_acc)
# Log to Azure ML
run.log('Train accuracy', train_acc)
run.log('Test accuracy', test_acc)
return lr_clf
if __name__ == "__main__":
main() | 0.426799 | 0.172729 |
import pytest
import cocotb
from cocotb.queue import LifoQueue, PriorityQueue, Queue, QueueEmpty, QueueFull
from cocotb.regression import TestFactory
from cocotb.triggers import Combine, NullTrigger
async def run_queue_nonblocking_test(dut, queue_type):
    """Exercise the non-blocking API (put_nowait/get_nowait) of queue_type.

    Checks maxsize/qsize/empty/full bookkeeping, QueueFull on overflow,
    QueueEmpty on underflow, and the pop order (FIFO, LIFO or priority).
    """
    QUEUE_SIZE = 10
    q = queue_type(maxsize=QUEUE_SIZE)
    # queue empty
    assert q.maxsize == QUEUE_SIZE
    assert q.qsize() == 0
    assert q.empty()
    assert not q.full()
    # put one item
    q.put_nowait(0)
    assert q.qsize() == 1
    assert not q.empty()
    assert not q.full()
    # fill queue
    if queue_type is PriorityQueue:
        # Insert descending so the priority order still pops 0..N-1 below.
        for k in range(QUEUE_SIZE - 1, 0, -1):
            q.put_nowait(k)
    else:
        for k in range(1, QUEUE_SIZE):
            q.put_nowait(k)
    assert q.qsize() == QUEUE_SIZE
    assert not q.empty()
    assert q.full()
    # overflow
    with pytest.raises(QueueFull):
        q.put_nowait(100)
    # check queue contents
    if queue_type is LifoQueue:
        for k in range(QUEUE_SIZE - 1, -1, -1):
            assert q.get_nowait() == k
    else:
        for k in range(QUEUE_SIZE):
            assert q.get_nowait() == k
    assert q.qsize() == 0
    assert q.empty()
    assert not q.full()
    # underflow
    with pytest.raises(QueueEmpty):
        q.get_nowait()
factory = TestFactory(run_queue_nonblocking_test)
factory.add_option("queue_type", [Queue, PriorityQueue, LifoQueue])
factory.generate_tests()
@cocotb.test()
async def test_queue_contention(dut):
    """Run many concurrent putters/getters against a bounded Queue and
    check that killed waiters are removed cleanly and ordering holds."""
    NUM_PUTTERS = 20
    QUEUE_SIZE = 10
    q = Queue(maxsize=QUEUE_SIZE)
    async def putter(lst, item):
        await q.put(item)
        lst.append(item)
    async def getter(lst, item):
        assert item == await q.get()
        lst.append(item)
    coro_list = []
    putter_list = []
    getter_list = []
    # test put contention
    for k in range(NUM_PUTTERS):
        coro_list.append(await cocotb.start(putter(putter_list, k)))
    assert q.qsize() == QUEUE_SIZE
    # test killed putter
    coro = cocotb.start_soon(putter(putter_list, 100))
    coro.kill()
    coro_list.append(cocotb.start_soon(putter(putter_list, 101)))
    for k in range(NUM_PUTTERS):
        coro_list.append(cocotb.start_soon(getter(getter_list, k)))
    coro_list.append(cocotb.start_soon(getter(getter_list, 101)))
    await Combine(*coro_list)
    # Item 100 must never appear: its putter was killed before running.
    assert putter_list == list(range(NUM_PUTTERS)) + [101]
    assert getter_list == list(range(NUM_PUTTERS)) + [101]
    assert q.qsize() == 0
    coro_list = []
    putter_list = []
    getter_list = []
    # test get contention
    for k in range(NUM_PUTTERS):
        coro_list.append(cocotb.start_soon(getter(getter_list, k)))
    # test killed getter
    coro2 = cocotb.start_soon(getter(getter_list, 100))
    coro2.kill()
    coro_list.append(cocotb.start_soon(getter(getter_list, 101)))
    for k in range(NUM_PUTTERS):
        coro_list.append(cocotb.start_soon(putter(putter_list, k)))
    coro_list.append(cocotb.start_soon(putter(putter_list, 101)))
    await Combine(*coro_list)
    assert putter_list == list(range(NUM_PUTTERS)) + [101]
    assert getter_list == list(range(NUM_PUTTERS)) + [101]
    assert q.qsize() == 0
@cocotb.test()
async def test_fair_scheduling(dut):
    """Contending putters on a size-1 queue must be serviced round-robin:
    every putter's i-th put completes before any putter's (i+1)-th."""
    NUM_PUTTERS = 10
    NUM_PUTS = 10
    q = Queue(maxsize=1)
    async def putter(i):
        for _ in range(NUM_PUTS):
            await q.put(i)
    # fill queue to force contention
    q.put_nowait(None)
    # create NUM_PUTTER contending putters
    putters = [await cocotb.start(putter(i)) for i in range(NUM_PUTTERS)]
    # remove value that forced contention
    assert q.get_nowait() is None, "Popped unexpected value"
    # test fair scheduling by ensuring that each putter is serviced for its first
    # write before the second write on any putter is serviced.
    for _ in range(NUM_PUTS):
        remaining = set(range(NUM_PUTTERS))
        for _ in range(NUM_PUTTERS):
            v = await q.get()
            assert v in remaining, "Unfair scheduling occurred"
            remaining.remove(v)
    assert all(p.done() for p in putters), "Not all putters finished?"
async def run_queue_blocking_test(dut, queue_type):
    """Exercise blocking put()/get() under contention for queue_type,
    mirroring every operation into an unbounded reference queue of the
    same type so the popped values can be checked order-for-order."""
    NUM_PUTTERS = 20
    QUEUE_SIZE = 10
    q = queue_type(maxsize=QUEUE_SIZE)
    ref_q = queue_type()
    async def putter(lst, item):
        await q.put(item)
        ref_q.put_nowait(item)
        lst.append(item)
    async def getter(lst, num):
        item = await q.get()
        assert ref_q.get_nowait() == item
        lst.append(num)
    coro_list = []
    putter_list = []
    getter_list = []
    # test put contention
    for k in range(NUM_PUTTERS):
        coro_list.append(await cocotb.start(putter(putter_list, k)))
    assert q.qsize() == QUEUE_SIZE
    for k in range(NUM_PUTTERS):
        coro_list.append(await cocotb.start(getter(getter_list, k)))
    await Combine(*coro_list)
    assert putter_list == list(range(NUM_PUTTERS))
    assert getter_list == list(range(NUM_PUTTERS))
    assert q.qsize() == 0
    assert ref_q.qsize() == 0
    coro_list = []
    putter_list = []
    getter_list = []
    # test get contention
    for k in range(NUM_PUTTERS):
        coro_list.append(await cocotb.start(getter(getter_list, k)))
    for k in range(NUM_PUTTERS):
        coro_list.append(await cocotb.start(putter(putter_list, k)))
    await Combine(*coro_list)
    assert putter_list == list(range(NUM_PUTTERS))
    assert getter_list == list(range(NUM_PUTTERS))
    assert q.qsize() == 0
    assert ref_q.qsize() == 0
factory = TestFactory(run_queue_blocking_test)
factory.add_option("queue_type", [Queue, PriorityQueue, LifoQueue])
factory.generate_tests()
@cocotb.test()
async def test_str_and_repr(_):
    """repr() must reflect internal state: queued items and blocked
    putters/getters appear and disappear as the queue evolves."""
    q = Queue[int](maxsize=1)
    q.put_nowait(0)
    await cocotb.start(q.put(1))
    s = repr(q)
    assert "maxsize" in s
    assert "_queue" in s
    assert "_putters" in s
    assert str(q)[:-1] in s
    assert q.get_nowait() == 0
    # There's now room in the queue and putter has been signalled to wake up
    await NullTrigger()
    # putter has put into queue
    s = repr(q)
    assert "_queue" in s
    assert "_putters" not in s
    assert q.get_nowait() == 1
    getter = await cocotb.start(q.get())
    s = repr(q)
    assert "_putters" not in s
    assert "_getters" in s
    assert str(q)[:-1] in s
    cocotb.start_soon(q.put(2))
    await getter
    s = repr(q)
    assert "_getters" not in s
assert str(q)[:-1] in s | tests/test_cases/test_cocotb/test_queues.py | import pytest
import cocotb
from cocotb.queue import LifoQueue, PriorityQueue, Queue, QueueEmpty, QueueFull
from cocotb.regression import TestFactory
from cocotb.triggers import Combine, NullTrigger
async def run_queue_nonblocking_test(dut, queue_type):
QUEUE_SIZE = 10
q = queue_type(maxsize=QUEUE_SIZE)
# queue empty
assert q.maxsize == QUEUE_SIZE
assert q.qsize() == 0
assert q.empty()
assert not q.full()
# put one item
q.put_nowait(0)
assert q.qsize() == 1
assert not q.empty()
assert not q.full()
# fill queue
if queue_type is PriorityQueue:
for k in range(QUEUE_SIZE - 1, 0, -1):
q.put_nowait(k)
else:
for k in range(1, QUEUE_SIZE):
q.put_nowait(k)
assert q.qsize() == QUEUE_SIZE
assert not q.empty()
assert q.full()
# overflow
with pytest.raises(QueueFull):
q.put_nowait(100)
# check queue contents
if queue_type is LifoQueue:
for k in range(QUEUE_SIZE - 1, -1, -1):
assert q.get_nowait() == k
else:
for k in range(QUEUE_SIZE):
assert q.get_nowait() == k
assert q.qsize() == 0
assert q.empty()
assert not q.full()
# underflow
with pytest.raises(QueueEmpty):
q.get_nowait()
factory = TestFactory(run_queue_nonblocking_test)
factory.add_option("queue_type", [Queue, PriorityQueue, LifoQueue])
factory.generate_tests()
@cocotb.test()
async def test_queue_contention(dut):
NUM_PUTTERS = 20
QUEUE_SIZE = 10
q = Queue(maxsize=QUEUE_SIZE)
async def putter(lst, item):
await q.put(item)
lst.append(item)
async def getter(lst, item):
assert item == await q.get()
lst.append(item)
coro_list = []
putter_list = []
getter_list = []
# test put contention
for k in range(NUM_PUTTERS):
coro_list.append(await cocotb.start(putter(putter_list, k)))
assert q.qsize() == QUEUE_SIZE
# test killed putter
coro = cocotb.start_soon(putter(putter_list, 100))
coro.kill()
coro_list.append(cocotb.start_soon(putter(putter_list, 101)))
for k in range(NUM_PUTTERS):
coro_list.append(cocotb.start_soon(getter(getter_list, k)))
coro_list.append(cocotb.start_soon(getter(getter_list, 101)))
await Combine(*coro_list)
assert putter_list == list(range(NUM_PUTTERS)) + [101]
assert getter_list == list(range(NUM_PUTTERS)) + [101]
assert q.qsize() == 0
coro_list = []
putter_list = []
getter_list = []
# test get contention
for k in range(NUM_PUTTERS):
coro_list.append(cocotb.start_soon(getter(getter_list, k)))
# test killed getter
coro2 = cocotb.start_soon(getter(getter_list, 100))
coro2.kill()
coro_list.append(cocotb.start_soon(getter(getter_list, 101)))
for k in range(NUM_PUTTERS):
coro_list.append(cocotb.start_soon(putter(putter_list, k)))
coro_list.append(cocotb.start_soon(putter(putter_list, 101)))
await Combine(*coro_list)
assert putter_list == list(range(NUM_PUTTERS)) + [101]
assert getter_list == list(range(NUM_PUTTERS)) + [101]
assert q.qsize() == 0
@cocotb.test()
async def test_fair_scheduling(dut):
NUM_PUTTERS = 10
NUM_PUTS = 10
q = Queue(maxsize=1)
async def putter(i):
for _ in range(NUM_PUTS):
await q.put(i)
# fill queue to force contention
q.put_nowait(None)
# create NUM_PUTTER contending putters
putters = [await cocotb.start(putter(i)) for i in range(NUM_PUTTERS)]
# remove value that forced contention
assert q.get_nowait() is None, "Popped unexpected value"
# test fair scheduling by ensuring that each putter is serviced for its first
# write before the second write on any putter is serviced.
for _ in range(NUM_PUTS):
remaining = set(range(NUM_PUTTERS))
for _ in range(NUM_PUTTERS):
v = await q.get()
assert v in remaining, "Unfair scheduling occurred"
remaining.remove(v)
assert all(p.done() for p in putters), "Not all putters finished?"
async def run_queue_blocking_test(dut, queue_type):
NUM_PUTTERS = 20
QUEUE_SIZE = 10
q = queue_type(maxsize=QUEUE_SIZE)
ref_q = queue_type()
async def putter(lst, item):
await q.put(item)
ref_q.put_nowait(item)
lst.append(item)
async def getter(lst, num):
item = await q.get()
assert ref_q.get_nowait() == item
lst.append(num)
coro_list = []
putter_list = []
getter_list = []
# test put contention
for k in range(NUM_PUTTERS):
coro_list.append(await cocotb.start(putter(putter_list, k)))
assert q.qsize() == QUEUE_SIZE
for k in range(NUM_PUTTERS):
coro_list.append(await cocotb.start(getter(getter_list, k)))
await Combine(*coro_list)
assert putter_list == list(range(NUM_PUTTERS))
assert getter_list == list(range(NUM_PUTTERS))
assert q.qsize() == 0
assert ref_q.qsize() == 0
coro_list = []
putter_list = []
getter_list = []
# test get contention
for k in range(NUM_PUTTERS):
coro_list.append(await cocotb.start(getter(getter_list, k)))
for k in range(NUM_PUTTERS):
coro_list.append(await cocotb.start(putter(putter_list, k)))
await Combine(*coro_list)
assert putter_list == list(range(NUM_PUTTERS))
assert getter_list == list(range(NUM_PUTTERS))
assert q.qsize() == 0
assert ref_q.qsize() == 0
factory = TestFactory(run_queue_blocking_test)
factory.add_option("queue_type", [Queue, PriorityQueue, LifoQueue])
factory.generate_tests()
@cocotb.test()
async def test_str_and_repr(_):
q = Queue[int](maxsize=1)
q.put_nowait(0)
await cocotb.start(q.put(1))
s = repr(q)
assert "maxsize" in s
assert "_queue" in s
assert "_putters" in s
assert str(q)[:-1] in s
assert q.get_nowait() == 0
# There's now room in the queue and putter has been signalled to wake up
await NullTrigger()
# putter has put into queue
s = repr(q)
assert "_queue" in s
assert "_putters" not in s
assert q.get_nowait() == 1
getter = await cocotb.start(q.get())
s = repr(q)
assert "_putters" not in s
assert "_getters" in s
assert str(q)[:-1] in s
cocotb.start_soon(q.put(2))
await getter
s = repr(q)
assert "_getters" not in s
assert str(q)[:-1] in s | 0.496826 | 0.542803 |
import time
import urllib.parse
import json
import re
# 插件模块
import requests
from bs4 import BeautifulSoup
# 自写模块
from CityId import CityIdList
import Tools
# host
host = 'www.meituan.com'
# 关键字
keyWord = '<PASSWORD>'
# 省份名字
provinceName = '江西'
# 城市名字(为空获取整个省)
cityName = '永丰'
# 每次获取个数
number = 32
# 获取的数据总数
count = 0
# cookies
cookies = Tools.get_cookies('https://'+host)
# 获取城市id
city_id_list = []
city_id_list = CityIdList(host).get_city_id(provinceName,cityName)
# 如果返回默认值就是北京
if (city_id_list[0]==1 and provinceName != '北京'):
provinceName = '北京'
cityName = '北京市'
# 获取数据保存文件
info_file_name = provinceName +'-'+ cityName +'-'+ keyWord + ".json"
# 设置头部
res_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'Host': 'www.meituan.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 7.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3770.100 Safari/537.36'
}
# 请求api获取数据
res_headers['Host'] = 'apimobile.meituan.com'
def get_json(url):
    """Fetch one page of Meituan search results.

    Updates the module-level global `count` with the server-reported
    total and returns the page's search-result entries.  Returns an
    empty list on an API error so callers can safely `+=` the result.
    """
    res = requests.get(url, headers=res_headers)
    info = json.loads(res.text)
    # NOTE(review): the error code is compared as the string '0';
    # confirm the endpoint really returns it as a string.
    if (info['code'] != '0'):
        # BUG FIX: was `return {}`; an empty list keeps the return type
        # consistent with the success path.
        return []
    global count
    count = info['data']['totalCount']
    #print("total for this request:", count)
    return info['data']['searchResult']
# 获取所有数据
data_list = []
q = urllib.parse.quote(keyWord)
print("请稍等,获取数据中。。。。")
# 如果返回不是单个id是数
for city_id in city_id_list:
print("开始获取id:",city_id,'的数据')
# 开始获取数量
startNumber = 0
while startNumber <= count:
# 拼接url
api_url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/' + str(city_id) + '?' + cookies + '&userid=-1&limit=' + str(number) + '&offset=' + str(startNumber) + '&cateId=-1&q=' + q
#print(api_url)
data_list += get_json(api_url)
startNumber += number
# print(startNumber)
# 每个城市获取完暂停 3秒
time.sleep(3)
# 格式化 info_list
print("开始获取商家信息并且格式化数据。。。。")
i = 0
info_list = []
res_headers['Host'] = 'www.meituan.com'
res_headers['Content-Type']= 'text/html; charset=utf-8'
res_headers['Referer'] = 'https://www.meituan.com'
while i < len(data_list):
# 商家详情页
# res_url = 'https://www.meituan.com/shop/'+str(data_list[i]['id']) +'/'
# meishi
res_url = 'https://www.meituan.com/meishi/' + str(data_list[i]['id']) + '/'
page = requests.get(res_url, headers=res_headers) # Get该网页从而获取该html内容
soup = BeautifulSoup(page.content, "lxml") # 用lxml解析器解析该网页的内容, 好像f.text也是返回的html
#print(page.content.decode())
pattern = re.compile(r"window\.(_appState|AppData) = (.*?)", re.MULTILINE | re.DOTALL)
#尝试打印出网页内容,看是否获取成功
content = soup.find_all("script",text=pattern)
phone = ''
if (content != 0):
phone = content[0].string.split(' = ')[1]
data = json.dumps(phone)
print(data['poiInfo']['phone'])
info_list.append({'title': data_list[i]['title'],'showType':data_list[i]['showType'] ,'avgscore':data_list[i]['avgscore'],'address': data_list[i]['address'],'latitude':data_list[i]['latitude'],'longitude':data_list[i]['longitude']})
# 打印
#print(info_list[i])
i += 1
# 暂停会
time.sleep(3)
# 保存
Tools.save_json_file(info_file_name, info_list)
# 打印小吃个数
print("保存数据成功,总共有" + keyWord + ":",i,"家") | others/python/meituan-demo/demo.py | import time
import urllib.parse
import json
import re
# 插件模块
import requests
from bs4 import BeautifulSoup
# 自写模块
from CityId import CityIdList
import Tools
# host
host = 'www.meituan.com'
# 关键字
keyWord = '<PASSWORD>'
# 省份名字
provinceName = '江西'
# 城市名字(为空获取整个省)
cityName = '永丰'
# 每次获取个数
number = 32
# 获取的数据总数
count = 0
# cookies
cookies = Tools.get_cookies('https://'+host)
# 获取城市id
city_id_list = []
city_id_list = CityIdList(host).get_city_id(provinceName,cityName)
# 如果返回默认值就是北京
if (city_id_list[0]==1 and provinceName != '北京'):
provinceName = '北京'
cityName = '北京市'
# 获取数据保存文件
info_file_name = provinceName +'-'+ cityName +'-'+ keyWord + ".json"
# 设置头部
res_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'Host': 'www.meituan.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 7.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3770.100 Safari/537.36'
}
# 请求api获取数据
res_headers['Host'] = 'apimobile.meituan.com'
def get_json(url):
res = requests.get(url, headers=res_headers)
info = json.loads(res.text)
if (info['code'] != '0'):
return {}
global count
count = info['data']['totalCount']
#print("本次请求总数:",count)
return info['data']['searchResult']
# 获取所有数据
data_list = []
q = urllib.parse.quote(keyWord)
print("请稍等,获取数据中。。。。")
# 如果返回不是单个id是数
for city_id in city_id_list:
print("开始获取id:",city_id,'的数据')
# 开始获取数量
startNumber = 0
while startNumber <= count:
# 拼接url
api_url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/' + str(city_id) + '?' + cookies + '&userid=-1&limit=' + str(number) + '&offset=' + str(startNumber) + '&cateId=-1&q=' + q
#print(api_url)
data_list += get_json(api_url)
startNumber += number
# print(startNumber)
# 每个城市获取完暂停 3秒
time.sleep(3)
# 格式化 info_list
print("开始获取商家信息并且格式化数据。。。。")
i = 0
info_list = []
res_headers['Host'] = 'www.meituan.com'
res_headers['Content-Type']= 'text/html; charset=utf-8'
res_headers['Referer'] = 'https://www.meituan.com'
while i < len(data_list):
# 商家详情页
# res_url = 'https://www.meituan.com/shop/'+str(data_list[i]['id']) +'/'
# meishi
res_url = 'https://www.meituan.com/meishi/' + str(data_list[i]['id']) + '/'
page = requests.get(res_url, headers=res_headers) # Get该网页从而获取该html内容
soup = BeautifulSoup(page.content, "lxml") # 用lxml解析器解析该网页的内容, 好像f.text也是返回的html
#print(page.content.decode())
pattern = re.compile(r"window\.(_appState|AppData) = (.*?)", re.MULTILINE | re.DOTALL)
#尝试打印出网页内容,看是否获取成功
content = soup.find_all("script",text=pattern)
phone = ''
if (content != 0):
phone = content[0].string.split(' = ')[1]
data = json.dumps(phone)
print(data['poiInfo']['phone'])
info_list.append({'title': data_list[i]['title'],'showType':data_list[i]['showType'] ,'avgscore':data_list[i]['avgscore'],'address': data_list[i]['address'],'latitude':data_list[i]['latitude'],'longitude':data_list[i]['longitude']})
# 打印
#print(info_list[i])
i += 1
# 暂停会
time.sleep(3)
# 保存
Tools.save_json_file(info_file_name, info_list)
# 打印小吃个数
print("保存数据成功,总共有" + keyWord + ":",i,"家") | 0.071017 | 0.057865 |
import numpy as np
import numpy.linalg as la
from scipy.special import erf, erfinv
from ..util import numerical_types, sequence_types
from .JumpingDistribution import JumpingDistribution
class TruncatedGaussianJumpingDistribution(JumpingDistribution):
"""
Class representing a univariate jumping distribution whose translation is
Gaussian distributed with the extra condition that the destination cannot
fall outside a given range.
"""
def __init__(self, variance, low=None, high=None):
"""
Initializes a `TruncatedGaussianJumpingDistribution` with the given
variance and endpoints.
Parameters
----------
variance : float
a single number representing the variance of the non-truncated
Gaussian
low : float or None
- if None, the variate can be an arbitrarily large negative number
- if float, gives the lowest possible value of the variate
high : float or None
- if None, the variate can be an arbitrarily large positive number
- if float, gives the highest possible value of the variate and
must be larger than low
"""
self.variance = variance
self.low = low
self.high = high
@property
def low(self):
"""
The low endpoint of this truncated Gaussian. Can be -inf
"""
if not hasattr(self, '_low'):
raise AttributeError("low referenced before it was set.")
return self._low
@low.setter
def low(self, value):
"""
Setter for `TruncatedGaussianJumpingDistribution.low`.
Parameters
----------
value : float or None
- if None, the variate can be an arbitrarily large negative number
- if float, gives the lowest possible value of the variate
"""
if type(value) is type(None):
self._low = -np.inf
elif type(value) in numerical_types:
self._low = value
else:
raise TypeError("low was neither None nor a number.")
@property
def high(self):
"""
The low endpoint of this truncated Gaussian. Can be +inf
"""
if not hasattr(self, '_high'):
raise AttributeError("high referenced before it was set.")
return self._high
@high.setter
def high(self, value):
"""
Setter for `TruncatedGaussianJumpingDistribution.high`.
Parameters
----------
value : float or None
- if None, the variate can be an arbitrarily large positive number
- if float, gives the highest possible value of the variate
"""
if type(value) is type(None):
self._high = np.inf
elif type(value) in numerical_types:
if value > self.low:
self._high = value
else:
raise ValueError("high was not larger than low.")
else:
raise TypeError("high was neither None nor a number.")
@property
def variance(self):
"""
The variance, \\(\\sigma^2\\), of the non-truncated Gaussian.
"""
if not hasattr(self, '_variance'):
raise AttributeError("variance referenced before it was set.")
return self._variance
@variance.setter
def variance(self, value):
"""
Setter for `TruncatedGaussianJumpingDistribution.variance`.
Parameters
----------
value : float
a positive number
"""
if type(value) in numerical_types:
self._variance = value
else:
raise TypeError("variance was not a number.")
@property
def root_twice_variance(self):
"""
The square root of twice the variance, \\(\\sqrt{2}\\sigma\\).
"""
if not hasattr(self, '_root_twice_variance'):
self._root_twice_variance = np.sqrt(2 * self.variance)
return self._root_twice_variance
def left_erf(self, source):
"""
Computes the relevant error function evaluated at `source`
Parameters
----------
source : float
the mean of the truncated Gaussian
Returns
-------
left_erf_value : float
if `TruncatedGaussianJumpingDistribution.low` is \\(l\\), `source`
is \\(\\mu\\), and variance \\(\\sigma^2\\), then `left_erf_value`
is \\(\\text{erf}\\left(\\frac{l-\\mu}{\\sqrt{2}\\sigma}\\right)\\)
"""
if self.low == -np.inf:
return (-1.)
else:
return erf((self.low - source) / self.root_twice_variance)
def right_erf(self, source):
"""
Computes the relevant error function evaluated at `source`
Parameters
----------
source : float
the mean of the truncated Gaussian
Returns
-------
right_erf_value : float
if `TruncatedGaussianJumpingDistribution.high` is \\(h\\), `source`
is \\(\\mu\\), and variance \\(\\sigma^2\\), then `left_erf_value`
is \\(\\text{erf}\\left(\\frac{h-\\mu}{\\sqrt{2}\\sigma}\\right)\\)
"""
if self.high == np.inf:
return 1.
else:
return erf((self.high - source) / self.root_twice_variance)
def erf_difference(self, source):
"""
Computes the difference of the two error function values.
right_erf(source)-left_erf(source)
Parameters
----------
source : float
the mean of the truncated Gaussian
Returns
-------
erf_difference_value : float
if `TruncatedGaussianJumpingDistribution.high` is \\(h\\),
`TruncatedGaussianJumpingDistribution.low` is \\(l\\), `source` is
\\(\\mu\\), and variance \\(\\sigma^2\\), then
`erf_value_difference` is
\\(\\text{erf}\\left(\\frac{h-\\mu}{\\sqrt{2}\\sigma}\\right)-\
\\text{erf}\\left(\\frac{l-\\mu}{\\sqrt{2}\\sigma}\\right)\\)
"""
return (self.right_erf(source) - self.left_erf(source))
@property
def constant_in_log_value(self):
"""
A constant in the log value which is independent of both the source and
the destination.
"""
if not hasattr(self, '_constant_in_log_value'):
self._constant_in_log_value =\
(np.log(2. / (np.pi * self.variance)) / 2.)
return self._constant_in_log_value
def draw(self, source, shape=None, random=np.random):
"""
Draws a destination point from this jumping distribution given a source
point.
Parameters
----------
source : float
source point
shape : None or int or tuple
- if None, a single destination is returned as a single number
- if int \\(n\\), \\(n\\) destinations are returned as a 1D
`numpy.ndarray` of length \\(n\\)
- if tuple of ints \\((n_1,n_2,\\ldots,n_k)\\),
\\(\\prod_{m=1}^kn_m\\) destinations are returned as a
`numpy.ndarray` of shape \\((n_1,n_2,\\ldots,n_k)\\)
random : numpy.random.RandomState
the random number generator to use (default: `numpy.random`)
Returns
-------
drawn : number or numpy.ndarray
either single value or array of values. See documentation on
`shape` above for the type of the returned value
"""
uniforms = random.uniform(size=shape)
erfinv_argument = ((uniforms * self.right_erf(source)) +\
((1 - uniforms) * self.left_erf(source)))
return (source + (self.root_twice_variance * erfinv(erfinv_argument)))
def log_value(self, source, destination):
"""
Computes the log-PDF of jumping from `source` to `destination`.
Parameters
----------
source : float
source point
destination : float
destination point
Returns
-------
log_pdf : float
if the distribution is \\(f(x,y)=\\text{Pr}[y|x]\\), `source` is
\\(x\\) and `destination` is \\(y\\), then `log_pdf` is given by
\\(\\ln{f(x,y)}\\)
"""
difference = (destination - source)
return (self.constant_in_log_value +\
(((difference / self.standard_deviation) ** 2) / (-2.))) -\
np.log(self.erf_difference(source))
def log_value_difference(self, source, destination):
"""
Computes the difference in the log-PDF of jumping from `source` to
`destination` and of jumping from `destination` to `source`. While this
method has a default version, overriding it may provide an efficiency
benefit.
Parameters
----------
source : float
source point
destination : float
destination point
Returns
-------
log_pdf_difference : float
if the distribution is \\(f(x,y)=\\text{Pr}[y|x]\\), `source` is
\\(x\\) and `destination` is \\(y\\), then `log_pdf_difference` is
given by \\(\\ln{f(x,y)}-\\ln{f(y,x)}\\)
"""
return np.log(\
self.erf_difference(destination) / self.erf_difference(source))
@property
def numparams(self):
"""
The integer number of parameters described by this distribution. Since
the truncated Gaussian is only easily analytically sampled in the case
of 1 parameter, `TruncatedGaussianJumpingDistribution` only allows one
parameter.
"""
return 1
@property
def standard_deviation(self):
"""
The square root of the variance.
"""
if not hasattr(self, '_standard_deviation'):
self._standard_deviation = np.sqrt(self.variance)
return self._standard_deviation
def __eq__(self, other):
"""
Tests for equality between this `TruncatedGaussianJumpingDistribution`
and `other`.
Parameters
----------
other : object
object with which to check for equality
Returns
-------
result : bool
True if and only if object is another
`TruncatedGaussianJumpingDistribution` with the same
`TruncatedGaussianJumpingDistribution.variance`,
`TruncatedGaussianJumpingDistribution.low`, and
`TruncatedGaussianJumpingDistribution.high`
"""
if isinstance(other, TruncatedGaussianJumpingDistribution):
if self.numparams == other.numparams:
variances_close = np.allclose(self.variance, other.variance,\
rtol=1e-12, atol=1e-12)
lows_close = np.isclose(self.low, other.low, atol=1e-6)
highs_close = np.isclose(self.high, other.high, atol=1e-6)
return (variances_close and lows_close and highs_close)
else:
return False
else:
return False
@property
def is_discrete(self):
"""
Boolean describing whether this `TruncatedGaussianJumpingDistribution`
describes discrete (True) or continuous (False) variable(s). Since
Gaussian distributions are continuous, this is always False.
"""
return False
def fill_hdf5_group(self, group):
"""
Fills the given hdf5 file group with data from this distribution.
Parameters
----------
group : h5py.Group
hdf5 file group to fill with information about this distribution
"""
group.attrs['class'] = 'TruncatedGaussianJumpingDistribution'
group.attrs['variance'] = self.variance
if self.low != -np.inf:
group.attrs['low'] = self.low
if self.high != np.inf:
group.attrs['high'] = self.high
@staticmethod
def load_from_hdf5_group(group):
"""
Loads a `TruncatedGaussianJumpingDistribution` from the given hdf5 file
group.
Parameters
----------
group : h5py.Group
the same hdf5 file group which
`TruncatedGaussianJumpingDistribution.fill_hdf5_group` was called
on
Returns
-------
loaded : `TruncatedGaussianJumpingDistribution`
distribution loaded from information in the given group
"""
try:
assert\
group.attrs['class'] == 'TruncatedGaussianJumpingDistribution'
except:
raise ValueError("The given group does not seem to contain a " +\
"TruncatedGaussianJumpingDistribution.")
variance = group.attrs['variance']
if 'low' in group.attrs:
low = group.attrs['low']
else:
low = None
if 'high' in group.attrs:
high = group.attrs['high']
else:
high = None
return\
TruncatedGaussianJumpingDistribution(variance, low=low, high=high) | distpy/jumping/TruncatedGaussianJumpingDistribution.py | import numpy as np
import numpy.linalg as la
from scipy.special import erf, erfinv
from ..util import numerical_types, sequence_types
from .JumpingDistribution import JumpingDistribution
class TruncatedGaussianJumpingDistribution(JumpingDistribution):
"""
Class representing a univariate jumping distribution whose translation is
Gaussian distributed with the extra condition that the destination cannot
fall outside a given range.
"""
def __init__(self, variance, low=None, high=None):
"""
Initializes a `TruncatedGaussianJumpingDistribution` with the given
variance and endpoints.
Parameters
----------
variance : float
a single number representing the variance of the non-truncated
Gaussian
low : float or None
- if None, the variate can be an arbitrarily large negative number
- if float, gives the lowest possible value of the variate
high : float or None
- if None, the variate can be an arbitrarily large positive number
- if float, gives the highest possible value of the variate and
must be larger than low
"""
self.variance = variance
self.low = low
self.high = high
@property
def low(self):
"""
The low endpoint of this truncated Gaussian. Can be -inf
"""
if not hasattr(self, '_low'):
raise AttributeError("low referenced before it was set.")
return self._low
@low.setter
def low(self, value):
"""
Setter for `TruncatedGaussianJumpingDistribution.low`.
Parameters
----------
value : float or None
- if None, the variate can be an arbitrarily large negative number
- if float, gives the lowest possible value of the variate
"""
if type(value) is type(None):
self._low = -np.inf
elif type(value) in numerical_types:
self._low = value
else:
raise TypeError("low was neither None nor a number.")
@property
def high(self):
"""
The low endpoint of this truncated Gaussian. Can be +inf
"""
if not hasattr(self, '_high'):
raise AttributeError("high referenced before it was set.")
return self._high
@high.setter
def high(self, value):
"""
Setter for `TruncatedGaussianJumpingDistribution.high`.
Parameters
----------
value : float or None
- if None, the variate can be an arbitrarily large positive number
- if float, gives the highest possible value of the variate
"""
if type(value) is type(None):
self._high = np.inf
elif type(value) in numerical_types:
if value > self.low:
self._high = value
else:
raise ValueError("high was not larger than low.")
else:
raise TypeError("high was neither None nor a number.")
@property
def variance(self):
"""
The variance, \\(\\sigma^2\\), of the non-truncated Gaussian.
"""
if not hasattr(self, '_variance'):
raise AttributeError("variance referenced before it was set.")
return self._variance
@variance.setter
def variance(self, value):
"""
Setter for `TruncatedGaussianJumpingDistribution.variance`.
Parameters
----------
value : float
a positive number
"""
if type(value) in numerical_types:
self._variance = value
else:
raise TypeError("variance was not a number.")
@property
def root_twice_variance(self):
"""
The square root of twice the variance, \\(\\sqrt{2}\\sigma\\).
"""
if not hasattr(self, '_root_twice_variance'):
self._root_twice_variance = np.sqrt(2 * self.variance)
return self._root_twice_variance
def left_erf(self, source):
"""
Computes the relevant error function evaluated at `source`
Parameters
----------
source : float
the mean of the truncated Gaussian
Returns
-------
left_erf_value : float
if `TruncatedGaussianJumpingDistribution.low` is \\(l\\), `source`
is \\(\\mu\\), and variance \\(\\sigma^2\\), then `left_erf_value`
is \\(\\text{erf}\\left(\\frac{l-\\mu}{\\sqrt{2}\\sigma}\\right)\\)
"""
if self.low == -np.inf:
return (-1.)
else:
return erf((self.low - source) / self.root_twice_variance)
def right_erf(self, source):
"""
Computes the relevant error function evaluated at `source`
Parameters
----------
source : float
the mean of the truncated Gaussian
Returns
-------
right_erf_value : float
if `TruncatedGaussianJumpingDistribution.high` is \\(h\\), `source`
is \\(\\mu\\), and variance \\(\\sigma^2\\), then `left_erf_value`
is \\(\\text{erf}\\left(\\frac{h-\\mu}{\\sqrt{2}\\sigma}\\right)\\)
"""
if self.high == np.inf:
return 1.
else:
return erf((self.high - source) / self.root_twice_variance)
def erf_difference(self, source):
"""
Computes the difference of the two error function values.
right_erf(source)-left_erf(source)
Parameters
----------
source : float
the mean of the truncated Gaussian
Returns
-------
erf_difference_value : float
if `TruncatedGaussianJumpingDistribution.high` is \\(h\\),
`TruncatedGaussianJumpingDistribution.low` is \\(l\\), `source` is
\\(\\mu\\), and variance \\(\\sigma^2\\), then
`erf_value_difference` is
\\(\\text{erf}\\left(\\frac{h-\\mu}{\\sqrt{2}\\sigma}\\right)-\
\\text{erf}\\left(\\frac{l-\\mu}{\\sqrt{2}\\sigma}\\right)\\)
"""
return (self.right_erf(source) - self.left_erf(source))
@property
def constant_in_log_value(self):
"""
A constant in the log value which is independent of both the source and
the destination.
"""
if not hasattr(self, '_constant_in_log_value'):
self._constant_in_log_value =\
(np.log(2. / (np.pi * self.variance)) / 2.)
return self._constant_in_log_value
def draw(self, source, shape=None, random=np.random):
"""
Draws a destination point from this jumping distribution given a source
point.
Parameters
----------
source : float
source point
shape : None or int or tuple
- if None, a single destination is returned as a single number
- if int \\(n\\), \\(n\\) destinations are returned as a 1D
`numpy.ndarray` of length \\(n\\)
- if tuple of ints \\((n_1,n_2,\\ldots,n_k)\\),
\\(\\prod_{m=1}^kn_m\\) destinations are returned as a
`numpy.ndarray` of shape \\((n_1,n_2,\\ldots,n_k)\\)
random : numpy.random.RandomState
the random number generator to use (default: `numpy.random`)
Returns
-------
drawn : number or numpy.ndarray
either single value or array of values. See documentation on
`shape` above for the type of the returned value
"""
uniforms = random.uniform(size=shape)
erfinv_argument = ((uniforms * self.right_erf(source)) +\
((1 - uniforms) * self.left_erf(source)))
return (source + (self.root_twice_variance * erfinv(erfinv_argument)))
def log_value(self, source, destination):
"""
Computes the log-PDF of jumping from `source` to `destination`.
Parameters
----------
source : float
source point
destination : float
destination point
Returns
-------
log_pdf : float
if the distribution is \\(f(x,y)=\\text{Pr}[y|x]\\), `source` is
\\(x\\) and `destination` is \\(y\\), then `log_pdf` is given by
\\(\\ln{f(x,y)}\\)
"""
difference = (destination - source)
return (self.constant_in_log_value +\
(((difference / self.standard_deviation) ** 2) / (-2.))) -\
np.log(self.erf_difference(source))
def log_value_difference(self, source, destination):
"""
Computes the difference in the log-PDF of jumping from `source` to
`destination` and of jumping from `destination` to `source`. While this
method has a default version, overriding it may provide an efficiency
benefit.
Parameters
----------
source : float
source point
destination : float
destination point
Returns
-------
log_pdf_difference : float
if the distribution is \\(f(x,y)=\\text{Pr}[y|x]\\), `source` is
\\(x\\) and `destination` is \\(y\\), then `log_pdf_difference` is
given by \\(\\ln{f(x,y)}-\\ln{f(y,x)}\\)
"""
return np.log(\
self.erf_difference(destination) / self.erf_difference(source))
@property
def numparams(self):
"""
The integer number of parameters described by this distribution. Since
the truncated Gaussian is only easily analytically sampled in the case
of 1 parameter, `TruncatedGaussianJumpingDistribution` only allows one
parameter.
"""
return 1
@property
def standard_deviation(self):
"""
The square root of the variance.
"""
if not hasattr(self, '_standard_deviation'):
self._standard_deviation = np.sqrt(self.variance)
return self._standard_deviation
def __eq__(self, other):
"""
Tests for equality between this `TruncatedGaussianJumpingDistribution`
and `other`.
Parameters
----------
other : object
object with which to check for equality
Returns
-------
result : bool
True if and only if object is another
`TruncatedGaussianJumpingDistribution` with the same
`TruncatedGaussianJumpingDistribution.variance`,
`TruncatedGaussianJumpingDistribution.low`, and
`TruncatedGaussianJumpingDistribution.high`
"""
if isinstance(other, TruncatedGaussianJumpingDistribution):
if self.numparams == other.numparams:
variances_close = np.allclose(self.variance, other.variance,\
rtol=1e-12, atol=1e-12)
lows_close = np.isclose(self.low, other.low, atol=1e-6)
highs_close = np.isclose(self.high, other.high, atol=1e-6)
return (variances_close and lows_close and highs_close)
else:
return False
else:
return False
@property
def is_discrete(self):
"""
Boolean describing whether this `TruncatedGaussianJumpingDistribution`
describes discrete (True) or continuous (False) variable(s). Since
Gaussian distributions are continuous, this is always False.
"""
return False
def fill_hdf5_group(self, group):
"""
Fills the given hdf5 file group with data from this distribution.
Parameters
----------
group : h5py.Group
hdf5 file group to fill with information about this distribution
"""
group.attrs['class'] = 'TruncatedGaussianJumpingDistribution'
group.attrs['variance'] = self.variance
if self.low != -np.inf:
group.attrs['low'] = self.low
if self.high != np.inf:
group.attrs['high'] = self.high
@staticmethod
def load_from_hdf5_group(group):
"""
Loads a `TruncatedGaussianJumpingDistribution` from the given hdf5 file
group.
Parameters
----------
group : h5py.Group
the same hdf5 file group which
`TruncatedGaussianJumpingDistribution.fill_hdf5_group` was called
on
Returns
-------
loaded : `TruncatedGaussianJumpingDistribution`
distribution loaded from information in the given group
"""
try:
assert\
group.attrs['class'] == 'TruncatedGaussianJumpingDistribution'
except:
raise ValueError("The given group does not seem to contain a " +\
"TruncatedGaussianJumpingDistribution.")
variance = group.attrs['variance']
if 'low' in group.attrs:
low = group.attrs['low']
else:
low = None
if 'high' in group.attrs:
high = group.attrs['high']
else:
high = None
return\
TruncatedGaussianJumpingDistribution(variance, low=low, high=high) | 0.911946 | 0.644253 |
import torch
from torch.utils.data import DataLoader, Dataset, Sampler
import phyre
import numpy as np
import matplotlib.pyplot as plt
import os
INF = 10 ** 20
def dump_img(observation, name):
img = phyre.observations_to_uint8_rgb(observation)
name = "rollout/" + str(name) + ".jpg"
plt.imsave(name, img)
print(name, os.getcwd())
class PhyreParallel(Dataset):
def __init__(self, simulator):
self._sim = simulator
self._len = INF
def __getitem__(self, args):
# IF args is an instance of JunkKeys this means you are sampling without feeding task/action pairs to
# sampler via Sampler.feed_task_action
task_idx, action = args
status, imgs = self._sim.simulate_single(task_idx, action)
return torch.LongTensor(imgs)
def __len__(self):
return self._len
class JunkKeys:
pass
class SimulationSampler(Sampler):
def __init__(self):
"""Sampler may need to be primed by supplying a dummy batch of task/action pairs before passing to torch.data.Dataloader"""
self.keys = JunkKeys()
def __len__(self):
return INF
def __iter__(self):
return self
def __next__(self):
if self.keys is None:
raise ValueError("Cannot sample from simulator as no task/action pairs have been provided")
keys = self.keys
self.keys = None
return keys
def feed_task_action(self, task_idxs, actions):
"""Feed a list of task indexes and a list/tensor of actions for sampler to supply to data loader"""
assert len(task_idxs) == len(actions)
self.keys = zip(task_idxs, actions)
if __name__ == "__main__":
train_id, dev_id, test_id = phyre.get_fold("ball_cross_template", 0)
train_id = train_id[:5]
simulator = phyre.initialize_simulator(train_id, "ball")
dset = PhyreParallel(simulator)
sampler = SimulationSampler()
sampler.feed_task_action([0], [np.array([0.8, 0.8, 0.05])])
dloader = iter(DataLoader(dset, batch_sampler=sampler))
sampler.feed_task_action([1, 2, 1], np.array([[0.8, 0.8, 0.05], [0.8, 0.8, 0.05], [0.8, 0.8, 0.1]]))
imgs = next(dloader)
print(imgs.shape)
dump_img(imgs[0][0], "b0")
dump_img(imgs[1][0], "b1")
dump_img(imgs[2][0], "b2")
print(imgs.shape) | agents/report_web_viewer/dataloader_parallel.py |
import torch
from torch.utils.data import DataLoader, Dataset, Sampler
import phyre
import numpy as np
import matplotlib.pyplot as plt
import os
INF = 10 ** 20
def dump_img(observation, name):
img = phyre.observations_to_uint8_rgb(observation)
name = "rollout/" + str(name) + ".jpg"
plt.imsave(name, img)
print(name, os.getcwd())
class PhyreParallel(Dataset):
def __init__(self, simulator):
self._sim = simulator
self._len = INF
def __getitem__(self, args):
# IF args is an instance of JunkKeys this means you are sampling without feeding task/action pairs to
# sampler via Sampler.feed_task_action
task_idx, action = args
status, imgs = self._sim.simulate_single(task_idx, action)
return torch.LongTensor(imgs)
def __len__(self):
return self._len
class JunkKeys:
pass
class SimulationSampler(Sampler):
def __init__(self):
"""Sampler may need to be primed by supplying a dummy batch of task/action pairs before passing to torch.data.Dataloader"""
self.keys = JunkKeys()
def __len__(self):
return INF
def __iter__(self):
return self
def __next__(self):
if self.keys is None:
raise ValueError("Cannot sample from simulator as no task/action pairs have been provided")
keys = self.keys
self.keys = None
return keys
def feed_task_action(self, task_idxs, actions):
"""Feed a list of task indexes and a list/tensor of actions for sampler to supply to data loader"""
assert len(task_idxs) == len(actions)
self.keys = zip(task_idxs, actions)
if __name__ == "__main__":
train_id, dev_id, test_id = phyre.get_fold("ball_cross_template", 0)
train_id = train_id[:5]
simulator = phyre.initialize_simulator(train_id, "ball")
dset = PhyreParallel(simulator)
sampler = SimulationSampler()
sampler.feed_task_action([0], [np.array([0.8, 0.8, 0.05])])
dloader = iter(DataLoader(dset, batch_sampler=sampler))
sampler.feed_task_action([1, 2, 1], np.array([[0.8, 0.8, 0.05], [0.8, 0.8, 0.05], [0.8, 0.8, 0.1]]))
imgs = next(dloader)
print(imgs.shape)
dump_img(imgs[0][0], "b0")
dump_img(imgs[1][0], "b1")
dump_img(imgs[2][0], "b2")
print(imgs.shape) | 0.554712 | 0.396857 |
from web3 import Web3
import solc
import os
class EthException(Exception):
pass
class BankException(Exception):
pass
class Bank:
""" Our Ethereum bank Web3 implementation. """
def __init__(self, http_url, priv_key):
self._provider = Web3.HTTPProvider(http_url)
self._w3 = Web3(self._provider)
self._account = self._w3.eth.account.privateKeyToAccount(priv_key)
self._iface = self._contract_compile()
self.contract_addr = None
self._bank_inst = None
def _contract_compile(self):
fn = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"sol",
"bank.sol"
)
src = solc.compile_files([fn])
return list(src.values())[0]
def _transact(self, func):
tx = func.buildTransaction({
"from": self._account.address,
"nonce": self._w3.eth.getTransactionCount(self._account.address)
})
tx_signed = self._account.signTransaction(tx)
tx_hash = self._w3.eth.sendRawTransaction(tx_signed.rawTransaction)
tx_receipt = self._w3.eth.waitForTransactionReceipt(tx_hash)
return tx_receipt.contractAddress
def _call(self, func):
return func.call({"from": self._account.address})
def contract_deploy(self):
""" Deploys our contract to blockchain. Should be called only once. """
Bank = self._w3.eth.contract(
abi=self._iface["abi"],
bytecode=self._iface["bin"]
)
func = Bank.constructor()
return self._transact(func)
def set_contract_addr(self, addr):
""" Sets the contract address to use. """
self.contract_addr = addr
self._bank_inst = self._w3.eth.contract(
address=self.contract_addr,
abi=self._iface["abi"],
)
def get_tokens(self, token_id):
func = self._bank_inst.functions.get_tokens(token_id)
return self._call(func)
def withdraw(self, token_id, tokens):
func = self._bank_inst.functions.withdraw(token_id, tokens)
ret = self._call(func)
if ret == -1:
raise EthException("Not our contract")
elif ret == -2:
raise BankException("Not enough tokens")
self._transact(func)
return self.get_tokens(token_id)
def deposit(self, token_id, tokens):
func = self._bank_inst.functions.deposit(token_id, tokens)
ret = self._call(func)
if ret == -1:
raise EthException("Not our contract")
self._transact(func)
return self.get_tokens(token_id) | tokens/bank.py | from web3 import Web3
import solc
import os
class EthException(Exception):
pass
class BankException(Exception):
pass
class Bank:
""" Our Ethereum bank Web3 implementation. """
def __init__(self, http_url, priv_key):
self._provider = Web3.HTTPProvider(http_url)
self._w3 = Web3(self._provider)
self._account = self._w3.eth.account.privateKeyToAccount(priv_key)
self._iface = self._contract_compile()
self.contract_addr = None
self._bank_inst = None
def _contract_compile(self):
fn = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"sol",
"bank.sol"
)
src = solc.compile_files([fn])
return list(src.values())[0]
def _transact(self, func):
tx = func.buildTransaction({
"from": self._account.address,
"nonce": self._w3.eth.getTransactionCount(self._account.address)
})
tx_signed = self._account.signTransaction(tx)
tx_hash = self._w3.eth.sendRawTransaction(tx_signed.rawTransaction)
tx_receipt = self._w3.eth.waitForTransactionReceipt(tx_hash)
return tx_receipt.contractAddress
def _call(self, func):
return func.call({"from": self._account.address})
def contract_deploy(self):
""" Deploys our contract to blockchain. Should be called only once. """
Bank = self._w3.eth.contract(
abi=self._iface["abi"],
bytecode=self._iface["bin"]
)
func = Bank.constructor()
return self._transact(func)
def set_contract_addr(self, addr):
""" Sets the contract address to use. """
self.contract_addr = addr
self._bank_inst = self._w3.eth.contract(
address=self.contract_addr,
abi=self._iface["abi"],
)
def get_tokens(self, token_id):
func = self._bank_inst.functions.get_tokens(token_id)
return self._call(func)
def withdraw(self, token_id, tokens):
func = self._bank_inst.functions.withdraw(token_id, tokens)
ret = self._call(func)
if ret == -1:
raise EthException("Not our contract")
elif ret == -2:
raise BankException("Not enough tokens")
self._transact(func)
return self.get_tokens(token_id)
def deposit(self, token_id, tokens):
func = self._bank_inst.functions.deposit(token_id, tokens)
ret = self._call(func)
if ret == -1:
raise EthException("Not our contract")
self._transact(func)
return self.get_tokens(token_id) | 0.442396 | 0.136695 |
import errno
import os
import pytest
import sys
# Import our local xcross.
test_dir = os.path.dirname(os.path.realpath(__file__))
xcross_dir = os.path.dirname(test_dir)
sys.path.insert(0, xcross_dir)
import xcross
os.environ['CROSS_TARGET'] = 'alpha-unknown-linux-gnu'
os.environ['CROSS_WITH_PACKAGE_MANAGERS'] = ''
def run_validate_arguments(argv):
args = xcross.process_args(argv)
try:
xcross.validate_arguments(args)
return True
except SystemExit:
return False
def run_get_image(argv, expected):
args = xcross.process_args(argv)
xcross.validate_arguments(args)
image = xcross.get_image(args)
assert image == expected
def run_normpath(argv, expected):
args = xcross.process_args(argv)
xcross.normpath(args)
assert args.command == expected
def run_format_command(argv, expected):
args = xcross.process_args(argv)
actual = xcross.format_command(args)
assert actual == expected
def run_image_command(argv, expected):
args = xcross.process_args(argv)
xcross.validate_arguments(args)
actual = xcross.image_command(args, '.').splitlines()
assert actual[0].startswith('source /etc/profile')
assert actual[1].startswith('cd /mnt/xcross')
assert actual[2] == expected
def run_image(args, exit_code=0):
with pytest.raises(SystemExit) as exit_error:
xcross.main(['--target', 'alpha-unknown-linux-gnu'] + args)
assert exit_error.value.code == exit_code
def test_get_image():
run_get_image([
'--target', 'alpha-unknown-linux-gnu'
], 'docker.io/ahuszagh/cross:alpha-unknown-linux-gnu')
run_get_image([
'--target', 'alpha-unknown-linux-gnu',
'--server', '',
], 'ahuszagh/cross:alpha-unknown-linux-gnu')
run_get_image([
'--target', 'alpha-unknown-linux-gnu',
'--server', '',
'--username', '',
], 'cross:alpha-unknown-linux-gnu')
run_get_image([
'--target', 'alpha-unknown-linux-gnu',
'--image-version', '0.1',
'--server', '',
'--username', '',
], 'cross:alpha-unknown-linux-gnu-0.1')
def test_simple_format_command():
run_format_command([], '/bin/bash')
run_format_command(['make'], 'make')
run_format_command(['cmake ..'], 'cmake ..')
run_format_command(['cmake', '..'], 'cmake ..')
run_format_command(['c++', 'main o.cc'], 'c++ "main o.cc"')
def test_single_format_command():
run_format_command(['c++ "main o.cc"'], 'c++ "main o.cc"')
run_format_command(['c++ main\\ o.cc'], 'c++ main\\ o.cc')
run_format_command(['c++ main\\ "o.cc'], 'c++ main\\ "o.cc')
def test_hyphen_command():
run_format_command(['make', '-j', '5'], 'make -j 5')
run_format_command([
'cmake', '..', '-DBUILD_SHARED_LIBS=OFF'
], 'cmake .. -DBUILD_SHARED_LIBS=OFF')
def test_normpath_windows():
if os.name != 'nt':
return
run_normpath([], [])
run_normpath(['cmake'], ['cmake'])
run_normpath(['cmake', '..\\..'], ['cmake', "'../..'"])
run_normpath(['.\\xcross'], ["'xcross'"])
run_normpath(['.\\env\\shared'], ["'env/shared'"])
def test_control_characters():
run_format_command(['$(echo `whoami`)'], '$(echo `whoami`)')
with pytest.raises(SystemExit):
run_format_command(['$(echo', '`whoami`)'], '')
with pytest.raises(SystemExit):
run_format_command(['cmake', '--build', '.', '--config', 'Release;' 'echo', '5'], '')
with pytest.raises(SystemExit):
run_format_command(['echo', '${var[@]}'], '')
with pytest.raises(SystemExit):
run_format_command(['echo', '`whoami`'], '')
with pytest.raises(SystemExit):
run_format_command(['c++', '"main o.cc"'], '')
with pytest.raises(SystemExit):
run_format_command(['c++', 'main" o.cc'], '')
with pytest.raises(SystemExit):
run_format_command(['c++', "main' o.cc"], '')
def test_validate_arguments():
assert not run_validate_arguments(['--target', 'x\\a'])
assert not run_validate_arguments(['--username', 'a#huszagh'])
assert not run_validate_arguments(['--repository', 'cross;5'])
assert run_validate_arguments(['--target', 'alpha-unknown-linux-gnu'])
assert run_validate_arguments(['--username', 'ahusz05-v1_23'])
assert run_validate_arguments(['--repository', 'cross05-v1_23'])
def test_run_image_command():
run_image_command(['make', '-j', '5'], 'make -j 5')
def test_run_image():
run_image(['echo', 'helloworld'])
run_image(['c++', '--version'])
run_image(['cl'], exit_code=127)
# Test detached mode. Ensure we clean up at the end.
try:
run_image(['--detach', 'echo', 'hellworld'])
run_image(['--detach', 'echo', 'next'])
finally:
run_image(['--stop'])
def windows_permissions():
# Check we don't have permissions to write if not admin.
# Note that Docker runs as a non-administrator on Windows,
# so just assume that it will fails.
command = ['touch', '/mnt/xcross/sample_xcross_file']
run_image(command, exit_code=errno.EPERM)
def unix_permissions():
# Make sure all files produced have the same permissions.
# This means we properly mapped the image.
try:
run_image(['touch', 'sample_xcross_file'])
st = os.stat('sample_xcross_file')
assert(st.st_uid == os.getuid())
assert(st.st_gid == os.getgid())
finally:
os.unlink('sample_xcross_file')
# Check sudo isn't enabled.
run_image(['which', 'sudo'], exit_code=1)
# Test with podman: ensure permissions don't fail.
run_image(['ls', '-la', '--engine', 'podman'])
def test_permissions():
if xcross.os_name() == 'nt':
windows_permissions()
else:
unix_permissions() | test/test_xcross.py |
import errno
import os
import pytest
import sys
# Import our local xcross.
test_dir = os.path.dirname(os.path.realpath(__file__))
xcross_dir = os.path.dirname(test_dir)
sys.path.insert(0, xcross_dir)
import xcross
os.environ['CROSS_TARGET'] = 'alpha-unknown-linux-gnu'
os.environ['CROSS_WITH_PACKAGE_MANAGERS'] = ''
def run_validate_arguments(argv):
args = xcross.process_args(argv)
try:
xcross.validate_arguments(args)
return True
except SystemExit:
return False
def run_get_image(argv, expected):
args = xcross.process_args(argv)
xcross.validate_arguments(args)
image = xcross.get_image(args)
assert image == expected
def run_normpath(argv, expected):
args = xcross.process_args(argv)
xcross.normpath(args)
assert args.command == expected
def run_format_command(argv, expected):
args = xcross.process_args(argv)
actual = xcross.format_command(args)
assert actual == expected
def run_image_command(argv, expected):
args = xcross.process_args(argv)
xcross.validate_arguments(args)
actual = xcross.image_command(args, '.').splitlines()
assert actual[0].startswith('source /etc/profile')
assert actual[1].startswith('cd /mnt/xcross')
assert actual[2] == expected
def run_image(args, exit_code=0):
with pytest.raises(SystemExit) as exit_error:
xcross.main(['--target', 'alpha-unknown-linux-gnu'] + args)
assert exit_error.value.code == exit_code
def test_get_image():
run_get_image([
'--target', 'alpha-unknown-linux-gnu'
], 'docker.io/ahuszagh/cross:alpha-unknown-linux-gnu')
run_get_image([
'--target', 'alpha-unknown-linux-gnu',
'--server', '',
], 'ahuszagh/cross:alpha-unknown-linux-gnu')
run_get_image([
'--target', 'alpha-unknown-linux-gnu',
'--server', '',
'--username', '',
], 'cross:alpha-unknown-linux-gnu')
run_get_image([
'--target', 'alpha-unknown-linux-gnu',
'--image-version', '0.1',
'--server', '',
'--username', '',
], 'cross:alpha-unknown-linux-gnu-0.1')
def test_simple_format_command():
run_format_command([], '/bin/bash')
run_format_command(['make'], 'make')
run_format_command(['cmake ..'], 'cmake ..')
run_format_command(['cmake', '..'], 'cmake ..')
run_format_command(['c++', 'main o.cc'], 'c++ "main o.cc"')
def test_single_format_command():
run_format_command(['c++ "main o.cc"'], 'c++ "main o.cc"')
run_format_command(['c++ main\\ o.cc'], 'c++ main\\ o.cc')
run_format_command(['c++ main\\ "o.cc'], 'c++ main\\ "o.cc')
def test_hyphen_command():
run_format_command(['make', '-j', '5'], 'make -j 5')
run_format_command([
'cmake', '..', '-DBUILD_SHARED_LIBS=OFF'
], 'cmake .. -DBUILD_SHARED_LIBS=OFF')
def test_normpath_windows():
if os.name != 'nt':
return
run_normpath([], [])
run_normpath(['cmake'], ['cmake'])
run_normpath(['cmake', '..\\..'], ['cmake', "'../..'"])
run_normpath(['.\\xcross'], ["'xcross'"])
run_normpath(['.\\env\\shared'], ["'env/shared'"])
def test_control_characters():
run_format_command(['$(echo `whoami`)'], '$(echo `whoami`)')
with pytest.raises(SystemExit):
run_format_command(['$(echo', '`whoami`)'], '')
with pytest.raises(SystemExit):
run_format_command(['cmake', '--build', '.', '--config', 'Release;' 'echo', '5'], '')
with pytest.raises(SystemExit):
run_format_command(['echo', '${var[@]}'], '')
with pytest.raises(SystemExit):
run_format_command(['echo', '`whoami`'], '')
with pytest.raises(SystemExit):
run_format_command(['c++', '"main o.cc"'], '')
with pytest.raises(SystemExit):
run_format_command(['c++', 'main" o.cc'], '')
with pytest.raises(SystemExit):
run_format_command(['c++', "main' o.cc"], '')
def test_validate_arguments():
assert not run_validate_arguments(['--target', 'x\\a'])
assert not run_validate_arguments(['--username', 'a#huszagh'])
assert not run_validate_arguments(['--repository', 'cross;5'])
assert run_validate_arguments(['--target', 'alpha-unknown-linux-gnu'])
assert run_validate_arguments(['--username', 'ahusz05-v1_23'])
assert run_validate_arguments(['--repository', 'cross05-v1_23'])
def test_run_image_command():
run_image_command(['make', '-j', '5'], 'make -j 5')
def test_run_image():
run_image(['echo', 'helloworld'])
run_image(['c++', '--version'])
run_image(['cl'], exit_code=127)
# Test detached mode. Ensure we clean up at the end.
try:
run_image(['--detach', 'echo', 'hellworld'])
run_image(['--detach', 'echo', 'next'])
finally:
run_image(['--stop'])
def windows_permissions():
# Check we don't have permissions to write if not admin.
# Note that Docker runs as a non-administrator on Windows,
# so just assume that it will fails.
command = ['touch', '/mnt/xcross/sample_xcross_file']
run_image(command, exit_code=errno.EPERM)
def unix_permissions():
# Make sure all files produced have the same permissions.
# This means we properly mapped the image.
try:
run_image(['touch', 'sample_xcross_file'])
st = os.stat('sample_xcross_file')
assert(st.st_uid == os.getuid())
assert(st.st_gid == os.getgid())
finally:
os.unlink('sample_xcross_file')
# Check sudo isn't enabled.
run_image(['which', 'sudo'], exit_code=1)
# Test with podman: ensure permissions don't fail.
run_image(['ls', '-la', '--engine', 'podman'])
def test_permissions():
if xcross.os_name() == 'nt':
windows_permissions()
else:
unix_permissions() | 0.353205 | 0.246196 |
import torch
import numpy as np
import sys
import random
import copy
from torch.autograd import Variable
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestSolve(TestCase):
def generate_data(self, min, max, shape, dtype):
input = np.random.uniform(min, max, shape).astype(dtype)
npu_input = torch.from_numpy(input)
return npu_input
def cpu_op_exec(self, input1, input2):
X, LU = torch.solve(input2, input1)
return X
def npu_op_exec(self, input1, input2):
input1 = input1.to("npu")
input2 = input2.to("npu")
X, LU = torch.solve(input2, input1)
X = X.to("cpu")
return X
def test_solve_float16_2(self, device):
def cpu_op_exec_float16_2(input1, input2):
input1 = input1.to(torch.float32)
input2 = input2.to(torch.float32)
X, LU = torch.solve(input2, input1)
X = X.numpy()
X = X.astype(np.float16)
return X
npu_input1 = self.generate_data(0, 100, (2, 2), np.float16)
npu_input2 = self.generate_data(0, 100, (2, 1), np.float16)
cpu_output = cpu_op_exec_float16_2(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
#self.assertRtolEqual(cpu_output, npu_output)
def test_solve_float16_1(self, device):
def cpu_op_exec_float16_1(input1, input2):
input1 = input1.to(torch.float32)
input2 = input2.to(torch.float32)
X, LU = torch.solve(input2, input1)
X = X.numpy()
X = X.astype(np.float16)
return X
npu_input1 = self.generate_data(0, 100, (5, 5), np.float16)
npu_input2 = self.generate_data(0, 100, (5, 5), np.float16)
cpu_output = cpu_op_exec_float16_1(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
#self.assertRtolEqual(cpu_output, npu_output)
def test_solve_float32_1(self, device):
npu_input1 = self.generate_data(0, 100, (2, 3, 2, 2), np.float32)
npu_input2 = self.generate_data(0, 100, (2, 1, 2, 1), np.float32)
cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
# self.assertRtolEqual(cpu_output, npu_output)
def test_solve_float32_2(self, device):
npu_input1 = self.generate_data(0, 100, (3, 3, 3), np.float32)
npu_input2 = self.generate_data(0, 100, (3, 3, 2), np.float32)
cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
# self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestSolve, globals(), except_for='cpu')
if __name__ == '__main__':
run_tests() | test/test_npu/test_solve.py |
import torch
import numpy as np
import sys
import random
import copy
from torch.autograd import Variable
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestSolve(TestCase):
def generate_data(self, min, max, shape, dtype):
input = np.random.uniform(min, max, shape).astype(dtype)
npu_input = torch.from_numpy(input)
return npu_input
def cpu_op_exec(self, input1, input2):
X, LU = torch.solve(input2, input1)
return X
def npu_op_exec(self, input1, input2):
input1 = input1.to("npu")
input2 = input2.to("npu")
X, LU = torch.solve(input2, input1)
X = X.to("cpu")
return X
def test_solve_float16_2(self, device):
def cpu_op_exec_float16_2(input1, input2):
input1 = input1.to(torch.float32)
input2 = input2.to(torch.float32)
X, LU = torch.solve(input2, input1)
X = X.numpy()
X = X.astype(np.float16)
return X
npu_input1 = self.generate_data(0, 100, (2, 2), np.float16)
npu_input2 = self.generate_data(0, 100, (2, 1), np.float16)
cpu_output = cpu_op_exec_float16_2(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
#self.assertRtolEqual(cpu_output, npu_output)
def test_solve_float16_1(self, device):
def cpu_op_exec_float16_1(input1, input2):
input1 = input1.to(torch.float32)
input2 = input2.to(torch.float32)
X, LU = torch.solve(input2, input1)
X = X.numpy()
X = X.astype(np.float16)
return X
npu_input1 = self.generate_data(0, 100, (5, 5), np.float16)
npu_input2 = self.generate_data(0, 100, (5, 5), np.float16)
cpu_output = cpu_op_exec_float16_1(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
#self.assertRtolEqual(cpu_output, npu_output)
def test_solve_float32_1(self, device):
npu_input1 = self.generate_data(0, 100, (2, 3, 2, 2), np.float32)
npu_input2 = self.generate_data(0, 100, (2, 1, 2, 1), np.float32)
cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
# self.assertRtolEqual(cpu_output, npu_output)
def test_solve_float32_2(self, device):
npu_input1 = self.generate_data(0, 100, (3, 3, 3), np.float32)
npu_input2 = self.generate_data(0, 100, (3, 3, 2), np.float32)
cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
# npu_output = self.npu_op_exec(npu_input1, npu_input2)
# self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestSolve, globals(), except_for='cpu')
if __name__ == '__main__':
run_tests() | 0.422505 | 0.557725 |
# Standard Library Imports
from itertools import combinations
# Application-specific Imports
from advent_of_code.solvers import solver
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 17: No Such Thing as Too Much
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The number of 150 litre container combinations is {0}.',
'The number of 150 litre fewest container combinations is {1}.',
))
@staticmethod
def _get_150_litre_combos(cups, min_length_combos=False):
"""
Args:
cups (list):
min_length_combos (bool):
Returns:
list:
"""
cup_combos = []
for length in range(1, len(cups) + 1):
cup_combos.extend((
tuple(combo) for combo in combinations(cups, length)
if sum(combo) == 150
))
if min_length_combos and cup_combos:
break
return cup_combos
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
cups = [int(cup) for cup in self.puzzle_input.splitlines()]
count_all_combos = len(self._get_150_litre_combos(cups, False))
count_min_length_combos = len(self._get_150_litre_combos(cups, True))
return (count_all_combos, count_min_length_combos)
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
test_input = '\n'.join(('120', '90', '60', '30', '30'))
self._run_test_case(solver.TestCase(test_input, 4, 3)) | advent_of_code/solvers/day_17.py | # Standard Library Imports
from itertools import combinations
# Application-specific Imports
from advent_of_code.solvers import solver
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 17: No Such Thing as Too Much
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The number of 150 litre container combinations is {0}.',
'The number of 150 litre fewest container combinations is {1}.',
))
@staticmethod
def _get_150_litre_combos(cups, min_length_combos=False):
"""
Args:
cups (list):
min_length_combos (bool):
Returns:
list:
"""
cup_combos = []
for length in range(1, len(cups) + 1):
cup_combos.extend((
tuple(combo) for combo in combinations(cups, length)
if sum(combo) == 150
))
if min_length_combos and cup_combos:
break
return cup_combos
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
cups = [int(cup) for cup in self.puzzle_input.splitlines()]
count_all_combos = len(self._get_150_litre_combos(cups, False))
count_min_length_combos = len(self._get_150_litre_combos(cups, True))
return (count_all_combos, count_min_length_combos)
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
test_input = '\n'.join(('120', '90', '60', '30', '30'))
self._run_test_case(solver.TestCase(test_input, 4, 3)) | 0.837354 | 0.315485 |
import logging, time, json, re
from .utils import camel_case
logger = logging.getLogger(__name__)
# https://support.google.com/tagmanager/answer/7182738?hl=en
# Array.from(str).map( s => s.innerText)
BUILT_IN_VARIABLES_LIST = ["Click Element", "Click Classes", "Click ID", "Click Target", "Click URL", "Click Text", "Error Message", "Error URL", "Error Line", "Debug Mode", "Form Classes", "Form Element", "Form ID", "Form Target", "Form Text", "Form URL", "History Source", "New History Fragment", "New History State", "Old History Fragment", "Old History State", "Page Hostname", "Page Path", "Page URL", "Referrer", "Scroll Depth Threshold", "Scroll Depth Units", "Scroll Direction", "Container ID", "Container Version", "Environment Name", "Event", "HTML ID", "Random Number", "Video Current Time", "Video Duration", "Video Percent", "Video Provider", "Video Status", "Video Title", "Video URL", "Video Visible", "Percent Visible", "On-Screen Duration"]
class Entity():
def __init__(self, data, parent):
self.service = parent.service
self.gtmservice = parent.gtmservice
self.parent = parent
self.data = data
self.name = data.get("name")
self.type = camel_case(data.get("type"))
# self.type = data.get("type")
self.path = data.get("path")
self.parameter = data.get("parameter")
# Param for buit in variables
self.path_additional_params = {}
self.dependency_check_id = data.get("name")
self.dependent_variables = []
self.dependent_built_in_variables = []
dependent_variables = re.findall("{{([^{}\\\]+?)}}", json.dumps(self.data))
if len(dependent_variables) > 0:
for variable in dependent_variables:
if variable != "_event":
if variable in BUILT_IN_VARIABLES_LIST:
self.dependent_built_in_variables.append(variable)
else:
self.dependent_variables.append(variable)
def update(self):
self.service.execute(getattr(self.gtmservice.accounts().containers().workspaces(), self.entity_type)().update(path=self.path,body=self.data,))
if self.parent.cache:
self.parent.update_cache(self.entity_type)
def replace_data_fragment(self, old_text, new_text, api_update=True):
try:
changed_data = re.sub(old_text, new_text, json.dumps(self.data))
self.data = json.loads(changed_data)
if api_update:
self.update()
else:
if self.parent.cache:
self.parent.update_cache(self.entity_type)
except Exception as e:
raise ValueError(f"Can't change data for {self.name}: {e}")
def rename_references(self, new_name, old_name, api_update=True):
processed = {}
processed['tags']=[]
processed['variables']=[]
processed['triggers']=[]
dependencies = self.get_depended()
if dependencies['len']>0:
for entity_type in dependencies.keys():
if entity_type == 'len':
continue
if f"dependent_{self.entity_type}" in dependencies[entity_type].keys():
for entity_name in dependencies[entity_type][f"dependent_{self.entity_type}"]:
if entity_name in processed[entity_type]:
continue
processed[entity_type].append(entity_name)
entity = self.parent.get_entity(entity_type, entity_name)
entity.replace_data_fragment(f"{{{{{re.escape(old_name)}}}}}", f"{{{{{new_name}}}}}",api_update)
logger.info(f"Modifed {entity_type} {entity_name}")
else:
for entity_name in [item for sublist in list(dependencies[entity_type].values()) for item in sublist]:
if entity_name in processed[entity_type]:
continue
processed[entity_type].append(entity_name)
entity = self.parent.get_entity(entity_type, entity_name)
if entity:
entity.replace_data_fragment(f"""{re.escape(old_name)}""", f"""{new_name}""",api_update)
# entity.replace_data_fragment(f"'{old_name}'", f"'{new_name}'",api_update)
logger.info(f"Modifed {entity_type} {entity_name}")
def delete(self, do_check = True):
depended = self.get_depended()
if do_check and depended['len']>0:
logger.warning(f"Can't delete {self.entity_type} {self.name}: it used in {depended}")
else:
self.service.execute(getattr(self.gtmservice.accounts().containers().workspaces(), self.entity_type)().delete(**{**{'path':self.path},**self.path_additional_params}))
self.parent.delete(self.entity_type, self.name)
def get_depended(self):
return self.parent.get_depended(self.entity_type, self.dependency_check_id, self.depended_checks)
def get_template_name(self):
for param in self.parameter:
if param["type"].lower() == "template" and param["key"] == "name":
return param["value"]
def get_param(self, param_key, param_type='template'):
for param in self.parameter:
if param["type"] == param_type and param["key"] == param_key:
return param["list"] if param_type == 'list' else param["value"]
def get_custom_params(self):
customParams = self.get_param('customParams','list')
params = []
if customParams and len(customParams)>0:
customParams = [param['map'] for param in customParams]
for param in customParams:
key = False
value = False
for p in param:
if p['key'] == 'key':
key = p['value']
if p['key'] == 'value':
value = p['value']
if key and value:
params.append({key:value})
return params
def get_template_param(self, param_name):
if self.parameter:
for param in self.parameter:
if param["type"].lower() == "template" and param["key"] == param_name:
return param["value"]
def set_folder_id(self, folder_id):
self.data['parentFolderId']=folder_id
def set_type(self, type):
self.data['type'] = type
self.type = type
def get_id(self):
return self.data[self.id_name] | gtm_gear/entity.py | import logging, time, json, re
from .utils import camel_case
logger = logging.getLogger(__name__)
# https://support.google.com/tagmanager/answer/7182738?hl=en
# Array.from(str).map( s => s.innerText)
BUILT_IN_VARIABLES_LIST = ["Click Element", "Click Classes", "Click ID", "Click Target", "Click URL", "Click Text", "Error Message", "Error URL", "Error Line", "Debug Mode", "Form Classes", "Form Element", "Form ID", "Form Target", "Form Text", "Form URL", "History Source", "New History Fragment", "New History State", "Old History Fragment", "Old History State", "Page Hostname", "Page Path", "Page URL", "Referrer", "Scroll Depth Threshold", "Scroll Depth Units", "Scroll Direction", "Container ID", "Container Version", "Environment Name", "Event", "HTML ID", "Random Number", "Video Current Time", "Video Duration", "Video Percent", "Video Provider", "Video Status", "Video Title", "Video URL", "Video Visible", "Percent Visible", "On-Screen Duration"]
class Entity():
def __init__(self, data, parent):
self.service = parent.service
self.gtmservice = parent.gtmservice
self.parent = parent
self.data = data
self.name = data.get("name")
self.type = camel_case(data.get("type"))
# self.type = data.get("type")
self.path = data.get("path")
self.parameter = data.get("parameter")
# Param for buit in variables
self.path_additional_params = {}
self.dependency_check_id = data.get("name")
self.dependent_variables = []
self.dependent_built_in_variables = []
dependent_variables = re.findall("{{([^{}\\\]+?)}}", json.dumps(self.data))
if len(dependent_variables) > 0:
for variable in dependent_variables:
if variable != "_event":
if variable in BUILT_IN_VARIABLES_LIST:
self.dependent_built_in_variables.append(variable)
else:
self.dependent_variables.append(variable)
def update(self):
self.service.execute(getattr(self.gtmservice.accounts().containers().workspaces(), self.entity_type)().update(path=self.path,body=self.data,))
if self.parent.cache:
self.parent.update_cache(self.entity_type)
def replace_data_fragment(self, old_text, new_text, api_update=True):
try:
changed_data = re.sub(old_text, new_text, json.dumps(self.data))
self.data = json.loads(changed_data)
if api_update:
self.update()
else:
if self.parent.cache:
self.parent.update_cache(self.entity_type)
except Exception as e:
raise ValueError(f"Can't change data for {self.name}: {e}")
def rename_references(self, new_name, old_name, api_update=True):
processed = {}
processed['tags']=[]
processed['variables']=[]
processed['triggers']=[]
dependencies = self.get_depended()
if dependencies['len']>0:
for entity_type in dependencies.keys():
if entity_type == 'len':
continue
if f"dependent_{self.entity_type}" in dependencies[entity_type].keys():
for entity_name in dependencies[entity_type][f"dependent_{self.entity_type}"]:
if entity_name in processed[entity_type]:
continue
processed[entity_type].append(entity_name)
entity = self.parent.get_entity(entity_type, entity_name)
entity.replace_data_fragment(f"{{{{{re.escape(old_name)}}}}}", f"{{{{{new_name}}}}}",api_update)
logger.info(f"Modifed {entity_type} {entity_name}")
else:
for entity_name in [item for sublist in list(dependencies[entity_type].values()) for item in sublist]:
if entity_name in processed[entity_type]:
continue
processed[entity_type].append(entity_name)
entity = self.parent.get_entity(entity_type, entity_name)
if entity:
entity.replace_data_fragment(f"""{re.escape(old_name)}""", f"""{new_name}""",api_update)
# entity.replace_data_fragment(f"'{old_name}'", f"'{new_name}'",api_update)
logger.info(f"Modifed {entity_type} {entity_name}")
def delete(self, do_check = True):
depended = self.get_depended()
if do_check and depended['len']>0:
logger.warning(f"Can't delete {self.entity_type} {self.name}: it used in {depended}")
else:
self.service.execute(getattr(self.gtmservice.accounts().containers().workspaces(), self.entity_type)().delete(**{**{'path':self.path},**self.path_additional_params}))
self.parent.delete(self.entity_type, self.name)
def get_depended(self):
return self.parent.get_depended(self.entity_type, self.dependency_check_id, self.depended_checks)
def get_template_name(self):
for param in self.parameter:
if param["type"].lower() == "template" and param["key"] == "name":
return param["value"]
def get_param(self, param_key, param_type='template'):
for param in self.parameter:
if param["type"] == param_type and param["key"] == param_key:
return param["list"] if param_type == 'list' else param["value"]
def get_custom_params(self):
customParams = self.get_param('customParams','list')
params = []
if customParams and len(customParams)>0:
customParams = [param['map'] for param in customParams]
for param in customParams:
key = False
value = False
for p in param:
if p['key'] == 'key':
key = p['value']
if p['key'] == 'value':
value = p['value']
if key and value:
params.append({key:value})
return params
def get_template_param(self, param_name):
if self.parameter:
for param in self.parameter:
if param["type"].lower() == "template" and param["key"] == param_name:
return param["value"]
def set_folder_id(self, folder_id):
self.data['parentFolderId']=folder_id
def set_type(self, type):
self.data['type'] = type
self.type = type
def get_id(self):
return self.data[self.id_name] | 0.231527 | 0.135032 |
import logging
import os
import shutil
import subprocess
from typing import AnyStr
from typing import Dict, Any, List
class FilterModule(object):
    """
    FilterModule is an abstraction responsible for providing Golang Template and Sprig Function functionality through
    an Ansible Playbook Filter. This class will contain functionality that attempts to directly mimic existing Golang
    template functionality to provide a 1:1 conversion when possible.
    """
    # Filter name constants
    CONTAINS_KEY: str = "contains"
    INVOKE_GO_TF__KEY: str = "invoke_go_tf"
    TRIM__KEY: str = "trim"
    # Other constants
    ANSIBLE_ARGUMENT_ONE: int = 0
    ANSIBLE_ARGUMENT_TWO: int = 1
    ANSIBLE_ARGUMENT_THREE: int = 2
    DEFAULT_ENCODING: str = "UTF-8"
    GO_BINARY_NAME: str = "go"
    GO_RUN_KEYWORD: str = "run"
    SPRIG_CONDUIT_FILENAME: str = "main.go"
    # The Go shim source is expected to live next to this module on disk.
    SPRIG_CONDUIT_PATH: AnyStr = os.path.join(os.path.dirname(__file__), SPRIG_CONDUIT_FILENAME)
    ZERO_ARGS: int = 0

    def __init__(self):
        super(FilterModule, self).__init__()
        # set up some defaults
        self._log = self._setup_logging()
        self._go_binary = self._establish_go_binary()

    def _setup_logging(self) -> logging.Logger:
        """
        Sets up a logging.Logger equipped with a logging.StreamHandler, which will output log messages to the console.

        :return: A preconfigured logging.Logger
        """
        self._log = logging.getLogger(__name__)
        self._log.setLevel(logging.INFO)
        # NOTE(review): a new StreamHandler is attached on every instantiation;
        # if Ansible loads this filter module repeatedly, console output is
        # duplicated — confirm whether a handler-presence guard is wanted.
        self._log.addHandler(logging.StreamHandler())
        return self._log

    def _establish_go_binary(self):
        """
        Establish the go binary location within the system $PATH

        :return: the file path location of the go binary, or None when not found
        """
        go_binary = shutil.which(FilterModule.GO_BINARY_NAME)
        if go_binary is None:
            # NOTE(review): execution continues with go_binary=None; the failure
            # only surfaces later when subprocess.Popen receives None as argv[0].
            self._log.error("Couldn't locate go binary in $PATH")
        # When the binary was not found this logs "Found go binary: None".
        self._log.debug("Found go binary: %s", go_binary)
        return go_binary

    def filters(self) -> Dict[str, Any]:
        """
        Returns a list of exposed filters to the Ansible runtime. Since Ansible provides no base class abstraction,
        this method is assumed to be present, though it is not contractually obligated.

        :return: a dict with keys that are the filter keywords and values are the filter implementation
        """
        # Uncomment those which make sense.
        return {
            # FilterModule.CONTAINS_KEY: self.contains,
            FilterModule.INVOKE_GO_TF__KEY: self.invoke_go_tf,
            # FilterModule.TRIM__KEY: self.trim,
        }

    def trim(self, *args) -> str:
        """
        A conduit between Ansible Filter and Go Sprig's "trim" function. This invokes a Go subprocess with the
        corresponding arguments and return the raw string output.
        To enable, see "filters" function.

        :param args: Ansible Playbook arguments
        :return: subprocess output after rendering the Go template
        """
        # Deliberately passes the whole args tuple as ONE positional argument;
        # _generic_wrapped_tf unpacks it via args[0].
        return self._generic_wrapped_tf(FilterModule.TRIM__KEY, args)

    def contains(self, *args) -> str:
        """
        A conduit between Ansible Filter and Go Sprig's "contains" function. This invokes a Go subprocess with the
        corresponding arguments and return the raw string output.
        To enable, see "filters" function.

        :param args: Ansible Playbook arguments
        :return: subprocess output after rendering the Go template
        """
        # Same tuple-as-single-positional convention as trim().
        return self._generic_wrapped_tf(FilterModule.CONTAINS_KEY, args)

    @staticmethod
    def _first_argument_exists(args: [str]) -> bool:
        """
        Verifies that the first Ansible Argument exists. Since Ansible Filters are invoked by piping, the first
        argument should always exist from this context; this is considered a sanity check.

        :param args: Ansible Playbook Filter arguments
        :return: whether the first argument exists
        """
        return len(args) > FilterModule.ZERO_ARGS

    def _generic_wrapped_tf(self, func_name: str, *args: [str]) -> str:
        """
        Provides an easy way to invoke Go templating for any generic Go/Sprig Template Function. See "trim" and
        "contains" for examples.

        :param func_name: the name of the go template function
        :param args: the Ansible Playbook Filter arguments
        :return: the output after rendering the Go template
        """
        # The first argument is guaranteed to exist. Check anyway; weirder things have happened.
        if not FilterModule._first_argument_exists(args=args):
            # NOTE(review): only logs; execution continues and the args[0]
            # lookup below would raise IndexError on an empty tuple.
            self._log.error("Ansible Playbook should guarantee at least one argument; check the input playbook")
        # re-arrange arguments to expected format.
        # Callers pass the playbook args tuple as a single positional, so
        # args[0] here is the original playbook argument tuple itself.
        arguments = args[FilterModule.ANSIBLE_ARGUMENT_ONE]
        list_arguments = list(arguments)
        # Target layout: (piped_value, func_name, *remaining_args).
        augmented_args = list()
        augmented_args.append(list_arguments[FilterModule.ANSIBLE_ARGUMENT_ONE])
        augmented_args.append(func_name)
        for i in range(FilterModule.ANSIBLE_ARGUMENT_TWO, len(list_arguments)):
            augmented_args.append(list_arguments[i])
        return self.invoke_go_tf(*augmented_args)

    def invoke_go_tf(self, *args):
        """
        invokes a generic go template function.

        :param args: Ansible Playbook Filter arguments
        :return: the output after rendering the Go template
        """
        return self._invoke(args)

    @staticmethod
    def _form_system_call(go_binary: str, func_name: str, first_arg: str, other_args: List[str]) -> List[str]:
        """
        Form the system call used to invoke the Go template shim.

        :param go_binary: the filename of the go binary
        :param func_name: the name of the sprig/Go template function
        :param first_arg: the first argument (the one before the pipe in Ansible filter)
        :param other_args: all other arguments to the Sprig function
        :return: the raw command array which can be passed to subprocess.Popen.
        """
        # re-arrange applicable arguments: go run <shim> <func> [first] [rest...]
        sys_call_list = list()
        sys_call_list.append(go_binary)
        sys_call_list.append(FilterModule.GO_RUN_KEYWORD)
        sys_call_list.append(FilterModule.SPRIG_CONDUIT_PATH)
        sys_call_list.append(func_name)
        # only shim in the first argument if it is not empty
        if first_arg != "":
            sys_call_list.append(first_arg)
        for arg in other_args:
            # Coerce non-string playbook values (ints, bools, ...) to str.
            sys_call_list.append(str(arg))
        return sys_call_list

    def _invoke(self, args):
        """
        Invokes the Go subprocess with applicable arguments in order to resolve sprig function I/O.

        :param args: Ansible Playbook Filter arguments.
        :return: the output after rendering the Go template
        """
        self._log.info("_invoke%s", args)
        first_arg = args[FilterModule.ANSIBLE_ARGUMENT_ONE]
        func_name = args[FilterModule.ANSIBLE_ARGUMENT_TWO]
        other_args = args[FilterModule.ANSIBLE_ARGUMENT_THREE:]
        sys_call_list = FilterModule._form_system_call(self._go_binary, func_name, first_arg, other_args)
        self._log.info("Go System Call: %s", " ".join(sys_call_list))
        # shell=False (list argv) — arguments are not shell-interpreted.
        process = subprocess.Popen(sys_call_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_bytes, stderr_bytes = process.communicate()
        # NOTE(review): failure detection relies on non-empty stderr text only;
        # process.returncode is never checked, so a non-zero exit with quiet
        # stderr goes unnoticed — confirm whether returncode should be checked.
        if stderr_bytes is not None and stderr_bytes.decode(FilterModule.DEFAULT_ENCODING) != "":
            self._log.error("Go invocation attempt failed! stderr: %s",
                            stderr_bytes.decode(FilterModule.DEFAULT_ENCODING))
        stdout = stdout_bytes.decode(FilterModule.DEFAULT_ENCODING)
        return stdout
import os
import shutil
import subprocess
from typing import AnyStr
from typing import Dict, Any, List
class FilterModule(object):
"""
FilterModule is an abstraction responsible for providing Golang Template and Sprig Function functionality through
an Ansible Playbook Filter. This class will contain functionality that attempts to directly mimic existing Golang
template functionality to provide a 1:1 conversion when possible.
"""
# Filter name constants
CONTAINS_KEY: str = "contains"
INVOKE_GO_TF__KEY: str = "invoke_go_tf"
TRIM__KEY: str = "trim"
# Other constants
ANSIBLE_ARGUMENT_ONE: int = 0
ANSIBLE_ARGUMENT_TWO: int = 1
ANSIBLE_ARGUMENT_THREE: int = 2
DEFAULT_ENCODING: str = "UTF-8"
GO_BINARY_NAME: str = "go"
GO_RUN_KEYWORD: str = "run"
SPRIG_CONDUIT_FILENAME: str = "main.go"
SPRIG_CONDUIT_PATH: AnyStr = os.path.join(os.path.dirname(__file__), SPRIG_CONDUIT_FILENAME)
ZERO_ARGS: int = 0
def __init__(self):
super(FilterModule, self).__init__()
# set up some defaults
self._log = self._setup_logging()
self._go_binary = self._establish_go_binary()
def _setup_logging(self) -> logging.Logger:
"""
Sets up a logging.Logger equipped with a logging.StreamHandler, which will output log messages to the console.
:return: A preconfigured logging.Logger
"""
self._log = logging.getLogger(__name__)
self._log.setLevel(logging.INFO)
self._log.addHandler(logging.StreamHandler())
return self._log
def _establish_go_binary(self):
"""
Establish the go binary location within the system $PATH
:return: the file path location of the go binary
"""
go_binary = shutil.which(FilterModule.GO_BINARY_NAME)
if go_binary is None:
self._log.error("Couldn't locate go binary in $PATH")
self._log.debug("Found go binary: %s", go_binary)
return go_binary
def filters(self) -> Dict[str, Any]:
"""
Returns a list of exposed filters to the Ansible runtime. Since Ansible provides no base class abstraction,
this method is assumed to be present, though it is not contractually obligated.
:return: a dict with keys that are the filter keywords and values are the filter implementation
"""
# Uncomment those which make sense.
return {
# FilterModule.CONTAINS_KEY: self.contains,
FilterModule.INVOKE_GO_TF__KEY: self.invoke_go_tf,
# FilterModule.TRIM__KEY: self.trim,
}
def trim(self, *args) -> str:
"""
A conduit between Ansible Filter and Go Sprig's "trim" function. This invokes a Go subprocess with the
corresponding arguments and return the raw string output.
To enable, see "filters" function.
:param args: Ansible Playbook arguments
:return: subprocess output after rendering the Go template
"""
return self._generic_wrapped_tf(FilterModule.TRIM__KEY, args)
def contains(self, *args) -> str:
"""
A conduit between Ansible Filter and Go Sprig's "contains" function. This invokes a Go subprocess with the
corresponding arguments and return the raw string output.
To enable, see "filters" function.
:param args: Ansible Playbook arguments
:return: subprocess output after rendering the Go template
"""
return self._generic_wrapped_tf(FilterModule.CONTAINS_KEY, args)
@staticmethod
def _first_argument_exists(args: [str]) -> bool:
"""
Verifies that the first Ansible Argument exists. Since Ansible Filters are invoked by piping, the first
argument should always exist from this context; this is considered a sanity check.
:param args: Ansible Playbook Filter arguments
:return: whether the first argument exists
"""
return len(args) > FilterModule.ZERO_ARGS
def _generic_wrapped_tf(self, func_name: str, *args: [str]) -> str:
"""
Provides an easy way to invoke Go templating for any generic Go/Sprig Template Function. See "trim" and
"contains" for examples.
:param func_name: the name of the go template function
:param args: the Ansible Playbook Filter arguments
:return: the output after rendering the Go template
"""
# The first argument is guaranteed to exist. Check anyway; weirder things have happened.
if not FilterModule._first_argument_exists(args=args):
self._log.error("Ansible Playbook should guarantee at least one argument; check the input playbook")
# re-arrange arguments to expected format
arguments = args[FilterModule.ANSIBLE_ARGUMENT_ONE]
list_arguments = list(arguments)
augmented_args = list()
augmented_args.append(list_arguments[FilterModule.ANSIBLE_ARGUMENT_ONE])
augmented_args.append(func_name)
for i in range(FilterModule.ANSIBLE_ARGUMENT_TWO, len(list_arguments)):
augmented_args.append(list_arguments[i])
return self.invoke_go_tf(*augmented_args)
def invoke_go_tf(self, *args):
"""
invokes a generic go template function.
:param args: Ansible Playbook Filter arguments
:return: the output after rendering the Go template
"""
return self._invoke(args)
@staticmethod
def _form_system_call(go_binary: str, func_name: str, first_arg: str, other_args: List[str]) -> List[str]:
"""
Form the system call used to invoke the Go template shim.
:param go_binary: the filename of the go binary
:param func_name: the name of the sprig/Go template function
:param first_arg: the first argument (the one before the pipe in Ansible filter)
:param other_args: all other arguments to the Sprig function
:return: the raw command array which can be passed to subprocess.Popen.
"""
# re-arrange applicable arguments
sys_call_list = list()
sys_call_list.append(go_binary)
sys_call_list.append(FilterModule.GO_RUN_KEYWORD)
sys_call_list.append(FilterModule.SPRIG_CONDUIT_PATH)
sys_call_list.append(func_name)
# only shim in the first argument if it is not empty
if first_arg != "":
sys_call_list.append(first_arg)
for arg in other_args:
sys_call_list.append(str(arg))
return sys_call_list
def _invoke(self, args):
"""
Invokes the Go subprocess with applicable arguments in order to resolve sprig function I/O.
:param args: Ansible Playbook Filter arguments.
:return: the output after rendering the Go template
"""
self._log.info("_invoke%s", args)
first_arg = args[FilterModule.ANSIBLE_ARGUMENT_ONE]
func_name = args[FilterModule.ANSIBLE_ARGUMENT_TWO]
other_args = args[FilterModule.ANSIBLE_ARGUMENT_THREE:]
sys_call_list = FilterModule._form_system_call(self._go_binary, func_name, first_arg, other_args)
self._log.info("Go System Call: %s", " ".join(sys_call_list))
process = subprocess.Popen(sys_call_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_bytes, stderr_bytes = process.communicate()
if stderr_bytes is not None and stderr_bytes.decode(FilterModule.DEFAULT_ENCODING) != "":
self._log.error("Go invocation attempt failed! stderr: %s",
stderr_bytes.decode(FilterModule.DEFAULT_ENCODING))
stdout = stdout_bytes.decode(FilterModule.DEFAULT_ENCODING)
return stdout | 0.756178 | 0.269236 |
from django.conf import settings
from collections import OrderedDict
# Django compatibility shim: modern Django exposes the `caches` handler,
# older versions only provide the `get_cache` factory function. Either way
# the name `caches` is bound; get_cache() below dispatches on callability.
try:
    from django.core.cache import caches
except ImportError:
    from django.core.cache import get_cache as caches
def check(request):
    """Collect memcached server statistics for every configured cache alias.

    Aliases whose backend is not memcached are still reported, with an
    empty ``locations`` list. The ``request`` argument is unused; it is
    part of the checker call convention.
    """
    report = []
    for alias in settings.CACHES:
        locations = []
        if is_memcached_profile(alias):
            backend = get_cache(alias)
            for server, raw_stats in backend._cache.get_stats():
                stats = debyteify(raw_stats)
                entry = OrderedDict()
                entry['name'] = debyteify(server)
                entry['summary'] = get_summary(stats)
                entry['details'] = stats
                locations.append(entry)
        report.append({'alias': alias, 'locations': locations})
    return report
def get_summary(stats):
    """Build a short summary (pool load and miss ratio) from raw stats.

    Both figures are percentages scaled by :func:`get_width_ratio`.
    """
    load = get_width_ratio(stats['bytes'], stats['limit_maxbytes'])
    miss_ratio = get_width_ratio(stats['get_misses'], stats['cmd_get'])
    return {'load': load, 'miss_ratio': miss_ratio}
def get_width_ratio(value, max_value, max_width=100):
    """Scale *value* relative to *max_value* onto a 0..*max_width* range.

    Returns ``0`` when *max_value* is zero and ``''`` when the inputs
    cannot be converted to floats — template-friendly fallbacks rather
    than exceptions.
    """
    try:
        numerator = float(value)
        denominator = float(max_value)
    except (ValueError, TypeError, OverflowError):
        return ''
    try:
        return (numerator / denominator) * max_width
    except ZeroDivisionError:
        return 0
def debyteify(input):
    """Recursively decode ``bytes`` to UTF-8 ``str`` inside dicts and lists.

    Dict keys and values and list elements are converted in place-shape;
    any other value is returned unchanged.
    """
    if isinstance(input, bytes):
        return input.decode('utf-8')
    if isinstance(input, dict):
        return {debyteify(k): debyteify(v) for k, v in input.items()}
    if isinstance(input, list):
        return [debyteify(item) for item in input]
    return input
def get_cache(cache_name):
    """Fetch a cache backend, supporting both old and new Django cache APIs."""
    if callable(caches):
        # Legacy API: `caches` is actually the old `get_cache` function.
        return caches(cache_name)
    # Modern API: `caches` is a mapping-style handler.
    return caches[cache_name]
def is_memcached_profile(cache_profile):
    """Return True when the given cache alias uses a memcached backend."""
    memcached_backends = (
        'django.core.cache.backends.memcached.MemcachedCache',
        'django.core.cache.backends.memcached.PyLibMCCache',
    )
    return settings.CACHES[cache_profile]['BACKEND'] in memcached_backends
from collections import OrderedDict
try:
from django.core.cache import caches
except ImportError:
from django.core.cache import get_cache as caches
def check(request):
all_stats = []
for alias in settings.CACHES:
server_stats = []
if is_memcached_profile(alias):
cache_backend = get_cache(alias)
for server, stats in cache_backend._cache.get_stats():
stats = debyteify(stats)
result = OrderedDict()
result['name'] = debyteify(server)
result['summary'] = get_summary(stats)
result['details'] = stats
server_stats.append(result)
all_stats.append(dict(alias=alias, locations=server_stats))
return all_stats
def get_summary(stats):
return {
'load': get_width_ratio(stats['bytes'], stats['limit_maxbytes']),
'miss_ratio': get_width_ratio(stats['get_misses'], stats['cmd_get'])}
def get_width_ratio(value, max_value, max_width=100):
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
except ZeroDivisionError:
return 0
except (ValueError, TypeError, OverflowError):
return ''
return ratio
def debyteify(input):
if isinstance(input, dict):
return {debyteify(key): debyteify(value)
for key, value in input.items()}
elif isinstance(input, list):
return [debyteify(element) for element in input]
elif isinstance(input, bytes):
return input.decode('utf-8')
else:
return input
def get_cache(cache_name):
if hasattr(caches, '__call__'):
return caches(cache_name)
return caches[cache_name]
def is_memcached_profile(cache_profile):
backends = ['django.core.cache.backends.memcached.MemcachedCache',
'django.core.cache.backends.memcached.PyLibMCCache']
return any(
[settings.CACHES[cache_profile]['BACKEND'] == b for b in backends]) | 0.486575 | 0.138812 |
import collections
import sys
from chainer.backends import cuda
from chainer import function_hook
# Probe for CuPy's MemoryHook (available from CuPy >= 2.0). On failure,
# fall back to a plain `object` base so the module still imports; the
# original error is kept so CupyMemoryProfileHook can report it lazily.
try:
    MemoryHook = cuda.cupy.cuda.memory_hook.MemoryHook
    memory_hook_available = True
except Exception as e:
    _resolution_error = e
    MemoryHook = object
    memory_hook_available = False
class CupyMemoryProfileHook(function_hook.FunctionHook):
    """Function hook for measuring memory usage of functions in cupy memory pool.

    Example:
        Code example::

            from chainer.function_hooks import CupyMemoryProfileHook
            hook = CupyMemoryProfileHook()
            with hook:
                trainer.run()
            hook.print_report()

        Output example::

                   FunctionName  UsedBytes  AcquiredBytes  Occurrence
                 LinearFunction     5.16GB       179.98MB        3900
                           ReLU   991.82MB       458.97MB        2600
            SoftmaxCrossEntropy     7.71MB         5.08MB        1300
                       Accuracy   617.97KB       351.00KB         700

        where *FunctionName* is the name of function that calls the hook, and
        *UsedBytes* is the memory bytes the function used from cupy memory
        pool, and *AcquiredBytes* is the actual memory bytes the cupy memory
        pool acquired from GPU device on the function call, and *Occurrence*
        is the number of calls.

    Attributes:
        call_history: List of measurement results. It consists of the name of
            the function that calls this hook, the memory bytes the function
            used from cupy memory pool, and the memory bytes the cupy memory
            pool acquired from GPU device on the function call.
    """
    name = 'CupyMemoryProfileHook'

    def __init__(self):
        cuda.check_cuda_available()
        if not memory_hook_available:
            # Surface the import-time failure recorded by the module probe.
            msg = 'CuPy >= 2.0 is required. %s' % str(_resolution_error)
            raise RuntimeError(msg)
        self.call_history = []
        self._memory_hook = CupyMemoryCumulativeHook()
        # Stack of (used, acquired) counters for nested function calls.
        self._running_stack = []
        self._total_used_bytes = 0
        self._total_acquired_bytes = 0

    def added(self, function=None):
        # Activate the underlying cupy memory hook when this hook is enabled.
        self._memory_hook.__enter__()

    def deleted(self, function=None):
        # NOTE(review): __exit__ is called with no exception arguments —
        # relies on the cupy MemoryHook accepting that; confirm against the
        # installed CuPy version.
        self._memory_hook.__exit__()

    def _preprocess(self):
        # Snapshot cumulative counters so _postprocess can take a delta.
        start_used_bytes = self._memory_hook.used_bytes
        start_acquired_bytes = self._memory_hook.acquired_bytes
        self._running_stack.append((start_used_bytes, start_acquired_bytes))

    def forward_preprocess(self, function, in_data):
        self._preprocess()

    def backward_preprocess(self, function, in_data, out_grad):
        self._preprocess()

    def _postprocess(self, function):
        start_used_bytes, start_acquired_bytes = self._running_stack.pop()
        end_used_bytes = self._memory_hook.used_bytes
        end_acquired_bytes = self._memory_hook.acquired_bytes
        used_bytes = end_used_bytes - start_used_bytes
        acquired_bytes = end_acquired_bytes - start_acquired_bytes
        # depth == 0 means a top-level (non-nested) function call; only those
        # contribute to the grand totals, avoiding double-counting.
        depth = len(self._running_stack)
        self.call_history.append(
            (function._impl_name, used_bytes, acquired_bytes, depth))
        if depth == 0:
            self._total_used_bytes += used_bytes
            self._total_acquired_bytes += acquired_bytes

    def forward_postprocess(self, function, in_data):
        self._postprocess(function)

    def backward_postprocess(self, function, in_data, out_grad):
        self._postprocess(function)

    def total_used_bytes(self):
        """Returns total bytes that functions used from cupy memory pool."""
        return self._total_used_bytes

    def total_acquired_bytes(self):
        """Returns total bytes that cupy memory pool acquired from GPU."""
        return self._total_acquired_bytes

    def summary(self):
        """Returns a summary of memory profiling in functions.

        Returns:
            A summarized dictionary whose keys are function names and
            values are dictionaries of
            ``used_bytes``, ``acquired_bytes``, and ``occurrence``.
        """
        # TODO(sonots): PROBLEM: takes count of nested functions duplicately
        summary = collections.OrderedDict()
        for func_name, used_bytes, acquired_bytes, depth in self.call_history:
            if func_name not in summary:
                summary[func_name] = {'used_bytes': 0,
                                      'acquired_bytes': 0, 'occurrence': 0}
            record = summary[func_name]
            record['used_bytes'] += used_bytes
            record['acquired_bytes'] += acquired_bytes
            record['occurrence'] += 1
        return summary

    def _humanized_size(self, size):
        """Returns a human readable bytes string."""
        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E']:
            if size < 1024.0:
                return '%3.2f%sB' % (size, unit)
            size /= 1024.0
        # Anything past exabytes is reported with a 'Z' suffix.
        return '%.2f%sB' % (size, 'Z')

    def print_report(self, file=sys.stdout):
        """Prints a summary report of memory profiling in functions."""
        entries = [[
            'FunctionName', 'UsedBytes', 'AcquiredBytes', 'Occurrence']]
        for function_name, record in self.summary().items():
            used_bytes = self._humanized_size(record['used_bytes'])
            acquired_bytes = self._humanized_size(record['acquired_bytes'])
            occurrence = str(record['occurrence'])
            entries.append(
                [function_name, used_bytes, acquired_bytes, occurrence])
        # Right-align each column to the widest cell in that column.
        entry_widths = []
        entry_widths.append(max(len(f) for f, _, _, _ in entries))
        entry_widths.append(max(len(u) for _, u, _, _ in entries))
        entry_widths.append(max(len(a) for _, _, a, _ in entries))
        entry_widths.append(max(len(o) for _, _, _, o in entries))
        template = ' '.join('{:>%d}' % w for w in entry_widths)
        for function_name, used_bytes, acquired_bytes, occurrence in entries:
            line = template.format(
                function_name, used_bytes, acquired_bytes, occurrence)
            file.write(line)
            file.write('\n')
        file.flush()
class CupyMemoryCumulativeHook(MemoryHook):
    """A simple memory hook for cupy measuring memory usage cumulatively.

    Attributes:
        used_bytes (int): cumulative bytes that application used from cupy
            memory pool.
        acquired_bytes (int): cumulative bytes that cupy memory pool acquired
            from GPU device.
    """
    name = 'CupyMemoryCumulativeHook'

    def __init__(self):
        self.used_bytes = 0
        self.acquired_bytes = 0

    def alloc_preprocess(self, **kwargs):
        # Called when the pool grows by asking the device for memory.
        self.acquired_bytes += kwargs['mem_size']

    def malloc_preprocess(self, **kwargs):
        # Called on every allocation served from the pool.
        self.used_bytes += kwargs['mem_size']
import sys
from chainer.backends import cuda
from chainer import function_hook
try:
MemoryHook = cuda.cupy.cuda.memory_hook.MemoryHook
memory_hook_available = True
except Exception as e:
_resolution_error = e
MemoryHook = object
memory_hook_available = False
class CupyMemoryProfileHook(function_hook.FunctionHook):
"""Function hook for measuring memory usage of functions in cupy memory pool.
Example:
Code example::
from chainer.function_hooks import CupyMemoryProfileHook
hook = CupyMemoryProfileHook()
with hook:
trainer.run()
hook.print_report()
Output example::
FunctionName UsedBytes AcquiredBytes Occurrence
LinearFunction 5.16GB 179.98MB 3900
ReLU 991.82MB 458.97MB 2600
SoftmaxCrossEntropy 7.71MB 5.08MB 1300
Accuracy 617.97KB 351.00KB 700
where *FunctionName* is the name of function that calls the hook, and
*UsedBytes* is the memory bytes the function used from cupy memory
pool, and *AcquiredBytes* is the actual memory bytes the cupy memory
pool acquired from GPU device on the function call, and *Occurrence*
is the number of calls.
Attributes:
call_history: List of measurement results. It consists of the name of
the function that calls this hook, the memory bytes the function
used from cupy memory pool, and the memory bytes the cupy memory
pool acquired from GPU device on the function call.
"""
name = 'CupyMemoryProfileHook'
def __init__(self):
cuda.check_cuda_available()
if not memory_hook_available:
msg = 'CuPy >= 2.0 is required. %s' % str(_resolution_error)
raise RuntimeError(msg)
self.call_history = []
self._memory_hook = CupyMemoryCumulativeHook()
self._running_stack = []
self._total_used_bytes = 0
self._total_acquired_bytes = 0
def added(self, function=None):
self._memory_hook.__enter__()
def deleted(self, function=None):
self._memory_hook.__exit__()
def _preprocess(self):
start_used_bytes = self._memory_hook.used_bytes
start_acquired_bytes = self._memory_hook.acquired_bytes
self._running_stack.append((start_used_bytes, start_acquired_bytes))
def forward_preprocess(self, function, in_data):
self._preprocess()
def backward_preprocess(self, function, in_data, out_grad):
self._preprocess()
def _postprocess(self, function):
start_used_bytes, start_acquired_bytes = self._running_stack.pop()
end_used_bytes = self._memory_hook.used_bytes
end_acquired_bytes = self._memory_hook.acquired_bytes
used_bytes = end_used_bytes - start_used_bytes
acquired_bytes = end_acquired_bytes - start_acquired_bytes
depth = len(self._running_stack)
self.call_history.append(
(function._impl_name, used_bytes, acquired_bytes, depth))
if depth == 0:
self._total_used_bytes += used_bytes
self._total_acquired_bytes += acquired_bytes
def forward_postprocess(self, function, in_data):
self._postprocess(function)
def backward_postprocess(self, function, in_data, out_grad):
self._postprocess(function)
def total_used_bytes(self):
"""Returns total bytes that functions used from cupy memory pool."""
return self._total_used_bytes
def total_acquired_bytes(self):
"""Returns total bytes that cupy memory pool acquired from GPU."""
return self._total_acquired_bytes
def summary(self):
"""Returns a summary of memory profiling in functions.
Returns:
A summarized dictionary whose keys are function names and
values are dictionaries of
``used_bytes``, ``acquired_bytes``, and ``occurrrence``.
"""
# TODO(sonots): PROBLEM: takes count of nested functions duplicately
summary = collections.OrderedDict()
for func_name, used_bytes, acquired_bytes, depth in self.call_history:
if func_name not in summary:
summary[func_name] = {'used_bytes': 0,
'acquired_bytes': 0, 'occurrence': 0}
record = summary[func_name]
record['used_bytes'] += used_bytes
record['acquired_bytes'] += acquired_bytes
record['occurrence'] += 1
return summary
def _humanized_size(self, size):
"""Returns a human redable bytes string."""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E']:
if size < 1024.0:
return '%3.2f%sB' % (size, unit)
size /= 1024.0
return '%.2f%sB' % (size, 'Z')
def print_report(self, file=sys.stdout):
"""Prints a summary report of memory profiling in functions."""
entries = [[
'FunctionName', 'UsedBytes', 'AcquiredBytes', 'Occurrence']]
for function_name, record in self.summary().items():
used_bytes = self._humanized_size(record['used_bytes'])
acquired_bytes = self._humanized_size(record['acquired_bytes'])
occurrence = str(record['occurrence'])
entries.append(
[function_name, used_bytes, acquired_bytes, occurrence])
entry_widths = []
entry_widths.append(max(len(f) for f, _, _, _ in entries))
entry_widths.append(max(len(u) for _, u, _, _ in entries))
entry_widths.append(max(len(a) for _, _, a, _ in entries))
entry_widths.append(max(len(o) for _, _, _, o in entries))
template = ' '.join('{:>%d}' % w for w in entry_widths)
for function_name, used_bytes, acquired_bytes, occurrence in entries:
line = template.format(
function_name, used_bytes, acquired_bytes, occurrence)
file.write(line)
file.write('\n')
file.flush()
class CupyMemoryCumulativeHook(MemoryHook):
"""A simple memory hook for cupy measuring memory usage cumulatively.
Attributes:
used_bytes (int): cumulative bytes that application used from cupy
memory pool.
acquired_bytes (int): cumulative bytes that cupy memory pool acquired
from GPU device.
"""
name = 'CupyMemoryCumulativeHook'
def __init__(self):
self.used_bytes = 0
self.acquired_bytes = 0
def alloc_preprocess(self, **kwargs):
self.acquired_bytes += kwargs['mem_size']
def malloc_preprocess(self, **kwargs):
self.used_bytes += kwargs['mem_size'] | 0.602529 | 0.32461 |
import datetime
import unittest
from builtins import RuntimeError
from unittest import mock
from unittest.mock import call
import freezegun
import mongomock
from bson import ObjectId
from pymongo import MongoClient
from pymongo_odm.document import Document
from pymongo_odm.helpers import Map
from pymongo_odm.helpers.type_hints import Datetime
from test.helpers import random_dict, random_str, random_datetime, random_int
class FakeDoc(Document):
    """Minimal concrete Document subclass used as the system under test."""
    db = 'test'  # database name consumed by Document.get_db()
    collection = 'fake_doc'  # collection name consumed by Document.get_collection()

    def for_json(self) -> dict:
        # Abstract serialization hook; its output is irrelevant to these tests.
        pass
class TestDocument(unittest.TestCase):
# Dotted-path constants used as mock.patch targets for Document internals.
DOCUMENT_MODULE_PATH = 'pymongo_odm'
DOCUMENT_PATH = f'{DOCUMENT_MODULE_PATH}.Document'
_SET_PATH = f'{DOCUMENT_PATH}._set'
_UPDATE_DB_CACHE_PATH = f'{DOCUMENT_PATH}._update_db_cache'
_GET_DIFF_PATH = f'{DOCUMENT_PATH}._get_diff'
_INSERT_PATH = f'{DOCUMENT_PATH}._insert'
_UPDATE_PATH = f'{DOCUMENT_PATH}._update'
_DEFAULT_PRE_INSERT_PATH = f'{DOCUMENT_PATH}._default_pre_insert'
PRE_INSERT_PATH = f'{DOCUMENT_PATH}.pre_insert'
POST_INSERT_PATH = f'{DOCUMENT_PATH}.post_insert'
_DEFAULT_PRE_UPDATE_PATH = f'{DOCUMENT_PATH}._default_pre_update'
PRE_UPDATE_PATH = f'{DOCUMENT_PATH}.pre_update'
POST_UPDATE_PATH = f'{DOCUMENT_PATH}.post_update'
def setUp(self):
    """Point FakeDoc at a fresh in-memory mongomock client before each test."""
    super().setUp()
    FakeDoc.client = mongomock.MongoClient()
def test_cannot_instantiate_the_base_document(self):
    """Document is abstract: direct instantiation must raise."""
    # Given
    # I expect it to raise
    with self.assertRaises(NotImplementedError):
        # When I try to instantiate
        doc = Document()

def test_get_db_should_return_the_db_object(self):
    """get_db returns a database object named after the model's db attr."""
    # Given I've got a doc model
    # When I call
    result = FakeDoc.get_db()
    # Then I expect
    self.assertIsInstance(result, mongomock.Database)
    self.assertEqual(FakeDoc.db, result.name)

def test_get_collection_should_return_the_collection_object(self):
    """get_collection returns a collection named after the model's collection attr."""
    # Given I've got a doc model
    # When I call
    result = FakeDoc.get_collection()
    # Then I expect
    self.assertIsInstance(result, mongomock.Collection)
    self.assertEqual(FakeDoc.collection, result.name)
def test_create_collection_should_skip_when_collection_exists(self):
    """create_collection is a no-op when the collection already exists."""
    # Given I know a collection exists
    FakeDoc().save()  # Saves in a collection
    # When I call
    FakeDoc.create_collection()
    # Then I expect the existing document survives (nothing was dropped)
    self.assertEqual(1, FakeDoc.count())

def test_create_collection_should_skip_when_collection_does_not_have_a_capped_attr(self):
    """Without a `capped` attr, create_collection does not create anything."""
    # Given I know a we dont have a given collection
    FakeDoc.get_collection().drop()
    # When I call
    FakeDoc.create_collection()
    # Then I expect
    self.assertFalse(FakeDoc.collection in FakeDoc.get_db().list_collection_names())

def test_create_collection_should_create_when_collection_has_a_capped_attr(self):
    """With a `capped` attr, a capped collection is created up-front.

    Uses a real MongoClient because mongomock does not support capped
    collection options.
    """
    FakeDoc.client = MongoClient()
    # Given I know a we dont have a given collection
    FakeDoc.get_collection().drop()
    # And I know
    FakeDoc.capped = {
        'size': random_int(),
        'max': random_int()
    }
    # When I call
    FakeDoc.create_collection()
    # Then I expect
    self.assertTrue(FakeDoc.collection in FakeDoc.get_db().list_collection_names())
    self.assertTrue(FakeDoc.get_collection().options().get('capped', False))
    # Clean up the class-level state mutated above.
    FakeDoc.capped = None
    FakeDoc.get_collection().drop()
def test__update_db_cache_should_copy_data_to_data_in_db(self):
    """_update_db_cache mirrors in-memory data into the db-state cache."""
    # Given I've got a doc model
    doc = FakeDoc()
    # And I know it has some data that is not stored in db (and not cached as stored in db)
    doc._data = Map({random_str(): random_str() for _ in range(5)})
    # When I call
    doc._update_db_cache()
    # Then I expect
    self.assertEqual(doc._data, doc._data_in_db)
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_SET_PATH)
def test__load_args_should_set_given_data_and_not_update_cache_when_doc_is_not_created(self, mock__set,
                                                                                       mock__update_db_cache):
    """_load_args with is_created=False sets fields but leaves the db cache alone."""
    # Given I've got a new doc model
    doc = FakeDoc()
    # And I've got a a raw doc
    raw_doc = {'_id': random_str(), 'created': random_datetime(), 'modified': random_datetime()}
    # And I don't won't to treat it as a document that exists in db yet
    is_created = False
    # When I call
    doc._load_args(raw_doc, is_created)
    # Then I expect
    calls = [call(key, raw_doc[key]) for key in raw_doc]
    mock__set.assert_has_calls(calls)
    mock__update_db_cache.assert_not_called()

@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_SET_PATH)
def test__load_args_should_raise_when_is_created_is_true_but_not_given_an__id(self, mock__set,
                                                                              mock__update_db_cache):
    """_load_args with is_created=True requires an _id in the raw doc."""
    # Given I've got a new doc model
    doc = FakeDoc()
    # And I've got a a raw doc (note: deliberately missing '_id')
    raw_doc = {'created': random_datetime(), 'modified': random_datetime()}
    # And I want to set the doc as created in db
    is_created = True
    # I expect it to raise
    with self.assertRaises(ValueError):
        # When I call
        doc._load_args(raw_doc, is_created)
    # Then I expect
    calls = [call(key, raw_doc[key]) for key in raw_doc]
    mock__set.assert_has_calls(calls)
    mock__update_db_cache.assert_not_called()

@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_SET_PATH)
def test__load_args_should_set_given_data_and_update_cache_when_doc_is_created(self, mock__set,
                                                                               mock__update_db_cache):
    """_load_args with is_created=True and an _id sets fields and refreshes the cache."""
    # Given I've got a new doc model
    doc = FakeDoc()
    # And I've got a a raw doc
    raw_doc = {'_id': random_str(), 'created': random_datetime(), 'modified': random_datetime()}
    # And I want to set the doc as created in db
    is_created = True
    # When I call
    doc._load_args(raw_doc, is_created)
    # Then I expect
    calls = [call(key, raw_doc[key]) for key in raw_doc]
    mock__set.assert_has_calls(calls)
    mock__update_db_cache.assert_called_once()
def test__set_should_do_nothing_when_given_value_matches_the_existing_one(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know the doc has some data
key = random_str()
value = random_str()
doc._data[key] = value
# When I call
doc._set(key, value)
# Then I expect
self.assertEqual(value, doc._data[key])
def test__set_should_update_value_when_is_different_than_exiting_value(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know the doc has some data
key = random_str()
value = random_str()
doc._data[key] = value
new_value = random_str()
# When I call
doc._set(key, new_value)
# Then I expect
self.assertEqual(new_value, doc._data[key])
def test__set_should_add_key_and_value_when_key_does_not_exist_yet(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know the doc has no data
doc._data.clear()
# And I've got
key = random_str()
value = random_str()
# When I call
doc._set(key, value)
# Then I expect
self.assertEqual(value, doc._data[key])
def test__get_diff_should_return_empty_dict_when_data_and_data_in_db_are_equal(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know that the doc's data in memory is equal to the data in db
data = random_dict(20)
doc._data = data.copy()
doc._data_in_db = data.copy()
# When I call
diff = doc._get_diff()
# Then I expect
self.assertEqual({}, diff)
    def test__get_diff_should_return_the_difference_between_data_and_data_in_db(self):
        """
        Test Method: _get_diff should report everything that differs from the db snapshot.
        """
        # Given I've got a doc model
        doc = FakeDoc()
        # And I know that the doc's data in memory is not equal to the data in db
        # NOTE(review): the expected diff below is exactly `data`, which presumes the two
        # random dicts share no keys (random_dict presumably generates random keys).
        # Confirm random_dict's key space — a collision would make this test flaky.
        data = random_dict(20)
        data_in_db = random_dict(13)
        doc._data = data.copy()
        doc._data_in_db = data_in_db.copy()
        # When I call
        diff = doc._get_diff()
        # Then I expect every in-memory entry to be reported as changed
        self.assertEqual(data, diff)
def test_saving_a_new_doc_should_add_created_and_modified_properties(self):
"""
Test Method: INTEGRATION
"""
# Given I've got a new doc instance
doc = FakeDoc()
# When I call
doc.save()
# Then I expect
self.assertIsInstance(doc.created, Datetime)
self.assertIsInstance(doc.modified, Datetime)
self.assertIsInstance(doc.id, ObjectId)
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_UPDATE_PATH)
@mock.patch(_INSERT_PATH)
def test_saving_a_new_doc_should_insert(self, mock__insert, mock__update, mock__update_db_cache):
"""
Test Method:
"""
# Given I've got a new doc instance
doc = FakeDoc()
# When I call
result = doc.save()
# Then I expect
mock__insert.assert_called_once_with()
mock__update.assert_not_called()
mock__update_db_cache.assert_called_once()
self.assertIs(doc, result)
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_UPDATE_PATH)
@mock.patch(_INSERT_PATH)
def test_saving_an_existing_doc_should_raise_because_it_is_not_supported(self, mock__insert,
mock__update, mock__update_db_cache):
"""
Test Method:
"""
# Given I've got an existing doc instance
doc = FakeDoc()
doc._data_in_db._id = random_str()
# I expect it to raise
with self.assertRaises(RuntimeError):
# When I call
result = doc.save()
# Then I expect
mock__insert.assert_not_called()
mock__update_db_cache.assert_not_called()
    @freezegun.freeze_time('1999-11-11')
    @mock.patch(POST_INSERT_PATH)
    @mock.patch(_GET_DIFF_PATH)
    @mock.patch(PRE_INSERT_PATH)
    @mock.patch(_DEFAULT_PRE_INSERT_PATH)
    def test__insert_should_call_lifecycle_hooks_and_insert(self, mock__default_pre_insert, mock_pre_insert,
                                                            mock__get_diff, mock_post_insert):
        """
        Test Method: _insert must run the pre/post lifecycle hooks and persist the diff to mongo.
        """
        # Given I've got a new Doc
        doc = FakeDoc()
        # And I know the new values (changes)
        # freezegun pins utcnow() to 1999-11-11 so the timestamps are deterministic
        date = datetime.datetime.utcnow()
        doc.created = date
        doc.modified = date
        # _get_diff is mocked, so exactly these changes are what gets written to the collection
        changes = {'created': date, 'modified': date}
        mock__get_diff.return_value = changes
        # When I call
        doc._insert()
        # Then I expect each lifecycle step ran exactly once:
        # default pre-insert, user pre-insert, diff computation, post-insert
        mock__default_pre_insert.assert_called_once()
        mock_pre_insert.assert_called_once()
        mock__get_diff.assert_called_once()
        mock_post_insert.assert_called_once()
        # And the doc received a real ObjectId from the insert
        _id = doc.id
        self.assertIsInstance(_id, ObjectId)
        # And the stored document round-trips with the frozen timestamps
        expected = FakeDoc.get_collection().find_one({'_id': _id})
        self.assertIsNotNone(expected)
        self.assertEqual(expected.get('created'), doc.created)
        self.assertEqual(expected.get('created'), date)
        self.assertEqual(expected.get('modified'), doc.modified)
        self.assertEqual(expected.get('modified'), date)
@freezegun.freeze_time('1999-11-11')
def test__default_pre_insert_should_set_created_and_modified(self):
"""
Test Method:
"""
# Given I've got a new Doc
doc = FakeDoc()
# When I call
doc._default_pre_insert()
# Then I expect
self.assertEqual(doc.created, datetime.datetime(1999, 11, 11))
self.assertEqual(doc.modified, datetime.datetime(1999, 11, 11))
def test__to_map_should_return_a_map_with_the_given_keys(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And It has some data
data = {'a': random_str(), 'b': random_str(), 'c': random_str()}
doc._data = data
# When I call
result = doc._to_map('a', 'c', 'F')
# Then I expect
expected = Map({'_id': None, 'created': None, 'modified': None, 'a': data['a'], 'c': data['c'], 'F': None})
self.assertEqual(expected, result)
def test_from_dict_should_instantiate_document(self):
"""
Test Method:
"""
# Given I've got a raw doc
raw_doc = {'_id': 4343, 'created': 4343244}
# When I call
result = FakeDoc.from_dict(raw_doc)
# Then I expect
self.assertIsInstance(result, FakeDoc)
self.assertTrue(result.is_created)
self.assertEqual(result._data, result._data_in_db)
def test_from_dict_should_instantiate_document_without_setting_is_created(self):
"""
Test Method:
"""
# Given I've got a raw doc
raw_doc = {'_id': 4343, 'created': 4343244}
# When I call
result = FakeDoc.from_dict(raw_doc, False)
# Then I expect
self.assertIsInstance(result, FakeDoc)
self.assertFalse(result.is_created)
self.assertNotEqual(result._data, result._data_in_db)
def test_count_should_return_the_collection_count(self):
"""
Test Method:
"""
# Given I've got some docs in collection
amount_of_docs = random_int(start=3)
for _ in range(amount_of_docs):
FakeDoc().save()
# When I call
result = FakeDoc.count()
# Then I expect
self.assertEqual(amount_of_docs, result)
def test_document_should_not_equal_data_when_has_id_and_data_does_not_match(self):
"""
Test Method:
"""
# Given I've got 2 docs
doc1 = FakeDoc().save()
doc2 = FakeDoc().save()
# When I call
result = doc1 == doc2
# Then I expect
self.assertFalse(result)
def test_document_should_equal_data_when_has_id(self):
"""
Test Method:
"""
# Given I've got 2 docs
doc1 = FakeDoc().save()
doc2 = FakeDoc()
doc2._data = doc1._data
# When I call
result = doc1 == doc2
# Then I expect
self.assertTrue(result)
def test_document_should_not_equal_itself_when_it_has_no_id(self):
"""
Test Method:
"""
# Given I've got 2 docs without id
doc1 = FakeDoc()
doc2 = FakeDoc()
doc2._data = doc1._data
# When I call
result = doc1 == doc2
# Then I expect
self.assertFalse(result)
def test_document_should_equal_itself_when_it_has_no_id(self):
"""
Test Method:
"""
# Given I've got a doc without an ID
doc1 = FakeDoc()
doc2 = doc1
# When I call
result = doc1 == doc2
# Then I expect
self.assertTrue(result)
    def test_delete_will_delete_the_doc_from_memory_when_doc_not_saved_yet(self):
        """
        Test Method: delete() on an unsaved doc only tears down the in-memory state.
        """
        # Given I've got a doc that has NOT been saved to db yet
        # (the original comment claimed the doc was "created (saved in db)", which
        # contradicts both the test name and the fact that save() is never called)
        doc = FakeDoc()
        doc._data = random_dict()
        # When I call
        result = doc.delete()
        # Then I expect the in-memory state to be gone and the call to report success
        self.assertFalse(hasattr(doc, '_data'))
        self.assertFalse(hasattr(doc, '_data_in_db'))
        self.assertTrue(result)
def test_delete_will_delete_the_doc_from_db_and_from_memory(self):
"""
Test Method:
"""
# Given I've got a doc that is created (saved in db)
doc = FakeDoc().save()
_id = doc.id
# When I call
result = doc.delete()
# Then I expect
expected = FakeDoc.get_collection().find_one({'_id': _id})
self.assertIsNone(expected)
self.assertFalse(hasattr(doc, '_data'))
self.assertFalse(hasattr(doc, '_data_in_db'))
self.assertTrue(result) | test/test_document.py | import datetime
import unittest
from builtins import RuntimeError
from unittest import mock
from unittest.mock import call
import freezegun
import mongomock
from bson import ObjectId
from pymongo import MongoClient
from pymongo_odm.document import Document
from pymongo_odm.helpers import Map
from pymongo_odm.helpers.type_hints import Datetime
from test.helpers import random_dict, random_str, random_datetime, random_int
class FakeDoc(Document):
    # Minimal concrete Document used as the fixture throughout these tests;
    # setUp() swaps its `client` for a mongomock instance, so no real MongoDB is needed.
    db = 'test'
    collection = 'fake_doc'

    def for_json(self) -> dict:
        """Abstract-method stub; actually returns None — the annotation is aspirational, unused in tests."""
        pass
class TestDocument(unittest.TestCase):
DOCUMENT_MODULE_PATH = 'pymongo_odm'
DOCUMENT_PATH = f'{DOCUMENT_MODULE_PATH}.Document'
_SET_PATH = f'{DOCUMENT_PATH}._set'
_UPDATE_DB_CACHE_PATH = f'{DOCUMENT_PATH}._update_db_cache'
_GET_DIFF_PATH = f'{DOCUMENT_PATH}._get_diff'
_INSERT_PATH = f'{DOCUMENT_PATH}._insert'
_UPDATE_PATH = f'{DOCUMENT_PATH}._update'
_DEFAULT_PRE_INSERT_PATH = f'{DOCUMENT_PATH}._default_pre_insert'
PRE_INSERT_PATH = f'{DOCUMENT_PATH}.pre_insert'
POST_INSERT_PATH = f'{DOCUMENT_PATH}.post_insert'
_DEFAULT_PRE_UPDATE_PATH = f'{DOCUMENT_PATH}._default_pre_update'
PRE_UPDATE_PATH = f'{DOCUMENT_PATH}.pre_update'
POST_UPDATE_PATH = f'{DOCUMENT_PATH}.post_update'
def setUp(self):
super().setUp()
FakeDoc.client = mongomock.MongoClient()
def test_cannot_instantiate_the_base_document(self):
"""
Test Method:
"""
# Given
# I expect it to raise
with self.assertRaises(NotImplementedError):
# When I try to instantiate
doc = Document()
def test_get_db_should_return_the_db_object(self):
"""
Test Method:
"""
# Given I've got a doc model
# When I call
result = FakeDoc.get_db()
# Then I expect
self.assertIsInstance(result, mongomock.Database)
self.assertEqual(FakeDoc.db, result.name)
def test_get_collection_should_return_the_collection_object(self):
"""
Test Method:
"""
# Given I've got a doc model
# When I call
result = FakeDoc.get_collection()
# Then I expect
self.assertIsInstance(result, mongomock.Collection)
self.assertEqual(FakeDoc.collection, result.name)
def test_create_collection_should_skip_when_collection_exists(self):
"""
Test Method:
"""
# Given I know a collection exists
FakeDoc().save() # Saves in a collection
# When I call
FakeDoc.create_collection()
# Then I expect
self.assertEqual(1, FakeDoc.count())
def test_create_collection_should_skip_when_collection_does_not_have_a_capped_attr(self):
"""
Test Method:
"""
# Given I know a we dont have a given collection
FakeDoc.get_collection().drop()
# When I call
FakeDoc.create_collection()
# Then I expect
self.assertFalse(FakeDoc.collection in FakeDoc.get_db().list_collection_names())
def test_create_collection_should_create_when_collection_has_a_capped_attr(self):
"""
Test Method:
"""
FakeDoc.client = MongoClient()
# Given I know a we dont have a given collection
FakeDoc.get_collection().drop()
# And I know
FakeDoc.capped = {
'size': random_int(),
'max': random_int()
}
# When I call
FakeDoc.create_collection()
# Then I expect
self.assertTrue(FakeDoc.collection in FakeDoc.get_db().list_collection_names())
self.assertTrue(FakeDoc.get_collection().options().get('capped', False))
FakeDoc.capped = None
FakeDoc.get_collection().drop()
def test__update_db_cache_should_copy_data_to_data_in_db(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know it has some data that is not stored in db (and not cached as stored in db)
doc._data = Map({random_str(): random_str() for _ in range(5)})
# When I call
doc._update_db_cache()
# Then I expect
self.assertEqual(doc._data, doc._data_in_db)
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_SET_PATH)
def test__load_args_should_set_given_data_and_not_update_cache_when_doc_is_not_created(self, mock__set,
mock__update_db_cache):
"""
Test Method:
"""
# Given I've got a new doc model
doc = FakeDoc()
# And I've got a a raw doc
raw_doc = {'_id': random_str(), 'created': random_datetime(), 'modified': random_datetime()}
# And I don't won't to treat it as a document that exists in db yet
is_created = False
# When I call
doc._load_args(raw_doc, is_created)
# Then I expect
calls = [call(key, raw_doc[key]) for key in raw_doc]
mock__set.assert_has_calls(calls)
mock__update_db_cache.assert_not_called()
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_SET_PATH)
def test__load_args_should_raise_when_is_created_is_true_but_not_given_an__id(self, mock__set,
mock__update_db_cache):
"""
Test Method:
"""
# Given I've got a new doc model
doc = FakeDoc()
# And I've got a a raw doc
raw_doc = {'created': random_datetime(), 'modified': random_datetime()}
# And I want to set the doc as created in db
is_created = True
# I expect it to raise
with self.assertRaises(ValueError):
# When I call
doc._load_args(raw_doc, is_created)
# Then I expect
calls = [call(key, raw_doc[key]) for key in raw_doc]
mock__set.assert_has_calls(calls)
mock__update_db_cache.assert_not_called()
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_SET_PATH)
def test__load_args_should_set_given_data_and_update_cache_when_doc_is_created(self, mock__set,
mock__update_db_cache):
"""
Test Method:
"""
# Given I've got a new doc model
doc = FakeDoc()
# And I've got a a raw doc
raw_doc = {'_id': random_str(), 'created': random_datetime(), 'modified': random_datetime()}
# And I want to set the doc as created in db
is_created = True
# When I call
doc._load_args(raw_doc, is_created)
# Then I expect
calls = [call(key, raw_doc[key]) for key in raw_doc]
mock__set.assert_has_calls(calls)
mock__update_db_cache.assert_called_once()
def test__set_should_do_nothing_when_given_value_matches_the_existing_one(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know the doc has some data
key = random_str()
value = random_str()
doc._data[key] = value
# When I call
doc._set(key, value)
# Then I expect
self.assertEqual(value, doc._data[key])
def test__set_should_update_value_when_is_different_than_exiting_value(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know the doc has some data
key = random_str()
value = random_str()
doc._data[key] = value
new_value = random_str()
# When I call
doc._set(key, new_value)
# Then I expect
self.assertEqual(new_value, doc._data[key])
def test__set_should_add_key_and_value_when_key_does_not_exist_yet(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know the doc has no data
doc._data.clear()
# And I've got
key = random_str()
value = random_str()
# When I call
doc._set(key, value)
# Then I expect
self.assertEqual(value, doc._data[key])
def test__get_diff_should_return_empty_dict_when_data_and_data_in_db_are_equal(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know that the doc's data in memory is equal to the data in db
data = random_dict(20)
doc._data = data.copy()
doc._data_in_db = data.copy()
# When I call
diff = doc._get_diff()
# Then I expect
self.assertEqual({}, diff)
def test__get_diff_should_return_the_difference_between_data_and_data_in_db(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And I know that the doc's data in memory is not equal to the data in db
data = random_dict(20)
data_in_db = random_dict(13)
doc._data = data.copy()
doc._data_in_db = data_in_db.copy()
# When I call
diff = doc._get_diff()
# Then I expect
self.assertEqual(data, diff)
def test_saving_a_new_doc_should_add_created_and_modified_properties(self):
"""
Test Method: INTEGRATION
"""
# Given I've got a new doc instance
doc = FakeDoc()
# When I call
doc.save()
# Then I expect
self.assertIsInstance(doc.created, Datetime)
self.assertIsInstance(doc.modified, Datetime)
self.assertIsInstance(doc.id, ObjectId)
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_UPDATE_PATH)
@mock.patch(_INSERT_PATH)
def test_saving_a_new_doc_should_insert(self, mock__insert, mock__update, mock__update_db_cache):
"""
Test Method:
"""
# Given I've got a new doc instance
doc = FakeDoc()
# When I call
result = doc.save()
# Then I expect
mock__insert.assert_called_once_with()
mock__update.assert_not_called()
mock__update_db_cache.assert_called_once()
self.assertIs(doc, result)
@mock.patch(_UPDATE_DB_CACHE_PATH)
@mock.patch(_UPDATE_PATH)
@mock.patch(_INSERT_PATH)
def test_saving_an_existing_doc_should_raise_because_it_is_not_supported(self, mock__insert,
mock__update, mock__update_db_cache):
"""
Test Method:
"""
# Given I've got an existing doc instance
doc = FakeDoc()
doc._data_in_db._id = random_str()
# I expect it to raise
with self.assertRaises(RuntimeError):
# When I call
result = doc.save()
# Then I expect
mock__insert.assert_not_called()
mock__update_db_cache.assert_not_called()
@freezegun.freeze_time('1999-11-11')
@mock.patch(POST_INSERT_PATH)
@mock.patch(_GET_DIFF_PATH)
@mock.patch(PRE_INSERT_PATH)
@mock.patch(_DEFAULT_PRE_INSERT_PATH)
def test__insert_should_call_lifecycle_hooks_and_insert(self, mock__default_pre_insert, mock_pre_insert,
mock__get_diff, mock_post_insert):
"""
Test Method:
"""
# Given I've got a new Doc
doc = FakeDoc()
# And I know the new values (changes)
date = datetime.datetime.utcnow()
doc.created = date
doc.modified = date
changes = {'created': date, 'modified': date}
mock__get_diff.return_value = changes
# When I call
doc._insert()
# Then I expect
mock__default_pre_insert.assert_called_once()
mock_pre_insert.assert_called_once()
mock__get_diff.assert_called_once()
mock_post_insert.assert_called_once()
_id = doc.id
self.assertIsInstance(_id, ObjectId)
expected = FakeDoc.get_collection().find_one({'_id': _id})
self.assertIsNotNone(expected)
self.assertEqual(expected.get('created'), doc.created)
self.assertEqual(expected.get('created'), date)
self.assertEqual(expected.get('modified'), doc.modified)
self.assertEqual(expected.get('modified'), date)
@freezegun.freeze_time('1999-11-11')
def test__default_pre_insert_should_set_created_and_modified(self):
"""
Test Method:
"""
# Given I've got a new Doc
doc = FakeDoc()
# When I call
doc._default_pre_insert()
# Then I expect
self.assertEqual(doc.created, datetime.datetime(1999, 11, 11))
self.assertEqual(doc.modified, datetime.datetime(1999, 11, 11))
def test__to_map_should_return_a_map_with_the_given_keys(self):
"""
Test Method:
"""
# Given I've got a doc model
doc = FakeDoc()
# And It has some data
data = {'a': random_str(), 'b': random_str(), 'c': random_str()}
doc._data = data
# When I call
result = doc._to_map('a', 'c', 'F')
# Then I expect
expected = Map({'_id': None, 'created': None, 'modified': None, 'a': data['a'], 'c': data['c'], 'F': None})
self.assertEqual(expected, result)
def test_from_dict_should_instantiate_document(self):
"""
Test Method:
"""
# Given I've got a raw doc
raw_doc = {'_id': 4343, 'created': 4343244}
# When I call
result = FakeDoc.from_dict(raw_doc)
# Then I expect
self.assertIsInstance(result, FakeDoc)
self.assertTrue(result.is_created)
self.assertEqual(result._data, result._data_in_db)
def test_from_dict_should_instantiate_document_without_setting_is_created(self):
"""
Test Method:
"""
# Given I've got a raw doc
raw_doc = {'_id': 4343, 'created': 4343244}
# When I call
result = FakeDoc.from_dict(raw_doc, False)
# Then I expect
self.assertIsInstance(result, FakeDoc)
self.assertFalse(result.is_created)
self.assertNotEqual(result._data, result._data_in_db)
def test_count_should_return_the_collection_count(self):
"""
Test Method:
"""
# Given I've got some docs in collection
amount_of_docs = random_int(start=3)
for _ in range(amount_of_docs):
FakeDoc().save()
# When I call
result = FakeDoc.count()
# Then I expect
self.assertEqual(amount_of_docs, result)
def test_document_should_not_equal_data_when_has_id_and_data_does_not_match(self):
"""
Test Method:
"""
# Given I've got 2 docs
doc1 = FakeDoc().save()
doc2 = FakeDoc().save()
# When I call
result = doc1 == doc2
# Then I expect
self.assertFalse(result)
def test_document_should_equal_data_when_has_id(self):
"""
Test Method:
"""
# Given I've got 2 docs
doc1 = FakeDoc().save()
doc2 = FakeDoc()
doc2._data = doc1._data
# When I call
result = doc1 == doc2
# Then I expect
self.assertTrue(result)
def test_document_should_not_equal_itself_when_it_has_no_id(self):
"""
Test Method:
"""
# Given I've got 2 docs without id
doc1 = FakeDoc()
doc2 = FakeDoc()
doc2._data = doc1._data
# When I call
result = doc1 == doc2
# Then I expect
self.assertFalse(result)
def test_document_should_equal_itself_when_it_has_no_id(self):
"""
Test Method:
"""
# Given I've got a doc without an ID
doc1 = FakeDoc()
doc2 = doc1
# When I call
result = doc1 == doc2
# Then I expect
self.assertTrue(result)
def test_delete_will_delete_the_doc_from_memory_when_doc_not_saved_yet(self):
"""
Test Method:
"""
# Given I've got a doc that is created (saved in db)
doc = FakeDoc()
doc._data = random_dict()
# When I call
result = doc.delete()
# Then I expect
self.assertFalse(hasattr(doc, '_data'))
self.assertFalse(hasattr(doc, '_data_in_db'))
self.assertTrue(result)
def test_delete_will_delete_the_doc_from_db_and_from_memory(self):
"""
Test Method:
"""
# Given I've got a doc that is created (saved in db)
doc = FakeDoc().save()
_id = doc.id
# When I call
result = doc.delete()
# Then I expect
expected = FakeDoc.get_collection().find_one({'_id': _id})
self.assertIsNone(expected)
self.assertFalse(hasattr(doc, '_data'))
self.assertFalse(hasattr(doc, '_data_in_db'))
self.assertTrue(result) | 0.510252 | 0.237736 |
import FWCore.ParameterSet.Config as cms
# EDAliases re-exposing the digi products created inside the MixingModule ("mix")
# under the module labels that downstream reconstruction expects.

# Castor calorimeter digis
simCastorDigis = cms.EDAlias(
    mix = cms.VPSet(
        cms.PSet(type = cms.string('CastorDataFramesSorted'))
    )
)
# ECAL barrel (EB), endcap (EE) and preshower (ES) digi collections
simEcalUnsuppressedDigis = cms.EDAlias(
    mix = cms.VPSet(
        cms.PSet(type = cms.string('EBDigiCollection')),
        cms.PSet(type = cms.string('EEDigiCollection')),
        cms.PSet(type = cms.string('ESDigiCollection'))
    )
)
# HCAL digi frames, including the QIE10/QIE11 front-end formats
simHcalUnsuppressedDigis = cms.EDAlias(
    mix = cms.VPSet(
        cms.PSet(type = cms.string('HBHEDataFramesSorted')),
        cms.PSet(type = cms.string('HFDataFramesSorted')),
        cms.PSet(type = cms.string('HODataFramesSorted')),
        cms.PSet(type = cms.string('ZDCDataFramesSorted')),
        cms.PSet(type = cms.string('QIE10DataFrameHcalDataFrameContainer')),
        cms.PSet(type = cms.string('QIE11DataFrameHcalDataFrameContainer'))
    )
)
# Pixel tracker digis plus their sim links
simSiPixelDigis = cms.EDAlias(
    mix = cms.VPSet(
        cms.PSet(type = cms.string('PixelDigiedmDetSetVector')),
        cms.PSet(type = cms.string('PixelDigiSimLinkedmDetSetVector'))
    )
)
# Strip tracker digis (zero-suppressed and raw) plus their sim links
simSiStripDigis = cms.EDAlias(
    mix = cms.VPSet(
        cms.PSet(type = cms.string('SiStripDigiedmDetSetVector')),
        cms.PSet(type = cms.string('SiStripRawDigiedmDetSetVector')),
        cms.PSet(type = cms.string('StripDigiSimLinkedmDetSetVector'))
    )
)
# HGCal digis: the same frame type is produced under three product instances
# (EE / HEfront / HEback); each is re-labelled to the shorter instance name.
simHGCalUnsuppressedDigis = cms.EDAlias(
    mix = cms.VPSet(
        cms.PSet(
            type = cms.string("DetIdHGCSampleHGCDataFramesSorted"),
            fromProductInstance = cms.string("HGCDigisEE"),
            toProductInstance = cms.string("EE"),
        ),
        cms.PSet(
            type = cms.string("DetIdHGCSampleHGCDataFramesSorted"),
            fromProductInstance = cms.string("HGCDigisHEfront"),
            toProductInstance = cms.string("HEfront"),
        ),
        cms.PSet(
            type = cms.string("DetIdHGCSampleHGCDataFramesSorted"),
            fromProductInstance = cms.string("HGCDigisHEback"),
            toProductInstance = cms.string("HEback"),
        ),
    )
)
# no castor,pixel,strip digis in fastsim — disable those aliases there
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(simCastorDigis, mix = None)
fastSim.toModify(simSiPixelDigis, mix = None)
fastSim.toModify(simSiStripDigis, mix = None)
# Castor is absent from Run 3 onwards
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(simCastorDigis, mix = None)
# The HGCal alias only makes sense when the phase-2 HGCal era is active
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
(~phase2_hgcal).toModify(simHGCalUnsuppressedDigis, mix = None)
from Configuration.ProcessModifiers.premix_stage1_cff import premix_stage1
(premix_stage1 & phase2_hgcal).toModify(simHGCalUnsuppressedDigis,
mix = {
0 : dict(type = "PHGCSimAccumulator"),
1 : dict(type = "PHGCSimAccumulator"),
2 : dict(type = "PHGCSimAccumulator"),
}
) | SimGeneral/MixingModule/python/aliases_cfi.py | import FWCore.ParameterSet.Config as cms
simCastorDigis = cms.EDAlias(
mix = cms.VPSet(
cms.PSet(type = cms.string('CastorDataFramesSorted'))
)
)
simEcalUnsuppressedDigis = cms.EDAlias(
mix = cms.VPSet(
cms.PSet(type = cms.string('EBDigiCollection')),
cms.PSet(type = cms.string('EEDigiCollection')),
cms.PSet(type = cms.string('ESDigiCollection'))
)
)
simHcalUnsuppressedDigis = cms.EDAlias(
mix = cms.VPSet(
cms.PSet(type = cms.string('HBHEDataFramesSorted')),
cms.PSet(type = cms.string('HFDataFramesSorted')),
cms.PSet(type = cms.string('HODataFramesSorted')),
cms.PSet(type = cms.string('ZDCDataFramesSorted')),
cms.PSet(type = cms.string('QIE10DataFrameHcalDataFrameContainer')),
cms.PSet(type = cms.string('QIE11DataFrameHcalDataFrameContainer'))
)
)
simSiPixelDigis = cms.EDAlias(
mix = cms.VPSet(
cms.PSet(type = cms.string('PixelDigiedmDetSetVector')),
cms.PSet(type = cms.string('PixelDigiSimLinkedmDetSetVector'))
)
)
simSiStripDigis = cms.EDAlias(
mix = cms.VPSet(
cms.PSet(type = cms.string('SiStripDigiedmDetSetVector')),
cms.PSet(type = cms.string('SiStripRawDigiedmDetSetVector')),
cms.PSet(type = cms.string('StripDigiSimLinkedmDetSetVector'))
)
)
simHGCalUnsuppressedDigis = cms.EDAlias(
mix = cms.VPSet(
cms.PSet(
type = cms.string("DetIdHGCSampleHGCDataFramesSorted"),
fromProductInstance = cms.string("HGCDigisEE"),
toProductInstance = cms.string("EE"),
),
cms.PSet(
type = cms.string("DetIdHGCSampleHGCDataFramesSorted"),
fromProductInstance = cms.string("HGCDigisHEfront"),
toProductInstance = cms.string("HEfront"),
),
cms.PSet(
type = cms.string("DetIdHGCSampleHGCDataFramesSorted"),
fromProductInstance = cms.string("HGCDigisHEback"),
toProductInstance = cms.string("HEback"),
),
)
)
# no castor,pixel,strip digis in fastsim
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(simCastorDigis, mix = None)
fastSim.toModify(simSiPixelDigis, mix = None)
fastSim.toModify(simSiStripDigis, mix = None)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(simCastorDigis, mix = None)
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
(~phase2_hgcal).toModify(simHGCalUnsuppressedDigis, mix = None)
from Configuration.ProcessModifiers.premix_stage1_cff import premix_stage1
(premix_stage1 & phase2_hgcal).toModify(simHGCalUnsuppressedDigis,
mix = {
0 : dict(type = "PHGCSimAccumulator"),
1 : dict(type = "PHGCSimAccumulator"),
2 : dict(type = "PHGCSimAccumulator"),
}
) | 0.415373 | 0.30081 |
# "Back to the Village" v0.1 — a tiny Chinese-language text adventure.
# Banner and introduction (printed in Chinese).
print ('《回村》0.1版本','Author: <NAME>')
print ('附注:基于 <NAME> 的 Demo 开发')
print ('更多内容请关注微博或微信公众号:IoT前哨站')
print ('')
print ('欢迎来到风景优美的山谷。但是时间不早了,你要返回村庄。亲戚朋友还在等你回去聚餐。')
print ('')
# The four legal movement commands: north / south / east / west (in Chinese).
directions = ['北面','南面','东面','西面']
# Data structure for one spot on the game map.
class Location:
    """A single place in the world, with directional links to neighbouring places."""

    def __init__(self, name, description):
        """Store the display name and description; a new location starts with no exits."""
        self.name = name
        self.description = description
        # Maps a direction word -> destination location ID (a key of the global `locations` dict).
        self.linkedLocations = {}

    def addLink(self, direction, destination):
        """Register an exit, validating direction and destination against the module globals."""
        if direction not in directions:
            raise ValueError('方向错误')
        if destination not in locations:
            raise ValueError('目的地无效')
        self.linkedLocations[direction] = destination
# Dictionary with location ID strings as keys and Location objects as the values
locations = {
    '森林':Location('有个森林', '你在森林。这里有很多树。'),
    '湖泊':Location('有个湖泊', '你现在在湖边,这里很潮湿。'),
    '小山':Location('有个小山', '这里有蜿蜒的小路。'),
    '村庄':Location('有个村庄', '*恭喜你,你现在到村庄了,大家正等你吃饭呢。*')
}
# Wire up the map: forest <-> lake (N/S), forest <-> hill (E/W), hill <-> village (S/N)
locations['森林'].addLink('北面','湖泊')
locations['森林'].addLink('东面','小山')
locations['湖泊'].addLink('南面','森林')
locations['小山'].addLink('西面','森林')
locations['小山'].addLink('南面','村庄')
locations['村庄'].addLink('北面','小山')
# The player starts in the forest.
currentLocation = locations['森林']
# Main game loop (runs forever; reaching the village only prints its message)
while True:
    # Display description of current location
    print(currentLocation.description)
    # Display neighbouring locations
    for linkDirection,linkedLocation in currentLocation.linkedLocations.items():
        print(linkDirection + ': ' + locations[linkedLocation].name)
    # Read player input
    # NOTE(review): .lower() is a no-op for the Chinese direction words; harmless.
    command = input('>').lower()
    if command in directions:
        if command not in currentLocation.linkedLocations:
            # Valid direction, but no exit that way from here.
            print('你不能走那里。')
        else:
            # Follow the link and move the player.
            newLocationID = currentLocation.linkedLocations[command]
            currentLocation = locations[newLocationID]
    else:
        print('尝试一个方向: ' + ', '.join(directions))
        # Show list of directions, separated by commas
#让玩家走出一个森林,回到村庄。 | Text-Adventure/Gohome.py | print ('《回村》0.1版本','Author: <NAME>')
print ('附注:基于 <NAME> 的 Demo 开发')
print ('更多内容请关注微博或微信公众号:IoT前哨站')
print ('')
print ('欢迎来到风景优美的山谷。但是时间不早了,你要返回村庄。亲戚朋友还在等你回去聚餐。')
print ('')
directions = ['北面','南面','东面','西面']
# Data structure to store details of each location in the game
class Location:
# Constructor - set up
def __init__(self, name, description):
self.name = name
self.description = description
self.linkedLocations = {}
# Empty dictionary - will store which locations are linked to which other locations
def addLink(self, direction, destination):
# Add link to linkedLocations dictionary (if the specified direction and destination are valid)
if direction not in directions:
raise ValueError('方向错误')
elif destination not in locations:
raise ValueError('目的地无效')
else:
self.linkedLocations[direction] = destination
# Dictionary with location ID strings as keys and Location objects as the values
locations = {
'森林':Location('有个森林', '你在森林。这里有很多树。'),
'湖泊':Location('有个湖泊', '你现在在湖边,这里很潮湿。'),
'小山':Location('有个小山', '这里有蜿蜒的小路。'),
'村庄':Location('有个村庄', '*恭喜你,你现在到村庄了,大家正等你吃饭呢。*')
}
locations['森林'].addLink('北面','湖泊')
locations['森林'].addLink('东面','小山')
locations['湖泊'].addLink('南面','森林')
locations['小山'].addLink('西面','森林')
locations['小山'].addLink('南面','村庄')
locations['村庄'].addLink('北面','小山')
currentLocation = locations['森林']
# Main game loop
while True:
# Display description of current location
print(currentLocation.description)
# Display neighbouring locations
for linkDirection,linkedLocation in currentLocation.linkedLocations.items():
print(linkDirection + ': ' + locations[linkedLocation].name)
# Read player input
command = input('>').lower()
if command in directions:
if command not in currentLocation.linkedLocations:
print('你不能走那里。')
else:
newLocationID = currentLocation.linkedLocations[command]
currentLocation = locations[newLocationID]
else:
print('尝试一个方向: ' + ', '.join(directions))
# Show list of directions, separated by commas
#让玩家走出一个森林,回到村庄。 | 0.215681 | 0.18743 |
from collections import Counter
from typing import List
from ranked_vote.ballot import Ballot, Candidate
class ApprovalEntry:
    """Head-to-head record: a candidate's approval tally versus their strongest rival."""

    def __init__(self, candidate, other_candidate, candidate_votes, other_candidate_votes):
        self.candidate = candidate
        self.other_candidate = other_candidate
        self.candidate_votes = candidate_votes
        self.other_candidate_votes = other_candidate_votes

    def to_dict(self):
        """Serialize for JSON output; candidate objects are rendered via str()."""
        serialized = {}
        serialized['candidate'] = str(self.candidate)
        serialized['other_candidate'] = str(self.other_candidate)
        serialized['candidate_votes'] = self.candidate_votes
        serialized['other_candidate_votes'] = self.other_candidate_votes
        return serialized
class ApprovalResult:
    """Partition of the candidates into the approval set and its complement."""

    def __init__(self, approval_set: List[ApprovalEntry], approval_set_compliment: List[ApprovalEntry]):
        self.approval_set = approval_set
        self.approval_set_compliment = approval_set_compliment

    def to_dict(self):
        """Serialize both partitions, delegating each entry to its own to_dict()."""
        def serialize(entries):
            return [entry.to_dict() for entry in entries]
        return {
            'approval_set': serialize(self.approval_set),
            'approval_set_compliment': serialize(self.approval_set_compliment),
        }
def honest_approval_set(ballots: List[Ballot], candidates: List[Candidate]) -> ApprovalResult:
approval_set = list()
approval_set_compliment = list()
for candidate in candidates:
votes = Counter()
for ballot in ballots:
if candidate in ballot.choices:
for c in ballot.choices:
votes[c] += 1
if c == candidate:
break
else:
votes[ballot.choices[0]] += 1
candidate_votes = votes.pop(candidate)
[(other_candidate, other_candidate_votes)] = votes.most_common(1)
approval_entry = ApprovalEntry(candidate, other_candidate, candidate_votes, other_candidate_votes)
if candidate_votes > other_candidate_votes:
approval_set.append(approval_entry)
else:
approval_set_compliment.append(approval_entry)
return ApprovalResult(approval_set, approval_set_compliment) | ranked_vote/analysis/honest_approval_set.py | from collections import Counter
from typing import List
from ranked_vote.ballot import Ballot, Candidate
class ApprovalEntry:
def __init__(self, candidate, other_candidate, candidate_votes, other_candidate_votes):
self.candidate = candidate
self.other_candidate = other_candidate
self.candidate_votes = candidate_votes
self.other_candidate_votes = other_candidate_votes
def to_dict(self):
return {
'candidate': str(self.candidate),
'other_candidate': str(self.other_candidate),
'candidate_votes': self.candidate_votes,
'other_candidate_votes': self.other_candidate_votes,
}
class ApprovalResult:
def __init__(self, approval_set: List[ApprovalEntry], approval_set_compliment: List[ApprovalEntry]):
self.approval_set = approval_set
self.approval_set_compliment = approval_set_compliment
def to_dict(self):
return {
'approval_set': [d.to_dict() for d in self.approval_set],
'approval_set_compliment': [d.to_dict() for d in self.approval_set_compliment],
}
def honest_approval_set(ballots: List[Ballot], candidates: List[Candidate]) -> ApprovalResult:
approval_set = list()
approval_set_compliment = list()
for candidate in candidates:
votes = Counter()
for ballot in ballots:
if candidate in ballot.choices:
for c in ballot.choices:
votes[c] += 1
if c == candidate:
break
else:
votes[ballot.choices[0]] += 1
candidate_votes = votes.pop(candidate)
[(other_candidate, other_candidate_votes)] = votes.most_common(1)
approval_entry = ApprovalEntry(candidate, other_candidate, candidate_votes, other_candidate_votes)
if candidate_votes > other_candidate_votes:
approval_set.append(approval_entry)
else:
approval_set_compliment.append(approval_entry)
return ApprovalResult(approval_set, approval_set_compliment) | 0.75985 | 0.150778 |
import sys
import os
import stat
import re
import copy
from pwd import getpwnam
import shutil
def main():
    """Install SSL certificates for docassemble.

    Resolution order:
      1. If S3 or Azure storage is configured, download every object under
         the configured prefix into the 'cert install directory'.
      2. Otherwise copy certificates from a local directory (the 'certs'
         config entry, falling back to /usr/share/docassemble/certs) into
         the install directory, and additionally into the web server's
         certificate directory owned by the web server user.

    Exits the process with status 1 on an unrecoverable configuration error.
    """
    from docassemble.base.config import daconfig, S3_ENABLED, s3_config, AZURE_ENABLED, azure_config
    certs_location = daconfig.get('certs', None)
    cloud = None
    prefix = None
    if S3_ENABLED:
        import docassemble.webapp.amazon
        my_config = copy.deepcopy(s3_config)
        if certs_location is None:
            # Default: look under certs/ in the configured bucket.
            cloud = docassemble.webapp.amazon.s3object(my_config)
            prefix = 'certs/'
        else:
            # 'certs' may point into another bucket: s3://bucket/prefix
            m = re.search(r'^s3://([^/]+)/(.*)', certs_location)
            if m:
                prefix = m.group(2)
                my_config['bucket'] = m.group(1)
                cloud = docassemble.webapp.amazon.s3object(my_config)
    elif AZURE_ENABLED:
        import docassemble.webapp.microsoft
        my_config = copy.deepcopy(azure_config)
        if certs_location is None:
            prefix = 'certs/'
            cloud = docassemble.webapp.microsoft.azureobject(my_config)
        else:
            # 'certs' may point elsewhere: blob://account/container/prefix
            m = re.search(r'^blob://([^/]+)/([^/]+)/(.*)', certs_location)
            if m:
                my_config['account name'] = m.group(1)
                my_config['container'] = m.group(2)
                prefix = m.group(3)
                cloud = docassemble.webapp.microsoft.azureobject(my_config)
    if cloud is not None and prefix is not None:
        success = False
        if not re.search(r'/$', prefix):
            prefix = prefix + '/'
        dest = daconfig.get('cert install directory', '/etc/ssl/docassemble')
        if dest:
            if not os.path.isdir(dest):
                os.makedirs(dest)
            for key in cloud.list_keys(prefix=prefix):
                filename = re.sub(r'.*/', '', key.name)
                fullpath = os.path.join(dest, filename)
                sys.stderr.write("install_certs: saving " + str(key.name) + " to " + str(fullpath) + "\n")
                key.get_contents_to_filename(fullpath)
                # Certificates are secrets: owner read-only.
                os.chmod(fullpath, stat.S_IRUSR)
                success = True
        else:
            sys.stderr.write("SSL destination directory not known\n")
            sys.exit(1)
        if success:
            # Cloud install succeeded; skip the local-directory fallback.
            return
    if certs_location is None:
        if os.path.isdir('/usr/share/docassemble/certs'):
            certs_location = '/usr/share/docassemble/certs'
        else:
            # Nothing configured and no default directory: nothing to install.
            return
    if not os.path.isdir(certs_location):
        sys.stderr.write("certs directory " + str(certs_location) + " does not exist\n")
        sys.exit(1)
    # (shutil is imported at module level; the former redundant local
    # `import shutil` was removed.)
    dest = daconfig.get('cert install directory', '/etc/ssl/docassemble')
    if dest:
        # Replace any previous installation wholesale.
        if os.path.isdir(dest):
            shutil.rmtree(dest)
        shutil.copytree(certs_location, dest)
        for root, dirs, files in os.walk(dest):
            for the_file in files:
                os.chmod(os.path.join(root, the_file), stat.S_IRUSR)
    else:
        sys.stderr.write("SSL destination directory not known\n")
        sys.exit(1)
    www_install = daconfig.get('web server certificate directory', '/var/www/.certs')
    if www_install:
        www_username = daconfig.get('web server user', 'www-data')
        # pwd entry tuple: index 2 is pw_uid, index 3 is pw_gid.
        www_uid = getpwnam(www_username)[2]
        www_gid = getpwnam(www_username)[3]
        if os.path.isdir(www_install):
            shutil.rmtree(www_install)
        shutil.copytree(certs_location, www_install)
        os.chown(www_install, www_uid, www_gid)
        for root, dirs, files in os.walk(www_install):
            for the_file in files:
                os.chown(os.path.join(root, the_file), www_uid, www_gid)
                os.chmod(os.path.join(root, the_file), stat.S_IRUSR)
    return
# Script entry point: load the docassemble configuration (a config path may
# be passed on the command line), install the certificates, then exit 0.
if __name__ == "__main__":
    import docassemble.base.config
    docassemble.base.config.load(arguments=sys.argv)
    main()
    sys.exit(0) | docassemble_webapp/docassemble/webapp/install_certs.py | import sys
import os
import stat
import re
import copy
from pwd import getpwnam
import shutil
def main():
from docassemble.base.config import daconfig, S3_ENABLED, s3_config, AZURE_ENABLED, azure_config
certs_location = daconfig.get('certs', None)
cloud = None
prefix = None
if S3_ENABLED:
import docassemble.webapp.amazon
my_config = copy.deepcopy(s3_config)
if certs_location is None:
cloud = docassemble.webapp.amazon.s3object(my_config)
prefix = 'certs/'
else:
m = re.search(r'^s3://([^/]+)/(.*)', certs_location)
if m:
prefix = m.group(2)
my_config['bucket'] = m.group(1)
cloud = docassemble.webapp.amazon.s3object(my_config)
elif AZURE_ENABLED:
import docassemble.webapp.microsoft
my_config = copy.deepcopy(azure_config)
if certs_location is None:
prefix = 'certs/'
cloud = docassemble.webapp.microsoft.azureobject(my_config)
else:
m = re.search(r'^blob://([^/]+)/([^/]+)/(.*)', certs_location)
if m:
my_config['account name'] = m.group(1)
my_config['container'] = m.group(2)
prefix = m.group(3)
cloud = docassemble.webapp.microsoft.azureobject(my_config)
if cloud is not None and prefix is not None:
success = False
if not re.search(r'/$', prefix):
prefix = prefix + '/'
dest = daconfig.get('cert install directory', '/etc/ssl/docassemble')
if dest:
if not os.path.isdir(dest):
os.makedirs(dest)
for key in cloud.list_keys(prefix=prefix):
filename = re.sub(r'.*/', '', key.name)
fullpath = os.path.join(dest, filename)
sys.stderr.write("install_certs: saving " + str(key.name) + " to " + str(fullpath) + "\n")
key.get_contents_to_filename(fullpath)
os.chmod(fullpath, stat.S_IRUSR)
success = True
else:
sys.stderr.write("SSL destination directory not known\n")
sys.exit(1)
if success:
return
if certs_location is None:
if os.path.isdir('/usr/share/docassemble/certs'):
certs_location = '/usr/share/docassemble/certs'
else:
return
if not os.path.isdir(certs_location):
sys.stderr.write("certs directory " + str(certs_location) + " does not exist")
sys.exit(1)
import shutil
dest = daconfig.get('cert install directory', '/etc/ssl/docassemble')
if dest:
if os.path.isdir(dest):
shutil.rmtree(dest)
shutil.copytree(certs_location, dest)
for root, dirs, files in os.walk(dest):
for the_file in files:
os.chmod(os.path.join(root, the_file), stat.S_IRUSR)
else:
sys.stderr.write("SSL destination directory not known")
sys.exit(1)
www_install = daconfig.get('web server certificate directory', '/var/www/.certs')
if www_install:
www_username = daconfig.get('web server user', 'www-data')
www_uid = getpwnam(www_username)[2]
www_gid = getpwnam(www_username)[3]
if os.path.isdir(www_install):
shutil.rmtree(www_install)
shutil.copytree(certs_location, www_install)
os.chown(www_install, www_uid, www_gid)
for root, dirs, files in os.walk(www_install):
for the_file in files:
os.chown(os.path.join(root, the_file), www_uid, www_gid)
os.chmod(os.path.join(root, the_file), stat.S_IRUSR)
return
if __name__ == "__main__":
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
main()
sys.exit(0) | 0.150996 | 0.057388 |
import os, sys, inspect, torch, csv, copy
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import degree
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
from torch_geometric.nn import GATConv
import torch.nn as nn
import torch.nn.functional as F
from rdkit.Chem.Draw import SimilarityMaps
from models.tetra import *
class GCNConv(MessagePassing):
    # GCN-style graph convolution (symmetric degree normalization) with an
    # optional permutation-aware "tetra" message for stereocenter atoms.
    def __init__(self, args, custom_hidden_size=None):
        super(GCNConv, self).__init__(aggr='add')
        # custom_hidden_size lets a first layer map raw node features into
        # the hidden dimension; otherwise hidden -> hidden.
        if isinstance(custom_hidden_size, int):
            self.linear = nn.Linear(custom_hidden_size, args.hidden_size)
        else:
            self.linear = nn.Linear(args.hidden_size, args.hidden_size)
        self.batch_norm = nn.BatchNorm1d(args.hidden_size)
        self.tetra = args.tetra
        if self.tetra:
            self.tetra_update = get_tetra_update(args)
    def forward(self, x, edge_index, edge_attr, parity_atoms):
        # no edge updates
        x = self.linear(x)
        # Compute normalization: symmetric 1/sqrt(deg) factors, with +1 on
        # the degree (self-loop-like smoothing).
        row, col = edge_index
        deg = degree(col, x.size(0), dtype=x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        x_new = self.propagate(edge_index, x=x, edge_attr=edge_attr, norm=norm)
        if self.tetra:
            # parity_atoms is nonzero (+1/-1 RDKit parity) at stereocenters.
            tetra_ids = parity_atoms.nonzero().squeeze(1)
            if tetra_ids.nelement() != 0:
                # Replace the aggregated message at stereocenters with the
                # chirality-aware tetra message.
                x_new[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
        x = x_new + F.relu(x)  # residual connection around the aggregation
        return self.batch_norm(x), edge_attr
    def message(self, x_j, edge_attr):
        return norm.view(-1, 1) * F.relu(x_j + edge_attr)
    def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
        row, col = edge_index
        # Neighbor ids of each stereocenter, one row per tetra atom.
        # NOTE(review): the view/cat below assumes every stereocenter has
        # exactly 4 in-neighbors — confirm against the featurizer.
        tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids])
        # calculate pseudo tetra degree aligned with GCN method
        deg = degree(col, x.size(0), dtype=x.dtype)
        t_deg = deg[tetra_nei_ids]
        t_deg_inv_sqrt = t_deg.pow(-0.5)
        t_norm = 0.5 * t_deg_inv_sqrt.mean(dim=1)
        # switch entries for -1 rdkit labels (counter-clockwise parity):
        # swapping the first two neighbors canonicalizes orientation.
        ccw_mask = parity_atoms[tetra_ids] == -1
        tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
        # calculate reps: gather the (neighbor -> center) edge features.
        edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
        # Alternative dense implementation, kept for reference:
        # dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
        # edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
        attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
        edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
        reps = x[tetra_nei_ids] + edge_reps
        return t_norm.unsqueeze(-1) * self.tetra_update(reps)
class GINEConv(MessagePassing):
    # GIN-with-edge-features convolution plus the optional stereocenter
    # ("tetra") message used across this module.
    def __init__(self, args):
        super(GINEConv, self).__init__(aggr="add")
        # Learnable epsilon weighting of the central node, as in GIN.
        self.eps = nn.Parameter(torch.Tensor([0]))
        self.mlp = nn.Sequential(nn.Linear(args.hidden_size, 2 * args.hidden_size),
                                 nn.BatchNorm1d(2 * args.hidden_size),
                                 nn.ReLU(),
                                 nn.Linear(2 * args.hidden_size, args.hidden_size))
        self.batch_norm = nn.BatchNorm1d(args.hidden_size)
        self.tetra = args.tetra
        if self.tetra:
            self.tetra_update = get_tetra_update(args)
    def forward(self, x, edge_index, edge_attr, parity_atoms):
        # no edge updates
        x_new = self.propagate(edge_index, x=x, edge_attr=edge_attr)
        if self.tetra:
            # Nonzero parity marks stereocenters; override their messages.
            tetra_ids = parity_atoms.nonzero().squeeze(1)
            if tetra_ids.nelement() != 0:
                x_new[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
        x = self.mlp((1 + self.eps) * x + x_new)
        return self.batch_norm(x), edge_attr
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
        row, col = edge_index
        # NOTE(review): assumes exactly 4 in-neighbors per stereocenter —
        # confirm against the featurizer.
        tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids])
        # switch entries for -1 rdkit labels (canonicalize CCW parity).
        ccw_mask = parity_atoms[tetra_ids] == -1
        tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
        # calculate reps: gather (neighbor -> center) edge features.
        edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
        # Alternative dense implementation, kept for reference:
        # dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
        # edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
        attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
        edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
        reps = x[tetra_nei_ids] + edge_reps
        return self.tetra_update(reps)
class DMPNNConv(MessagePassing):
    # Directed-MPNN convolution: messages live on directed edges, and each
    # edge update subtracts its reverse edge's contribution.
    def __init__(self, args):
        super(DMPNNConv, self).__init__(aggr='add')
        self.lin = nn.Linear(args.hidden_size, args.hidden_size)
        self.mlp = nn.Sequential(nn.Linear(args.hidden_size, args.hidden_size),
                                 nn.BatchNorm1d(args.hidden_size),
                                 nn.ReLU())
        self.tetra = args.tetra
        if self.tetra:
            self.tetra_update = get_tetra_update(args)
    def forward(self, x, edge_index, edge_attr, parity_atoms):
        row, col = edge_index
        # Aggregate incoming edge messages per node (x itself is unused here).
        a_message = self.propagate(edge_index, x=None, edge_attr=edge_attr)
        if self.tetra:
            # Nonzero parity marks stereocenters; override their messages.
            tetra_ids = parity_atoms.nonzero().squeeze(1)
            if tetra_ids.nelement() != 0:
                a_message[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
        # NOTE(review): this pairing assumes edges are stored as consecutive
        # forward/backward pairs (even/odd indices) — confirm in the
        # graph-construction code.
        rev_message = torch.flip(edge_attr.view(edge_attr.size(0) // 2, 2, -1), dims=[1]).view(edge_attr.size(0), -1)
        # Return (node aggregation, updated directed-edge features).
        return a_message, self.mlp(a_message[row] - rev_message)
    def message(self, x_j, edge_attr):
        return F.relu(self.lin(edge_attr))
    def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
        row, col = edge_index
        # NOTE(review): assumes exactly 4 in-neighbors per stereocenter —
        # confirm against the featurizer.
        tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids])
        # switch entries for -1 rdkit labels (canonicalize CCW parity).
        ccw_mask = parity_atoms[tetra_ids] == -1
        tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
        # calculate reps: gather (neighbor -> center) edge features only
        # (unlike GCN/GINE, node features are not added here).
        edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
        # Alternative dense implementation, kept for reference:
        # dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
        # edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
        attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
        edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
        return self.tetra_update(edge_reps)
class OrigDMPNNConv(MessagePassing):
    # Original D-MPNN layer: like DMPNNConv but with an explicit linear +
    # ReLU edge update, and an optional final node-aggregation step that
    # concatenates raw node features with aggregated edge messages.
    def __init__(self, args, node_agg=False, in_channel=47):
        """
        in_channel: dimension of node feature
        """
        super(OrigDMPNNConv, self).__init__(aggr='add')
        self.lin = nn.Linear(args.hidden_size, args.hidden_size)
        self.node_agg = node_agg
        self.tetra = args.tetra
        if self.tetra:
            self.tetra_update = get_tetra_update(args)
        if self.node_agg:
            # Maps [raw node features || aggregated messages] back to hidden.
            self.agg_lin = nn.Linear(args.hidden_size+in_channel, args.hidden_size)
    def forward(self, x, edge_index, edge_attr, parity_atoms):
        row, col = edge_index
        # Aggregate incoming directed-edge messages per node.
        a_message = self.propagate(edge_index, x=None, edge_attr=edge_attr)
        if self.tetra:
            tetra_ids = parity_atoms.nonzero().squeeze(1) # get indices of non-zero elems (-1 or 1)
            if tetra_ids.nelement() != 0:
                a_message[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
        # NOTE(review): assumes edges are stored as consecutive
        # forward/backward pairs — confirm in the graph-construction code.
        rev_message = torch.flip(edge_attr.view(edge_attr.size(0) // 2, 2, -1), dims=[1]).view(edge_attr.size(0), -1)
        edge_message = self.lin(a_message[row] - rev_message)
        edge_message = F.relu(edge_message)
        if self.node_agg: # node aggregation (final layer only)
            # message passing over the freshly updated edge features
            node_agg_message = self.propagate(edge_index, x=None, edge_attr=edge_message)
            a_message = torch.cat([x, node_agg_message], dim=1)
            a_message = F.relu(self.agg_lin(a_message))
            # a_message is node aggregation (final step). If not final step, use the second output (edge_message)
        return a_message, edge_message
    def message(self, x_j, edge_attr):
        # Identity message: the edge update happens in forward().
        return edge_attr
    def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
        row, col = edge_index
        # NOTE(review): assumes exactly 4 in-neighbors per stereocenter —
        # confirm against the featurizer.
        tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids]) # indices of neighbors of tetra ids
        # switch entries for -1 rdkit labels (canonicalize CCW parity).
        ccw_mask = parity_atoms[tetra_ids] == -1
        tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
        # calculate reps: gather (neighbor -> center) edge features.
        edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
        # Alternative dense implementation, kept for reference:
        # dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
        # edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
        attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
        edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
        return self.tetra_update(edge_reps) | stereonet/models/mp_layers.py | import os, sys, inspect, torch, csv, copy
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import degree
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
from torch_geometric.nn import GATConv
import torch.nn as nn
import torch.nn.functional as F
from rdkit.Chem.Draw import SimilarityMaps
from models.tetra import *
class GCNConv(MessagePassing):
def __init__(self, args, custom_hidden_size=None):
super(GCNConv, self).__init__(aggr='add')
if isinstance(custom_hidden_size, int):
self.linear = nn.Linear(custom_hidden_size, args.hidden_size)
else:
self.linear = nn.Linear(args.hidden_size, args.hidden_size)
self.batch_norm = nn.BatchNorm1d(args.hidden_size)
self.tetra = args.tetra
if self.tetra:
self.tetra_update = get_tetra_update(args)
def forward(self, x, edge_index, edge_attr, parity_atoms):
# no edge updates
x = self.linear(x)
# Compute normalization
row, col = edge_index
deg = degree(col, x.size(0), dtype=x.dtype) + 1
deg_inv_sqrt = deg.pow(-0.5)
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
x_new = self.propagate(edge_index, x=x, edge_attr=edge_attr, norm=norm)
if self.tetra:
tetra_ids = parity_atoms.nonzero().squeeze(1)
if tetra_ids.nelement() != 0:
x_new[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
x = x_new + F.relu(x)
return self.batch_norm(x), edge_attr
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * F.relu(x_j + edge_attr)
def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
row, col = edge_index
tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids])
# calculate pseudo tetra degree aligned with GCN method
deg = degree(col, x.size(0), dtype=x.dtype)
t_deg = deg[tetra_nei_ids]
t_deg_inv_sqrt = t_deg.pow(-0.5)
t_norm = 0.5 * t_deg_inv_sqrt.mean(dim=1)
# switch entries for -1 rdkit labels
ccw_mask = parity_atoms[tetra_ids] == -1
tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
# calculate reps
edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
# dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
# edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
reps = x[tetra_nei_ids] + edge_reps
return t_norm.unsqueeze(-1) * self.tetra_update(reps)
class GINEConv(MessagePassing):
def __init__(self, args):
super(GINEConv, self).__init__(aggr="add")
self.eps = nn.Parameter(torch.Tensor([0]))
self.mlp = nn.Sequential(nn.Linear(args.hidden_size, 2 * args.hidden_size),
nn.BatchNorm1d(2 * args.hidden_size),
nn.ReLU(),
nn.Linear(2 * args.hidden_size, args.hidden_size))
self.batch_norm = nn.BatchNorm1d(args.hidden_size)
self.tetra = args.tetra
if self.tetra:
self.tetra_update = get_tetra_update(args)
def forward(self, x, edge_index, edge_attr, parity_atoms):
# no edge updates
x_new = self.propagate(edge_index, x=x, edge_attr=edge_attr)
if self.tetra:
tetra_ids = parity_atoms.nonzero().squeeze(1)
if tetra_ids.nelement() != 0:
x_new[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
x = self.mlp((1 + self.eps) * x + x_new)
return self.batch_norm(x), edge_attr
def message(self, x_j, edge_attr):
return F.relu(x_j + edge_attr)
def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
row, col = edge_index
tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids])
# switch entries for -1 rdkit labels
ccw_mask = parity_atoms[tetra_ids] == -1
tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
# calculate reps
edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
# dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
# edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
reps = x[tetra_nei_ids] + edge_reps
return self.tetra_update(reps)
class DMPNNConv(MessagePassing):
def __init__(self, args):
super(DMPNNConv, self).__init__(aggr='add')
self.lin = nn.Linear(args.hidden_size, args.hidden_size)
self.mlp = nn.Sequential(nn.Linear(args.hidden_size, args.hidden_size),
nn.BatchNorm1d(args.hidden_size),
nn.ReLU())
self.tetra = args.tetra
if self.tetra:
self.tetra_update = get_tetra_update(args)
def forward(self, x, edge_index, edge_attr, parity_atoms):
# print('='*20)
# print('Inside DMPNN:')
row, col = edge_index
# print('*'*10)
# print('row:', row)
# print('col:', col)
# print('*'*10)
a_message = self.propagate(edge_index, x=None, edge_attr=edge_attr)
# print('a_message dim:', a_message.size())
# print('a_message data:')
# print(a_message)
if self.tetra:
tetra_ids = parity_atoms.nonzero().squeeze(1)
# print('tetra_ids dim:', tetra_ids.size())
# print('tetra_ids data:', tetra_ids)
if tetra_ids.nelement() != 0:
a_message[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
# print('a_message dim after tetra (permute concat thingy):', a_message)
# print('a_message data after tetra:')
# print(a_message)
rev_message = torch.flip(edge_attr.view(edge_attr.size(0) // 2, 2, -1), dims=[1]).view(edge_attr.size(0), -1)
# print('='*20)
return a_message, self.mlp(a_message[row] - rev_message)
def message(self, x_j, edge_attr):
return F.relu(self.lin(edge_attr))
def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
row, col = edge_index
tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids])
# switch entries for -1 rdkit labels
ccw_mask = parity_atoms[tetra_ids] == -1
tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
# calculate reps
edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
# dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
# edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
return self.tetra_update(edge_reps)
class OrigDMPNNConv(MessagePassing):
def __init__(self, args, node_agg=False, in_channel=47):
"""
in_channel: dimension of node feature
"""
super(OrigDMPNNConv, self).__init__(aggr='add')
self.lin = nn.Linear(args.hidden_size, args.hidden_size)
# self.mlp = nn.Sequential(nn.Linear(args.hidden_size, args.hidden_size),
# nn.BatchNorm1d(args.hidden_size),
# nn.ReLU())
self.node_agg = node_agg
self.tetra = args.tetra
if self.tetra:
self.tetra_update = get_tetra_update(args)
if self.node_agg:
self.agg_lin = nn.Linear(args.hidden_size+in_channel, args.hidden_size)
def forward(self, x, edge_index, edge_attr, parity_atoms):
row, col = edge_index
# print('*'*10)
# print('row:', row)
# print('col:', col)
a_message = self.propagate(edge_index, x=None, edge_attr=edge_attr)
# print('a_message size:', a_message.size())
# print('a_message data:')
# print(a_message)
# print('*'*10)
if self.tetra:
tetra_ids = parity_atoms.nonzero().squeeze(1) # get indices of non-zero elems (-1 or 1)
if tetra_ids.nelement() != 0:
a_message[tetra_ids] = self.tetra_message(x, edge_index, edge_attr, tetra_ids, parity_atoms)
rev_message = torch.flip(edge_attr.view(edge_attr.size(0) // 2, 2, -1), dims=[1]).view(edge_attr.size(0), -1)
edge_message = self.lin(a_message[row] - rev_message)
edge_message = F.relu(edge_message)
if self.node_agg: # node aggregation
# message passing
node_agg_message = self.propagate(edge_index, x=None, edge_attr=edge_message)
# print('Dim of x:', x.size())
# print('Dim of node_Agg_message:',node_agg_message.size() )
a_message = torch.cat([x, node_agg_message], dim=1)
# print('Dim after concat:', a_message.size())
a_message = F.relu(self.agg_lin(a_message))
# a_message is node aggregation (final step). If not final step, use the second output (self.mlp...)
return a_message, edge_message
def message(self, x_j, edge_attr):
return edge_attr
def tetra_message(self, x, edge_index, edge_attr, tetra_ids, parity_atoms):
row, col = edge_index
tetra_nei_ids = torch.cat([row[col == i].unsqueeze(0) for i in range(x.size(0)) if i in tetra_ids]) # indices of neighbors of tetra ids
# switch entries for -1 rdkit labels
ccw_mask = parity_atoms[tetra_ids] == -1
tetra_nei_ids[ccw_mask] = tetra_nei_ids.clone()[ccw_mask][:, [1, 0, 2, 3]]
# calculate reps
edge_ids = torch.cat([tetra_nei_ids.view(1, -1), tetra_ids.repeat_interleave(4).unsqueeze(0)], dim=0)
# dense_edge_attr = to_dense_adj(edge_index, batch=None, edge_attr=edge_attr).squeeze(0)
# edge_reps = dense_edge_attr[edge_ids[0], edge_ids[1], :].view(tetra_nei_ids.size(0), 4, -1)
attr_ids = [torch.where((a == edge_index.t()).all(dim=1))[0] for a in edge_ids.t()]
edge_reps = edge_attr[attr_ids, :].view(tetra_nei_ids.size(0), 4, -1)
return self.tetra_update(edge_reps) | 0.722723 | 0.431884 |
from __future__ import absolute_import, unicode_literals
import unittest
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.six import b
from tuiuiu.tests.utils import TuiuiuTestUtils
from tuiuiu.tuiuiudocs import models
# Force signal-driven index updates for the elasticsearch backend so the
# documents saved below are indexed immediately.
@override_settings(_TUIUIUSEARCH_FORCE_AUTO_UPDATE=['elasticsearch'])
class TestIssue613(TestCase, TuiuiuTestUtils):
    # Regression tests: document tags must be searchable after both the
    # "add" and "edit" admin views (issue 613).
    def get_elasticsearch_backend(self):
        """Return the configured elasticsearch backend, or skip the test."""
        from django.conf import settings
        from tuiuiu.tuiuiusearch.backends import get_search_backend
        backend_path = 'tuiuiu.tuiuiusearch.backends.elasticsearch'
        # Search TUIUIUSEARCH_BACKENDS for an entry that uses the given backend path
        for backend_name, backend_conf in settings.TUIUIUSEARCH_BACKENDS.items():
            if backend_conf['BACKEND'] == backend_path:
                return get_search_backend(backend_name)
        else:
            # for/else: runs only when the loop finished without returning,
            # i.e. no conf entry found - skip tests for this backend
            raise unittest.SkipTest("No TUIUIUSEARCH_BACKENDS entry for the backend %s" % backend_path)
    def setUp(self):
        self.search_backend = self.get_elasticsearch_backend()
        self.login()
    def add_document(self, **params):
        """POST the document-add view and return the created Document."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example document"))
        fake_file.name = 'test.txt'
        # Submit
        post_data = {
            'title': "Test document",
            'file': fake_file,
        }
        post_data.update(params)
        response = self.client.post(reverse('tuiuiudocs:add'), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('tuiuiudocs:index'))
        # Document should be created
        doc = models.Document.objects.filter(title=post_data['title'])
        self.assertTrue(doc.exists())
        return doc.first()
    def edit_document(self, **params):
        """Create a document, POST the edit view, return the edited Document."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example document"))
        fake_file.name = 'test.txt'
        # Create a document without tags to edit
        document = models.Document.objects.create(title="Test document", file=fake_file)
        # Build another fake file
        another_fake_file = ContentFile(b("A boring example document"))
        another_fake_file.name = 'test.txt'
        # Submit
        post_data = {
            'title': "Test document changed!",
            'file': another_fake_file,
        }
        post_data.update(params)
        response = self.client.post(reverse('tuiuiudocs:edit', args=(document.id,)), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('tuiuiudocs:index'))
        # Document should be changed
        doc = models.Document.objects.filter(title=post_data['title'])
        self.assertTrue(doc.exists())
        return doc.first()
    def test_issue_613_on_add(self):
        # Reset the search index
        self.search_backend.reset_index()
        self.search_backend.add_type(models.Document)
        # Add a document with some tags
        document = self.add_document(tags="hello")
        self.search_backend.refresh_index()
        # Search for it by tag
        results = self.search_backend.search("hello", models.Document)
        # Check: the tagged document is the single hit
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, document.id)
    def test_issue_613_on_edit(self):
        # Reset the search index
        self.search_backend.reset_index()
        self.search_backend.add_type(models.Document)
        # Add a document with some tags
        document = self.edit_document(tags="hello")
        self.search_backend.refresh_index()
        # Search for it by tag
        results = self.search_backend.search("hello", models.Document)
        # Check: the tagged document is the single hit
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, document.id) | tuiuiu/tuiuiudocs/tests/test_search.py | from __future__ import absolute_import, unicode_literals
import unittest
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.six import b
from tuiuiu.tests.utils import TuiuiuTestUtils
from tuiuiu.tuiuiudocs import models
@override_settings(_TUIUIUSEARCH_FORCE_AUTO_UPDATE=['elasticsearch'])
class TestIssue613(TestCase, TuiuiuTestUtils):
    """Regression tests for issue 613: tags supplied when a document is
    added or edited must end up in the Elasticsearch index.

    NOTE(review): the overridden setting name starts with an underscore
    (``_TUIUIUSEARCH_FORCE_AUTO_UPDATE``) - confirm this is the name the
    search signal handlers actually read.
    """

    def get_elasticsearch_backend(self):
        """Return the configured Elasticsearch search backend, or skip the
        current test when no such backend is configured."""
        from django.conf import settings
        from tuiuiu.tuiuiusearch.backends import get_search_backend

        backend_path = 'tuiuiu.tuiuiusearch.backends.elasticsearch'

        # Search TUIUIUSEARCH_BACKENDS for an entry that uses the given backend path
        for backend_name, backend_conf in settings.TUIUIUSEARCH_BACKENDS.items():
            if backend_conf['BACKEND'] == backend_path:
                return get_search_backend(backend_name)
        else:
            # no conf entry found - skip tests for this backend
            # (for/else: only reached when the loop did not return above)
            raise unittest.SkipTest("No TUIUIUSEARCH_BACKENDS entry for the backend %s" % backend_path)

    def setUp(self):
        self.search_backend = self.get_elasticsearch_backend()
        self.login()

    def add_document(self, **params):
        """POST the admin 'add document' form; extra form fields (e.g.
        ``tags``) come in via **params.  Returns the created Document."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example document"))
        fake_file.name = 'test.txt'

        # Submit
        post_data = {
            'title': "Test document",
            'file': fake_file,
        }
        post_data.update(params)
        response = self.client.post(reverse('tuiuiudocs:add'), post_data)

        # User should be redirected back to the index
        self.assertRedirects(response, reverse('tuiuiudocs:index'))

        # Document should be created
        doc = models.Document.objects.filter(title=post_data['title'])
        self.assertTrue(doc.exists())
        return doc.first()

    def edit_document(self, **params):
        """Create a plain document, then POST the admin 'edit' form with
        **params applied.  Returns the edited Document."""
        # Build a fake file
        fake_file = ContentFile(b("A boring example document"))
        fake_file.name = 'test.txt'

        # Create a document without tags to edit
        document = models.Document.objects.create(title="Test document", file=fake_file)

        # Build another fake file
        another_fake_file = ContentFile(b("A boring example document"))
        another_fake_file.name = 'test.txt'

        # Submit
        post_data = {
            'title': "Test document changed!",
            'file': another_fake_file,
        }
        post_data.update(params)
        response = self.client.post(reverse('tuiuiudocs:edit', args=(document.id,)), post_data)

        # User should be redirected back to the index
        self.assertRedirects(response, reverse('tuiuiudocs:index'))

        # Document should be changed
        doc = models.Document.objects.filter(title=post_data['title'])
        self.assertTrue(doc.exists())
        return doc.first()

    def test_issue_613_on_add(self):
        # Reset the search index
        self.search_backend.reset_index()
        self.search_backend.add_type(models.Document)

        # Add a document with some tags
        document = self.add_document(tags="hello")
        self.search_backend.refresh_index()

        # Search for it by tag
        results = self.search_backend.search("hello", models.Document)

        # Check
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, document.id)

    def test_issue_613_on_edit(self):
        # Reset the search index
        self.search_backend.reset_index()
        self.search_backend.add_type(models.Document)

        # Add a document with some tags
        document = self.edit_document(tags="hello")
        self.search_backend.refresh_index()

        # Search for it by tag
        results = self.search_backend.search("hello", models.Document)

        # Check
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, document.id)
import requests
import json
import csv
from collections import deque
import time
import datetime
def getsince(csv_file):
    """Return the timestamp (first column) of the last row of *csv_file*.

    A one-element deque keeps only the final row in memory while the
    csv reader streams through the file.
    """
    # newline='' is what the csv module documentation requires for file
    # objects handed to csv.reader (avoids newline translation issues).
    with open(csv_file, 'r', newline='') as f:
        return deque(csv.reader(f), 1)[0][0]
def getprices():
    """Fetch the latest 1-minute BTC/USD candles from Bitfinex and append
    them to the local CSV file.

    Rows are written as (unix_seconds, open, high, low, close, volume).
    """
    urlbfx = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist'
    # BUG FIX: raw string so '\B' is not parsed as an (invalid) escape
    # sequence; the resulting path value is unchanged.
    file = r'Data\BitfinexBTCUSD.csv'
    params = {'limit': 1000, 'sort': 1}

    response = requests.get(urlbfx, params=params)
    bfxjson = json.loads(response.text)

    bfxohlc = []
    for candle in bfxjson:
        # Bitfinex v2 candles arrive as [MTS, OPEN, CLOSE, HIGH, LOW, VOLUME];
        # reorder to OHLCV and convert the timestamp from ms to seconds.
        bfxohlc.append((candle[0] / 1000, candle[1], candle[3],
                        candle[4], candle[2], candle[5]))

    # newline='' is required by the csv module when writing files.
    with open(file, 'a', newline='') as f:
        csv.writer(f).writerows(bfxohlc)
getprices() | pricefetch.py | import requests
import json
import csv
from collections import deque
import time
import datetime
def getsince(csv_file):
    """Return the timestamp (first column) of the last row of *csv_file*.

    A one-element deque keeps only the final row in memory while the
    csv reader streams through the file.
    """
    # newline='' is what the csv module documentation requires for file
    # objects handed to csv.reader (avoids newline translation issues).
    with open(csv_file, 'r', newline='') as f:
        return deque(csv.reader(f), 1)[0][0]
def getprices():
    """Fetch the latest 1-minute BTC/USD candles from Bitfinex and append
    them to the local CSV file.

    Rows are written as (unix_seconds, open, high, low, close, volume).
    """
    urlbfx = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist'
    # BUG FIX: raw string so '\B' is not parsed as an (invalid) escape
    # sequence; the resulting path value is unchanged.
    file = r'Data\BitfinexBTCUSD.csv'
    params = {'limit': 1000, 'sort': 1}

    response = requests.get(urlbfx, params=params)
    bfxjson = json.loads(response.text)

    bfxohlc = []
    for candle in bfxjson:
        # Bitfinex v2 candles arrive as [MTS, OPEN, CLOSE, HIGH, LOW, VOLUME];
        # reorder to OHLCV and convert the timestamp from ms to seconds.
        bfxohlc.append((candle[0] / 1000, candle[1], candle[3],
                        candle[4], candle[2], candle[5]))

    # newline='' is required by the csv module when writing files.
    with open(file, 'a', newline='') as f:
        csv.writer(f).writerows(bfxohlc)
getprices() | 0.042752 | 0.071819 |
import collections
from typing import Any, Dict, Optional, Iterable, Sequence
import numpy as np
from modin.core.dataframe.base.exchange.dataframe_protocol.dataframe import (
ProtocolDataframe,
)
from modin.core.dataframe.pandas.dataframe.dataframe import PandasDataframe
from modin.utils import _inherit_docstrings
from .column import PandasProtocolColumn
@_inherit_docstrings(ProtocolDataframe)
class PandasProtocolDataframe(ProtocolDataframe):
    """
    A data frame class, with only the methods required by the interchange protocol defined.

    Instances of this (private) class are returned from ``modin.pandas.DataFrame.__dataframe__``
    as objects with the methods and attributes defined on this class.

    A "data frame" represents an ordered collection of named columns.
    A column's "name" must be a unique string. Columns may be accessed by name or by position.

    Parameters
    ----------
    df : PandasDataframe
        A ``PandasDataframe`` object.
    nan_as_null : bool, default: False
        A keyword intended for the consumer to tell the producer
        to overwrite null values in the data with ``NaN`` (or ``NaT``).
        This currently has no effect; once support for nullable extension
        dtypes is added, this value should be propagated to columns.
    allow_copy : bool, default: True
        A keyword that defines whether or not the library is allowed
        to make a copy of the data. Currently, if the flag is set to ``False``
        and a copy is needed, a ``RuntimeError`` will be raised.
    """

    def __init__(
        self,
        df: PandasDataframe,
        nan_as_null: bool = False,
        allow_copy: bool = True,
    ) -> None:
        self._df = df
        self._nan_as_null = nan_as_null
        self._allow_copy = allow_copy

    @property
    def metadata(self) -> Dict[str, Any]:
        # The index is not part of the protocol's column data, so expose it
        # through metadata for consumers that want to round-trip it.
        return {"modin.index": self._df.index}

    def num_columns(self) -> int:
        return len(self._df.columns)

    def num_rows(self) -> int:
        return len(self._df.index)

    def num_chunks(self) -> int:
        # Chunks map one-to-one onto the frame's row partitions.
        return self._df._partitions.shape[0]

    def column_names(self) -> Iterable[str]:
        for col in self._df.columns:
            yield col

    def get_column(self, i: int) -> PandasProtocolColumn:
        return PandasProtocolColumn(
            self._df.mask(row_positions=None, col_positions=[i]),
            allow_copy=self._allow_copy,
        )

    def get_column_by_name(self, name: str) -> PandasProtocolColumn:
        return PandasProtocolColumn(
            self._df.mask(row_positions=None, col_labels=[name]),
            allow_copy=self._allow_copy,
        )

    def get_columns(self) -> Iterable[PandasProtocolColumn]:
        for name in self._df.columns:
            yield PandasProtocolColumn(
                self._df.mask(row_positions=None, col_labels=[name]),
                allow_copy=self._allow_copy,
            )

    def select_columns(self, indices: Sequence[int]) -> "PandasProtocolDataframe":
        # BUG FIX: ``collections.Sequence`` was a deprecated alias removed in
        # Python 3.10; the ABC lives in ``collections.abc``.
        if not isinstance(indices, collections.abc.Sequence):
            raise ValueError("`indices` is not a sequence")
        return PandasProtocolDataframe(
            self._df.mask(row_positions=None, col_positions=indices),
            allow_copy=self._allow_copy,
        )

    def select_columns_by_name(self, names: Sequence[str]) -> "PandasProtocolDataframe":
        # BUG FIX: use ``collections.abc.Sequence`` (same reason as in
        # ``select_columns``).
        if not isinstance(names, collections.abc.Sequence):
            raise ValueError("`names` is not a sequence")
        return PandasProtocolDataframe(
            self._df.mask(row_positions=None, col_labels=names),
            allow_copy=self._allow_copy,
        )

    def get_chunks(
        self, n_chunks: Optional[int] = None
    ) -> Iterable["PandasProtocolDataframe"]:
        cur_n_chunks = self.num_chunks()
        n_rows = self.num_rows()
        if n_chunks is None or n_chunks == cur_n_chunks:
            # Fast path: one protocol frame per existing row partition.
            cum_row_lengths = np.cumsum([0] + self._df._row_lengths)
            for i in range(len(cum_row_lengths) - 1):
                yield PandasProtocolDataframe(
                    self._df.mask(
                        row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
                        col_positions=None,
                    ),
                    allow_copy=self._allow_copy,
                )
            return
        if n_chunks % cur_n_chunks != 0:
            raise RuntimeError(
                "The passed `n_chunks` must be a multiple of `self.num_chunks()`."
            )
        if n_chunks > n_rows:
            raise RuntimeError(
                "The passed `n_chunks` value is bigger than `self.num_rows()`."
            )
        # Repartition into ``n_chunks`` row partitions; the last chunk absorbs
        # the remainder rows.
        chunksize = n_rows // n_chunks
        new_lengths = [chunksize] * n_chunks
        new_lengths[-1] = n_rows % n_chunks + new_lengths[-1]
        new_partitions = self._df._partition_mgr_cls.map_axis_partitions(
            0,
            self._df._partitions,
            lambda df: df,
            keep_partitioning=False,
            lengths=new_lengths,
        )
        new_df = self._df.__constructor__(
            new_partitions,
            self._df.index,
            self._df.columns,
            new_lengths,
            self._df._column_widths,
        )
        cum_row_lengths = np.cumsum([0] + new_df._row_lengths)
        for i in range(len(cum_row_lengths) - 1):
            yield PandasProtocolDataframe(
                new_df.mask(
                    row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
                    col_positions=None,
                ),
                allow_copy=self._allow_copy,
            )
from typing import Any, Dict, Optional, Iterable, Sequence
import numpy as np
from modin.core.dataframe.base.exchange.dataframe_protocol.dataframe import (
ProtocolDataframe,
)
from modin.core.dataframe.pandas.dataframe.dataframe import PandasDataframe
from modin.utils import _inherit_docstrings
from .column import PandasProtocolColumn
@_inherit_docstrings(ProtocolDataframe)
class PandasProtocolDataframe(ProtocolDataframe):
    """
    A data frame class, with only the methods required by the interchange protocol defined.

    Instances of this (private) class are returned from ``modin.pandas.DataFrame.__dataframe__``
    as objects with the methods and attributes defined on this class.

    A "data frame" represents an ordered collection of named columns.
    A column's "name" must be a unique string. Columns may be accessed by name or by position.

    Parameters
    ----------
    df : PandasDataframe
        A ``PandasDataframe`` object.
    nan_as_null : bool, default: False
        A keyword intended for the consumer to tell the producer
        to overwrite null values in the data with ``NaN`` (or ``NaT``).
        This currently has no effect; once support for nullable extension
        dtypes is added, this value should be propagated to columns.
    allow_copy : bool, default: True
        A keyword that defines whether or not the library is allowed
        to make a copy of the data. Currently, if the flag is set to ``False``
        and a copy is needed, a ``RuntimeError`` will be raised.
    """

    def __init__(
        self,
        df: PandasDataframe,
        nan_as_null: bool = False,
        allow_copy: bool = True,
    ) -> None:
        self._df = df
        self._nan_as_null = nan_as_null
        self._allow_copy = allow_copy

    @property
    def metadata(self) -> Dict[str, Any]:
        # The index is not part of the protocol's column data, so expose it
        # through metadata for consumers that want to round-trip it.
        return {"modin.index": self._df.index}

    def num_columns(self) -> int:
        return len(self._df.columns)

    def num_rows(self) -> int:
        return len(self._df.index)

    def num_chunks(self) -> int:
        # Chunks map one-to-one onto the frame's row partitions.
        return self._df._partitions.shape[0]

    def column_names(self) -> Iterable[str]:
        for col in self._df.columns:
            yield col

    def get_column(self, i: int) -> PandasProtocolColumn:
        return PandasProtocolColumn(
            self._df.mask(row_positions=None, col_positions=[i]),
            allow_copy=self._allow_copy,
        )

    def get_column_by_name(self, name: str) -> PandasProtocolColumn:
        return PandasProtocolColumn(
            self._df.mask(row_positions=None, col_labels=[name]),
            allow_copy=self._allow_copy,
        )

    def get_columns(self) -> Iterable[PandasProtocolColumn]:
        for name in self._df.columns:
            yield PandasProtocolColumn(
                self._df.mask(row_positions=None, col_labels=[name]),
                allow_copy=self._allow_copy,
            )

    def select_columns(self, indices: Sequence[int]) -> "PandasProtocolDataframe":
        # BUG FIX: ``collections.Sequence`` was a deprecated alias removed in
        # Python 3.10; the ABC lives in ``collections.abc``.
        if not isinstance(indices, collections.abc.Sequence):
            raise ValueError("`indices` is not a sequence")
        return PandasProtocolDataframe(
            self._df.mask(row_positions=None, col_positions=indices),
            allow_copy=self._allow_copy,
        )

    def select_columns_by_name(self, names: Sequence[str]) -> "PandasProtocolDataframe":
        # BUG FIX: use ``collections.abc.Sequence`` (same reason as in
        # ``select_columns``).
        if not isinstance(names, collections.abc.Sequence):
            raise ValueError("`names` is not a sequence")
        return PandasProtocolDataframe(
            self._df.mask(row_positions=None, col_labels=names),
            allow_copy=self._allow_copy,
        )

    def get_chunks(
        self, n_chunks: Optional[int] = None
    ) -> Iterable["PandasProtocolDataframe"]:
        cur_n_chunks = self.num_chunks()
        n_rows = self.num_rows()
        if n_chunks is None or n_chunks == cur_n_chunks:
            # Fast path: one protocol frame per existing row partition.
            cum_row_lengths = np.cumsum([0] + self._df._row_lengths)
            for i in range(len(cum_row_lengths) - 1):
                yield PandasProtocolDataframe(
                    self._df.mask(
                        row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
                        col_positions=None,
                    ),
                    allow_copy=self._allow_copy,
                )
            return
        if n_chunks % cur_n_chunks != 0:
            raise RuntimeError(
                "The passed `n_chunks` must be a multiple of `self.num_chunks()`."
            )
        if n_chunks > n_rows:
            raise RuntimeError(
                "The passed `n_chunks` value is bigger than `self.num_rows()`."
            )
        # Repartition into ``n_chunks`` row partitions; the last chunk absorbs
        # the remainder rows.
        chunksize = n_rows // n_chunks
        new_lengths = [chunksize] * n_chunks
        new_lengths[-1] = n_rows % n_chunks + new_lengths[-1]
        new_partitions = self._df._partition_mgr_cls.map_axis_partitions(
            0,
            self._df._partitions,
            lambda df: df,
            keep_partitioning=False,
            lengths=new_lengths,
        )
        new_df = self._df.__constructor__(
            new_partitions,
            self._df.index,
            self._df.columns,
            new_lengths,
            self._df._column_widths,
        )
        cum_row_lengths = np.cumsum([0] + new_df._row_lengths)
        for i in range(len(cum_row_lengths) - 1):
            yield PandasProtocolDataframe(
                new_df.mask(
                    row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
                    col_positions=None,
                ),
                allow_copy=self._allow_copy,
            )
import importlib.util, os, shutil, subprocess, sys, tempfile, urllib.request
assert sys.version_info >= (3, 4)
# A horrible workaround for a partially existing distutils.
_distutils_usercustomize = """
# miniirc_bootstrap: Make user-provided packages load first.
# This can safely be removed if distutils has been properly installed.
import os, sys
dir = os.path.expanduser('~/.local/lib/python{}.{}/site-packages'.format(
*sys.version_info[:2]))
if dir in sys.path:
sys.path.remove(dir)
sys.path.insert(0, dir)
del dir
# End of miniirc_bootstrap changes
""".lstrip()
# Debian has decided to remove distutils from Python installs by default.
# TODO: Make less assumptions about the system.
def bootstrap_distutils():
    """
    Bootstrap installs distutils on Debian systems. This is horrible and should
    probably be avoided if possible.

    Downloads the python3-distutils .deb with apt-get, extracts it with
    dpkg-deb, moves the package's distutils into the user site-packages and
    patches usercustomize.py so the copy is found first.
    """
    # Already usable (and on <3.12, where distutils still exists): nothing to do.
    if (importlib.util.find_spec('distutils.util') is not None and
            sys.version_info < (3, 12)):
        return
    print('[This should never happen] Downloading distutils...')
    if sys.platform != 'linux' or not shutil.which('apt-get'):
        raise NotImplementedError('Cannot bootstrap distutils on non-Debian '
                                  'systems!')
    # A partial distutils (package present but distutils.util missing).
    partial_distutils = importlib.util.find_spec('distutils')
    # Get paths
    python = 'python{}.{}'.format(*sys.version_info[:2])
    pkg = 'python{}-distutils'.format(sys.version_info[0])
    local_lib = os.path.expanduser('~/.local/lib')
    if not os.path.isdir(local_lib):
        os.mkdir(local_lib)
    local_python = os.path.join(local_lib, python)
    if not os.path.isdir(local_python):
        os.mkdir(local_python)
    local_python = os.path.join(local_python, 'site-packages')
    if not os.path.isdir(local_python):
        os.mkdir(local_python)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Download the package.
        subprocess.check_call(('apt-get', 'download', pkg), cwd=tmpdir)
        files = os.listdir(tmpdir)
        assert len(files) == 1, 'Error downloading .deb!'
        # Extract the downloaded .deb file.
        print('[This should never happen] Installing distutils...')
        subprocess.check_call(('dpkg-deb', '-x', files[0], tmpdir), cwd=tmpdir)
        # Move distutils out of the extracted package.
        f = os.path.join(tmpdir, 'usr', 'lib', python, 'distutils')
        os.rename(f, os.path.join(local_python, 'distutils'))
        if partial_distutils is not None:
            # Symlink extra files from the partial system distutils that the
            # .deb does not provide.
            old_distutils = os.path.dirname(partial_distutils.origin)
            for fn in os.listdir(old_distutils):
                if not fn.endswith('.py'):
                    continue
                dst = os.path.join(local_python, 'distutils', fn)
                if os.path.exists(dst):
                    continue
                os.symlink(os.path.join(old_distutils, fn), dst)
    # A horrible tweak to make user-provided packages load before system ones.
    usercustomize = os.path.join(local_python, 'usercustomize.py')
    print('[This should never happen] Adding {}...'.format(usercustomize))
    with open(usercustomize, 'a') as f:
        # If the file already contains data write a leading newline.
        if f.tell():
            f.write('\n')
        # Write the custom distutils data.
        f.write(_distutils_usercustomize)
    print('[This should never happen] distutils should be installed!')
    # Recommend the user installs distutils correctly if possible.
    print(('If you have root access, please install {}, remove the '
           'changes made in {!r}, and delete {!r}.').format(pkg, usercustomize,
           os.path.join(local_python, 'distutils')), file=sys.stderr)
# Download a webpage
def wget(url, raw=False):
    """Download *url*; return bytes when *raw* is true, otherwise text
    decoded as UTF-8 (undecodable bytes replaced).

    Returns '' when the server answers with an HTTP error.
    """
    try:
        with urllib.request.urlopen(url) as resp:
            payload = resp.read()
    except urllib.request.HTTPError:
        return ''
    return payload if raw else payload.decode('utf-8', 'replace')
def bootstrap_pip():
    """
    Bootstrap installs pip. This will print messages to stdout/stderr.
    This is required because some versions of Ubuntu do not have pip or
    ensurepip installed with Python by default.
    """
    # get-pip.py needs distutils; install it first when it is missing.
    if importlib.util.find_spec('distutils.util') is None:
        bootstrap_distutils()
    print('Downloading pip...')
    url = 'https://bootstrap.pypa.io/{}get-pip.py'
    # If this machine is using an obsolete Python, download the
    # version-specific one.
    major, minor = sys.version_info[:2]
    pip = wget(url.format('{}.{}/'.format(major, minor)), raw=True)
    # Otherwise use the generic one.
    if not pip:
        pip = wget(url.format(''), raw=True)
    assert pip, 'Error downloading pip!'
    print('Installing pip...')
    # Write the installer to a temp file and run it as the current user.
    fd, filename = tempfile.mkstemp()
    with open(fd, 'wb') as f:
        f.write(pip)
    del pip
    subprocess.check_call((sys.executable, '--', filename, '--user'))
    os.remove(filename)
    print('pip (should be) installed!')
# Install a package
def pip_install(*pkgs, upgrade=False):
    """
    Installs or upgrades packages using pip. `pip` will print to stdout/stderr.
    This automatically calls bootstrap_pip() if required.
    """
    cmd = [sys.executable, '-m', 'pip', 'install']
    if upgrade:
        cmd += ['--upgrade']
    cmd += ['--user', '--', *pkgs]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        # pip exists but the install genuinely failed: propagate the error.
        if importlib.util.find_spec('pip') is not None:
            raise
        # pip itself is missing - bootstrap it, then retry once.
        print('pip is (somehow) not installed!')
        bootstrap_pip()
        subprocess.check_call(cmd)
# Install miniirc
def main():
    """Entry point: install miniirc, upgrading it when already present."""
    # Reject any command-line arguments.
    import argparse
    argparse.ArgumentParser().parse_args()

    # Upgrade when miniirc is already importable; otherwise a fresh install.
    try:
        import miniirc  # noqa: F401
    except ImportError:
        already_installed = False
    else:
        already_installed = True
    pip_install('miniirc', upgrade=already_installed)
    print('miniirc (should be) installed!')
if __name__ == '__main__':
main() | miniirc_bootstrap.py |
import importlib.util, os, shutil, subprocess, sys, tempfile, urllib.request
assert sys.version_info >= (3, 4)
# A horrible workaround for a partially existing distutils.
_distutils_usercustomize = """
# miniirc_bootstrap: Make user-provided packages load first.
# This can safely be removed if distutils has been properly installed.
import os, sys
dir = os.path.expanduser('~/.local/lib/python{}.{}/site-packages'.format(
*sys.version_info[:2]))
if dir in sys.path:
sys.path.remove(dir)
sys.path.insert(0, dir)
del dir
# End of miniirc_bootstrap changes
""".lstrip()
# Debian has decided to remove distutils from Python installs by default.
# TODO: Make less assumptions about the system.
def bootstrap_distutils():
    """
    Bootstrap installs distutils on Debian systems. This is horrible and should
    probably be avoided if possible.

    Downloads the python3-distutils .deb with apt-get, extracts it with
    dpkg-deb, moves the package's distutils into the user site-packages and
    patches usercustomize.py so the copy is found first.
    """
    # Already usable (and on <3.12, where distutils still exists): nothing to do.
    if (importlib.util.find_spec('distutils.util') is not None and
            sys.version_info < (3, 12)):
        return
    print('[This should never happen] Downloading distutils...')
    if sys.platform != 'linux' or not shutil.which('apt-get'):
        raise NotImplementedError('Cannot bootstrap distutils on non-Debian '
                                  'systems!')
    # A partial distutils (package present but distutils.util missing).
    partial_distutils = importlib.util.find_spec('distutils')
    # Get paths
    python = 'python{}.{}'.format(*sys.version_info[:2])
    pkg = 'python{}-distutils'.format(sys.version_info[0])
    local_lib = os.path.expanduser('~/.local/lib')
    if not os.path.isdir(local_lib):
        os.mkdir(local_lib)
    local_python = os.path.join(local_lib, python)
    if not os.path.isdir(local_python):
        os.mkdir(local_python)
    local_python = os.path.join(local_python, 'site-packages')
    if not os.path.isdir(local_python):
        os.mkdir(local_python)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Download the package.
        subprocess.check_call(('apt-get', 'download', pkg), cwd=tmpdir)
        files = os.listdir(tmpdir)
        assert len(files) == 1, 'Error downloading .deb!'
        # Extract the downloaded .deb file.
        print('[This should never happen] Installing distutils...')
        subprocess.check_call(('dpkg-deb', '-x', files[0], tmpdir), cwd=tmpdir)
        # Move distutils out of the extracted package.
        f = os.path.join(tmpdir, 'usr', 'lib', python, 'distutils')
        os.rename(f, os.path.join(local_python, 'distutils'))
        if partial_distutils is not None:
            # Symlink extra files from the partial system distutils that the
            # .deb does not provide.
            old_distutils = os.path.dirname(partial_distutils.origin)
            for fn in os.listdir(old_distutils):
                if not fn.endswith('.py'):
                    continue
                dst = os.path.join(local_python, 'distutils', fn)
                if os.path.exists(dst):
                    continue
                os.symlink(os.path.join(old_distutils, fn), dst)
    # A horrible tweak to make user-provided packages load before system ones.
    usercustomize = os.path.join(local_python, 'usercustomize.py')
    print('[This should never happen] Adding {}...'.format(usercustomize))
    with open(usercustomize, 'a') as f:
        # If the file already contains data write a leading newline.
        if f.tell():
            f.write('\n')
        # Write the custom distutils data.
        f.write(_distutils_usercustomize)
    print('[This should never happen] distutils should be installed!')
    # Recommend the user installs distutils correctly if possible.
    print(('If you have root access, please install {}, remove the '
           'changes made in {!r}, and delete {!r}.').format(pkg, usercustomize,
           os.path.join(local_python, 'distutils')), file=sys.stderr)
# Download a webpage
def wget(url, raw=False):
    """Download *url*; return bytes when *raw* is true, otherwise text
    decoded as UTF-8 (undecodable bytes replaced).

    Returns '' when the server answers with an HTTP error.
    """
    try:
        with urllib.request.urlopen(url) as resp:
            payload = resp.read()
    except urllib.request.HTTPError:
        return ''
    return payload if raw else payload.decode('utf-8', 'replace')
def bootstrap_pip():
    """
    Bootstrap installs pip. This will print messages to stdout/stderr.
    This is required because some versions of Ubuntu do not have pip or
    ensurepip installed with Python by default.
    """
    # get-pip.py needs distutils; install it first when it is missing.
    if importlib.util.find_spec('distutils.util') is None:
        bootstrap_distutils()
    print('Downloading pip...')
    url = 'https://bootstrap.pypa.io/{}get-pip.py'
    # If this machine is using an obsolete Python, download the
    # version-specific one.
    major, minor = sys.version_info[:2]
    pip = wget(url.format('{}.{}/'.format(major, minor)), raw=True)
    # Otherwise use the generic one.
    if not pip:
        pip = wget(url.format(''), raw=True)
    assert pip, 'Error downloading pip!'
    print('Installing pip...')
    # Write the installer to a temp file and run it as the current user.
    fd, filename = tempfile.mkstemp()
    with open(fd, 'wb') as f:
        f.write(pip)
    del pip
    subprocess.check_call((sys.executable, '--', filename, '--user'))
    os.remove(filename)
    print('pip (should be) installed!')
# Install a package
def pip_install(*pkgs, upgrade=False):
    """
    Installs or upgrades packages using pip. `pip` will print to stdout/stderr.
    This automatically calls bootstrap_pip() if required.
    """
    cmd = [sys.executable, '-m', 'pip', 'install']
    if upgrade:
        cmd += ['--upgrade']
    cmd += ['--user', '--', *pkgs]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        # pip exists but the install genuinely failed: propagate the error.
        if importlib.util.find_spec('pip') is not None:
            raise
        # pip itself is missing - bootstrap it, then retry once.
        print('pip is (somehow) not installed!')
        bootstrap_pip()
        subprocess.check_call(cmd)
# Install miniirc
def main():
    """Entry point: install miniirc, upgrading it when already present."""
    # Reject any command-line arguments.
    import argparse
    argparse.ArgumentParser().parse_args()

    # Upgrade when miniirc is already importable; otherwise a fresh install.
    try:
        import miniirc  # noqa: F401
    except ImportError:
        already_installed = False
    else:
        already_installed = True
    pip_install('miniirc', upgrade=already_installed)
    print('miniirc (should be) installed!')
if __name__ == '__main__':
main() | 0.296145 | 0.200734 |
import lightgbm as lgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from matplotlib import pyplot as plt
import numpy as np
class LightGBMWrapper:
    """
    Apply a LightGBM classifier to a dataset with hyperparameter tuning
    (grid search over a stratified cross-validation).
    """

    # Fixed base settings for the estimator itself (not searched over).
    params = {'boosting_type': 'gbdt',
              'max_depth': 5,
              'objective': 'binary',
              'num_leaves': 64,
              'learning_rate': 0.05,
              'subsample': 1,
              'colsample_bytree': 0.8,
              'max_bin': 10}

    # Default hyperparameter grid for the search.
    default_parameters = {
        'n_estimators': [100, 200, 300],
        'num_leaves': [6, 8, 12],
        'boosting_type': ['gbdt'],
        'objective': ['binary'],
        'colsample_bytree': list(np.arange(0.2, 1.2, 0.2)),
        'subsample': [0.6, 0.8, 1.0],
        'max_depth': list(range(1, 13, 2)),
        'min_child_weight': [1, 5, 10],
    }

    def __init__(self, train_x, train_y, parameters=default_parameters, n_folds=5, n_jobs=5, scoring_metrics='accuracy'):
        """
        Instantiate a LightGBM wrapper.

        Parameters
        ------------
        train_x : A pandas dataframe with all the features
        train_y : A pandas dataframe with the ground truth
        parameters : dictionary of parameters used for grid search
        n_folds : Number of partitions for the cross validation
        n_jobs : Number of jobs for parallel execution
        scoring_metrics : see http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        """
        self.columns_names = train_x.columns.tolist()
        self.train_x = train_x.values
        self.train_y = train_y.values
        # BUG FIX: the original call was missing the comma after
        # ``n_jobs = n_jobs``, which made this module a SyntaxError.
        self.lgbm_model = lgb.LGBMClassifier(boosting_type='gbdt',
                                             objective='binary',
                                             n_jobs=n_jobs,
                                             silent=True,
                                             max_bin=self.params['max_bin'],
                                             max_depth=self.params['max_depth'],
                                             subsample=self.params['subsample'])
        self.gridSearch = GridSearchCV(self.lgbm_model, parameters, n_jobs=n_jobs,
                                       cv=StratifiedKFold(n_splits=n_folds, shuffle=True),
                                       scoring=scoring_metrics,
                                       refit=True,
                                       verbose=10)

    def find_best_model(self):
        """
        Perform the grid search and return the best model and best model score
        on cross validation.
        """
        self.gridSearch.fit(self.train_x, self.train_y)
        return self.gridSearch.best_estimator_, self.gridSearch.best_score_

    def get_most_important_features_plot(self):
        """
        Plot the most important features of the best LightGBM estimator found.
        """
        plt.bar(self.columns_names, self.gridSearch.best_estimator_.feature_importances_)
        plt.show()
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from matplotlib import pyplot as plt
import numpy as np
class LightGBMWrapper:
    """
    Apply a LightGBM classifier to a dataset with hyperparameter tuning
    (grid search over a stratified cross-validation).
    """

    # Fixed base settings for the estimator itself (not searched over).
    params = {'boosting_type': 'gbdt',
              'max_depth': 5,
              'objective': 'binary',
              'num_leaves': 64,
              'learning_rate': 0.05,
              'subsample': 1,
              'colsample_bytree': 0.8,
              'max_bin': 10}

    # Default hyperparameter grid for the search.
    default_parameters = {
        'n_estimators': [100, 200, 300],
        'num_leaves': [6, 8, 12],
        'boosting_type': ['gbdt'],
        'objective': ['binary'],
        'colsample_bytree': list(np.arange(0.2, 1.2, 0.2)),
        'subsample': [0.6, 0.8, 1.0],
        'max_depth': list(range(1, 13, 2)),
        'min_child_weight': [1, 5, 10],
    }

    def __init__(self, train_x, train_y, parameters=default_parameters, n_folds=5, n_jobs=5, scoring_metrics='accuracy'):
        """
        Instantiate a LightGBM wrapper.

        Parameters
        ------------
        train_x : A pandas dataframe with all the features
        train_y : A pandas dataframe with the ground truth
        parameters : dictionary of parameters used for grid search
        n_folds : Number of partitions for the cross validation
        n_jobs : Number of jobs for parallel execution
        scoring_metrics : see http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        """
        self.columns_names = train_x.columns.tolist()
        self.train_x = train_x.values
        self.train_y = train_y.values
        # BUG FIX: the original call was missing the comma after
        # ``n_jobs = n_jobs``, which made this module a SyntaxError.
        self.lgbm_model = lgb.LGBMClassifier(boosting_type='gbdt',
                                             objective='binary',
                                             n_jobs=n_jobs,
                                             silent=True,
                                             max_bin=self.params['max_bin'],
                                             max_depth=self.params['max_depth'],
                                             subsample=self.params['subsample'])
        self.gridSearch = GridSearchCV(self.lgbm_model, parameters, n_jobs=n_jobs,
                                       cv=StratifiedKFold(n_splits=n_folds, shuffle=True),
                                       scoring=scoring_metrics,
                                       refit=True,
                                       verbose=10)

    def find_best_model(self):
        """
        Perform the grid search and return the best model and best model score
        on cross validation.
        """
        self.gridSearch.fit(self.train_x, self.train_y)
        return self.gridSearch.best_estimator_, self.gridSearch.best_score_

    def get_most_important_features_plot(self):
        """
        Plot the most important features of the best LightGBM estimator found.
        """
        plt.bar(self.columns_names, self.gridSearch.best_estimator_.feature_importances_)
        plt.show()
import subprocess
import time
instList = [ \
#(0x707f, 0x2003, 'LW', ['rs1','rd','immI']),
#(0x707f, 0x3, 'LB', ['rs1','rd','immI']),
#(0x707f, 0x1003, 'LH', ['rs1','rd','immI']),
#(0x707f, 0x3003, 'LD', ['rs1','rd','immI']),
#(0x707f, 0x4003, 'LBU', ['rs1','rd','immI']),
#(0x707f, 0x5003, 'LHU', ['rs1','rd','immI']),
#(0x707f, 0x6003, 'LWU', ['rs1','rd','immI']),
#(0x707f, 0x0023, 'SB', ['rs1','rs2','immS']),
#(0x707f, 0x1023, 'SH', ['rs1','rs2','immS']),
#(0x707f, 0x2023, 'SW', ['rs1','rs2','immS']), \
#(0x707f, 0x3023, 'SD', ['rs1','rs2','immS']),
#(0x707f, 0x0063, 'BEQ', ['rs1','rs2','immB']),
#(0x707f, 0x1063, 'BNE', ['rs1','rs2','immB']),
#(0x707f, 0x4063, 'BLT', ['rs1','rs2','immB']),
#(0x707f, 0x5063, 'BGE', ['rs1','rs2','immB']),
#(0x707f, 0x6063, 'BLTU', ['rs1','rs2','immB']),
#(0x707f, 0x7063, 'BGEU', ['rs1','rs2','immB']),
#(0x7f, 0x6f, 'JAL', ['immJ','rd']),
(0x707f, 0x67, 'JALR', ['rs1','rd','immI']),
(0x7f,0x37, 'LUI', ['immU','rd']),
(0x7f,0x17, 'AUIPC', ['immU','rd']),
(0x707f, 0x13, 'ADDI', ['rs1','rd','immI']),
(0x707f, 0x2013, 'SLTI', ['rs1','rd','immI']),
(0x707f, 0x3013, 'SLTIU', ['rs1','rd','immI']),
(0x707f, 0x4013, 'XORI', ['rs1','rd','immI']),
(0x707f, 0x6013, 'ORI', ['rs1','rd','immI']),
(0x707f, 0x7013, 'ANDI', ['rs1','rd','immI']),
(0xfe00707f, 0x00005013, 'SRLI', ['rs1','rd','shamt']),
(0xfe00707f, 0x40005013, 'SRAI', ['rs1','rd','shamt']),
(0xfe00707f, 0x00001013, 'SLLI', ['rs1','rd','shamt']),
(0xfe00707f, 0x00000033, 'ADD', ['rs1','rs2','rd']),
(0xfe00707f, 0x40000033, 'SUB', ['rs1','rs2','rd']),
(0xfe00707f, 0x00001033, 'SLL', ['rs1','rs2','rd']),
(0xfe00707f, 0x00002033, 'SLT', ['rs1','rs2','rd']),
(0xfe00707f, 0x00003033, 'SLTU', ['rs1','rs2','rd']),
(0xfe00707f, 0x00004033, 'XOR', ['rs1','rs2','rd']),
(0xfe00707f, 0x00005033, 'SRL', ['rs1','rs2','rd']),
(0xfe00707f, 0x40005033, 'SRA', ['rs1','rs2','rd']),
(0xfe00707f, 0x00006033, 'OR', ['rs1','rs2','rd']),
#(0xfe00707f, 0x00007033, 'AND', ['rs1','rs2','rd'])
]
with open('RocketFV.v.in') as fin:
text = fin.read()
with open('bmcprove.tcl.in') as fin:
script = fin.read()
logf = open('prove.log','wt')
with open ('result.log','wt') as rt:
for m,h,instname,l in instList:
outtext = text.replace('%%%INST%%%', instname)
scr = script.replace('%%%INST%%%', instname)
with open('RocketFV.v','wt') as fout:
fout.write(outtext)
with open('bmcprove.tcl','wt') as fout:
fout.write(scr)
#let's do the work
subprocess.call(['mkdir','db/'+instname])
logf.flush()
logf.write( 'proving %s\n' % instname )
starttime = time.time()
logf.write( 'Time: %f\n' % starttime )
logf.flush()
#exit(1);
subprocess.call(['jg','-no_gui','-fpv','bmcprove.tcl'], stdout=rt)
endtime = time.time()
logf.write( 'End: %f\n Elapsed: %f\n' % (starttime, endtime-starttime ) )
logf.flush() | cores/RISC-V/RISC-V-Synth/ILAVerif/BaseI/handle.py | import subprocess
import time
instList = [ \
#(0x707f, 0x2003, 'LW', ['rs1','rd','immI']),
#(0x707f, 0x3, 'LB', ['rs1','rd','immI']),
#(0x707f, 0x1003, 'LH', ['rs1','rd','immI']),
#(0x707f, 0x3003, 'LD', ['rs1','rd','immI']),
#(0x707f, 0x4003, 'LBU', ['rs1','rd','immI']),
#(0x707f, 0x5003, 'LHU', ['rs1','rd','immI']),
#(0x707f, 0x6003, 'LWU', ['rs1','rd','immI']),
#(0x707f, 0x0023, 'SB', ['rs1','rs2','immS']),
#(0x707f, 0x1023, 'SH', ['rs1','rs2','immS']),
#(0x707f, 0x2023, 'SW', ['rs1','rs2','immS']), \
#(0x707f, 0x3023, 'SD', ['rs1','rs2','immS']),
#(0x707f, 0x0063, 'BEQ', ['rs1','rs2','immB']),
#(0x707f, 0x1063, 'BNE', ['rs1','rs2','immB']),
#(0x707f, 0x4063, 'BLT', ['rs1','rs2','immB']),
#(0x707f, 0x5063, 'BGE', ['rs1','rs2','immB']),
#(0x707f, 0x6063, 'BLTU', ['rs1','rs2','immB']),
#(0x707f, 0x7063, 'BGEU', ['rs1','rs2','immB']),
#(0x7f, 0x6f, 'JAL', ['immJ','rd']),
(0x707f, 0x67, 'JALR', ['rs1','rd','immI']),
(0x7f,0x37, 'LUI', ['immU','rd']),
(0x7f,0x17, 'AUIPC', ['immU','rd']),
(0x707f, 0x13, 'ADDI', ['rs1','rd','immI']),
(0x707f, 0x2013, 'SLTI', ['rs1','rd','immI']),
(0x707f, 0x3013, 'SLTIU', ['rs1','rd','immI']),
(0x707f, 0x4013, 'XORI', ['rs1','rd','immI']),
(0x707f, 0x6013, 'ORI', ['rs1','rd','immI']),
(0x707f, 0x7013, 'ANDI', ['rs1','rd','immI']),
(0xfe00707f, 0x00005013, 'SRLI', ['rs1','rd','shamt']),
(0xfe00707f, 0x40005013, 'SRAI', ['rs1','rd','shamt']),
(0xfe00707f, 0x00001013, 'SLLI', ['rs1','rd','shamt']),
(0xfe00707f, 0x00000033, 'ADD', ['rs1','rs2','rd']),
(0xfe00707f, 0x40000033, 'SUB', ['rs1','rs2','rd']),
(0xfe00707f, 0x00001033, 'SLL', ['rs1','rs2','rd']),
(0xfe00707f, 0x00002033, 'SLT', ['rs1','rs2','rd']),
(0xfe00707f, 0x00003033, 'SLTU', ['rs1','rs2','rd']),
(0xfe00707f, 0x00004033, 'XOR', ['rs1','rs2','rd']),
(0xfe00707f, 0x00005033, 'SRL', ['rs1','rs2','rd']),
(0xfe00707f, 0x40005033, 'SRA', ['rs1','rs2','rd']),
(0xfe00707f, 0x00006033, 'OR', ['rs1','rs2','rd']),
#(0xfe00707f, 0x00007033, 'AND', ['rs1','rs2','rd'])
]
with open('RocketFV.v.in') as fin:
text = fin.read()
with open('bmcprove.tcl.in') as fin:
script = fin.read()
logf = open('prove.log','wt')
with open ('result.log','wt') as rt:
for m,h,instname,l in instList:
outtext = text.replace('%%%INST%%%', instname)
scr = script.replace('%%%INST%%%', instname)
with open('RocketFV.v','wt') as fout:
fout.write(outtext)
with open('bmcprove.tcl','wt') as fout:
fout.write(scr)
#let's do the work
subprocess.call(['mkdir','db/'+instname])
logf.flush()
logf.write( 'proving %s\n' % instname )
starttime = time.time()
logf.write( 'Time: %f\n' % starttime )
logf.flush()
#exit(1);
subprocess.call(['jg','-no_gui','-fpv','bmcprove.tcl'], stdout=rt)
endtime = time.time()
logf.write( 'End: %f\n Elapsed: %f\n' % (starttime, endtime-starttime ) )
logf.flush() | 0.04844 | 0.282017 |
from pynmea.exceptions import NoDataGivenError
class NMEAStream(object):
""" NMEAStream object is used to
"""
def __init__(self, stream_obj=None):
""" stream_obj should be a file like object.
If the requirement is just to split data in memory, no stream_obj
is required. Simply create an instance of this class and
call _split directly with the data.
"""
self.stream = stream_obj
self.head = ''
def get_strings(self, data=None, size=1024):
""" Read and return sentences as strings
"""
return self._read(data=data, size=size)
def get_objects(self, data=None, size=1024):
""" Get sentences but return list of NMEA objects
"""
str_data = self._read(data=data, size=size)
nmea_objects = []
for nmea_str in str_data:
try:
nmea_ob = self._get_type(nmea_str)()
except TypeError:
# NMEA sentence was not recognised
continue
nmea_ob.parse(nmea_str)
nmea_objects.append(nmea_ob)
return nmea_objects
def _read(self, data=None, size=1024):
""" Read size bytes of data. Always strip off the last record and
append to the start of the data stream on the next call.
This ensures that only full sentences are returned.
"""
if not data and not self.stream and not self.head:
# If there's no data and no stream, raise an error
raise NoDataGivenError('No data was provided')
if not data and self.stream:
read_data = self.stream.read(size)
else:
read_data = data
data = self.head + read_data
# DBG:
print "Joined head and read_data to get"
print "-"*20
print data
print "-"*20
raw_sentences = self._split(data)
if not read_data:
self.head = ''
return raw_sentences
self.head = raw_sentences[-1]
full_sentences = raw_sentences[:-1]
return full_sentences
def _get_type(self, sentence):
""" Get the NMEA type and return the appropriate object. Returns
None if no such object was found.
TODO: raise error instead of None. Failing silently is a Bad Thing.
We can always catch the error later if the user wishes to supress
errors.
"""
sen_type = sentence.split(',')[0].lstrip('$')
sen_mod = __import__('pynmea.nmea', fromlist=[sen_type])
sen_obj = getattr(sen_mod, sen_type, None)
return sen_obj
def _split(self, data, separator=None):
""" Take some data and split up based on the notion that a sentence
looks something like:
$x,y,z or $x,y,z*ab
separator is for cases where there is something strange or
non-standard as a separator between sentences.
Without this, there is no real way to tell whether:
$x,y,zSTUFF
is legal or if STUFF should be stripped.
"""
sentences = data.split('$')
clean_sentences = []
for item in sentences:
cleaned_item = item.rstrip()
if separator:
cleaned_item = cleaned_item.rstrip(separator)
if '*' in cleaned_item.split(',')[-1]:
# There must be a checksum. Remove any trailing fluff:
try:
first, checksum = cleaned_item.split('*')
except ValueError:
# Some GPS data recorders have been shown to output
# run-together sentences (no leading $).
# In this case, ignore error and continue, discarding the
# erroneous data.
# TODO: try and fix the data.
continue
cleaned_item = '*'.join([first, checksum[:2]])
if cleaned_item:
clean_sentences.append(cleaned_item)
return clean_sentences | adcpy/pynmea/streamer.py | from pynmea.exceptions import NoDataGivenError
class NMEAStream(object):
""" NMEAStream object is used to
"""
def __init__(self, stream_obj=None):
""" stream_obj should be a file like object.
If the requirement is just to split data in memory, no stream_obj
is required. Simply create an instance of this class and
call _split directly with the data.
"""
self.stream = stream_obj
self.head = ''
def get_strings(self, data=None, size=1024):
""" Read and return sentences as strings
"""
return self._read(data=data, size=size)
def get_objects(self, data=None, size=1024):
""" Get sentences but return list of NMEA objects
"""
str_data = self._read(data=data, size=size)
nmea_objects = []
for nmea_str in str_data:
try:
nmea_ob = self._get_type(nmea_str)()
except TypeError:
# NMEA sentence was not recognised
continue
nmea_ob.parse(nmea_str)
nmea_objects.append(nmea_ob)
return nmea_objects
def _read(self, data=None, size=1024):
""" Read size bytes of data. Always strip off the last record and
append to the start of the data stream on the next call.
This ensures that only full sentences are returned.
"""
if not data and not self.stream and not self.head:
# If there's no data and no stream, raise an error
raise NoDataGivenError('No data was provided')
if not data and self.stream:
read_data = self.stream.read(size)
else:
read_data = data
data = self.head + read_data
# DBG:
print "Joined head and read_data to get"
print "-"*20
print data
print "-"*20
raw_sentences = self._split(data)
if not read_data:
self.head = ''
return raw_sentences
self.head = raw_sentences[-1]
full_sentences = raw_sentences[:-1]
return full_sentences
def _get_type(self, sentence):
""" Get the NMEA type and return the appropriate object. Returns
None if no such object was found.
TODO: raise error instead of None. Failing silently is a Bad Thing.
We can always catch the error later if the user wishes to supress
errors.
"""
sen_type = sentence.split(',')[0].lstrip('$')
sen_mod = __import__('pynmea.nmea', fromlist=[sen_type])
sen_obj = getattr(sen_mod, sen_type, None)
return sen_obj
def _split(self, data, separator=None):
""" Take some data and split up based on the notion that a sentence
looks something like:
$x,y,z or $x,y,z*ab
separator is for cases where there is something strange or
non-standard as a separator between sentences.
Without this, there is no real way to tell whether:
$x,y,zSTUFF
is legal or if STUFF should be stripped.
"""
sentences = data.split('$')
clean_sentences = []
for item in sentences:
cleaned_item = item.rstrip()
if separator:
cleaned_item = cleaned_item.rstrip(separator)
if '*' in cleaned_item.split(',')[-1]:
# There must be a checksum. Remove any trailing fluff:
try:
first, checksum = cleaned_item.split('*')
except ValueError:
# Some GPS data recorders have been shown to output
# run-together sentences (no leading $).
# In this case, ignore error and continue, discarding the
# erroneous data.
# TODO: try and fix the data.
continue
cleaned_item = '*'.join([first, checksum[:2]])
if cleaned_item:
clean_sentences.append(cleaned_item)
return clean_sentences | 0.426322 | 0.404949 |
import os
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
import signal
import time
from datetime import datetime
from datetime import timedelta
import ConfigParser
import glob
import json
import uuid
import shutil
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from apscheduler.schedulers.blocking import BlockingScheduler
import SftpClient as SFTPClient
import Mobigen.Common.Log as Log; Log.Init()
import subprocess
# Module-level collector handle; set in main() so the signal handler can
# reach the running instance and shut it down.
workInfoCollector = None
def handler(signum, frame):
	"""Signal callback: shut the running collector down, swallowing errors.

	Registered below for SIGTERM/SIGINT/SIGHUP/SIGPIPE.
	"""
	__LOG__.Trace('signal : process shutdown')
	try :
		collector = workInfoCollector
		if collector :
			collector.shutdown()
	except :
		# Never let an error escape a signal handler; just log it.
		__LOG__.Exception()
# Route the usual termination/hangup signals to handler() so the collector
# gets a chance to stop its scheduler cleanly before the process exits.
# SIGTERM (service stop / kill)
signal.signal(signal.SIGTERM, handler)
# SIGINT (Ctrl-C)
signal.signal(signal.SIGINT, handler)
# SIGHUP (terminal hangup)
signal.signal(signal.SIGHUP, handler)
# SIGPIPE (broken downstream pipe)
signal.signal(signal.SIGPIPE, handler)
class WorkInfoCollector :
def __init__(self, cfg) :
self.cfg = cfg
self.WORKINFO_REPO = {}
self._initConfig()
def _initConfig(self) :
self.systemName = self.cfg.get('MODULE_CONF', 'TACS_SYSTEM_NAME')
self.workInfoBaseDir = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_RAW')
self.auditLogTempDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_TEMP')
self.auditLogBaseDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_PATH')
self.receivedWorkCode = self.cfg.get('MODULE_CONF', 'RECEIVED_WORK_CODE')
self.tangoWmWorkInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_WORKINFO_URL')
self.tangoWmEqpInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_EQPINFO_URL')
self.xAuthToken = self.cfg.get('MODULE_CONF', 'TANGO_WM_X_AUTH_TOKEN')
self.host = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_HOST')
self.port = int(self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PORT'))
self.user = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_USER')
self.passwd = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PASSWD')
self.scheduleInterval = self.cfg.get('MODULE_CONF', 'SCHEDULE_INTERVAL_MIN')
self.stdoutSleepTime = int(self.cfg.get('MODULE_CONF', 'STDOUT_SLEEP_TIME'))
self.headers = {'x-auth-token' : self.xAuthToken, 'Content-Type' : 'application/json; charset=utf-8'}
self.migration = False
def _executeMigration(self, searchStartDate, searchEndDate) :
__LOG__.Trace('migration process start. searchStartDate({}), searchEndDate({})'.format(searchStartDate, searchEndDate))
try :
searchStartDateObj = datetime.strptime(searchStartDate, '%Y%m%d%H%M%S')
searchEndDateObj = datetime.strptime(searchEndDate, '%Y%m%d%H%M%S')
if searchStartDateObj > searchEndDateObj :
__LOG__.Trace('searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate))
print '[ERROR] searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate)
else :
# request workInfo
workIdList = self._lookupWorkInfo(searchStartDate, searchEndDate, True)
# request eqpInfo by workId
self._lookupEqpInfo(workIdList)
except Exception as ex :
__LOG__.Trace('workInfo migration failed. {}'.format(ex))
def _executeScheduler(self) :
try :
__LOG__.Trace('scheduler process start')
# request workInfo
workIdList = self._lookupWorkInfo()
# request eqpInfo by workId
self._lookupEqpInfo(workIdList)
except :
__LOG__.Exception()
def _stdout(self, msg) :
sys.stdout.write('stdout' + msg + '\n')
sys.stdout.flush()
__LOG__.Trace('stdout: %s' % msg)
def _lookupWorkInfo(self, fromDate = None, toDate = None, migration = False) :
searchStartDate = fromDate
searchEndDate = toDate
if not migration :
searchEndDateObj = datetime.now()
#searchStartDateObj = datetime(searchEndDateObj.year, searchEndDateObj.month, searchEndDateObj.day, searchEndDateObj.hour, (searchEndDateObj.minute - int(self.scheduleInterval)))
searchStartDateObj = searchEndDateObj - timedelta(minutes=1)
searchStartDate = searchStartDateObj.strftime('%Y%m%d%H%M')
searchEndDate = searchEndDateObj.strftime('%Y%m%d%H%M')
__LOG__.Trace('lookup workInfo from({}) ~ to({})'.format(searchStartDate, searchEndDate))
url = self.tangoWmWorkInfoUrl.format(self.systemName, searchStartDate, searchEndDate)
__LOG__.Trace('request workInfo url: {}'.format(url))
rawDict = self._requestGet(url)
return self._loadWorkInfo(rawDict)
def _lookupEqpInfo(self, workIdList) :
if not workIdList :
__LOG__.Trace('workIdList is empty')
else :
logDictList = list()
yyyyMMdd = None
eventDate = None
for oneWorkId in workIdList :
url = self.tangoWmEqpInfoUrl.format(self.systemName, oneWorkId)
__LOG__.Trace('request eqpInfo url: {}'.format(url))
rawDict = self._requestGet(url)
logDict, yyyyMMdd, eventDate = self._loadEqpInfo(oneWorkId, rawDict, logDictList)
logDictList.append(logDict)
self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDictList)
def _requestGet(self, url, verify = False) :
rawDict = None
response = requests.get(url = url, headers = self.headers, verify = verify)
if response.status_code == 200 :
#jsonText = response.text.decode('string_escape')
#__LOG__.Trace('raw response.text: {}'.format(jsonText))
#__LOG__.Trace('replace response.text: {}'.format(jsonText.replace('\\\\\\"', '\\\"')))
#__LOG__.Trace('replace response.text: {}'.format(jsonText))
#tmpDict = json.loads(response.text)
#__LOG__.Trace('tmpDict: {}'.format(tmpDict))
#__LOG__.Trace('tmpDict.dumps: {}'.format(json.dumps(tmpDict, ensure_ascii=False)))
rawDict = response.json()
#rawDict = json.loads(jsonText)
else :
__LOG__.Trace('!!! Exception !!! requestGet failed. statusCode: {}'.format(response.status_code))
pass
return rawDict
def _loadWorkInfo(self, rawDict) :
if rawDict :
__LOG__.Trace('workInfo rawData: {}'.format(rawDict))
workIdList = []
if type(rawDict['workInfo']) is list :
for oneWorkInfo in rawDict['workInfo'] :
workId = oneWorkInfo['workId']
__LOG__.Trace('workId: {}'.format(workId))
if workId is None or not workId :
__LOG__.Trace('invalid workId({})'.format(workId))
continue
workIdList.append(workId)
wrapper = {}
wrapper['workInfo'] = oneWorkInfo
workEvntDate = datetime.now().strftime('%Y%m%d%H%M%S')
wrapper['workInfo']['workEvntDate'] = workEvntDate
self.WORKINFO_REPO[workId] = wrapper
__LOG__.Trace('WORKINFO_REPO: {}'.format(self.WORKINFO_REPO))
else :
__LOG__.Trace('Unsupported type: {}'.format(type(rawDict['workInfo'])))
pass
return workIdList
else :
__LOG__.Trace('workInfo rawData is None')
return None
def _loadEqpInfo(self, oneWorkId, rawDict, logDictList) :
logDict = dict()
yyyyMMdd = None
eventDate = None
if rawDict :
__LOG__.Trace('eqpInfo rawData: {}'.format(rawDict))
if 'eqpInfo' in rawDict and type(rawDict['eqpInfo']) is list :
scriptFileList = []
wrapper = self.WORKINFO_REPO[oneWorkId]
if wrapper :
wrapper['eqpInfo'] = rawDict['eqpInfo']
for oneEqpInfoDict in rawDict['eqpInfo'] :
if 'scriptInfo' in oneEqpInfoDict :
scriptInfoList = oneEqpInfoDict['scriptInfo']
if scriptInfoList :
for oneScriptInfoDict in scriptInfoList :
filePathname = oneScriptInfoDict['atchdPathFileNm']
if filePathname :
remoteFilepath, remoteFilename = os.path.split(filePathname)
__LOG__.Trace('remoteFilepath({}), remoteFilename({})'.format(remoteFilepath, remoteFilename))
scriptFileDict = {}
scriptFileDict['remoteFilepath'] = remoteFilepath
scriptFileDict['remoteFilename'] = remoteFilename
scriptFileList.append(scriptFileDict)
else :
__LOG__.Trace('workId({})/eqpNm({}) atchdPathFileNm({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], filePathname))
pass
else :
__LOG__.Trace('workId({})/eqpNm({}) scriptInfoList({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], scriptInfoList))
else :
__LOG__.Trace('workId({})/eqpNm({}) scriptInfo does not exist in eqpInfo'.format(oneWorkId, oneEqpInfoDict['eqpNm']))
pass
else :
__LOG__.Trace('no registered workId({}) in WORKINFO_REPO'.format(oneWorkId))
return
__LOG__.Trace('scriptFileList: {}'.format(scriptFileList))
eventDate = wrapper['workInfo']['workEvntDate']
yyyyMMdd = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%Y%m%d')
__LOG__.Trace('eventDate({}), yyyyMMdd({})'.format(eventDate, yyyyMMdd))
self._getScriptFiles(yyyyMMdd, oneWorkId, scriptFileList)
logDict = self._writeTangoWorkFile(yyyyMMdd, eventDate, oneWorkId, wrapper)
self._removeCompleteWorkInfo(oneWorkId)
else :
__LOG__.Trace('Unsupported type: {}'.format('eqpInfo' in rawDict if type(rawDict['eqpInfo']) else None ))
pass
else :
__LOG__.Trace('workId({}), eqpInfo rawData is None'.format(oneWorkId))
pass
return logDict, yyyyMMdd, eventDate
def _getScriptFiles(self, yyyyMMdd, workId, scriptFileList) :
if not scriptFileList :
__LOG__.Trace('scriptFileList({}) is empty'.format(scriptFileList))
return
try :
tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
self._mkdirs(tacsWorkInfoPath)
sftpClient = SFTPClient.SftpClient(self.host, self.port, self.user, self.passwd)
for oneScriptFileDict in scriptFileList :
remoteFilepath = oneScriptFileDict['remoteFilepath']
remoteFilename = oneScriptFileDict['remoteFilename']
sftpClient.download(remoteFilepath, remoteFilename, tacsWorkInfoPath)
__LOG__.Trace('scriptFile from({}) -> to({}) download succeed'.format(os.path.join(remoteFilepath, remoteFilename), os.path.join(tacsWorkInfoPath, remoteFilename)))
sftpClient.close()
except Exception as ex :
__LOG__.Trace('scriptFile download proccess failed {}'.format(ex))
self._removeCompleteWorkInfo(workId)
raise ex
def _writeTangoWorkFile(self, yyyyMMdd, eventDate, workId, wrapper) :
logDict = {}
try :
tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
self._mkdirs(tacsWorkInfoPath)
contents = json.dumps(wrapper, ensure_ascii=False)
__LOG__.Trace('contents: {}'.format(contents))
createFilePath = os.path.join(tacsWorkInfoPath, '{}_{}_META.json'.format(eventDate, workId))
self._createFile(createFilePath, contents)
logDict['tacsLnkgRst'] = 'OK'
if self.migration :
__LOG__.Trace( ['mf','30000', 'put', 'dbl', 'stdoutfile://{}'.format(createFilePath)] )
subprocess.call(['mf', '30000', 'put,dbl,stdoutfile://{}'.format(createFilePath)])
else :
time.sleep(self.stdoutSleepTime)
self._stdout('file://{}'.format(createFilePath))
except Exception as ex :
__LOG__.Trace('workFile write process failed {}'.format(ex))
logDict['tacsLnkgRst'] = 'FAIL'
logDict['tacsLnkgRsn'] = ex.args
self._removeCompleteWorkInfo(workId)
raise ex
finally :
logDict['evntTypCd'] = self.receivedWorkCode
logDict['evntDate'] = eventDate
logDict['workId'] = workId
logDict['lnkgEqpIp'] = ''
return logDict
# self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDict)
def _writeTacsHistoryFile(self, yyyyMMdd, eventDate, logDictList) :
if logDictList :
__LOG__.Trace('received workInfo history: {}'.format(logDictList))
try :
tacsHistoryTempPath = os.path.join(self.auditLogTempDir, 'AUDIT_{}'.format(self.receivedWorkCode))
self._mkdirs(tacsHistoryTempPath)
contentList = list()
for oneLogDict in logDictList :
content = json.dumps(oneLogDict, ensure_ascii=False)
contentList.append(content)
contents = '\n'.join(contentList)
__LOG__.Trace('contents: {}'.format(contents))
tacsHistoryFilename = self._getTacsHistoryFilename(yyyyMMdd, eventDate)
__LOG__.Trace('tacsHistoryFilename: {}'.format(tacsHistoryFilename))
self._createFile(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), contents)
tacsHistoryPath = os.path.join(self.auditLogBaseDir, 'AUDIT_{}'.format(self.receivedWorkCode))
self._mkdirs(tacsHistoryPath)
shutil.move(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename))
__LOG__.Trace('tacsHistory file move from {} -> to {} succeed'.format(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename)))
except Exception as ex :
__LOG__.Trace('tacsHistory {} load process failed {}'.format(logDict, ex))
else :
__LOG__.Trace('received workInfo history({}) is invalid'.format(logDict))
def _mkdirs(self, directory) :
__LOG__.Trace('{} isExists: {}'.format(directory, os.path.exists(directory)))
if not os.path.exists(directory) :
__LOG__.Trace('create directories {}'.format(directory))
os.makedirs(directory)
def _createFile(self, filePath, contents) :
f = None
try :
f = open(filePath, 'w')
f.write(contents)
__LOG__.Trace('{} file is created'.format(filePath))
except Exception as ex :
__LOG__.Trace('{} to file process failed {}'.format(contents, ex))
raise ex
finally :
if f :
f.close()
def _getTacsHistoryFilename(self, yyyyMMdd, eventDate) :
HHmm = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%H%M')
tacsHistoryFilename = '{}_{}_{}.audit'.format(yyyyMMdd, HHmm, uuid.uuid4())
return tacsHistoryFilename
def _removeCompleteWorkInfo(self, workId) :
if workId in self.WORKINFO_REPO :
del self.WORKINFO_REPO[workId]
__LOG__.Trace('workId({}), WORKINFO_REPO: {}'.format(workId, self.WORKINFO_REPO))
def shutdown(self) :
try :
if self.scheduler :
#self.scheduler.remove_job('workInfo_scheduler')
self.scheduler.shutdown()
__LOG__.Trace('schduler is terminated')
else :
_LOG__.Trace('scheduler is None')
except Exception as ex :
__LOG__.Trace('shutdown failed {}'.format(ex))
def run(self, searchStartDate = None, searchEndDate = None, migration = False) :
self.migration = migration
if not migration :
self.scheduler = BlockingScheduler()
self.scheduler.add_job(self._executeScheduler, 'cron', minute='*/{}'.format(self.scheduleInterval), second='0', id='workInfo_scheduler')
self.scheduler.start()
else :
self._executeMigration(searchStartDate, searchEndDate)
__LOG__.Trace('migration proccess done')
def main() :
argvLength = len(sys.argv)
if argvLength < 3 :
print '''
[ERROR] WorkInfoCollector argv required at least 3
++ Usage
++++ scheduler : module section cfgfile
++++ migration : module section cfgfile searchStartDate(yyyyMMddHHmm) searchEndDate(yyyyMMddHHmm)
'''
return
module = os.path.basename(sys.argv[0])
section = sys.argv[1]
cfgfile = sys.argv[2]
searchStartDate = None
searchEndDate = None
migration = False
if argvLength == 5 :
migration = True
searchStartDate = sys.argv[3]
searchEndDate = sys.argv[4]
cfg = ConfigParser.ConfigParser()
cfg.read(cfgfile)
logPath = cfg.get("GENERAL", "LOG_PATH")
logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
logCfgPath = cfg.get("GENERAL", "LOG_CONF")
logCfg = ConfigParser.ConfigParser()
logCfg.read(logCfgPath)
Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
global workInfoCollector
workInfoCollector = WorkInfoCollector(cfg)
workInfoCollector.run(searchStartDate, searchEndDate, migration)
__LOG__.Trace('main is terminated')
# Script entry point: any error escaping main() is written to the module
# log instead of being lost on a daemonized stderr.
if __name__ == '__main__' :
	try :
		main()
	except :
		__LOG__.Exception()
import os
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
import signal
import time
from datetime import datetime
from datetime import timedelta
import ConfigParser
import glob
import json
import uuid
import shutil
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from apscheduler.schedulers.blocking import BlockingScheduler
import SftpClient as SFTPClient
import Mobigen.Common.Log as Log; Log.Init()
import subprocess
# Module-level collector handle; set in main() so the signal handler can
# reach the running instance and shut it down.
workInfoCollector = None
def handler(signum, frame):
	"""Signal callback: shut the running collector down, swallowing errors.

	Registered below for SIGTERM/SIGINT/SIGHUP/SIGPIPE.
	"""
	__LOG__.Trace('signal : process shutdown')
	try :
		collector = workInfoCollector
		if collector :
			collector.shutdown()
	except :
		# Never let an error escape a signal handler; just log it.
		__LOG__.Exception()
# Route the usual termination/hangup signals to handler() so the collector
# gets a chance to stop its scheduler cleanly before the process exits.
# SIGTERM (service stop / kill)
signal.signal(signal.SIGTERM, handler)
# SIGINT (Ctrl-C)
signal.signal(signal.SIGINT, handler)
# SIGHUP (terminal hangup)
signal.signal(signal.SIGHUP, handler)
# SIGPIPE (broken downstream pipe)
signal.signal(signal.SIGPIPE, handler)
class WorkInfoCollector :
def __init__(self, cfg) :
self.cfg = cfg
self.WORKINFO_REPO = {}
self._initConfig()
def _initConfig(self) :
self.systemName = self.cfg.get('MODULE_CONF', 'TACS_SYSTEM_NAME')
self.workInfoBaseDir = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_RAW')
self.auditLogTempDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_TEMP')
self.auditLogBaseDir = self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_PATH')
self.receivedWorkCode = self.cfg.get('MODULE_CONF', 'RECEIVED_WORK_CODE')
self.tangoWmWorkInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_WORKINFO_URL')
self.tangoWmEqpInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_EQPINFO_URL')
self.xAuthToken = self.cfg.get('MODULE_CONF', 'TANGO_WM_X_AUTH_TOKEN')
self.host = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_HOST')
self.port = int(self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PORT'))
self.user = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_USER')
self.passwd = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PASSWD')
self.scheduleInterval = self.cfg.get('MODULE_CONF', 'SCHEDULE_INTERVAL_MIN')
self.stdoutSleepTime = int(self.cfg.get('MODULE_CONF', 'STDOUT_SLEEP_TIME'))
self.headers = {'x-auth-token' : self.xAuthToken, 'Content-Type' : 'application/json; charset=utf-8'}
self.migration = False
def _executeMigration(self, searchStartDate, searchEndDate) :
__LOG__.Trace('migration process start. searchStartDate({}), searchEndDate({})'.format(searchStartDate, searchEndDate))
try :
searchStartDateObj = datetime.strptime(searchStartDate, '%Y%m%d%H%M%S')
searchEndDateObj = datetime.strptime(searchEndDate, '%Y%m%d%H%M%S')
if searchStartDateObj > searchEndDateObj :
__LOG__.Trace('searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate))
print '[ERROR] searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate)
else :
# request workInfo
workIdList = self._lookupWorkInfo(searchStartDate, searchEndDate, True)
# request eqpInfo by workId
self._lookupEqpInfo(workIdList)
except Exception as ex :
__LOG__.Trace('workInfo migration failed. {}'.format(ex))
def _executeScheduler(self) :
try :
__LOG__.Trace('scheduler process start')
# request workInfo
workIdList = self._lookupWorkInfo()
# request eqpInfo by workId
self._lookupEqpInfo(workIdList)
except :
__LOG__.Exception()
def _stdout(self, msg) :
sys.stdout.write('stdout' + msg + '\n')
sys.stdout.flush()
__LOG__.Trace('stdout: %s' % msg)
def _lookupWorkInfo(self, fromDate = None, toDate = None, migration = False) :
searchStartDate = fromDate
searchEndDate = toDate
if not migration :
searchEndDateObj = datetime.now()
#searchStartDateObj = datetime(searchEndDateObj.year, searchEndDateObj.month, searchEndDateObj.day, searchEndDateObj.hour, (searchEndDateObj.minute - int(self.scheduleInterval)))
searchStartDateObj = searchEndDateObj - timedelta(minutes=1)
searchStartDate = searchStartDateObj.strftime('%Y%m%d%H%M')
searchEndDate = searchEndDateObj.strftime('%Y%m%d%H%M')
__LOG__.Trace('lookup workInfo from({}) ~ to({})'.format(searchStartDate, searchEndDate))
url = self.tangoWmWorkInfoUrl.format(self.systemName, searchStartDate, searchEndDate)
__LOG__.Trace('request workInfo url: {}'.format(url))
rawDict = self._requestGet(url)
return self._loadWorkInfo(rawDict)
def _lookupEqpInfo(self, workIdList) :
if not workIdList :
__LOG__.Trace('workIdList is empty')
else :
logDictList = list()
yyyyMMdd = None
eventDate = None
for oneWorkId in workIdList :
url = self.tangoWmEqpInfoUrl.format(self.systemName, oneWorkId)
__LOG__.Trace('request eqpInfo url: {}'.format(url))
rawDict = self._requestGet(url)
logDict, yyyyMMdd, eventDate = self._loadEqpInfo(oneWorkId, rawDict, logDictList)
logDictList.append(logDict)
self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDictList)
def _requestGet(self, url, verify = False) :
rawDict = None
response = requests.get(url = url, headers = self.headers, verify = verify)
if response.status_code == 200 :
#jsonText = response.text.decode('string_escape')
#__LOG__.Trace('raw response.text: {}'.format(jsonText))
#__LOG__.Trace('replace response.text: {}'.format(jsonText.replace('\\\\\\"', '\\\"')))
#__LOG__.Trace('replace response.text: {}'.format(jsonText))
#tmpDict = json.loads(response.text)
#__LOG__.Trace('tmpDict: {}'.format(tmpDict))
#__LOG__.Trace('tmpDict.dumps: {}'.format(json.dumps(tmpDict, ensure_ascii=False)))
rawDict = response.json()
#rawDict = json.loads(jsonText)
else :
__LOG__.Trace('!!! Exception !!! requestGet failed. statusCode: {}'.format(response.status_code))
pass
return rawDict
def _loadWorkInfo(self, rawDict) :
    """Extract the work ids from a workInfo API response.

    Each valid entry is wrapped, stamped with a `workEvntDate` receive
    timestamp, and cached in self.WORKINFO_REPO keyed by workId.
    Returns the list of valid work ids, or None when `rawDict` is empty.
    """
    if rawDict :
        __LOG__.Trace('workInfo rawData: {}'.format(rawDict))
        workIdList = []
        if type(rawDict['workInfo']) is list :
            for oneWorkInfo in rawDict['workInfo'] :
                workId = oneWorkInfo['workId']
                __LOG__.Trace('workId: {}'.format(workId))
                if workId is None or not workId :
                    # Skip entries without a usable work id.
                    __LOG__.Trace('invalid workId({})'.format(workId))
                    continue
                workIdList.append(workId)
                wrapper = {}
                wrapper['workInfo'] = oneWorkInfo
                # Record when we received this work item (local time).
                workEvntDate = datetime.now().strftime('%Y%m%d%H%M%S')
                wrapper['workInfo']['workEvntDate'] = workEvntDate
                self.WORKINFO_REPO[workId] = wrapper
                __LOG__.Trace('WORKINFO_REPO: {}'.format(self.WORKINFO_REPO))
        else :
            __LOG__.Trace('Unsupported type: {}'.format(type(rawDict['workInfo'])))
            pass
        return workIdList
    else :
        __LOG__.Trace('workInfo rawData is None')
        return None
def _loadEqpInfo(self, oneWorkId, rawDict, logDictList) :
    """Process the eqpInfo API response for one work id.

    Collects the remote script file locations, downloads them, writes the
    work meta file, and drops the finished entry from WORKINFO_REPO.

    Returns a (logDict, yyyyMMdd, eventDate) tuple in EVERY code path so the
    caller can always unpack it.  (The previous version had a bare `return`
    on the unknown-workId path, which returned None and crashed the caller's
    3-tuple unpack with a TypeError.)
    `logDictList` is accepted for interface compatibility but not used here.
    """
    logDict = dict()
    yyyyMMdd = None
    eventDate = None
    if rawDict :
        __LOG__.Trace('eqpInfo rawData: {}'.format(rawDict))
        if 'eqpInfo' in rawDict and type(rawDict['eqpInfo']) is list :
            scriptFileList = []
            wrapper = self.WORKINFO_REPO[oneWorkId]
            if wrapper :
                wrapper['eqpInfo'] = rawDict['eqpInfo']
                for oneEqpInfoDict in rawDict['eqpInfo'] :
                    if 'scriptInfo' in oneEqpInfoDict :
                        scriptInfoList = oneEqpInfoDict['scriptInfo']
                        if scriptInfoList :
                            for oneScriptInfoDict in scriptInfoList :
                                filePathname = oneScriptInfoDict['atchdPathFileNm']
                                if filePathname :
                                    remoteFilepath, remoteFilename = os.path.split(filePathname)
                                    __LOG__.Trace('remoteFilepath({}), remoteFilename({})'.format(remoteFilepath, remoteFilename))
                                    scriptFileDict = {}
                                    scriptFileDict['remoteFilepath'] = remoteFilepath
                                    scriptFileDict['remoteFilename'] = remoteFilename
                                    scriptFileList.append(scriptFileDict)
                                else :
                                    __LOG__.Trace('workId({})/eqpNm({}) atchdPathFileNm({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], filePathname))
                        else :
                            __LOG__.Trace('workId({})/eqpNm({}) scriptInfoList({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], scriptInfoList))
                    else :
                        __LOG__.Trace('workId({})/eqpNm({}) scriptInfo does not exist in eqpInfo'.format(oneWorkId, oneEqpInfoDict['eqpNm']))
            else :
                __LOG__.Trace('no registered workId({}) in WORKINFO_REPO'.format(oneWorkId))
                # BUG FIX: previously a bare `return` (i.e. None) -> caller crash.
                return logDict, yyyyMMdd, eventDate
            __LOG__.Trace('scriptFileList: {}'.format(scriptFileList))
            eventDate = wrapper['workInfo']['workEvntDate']
            yyyyMMdd = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%Y%m%d')
            __LOG__.Trace('eventDate({}), yyyyMMdd({})'.format(eventDate, yyyyMMdd))
            self._getScriptFiles(yyyyMMdd, oneWorkId, scriptFileList)
            logDict = self._writeTangoWorkFile(yyyyMMdd, eventDate, oneWorkId, wrapper)
            self._removeCompleteWorkInfo(oneWorkId)
        else :
            # BUG FIX: the old log expression here was malformed
            # ("'eqpInfo' in rawDict if type(rawDict['eqpInfo']) else None")
            # and could itself raise KeyError; just log the actual type.
            __LOG__.Trace('Unsupported type: {}'.format(type(rawDict.get('eqpInfo'))))
    else :
        __LOG__.Trace('workId({}), eqpInfo rawData is None'.format(oneWorkId))
    return logDict, yyyyMMdd, eventDate
def _getScriptFiles(self, yyyyMMdd, workId, scriptFileList) :
    """Download every remote script file for `workId` via SFTP into the
    per-work local directory <workInfoBaseDir>/<yyyyMMdd>/<workId>.

    On any failure the work item is evicted from WORKINFO_REPO and the
    exception is re-raised to the caller.
    """
    if not scriptFileList :
        __LOG__.Trace('scriptFileList({}) is empty'.format(scriptFileList))
        return
    try :
        tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
        self._mkdirs(tacsWorkInfoPath)
        sftpClient = SFTPClient.SftpClient(self.host, self.port, self.user, self.passwd)
        for oneScriptFileDict in scriptFileList :
            remoteFilepath = oneScriptFileDict['remoteFilepath']
            remoteFilename = oneScriptFileDict['remoteFilename']
            sftpClient.download(remoteFilepath, remoteFilename, tacsWorkInfoPath)
            __LOG__.Trace('scriptFile from({}) -> to({}) download succeed'.format(os.path.join(remoteFilepath, remoteFilename), os.path.join(tacsWorkInfoPath, remoteFilename)))
        # NOTE(review): close() is skipped if download() raises -- the
        # connection is only closed on the success path.
        sftpClient.close()
    except Exception as ex :
        __LOG__.Trace('scriptFile download proccess failed {}'.format(ex))
        self._removeCompleteWorkInfo(workId)
        raise ex
def _writeTangoWorkFile(self, yyyyMMdd, eventDate, workId, wrapper) :
    """Serialize `wrapper` to a <eventDate>_<workId>_META.json work file and
    hand it off (either via the `mf` migration command or the stdout pipe).

    Always returns an audit logDict with tacsLnkgRst 'OK' or 'FAIL'.
    Note: the previous version did `raise ex` in the except block but then
    executed `return logDict` inside `finally`, which silently discarded
    the exception -- this rewrite makes the always-return behaviour explicit
    instead of relying on that pitfall.
    """
    logDict = {}
    try :
        tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
        self._mkdirs(tacsWorkInfoPath)
        contents = json.dumps(wrapper, ensure_ascii=False)
        __LOG__.Trace('contents: {}'.format(contents))
        createFilePath = os.path.join(tacsWorkInfoPath, '{}_{}_META.json'.format(eventDate, workId))
        self._createFile(createFilePath, contents)
        logDict['tacsLnkgRst'] = 'OK'
        if self.migration :
            __LOG__.Trace( ['mf','30000', 'put', 'dbl', 'stdoutfile://{}'.format(createFilePath)] )
            subprocess.call(['mf', '30000', 'put,dbl,stdoutfile://{}'.format(createFilePath)])
        else :
            time.sleep(self.stdoutSleepTime)
            self._stdout('file://{}'.format(createFilePath))
    except Exception as ex :
        __LOG__.Trace('workFile write process failed {}'.format(ex))
        logDict['tacsLnkgRst'] = 'FAIL'
        logDict['tacsLnkgRsn'] = ex.args
        self._removeCompleteWorkInfo(workId)
    # Common audit fields, set on success and failure alike.
    logDict['evntTypCd'] = self.receivedWorkCode
    logDict['evntDate'] = eventDate
    logDict['workId'] = workId
    logDict['lnkgEqpIp'] = ''
    return logDict
def _writeTacsHistoryFile(self, yyyyMMdd, eventDate, logDictList) :
    """Write one audit history file (one JSON object per line) for a batch
    of processed work items, creating it in the temp dir first and then
    moving it to the final audit directory.

    Failures are logged and swallowed; nothing is raised to the caller.
    """
    if logDictList :
        __LOG__.Trace('received workInfo history: {}'.format(logDictList))
        try :
            tacsHistoryTempPath = os.path.join(self.auditLogTempDir, 'AUDIT_{}'.format(self.receivedWorkCode))
            self._mkdirs(tacsHistoryTempPath)
            contentList = list()
            for oneLogDict in logDictList :
                content = json.dumps(oneLogDict, ensure_ascii=False)
                contentList.append(content)
            contents = '\n'.join(contentList)
            __LOG__.Trace('contents: {}'.format(contents))
            tacsHistoryFilename = self._getTacsHistoryFilename(yyyyMMdd, eventDate)
            __LOG__.Trace('tacsHistoryFilename: {}'.format(tacsHistoryFilename))
            self._createFile(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), contents)
            tacsHistoryPath = os.path.join(self.auditLogBaseDir, 'AUDIT_{}'.format(self.receivedWorkCode))
            self._mkdirs(tacsHistoryPath)
            # Write-then-move so consumers never see a half-written file.
            shutil.move(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename))
            __LOG__.Trace('tacsHistory file move from {} -> to {} succeed'.format(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename)))
        except Exception as ex :
            # BUG FIX: previously referenced the undefined name `logDict`,
            # raising NameError inside the exception handler.
            __LOG__.Trace('tacsHistory {} load process failed {}'.format(logDictList, ex))
    else :
        # BUG FIX: `logDict` was undefined on this branch as well.
        __LOG__.Trace('received workInfo history({}) is invalid'.format(logDictList))
def _mkdirs(self, directory) :
    """Create `directory` (including parents) if it does not exist yet."""
    alreadyThere = os.path.exists(directory)
    __LOG__.Trace('{} isExists: {}'.format(directory, alreadyThere))
    if alreadyThere :
        return
    __LOG__.Trace('create directories {}'.format(directory))
    os.makedirs(directory)
def _createFile(self, filePath, contents) :
    """Write `contents` to `filePath` (overwriting), logging the outcome.

    Raises the original exception on failure so the caller can decide how
    to react.  Uses a `with` block instead of the former manual
    try/finally close, which guarantees the handle is released.
    """
    try :
        with open(filePath, 'w') as f :
            f.write(contents)
        __LOG__.Trace('{} file is created'.format(filePath))
    except Exception as ex :
        __LOG__.Trace('{} to file process failed {}'.format(contents, ex))
        raise ex
def _getTacsHistoryFilename(self, yyyyMMdd, eventDate) :
HHmm = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%H%M')
tacsHistoryFilename = '{}_{}_{}.audit'.format(yyyyMMdd, HHmm, uuid.uuid4())
return tacsHistoryFilename
def _removeCompleteWorkInfo(self, workId) :
    """Drop a finished work item from the in-memory repo (no-op if absent)."""
    if workId not in self.WORKINFO_REPO :
        return
    self.WORKINFO_REPO.pop(workId)
    __LOG__.Trace('workId({}), WORKINFO_REPO: {}'.format(workId, self.WORKINFO_REPO))
def shutdown(self) :
    """Stop the blocking scheduler if one was started; never raises."""
    try :
        if self.scheduler :
            self.scheduler.shutdown()
            __LOG__.Trace('schduler is terminated')
        else :
            # BUG FIX: was `_LOG__.Trace(...)` (missing underscore), which
            # raised NameError whenever this branch executed.
            __LOG__.Trace('scheduler is None')
    except Exception as ex :
        __LOG__.Trace('shutdown failed {}'.format(ex))
def run(self, searchStartDate = None, searchEndDate = None, migration = False) :
    """Entry point for the collector.

    migration=False: start a blocking cron scheduler that fires
    _executeScheduler every `scheduleInterval` minutes (blocks forever).
    migration=True: run a one-shot backfill over the given date range
    (yyyyMMddHHmm strings) and return.
    """
    self.migration = migration
    if not migration :
        self.scheduler = BlockingScheduler()
        self.scheduler.add_job(self._executeScheduler, 'cron', minute='*/{}'.format(self.scheduleInterval), second='0', id='workInfo_scheduler')
        # start() blocks until shutdown() is called.
        self.scheduler.start()
    else :
        self._executeMigration(searchStartDate, searchEndDate)
        __LOG__.Trace('migration proccess done')
def main() :
    """CLI entry point (Python 2 syntax).

    Scheduler mode: module section cfgfile
    Migration mode: module section cfgfile searchStartDate searchEndDate
    """
    argvLength = len(sys.argv)
    if argvLength < 3 :
        # Python 2 print statement: show usage for both invocation modes.
        print '''
[ERROR] WorkInfoCollector argv required at least 3
++ Usage
++++ scheduler : module section cfgfile
++++ migration : module section cfgfile searchStartDate(yyyyMMddHHmm) searchEndDate(yyyyMMddHHmm)
'''
        return
    module = os.path.basename(sys.argv[0])
    section = sys.argv[1]
    cfgfile = sys.argv[2]
    searchStartDate = None
    searchEndDate = None
    migration = False
    if argvLength == 5 :
        # Extra date arguments switch to one-shot migration mode.
        migration = True
        searchStartDate = sys.argv[3]
        searchEndDate = sys.argv[4]
    cfg = ConfigParser.ConfigParser()
    cfg.read(cfgfile)
    # Rotating log file named after the module and config section.
    logPath = cfg.get("GENERAL", "LOG_PATH")
    logFile = os.path.join(logPath, "%s_%s.log" % (module, section))
    logCfgPath = cfg.get("GENERAL", "LOG_CONF")
    logCfg = ConfigParser.ConfigParser()
    logCfg.read(logCfgPath)
    Log.Init(Log.CRotatingLog(logFile, logCfg.get("LOG", "MAX_SIZE"), logCfg.get("LOG", "MAX_CNT") ))
    # Module-level handle so signal handlers / shutdown paths can reach it.
    global workInfoCollector
    workInfoCollector = WorkInfoCollector(cfg)
    workInfoCollector.run(searchStartDate, searchEndDate, migration)
    __LOG__.Trace('main is terminated')
if __name__ == '__main__' :
try :
main()
except :
__LOG__.Exception() | 0.138404 | 0.062417 |
import pytest
from brownie import Contract, Wei, reverts
from fixedint import *
import shared
def test_Demand_Curve_Setting(loanToken, loanTokenSettings, LoanTokenSettingsLowerAdmin, accounts, LoanToken, LoanTokenLogicStandard):
    """Set the interest demand curve via the settings logic target and verify
    the stored values are readable and yield a borrow rate above baseRate."""
    baseRate = 1e18
    rateMultiplier = 20.25e18
    targetLevel=80*10**18
    kinkLevel=90*10**18
    maxScaleRate=100*10**18
    # Point the proxy at the settings logic so setDemandCurve is callable.
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    localLoanToken.setTarget(loanTokenSettings.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
    localLoanToken.setDemandCurve(baseRate, rateMultiplier, baseRate, rateMultiplier, targetLevel, kinkLevel, maxScaleRate)
    assert(loanToken.baseRate() == baseRate)
    assert(loanToken.rateMultiplier() == rateMultiplier)
    assert(loanToken.lowUtilBaseRate() == baseRate)
    assert(loanToken.lowUtilRateMultiplier() == rateMultiplier)
    # Switch the proxy back to standard logic to read the borrow rate.
    loanTokenLogic = accounts[0].deploy(LoanTokenLogicStandard)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    localLoanToken.setTarget(loanTokenLogic.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenLogicStandard.abi, owner=accounts[0])
    borrowInterestRate = loanToken.borrowInterestRate()
    print("borrowInterestRate: ", borrowInterestRate)
    assert(borrowInterestRate > 1e18)
def test_Demand_Curve_Setting_should_fail_if_rateMultiplier_plus_baseRate_is_grater_than_100_percent(
        loanToken, loanTokenSettings, LoanTokenSettingsLowerAdmin, accounts, LoanToken, LoanTokenLogicStandard):
    """setDemandCurve must revert when baseRate + rateMultiplier exceeds 100%
    (1e20), for both the normal and low-utilization parameter pairs."""
    # 51e18 + 50e18 = 101e18 > 1e20, so either pair should be rejected.
    incorrect_baseRate = 51e18
    incorrect_rateMultiplier = 50e18
    baseRate = 1e18
    rateMultiplier = 20.25e18
    targetLevel=80*10**18
    kinkLevel=90*10**18
    maxScaleRate=100*10**18
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    localLoanToken.setTarget(loanTokenSettings.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
    # Invalid normal-utilization pair.
    with reverts():
        localLoanToken.setDemandCurve(incorrect_baseRate, incorrect_rateMultiplier, baseRate, rateMultiplier,
                                      targetLevel, kinkLevel, maxScaleRate)
    # Invalid low-utilization pair.
    with reverts():
        localLoanToken.setDemandCurve(baseRate, rateMultiplier, incorrect_baseRate, incorrect_rateMultiplier,
                                      targetLevel, kinkLevel, maxScaleRate)
def test_lending_fee_setting(sovryn):
    """Setting the lending fee percent should be reflected by the getter."""
    # The transaction receipt is not needed (the previous unused `tx` local
    # was removed).
    sovryn.setLendingFeePercent(1e20)
    lfp = sovryn.lendingFeePercent()
    assert(lfp == 1e20)
'''
1. pause a function
2. try to call the function - should fail
3. reactivate it
4. try to call the function - should succeed
'''
def test_toggle_function_pause(accounts, loanToken, LoanToken, LoanTokenSettingsLowerAdmin, LoanTokenLogicStandard, loanTokenSettings, SUSD, open_margin_trade_position, lend_to_pool):
    """Pause marginTrade, verify it reverts and checkPause reports True, then
    unpause and verify the trade succeeds and checkPause reports False."""
    lend_to_pool()
    functionSignature = "marginTrade(bytes32,uint256,uint256,uint256,address,address,bytes)"
    # pause the given function (requires the settings logic target)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    localLoanToken.setTarget(loanTokenSettings.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
    localLoanToken.toggleFunctionPause(functionSignature, True)
    # make sure the function can't be called anymore
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    loanTokenLogic = accounts[0].deploy(LoanTokenLogicStandard)
    localLoanToken.setTarget(loanTokenLogic.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenLogicStandard.abi, owner=accounts[0])
    with reverts("unauthorized"):
        open_margin_trade_position()
    # check if checkPause returns true
    assert(localLoanToken.checkPause(functionSignature))
    # reactivate the given function (settings logic target again)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    localLoanToken.setTarget(loanTokenSettings.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
    localLoanToken.toggleFunctionPause(functionSignature, False)
    # make sure the function can be called again
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
    localLoanToken.setTarget(loanTokenLogic.address)
    localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenLogicStandard.abi, owner=accounts[0])
    open_margin_trade_position()
    # check if checkPause returns false
    assert(not localLoanToken.checkPause(functionSignature))
'''
call toggleFunction with a non-admin address and make sure it fails
'''
def test_toggle_function_pause_with_non_admin_should_fail(loanToken, LoanTokenSettingsLowerAdmin, loanTokenSettings, LoanToken, accounts):
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenSettings.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
with reverts("unauthorized"):
localLoanToken.toggleFunctionPause("mint(address,uint256)", True, {'from':accounts[1]}) | tests/loanToken/administration/test_administration.py | import pytest
from brownie import Contract, Wei, reverts
from fixedint import *
import shared
def test_Demand_Curve_Setting(loanToken, loanTokenSettings, LoanTokenSettingsLowerAdmin, accounts, LoanToken, LoanTokenLogicStandard):
baseRate = 1e18
rateMultiplier = 20.25e18
targetLevel=80*10**18
kinkLevel=90*10**18
maxScaleRate=100*10**18
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenSettings.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
localLoanToken.setDemandCurve(baseRate, rateMultiplier, baseRate, rateMultiplier, targetLevel, kinkLevel, maxScaleRate)
assert(loanToken.baseRate() == baseRate)
assert(loanToken.rateMultiplier() == rateMultiplier)
assert(loanToken.lowUtilBaseRate() == baseRate)
assert(loanToken.lowUtilRateMultiplier() == rateMultiplier)
loanTokenLogic = accounts[0].deploy(LoanTokenLogicStandard)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenLogic.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenLogicStandard.abi, owner=accounts[0])
borrowInterestRate = loanToken.borrowInterestRate()
print("borrowInterestRate: ", borrowInterestRate)
assert(borrowInterestRate > 1e18)
def test_Demand_Curve_Setting_should_fail_if_rateMultiplier_plus_baseRate_is_grater_than_100_percent(
loanToken, loanTokenSettings, LoanTokenSettingsLowerAdmin, accounts, LoanToken, LoanTokenLogicStandard):
incorrect_baseRate = 51e18
incorrect_rateMultiplier = 50e18
baseRate = 1e18
rateMultiplier = 20.25e18
targetLevel=80*10**18
kinkLevel=90*10**18
maxScaleRate=100*10**18
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenSettings.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
with reverts():
localLoanToken.setDemandCurve(incorrect_baseRate, incorrect_rateMultiplier, baseRate, rateMultiplier,
targetLevel, kinkLevel, maxScaleRate)
with reverts():
localLoanToken.setDemandCurve(baseRate, rateMultiplier, incorrect_baseRate, incorrect_rateMultiplier,
targetLevel, kinkLevel, maxScaleRate)
def test_lending_fee_setting(sovryn):
tx = sovryn.setLendingFeePercent(1e20)
lfp = sovryn.lendingFeePercent()
assert(lfp == 1e20)
'''
1. pause a function
2. try to call the function - should fail
3. reactivate it
4. try to call the function - should succeed
'''
def test_toggle_function_pause(accounts, loanToken, LoanToken, LoanTokenSettingsLowerAdmin, LoanTokenLogicStandard, loanTokenSettings, SUSD, open_margin_trade_position, lend_to_pool):
lend_to_pool()
functionSignature = "marginTrade(bytes32,uint256,uint256,uint256,address,address,bytes)"
# pause the given function
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenSettings.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
localLoanToken.toggleFunctionPause(functionSignature, True)
# make sure the function can't be called anymore
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
loanTokenLogic = accounts[0].deploy(LoanTokenLogicStandard)
localLoanToken.setTarget(loanTokenLogic.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenLogicStandard.abi, owner=accounts[0])
with reverts("unauthorized"):
open_margin_trade_position()
#check if checkPause returns true
assert(localLoanToken.checkPause(functionSignature))
# reactivate the given function
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenSettings.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
localLoanToken.toggleFunctionPause(functionSignature, False)
#make sure the function can be called again
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenLogic.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenLogicStandard.abi, owner=accounts[0])
open_margin_trade_position()
#check if checkPause returns false
assert(not localLoanToken.checkPause(functionSignature))
'''
call toggleFunction with a non-admin address and make sure it fails
'''
def test_toggle_function_pause_with_non_admin_should_fail(loanToken, LoanTokenSettingsLowerAdmin, loanTokenSettings, LoanToken, accounts):
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanToken.abi, owner=accounts[0])
localLoanToken.setTarget(loanTokenSettings.address)
localLoanToken = Contract.from_abi("loanToken", address=loanToken.address, abi=LoanTokenSettingsLowerAdmin.abi, owner=accounts[0])
with reverts("unauthorized"):
localLoanToken.toggleFunctionPause("mint(address,uint256)", True, {'from':accounts[1]}) | 0.452536 | 0.409752 |
import sys,os
import argparse
import subprocess
MRFLOW_HOME = os.environ['MRFLOW_HOME']
sys.path.append(MRFLOW_HOME)
import dataset_parameters
def generate_paths(dataset, arg):
    """Generate input/output paths for either Sintel or KITTI.

    Parameters
    ----------
    dataset : str
        'sintel' or 'kitti'; any other value raises ValueError (the previous
        version fell through and crashed with UnboundLocalError).
    arg : str
        Frame token. Sintel: 'testtrain,pass,seq,frame'; KITTI:
        'testtrain,frame'.

    Returns
    -------
    (paths, paths_images) where `paths` maps mrflow CLI flags to file paths
    (GT entries only for the training split) and `paths_images` is
    [prev, current, next] image paths.

    Requires the following environment variables to be set:
    - SINTEL_HOME / MRFLOW_SINTEL_INIT (for 'sintel')
    - KITTI_HOME / MRFLOW_KITTI_INIT (for 'kitti')
    """
    if dataset not in ('sintel', 'kitti'):
        raise ValueError('Unknown dataset: {}'.format(dataset))

    if dataset == 'sintel':
        testtrain, pas, seq, frame = arg.split(',')
        frame = int(frame)
        print('Calling Sintel preparation with')
        print('\t TESTTRAIN = {}'.format(testtrain))
        print('\t PASS = {}'.format(pas))
        print('\t SEQ = {}'.format(seq))
        print('\t FRAME = {}'.format(frame))
        # Build paths for Sintel
        SINTEL_HOME = os.environ['SINTEL_HOME']
        PPPFLOW_SINTEL_INIT = os.environ['MRFLOW_SINTEL_INIT']
        path_image_prev = os.path.join(SINTEL_HOME, testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame-1))
        path_image_current = os.path.join(SINTEL_HOME, testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame))
        path_image_next = os.path.join(SINTEL_HOME, testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame+1))
        # Flow from reference frame to adjacent frames
        path_flow_fwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_fwd.flo'.format(frame))
        path_flow_bwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_bwd.flo'.format(frame))
        # Flow from adjacent frames back to reference frame
        path_backflow_fwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_bwd.flo'.format(frame+1))
        path_backflow_bwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_fwd.flo'.format(frame-1))
        # Estimated rigidity
        path_rigidity = os.path.join(PPPFLOW_SINTEL_INIT, 'rigidity', testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame))
        # Add GT regions if we are in training pass
        if testtrain == 'training':
            path_flow_fwd_gt = os.path.join(SINTEL_HOME, testtrain, 'flow', seq, 'frame_{0:04d}.flo'.format(frame))
            path_rigidity_gt = os.path.join(SINTEL_HOME, testtrain, 'rigidity', seq, 'frame_{0:04d}.png'.format(frame))
        else:
            path_flow_fwd_gt = ''
            path_rigidity_gt = ''
    else:
        testtrain, frame = arg.split(',')
        frame = int(frame)
        print('Calling KITTI preparation with')
        print('\t TESTTRAIN = {}'.format(testtrain))
        print('\t FRAME = {}'.format(frame))
        # Build paths for KITTI
        KITTI_HOME = os.environ['KITTI_HOME']
        PPPFLOW_KITTI_INIT = os.environ['MRFLOW_KITTI_INIT']
        # Hack for on-disk layout: the directory is 'testing', not 'test'.
        if testtrain == 'training':
            testtrain_ = 'training'
        else:
            testtrain_ = 'testing'
        path_image_prev = os.path.join(KITTI_HOME, testtrain_, 'image_2', '{0:06d}_09.png'.format(frame))
        path_image_current = os.path.join(KITTI_HOME, testtrain_, 'image_2', '{0:06d}_10.png'.format(frame))
        path_image_next = os.path.join(KITTI_HOME, testtrain_, 'image_2', '{0:06d}_11.png'.format(frame))
        # Flow from reference frame to adjacent frames
        path_flow_fwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_10_fwd.flo'.format(frame))
        path_flow_bwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_10_bwd.flo'.format(frame))
        # Flow from adjacent frames back to reference frame
        path_backflow_bwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_09_fwd.flo'.format(frame))
        path_backflow_fwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_11_bwd.flo'.format(frame))
        # Estimated rigidity
        path_rigidity = os.path.join(PPPFLOW_KITTI_INIT, 'rigidity', testtrain, '{0:06d}_10.png'.format(frame))
        # Add GT regions if we are in training pass
        if testtrain == 'training':
            path_flow_fwd_gt = os.path.join(KITTI_HOME, testtrain, 'flow_occ', '{0:06d}_10.png'.format(frame))
            path_rigidity_gt = os.path.join(KITTI_HOME, testtrain, 'rigidity_generated', '{0:06d}_10.png'.format(frame))
        else:
            path_flow_fwd_gt = ''
            path_rigidity_gt = ''
    paths = {
        '--flow_fwd': path_flow_fwd,
        '--flow_bwd': path_flow_bwd,
        '--backflow_fwd': path_backflow_fwd,
        '--backflow_bwd': path_backflow_bwd,
        '--rigidity': path_rigidity
    }
    if path_flow_fwd_gt:
        paths['--flow_fwd_gt'] = path_flow_fwd_gt
    if path_rigidity_gt:
        paths['--rigidity_gt'] = path_rigidity_gt
    paths_images = [path_image_prev, path_image_current, path_image_next]
    return paths, paths_images
def main():
    """Parse the frame token, build mrflow arguments for the chosen dataset,
    and invoke mrflow.py as a subprocess."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, help='Dataset to use (sintel/kitti)')
    parser.add_argument('token', type=str, help='Token determining the frame.\n For KITTI, please give as {training/test},frame.\n For Sintel, give as {training/test},pass,seq,frame.')
    # Anything after dataset/token is forwarded verbatim to mrflow.py.
    parser.add_argument('args', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    paths,paths_images = generate_paths(args.dataset,args.token)
    if args.dataset == 'kitti':
        testtrain, frame = args.token.split(',')
        params_default = dataset_parameters.kitti_parameters
        params_default['tempdir'] = os.path.join('data_seqs', testtrain, '{0:06d}'.format(int(frame)))
    elif args.dataset == 'sintel':
        testtrain, pas, seq, frame = args.token.split(',')
        params_default = dataset_parameters.sintel_parameters
        params_default['tempdir'] = os.path.join('data_seqs', testtrain, pas, seq, 'frame_{0:04d}'.format(int(frame)))
    # If tempdir does not exist yet, create it.
    if not os.path.isdir(params_default['tempdir']):
        os.makedirs(params_default['tempdir'])
    # Set up params to call mr-flow with: dataset defaults first, then
    # generated paths, then user-supplied remainder flags (highest priority).
    args_mrflow = {}
    for k,v in params_default.items():
        args_mrflow['--' + k] = str(v)
    for k,v in paths.items():
        args_mrflow[k] = v
    # Remainder args are consumed as (flag, value) pairs.
    remainder_args = zip(args.args[::2],args.args[1::2])
    for k,v in remainder_args:
        args_mrflow[k] = v
    # Flatten the flag->value dict into an argv-style list.
    args_mrflow_array = []
    for k,v in args_mrflow.items():
        args_mrflow_array.append(k)
        args_mrflow_array.append(v)
    # Positional image arguments come last: prev, current, next.
    args_mrflow_array.append(paths_images[0])
    args_mrflow_array.append(paths_images[1])
    args_mrflow_array.append(paths_images[2])
    print('Calling MR-Flow with arguments: ')
    for k,v in zip(args_mrflow_array[::2],args_mrflow_array[1::2]):
        print('\t{}\t:\t{}'.format(k,v))
    print('')
    subprocess.call(['python', 'mrflow.py',] + args_mrflow_array)
if __name__ == '__main__':
main() | mrflow_dataset.py |
import sys,os
import argparse
import subprocess
MRFLOW_HOME = os.environ['MRFLOW_HOME']
sys.path.append(MRFLOW_HOME)
import dataset_parameters
def generate_paths(dataset, arg):
""" Generate paths for either Sintel or Kitti
Requires the following environment variables to be set:
- SINTEL_HOME
- KITTI_HOME
- MRFLOW_SINTEL_INIT
- MRFLOW_KITTI_INIT
"""
if dataset == 'sintel':
testtrain, pas, seq, frame = arg.split(',')
frame = int(frame)
print('Calling Sintel preparation with')
print('\t TESTTRAIN = {}'.format(testtrain))
print('\t PASS = {}'.format(pas))
print('\t SEQ = {}'.format(seq))
print('\t FRAME = {}'.format(frame))
# Build paths for Sintel
SINTEL_HOME = os.environ['SINTEL_HOME']
PPPFLOW_SINTEL_INIT = os.environ['MRFLOW_SINTEL_INIT']
path_image_prev = os.path.join(SINTEL_HOME, testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame-1))
path_image_current = os.path.join(SINTEL_HOME, testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame))
path_image_next = os.path.join(SINTEL_HOME, testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame+1))
# Flow from reference frame to adjacent frames
path_flow_fwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_fwd.flo'.format(frame))
path_flow_bwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_bwd.flo'.format(frame))
# Flow from adjacent frames back to reference frame
path_backflow_fwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_bwd.flo'.format(frame+1))
path_backflow_bwd = os.path.join(PPPFLOW_SINTEL_INIT, 'flow', testtrain, pas, seq, 'frame_{0:04d}_fwd.flo'.format(frame-1))
# Estimated rigidity
path_rigidity = os.path.join(PPPFLOW_SINTEL_INIT, 'rigidity', testtrain, pas, seq, 'frame_{0:04d}.png'.format(frame))
# Add GT regions if we are in training pass
if testtrain == 'training':
path_flow_fwd_gt = os.path.join(SINTEL_HOME, testtrain, 'flow', seq, 'frame_{0:04d}.flo'.format(frame))
path_rigidity_gt = os.path.join(SINTEL_HOME, testtrain, 'rigidity', seq, 'frame_{0:04d}.png'.format(frame))
else:
path_flow_fwd_gt = ''
path_rigidity_gt = ''
elif dataset == 'kitti':
testtrain, frame = arg.split(',')
frame = int(frame)
# KITTI
print('Calling KITTI preparation with')
print('\t TESTTRAIN = {}'.format(testtrain))
print('\t FRAME = {}'.format(frame))
# Build paths for Sintel
KITTI_HOME = os.environ['KITTI_HOME']
PPPFLOW_KITTI_INIT = os.environ['MRFLOW_KITTI_INIT']
# Hack for file layout
if testtrain == 'training':
testtrain_ = 'training'
else:
testtrain_ = 'testing'
path_image_prev = os.path.join(KITTI_HOME, testtrain_, 'image_2', '{0:06d}_09.png'.format(frame))
path_image_current = os.path.join(KITTI_HOME, testtrain_, 'image_2', '{0:06d}_10.png'.format(frame))
path_image_next = os.path.join(KITTI_HOME, testtrain_, 'image_2', '{0:06d}_11.png'.format(frame))
# Flow from reference frame to adjacent frames
path_flow_fwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_10_fwd.flo'.format(frame))
path_flow_bwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_10_bwd.flo'.format(frame))
# Flow from adjacent frames back to reference frame
path_backflow_bwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_09_fwd.flo'.format(frame))
path_backflow_fwd = os.path.join(PPPFLOW_KITTI_INIT, 'flow', testtrain, '{0:06d}_11_bwd.flo'.format(frame))
# Estimated rigidity
path_rigidity = os.path.join(PPPFLOW_KITTI_INIT, 'rigidity', testtrain, '{0:06d}_10.png'.format(frame))
# Add GT regions if we are in training pass
if testtrain == 'training':
path_flow_fwd_gt = os.path.join(KITTI_HOME, testtrain, 'flow_occ', '{0:06d}_10.png'.format(frame))
path_rigidity_gt = os.path.join(KITTI_HOME, testtrain, 'rigidity_generated', '{0:06d}_10.png'.format(frame))
else:
path_flow_fwd_gt = ''
path_rigidity_gt = ''
paths = {
'--flow_fwd': path_flow_fwd,
'--flow_bwd': path_flow_bwd,
'--backflow_fwd': path_backflow_fwd,
'--backflow_bwd': path_backflow_bwd,
'--rigidity': path_rigidity
}
if path_flow_fwd_gt:
paths['--flow_fwd_gt'] = path_flow_fwd_gt
if path_rigidity_gt:
paths['--rigidity_gt'] = path_rigidity_gt
paths_images = [path_image_prev, path_image_current, path_image_next]
return paths,paths_images
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, help='Dataset to use (sintel/kitti)')
parser.add_argument('token', type=str, help='Token determining the frame.\n For KITTI, please give as {training/test},frame.\n For Sintel, give as {training/test},pass,seq,frame.')
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args()
paths,paths_images = generate_paths(args.dataset,args.token)
if args.dataset == 'kitti':
testtrain, frame = args.token.split(',')
params_default = dataset_parameters.kitti_parameters
params_default['tempdir'] = os.path.join('data_seqs', testtrain, '{0:06d}'.format(int(frame)))
elif args.dataset == 'sintel':
testtrain, pas, seq, frame = args.token.split(',')
params_default = dataset_parameters.sintel_parameters
params_default['tempdir'] = os.path.join('data_seqs', testtrain, pas, seq, 'frame_{0:04d}'.format(int(frame)))
# If tempdir does not exist yet, create it.
if not os.path.isdir(params_default['tempdir']):
os.makedirs(params_default['tempdir'])
# Set up params to call mr-flow with
args_mrflow = {}
for k,v in params_default.items():
args_mrflow['--' + k] = str(v)
for k,v in paths.items():
args_mrflow[k] = v
remainder_args = zip(args.args[::2],args.args[1::2])
for k,v in remainder_args:
args_mrflow[k] = v
args_mrflow_array = []
for k,v in args_mrflow.items():
args_mrflow_array.append(k)
args_mrflow_array.append(v)
args_mrflow_array.append(paths_images[0])
args_mrflow_array.append(paths_images[1])
args_mrflow_array.append(paths_images[2])
print('Calling MR-Flow with arguments: ')
for k,v in zip(args_mrflow_array[::2],args_mrflow_array[1::2]):
print('\t{}\t:\t{}'.format(k,v))
print('')
subprocess.call(['python', 'mrflow.py',] + args_mrflow_array)
if __name__ == '__main__':
main() | 0.340376 | 0.21595 |
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.vpn \
import forms as vpn_forms
from openstack_dashboard.dashboards.project.vpn import tabs as vpn_tabs
from openstack_dashboard.dashboards.project.vpn \
import workflows as vpn_workflows
import re
class IndexView(tabs.TabView):
    """Landing page for the VPN panel; renders the VPN tab group.

    POST requests carry table delete actions, which are dispatched to
    the matching neutron VPNaaS delete call.
    """
    tab_group_class = vpn_tabs.VPNTabs
    template_name = 'project/vpn/index.html'

    def post(self, request, *args, **kwargs):
        """Handle a bulk or row-level delete action from the tables."""
        obj_ids = request.POST.getlist('object_ids')
        action = request.POST['action']
        # Action names look like '<table>__delete<kind>[__<id>]'.
        m = re.search('.delete([a-z]+)', action).group(1)
        if obj_ids == []:
            # Row-level delete: the object id is the suffix of the action.
            obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
        # One (delete call, success message, error message) triple per
        # resource kind; replaces four copies of the same try/except loop.
        delete_actions = {
            'vpnservice': (api.vpn.vpnservice_delete,
                           _('Deleted VPN Service %s'),
                           _('Unable to delete VPN Service: %s')),
            'ikepolicy': (api.vpn.ikepolicy_delete,
                          _('Deleted IKE Policy %s'),
                          _('Unable to delete IKE Policy: %s')),
            'ipsecpolicy': (api.vpn.ipsecpolicy_delete,
                            _('Deleted IPSec Policy %s'),
                            _('Unable to delete IPSec Policy: %s')),
            'ipsecsiteconnection': (api.vpn.ipsecsiteconnection_delete,
                                    _('Deleted IPSec Site Connection %s'),
                                    _('Unable to delete IPSec Site Connection: %s')),
        }
        if m in delete_actions:
            delete, success_msg, error_msg = delete_actions[m]
            for obj_id in obj_ids:
                try:
                    delete(request, obj_id)
                    messages.success(request, success_msg % obj_id)
                except Exception as e:
                    exceptions.handle(request, error_msg % e)
        return self.get(request, *args, **kwargs)
class AddVPNServiceView(workflows.WorkflowView):
    """Modal workflow for creating a VPN service."""
    workflow_class = vpn_workflows.AddVPNService

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddVPNServiceView, self).get_initial()
        return initial


class AddIPSecSiteConnectionView(workflows.WorkflowView):
    """Modal workflow for creating an IPSec site connection."""
    workflow_class = vpn_workflows.AddIPSecSiteConnection

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddIPSecSiteConnectionView, self).get_initial()
        return initial


class AddIKEPolicyView(workflows.WorkflowView):
    """Modal workflow for creating an IKE policy."""
    workflow_class = vpn_workflows.AddIKEPolicy

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddIKEPolicyView, self).get_initial()
        return initial


class AddIPSecPolicyView(workflows.WorkflowView):
    """Modal workflow for creating an IPSec policy."""
    workflow_class = vpn_workflows.AddIPSecPolicy

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddIPSecPolicyView, self).get_initial()
        return initial
class IKEPolicyDetailsView(tabs.TabView):
    """Tabbed detail page for a single IKE policy."""
    tab_group_class = (vpn_tabs.IKEPolicyDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'


class IPSecPolicyDetailsView(tabs.TabView):
    """Tabbed detail page for a single IPSec policy."""
    tab_group_class = (vpn_tabs.IPSecPolicyDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'


class VPNServiceDetailsView(tabs.TabView):
    """Tabbed detail page for a single VPN service."""
    tab_group_class = (vpn_tabs.VPNServiceDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'


class IPSecSiteConnectionDetailsView(tabs.TabView):
    """Tabbed detail page for a single IPSec site connection."""
    tab_group_class = (vpn_tabs.IPSecSiteConnectionDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'
class UpdateVPNServiceView(forms.ModalFormView):
    """Modal form for editing an existing VPN service."""
    form_class = vpn_forms.UpdateVPNService
    template_name = "project/vpn/update_vpnservice.html"
    context_object_name = 'vpnservice'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the service id to the template context."""
        context = super(UpdateVPNServiceView, self).get_context_data(**kwargs)
        context["vpnservice_id"] = self.kwargs['vpnservice_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the VPN service once per request.

        On API failure, exceptions.handle() is given a redirect, so this
        presumably redirects instead of returning None — confirm against
        Horizon's exception handling.
        """
        vpnservice_id = self.kwargs['vpnservice_id']
        try:
            return api.vpn.vpnservice_get(self.request, vpnservice_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve VPN Service details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the service's current values."""
        vpnservice = self._get_object()
        return {'name': vpnservice['name'],
                'vpnservice_id': vpnservice['id'],
                'description': vpnservice['description'],
                'admin_state_up': vpnservice['admin_state_up']}


class UpdateIKEPolicyView(forms.ModalFormView):
    """Modal form for editing an existing IKE policy."""
    form_class = vpn_forms.UpdateIKEPolicy
    template_name = "project/vpn/update_ikepolicy.html"
    context_object_name = 'ikepolicy'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id to the template context."""
        context = super(UpdateIKEPolicyView, self).get_context_data(**kwargs)
        context["ikepolicy_id"] = self.kwargs['ikepolicy_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the IKE policy once per request; see UpdateVPNServiceView."""
        ikepolicy_id = self.kwargs['ikepolicy_id']
        try:
            return api.vpn.ikepolicy_get(self.request, ikepolicy_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve IKE Policy details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the policy's current values."""
        ikepolicy = self._get_object()
        return {'name': ikepolicy['name'],
                'ikepolicy_id': ikepolicy['id'],
                'description': ikepolicy['description'],
                'auth_algorithm': ikepolicy['auth_algorithm'],
                'encryption_algorithm': ikepolicy['encryption_algorithm'],
                'ike_version': ikepolicy['ike_version'],
                'lifetime_units': ikepolicy['lifetime']['units'],
                'lifetime_value': ikepolicy['lifetime']['value'],
                'pfs': ikepolicy['pfs'],
                'phase1_negotiation_mode': ikepolicy[
                    'phase1_negotiation_mode']}


class UpdateIPSecPolicyView(forms.ModalFormView):
    """Modal form for editing an existing IPSec policy."""
    form_class = vpn_forms.UpdateIPSecPolicy
    template_name = "project/vpn/update_ipsecpolicy.html"
    context_object_name = 'ipsecpolicy'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id to the template context."""
        context = super(UpdateIPSecPolicyView, self).get_context_data(**kwargs)
        context["ipsecpolicy_id"] = self.kwargs['ipsecpolicy_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the IPSec policy once per request; see UpdateVPNServiceView."""
        ipsecpolicy_id = self.kwargs['ipsecpolicy_id']
        try:
            return api.vpn.ipsecpolicy_get(self.request, ipsecpolicy_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve IPSec Policy details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the policy's current values."""
        ipsecpolicy = self._get_object()
        return {'name': ipsecpolicy['name'],
                'ipsecpolicy_id': ipsecpolicy['id'],
                'description': ipsecpolicy['description'],
                'auth_algorithm': ipsecpolicy['auth_algorithm'],
                'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
                'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
                'lifetime_units': ipsecpolicy['lifetime']['units'],
                'lifetime_value': ipsecpolicy['lifetime']['value'],
                'pfs': ipsecpolicy['pfs'],
                'transform_protocol': ipsecpolicy['transform_protocol']}
class UpdateIPSecSiteConnectionView(forms.ModalFormView):
    """Modal form for editing an existing IPSec site connection."""
    form_class = vpn_forms.UpdateIPSecSiteConnection
    template_name = "project/vpn/update_ipsecsiteconnection.html"
    context_object_name = 'ipsecsiteconnection'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the connection id to the template context."""
        context = super(
            UpdateIPSecSiteConnectionView, self).get_context_data(**kwargs)
        context["ipsecsiteconnection_id"] = self.kwargs[
            'ipsecsiteconnection_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the connection once per request.

        On API failure, exceptions.handle() is given a redirect, so this
        presumably redirects instead of returning None — confirm against
        Horizon's exception handling.
        """
        connection_id = self.kwargs['ipsecsiteconnection_id']
        try:
            return api.vpn.ipsecsiteconnection_get(self.request, connection_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve IPSec Site Connection details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the connection's current values."""
        ipsecsiteconnection = self._get_object()
        return {'name': ipsecsiteconnection['name'],
                'ipsecsiteconnection_id': ipsecsiteconnection['id'],
                'description': ipsecsiteconnection['description'],
                'peer_address': ipsecsiteconnection['peer_address'],
                'peer_id': ipsecsiteconnection['peer_id'],
                'peer_cidrs': ", ".join(ipsecsiteconnection['peer_cidrs']),
                'psk': ipsecsiteconnection['psk'],
                'mtu': ipsecsiteconnection['mtu'],
                'dpd_action': ipsecsiteconnection['dpd']['action'],
                'dpd_interval': ipsecsiteconnection['dpd']['interval'],
                'dpd_timeout': ipsecsiteconnection['dpd']['timeout'],
                'initiator': ipsecsiteconnection['initiator'],
                'admin_state_up': ipsecsiteconnection['admin_state_up']}
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.vpn \
import forms as vpn_forms
from openstack_dashboard.dashboards.project.vpn import tabs as vpn_tabs
from openstack_dashboard.dashboards.project.vpn \
import workflows as vpn_workflows
import re
class IndexView(tabs.TabView):
    """Landing page for the VPN panel; renders the VPN tab group.

    POST requests carry table delete actions, which are dispatched to
    the matching neutron VPNaaS delete call.
    """
    tab_group_class = vpn_tabs.VPNTabs
    template_name = 'project/vpn/index.html'

    def post(self, request, *args, **kwargs):
        """Handle a bulk or row-level delete action from the tables."""
        obj_ids = request.POST.getlist('object_ids')
        action = request.POST['action']
        # Action names look like '<table>__delete<kind>[__<id>]'.
        m = re.search('.delete([a-z]+)', action).group(1)
        if obj_ids == []:
            # Row-level delete: the object id is the suffix of the action.
            obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
        # One (delete call, success message, error message) triple per
        # resource kind; replaces four copies of the same try/except loop.
        delete_actions = {
            'vpnservice': (api.vpn.vpnservice_delete,
                           _('Deleted VPN Service %s'),
                           _('Unable to delete VPN Service: %s')),
            'ikepolicy': (api.vpn.ikepolicy_delete,
                          _('Deleted IKE Policy %s'),
                          _('Unable to delete IKE Policy: %s')),
            'ipsecpolicy': (api.vpn.ipsecpolicy_delete,
                            _('Deleted IPSec Policy %s'),
                            _('Unable to delete IPSec Policy: %s')),
            'ipsecsiteconnection': (api.vpn.ipsecsiteconnection_delete,
                                    _('Deleted IPSec Site Connection %s'),
                                    _('Unable to delete IPSec Site Connection: %s')),
        }
        if m in delete_actions:
            delete, success_msg, error_msg = delete_actions[m]
            for obj_id in obj_ids:
                try:
                    delete(request, obj_id)
                    messages.success(request, success_msg % obj_id)
                except Exception as e:
                    exceptions.handle(request, error_msg % e)
        return self.get(request, *args, **kwargs)
class AddVPNServiceView(workflows.WorkflowView):
    """Modal workflow for creating a VPN service."""
    workflow_class = vpn_workflows.AddVPNService

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddVPNServiceView, self).get_initial()
        return initial


class AddIPSecSiteConnectionView(workflows.WorkflowView):
    """Modal workflow for creating an IPSec site connection."""
    workflow_class = vpn_workflows.AddIPSecSiteConnection

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddIPSecSiteConnectionView, self).get_initial()
        return initial


class AddIKEPolicyView(workflows.WorkflowView):
    """Modal workflow for creating an IKE policy."""
    workflow_class = vpn_workflows.AddIKEPolicy

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddIKEPolicyView, self).get_initial()
        return initial


class AddIPSecPolicyView(workflows.WorkflowView):
    """Modal workflow for creating an IPSec policy."""
    workflow_class = vpn_workflows.AddIPSecPolicy

    # NOTE(review): delegates unchanged to the parent; could be removed.
    def get_initial(self):
        initial = super(AddIPSecPolicyView, self).get_initial()
        return initial
class IKEPolicyDetailsView(tabs.TabView):
    """Tabbed detail page for a single IKE policy."""
    tab_group_class = (vpn_tabs.IKEPolicyDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'


class IPSecPolicyDetailsView(tabs.TabView):
    """Tabbed detail page for a single IPSec policy."""
    tab_group_class = (vpn_tabs.IPSecPolicyDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'


class VPNServiceDetailsView(tabs.TabView):
    """Tabbed detail page for a single VPN service."""
    tab_group_class = (vpn_tabs.VPNServiceDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'


class IPSecSiteConnectionDetailsView(tabs.TabView):
    """Tabbed detail page for a single IPSec site connection."""
    tab_group_class = (vpn_tabs.IPSecSiteConnectionDetailsTabs)
    template_name = 'project/vpn/details_tabs.html'
class UpdateVPNServiceView(forms.ModalFormView):
    """Modal form for editing an existing VPN service."""
    form_class = vpn_forms.UpdateVPNService
    template_name = "project/vpn/update_vpnservice.html"
    context_object_name = 'vpnservice'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the service id to the template context."""
        context = super(UpdateVPNServiceView, self).get_context_data(**kwargs)
        context["vpnservice_id"] = self.kwargs['vpnservice_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the VPN service once per request.

        On API failure, exceptions.handle() is given a redirect, so this
        presumably redirects instead of returning None — confirm against
        Horizon's exception handling.
        """
        vpnservice_id = self.kwargs['vpnservice_id']
        try:
            return api.vpn.vpnservice_get(self.request, vpnservice_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve VPN Service details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the service's current values."""
        vpnservice = self._get_object()
        return {'name': vpnservice['name'],
                'vpnservice_id': vpnservice['id'],
                'description': vpnservice['description'],
                'admin_state_up': vpnservice['admin_state_up']}


class UpdateIKEPolicyView(forms.ModalFormView):
    """Modal form for editing an existing IKE policy."""
    form_class = vpn_forms.UpdateIKEPolicy
    template_name = "project/vpn/update_ikepolicy.html"
    context_object_name = 'ikepolicy'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id to the template context."""
        context = super(UpdateIKEPolicyView, self).get_context_data(**kwargs)
        context["ikepolicy_id"] = self.kwargs['ikepolicy_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the IKE policy once per request; see UpdateVPNServiceView."""
        ikepolicy_id = self.kwargs['ikepolicy_id']
        try:
            return api.vpn.ikepolicy_get(self.request, ikepolicy_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve IKE Policy details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the policy's current values."""
        ikepolicy = self._get_object()
        return {'name': ikepolicy['name'],
                'ikepolicy_id': ikepolicy['id'],
                'description': ikepolicy['description'],
                'auth_algorithm': ikepolicy['auth_algorithm'],
                'encryption_algorithm': ikepolicy['encryption_algorithm'],
                'ike_version': ikepolicy['ike_version'],
                'lifetime_units': ikepolicy['lifetime']['units'],
                'lifetime_value': ikepolicy['lifetime']['value'],
                'pfs': ikepolicy['pfs'],
                'phase1_negotiation_mode': ikepolicy[
                    'phase1_negotiation_mode']}


class UpdateIPSecPolicyView(forms.ModalFormView):
    """Modal form for editing an existing IPSec policy."""
    form_class = vpn_forms.UpdateIPSecPolicy
    template_name = "project/vpn/update_ipsecpolicy.html"
    context_object_name = 'ipsecpolicy'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the policy id to the template context."""
        context = super(UpdateIPSecPolicyView, self).get_context_data(**kwargs)
        context["ipsecpolicy_id"] = self.kwargs['ipsecpolicy_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the IPSec policy once per request; see UpdateVPNServiceView."""
        ipsecpolicy_id = self.kwargs['ipsecpolicy_id']
        try:
            return api.vpn.ipsecpolicy_get(self.request, ipsecpolicy_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve IPSec Policy details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the policy's current values."""
        ipsecpolicy = self._get_object()
        return {'name': ipsecpolicy['name'],
                'ipsecpolicy_id': ipsecpolicy['id'],
                'description': ipsecpolicy['description'],
                'auth_algorithm': ipsecpolicy['auth_algorithm'],
                'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
                'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
                'lifetime_units': ipsecpolicy['lifetime']['units'],
                'lifetime_value': ipsecpolicy['lifetime']['value'],
                'pfs': ipsecpolicy['pfs'],
                'transform_protocol': ipsecpolicy['transform_protocol']}
class UpdateIPSecSiteConnectionView(forms.ModalFormView):
    """Modal form for editing an existing IPSec site connection."""
    form_class = vpn_forms.UpdateIPSecSiteConnection
    template_name = "project/vpn/update_ipsecsiteconnection.html"
    context_object_name = 'ipsecsiteconnection'
    success_url = reverse_lazy("horizon:project:vpn:index")

    def get_context_data(self, **kwargs):
        """Expose the connection id to the template context."""
        context = super(
            UpdateIPSecSiteConnectionView, self).get_context_data(**kwargs)
        context["ipsecsiteconnection_id"] = self.kwargs[
            'ipsecsiteconnection_id']
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the connection once per request.

        On API failure, exceptions.handle() is given a redirect, so this
        presumably redirects instead of returning None — confirm against
        Horizon's exception handling.
        """
        connection_id = self.kwargs['ipsecsiteconnection_id']
        try:
            return api.vpn.ipsecsiteconnection_get(self.request, connection_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve IPSec Site Connection details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the connection's current values."""
        ipsecsiteconnection = self._get_object()
        return {'name': ipsecsiteconnection['name'],
                'ipsecsiteconnection_id': ipsecsiteconnection['id'],
                'description': ipsecsiteconnection['description'],
                'peer_address': ipsecsiteconnection['peer_address'],
                'peer_id': ipsecsiteconnection['peer_id'],
                'peer_cidrs': ", ".join(ipsecsiteconnection['peer_cidrs']),
                'psk': ipsecsiteconnection['psk'],
                'mtu': ipsecsiteconnection['mtu'],
                'dpd_action': ipsecsiteconnection['dpd']['action'],
                'dpd_interval': ipsecsiteconnection['dpd']['interval'],
                'dpd_timeout': ipsecsiteconnection['dpd']['timeout'],
                'initiator': ipsecsiteconnection['initiator'],
                'admin_state_up': ipsecsiteconnection['admin_state_up']}
import inspect
import os
import subprocess # noqa: S404
import sys
from dataclasses import dataclass
from pathlib import Path
from textwrap import dedent
from types import ModuleType
from typing import Any
from typing import Callable
from typing import Iterable
from typing import List
from typing import TYPE_CHECKING
import pytest
import tomlkit.api # https://github.com/sdispater/tomlkit/issues/128
from packaging.utils import canonicalize_name
if TYPE_CHECKING:
CompletedProcess = subprocess.CompletedProcess[str]
else:
from subprocess import CompletedProcess # noqa: S404
@dataclass(frozen=True)
class Package:
    """Python package."""

    # Distribution name.
    name: str
    # Version string; sometimes abused to hold a URL or path (see
    # get_dependency / list_packages).
    version: str
@dataclass
class Project:
    """Poetry project."""

    # Root directory (contains pyproject.toml and poetry.lock).
    path: Path

    def _read_toml(self, filename: str) -> Any:
        """Parse a TOML file in the project directory with tomlkit."""
        path = self.path / filename
        text = path.read_text()
        return tomlkit.api.parse(text)

    def _get_config(self, key: str) -> Any:
        """Return a key from the [tool.poetry] table of pyproject.toml."""
        data: Any = self._read_toml("pyproject.toml")
        return data["tool"]["poetry"][key]

    def get_dependency(self, name: str) -> Package:
        """Return the locked package with the given name.

        Raises ValueError if the package is not in poetry.lock.
        """
        data = self._read_toml("poetry.lock")
        for package in data["package"]:
            if package["name"] == name:
                url = package.get("source", {}).get("url")
                if url is not None:
                    # Abuse Package.version to store the URL (for ``list_packages``).
                    return Package(name, url)
                return Package(name, package["version"])
        raise ValueError(f"{name}: package not found")

    @property
    def package(self) -> Package:
        """Return the project's own package (name and version)."""
        name: str = self._get_config("name")
        version: str = self._get_config("version")
        return Package(name, version)

    @property
    def dependencies(self) -> List[Package]:
        """Return the package dependencies."""
        data = self._read_toml("poetry.lock")
        # Main-category, non-optional entries from poetry.lock.
        dependencies: List[str] = [
            package["name"]
            for package in data["package"]
            if package["category"] == "main" and not package["optional"]
        ]
        return [self.get_dependency(package) for package in dependencies]

    @property
    def development_dependencies(self) -> List[Package]:
        """Return the development dependencies."""
        dependencies: List[str] = list(self._get_config("dev-dependencies"))
        return [self.get_dependency(package) for package in dependencies]
@pytest.fixture
def project(datadir: Path) -> Project:
    """Return an example Poetry project."""
    # ``datadir`` is supplied by the test data-directory fixture.
    return Project(datadir / "example")
def _run_nox(project: Project, *nox_args: str) -> CompletedProcess:
    """Run Nox in the project directory and return the completed process.

    NOXSESSION is stripped from the environment so the caller's shell
    cannot override the noxfile's own session selection.

    Raises:
        RuntimeError: if Nox exits nonzero; captured stderr is included.
    """
    env = os.environ.copy()
    env.pop("NOXSESSION", None)
    try:
        return subprocess.run(  # noqa: S603, S607
            ["nox", *nox_args],
            check=True,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=project.path,
            env=env,
        )
    except subprocess.CalledProcessError as error:
        # Chain the original error so the exit status stays in the traceback.
        raise RuntimeError(f"{error}\n{error.stderr}") from error
SessionFunction = Callable[..., Any]
def _write_noxfile(
project: Project,
sessions: Iterable[SessionFunction],
imports: Iterable[ModuleType],
) -> None:
header = "\n".join(f"import {module.__name__}" for module in imports)
stanzas = [dedent(inspect.getsource(session)) for session in sessions]
text = "\n\n".join([header, *stanzas])
path = project.path / "noxfile.py"
path.write_text(text)
def run_nox_with_noxfile(
    project: Project,
    sessions: Iterable[SessionFunction],
    imports: Iterable[ModuleType],
    *nox_args: str,
) -> CompletedProcess:
    """Write a noxfile and run Nox in the project.

    The noxfile is generated from the source of ``sessions`` plus import
    statements for ``imports``; extra ``nox_args`` go to the Nox CLI.
    """
    _write_noxfile(project, sessions, imports)
    return _run_nox(project, *nox_args)
def list_packages(project: Project, session: SessionFunction) -> List[Package]:
    """List the installed packages for a session in the given project.

    Runs ``pip freeze`` from the session's virtualenv under ``.nox`` and
    parses each output line into a Package.
    """
    bindir = "Scripts" if sys.platform == "win32" else "bin"
    pip = project.path / ".nox" / session.__name__ / bindir / "pip"
    process = subprocess.run(  # noqa: S603
        [str(pip), "freeze"],
        check=True,
        universal_newlines=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    def parse(line: str) -> Package:
        name, _, version = line.partition("==")
        if not version and " @ " in line:
            # Abuse Package.version to store the URL or path.
            name, _, version = line.partition(" @ ")
        if name == project.package.name:
            # But use the known version for the local package.
            return project.package
        name = canonicalize_name(name)
        return Package(name, version)

    return [parse(line) for line in process.stdout.splitlines()]
import os
import subprocess # noqa: S404
import sys
from dataclasses import dataclass
from pathlib import Path
from textwrap import dedent
from types import ModuleType
from typing import Any
from typing import Callable
from typing import Iterable
from typing import List
from typing import TYPE_CHECKING
import pytest
import tomlkit.api # https://github.com/sdispater/tomlkit/issues/128
from packaging.utils import canonicalize_name
if TYPE_CHECKING:
CompletedProcess = subprocess.CompletedProcess[str]
else:
from subprocess import CompletedProcess # noqa: S404
@dataclass(frozen=True)
class Package:
    """Python package."""

    # Distribution name.
    name: str
    # Version string; sometimes abused to hold a URL or path (see
    # get_dependency / list_packages).
    version: str
@dataclass
class Project:
    """Poetry project."""

    # Root directory (contains pyproject.toml and poetry.lock).
    path: Path

    def _read_toml(self, filename: str) -> Any:
        """Parse a TOML file in the project directory with tomlkit."""
        path = self.path / filename
        text = path.read_text()
        return tomlkit.api.parse(text)

    def _get_config(self, key: str) -> Any:
        """Return a key from the [tool.poetry] table of pyproject.toml."""
        data: Any = self._read_toml("pyproject.toml")
        return data["tool"]["poetry"][key]

    def get_dependency(self, name: str) -> Package:
        """Return the locked package with the given name.

        Raises ValueError if the package is not in poetry.lock.
        """
        data = self._read_toml("poetry.lock")
        for package in data["package"]:
            if package["name"] == name:
                url = package.get("source", {}).get("url")
                if url is not None:
                    # Abuse Package.version to store the URL (for ``list_packages``).
                    return Package(name, url)
                return Package(name, package["version"])
        raise ValueError(f"{name}: package not found")

    @property
    def package(self) -> Package:
        """Return the project's own package (name and version)."""
        name: str = self._get_config("name")
        version: str = self._get_config("version")
        return Package(name, version)

    @property
    def dependencies(self) -> List[Package]:
        """Return the package dependencies."""
        data = self._read_toml("poetry.lock")
        # Main-category, non-optional entries from poetry.lock.
        dependencies: List[str] = [
            package["name"]
            for package in data["package"]
            if package["category"] == "main" and not package["optional"]
        ]
        return [self.get_dependency(package) for package in dependencies]

    @property
    def development_dependencies(self) -> List[Package]:
        """Return the development dependencies."""
        dependencies: List[str] = list(self._get_config("dev-dependencies"))
        return [self.get_dependency(package) for package in dependencies]
@pytest.fixture
def project(datadir: Path) -> Project:
    """Return an example Poetry project."""
    # ``datadir`` is supplied by the test data-directory fixture.
    return Project(datadir / "example")
def _run_nox(project: Project, *nox_args: str) -> CompletedProcess:
    """Run Nox in the project directory and return the completed process.

    NOXSESSION is stripped from the environment so the caller's shell
    cannot override the noxfile's own session selection.

    Raises:
        RuntimeError: if Nox exits nonzero; captured stderr is included.
    """
    env = os.environ.copy()
    env.pop("NOXSESSION", None)
    try:
        return subprocess.run(  # noqa: S603, S607
            ["nox", *nox_args],
            check=True,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=project.path,
            env=env,
        )
    except subprocess.CalledProcessError as error:
        # Chain the original error so the exit status stays in the traceback.
        raise RuntimeError(f"{error}\n{error.stderr}") from error
SessionFunction = Callable[..., Any]
def _write_noxfile(
project: Project,
sessions: Iterable[SessionFunction],
imports: Iterable[ModuleType],
) -> None:
header = "\n".join(f"import {module.__name__}" for module in imports)
stanzas = [dedent(inspect.getsource(session)) for session in sessions]
text = "\n\n".join([header, *stanzas])
path = project.path / "noxfile.py"
path.write_text(text)
def run_nox_with_noxfile(
    project: Project,
    sessions: Iterable[SessionFunction],
    imports: Iterable[ModuleType],
    *nox_args: str,
) -> CompletedProcess:
    """Write a noxfile and run Nox in the project.

    The noxfile is generated from the source of ``sessions`` plus import
    statements for ``imports``; extra ``nox_args`` go to the Nox CLI.
    """
    _write_noxfile(project, sessions, imports)
    return _run_nox(project, *nox_args)
def list_packages(project: Project, session: SessionFunction) -> List[Package]:
    """List the installed packages for a session in the given project.

    Runs ``pip freeze`` from the session's virtualenv under ``.nox`` and
    parses each output line into a Package.
    """
    bindir = "Scripts" if sys.platform == "win32" else "bin"
    pip = project.path / ".nox" / session.__name__ / bindir / "pip"
    process = subprocess.run(  # noqa: S603
        [str(pip), "freeze"],
        check=True,
        universal_newlines=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    def parse(line: str) -> Package:
        name, _, version = line.partition("==")
        if not version and " @ " in line:
            # Abuse Package.version to store the URL or path.
            name, _, version = line.partition(" @ ")
        if name == project.package.name:
            # But use the known version for the local package.
            return project.package
        name = canonicalize_name(name)
        return Package(name, version)

    return [parse(line) for line in process.stdout.splitlines()]
_character_map = {
# Symbol: (Hex Value, URI Form)
'!': (0x21, '!'), '"': (0x22, '%22'), '%': (0x25, '%25'), '&': (0x26, '%26'),
'\'': (0x27, '\''), '(': (0x28, '('), ')': (0x29, ')'), '*': (0x2a, '*'),
'+': (0x2b, '+'), ',': (0x2c, ','), '-': (0x2d, '-'), '.': (0x2e, '.'),
'/': (0x2f, '%2F'), '0': (0x30, '0'), '1': (0x31, '1'), '2': (0x32, '2'),
'3': (0x33, '3'), '4': (0x34, '4'), '5': (0x35, '5'), '6': (0x36, '6'),
'7': (0x37, '7'), '8': (0x38, '8'), '9': (0x39, '9'), ':': (0x3a, ':'),
';': (0x3b, ';'), '<': (0x3c, '%3C'), '=': (0x3d, '='), '>': (0x3e, '%3E'),
'?': (0x3f, '%3F'), 'A': (0x41, 'A'), 'B': (0x42, 'B'), 'C': (0x43, 'C'),
'D': (0x44, 'D'), 'E': (0x45, 'E'), 'F': (0x46, 'F'), 'G': (0x47, 'G'),
'H': (0x48, 'H'), 'I': (0x49, 'I'), 'J': (0x4a, 'J'), 'K': (0x4b, 'K'),
'L': (0x4c, 'L'), 'M': (0x4d, 'M'), 'N': (0x4e, 'N'), 'O': (0x4f, 'O'),
'P': (0x50, 'P'), 'Q': (0x51, 'Q'), 'R': (0x52, 'R'), 'S': (0x53, 'S'),
'T': (0x54, 'T'), 'U': (0x55, 'U'), 'V': (0x56, 'V'), 'W': (0x57, 'W'),
'X': (0x58, 'X'), 'Y': (0x59, 'Y'), 'Z': (0x5a, 'Z'), '_': (0x5f, '_'),
'a': (0x61, 'a'), 'b': (0x62, 'b'), 'c': (0x63, 'c'), 'd': (0x64, 'd'),
'e': (0x65, 'e'), 'f': (0x66, 'f'), 'g': (0x67, 'g'), 'h': (0x68, 'h'),
'i': (0x69, 'i'), 'j': (0x6a, 'j'), 'k': (0x6b, 'k'), 'l': (0x6c, 'l'),
'm': (0x6d, 'm'), 'n': (0x6e, 'n'), 'o': (0x6f, 'o'), 'p': (0x70, 'p'),
'q': (0x71, 'q'), 'r': (0x72, 'r'), 's': (0x73, 's'), 't': (0x74, 't'),
'u': (0x75, 'u'), 'v': (0x76, 'v'), 'w': (0x77, 'w'), 'x': (0x78, 'x'),
'y': (0x79, 'y'), 'z': (0x7a, 'z'),
}
_hex_map = {
# Symbol: (Hex Value, URI Form)
0x21: ('!', '!'), 0x22: ('"', '%22'), 0x25: ('%', '%25'), 0x26: ('&', '%26'),
0x27: ('\'', '\''), 0x28: ('(', '('), 0x29: (')', ')'), 0x2a: ('*', '*'),
0x2b: ('+', '+'), 0x2c: (',', ','), 0x2d: ('-', '-'), 0x2e: ('.', '.'),
0x2f: ('/', '%2F'), 0x30: ('0', '0'), 0x31: ('1', '1'), 0x32: ('2', '2'),
0x33: ('3', '3'), 0x34: ('4', '4'), 0x35: ('5', '5'), 0x36: ('6', '6'),
0x37: ('7', '7'), 0x38: ('8', '8'), 0x39: ('9', '9'), 0x3a: (':', ':'),
0x3b: (';', ';'), 0x3c: ('<', '%3C'), 0x3d: ('=', '='), 0x3e: ('>', '%3E'),
0x3f: ('?', '%3F'), 0x41: ('A', 'A'), 0x42: ('B', 'B'), 0x43: ('C', 'C'),
0x44: ('D', 'D'), 0x45: ('E', 'E'), 0x46: ('F', 'F'), 0x47: ('G', 'G'),
0x48: ('H', 'H'), 0x49: ('I', 'I'), 0x4a: ('J', 'J'), 0x4b: ('K', 'K'),
0x4c: ('L', 'L'), 0x4d: ('M', 'M'), 0x4e: ('N', 'N'), 0x4f: ('O', 'O'),
0x50: ('P', 'P'), 0x51: ('Q', 'Q'), 0x52: ('R', 'R'), 0x53: ('S', 'S'),
0x54: ('T', 'T'), 0x55: ('U', 'U'), 0x56: ('V', 'V'), 0x57: ('W', 'W'),
0x58: ('X', 'X'), 0x59: ('Y', 'Y'), 0x5a: ('Z', 'Z'), 0x5f: ('_', '_'),
0x61: ('a', 'a'), 0x62: ('b', 'b'), 0x63: ('c', 'c'), 0x64: ('d', 'd'),
0x65: ('e', 'e'), 0x66: ('f', 'f'), 0x67: ('g', 'g'), 0x68: ('h', 'h'),
0x69: ('i', 'i'), 0x6a: ('j', 'j'), 0x6b: ('k', 'k'), 0x6c: ('l', 'l'),
0x6d: ('m', 'm'), 0x6e: ('n', 'n'), 0x6f: ('o', 'o'), 0x70: ('p', 'p'),
0x71: ('q', 'q'), 0x72: ('r', 'r'), 0x73: ('s', 's'), 0x74: ('t', 't'),
0x75: ('u', 'u'), 0x76: ('v', 'v'), 0x77: ('w', 'w'), 0x78: ('x', 'x'),
0x79: ('y', 'y'), 0x7a: ('z', 'z'),
}
def encode_string(string, bit_length=0):
    """Encode ``string`` as a concatenation of 7-bit character codes.

    The result is right-padded with zeros up to ``bit_length`` bits.
    Raises ValueError for characters outside the supported alphabet.
    """
    if not isinstance(string, str):
        string = str(string)
    chunks = []
    for symbol in string:
        if symbol not in _character_map:
            raise ValueError('`%s` is not a valid character for encoding' % symbol)
        chunks.append(format(_character_map[symbol][0], '07b'))
    return ''.join(chunks).ljust(bit_length, '0')
def url_encode_string(string):
    """Return ``string`` with each character replaced by its URI form.

    Raises ValueError for characters outside the supported alphabet.
    """
    # The duplicated isinstance/str coercion block from the original has
    # been removed; one coercion is sufficient.
    if not isinstance(string, str):
        string = str(string)
    encoded_string = ''
    for char in string:
        try:
            encoded_string += _character_map[char][1]
        except KeyError:
            raise ValueError('%s is not a valid character for encoding' % char)
    return encoded_string
def decode_string(string_bin):
    """Decode a 7-bit-per-character binary string back into text.

    Decoding stops at the first all-zero chunk (string terminator).
    Raises ValueError for code points outside the supported alphabet.
    """
    symbols = []
    for offset in range(0, len(string_bin), 7):
        value = int(string_bin[offset:offset + 7], 2)
        if value == 0:
            # NUL chunk marks the end of the string.
            break
        if value not in _hex_map:
            raise ValueError('`%s` is not a valid value for decoding' % hex(value))
        symbols.append(_hex_map[value][0])
    return ''.join(symbols)
def is_encodable_string(string, raise_exception=False):
    """Return True if ``string`` encodes cleanly, else False.

    With ``raise_exception=True`` the underlying ValueError is re-raised
    instead of being swallowed.
    """
    try:
        encode_string(string)
    except ValueError as e:
        if raise_exception:
            raise e
        return False
    return True
def is_decodeable_string(string_bin, raise_exception=False):
try:
decode_string(string_bin)
except ValueError as e:
if raise_exception:
raise e
return False
return True | epc/encoding/string.py | _character_map = {
# Symbol: (Hex Value, URI Form)
'!': (0x21, '!'), '"': (0x22, '%22'), '%': (0x25, '%25'), '&': (0x26, '%26'),
'\'': (0x27, '\''), '(': (0x28, '('), ')': (0x29, ')'), '*': (0x2a, '*'),
'+': (0x2b, '+'), ',': (0x2c, ','), '-': (0x2d, '-'), '.': (0x2e, '.'),
'/': (0x2f, '%2F'), '0': (0x30, '0'), '1': (0x31, '1'), '2': (0x32, '2'),
'3': (0x33, '3'), '4': (0x34, '4'), '5': (0x35, '5'), '6': (0x36, '6'),
'7': (0x37, '7'), '8': (0x38, '8'), '9': (0x39, '9'), ':': (0x3a, ':'),
';': (0x3b, ';'), '<': (0x3c, '%3C'), '=': (0x3d, '='), '>': (0x3e, '%3E'),
'?': (0x3f, '%3F'), 'A': (0x41, 'A'), 'B': (0x42, 'B'), 'C': (0x43, 'C'),
'D': (0x44, 'D'), 'E': (0x45, 'E'), 'F': (0x46, 'F'), 'G': (0x47, 'G'),
'H': (0x48, 'H'), 'I': (0x49, 'I'), 'J': (0x4a, 'J'), 'K': (0x4b, 'K'),
'L': (0x4c, 'L'), 'M': (0x4d, 'M'), 'N': (0x4e, 'N'), 'O': (0x4f, 'O'),
'P': (0x50, 'P'), 'Q': (0x51, 'Q'), 'R': (0x52, 'R'), 'S': (0x53, 'S'),
'T': (0x54, 'T'), 'U': (0x55, 'U'), 'V': (0x56, 'V'), 'W': (0x57, 'W'),
'X': (0x58, 'X'), 'Y': (0x59, 'Y'), 'Z': (0x5a, 'Z'), '_': (0x5f, '_'),
'a': (0x61, 'a'), 'b': (0x62, 'b'), 'c': (0x63, 'c'), 'd': (0x64, 'd'),
'e': (0x65, 'e'), 'f': (0x66, 'f'), 'g': (0x67, 'g'), 'h': (0x68, 'h'),
'i': (0x69, 'i'), 'j': (0x6a, 'j'), 'k': (0x6b, 'k'), 'l': (0x6c, 'l'),
'm': (0x6d, 'm'), 'n': (0x6e, 'n'), 'o': (0x6f, 'o'), 'p': (0x70, 'p'),
'q': (0x71, 'q'), 'r': (0x72, 'r'), 's': (0x73, 's'), 't': (0x74, 't'),
'u': (0x75, 'u'), 'v': (0x76, 'v'), 'w': (0x77, 'w'), 'x': (0x78, 'x'),
'y': (0x79, 'y'), 'z': (0x7a, 'z'),
}
_hex_map = {
# Symbol: (Hex Value, URI Form)
0x21: ('!', '!'), 0x22: ('"', '%22'), 0x25: ('%', '%25'), 0x26: ('&', '%26'),
0x27: ('\'', '\''), 0x28: ('(', '('), 0x29: (')', ')'), 0x2a: ('*', '*'),
0x2b: ('+', '+'), 0x2c: (',', ','), 0x2d: ('-', '-'), 0x2e: ('.', '.'),
0x2f: ('/', '%2F'), 0x30: ('0', '0'), 0x31: ('1', '1'), 0x32: ('2', '2'),
0x33: ('3', '3'), 0x34: ('4', '4'), 0x35: ('5', '5'), 0x36: ('6', '6'),
0x37: ('7', '7'), 0x38: ('8', '8'), 0x39: ('9', '9'), 0x3a: (':', ':'),
0x3b: (';', ';'), 0x3c: ('<', '%3C'), 0x3d: ('=', '='), 0x3e: ('>', '%3E'),
0x3f: ('?', '%3F'), 0x41: ('A', 'A'), 0x42: ('B', 'B'), 0x43: ('C', 'C'),
0x44: ('D', 'D'), 0x45: ('E', 'E'), 0x46: ('F', 'F'), 0x47: ('G', 'G'),
0x48: ('H', 'H'), 0x49: ('I', 'I'), 0x4a: ('J', 'J'), 0x4b: ('K', 'K'),
0x4c: ('L', 'L'), 0x4d: ('M', 'M'), 0x4e: ('N', 'N'), 0x4f: ('O', 'O'),
0x50: ('P', 'P'), 0x51: ('Q', 'Q'), 0x52: ('R', 'R'), 0x53: ('S', 'S'),
0x54: ('T', 'T'), 0x55: ('U', 'U'), 0x56: ('V', 'V'), 0x57: ('W', 'W'),
0x58: ('X', 'X'), 0x59: ('Y', 'Y'), 0x5a: ('Z', 'Z'), 0x5f: ('_', '_'),
0x61: ('a', 'a'), 0x62: ('b', 'b'), 0x63: ('c', 'c'), 0x64: ('d', 'd'),
0x65: ('e', 'e'), 0x66: ('f', 'f'), 0x67: ('g', 'g'), 0x68: ('h', 'h'),
0x69: ('i', 'i'), 0x6a: ('j', 'j'), 0x6b: ('k', 'k'), 0x6c: ('l', 'l'),
0x6d: ('m', 'm'), 0x6e: ('n', 'n'), 0x6f: ('o', 'o'), 0x70: ('p', 'p'),
0x71: ('q', 'q'), 0x72: ('r', 'r'), 0x73: ('s', 's'), 0x74: ('t', 't'),
0x75: ('u', 'u'), 0x76: ('v', 'v'), 0x77: ('w', 'w'), 0x78: ('x', 'x'),
0x79: ('y', 'y'), 0x7a: ('z', 'z'),
}
def encode_string(string, bit_length=0):
if not isinstance(string, str):
string = str(string)
encoded_string = ''
for char in string:
try:
encoded_string += '{:07b}'.format(_character_map[char][0])
except KeyError:
raise ValueError('`%s` is not a valid character for encoding' % char)
encoded_string = encoded_string.ljust(bit_length, '0')
return encoded_string
def url_encode_string(string):
if not isinstance(string, str):
string = str(string)
encoded_string = ''
if not isinstance(string, str):
string = str(string)
for char in string:
try:
encoded_string += _character_map[char][1]
except KeyError:
raise ValueError('%s is not a valid character for encoding' % char)
return encoded_string
def decode_string(string_bin):
decoded_string = ''
# Split into 7 bit chunks
for char in [string_bin[i:i + 7] for i in range(0, len(string_bin), 7)]:
int_char = int(char, 2)
if int_char == 0:
# End of string
break
try:
decoded_string += _hex_map[int_char][0]
except KeyError:
raise ValueError('`%s` is not a valid value for decoding' % hex(int_char))
return decoded_string
def is_encodable_string(string, raise_exception=False):
try:
encode_string(string)
except ValueError as e:
if raise_exception:
raise e
return False
return True
def is_decodeable_string(string_bin, raise_exception=False):
try:
decode_string(string_bin)
except ValueError as e:
if raise_exception:
raise e
return False
return True | 0.395368 | 0.175856 |
from Token import Token
from SymbolTable import SymbolTable
from TableEntry import TableEntry
from SymbolTableTree import SymbolTableTree
from ASA import *
class Syntactic():
token = ''
arrayToken = []
indexToken = ''
no = ''
symbolTableTree = ''
tableEntry = ''
actualTable = ''
def __init__ (self, arrayToken):
self.arrayToken = arrayToken
self.token = self.arrayToken[0]
self.indexToken = 0
self.actualTable = SymbolTable()
self.symbolTableTree = SymbolTableTree(self.actualTable)
self.no = AST('AST')
def match(self,tok):
if(self.token.getCodigoToken() == tok):
'''for k,v in self.actualTable.symbolTable.items():
print(v.toString())'''
self.indexToken = self.indexToken + 1
if (self.indexToken < len(self.arrayToken)):
self.token = self.arrayToken[self.indexToken]
else:
print('token invalido ' + self.token.getCodigoToken())
def imprimeErro(self):
i = self.indexToken - 1;
#print('Tokens ' + str(Follow[sync_token.type]) + ' esperados na entrada.')
#continua a análise para verificar outros erros
self.indexToken = self.indexToken + 1
self.token = self.arrayToken[self.indexToken]
#sincroniza(sync_token)
def program(self):
#match first token for any code in c-small
print(self.token.__str__())
self.match('INT')
self.match('MAIN')
self.match('LBRACKET')
self.match('RBRACKET')
self.match('LBRACE')
print(self.token.value)
#start recursion and build ASA
print('bla ' + self.no.nome)
self.decl_comand(self.no)
print('analise sintática realizada com sucesso')
print('resultado')
print(self.no.children)
print_tree(self.no)
a = open('../../tp2/output/saidateste.txt','w')
for k,v in self.actualTable.symbolTable.items():
a.write(v.toString() + '\r\n')
ToXML.toXML(self.no)
a.close()
def decl_comand(self,no):
if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT'):
self.declaration(no)
if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT' or self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
self.decl_comand(no)
elif(self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
no3 = self.comand()
if(not(no3 is None)):
no.children.append(no3)
if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT' or self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
self.decl_comand(no)
# print('O no attr aqui ')
print(self.no.children)
def types(self):
if(self.token.getCodigoToken() == 'INT'):
self.match('INT')
self.tableEntry.setTipo('int')
elif(self.token.getCodigoToken() == 'FLOAT'):
self.match('FLOAT')
self.tableEntry.setTipo('float')
def declaration(self, no):
if (self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT'):
self.tableEntry = TableEntry(None, None, None, None)
self.types()
self.tableEntry.setLexema(self.token.getLexema())
self.tableEntry.setNumLinha(self.token.getNumLinha())
#começo da criação da asa
no_id = ''
if(self.token.getCodigoToken() == 'ID'):
no_id= Id(self.token)
self.match('ID')
no_attr = None
if(self.token.getCodigoToken() == 'ATTR'):
no_attr = Assign(no_id, '=', None)
self.declaration2(self.no, no_attr)
def declaration2(self, no_pai, no):
if (self.token.getCodigoToken() == 'COMMA'):
self.match('COMMA')
self.actualTable.symbolTable[self.tableEntry.getLexema()] = self.tableEntry
lastType = self.tableEntry.getTipo()
self.tableEntry = TableEntry(None, lastType, None, None)
self.tableEntry.setLexema(self.token.getLexema())
self.tableEntry.setNumLinha(self.token.getNumLinha())
no2 = Id(self.token)
self.match('ID')
no_attr = None
if(self.token.getCodigoToken() == 'ATTR'):
no_attr = Assign(no2, '=', None)
self.declaration2(no_pai, no_attr)
elif(self.token.getCodigoToken() == 'PCOMMA'):
self.match('PCOMMA')
self.actualTable.symbolTable[self.tableEntry.getLexema()] = self.tableEntry
self.tableEntry = TableEntry(None, None, None, None)
elif(self.token.getCodigoToken() == 'ATTR'):
self.match('ATTR')
no2 = self.expression()
no.children.append(no2)
no.right = no2
no_pai.children.append(no)
self.declaration2(no_pai, no)
def comand(self):
if (self.token.getCodigoToken() == 'LBRACE'):
no = self.block()
return no
elif(self.token.getCodigoToken() == 'ID'):
no = self.attr()
return no
elif(self.token.getCodigoToken() == 'IF'):
no = self.comand_if()
return no
elif(self.token.getCodigoToken() == 'WHILE'):
no = self.comand_while()
return no
elif(self.token.getCodigoToken() == 'READ'):
no = self.comand_read()
return no
elif(self.token.getCodigoToken() == 'PRINT'):
no = self.comand_print()
return no
elif(self.token.getCodigoToken() == 'FOR'):
no = self.comand_for()
return no
def block(self):
self.match('LBRACE')
no_block = Compound()
self.decl_comand(no_block)
self.match('RBRACE')
return no_block
def attr(self):
no1 = Id(self.token)
no_attr = Assign(no1 , '=', None)
self.match('ID')
self.match('ATTR')
no2 = self.expression()
no_attr.children.append(no2)
no_attr.right = no2
self.match('PCOMMA')
return no_attr
def comand_if(self):
no_if = If(None,None,None)
self.match('IF')
self.match('LBRACKET')
no_expr = self.expression()
no_if.children.append(no_expr)
no_if.exp = no_expr
self.match('RBRACKET')
no_comand = self.comand()
no_if.children.append(no_comand)
if(self.token == 'ELSE'):
no_else = self.comand_else()
no_if.children.append(no_else)
return no_if
def comand_else(self):
self.match('ELSE')
no_else = self.comand()
return no_else
def comand_while(self):
no_while = While(None,None)
self.match('WHILE')
self.match('LBRACKET')
no_expr = self.expression()
no_while.children.append(no_expr)
no_while.exp = no_expr
self.match('RBRACKET')
no_comand = self.comand()
no_while.children.append(no_comand)
no_while.commands = no_comand
return no_while
def comand_read(self):
no_read = Read(None)
self.match('READ')
no_id = Id(self.token)
no_read.children.append(no_id)
self.match('ID')
self.match('PCOMMA')
return no_read
def comand_print(self):
no_print = Print(None)
self.match('PRINT')
self.match('LBRACKET')
no_expr = self.expression()
no_print.children.append(no_expr)
no_print.exp = no_expr
self.match('RBRACKET')
self.match('PCOMMA')
return no_print
#sem for por enquanto =('''
def comand_for(self):
no_for = For(None,None,None,None)
self.match('FOR')
self.match('LBRACKET')
no_attr = self.att_for()
no_for.children.append(no_attr)
no_for.attr = no_attr
self.match('PCOMMA')
no_expr = self.expression()
no_for.children.append(no_expr)
no_for.exp = no_expr
self.match('PCOMMA')
no_attr2 = self.att_for()
no_for.children.append(no_attr2)
no_for.attr2 = no_attr2
self.match('RBRACKET')
no_comand = self.comand()
if(not(no_comand is None)):
no_for.children.append(no_comand)
no_for.commands = no_comand
return no_for
def att_for(self):
no_id = Id(self.token)
self.match('ID')
no_attr_for = Assign(no_id,'=',None)
self.match('ATTR')
no_expr = self.expression()
no_attr_for.children.append(no_expr)
no_attr_for.right = no_expr
return no_attr_for
def expression(self):
no = self.conjunction()
if (self.token.getCodigoToken() == 'OR'):
no_expr_opc = self.expressaoOpc()
no_expr_opc.children.append(no)
no_expr_opc.left = no
return no_expr_opc
return no
def expressaoOpc(self):
no_expr_opc = LogicalOp('OR', None, None)
self.match('OR')
self.conjunction()
if(self.token.getCodigoToken() == 'OR'):
no_expr_opc2 = self.expressaoOpc()
no_expr_opc2.children.left(no_expr_opc)
no_expr_opc2.left = no_expr_opc
return no_expr_opc2
return no_expr_opc
def conjunction(self):
no = self.equal()
if(self.token.getCodigoToken() == 'AND'):
no_conj = self.conjuction_opc()
no_conj.children.append(no)
no_conj.left = no
return no
def conjuction_opc(self):
no_conj = LogicalOp('AND', None, None)
self.match('AND')
no = self.equal()
no_conj.children.append(no)
no_conj.right = no
if(self.token == 'AND'):
no_conj2 = self.conjuction_opc()
no_conj2.children.left(no_conj)
no_conj2.left = no_conj
return no_conj2
return no_conj
def equal(self):
no = self.relation()
if (self.token.getCodigoToken() == 'EQ' or self.token.getCodigoToken() == 'NE'):
no_equal_opc = self.equal_opc()
no_equal_opc.children.append(no)
return no_equal_opc
return no
def equal_opc(self):
no_op_equal = self.op_equal()
no = self.relation()
no_op_equal.children.append(no)
no_op_equal.right = no
if (self.token == 'EQ' or self.token == 'NE'):
no_equal_opc2 = self.equal_opc()
no_equal_opc2.children.append(no)
return no_equal_opc2
return no_op_equal
def op_equal(self):
if(self.token.getCodigoToken() == 'EQ' ):
self.match('EQ')
return RelOp(None, '==', None)
elif(self.token.getCodigoToken() == 'NE'):
self.match('NE')
return RelOp(None, '!=', None)
def relation(self):
no = self.add()
if(self.token.getCodigoToken() == 'LT' or self.token.getCodigoToken() == 'LE' or self.token.getCodigoToken() == 'GT' or self.token.getCodigoToken() == 'GE'):
no_relac_opc = self.relac_opc()
no_relac_opc.children.append(no)
no_relac_opc.left = no
return no_relac_opc
return no
def relac_opc(self):
no_op_rel = self.op_rel()
no2 = self.add()
no_op_rel.children.append(no2)
no_op_rel.right = no2
if(self.token == 'LT' or self.token == 'LE' or self.token == 'GT' or self.token == 'GE'):
no_op_rel2 = self.relac_opc()
no_op_rel2.append(no_op_rel)
no_op_rel2.left = no_op_rel
return no_op_rel2
return no_op_rel
def op_rel(self):
if (self.token.getCodigoToken() == 'LT'):
self.match('LT')
return RelOp(None,'<',None)
elif(self.token.getCodigoToken() == 'LE'):
self.match('LE')
return RelOp(None,'<=',None)
elif(self.token.getCodigoToken() == 'GT'):
self.match('GT')
return RelOp(None, '>', None)
elif (self.token.getCodigoToken() == 'GE'):
self.match('GE')
return RelOp(None, '>=', None)
def add(self):
no = self.term()
if (self.token.getCodigoToken() == 'PLUS' or self.token.getCodigoToken() == 'MINUS'):
no_plus_minus = self.add_opc()
no_plus_minus.children.append(no)
no_plus_minus.left = no
return no_plus_minus
return no
def add_opc(self):
no_plus_minus = self.op_add()
no2 = self.term()
no_plus_minus.children.append(no2)
no_plus_minus.right = no2
if (self.token.getCodigoToken() == 'PLUS' or self.token.getCodigoToken() == 'MINUS'):
no_plus_minus2 = self.add_opc()
no_plus_minus2.children.append(no_plus_minus)
no_plus_minus.left = no_plus_minus
return no_plus_minus2
return no_plus_minus
def op_add(self):
if(self.token.getCodigoToken() == 'PLUS'):
no_add = ArithOp('+',None, None)
self.match('PLUS')
return no_add
if(self.token.getCodigoToken() == 'MINUS'):
no_minus = ArithOp('-',None, None)
self.match('MINUS')
return no_minus
def term(self):
no = self.fact()
if(self.token.getCodigoToken() == 'MULT' or self.token.getCodigoToken() == 'DIV'):
no_div_mult = self.term_opc()
no_div_mult.children.append(no)
no_div_mult.left = no
return no_div_mult
return no
def term_opc(self):
no_div_mult = self.op_mult()
no2 = self.fact()
no_div_mult.children.append(no2)
no_div_mult.right = no2
if(self.token == 'MULT' or self.token == 'DIV'):
no_div_mult2 = self.term_opc()
no_div_mult2.children.append(no_div_mult)
no_div_mult.left = no_div_mult
return no_div_mult2
return no_div_mult
def op_mult(self):
if(self.token.getCodigoToken() == 'MULT'):
no_div_mult = ArithOp('*',None,None)
self.match('MULT')
return no_div_mult
elif(self.token.getCodigoToken() == 'DIV'):
no_div_mult = ArithOp('/',None,None)
self.match('DIV')
return no_div_mult
def fact(self):
if (self.token.getCodigoToken() == 'ID'):
no = Id(self.token)
self.match('ID')
return no
elif(self.token.getCodigoToken() == 'INTEGER_CONST'):
no = Num(self.token)
self.match('INTEGER_CONST')
return no
elif(self.token.getCodigoToken() == 'FLOAT_CONST'):
no = Num(self.token)
self.match('FLOAT_CONST')
return no
elif(self.token.getCodigoToken() == 'LBRACKET'):
self.match('LBRACKET')
no = self.expression()
self.match('RBRACKET')
return no | tp2/src/Syntactic.py | from Token import Token
from SymbolTable import SymbolTable
from TableEntry import TableEntry
from SymbolTableTree import SymbolTableTree
from ASA import *
class Syntactic():
token = ''
arrayToken = []
indexToken = ''
no = ''
symbolTableTree = ''
tableEntry = ''
actualTable = ''
def __init__ (self, arrayToken):
self.arrayToken = arrayToken
self.token = self.arrayToken[0]
self.indexToken = 0
self.actualTable = SymbolTable()
self.symbolTableTree = SymbolTableTree(self.actualTable)
self.no = AST('AST')
def match(self,tok):
if(self.token.getCodigoToken() == tok):
'''for k,v in self.actualTable.symbolTable.items():
print(v.toString())'''
self.indexToken = self.indexToken + 1
if (self.indexToken < len(self.arrayToken)):
self.token = self.arrayToken[self.indexToken]
else:
print('token invalido ' + self.token.getCodigoToken())
def imprimeErro(self):
i = self.indexToken - 1;
#print('Tokens ' + str(Follow[sync_token.type]) + ' esperados na entrada.')
#continua a análise para verificar outros erros
self.indexToken = self.indexToken + 1
self.token = self.arrayToken[self.indexToken]
#sincroniza(sync_token)
def program(self):
#match first token for any code in c-small
print(self.token.__str__())
self.match('INT')
self.match('MAIN')
self.match('LBRACKET')
self.match('RBRACKET')
self.match('LBRACE')
print(self.token.value)
#start recursion and build ASA
print('bla ' + self.no.nome)
self.decl_comand(self.no)
print('analise sintática realizada com sucesso')
print('resultado')
print(self.no.children)
print_tree(self.no)
a = open('../../tp2/output/saidateste.txt','w')
for k,v in self.actualTable.symbolTable.items():
a.write(v.toString() + '\r\n')
ToXML.toXML(self.no)
a.close()
def decl_comand(self,no):
if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT'):
self.declaration(no)
if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT' or self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
self.decl_comand(no)
elif(self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
no3 = self.comand()
if(not(no3 is None)):
no.children.append(no3)
if(self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT' or self.token.getCodigoToken() == 'LBRACE' or self.token.getCodigoToken() == 'ID' or self.token.getCodigoToken() == 'IF' or self.token.getCodigoToken() == 'WHILE' or self.token.getCodigoToken() == 'READ' or self.token.getCodigoToken() == 'PRINT' or self.token.getCodigoToken() == 'FOR'):
self.decl_comand(no)
# print('O no attr aqui ')
print(self.no.children)
def types(self):
if(self.token.getCodigoToken() == 'INT'):
self.match('INT')
self.tableEntry.setTipo('int')
elif(self.token.getCodigoToken() == 'FLOAT'):
self.match('FLOAT')
self.tableEntry.setTipo('float')
def declaration(self, no):
if (self.token.getCodigoToken() == 'INT' or self.token.getCodigoToken() == 'FLOAT'):
self.tableEntry = TableEntry(None, None, None, None)
self.types()
self.tableEntry.setLexema(self.token.getLexema())
self.tableEntry.setNumLinha(self.token.getNumLinha())
#começo da criação da asa
no_id = ''
if(self.token.getCodigoToken() == 'ID'):
no_id= Id(self.token)
self.match('ID')
no_attr = None
if(self.token.getCodigoToken() == 'ATTR'):
no_attr = Assign(no_id, '=', None)
self.declaration2(self.no, no_attr)
def declaration2(self, no_pai, no):
if (self.token.getCodigoToken() == 'COMMA'):
self.match('COMMA')
self.actualTable.symbolTable[self.tableEntry.getLexema()] = self.tableEntry
lastType = self.tableEntry.getTipo()
self.tableEntry = TableEntry(None, lastType, None, None)
self.tableEntry.setLexema(self.token.getLexema())
self.tableEntry.setNumLinha(self.token.getNumLinha())
no2 = Id(self.token)
self.match('ID')
no_attr = None
if(self.token.getCodigoToken() == 'ATTR'):
no_attr = Assign(no2, '=', None)
self.declaration2(no_pai, no_attr)
elif(self.token.getCodigoToken() == 'PCOMMA'):
self.match('PCOMMA')
self.actualTable.symbolTable[self.tableEntry.getLexema()] = self.tableEntry
self.tableEntry = TableEntry(None, None, None, None)
elif(self.token.getCodigoToken() == 'ATTR'):
self.match('ATTR')
no2 = self.expression()
no.children.append(no2)
no.right = no2
no_pai.children.append(no)
self.declaration2(no_pai, no)
def comand(self):
if (self.token.getCodigoToken() == 'LBRACE'):
no = self.block()
return no
elif(self.token.getCodigoToken() == 'ID'):
no = self.attr()
return no
elif(self.token.getCodigoToken() == 'IF'):
no = self.comand_if()
return no
elif(self.token.getCodigoToken() == 'WHILE'):
no = self.comand_while()
return no
elif(self.token.getCodigoToken() == 'READ'):
no = self.comand_read()
return no
elif(self.token.getCodigoToken() == 'PRINT'):
no = self.comand_print()
return no
elif(self.token.getCodigoToken() == 'FOR'):
no = self.comand_for()
return no
def block(self):
self.match('LBRACE')
no_block = Compound()
self.decl_comand(no_block)
self.match('RBRACE')
return no_block
def attr(self):
no1 = Id(self.token)
no_attr = Assign(no1 , '=', None)
self.match('ID')
self.match('ATTR')
no2 = self.expression()
no_attr.children.append(no2)
no_attr.right = no2
self.match('PCOMMA')
return no_attr
def comand_if(self):
no_if = If(None,None,None)
self.match('IF')
self.match('LBRACKET')
no_expr = self.expression()
no_if.children.append(no_expr)
no_if.exp = no_expr
self.match('RBRACKET')
no_comand = self.comand()
no_if.children.append(no_comand)
if(self.token == 'ELSE'):
no_else = self.comand_else()
no_if.children.append(no_else)
return no_if
def comand_else(self):
self.match('ELSE')
no_else = self.comand()
return no_else
def comand_while(self):
no_while = While(None,None)
self.match('WHILE')
self.match('LBRACKET')
no_expr = self.expression()
no_while.children.append(no_expr)
no_while.exp = no_expr
self.match('RBRACKET')
no_comand = self.comand()
no_while.children.append(no_comand)
no_while.commands = no_comand
return no_while
def comand_read(self):
no_read = Read(None)
self.match('READ')
no_id = Id(self.token)
no_read.children.append(no_id)
self.match('ID')
self.match('PCOMMA')
return no_read
def comand_print(self):
no_print = Print(None)
self.match('PRINT')
self.match('LBRACKET')
no_expr = self.expression()
no_print.children.append(no_expr)
no_print.exp = no_expr
self.match('RBRACKET')
self.match('PCOMMA')
return no_print
#sem for por enquanto =('''
def comand_for(self):
no_for = For(None,None,None,None)
self.match('FOR')
self.match('LBRACKET')
no_attr = self.att_for()
no_for.children.append(no_attr)
no_for.attr = no_attr
self.match('PCOMMA')
no_expr = self.expression()
no_for.children.append(no_expr)
no_for.exp = no_expr
self.match('PCOMMA')
no_attr2 = self.att_for()
no_for.children.append(no_attr2)
no_for.attr2 = no_attr2
self.match('RBRACKET')
no_comand = self.comand()
if(not(no_comand is None)):
no_for.children.append(no_comand)
no_for.commands = no_comand
return no_for
def att_for(self):
no_id = Id(self.token)
self.match('ID')
no_attr_for = Assign(no_id,'=',None)
self.match('ATTR')
no_expr = self.expression()
no_attr_for.children.append(no_expr)
no_attr_for.right = no_expr
return no_attr_for
def expression(self):
no = self.conjunction()
if (self.token.getCodigoToken() == 'OR'):
no_expr_opc = self.expressaoOpc()
no_expr_opc.children.append(no)
no_expr_opc.left = no
return no_expr_opc
return no
def expressaoOpc(self):
no_expr_opc = LogicalOp('OR', None, None)
self.match('OR')
self.conjunction()
if(self.token.getCodigoToken() == 'OR'):
no_expr_opc2 = self.expressaoOpc()
no_expr_opc2.children.left(no_expr_opc)
no_expr_opc2.left = no_expr_opc
return no_expr_opc2
return no_expr_opc
def conjunction(self):
no = self.equal()
if(self.token.getCodigoToken() == 'AND'):
no_conj = self.conjuction_opc()
no_conj.children.append(no)
no_conj.left = no
return no
def conjuction_opc(self):
no_conj = LogicalOp('AND', None, None)
self.match('AND')
no = self.equal()
no_conj.children.append(no)
no_conj.right = no
if(self.token == 'AND'):
no_conj2 = self.conjuction_opc()
no_conj2.children.left(no_conj)
no_conj2.left = no_conj
return no_conj2
return no_conj
def equal(self):
no = self.relation()
if (self.token.getCodigoToken() == 'EQ' or self.token.getCodigoToken() == 'NE'):
no_equal_opc = self.equal_opc()
no_equal_opc.children.append(no)
return no_equal_opc
return no
def equal_opc(self):
no_op_equal = self.op_equal()
no = self.relation()
no_op_equal.children.append(no)
no_op_equal.right = no
if (self.token == 'EQ' or self.token == 'NE'):
no_equal_opc2 = self.equal_opc()
no_equal_opc2.children.append(no)
return no_equal_opc2
return no_op_equal
def op_equal(self):
if(self.token.getCodigoToken() == 'EQ' ):
self.match('EQ')
return RelOp(None, '==', None)
elif(self.token.getCodigoToken() == 'NE'):
self.match('NE')
return RelOp(None, '!=', None)
def relation(self):
no = self.add()
if(self.token.getCodigoToken() == 'LT' or self.token.getCodigoToken() == 'LE' or self.token.getCodigoToken() == 'GT' or self.token.getCodigoToken() == 'GE'):
no_relac_opc = self.relac_opc()
no_relac_opc.children.append(no)
no_relac_opc.left = no
return no_relac_opc
return no
def relac_opc(self):
no_op_rel = self.op_rel()
no2 = self.add()
no_op_rel.children.append(no2)
no_op_rel.right = no2
if(self.token == 'LT' or self.token == 'LE' or self.token == 'GT' or self.token == 'GE'):
no_op_rel2 = self.relac_opc()
no_op_rel2.append(no_op_rel)
no_op_rel2.left = no_op_rel
return no_op_rel2
return no_op_rel
def op_rel(self):
if (self.token.getCodigoToken() == 'LT'):
self.match('LT')
return RelOp(None,'<',None)
elif(self.token.getCodigoToken() == 'LE'):
self.match('LE')
return RelOp(None,'<=',None)
elif(self.token.getCodigoToken() == 'GT'):
self.match('GT')
return RelOp(None, '>', None)
elif (self.token.getCodigoToken() == 'GE'):
self.match('GE')
return RelOp(None, '>=', None)
def add(self):
no = self.term()
if (self.token.getCodigoToken() == 'PLUS' or self.token.getCodigoToken() == 'MINUS'):
no_plus_minus = self.add_opc()
no_plus_minus.children.append(no)
no_plus_minus.left = no
return no_plus_minus
return no
def add_opc(self):
no_plus_minus = self.op_add()
no2 = self.term()
no_plus_minus.children.append(no2)
no_plus_minus.right = no2
if (self.token.getCodigoToken() == 'PLUS' or self.token.getCodigoToken() == 'MINUS'):
no_plus_minus2 = self.add_opc()
no_plus_minus2.children.append(no_plus_minus)
no_plus_minus.left = no_plus_minus
return no_plus_minus2
return no_plus_minus
def op_add(self):
if(self.token.getCodigoToken() == 'PLUS'):
no_add = ArithOp('+',None, None)
self.match('PLUS')
return no_add
if(self.token.getCodigoToken() == 'MINUS'):
no_minus = ArithOp('-',None, None)
self.match('MINUS')
return no_minus
def term(self):
no = self.fact()
if(self.token.getCodigoToken() == 'MULT' or self.token.getCodigoToken() == 'DIV'):
no_div_mult = self.term_opc()
no_div_mult.children.append(no)
no_div_mult.left = no
return no_div_mult
return no
def term_opc(self):
no_div_mult = self.op_mult()
no2 = self.fact()
no_div_mult.children.append(no2)
no_div_mult.right = no2
if(self.token == 'MULT' or self.token == 'DIV'):
no_div_mult2 = self.term_opc()
no_div_mult2.children.append(no_div_mult)
no_div_mult.left = no_div_mult
return no_div_mult2
return no_div_mult
def op_mult(self):
if(self.token.getCodigoToken() == 'MULT'):
no_div_mult = ArithOp('*',None,None)
self.match('MULT')
return no_div_mult
elif(self.token.getCodigoToken() == 'DIV'):
no_div_mult = ArithOp('/',None,None)
self.match('DIV')
return no_div_mult
def fact(self):
if (self.token.getCodigoToken() == 'ID'):
no = Id(self.token)
self.match('ID')
return no
elif(self.token.getCodigoToken() == 'INTEGER_CONST'):
no = Num(self.token)
self.match('INTEGER_CONST')
return no
elif(self.token.getCodigoToken() == 'FLOAT_CONST'):
no = Num(self.token)
self.match('FLOAT_CONST')
return no
elif(self.token.getCodigoToken() == 'LBRACKET'):
self.match('LBRACKET')
no = self.expression()
self.match('RBRACKET')
return no | 0.119588 | 0.076615 |
from __future__ import absolute_import
from datetime import datetime
from os import path, listdir
from time import sleep
from unittest import TestCase
import re
import pytest
from werkzeug.datastructures import Headers
from werkzeug.http import http_date
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request
from loris import img_info, webapp
from loris.loris_exception import ConfigError
from loris.transforms import KakaduJP2Transformer, OPJ_JP2Transformer
from tests import loris_t
def _get_werkzeug_request(path):
    """Build a werkzeug ``Request`` for *path* from a synthetic WSGI environ."""
    environ = EnvironBuilder(path=path).get_environ()
    return Request(environ)
class TestDebugConfig(object):
    """The debug configuration must wire up the requested JP2 transformer."""

    def test_debug_config_gives_kakadu_transformer(self):
        """'kdu' selects the Kakadu JP2 transformer."""
        app = webapp.Loris(webapp.get_debug_config('kdu'))
        assert isinstance(app.transformers['jp2'], KakaduJP2Transformer)

    def test_debug_config_gives_openjpeg_transformer(self):
        """'opj' selects the OpenJPEG JP2 transformer."""
        app = webapp.Loris(webapp.get_debug_config('opj'))
        assert isinstance(app.transformers['jp2'], OPJ_JP2Transformer)

    def test_unrecognized_debug_config_is_configerror(self):
        """An unknown transformer name raises ConfigError with a clear message."""
        with pytest.raises(ConfigError) as err:
            webapp.get_debug_config('no_such_jp2_transformer')
        assert 'Unrecognized debug JP2 transformer' in str(err.value)
class TestLorisRequest(TestCase):
def setUp(self):
    # Percent-encoded identifier ('01/02/0001.jp2') shared by the request
    # parsing tests below; the %2F escapes stand for slashes in the ident.
    self.test_jp2_color_id = '01%2F02%2F0001.jp2'
def test_get_base_uri(self):
    """With redirects enabled, the base URI is built from the request host."""
    request = _get_werkzeug_request('/%s/' % self.test_jp2_color_id)
    loris_request = webapp.LorisRequest(request, True, None)
    self.assertEqual(loris_request.base_uri, 'http://localhost/01%2F02%2F0001.jp2')
def test_get_base_uri_proxy_path(self):
    """A configured proxy path replaces the request host in the base URI."""
    request = _get_werkzeug_request('/%s/' % self.test_jp2_color_id)
    loris_request = webapp.LorisRequest(request, True, 'http://example.org/')
    self.assertEqual(loris_request.base_uri, 'http://example.org/01%2F02%2F0001.jp2')
def test_root_path(self):
    """The bare root path is classified as an 'index' request."""
    loris_request = webapp.LorisRequest(_get_werkzeug_request('/'), False, None)
    self.assertEqual(loris_request.ident, '')
    self.assertEqual(loris_request.params, '')
    self.assertEqual(loris_request.request_type, 'index')
def test_favicon(self):
    """/favicon.ico is classified as a 'favicon' request, not an image."""
    loris_request = webapp.LorisRequest(_get_werkzeug_request('/favicon.ico'), False, None)
    self.assertEqual(loris_request.ident, '')
    self.assertEqual(loris_request.params, '')
    self.assertEqual(loris_request.request_type, 'favicon')
def test_unescaped_ident_request(self):
    """Slashes in a raw identifier get percent-encoded; request redirects to info."""
    request = _get_werkzeug_request('/01/02/0001.jp2/')
    loris_request = webapp.LorisRequest(request, True, None)
    self.assertEqual(loris_request.ident, '01%2F02%2F0001.jp2')
    self.assertEqual(loris_request.params, '')
    self.assertEqual(loris_request.request_type, 'redirect_info')
def test_ident_request(self):
    """A bare encoded identifier (with redirects on) redirects to its info."""
    request = _get_werkzeug_request('/%s/' % self.test_jp2_color_id)
    loris_request = webapp.LorisRequest(request, True, None)
    self.assertEqual(loris_request.ident, self.test_jp2_color_id)
    self.assertEqual(loris_request.params, '')
    self.assertEqual(loris_request.request_type, 'redirect_info')
def test_ident_request_no_redirect(self):
    """With redirects off, the trailing slash stays part of the identifier."""
    request = _get_werkzeug_request('/%s/' % self.test_jp2_color_id)
    loris_request = webapp.LorisRequest(request, False, None)
    self.assertEqual(loris_request.ident, self.test_jp2_color_id + '%2F')
    self.assertEqual(loris_request.request_type, 'redirect_info')
def test_info_request(self):
    """An info.json URL is classified as an 'info' request for the identifier."""
    request = _get_werkzeug_request('/%s/info.json' % self.test_jp2_color_id)
    loris_request = webapp.LorisRequest(request, False, None)
    self.assertEqual(loris_request.ident, self.test_jp2_color_id)
    self.assertEqual(loris_request.params, 'info.json')
    self.assertEqual(loris_request.request_type, 'info')
def test_img_request(self):
    """A full image URL parses into the five IIIF image parameters."""
    request = _get_werkzeug_request('/%s/full/full/0/default.jpg' % self.test_jp2_color_id)
    loris_request = webapp.LorisRequest(request, False, None)
    self.assertEqual(loris_request.ident, self.test_jp2_color_id)
    expected_params = {
        'region': u'full',
        'size': u'full',
        'rotation': u'0',
        'quality': u'default',
        'format': u'jpg',
    }
    self.assertEqual(loris_request.params, expected_params)
    self.assertEqual(loris_request.request_type, u'image')
def test_img_region(self):
    """Each supported IIIF region syntax is carried through to params['region']."""
    for region in ('square', '0,0,500,500', 'pct:41.6,7.5,40,70'):
        url = '/%s/%s/full/0/default.jpg' % (self.test_jp2_color_id, region)
        loris_request = webapp.LorisRequest(_get_werkzeug_request(url), False, None)
        self.assertEqual(loris_request.request_type, u'image')
        self.assertEqual(loris_request.params['region'], region)
def test_img_size(self):
    """Each supported IIIF size syntax is carried through to params['size']."""
    for size in ('full', 'max', '150,', 'pct:50', '!225,100'):
        url = '/%s/full/%s/0/default.jpg' % (self.test_jp2_color_id, size)
        loris_request = webapp.LorisRequest(_get_werkzeug_request(url), False, None)
        self.assertEqual(loris_request.request_type, u'image')
        self.assertEqual(loris_request.params['size'], size)
def test_img_rotation(self):
    """Rotation values (including mirrored '!0') are parsed verbatim."""
    for rotation in ('0', '22.5', '!0'):
        url = '/%s/full/full/%s/default.jpg' % (self.test_jp2_color_id, rotation)
        loris_request = webapp.LorisRequest(_get_werkzeug_request(url), False, None)
        self.assertEqual(loris_request.request_type, u'image')
        self.assertEqual(loris_request.params['rotation'], rotation)
def test_img_quality(self):
    """'gray' is an accepted quality; 'native' (IIIF 1.x) is rejected."""
    good = '/%s/full/full/0/gray.jpg' % (self.test_jp2_color_id,)
    loris_request = webapp.LorisRequest(_get_werkzeug_request(good), False, None)
    self.assertEqual(loris_request.request_type, 'image')
    self.assertEqual(loris_request.params['quality'], 'gray')
    bad = '/%s/full/full/0/native.jpg' % (self.test_jp2_color_id,)
    loris_request = webapp.LorisRequest(_get_werkzeug_request(bad), False, None)
    self.assertEqual(loris_request.request_type, u'bad_image_request')
def test_img_format(self):
    """The extension of the final path segment becomes params['format']."""
    url = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
    loris_request = webapp.LorisRequest(_get_werkzeug_request(url), False, None)
    self.assertEqual(loris_request.request_type, 'image')
    self.assertEqual(loris_request.params['format'], 'jpg')
def test_many_slash_img_request(self):
    """An identifier containing many slashes is percent-encoded whole."""
    identifier = '1/2/3/4/5/6/7/8/9/xyz'
    encoded_identifier = '1%2F2%2F3%2F4%2F5%2F6%2F7%2F8%2F9%2Fxyz'
    req = _get_werkzeug_request('/%s/full/full/0/default.jpg' % identifier)
    loris_request = webapp.LorisRequest(req, False, None)
    self.assertEqual(loris_request.ident, encoded_identifier)
    expected_params = {'region': u'full', 'size': u'full', 'rotation': u'0', 'quality': u'default', 'format': u'jpg'}
    self.assertEqual(loris_request.params, expected_params)
    self.assertEqual(loris_request.request_type, u'image')
def test_https_uri_identifier(self):
    """A full https:// URI used as identifier is percent-encoded whole."""
    identifier = 'https://sample.sample/0001'
    encoded_identifier = 'https%3A%2F%2Fsample.sample%2F0001'
    req = _get_werkzeug_request('/%s/full/full/0/default.jpg' % identifier)
    loris_request = webapp.LorisRequest(req, False, None)
    self.assertEqual(loris_request.ident, encoded_identifier)
    expected_params = {'region': u'full', 'size': u'full', 'rotation': u'0', 'quality': u'default', 'format': u'jpg'}
    self.assertEqual(loris_request.params, expected_params)
    self.assertEqual(loris_request.request_type, u'image')
def test_many_slash_info_request(self):
    """info.json requests also encode slash-laden identifiers."""
    identifier = '1/2/3/4/5/6/7/8/9/xyz'
    encoded_identifier = '1%2F2%2F3%2F4%2F5%2F6%2F7%2F8%2F9%2Fxyz'
    req = _get_werkzeug_request('/%s/info.json' % identifier)
    loris_request = webapp.LorisRequest(req, False, None)
    self.assertEqual(loris_request.request_type, u'info')
    self.assertEqual(loris_request.ident, encoded_identifier)
def test_template_delimiter_request(self):
    """':' and '|' in identifiers are encoded for image and info requests."""
    identifier = u'a:foo|bar'
    encoded_identifier = u'a%3Afoo%7Cbar'
    # image request
    req = _get_werkzeug_request(u'/%s/full/full/0/default.jpg' % identifier)
    loris_request = webapp.LorisRequest(req)
    self.assertEqual(loris_request.request_type, u'image')
    self.assertEqual(loris_request.ident, encoded_identifier)
    # info request
    req = _get_werkzeug_request(u'/%s/info.json' % identifier)
    loris_request = webapp.LorisRequest(req)
    self.assertEqual(loris_request.request_type, u'info')
    self.assertEqual(loris_request.ident, encoded_identifier)
class TestGetInfo(loris_t.LorisTest):
    """Tests for Loris._get_info."""

    def test_get_info(self):
        """_get_info resolves an identifier and stamps the base URI on it."""
        req = _get_werkzeug_request(path='/%s/' % self.test_jp2_color_id)
        base_uri = 'http://example.org/01%2F02%2F0001.jp2'
        info, last_mod = self.app._get_info(self.test_jp2_color_id, req, base_uri)
        self.assertEqual(info.ident, base_uri)

    def test_get_info_invalid_src_format(self):
        # The src-format validation this exercised was factored out of
        # webapp._get_info. --azaroth42 2017-07-07
        # Report the test as skipped rather than silently passing via an
        # early return, so runners show it is intentionally disabled.
        self.skipTest('src-format validation was factored out of _get_info')
class WebappIntegration(loris_t.LorisTest):
    'Simulate working with the webapp over HTTP.'

    def test_index(self):
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(resp.data.decode('utf8').startswith('This is Loris, '))

    def test_favicon(self):
        resp = self.client.get('/favicon.ico')
        self.assertEqual(resp.status_code, 200)

    def test_bare_identifier_request_303(self):
        resp = self.client.get('/%s' % (self.test_jp2_color_id,))
        self.assertEqual(resp.status_code, 303)
        self.assertEqual(resp.headers['Location'], 'http://localhost/01%2F02%2F0001.jp2/info.json')

    def test_bare_identifier_request_with_trailing_slash_303(self):
        resp = self.client.get('/%s/' % (self.test_jp2_color_id,))
        self.assertEqual(resp.status_code, 303)
        self.assertEqual(resp.headers['Location'], 'http://localhost/01%2F02%2F0001.jp2/info.json')

    def test_bare_identifier_with_trailing_slash_404s_with_redir_off(self):
        self.app.redirect_id_slash_to_info = False
        resp = self.client.get('/%s/' % (self.test_jp2_color_id,))
        self.assertEqual(resp.status_code, 404)

    def test_access_control_allow_origin_on_bare_identifier(self):
        resp = self.client.get('/%s' % (self.test_jp2_color_id,), follow_redirects=False)
        self.assertEqual(resp.headers['access-control-allow-origin'], '*')

    def test_access_control_allow_origin_on_info_requests(self):
        uri = '/%s/info.json' % (self.test_jp2_color_id,)
        resp = self.client.get(uri)
        self.assertEqual(resp.headers['access-control-allow-origin'], '*')

    def test_access_control_allow_origin_on_img_request(self):
        uri = '/%s/full/100,/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(uri)
        self.assertEqual(resp.headers['access-control-allow-origin'], '*')

    def test_cors_regex_match(self):
        self.app.cors_regex = re.compile('calhos')
        to_get = '/%s/full/110,/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(resp.headers['Access-Control-Allow-Origin'], 'http://localhost/')

    def test_cors_regex_no_match(self):
        self.app.cors_regex = re.compile('fooxyz')
        to_get = '/%s/full/120,/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        # Headers.has_key() is gone on Python 3 / modern Werkzeug; use `in`.
        self.assertNotIn('Access-Control-Allow-Origin', resp.headers)

    def test_bare_broken_identifier_request_404(self):
        resp = self.client.get('/foo%2Fbar')
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp.headers['content-type'], 'text/plain')

    def test_info_not_found_request(self):
        resp = self.client.get('/foobar/info.json')
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp.headers['content-type'], 'text/plain')

    def test_image_not_found_request(self):
        resp = self.client.get('/foobar/full/full/0/default.jpg')
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp.headers['content-type'], 'text/plain')

    def test_bare_identifier_request_303_gets_info(self):
        # Follow the redirect. After that this is nearly a copy of
        # img_info_t.C_InfoFunctionalTests#test_jp2_info_dot_json_request
        to_get = '/%s' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=True)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.headers['content-type'], 'application/json')
        tmp_fp = path.join(self.app.tmp_dp, 'loris_test_info.json')
        with open(tmp_fp, 'wb') as f:
            f.write(resp.data)
        info = img_info.ImageInfo.from_json_fp(tmp_fp)
        self.assertEqual(info.width, self.test_jp2_color_dims[0])

    def test_info_without_dot_json_404(self):
        # Note that this isn't what we really want...should be 400, but this
        # gets through as an ID. Technically OK, I think.
        to_get = '/%s/info' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 404)

    def test_image_without_format_400(self):
        to_get = '/%s/full/full/0/default' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_image_redirect_to_canonical(self):
        self.app.redirect_canonical_image_request = True
        to_get = '/%s/0,0,500,600/!550,600/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 301)

    def test_image_no_redirect_to_canonical(self):
        self.app.redirect_canonical_image_request = False
        to_get = '/%s/0,0,500,600/!550,600/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)

    def test_image_proxy_path_canonical_link(self):
        self.app.proxy_path = 'https://proxy_example.org/image/'
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)
        link = '<http://iiif.io/api/image/2/level2.json>;rel="profile",<https://proxy_example.org/image/01%2F02%2F0001.jp2/full/full/0/default.jpg>;rel="canonical"'
        self.assertEqual(resp.headers['Link'], link)

    def test_image_canonical_link(self):
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)
        link = '<http://iiif.io/api/image/2/level2.json>;rel="profile",<http://localhost/01%2F02%2F0001.jp2/full/full/0/default.jpg>;rel="canonical"'
        self.assertEqual(resp.headers['Link'], link)

    def test_img_sends_304(self):
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # get an image
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 200)
        lmod = resp.headers['Last-Modified']
        sleep(1)  # just make sure.
        headers = Headers([('If-Modified-Since', lmod)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)
        sleep(1)
        dt = http_date(datetime.utcnow())  # ~2 seconds later
        headers = Headers([('If-Modified-Since', dt)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)

    def test_img_reduce(self):
        to_get = '/%s/full/300,/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 200)

    def test_no_ims_header_ok(self):
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # get an image
        resp = self.client.get(to_get, headers=Headers())
        self.assertEqual(resp.status_code, 200)

    def test_info_fake_jp2(self):
        to_get = '/01%2F03%2Ffake.jp2/info.json'
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 500)
        self.assertEqual(resp.data.decode('utf8'), 'Server Side Error: Invalid JP2 file (500)')

    def test_info_sends_304(self):
        to_get = '/%s/info.json' % (self.test_jp2_color_id,)
        # get an image
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 200)
        lmod = resp.headers['Last-Modified']
        sleep(1)  # just make sure.
        headers = Headers([('if-modified-since', lmod)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)
        sleep(1)
        dt = http_date(datetime.utcnow())  # ~2 seconds later
        headers = Headers([('if-modified-since', dt)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)

    def test_info_with_callback_is_wrapped_correctly(self):
        to_get = '/%s/info.json?callback=mycallback' % self.test_jpeg_id
        resp = self.client.get(to_get)
        assert resp.status_code == 200
        assert re.match(r'^mycallback\(.*\);$', resp.data.decode('utf8'))

    def test_info_as_options(self):
        to_opt = '/%s/info.json?callback=mycallback' % self.test_jpeg_id
        resp = self.client.options(to_opt)
        assert resp.status_code == 200
        assert resp.headers.get('Access-Control-Allow-Methods') == 'GET, OPTIONS'

    def test_bad_format_returns_400(self):
        to_get = '/%s/full/full/0/default.hey' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_bad_quality_returns_400(self):
        to_get = '/%s/full/full/0/native.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_bad_quality_for_gray_image_returns_400(self):
        to_get = '/%s/full/full/0/color.jpg' % (self.test_jp2_gray_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_bad_rotation_returns_400(self):
        to_get = '/%s/full/full/x/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_bad_size_returns_400(self):
        to_get = '/%s/full/xyz/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_bad_region_returns_400(self):
        to_get = '/%s/foo_/full/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)

    def test_cleans_up_when_caching(self):
        self.app.enable_caching = True
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # We use the response as a context manager to ensure it gets
        # closed before the test ends.
        with self.client.get(to_get):
            pass
        self._assert_tmp_has_no_files()

    def test_cleans_up_when_not_caching(self):
        self.app.enable_caching = False
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # We use the response as a context manager to ensure it gets
        # closed before the test ends.
        with self.client.get(to_get):
            pass
        self._assert_tmp_has_no_files()

    def _assert_tmp_has_no_files(self):
        # callback should delete the image before the test ends, so the tmp dir
        # should not contain any files (there may be dirs)
        tmp = self.app.tmp_dp
        any_files = any(path.isfile(path.join(tmp, n)) for n in listdir(tmp))
        self.assertTrue(not any_files, "There are too many files in %s: %s" % (tmp, any_files))
class SizeRestriction(loris_t.LorisTest):
    '''Tests for restriction of size parameter.'''

    def setUp(self):
        '''Set max_size_above_full to 100 for tests.'''
        super(SizeRestriction, self).setUp()
        self.app.max_size_above_full = 100

    def test_json_no_size_above_full(self):
        '''Is 'sizeAboveFull' removed from json?'''
        request_path = '/%s/info.json' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
        self.assertFalse('sizeAboveFull' in resp.data.decode('utf8'))

    # NOTE: leading underscore keeps this out of test discovery; it is
    # intentionally disabled.
    def _test_json_has_size_above_full(self):
        '''Does sizeAboveFull remain in info.json if size > 100?'''
        self.app.max_size_above_full = 200
        request_path = '/%s/info.json' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('sizeAboveFull' in resp.data.decode('utf8'))

    def test_full_full(self):
        '''full/full has no size restrictions.'''
        request_path = '/%s/full/full/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)

    def test_percent_ok(self):
        '''pct:100 is allowed.'''
        request_path = '/%s/full/pct:100/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)

    def test_percent_ok_200(self):
        '''pct:200 is allowed if max_size_above_full is 200.'''
        self.app.max_size_above_full = 200
        request_path = '/%s/full/pct:200/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)

    def test_percent_exceeds_100(self):
        '''Restrict interpolation. So pct:101 must be rejected.'''
        request_path = '/%s/full/pct:101/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)

    def test_percent_exceeds_200(self):
        '''Restrict interpolation to 200. So pct:201 must be rejected.'''
        self.app.max_size_above_full = 200
        request_path = '/%s/full/pct:201/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)

    def test_size_width_ok(self):
        '''Explicit width in size parameter is not larger than image size.'''
        request_path = '/%s/full/3600,/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)

    def test_size_width_too_big(self):
        '''Explicit width in size parameter is larger than image size.'''
        request_path = '/%s/full/3601,/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)

    def test_size_height_ok(self):
        '''Explicit height in size parameter is not larger than image height.'''
        request_path = '/%s/full/,2987/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)

    def test_size_height_to_big(self):
        '''Explicit height in size parameter is too large (larger than image height).'''
        request_path = '/%s/full/,2988/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)

    def test_region_too_big(self):
        '''It's not allowed to make a region larger than 100% of original
        region size.'''
        request_path = '/%s/100,100,100,100/120,/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)

    def test_no_restriction(self):
        '''If max_size_above_full is set to 0, users can request
        any image size.'''
        self.app.max_size_above_full = 0
        request_path = '/%s/full/pct:120/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
from __future__ import absolute_import
from datetime import datetime
from os import path, listdir
from time import sleep
from unittest import TestCase
import re
import pytest
from werkzeug.datastructures import Headers
from werkzeug.http import http_date
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request
from loris import img_info, webapp
from loris.loris_exception import ConfigError
from loris.transforms import KakaduJP2Transformer, OPJ_JP2Transformer
from tests import loris_t
def _get_werkzeug_request(path):
    """Build a bare werkzeug Request for *path* via EnvironBuilder."""
    return Request(EnvironBuilder(path=path).get_environ())
class TestDebugConfig(object):
    """get_debug_config selects the matching JP2 transformer or errors."""

    def test_debug_config_gives_kakadu_transformer(self):
        app = webapp.Loris(webapp.get_debug_config('kdu'))
        assert isinstance(app.transformers['jp2'], KakaduJP2Transformer)

    def test_debug_config_gives_openjpeg_transformer(self):
        app = webapp.Loris(webapp.get_debug_config('opj'))
        assert isinstance(app.transformers['jp2'], OPJ_JP2Transformer)

    def test_unrecognized_debug_config_is_configerror(self):
        # An unknown transformer name must raise a ConfigError up front.
        with pytest.raises(ConfigError) as err:
            webapp.get_debug_config('no_such_jp2_transformer')
        assert 'Unrecognized debug JP2 transformer' in str(err.value)
class TestLorisRequest(TestCase):
    """Unit tests for webapp.LorisRequest URL parsing.

    The original methods each rebuilt a werkzeug request and a LorisRequest
    inline; the shared construction is factored into _loris_request.
    """

    def setUp(self):
        self.test_jp2_color_id = '01%2F02%2F0001.jp2'

    def _loris_request(self, path, redirect_id_slash_to_info=False, proxy_path=None):
        # Shared builder: parse *path* exactly as the webapp would.
        req = _get_werkzeug_request(path)
        return webapp.LorisRequest(req, redirect_id_slash_to_info, proxy_path)

    def test_get_base_uri(self):
        loris_request = self._loris_request('/%s/' % self.test_jp2_color_id, True, None)
        self.assertEqual(loris_request.base_uri, 'http://localhost/01%2F02%2F0001.jp2')

    def test_get_base_uri_proxy_path(self):
        loris_request = self._loris_request(
            '/%s/' % self.test_jp2_color_id, True, 'http://example.org/')
        self.assertEqual(loris_request.base_uri, 'http://example.org/01%2F02%2F0001.jp2')

    def test_root_path(self):
        loris_request = self._loris_request('/')
        self.assertEqual(loris_request.ident, '')
        self.assertEqual(loris_request.params, '')
        self.assertEqual(loris_request.request_type, 'index')

    def test_favicon(self):
        loris_request = self._loris_request('/favicon.ico')
        self.assertEqual(loris_request.ident, '')
        self.assertEqual(loris_request.params, '')
        self.assertEqual(loris_request.request_type, 'favicon')

    def test_unescaped_ident_request(self):
        loris_request = self._loris_request('/01/02/0001.jp2/', True, None)
        self.assertEqual(loris_request.ident, '01%2F02%2F0001.jp2')
        self.assertEqual(loris_request.params, '')
        self.assertEqual(loris_request.request_type, 'redirect_info')

    def test_ident_request(self):
        loris_request = self._loris_request('/%s/' % self.test_jp2_color_id, True, None)
        self.assertEqual(loris_request.ident, self.test_jp2_color_id)
        self.assertEqual(loris_request.params, '')
        self.assertEqual(loris_request.request_type, 'redirect_info')

    def test_ident_request_no_redirect(self):
        # Without the redirect, the trailing slash stays part of the ident.
        loris_request = self._loris_request('/%s/' % self.test_jp2_color_id)
        self.assertEqual(loris_request.ident, self.test_jp2_color_id + '%2F')
        self.assertEqual(loris_request.request_type, 'redirect_info')

    def test_info_request(self):
        loris_request = self._loris_request('/%s/info.json' % self.test_jp2_color_id)
        self.assertEqual(loris_request.ident, self.test_jp2_color_id)
        self.assertEqual(loris_request.params, 'info.json')
        self.assertEqual(loris_request.request_type, 'info')

    def test_img_request(self):
        loris_request = self._loris_request(
            '/%s/full/full/0/default.jpg' % self.test_jp2_color_id)
        self.assertEqual(loris_request.ident, self.test_jp2_color_id)
        expected_params = {'region': u'full', 'size': u'full', 'rotation': u'0', 'quality': u'default', 'format': u'jpg'}
        self.assertEqual(loris_request.params, expected_params)
        self.assertEqual(loris_request.request_type, u'image')

    def test_img_region(self):
        for region in ('square', '0,0,500,500', 'pct:41.6,7.5,40,70'):
            loris_request = self._loris_request(
                '/%s/%s/full/0/default.jpg' % (self.test_jp2_color_id, region))
            self.assertEqual(loris_request.request_type, u'image')
            self.assertEqual(loris_request.params['region'], region)

    def test_img_size(self):
        for size in ('full', 'max', '150,', 'pct:50', '!225,100'):
            loris_request = self._loris_request(
                '/%s/full/%s/0/default.jpg' % (self.test_jp2_color_id, size))
            self.assertEqual(loris_request.request_type, u'image')
            self.assertEqual(loris_request.params['size'], size)

    def test_img_rotation(self):
        for rotation in ('0', '22.5', '!0'):
            loris_request = self._loris_request(
                '/%s/full/full/%s/default.jpg' % (self.test_jp2_color_id, rotation))
            self.assertEqual(loris_request.request_type, u'image')
            self.assertEqual(loris_request.params['rotation'], rotation)

    def test_img_quality(self):
        loris_request = self._loris_request(
            '/%s/full/full/0/gray.jpg' % (self.test_jp2_color_id,))
        self.assertEqual(loris_request.request_type, 'image')
        self.assertEqual(loris_request.params['quality'], 'gray')
        # 'native' is IIIF 1.x vocabulary and must be rejected.
        loris_request = self._loris_request(
            '/%s/full/full/0/native.jpg' % (self.test_jp2_color_id,))
        self.assertEqual(loris_request.request_type, u'bad_image_request')

    def test_img_format(self):
        loris_request = self._loris_request(
            '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,))
        self.assertEqual(loris_request.request_type, 'image')
        self.assertEqual(loris_request.params['format'], 'jpg')

    def test_many_slash_img_request(self):
        identifier = '1/2/3/4/5/6/7/8/9/xyz'
        encoded_identifier = '1%2F2%2F3%2F4%2F5%2F6%2F7%2F8%2F9%2Fxyz'
        loris_request = self._loris_request('/%s/full/full/0/default.jpg' % identifier)
        self.assertEqual(loris_request.ident, encoded_identifier)
        expected_params = {'region': u'full', 'size': u'full', 'rotation': u'0', 'quality': u'default', 'format': u'jpg'}
        self.assertEqual(loris_request.params, expected_params)
        self.assertEqual(loris_request.request_type, u'image')

    def test_https_uri_identifier(self):
        identifier = 'https://sample.sample/0001'
        encoded_identifier = 'https%3A%2F%2Fsample.sample%2F0001'
        loris_request = self._loris_request('/%s/full/full/0/default.jpg' % identifier)
        self.assertEqual(loris_request.ident, encoded_identifier)
        expected_params = {'region': u'full', 'size': u'full', 'rotation': u'0', 'quality': u'default', 'format': u'jpg'}
        self.assertEqual(loris_request.params, expected_params)
        self.assertEqual(loris_request.request_type, u'image')

    def test_many_slash_info_request(self):
        identifier = '1/2/3/4/5/6/7/8/9/xyz'
        encoded_identifier = '1%2F2%2F3%2F4%2F5%2F6%2F7%2F8%2F9%2Fxyz'
        loris_request = self._loris_request('/%s/info.json' % identifier)
        self.assertEqual(loris_request.request_type, u'info')
        self.assertEqual(loris_request.ident, encoded_identifier)

    def test_template_delimiter_request(self):
        identifier = u'a:foo|bar'
        encoded_identifier = u'a%3Afoo%7Cbar'
        # image request; construct LorisRequest with its own defaults here,
        # as the original did.
        req = _get_werkzeug_request(u'/%s/full/full/0/default.jpg' % identifier)
        loris_request = webapp.LorisRequest(req)
        self.assertEqual(loris_request.request_type, u'image')
        self.assertEqual(loris_request.ident, encoded_identifier)
        # info request
        req = _get_werkzeug_request(u'/%s/info.json' % identifier)
        loris_request = webapp.LorisRequest(req)
        self.assertEqual(loris_request.request_type, u'info')
        self.assertEqual(loris_request.ident, encoded_identifier)
class TestGetInfo(loris_t.LorisTest):
    """Tests for Loris._get_info."""

    def test_get_info(self):
        """_get_info resolves an identifier and stamps the base URI on it."""
        req = _get_werkzeug_request(path='/%s/' % self.test_jp2_color_id)
        base_uri = 'http://example.org/01%2F02%2F0001.jp2'
        info, last_mod = self.app._get_info(self.test_jp2_color_id, req, base_uri)
        self.assertEqual(info.ident, base_uri)

    def test_get_info_invalid_src_format(self):
        # The src-format validation this exercised was factored out of
        # webapp._get_info. --azaroth42 2017-07-07
        # Report the test as skipped rather than silently passing via an
        # early return, so runners show it is intentionally disabled.
        self.skipTest('src-format validation was factored out of _get_info')
class WebappIntegration(loris_t.LorisTest):
'Simulate working with the webapp over HTTP.'
def test_index(self):
    """GET / serves the greeting page."""
    resp = self.client.get('/')
    self.assertEqual(resp.status_code, 200)
    body = resp.data.decode('utf8')
    self.assertTrue(body.startswith('This is Loris, '))
def test_favicon(self):
    """The favicon is served with a 200."""
    self.assertEqual(self.client.get('/favicon.ico').status_code, 200)
def test_bare_identifier_request_303(self):
    """A bare identifier redirects (303) to its info.json."""
    resp = self.client.get('/' + self.test_jp2_color_id)
    self.assertEqual(resp.status_code, 303)
    self.assertEqual(
        resp.headers['Location'],
        'http://localhost/01%2F02%2F0001.jp2/info.json')
def test_bare_identifier_request_with_trailing_slash_303(self):
    """A trailing-slash bare identifier also 303s to info.json."""
    resp = self.client.get('/%s/' % self.test_jp2_color_id)
    self.assertEqual(resp.status_code, 303)
    self.assertEqual(
        resp.headers['Location'],
        'http://localhost/01%2F02%2F0001.jp2/info.json')
def test_bare_identifier_with_trailing_slash_404s_with_redir_off(self):
    """With the slash redirect disabled, such requests 404."""
    self.app.redirect_id_slash_to_info = False
    resp = self.client.get('/%s/' % self.test_jp2_color_id)
    self.assertEqual(resp.status_code, 404)
def test_access_control_allow_origin_on_bare_identifier(self):
    """The CORS wildcard header accompanies bare-identifier responses."""
    resp = self.client.get('/' + self.test_jp2_color_id, follow_redirects=False)
    self.assertEqual(resp.headers['access-control-allow-origin'], '*')
def test_access_control_allow_origin_on_info_requests(self):
    """The CORS wildcard header accompanies info.json responses."""
    resp = self.client.get('/%s/info.json' % self.test_jp2_color_id)
    self.assertEqual(resp.headers['access-control-allow-origin'], '*')
def test_access_control_allow_origin_on_img_request(self):
    """The CORS wildcard header accompanies image responses."""
    resp = self.client.get('/%s/full/100,/0/default.jpg' % self.test_jp2_color_id)
    self.assertEqual(resp.headers['access-control-allow-origin'], '*')
def test_cors_regex_match(self):
    """When cors_regex matches the host, the origin is echoed back."""
    self.app.cors_regex = re.compile('calhos')
    to_get = '/%s/full/110,/0/default.jpg' % (self.test_jp2_color_id,)
    resp = self.client.get(to_get)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(resp.headers['Access-Control-Allow-Origin'], 'http://localhost/')
def test_cors_regex_no_match(self):
    """When cors_regex does not match, no CORS header is emitted."""
    self.app.cors_regex = re.compile('fooxyz')
    to_get = '/%s/full/120,/0/default.jpg' % (self.test_jp2_color_id,)
    resp = self.client.get(to_get)
    # Headers.has_key() is gone on Python 3 / modern Werkzeug; use `in`.
    self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
def test_bare_broken_identifier_request_404(self):
    """An unknown bare identifier 404s with a plain-text body."""
    resp = self.client.get('/foo%2Fbar')
    self.assertEqual(resp.status_code, 404)
    self.assertEqual(resp.headers['content-type'], 'text/plain')
def test_info_not_found_request(self):
    """info.json for an unknown identifier 404s with a plain-text body."""
    resp = self.client.get('/foobar/info.json')
    self.assertEqual(resp.status_code, 404)
    self.assertEqual(resp.headers['content-type'], 'text/plain')
def test_image_not_found_request(self):
    """An image request for an unknown identifier 404s with plain text."""
    resp = self.client.get('/foobar/full/full/0/default.jpg')
    self.assertEqual(resp.status_code, 404)
    self.assertEqual(resp.headers['content-type'], 'text/plain')
def test_bare_identifier_request_303_gets_info(self):
    """Following the bare-identifier redirect yields a parsable info.json.

    Nearly a copy of
    img_info_t.C_InfoFunctionalTests#test_jp2_info_dot_json_request.
    """
    resp = self.client.get('/' + self.test_jp2_color_id, follow_redirects=True)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.headers['content-type'], 'application/json')
    tmp_fp = path.join(self.app.tmp_dp, 'loris_test_info.json')
    with open(tmp_fp, 'wb') as f:
        f.write(resp.data)
    info = img_info.ImageInfo.from_json_fp(tmp_fp)
    self.assertEqual(info.width, self.test_jp2_color_dims[0])
    def test_info_without_dot_json_404(self):
        """'/id/info' (no .json) is treated as an identifier and 404s."""
        # Note that this isn't what we really want...should be 400, but this
        # gets through as an ID. Technically OK, I think.
        to_get = '/%s/info' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 404)
def test_image_without_format_400(self):
to_get = '/%s/full/full/0/default' % (self.test_jp2_color_id,)
resp = self.client.get(to_get)
self.assertEqual(resp.status_code, 400)
    def test_image_redirect_to_canonical(self):
        """With redirect_canonical_image_request on, non-canonical URIs 301."""
        self.app.redirect_canonical_image_request = True
        to_get = '/%s/0,0,500,600/!550,600/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 301)
    def test_image_no_redirect_to_canonical(self):
        """With redirect_canonical_image_request off, the image is served directly."""
        self.app.redirect_canonical_image_request = False
        to_get = '/%s/0,0,500,600/!550,600/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)
    def test_image_proxy_path_canonical_link(self):
        """When proxy_path is set, the canonical Link header uses the proxy base."""
        self.app.proxy_path = 'https://proxy_example.org/image/'
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)
        link = '<http://iiif.io/api/image/2/level2.json>;rel="profile",<https://proxy_example.org/image/01%2F02%2F0001.jp2/full/full/0/default.jpg>;rel="canonical"'
        self.assertEqual(resp.headers['Link'], link)
    def test_image_canonical_link(self):
        """Without a proxy path, the canonical Link header uses the request host."""
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)
        link = '<http://iiif.io/api/image/2/level2.json>;rel="profile",<http://localhost/01%2F02%2F0001.jp2/full/full/0/default.jpg>;rel="canonical"'
        self.assertEqual(resp.headers['Link'], link)
    def test_img_sends_304(self):
        """An If-Modified-Since at or after Last-Modified yields 304 for images."""
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # get an image
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 200)
        lmod = resp.headers['Last-Modified']
        sleep(1) # just make sure.
        headers = Headers([('If-Modified-Since', lmod)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)
        sleep(1)
        dt = http_date(datetime.utcnow()) # ~2 seconds later
        headers = Headers([('If-Modified-Since', dt)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)
def test_img_reduce(self):
to_get = '/%s/full/300,/0/default.jpg' % (self.test_jp2_color_id,)
resp = self.client.get(to_get)
self.assertEqual(resp.status_code, 200)
    def test_no_ims_header_ok(self):
        """Without an If-Modified-Since header the image is served normally."""
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # get an image
        resp = self.client.get(to_get, headers=Headers())
        self.assertEqual(resp.status_code, 200)
    def test_info_fake_jp2(self):
        """A file that is not really a JP2 produces a 500 with a clear message."""
        to_get = '/01%2F03%2Ffake.jp2/info.json'
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 500)
        self.assertEqual(resp.data.decode('utf8'), 'Server Side Error: Invalid JP2 file (500)')
    def test_info_sends_304(self):
        """An If-Modified-Since at or after Last-Modified yields 304 for info.json."""
        to_get = '/%s/info.json' % (self.test_jp2_color_id,)
        # get an image
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 200)
        lmod = resp.headers['Last-Modified']
        sleep(1) # just make sure.
        headers = Headers([('if-modified-since', lmod)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)
        sleep(1)
        dt = http_date(datetime.utcnow()) # ~2 seconds later
        headers = Headers([('if-modified-since', dt)])
        resp = self.client.get(to_get, headers=headers)
        self.assertEqual(resp.status_code, 304)
    def test_info_with_callback_is_wrapped_correctly(self):
        """A JSONP ``callback`` query param wraps the info body in callback(...);."""
        to_get = '/%s/info.json?callback=mycallback' % self.test_jpeg_id
        resp = self.client.get(to_get)
        assert resp.status_code == 200
        assert re.match(r'^mycallback\(.*\);$', resp.data.decode('utf8'))
    def test_info_as_options(self):
        """An OPTIONS preflight for info.json advertises 'GET, OPTIONS'."""
        to_opt = '/%s/info.json?callback=mycallback' % self.test_jpeg_id
        resp = self.client.options(to_opt)
        assert resp.status_code == 200
        assert resp.headers.get('Access-Control-Allow-Methods') == 'GET, OPTIONS'
def test_bad_format_returns_400(self):
to_get = '/%s/full/full/0/default.hey' % (self.test_jp2_color_id,)
resp = self.client.get(to_get)
self.assertEqual(resp.status_code, 400)
    def test_bad_quality_returns_400(self):
        """An unknown quality value ('native') is rejected with a 400."""
        to_get = '/%s/full/full/0/native.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)
    def test_bad_quality_for_gray_image_returns_400(self):
        """Requesting 'color' quality from a grayscale source is a 400."""
        to_get = '/%s/full/full/0/color.jpg' % (self.test_jp2_gray_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)
    def test_bad_rotation_returns_400(self):
        """A non-numeric rotation parameter is rejected with a 400."""
        to_get = '/%s/full/full/x/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)
    def test_bad_size_returns_400(self):
        """An unparseable size parameter is rejected with a 400."""
        to_get = '/%s/full/xyz/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)
    def test_bad_region_returns_400(self):
        """An unparseable region parameter is rejected with a 400."""
        to_get = '/%s/foo_/full/0/default.jpg' % (self.test_jp2_color_id,)
        resp = self.client.get(to_get)
        self.assertEqual(resp.status_code, 400)
    def test_cleans_up_when_caching(self):
        """With caching enabled, no temp files remain after the response closes."""
        self.app.enable_caching = True
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # We use the response as a context manager to ensure it gets
        # closed before the test ends.
        with self.client.get(to_get):
            pass
        self._assert_tmp_has_no_files()
    def test_cleans_up_when_not_caching(self):
        """With caching disabled, no temp files remain after the response closes."""
        self.app.enable_caching = False
        to_get = '/%s/full/full/0/default.jpg' % (self.test_jp2_color_id,)
        # We use the response as a context manager to ensure it gets
        # closed before the test ends.
        with self.client.get(to_get):
            pass
        self._assert_tmp_has_no_files()
def _assert_tmp_has_no_files(self):
# callback should delete the image before the test ends, so the tmp dir
# should not contain any files (there may be dirs)
tmp = self.app.tmp_dp
any_files = any([path.isfile(path.join(tmp, n)) for n in listdir(tmp)])
self.assertTrue(not any_files, "There are too many files in %s: %s" % (tmp, any_files))
class SizeRestriction(loris_t.LorisTest):
    '''Tests for restriction of the size parameter via max_size_above_full.'''
    def setUp(self):
        '''Set max_size_above_full to 100 for tests.'''
        super(SizeRestriction, self).setUp()
        self.app.max_size_above_full = 100
    def test_json_no_size_above_full(self):
        '''Is 'sizeAboveFull' removed from json?'''
        request_path = '/%s/info.json' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
        self.assertFalse('sizeAboveFull' in resp.data.decode('utf8'))
    # NOTE: leading underscore keeps this test from being collected/run.
    def _test_json_has_size_above_full(self):
        '''Does sizeAboveFull remain in info.json if size > 100?'''
        self.app.max_size_above_full = 200
        request_path = '/%s/info.json' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('sizeAboveFull' in resp.data.decode('utf8'))
    def test_full_full(self):
        '''full/full has no size restrictions.'''
        request_path = '/%s/full/full/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
    def test_percent_ok(self):
        '''pct:100 is allowed.'''
        request_path = '/%s/full/pct:100/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
    def test_percent_ok_200(self):
        '''pct:200 is allowed if max_size_above_full is 200.'''
        self.app.max_size_above_full = 200
        request_path = '/%s/full/pct:200/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
    def test_percent_exceeds_100(self):
        '''Restrict interpolation. So pct:101 must be rejected.'''
        request_path = '/%s/full/pct:101/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)
    def test_percent_exceeds_200(self):
        '''Restrict interpolation to 200. So pct:201 must be rejected.'''
        self.app.max_size_above_full = 200
        request_path = '/%s/full/pct:201/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)
    def test_size_width_ok(self):
        '''Explicit width in size parameter is not larger than image size.'''
        request_path = '/%s/full/3600,/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
    def test_size_width_too_big(self):
        '''Explicit width in size parameter is larger than image size.'''
        request_path = '/%s/full/3601,/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)
    def test_size_height_ok(self):
        '''Explicit height in size parameter is not larger than image height.'''
        request_path = '/%s/full/,2987/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
    def test_size_height_to_big(self):
        '''Explicit height in size parameter is larger than image height.'''
        request_path = '/%s/full/,2988/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)
    def test_region_too_big(self):
        '''It's not allowed to make a region larger than 100% of original
        region size.'''
        request_path = '/%s/100,100,100,100/120,/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 404)
    def test_no_restriction(self):
        '''If max_size_above_full is set to 0, users can request
        any image size.'''
        self.app.max_size_above_full = 0
        request_path = '/%s/full/pct:120/0/default.jpg' % (self.test_jpeg_id,)
        resp = self.client.get(request_path)
        self.assertEqual(resp.status_code, 200)
from .CodeHelpers import generateExpressionCode
from .ErrorCodes import getErrorExitCode
def generateCAPIObjectCodeCommon(to_name, capi, arg_desc, may_raise, ref_count,
                                source_ref, emit, context, none_null = False):
    # Shared worker for the ref-count-1 / ref-count-0 C-API call generators:
    # evaluates each argument expression into a temp name (or emits the
    # literal "NULL" when allowed), then emits the actual C-API call.
    arg_names = []
    for arg_name, arg_expression in arg_desc:
        if arg_expression is None and none_null:
            # Caller opted in (none_null) to passing C NULL for absent args.
            arg_names.append("NULL")
        else:
            # NOTE: rebinding the loop variable is intentional; it now holds
            # the allocated temp variable name, not the descriptor name.
            arg_name = context.allocateTempName(arg_name)
            generateExpressionCode(
                to_name    = arg_name,
                expression = arg_expression,
                emit       = emit,
                context    = context
            )
            arg_names.append(arg_name)
    # Attribute the emitted call to the original source location.
    context.setCurrentSourceCodeReference(source_ref)
    getCAPIObjectCode(
        to_name   = to_name,
        capi      = capi,
        arg_names = arg_names,
        may_raise = may_raise,
        ref_count = ref_count,
        emit      = emit,
        context   = context
    )
def generateCAPIObjectCode(to_name, capi, arg_desc, may_raise, source_ref, emit,
                           context, none_null = False):
    # Thin front-end: a C-API call whose result comes back as a *new*
    # reference (ref_count 1), so cleanup of the target temp is registered.
    generateCAPIObjectCodeCommon(
        to_name,
        capi,
        arg_desc,
        may_raise,
        ref_count  = 1,
        source_ref = source_ref,
        emit       = emit,
        context    = context,
        none_null  = none_null
    )
def generateCAPIObjectCode0(to_name, capi, arg_desc, may_raise, source_ref,
                            emit, context, none_null = False):
    # Thin front-end: a C-API call whose result is a *borrowed* reference
    # (ref_count 0), so no cleanup is registered for the target temp.
    generateCAPIObjectCodeCommon(
        to_name,
        capi,
        arg_desc,
        may_raise,
        ref_count  = 0,
        source_ref = source_ref,
        emit       = emit,
        context    = context,
        none_null  = none_null
    )
def getCAPIObjectCode(to_name, capi, arg_names, may_raise, ref_count, emit,
                      context):
    # Emit the actual C call "to_name = capi( arg, ... );" followed by the
    # standard error-exit check, which also releases the argument temps.
    emit(
        "%s = %s( %s );" % (
            to_name,
            capi,
            ", ".join(
                str(arg_name)
                for arg_name in
                arg_names
            )
        )
    )
    getErrorExitCode(
        check_name    = to_name,
        release_names = (
            arg_name
            for arg_name in
            arg_names
            # "NULL" placeholders were never allocated, so never released.
            if arg_name != "NULL"
        ),
        needs_check   = may_raise,
        emit          = emit,
        context       = context
    )
    if ref_count:
        # The call returned a new reference; register the temp for cleanup.
        context.addCleanupTempName(to_name)
def getReferenceExportCode(base_name, emit, context):
    # Take a new reference only when no cleanup (implicit release) is
    # registered for this name; otherwise ownership transfers as-is.
    if context.needsCleanup(base_name):
        return
    emit("Py_INCREF( %s );" % base_name)
from .ErrorCodes import getErrorExitCode
def generateCAPIObjectCodeCommon(to_name, capi, arg_desc, may_raise, ref_count,
                                source_ref, emit, context, none_null = False):
    # NOTE(review): duplicate of the copy above (dataset/extraction artifact).
    # Evaluates argument expressions into temps (or "NULL") then emits the call.
    arg_names = []
    for arg_name, arg_expression in arg_desc:
        if arg_expression is None and none_null:
            arg_names.append("NULL")
        else:
            arg_name = context.allocateTempName(arg_name)
            generateExpressionCode(
                to_name    = arg_name,
                expression = arg_expression,
                emit       = emit,
                context    = context
            )
            arg_names.append(arg_name)
    context.setCurrentSourceCodeReference(source_ref)
    getCAPIObjectCode(
        to_name   = to_name,
        capi      = capi,
        arg_names = arg_names,
        may_raise = may_raise,
        ref_count = ref_count,
        emit      = emit,
        context   = context
    )
def generateCAPIObjectCode(to_name, capi, arg_desc, may_raise, source_ref, emit,
                           context, none_null = False):
    # Duplicate copy: C-API call returning a new reference (ref_count 1).
    generateCAPIObjectCodeCommon(
        to_name    = to_name,
        capi       = capi,
        arg_desc   = arg_desc,
        may_raise  = may_raise,
        ref_count  = 1,
        source_ref = source_ref,
        emit       = emit,
        context    = context,
        none_null  = none_null
    )
def generateCAPIObjectCode0(to_name, capi, arg_desc, may_raise, source_ref,
                            emit, context, none_null = False):
    # Duplicate copy: C-API call returning a borrowed reference (ref_count 0).
    generateCAPIObjectCodeCommon(
        to_name    = to_name,
        capi       = capi,
        arg_desc   = arg_desc,
        may_raise  = may_raise,
        ref_count  = 0,
        source_ref = source_ref,
        emit       = emit,
        context    = context,
        none_null  = none_null
    )
def getCAPIObjectCode(to_name, capi, arg_names, may_raise, ref_count, emit,
                      context):
    # Duplicate copy: emit the C call, error-exit check, and optional cleanup.
    emit(
        "%s = %s( %s );" % (
            to_name,
            capi,
            ", ".join(
                str(arg_name)
                for arg_name in
                arg_names
            )
        )
    )
    getErrorExitCode(
        check_name    = to_name,
        release_names = (
            arg_name
            for arg_name in
            arg_names
            if arg_name != "NULL"
        ),
        needs_check   = may_raise,
        emit          = emit,
        context       = context
    )
    if ref_count:
        context.addCleanupTempName(to_name)
def getReferenceExportCode(base_name, emit, context):
    # Duplicate copy: INCREF only when no cleanup is registered for the name.
    if not context.needsCleanup(base_name):
        emit("Py_INCREF( %s );" % base_name)
import tkinter as tk
from tkinter.ttk import Combobox
from tkinter import font
class FontSelector(tk.Toplevel):
    """A font selector popup.

    Reads initial values from ``master.font`` (a ``tkinter.font.Font``) and
    applies the user's choices back to that same Font object, so every widget
    configured with it updates immediately.
    """
    def __init__(self, master):
        super().__init__(master)
        self.master = master
        self.font = master.font
        self.title('Font')
        self.transient(self.master)
        self.resizable(False, False)
        self.wm_attributes('-topmost', 'true')
        # '-toolwindow' is a Windows-only wm attribute; guard it so the
        # dialog still opens on X11 / macOS instead of raising TclError.
        try:
            self.wm_attributes('-toolwindow', 'true')
        except tk.TclError:
            pass
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.focus_set()
        # get sorted list of fonts families
        fonts = sorted(font.families())
        sizes = [8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, 26, 28, 36, 38, 72]
        # create widgets, pre-set from the current font configuration
        self.family = Combobox(self, values=fonts, width=30)
        self.family.set(self.master.font['family'])
        self.size = Combobox(self, values=sizes, width=2)
        self.size.set(self.master.font['size'])
        self.weight = tk.StringVar()
        self.weight.set(self.font['weight'])
        self.weight_cb = tk.Checkbutton(self, text='Bold', anchor=tk.W, variable=self.weight, onvalue='bold', offvalue='normal')
        self.slant = tk.StringVar()
        self.slant.set(self.font['slant'])
        self.slant_cb = tk.Checkbutton(self, text='Slant', anchor=tk.W, variable=self.slant, onvalue='italic', offvalue='roman')
        self.underline = tk.IntVar()
        self.underline.set(self.font['underline'])
        self.underline_cb = tk.Checkbutton(self, text='Underline', anchor=tk.W, variable=self.underline)
        self.overstrike = tk.IntVar()
        self.overstrike.set(self.font['overstrike'])
        self.overstrike_cb = tk.Checkbutton(self, text='Overstrike', anchor=tk.W, variable=self.overstrike)
        self.ok_btn = tk.Button(self, text='OK', command=self.change_font)
        self.cancel_btn = tk.Button(self, text='Cancel', command=self.cancel)
        # arrange widgets on grid
        self.family.grid(row=0, column=0, columnspan=4, sticky=tk.EW, padx=15, pady=15, ipadx=2, ipady=2)
        self.size.grid(row=0, column=4, sticky=tk.EW, padx=15, pady=15, ipadx=2, ipady=2)
        self.weight_cb.grid(row=1, column=0, sticky=tk.EW, padx=15)
        self.slant_cb.grid(row=1, column=1, sticky=tk.EW, padx=15)
        self.underline_cb.grid(row=2, column=0, sticky=tk.EW, padx=15)
        self.overstrike_cb.grid(row=2, column=1, sticky=tk.EW, padx=15)
        self.ok_btn.grid(row=1, column=3, columnspan=2, sticky=tk.EW, ipadx=15, padx=15)
        self.cancel_btn.grid(row=2, column=3, columnspan=2, sticky=tk.EW, ipadx=15, padx=15, pady=(5, 15))
    def change_font(self):
        """Apply font changes to the shared Font object and close the dialog"""
        self.font['family'] = self.family.get()
        self.font['size'] = self.size.get()
        self.font['weight'] = self.weight.get()
        self.font['underline'] = self.underline.get()
        self.font['slant'] = self.slant.get()
        self.font['overstrike'] = self.overstrike.get()
        self.master.text.focus()
        self.destroy()
    def cancel(self):
        """Cancel the request and return control to main window"""
        self.master.text.focus()
        self.destroy()
class TestWindow(tk.Tk):
    """A window used for testing the various module dialogs"""
    def __init__(self):
        super().__init__()
        self.title('Testing Window')
        # Shared Font object: FontSelector mutates this in place.
        self.font = font.Font(family='Courier New', size=14, weight=font.BOLD, slant=font.ROMAN, underline=False, overstrike=False)
        self.text = tk.Text(self, font=self.font)
        self.text.pack(fill=tk.BOTH, expand=tk.YES)
        self.text.insert(tk.END, 'This is a test. This is only a test.')
if __name__ == '__main__':
    # Manual smoke test: open the test window with the font dialog on top.
    w = TestWindow()
    FontSelector(w)
    w.mainloop()
from tkinter.ttk import Combobox
from tkinter import font
class FontSelector(tk.Toplevel):
    """A font selector popup.

    NOTE(review): duplicate of the copy above (dataset/extraction artifact).
    The '-toolwindow' wm attribute is presumably Windows-only — confirm
    before running on other platforms.
    """
    def __init__(self, master):
        super().__init__(master)
        self.master = master
        self.font = master.font
        self.title('Font')
        self.transient(self.master)
        self.resizable(False, False)
        self.wm_attributes('-topmost', 'true', '-toolwindow', 'true')
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.focus_set()
        # get sorted list of fonts families
        fonts = sorted(font.families())
        sizes = [8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, 26, 28, 36, 38, 72]
        # create widgets
        self.family = Combobox(self, values=fonts, width=30)
        self.family.set(self.master.font['family'])
        self.size = Combobox(self, values=sizes, width=2)
        self.size.set(self.master.font['size'])
        self.weight = tk.StringVar()
        self.weight.set(self.font['weight'])
        self.weight_cb = tk.Checkbutton(self, text='Bold', anchor=tk.W, variable=self.weight, onvalue='bold', offvalue='normal')
        self.slant = tk.StringVar()
        self.slant.set(self.font['slant'])
        self.slant_cb = tk.Checkbutton(self, text='Slant', anchor=tk.W, variable=self.slant, onvalue='italic', offvalue='roman')
        self.underline = tk.IntVar()
        self.underline.set(self.font['underline'])
        self.underline_cb = tk.Checkbutton(self, text='Underline', anchor=tk.W, variable=self.underline)
        self.overstrike = tk.IntVar()
        self.overstrike.set(self.font['overstrike'])
        self.overstrike_cb = tk.Checkbutton(self, text='Overstrike', anchor=tk.W, variable=self.overstrike)
        self.ok_btn = tk.Button(self, text='OK', command=self.change_font)
        self.cancel_btn = tk.Button(self, text='Cancel', command=self.cancel)
        # arrange widgets on grid
        self.family.grid(row=0, column=0, columnspan=4, sticky=tk.EW, padx=15, pady=15, ipadx=2, ipady=2)
        self.size.grid(row=0, column=4, sticky=tk.EW, padx=15, pady=15, ipadx=2, ipady=2)
        self.weight_cb.grid(row=1, column=0, sticky=tk.EW, padx=15)
        self.slant_cb.grid(row=1, column=1, sticky=tk.EW, padx=15)
        self.underline_cb.grid(row=2, column=0, sticky=tk.EW, padx=15)
        self.overstrike_cb.grid(row=2, column=1, sticky=tk.EW, padx=15)
        self.ok_btn.grid(row=1, column=3, columnspan=2, sticky=tk.EW, ipadx=15, padx=15)
        self.cancel_btn.grid(row=2, column=3, columnspan=2, sticky=tk.EW, ipadx=15, padx=15, pady=(5, 15))
    def change_font(self):
        """Apply font changes to the main text widget"""
        self.font['family'] = self.family.get()
        self.font['size'] = self.size.get()
        self.font['weight'] = self.weight.get()
        self.font['underline'] = self.underline.get()
        self.font['slant'] = self.slant.get()
        self.font['overstrike'] = self.overstrike.get()
        self.master.text.focus()
        self.destroy()
    def cancel(self):
        """Cancel the request and return control to main window"""
        self.master.text.focus()
        self.destroy()
class TestWindow(tk.Tk):
    """A window used for testing the various module dialogs"""
    def __init__(self):
        super().__init__()
        self.title('Testing Window')
        # Shared Font object: FontSelector mutates this in place.
        self.font = font.Font(family='Courier New', size=14, weight=font.BOLD, slant=font.ROMAN, underline=False, overstrike=False)
        self.text = tk.Text(self, font=self.font)
        self.text.pack(fill=tk.BOTH, expand=tk.YES)
        self.text.insert(tk.END, 'This is a test. This is only a test.')
if __name__ == '__main__':
    # Manual smoke test: open the test window with the font dialog on top.
    w = TestWindow()
    FontSelector(w)
    w.mainloop()
import tensorflow as tf
from dl_playground.networks.model import BatchModel, ContinualModel
class LayerModel(tf.keras.Model, BatchModel):
    """A model containing exactly one layer.

    A thin wrapper around the layer to comply with the model interface;
    every hook below simply delegates to the wrapped layer.
    Parameters
    ----------
    layer : tf.keras.layers.Layer & BatchLayer
    """
    def __init__(self, layer):
        super(LayerModel, self).__init__()
        self._layer = layer
    def call(self, inputs, training=None, **kwargs):
        return self._layer(inputs, training=training, **kwargs)
    def predict(self, inputs):
        return self._layer.predict(inputs)
    def loss_fn(self, batch, prediction, step):
        return self._layer.loss_fn(batch, prediction, step)
    def train_callback(self):
        return self._layer.train_callback()
    def metric_fn(self, batch, prediction):
        return self._layer.metric_fn(batch, prediction)
    def summary(self, writer, batch, step, training=None):
        return self._layer.summary(
            writer, batch, step, training=training
        )
class BatchLayerContinualModel(tf.keras.Model, ContinualModel):
    """A Model containing a single BatchLayer, trained one batch at a time.

    Parameters
    ----------
    layer : BatchLayer
    optimizer : tf.keras.optimizers.Optimizer
    """
    def __init__(self, layer, optimizer):
        super(BatchLayerContinualModel, self).__init__()
        self._layer = layer
        self._opt = optimizer
        # Running step counter, incremented once per (non-frozen) perceive().
        self._step = 0
    def perceive(
        self,
        batch,
        freeze=False,
        return_eval=False,
        task_id=None,
    ):
        """Main function: forward pass, loss, and (unless frozen) one update.

        Parameters
        ----------
        batch : dict | list(tf.Tensor) | tf.Tensor
        freeze : bool
        return_eval : bool
        task_id : int | None
        Returns
        -------
        pred : tf.Tensor | [tf.Tensor] | dict
            The output of the `call` function of the underlying layer
        perf : tf.Tensor
            Optional
        """
        # Forward pass and loss are recorded under the tape so gradients
        # can be taken afterwards.
        with tf.GradientTape() as tape:
            pred = self._layer(batch, training=not freeze)
            losses = self._layer.loss_fn(
                batch=batch,
                prediction=pred,
                step=self._step,
                task_id=task_id,
            )
            # Scalar training loss averaged over the batch.
            loss = tf.reduce_mean(losses['loss'])
        if not freeze:
            grads = tape.gradient(loss, self._layer.trainable_weights)
            self._opt.apply_gradients(
                zip(grads, self._layer.trainable_weights)
            )
            self._step += 1
        if return_eval:
            perf = self._layer.metric_fn(
                batch=batch,
                prediction=pred,
                task_id=task_id,
            )
            return pred, losses, perf
        return pred
    def evaluate(self, batch, task_id=None):
        """Evaluates the batch (no gradient, no parameter update).

        Parameters
        ----------
        batch : tf.Tensor | list | dict
        task_id : int | None
        Returns
        -------
        perf : tf.Tensor, shape (B,)
        """
        pred = self._layer.call(batch, training=False)
        perf = self._layer.metric_fn(
            batch=batch,
            prediction=pred,
            task_id=task_id,
        )
        return perf
    def eval_and_summary(self, writer, batch, step, task_id=None):
        # Mean performance on the batch, written as a scalar summary.
        perf = tf.reduce_mean(self.evaluate(batch, task_id=task_id))
        with writer.as_default():
            tf.summary.scalar('perf', tf.reduce_mean(perf), step=step)
    def summary(self, writer, batch, step, training=None):
        return self._layer.summary(
            writer, batch, step, training=training
        )
from dl_playground.networks.model import BatchModel, ContinualModel
class LayerModel(tf.keras.Model, BatchModel):
    """A model containing one layer.

    NOTE(review): duplicate of the copy above (dataset/extraction artifact).
    A thin wrapper around the layer to comply with model interface.
    Parameters
    ----------
    layer : tf.keras.layers.Layer & BatchLayer
    """
    def __init__(self, layer):
        super(LayerModel, self).__init__()
        self._layer = layer
    def call(self, inputs, training=None, **kwargs):
        return self._layer(inputs, training=training, **kwargs)
    def predict(self, inputs):
        return self._layer.predict(inputs)
    def loss_fn(self, batch, prediction, step):
        return self._layer.loss_fn(batch, prediction, step)
    def train_callback(self):
        return self._layer.train_callback()
    def metric_fn(self, batch, prediction):
        return self._layer.metric_fn(batch, prediction)
    def summary(self, writer, batch, step, training=None):
        return self._layer.summary(
            writer, batch, step, training=training
        )
class BatchLayerContinualModel(tf.keras.Model, ContinualModel):
    """A Model containing a single BatchLayer.

    NOTE(review): duplicate of the copy above (dataset/extraction artifact).
    Parameters
    ----------
    layer : BatchLayer
    """
    def __init__(self, layer, optimizer):
        super(BatchLayerContinualModel, self).__init__()
        self._layer = layer
        self._opt = optimizer
        self._step = 0
    def perceive(
        self,
        batch,
        freeze=False,
        return_eval=False,
        task_id=None,
    ):
        """Main function.

        Parameters
        ----------
        batch : dict | list(tf.Tensor) | tf.Tensor
        freeze : bool
        return_eval : bool
        task_id : int | None
        Returns
        -------
        pred : tf.Tensor | [tf.Tensor] | dict
            The output of the `call` function of the underlying layer
        perf : tf.Tensor
            Optional
        """
        with tf.GradientTape() as tape:
            pred = self._layer(batch, training=not freeze)
            losses = self._layer.loss_fn(
                batch=batch,
                prediction=pred,
                step=self._step,
                task_id=task_id,
            )
            loss = tf.reduce_mean(losses['loss'])
        if not freeze:
            grads = tape.gradient(loss, self._layer.trainable_weights)
            self._opt.apply_gradients(
                zip(grads, self._layer.trainable_weights)
            )
            self._step += 1
        if return_eval:
            perf = self._layer.metric_fn(
                batch=batch,
                prediction=pred,
                task_id=task_id,
            )
            return pred, losses, perf
        return pred
    def evaluate(self, batch, task_id=None):
        """Evaluates the batch.

        Parameters
        ----------
        batch : tf.Tensor | list | dict
        task_id : int | None
        Returns
        -------
        perf : tf.Tensor, shape (B,)
        """
        pred = self._layer.call(batch, training=False)
        perf = self._layer.metric_fn(
            batch=batch,
            prediction=pred,
            task_id=task_id,
        )
        return perf
    def eval_and_summary(self, writer, batch, step, task_id=None):
        perf = tf.reduce_mean(self.evaluate(batch, task_id=task_id))
        with writer.as_default():
            tf.summary.scalar('perf', tf.reduce_mean(perf), step=step)
    def summary(self, writer, batch, step, training=None):
        return self._layer.summary(
            writer, batch, step, training=training
        )
import json
import os.path
import netaddr
import argparse
from shared.organization import get_organization_accounts
# One-line summary for this subcommand — presumably surfaced by the CLI
# command registry; confirm against the dispatcher.
__description__ = "Add and remove items from the config file"
def _make_condition(any_match):
    """Return a 2-arg predicate: OR when only one filter was supplied,
    AND when both filters must match."""
    if any_match:
        return lambda x, y: x or y
    return lambda x, y: x and y


def configure(action, arguments):
    """Apply *action* to the JSON config file named by ``arguments.config_file``.

    Supported actions: add-account, add-cidr, remove-account, remove-cidr,
    discover-organization-accounts.  The (possibly newly created) config is
    always written back, pretty-printed with sorted keys.
    """
    if not os.path.isfile(arguments.config_file):
        print("Config file does not exist, creating one")
        config = {"accounts": [], "cidrs": {}}
    else:
        with open(arguments.config_file, "r") as f:
            config = json.loads(f.read())
    if action == "add-account":
        config["accounts"].append(
            {
                "id": str(arguments.id),
                "name": str(arguments.name),
                # --default arrives as a string flag; normalize to a bool.
                "default": arguments.default.lower() == "true",
            }
        )
    elif action == "add-cidr":
        # Validate before storing; reject anything netaddr cannot parse.
        try:
            netaddr.IPNetwork(arguments.cidr)
        except netaddr.core.AddrFormatError:
            exit("ERROR: CIDR is not valid")
            return
        config["cidrs"][str(arguments.cidr)] = {"name": str(arguments.name)}
    elif action == "remove-account":
        match = _make_condition(arguments.name is None or arguments.id is None)
        # Rebuild the list instead of calling list.remove() while iterating,
        # which skipped consecutive matches (the iterator shifts on removal).
        config["accounts"] = [
            account
            for account in config["accounts"]
            if not match(account["id"] == arguments.id,
                         account["name"] == arguments.name)
        ]
    elif action == "remove-cidr":
        match = _make_condition(arguments.name is None or arguments.cidr is None)
        config["cidrs"] = {
            cidr: info
            for cidr, info in config["cidrs"].items()
            if not match(cidr == arguments.cidr, info["name"] == arguments.name)
        }
    elif action == "discover-organization-accounts":
        organization_accounts = get_organization_accounts()
        # "accounts" is a list, so the lookup set is built from entry ids.
        current_account_ids = {
            entry["id"] for entry in config.get("accounts", [])
        }
        for organization_account in organization_accounts:
            # Don't overwrite any account already in the configuration file
            if organization_account['id'] not in current_account_ids:
                config["accounts"].append(organization_account)
    with open(arguments.config_file, "w+") as f:
        f.write(json.dumps(config, indent=4, sort_keys=True))
def run(arguments):
    """CLI entry point: parse flags for the given action, then dispatch
    to configure().

    Exits with a usage message when no action is supplied.
    """
    if len(arguments) == 0:
        # exit() raises SystemExit; the original had an unreachable
        # `return` after it.
        exit(
            "ERROR: Missing action for configure.\n"
            "Usage: [add-cidr|add-account|discover-organization-accounts|remove-cidr|remove-account]"
        )
    action = arguments[0]
    arguments = arguments[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-file", help="Path to the config file", default="config.json", type=str
    )
    if action == "add-account" or action == "remove-account":
        # add-* requires the identifying flags; remove-* may filter by either.
        required = action.startswith("add")
        parser.add_argument("--name", help="Account name", required=required, type=str)
        parser.add_argument("--id", help="Account ID", required=required, type=str)
        parser.add_argument(
            "--default",
            help="Default account",
            required=False,
            default="False",
            type=str,
        )
    elif action == "add-cidr" or action == "remove-cidr":
        required = action.startswith("add")
        parser.add_argument("--cidr", help="CIDR IP", required=required, type=str)
        parser.add_argument("--name", help="CIDR Name", required=required, type=str)
    args = parser.parse_args(arguments)
    configure(action, args)
import os.path
import netaddr
import argparse
from shared.organization import get_organization_accounts
__description__ = "Add and remove items from the config file"
def configure(action, arguments):
    # NOTE(review): duplicate of the copy above (dataset/extraction artifact).
    # Applies *action* to the JSON config file and writes it back.
    if not os.path.isfile(arguments.config_file):
        print("Config file does not exist, creating one")
        config = {"accounts": [], "cidrs": {}}
    else:
        with open(arguments.config_file, "r") as f:
            config = json.loads(f.read())
    if action == "add-account":
        config["accounts"].append(
            {
                "id": str(arguments.id),
                "name": str(arguments.name),
                "default": True if arguments.default.lower() == "true" else False,
            }
        )
    elif action == "add-cidr":
        try:
            netaddr.IPNetwork(arguments.cidr)
        except netaddr.core.AddrFormatError:
            exit("ERROR: CIDR is not valid")
            return
        config["cidrs"][str(arguments.cidr)] = {"name": str(arguments.name)}
    elif action == "remove-account":
        if arguments.name is None or arguments.id is None:
            def condition(x, y):
                return x or y
        else:
            def condition(x, y):
                return x and y
        # NOTE(review): removing from the list while iterating it can skip
        # consecutive matching entries — verify before relying on this.
        for account in config["accounts"]:
            if condition(
                account["id"] == arguments.id, account["name"] == arguments.name
            ):
                config["accounts"].remove(account)
    elif action == "remove-cidr":
        if arguments.name is None or arguments.cidr is None:
            def condition(x, y):
                return x or y
        else:
            def condition(x, y):
                return x and y
        # Force it to be a complete set so that deleting the key later on doesn't raise an error because the dictionary Size changed during iteration
        for cidr in set(config["cidrs"].keys()):
            name = config["cidrs"][cidr]["name"]
            if condition(cidr == arguments.cidr, name == arguments.name):
                del config["cidrs"][cidr]
    elif action == "discover-organization-accounts":
        organization_accounts = get_organization_accounts()
        current_accounts = config.get("accounts", {})
        current_account_ids = set(map(lambda entry: entry["id"], current_accounts))
        for organization_account in organization_accounts:
            # Don't overwrite any account already in the configuration file
            if organization_account['id'] not in current_account_ids:
                config["accounts"].append(organization_account)
    with open(arguments.config_file, "w+") as f:
        f.write(json.dumps(config, indent=4, sort_keys=True))
def run(arguments):
    # NOTE(review): duplicate of the copy above (dataset/extraction artifact).
    # CLI entry point: parse flags for the given action and call configure().
    if len(arguments) == 0:
        exit(
            "ERROR: Missing action for configure.\n"
            "Usage: [add-cidr|add-account|discover-organization-accounts|remove-cidr|remove-account]"
        )
        # NOTE(review): unreachable — exit() raises SystemExit above.
        return
    action = arguments[0]
    arguments = arguments[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-file", help="Path to the config file", default="config.json", type=str
    )
    if action == "add-account" or action == "remove-account":
        required = True if action.startswith("add") else False
        parser.add_argument("--name", help="Account name", required=required, type=str)
        parser.add_argument("--id", help="Account ID", required=required, type=str)
        parser.add_argument(
            "--default",
            help="Default account",
            required=False,
            default="False",
            type=str,
        )
    elif action == "add-cidr" or action == "remove-cidr":
        required = True if action.startswith("add") else False
        parser.add_argument("--cidr", help="CIDR IP", required=required, type=str)
        parser.add_argument("--name", help="CIDR Name", required=required, type=str)
    args = parser.parse_args(arguments)
    configure(action, args)
from extra_views.formsets import InlineFormSetMixin
from django.http import HttpResponseRedirect
from django.forms.formsets import all_valid
from vanilla import GenericModelView
class BaseInlinesView(GenericModelView):
    """
    A base view class that provides a way to handle multiple inline
    formsets in a single request.

    Used by:

    * CreateWithInlinesView
    * UpdateWithInlinesView
    """
    inlines = []
    inline_context_names = []
    template_name_suffix = '_form'
    success_url = None

    def get_context_data(self, **kwargs):
        """
        If `inline_context_names` has been defined, add each formset to the
        context under its corresponding entry in `inline_context_names`.
        (The docstring previously referenced a nonexistent ``inlines_names``
        attribute.)
        """
        if self.inline_context_names and 'inlines' in kwargs:
            kwargs.update(zip(self.inline_context_names, kwargs['inlines']))
        return super(BaseInlinesView, self).get_context_data(**kwargs)

    def get_inlines(self, data=None, files=None, **kwargs):
        """
        Return the inline formset instances, one per entry in ``inlines``.

        Extra keyword arguments (e.g. ``instance=``) are forwarded verbatim
        to each formset factory.  (An unused local ``instance`` variable was
        removed.)
        """
        inline_formsets = []
        for inline_class in self.inlines:
            inline_instance = inline_class(self.model)
            inline_formset = inline_instance.get_formset(data=data, files=files, **kwargs)
            inline_formsets.append(inline_formset)
        return inline_formsets

    def forms_valid(self, form, inlines):
        """
        If the form and formsets are valid, save the associated models and redirect.
        """
        self.object = form.save()
        for formset in inlines:
            formset.save()
        return HttpResponseRedirect(self.get_success_url())

    def forms_invalid(self, form, inlines):
        """
        If the form or formsets are invalid, re-render the context data with the
        data-filled form and formsets and errors.
        """
        context = self.get_context_data(form=form, inlines=inlines)
        return self.render_to_response(context)

    def get_success_url(self):
        """Return ``success_url`` if set, otherwise redisplay the current URL."""
        if self.success_url:
            return self.success_url
        return self.request.get_full_path()
class CreateWithInlinesView(BaseInlinesView):
    def get(self, request, *args, **kwargs):
        """
        Render a blank form together with blank inline formsets.
        """
        self.object = None
        context = self.get_context_data(form=self.get_form(),
                                        inlines=self.get_inlines())
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """
        Bind the form and formsets to the POSTed data, validate them, and
        dispatch to ``forms_valid`` / ``forms_invalid`` accordingly.
        """
        self.object = None
        form = self.get_form(request.POST, request.FILES)
        form_ok = form.is_valid()
        if form_ok:
            # The unsaved instance lets the formsets link to the new parent.
            self.object = form.save(commit=False)
            inlines = self.get_inlines(request.POST, request.FILES, instance=self.object)
        else:
            inlines = self.get_inlines(request.POST, request.FILES)
        if form_ok and all_valid(inlines):
            return self.forms_valid(form, inlines)
        return self.forms_invalid(form, inlines)
class UpdateWithInlinesView(BaseInlinesView):
    def get(self, request, *args, **kwargs):
        """
        Render the form and inline formsets pre-filled from the existing object.
        """
        self.object = self.get_object()
        context = self.get_context_data(
            form=self.get_form(instance=self.object),
            inlines=self.get_inlines(instance=self.object),
        )
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """
        Bind the form and formsets to the POSTed data, validate them, and
        dispatch to ``forms_valid`` / ``forms_invalid`` accordingly.
        """
        self.object = self.get_object()
        form = self.get_form(request.POST, request.FILES, instance=self.object)
        form_ok = form.is_valid()
        if form_ok:
            self.object = form.save(commit=False)
            inlines = self.get_inlines(request.POST, request.FILES, instance=self.object)
        else:
            inlines = self.get_inlines(request.POST, request.FILES)
        if form_ok and all_valid(inlines):
            return self.forms_valid(form, inlines)
return self.forms_invalid(form, inlines) | extra_views/advanced.py | from extra_views.formsets import InlineFormSetMixin
from django.http import HttpResponseRedirect
from django.forms.formsets import all_valid
from vanilla import GenericModelView
class BaseInlinesView(GenericModelView):
    """
    A base view class that provides a way to handle multiple inline
    formsets in a single request.

    Used by:

    * CreateWithInlinesView
    * UpdateWithInlinesView
    """
    inlines = []
    inline_context_names = []
    template_name_suffix = '_form'
    success_url = None

    def get_context_data(self, **kwargs):
        """
        If `inline_context_names` has been defined, add each formset to the
        context under its corresponding entry in `inline_context_names`.
        (The docstring previously referenced a nonexistent ``inlines_names``
        attribute.)
        """
        if self.inline_context_names and 'inlines' in kwargs:
            kwargs.update(zip(self.inline_context_names, kwargs['inlines']))
        return super(BaseInlinesView, self).get_context_data(**kwargs)

    def get_inlines(self, data=None, files=None, **kwargs):
        """
        Return the inline formset instances, one per entry in ``inlines``.

        Extra keyword arguments (e.g. ``instance=``) are forwarded verbatim
        to each formset factory.  (An unused local ``instance`` variable was
        removed.)
        """
        inline_formsets = []
        for inline_class in self.inlines:
            inline_instance = inline_class(self.model)
            inline_formset = inline_instance.get_formset(data=data, files=files, **kwargs)
            inline_formsets.append(inline_formset)
        return inline_formsets

    def forms_valid(self, form, inlines):
        """
        If the form and formsets are valid, save the associated models and redirect.
        """
        self.object = form.save()
        for formset in inlines:
            formset.save()
        return HttpResponseRedirect(self.get_success_url())

    def forms_invalid(self, form, inlines):
        """
        If the form or formsets are invalid, re-render the context data with the
        data-filled form and formsets and errors.
        """
        context = self.get_context_data(form=form, inlines=inlines)
        return self.render_to_response(context)

    def get_success_url(self):
        """Return ``success_url`` if set, otherwise redisplay the current URL."""
        if self.success_url:
            return self.success_url
        return self.request.get_full_path()
class CreateWithInlinesView(BaseInlinesView):
    def get(self, request, *args, **kwargs):
        """
        Render a blank form together with blank inline formsets.
        """
        self.object = None
        context = self.get_context_data(form=self.get_form(),
                                        inlines=self.get_inlines())
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """
        Bind the form and formsets to the POSTed data, validate them, and
        dispatch to ``forms_valid`` / ``forms_invalid`` accordingly.
        """
        self.object = None
        form = self.get_form(request.POST, request.FILES)
        form_ok = form.is_valid()
        if form_ok:
            # The unsaved instance lets the formsets link to the new parent.
            self.object = form.save(commit=False)
            inlines = self.get_inlines(request.POST, request.FILES, instance=self.object)
        else:
            inlines = self.get_inlines(request.POST, request.FILES)
        if form_ok and all_valid(inlines):
            return self.forms_valid(form, inlines)
        return self.forms_invalid(form, inlines)
class UpdateWithInlinesView(BaseInlinesView):
    def get(self, request, *args, **kwargs):
        """
        Render the form and inline formsets pre-filled from the existing object.
        """
        self.object = self.get_object()
        context = self.get_context_data(
            form=self.get_form(instance=self.object),
            inlines=self.get_inlines(instance=self.object),
        )
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """
        Bind the form and formsets to the POSTed data, validate them, and
        dispatch to ``forms_valid`` / ``forms_invalid`` accordingly.
        """
        self.object = self.get_object()
        form = self.get_form(request.POST, request.FILES, instance=self.object)
        form_ok = form.is_valid()
        if form_ok:
            self.object = form.save(commit=False)
            inlines = self.get_inlines(request.POST, request.FILES, instance=self.object)
        else:
            inlines = self.get_inlines(request.POST, request.FILES)
        if form_ok and all_valid(inlines):
            return self.forms_valid(form, inlines)
return self.forms_invalid(form, inlines) | 0.688049 | 0.217753 |
import numpy as np
def accuracy_score(y_true, y_pred):
    """
    Classification performance metric that computes the accuracy of y_true
    and y_pred.

    :param numpy.array y_true: array-like of shape (n_samples,) Ground truth correct labels.
    :param numpy.array y_pred: array-like of shape (n_samples,) Estimated target values.
    :returns: C (float) Accuracy score.
    """
    # Count element-wise matches; sum over a generator replaces the manual
    # counter loop.  An empty y_true still raises ZeroDivisionError, as before.
    correct = sum(1 for true, pred in zip(y_true, y_pred) if true == pred)
    return correct / len(y_true)
def mse(y_true, y_pred, squared=True):
    """
    Mean squared error regression loss.

    :param numpy.array y_true: array-like of shape (n_samples,)
        Ground truth (correct) target values.
    :param numpy.array y_pred: array-like of shape (n_samples,)
        Estimated target values.
    :param bool squared: If True returns MSE, if False returns RMSE. Default=True
    :returns: loss (float) A non-negative floating point value (the best value is 0.0).
    """
    residual = np.array(y_true) - np.array(y_pred)
    per_output = np.average(residual ** 2, axis=0)
    if not squared:
        per_output = np.sqrt(per_output)
    return np.average(per_output)
def r2_score(y_true, y_pred):
    """
    R^2 regression score function.

        R^2 = 1 - SS_res / SS_tot

    where SS_res is the residual sum of squares and SS_tot is the total
    sum of squares.

    :param numpy.array y_true : array-like of shape (n_samples,) Ground truth (correct) target values.
    :param numpy.array y_pred : array-like of shape (n_samples,) Estimated target values.
    :returns: score (float) R^2 score.
    """
    # Coerce to arrays so plain Python lists work too; mse() already does
    # this, while this function previously raised TypeError on list inputs.
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    # Residual sum of squares.
    numerator = ((y_true - y_pred) ** 2).sum(axis=0)
    # Total sum of squares.
    denominator = ((y_true - np.average(y_true, axis=0)) ** 2).sum(axis=0)
    # R^2.
    score = 1 - numerator / denominator
    return score
def mse_prime(y_true, y_pred):
    """Derivative of the MSE loss with respect to the predictions."""
    residual = y_pred - y_true
    return residual * 2 / y_true.size
def cross_entropy(y_true, y_pred):
    """Categorical cross-entropy: -sum(y_true * log(y_pred))."""
    return -np.sum(y_true * np.log(y_pred))
def cross_entropy_prime(y_true, y_pred):
    # Gradient shortcut for softmax + cross-entropy: presumably y_pred are
    # softmax probabilities and y_true one-hot labels — TODO confirm.
return y_pred - y_true | src/si/util/metrics.py | import numpy as np
def accuracy_score(y_true, y_pred):
    """
    Classification performance metric that computes the accuracy of y_true
    and y_pred.

    :param numpy.array y_true: array-like of shape (n_samples,) Ground truth correct labels.
    :param numpy.array y_pred: array-like of shape (n_samples,) Estimated target values.
    :returns: C (float) Accuracy score.
    """
    # Count element-wise matches; sum over a generator replaces the manual
    # counter loop.  An empty y_true still raises ZeroDivisionError, as before.
    correct = sum(1 for true, pred in zip(y_true, y_pred) if true == pred)
    return correct / len(y_true)
def mse(y_true, y_pred, squared=True):
    """
    Mean squared error regression loss.

    :param numpy.array y_true: array-like of shape (n_samples,)
        Ground truth (correct) target values.
    :param numpy.array y_pred: array-like of shape (n_samples,)
        Estimated target values.
    :param bool squared: If True returns MSE, if False returns RMSE. Default=True
    :returns: loss (float) A non-negative floating point value (the best value is 0.0).
    """
    residual = np.array(y_true) - np.array(y_pred)
    per_output = np.average(residual ** 2, axis=0)
    if not squared:
        per_output = np.sqrt(per_output)
    return np.average(per_output)
def r2_score(y_true, y_pred):
    """
    R^2 regression score function.

        R^2 = 1 - SS_res / SS_tot

    where SS_res is the residual sum of squares and SS_tot is the total
    sum of squares.

    :param numpy.array y_true : array-like of shape (n_samples,) Ground truth (correct) target values.
    :param numpy.array y_pred : array-like of shape (n_samples,) Estimated target values.
    :returns: score (float) R^2 score.
    """
    # Coerce to arrays so plain Python lists work too; mse() already does
    # this, while this function previously raised TypeError on list inputs.
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    # Residual sum of squares.
    numerator = ((y_true - y_pred) ** 2).sum(axis=0)
    # Total sum of squares.
    denominator = ((y_true - np.average(y_true, axis=0)) ** 2).sum(axis=0)
    # R^2.
    score = 1 - numerator / denominator
    return score
def mse_prime(y_true, y_pred):
    """Derivative of the MSE loss with respect to the predictions."""
    residual = y_pred - y_true
    return residual * 2 / y_true.size
def cross_entropy(y_true, y_pred):
    """Categorical cross-entropy: -sum(y_true * log(y_pred))."""
    return -np.sum(y_true * np.log(y_pred))
def cross_entropy_prime(y_true, y_pred):
    # Gradient shortcut for softmax + cross-entropy: presumably y_pred are
    # softmax probabilities and y_true one-hot labels — TODO confirm.
return y_pred - y_true | 0.910207 | 0.921181 |
import numpy as np
import torch
import torch.nn as nn
class TNGraph(nn.Module):
    """Tensor network defined by an adjacency matrix of bond dimensions.

    `graph` is read as an upper-triangular matrix: entry (i, j) with i < j
    is the bond dimension between cores i and j, and the diagonal entry
    (i, i) is core i's free dimension.  Zero entries become dummy axes of
    size 1.  NOTE(review): assumes `graph` is a square numpy array with
    zeros below the diagonal — confirm with callers.
    """
    def __init__(self, graph, contract_order=None):
        super(TNGraph, self).__init__()
        self.graph = graph
        # Optional node permutation used while contracting; forward()
        # permutes the result back afterwards.
        self.contract_order = contract_order
        self.cores = []
        for item in range(len(graph)):
            # One axis per other node (column then row of the adjacency
            # matrix), with the node's own free dimension on the diagonal.
            shape = list(graph[:item, item]) + list(graph[item, item:])
            self.cores.append(torch.nn.Parameter(torch.randn([i if i!=0 else 1 for i in shape])))
            self.register_parameter('cores' + str(item), self.cores[-1])
        if self.contract_order:
            # Reorder the cores and their axes consistently with the
            # requested contraction order.
            self.cores = [self.cores[i].permute(self.contract_order) for i in self.contract_order]
    def contract_graph(self):
        # Sequentially absorb each core into the running result `res`.
        res = self.cores[0]
        N = len(self.cores)
        for i in range(1, N):
            shape_node = list(self.cores[i].shape)
            # Collapse the axes pointing at already-absorbed nodes (< i)
            # into a single axis so one tensordot contracts them all.
            node = self.cores[i].reshape([np.prod(shape_node[:i])] + shape_node[i:])
            res = torch.tensordot(res, node, [[i], [0]])
            # Move this node's free axis into position i.
            res = torch.movedim(res, N-1, i)
            shape = res.shape
            axis, new_shape = [k for k in range(i+1)], list(shape[:i+1])
            # Merge the duplicated bond axes toward the not-yet-contracted
            # nodes so the result keeps one axis per pending node.
            for j in range(N-i-1):
                axis.extend([i+1+j, N+j])
                new_shape.append(shape[i+1+j] * shape[N+j])
            res = res.permute(axis).reshape(new_shape)
        return res
    def forward(self):
        # Undo the contraction-order permutation so the output axes follow
        # the original graph ordering.
        if self.contract_order:
            return self.contract_graph().permute([self.contract_order.index(i) for i in range(len(self.contract_order))])
        else:
            return self.contract_graph()
if __name__ == '__main__':
graph = np.array([[3, 2, 0, 1, 0], [0, 2, 4, 2, 3], [0, 0, 4,0, 2],[0,0,0,2,2],[0,0,0,0,6]])
net = TNGraph(graph, contract_order=[2,3,1,4,0])
x = net().cuda()
print(net.parameters())
traget = torch.from_numpy(np.random.random_sample([3,2,4,2,6])).float()
loss_function = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
for i in range(1):
outputs = net()
optimizer.zero_grad()
loss = loss_function(outputs, traget)
loss.backward()
optimizer.step()
if i % 1000==0:
print(loss.item()) | tensorNetwork/torch.py | import numpy as np
import torch
import torch.nn as nn
class TNGraph(nn.Module):
    """Tensor network defined by an adjacency matrix of bond dimensions.

    `graph` is read as an upper-triangular matrix: entry (i, j) with i < j
    is the bond dimension between cores i and j, and the diagonal entry
    (i, i) is core i's free dimension.  Zero entries become dummy axes of
    size 1.  NOTE(review): assumes `graph` is a square numpy array with
    zeros below the diagonal — confirm with callers.
    """
    def __init__(self, graph, contract_order=None):
        super(TNGraph, self).__init__()
        self.graph = graph
        # Optional node permutation used while contracting; forward()
        # permutes the result back afterwards.
        self.contract_order = contract_order
        self.cores = []
        for item in range(len(graph)):
            # One axis per other node (column then row of the adjacency
            # matrix), with the node's own free dimension on the diagonal.
            shape = list(graph[:item, item]) + list(graph[item, item:])
            self.cores.append(torch.nn.Parameter(torch.randn([i if i!=0 else 1 for i in shape])))
            self.register_parameter('cores' + str(item), self.cores[-1])
        if self.contract_order:
            # Reorder the cores and their axes consistently with the
            # requested contraction order.
            self.cores = [self.cores[i].permute(self.contract_order) for i in self.contract_order]
    def contract_graph(self):
        # Sequentially absorb each core into the running result `res`.
        res = self.cores[0]
        N = len(self.cores)
        for i in range(1, N):
            shape_node = list(self.cores[i].shape)
            # Collapse the axes pointing at already-absorbed nodes (< i)
            # into a single axis so one tensordot contracts them all.
            node = self.cores[i].reshape([np.prod(shape_node[:i])] + shape_node[i:])
            res = torch.tensordot(res, node, [[i], [0]])
            # Move this node's free axis into position i.
            res = torch.movedim(res, N-1, i)
            shape = res.shape
            axis, new_shape = [k for k in range(i+1)], list(shape[:i+1])
            # Merge the duplicated bond axes toward the not-yet-contracted
            # nodes so the result keeps one axis per pending node.
            for j in range(N-i-1):
                axis.extend([i+1+j, N+j])
                new_shape.append(shape[i+1+j] * shape[N+j])
            res = res.permute(axis).reshape(new_shape)
        return res
    def forward(self):
        # Undo the contraction-order permutation so the output axes follow
        # the original graph ordering.
        if self.contract_order:
            return self.contract_graph().permute([self.contract_order.index(i) for i in range(len(self.contract_order))])
        else:
            return self.contract_graph()
if __name__ == '__main__':
graph = np.array([[3, 2, 0, 1, 0], [0, 2, 4, 2, 3], [0, 0, 4,0, 2],[0,0,0,2,2],[0,0,0,0,6]])
net = TNGraph(graph, contract_order=[2,3,1,4,0])
x = net().cuda()
print(net.parameters())
traget = torch.from_numpy(np.random.random_sample([3,2,4,2,6])).float()
loss_function = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
for i in range(1):
outputs = net()
optimizer.zero_grad()
loss = loss_function(outputs, traget)
loss.backward()
optimizer.step()
if i % 1000==0:
print(loss.item()) | 0.650911 | 0.30026 |
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse,Http404
from .models import Image,Profile,Comments
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import NewPostForm,SignUpForm,EditProfileForm,CommentForm
from django.contrib import messages
from django.contrib.auth import logout
# Create your views here.
@login_required(login_url='/accounts/login/')
def timeline(request):
    '''
    Render the homepage with every uploaded image.
    '''
    return render(request, 'timeline.html', {"timeline_pics": Image.all_images()})
def like(request, id):
    '''
    Add the current user to a post's likes and return to the timeline.
    '''
    image = get_object_or_404(Image, id=request.POST.get('ig_pic_id'))
    # Bug fix: the authenticated user is `request.user` (lowercase);
    # `request.User` raised AttributeError.
    user = request.user
    image.likes.add(user)
    # Bug fix: actually call redirect() — the old code returned the tuple
    # `(redirect, 'timeline')` instead of an HttpResponse.
    return redirect('timeline')
@login_required(login_url='/accounts/login/')
def new_post(request):
    '''
    Upload a new post owned by the logged-in user.
    '''
    if request.method != 'POST':
        return render(request, 'new_post.html', {'form': NewPostForm()})
    form = NewPostForm(request.POST, request.FILES)
    if form.is_valid():
        post = form.save(commit=False)
        post.user = request.user
        post.save()
        return redirect('timeline')
    # Invalid submission: re-render the bound form with its errors.
    return render(request, 'new_post.html', {'form': form})
def signUp(request):
    '''
    Register a new user and send a welcome email.
    '''
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            name = form.cleaned_data['username']
            email = form.cleaned_data['email']
            # NOTE(review): welcome_email is not imported in this module —
            # confirm it is available at runtime (e.g. from an email helper).
            welcome_email(name, email)
            # Bug fix: the redirect response was created but never returned,
            # so successful sign-ups fell through to re-render the form.
            # NOTE(review): 'timeline' is a relative URL here; redirect('timeline')
            # by URL name may be the real intent — confirm.
            return HttpResponseRedirect('timeline')
    else:
        form = SignUpForm()
    return render(request, 'registration/registration_form.html', {'form': form})
@login_required(login_url='/accounts/login/')
def profile(request):
    '''
    Render the active user's profile page with their own posts.
    '''
    context = {'my_posts': Image.user_pics(request.user)}
    return render(request, 'profile.html', context)
@login_required(login_url='/accounts/login/')
def edit_profile(request):
    '''
    Update the logged-in user's profile information.
    '''
    if request.method == 'POST':
        # Bug fix: bind the form to the current user so saving updates the
        # existing record instead of validating a detached one (the GET
        # branch already passed instance=request.user).
        form = EditProfileForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('profile')
    else:
        form = EditProfileForm(instance=request.user)
    return render(request, 'update_profile.html', {'form': form})
@login_required(login_url='/accounts/login/')
def comment(request, id):
    '''
    Create a comment on the post with the given id.

    (Dead no-op assignments `id = id` and the redundant `pic_id` alias
    were removed; behavior is unchanged.)
    '''
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.user = request.user
            comment.ig_pic_id = Image.objects.get(id=id)
            comment.save()
            return redirect('timeline')
        # Invalid form: notify the user and let them retry on the same post.
        messages.info(request, 'Make sure you fill all fields correctly')
        return redirect('comment', id=id)
    form = CommentForm()
    return render(request, "comment.html", {'form': form, "id": id})
@login_required(login_url='/accounts/login/')
def single_pic(request, id):
    '''
    Display a single post together with its comments.
    '''
    context = {
        'post': Image.objects.get(id=id),
        'comments': Comments.objects.filter(ig_pic_id=id),
    }
    return render(request, 'single_pic.html', context)
@login_required(login_url='/accounts/login/')
def search_results(request):
    '''
    Search posts by name via the ?image= query parameter.
    '''
    search_term = request.GET.get('image')
    if not search_term:
        return render(request, 'search.html',
                      {"message": "You have not entered anything to search"})
    return render(request, 'search.html',
                  {'message': f'{search_term}',
                   'image': Image.search_image(search_term)})
@login_required(login_url="/accounts/login/")
def logout_request(request):
    '''
    Log out the current user (django.contrib.auth.logout) and send them
    back to the timeline.
    '''
    logout(request)
return redirect('timeline') | gram/views.py | from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse,Http404
from .models import Image,Profile,Comments
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import NewPostForm,SignUpForm,EditProfileForm,CommentForm
from django.contrib import messages
from django.contrib.auth import logout
# Create your views here.
@login_required(login_url='/accounts/login/')
def timeline(request):
    '''
    Render the homepage with every uploaded image.
    '''
    return render(request, 'timeline.html', {"timeline_pics": Image.all_images()})
def like(request, id):
    '''
    Add the current user to a post's likes and return to the timeline.
    '''
    image = get_object_or_404(Image, id=request.POST.get('ig_pic_id'))
    # Bug fix: the authenticated user is `request.user` (lowercase);
    # `request.User` raised AttributeError.
    user = request.user
    image.likes.add(user)
    # Bug fix: actually call redirect() — the old code returned the tuple
    # `(redirect, 'timeline')` instead of an HttpResponse.
    return redirect('timeline')
@login_required(login_url='/accounts/login/')
def new_post(request):
    '''
    Upload a new post owned by the logged-in user.
    '''
    if request.method != 'POST':
        return render(request, 'new_post.html', {'form': NewPostForm()})
    form = NewPostForm(request.POST, request.FILES)
    if form.is_valid():
        post = form.save(commit=False)
        post.user = request.user
        post.save()
        return redirect('timeline')
    # Invalid submission: re-render the bound form with its errors.
    return render(request, 'new_post.html', {'form': form})
def signUp(request):
    '''
    Register a new user and send a welcome email.
    '''
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            name = form.cleaned_data['username']
            email = form.cleaned_data['email']
            # NOTE(review): welcome_email is not imported in this module —
            # confirm it is available at runtime (e.g. from an email helper).
            welcome_email(name, email)
            # Bug fix: the redirect response was created but never returned,
            # so successful sign-ups fell through to re-render the form.
            # NOTE(review): 'timeline' is a relative URL here; redirect('timeline')
            # by URL name may be the real intent — confirm.
            return HttpResponseRedirect('timeline')
    else:
        form = SignUpForm()
    return render(request, 'registration/registration_form.html', {'form': form})
@login_required(login_url='/accounts/login/')
def profile(request):
    '''
    Render the active user's profile page with their own posts.
    '''
    context = {'my_posts': Image.user_pics(request.user)}
    return render(request, 'profile.html', context)
@login_required(login_url='/accounts/login/')
def edit_profile(request):
    '''
    Update the logged-in user's profile information.
    '''
    if request.method == 'POST':
        # Bug fix: bind the form to the current user so saving updates the
        # existing record instead of validating a detached one (the GET
        # branch already passed instance=request.user).
        form = EditProfileForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('profile')
    else:
        form = EditProfileForm(instance=request.user)
    return render(request, 'update_profile.html', {'form': form})
@login_required(login_url='/accounts/login/')
def comment(request, id):
    '''
    Create a comment on the post with the given id.

    (Dead no-op assignments `id = id` and the redundant `pic_id` alias
    were removed; behavior is unchanged.)
    '''
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.user = request.user
            comment.ig_pic_id = Image.objects.get(id=id)
            comment.save()
            return redirect('timeline')
        # Invalid form: notify the user and let them retry on the same post.
        messages.info(request, 'Make sure you fill all fields correctly')
        return redirect('comment', id=id)
    form = CommentForm()
    return render(request, "comment.html", {'form': form, "id": id})
@login_required(login_url='/accounts/login/')
def single_pic(request, id):
    '''
    Display a single post together with its comments.
    '''
    context = {
        'post': Image.objects.get(id=id),
        'comments': Comments.objects.filter(ig_pic_id=id),
    }
    return render(request, 'single_pic.html', context)
@login_required(login_url='/accounts/login/')
def search_results(request):
    '''
    Search posts by name via the ?image= query parameter.
    '''
    search_term = request.GET.get('image')
    if not search_term:
        return render(request, 'search.html',
                      {"message": "You have not entered anything to search"})
    return render(request, 'search.html',
                  {'message': f'{search_term}',
                   'image': Image.search_image(search_term)})
@login_required(login_url="/accounts/login/")
def logout_request(request):
    '''
    Log out the current user (django.contrib.auth.logout) and send them
    back to the timeline.
    '''
    logout(request)
return redirect('timeline') | 0.293708 | 0.059102 |
from compas.utilities import flatten
from compas.geometry import allclose
from compas.geometry import multiply_matrices
from compas.geometry.transformations import decompose_matrix
from compas.geometry.transformations import matrix_from_scale_factors
from compas.geometry.transformations import matrix_from_frame
from compas.geometry.transformations import matrix_inverse
from compas.geometry.transformations import Transformation
class Scale(Transformation):
    """Class representing a scale transformation.

    Parameters
    ----------
    matrix : list[list[float]], optional
        A 4x4 matrix (or similar) representing a scaling.

    Raises
    ------
    ValueError
        If the default constructor is used,
        and the provided transformation matrix is not a scale matrix.

    Examples
    --------
    >>> S = Scale.from_factors([1, 2, 3])
    >>> S[0, 0] == 1
    True
    >>> S[1, 1] == 2
    True
    >>> S[2, 2] == 3
    True

    >>> from compas.geometry import Point, Frame
    >>> point = Point(2, 5, 0)
    >>> frame = Frame(point, (1, 0, 0), (0, 1, 0))
    >>> points = [point, Point(2, 10, 0)]
    >>> S = Scale.from_factors([2.] * 3, frame)
    >>> [p.transformed(S) for p in points]
    [Point(2.000, 5.000, 0.000), Point(2.000, 15.000, 0.000)]
    """

    def __init__(self, matrix=None):
        if matrix:
            # A pure scaling must round-trip through decomposition.
            factors, _, _, _, _ = decompose_matrix(matrix)
            reconstructed = matrix_from_scale_factors(factors)
            if not allclose(flatten(matrix), flatten(reconstructed)):
                raise ValueError('This is not a proper scale matrix.')
        super(Scale, self).__init__(matrix=matrix)

    def __repr__(self):
        return "Scale({0!r})".format(self.matrix)

    @classmethod
    def from_factors(cls, factors, frame=None):
        """Construct a scale transformation from scale factors.

        Parameters
        ----------
        factors : [float, float, float]
            The scale factors along X, Y, Z.
        frame : [point, vector, vector] | :class:`compas.geometry.Frame`, optional
            The anchor frame for the scaling transformation.

        Returns
        -------
        :class:`compas.geometry.Scale`
            A scale transformation.

        Examples
        --------
        >>> from compas.geometry import Point, Frame
        >>> point = Point(2, 5, 0)
        >>> frame = Frame(point, (1, 0, 0), (0, 1, 0))
        >>> points = [point, Point(2, 10, 0)]
        >>> S = Scale.from_factors([2.] * 3, frame)
        >>> [p.transformed(S) for p in points]
        [Point(2.000, 5.000, 0.000), Point(2.000, 15.000, 0.000)]
        """
        S = cls()
        Sc = matrix_from_scale_factors(factors)
        if not frame:
            S.matrix = Sc
        else:
            # Scale relative to `frame`: map into the frame, scale, map back.
            Tw = matrix_from_frame(frame)
            S.matrix = multiply_matrices(multiply_matrices(Tw, Sc), matrix_inverse(Tw))
return S | src/compas/geometry/transformations/scale.py | from compas.utilities import flatten
from compas.geometry import allclose
from compas.geometry import multiply_matrices
from compas.geometry.transformations import decompose_matrix
from compas.geometry.transformations import matrix_from_scale_factors
from compas.geometry.transformations import matrix_from_frame
from compas.geometry.transformations import matrix_inverse
from compas.geometry.transformations import Transformation
class Scale(Transformation):
    """Class representing a scale transformation.

    Parameters
    ----------
    matrix : list[list[float]], optional
        A 4x4 matrix (or similar) representing a scaling.

    Raises
    ------
    ValueError
        If the default constructor is used,
        and the provided transformation matrix is not a scale matrix.

    Examples
    --------
    >>> S = Scale.from_factors([1, 2, 3])
    >>> S[0, 0] == 1
    True
    >>> S[1, 1] == 2
    True
    >>> S[2, 2] == 3
    True

    >>> from compas.geometry import Point, Frame
    >>> point = Point(2, 5, 0)
    >>> frame = Frame(point, (1, 0, 0), (0, 1, 0))
    >>> points = [point, Point(2, 10, 0)]
    >>> S = Scale.from_factors([2.] * 3, frame)
    >>> [p.transformed(S) for p in points]
    [Point(2.000, 5.000, 0.000), Point(2.000, 15.000, 0.000)]
    """

    def __init__(self, matrix=None):
        if matrix:
            # A pure scaling must round-trip through decomposition.
            factors, _, _, _, _ = decompose_matrix(matrix)
            reconstructed = matrix_from_scale_factors(factors)
            if not allclose(flatten(matrix), flatten(reconstructed)):
                raise ValueError('This is not a proper scale matrix.')
        super(Scale, self).__init__(matrix=matrix)

    def __repr__(self):
        return "Scale({0!r})".format(self.matrix)

    @classmethod
    def from_factors(cls, factors, frame=None):
        """Construct a scale transformation from scale factors.

        Parameters
        ----------
        factors : [float, float, float]
            The scale factors along X, Y, Z.
        frame : [point, vector, vector] | :class:`compas.geometry.Frame`, optional
            The anchor frame for the scaling transformation.

        Returns
        -------
        :class:`compas.geometry.Scale`
            A scale transformation.

        Examples
        --------
        >>> from compas.geometry import Point, Frame
        >>> point = Point(2, 5, 0)
        >>> frame = Frame(point, (1, 0, 0), (0, 1, 0))
        >>> points = [point, Point(2, 10, 0)]
        >>> S = Scale.from_factors([2.] * 3, frame)
        >>> [p.transformed(S) for p in points]
        [Point(2.000, 5.000, 0.000), Point(2.000, 15.000, 0.000)]
        """
        S = cls()
        Sc = matrix_from_scale_factors(factors)
        if not frame:
            S.matrix = Sc
        else:
            # Scale relative to `frame`: map into the frame, scale, map back.
            Tw = matrix_from_frame(frame)
            S.matrix = multiply_matrices(multiply_matrices(Tw, Sc), matrix_inverse(Tw))
return S | 0.949728 | 0.749821 |
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.layers import Conv2D, BatchNormalization, ReLU, DepthwiseConv2D, Activation, Input, Add ,Lambda,Concatenate,GlobalAvgPool1D
from keras.layers import GlobalAveragePooling2D, Reshape, Dense, multiply, Softmax, Flatten, merge, ZeroPadding2D, AveragePooling2D,MaxPooling2D,GlobalAveragePooling1D
from keras.regularizers import l2
from keras.utils.generic_utils import get_custom_objects
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
def _DenseLayer(input, nb_filter, bn_size, dropout_rate):
    """Bottleneck dense layer: ReLU -> 1x1 conv -> ReLU -> 3x3 conv [-> dropout].

    NOTE: batch normalization is disabled in this variant (it was commented
    out in the original implementation).
    """
    out = Activation('relu')(input)
    out = Convolution2D(nb_filter * bn_size, (1, 1), kernel_initializer="he_uniform", padding="same")(out)
    out = Activation('relu')(out)
    out = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same")(out)
    if dropout_rate is not None:
        out = Dropout(dropout_rate)(out)
    return out
def _DenseBlock(x, num_layers, num_features, bn_size, growth_rate, dropout_rate):
    # Stack `num_layers` dense layers and concatenate all intermediate outputs.
    # NOTE(review): each layer is fed only the previous layer's output rather
    # than the concatenation of all earlier features (classic DenseNet feeds
    # the concatenation) — confirm this is intended.
    # NOTE(review): `num_features` is incremented once, outside the loop; a
    # standard DenseNet grows by `growth_rate` per layer. Verify.
    feature_list = [x]
    for i in range(num_layers):
        x = _DenseLayer(x, growth_rate, bn_size, dropout_rate)
        feature_list.append(x)
    x = Concatenate()(feature_list)
    num_features += growth_rate
    return x, num_features
def densenet_(nb_class, input_dim, growth_rate=12, nb_dense_block=4, layer=5, nb_filter=32, dropout_rate=0.2):
    """Build a small DenseNet-style classifier.

    :param nb_class: number of output classes.
    :param input_dim: input tensor shape, e.g. (H, W, C).
    :param growth_rate: filters added by each dense layer.
    :param nb_dense_block: bottleneck width multiplier passed as ``bn_size``.
    :param layer: number of dense layers in the block.
    :param nb_filter: filters in the initial convolution.
    :param dropout_rate: dropout applied inside each dense layer (None disables).
    :returns: an uncompiled keras ``Model``.
    """
    model_input = Input(shape=input_dim)
    # Initial convolution
    x = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same", name="initial_conv2D", use_bias=False)(model_input)
    x = Activation('relu', name='relu1')(x)
    # Add dense blocks
    num_features = nb_filter
    num_layers = layer
    x, nb_filter = _DenseBlock(x, num_layers=num_layers, num_features=num_features, bn_size=nb_dense_block, growth_rate=growth_rate, dropout_rate=dropout_rate)
    # The last
    x = BatchNormalization(name='batch_last')(x)
    x = Convolution2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same", name="last_conv2D", use_bias=False)(x)
    # Bug fix: the body referred to an undefined name `nb_classes`; the
    # parameter is `nb_class` (previously a NameError at call time).
    x = Reshape(target_shape=((-1, nb_class)), name='reshape')(x)
    x = GlobalAveragePooling1D()(x)
    x = Dense(nb_class, activation='relu')(x)
    densenet = Model(inputs=model_input, outputs=x)
return densenet | Densenet.py | import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.layers import Conv2D, BatchNormalization, ReLU, DepthwiseConv2D, Activation, Input, Add ,Lambda,Concatenate,GlobalAvgPool1D
from keras.layers import GlobalAveragePooling2D, Reshape, Dense, multiply, Softmax, Flatten, merge, ZeroPadding2D, AveragePooling2D,MaxPooling2D,GlobalAveragePooling1D
from keras.regularizers import l2
from keras.utils.generic_utils import get_custom_objects
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
def _DenseLayer(input, nb_filter, bn_size, dropout_rate):
    """Bottleneck dense layer: ReLU -> 1x1 conv -> ReLU -> 3x3 conv [-> dropout].

    NOTE: batch normalization is disabled in this variant (it was commented
    out in the original implementation).
    """
    out = Activation('relu')(input)
    out = Convolution2D(nb_filter * bn_size, (1, 1), kernel_initializer="he_uniform", padding="same")(out)
    out = Activation('relu')(out)
    out = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same")(out)
    if dropout_rate is not None:
        out = Dropout(dropout_rate)(out)
    return out
def _DenseBlock(x, num_layers, num_features, bn_size, growth_rate, dropout_rate):
    # Stack `num_layers` dense layers and concatenate all intermediate outputs.
    # NOTE(review): each layer is fed only the previous layer's output rather
    # than the concatenation of all earlier features (classic DenseNet feeds
    # the concatenation) — confirm this is intended.
    # NOTE(review): `num_features` is incremented once, outside the loop; a
    # standard DenseNet grows by `growth_rate` per layer. Verify.
    feature_list = [x]
    for i in range(num_layers):
        x = _DenseLayer(x, growth_rate, bn_size, dropout_rate)
        feature_list.append(x)
    x = Concatenate()(feature_list)
    num_features += growth_rate
    return x, num_features
def densenet_(nb_class, input_dim, growth_rate=12, nb_dense_block=4, layer=5, nb_filter=32, dropout_rate=0.2):
    """Build a small DenseNet-style classifier.

    :param nb_class: number of output classes.
    :param input_dim: input tensor shape, e.g. (H, W, C).
    :param growth_rate: filters added by each dense layer.
    :param nb_dense_block: bottleneck width multiplier passed as ``bn_size``.
    :param layer: number of dense layers in the block.
    :param nb_filter: filters in the initial convolution.
    :param dropout_rate: dropout applied inside each dense layer (None disables).
    :returns: an uncompiled keras ``Model``.
    """
    model_input = Input(shape=input_dim)
    # Initial convolution
    x = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same", name="initial_conv2D", use_bias=False)(model_input)
    x = Activation('relu', name='relu1')(x)
    # Add dense blocks
    num_features = nb_filter
    num_layers = layer
    x, nb_filter = _DenseBlock(x, num_layers=num_layers, num_features=num_features, bn_size=nb_dense_block, growth_rate=growth_rate, dropout_rate=dropout_rate)
    # The last
    x = BatchNormalization(name='batch_last')(x)
    x = Convolution2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same", name="last_conv2D", use_bias=False)(x)
    # Bug fix: the body referred to an undefined name `nb_classes`; the
    # parameter is `nb_class` (previously a NameError at call time).
    x = Reshape(target_shape=((-1, nb_class)), name='reshape')(x)
    x = GlobalAveragePooling1D()(x)
    x = Dense(nb_class, activation='relu')(x)
    densenet = Model(inputs=model_input, outputs=x)
return densenet | 0.908634 | 0.521532 |
import skil_client
from .base import Resource
class AzureStorage(Resource):
    """SKIL Azure storage resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    container_name: Azure storage container name
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, container_name, credential_uri,
                 resource_id=None, create=True):
        super(AzureStorage, self).__init__(skil)
        self.name = name
        self.container_name = container_name
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.AzureStorageResourceDetails(
                container_name=self.container_name)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="AzureStorage")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
class GoogleStorage(Resource):
    """SKIL Google storage resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    project_id: Google project ID
    bucket_name: bucket name
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, project_id, bucket_name, credential_uri,
                 resource_id=None, create=True):
        super(GoogleStorage, self).__init__(skil)
        self.name = name
        self.project_id = project_id
        self.bucket_name = bucket_name
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.GoogleStorageResourceDetails(
                project_id=self.project_id,
                bucket_name=self.bucket_name)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="GoogleStorage")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
class HDFS(Resource):
    """SKIL HDFS resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    name_node_host: host of the name node
    name_node_port: port of the name node
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, name_node_host, name_node_port, credential_uri,
                 resource_id=None, create=True):
        super(HDFS, self).__init__(skil)
        self.name = name
        self.name_node_host = name_node_host
        self.name_node_port = name_node_port
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.HDFSResourceDetails(
                name_node_host=self.name_node_host,
                name_node_port=self.name_node_port)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="HDFS")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
class S3(Resource):
    """SKIL S3 resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    bucket: S3 bucket name
    region: AWS region
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, bucket, region, credential_uri,
                 resource_id=None, create=True):
        super(S3, self).__init__(skil)
        self.name = name
        self.bucket = bucket
        self.region = region
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.S3ResourceDetails(
                bucket=self.bucket,
                region=self.region)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="S3")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
from .base import Resource
class AzureStorage(Resource):
    """SKIL Azure storage resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    container_name: Azure storage container name
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, container_name, credential_uri,
                 resource_id=None, create=True):
        super(AzureStorage, self).__init__(skil)
        self.name = name
        self.container_name = container_name
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.AzureStorageResourceDetails(
                container_name=self.container_name)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="AzureStorage")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
class GoogleStorage(Resource):
    """SKIL Google storage resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    project_id: Google project ID
    bucket_name: bucket name
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, project_id, bucket_name, credential_uri,
                 resource_id=None, create=True):
        super(GoogleStorage, self).__init__(skil)
        self.name = name
        self.project_id = project_id
        self.bucket_name = bucket_name
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.GoogleStorageResourceDetails(
                project_id=self.project_id,
                bucket_name=self.bucket_name)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="GoogleStorage")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
class HDFS(Resource):
    """SKIL HDFS resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    name_node_host: host of the name node
    name_node_port: port of the name node
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, name_node_host, name_node_port, credential_uri,
                 resource_id=None, create=True):
        super(HDFS, self).__init__(skil)
        self.name = name
        self.name_node_host = name_node_host
        self.name_node_port = name_node_port
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.HDFSResourceDetails(
                name_node_host=self.name_node_host,
                name_node_port=self.name_node_port)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="HDFS")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
class S3(Resource):
    """SKIL S3 resource.

    # Arguments:
    skil: `Skil` server instance
    name: Resource name
    bucket: S3 bucket name
    region: AWS region
    credential_uri: path to credential file
    resource_id: optional resource ID to retrieve an existing resource
    create: boolean, for internal use only. whether to create a new resource or not
    """

    def __init__(self, skil, name, bucket, region, credential_uri,
                 resource_id=None, create=True):
        super(S3, self).__init__(skil)
        self.name = name
        self.bucket = bucket
        self.region = region
        self.credential_uri = credential_uri
        self.resource_id = resource_id
        if not create:
            # Re-attaching to an existing resource: an ID is mandatory.
            if resource_id is None:
                raise ValueError(
                    'If create is False you need to provide a valid resource_id')
        else:
            details = skil_client.S3ResourceDetails(
                bucket=self.bucket,
                region=self.region)
            request = skil_client.AddResourceRequest(
                resource_name=self.name,
                resource_details=details,
                credential_uri=self.credential_uri,
                type="STORAGE",
                sub_type="S3")
            response = self.skil.api.add_resource(request)
            self.resource_id = response.get("resourceId")
import os
import random
import sys
from pyglet.gl import *
import pyglet
from pyglet.window import key
# Make this example's own directory the first place pyglet looks for assets.
pyglet.resource.path.insert(0, os.path.dirname(__file__))
pyglet.resource.reindex()

# Asset file names, resolved through pyglet.resource.
BALL_IMAGE = 'ball.png'
BALL_SOUND = 'ping.wav'

# streaming=False decodes the whole clip up front so the short bounce
# effect can be replayed from memory with low latency.
sound = pyglet.resource.media(BALL_SOUND, streaming=False)
class Ball(pyglet.sprite.Sprite):
    """A bouncing ball sprite that plays a sound whenever it hits a wall."""

    ball_image = pyglet.resource.image(BALL_IMAGE)
    width = ball_image.width
    height = ball_image.height

    def __init__(self):
        # Spawn at a random position fully inside the window.
        start_x = random.random() * (window.width - self.width)
        start_y = random.random() * (window.height - self.height)
        super(Ball, self).__init__(self.ball_image, start_x, start_y,
                                   batch=balls_batch)
        # Velocity components in pixels/second, each in [-500, 500).
        self.dx = (random.random() - 0.5) * 1000
        self.dy = (random.random() - 0.5) * 1000

    def update(self, dt):
        """Advance the ball by dt seconds, bouncing (audibly) off walls."""
        hit_side = self.x <= 0 or self.x + self.width >= window.width
        if hit_side:
            self.dx *= -1
            sound.play()
        hit_top_bottom = self.y <= 0 or self.y + self.height >= window.height
        if hit_top_bottom:
            self.dy *= -1
            sound.play()
        self.x += self.dx * dt
        self.y += self.dy * dt
        # Clamp so the sprite can never leave the window.
        self.x = min(max(self.x, 0), window.width - self.width)
        self.y = min(max(self.y, 0), window.height - self.height)
# The single application window; Ball also uses it for bounds checks.
window = pyglet.window.Window(640, 480, caption='Noisy')
@window.event
def on_key_press(symbol, modifiers):
    """Space adds a ball, backspace removes one, escape quits."""
    if symbol == key.SPACE:
        balls.append(Ball())
        return
    if symbol == key.BACKSPACE and balls:
        del balls[-1]
        return
    if symbol == key.ESCAPE:
        window.has_exit = True
@window.event
def on_draw():
    # Redraw everything each frame: clear, the batched ball sprites,
    # then the HUD label on top.
    window.clear()
    balls_batch.draw()
    label.draw()
def update(dt):
    # Per-frame tick driven by pyglet.clock: move every ball by dt seconds.
    for ball in balls:
        ball.update(dt)
# Shared sprite batch so all balls are drawn in a single call.
balls_batch = pyglet.graphics.Batch()
# On-screen usage hint, centred at the bottom of the window.
label = pyglet.text.Label('Press space to add a ball, backspace to remove',
                          font_size=14,
                          x=window.width // 2, y=10,
                          anchor_x='center')
# Live Ball instances; mutated by the key handler.
balls = []
def main():
    """Run the demo: step the simulation ~30 times/second, then loop."""
    pyglet.clock.schedule_interval(update, 1 / 30.)
    pyglet.app.run()
import random
import sys
from pyglet.gl import *
import pyglet
from pyglet.window import key
# Make this example's own directory the first place pyglet looks for assets.
pyglet.resource.path.insert(0, os.path.dirname(__file__))
pyglet.resource.reindex()

# Asset file names, resolved through pyglet.resource.
BALL_IMAGE = 'ball.png'
BALL_SOUND = 'ping.wav'

# streaming=False decodes the whole clip up front so the short bounce
# effect can be replayed from memory with low latency.
sound = pyglet.resource.media(BALL_SOUND, streaming=False)
class Ball(pyglet.sprite.Sprite):
    """A bouncing ball sprite that plays a sound whenever it hits a wall."""

    ball_image = pyglet.resource.image(BALL_IMAGE)
    width = ball_image.width
    height = ball_image.height

    def __init__(self):
        # Spawn at a random position fully inside the window.
        start_x = random.random() * (window.width - self.width)
        start_y = random.random() * (window.height - self.height)
        super(Ball, self).__init__(self.ball_image, start_x, start_y,
                                   batch=balls_batch)
        # Velocity components in pixels/second, each in [-500, 500).
        self.dx = (random.random() - 0.5) * 1000
        self.dy = (random.random() - 0.5) * 1000

    def update(self, dt):
        """Advance the ball by dt seconds, bouncing (audibly) off walls."""
        hit_side = self.x <= 0 or self.x + self.width >= window.width
        if hit_side:
            self.dx *= -1
            sound.play()
        hit_top_bottom = self.y <= 0 or self.y + self.height >= window.height
        if hit_top_bottom:
            self.dy *= -1
            sound.play()
        self.x += self.dx * dt
        self.y += self.dy * dt
        # Clamp so the sprite can never leave the window.
        self.x = min(max(self.x, 0), window.width - self.width)
        self.y = min(max(self.y, 0), window.height - self.height)
# The single application window; Ball also uses it for bounds checks.
window = pyglet.window.Window(640, 480, caption='Noisy')
@window.event
def on_key_press(symbol, modifiers):
    """Space adds a ball, backspace removes one, escape quits."""
    if symbol == key.SPACE:
        balls.append(Ball())
        return
    if symbol == key.BACKSPACE and balls:
        del balls[-1]
        return
    if symbol == key.ESCAPE:
        window.has_exit = True
@window.event
def on_draw():
    # Redraw everything each frame: clear, the batched ball sprites,
    # then the HUD label on top.
    window.clear()
    balls_batch.draw()
    label.draw()
def update(dt):
    # Per-frame tick driven by pyglet.clock: move every ball by dt seconds.
    for ball in balls:
        ball.update(dt)
# Shared sprite batch so all balls are drawn in a single call.
balls_batch = pyglet.graphics.Batch()
# On-screen usage hint, centred at the bottom of the window.
label = pyglet.text.Label('Press space to add a ball, backspace to remove',
                          font_size=14,
                          x=window.width // 2, y=10,
                          anchor_x='center')
# Live Ball instances; mutated by the key handler.
balls = []
def main():
    """Run the demo: step the simulation ~30 times/second, then loop."""
    pyglet.clock.schedule_interval(update, 1 / 30.)
    pyglet.app.run()
"""Pools managing shared Session objects."""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
import datetime
from six.moves import queue
from google.cloud.exceptions import NotFound
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
# Indirection point so tests can substitute a deterministic clock.
_NOW = datetime.datetime.utcnow  # unit tests may replace
class AbstractSessionPool(object):
    """Specifies required API for concrete session pool implementations.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    # Set by concrete pools in bind(); None until then.
    _database = None

    def __init__(self, labels=None):
        self._labels = {} if labels is None else labels

    @property
    def labels(self):
        """User-assigned labels for sessions created by the pool.

        :rtype: dict (str -> str)
        :returns: labels assigned by the user
        """
        return self._labels

    def bind(self, database):
        """Associate the pool with a database.

        Concrete implementations may pre-fill the pool using the database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def get(self):
        """Check a session out from the pool.

        Concrete implementations may raise to signal exhaustion, or
        block until a session is available.

        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def put(self, session):
        """Return a session to the pool.

        Concrete implementations may raise to signal a full pool, or
        block until there is room.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def clear(self):
        """Delete all sessions in the pool.

        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def _new_session(self):
        """Helper for concrete methods creating session instances.

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: new session instance, carrying the pool's labels if any.
        """
        if not self.labels:
            return self._database.session()
        return self._database.session(labels=self.labels)

    def session(self, **kwargs):
        """Check out a session from the pool, as a context manager.

        :param kwargs: (optional) keyword arguments, passed through to
                       the returned checkout.

        :rtype: :class:`~google.cloud.spanner_v1.session.SessionCheckout`
        :returns: a checkout instance, to be used as a context manager for
                  accessing the session and returning it to the pool.
        """
        return SessionCheckout(self, **kwargs)
class FixedSizePool(AbstractSessionPool):
    """Concrete session pool implementation:

    - Pre-allocates / creates a fixed number of sessions.
    - "Pings" existing sessions via :meth:`session.exists` before returning
      them, and replaces expired sessions.
    - Blocks, with a timeout, when :meth:`get` is called on an empty pool.
      Raises after timing out.
    - Raises when :meth:`put` is called on a full pool. That error is
      never expected in normal practice, as users should be calling
      :meth:`get` followed by :meth:`put` whenever in need of a session.

    :type size: int
    :param size: fixed pool size

    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
                            a returned session.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    DEFAULT_SIZE = 10
    DEFAULT_TIMEOUT = 10

    def __init__(self, size=DEFAULT_SIZE, default_timeout=DEFAULT_TIMEOUT, labels=None):
        super(FixedSizePool, self).__init__(labels=labels)
        self.size = size
        self.default_timeout = default_timeout
        # LIFO: hand out the most recently used (least likely expired)
        # session first.
        self._sessions = queue.LifoQueue(size)

    def bind(self, database):
        """Associate the pool with a database and pre-fill it.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        self._database = database
        api = database.spanner_api
        metadata = _metadata_with_prefix(database.name)
        # batch_create_sessions may return fewer sessions than requested,
        # so loop until the pool is actually full.
        while not self._sessions.full():
            resp = api.batch_create_sessions(
                database=database.name,
                session_count=self.size - self._sessions.qsize(),
                metadata=metadata,
            )
            for session_pb in resp.session:
                session = self._new_session()
                session._session_id = session_pb.name.split("/")[-1]
                self._sessions.put(session)

    def get(self, timeout=None):  # pylint: disable=arguments-differ
        """Check a session out from the pool.

        :type timeout: int
        :param timeout: seconds to block waiting for an available session

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        :raises: :exc:`six.moves.queue.Empty` if the queue is empty.
        """
        if timeout is None:
            timeout = self.default_timeout
        session = self._sessions.get(block=True, timeout=timeout)
        if not session.exists():
            # Bug fix: previously called self._database.session(), which
            # dropped the pool's labels on the replacement session;
            # _new_session() applies them, consistent with BurstyPool
            # and PingingPool.
            session = self._new_session()
            session.create()
        return session

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, raises.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.

        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        self._sessions.put_nowait(session)

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                session = self._sessions.get(block=False)
            except queue.Empty:
                break
            else:
                session.delete()
class BurstyPool(AbstractSessionPool):
    """Session pool that grows on demand and discards overflow.

    - "Pings" existing sessions via :meth:`session.exists` before returning
      them.
    - Creates a new session, rather than blocking, when :meth:`get` is called
      on an empty pool.
    - Discards the returned session, rather than blocking, when :meth:`put`
      is called on a full pool.

    :type target_size: int
    :param target_size: max pool size

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, target_size=10, labels=None):
        super(BurstyPool, self).__init__(labels=labels)
        self.target_size = target_size
        self._database = None
        self._sessions = queue.LifoQueue(target_size)

    def bind(self, database):
        """Associate the pool with a database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        self._database = database

    def get(self):
        """Check a session out from the pool.

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        """
        try:
            session = self._sessions.get_nowait()
        except queue.Empty:
            session = None
        if session is not None and session.exists():
            return session
        # Pool was empty, or the pooled session expired server-side.
        session = self._new_session()
        session.create()
        return session

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, the returned session is
        discarded.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        """
        try:
            self._sessions.put_nowait(session)
            return
        except queue.Full:
            pass
        # Overflow: drop the session server-side; it may already be gone.
        try:
            session.delete()
        except NotFound:
            pass

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                session = self._sessions.get(block=False)
            except queue.Empty:
                return
            session.delete()
class PingingPool(AbstractSessionPool):
    """Concrete session pool implementation:

    - Pre-allocates / creates a fixed number of sessions.
    - Sessions are used in "round-robin" order (LRU first).
    - "Pings" existing sessions in the background after a specified interval
      via an API call (``session.ping()``).
    - Blocks, with a timeout, when :meth:`get` is called on an empty pool.
      Raises after timing out.
    - Raises when :meth:`put` is called on a full pool. That error is
      never expected in normal practice, as users should be calling
      :meth:`get` followed by :meth:`put` whenever in need of a session.

    The application is responsible for calling :meth:`ping` at appropriate
    times, e.g. from a background thread.

    :type size: int
    :param size: fixed pool size

    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
                            a returned session.

    :type ping_interval: int
    :param ping_interval: interval at which to ping sessions.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
        super(PingingPool, self).__init__(labels=labels)
        self.size = size
        self.default_timeout = default_timeout
        # A popped session is treated as "maybe expired" once _NOW()
        # passes its stored timestamp plus this delta.
        self._delta = datetime.timedelta(seconds=ping_interval)
        # Holds (ping_after, session) pairs; the earliest ping_after
        # (i.e. the least recently used session) pops first.
        self._sessions = queue.PriorityQueue(size)

    def bind(self, database):
        """Associate the pool with a database and pre-fill it.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        self._database = database
        api = database.spanner_api
        metadata = _metadata_with_prefix(database.name)
        created_session_count = 0
        # batch_create_sessions may return fewer sessions than requested,
        # so keep requesting until `size` sessions have been created.
        while created_session_count < self.size:
            resp = api.batch_create_sessions(
                database=database.name,
                session_count=self.size - created_session_count,
                metadata=metadata,
            )
            for session_pb in resp.session:
                session = self._new_session()
                session._session_id = session_pb.name.split("/")[-1]
                self.put(session)
            created_session_count += len(resp.session)

    def get(self, timeout=None):  # pylint: disable=arguments-differ
        """Check a session out from the pool.

        :type timeout: int
        :param timeout: seconds to block waiting for an available session

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        :raises: :exc:`six.moves.queue.Empty` if the queue is empty.
        """
        if timeout is None:
            timeout = self.default_timeout
        ping_after, session = self._sessions.get(block=True, timeout=timeout)
        if _NOW() > ping_after:
            # Using session.exists() guarantees the returned session exists.
            # session.ping() uses a cached result in the backend which could
            # result in a recently deleted session being returned.
            if not session.exists():
                session = self._new_session()
                session.create()
        return session

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, raises.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.

        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        # Stamp the session with the time after which it must be re-checked.
        self._sessions.put_nowait((_NOW() + self._delta, session))

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                _, session = self._sessions.get(block=False)
            except queue.Empty:
                break
            else:
                session.delete()

    def ping(self):
        """Refresh maybe-expired sessions in the pool.

        This method is designed to be called from a background thread,
        or during the "idle" phase of an event loop.
        """
        while True:
            try:
                ping_after, session = self._sessions.get(block=False)
            except queue.Empty:  # all sessions in use
                break
            if ping_after > _NOW():  # oldest session is fresh
                # Re-add to queue with existing expiration
                self._sessions.put((ping_after, session))
                break
            try:
                session.ping()
            except NotFound:
                # Session vanished server-side; replace it.
                session = self._new_session()
                session.create()
            # Re-add to queue with new expiration
            self.put(session)
class TransactionPingingPool(PingingPool):
    """PingingPool variant that keeps a transaction begun on each session.

    In addition to the features of :class:`PingingPool`, this class
    creates and begins a transaction for each of its sessions at startup.

    When a session is returned to the pool, if its transaction has been
    committed or rolled back, the pool creates a new transaction for the
    session and pushes the transaction onto a separate queue of
    "transactions to begin."  The application is responsible for flushing
    this queue as appropriate via the pool's
    :meth:`begin_pending_transactions` method.

    :type size: int
    :param size: fixed pool size

    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
                            a returned session.

    :type ping_interval: int
    :param ping_interval: interval at which to ping sessions.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
        # Must exist before the base __init__ runs, since that may put().
        self._pending_sessions = queue.Queue()
        super(TransactionPingingPool, self).__init__(
            size, default_timeout, ping_interval, labels=labels
        )
        self.begin_pending_transactions()

    def bind(self, database):
        """Associate the pool with a database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        super(TransactionPingingPool, self).bind(database)
        self.begin_pending_transactions()

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, raises.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.

        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        if self._sessions.full():
            raise queue.Full
        txn = session._transaction
        txn_usable = txn is not None and not txn.committed and not txn.rolled_back
        if txn_usable:
            super(TransactionPingingPool, self).put(session)
        else:
            # Start a fresh transaction; it is begun later, in a batch.
            session.transaction()
            self._pending_sessions.put(session)

    def begin_pending_transactions(self):
        """Begin all transactions for sessions added to the pool."""
        while not self._pending_sessions.empty():
            session = self._pending_sessions.get()
            session._transaction.begin()
            super(TransactionPingingPool, self).put(session)
class SessionCheckout(object):
    """Context manager: hold session checked out from a pool.

    :type pool: concrete subclass of
                :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`
    :param pool: Pool from which to check out a session.

    :param kwargs: extra keyword arguments to be passed to :meth:`pool.get`.
    """

    # Not checked out until '__enter__'.
    _session = None

    def __init__(self, pool, **kwargs):
        self._pool = pool
        # Copy so later mutation by the caller cannot affect the checkout.
        self._kwargs = dict(kwargs)

    def __enter__(self):
        self._session = self._pool.get(**self._kwargs)
        return self._session

    def __exit__(self, *ignored):
        self._pool.put(self._session)
"""Pools managing shared Session objects."""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
import datetime
from six.moves import queue
from google.cloud.exceptions import NotFound
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
# Indirection point so tests can substitute a deterministic clock.
_NOW = datetime.datetime.utcnow  # unit tests may replace
class AbstractSessionPool(object):
    """Specifies required API for concrete session pool implementations.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    # Set by concrete pools in bind(); None until then.
    _database = None

    def __init__(self, labels=None):
        self._labels = {} if labels is None else labels

    @property
    def labels(self):
        """User-assigned labels for sessions created by the pool.

        :rtype: dict (str -> str)
        :returns: labels assigned by the user
        """
        return self._labels

    def bind(self, database):
        """Associate the pool with a database.

        Concrete implementations may pre-fill the pool using the database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def get(self):
        """Check a session out from the pool.

        Concrete implementations may raise to signal exhaustion, or
        block until a session is available.

        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def put(self, session):
        """Return a session to the pool.

        Concrete implementations may raise to signal a full pool, or
        block until there is room.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def clear(self):
        """Delete all sessions in the pool.

        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()

    def _new_session(self):
        """Helper for concrete methods creating session instances.

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: new session instance, carrying the pool's labels if any.
        """
        if not self.labels:
            return self._database.session()
        return self._database.session(labels=self.labels)

    def session(self, **kwargs):
        """Check out a session from the pool, as a context manager.

        :param kwargs: (optional) keyword arguments, passed through to
                       the returned checkout.

        :rtype: :class:`~google.cloud.spanner_v1.session.SessionCheckout`
        :returns: a checkout instance, to be used as a context manager for
                  accessing the session and returning it to the pool.
        """
        return SessionCheckout(self, **kwargs)
class FixedSizePool(AbstractSessionPool):
"""Concrete session pool implementation:
- Pre-allocates / creates a fixed number of sessions.
- "Pings" existing sessions via :meth:`session.exists` before returning
them, and replaces expired sessions.
- Blocks, with a timeout, when :meth:`get` is called on an empty pool.
Raises after timing out.
- Raises when :meth:`put` is called on a full pool. That error is
never expected in normal practice, as users should be calling
:meth:`get` followed by :meth:`put` whenever in need of a session.
:type size: int
:param size: fixed pool size
:type default_timeout: int
:param default_timeout: default timeout, in seconds, to wait for
a returned session.
:type labels: dict (str -> str) or None
:param labels: (Optional) user-assigned labels for sessions created
by the pool.
"""
DEFAULT_SIZE = 10
DEFAULT_TIMEOUT = 10
def __init__(self, size=DEFAULT_SIZE, default_timeout=DEFAULT_TIMEOUT, labels=None):
super(FixedSizePool, self).__init__(labels=labels)
self.size = size
self.default_timeout = default_timeout
self._sessions = queue.LifoQueue(size)
def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database used by the pool: used to create sessions
when needed.
"""
self._database = database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
while not self._sessions.full():
resp = api.batch_create_sessions(
database=database.name,
session_count=self.size - self._sessions.qsize(),
metadata=metadata,
)
for session_pb in resp.session:
session = self._new_session()
session._session_id = session_pb.name.split("/")[-1]
self._sessions.put(session)
def get(self, timeout=None): # pylint: disable=arguments-differ
"""Check a session out from the pool.
:type timeout: int
:param timeout: seconds to block waiting for an available session
:rtype: :class:`~google.cloud.spanner_v1.session.Session`
:returns: an existing session from the pool, or a newly-created
session.
:raises: :exc:`six.moves.queue.Empty` if the queue is empty.
"""
if timeout is None:
timeout = self.default_timeout
session = self._sessions.get(block=True, timeout=timeout)
if not session.exists():
session = self._database.session()
session.create()
return session
def put(self, session):
    """Return a session to the pool.

    Never blocks: if the pool is full, raises.

    :type session: :class:`~google.cloud.spanner_v1.session.Session`
    :param session: the session being returned.

    :raises: :exc:`six.moves.queue.Full` if the queue is full.
    """
    self._sessions.put_nowait(session)
def clear(self):
"""Delete all sessions in the pool."""
while True:
try:
session = self._sessions.get(block=False)
except queue.Empty:
break
else:
session.delete()
class BurstyPool(AbstractSessionPool):
    """Concrete session pool implementation:

    - "Pings" existing sessions via :meth:`session.exists` before returning
      them.

    - Creates a new session, rather than blocking, when :meth:`get` is called
      on an empty pool.

    - Discards the returned session, rather than blocking, when :meth:`put`
      is called on a full pool.

    :type target_size: int
    :param target_size: max pool size

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, target_size=10, labels=None):
        super(BurstyPool, self).__init__(labels=labels)
        self.target_size = target_size
        self._database = None
        self._sessions = queue.LifoQueue(target_size)

    def bind(self, database):
        """Associate the pool with a database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
            when needed.
        """
        # Sessions are created lazily in get(); binding only records the
        # database.
        self._database = database

    def get(self):
        """Check a session out from the pool.

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        """
        try:
            checked_out = self._sessions.get_nowait()
        except queue.Empty:
            pass
        else:
            if checked_out.exists():
                return checked_out
        # Pool was empty, or the pooled session has expired server-side:
        # hand back a freshly-created session instead of blocking.
        checked_out = self._new_session()
        checked_out.create()
        return checked_out

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, the returned session is
        discarded.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        """
        try:
            self._sessions.put_nowait(session)
        except queue.Full:
            # Pool already holds target_size sessions: drop this one.
            try:
                session.delete()
            except NotFound:
                pass

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                discarded = self._sessions.get_nowait()
            except queue.Empty:
                return
            discarded.delete()
class PingingPool(AbstractSessionPool):
    """Concrete session pool implementation:

    - Pre-allocates / creates a fixed number of sessions.

    - Sessions are used in "round-robin" order (LRU first).

    - "Pings" existing sessions in the background after a specified interval
      via an API call (``session.ping()``).

    - Blocks, with a timeout, when :meth:`get` is called on an empty pool.
      Raises after timing out.

    - Raises when :meth:`put` is called on a full pool.  That error is
      never expected in normal practice, as users should be calling
      :meth:`get` followed by :meth:`put` whenever in need of a session.

    The application is responsible for calling :meth:`ping` at appropriate
    times, e.g. from a background thread.

    :type size: int
    :param size: fixed pool size

    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
        a returned session.

    :type ping_interval: int
    :param ping_interval: interval at which to ping sessions.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
        super(PingingPool, self).__init__(labels=labels)
        self.size = size
        self.default_timeout = default_timeout
        self._delta = datetime.timedelta(seconds=ping_interval)
        # Entries are (ping_after, session) tuples, so the priority queue
        # always yields the least-recently-used session first.
        self._sessions = queue.PriorityQueue(size)

    def bind(self, database):
        """Associate the pool with a database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
            when needed.
        """
        self._database = database
        api = database.spanner_api
        metadata = _metadata_with_prefix(database.name)
        created_session_count = 0
        # batch_create_sessions may return fewer sessions than requested,
        # so keep requesting until `size` sessions have been created.
        while created_session_count < self.size:
            resp = api.batch_create_sessions(
                database=database.name,
                session_count=self.size - created_session_count,
                metadata=metadata,
            )
            for session_pb in resp.session:
                session = self._new_session()
                # Adopt the server-assigned id instead of calling create().
                session._session_id = session_pb.name.split("/")[-1]
                self.put(session)
            created_session_count += len(resp.session)

    def get(self, timeout=None):  # pylint: disable=arguments-differ
        """Check a session out from the pool.

        :type timeout: int
        :param timeout: seconds to block waiting for an available session

        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        :raises: :exc:`six.moves.queue.Empty` if the queue is empty.
        """
        if timeout is None:
            timeout = self.default_timeout
        ping_after, session = self._sessions.get(block=True, timeout=timeout)
        if _NOW() > ping_after:
            # Using session.exists() guarantees the returned session exists.
            # session.ping() uses a cached result in the backend which could
            # result in a recently deleted session being returned.
            if not session.exists():
                session = self._new_session()
                session.create()
        return session

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, raises.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.

        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        # Stamp the session with its next ping deadline so get()/ping()
        # know when it must be re-validated.
        self._sessions.put_nowait((_NOW() + self._delta, session))

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                _, session = self._sessions.get(block=False)
            except queue.Empty:
                break
            else:
                session.delete()

    def ping(self):
        """Refresh maybe-expired sessions in the pool.

        This method is designed to be called from a background thread,
        or during the "idle" phase of an event loop.
        """
        while True:
            try:
                ping_after, session = self._sessions.get(block=False)
            except queue.Empty:  # all sessions in use
                break
            if ping_after > _NOW():  # oldest session is fresh
                # Re-add to queue with existing expiration
                self._sessions.put((ping_after, session))
                break
            try:
                session.ping()
            except NotFound:
                # Session was deleted server-side; replace it.
                session = self._new_session()
                session.create()
            # Re-add to queue with new expiration
            self.put(session)
class TransactionPingingPool(PingingPool):
    """Concrete session pool implementation:

    In addition to the features of :class:`PingingPool`, this class
    creates and begins a transaction for each of its sessions at startup.

    When a session is returned to the pool, if its transaction has been
    committed or rolled back, the pool creates a new transaction for the
    session and pushes the transaction onto a separate queue of
    "transactions to begin."  The application is responsible for flushing
    this queue as appropriate via the pool's
    :meth:`begin_pending_transactions` method.

    :type size: int
    :param size: fixed pool size

    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
        a returned session.

    :type ping_interval: int
    :param ping_interval: interval at which to ping sessions.

    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
        # Create the pending queue first so it already exists for any
        # put() call made while the superclass initializes.
        self._pending_sessions = queue.Queue()

        super(TransactionPingingPool, self).__init__(
            size, default_timeout, ping_interval, labels=labels
        )

        self.begin_pending_transactions()

    def bind(self, database):
        """Associate the pool with a database.

        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
            when needed.
        """
        super(TransactionPingingPool, self).bind(database)
        # Parent bind() fills the pool via put(), which queues fresh
        # transactions; begin them all now.
        self.begin_pending_transactions()

    def put(self, session):
        """Return a session to the pool.

        Never blocks: if the pool is full, raises.

        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.

        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        if self._sessions.full():
            raise queue.Full

        current_txn = session._transaction
        needs_fresh_txn = (
            current_txn is None or current_txn.committed or current_txn.rolled_back
        )
        if needs_fresh_txn:
            # Attach a new (not-yet-begun) transaction and defer the
            # begin() call to begin_pending_transactions().
            session.transaction()
            self._pending_sessions.put(session)
        else:
            super(TransactionPingingPool, self).put(session)

    def begin_pending_transactions(self):
        """Begin all transactions for sessions added to the pool."""
        while not self._pending_sessions.empty():
            ready = self._pending_sessions.get()
            ready._transaction.begin()
            super(TransactionPingingPool, self).put(ready)
class SessionCheckout(object):
    """Context manager: hold session checked out from a pool.

    Fixed here: the original final line had a stray dataset/extraction
    delimiter (``| 0.907732 | 0.366533 |``) fused onto it, which is not
    valid Python; the residue is removed.

    :type pool: concrete subclass of
        :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`
    :param pool: Pool from which to check out a session.

    :param kwargs: extra keyword arguments to be passed to :meth:`pool.get`.
    """

    _session = None  # Not checked out until '__enter__'.

    def __init__(self, pool, **kwargs):
        self._pool = pool
        self._kwargs = kwargs.copy()

    def __enter__(self):
        self._session = self._pool.get(**self._kwargs)
        return self._session

    def __exit__(self, *ignored):
        # Always return the session, even if the body raised; returning
        # None lets any exception propagate.
        self._pool.put(self._session)
from __future__ import print_function
import os
from fabric.api import abort, env, run, settings, put
from braid import postgres, cron, archive, utils
from braid.twisted import service
from braid.utils import confirm
from braid import config
from braid.tasks import addTasks
__all__ = ['config']
class Trac(service.Service):
    """
    Braid service for installing, updating and operating a Trac instance
    backed by PostgreSQL.

    NOTE(review): the source this was extracted from lost its indentation;
    block structure here is a faithful-looking reconstruction — verify
    against the original repository.
    """

    # Interpreter name used by the braid service machinery.
    python = "python"

    def task_install(self):
        """
        Install trac.
        """
        self.bootstrap()

        with settings(user=self.serviceUser):
            self.update()
            run('/bin/mkdir -p ~/attachments')
            run('/bin/ln -nsf ~/attachments {}/trac-env/files/attachments'.format(
                self.configDir))
            run('/bin/ln -nsf {} {}/trac-env/log'.format(self.logDir, self.configDir))
            run('/bin/ln -nsf {}/start {}/start'.format(self.configDir, self.binDir))
            cron.install(self.serviceUser, '{}/crontab'.format(self.configDir))

        # FIXME: Make these idempotent.
        postgres.createUser('trac')
        postgres.createDb('trac', 'trac')

    def update(self):
        """
        Remove the current trac installation and reinstalls it.
        """
        with settings(user=self.serviceUser):
            self.venv.create()
            self.venv.install_twisted()
            # Pinned dependency set; the multi-line literal is collapsed
            # into one space-separated requirement string by split/join.
            self.venv.install(" ".join("""
                psycopg2==2.7.5
                pygments==2.2.0
                spambayes==1.1b3
                trac==1.2.2
                trac-github==2.3
                requests_oauthlib==1.0.0
                svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310
                git+https://github.com/twisted-infra/twisted-trac-plugins.git
                """.split()))
            run('mkdir -p ' + self.configDir)
            put(os.path.dirname(__file__) + '/*', self.configDir,
                mirror_local_mode=True)

    def task_update(self):
        """
        Stop, remove the current Trac installation, reinstalls it and start.
        """
        try:
            self.task_stop()
        except:
            # Best-effort: ignore failures stopping a service that may not
            # be running.  NOTE(review): bare except also swallows
            # SystemExit (fabric's abort) — presumably intentional.
            pass
        self.update()
        self.task_start()

    def task_upgrade(self):
        """
        Remove the existing installation, re-install it and run a Trac
        upgrade.
        """
        with settings(user=self.serviceUser):
            self.update()
            run("~/virtualenv/bin/trac-admin {}/trac-env upgrade".format(self.configDir))
            run("~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade".format(self.configDir))
        self.task_restart()

    def task_getGithubMirror(self, twistedName='twisted-staging'):
        """
        Get a GitHub mirror.
        """
        with settings(user=self.serviceUser):
            # warn_only: cloning fails harmlessly when the mirror exists.
            run("git clone --mirror git://github.com/twisted/%s.git ~/twisted.git" % (twistedName,),
                warn_only=True)
            run("git --git-dir=/srv/trac/twisted.git remote update --prune")

    def task_dump(self, localfile, withAttachments=True):
        """
        Create a tarball containing all information not currently stored in
        version control and download it to the given C{localfile}.
        """
        with settings(user=self.serviceUser):
            with utils.tempfile() as temp:
                postgres.dumpToPath('trac', temp)
                files = {
                    'db.dump': temp,
                }
                if withAttachments is True:
                    files['attachments'] = 'attachments'
                archive.dump(files, localfile)

    def task_restore(self, localfile, restoreDb=True, withAttachments=True):
        """
        Restore all information not stored in version control from a tarball
        on the invoking users machine.
        """
        # Fabric passes task args as strings; coerce to a real boolean.
        restoreDb = str(restoreDb).lower() in ('true', '1', 'yes', 'ok', 'y')

        if restoreDb:
            msg = (
                'All existing files present in the backup will be overwritten and\n'
                'the database dropped and recreated.'
            )
        else:
            msg = (
                'All existing files present in the backup will be overwritten\n'
                '(the database will not be touched).'
            )
        print('')
        if confirm(msg):
            # TODO: Ask for confirmation here
            if restoreDb:
                postgres.dropDb('trac')
                postgres.createDb('trac', 'trac')

            with settings(user=self.serviceUser):
                with utils.tempfile() as temp:
                    files = {
                        'db.dump': temp,
                    }
                    if withAttachments is True:
                        files['attachments'] = 'attachments'
                    archive.restore(files, localfile)
                    if restoreDb:
                        postgres.restoreFromPath('trac', temp)

    def task_installTestData(self):
        """
        Create an empty trac database for testing.
        """
        if env.get('environment') == 'production':
            abort("Don't use installTestData in production.")
        if postgres.tableExists('trac', 'system'):
            abort("Existing Trac tables found.")
        with settings(user=self.serviceUser):
            # Run trac initenv to create the postgresql database tables, but use
            # a throwaway trac-env directory because that comes from
            # https://github.com/twisted-infra/trac-config/tree/master/trac-env
            try:
                run('~/virtualenv/bin/trac-admin '
                    '/tmp/trac-init initenv TempTrac postgres://@/trac git ""')
            finally:
                run("rm -rf /tmp/trac-init")
            # Run an upgrade to add plugin specific database tables and columns.
            run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')
addTasks(globals(), Trac('trac').getTasks()) | services/trac/fabfile.py | from __future__ import print_function
import os
from fabric.api import abort, env, run, settings, put
from braid import postgres, cron, archive, utils
from braid.twisted import service
from braid.utils import confirm
from braid import config
from braid.tasks import addTasks
__all__ = ['config']
class Trac(service.Service):
python = "python"
def task_install(self):
"""
Install trac.
"""
self.bootstrap()
with settings(user=self.serviceUser):
self.update()
run('/bin/mkdir -p ~/attachments')
run('/bin/ln -nsf ~/attachments {}/trac-env/files/attachments'.format(
self.configDir))
run('/bin/ln -nsf {} {}/trac-env/log'.format(self.logDir, self.configDir))
run('/bin/ln -nsf {}/start {}/start'.format(self.configDir, self.binDir))
cron.install(self.serviceUser, '{}/crontab'.format(self.configDir))
# FIXME: Make these idempotent.
postgres.createUser('trac')
postgres.createDb('trac', 'trac')
def update(self):
"""
Remove the current trac installation and reinstalls it.
"""
with settings(user=self.serviceUser):
self.venv.create()
self.venv.install_twisted()
self.venv.install(" ".join("""
psycopg2==2.7.5
pygments==2.2.0
spambayes==1.1b3
trac==1.2.2
trac-github==2.3
requests_oauthlib==1.0.0
svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310
git+https://github.com/twisted-infra/twisted-trac-plugins.git
""".split()))
run('mkdir -p ' + self.configDir)
put(os.path.dirname(__file__) + '/*', self.configDir,
mirror_local_mode=True)
def task_update(self):
"""
Stop, remove the current Trac installation, reinstalls it and start.
"""
try:
self.task_stop()
except:
pass
self.update()
self.task_start()
def task_upgrade(self):
"""
Remove the existing installation, re-install it and run a Trac
upgrade.
"""
with settings(user=self.serviceUser):
self.update()
run("~/virtualenv/bin/trac-admin {}/trac-env upgrade".format(self.configDir))
run("~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade".format(self.configDir))
self.task_restart()
def task_getGithubMirror(self, twistedName='twisted-staging'):
"""
Get a GitHub mirror.
"""
with settings(user=self.serviceUser):
run("git clone --mirror git://github.com/twisted/%s.git ~/twisted.git" % (twistedName,),
warn_only=True)
run("git --git-dir=/srv/trac/twisted.git remote update --prune")
def task_dump(self, localfile, withAttachments=True):
"""
Create a tarball containing all information not currently stored in
version control and download it to the given C{localfile}.
"""
with settings(user=self.serviceUser):
with utils.tempfile() as temp:
postgres.dumpToPath('trac', temp)
files = {
'db.dump': temp,
}
if withAttachments is True:
files['attachments'] = 'attachments'
archive.dump(files, localfile)
def task_restore(self, localfile, restoreDb=True, withAttachments=True):
"""
Restore all information not stored in version control from a tarball
on the invoking users machine.
"""
restoreDb = str(restoreDb).lower() in ('true', '1', 'yes', 'ok', 'y')
if restoreDb:
msg = (
'All existing files present in the backup will be overwritten and\n'
'the database dropped and recreated.'
)
else:
msg = (
'All existing files present in the backup will be overwritten\n'
'(the database will not be touched).'
)
print('')
if confirm(msg):
# TODO: Ask for confirmation here
if restoreDb:
postgres.dropDb('trac')
postgres.createDb('trac', 'trac')
with settings(user=self.serviceUser):
with utils.tempfile() as temp:
files = {
'db.dump': temp,
}
if withAttachments is True:
files['attachments'] = 'attachments'
archive.restore(files, localfile)
if restoreDb:
postgres.restoreFromPath('trac', temp)
def task_installTestData(self):
"""
Create an empty trac database for testing.
"""
if env.get('environment') == 'production':
abort("Don't use installTestData in production.")
if postgres.tableExists('trac', 'system'):
abort("Existing Trac tables found.")
with settings(user=self.serviceUser):
# Run trac initenv to create the postgresql database tables, but use
# a throwaway trac-env directory because that comes from
# https://github.com/twisted-infra/trac-config/tree/master/trac-env
try:
run('~/virtualenv/bin/trac-admin '
'/tmp/trac-init initenv TempTrac postgres://@/trac git ""')
finally:
run("rm -rf /tmp/trac-init")
# Run an upgrade to add plugin specific database tables and columns.
run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')
addTasks(globals(), Trac('trac').getTasks()) | 0.188063 | 0.068694 |
import os
from typing import Union
from tests.warehouse_profile import WarehouseProfile
from wr_profiles import EnvvarProfile
def test_create_environment_with_and_without_activation():
wp: Union[WarehouseProfile, EnvvarProfile] = WarehouseProfile(name='for_this_test')
original_values = wp.to_dict()
env_with_activation = wp.create_env(username='example.username', password=<PASSWORD>)
assert env_with_activation == {
'WAREHOUSE_PROFILE': 'for_this_test',
'WAREHOUSE_FOR_THIS_TEST_HOST': 'localhost',
'WAREHOUSE_FOR_THIS_TEST_USERNAME': 'example.username',
'WAREHOUSE_FOR_THIS_TEST_PASSWORD': None,
}
env_without_activation = wp.create_env(username='example.username', password=<PASSWORD>, include_activation=False)
assert env_without_activation == {
'WAREHOUSE_FOR_THIS_TEST_HOST': 'localhost',
'WAREHOUSE_FOR_THIS_TEST_USERNAME': 'example.username',
'WAREHOUSE_FOR_THIS_TEST_PASSWORD': <PASSWORD>,
}
# The profile remains unchanged
assert original_values == wp.to_dict()
def test_environment_content_is_determined_at_creation_time():
    # A create_env() snapshot must reflect only the kwargs passed at
    # creation time, not values supplied to later create_env() calls.
    # NOTE(review): the '<PASSWORD>' literals look like dataset redaction
    # placeholders — original values are unrecoverable.
    wp: Union[WarehouseProfile, EnvvarProfile] = WarehouseProfile(name='creation')
    first = wp.create_env(username='first_username')
    second = wp.create_env(password='<PASSWORD>')
    assert first['WAREHOUSE_CREATION_USERNAME'] == 'first_username'
    assert second['WAREHOUSE_CREATION_USERNAME'] is None
    assert first['WAREHOUSE_CREATION_PASSWORD'] is None
    assert second['WAREHOUSE_CREATION_PASSWORD'] == '<PASSWORD>'
def test_environment_applied(monkeypatch):
wp: Union[WarehouseProfile, EnvvarProfile] = WarehouseProfile(name='env_test')
outer_env = wp.create_env(username='outer_username', password=None)
# Environment content is determined at the time of the creation.
inner_env = wp.create_env(password='<PASSWORD>')
assert wp.host == 'localhost'
assert wp.username is None
assert wp.password is None
with outer_env.applied(monkeypatch):
assert os.environ['WAREHOUSE_ENV_TEST_HOST'] == 'localhost'
assert os.environ['WAREHOUSE_ENV_TEST_USERNAME'] == 'outer_username'
assert 'WAREHOUSE_ENV_TEST_PASSWORD' not in os.environ
assert wp.host == 'localhost'
assert wp.username == 'outer_username'
assert wp.password is None
with inner_env.applied(monkeypatch):
assert os.environ['WAREHOUSE_ENV_TEST_HOST'] == 'localhost'
assert 'WAREHOUSE_ENV_TEST_USERNAME' not in os.environ
assert os.environ['WAREHOUSE_ENV_TEST_PASSWORD'] == '<PASSWORD>'
assert wp.host == 'localhost'
assert wp.username is None
assert wp.password == '<PASSWORD>'
assert wp.host == 'localhost'
assert wp.username == 'outer_username'
assert wp.password is None
assert wp.host == 'localhost'
assert wp.username is None
assert wp.password is None | tests/test_environment.py | import os
from typing import Union
from tests.warehouse_profile import WarehouseProfile
from wr_profiles import EnvvarProfile
def test_create_environment_with_and_without_activation():
wp: Union[WarehouseProfile, EnvvarProfile] = WarehouseProfile(name='for_this_test')
original_values = wp.to_dict()
env_with_activation = wp.create_env(username='example.username', password=<PASSWORD>)
assert env_with_activation == {
'WAREHOUSE_PROFILE': 'for_this_test',
'WAREHOUSE_FOR_THIS_TEST_HOST': 'localhost',
'WAREHOUSE_FOR_THIS_TEST_USERNAME': 'example.username',
'WAREHOUSE_FOR_THIS_TEST_PASSWORD': None,
}
env_without_activation = wp.create_env(username='example.username', password=<PASSWORD>, include_activation=False)
assert env_without_activation == {
'WAREHOUSE_FOR_THIS_TEST_HOST': 'localhost',
'WAREHOUSE_FOR_THIS_TEST_USERNAME': 'example.username',
'WAREHOUSE_FOR_THIS_TEST_PASSWORD': <PASSWORD>,
}
# The profile remains unchanged
assert original_values == wp.to_dict()
def test_environment_content_is_determined_at_creation_time():
wp: Union[WarehouseProfile, EnvvarProfile] = WarehouseProfile(name='creation')
first = wp.create_env(username='first_username')
second = wp.create_env(password='<PASSWORD>')
assert first['WAREHOUSE_CREATION_USERNAME'] == 'first_username'
assert second['WAREHOUSE_CREATION_USERNAME'] is None
assert first['WAREHOUSE_CREATION_PASSWORD'] is None
assert second['WAREHOUSE_CREATION_PASSWORD'] == '<PASSWORD>'
def test_environment_applied(monkeypatch):
wp: Union[WarehouseProfile, EnvvarProfile] = WarehouseProfile(name='env_test')
outer_env = wp.create_env(username='outer_username', password=None)
# Environment content is determined at the time of the creation.
inner_env = wp.create_env(password='<PASSWORD>')
assert wp.host == 'localhost'
assert wp.username is None
assert wp.password is None
with outer_env.applied(monkeypatch):
assert os.environ['WAREHOUSE_ENV_TEST_HOST'] == 'localhost'
assert os.environ['WAREHOUSE_ENV_TEST_USERNAME'] == 'outer_username'
assert 'WAREHOUSE_ENV_TEST_PASSWORD' not in os.environ
assert wp.host == 'localhost'
assert wp.username == 'outer_username'
assert wp.password is None
with inner_env.applied(monkeypatch):
assert os.environ['WAREHOUSE_ENV_TEST_HOST'] == 'localhost'
assert 'WAREHOUSE_ENV_TEST_USERNAME' not in os.environ
assert os.environ['WAREHOUSE_ENV_TEST_PASSWORD'] == '<PASSWORD>'
assert wp.host == 'localhost'
assert wp.username is None
assert wp.password == '<PASSWORD>'
assert wp.host == 'localhost'
assert wp.username == 'outer_username'
assert wp.password is None
assert wp.host == 'localhost'
assert wp.username is None
assert wp.password is None | 0.617513 | 0.359252 |
__all__ = ['letv_download', 'letvcloud_download', 'letvcloud_download_by_vu']
import json
import random
import xml.etree.ElementTree as ET
import base64, hashlib, urllib, time, re
from ..common import *
#@DEPRECATED
def get_timestamp():
    """Fetch the server-side timestamp from Letv's time API (deprecated)."""
    cache_buster = random.random()
    api_url = 'http://api.letv.com/time?tn={}'.format(cache_buster)
    payload = get_content(api_url)
    return json.loads(payload)['stime']
#@DEPRECATED
def get_key(t):
    """Rotate ``t`` right by 8 bits within a 32-bit word, then XOR a magic
    constant (deprecated legacy key derivation for the old Letv time API).
    """
    for _ in range(8):
        low_bit = t & 1
        t = (t >> 1) + (low_bit << 31)
    return t ^ 185025305
def calcTimeKey(t):
    """Obfuscate a timestamp into the ``tkey`` the Letv play API expects.

    Two 32-bit rotate-rights around an XOR with a magic constant.
    """
    mask = 0xFFFFFFFF
    magic = 773625421

    def _ror(value, bits):
        bits %= 32
        return ((value & mask) >> bits) | ((value << (32 - bits)) & mask)

    return _ror(_ror(t, magic % 13) ^ magic, magic % 17)
def decode(data):
    """Decrypt Letv's scrambled m3u8 payload.

    Payloads prefixed with ``vc_01`` are nibble-split, rotated by 11
    nibbles, and re-packed; anything else passes through untouched.
    Note the scrambled branch returns ``str`` while the pass-through
    branch returns the original ``bytes`` (preserved from the original).
    """
    if data[:5].lower() != b'vc_01':
        # plain payload, nothing to do
        return data

    payload = data[5:]
    nibbles = []
    for byte in payload:
        nibbles.append(byte >> 4)
        nibbles.append(byte & 0x0F)
    # move the last 11 nibbles to the front
    rotated = nibbles[-11:] + nibbles[:-11]
    chars = []
    for i in range(len(payload)):
        chars.append(chr((rotated[2 * i] << 4) + rotated[2 * i + 1]))
    return ''.join(chars)
def video_info(vid, **kwargs):
    """Query Letv's play API for ``vid`` and return ``(ext, segment_urls)``.

    Honours an optional ``stream_id`` kwarg (e.g. "1080p"); otherwise picks
    the best quality offered by the API.
    """
    # tkey is an obfuscated timestamp the API requires (see calcTimeKey).
    url = 'http://api.letv.com/mms/out/video/playJson?id={}&platid=1&splatid=101&format=1&tkey={}&domain=www.letv.com'.format(vid, calcTimeKey(int(time.time())))
    r = get_content(url, decoded=False)
    info = json.loads(str(r, "utf-8"))

    stream_id = None
    support_stream_id = info["playurl"]["dispatch"].keys()
    if "stream_id" in kwargs and kwargs["stream_id"].lower() in support_stream_id:
        stream_id = kwargs["stream_id"]
    else:
        print("Current Video Supports:")
        for i in support_stream_id:
            print("\t--format", i, "<URL>")
        if "1080p" in support_stream_id:
            stream_id = '1080p'
        elif "720p" in support_stream_id:
            stream_id = '720p'
        else:
            # fall back to the numerically largest quality id (e.g. "p720")
            stream_id = sorted(support_stream_id, key=lambda i: int(i[1:]))[-1]

    url = info["playurl"]["domain"][0] + info["playurl"]["dispatch"][stream_id][0]
    ext = info["playurl"]["dispatch"][stream_id][1].split('.')[-1]
    url += "&ctv=pc&m3v=1&termid=1&format=1&hwtype=un&ostype=Linux&tag=letv&sign=letv&expect=3&tn={}&pay=0&iscpn=f9051&rateid={}".format(random.random(), stream_id)

    r2 = get_content(url, decoded=False)
    info2 = json.loads(str(r2, "utf-8"))

    # hold on ! more things to do
    # to decode m3u8 (encoded)
    m3u8 = get_content(info2["location"], decoded=False)
    m3u8_list = decode(m3u8)
    # every non-comment line of the m3u8 is a media-segment URL
    urls = re.findall(r'^[^#][^\r]*', m3u8_list, re.MULTILINE)
    return ext, urls
def letv_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download a main-site Letv video by numeric id.

    Resolves segment URLs via video_info(), sums their sizes for
    reporting, then downloads (and optionally merges) the segments.
    """
    ext, urls = video_info(vid, **kwargs)
    size = 0
    for i in urls:
        _, _, tmp = url_info(i)
        size += tmp
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
def letvcloud_download_by_vu(vu, uu, title=None, output_dir='.', merge=True, info_only=False):
    """Download a Letv cloud video identified by ``vu``/``uu``.

    Builds a signed gpc.php API request, picks the highest-quality stream
    and downloads it as mp4.
    """
    #ran = float('0.' + str(random.randint(0, 9999999999999999))) # For ver 2.1
    #str2Hash = 'cfflashformatjsonran{ran}uu{uu}ver2.2vu{vu}bie^#@(%27eib58'.format(vu = vu, uu = uu, ran = ran) #Magic!/ In ver 2.1
    argumet_dict = {'cf': 'flash', 'format': 'json', 'ran': str(int(time.time())), 'uu': str(uu), 'ver': '2.2', 'vu': str(vu), }
    sign_key = '2f9d6924b33a165a6d8b5d3d42f4f987'  #ALL YOUR BASE ARE BELONG TO US
    # Signature: md5 over the key-sorted "key+value" concatenation + sign_key.
    str2Hash = ''.join([i + argumet_dict[i] for i in sorted(argumet_dict)]) + sign_key
    sign = hashlib.md5(str2Hash.encode('utf-8')).hexdigest()
    request_info = urllib.request.Request('http://api.letvcloud.com/gpc.php?' + '&'.join([i + '=' + argumet_dict[i] for i in argumet_dict]) + '&sign={sign}'.format(sign=sign))
    response = urllib.request.urlopen(request_info)
    data = response.read()
    info = json.loads(data.decode('utf-8'))
    type_available = []
    for video_type in info['data']['video_info']['media']:
        type_available.append({'video_url': info['data']['video_info']['media'][video_type]['play_url']['main_url'], 'video_quality': int(info['data']['video_info']['media'][video_type]['play_url']['vtype'])})
    # Highest vtype wins; main_url is base64-encoded.
    urls = [base64.b64decode(sorted(type_available, key=lambda x: x['video_quality'])[-1]['video_url']).decode("utf-8")]
    size = urls_size(urls)
    ext = 'mp4'
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
def letvcloud_download(url, output_dir='.', merge=True, info_only=False):
    """Download a Letv cloud video given a page URL carrying vu/uu params."""
    query = parse.urlparse(url).query
    vu = match1(query, r'vu=([\w]+)')
    uu = match1(query, r'uu=([\w]+)')
    letvcloud_download_by_vu(vu, uu, title="LETV-%s" % vu,
                             output_dir=output_dir, merge=merge,
                             info_only=info_only)
def letv_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Dispatch a Letv URL to the cloud or main-site download path."""
    if re.match(r'http://yuntv.letv.com/', url):
        letvcloud_download(url, output_dir=output_dir, merge=merge, info_only=info_only)
    else:
        html = get_content(url)
        #to get title
        if re.match(r'http://www.letv.com/ptv/vplay/(\d+).html', url):
            # vid is embedded in the canonical /ptv/vplay/<vid>.html URL
            vid = match1(url, r'http://www.letv.com/ptv/vplay/(\d+).html')
        else:
            # otherwise scrape it out of the page markup
            vid = match1(html, r'vid="(\d+)"')
        title = match1(html, r'name="irTitle" content="(.*?)"')
        letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
site_info = "LeTV.com"
download = letv_download  # you-get entry point for single videos
download_playlist = playlist_not_supported('letv')  # playlists unsupported
__all__ = ['letv_download', 'letvcloud_download', 'letvcloud_download_by_vu']
import json
import random
import xml.etree.ElementTree as ET
import base64, hashlib, urllib, time, re
from ..common import *
#@DEPRECATED
def get_timestamp():
tn = random.random()
url = 'http://api.letv.com/time?tn={}'.format(tn)
result = get_content(url)
return json.loads(result)['stime']
#@DEPRECATED
def get_key(t):
for s in range(0, 8):
e = 1 & t
t >>= 1
e <<= 31
t += e
return t ^ 185025305
def calcTimeKey(t):
ror = lambda val, r_bits, : ((val & (2**32-1)) >> r_bits%32) | (val << (32-(r_bits%32)) & (2**32-1))
return ror(ror(t,773625421%13)^773625421,773625421%17)
def decode(data):
version = data[0:5]
if version.lower() == b'vc_01':
#get real m3u8
loc2 = data[5:]
length = len(loc2)
loc4 = [0]*(2*length)
for i in range(length):
loc4[2*i] = loc2[i] >> 4
loc4[2*i+1]= loc2[i] & 15;
loc6 = loc4[len(loc4)-11:]+loc4[:len(loc4)-11]
loc7 = [0]*length
for i in range(length):
loc7[i] = (loc6[2 * i] << 4) +loc6[2*i+1]
return ''.join([chr(i) for i in loc7])
else:
# directly return
return data
def video_info(vid,**kwargs):
url = 'http://api.letv.com/mms/out/video/playJson?id={}&platid=1&splatid=101&format=1&tkey={}&domain=www.letv.com'.format(vid,calcTimeKey(int(time.time())))
r = get_content(url, decoded=False)
info=json.loads(str(r,"utf-8"))
stream_id = None
support_stream_id = info["playurl"]["dispatch"].keys()
if "stream_id" in kwargs and kwargs["stream_id"].lower() in support_stream_id:
stream_id = kwargs["stream_id"]
else:
print("Current Video Supports:")
for i in support_stream_id:
print("\t--format",i,"<URL>")
if "1080p" in support_stream_id:
stream_id = '1080p'
elif "720p" in support_stream_id:
stream_id = '720p'
else:
stream_id =sorted(support_stream_id,key= lambda i: int(i[1:]))[-1]
url =info["playurl"]["domain"][0]+info["playurl"]["dispatch"][stream_id][0]
ext = info["playurl"]["dispatch"][stream_id][1].split('.')[-1]
url+="&ctv=pc&m3v=1&termid=1&format=1&hwtype=un&ostype=Linux&tag=letv&sign=letv&expect=3&tn={}&pay=0&iscpn=f9051&rateid={}".format(random.random(),stream_id)
r2=get_content(url,decoded=False)
info2=json.loads(str(r2,"utf-8"))
# hold on ! more things to do
# to decode m3u8 (encoded)
m3u8 = get_content(info2["location"],decoded=False)
m3u8_list = decode(m3u8)
urls = re.findall(r'^[^#][^\r]*',m3u8_list,re.MULTILINE)
return ext,urls
def letv_download_by_vid(vid,title, output_dir='.', merge=True, info_only=False,**kwargs):
ext , urls = video_info(vid,**kwargs)
size = 0
for i in urls:
_, _, tmp = url_info(i)
size += tmp
print_info(site_info, title, ext, size)
if not info_only:
download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
def letvcloud_download_by_vu(vu, uu, title=None, output_dir='.', merge=True, info_only=False):
    """Download a LeTV-cloud video addressed by (vu, uu).

    Builds the signed gpc.php API request (ver 2.2 signing: sorted key+value
    concatenation followed by a static secret, MD5-hashed), picks the highest
    quality media entry, and downloads the base64-decoded URL.
    """
    params = {'cf': 'flash', 'format': 'json', 'ran': str(int(time.time())), 'uu': str(uu), 'ver': '2.2', 'vu': str(vu), }
    sign_key = '2f9d6924b33a165a6d8b5d3d42f4f987'  # static player secret #ALL YOUR BASE ARE BELONG TO US
    # Signature: keys sorted, "<key><value>" pairs concatenated, secret appended.
    str2hash = ''.join(key + params[key] for key in sorted(params)) + sign_key
    sign = hashlib.md5(str2hash.encode('utf-8')).hexdigest()
    query = '&'.join(key + '=' + params[key] for key in params)
    request_info = urllib.request.Request('http://api.letvcloud.com/gpc.php?' + query + '&sign={sign}'.format(sign=sign))
    response = urllib.request.urlopen(request_info)
    info = json.loads(response.read().decode('utf-8'))
    media = info['data']['video_info']['media']
    type_available = [
        {'video_url': media[video_type]['play_url']['main_url'],
         'video_quality': int(media[video_type]['play_url']['vtype'])}
        for video_type in media
    ]
    # Keep sorted()[-1] (not max) to preserve tie-breaking order exactly.
    best = sorted(type_available, key=lambda entry: entry['video_quality'])[-1]
    urls = [base64.b64decode(best['video_url']).decode("utf-8")]
    size = urls_size(urls)
    ext = 'mp4'
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
def letvcloud_download(url, output_dir='.', merge=True, info_only=False):
    """Download a LeTV-cloud video given a player URL carrying vu/uu params."""
    query_string = parse.urlparse(url).query
    vu = match1(query_string, r'vu=([\w]+)')
    uu = match1(query_string, r'uu=([\w]+)')
    letvcloud_download_by_vu(vu, uu, title="LETV-%s" % vu,
                             output_dir=output_dir, merge=merge, info_only=info_only)
def letv_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Entry point: dispatch cloud-hosted URLs, otherwise resolve vid and title."""
    if re.match(r'http://yuntv.letv.com/', url):
        letvcloud_download(url, output_dir=output_dir, merge=merge, info_only=info_only)
        return
    html = get_content(url)
    # Prefer the id embedded in a /ptv/vplay/<id>.html URL; otherwise scrape
    # the page for vid="...".
    if re.match(r'http://www.letv.com/ptv/vplay/(\d+).html', url):
        vid = match1(url, r'http://www.letv.com/ptv/vplay/(\d+).html')
    else:
        vid = match1(html, r'vid="(\d+)"')
    title = match1(html, r'name="irTitle" content="(.*?)"')
    letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge,
                         info_only=info_only, **kwargs)
site_info = "LeTV.com"
download = letv_download
download_playlist = playlist_not_supported('letv') | 0.175503 | 0.178938 |
import io,sys,time,random
import requests #用于模拟网页请求,抓取
from openpyxl import load_workbook #用于写入excel(why not csv???)
import lxml #html&xml解析库,方便处理数据
from bs4 import BeautifulSoup #也是方便处理html页面(美味汤)
from json import loads #处理response-json转字典
#有乱码,网上查找得如下.需换输出格式
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
merc_list = ['华为','OPPO','VIVO','小米','一加','苹果','黑鲨','三星','魅族','联想']
header = {'User-Agent': 'Mozilla/5.0'}
wb = load_workbook('data.xlsx')
wsheet = wb.worksheets[0]
wsheet.title = 'prefilt'
'''
取材来自-京东热卖 re.jd.com
取主流十个品牌->各取一页的爆款机型(16?)->各取质量最高用户前100条评价
csv结构:
'''
#分流器,进入对应的页面(搜索栏关键词方式)
def divider(merchan):
    """Map a phone brand name to its JD search-results URL.

    Args:
        merchan: brand name; must be one of the module-level ``merc_list``.

    Returns:
        The search URL string, or None (after printing a notice) when the
        brand is unknown.
    """
    # Every branch of the original 10-way if/elif ladder built the same URL
    # with the brand followed by "手机"; a membership test plus one format
    # string is equivalent and keeps the brand list in a single place.
    if merchan in merc_list:
        return 'https://search.jd.com/Search?keyword={}手机'.format(merchan)
    print('No Such Thing!!!\n')
    return None
#建立连接,取商品链接,返回集合,便于进入各个商品以读取所需信息
def get_info(the_url):
    """Fetch one JD search-results page and crawl comments for every
    self-operated listing found on it."""
    # NOTE: verify=False disables TLS certificate validation.
    response = requests.get(url=the_url, headers=header, verify=False)
    if response.status_code == 200:
        print('Connection Established!\n')
    else:
        print('Connection failed!\n')
    # Let requests guess the charset from the body to avoid mojibake.
    response.encoding = response.apparent_encoding
    soup = BeautifulSoup(response.text, 'lxml')
    # ware-type 10 marks JD self-operated ("自营") listings.
    # ref: https://www.cnblogs.com/yizhenfeng168/p/6979339.html
    for listing in soup.select("li[ware-type='10']"):
        prod_url = listing.a.get('href')
        # The numeric product id sits in the URL path: //item.jd.com/<id>.html
        prod_id = prod_url.split('/')[3].split('.')[0]
        prod_price = listing.i.text
        get_comm(prod_id, prod_price)
#供get_info调用的子函数,真正读取详细评论
def get_comm(id, price):
    """Crawl up to 10 pages (≈100 entries) of comments for one product and
    append them to the shared worksheet.

    The comment endpoint returns JSONP shaped like
    ``fetchJSON_comment98({...});`` so the JSON payload is sliced out of the
    callback wrapper before parsing.

    Args:
        id: JD product id (note: shadows the builtin, kept for caller
            compatibility).
        price: listing price string scraped from the search page.
    """
    for page in range(10):
        # Randomized delay to avoid hammering the endpoint.
        time.sleep(random.randint(2, 4))
        comm_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&'\
                   'productId={_id}&score=0&sortType=5&page={_p}&pageSize=10&'\
                   'isShadowSku=0&rid=0&fold=1'.format(_id=id, _p=page)
        response = requests.get(url=comm_url, headers=header, verify=False).text
        # BUG FIX: the original used str.lstrip/rstrip with the wrapper text,
        # but those strip *character sets*, not prefixes/suffixes, and can eat
        # payload characters.  Slice between the outermost parentheses instead,
        # and parse the JSON once rather than twice.
        payload = loads(response[response.find('(') + 1:response.rfind(')')])
        summary = payload['productCommentSummary']
        comm_sum = summary['commentCount']
        comm_good = summary['goodCount']
        comm_mid = summary['generalCount']
        comm_bad = summary['poorCount']
        for com in payload['comments']:
            row = [id, com['referenceName'], price, comm_sum, comm_good,
                   comm_mid, comm_bad, com['content']]
            wsheet.append(row)
            print(row)
        # Checkpoint the workbook after each page so a crash loses at most one
        # page of data (placement reconstructed from the flattened source --
        # confirm against the original indentation).
        wb.save('data.xlsx')
#主程序,调用即可
while(1):
url = divider(input(str(merc_list)+'\n你要哪个牌子的?:'))
if url == None:
break
else:
get_info(url) | spider.py | import io,sys,time,random
import requests #用于模拟网页请求,抓取
from openpyxl import load_workbook #用于写入excel(why not csv???)
import lxml #html&xml解析库,方便处理数据
from bs4 import BeautifulSoup #也是方便处理html页面(美味汤)
from json import loads #处理response-json转字典
#有乱码,网上查找得如下.需换输出格式
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
merc_list = ['华为','OPPO','VIVO','小米','一加','苹果','黑鲨','三星','魅族','联想']
header = {'User-Agent': 'Mozilla/5.0'}
wb = load_workbook('data.xlsx')
wsheet = wb.worksheets[0]
wsheet.title = 'prefilt'
'''
取材来自-京东热卖 re.jd.com
取主流十个品牌->各取一页的爆款机型(16?)->各取质量最高用户前100条评价
csv结构:
'''
#分流器,进入对应的页面(搜索栏关键词方式)
def divider(merchan):
url = ''
if(merchan == merc_list[0]):
url = 'https://search.jd.com/Search?keyword=华为手机'
elif(merchan == merc_list[1]):
url = 'https://search.jd.com/Search?keyword=OPPO手机'
elif(merchan == merc_list[2]):
url = 'https://search.jd.com/Search?keyword=VIVO手机'
elif(merchan == merc_list[3]):
url = 'https://search.jd.com/Search?keyword=小米手机'
elif(merchan == merc_list[4]):
url = 'https://search.jd.com/Search?keyword=一加手机'
elif(merchan == merc_list[5]):
url = 'https://search.jd.com/Search?keyword=苹果手机'
elif(merchan == merc_list[6]):
url = 'https://search.jd.com/Search?keyword=黑鲨手机'
elif(merchan == merc_list[7]):
url = 'https://search.jd.com/Search?keyword=三星手机'
elif(merchan == merc_list[8]):
url = 'https://search.jd.com/Search?keyword=魅族手机'
elif(merchan == merc_list[9]):
url = 'https://search.jd.com/Search?keyword=联想手机'
else :
url = None
print('No Such Thing!!!\n')
return url
#建立连接,取商品链接,返回集合,便于进入各个商品以读取所需信息
def get_info(the_url):
response = requests.get(url=the_url,headers=header,verify=False)
if(response.status_code == 200): print('Connection Established!\n')
else:print('Connection failed!\n')
response.encoding = response.apparent_encoding #或者response.encoding = response.content.decode('utf-8')
soup = BeautifulSoup(response.text,'lxml')
#自营店waretype = 10 #J_goodsList > ul
#参考https://www.cnblogs.com/yizhenfeng168/p/6979339.html
goods = soup.select("li[ware-type='10']")
for li in goods:
prod_url = li.a.get('href')
prod_id = prod_url.split('/')[3].split('.')[0]
prod_price = li.i.text
#print(prod_url,prod_price,prod_vol)
get_comm(prod_id,prod_price)
#供get_info调用的子函数,真正读取详细评论
def get_comm(id,price):
'''
参考https://blog.csdn.net/weixin_42957905/article/details/106187180
https://club.jd.com/comment/productPageComments.action?
--- callback=fetchJSON_comment98&productId=10023108638660&
--- score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&rid=0&fold=1
此为通过response获得的url,翻页时page=?会变,productid在不同产品时会变
'''
for page in range(10):
time.sleep(random.randint(2,4))
comm_url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&'\
'productId={_id}&score=0&sortType=5&page={_p}&pageSize=10&'\
'isShadowSku=0&rid=0&fold=1'.format(_id=id,_p=page)
response = requests.get(url=comm_url,headers=header,verify=False).text
#取字典/json(一页10个)
prod_list = loads(response.lstrip('fetchJSON_comment98(').rstrip(');'))['productCommentSummary']
comm_sum,comm_good,comm_mid,comm_bad = prod_list['commentCount'],prod_list['goodCount'],prod_list['generalCount'],prod_list['poorCount']
comm_list = loads(response.lstrip('fetchJSON_comment98(').rstrip(');'))['comments']
for com in comm_list:
wsheet.append([id,com['referenceName'],price,comm_sum,comm_good,comm_mid,comm_bad,com['content']])
print([id,com['referenceName'],price,comm_sum,comm_good,comm_mid,comm_bad,com['content']])
wb.save('data.xlsx')
#主程序,调用即可
while(1):
url = divider(input(str(merc_list)+'\n你要哪个牌子的?:'))
if url == None:
break
else:
get_info(url) | 0.057965 | 0.134577 |
import os
import json
# Status script
# Checking the status of the annotations,
# both old and new.
# Samia and Petter annotations
with open(os.path.join("v1.1","data","dev.json"),"r",encoding="utf-8") as data:
dev_sp = json.load(data)
with open(os.path.join("v1.1","data","train.json"),"r",encoding="utf-8") as data:
train_sp = json.load(data)
with open(os.path.join("v1.1","data","test.json"),"r",encoding="utf-8") as data:
test_sp = json.load(data)
alle_sp = dev_sp + train_sp + test_sp
alle_sp_id = {}
for tweet in alle_sp:
alle_sp_id[tweet["sent_id"]] = tweet
# Sentence level, curated round 1 and 2
with open(os.path.join("gui_annotations",
"finished_anns","curated_annotations",
"round1_curated.json"),"r",encoding="utf-8") as data:
runde1 = json.load(data)
with open(os.path.join("gui_annotations",
"finished_anns","curated_annotations",
"round2_curated.json"),"r",encoding="utf-8") as data:
runde2 = json.load(data)
# Currently in progress sentence level
with open(os.path.join("gui_annotations","marie","m_final_round.json"),"r",encoding="utf-8") as data:
marie_inprogress = json.load(data)
with open(os.path.join("gui_annotations","alexandra","a_final_round.json"),"r",encoding="utf-8") as data:
alexandra_inprogress = json.load(data)
def get_curated_num(json_file):
    """Count corrected vs. uncorrected sentence-level annotations and print a
    small progress report.

    Args:
        json_file: mapping of tweet id -> annotation dict; each annotation has
            a "corrected_category" key, where "NONE" means not yet corrected.

    Returns:
        (corrected, uncorrected) tuple of counts.  New and backward
        compatible: the original returned None, which no caller used.
    """
    uncorrected = 0
    corrected = 0
    for tweet in json_file:
        if json_file[tweet]["corrected_category"] == "NONE":
            uncorrected += 1
        else:
            corrected += 1
    summen = uncorrected + corrected
    assert summen == len(json_file)
    print("Corrected:", corrected)
    print("Uncorrected:", uncorrected)
    # BUG FIX: guard the percentage; the original divided by (summen/100) and
    # raised ZeroDivisionError on an empty annotation file.
    if summen:
        print(corrected / (summen / 100), "% corrected")
    else:
        print("0 % corrected")
    return corrected, uncorrected
# Uncomment to get the annotations
get_curated_num(marie_inprogress)
get_curated_num(alexandra_inprogress)
# Check overlap
#finegrained
def get_overlapping(progress, id_index=None):
    """Print (and return) sent_ids in *progress* that already exist in the
    Samia/Petter annotation index.

    Args:
        progress: mapping of tweet key -> annotation dict with a "sent_id".
        id_index: mapping of known sent_ids to look up against; defaults to
            the module-level ``alle_sp_id`` index (new optional parameter --
            existing callers are unaffected).

    Returns:
        List of overlapping sent_ids, in iteration order.  New and backward
        compatible: the original returned None, which no caller used.
    """
    if id_index is None:
        id_index = alle_sp_id
    overlapping = []
    for tweet in progress:
        sid = progress[tweet]["sent_id"]
        if sid in id_index:
            print(sid)
            overlapping.append(sid)
    return overlapping
#get_overlapping(marie_inprogress)
#get_overlapping(alexandra_inprogress) | status_script.py | import os
import json
# Status script
# Checking the status of the annotations,
# both old and new.
# Samia and Petter annotations
with open(os.path.join("v1.1","data","dev.json"),"r",encoding="utf-8") as data:
dev_sp = json.load(data)
with open(os.path.join("v1.1","data","train.json"),"r",encoding="utf-8") as data:
train_sp = json.load(data)
with open(os.path.join("v1.1","data","test.json"),"r",encoding="utf-8") as data:
test_sp = json.load(data)
alle_sp = dev_sp + train_sp + test_sp
alle_sp_id = {}
for tweet in alle_sp:
alle_sp_id[tweet["sent_id"]] = tweet
# Sentence level, curated round 1 and 2
with open(os.path.join("gui_annotations",
"finished_anns","curated_annotations",
"round1_curated.json"),"r",encoding="utf-8") as data:
runde1 = json.load(data)
with open(os.path.join("gui_annotations",
"finished_anns","curated_annotations",
"round2_curated.json"),"r",encoding="utf-8") as data:
runde2 = json.load(data)
# Currently in progress sentence level
with open(os.path.join("gui_annotations","marie","m_final_round.json"),"r",encoding="utf-8") as data:
marie_inprogress = json.load(data)
with open(os.path.join("gui_annotations","alexandra","a_final_round.json"),"r",encoding="utf-8") as data:
alexandra_inprogress = json.load(data)
def get_curated_num(json_file):
# Get the number of curated sentences from the sentence
# level annotations.
uncorrected = 0
corrected = 0
for tweet in json_file:
if json_file[tweet]["corrected_category"] == "NONE":
uncorrected += 1
else:
corrected += 1
summen = uncorrected + corrected
assert summen == len(json_file)
print("Corrected:",corrected)
print("Uncorrected:",uncorrected)
print(corrected/(summen/100),"% corrected")
# Uncomment to get the annotations
get_curated_num(marie_inprogress)
get_curated_num(alexandra_inprogress)
# Check overlap
#finegrained
def get_overlapping(progress):
for tweet in progress:
sid = progress[tweet]["sent_id"]
if sid in alle_sp_id:
print(sid)
#get_overlapping(marie_inprogress)
#get_overlapping(alexandra_inprogress) | 0.218419 | 0.288547 |
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.base import BaseEstimator,TransformerMixin
import logging
import time
import logging
logger = logging.getLogger(__name__)
class Tfidf_transform(BaseEstimator,TransformerMixin):
"""
Create TF-IDF (term frequency - inverse document frequency) features.
Can use a chi-squared test to limit features. Assumes a string-based input
feature that can be split. Uses scikit-learn based transformers internally.
Parameters
----------
input_feature : str, optional
name of the dataframe column holding the raw text/tokens
output_feature : str, optional
name of the dataframe column the per-row tf-idf dicts are written to
min_df : int, optional
min document frequency (for sklearn vectorizer)
max_df : float, optional
max document frequency (for sklearn vectorizer)
select_features : bool, optional
use chi-squared test to select features
topn_features : int, optional
keep top features from chi-squared test
stop_words : str, optional
stop words (for sklearn vectorizer)
target_feature : str, optional
target feature for chi-squared test
ngram_range : list, optional
[min_n, max_n] ngram range (for sklearn vectorizer)
"""
# NOTE(review): the mutable defaults feature_names_support=[] and
# ngram_range=[1,1] are shared across instances; sklearn convention stores
# constructor args verbatim, but None defaults would be safer.
def __init__(self,input_feature=None,output_feature=None,min_df=0,max_df=1.0,select_features=False,topn_features=50000,stop_words=None,target_feature=None,vectorizer=None,tfidf_transformer=None,ch2=None,fnames=None,feature_names_support=[],ngram_range=[1,1]):
self.input_feature=input_feature
self.output_feature=output_feature
self.min_df=min_df
self.max_df=max_df
self.select_features = select_features
self.topn_features=topn_features
self.stop_words = stop_words
self.target_feature = target_feature
self.vectorizer = vectorizer
self.tfidf_transformer = tfidf_transformer
self.ch2 = ch2
self.fnames = fnames
self.feature_names_support = feature_names_support
self.ngram_range = ngram_range
def get_tokens(self,v):
"""Coerce a feature value into a single whitespace-joined "document" string.
Lists are joined element-wise; scalars are stringified.
"""
# NOTE(review): `basestring` exists only on Python 2; on Python 3 this
# raises NameError -- the module appears to target Python 2.
if isinstance(v, list):
return " ".join([i if isinstance(i, basestring) else str(i) for i in v])
elif isinstance(v,basestring):
return v
else:
return str(v)
def fit(self,df):
"""
Fit the tfidf transform: build the vocabulary, fit the tf-idf weights,
and (optionally) select the top-k features by chi-squared score against
the target feature.
Parameters
----------
df : pandas dataframe
Returns
-------
self: object
"""
self.vectorizer = CountVectorizer(min_df=self.min_df,max_df=self.max_df,stop_words=self.stop_words,ngram_range=self.ngram_range)
self.tfidf_transformer = TfidfTransformer()
logger.info("getting docs")
docs = df[self.input_feature].apply(self.get_tokens)
logger.info("running vectorizer")
# NOTE(review): Series.as_matrix() was removed in pandas 1.0 -- use
# `.values` / `.to_numpy()` on modern pandas.
counts = self.vectorizer.fit_transform(docs.as_matrix())
logger.info("run tfidf transform")
self.tfidf = self.tfidf_transformer.fit_transform(counts)
# NOTE(review): get_feature_names() is deprecated/removed in sklearn>=1.0
# (replaced by get_feature_names_out()).
self.fnames = self.vectorizer.get_feature_names()
logger.info("base tfidf features %d",len(self.fnames))
if self.select_features:
self.ch2 = SelectKBest(chi2, k=self.topn_features)
self.ch2.fit_transform(self.tfidf, df[self.target_feature])
self.feature_names_support = set([self.fnames[i] for i in self.ch2.get_support(indices=True)])
logger.info("selected tfidf features %d",len(self.feature_names_support))
return self
# Per-row worker used by transform(): vectorize one document and return a
# {feature_name: tfidf_value} dict, filtered by chi-squared support when
# feature selection is enabled.
# NOTE(review): mutates self.tfidf/self.progress on every call, so a single
# instance is not safe for concurrent transform() calls.
def _create_tfidf(self,v):
s = [self.get_tokens(v)]
counts = self.vectorizer.transform(s)
self.tfidf = self.tfidf_transformer.transform(counts)
doc_tfidf = {}
for (col,val) in zip(self.tfidf[0].indices,self.tfidf[0].data):
fname = self.fnames[col]
if self.select_features:
if fname in self.feature_names_support:
doc_tfidf[fname] = val
else:
doc_tfidf[fname] = val
# Progress logging every 100 processed rows.
self.progress += 1
if self.progress % 100 == 0:
logger.info("processed %d/%d",self.progress,self.size)
return doc_tfidf
def transform(self,df):
"""
Transform features with the fitted tfidf transform; writes a dict of
feature-name -> tfidf-value into the configured output column.
Parameters
----------
df : pandas dataframe
Returns
-------
Transformed pandas dataframe
"""
self.progress = 0
self.size = df.shape[0]
df[self.output_feature] = df[self.input_feature].apply(self._create_tfidf)
return df | python/build/lib/seldon/pipeline/tfidf_transform.py | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.base import BaseEstimator,TransformerMixin
import logging
import time
import logging
logger = logging.getLogger(__name__)
class Tfidf_transform(BaseEstimator,TransformerMixin):
"""
Create TF-IDF (term frequency - inverse document frequency) features.
can use chi-squared test to limit features. Assumes string based input feature that can be split.
Uses scikit-learn based transformers internally
Parameters
----------
min_df : int, optinal
min document frequency (for sklearn vectorizer)
max_df : float, optional
max document frequency (for sklearn vectorizer)
select_features : bool, optional
use chi-squared test to select features
topn_features : int, optional
keep top features from chi-squared test
stop_words : str, optional
stop words (for sklearn vectorizer)
target_feature : str, optional
target feature for chi-squared test
"""
def __init__(self,input_feature=None,output_feature=None,min_df=0,max_df=1.0,select_features=False,topn_features=50000,stop_words=None,target_feature=None,vectorizer=None,tfidf_transformer=None,ch2=None,fnames=None,feature_names_support=[],ngram_range=[1,1]):
self.input_feature=input_feature
self.output_feature=output_feature
self.min_df=min_df
self.max_df=max_df
self.select_features = select_features
self.topn_features=topn_features
self.stop_words = stop_words
self.target_feature = target_feature
self.vectorizer = vectorizer
self.tfidf_transformer = tfidf_transformer
self.ch2 = ch2
self.fnames = fnames
self.feature_names_support = feature_names_support
self.ngram_range = ngram_range
def get_tokens(self,v):
"""basic method to get "document" string from feature
"""
if isinstance(v, list):
return " ".join([i if isinstance(i, basestring) else str(i) for i in v])
elif isinstance(v,basestring):
return v
else:
return str(v)
def fit(self,df):
"""
Fit tfidf transform
Parameters
----------
df : pandas dataframe
Returns
-------
self: object
"""
self.vectorizer = CountVectorizer(min_df=self.min_df,max_df=self.max_df,stop_words=self.stop_words,ngram_range=self.ngram_range)
self.tfidf_transformer = TfidfTransformer()
logger.info("getting docs")
docs = df[self.input_feature].apply(self.get_tokens)
logger.info("running vectorizer")
counts = self.vectorizer.fit_transform(docs.as_matrix())
logger.info("run tfidf transform")
self.tfidf = self.tfidf_transformer.fit_transform(counts)
self.fnames = self.vectorizer.get_feature_names()
logger.info("base tfidf features %d",len(self.fnames))
if self.select_features:
self.ch2 = SelectKBest(chi2, k=self.topn_features)
self.ch2.fit_transform(self.tfidf, df[self.target_feature])
self.feature_names_support = set([self.fnames[i] for i in self.ch2.get_support(indices=True)])
logger.info("selected tfidf features %d",len(self.feature_names_support))
return self
def _create_tfidf(self,v):
s = [self.get_tokens(v)]
counts = self.vectorizer.transform(s)
self.tfidf = self.tfidf_transformer.transform(counts)
doc_tfidf = {}
for (col,val) in zip(self.tfidf[0].indices,self.tfidf[0].data):
fname = self.fnames[col]
if self.select_features:
if fname in self.feature_names_support:
doc_tfidf[fname] = val
else:
doc_tfidf[fname] = val
self.progress += 1
if self.progress % 100 == 0:
logger.info("processed %d/%d",self.progress,self.size)
return doc_tfidf
def transform(self,df):
"""
transform features with tfidf transform
Parameters
----------
X : pandas dataframe
Returns
-------
Transformed pandas dataframe
"""
self.progress = 0
self.size = df.shape[0]
df[self.output_feature] = df[self.input_feature].apply(self._create_tfidf)
return df | 0.834272 | 0.345906 |
import asyncio
import io
import pytz
from datetime import datetime
import pexpect
import aioschedule
from .sql import handling_casino_sql
from .sql.database import loop
from .parse_config import get_database_config
BACKUP_PATH = "bot_database_backup.sql"
HOST, USER, PASSWORD, DATABASE_NAME, PORT = loop.run_until_complete(get_database_config())
class LastJackpotData:
    """Cache of the most recent jackpot outcome, refreshed by the scheduler."""

    def __init__(self):
        # Populated by get_last_jackpot_data(); None until the first refresh.
        self.last_winner = None
        self.last_prize = None

    async def get_last_jackpot_data(self):
        """Reload the winner and prize of the latest jackpot from the database."""
        row = await handling_casino_sql.get_last_jackpot_results()
        winner, fallback_winner, prize = row[0], row[1], row[2]
        # Column 0 is the preferred winner label; fall back to column 1 when empty.
        self.last_winner = winner if winner else fallback_winner
        self.last_prize = prize
def make_db_backup():
    """Write a full mysqldump of the configured database to BACKUP_PATH.

    The password is typed into the child process through pexpect rather than
    passed on the command line, so it never shows up in `ps` output.
    """
    with io.open(BACKUP_PATH, 'w', encoding="UTF-8") as backup_file:
        dump = pexpect.spawn(f"mysqldump -h {HOST} -u {USER} -p '{DATABASE_NAME}'", encoding="UTF-8")
        dump.expect("Enter password: ")
        dump.sendline(PASSWORD)
        # Stream the dump line by line instead of buffering it all in memory.
        while not dump.eof():
            backup_file.write(dump.readline())
        if dump.exitstatus == 0:
            print("Database backup done\n")
        else:
            print(f"Error during creating the backup. Code: {dump.exitstatus}\n")
async def run_db_backup():
# Offload the blocking mysqldump to the default thread-pool executor so the
# event loop stays responsive.  Fire-and-forget: the returned future is not
# awaited, so failures surface only via the prints in make_db_backup.
loop.run_in_executor(None, make_db_backup)
async def reset_duels():
    """Wipe duel stats for the new season on the 1st of each month
    (Europe/Warsaw time); no-op on every other day."""
    today = datetime.now(tz=pytz.timezone('Europe/Warsaw'))
    if today.day != 1:
        return
    await handling_casino_sql.delete_duels_new_season()
async def tasks_scheduler():
# Register all recurring jobs, then poll aioschedule once a minute.
# Database backups twice a day.
aioschedule.every().day.at("4:00").do(run_db_backup)
aioschedule.every().day.at("14:20").do(run_db_backup)
# Refresh the cached jackpot result just after the day rolls over.
aioschedule.every().day.at("0:01").do(last_jackpot_data.get_last_jackpot_data)
# Monthly duel reset (reset_duels itself checks that it is the 1st).
aioschedule.every().day.at("00:00").do(reset_duels)
# Expire stale e-mail confirmation requests every few minutes.
aioschedule.every(4).minutes.do(handling_casino_sql.reset_old_confirmations_emails)
while True:
# run_pending() is a coroutine: schedule it on the shared loop, then sleep.
loop.create_task(aioschedule.run_pending())
await asyncio.sleep(60)
async def init():
# Startup entry point: launch the scheduler loop as a background task.
loop.create_task(tasks_scheduler())
# NOTE(review): indentation was lost in this dump -- this instance appears to
# be a module-level singleton (tasks_scheduler references it by name), not a
# local of init(); confirm against the original file.
last_jackpot_data = LastJackpotData()
loop.create_task(last_jackpot_data.get_last_jackpot_data()) | Bot/task_scheduler.py | import asyncio
import io
import pytz
from datetime import datetime
import pexpect
import aioschedule
from .sql import handling_casino_sql
from .sql.database import loop
from .parse_config import get_database_config
BACKUP_PATH = "bot_database_backup.sql"
HOST, USER, PASSWORD, DATABASE_NAME, PORT = loop.run_until_complete(get_database_config())
class LastJackpotData:
def __init__(self):
self.last_winner = None
self.last_prize = None
async def get_last_jackpot_data(self):
_last_jackpot_data = await handling_casino_sql.get_last_jackpot_results()
self.last_winner = _last_jackpot_data[0] if _last_jackpot_data[0] else _last_jackpot_data[1]
self.last_prize = _last_jackpot_data[2]
def make_db_backup():
with io.open(BACKUP_PATH, 'w', encoding="UTF-8") as file:
command = pexpect.spawn(f"mysqldump -h {HOST} -u {USER} -p '{DATABASE_NAME}'", encoding="UTF-8")
command.expect("Enter password: ")
command.sendline(PASSWORD)
while not command.eof():
chunk = command.readline()
file.write(chunk)
if command.exitstatus == 0:
print("Database backup done\n")
else:
print(f"Error during creating the backup. Code: {command.exitstatus}\n")
async def run_db_backup():
loop.run_in_executor(None, make_db_backup)
async def reset_duels():
now = datetime.now(tz=pytz.timezone('Europe/Warsaw'))
if now.day == 1:
await handling_casino_sql.delete_duels_new_season()
async def tasks_scheduler():
aioschedule.every().day.at("4:00").do(run_db_backup)
aioschedule.every().day.at("14:20").do(run_db_backup)
aioschedule.every().day.at("0:01").do(last_jackpot_data.get_last_jackpot_data)
aioschedule.every().day.at("00:00").do(reset_duels)
aioschedule.every(4).minutes.do(handling_casino_sql.reset_old_confirmations_emails)
while True:
loop.create_task(aioschedule.run_pending())
await asyncio.sleep(60)
async def init():
loop.create_task(tasks_scheduler())
last_jackpot_data = LastJackpotData()
loop.create_task(last_jackpot_data.get_last_jackpot_data()) | 0.20834 | 0.128717 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
                  label,
                  weight=None,
                  class_weight=None,
                  reduction='mean',
                  avg_factor=None,
                  ignore_index=-100):
    """Wrapper around :func:`F.cross_entropy` with per-sample weighting.

    Computes element-wise losses first (reduction='none'), then applies the
    optional per-sample ``weight`` and the requested reduction through
    ``weight_reduce_loss``.
    """
    # class_weight is a manual per-class rescaling weight (size C), passed
    # straight through to PyTorch.
    raw_loss = F.cross_entropy(pred,
                               label,
                               weight=class_weight,
                               reduction='none',
                               ignore_index=ignore_index)
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(raw_loss, weight=weight, reduction=reduction,
                              avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
                         label,
                         use_sigmoid=False,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        use_sigmoid (bool, optional): Whether ``pred`` holds raw logits
            (True, uses BCE-with-logits) or probabilities (False).
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): "none", "mean" or "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    # When labels arrive as class indices rather than one-hot maps, expand
    # them (and the per-sample weights) to the prediction's channel layout.
    if pred.dim() != label.dim():
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    bce_fn = (F.binary_cross_entropy_with_logits if use_sigmoid
              else F.binary_cross_entropy)
    elementwise = bce_fn(pred, label.float(), weight=class_weight,
                         reduction='none')
    # Apply sample weights and the requested reduction.
    return weight_reduce_loss(elementwise, weight, reduction=reduction,
                              avg_factor=avg_factor)
@LOSSES.register_module()
class AffinityLoss(nn.Module):
    """Affinity loss: a unary BCE term plus a global term.

    The global term penalizes, per row of the (off-diagonal) affinity matrix,
    the deviation of recall, specificity and precision ratios from 1, each
    measured with the same binary cross-entropy criterion.

    Args:
        reduction (str, optional): "none", "mean" or "sum". Defaults to 'mean'.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(AffinityLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.cls_criterion = binary_cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)

        # Unary term: plain BCE between predicted affinities and targets.
        unary_term = self.cls_criterion(
            cls_score, label, reduction=reduction, avg_factor=avg_factor,
            **kwargs)

        # Mask the diagonal so self-affinity is excluded from the global terms.
        diagonal_matrix = (1 - torch.eye(label.size(1))).to(label.get_device())
        vtarget = diagonal_matrix * label

        def _ratio_loss(numerator, denominator):
            # Rows with an empty denominator get 1 to avoid division by zero,
            # matching the original masked_fill_/div_ in-place sequence.
            denominator = denominator.masked_fill_(~(denominator > 0), 1)
            ratio = numerator.div_(denominator)
            target = torch.ones_like(ratio)
            return self.cls_criterion(
                ratio, target, reduction=reduction, avg_factor=avg_factor,
                **kwargs)

        recall_loss = _ratio_loss(
            torch.sum(cls_score * vtarget, dim=2),
            torch.sum(vtarget, dim=2))
        spec_loss = _ratio_loss(
            torch.sum((1 - cls_score) * (1 - label), dim=2),
            torch.sum(1 - label, dim=2))
        precision_loss = _ratio_loss(
            torch.sum(cls_score * vtarget, dim=2),
            torch.sum(cls_score, dim=2))

        global_term = recall_loss + spec_loss + precision_loss
        loss_cls = self.loss_weight * (unary_term + global_term)
return loss_cls | mmseg/models/losses/affinity_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=-100):
"""The wrapper function for :func:`F.cross_entropy`"""
# class_weight is a manual rescaling weight given to each class.
# If given, has to be a Tensor of size C element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
use_sigmoid=False,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
# weighted element-wise losses
if weight is not None:
weight = weight.float()
if use_sigmoid:
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight=class_weight, reduction='none')
else:
loss = F.binary_cross_entropy(
pred, label.float(), weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
@LOSSES.register_module()
class AffinityLoss(nn.Module):
    """Affinity loss: a unary binary-cross-entropy term plus a global term
    built from class-pair recall, specificity and precision.

    Args:
        reduction (str, optional): Reduction applied to each BCE term.
            Options are "none", "mean" and "sum". Defaults to 'mean'.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """
    def __init__(self, reduction='mean', loss_weight=1.0):
        super(AffinityLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        # Element-wise BCE helper defined earlier in this module.
        self.cls_criterion = binary_cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the affinity loss.

        Args:
            cls_score (torch.Tensor): Predicted affinity scores; summed over
                ``dim=2`` below, so at least 3-D is assumed — TODO confirm
                exact expected shape against callers.
            label (torch.Tensor): Target affinity matrix, same layout.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Averaging factor for the reduction.
            reduction_override (str, optional): Overrides ``self.reduction``.

        Returns:
            torch.Tensor: The weighted total loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # Unary term: plain BCE between score and label.
        unary_term = self.cls_criterion(
            cls_score,
            label,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        # Zero the diagonal so a class's affinity with itself is ignored.
        # FIX: use label.device instead of label.get_device() — get_device()
        # is CUDA-only and fails for CPU tensors; .to(device) works for both.
        diagonal_matrix = (1 - torch.eye(label.size(1))).to(label.device)
        vtarget = diagonal_matrix * label
        # Recall part: fraction of true affinity mass recovered by the score.
        recall_part = torch.sum(cls_score * vtarget, dim=2)
        denominator = torch.sum(vtarget, dim=2)
        # Guard rows with no positives against division by zero.
        denominator = denominator.masked_fill_(~(denominator > 0), 1)
        recall_part = recall_part.div_(denominator)
        recall_label = torch.ones_like(recall_part)
        recall_loss = self.cls_criterion(
            recall_part,
            recall_label,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        # Specificity part: agreement on the negatives.
        spec_part = torch.sum((1 - cls_score) * (1 - label), dim=2)
        denominator = torch.sum(1 - label, dim=2)
        denominator = denominator.masked_fill_(~(denominator > 0), 1)
        spec_part = spec_part.div_(denominator)
        spec_label = torch.ones_like(spec_part)
        spec_loss = self.cls_criterion(
            spec_part,
            spec_label,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        # Precision part: fraction of predicted affinity mass that is correct.
        precision_part = torch.sum(cls_score * vtarget, dim=2)
        denominator = torch.sum(cls_score, dim=2)
        denominator = denominator.masked_fill_(~(denominator > 0), 1)
        precision_part = precision_part.div_(denominator)
        precision_label = torch.ones_like(precision_part)
        precision_loss = self.cls_criterion(
            precision_part,
            precision_label,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        global_term = recall_loss + spec_loss + precision_loss
        # NOTE: original source line had dataset-dump metadata fused after
        # this return ("| 0.934349 | ..."), which was a syntax error.
        loss_cls = self.loss_weight * (unary_term + global_term)
        return loss_cls
from .Helpers import get_builtin_type, indent, get_attribute_size, is_flags_enum, get_comments_from_attribute
from .Helpers import get_comments_if_present, create_enum_name, InterfaceType
from .Helpers import get_read_method_name, get_reverse_method_name, get_write_method_name
from .JavaGeneratorBase import JavaGeneratorBase
from .JavaMethodGenerator import JavaMethodGenerator
def get_type(attribute):
    """Return the Java builtin type matching the attribute's byte size."""
    size = attribute['size']
    return get_builtin_type(size)
class JavaEnumGenerator(JavaGeneratorBase):
    """Java enum generator.

    Emits a Java ``enum`` for a schema-defined value set; flag enums
    additionally implement the ``BitMaskable`` interface.
    """

    def __init__(self, name, schema, class_schema):
        super(JavaEnumGenerator, self).__init__(name, schema, class_schema)
        self.enum_values = {}
        self.class_type = 'enum'
        if is_flags_enum(name):
            self.implements_list.add(InterfaceType.BitMaskable)
        self._add_enum_values(self.class_schema)

    def _add_private_declaration(self):
        """Emit the private backing-value field."""
        var_type = get_type(self.class_schema)
        self.class_output += [indent(get_comments_if_present('Enum value.'))]
        self.class_output += [indent('private final {0} value;'.format(var_type))] + ['']

    def _add_enum_values(self, enum_attribute):
        """Collect every (name, value, comments) triple from the schema."""
        enum_attribute_values = enum_attribute['values']
        for current_attribute in enum_attribute_values:
            self.add_enum_value(current_attribute['name'], current_attribute['value'],
                                get_comments_from_attribute(current_attribute, False))

    def _write_enum_values(self):
        """Emit the enum constants, comma-separated with a final semicolon."""
        enum_type = get_type(self.class_schema)
        enum_length = len(self.enum_values)
        count = 1
        for name, value_comments in self.enum_values.items():
            value, comments = value_comments
            comment_line = get_comments_if_present(comments)
            if comment_line is not None:
                self.class_output += [indent(comment_line)]
            line = '{0}(({1}) {2})'.format(name.upper(), enum_type, value)
            line += ',' if count < enum_length else ';'
            self.class_output += [indent(line)]
            count += 1
        self.class_output += ['']

    def _add_constructor(self):
        """Emit the private constructor storing the backing value."""
        enum_type = get_type(self.class_schema)
        constructor_method = JavaMethodGenerator('', '', self.generated_class_name, ['final {0} value'.format(enum_type)])
        constructor_method.add_instructions(['this.value = value'])
        self._add_method_documentation(constructor_method, 'Constructor.', [('value', 'Enum value')], None)
        self._add_method(constructor_method)

    def _add_load_from_binary_custom(self, load_from_binary_method):
        """Emit deserialization: read, byte-reverse, map via rawValueOf."""
        read_data_line = 'stream.{0}()'.format(get_read_method_name(self.class_schema['size']))
        size = get_attribute_size(self.schema, self.class_schema)
        reverse_byte_method = get_reverse_method_name(size).format(read_data_line)
        lines = ['final {0} streamValue = {1}'.format(get_type(self.class_schema), reverse_byte_method)]
        lines += ['return rawValueOf(streamValue)']
        self.wrap_code_in_try(load_from_binary_method, lambda: load_from_binary_method.add_instructions(lines))

    def _add_serialize_custom(self, serialize_method):
        """Emit serialization of the byte-reversed backing value."""
        size = get_attribute_size(self.schema, self.class_schema)
        reverse_byte_method = get_reverse_method_name(size).format('this.value')
        serialize_method.add_instructions(['dataOutputStream.{0}({1})'.format(get_write_method_name(size), reverse_byte_method)])

    def add_enum_value(self, name, value, comments):
        """Register one enum constant under its canonical Java name."""
        self.enum_values[create_enum_name(name)] = [value, comments]

    def _add_public_declarations(self):
        self._add_raw_value_of_method()

    def _add_private_declarations(self):
        self._add_private_declaration()
        self._add_constructor()

    def _calculate_size(self, new_getter):
        """Emit the size getter body (constant schema size)."""
        new_getter.add_instructions(['return {0}'.format(self.class_schema['size'])])

    def _add_raw_value_of_method(self):
        """Emit static rawValueOf(value): linear scan over values()."""
        enum_type = get_type(self.class_schema)
        new_method = JavaMethodGenerator('public', self.generated_class_name, 'rawValueOf', ['final {0} value'.format(enum_type)], '', True)
        new_method.add_instructions(['for ({0} current : {0}.values()) {{'.format(self.generated_class_name)], False)
        new_method.add_instructions([indent('if (value == current.value) {')], False)
        new_method.add_instructions([indent('return current', 2)])
        new_method.add_instructions([indent('}')], False)
        new_method.add_instructions(['}'], False)
        new_method.add_instructions(
            ['throw new IllegalArgumentException(value + " was not a backing value for {0}.")'.format(self.generated_class_name)])
        self._add_method_documentation(new_method, 'Gets enum value.', [('value', 'Raw value of the enum')], 'Enum value')
        self._add_method(new_method)

    def _generate_bitmaskable_interface(self):
        """Emit the BitMaskable getValue() implementation."""
        new_method = JavaMethodGenerator('public', 'long', 'getValue', [], '')
        new_method.add_instructions(['return this.value'])
        self._add_method_documentation(new_method, 'Gets the value of the enum', [], 'Value of the enum.')
        self._add_method(new_method)

    def _generate_interface_methods(self):
        """Emit method bodies for every implemented interface."""
        interface_generator = {
            InterfaceType.BitMaskable: self._generate_bitmaskable_interface
        }
        for interfaceType in self.implements_list:
            interface_generator[interfaceType]()

    def generate(self):
        """Generate the enum class and return the emitted source lines.

        FIX: the original final line had dataset-dump metadata fused after
        the return statement, which was a syntax error.
        """
        self._add_class_definition()
        self._write_enum_values()
        self._generate_class_methods()
        return self.class_output
from .Helpers import get_comments_if_present, create_enum_name, InterfaceType
from .Helpers import get_read_method_name, get_reverse_method_name, get_write_method_name
from .JavaGeneratorBase import JavaGeneratorBase
from .JavaMethodGenerator import JavaMethodGenerator
def get_type(attribute):
    """Map an attribute's declared byte size to the Java builtin type."""
    attribute_size = attribute['size']
    return get_builtin_type(attribute_size)
class JavaEnumGenerator(JavaGeneratorBase):
    """Java enum generator: emits a Java ``enum`` for a schema value set."""
    def __init__(self, name, schema, class_schema):
        super(JavaEnumGenerator, self).__init__(name, schema, class_schema)
        self.enum_values = {}
        self.class_type = 'enum'
        # Flag enums additionally implement the BitMaskable interface.
        if is_flags_enum(name):
            self.implements_list.add(InterfaceType.BitMaskable)
        self._add_enum_values(self.class_schema)
    def _add_private_declaration(self):
        # Emit the private backing-value field.
        var_type = get_type(self.class_schema)
        self.class_output += [indent(get_comments_if_present('Enum value.'))]
        self.class_output += [indent('private final {0} value;'.format(var_type))] + ['']
    def _add_enum_values(self, enum_attribute):
        """Collect every (name, value, comments) triple from the schema."""
        enum_attribute_values = enum_attribute['values']
        for current_attribute in enum_attribute_values:
            self.add_enum_value(current_attribute['name'], current_attribute['value'],
                                get_comments_from_attribute(current_attribute, False))
    def _write_enum_values(self):
        """Emit the enum constants, comma-separated with a final semicolon."""
        enum_type = get_type(self.class_schema)
        enum_length = len(self.enum_values)
        count = 1
        for name, value_comments in self.enum_values.items():
            value, comments = value_comments
            comment_line = get_comments_if_present(comments)
            if comment_line is not None:
                self.class_output += [indent(comment_line)]
            # Constant form: NAME((type) value) — cast keeps javac happy.
            line = '{0}(({1}) {2})'.format(name.upper(), enum_type, value)
            line += ',' if count < enum_length else ';'
            self.class_output += [indent(line)]
            count += 1
        self.class_output += ['']
    def _add_constructor(self):
        """Emit the constructor storing the backing value."""
        enum_type = get_type(self.class_schema)
        constructor_method = JavaMethodGenerator('', '', self.generated_class_name, ['final {0} value'.format(enum_type)])
        constructor_method.add_instructions(['this.value = value'])
        self._add_method_documentation(constructor_method, 'Constructor.', [('value', 'Enum value')], None)
        self._add_method(constructor_method)
    def _add_load_from_binary_custom(self, load_from_binary_method):
        """Emit deserialization: read, byte-reverse, map via rawValueOf."""
        read_data_line = 'stream.{0}()'.format(get_read_method_name(self.class_schema['size']))
        size = get_attribute_size(self.schema, self.class_schema)
        reverse_byte_method = get_reverse_method_name(size).format(read_data_line)
        lines = ['final {0} streamValue = {1}'.format(get_type(self.class_schema), reverse_byte_method)]
        lines += ['return rawValueOf(streamValue)']
        self.wrap_code_in_try(load_from_binary_method, lambda: load_from_binary_method.add_instructions(lines))
    def _add_serialize_custom(self, serialize_method):
        """Emit serialization of the byte-reversed backing value."""
        size = get_attribute_size(self.schema, self.class_schema)
        reverse_byte_method = get_reverse_method_name(size).format('this.value')
        serialize_method.add_instructions(['dataOutputStream.{0}({1})'.format(get_write_method_name(size), reverse_byte_method)])
    def add_enum_value(self, name, value, comments):
        """Register one enum constant under its canonical Java name."""
        self.enum_values[create_enum_name(name)] = [value, comments]
    def _add_public_declarations(self):
        self._add_raw_value_of_method()
    def _add_private_declarations(self):
        self._add_private_declaration()
        self._add_constructor()
    def _calculate_size(self, new_getter):
        # Size getter body: the schema size is a compile-time constant.
        new_getter.add_instructions(['return {0}'.format(self.class_schema['size'])])
    def _add_raw_value_of_method(self):
        """Emit static rawValueOf(value): linear scan over values()."""
        enum_type = get_type(self.class_schema)
        new_method = JavaMethodGenerator('public', self.generated_class_name, 'rawValueOf', ['final {0} value'.format(enum_type)], '', True)
        new_method.add_instructions(['for ({0} current : {0}.values()) {{'.format(self.generated_class_name)], False)
        new_method.add_instructions([indent('if (value == current.value) {')], False)
        new_method.add_instructions([indent('return current', 2)])
        new_method.add_instructions([indent('}')], False)
        new_method.add_instructions(['}'], False)
        new_method.add_instructions(
            ['throw new IllegalArgumentException(value + " was not a backing value for {0}.")'.format(self.generated_class_name)])
        self._add_method_documentation(new_method, 'Gets enum value.', [('value', 'Raw value of the enum')], 'Enum value')
        self._add_method(new_method)
    def _generate_bitmaskable_interface(self):
        """Emit the BitMaskable getValue() implementation."""
        new_method = JavaMethodGenerator('public', 'long', 'getValue', [], '')
        new_method.add_instructions(['return this.value'])
        self._add_method_documentation(new_method, 'Gets the value of the enum', [], 'Value of the enum.')
        self._add_method(new_method)
    def _generate_interface_methods(self):
        """Emit method bodies for every implemented interface."""
        interface_generator = {
            InterfaceType.BitMaskable: self._generate_bitmaskable_interface
        }
        for interfaceType in self.implements_list:
            interface_generator[interfaceType]()
    def generate(self):
        """Generate the enum class and return the emitted source lines."""
        self._add_class_definition()
        self._write_enum_values()
        self._generate_class_methods()
        return self.class_output
from collections import defaultdict
from pathlib import Path
import csv
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.files import File
from django.core.management.base import BaseCommand
from django.urls import reverse
from wagtail.core.models import Page, Site, Locale, Collection, PageRevision
from django.core.files.images import ImageFile
from wagtail.documents.models import Document
from wagtail.images.models import Image
from wagtail_localize.models import Translation
from wagtail_localize.views.submit_translations import TranslationCreator
from wagtailmarkdown.utils import _get_bleach_kwargs
from wagtailmedia.models import Media
from wagtailsvg.models import Svg
import home.models as models
from comments.models import CommentStatus
from home.models import V1ToV2ObjectMap, V1PageURLToV2PageMap
from questionnaires.models import Poll, PollFormField, Survey, SurveyFormField, Quiz, QuizFormField
import psycopg2
import psycopg2.extras
import json
from questionnaires.models import PollIndexPage, SurveyIndexPage, QuizIndexPage
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--host',
default='0.0.0.0',
help='IoGT V1 database host'
)
parser.add_argument(
'--port',
default='5432',
help='IoGT V1 database port'
)
parser.add_argument(
'--name',
default='postgres',
help='IoGT V1 database name'
)
parser.add_argument(
'--user',
default='postgres',
help='IoGT V1 database user'
)
parser.add_argument(
'--password',
default='',
help='IoGT V1 database password'
)
parser.add_argument(
'--media-dir',
required=True,
help='**RELATIVE Path** to IoGT v1 media directory'
)
parser.add_argument(
'--delete-users',
action='store_true',
help='Delete existing Users and their associated data. Use carefully'
)
parser.add_argument(
'--v1-domains',
nargs="+",
required=True,
help="IoGT V1 domains for manually inserted internal links, --v1-domains domain1 domain2 ..."
)
parser.add_argument(
'--sort',
required=True,
help='Sort page by "type1" or "type2"'
)
    def handle(self, *args, **options):
        """Entry point: connect to the v1 DB, reset v2 content, then migrate."""
        self.db_connect(options)
        self.media_dir = options.get('media_dir')
        self.v1_domains_list = options.get('v1_domains')
        self.sort = options.get('sort')
        # NOTE(review): --v2-domain / --v2-site-port are never registered in
        # add_arguments, so both of these are always None — confirm intent.
        self.v2_domain = options.get('v2_domain')
        self.v2_site_port = options.get('v2_site_port')
        # v1-id -> v2-object lookup tables, filled as assets are migrated.
        self.collection_map = {}
        self.document_map = {}
        self.media_map = {}
        self.image_map = {}
        self.page_translation_map = {}
        self.v1_to_v2_page_map = {}
        self.post_migration_report_messages = defaultdict(list)
        self.registration_survey_translations = defaultdict()
        self.clear()
        self.stdout.write('Existing site structure cleared')
        root = Page.get_first_root_node()
        self.migrate(root)
        self.print_post_migration_report()
def clear(self):
PageRevision.objects.all().delete()
models.OfflineAppPage.objects.all().delete()
models.MiscellaneousIndexPage.objects.all().delete()
models.PageLinkPage.objects.all().delete()
PollFormField.objects.all().delete()
Poll.objects.all().delete()
SurveyFormField.objects.all().delete()
Survey.objects.all().delete()
QuizFormField.objects.all().delete()
Quiz.objects.all().delete()
models.FeaturedContent.objects.all().delete()
models.ArticleRecommendation.objects.all().delete()
models.FooterPage.objects.all().delete()
models.FooterIndexPage.objects.all().delete()
models.BannerPage.objects.all().delete()
models.BannerIndexPage.objects.all().delete()
models.Article.objects.all().delete()
models.Section.objects.all().delete()
models.SectionIndexPage.objects.all().delete()
models.HomePage.objects.all().delete()
Site.objects.all().delete()
Image.objects.all().delete()
Document.objects.all().delete()
Media.objects.all().delete()
V1ToV2ObjectMap.objects.all().delete()
def db_connect(self, options):
connection_string = self.create_connection_string(options)
self.stdout.write(f'DB connection string created, string={connection_string}')
self.v1_conn = psycopg2.connect(connection_string)
self.stdout.write('Connected to v1 DB')
def __del__(self):
try:
self.v1_conn.close()
self.stdout.write('Closed connection to v1 DB')
except AttributeError:
pass
def create_connection_string(self, options):
host = options.get('host', '0.0.0.0')
port = options.get('port', '5432')
name = options.get('name', 'postgres')
user = options.get('user', 'postgres')
password = options.get('password', '')
return f"host={host} port={port} dbname={name} user={user} password={password}"
    def db_query(self, q):
        """Run a raw SQL string against the v1 DB and return the open cursor.

        Uses a RealDictCursor so rows are addressable by column name.
        The caller is responsible for closing the returned cursor.
        """
        cur = self.v1_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        cur.execute(q)
        return cur
    def migrate(self, root):
        """Run the full v1 → v2 migration, then post-processing fix-ups.

        Step order matters: assets and locales first so pages can resolve
        them by map lookup; link/body fix-ups last, once every target page
        exists.
        """
        self.migrate_collections()
        self.migrate_documents()
        self.migrate_media()
        self.migrate_images()
        self.migrate_locales()
        self.load_page_translation_map()
        self.home_page = self.create_home_page(root)
        self.translate_home_pages()
        self.create_index_pages()
        self.translate_index_pages()
        self.migrate_sections()
        self.migrate_articles()
        self.migrate_footers()
        self.migrate_polls()
        self.migrate_surveys()
        self.migrate_banners()
        self.mark_pages_which_are_not_translated_in_v1_as_draft()
        # Repair tree paths before the content fix-up passes below.
        Page.fix_tree(fix_paths=True)
        self.mark_empty_sections_as_draft()
        self.fix_articles_body()
        self.fix_footers_body()
        self.fix_survey_description()
        self.fix_banner_link_page()
        self.attach_banners_to_home_page()
        self.migrate_recommended_articles_for_article()
        self.migrate_featured_articles_for_homepage()
        self.add_surveys_from_surveys_index_page_to_footer_index_page_as_page_link_page()
        self.add_polls_from_polls_index_page_to_footer_index_page_as_page_link_page()
        self.add_polls_from_polls_index_page_to_home_page_featured_content()
        self.add_surveys_from_surveys_index_page_to_home_page_featured_content()
        self.move_footers_to_end_of_footer_index_page()
        self.migrate_article_related_sections()
        self.migrate_social_media_links()
        self.sort_pages()
        self.populate_registration_survey_translations()
        self.translate_default_survey_submit_button_text()
        self.migrate_post_registration_survey()
        self.migrate_page_revisions()
        self.stop_translations()
    def create_home_page(self, root):
        """Create the v2 HomePage and default Site from the v1 main page.

        Raises when the v1 DB has no default-site main page or no main
        language. Also records social-media links and logo status in the
        post-migration report. Returns the created HomePage.
        """
        sql = 'select * ' \
              'from wagtailcore_site wcs, core_sitesettings css, core_main cm, wagtailcore_page wcp ' \
              'where wcs.id = css.site_id ' \
              'and wcs.root_page_id = cm.page_ptr_id ' \
              'and cm.page_ptr_id = wcp.id ' \
              'and wcs.is_default_site = true'
        cur = self.db_query(sql)
        main = cur.fetchone()
        cur.close()
        if not main:
            raise Exception('Could not find a main page in v1 DB')
        sql = 'select * ' \
              'from core_sitelanguage ' \
              'where is_main_language = true'
        cur = self.db_query(sql)
        language = cur.fetchone()
        cur.close()
        if not language:
            raise Exception('Could not find a main language in v1 DB')
        # The home page is created in the v1 site's main language locale.
        locale = Locale.objects.get(language_code=self._get_iso_locale(language['locale']))
        home = models.HomePage(
            title=main['title'],
            draft_title=main['draft_title'],
            slug=main['slug'],
            live=main['live'],
            locked=main['locked'],
            go_live_at=main['go_live_at'],
            expire_at=main['expire_at'],
            first_published_at=main['first_published_at'],
            last_published_at=main['last_published_at'],
            search_description=main['search_description'],
            seo_title=main['seo_title'],
            locale=locale
        )
        root.add_child(instance=home)
        V1ToV2ObjectMap.create_map(content_object=home, v1_object_id=main['page_ptr_id'])
        # First CLI-provided domain becomes the default v2 site hostname.
        Site.objects.create(
            hostname=self.v1_domains_list[0],
            port=443,
            root_page=home,
            is_default_site=True,
            site_name=main['site_name'] if main['site_name'] else 'Internet of Good Things',
        )
        logo = self.image_map.get(main['logo_id'])
        if logo:
            site_settings = models.SiteSettings.get_for_default_site()
            site_settings.logo_id = logo.id
            site_settings.save()
        else:
            # NOTE(review): message text has a typo ('Not site logo found') —
            # left untouched because it is a runtime string.
            self.post_migration_report_messages['other'].append(
                'Not site logo found. Using default site logo.'
            )
        sql = f'select * ' \
              f'from core_sitesettings css, wagtailcore_site wcs ' \
              f'where css.site_id = wcs.id ' \
              f'and wcs.is_default_site = true'
        cur = self.db_query(sql)
        for row in cur:
            # Social-media links are only reported, not migrated here.
            social_media_links = json.loads(row['social_media_links_on_footer_page'])
            if social_media_links:
                links = []
                for social_media_link in social_media_links:
                    value = social_media_link.get('value')
                    if value:
                        links.append({
                            'title': value.get('title'),
                            'link': value.get('link'),
                        })
                self.post_migration_report_messages['social_media_links'].append(
                    f'site: {row["site_name"]}, hostname: {row["hostname"]} has following social media links '
                    f'{[(link["title"], link["link"]) for link in links]}.')
        cur.close()
        self.post_migration_report_messages['other'].append(
            'A default favicon has been chosen for the site.'
        )
        return home
def create_index_pages(self):
self.section_index_page = models.SectionIndexPage(title='Sections')
self.home_page.add_child(instance=self.section_index_page)
self.banner_index_page = models.BannerIndexPage(title='Banners')
self.home_page.add_child(instance=self.banner_index_page)
self.footer_index_page = models.FooterIndexPage(title='Footers')
self.home_page.add_child(instance=self.footer_index_page)
self.poll_index_page = PollIndexPage(title='Polls')
self.home_page.add_child(instance=self.poll_index_page)
self.survey_index_page = SurveyIndexPage(title='Surveys')
self.home_page.add_child(instance=self.survey_index_page)
self.quiz_index_page = QuizIndexPage(title='Quizzes')
self.home_page.add_child(instance=self.quiz_index_page)
self.miscellaneous_index_page = models.MiscellaneousIndexPage(title='Miscellaneous')
self.home_page.add_child(instance=self.miscellaneous_index_page)
    def migrate_collections(self):
        """Copy wagtail collections from v1; populate self.collection_map."""
        cur = self.db_query('select * from wagtailcore_collection')
        for row in cur:
            # Tree fields (path/depth/numchild) apply only on creation; an
            # existing collection with the same name is reused as-is.
            collection, _ = Collection.objects.get_or_create(
                name=row['name'],
                defaults={
                    'path': row['path'],
                    'depth': row['depth'],
                    'numchild': row['numchild'],
                }
            )
            collection.save()
            self.collection_map.update({row['id']: collection})
            V1ToV2ObjectMap.create_map(content_object=collection, v1_object_id=row['id'])
        cur.close()
        self.stdout.write('Collections migrated')
    def migrate_documents(self):
        """Copy v1 documents (file, tags, collection); fill self.document_map.

        Rows with a missing file path or an unopenable file are skipped and
        recorded in the post-migration report.
        """
        cur = self.db_query('select * from wagtaildocs_document')
        content_type = self.find_content_type_id('wagtaildocs', 'document')
        for row in cur:
            if not row['file']:
                self.post_migration_report_messages['document_file_not_found'].append(
                    f'Document file path not found, id={row["id"]}'
                )
                continue
            # open_file returns None (and reports) when the file is missing.
            file = self.open_file(row['file'])
            if file:
                document = Document.objects.create(
                    title=row['title'],
                    file=File(file),
                    created_at=row['created_at'],
                    collection=self.collection_map.get(row['collection_id']),
                )
                V1ToV2ObjectMap.create_map(content_object=document, v1_object_id=row['id'])
                tags = self.find_tags(content_type, row['id'])
                if tags:
                    document.tags.add(*tags)
                self.document_map.update({row['id']: document})
        cur.close()
        self.stdout.write('Documents migrated')
    def migrate_media(self):
        """Copy v1 molo media (file, thumbnail, tags); fill self.media_map.

        Rows with a missing file path or an unopenable file are skipped and
        recorded in the post-migration report.
        """
        cur = self.db_query('select * from core_molomedia')
        content_type = self.find_content_type_id('core', 'molomedia')
        for row in cur:
            if not row['file']:
                self.post_migration_report_messages['media_file_not_found'].append(
                    f'Media file path not found, id={row["id"]}'
                )
                continue
            file = self.open_file(row['file'])
            if file:
                # Thumbnail is optional; a missing one is silently dropped.
                thumbnail = self.open_file(row['thumbnail'])
                media = Media.objects.create(
                    title=row['title'],
                    file=File(file),
                    type=row['type'],
                    duration=row['duration'],
                    thumbnail=File(thumbnail) if thumbnail else None,
                    created_at=row['created_at'],
                    collection=self.collection_map.get(row['collection_id']),
                )
                V1ToV2ObjectMap.create_map(content_object=media, v1_object_id=row['id'])
                tags = self.find_tags(content_type, row['id'])
                if tags:
                    media.tags.add(*tags)
                self.media_map.update({row['id']: media})
        cur.close()
        self.stdout.write('Media migrated')
    def migrate_images(self):
        """Copy v1 images (file, focal point, tags); fill self.image_map.

        Rows with a missing file path or an unopenable file are skipped and
        recorded in the post-migration report.
        """
        cur = self.db_query('select * from wagtailimages_image')
        content_type = self.find_content_type_id('wagtailimages', 'image')
        for row in cur:
            if not row['file']:
                self.post_migration_report_messages['image_file_not_found'].append(
                    f'Image file path not found, id={row["id"]}'
                )
                continue
            image_file = self.open_file(row['file'])
            if image_file:
                self.stdout.write(f"Creating image, file={row['file']}")
                image = Image.objects.create(
                    title=row['title'],
                    # Keep only the basename; the storage backend decides the path.
                    file=ImageFile(image_file, name=row['file'].split('/')[-1]),
                    focal_point_x=row['focal_point_x'],
                    focal_point_y=row['focal_point_y'],
                    focal_point_width=row['focal_point_width'],
                    focal_point_height=row['focal_point_height'],
                    created_at=row['created_at'],
                    collection=self.collection_map.get(row['collection_id']),
                )
                V1ToV2ObjectMap.create_map(content_object=image, v1_object_id=row['id'])
                # Force size/hash computation while the file is still open.
                image.get_file_size()
                image.get_file_hash()
                tags = self.find_tags(content_type, row['id'])
                if tags:
                    image.tags.add(*tags)
                self.image_map.update({row['id']: image})
        cur.close()
        self.stdout.write('Images migrated')
def migrate_locales(self):
sql = f'select * ' \
f'from core_sitelanguage'
cur = self.db_query(sql)
for row in cur:
Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
cur.close()
    def find_content_type_id(self, app_label, model):
        """Return the django_content_type id for (app_label, model) in v1.

        NOTE(review): app_label/model are interpolated into the SQL string
        directly; safe only because all call sites pass hard-coded values.
        Raises AttributeError (fetchone() -> None) if the row is missing.
        """
        cur = self.db_query(f"select id from django_content_type where app_label = '{app_label}' and model = '{model}'")
        content_type = cur.fetchone()
        cur.close()
        return content_type.get('id')
def open_file(self, file):
file_path = Path(self.media_dir) / file
try:
return open(file_path, 'rb')
except:
self.post_migration_report_messages['file_not_found'].append(
f"File not found: {file_path}"
)
    def find_tags(self, content_type, object_id):
        """Return the taggit tag names attached to one v1 object.

        NOTE(review): ids are interpolated into SQL via str.format; safe
        only because both values come from the v1 DB, not user input.
        """
        tags_query = 'select t.name from taggit_tag t join taggit_taggeditem ti on t.id = ti.tag_id where ti.content_type_id = {} and ti.object_id = {}'
        cur = self.db_query(tags_query.format(content_type, object_id))
        tags = [tag['name'] for tag in cur]
        cur.close()
        return tags
def migrate_sections(self):
sql = f"select * " \
f"from core_sectionpage csp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
f"where csp.page_ptr_id = wcp.id " \
f"and wcp.id = clr.page_id " \
f"and clr.language_id = csl.id " \
f"order by wcp.path"
cur = self.db_query(sql)
section_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
section_page_translations.append(row)
else:
self.create_section(row)
else:
for row in section_page_translations:
section = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
try:
self.translate_page(locale=locale, page=section)
except:
self.post_migration_report_messages['untranslated_sections'].append(
f"Unable to translate section, title={row['title']}"
)
continue
translated_section = section.get_translation_or_none(locale)
if translated_section:
commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
translated_section.lead_image = self.image_map.get(row['image_id'])
translated_section.title = row['title']
translated_section.slug = row['slug']
translated_section.draft_title = row['draft_title']
translated_section.live = row['live']
translated_section.locked = row['locked']
translated_section.go_live_at = row['go_live_at']
translated_section.expire_at = row['expire_at']
translated_section.first_published_at = row['first_published_at']
translated_section.last_published_at = row['last_published_at']
translated_section.search_description = row['search_description']
translated_section.seo_title = row['seo_title']
translated_section.font_color = self.get_color_hex(row['extra_style_hints']) or section.font_color
translated_section.larger_image_for_top_page_in_list_as_in_v1 = True
translated_section.commenting_status = commenting_status
translated_section.commenting_starts_at = commenting_open_time
translated_section.commenting_ends_at = commenting_close_time
translated_section.latest_revision_created_at = row['latest_revision_created_at']
translated_section.save()
self.add_warning_for_sections_with_description(row, section)
content_type = self.find_content_type_id('core', 'sectionpage')
tags = self.find_tags(content_type, row['page_ptr_id'])
if tags:
translated_section.tags.add(*tags)
V1ToV2ObjectMap.create_map(content_object=translated_section, v1_object_id=row['page_ptr_id'])
V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_section)
self.v1_to_v2_page_map.update({
row['page_ptr_id']: translated_section
})
if row['description'] is None:
self.post_migration_report_messages['sections_with_null_description'].append(
f'title: {translated_section.title}. URL: {translated_section.full_url}. '
f'Admin URL: {self.get_admin_url(translated_section.id)}.'
)
self.stdout.write(f"Translated section, title={row['title']}")
cur.close()
def mark_empty_sections_as_draft(self):
for section in models.Section.objects.all():
if section.get_children().filter(live=True).count() == 0:
section.live = False
section.save(update_fields=['live'])
    def create_section(self, row):
        """Create one v2 Section from a canonical v1 section-page row."""
        commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
        section = models.Section(
            lead_image=self.image_map.get(row['image_id']),
            title=row['title'],
            draft_title=row['draft_title'],
            show_in_menus=True,
            slug=row['slug'],
            # Graft the v1 tree path under the v2 section index.
            # Assumes the first 12 path chars are the v1 ancestor prefix
            # being replaced — TODO confirm against v1 tree depth.
            path=self.section_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            commenting_status=commenting_status,
            commenting_starts_at=commenting_open_time,
            commenting_ends_at=commenting_close_time,
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            font_color=self.get_color_hex(row['extra_style_hints']),
            larger_image_for_top_page_in_list_as_in_v1=True,
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        section.save()
        self.add_warning_for_sections_with_description(row, section)
        content_type = self.find_content_type_id('core', 'sectionpage')
        tags = self.find_tags(content_type, row['page_ptr_id'])
        if tags:
            section.tags.add(*tags)
        V1ToV2ObjectMap.create_map(content_object=section, v1_object_id=row['page_ptr_id'])
        V1PageURLToV2PageMap.create_map(url=row['url_path'], page=section)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: section
        })
        if row['description'] is None:
            self.post_migration_report_messages['sections_with_null_description'].append(
                f'title: {section.title}. URL: {section.full_url}. '
                f'Admin URL: {self.get_admin_url(section.id)}.'
            )
        self.stdout.write(f"saved section, title={section.title}")
def add_warning_for_sections_with_description(self, row, section):
if row['description']:
self.post_migration_report_messages['sections_with_description'].append(
f'title: {section.title}. URL: {section.full_url}. '
f'Admin URL: {self.get_admin_url(section.id)}. '
f'Description (not migrated): {row["description"]}.'
)
def migrate_articles(self):
sql = f"select * " \
f"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
f"where cap.page_ptr_id = wcp.id " \
f"and wcp.id = clr.page_id " \
f"and clr.language_id = csl.id " \
f"and wcp.path like '000100010002%' " \
f"order by wcp.path"
cur = self.db_query(sql)
article_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
article_page_translations.append(row)
else:
self.create_article(row)
else:
for row in article_page_translations:
article = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
try:
self.translate_page(locale=locale, page=article)
except:
self.post_migration_report_messages['untranslated_articles'].append(
f"Unable to translate article, title={row['title']}"
)
continue
translated_article = article.get_translation_or_none(locale)
if translated_article:
commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
translated_article.lead_image = self.image_map.get(row['image_id'])
translated_article.title = row['title']
translated_article.slug = row['slug']
translated_article.draft_title = row['draft_title']
translated_article.live = row['live']
translated_article.locked = row['locked']
translated_article.go_live_at = row['go_live_at']
translated_article.expire_at = row['expire_at']
translated_article.first_published_at = row['first_published_at']
translated_article.last_published_at = row['last_published_at']
translated_article.search_description = row['search_description']
translated_article.seo_title = row['seo_title']
translated_article.index_page_description = row['subtitle']
translated_article.commenting_status = commenting_status
translated_article.commenting_starts_at = commenting_open_time
translated_article.commenting_ends_at = commenting_close_time
translated_article.latest_revision_created_at = row['latest_revision_created_at']
translated_article.save()
content_type = self.find_content_type_id('core', 'articlepage')
tags = self.find_tags(content_type, row['page_ptr_id'])
if tags:
translated_article.tags.add(*tags)
V1ToV2ObjectMap.create_map(content_object=translated_article, v1_object_id=row['page_ptr_id'])
V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_article)
self.v1_to_v2_page_map.update({
row['page_ptr_id']: translated_article
})
self.stdout.write(f"Translated article, title={row['title']}")
cur.close()
def _get_commenting_fields(self, row):
comments_map = {
'O': CommentStatus.OPEN,
'C': CommentStatus.CLOSED,
'D': CommentStatus.DISABLED,
'T': CommentStatus.TIMESTAMPED
}
commenting_status = comments_map[row['commenting_state']] if row['commenting_state'] else CommentStatus.INHERITED
return commenting_status, row['commenting_open_time'], row['commenting_close_time']
    def create_article(self, row):
        """Create a v2 Article from a v1 article row and register it in the page maps.

        NOTE(review): ``article.save()``, tag attachment and map registration all
        share one try block, so a ``Page.DoesNotExist`` raised anywhere in it
        abandons the row with only a report entry — confirm that is intended.
        """
        commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
        article = models.Article(
            lead_image=self.image_map.get(row['image_id']),
            title=row['title'],
            draft_title=row['draft_title'],
            slug=row['slug'],
            # graft the v1 path tail (first 12 chars dropped) onto the v2 section index
            path=self.section_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            commenting_status=commenting_status,
            commenting_starts_at=commenting_open_time,
            commenting_ends_at=commenting_close_time,
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            index_page_description=row['subtitle'],
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        try:
            article.save()
            content_type = self.find_content_type_id('core', 'articlepage')
            tags = self.find_tags(content_type, row['page_ptr_id'])
            if tags:
                article.tags.add(*tags)
            V1ToV2ObjectMap.create_map(content_object=article, v1_object_id=row['page_ptr_id'])
            V1PageURLToV2PageMap.create_map(url=row['url_path'], page=article)
            self.v1_to_v2_page_map.update({
                row['page_ptr_id']: article
            })
        except Page.DoesNotExist:
            # presumably raised when the parent page was never migrated — confirm
            self.post_migration_report_messages['articles'].append(
                f"Skipping article with missing parent: title={row['title']}"
            )
            return
        self.stdout.write(f"saved article, title={article.title}")
def get_unsupported_html_tags(self, value):
bleach_kwargs = _get_bleach_kwargs()
unsupported_html_tags = []
tags = BeautifulSoup(value, "html.parser").find_all()
for tag in tags:
if tag.name not in bleach_kwargs['tags']:
unsupported_html_tags.append(tag.name)
return unsupported_html_tags
    def _map_body(self, type_, row, v2_body):
        """Rewrite v1 stream-field blocks in *v2_body* in place to fit the v2 schema.

        ``type_`` is only used (minus its trailing 's') in one report message.
        Mutates and returns ``v2_body``.
        """
        for block in v2_body:
            if block['type'] == 'paragraph':
                # keep paragraphs containing unbleachable HTML as legacy blocks,
                # otherwise treat them as markdown
                unsupported_html_tags = self.get_unsupported_html_tags(block['value'])
                if unsupported_html_tags:
                    block['type'] = 'paragraph_v1_legacy'
                    page = self.v1_to_v2_page_map.get(row['page_ptr_id'])
                    if page:
                        self.post_migration_report_messages['page_with_unsupported_tags'].append(
                            f'title: {page.title}. URL: {page.full_url}. '
                            f'Admin URL: {self.get_admin_url(page.id)}. '
                            f'Tags: {unsupported_html_tags}.'
                        )
                else:
                    block['type'] = 'markdown'
                # NOTE(review): this lookup keys on row['page_id'] while the one
                # above uses row['page_ptr_id'], and `page` is not None-guarded
                # before .title — confirm both are intended.
                if bool([domain for domain in self.v1_domains_list if domain in block['value']]):
                    page = self.v1_to_v2_page_map.get(row['page_id'])
                    self.post_migration_report_messages['sections_with_internal_links'].append(
                        f"title: {page.title}. URL: {page.full_url}. "
                        f"Admin URL: {self.get_admin_url(page.id)}.")
            elif block['type'] == 'richtext':
                block['type'] = 'paragraph'
                # same v1-domain scan as above (and the same None risk)
                if bool([domain for domain in self.v1_domains_list if domain in block['value']]):
                    page = self.v1_to_v2_page_map.get(row['page_id'])
                    self.post_migration_report_messages['sections_with_internal_links'].append(
                        f"title: {page.title}. URL: {page.full_url}. "
                        f"Admin URL: {self.get_admin_url(page.id)}.")
            elif block['type'] == 'image':
                # swap the v1 image id for the migrated v2 image's id
                image = self.image_map.get(block['value'])
                if image:
                    block['value'] = image.id
                else:
                    page = self.v1_to_v2_page_map.get(row['page_ptr_id'])
                    if page:
                        self.post_migration_report_messages['page_with_empty_image'].append(
                            f'title: {page.title}. URL: {page.full_url}. '
                            f'Admin URL: {self.get_admin_url(page.id)}. '
                            f'Image ID: {block["value"]}'
                        )
                    else:
                        self.post_migration_report_messages['invalid_image_id'].append(
                            f"title={row['title']} has image with invalid id {block['value']}"
                        )
                    block['value'] = None
            elif block['type'] == 'media':
                # swap the v1 media id for the migrated v2 media's id
                media = self.media_map.get(block['value'])
                if media:
                    block['value'] = media.id
                else:
                    self.post_migration_report_messages['invalid_media_id'].append(
                        f"title={row['title']} has media with invalid id {block['value']}"
                    )
                    block['value'] = None
            elif block['type'] == 'page':
                # v1 page links become v2 page_button blocks with an empty label
                block['type'] = 'page_button'
                page = self.v1_to_v2_page_map.get(block['value'])
                if page:
                    block['value'] = {'page': page.id, 'text': ''}
                else:
                    block['value'] = {'page': None, 'text': ''}
                    self.post_migration_report_messages['invalid_page_id'].append(
                        f'Unable to attach v2 page for {type_[:-1]}, title={row["title"]}'
                    )
        return v2_body
def map_article_body(self, row):
v1_body = json.loads(row['body'])
v2_body = self._map_body('articles', row, v1_body)
if row['subtitle']:
v2_body = [{
'type': 'paragraph',
'value': row['subtitle'],
}] + v2_body
return json.dumps(v2_body)
def migrate_banners(self):
sql = f"select * " \
f"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
f"where cbp.page_ptr_id = wcp.id " \
f"and wcp.id = clr.page_id " \
f"and clr.language_id = csl.id " \
f"order by wcp.path"
cur = self.db_query(sql)
banner_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
banner_page_translations.append(row)
else:
self.create_banner(row)
else:
for row in banner_page_translations:
banner = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
try:
self.translate_page(locale=locale, page=banner)
except:
self.post_migration_report_messages['untranslated_banners'].append(
f"Unable to translate banner, title={row['title']}"
)
continue
translated_banner = banner.get_translation_or_none(locale)
if translated_banner:
translated_banner.banner_image = self.image_map.get(row['banner_id'])
translated_banner.title = row['title']
translated_banner.slug = row['slug']
translated_banner.draft_title = row['draft_title']
translated_banner.live = row['live']
translated_banner.locked = row['locked']
translated_banner.go_live_at = row['go_live_at']
translated_banner.expire_at = row['expire_at']
translated_banner.first_published_at = row['first_published_at']
translated_banner.last_published_at = row['last_published_at']
translated_banner.search_description = row['search_description']
translated_banner.seo_title = row['seo_title']
translated_banner.latest_revision_created_at = row['latest_revision_created_at']
translated_banner.save()
V1ToV2ObjectMap.create_map(content_object=translated_banner, v1_object_id=row['page_ptr_id'])
V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_banner)
self.v1_to_v2_page_map.update({
row['page_ptr_id']: translated_banner
})
self.stdout.write(f"Translated banner, title={row['title']}")
cur.close()
def create_banner(self, row):
banner = models.BannerPage(
banner_image=self.image_map.get(row['banner_id']),
title=row['title'],
draft_title=row['draft_title'],
slug=row['slug'],
path=self.banner_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
banner_description='',
locked=row['locked'],
go_live_at=row['go_live_at'],
expire_at=row['expire_at'],
first_published_at=row['first_published_at'],
last_published_at=row['last_published_at'],
search_description=row['search_description'],
seo_title=row['seo_title'],
latest_revision_created_at=row['latest_revision_created_at'],
)
banner.save()
V1ToV2ObjectMap.create_map(content_object=banner, v1_object_id=row['page_ptr_id'])
V1PageURLToV2PageMap.create_map(url=row['url_path'], page=banner)
self.v1_to_v2_page_map.update({
row['page_ptr_id']: banner
})
self.stdout.write(f"saved banner, title={banner.title}")
def map_banner_page(self, row):
v2_page = None
v1_banner_link_page_id = row['banner_link_page_id']
if v1_banner_link_page_id:
v2_page = self.v1_to_v2_page_map.get(v1_banner_link_page_id)
if not v2_page:
self.post_migration_report_messages['banner_page_link'].append(
f'Unable to attach v2 page for banner, title={row["title"]}'
)
return v2_page
def migrate_footers(self):
sql = f"select * " \
f"from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
f"where cfp.articlepage_ptr_id = cap.page_ptr_id " \
f"and cap.page_ptr_id = wcp.id " \
f"and wcp.id = clr.page_id " \
f"and clr.language_id = csl.id " \
f"order by wcp.path"
cur = self.db_query(sql)
footer_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
footer_page_translations.append(row)
else:
self.create_footer(row)
else:
for row in footer_page_translations:
footer = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
try:
self.translate_page(locale=locale, page=footer)
except:
self.post_migration_report_messages['untranslated_footers'].append(
f"Unable to translate footer, title={row['title']}"
)
continue
translated_footer = footer.get_translation_or_none(locale)
if translated_footer:
commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
image = self.image_map.get(row['image_id'])
translated_footer.image_icon = image
translated_footer.title = row['title']
translated_footer.slug = row['slug']
translated_footer.draft_title = row['draft_title']
translated_footer.live = row['live']
translated_footer.locked = row['locked']
translated_footer.go_live_at = row['go_live_at']
translated_footer.expire_at = row['expire_at']
translated_footer.first_published_at = row['first_published_at']
translated_footer.last_published_at = row['last_published_at']
translated_footer.search_description = row['search_description']
translated_footer.seo_title = row['seo_title']
translated_footer.commenting_status = commenting_status
translated_footer.commenting_starts_at = commenting_open_time
translated_footer.commenting_ends_at = commenting_close_time
translated_footer.latest_revision_created_at = row['latest_revision_created_at']
translated_footer.save()
if image:
self.post_migration_report_messages['footers_with_image'].append(
f'title: {translated_footer.title}. URL: {translated_footer.full_url}. '
f'Admin URL: {self.get_admin_url(translated_footer.id)}.'
)
V1ToV2ObjectMap.create_map(content_object=translated_footer, v1_object_id=row['page_ptr_id'])
V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_footer)
self.v1_to_v2_page_map.update({
row['page_ptr_id']: translated_footer
})
self.stdout.write(f"Translated footer, title={row['title']}")
cur.close()
    def create_footer(self, row):
        """Create a v2 footer page from a v1 footer row and register it in the maps.

        NOTE(review): the footer is stored as a ``models.Article`` placed under
        ``self.footer_index_page`` — confirm v2 intentionally has no dedicated
        footer page model.
        """
        commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
        image = self.image_map.get(row['image_id'])
        footer = models.Article(
            image_icon=image,
            title=row['title'],
            draft_title=row['draft_title'],
            slug=row['slug'],
            # graft the v1 path tail (first 12 chars dropped) onto the v2 footer index
            path=self.footer_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            commenting_status=commenting_status,
            commenting_starts_at=commenting_open_time,
            commenting_ends_at=commenting_close_time,
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        footer.save()
        if image:
            # footers carrying an icon image are listed in the report
            self.post_migration_report_messages['footers_with_image'].append(
                f'title: {footer.title}. URL: {footer.full_url}. Admin URL: {self.get_admin_url(footer.id)}.'
            )
        V1ToV2ObjectMap.create_map(content_object=footer, v1_object_id=row['page_ptr_id'])
        V1PageURLToV2PageMap.create_map(url=row['url_path'], page=footer)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: footer
        })
        self.stdout.write(f"saved footer, title={footer.title}")
def load_page_translation_map(self):
sql = "select * " \
"from core_pagetranslation"
cur = self.db_query(sql)
for row in cur:
self.page_translation_map.update({
row['translated_page_id']: row['page_id'],
})
cur.close()
self.stdout.write('Page translation map loaded.')
def translate_page(self, locale, page):
translator = TranslationCreator(user=None, target_locales=[locale])
translator.create_translations(page)
    def stop_translations(self):
        """Disable every wagtail-localize Translation record, then log completion."""
        Translation.objects.update(enabled=False)
        self.stdout.write('Translations stopped.')
def migrate_polls(self):
sql = f"select * " \
f"from polls_pollsindexpage ppip, wagtailcore_page wcp " \
f"where ppip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_poll_index_page = cur.fetchone()
cur.close()
self._migrate_polls(v1_poll_index_page, self.poll_index_page)
sql = f"select * " \
f"from core_sectionindexpage csip, wagtailcore_page wcp " \
f"where csip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_section_index_page = cur.fetchone()
cur.close()
self._migrate_polls(v1_section_index_page, self.section_index_page)
    def _migrate_polls(self, v1_index_page, v2_index_page):
        """Migrate v1 poll pages found under *v1_index_page* into *v2_index_page*.

        Non-translation rows are created immediately; rows that are translations
        of another page are collected and processed after the loop (the ``else``
        on the ``for`` always runs — the loop contains no ``break``).
        """
        # NOTE(review): v1 values are interpolated straight into the SQL string;
        # acceptable for a trusted migration DB, but not parameterized.
        sql = f"select * " \
              f"from polls_question pq, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where pq.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"and wcp.path like '{v1_index_page['path']}%' " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        poll_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                poll_page_translations.append(row)
            else:
                self.create_poll(v2_index_page, row)
        else:
            for row in poll_page_translations:
                poll = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=poll)
                except Exception as e:
                    self.post_migration_report_messages['untranslated_polls'].append(
                        f"Unable to translate poll, title={row['title']}"
                    )
                    continue
                translated_poll = poll.get_translation_or_none(locale)
                if translated_poll:
                    # copy the v1 fields onto the auto-created translation
                    translated_poll.title = row['title']
                    translated_poll.slug = row['slug']
                    translated_poll.draft_title = row['draft_title']
                    translated_poll.live = row['live']
                    translated_poll.result_as_percentage = row['result_as_percentage']
                    translated_poll.show_results = row['show_results']
                    translated_poll.locked = row['locked']
                    translated_poll.go_live_at = row['go_live_at']
                    translated_poll.expire_at = row['expire_at']
                    translated_poll.first_published_at = row['first_published_at']
                    translated_poll.last_published_at = row['last_published_at']
                    translated_poll.search_description = row['search_description']
                    translated_poll.seo_title = row['seo_title']
                    translated_poll.randomise_options = row['randomise_options']
                    # submission settings are forced off for migrated polls
                    translated_poll.allow_anonymous_submissions = False
                    translated_poll.allow_multiple_submissions = False
                    translated_poll.latest_revision_created_at = row['latest_revision_created_at']
                    translated_poll.save()
                    V1ToV2ObjectMap.create_map(content_object=translated_poll, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_poll)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_poll
                    })
                    # trims the trailing 4 chars of the path — presumably so the
                    # LIKE in migrate_poll_questions matches the parent's choices; confirm
                    row['path'] = row['path'][:-4]
                    self.migrate_poll_questions(translated_poll, row)
                    self.stdout.write(f"Translated poll, title={row['title']}")
        cur.close()
    def create_poll(self, v2_index_page, row):
        """Create a v2 Poll under *v2_index_page* from a v1 question row.

        On any save failure the row is reported and skipped; on success the
        poll's question/choices are migrated and the page maps updated.
        """
        poll = Poll(
            title=row['title'],
            draft_title=row['draft_title'],
            show_in_menus=True,
            slug=row['slug'],
            # graft the v1 path tail (first 12 chars dropped) onto the v2 index path
            path=v2_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            show_results=row['show_results'],
            result_as_percentage=row['result_as_percentage'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            randomise_options=row['randomise_options'],
            # submission settings are forced off for migrated polls
            allow_anonymous_submissions=False,
            allow_multiple_submissions=False,
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        try:
            poll.save()
            V1ToV2ObjectMap.create_map(content_object=poll, v1_object_id=row['page_ptr_id'])
            V1PageURLToV2PageMap.create_map(url=row['url_path'], page=poll)
        except Exception as e:
            self.post_migration_report_messages['polls'].append(
                f"Unable to save poll, title={row['title']}"
            )
            return
        self.migrate_poll_questions(poll, row)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: poll
        })
        self.stdout.write(f"saved poll, title={poll.title}")
def migrate_poll_questions(self, poll, poll_row):
sql = f'select * ' \
f'from polls_choice pc, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl ' \
f'where pc.page_ptr_id = wcp.id ' \
f'and wcp.path like \'{poll_row["path"]}%\' ' \
f'and wcp.id = clr.page_id ' \
f'and clr.language_id = csl.id ' \
f'and csl.locale = \'{poll_row["locale"]}\' ' \
f'order by wcp.path'
cur = self.db_query(sql)
self.create_poll_question(poll, poll_row, cur)
cur.close()
    def create_poll_question(self, poll, poll_row, cur):
        """(Re)build the single PollFormField for *poll* from the v1 choice rows in *cur*.

        Field type: no choices -> free-text 'multiline'; several choices ->
        'checkboxes' or 'radio' depending on v1's allow_multiple_choice.
        NOTE(review): exactly one choice falls through to the report-and-return
        branch — confirm that is intended rather than an off-by-one.
        """
        PollFormField.objects.filter(page=poll).delete()
        choices = []
        for row in cur:
            choices.append(row['title'])
        choices_length = len(choices)
        if choices_length == 0:
            field_type = 'multiline'
        elif choices_length > 1:
            if poll_row['allow_multiple_choice']:
                field_type = 'checkboxes'
            else:
                field_type = 'radio'
        else:
            self.post_migration_report_messages['poll_questions'].append(
                f'Unable to determine field type for poll={poll_row["title"]}.'
            )
            return
        choices = '|'.join(choices)
        poll_form_field = PollFormField.objects.create(
            page=poll, label=poll.title, field_type=field_type, choices=choices,
            admin_label=poll_row['short_name'] or poll.title)
        if choices:
            # rewind the cursor so the same choice rows can be iterated again
            cur.scroll(0, 'absolute')
            for row in cur:
                V1ToV2ObjectMap.create_map(content_object=poll_form_field, v1_object_id=row['page_ptr_id'])
        self.stdout.write(f"saved poll question, label={poll.title}")
def migrate_surveys(self):
sql = f"select * " \
f"from surveys_surveysindexpage ssip, wagtailcore_page wcp " \
f"where ssip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_survey_index_page = cur.fetchone()
cur.close()
self._migrate_surveys(v1_survey_index_page, self.survey_index_page)
sql = f"select * " \
f"from core_sectionindexpage csip, wagtailcore_page wcp " \
f"where csip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_section_index_page = cur.fetchone()
cur.close()
self._migrate_surveys(v1_section_index_page, self.section_index_page)
    def _migrate_surveys(self, v1_index_page, v2_index_page):
        """Migrate v1 survey pages found under *v1_index_page* into *v2_index_page*.

        Non-translation rows are created immediately; translation rows are
        collected and processed after the loop (the ``else`` on the ``for``
        always runs — the loop has no ``break``).
        """
        sql = f"select * " \
              f"from surveys_molosurveypage smsp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where smsp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"and wcp.path like '{v1_index_page['path']}%' " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        survey_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                survey_page_translations.append(row)
            else:
                self.create_survey(v2_index_page, row)
        else:
            for row in survey_page_translations:
                survey = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=survey)
                except Exception as e:
                    self.post_migration_report_messages['untranslated_surveys'].append(
                        f"Unable to translate survey, title={row['title']}"
                    )
                    continue
                translated_survey = survey.get_translation_or_none(locale)
                if translated_survey:
                    # copy the v1 fields onto the auto-created translation
                    translated_survey.title = row['title']
                    translated_survey.slug = row['slug']
                    translated_survey.draft_title = row['draft_title']
                    translated_survey.live = row['live']
                    translated_survey.thank_you_text = self.map_survey_thank_you_text(row)
                    translated_survey.allow_anonymous_submissions = row['allow_anonymous_submissions']
                    translated_survey.allow_multiple_submissions = row['allow_multiple_submissions_per_user']
                    # button label capped at 40 chars; overruns reported below
                    translated_survey.submit_button_text = row['submit_text'][:40] if row['submit_text'] else 'Submit'
                    translated_survey.direct_display = row['display_survey_directly']
                    translated_survey.multi_step = row['multi_step']
                    translated_survey.locked = row['locked']
                    translated_survey.go_live_at = row['go_live_at']
                    translated_survey.expire_at = row['expire_at']
                    translated_survey.first_published_at = row['first_published_at']
                    translated_survey.last_published_at = row['last_published_at']
                    translated_survey.search_description = row['search_description']
                    translated_survey.seo_title = row['seo_title']
                    translated_survey.index_page_description = row['homepage_introduction']
                    translated_survey.index_page_description_line_2 = row['homepage_button_text']
                    translated_survey.terms_and_conditions = self.map_survey_terms_and_conditions(row)
                    translated_survey.latest_revision_created_at = row['latest_revision_created_at']
                    translated_survey.save()
                    if row['submit_text'] and len(row['submit_text']) > 40:
                        self.post_migration_report_messages['truncated_submit_button_text'].append(
                            f'title: {translated_survey.title}. URL: {translated_survey.full_url}. '
                            f'Admin URL: {self.get_admin_url(translated_survey.id)}. '
                            f'Full text: {row["submit_text"]}.'
                        )
                    V1ToV2ObjectMap.create_map(content_object=translated_survey, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_survey)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_survey
                    })
                    self.migrate_survey_questions(translated_survey, row)
                    self.stdout.write(f"Translated survey, title={row['title']}")
        cur.close()
    def create_survey(self, v2_index_page, row):
        """Create a v2 Survey under *v2_index_page* from a v1 survey row.

        On save failure the row is reported and skipped; on success the survey's
        form fields are migrated and the page maps updated.
        """
        survey = Survey(
            title=row['title'],
            draft_title=row['draft_title'],
            show_in_menus=True,
            slug=row['slug'],
            # graft the v1 path tail (first 12 chars dropped) onto the v2 index path
            path=v2_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            thank_you_text=self.map_survey_thank_you_text(row),
            allow_anonymous_submissions=row['allow_anonymous_submissions'],
            allow_multiple_submissions=row['allow_multiple_submissions_per_user'],
            # button label capped at 40 chars; overruns reported below
            submit_button_text=row['submit_text'][:40] if row['submit_text'] else 'Submit',
            direct_display=row['display_survey_directly'],
            multi_step=row['multi_step'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            index_page_description=row['homepage_introduction'],
            index_page_description_line_2=row['homepage_button_text'],
            terms_and_conditions=self.map_survey_terms_and_conditions(row),
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        try:
            survey.save()
            if row['submit_text'] and len(row['submit_text']) > 40:
                self.post_migration_report_messages['truncated_submit_button_text'].append(
                    f'title: {survey.title}. URL: {survey.full_url}. '
                    f'Admin URL: {self.get_admin_url(survey.id)}. '
                    f'Full text: {row["submit_text"]}.'
                )
            V1ToV2ObjectMap.create_map(content_object=survey, v1_object_id=row['page_ptr_id'])
            V1PageURLToV2PageMap.create_map(url=row['url_path'], page=survey)
        except Exception as e:
            self.post_migration_report_messages['surveys'].append(
                f"Unable to save survey, title={row['title']}"
            )
            return
        self.migrate_survey_questions(survey, row)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: survey
        })
        self.stdout.write(f"saved survey, title={survey.title}")
def map_survey_description(self, row):
v1_survey_description = json.loads(row['description'])
v2_survey_description = self._map_body('surveys', row, v1_survey_description)
if row['introduction']:
v2_survey_description = [{
'type': 'paragraph',
'value': row['introduction'],
}] + v2_survey_description
return json.dumps(v2_survey_description)
def map_survey_thank_you_text(self, row):
v2_thank_you_text = []
if row['thank_you_text']:
v2_thank_you_text.append({'type': 'paragraph', 'value': row['thank_you_text']})
return json.dumps(v2_thank_you_text)
def map_survey_terms_and_conditions(self, row):
sql = f'select * ' \
f'from surveys_surveytermsconditions stc, surveys_molosurveypage msp, wagtailcore_page wcp ' \
f'where stc.page_id = msp.page_ptr_id ' \
f'and stc.terms_and_conditions_id = wcp.id ' \
f'and stc.page_id = {row["page_ptr_id"]} ' \
f'order by wcp.path'
cur = self.db_query(sql)
v1_term_and_condition = cur.fetchone()
cur.close()
if v1_term_and_condition:
return json.dumps([
{
"type": "page_button",
"value": {
"page": self.v1_to_v2_page_map[v1_term_and_condition["terms_and_conditions_id"]].id,
},
},
])
def migrate_survey_questions(self, survey, survey_row):
sql = f'select *, smsff.id as smsffid ' \
f'from surveys_molosurveyformfield smsff, surveys_molosurveypage smsp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl ' \
f'where smsff.page_id = smsp.page_ptr_id ' \
f'and smsp.page_ptr_id = wcp.id ' \
f'and wcp.id = clr.page_id ' \
f'and clr.language_id = csl.id ' \
f'and wcp.id = {survey_row["page_ptr_id"]} ' \
f'order by wcp.path'
cur = self.db_query(sql)
self.create_survey_question(survey, survey_row, cur)
cur.close()
    def create_survey_question(self, survey, survey_row, cur):
        """Recreate *survey*'s form fields from the v1 rows in *cur*.

        v1 'positive_number' is renamed to v2 'positivenumber'; comma-separated
        choices become pipe-separated.
        """
        SurveyFormField.objects.filter(page=survey).delete()
        for row in cur:
            field_type = 'positivenumber' if row['field_type'] == 'positive_number' else row['field_type']
            survey_form_field = SurveyFormField.objects.create(
                page=survey, sort_order=row['sort_order'], label=row['label'], required=row['required'],
                default_value=row['default_value'], help_text=row['help_text'], field_type=field_type,
                admin_label=row['admin_label'], page_break=row['page_break'],
                choices='|'.join(row['choices'].split(',')), skip_logic=row['skip_logic']
            )
            V1ToV2ObjectMap.create_map(content_object=survey_form_field, v1_object_id=row['smsffid'])
            # flag skip logic that jumps to 'end'/'question' on a single-step survey
            skip_logic_next_actions = [logic['value']['skip_logic'] for logic in json.loads(row['skip_logic'])]
            if not survey_row['multi_step'] and (
                    'end' in skip_logic_next_actions or 'question' in skip_logic_next_actions):
                self.post_migration_report_messages['survey_multistep'].append(
                    f'skip logic without multi step'
                )
            self.stdout.write(f"saved survey question, label={row['label']}")
def _get_iso_locale(self, locale):
iso_locales_map = {
'sho': 'sn',
'ch': 'ny',
}
return iso_locales_map.get(locale, locale)
def translate_home_pages(self):
locales = Locale.objects.all()
for locale in locales:
self.translate_page(locale=locale, page=self.home_page)
translated_home_page = self.home_page.get_translation_or_none(locale)
if translated_home_page:
translated_home_page.title = f"{translated_home_page.title} [{str(locale)}]"
translated_home_page.draft_title = f"{translated_home_page.draft_title} [{str(locale)}]"
translated_home_page.save()
def translate_index_pages(self):
index_pages = [
self.section_index_page, self.banner_index_page, self.footer_index_page, self.poll_index_page,
self.survey_index_page, self.quiz_index_page, self.miscellaneous_index_page,
]
locales = Locale.objects.all()
for page in index_pages:
for locale in locales:
self.translate_page(locale=locale, page=page)
    def migrate_recommended_articles_for_article(self):
        """Recreate v1 recommended-article relations as v2 ArticleRecommendation rows.

        Relations whose source or recommended article was never migrated are
        silently skipped.
        """
        article_cur = self.db_query(f'select DISTINCT page_id from core_articlepagerecommendedsections')
        for article_row in article_cur:
            v1_article_id = article_row['page_id']
            v2_article = self.v1_to_v2_page_map.get(v1_article_id)
            if v2_article:
                cur = self.db_query(
                    f'select * from core_articlepagerecommendedsections where page_id = {v1_article_id} and recommended_article_id is not null')
                for row in cur:
                    v2_recommended_article = self.v1_to_v2_page_map.get(row['recommended_article_id'])
                    if v2_recommended_article:
                        models.ArticleRecommendation.objects.create(
                            sort_order=row['sort_order'],
                            article=v2_recommended_article,
                            source=v2_article
                        )
                cur.close()
        article_cur.close()
        self.stdout.write('Recommended articles migrated')
    def migrate_featured_articles_for_homepage(self):
        """Rebuild each locale's home-page featured content from v1 featured data.

        Per locale: collect live articles carrying a featured start date (for
        translations, the source page's date wins), keep the 5 most recently
        featured per path-prefix group, and feature each group's section too.
        """
        locale_cur = self.db_query(f"select * from core_sitelanguage")
        for locale_row in locale_cur:
            articles_cur = self.db_query(
                f"select * "
                f"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl "
                f"where cap.page_ptr_id = wcp.id "
                f"and wcp.id = clr.page_id "
                f"and clr.language_id = csl.id "
                f"and wcp.live = true "
                f"and csl.locale = '{locale_row['locale']}' "
                f"order by left(wcp.path, 16) "
            )
            articles_list = []
            for article_row in articles_cur:
                translated_from_page_id = self.page_translation_map.get(article_row['page_ptr_id'])
                featured_in_homepage_start_date = article_row['featured_in_homepage_start_date']
                if translated_from_page_id:
                    translated_from_article_cur = self.db_query(
                        f'select * from core_articlepage where page_ptr_id = {translated_from_page_id}')
                    translated_from_article_row = translated_from_article_cur.fetchone()
                    translated_from_article_cur.close()
                    # For translated articles, only the date of the translated from matters
                    featured_in_homepage_start_date = translated_from_article_row['featured_in_homepage_start_date']
                if featured_in_homepage_start_date:
                    article = self.v1_to_v2_page_map.get(article_row['page_ptr_id'])
                    if article:
                        article.featured_in_homepage_start_date = featured_in_homepage_start_date
                        articles_list.append(article)
            articles_cur.close()
            # newest first, then a stable sort by path prefix so groups are
            # contiguous while keeping the date order inside each group
            articles_list = sorted(articles_list, key=lambda x: x.featured_in_homepage_start_date, reverse=True)
            articles_list = sorted(articles_list, key=lambda x: x.path[:16])
            # NOTE(review): [:16] presumably groups by a fixed-depth ancestor
            # (4 chars per tree level) — confirm against the page-tree config
            article_groups = defaultdict(list)
            for article in articles_list:
                article_groups[article.path[:16]].append(article)
            for k, v in article_groups.items():
                for i, article in enumerate(v):
                    if i < 5:
                        # only the 5 most recently featured per group survive
                        self.add_article_as_featured_content_in_home_page(article)
                    else:
                        self.post_migration_report_messages['ommitted_old_featured_article'].append(
                            f'title: {article.title}. URL: {article.full_url}. '
                            f'Admin URL: {self.get_admin_url(article.id)}. '
                            f'featured since: {article.featured_in_homepage_start_date}.'
                        )
                # NOTE(review): raises Section.DoesNotExist if no section has this
                # exact path — confirm every group prefix maps to a section
                section = models.Section.objects.get(path=k)
                self.add_section_as_featured_content_in_home_page(section)
        locale_cur.close()
def add_article_as_featured_content_in_home_page(self, article):
home_page = self.home_page.get_translation_or_none(article.locale)
if home_page:
home_featured_content = home_page.home_featured_content.stream_data
home_featured_content.append({
'type': 'article',
'value': {
'article': article.id,
'display_section_title': True,
},
})
home_page.save()
def add_section_as_featured_content_in_home_page(self, section):
home_page = self.home_page.get_translation_or_none(section.locale)
if home_page:
home_featured_content = home_page.home_featured_content.stream_data
home_featured_content.append({
'type': 'page_button',
'value': {
'page': section.id,
'text': '',
},
})
home_page.save()
    def attach_banners_to_home_page(self):
        """Create a HomePageBanner linking each migrated v2 banner to its ancestor home page."""
        sql = f"select * " \
              f"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cbp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        for row in cur:
            v2_banner = self.v1_to_v2_page_map.get(row['page_ptr_id'])
            if v2_banner:
                # NOTE(review): .first() returns None if the banner has no HomePage
                # ancestor, which would raise AttributeError on .specific — confirm
                home_page = v2_banner.get_ancestors().exact_type(models.HomePage).first().specific
                models.HomePageBanner.objects.create(source=home_page, banner_page=v2_banner)
        cur.close()
def get_color_hex(self, color_name):
return {
'--tiber': '#07292F',
'--mecury': '#eae9e9',
'--light_scampi': '#685FA1',
'--dove_gray': '#737373',
'--mineral_gray': '#dedede',
'--washed_gray': '#f1f1f1',
'--brown': '#a03321',
'--medium_red_violet': '#B62A99',
'--dark_medium_red_violet': '#b43393',
'--violet_blue': '#a54f9e',
'--mandy': '#E24256',
'--plum': '#7e2268',
'--wisteria': '#8e68ad',
'--grape': '#541c56',
'--paris_m': '#202855',
'--east_bay': '#4E4682',
'--victoria': '#4D4391',
'--scampi': '#685FA1',
'--sandybrown': '#EF9955',
'--jaffa': '#ee8c39',
'--saffron': '#F2B438',
'--saffron_light': '#f2b437',
'--cinnabar': '#EC3B3A',
'--cinnabar_dark': '#ee5523',
'--cardinal': '#bf2026',
'--pomegranate': '#ed3330',
'--roman': '#DF6859',
'--mauvelous': '#F38AA5',
'--beed_blush': '#e764a0',
'--maxican_red': '#a21d2e',
'--kobi': '#d481b5',
'--illusion': '#ee97ac',
'--celery': '#A4CE55',
'--de_york': '#6EC17F',
'--eucalyptus': '#2A9B58',
'--tradewind': '#4bab99',
'--moss_green': '#b3d9a1',
'--danube': '#6093CD',
'--light_danube': '#627abc',
'--indigo': '#5F7AC9',
'--mariner': '#4759a6',
'--robin_egg_blue': '#00BFC6',
'--pelorous': '#37BFBE',
'--iris_blue': '#03acc3',
'--red_berry': '#711e29',
'--bay_of_may': '#2b378c',
'--viking': '#3bbfbd',
'--denim': '#127f99',
'--tory_blue': '#134b90',
}.get(color_name)
def fix_articles_body(self):
    """Re-map the body of every migrated article (second pass).

    Runs after the whole page tree exists so internal links inside
    bodies can be resolved through ``self.v1_to_v2_page_map``. Only v1
    articles whose tree path starts with '000100010002' are processed
    (presumably the main content subtree — TODO confirm); rows with no
    migrated counterpart are reported.
    """
    sql = f"select * " \
          f"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
          f"where cap.page_ptr_id = wcp.id " \
          f"and wcp.id = clr.page_id " \
          f"and clr.language_id = csl.id " \
          f"and wcp.path like '000100010002%' " \
          f"order by wcp.path"
    cur = self.db_query(sql)
    for row in cur:
        v2_article = self.v1_to_v2_page_map.get(row['page_ptr_id'])
        if v2_article:
            # Refresh first: the mapped instance may be stale after the
            # earlier tree-building passes.
            v2_article.refresh_from_db()
            v2_article.body = self.map_article_body(row)
            v2_article.save()
        else:
            self.post_migration_report_messages['articles'].append(
                f'Unable to add article body, title={row["title"]}'
            )
    cur.close()
def fix_footers_body(self):
    """Re-map the body of every migrated footer page (second pass)."""
    sql = (
        "select * "
        "from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl "
        "where cfp.articlepage_ptr_id = cap.page_ptr_id "
        "and cap.page_ptr_id = wcp.id "
        "and wcp.id = clr.page_id "
        "and clr.language_id = csl.id "
        "order by wcp.path"
    )
    cursor = self.db_query(sql)
    for record in cursor:
        footer = self.v1_to_v2_page_map.get(record['page_ptr_id'])
        if footer is None:
            continue
        footer.refresh_from_db()
        footer.body = self.map_article_body(record)
        footer.save()
    cursor.close()
def fix_survey_description(self):
    """Re-map the description of every migrated survey (second pass)."""
    sql = (
        "select * "
        "from surveys_molosurveypage smsp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl "
        "where smsp.page_ptr_id = wcp.id "
        "and wcp.id = clr.page_id "
        "and clr.language_id = csl.id "
        "order by wcp.path"
    )
    cursor = self.db_query(sql)
    for record in cursor:
        survey = self.v1_to_v2_page_map.get(record['page_ptr_id'])
        if survey is None:
            continue
        survey.refresh_from_db()
        survey.description = self.map_survey_description(record)
        survey.save()
    cursor.close()
def fix_banner_link_page(self):
    """Re-resolve the target page of every migrated banner (second pass)."""
    sql = (
        "select * "
        "from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl "
        "where cbp.page_ptr_id = wcp.id "
        "and wcp.id = clr.page_id "
        "and clr.language_id = csl.id "
        "order by wcp.path"
    )
    cursor = self.db_query(sql)
    for record in cursor:
        banner = self.v1_to_v2_page_map.get(record['page_ptr_id'])
        if banner is None:
            continue
        banner.refresh_from_db()
        banner.banner_link_page = self.map_banner_page(record)
        banner.save()
    cursor.close()
def add_polls_from_polls_index_page_to_footer_index_page_as_page_link_page(self):
    """Mirror every poll under the poll index page into the footer index
    page of the same locale as a PageLinkPage with a clipboard icon.

    Fixes: the icon file handle is closed after the Svg is created (it
    was leaked), and a locale without a footer index page is reported
    instead of raising AttributeError on None.
    """
    self.poll_index_page.refresh_from_db()
    self.footer_index_page.refresh_from_db()
    # The Svg must be created while the source file is still open.
    icon_path = Path(settings.BASE_DIR) / 'iogt/static/icons/clip_board_pen.svg'
    with open(icon_path) as svg_file:
        file = File(svg_file, name='clip_board_pen.svg')
        icon = Svg.objects.create(title='clip board pen', file=file)
    poll_index_pages = self.poll_index_page.get_translations(inclusive=True)
    for poll_index_page in poll_index_pages:
        for poll in poll_index_page.get_children():
            footer_index_page = self.footer_index_page.get_translation_or_none(poll.locale)
            if footer_index_page is None:
                self.post_migration_report_messages['other'].append(
                    f'No footer index page for locale {poll.locale}; skipped poll {poll.title}'
                )
                continue
            page_link_page = models.PageLinkPage(title=poll.title, page=poll, icon=icon, live=poll.live)
            footer_index_page.add_child(instance=page_link_page)
    self.stdout.write('Added polls from poll index page to footer index page as page link page.')
def add_surveys_from_surveys_index_page_to_footer_index_page_as_page_link_page(self):
    """Mirror every survey under the survey index page into the footer
    index page of the same locale as a PageLinkPage with a loud-speaker icon.

    Fixes: the icon file handle is closed after the Svg is created (it
    was leaked); the queryset local no longer shadows the loop variable
    (both were named ``survey_index_page``, unlike the polls sibling);
    and a locale without a footer index page is reported instead of
    raising AttributeError on None.
    """
    self.survey_index_page.refresh_from_db()
    self.footer_index_page.refresh_from_db()
    # The Svg must be created while the source file is still open.
    icon_path = Path(settings.BASE_DIR) / 'iogt/static/icons/loud_speaker.svg'
    with open(icon_path) as svg_file:
        file = File(svg_file, name='loud_speaker.svg')
        icon = Svg.objects.create(title='loud speaker', file=file)
    survey_index_pages = self.survey_index_page.get_translations(inclusive=True)
    for survey_index_page in survey_index_pages:
        for survey in survey_index_page.get_children():
            footer_index_page = self.footer_index_page.get_translation_or_none(survey.locale)
            if footer_index_page is None:
                self.post_migration_report_messages['other'].append(
                    f'No footer index page for locale {survey.locale}; skipped survey {survey.title}'
                )
                continue
            page_link_page = models.PageLinkPage(title=survey.title, page=survey, icon=icon, live=survey.live)
            footer_index_page.add_child(instance=page_link_page)
    self.stdout.write('Added surveys from survey index page to footer index page as page link page.')
def mark_pages_which_are_not_translated_in_v1_as_draft(self):
    """Unpublish every alias page except the index pages themselves.

    Alias pages (``alias_of`` set) are placeholders wagtail-localize
    created for untranslated content; they are set to draft so they do
    not render, while the translated index pages stay live.
    """
    index_pages = [
        self.section_index_page,
        self.banner_index_page,
        self.footer_index_page,
        self.poll_index_page,
        self.survey_index_page,
        self.quiz_index_page,
    ]
    page_ids_to_exclude = []
    for index_page in index_pages:
        index_page.refresh_from_db()
        page_ids_to_exclude += index_page.get_translations(inclusive=True).values_list('id', flat=True)
    Page.objects.filter(alias_of__isnull=False).exclude(id__in=page_ids_to_exclude).update(live=False)
def migrate_social_media_links(self):
    """Copy v1 footer social-media links into PageLinkPages under the
    footer index page.

    Fix: the DB cursor is now closed after use (every other db_query
    caller closes its cursor; this one leaked it).
    """
    self.footer_index_page.refresh_from_db()
    sql = f'select * from core_sitesettings'
    cur = self.db_query(sql)
    for row in cur:
        social_media_links = json.loads(row['social_media_links_on_footer_page'])
        for social_media_link in social_media_links:
            block_value = social_media_link.get('value')
            if block_value:
                page_link_page_data = {
                    'title': block_value.get('title'),
                    'external_link': block_value.get('link'),
                }
                # 'image' holds a v1 image id; attach it only if migrated.
                v2_image = self.image_map.get(block_value.get('image'))
                if v2_image:
                    page_link_page_data.update({'image_icon_id': v2_image.id})
                page_link_page = models.PageLinkPage(**page_link_page_data)
                self.footer_index_page.add_child(instance=page_link_page)
    cur.close()
def migrate_page_revisions(self):
    """Recreate wagtail PageRevisions for every migrated page.

    All existing revisions are wiped first, then each v1 revision whose
    page was migrated is recreated verbatim. NOTE(review): content_json
    is copied as-is, so it still references v1 ids — confirm that is
    acceptable for revision history.
    """
    PageRevision.objects.all().delete()
    sql = f"select * " \
          f"from wagtailcore_pagerevision wcpr"
    cur = self.db_query(sql)
    for row in cur:
        v2_page = self.v1_to_v2_page_map.get(row['page_id'])
        if v2_page:
            page_revision = PageRevision.objects.create(
                page=v2_page,
                submitted_for_moderation=row['submitted_for_moderation'],
                created_at=row['created_at'],
                content_json=row['content_json'],
                approved_go_live_at=row['approved_go_live_at'],
            )
            V1ToV2ObjectMap.create_map(page_revision, row['id'])
    cur.close()
def add_polls_from_polls_index_page_to_home_page_featured_content(self):
    """Append every live poll to the matching home page's featured content.

    Works per locale: each translated poll index page contributes its
    live children to the home page of the same locale.
    NOTE(review): get_translation_or_none may return None for a locale
    without a home page, which would raise on the next line — confirm
    every poll index locale has a home page translation.
    """
    self.poll_index_page.refresh_from_db()
    self.home_page.refresh_from_db()
    poll_index_pages = self.poll_index_page.get_translations(inclusive=True)
    for poll_index_page in poll_index_pages:
        home_page = self.home_page.get_translation_or_none(poll_index_page.locale)
        # stream_data is the raw list form of the StreamField value;
        # it is mutated in place and re-serialized below.
        home_featured_content = home_page.home_featured_content.stream_data
        polls = poll_index_page.get_children().live()
        for poll in polls:
            home_featured_content.append({
                'type': 'embedded_poll',
                'value': {
                    'direct_display': True,
                    'poll': poll.id,
                },
            })
        home_page.home_featured_content = json.dumps(home_featured_content)
        home_page.save()
    self.stdout.write('Added polls from poll index page to home page featured content.')
def add_surveys_from_surveys_index_page_to_home_page_featured_content(self):
    """Append every live survey to the matching home page's featured content.

    Per-locale counterpart of the polls method above; unlike polls, a
    survey's own ``direct_display`` flag is preserved.
    NOTE(review): get_translation_or_none may return None for a locale
    without a home page — confirm coverage.
    """
    self.survey_index_page.refresh_from_db()
    self.home_page.refresh_from_db()
    survey_index_pages = self.survey_index_page.get_translations(inclusive=True)
    for survey_index_page in survey_index_pages:
        home_page = self.home_page.get_translation_or_none(survey_index_page.locale)
        # Raw list form of the StreamField value, mutated then re-serialized.
        home_featured_content = home_page.home_featured_content.stream_data
        surveys = survey_index_page.get_children().live()
        for survey in surveys:
            home_featured_content.append({
                'type': 'embedded_survey',
                'value': {
                    'direct_display': survey.specific.direct_display,
                    'survey': survey.id,
                },
            })
        home_page.home_featured_content = json.dumps(home_featured_content)
        home_page.save()
    self.stdout.write('Added surveys from survey index page to home page featured content.')
def migrate_article_related_sections(self):
    """Recreate v1 "related sections" as PageLinkPages at the top of
    each v2 section, and report sections whose relative article order
    could not be preserved.

    Fix: the DB cursor is now closed after the loop (it was leaked).
    """
    cur = self.db_query('select * from core_articlepagerelatedsections caprs')
    sections = defaultdict(list)
    for row in cur:
        section = self.v1_to_v2_page_map.get(row['section_id'])
        article = self.v1_to_v2_page_map.get(row['page_id'])
        if (not section) or (not article):
            self.post_migration_report_messages['articles_in_related_sections'].append(
                f"Couldn't find v2 page for v1 section: {row['section_id']} and article: {row['page_id']}"
            )
            continue
        section.refresh_from_db()
        article.refresh_from_db()
        page_link_page = models.PageLinkPage(title=article.title, page=article, live=article.live)
        section.add_child(instance=page_link_page)
        # Re-fetch as a plain Page so move() operates on a fresh node,
        # then put the link first among the section's children.
        page = Page.objects.get(id=page_link_page.id)
        self.move_page(page_to_move=page, position=0)
        sections[section.id].append(article.title)
    cur.close()
    for k, v in sections.items():
        page = Page.objects.get(id=k)
        self.post_migration_report_messages['unordered_related_articles_in_section'].append(
            f"title: {page.title}. URL: {page.full_url}. Admin URL: {self.get_admin_url(page.id)}. "
            f"articles: {', '.join(v)}"
        )
def move_footers_to_end_of_footer_index_page(self):
    """Push footer Articles behind all other children of each footer index page."""
    for index_page in self.footer_index_page.get_translations(inclusive=True):
        children = index_page.get_children()
        for footer_article in children.exact_type(models.Article):
            # position == child count is past the end, so move_page
            # falls through to its 'last-child' branch.
            self.move_page(page_to_move=footer_article, position=children.count())
def move_page(self, page_to_move, position):
    """Reposition *page_to_move* among its siblings.

    ``position`` is the target index within the parent's children; None
    or an out-of-range index moves the page to the end. When the page is
    already at ``position`` nothing happens.
    """
    parent_page = page_to_move.get_parent()
    # Locate the sibling currently occupying the target slot, if any.
    target = None
    if position is not None:
        try:
            target = parent_page.get_children()[int(position)]
        except IndexError:
            pass  # past the end — handled below
    if not target:
        # No occupant (position None or out of range): append at the end.
        # Invalid moves should have been rejected by permission checks
        # upstream, so InvalidMoveToDescendant is not caught here.
        page_to_move.move(parent_page, pos='last-child')
        return
    # Insert on the side that accounts for the page's own current slot:
    # left when moving up the list, right when moving down.
    current_index = list(parent_page.get_children()).index(page_to_move)
    if int(position) < current_index:
        page_to_move.move(target, pos='left')
    elif int(position) > current_index:
        page_to_move.move(target, pos='right')
def _sort_articles(self):
    """Reorder each section's articles by their v1 first-publish date.

    Articles are sorted (dateless first, then ascending) and each is
    moved to 'first-child' in turn, which leaves the newest article on
    top and dateless ones at the bottom.

    Fix: the bare ``except:`` is narrowed to ``except Exception:`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    pages = models.Section.objects.all().order_by('path')
    for page in pages:
        page.refresh_from_db()
        articles = page.get_children().type(models.Article)
        children_list = []
        for article in articles:
            try:
                v1_id = V1ToV2ObjectMap.get_v1_id(article.specific, article.id)
            except Exception:
                continue
            if v1_id:
                # Translations inherit the date of the page they were
                # translated from.
                translated_from_page_id = self.page_translation_map.get(v1_id)
                if translated_from_page_id:
                    v1_id = translated_from_page_id
                # v1_id is an internal integer id, not user input.
                cur = self.db_query(f'select * from wagtailcore_page wcp where id = {v1_id}')
                v1_row = cur.fetchone()
                cur.close()
                setattr(article, 'creation_date', v1_row['first_published_at'])
            else:
                setattr(article, 'creation_date', None)
            children_list.append(article)
        # The (is not None, date) key keeps None dates comparable.
        children_list = sorted(
            children_list, key=lambda x: (x.creation_date is not None, x.creation_date))
        for article in children_list:
            article.refresh_from_db()
            article.move(page, pos='first-child')
def _sort_sections(self):
    """Reorder translated sub-sections to match the source language's order.

    For every translated section, walks the sub-sections of the page it
    was translated from and moves each corresponding translation into
    the same relative position.

    Fix: the bare ``except:`` is narrowed to ``except Exception:`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    locales = Locale.objects.all()
    for locale in locales:
        pages = models.Section.objects.filter(locale=locale).order_by('path')
        for page in pages:
            page.refresh_from_db()
            try:
                v1_id = V1ToV2ObjectMap.get_v1_id(page.specific, page.id)
            except Exception:
                continue
            # Only pages that are translations of another v1 page need
            # reordering; originals keep their migrated order.
            translated_from_page_id = self.page_translation_map.get(v1_id)
            if not translated_from_page_id:
                continue
            translated_from_page = self.v1_to_v2_page_map.get(translated_from_page_id)
            if not translated_from_page:
                continue
            translated_from_page.refresh_from_db()
            translated_from_sub_sections = translated_from_page.get_children().type(models.Section)
            translated_sub_sections = page.get_children().type(models.Section)
            if translated_sub_sections:
                # Start inserting at the position of the first existing
                # sub-section and walk forward in source order.
                index_to_move = list(page.get_children()).index(translated_sub_sections.first())
                for child in translated_from_sub_sections:
                    child.refresh_from_db()
                    translated_sub_section = child.get_translation_or_none(locale)
                    if translated_sub_section:
                        self.move_page(page_to_move=translated_sub_section, position=index_to_move)
                        index_to_move += 1
def sort_pages(self):
    """Run the section/article sorting passes when --sort is 'type1'.

    Any other sort mode is a no-op.
    """
    if self.sort == 'type1':
        self._sort_sections()
        self._sort_articles()
        self.stdout.write('Pages sorted.')
def populate_registration_survey_translations(self):
    """Load per-locale registration-survey strings from the bundled CSV.

    Each CSV row is keyed by its 'str' column (normalized to an ISO
    locale); the remaining columns become that key's translation dict.
    """
    csv_path = f'{settings.BASE_DIR}/iogt_content_migration/files/registration_survey_translations.csv'
    with open(csv_path, newline='') as csv_file:
        for record in csv.DictReader(csv_file):
            key = self._get_iso_locale(record.pop('str'))
            self.registration_survey_translations[key] = record
def translate_default_survey_submit_button_text(self):
    """Replace the default 'Submit' button label with a per-locale string.

    Uses ``self.registration_survey_translations`` (populated from the
    bundled CSV). NOTE(review): the double indexing raises KeyError for
    a locale missing from the CSV — confirm every site locale is covered.
    """
    surveys = Survey.objects.all()
    for survey in surveys:
        if survey.submit_button_text == 'Submit':
            # Technically, someone could have manually put 'Submit' on a non-English button,
            # which we would now translate even though we shouldn't.
            # This is quite unlikely though.
            submit_button_text = self.registration_survey_translations['submit_button_text'][survey.locale.language_code]
            if not submit_button_text:
                self.post_migration_report_messages['untranslated_survey_button'].append(
                    f'title: {survey.title}. URL: {survey.full_url}. '
                    f'Admin URL: {self.get_admin_url(survey.id)}.'
                )
            if submit_button_text and len(submit_button_text) > 40:
                # This should never happen in practice as we provide submit_button_text
                self.stdout.write(f"Truncated default submit button text, title={survey.title}")
            # Label is truncated to 40 characters here; empty/missing
            # translations fall back to the English default.
            survey.submit_button_text = submit_button_text[:40] if submit_button_text else 'Submit'
            survey.save()
def migrate_post_registration_survey(self):
    """Build the post-registration Survey from v1 profile settings.

    Creates an English 'Registration Survey' with one form field per
    activated v1 profile field, wires it into the default SiteSettings,
    then translates the survey (and its field labels/help texts) into
    every locale using the bundled CSV strings.
    NOTE(review): the cursor opened below is never closed, and the CSV
    lookups raise KeyError for locales missing from the file.
    """
    sql = 'select * from profiles_userprofilessettings pups ' \
          'inner join wagtailcore_site ws on pups.site_id = ws.id ' \
          'where is_default_site = true'
    cur = self.db_query(sql)
    row = cur.fetchone()
    survey = Survey(
        title='Registration Survey', live=True, allow_multiple_submissions=True,
        allow_anonymous_submissions=False, submit_button_text='Register')
    self.survey_index_page.add_child(instance=survey)
    # (v1 settings flag, CSV key, v1 required flag, field type, admin label)
    for (should_add_field_key, translation_key, is_required_key, field_type, admin_label) in [
        ('activate_dob', 'dob', 'dob_required', 'date', 'date_of_birth'),
        ('activate_gender', 'gender', 'gender_required', 'singleline', 'gender'),
        ('activate_location', 'location', 'location_required', 'singleline', 'location'),
        ('activate_education_level', 'education_level', 'activate_education_level_required', 'singleline',
         'education_level'),
        ('show_mobile_number_field', 'mobile_number', 'mobile_number_required', 'singleline', 'mobile_number'),
        ('show_email_field', 'email_address', 'email_required', 'email', 'email'),
    ]:
        if row[should_add_field_key]:
            SurveyFormField.objects.create(
                page=survey,
                label=self.registration_survey_translations[translation_key]['en'],
                required=bool(row[is_required_key]),
                field_type=field_type,
                admin_label=admin_label,
                help_text=self.registration_survey_translations[f'{translation_key}_helptext']['en']
            )
    self.stdout.write('Successfully migrated post registration survey')
    default_site_settings = models.SiteSettings.get_for_default_site()
    default_site_settings.registration_survey = survey
    default_site_settings.save()
    for locale in Locale.objects.all():
        try:
            self.translate_page(locale=locale, page=survey)
            translated_survey = survey.get_translation_or_none(locale)
        except Exception as e:
            self.post_migration_report_messages['registration_survey'].append(
                f"Unable to translate survey, title={survey.title} to locale={locale}"
            )
            continue
        submit_button_text = self.registration_survey_translations['register_button_text'][locale.language_code]
        if not submit_button_text:
            self.post_migration_report_messages['registration_survey_translation_not_found'].append(
                f'No translation for submit button of registration survey to locale: {locale}'
            )
        if submit_button_text and len(submit_button_text) > 40:
            # This should never happen in practice as we provide submit_button_text
            self.stdout.write(f"Truncated survey submit button text, title={translated_survey.title}")
        # Button label is capped at 40 chars; falls back to 'Register'.
        translated_survey.submit_button_text = submit_button_text[:40] if submit_button_text else 'Register'
        translated_survey.save()
        if translated_survey:
            # Translate label/help_text of each copied form field.
            for (admin_label, label_identifier) in [
                ('date_of_birth', 'dob'),
                ('gender', 'gender'),
                ('location', 'location'),
                ('mobile_number', 'mobile_number'),
                ('education_level', 'education_level'),
                ('email', 'email_address')
            ]:
                try:
                    field = SurveyFormField.objects.get(page=translated_survey, admin_label=admin_label)
                except SurveyFormField.DoesNotExist:
                    # This field is not marked as required in the registration survey
                    continue
                try:
                    field.label = self.registration_survey_translations[label_identifier][locale.language_code]
                    field.help_text = self.registration_survey_translations[
                        f'{label_identifier}_helptext'][locale.language_code]
                except KeyError:
                    self.post_migration_report_messages['registration_survey_translation_not_found'].append(
                        f'Incomplete translation for registration survey to locale: {locale}'
                    )
                    break
                field.save()
    self.post_migration_report_messages['other'].append(
        'Title of registration survey (Pages > Internet of Good Things [Language] > Surveys > Registration Survey) '
        'has not been translated for any language.'
    )
def get_admin_url(self, id):
    """Absolute Wagtail-admin edit URL for page *id* on the default site."""
    default_site = Site.objects.filter(is_default_site=True).first()
    edit_path = reverse('wagtailadmin_pages:edit', args=(id,))
    return f"{default_site.root_url}{edit_path}"
def print_post_migration_report(self):
    """Dump every collected post-migration warning, grouped by category.

    Fix: the final write line had dataset-extraction junk (a repo path
    and a stray import) fused onto it; the statement is restored and the
    import re-emitted below so the following module copy still resolves
    ``defaultdict``.
    """
    self.stdout.write(self.style.ERROR('====================='))
    self.stdout.write(self.style.ERROR('POST MIGRATION REPORT'))
    self.stdout.write(self.style.ERROR('====================='))
    for k, v in self.post_migration_report_messages.items():
        self.stdout.write(self.style.ERROR(f"===> {k.replace('_', ' ').upper()}"))
        self.stdout.write(self.style.ERROR('\n'.join(v)))

from collections import defaultdict
from pathlib import Path
import csv
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.files import File
from django.core.management.base import BaseCommand
from django.urls import reverse
from wagtail.core.models import Page, Site, Locale, Collection, PageRevision
from django.core.files.images import ImageFile
from wagtail.documents.models import Document
from wagtail.images.models import Image
from wagtail_localize.models import Translation
from wagtail_localize.views.submit_translations import TranslationCreator
from wagtailmarkdown.utils import _get_bleach_kwargs
from wagtailmedia.models import Media
from wagtailsvg.models import Svg
import home.models as models
from comments.models import CommentStatus
from home.models import V1ToV2ObjectMap, V1PageURLToV2PageMap
from questionnaires.models import Poll, PollFormField, Survey, SurveyFormField, Quiz, QuizFormField
import psycopg2
import psycopg2.extras
import json
from questionnaires.models import PollIndexPage, SurveyIndexPage, QuizIndexPage
class Command(BaseCommand):
def add_arguments(self, parser):
    """Register the CLI options for the v1 -> v2 content migration."""
    argument_specs = [
        ('--host', dict(default='0.0.0.0', help='IoGT V1 database host')),
        ('--port', dict(default='5432', help='IoGT V1 database port')),
        ('--name', dict(default='postgres', help='IoGT V1 database name')),
        ('--user', dict(default='postgres', help='IoGT V1 database user')),
        ('--password', dict(default='', help='IoGT V1 database password')),
        ('--media-dir', dict(required=True, help='**RELATIVE Path** to IoGT v1 media directory')),
        ('--delete-users', dict(action='store_true',
                                help='Delete existing Users and their associated data. Use carefully')),
        ('--v1-domains', dict(nargs="+", required=True,
                              help="IoGT V1 domains for manually inserted internal links, --v1-domains domain1 domain2 ...")),
        ('--sort', dict(required=True, help='Sort page by "type1" or "type2"')),
    ]
    for flag, kwargs in argument_specs:
        parser.add_argument(flag, **kwargs)
def handle(self, *args, **options):
    """Entry point: connect to the v1 DB, reset v2 content, run migration."""
    self.db_connect(options)
    self.media_dir = options.get('media_dir')
    self.v1_domains_list = options.get('v1_domains')
    self.sort = options.get('sort')
    # NOTE(review): 'v2_domain'/'v2_site_port' are never registered in
    # add_arguments, so these are None unless injected via call_command.
    self.v2_domain = options.get('v2_domain')
    self.v2_site_port = options.get('v2_site_port')
    # v1-id -> v2-object lookup tables filled in by the migrate_* steps.
    self.collection_map = {}
    self.document_map = {}
    self.media_map = {}
    self.image_map = {}
    self.page_translation_map = {}
    self.v1_to_v2_page_map = {}
    # category -> list of human-readable warnings printed at the end.
    self.post_migration_report_messages = defaultdict(list)
    # NOTE(review): defaultdict() with no factory behaves like a plain dict.
    self.registration_survey_translations = defaultdict()
    self.clear()
    self.stdout.write('Existing site structure cleared')
    root = Page.get_first_root_node()
    self.migrate(root)
    self.print_post_migration_report()
def clear(self):
    """Delete all previously migrated v2 content so the command is re-runnable.

    Deletion order matters: leaf/child models (revisions, form fields,
    link pages, articles) go before their parents and index pages, then
    sites, assets, and finally the v1->v2 mapping table.
    """
    PageRevision.objects.all().delete()
    models.OfflineAppPage.objects.all().delete()
    models.MiscellaneousIndexPage.objects.all().delete()
    models.PageLinkPage.objects.all().delete()
    PollFormField.objects.all().delete()
    Poll.objects.all().delete()
    SurveyFormField.objects.all().delete()
    Survey.objects.all().delete()
    QuizFormField.objects.all().delete()
    Quiz.objects.all().delete()
    models.FeaturedContent.objects.all().delete()
    models.ArticleRecommendation.objects.all().delete()
    models.FooterPage.objects.all().delete()
    models.FooterIndexPage.objects.all().delete()
    models.BannerPage.objects.all().delete()
    models.BannerIndexPage.objects.all().delete()
    models.Article.objects.all().delete()
    models.Section.objects.all().delete()
    models.SectionIndexPage.objects.all().delete()
    models.HomePage.objects.all().delete()
    Site.objects.all().delete()
    Image.objects.all().delete()
    Document.objects.all().delete()
    Media.objects.all().delete()
    V1ToV2ObjectMap.objects.all().delete()
def db_connect(self, options):
    """Open the psycopg2 connection to the v1 database.

    NOTE(review): the DSN (including the password) is written to stdout.
    """
    dsn = self.create_connection_string(options)
    self.stdout.write(f'DB connection string created, string={dsn}')
    self.v1_conn = psycopg2.connect(dsn)
    self.stdout.write('Connected to v1 DB')
def __del__(self):
    """Best-effort close of the v1 DB connection on garbage collection."""
    try:
        self.v1_conn.close()
        self.stdout.write('Closed connection to v1 DB')
    except AttributeError:
        # db_connect may never have run (e.g. bad arguments), in which
        # case self.v1_conn does not exist.
        pass
def create_connection_string(self, options):
    """Build a libpq key=value DSN from the command options, with defaults."""
    dsn_parts = {
        'host': options.get('host', '0.0.0.0'),
        'port': options.get('port', '5432'),
        'dbname': options.get('name', 'postgres'),
        'user': options.get('user', 'postgres'),
        'password': options.get('password', ''),
    }
    return ' '.join(f'{key}={value}' for key, value in dsn_parts.items())
def db_query(self, q):
    """Execute *q* on the v1 DB; return a RealDictCursor (caller closes).

    Rows behave like dicts (column name -> value). NOTE: callers build
    queries with f-strings/``format``; the interpolated values are
    internal ids, not user input, but parameterized queries would be
    safer.
    """
    cur = self.v1_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute(q)
    return cur
def migrate(self, root):
    """Run every migration phase in dependency order under *root*.

    Order matters: assets and locales first, then the page tree, then
    the fix_* passes (they need the finished tree to resolve cross-page
    links), then feature wiring, sorting, the registration survey, and
    finally revisions.
    """
    self.migrate_collections()
    self.migrate_documents()
    self.migrate_media()
    self.migrate_images()
    self.migrate_locales()
    self.load_page_translation_map()
    self.home_page = self.create_home_page(root)
    self.translate_home_pages()
    self.create_index_pages()
    self.translate_index_pages()
    self.migrate_sections()
    self.migrate_articles()
    self.migrate_footers()
    self.migrate_polls()
    self.migrate_surveys()
    self.migrate_banners()
    self.mark_pages_which_are_not_translated_in_v1_as_draft()
    # Repair treebeard paths before any passes that move pages around.
    Page.fix_tree(fix_paths=True)
    self.mark_empty_sections_as_draft()
    self.fix_articles_body()
    self.fix_footers_body()
    self.fix_survey_description()
    self.fix_banner_link_page()
    self.attach_banners_to_home_page()
    self.migrate_recommended_articles_for_article()
    self.migrate_featured_articles_for_homepage()
    self.add_surveys_from_surveys_index_page_to_footer_index_page_as_page_link_page()
    self.add_polls_from_polls_index_page_to_footer_index_page_as_page_link_page()
    self.add_polls_from_polls_index_page_to_home_page_featured_content()
    self.add_surveys_from_surveys_index_page_to_home_page_featured_content()
    self.move_footers_to_end_of_footer_index_page()
    self.migrate_article_related_sections()
    self.migrate_social_media_links()
    self.sort_pages()
    self.populate_registration_survey_translations()
    self.translate_default_survey_submit_button_text()
    self.migrate_post_registration_survey()
    self.migrate_page_revisions()
    self.stop_translations()
def create_home_page(self, root):
    """Create the v2 HomePage + default Site from the v1 main page.

    Also copies the site logo (when migrated) into SiteSettings and
    records report messages about social-media links and the favicon.
    Returns the new HomePage; raises when the v1 DB has no default main
    page or no main language.

    Fix: corrected the report message typo ("Not site logo found.").
    """
    sql = 'select * ' \
          'from wagtailcore_site wcs, core_sitesettings css, core_main cm, wagtailcore_page wcp ' \
          'where wcs.id = css.site_id ' \
          'and wcs.root_page_id = cm.page_ptr_id ' \
          'and cm.page_ptr_id = wcp.id ' \
          'and wcs.is_default_site = true'
    cur = self.db_query(sql)
    main = cur.fetchone()
    cur.close()
    if not main:
        raise Exception('Could not find a main page in v1 DB')
    sql = 'select * ' \
          'from core_sitelanguage ' \
          'where is_main_language = true'
    cur = self.db_query(sql)
    language = cur.fetchone()
    cur.close()
    if not language:
        raise Exception('Could not find a main language in v1 DB')
    locale = Locale.objects.get(language_code=self._get_iso_locale(language['locale']))
    home = models.HomePage(
        title=main['title'],
        draft_title=main['draft_title'],
        slug=main['slug'],
        live=main['live'],
        locked=main['locked'],
        go_live_at=main['go_live_at'],
        expire_at=main['expire_at'],
        first_published_at=main['first_published_at'],
        last_published_at=main['last_published_at'],
        search_description=main['search_description'],
        seo_title=main['seo_title'],
        locale=locale
    )
    root.add_child(instance=home)
    V1ToV2ObjectMap.create_map(content_object=home, v1_object_id=main['page_ptr_id'])
    # The first --v1-domains entry becomes the canonical hostname.
    Site.objects.create(
        hostname=self.v1_domains_list[0],
        port=443,
        root_page=home,
        is_default_site=True,
        site_name=main['site_name'] if main['site_name'] else 'Internet of Good Things',
    )
    logo = self.image_map.get(main['logo_id'])
    if logo:
        site_settings = models.SiteSettings.get_for_default_site()
        site_settings.logo_id = logo.id
        site_settings.save()
    else:
        self.post_migration_report_messages['other'].append(
            'No site logo found. Using default site logo.'
        )
    sql = f'select * ' \
          f'from core_sitesettings css, wagtailcore_site wcs ' \
          f'where css.site_id = wcs.id ' \
          f'and wcs.is_default_site = true'
    cur = self.db_query(sql)
    for row in cur:
        social_media_links = json.loads(row['social_media_links_on_footer_page'])
        if social_media_links:
            # Links themselves are migrated elsewhere; here we only
            # report what was found so it can be verified manually.
            links = []
            for social_media_link in social_media_links:
                value = social_media_link.get('value')
                if value:
                    links.append({
                        'title': value.get('title'),
                        'link': value.get('link'),
                    })
            self.post_migration_report_messages['social_media_links'].append(
                f'site: {row["site_name"]}, hostname: {row["hostname"]} has following social media links '
                f'{[(link["title"], link["link"]) for link in links]}.')
    cur.close()
    self.post_migration_report_messages['other'].append(
        'A default favicon has been chosen for the site.'
    )
    return home
def create_index_pages(self):
    """Create the seven top-level index pages under the home page.

    Each page is also stored on ``self`` under its attribute name so the
    later migration passes can reach it.
    """
    index_page_specs = [
        ('section_index_page', models.SectionIndexPage, 'Sections'),
        ('banner_index_page', models.BannerIndexPage, 'Banners'),
        ('footer_index_page', models.FooterIndexPage, 'Footers'),
        ('poll_index_page', PollIndexPage, 'Polls'),
        ('survey_index_page', SurveyIndexPage, 'Surveys'),
        ('quiz_index_page', QuizIndexPage, 'Quizzes'),
        ('miscellaneous_index_page', models.MiscellaneousIndexPage, 'Miscellaneous'),
    ]
    for attr_name, page_cls, title in index_page_specs:
        index_page = page_cls(title=title)
        setattr(self, attr_name, index_page)
        self.home_page.add_child(instance=index_page)
def migrate_collections(self):
    """Copy v1 wagtail collections into v2, keyed by name.

    Existing collections with the same name are reused; tree fields
    (path/depth/numchild) are only set on creation.

    Fix: dropped the redundant ``collection.save()`` right after
    ``get_or_create`` — it wrote back an unmodified object.
    """
    cur = self.db_query('select * from wagtailcore_collection')
    for row in cur:
        collection, _ = Collection.objects.get_or_create(
            name=row['name'],
            defaults={
                'path': row['path'],
                'depth': row['depth'],
                'numchild': row['numchild'],
            }
        )
        self.collection_map.update({row['id']: collection})
        V1ToV2ObjectMap.create_map(content_object=collection, v1_object_id=row['id'])
    cur.close()
    self.stdout.write('Collections migrated')
def migrate_documents(self):
    """Copy v1 documents (file, tags, collection) into wagtail Documents.

    Rows without a file path are reported and skipped.

    Fix: the handle returned by ``open_file`` is now closed once the
    Document is created — it used to leak one fd per document.
    """
    cur = self.db_query('select * from wagtaildocs_document')
    content_type = self.find_content_type_id('wagtaildocs', 'document')
    for row in cur:
        if not row['file']:
            self.post_migration_report_messages['document_file_not_found'].append(
                f'Document file path not found, id={row["id"]}'
            )
            continue
        file = self.open_file(row['file'])
        if file:
            with file:
                document = Document.objects.create(
                    title=row['title'],
                    file=File(file),
                    created_at=row['created_at'],
                    collection=self.collection_map.get(row['collection_id']),
                )
            V1ToV2ObjectMap.create_map(content_object=document, v1_object_id=row['id'])
            tags = self.find_tags(content_type, row['id'])
            if tags:
                document.tags.add(*tags)
            self.document_map.update({row['id']: document})
    cur.close()
    self.stdout.write('Documents migrated')
def migrate_media(self):
    """Copy v1 molo media (file, thumbnail, tags) into wagtailmedia Media.

    Rows without a file path are reported and skipped.
    NOTE(review): the handles returned by open_file for 'file' and
    'thumbnail' are never closed — one or two fds leak per media row.
    """
    cur = self.db_query('select * from core_molomedia')
    content_type = self.find_content_type_id('core', 'molomedia')
    for row in cur:
        if not row['file']:
            self.post_migration_report_messages['media_file_not_found'].append(
                f'Media file path not found, id={row["id"]}'
            )
            continue
        file = self.open_file(row['file'])
        if file:
            # Thumbnail is optional; open_file returns None on failure.
            thumbnail = self.open_file(row['thumbnail'])
            media = Media.objects.create(
                title=row['title'],
                file=File(file),
                type=row['type'],
                duration=row['duration'],
                thumbnail=File(thumbnail) if thumbnail else None,
                created_at=row['created_at'],
                collection=self.collection_map.get(row['collection_id']),
            )
            V1ToV2ObjectMap.create_map(content_object=media, v1_object_id=row['id'])
            tags = self.find_tags(content_type, row['id'])
            if tags:
                media.tags.add(*tags)
            self.media_map.update({row['id']: media})
    cur.close()
    self.stdout.write('Media migrated')
def migrate_images(self):
    """Copy v1 wagtail images (file, focal point, tags) into v2 Images.

    Rows without a file path are reported and skipped.
    NOTE(review): the handle returned by open_file is never closed —
    one fd leaks per image.
    """
    cur = self.db_query('select * from wagtailimages_image')
    content_type = self.find_content_type_id('wagtailimages', 'image')
    for row in cur:
        if not row['file']:
            self.post_migration_report_messages['image_file_not_found'].append(
                f'Image file path not found, id={row["id"]}'
            )
            continue
        image_file = self.open_file(row['file'])
        if image_file:
            self.stdout.write(f"Creating image, file={row['file']}")
            image = Image.objects.create(
                title=row['title'],
                file=ImageFile(image_file, name=row['file'].split('/')[-1]),
                focal_point_x=row['focal_point_x'],
                focal_point_y=row['focal_point_y'],
                focal_point_width=row['focal_point_width'],
                focal_point_height=row['focal_point_height'],
                created_at=row['created_at'],
                collection=self.collection_map.get(row['collection_id']),
            )
            V1ToV2ObjectMap.create_map(content_object=image, v1_object_id=row['id'])
            # Populate the cached size/hash fields on the new image.
            image.get_file_size()
            image.get_file_hash()
            tags = self.find_tags(content_type, row['id'])
            if tags:
                image.tags.add(*tags)
            self.image_map.update({row['id']: image})
    cur.close()
    self.stdout.write('Images migrated')
def migrate_locales(self):
    """Ensure a wagtail Locale exists for every v1 site language."""
    cursor = self.db_query('select * from core_sitelanguage')
    for record in cursor:
        iso_code = self._get_iso_locale(record['locale'])
        Locale.objects.get_or_create(language_code=iso_code)
    cursor.close()
def find_content_type_id(self, app_label, model):
    """Look up the v1 django_content_type id for (app_label, model).

    NOTE(review): if the row is missing, fetchone() returns None and
    ``.get('id')`` raises AttributeError — assumed present in any valid
    v1 DB. Arguments are hard-coded by callers, not user input.
    """
    cur = self.db_query(f"select id from django_content_type where app_label = '{app_label}' and model = '{model}'")
    content_type = cur.fetchone()
    cur.close()
    return content_type.get('id')
def open_file(self, file):
    """Open *file* (relative to --media-dir) for binary reading.

    Returns the open file object, or None (after recording a report
    message) when it cannot be opened. The caller owns the handle.

    Fix: the bare ``except:`` is narrowed to ``except OSError:`` so
    programming errors and KeyboardInterrupt are no longer swallowed.
    """
    file_path = Path(self.media_dir) / file
    try:
        return open(file_path, 'rb')
    except OSError:
        self.post_migration_report_messages['file_not_found'].append(
            f"File not found: {file_path}"
        )
def find_tags(self, content_type, object_id):
    """Return the v1 taggit tag names attached to (content_type, object_id).

    Both arguments are internal integer ids produced by this command,
    so the ``format`` interpolation is not exposed to user input —
    parameterized SQL would still be safer.
    """
    tags_query = 'select t.name from taggit_tag t join taggit_taggeditem ti on t.id = ti.tag_id where ti.content_type_id = {} and ti.object_id = {}'
    cur = self.db_query(tags_query.format(content_type, object_id))
    tags = [tag['name'] for tag in cur]
    cur.close()
    return tags
    def migrate_sections(self):
        """Migrate v1 SectionPage rows into v2 Section pages.

        Canonical pages are created first; rows whose id appears in
        ``page_translation_map`` are translated copies and are replayed
        after the loop (the ``for``/``else`` always runs — there is no
        ``break``) as wagtail-localize translations of the created section.
        """
        sql = f"select * " \
              f"from core_sectionpage csp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where csp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        section_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                # Translated page: defer until every canonical section exists.
                section_page_translations.append(row)
            else:
                self.create_section(row)
        else:
            # Second pass: apply the deferred translations.
            for row in section_page_translations:
                # The canonical v2 section this row is a translation of.
                section = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=section)
                except:
                    # Broad catch keeps the migration running; the failure is reported.
                    self.post_migration_report_messages['untranslated_sections'].append(
                        f"Unable to translate section, title={row['title']}"
                    )
                    continue
                translated_section = section.get_translation_or_none(locale)
                if translated_section:
                    commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
                    # Copy the v1 page fields onto the freshly created translation.
                    translated_section.lead_image = self.image_map.get(row['image_id'])
                    translated_section.title = row['title']
                    translated_section.slug = row['slug']
                    translated_section.draft_title = row['draft_title']
                    translated_section.live = row['live']
                    translated_section.locked = row['locked']
                    translated_section.go_live_at = row['go_live_at']
                    translated_section.expire_at = row['expire_at']
                    translated_section.first_published_at = row['first_published_at']
                    translated_section.last_published_at = row['last_published_at']
                    translated_section.search_description = row['search_description']
                    translated_section.seo_title = row['seo_title']
                    # Fall back to the canonical section's colour when v1 has none.
                    translated_section.font_color = self.get_color_hex(row['extra_style_hints']) or section.font_color
                    translated_section.larger_image_for_top_page_in_list_as_in_v1 = True
                    translated_section.commenting_status = commenting_status
                    translated_section.commenting_starts_at = commenting_open_time
                    translated_section.commenting_ends_at = commenting_close_time
                    translated_section.latest_revision_created_at = row['latest_revision_created_at']
                    translated_section.save()
                    self.add_warning_for_sections_with_description(row, section)
                    content_type = self.find_content_type_id('core', 'sectionpage')
                    tags = self.find_tags(content_type, row['page_ptr_id'])
                    if tags:
                        translated_section.tags.add(*tags)
                    # Record the v1->v2 mappings so later blocks/links can resolve.
                    V1ToV2ObjectMap.create_map(content_object=translated_section, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_section)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_section
                    })
                    if row['description'] is None:
                        self.post_migration_report_messages['sections_with_null_description'].append(
                            f'title: {translated_section.title}. URL: {translated_section.full_url}. '
                            f'Admin URL: {self.get_admin_url(translated_section.id)}.'
                        )
                    self.stdout.write(f"Translated section, title={row['title']}")
        cur.close()
def mark_empty_sections_as_draft(self):
for section in models.Section.objects.all():
if section.get_children().filter(live=True).count() == 0:
section.live = False
section.save(update_fields=['live'])
    def create_section(self, row):
        """Create a canonical v2 Section from one v1 SectionPage row."""
        commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
        section = models.Section(
            lead_image=self.image_map.get(row['image_id']),
            title=row['title'],
            draft_title=row['draft_title'],
            show_in_menus=True,
            slug=row['slug'],
            # Re-root the v1 treebeard path under the v2 section index by
            # dropping the first 12 characters (three 4-char path steps —
            # TODO confirm the v1 tree depth this assumes).
            path=self.section_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            commenting_status=commenting_status,
            commenting_starts_at=commenting_open_time,
            commenting_ends_at=commenting_close_time,
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            font_color=self.get_color_hex(row['extra_style_hints']),
            larger_image_for_top_page_in_list_as_in_v1=True,
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        section.save()
        self.add_warning_for_sections_with_description(row, section)
        content_type = self.find_content_type_id('core', 'sectionpage')
        tags = self.find_tags(content_type, row['page_ptr_id'])
        if tags:
            section.tags.add(*tags)
        # Record the v1->v2 mappings so later blocks/links can resolve.
        V1ToV2ObjectMap.create_map(content_object=section, v1_object_id=row['page_ptr_id'])
        V1PageURLToV2PageMap.create_map(url=row['url_path'], page=section)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: section
        })
        if row['description'] is None:
            self.post_migration_report_messages['sections_with_null_description'].append(
                f'title: {section.title}. URL: {section.full_url}. '
                f'Admin URL: {self.get_admin_url(section.id)}.'
            )
        self.stdout.write(f"saved section, title={section.title}")
def add_warning_for_sections_with_description(self, row, section):
if row['description']:
self.post_migration_report_messages['sections_with_description'].append(
f'title: {section.title}. URL: {section.full_url}. '
f'Admin URL: {self.get_admin_url(section.id)}. '
f'Description (not migrated): {row["description"]}.'
)
    def migrate_articles(self):
        """Migrate v1 ArticlePage rows into v2 Article pages.

        Only pages under the path prefix '000100010002' are selected
        (presumably the v1 section subtree — TODO confirm).  Canonical
        pages are created first; rows in ``page_translation_map`` are
        replayed afterwards (the ``for``/``else`` always runs — there is
        no ``break``) as wagtail-localize translations.
        """
        sql = f"select * " \
              f"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cap.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"and wcp.path like '000100010002%' " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        article_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                # Translated page: defer until all canonical articles exist.
                article_page_translations.append(row)
            else:
                self.create_article(row)
        else:
            # Second pass: apply the deferred translations.
            for row in article_page_translations:
                article = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=article)
                except:
                    # Broad catch keeps the migration running; the failure is reported.
                    self.post_migration_report_messages['untranslated_articles'].append(
                        f"Unable to translate article, title={row['title']}"
                    )
                    continue
                translated_article = article.get_translation_or_none(locale)
                if translated_article:
                    commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
                    # Copy the v1 page fields onto the freshly created translation.
                    translated_article.lead_image = self.image_map.get(row['image_id'])
                    translated_article.title = row['title']
                    translated_article.slug = row['slug']
                    translated_article.draft_title = row['draft_title']
                    translated_article.live = row['live']
                    translated_article.locked = row['locked']
                    translated_article.go_live_at = row['go_live_at']
                    translated_article.expire_at = row['expire_at']
                    translated_article.first_published_at = row['first_published_at']
                    translated_article.last_published_at = row['last_published_at']
                    translated_article.search_description = row['search_description']
                    translated_article.seo_title = row['seo_title']
                    # v1 "subtitle" becomes the v2 index-page description.
                    translated_article.index_page_description = row['subtitle']
                    translated_article.commenting_status = commenting_status
                    translated_article.commenting_starts_at = commenting_open_time
                    translated_article.commenting_ends_at = commenting_close_time
                    translated_article.latest_revision_created_at = row['latest_revision_created_at']
                    translated_article.save()
                    content_type = self.find_content_type_id('core', 'articlepage')
                    tags = self.find_tags(content_type, row['page_ptr_id'])
                    if tags:
                        translated_article.tags.add(*tags)
                    # Record the v1->v2 mappings so later blocks/links can resolve.
                    V1ToV2ObjectMap.create_map(content_object=translated_article, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_article)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_article
                    })
                    self.stdout.write(f"Translated article, title={row['title']}")
        cur.close()
def _get_commenting_fields(self, row):
comments_map = {
'O': CommentStatus.OPEN,
'C': CommentStatus.CLOSED,
'D': CommentStatus.DISABLED,
'T': CommentStatus.TIMESTAMPED
}
commenting_status = comments_map[row['commenting_state']] if row['commenting_state'] else CommentStatus.INHERITED
return commenting_status, row['commenting_open_time'], row['commenting_close_time']
    def create_article(self, row):
        """Create a canonical v2 Article from one v1 ArticlePage row.

        Articles whose parent page is missing (``Page.DoesNotExist`` on
        save) are skipped and reported rather than aborting the run.
        """
        commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
        article = models.Article(
            lead_image=self.image_map.get(row['image_id']),
            title=row['title'],
            draft_title=row['draft_title'],
            slug=row['slug'],
            # Re-root the v1 treebeard path under the v2 section index
            # (drops the first 12 characters — TODO confirm depth assumption).
            path=self.section_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            commenting_status=commenting_status,
            commenting_starts_at=commenting_open_time,
            commenting_ends_at=commenting_close_time,
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            # v1 "subtitle" becomes the v2 index-page description.
            index_page_description=row['subtitle'],
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        try:
            article.save()
            content_type = self.find_content_type_id('core', 'articlepage')
            tags = self.find_tags(content_type, row['page_ptr_id'])
            if tags:
                article.tags.add(*tags)
            # Record the v1->v2 mappings so later blocks/links can resolve.
            V1ToV2ObjectMap.create_map(content_object=article, v1_object_id=row['page_ptr_id'])
            V1PageURLToV2PageMap.create_map(url=row['url_path'], page=article)
            self.v1_to_v2_page_map.update({
                row['page_ptr_id']: article
            })
        except Page.DoesNotExist:
            self.post_migration_report_messages['articles'].append(
                f"Skipping article with missing parent: title={row['title']}"
            )
            return
        self.stdout.write(f"saved article, title={article.title}")
def get_unsupported_html_tags(self, value):
bleach_kwargs = _get_bleach_kwargs()
unsupported_html_tags = []
tags = BeautifulSoup(value, "html.parser").find_all()
for tag in tags:
if tag.name not in bleach_kwargs['tags']:
unsupported_html_tags.append(tag.name)
return unsupported_html_tags
    def _map_body(self, type_, row, v2_body):
        """Rewrite a v1 stream-field block list into v2 block types, in place.

        ``type_`` is a plural label ('articles'/'surveys') used only in report
        text; ``row`` is the source DB row; ``v2_body`` is the parsed v1 block
        list, which is mutated and also returned.
        """
        for block in v2_body:
            if block['type'] == 'paragraph':
                # v1 'paragraph' is markdown unless it contains HTML that
                # bleach would strip — then keep it as a legacy block.
                unsupported_html_tags = self.get_unsupported_html_tags(block['value'])
                if unsupported_html_tags:
                    block['type'] = 'paragraph_v1_legacy'
                    page = self.v1_to_v2_page_map.get(row['page_ptr_id'])
                    if page:
                        self.post_migration_report_messages['page_with_unsupported_tags'].append(
                            f'title: {page.title}. URL: {page.full_url}. '
                            f'Admin URL: {self.get_admin_url(page.id)}. '
                            f'Tags: {unsupported_html_tags}.'
                        )
                else:
                    block['type'] = 'markdown'
                    # Report hard-coded links to any of the old v1 domains.
                    if bool([domain for domain in self.v1_domains_list if domain in block['value']]):
                        # NOTE(review): uses row['page_id'] (from the joined
                        # language-relation row) while other branches use
                        # row['page_ptr_id'], and page is not None-checked
                        # before .title — confirm both against callers.
                        page = self.v1_to_v2_page_map.get(row['page_id'])
                        self.post_migration_report_messages['sections_with_internal_links'].append(
                            f"title: {page.title}. URL: {page.full_url}. "
                            f"Admin URL: {self.get_admin_url(page.id)}.")
            elif block['type'] == 'richtext':
                block['type'] = 'paragraph'
                # Same internal-link report as the markdown branch above.
                if bool([domain for domain in self.v1_domains_list if domain in block['value']]):
                    page = self.v1_to_v2_page_map.get(row['page_id'])
                    self.post_migration_report_messages['sections_with_internal_links'].append(
                        f"title: {page.title}. URL: {page.full_url}. "
                        f"Admin URL: {self.get_admin_url(page.id)}.")
            elif block['type'] == 'image':
                # Swap the v1 image id for the migrated v2 image's id.
                image = self.image_map.get(block['value'])
                if image:
                    block['value'] = image.id
                else:
                    page = self.v1_to_v2_page_map.get(row['page_ptr_id'])
                    if page:
                        self.post_migration_report_messages['page_with_empty_image'].append(
                            f'title: {page.title}. URL: {page.full_url}. '
                            f'Admin URL: {self.get_admin_url(page.id)}. '
                            f'Image ID: {block["value"]}'
                        )
                    else:
                        self.post_migration_report_messages['invalid_image_id'].append(
                            f"title={row['title']} has image with invalid id {block['value']}"
                        )
                    block['value'] = None
            elif block['type'] == 'media':
                # Swap the v1 media id for the migrated v2 media's id.
                media = self.media_map.get(block['value'])
                if media:
                    block['value'] = media.id
                else:
                    self.post_migration_report_messages['invalid_media_id'].append(
                        f"title={row['title']} has media with invalid id {block['value']}"
                    )
                    block['value'] = None
            elif block['type'] == 'page':
                # v1 'page' becomes a v2 'page_button' pointing at the mapped page.
                block['type'] = 'page_button'
                page = self.v1_to_v2_page_map.get(block['value'])
                if page:
                    block['value'] = {'page': page.id, 'text': ''}
                else:
                    block['value'] = {'page': None, 'text': ''}
                    self.post_migration_report_messages['invalid_page_id'].append(
                        f'Unable to attach v2 page for {type_[:-1]}, title={row["title"]}'
                    )
        return v2_body
def map_article_body(self, row):
v1_body = json.loads(row['body'])
v2_body = self._map_body('articles', row, v1_body)
if row['subtitle']:
v2_body = [{
'type': 'paragraph',
'value': row['subtitle'],
}] + v2_body
return json.dumps(v2_body)
    def migrate_banners(self):
        """Migrate v1 BannerPage rows into v2 banner pages.

        Canonical pages are created first; rows in ``page_translation_map``
        are replayed afterwards (the ``for``/``else`` always runs — there is
        no ``break``) as wagtail-localize translations.
        """
        sql = f"select * " \
              f"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cbp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        banner_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                # Translated page: defer until all canonical banners exist.
                banner_page_translations.append(row)
            else:
                self.create_banner(row)
        else:
            # Second pass: apply the deferred translations.
            for row in banner_page_translations:
                banner = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=banner)
                except:
                    # Broad catch keeps the migration running; the failure is reported.
                    self.post_migration_report_messages['untranslated_banners'].append(
                        f"Unable to translate banner, title={row['title']}"
                    )
                    continue
                translated_banner = banner.get_translation_or_none(locale)
                if translated_banner:
                    # Copy the v1 page fields onto the freshly created translation.
                    translated_banner.banner_image = self.image_map.get(row['banner_id'])
                    translated_banner.title = row['title']
                    translated_banner.slug = row['slug']
                    translated_banner.draft_title = row['draft_title']
                    translated_banner.live = row['live']
                    translated_banner.locked = row['locked']
                    translated_banner.go_live_at = row['go_live_at']
                    translated_banner.expire_at = row['expire_at']
                    translated_banner.first_published_at = row['first_published_at']
                    translated_banner.last_published_at = row['last_published_at']
                    translated_banner.search_description = row['search_description']
                    translated_banner.seo_title = row['seo_title']
                    translated_banner.latest_revision_created_at = row['latest_revision_created_at']
                    translated_banner.save()
                    # Record the v1->v2 mappings so later blocks/links can resolve.
                    V1ToV2ObjectMap.create_map(content_object=translated_banner, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_banner)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_banner
                    })
                    self.stdout.write(f"Translated banner, title={row['title']}")
        cur.close()
    def create_banner(self, row):
        """Create a canonical v2 BannerPage from one v1 BannerPage row."""
        banner = models.BannerPage(
            banner_image=self.image_map.get(row['banner_id']),
            title=row['title'],
            draft_title=row['draft_title'],
            slug=row['slug'],
            # Re-root the v1 treebeard path under the v2 banner index
            # (drops the first 12 characters — TODO confirm depth assumption).
            path=self.banner_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            # v1 banners have no description; field is required blank in v2.
            banner_description='',
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        banner.save()
        # Record the v1->v2 mappings so later blocks/links can resolve.
        V1ToV2ObjectMap.create_map(content_object=banner, v1_object_id=row['page_ptr_id'])
        V1PageURLToV2PageMap.create_map(url=row['url_path'], page=banner)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: banner
        })
        self.stdout.write(f"saved banner, title={banner.title}")
def map_banner_page(self, row):
v2_page = None
v1_banner_link_page_id = row['banner_link_page_id']
if v1_banner_link_page_id:
v2_page = self.v1_to_v2_page_map.get(v1_banner_link_page_id)
if not v2_page:
self.post_migration_report_messages['banner_page_link'].append(
f'Unable to attach v2 page for banner, title={row["title"]}'
)
return v2_page
    def migrate_footers(self):
        """Migrate v1 FooterPage rows into v2 footer pages.

        Canonical pages are created first; rows in ``page_translation_map``
        are replayed afterwards (the ``for``/``else`` always runs — there is
        no ``break``) as wagtail-localize translations.
        """
        sql = f"select * " \
              f"from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cfp.articlepage_ptr_id = cap.page_ptr_id " \
              f"and cap.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        footer_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                # Translated page: defer until all canonical footers exist.
                footer_page_translations.append(row)
            else:
                self.create_footer(row)
        else:
            # Second pass: apply the deferred translations.
            for row in footer_page_translations:
                footer = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=footer)
                except:
                    # Broad catch keeps the migration running; the failure is reported.
                    self.post_migration_report_messages['untranslated_footers'].append(
                        f"Unable to translate footer, title={row['title']}"
                    )
                    continue
                translated_footer = footer.get_translation_or_none(locale)
                if translated_footer:
                    commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
                    image = self.image_map.get(row['image_id'])
                    # Copy the v1 page fields onto the freshly created translation.
                    translated_footer.image_icon = image
                    translated_footer.title = row['title']
                    translated_footer.slug = row['slug']
                    translated_footer.draft_title = row['draft_title']
                    translated_footer.live = row['live']
                    translated_footer.locked = row['locked']
                    translated_footer.go_live_at = row['go_live_at']
                    translated_footer.expire_at = row['expire_at']
                    translated_footer.first_published_at = row['first_published_at']
                    translated_footer.last_published_at = row['last_published_at']
                    translated_footer.search_description = row['search_description']
                    translated_footer.seo_title = row['seo_title']
                    translated_footer.commenting_status = commenting_status
                    translated_footer.commenting_starts_at = commenting_open_time
                    translated_footer.commenting_ends_at = commenting_close_time
                    translated_footer.latest_revision_created_at = row['latest_revision_created_at']
                    translated_footer.save()
                    if image:
                        # Footer images need manual review after migration.
                        self.post_migration_report_messages['footers_with_image'].append(
                            f'title: {translated_footer.title}. URL: {translated_footer.full_url}. '
                            f'Admin URL: {self.get_admin_url(translated_footer.id)}.'
                        )
                    # Record the v1->v2 mappings so later blocks/links can resolve.
                    V1ToV2ObjectMap.create_map(content_object=translated_footer, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_footer)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_footer
                    })
                    self.stdout.write(f"Translated footer, title={row['title']}")
        cur.close()
    def create_footer(self, row):
        """Create a canonical v2 footer from one v1 FooterPage row.

        Footers are stored as ``models.Article`` instances placed under the
        footer index page (see the ``path`` argument below).
        """
        commenting_status, commenting_open_time, commenting_close_time = self._get_commenting_fields(row)
        image = self.image_map.get(row['image_id'])
        footer = models.Article(
            image_icon=image,
            title=row['title'],
            draft_title=row['draft_title'],
            slug=row['slug'],
            # Re-root the v1 treebeard path under the v2 footer index
            # (drops the first 12 characters — TODO confirm depth assumption).
            path=self.footer_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            commenting_status=commenting_status,
            commenting_starts_at=commenting_open_time,
            commenting_ends_at=commenting_close_time,
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        footer.save()
        if image:
            # Footer images need manual review after migration.
            self.post_migration_report_messages['footers_with_image'].append(
                f'title: {footer.title}. URL: {footer.full_url}. Admin URL: {self.get_admin_url(footer.id)}.'
            )
        # Record the v1->v2 mappings so later blocks/links can resolve.
        V1ToV2ObjectMap.create_map(content_object=footer, v1_object_id=row['page_ptr_id'])
        V1PageURLToV2PageMap.create_map(url=row['url_path'], page=footer)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: footer
        })
        self.stdout.write(f"saved footer, title={footer.title}")
def load_page_translation_map(self):
sql = "select * " \
"from core_pagetranslation"
cur = self.db_query(sql)
for row in cur:
self.page_translation_map.update({
row['translated_page_id']: row['page_id'],
})
cur.close()
self.stdout.write('Page translation map loaded.')
def translate_page(self, locale, page):
translator = TranslationCreator(user=None, target_locales=[locale])
translator.create_translations(page)
def stop_translations(self):
Translation.objects.update(enabled=False)
self.stdout.write('Translations stopped.')
def migrate_polls(self):
sql = f"select * " \
f"from polls_pollsindexpage ppip, wagtailcore_page wcp " \
f"where ppip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_poll_index_page = cur.fetchone()
cur.close()
self._migrate_polls(v1_poll_index_page, self.poll_index_page)
sql = f"select * " \
f"from core_sectionindexpage csip, wagtailcore_page wcp " \
f"where csip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_section_index_page = cur.fetchone()
cur.close()
self._migrate_polls(v1_section_index_page, self.section_index_page)
    def _migrate_polls(self, v1_index_page, v2_index_page):
        """Migrate v1 poll questions found under *v1_index_page* into v2 Polls.

        Canonical polls are created first; rows in ``page_translation_map``
        are replayed afterwards (the ``for``/``else`` always runs — there is
        no ``break``) as wagtail-localize translations.
        """
        sql = f"select * " \
              f"from polls_question pq, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where pq.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"and wcp.path like '{v1_index_page['path']}%' " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        poll_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                # Translated page: defer until all canonical polls exist.
                poll_page_translations.append(row)
            else:
                self.create_poll(v2_index_page, row)
        else:
            # Second pass: apply the deferred translations.
            for row in poll_page_translations:
                poll = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=poll)
                except Exception as e:
                    # Report and keep migrating.
                    self.post_migration_report_messages['untranslated_polls'].append(
                        f"Unable to translate poll, title={row['title']}"
                    )
                    continue
                translated_poll = poll.get_translation_or_none(locale)
                if translated_poll:
                    # Copy the v1 page fields onto the freshly created translation.
                    translated_poll.title = row['title']
                    translated_poll.slug = row['slug']
                    translated_poll.draft_title = row['draft_title']
                    translated_poll.live = row['live']
                    translated_poll.result_as_percentage = row['result_as_percentage']
                    translated_poll.show_results = row['show_results']
                    translated_poll.locked = row['locked']
                    translated_poll.go_live_at = row['go_live_at']
                    translated_poll.expire_at = row['expire_at']
                    translated_poll.first_published_at = row['first_published_at']
                    translated_poll.last_published_at = row['last_published_at']
                    translated_poll.search_description = row['search_description']
                    translated_poll.seo_title = row['seo_title']
                    translated_poll.randomise_options = row['randomise_options']
                    translated_poll.allow_anonymous_submissions = False
                    translated_poll.allow_multiple_submissions = False
                    translated_poll.latest_revision_created_at = row['latest_revision_created_at']
                    translated_poll.save()
                    # Record the v1->v2 mappings so later blocks/links can resolve.
                    V1ToV2ObjectMap.create_map(content_object=translated_poll, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_poll)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_poll
                    })
                    # Strip the last 4-char treebeard step so the choice query
                    # matches children of this poll (TODO confirm step width).
                    row['path'] = row['path'][:-4]
                    self.migrate_poll_questions(translated_poll, row)
                    self.stdout.write(f"Translated poll, title={row['title']}")
        cur.close()
    def create_poll(self, v2_index_page, row):
        """Create a canonical v2 Poll from one v1 polls_question row.

        Save failures are reported and the poll skipped rather than aborting
        the whole migration.
        """
        poll = Poll(
            title=row['title'],
            draft_title=row['draft_title'],
            show_in_menus=True,
            slug=row['slug'],
            # Re-root the v1 treebeard path under the target v2 index page
            # (drops the first 12 characters — TODO confirm depth assumption).
            path=v2_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            show_results=row['show_results'],
            result_as_percentage=row['result_as_percentage'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            randomise_options=row['randomise_options'],
            allow_anonymous_submissions=False,
            allow_multiple_submissions=False,
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        try:
            poll.save()
            # Record the v1->v2 mappings so later blocks/links can resolve.
            V1ToV2ObjectMap.create_map(content_object=poll, v1_object_id=row['page_ptr_id'])
            V1PageURLToV2PageMap.create_map(url=row['url_path'], page=poll)
        except Exception as e:
            self.post_migration_report_messages['polls'].append(
                f"Unable to save poll, title={row['title']}"
            )
            return
        self.migrate_poll_questions(poll, row)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: poll
        })
        self.stdout.write(f"saved poll, title={poll.title}")
def migrate_poll_questions(self, poll, poll_row):
sql = f'select * ' \
f'from polls_choice pc, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl ' \
f'where pc.page_ptr_id = wcp.id ' \
f'and wcp.path like \'{poll_row["path"]}%\' ' \
f'and wcp.id = clr.page_id ' \
f'and clr.language_id = csl.id ' \
f'and csl.locale = \'{poll_row["locale"]}\' ' \
f'order by wcp.path'
cur = self.db_query(sql)
self.create_poll_question(poll, poll_row, cur)
cur.close()
    def create_poll_question(self, poll, poll_row, cur):
        """Rebuild the single form field of *poll* from the v1 choice rows in *cur*.

        Field type: no choices -> free-text 'multiline'; two or more choices
        -> 'checkboxes' or 'radio' depending on the v1 multiple-choice flag;
        exactly one choice is considered undeterminable and is reported.
        """
        # Replace any previously migrated field (makes the step re-runnable).
        PollFormField.objects.filter(page=poll).delete()
        choices = []
        for row in cur:
            choices.append(row['title'])
        choices_length = len(choices)
        if choices_length == 0:
            field_type = 'multiline'
        elif choices_length > 1:
            if poll_row['allow_multiple_choice']:
                field_type = 'checkboxes'
            else:
                field_type = 'radio'
        else:
            # Exactly one choice: report and skip this poll's field.
            self.post_migration_report_messages['poll_questions'].append(
                f'Unable to determine field type for poll={poll_row["title"]}.'
            )
            return
        choices = '|'.join(choices)
        poll_form_field = PollFormField.objects.create(
            page=poll, label=poll.title, field_type=field_type, choices=choices,
            admin_label=poll_row['short_name'] or poll.title)
        if choices:
            # Rewind the cursor to iterate the same choice rows a second time
            # and map each v1 choice id onto the new form field.
            cur.scroll(0, 'absolute')
            for row in cur:
                V1ToV2ObjectMap.create_map(content_object=poll_form_field, v1_object_id=row['page_ptr_id'])
        self.stdout.write(f"saved poll question, label={poll.title}")
def migrate_surveys(self):
sql = f"select * " \
f"from surveys_surveysindexpage ssip, wagtailcore_page wcp " \
f"where ssip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_survey_index_page = cur.fetchone()
cur.close()
self._migrate_surveys(v1_survey_index_page, self.survey_index_page)
sql = f"select * " \
f"from core_sectionindexpage csip, wagtailcore_page wcp " \
f"where csip.page_ptr_id = wcp.id " \
f"order by wcp.path"
cur = self.db_query(sql)
v1_section_index_page = cur.fetchone()
cur.close()
self._migrate_surveys(v1_section_index_page, self.section_index_page)
    def _migrate_surveys(self, v1_index_page, v2_index_page):
        """Migrate v1 survey pages found under *v1_index_page* into v2 Surveys.

        Canonical surveys are created first; rows in ``page_translation_map``
        are replayed afterwards (the ``for``/``else`` always runs — there is
        no ``break``) as wagtail-localize translations.
        """
        sql = f"select * " \
              f"from surveys_molosurveypage smsp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where smsp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"and wcp.path like '{v1_index_page['path']}%' " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        survey_page_translations = []
        for row in cur:
            if row['page_ptr_id'] in self.page_translation_map:
                # Translated page: defer until all canonical surveys exist.
                survey_page_translations.append(row)
            else:
                self.create_survey(v2_index_page, row)
        else:
            # Second pass: apply the deferred translations.
            for row in survey_page_translations:
                survey = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
                locale, __ = Locale.objects.get_or_create(language_code=self._get_iso_locale(row['locale']))
                try:
                    self.translate_page(locale=locale, page=survey)
                except Exception as e:
                    # Report and keep migrating.
                    self.post_migration_report_messages['untranslated_surveys'].append(
                        f"Unable to translate survey, title={row['title']}"
                    )
                    continue
                translated_survey = survey.get_translation_or_none(locale)
                if translated_survey:
                    # Copy the v1 page fields onto the freshly created translation.
                    translated_survey.title = row['title']
                    translated_survey.slug = row['slug']
                    translated_survey.draft_title = row['draft_title']
                    translated_survey.live = row['live']
                    translated_survey.thank_you_text = self.map_survey_thank_you_text(row)
                    translated_survey.allow_anonymous_submissions = row['allow_anonymous_submissions']
                    translated_survey.allow_multiple_submissions = row['allow_multiple_submissions_per_user']
                    # v2 field is capped at 40 chars; truncation is reported below.
                    translated_survey.submit_button_text = row['submit_text'][:40] if row['submit_text'] else 'Submit'
                    translated_survey.direct_display = row['display_survey_directly']
                    translated_survey.multi_step = row['multi_step']
                    translated_survey.locked = row['locked']
                    translated_survey.go_live_at = row['go_live_at']
                    translated_survey.expire_at = row['expire_at']
                    translated_survey.first_published_at = row['first_published_at']
                    translated_survey.last_published_at = row['last_published_at']
                    translated_survey.search_description = row['search_description']
                    translated_survey.seo_title = row['seo_title']
                    translated_survey.index_page_description = row['homepage_introduction']
                    translated_survey.index_page_description_line_2 = row['homepage_button_text']
                    translated_survey.terms_and_conditions = self.map_survey_terms_and_conditions(row)
                    translated_survey.latest_revision_created_at = row['latest_revision_created_at']
                    translated_survey.save()
                    if row['submit_text'] and len(row['submit_text']) > 40:
                        self.post_migration_report_messages['truncated_submit_button_text'].append(
                            f'title: {translated_survey.title}. URL: {translated_survey.full_url}. '
                            f'Admin URL: {self.get_admin_url(translated_survey.id)}. '
                            f'Full text: {row["submit_text"]}.'
                        )
                    # Record the v1->v2 mappings so later blocks/links can resolve.
                    V1ToV2ObjectMap.create_map(content_object=translated_survey, v1_object_id=row['page_ptr_id'])
                    V1PageURLToV2PageMap.create_map(url=row['url_path'], page=translated_survey)
                    self.v1_to_v2_page_map.update({
                        row['page_ptr_id']: translated_survey
                    })
                    self.migrate_survey_questions(translated_survey, row)
                    self.stdout.write(f"Translated survey, title={row['title']}")
        cur.close()
    def create_survey(self, v2_index_page, row):
        """Create a canonical v2 Survey from one v1 molosurveypage row.

        Save failures are reported and the survey skipped rather than
        aborting the whole migration.
        """
        survey = Survey(
            title=row['title'],
            draft_title=row['draft_title'],
            show_in_menus=True,
            slug=row['slug'],
            # Re-root the v1 treebeard path under the target v2 index page
            # (drops the first 12 characters — TODO confirm depth assumption).
            path=v2_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            thank_you_text=self.map_survey_thank_you_text(row),
            allow_anonymous_submissions=row['allow_anonymous_submissions'],
            allow_multiple_submissions=row['allow_multiple_submissions_per_user'],
            # v2 field is capped at 40 chars; truncation is reported below.
            submit_button_text=row['submit_text'][:40] if row['submit_text'] else 'Submit',
            direct_display=row['display_survey_directly'],
            multi_step=row['multi_step'],
            locked=row['locked'],
            go_live_at=row['go_live_at'],
            expire_at=row['expire_at'],
            first_published_at=row['first_published_at'],
            last_published_at=row['last_published_at'],
            search_description=row['search_description'],
            seo_title=row['seo_title'],
            index_page_description=row['homepage_introduction'],
            index_page_description_line_2=row['homepage_button_text'],
            terms_and_conditions=self.map_survey_terms_and_conditions(row),
            latest_revision_created_at=row['latest_revision_created_at'],
        )
        try:
            survey.save()
            if row['submit_text'] and len(row['submit_text']) > 40:
                self.post_migration_report_messages['truncated_submit_button_text'].append(
                    f'title: {survey.title}. URL: {survey.full_url}. '
                    f'Admin URL: {self.get_admin_url(survey.id)}. '
                    f'Full text: {row["submit_text"]}.'
                )
            # Record the v1->v2 mappings so later blocks/links can resolve.
            V1ToV2ObjectMap.create_map(content_object=survey, v1_object_id=row['page_ptr_id'])
            V1PageURLToV2PageMap.create_map(url=row['url_path'], page=survey)
        except Exception as e:
            self.post_migration_report_messages['surveys'].append(
                f"Unable to save survey, title={row['title']}"
            )
            return
        self.migrate_survey_questions(survey, row)
        self.v1_to_v2_page_map.update({
            row['page_ptr_id']: survey
        })
        self.stdout.write(f"saved survey, title={survey.title}")
def map_survey_description(self, row):
v1_survey_description = json.loads(row['description'])
v2_survey_description = self._map_body('surveys', row, v1_survey_description)
if row['introduction']:
v2_survey_description = [{
'type': 'paragraph',
'value': row['introduction'],
}] + v2_survey_description
return json.dumps(v2_survey_description)
def map_survey_thank_you_text(self, row):
v2_thank_you_text = []
if row['thank_you_text']:
v2_thank_you_text.append({'type': 'paragraph', 'value': row['thank_you_text']})
return json.dumps(v2_thank_you_text)
    def map_survey_terms_and_conditions(self, row):
        """Build v2 terms-and-conditions stream JSON for one survey row.

        Returns a JSON 'page_button' block pointing at the migrated T&C
        page, or ``None`` (implicitly) when the v1 survey has none.
        """
        sql = f'select * ' \
              f'from surveys_surveytermsconditions stc, surveys_molosurveypage msp, wagtailcore_page wcp ' \
              f'where stc.page_id = msp.page_ptr_id ' \
              f'and stc.terms_and_conditions_id = wcp.id ' \
              f'and stc.page_id = {row["page_ptr_id"]} ' \
              f'order by wcp.path'
        cur = self.db_query(sql)
        v1_term_and_condition = cur.fetchone()
        cur.close()
        if v1_term_and_condition:
            # NOTE(review): direct [] lookup — a T&C page that was never
            # migrated would raise KeyError here; confirm ordering of the
            # migration steps guarantees the page is already mapped.
            return json.dumps([
                {
                    "type": "page_button",
                    "value": {
                        "page": self.v1_to_v2_page_map[v1_term_and_condition["terms_and_conditions_id"]].id,
                    },
                },
            ])
    def migrate_survey_questions(self, survey, survey_row):
        """Migrate all v1 form fields of ``survey_row`` onto the v2 ``survey``.

        Fetches the v1 form-field rows (joined to page/language tables, in
        tree-path order) and delegates creation to ``create_survey_question``.
        """
        sql = f'select *, smsff.id as smsffid ' \
              f'from surveys_molosurveyformfield smsff, surveys_molosurveypage smsp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl ' \
              f'where smsff.page_id = smsp.page_ptr_id ' \
              f'and smsp.page_ptr_id = wcp.id ' \
              f'and wcp.id = clr.page_id ' \
              f'and clr.language_id = csl.id ' \
              f'and wcp.id = {survey_row["page_ptr_id"]} ' \
              f'order by wcp.path'
        cur = self.db_query(sql)
        self.create_survey_question(survey, survey_row, cur)
        cur.close()
    def create_survey_question(self, survey, survey_row, cur):
        """Create v2 ``SurveyFormField`` rows for every v1 form-field row in ``cur``.

        Existing fields on ``survey`` are deleted first so re-running the
        migration is idempotent. Surveys that use skip logic without
        multi-step are recorded in the post-migration report.
        """
        SurveyFormField.objects.filter(page=survey).delete()
        for row in cur:
            # v1 used 'positive_number'; the v2 field type has no underscore.
            field_type = 'positivenumber' if row['field_type'] == 'positive_number' else row['field_type']
            # NOTE(review): assumes row['choices'] and row['skip_logic'] are
            # always non-NULL strings in the v1 schema — TODO confirm.
            survey_form_field = SurveyFormField.objects.create(
                page=survey, sort_order=row['sort_order'], label=row['label'], required=row['required'],
                default_value=row['default_value'], help_text=row['help_text'], field_type=field_type,
                admin_label=row['admin_label'], page_break=row['page_break'],
                choices='|'.join(row['choices'].split(',')), skip_logic=row['skip_logic']
            )
            V1ToV2ObjectMap.create_map(content_object=survey_form_field, v1_object_id=row['smsffid'])
            skip_logic_next_actions = [logic['value']['skip_logic'] for logic in json.loads(row['skip_logic'])]
            if not survey_row['multi_step'] and (
                    'end' in skip_logic_next_actions or 'question' in skip_logic_next_actions):
                self.post_migration_report_messages['survey_multistep'].append(
                    f'skip logic without multi step'
                )
            self.stdout.write(f"saved survey question, label={row['label']}")
def _get_iso_locale(self, locale):
iso_locales_map = {
'sho': 'sn',
'ch': 'ny',
}
return iso_locales_map.get(locale, locale)
def translate_home_pages(self):
locales = Locale.objects.all()
for locale in locales:
self.translate_page(locale=locale, page=self.home_page)
translated_home_page = self.home_page.get_translation_or_none(locale)
if translated_home_page:
translated_home_page.title = f"{translated_home_page.title} [{str(locale)}]"
translated_home_page.draft_title = f"{translated_home_page.draft_title} [{str(locale)}]"
translated_home_page.save()
def translate_index_pages(self):
index_pages = [
self.section_index_page, self.banner_index_page, self.footer_index_page, self.poll_index_page,
self.survey_index_page, self.quiz_index_page, self.miscellaneous_index_page,
]
locales = Locale.objects.all()
for page in index_pages:
for locale in locales:
self.translate_page(locale=locale, page=page)
    def migrate_recommended_articles_for_article(self):
        """Recreate v1 recommended-article links as ``ArticleRecommendation`` rows.

        Iterates every v1 article that has recommendations; a link is only
        created when both ends of the relation were migrated (present in
        ``v1_to_v2_page_map``) — unmapped pages are silently skipped.
        """
        article_cur = self.db_query(f'select DISTINCT page_id from core_articlepagerecommendedsections')
        for article_row in article_cur:
            v1_article_id = article_row['page_id']
            v2_article = self.v1_to_v2_page_map.get(v1_article_id)
            if v2_article:
                cur = self.db_query(
                    f'select * from core_articlepagerecommendedsections where page_id = {v1_article_id} and recommended_article_id is not null')
                for row in cur:
                    v2_recommended_article = self.v1_to_v2_page_map.get(row['recommended_article_id'])
                    if v2_recommended_article:
                        models.ArticleRecommendation.objects.create(
                            sort_order=row['sort_order'],
                            article=v2_recommended_article,
                            source=v2_article
                        )
                cur.close()
        article_cur.close()
        self.stdout.write('Recommended articles migrated')
    def migrate_featured_articles_for_homepage(self):
        """Populate each translated home page's featured content from v1 data.

        For every v1 site language, collects live articles that were featured
        on the v1 home page (``featured_in_homepage_start_date`` set; for
        translations the source article's date is used), groups them by
        section (the first 16 characters of the page tree path), and adds up
        to five most-recently-featured articles per section — plus the
        section itself — to the translated home page. Overflow articles are
        recorded in the post-migration report.
        """
        locale_cur = self.db_query(f"select * from core_sitelanguage")
        for locale_row in locale_cur:
            articles_cur = self.db_query(
                f"select * "
                f"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl "
                f"where cap.page_ptr_id = wcp.id "
                f"and wcp.id = clr.page_id "
                f"and clr.language_id = csl.id "
                f"and wcp.live = true "
                f"and csl.locale = '{locale_row['locale']}' "
                f"order by left(wcp.path, 16) "
            )
            articles_list = []
            for article_row in articles_cur:
                translated_from_page_id = self.page_translation_map.get(article_row['page_ptr_id'])
                featured_in_homepage_start_date = article_row['featured_in_homepage_start_date']
                if translated_from_page_id:
                    translated_from_article_cur = self.db_query(
                        f'select * from core_articlepage where page_ptr_id = {translated_from_page_id}')
                    translated_from_article_row = translated_from_article_cur.fetchone()
                    translated_from_article_cur.close()
                    # For translated articles, only the date of the translated from matters
                    featured_in_homepage_start_date = translated_from_article_row['featured_in_homepage_start_date']
                if featured_in_homepage_start_date:
                    article = self.v1_to_v2_page_map.get(article_row['page_ptr_id'])
                    if article:
                        article.featured_in_homepage_start_date = featured_in_homepage_start_date
                        articles_list.append(article)
            articles_cur.close()
            # Two-pass stable sort: newest-featured first, then grouped by the
            # 16-char path prefix (the date order is preserved inside groups).
            articles_list = sorted(articles_list, key=lambda x: x.featured_in_homepage_start_date, reverse=True)
            articles_list = sorted(articles_list, key=lambda x: x.path[:16])
            article_groups = defaultdict(list)
            for article in articles_list:
                article_groups[article.path[:16]].append(article)
            for k, v in article_groups.items():
                for i, article in enumerate(v):
                    if i < 5:
                        self.add_article_as_featured_content_in_home_page(article)
                    else:
                        self.post_migration_report_messages['ommitted_old_featured_article'].append(
                            f'title: {article.title}. URL: {article.full_url}. '
                            f'Admin URL: {self.get_admin_url(article.id)}. '
                            f'featured since: {article.featured_in_homepage_start_date}.'
                        )
                # assumes the 16-char path prefix resolves to exactly one v2
                # Section — TODO confirm; .get raises otherwise.
                section = models.Section.objects.get(path=k)
                self.add_section_as_featured_content_in_home_page(section)
        locale_cur.close()
def add_article_as_featured_content_in_home_page(self, article):
home_page = self.home_page.get_translation_or_none(article.locale)
if home_page:
home_featured_content = home_page.home_featured_content.stream_data
home_featured_content.append({
'type': 'article',
'value': {
'article': article.id,
'display_section_title': True,
},
})
home_page.save()
def add_section_as_featured_content_in_home_page(self, section):
home_page = self.home_page.get_translation_or_none(section.locale)
if home_page:
home_featured_content = home_page.home_featured_content.stream_data
home_featured_content.append({
'type': 'page_button',
'value': {
'page': section.id,
'text': '',
},
})
home_page.save()
    def attach_banners_to_home_page(self):
        """Link each migrated v2 banner to the home page it lives under.

        Walks all v1 banner pages in tree order and creates a
        ``HomePageBanner`` for every banner that was migrated, attaching it to
        the nearest ``HomePage`` ancestor of the v2 banner.
        """
        sql = f"select * " \
              f"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cbp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        for row in cur:
            v2_banner = self.v1_to_v2_page_map.get(row['page_ptr_id'])
            if v2_banner:
                # assumes every migrated banner has a HomePage ancestor —
                # .first() would return None otherwise; TODO confirm.
                home_page = v2_banner.get_ancestors().exact_type(models.HomePage).first().specific
                models.HomePageBanner.objects.create(source=home_page, banner_page=v2_banner)
        cur.close()
def get_color_hex(self, color_name):
return {
'--tiber': '#07292F',
'--mecury': '#eae9e9',
'--light_scampi': '#685FA1',
'--dove_gray': '#737373',
'--mineral_gray': '#dedede',
'--washed_gray': '#f1f1f1',
'--brown': '#a03321',
'--medium_red_violet': '#B62A99',
'--dark_medium_red_violet': '#b43393',
'--violet_blue': '#a54f9e',
'--mandy': '#E24256',
'--plum': '#7e2268',
'--wisteria': '#8e68ad',
'--grape': '#541c56',
'--paris_m': '#202855',
'--east_bay': '#4E4682',
'--victoria': '#4D4391',
'--scampi': '#685FA1',
'--sandybrown': '#EF9955',
'--jaffa': '#ee8c39',
'--saffron': '#F2B438',
'--saffron_light': '#f2b437',
'--cinnabar': '#EC3B3A',
'--cinnabar_dark': '#ee5523',
'--cardinal': '#bf2026',
'--pomegranate': '#ed3330',
'--roman': '#DF6859',
'--mauvelous': '#F38AA5',
'--beed_blush': '#e764a0',
'--maxican_red': '#a21d2e',
'--kobi': '#d481b5',
'--illusion': '#ee97ac',
'--celery': '#A4CE55',
'--de_york': '#6EC17F',
'--eucalyptus': '#2A9B58',
'--tradewind': '#4bab99',
'--moss_green': '#b3d9a1',
'--danube': '#6093CD',
'--light_danube': '#627abc',
'--indigo': '#5F7AC9',
'--mariner': '#4759a6',
'--robin_egg_blue': '#00BFC6',
'--pelorous': '#37BFBE',
'--iris_blue': '#03acc3',
'--red_berry': '#711e29',
'--bay_of_may': '#2b378c',
'--viking': '#3bbfbd',
'--denim': '#127f99',
'--tory_blue': '#134b90',
}.get(color_name)
    def fix_articles_body(self):
        """Re-map the body of already-migrated articles under one subtree.

        Only v1 pages whose tree path starts with '000100010002' are
        re-processed; articles that were never mapped are recorded in the
        post-migration report.
        """
        sql = f"select * " \
              f"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cap.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"and wcp.path like '000100010002%' " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        for row in cur:
            v2_article = self.v1_to_v2_page_map.get(row['page_ptr_id'])
            if v2_article:
                v2_article.refresh_from_db()
                v2_article.body = self.map_article_body(row)
                v2_article.save()
            else:
                self.post_migration_report_messages['articles'].append(
                    f'Unable to add article body, title={row["title"]}'
                )
        cur.close()
    def fix_footers_body(self):
        """Re-map the body of every migrated footer page.

        NOTE(review): unlike ``fix_articles_body`` there is no report entry
        for unmapped footers — confirm the silent skip is intentional.
        """
        sql = f"select * " \
              f"from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cfp.articlepage_ptr_id = cap.page_ptr_id " \
              f"and cap.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        for row in cur:
            v2_footer = self.v1_to_v2_page_map.get(row['page_ptr_id'])
            if v2_footer:
                v2_footer.refresh_from_db()
                v2_footer.body = self.map_article_body(row)
                v2_footer.save()
        cur.close()
    def fix_survey_description(self):
        """Re-map the description of every migrated survey page."""
        sql = f"select * " \
              f"from surveys_molosurveypage smsp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where smsp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        for row in cur:
            v2_survey = self.v1_to_v2_page_map.get(row['page_ptr_id'])
            if v2_survey:
                v2_survey.refresh_from_db()
                v2_survey.description = self.map_survey_description(row)
                v2_survey.save()
        cur.close()
    def fix_banner_link_page(self):
        """Re-resolve the linked page of every migrated banner."""
        sql = f"select * " \
              f"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
              f"where cbp.page_ptr_id = wcp.id " \
              f"and wcp.id = clr.page_id " \
              f"and clr.language_id = csl.id " \
              f"order by wcp.path"
        cur = self.db_query(sql)
        for row in cur:
            v2_banner = self.v1_to_v2_page_map.get(row['page_ptr_id'])
            if v2_banner:
                v2_banner.refresh_from_db()
                v2_banner.banner_link_page = self.map_banner_page(row)
                v2_banner.save()
        cur.close()
def add_polls_from_polls_index_page_to_footer_index_page_as_page_link_page(self):
self.poll_index_page.refresh_from_db()
self.footer_index_page.refresh_from_db()
file = File(open(Path(settings.BASE_DIR) / 'iogt/static/icons/clip_board_pen.svg'), name='clip_board_pen.svg')
icon = Svg.objects.create(title='clip board pen', file=file)
poll_index_pages = self.poll_index_page.get_translations(inclusive=True)
for poll_index_page in poll_index_pages:
polls = poll_index_page.get_children()
for poll in polls:
page_link_page = models.PageLinkPage(title=poll.title, page=poll, icon=icon, live=poll.live)
footer_index_page = self.footer_index_page.get_translation_or_none(poll.locale)
footer_index_page.add_child(instance=page_link_page)
self.stdout.write('Added polls from poll index page to footer index page as page link page.')
def add_surveys_from_surveys_index_page_to_footer_index_page_as_page_link_page(self):
self.survey_index_page.refresh_from_db()
self.footer_index_page.refresh_from_db()
file = File(open(Path(settings.BASE_DIR) / 'iogt/static/icons/loud_speaker.svg'), name='loud_speaker.svg')
icon = Svg.objects.create(title='loud speaker', file=file)
survey_index_page = self.survey_index_page.get_translations(inclusive=True)
for survey_index_page in survey_index_page:
surveys = survey_index_page.get_children()
for survey in surveys:
page_link_page = models.PageLinkPage(title=survey.title, page=survey, icon=icon, live=survey.live)
footer_index_page = self.footer_index_page.get_translation_or_none(survey.locale)
footer_index_page.add_child(instance=page_link_page)
self.stdout.write('Added surveys from survey index page to footer index page as page link page.')
    def mark_pages_which_are_not_translated_in_v1_as_draft(self):
        """Unpublish translation aliases that had no real v1 translation.

        Pages created purely as aliases by the translation step
        (``alias_of`` set) are marked ``live=False``, except the index pages
        and their translations which must stay published.
        """
        self.section_index_page.refresh_from_db()
        self.banner_index_page.refresh_from_db()
        self.footer_index_page.refresh_from_db()
        self.poll_index_page.refresh_from_db()
        self.survey_index_page.refresh_from_db()
        self.quiz_index_page.refresh_from_db()
        # Collect the ids of all index pages and their translations; these
        # are exempt from the bulk unpublish below.
        page_ids_to_exclude = []
        page_ids_to_exclude += self.section_index_page.get_translations(inclusive=True).values_list('id', flat=True)
        page_ids_to_exclude += self.banner_index_page.get_translations(inclusive=True).values_list('id', flat=True)
        page_ids_to_exclude += self.footer_index_page.get_translations(inclusive=True).values_list('id', flat=True)
        page_ids_to_exclude += self.poll_index_page.get_translations(inclusive=True).values_list('id', flat=True)
        page_ids_to_exclude += self.survey_index_page.get_translations(inclusive=True).values_list('id', flat=True)
        page_ids_to_exclude += self.quiz_index_page.get_translations(inclusive=True).values_list('id', flat=True)
        Page.objects.filter(alias_of__isnull=False).exclude(id__in=page_ids_to_exclude).update(live=False)
def migrate_social_media_links(self):
self.footer_index_page.refresh_from_db()
sql = f'select * from core_sitesettings'
cur = self.db_query(sql)
for row in cur:
social_media_links = json.loads(row['social_media_links_on_footer_page'])
for social_media_link in social_media_links:
block_value = social_media_link.get('value')
if block_value:
page_link_page_data = {
'title': block_value.get('title'),
'external_link': block_value.get('link'),
}
v2_image = self.image_map.get(block_value.get('image'))
if v2_image:
page_link_page_data.update({'image_icon_id': v2_image.id})
page_link_page = models.PageLinkPage(**page_link_page_data)
self.footer_index_page.add_child(instance=page_link_page)
    def migrate_page_revisions(self):
        """Recreate v1 page revisions for pages that were migrated.

        All existing v2 revisions are wiped first; revisions of pages that
        were never mapped are dropped.
        """
        PageRevision.objects.all().delete()
        sql = f"select * " \
              f"from wagtailcore_pagerevision wcpr"
        cur = self.db_query(sql)
        for row in cur:
            v2_page = self.v1_to_v2_page_map.get(row['page_id'])
            if v2_page:
                # NOTE(review): content_json still carries v1 field data —
                # confirm these revisions are never restored verbatim in v2.
                page_revision = PageRevision.objects.create(
                    page=v2_page,
                    submitted_for_moderation=row['submitted_for_moderation'],
                    created_at=row['created_at'],
                    content_json=row['content_json'],
                    approved_go_live_at=row['approved_go_live_at'],
                )
                V1ToV2ObjectMap.create_map(page_revision, row['id'])
        cur.close()
    def add_polls_from_polls_index_page_to_home_page_featured_content(self):
        """Embed every live poll on the matching translated home page.

        For each translated poll index page, appends an ``embedded_poll``
        block per live child poll to that locale's home page featured content.
        """
        self.poll_index_page.refresh_from_db()
        self.home_page.refresh_from_db()
        poll_index_pages = self.poll_index_page.get_translations(inclusive=True)
        for poll_index_page in poll_index_pages:
            # NOTE(review): get_translation_or_none may return None, which
            # would raise AttributeError below — confirm every locale has a
            # home-page translation by this point.
            home_page = self.home_page.get_translation_or_none(poll_index_page.locale)
            home_featured_content = home_page.home_featured_content.stream_data
            polls = poll_index_page.get_children().live()
            for poll in polls:
                home_featured_content.append({
                    'type': 'embedded_poll',
                    'value': {
                        'direct_display': True,
                        'poll': poll.id,
                    },
                })
            home_page.home_featured_content = json.dumps(home_featured_content)
            home_page.save()
        self.stdout.write('Added polls from poll index page to home page featured content.')
    def add_surveys_from_surveys_index_page_to_home_page_featured_content(self):
        """Embed every live survey on the matching translated home page.

        For each translated survey index page, appends an ``embedded_survey``
        block per live child survey to that locale's home page featured
        content, keeping each survey's own direct-display preference.
        """
        self.survey_index_page.refresh_from_db()
        self.home_page.refresh_from_db()
        survey_index_pages = self.survey_index_page.get_translations(inclusive=True)
        for survey_index_page in survey_index_pages:
            # NOTE(review): get_translation_or_none may return None, which
            # would raise AttributeError below — confirm every locale has a
            # home-page translation by this point.
            home_page = self.home_page.get_translation_or_none(survey_index_page.locale)
            home_featured_content = home_page.home_featured_content.stream_data
            surveys = survey_index_page.get_children().live()
            for survey in surveys:
                home_featured_content.append({
                    'type': 'embedded_survey',
                    'value': {
                        'direct_display': survey.specific.direct_display,
                        'survey': survey.id,
                    },
                })
            home_page.home_featured_content = json.dumps(home_featured_content)
            home_page.save()
        self.stdout.write('Added surveys from survey index page to home page featured content.')
def migrate_article_related_sections(self):
cur = self.db_query('select * from core_articlepagerelatedsections caprs')
sections = defaultdict(list)
for row in cur:
section = self.v1_to_v2_page_map.get(row['section_id'])
article = self.v1_to_v2_page_map.get(row['page_id'])
if (not section) or (not article):
self.post_migration_report_messages['articles_in_related_sections'].append(
f"Couldn't find v2 page for v1 section: {row['section_id']} and article: {row['page_id']}"
)
continue
section.refresh_from_db()
article.refresh_from_db()
page_link_page = models.PageLinkPage(title=article.title, page=article, live=article.live)
section.add_child(instance=page_link_page)
page = Page.objects.get(id=page_link_page.id)
self.move_page(page_to_move=page, position=0)
sections[section.id].append(article.title)
for k, v in sections.items():
page = Page.objects.get(id=k)
self.post_migration_report_messages['unordered_related_articles_in_section'].append(
f"title: {page.title}. URL: {page.full_url}. Admin URL: {self.get_admin_url(page.id)}. "
f"articles: {', '.join(v)}"
)
def move_footers_to_end_of_footer_index_page(self):
footer_index_pages = self.footer_index_page.get_translations(inclusive=True)
for footer_index_page in footer_index_pages:
footer_index_page_children = footer_index_page.get_children()
articles = footer_index_page_children.exact_type(models.Article)
for article in articles:
self.move_page(page_to_move=article, position=footer_index_page_children.count())
    def move_page(self, page_to_move, position):
        """Move ``page_to_move`` to sibling index ``position`` under its parent.

        Mirrors Wagtail's admin ordering logic: the page is inserted to the
        left or right of the sibling currently occupying ``position``
        depending on the direction of the move, or appended as the last
        child when the position is past the end of the sibling list.
        """
        parent_page = page_to_move.get_parent()
        # Find page that is already in this position
        position_page = None
        if position is not None:
            try:
                position_page = parent_page.get_children()[int(position)]
            except IndexError:
                pass  # No page in this position
        # Move page
        # any invalid moves *should* be caught by the permission check above,
        # so don't bother to catch InvalidMoveToDescendant
        if position_page:
            # If the page has been moved to the right, insert it to the
            # right. If left, then left.
            old_position = list(parent_page.get_children()).index(page_to_move)
            if int(position) < old_position:
                page_to_move.move(position_page, pos='left')
            elif int(position) > old_position:
                page_to_move.move(position_page, pos='right')
        else:
            # Move page to end
            page_to_move.move(parent_page, pos='last-child')
def _sort_articles(self):
pages = models.Section.objects.all().order_by('path')
for page in pages:
page.refresh_from_db()
articles = page.get_children().type(models.Article)
children_list = []
for article in articles:
try:
v1_id = V1ToV2ObjectMap.get_v1_id(article.specific, article.id)
except:
continue
if v1_id:
translated_from_page_id = self.page_translation_map.get(v1_id)
if translated_from_page_id:
v1_id = translated_from_page_id
cur = self.db_query(f'select * from wagtailcore_page wcp where id = {v1_id}')
v1_row = cur.fetchone()
cur.close()
setattr(article, 'creation_date', v1_row['first_published_at'])
else:
setattr(article, 'creation_date', None)
children_list.append(article)
children_list = sorted(
children_list, key=lambda x: (x.creation_date is not None, x.creation_date))
for article in children_list:
article.refresh_from_db()
article.move(page, pos='first-child')
def _sort_sections(self):
locales = Locale.objects.all()
for locale in locales:
pages = models.Section.objects.filter(locale=locale).order_by('path')
for page in pages:
page.refresh_from_db()
try:
v1_id = V1ToV2ObjectMap.get_v1_id(page.specific, page.id)
except:
continue
translated_from_page_id = self.page_translation_map.get(v1_id)
if not translated_from_page_id:
continue
translated_from_page = self.v1_to_v2_page_map.get(translated_from_page_id)
if not translated_from_page:
continue
translated_from_page.refresh_from_db()
translated_from_sub_sections = translated_from_page.get_children().type(models.Section)
translated_sub_sections = page.get_children().type(models.Section)
if translated_sub_sections:
index_to_move = list(page.get_children()).index(translated_sub_sections.first())
for child in translated_from_sub_sections:
child.refresh_from_db()
translated_sub_section = child.get_translation_or_none(locale)
if translated_sub_section:
self.move_page(page_to_move=translated_sub_section, position=index_to_move)
index_to_move += 1
def sort_pages(self):
if self.sort != 'type1':
return
self._sort_sections()
self._sort_articles()
self.stdout.write('Pages sorted.')
def populate_registration_survey_translations(self):
with open(f'{settings.BASE_DIR}/iogt_content_migration/files/registration_survey_translations.csv',
newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
str_key = self._get_iso_locale(row.pop('str'))
self.registration_survey_translations[str_key] = row
    def translate_default_survey_submit_button_text(self):
        """Replace the default English 'Submit' button text with its translation.

        Only surveys whose button still reads exactly 'Submit' are touched.
        Missing translations are reported and the text falls back to
        'Submit'; translations longer than the 40-char field limit are
        truncated.
        """
        surveys = Survey.objects.all()
        for survey in surveys:
            if survey.submit_button_text == 'Submit':
                # Technically, someone could have manually put 'Submit' on a non-English button,
                # which we would now translate even though we shouldn't.
                # This is quite unlikely though.
                # NOTE(review): raises KeyError for language codes missing
                # from the translations CSV — confirm all locales are covered.
                submit_button_text = self.registration_survey_translations['submit_button_text'][survey.locale.language_code]
                if not submit_button_text:
                    self.post_migration_report_messages['untranslated_survey_button'].append(
                        f'title: {survey.title}. URL: {survey.full_url}. '
                        f'Admin URL: {self.get_admin_url(survey.id)}.'
                    )
                if submit_button_text and len(submit_button_text) > 40:
                    # This should never happen in practice as we provide submit_button_text
                    self.stdout.write(f"Truncated default submit button text, title={survey.title}")
                survey.submit_button_text = submit_button_text[:40] if submit_button_text else 'Submit'
                survey.save()
    def migrate_post_registration_survey(self):
        """Create the post-registration survey and its translations.

        Builds a 'Registration Survey' from the v1 default-site profile
        settings (only activated profile fields become form fields),
        registers it as the site-wide registration survey, then translates
        the survey — field labels, help texts and the submit button — into
        every locale using the bundled translation CSV.
        """
        sql = 'select * from profiles_userprofilessettings pups ' \
              'inner join wagtailcore_site ws on pups.site_id = ws.id ' \
              'where is_default_site = true'
        cur = self.db_query(sql)
        # NOTE(review): the cursor is never closed here, unlike the other
        # helpers in this command — confirm whether that is intentional.
        row = cur.fetchone()
        survey = Survey(
            title='Registration Survey', live=True, allow_multiple_submissions=True,
            allow_anonymous_submissions=False, submit_button_text='Register')
        self.survey_index_page.add_child(instance=survey)
        # One SurveyFormField per v1 profile field that was switched on.
        for (should_add_field_key, translation_key, is_required_key, field_type, admin_label) in [
            ('activate_dob', 'dob', 'dob_required', 'date', 'date_of_birth'),
            ('activate_gender', 'gender', 'gender_required', 'singleline', 'gender'),
            ('activate_location', 'location', 'location_required', 'singleline', 'location'),
            ('activate_education_level', 'education_level', 'activate_education_level_required', 'singleline',
             'education_level'),
            ('show_mobile_number_field', 'mobile_number', 'mobile_number_required', 'singleline', 'mobile_number'),
            ('show_email_field', 'email_address', 'email_required', 'email', 'email'),
        ]:
            if row[should_add_field_key]:
                SurveyFormField.objects.create(
                    page=survey,
                    label=self.registration_survey_translations[translation_key]['en'],
                    required=bool(row[is_required_key]),
                    field_type=field_type,
                    admin_label=admin_label,
                    help_text=self.registration_survey_translations[f'{translation_key}_helptext']['en']
                )
        self.stdout.write('Successfully migrated post registration survey')
        default_site_settings = models.SiteSettings.get_for_default_site()
        default_site_settings.registration_survey = survey
        default_site_settings.save()
        for locale in Locale.objects.all():
            try:
                self.translate_page(locale=locale, page=survey)
                translated_survey = survey.get_translation_or_none(locale)
            except Exception as e:
                self.post_migration_report_messages['registration_survey'].append(
                    f"Unable to translate survey, title={survey.title} to locale={locale}"
                )
                continue
            submit_button_text = self.registration_survey_translations['register_button_text'][locale.language_code]
            if not submit_button_text:
                self.post_migration_report_messages['registration_survey_translation_not_found'].append(
                    f'No translation for submit button of registration survey to locale: {locale}'
                )
            if submit_button_text and len(submit_button_text) > 40:
                # This should never happen in practice as we provide submit_button_text
                self.stdout.write(f"Truncated survey submit button text, title={translated_survey.title}")
            # NOTE(review): get_translation_or_none may return None, which
            # would raise on the attribute accesses below — confirm
            # translate_page always creates the translation.
            translated_survey.submit_button_text = submit_button_text[:40] if submit_button_text else 'Register'
            translated_survey.save()
            if translated_survey:
                # Translate label/help text of each (possibly present) field.
                for (admin_label, label_identifier) in [
                    ('date_of_birth', 'dob'),
                    ('gender', 'gender'),
                    ('location', 'location'),
                    ('mobile_number', 'mobile_number'),
                    ('education_level', 'education_level'),
                    ('email', 'email_address')
                ]:
                    try:
                        field = SurveyFormField.objects.get(page=translated_survey, admin_label=admin_label)
                    except SurveyFormField.DoesNotExist:
                        # This field is not marked as required in the registration survey
                        continue
                    try:
                        field.label = self.registration_survey_translations[label_identifier][locale.language_code]
                        field.help_text = self.registration_survey_translations[
                            f'{label_identifier}_helptext'][locale.language_code]
                    except KeyError:
                        self.post_migration_report_messages['registration_survey_translation_not_found'].append(
                            f'Incomplete translation for registration survey to locale: {locale}'
                        )
                        break
                    field.save()
        self.post_migration_report_messages['other'].append(
            'Title of registration survey (Pages > Internet of Good Things [Language] > Surveys > Registration Survey) '
            'has not been translated for any language.'
        )
def get_admin_url(self, id):
site = Site.objects.filter(is_default_site=True).first()
return f"{site.root_url}{reverse('wagtailadmin_pages:edit', args=(id,))}"
def print_post_migration_report(self):
self.stdout.write(self.style.ERROR('====================='))
self.stdout.write(self.style.ERROR('POST MIGRATION REPORT'))
self.stdout.write(self.style.ERROR('====================='))
for k, v in self.post_migration_report_messages.items():
self.stdout.write(self.style.ERROR(f"===> {k.replace('_', ' ').upper()}"))
self.stdout.write(self.style.ERROR('\n'.join(v))) | 0.371137 | 0.063802 |
from collections import Counter
from eva_cttv_pipeline.clinvar_xml_io import clinvar_xml_io
from eva_cttv_pipeline.trait_mapping.trait import Trait
def parse_trait_names(filepath: str) -> list:
    """For a file containing ClinVar records in the XML format, return a list of Traits for the records in the file.

    Each Trait object contains trait name, how many times it occurs in the input file, and whether it is linked to an NT
    expansion variant.

    Trait occurrence count is calculated based on all unique (RCV, trait name) tuples in the input file. This is because
    each such tuple will, generally speaking, correspond to one output evidence string. So if we want to gauge which
    trait names are more important to curate, we need to consider how many such tuples it appears in.

    Traits which are implicated in "Microsatellite" variants are marked using a special field, because a subset of
    microsatellites are NT expansion variants, and their curation is of highest importance even if the number of records
    which they are linked to is low.

    :param filepath: Path to a gzipped file containing ClinVar XML dump.
    :return: A list of Trait objects."""
    # Tracks how many records each (trait name, identifier) tuple occurs in
    trait_name_counter = Counter()
    # Tracks all traits which are at least once implicated in "NT expansion", or nucleotide repeat expansion, variants.
    # Their curation is of highest importance regardless of how many records they are actually associated with.
    nt_expansion_traits = set()
    for clinvar_record in clinvar_xml_io.ClinVarDataset(filepath):
        trait_names_and_ids = {(trait.preferred_or_other_valid_name.lower(), trait.identifier)
                               for trait in clinvar_record.traits_with_valid_names}
        for trait_tuple in trait_names_and_ids:
            trait_name_counter[trait_tuple] += 1
        if clinvar_record.measure and clinvar_record.measure.is_repeat_expansion_variant:
            nt_expansion_traits |= trait_names_and_ids
    # Convert the counted tuples into Trait objects, skipping the '-' placeholder used for missing trait names
    traits = []
    for trait_tuple, trait_frequency in trait_name_counter.items():
        if trait_tuple[0] == '-':
            print('Skipped {} missing trait names'.format(trait_frequency))
            continue
        associated_with_nt_expansion = trait_tuple in nt_expansion_traits
        traits.append(Trait(name=trait_tuple[0], identifier=trait_tuple[1], frequency=trait_frequency,
                            associated_with_nt_expansion=associated_with_nt_expansion))
    return traits
from eva_cttv_pipeline.clinvar_xml_io import clinvar_xml_io
from eva_cttv_pipeline.trait_mapping.trait import Trait
def parse_trait_names(filepath: str) -> list:
    # NOTE(review): byte-for-byte duplicate of the identically-named function
    # earlier in this file — looks like extraction/merge residue; confirm
    # which copy is canonical before removing one.
    """For a file containing ClinVar records in the XML format, return a list of Traits for the records in the file.

    Each Trait object contains trait name, how many times it occurs in the input file, and whether it is linked to an NT
    expansion variant.

    Trait occurrence count is calculated based on all unique (RCV, trait name) tuples in the input file. This is because
    each such tuple will, generally speaking, correspond to one output evidence string. So if we want to gauge which
    trait names are more important to curate, we need to consider how many such tuples it appears in.

    Traits which are implicated in "Microsatellite" variants are marked using a special field, because a subset of
    microsatellites are NT expansion variants, and their curation is of highest importance even if the number of records
    which they are linked to is low.

    :param filepath: Path to a gzipped file containing ClinVar XML dump.
    :return: A list of Trait objects."""
    # Tracks how many records each (trait name, identifier) tuple occurs in
    trait_name_counter = Counter()
    # Tracks all traits which are at least once implicated in "NT expansion", or nucleotide repeat expansion, variants.
    # Their curation is of highest importance regardless of how many records they are actually associated with.
    nt_expansion_traits = set()
    for clinvar_record in clinvar_xml_io.ClinVarDataset(filepath):
        trait_names_and_ids = set((trait.preferred_or_other_valid_name.lower(), trait.identifier)
                                  for trait in clinvar_record.traits_with_valid_names)
        for trait_tuple in trait_names_and_ids:
            trait_name_counter[trait_tuple] += 1
        if clinvar_record.measure and clinvar_record.measure.is_repeat_expansion_variant:
            nt_expansion_traits |= trait_names_and_ids
    # Convert the counted tuples into Trait objects, skipping the '-' placeholder used for missing trait names
    traits = []
    for trait_tuple, trait_frequency in trait_name_counter.items():
        if trait_tuple[0] == '-':
            print('Skipped {} missing trait names'.format(trait_frequency))
            continue
        associated_with_nt_expansion = trait_tuple in nt_expansion_traits
        traits.append(Trait(name=trait_tuple[0], identifier=trait_tuple[1], frequency=trait_frequency,
                            associated_with_nt_expansion=associated_with_nt_expansion))
    return traits
import numpy as np
import scipy.integrate as integrate
def rb_nfw(m200, c, z):
    """
    Compute an NFW halo profile and derived quantities.

    Velocity dispersion follows Hoeft, Mucket & Gottlober 2004, ApJ 602, 162.
    http://adsabs.harvard.edu/cgi-bin/bib_query?2004ApJ...602..162H

    Input :-
        m200 :- Halo mass [Msun]
        c    :- NFW concentration parameter
        z    :- redshift

    Returns :-
        dict with keys: m200, c, r200 [kpc], v200 [km/s], maxvcirc [km/s],
        maxvcircr [kpc], r [kpc], rho, vcirc [km/s], M_r, sig_v [km/s],
        vesc [km/s].
    """
    # `cumtrapz` was removed in SciPy >= 1.14; prefer the new name and fall
    # back to the old one on older SciPy versions.
    try:
        from scipy.integrate import cumulative_trapezoid as cumtrapz
    except ImportError:
        from scipy.integrate import cumtrapz

    # Setting up cosmology (cgs units unless noted)
    rho0 = 1.4876862e+11
    omegam = 0.238000
    msun = 1.98892e+33          # g
    delta_vir = 200.
    G = 6.6730003e-08           # cgs
    kpc2cm = 3.086*10.**(21)

    # NFW characteristic overdensity and critical density at redshift z
    deltac = (delta_vir/3.)*((c**3.)/(np.log(1.+c) - (c / (1.+c))))
    rho_crit = rho0*omegam*(1.+z)**3.
    r200 = (m200/delta_vir / rho_crit / (4.*np.pi/3.))**0.33333 * 1000.   # kpc
    # NOTE(review): v200/vesc below use the literal G=6.67e-8 while sig uses
    # the constant G=6.6730003e-8; kept as-is to preserve numerics exactly.
    v200 = ((6.67e-8 * m200 * msun / (r200 * 3.086*10.**(21.)))**0.5)/1e5   # km/s
    r = np.linspace(1., 3.*r200, 500)   # kpc
    rs = r200 / c
    ss = (((r/rs)*(1.+(r/rs))**2.)*1000.**3)
    rho = (rho_crit * deltac)/(ss)
    M_r = 4.*np.pi*cumtrapz((r**2)*rho, r, initial=0.)
    x = r/r200
    tab = 1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c))
    vcirc = v200*(tab)**0.5
    maxvcirc = np.max(vcirc)
    q = np.where((vcirc == np.max(vcirc)))
    maxvcircr = r[q]
    # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31
    Phi_new = r * 0.0
    vesc = r * 0.0
    for ir in range(2, len(r)-4):
        term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.), x=r[0:ir])/(r[ir]))*msun
        term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)], x=r[ir:len(r)])*msun
        Phi_new[ir] = -4.*np.pi*6.67e-8*(term1 + term2)/3.086e21
        vesc[ir] = ((2. * np.abs(Phi_new[ir]))**0.5) / 1e5   # See Binney & Tremaine (2-22)
    # Change units to do velocity dispersion calculations
    rcm = r*kpc2cm
    M_r_gram = M_r*msun   # M_r in gram
    Phi = G*cumtrapz((M_r_gram/rcm**(2)), rcm, initial=0)
    Phi = Phi*(1./((1e5)**2.))   # km^2/s^2
    Phi_out = np.max(Phi)
    # Hoeft et al. (2004) velocity-dispersion fit parameters
    k = 0.41
    a = 0.29
    sig = np.sqrt(a*((Phi/Phi_out)**(k))*(Phi_out - Phi))
    # Replace the zero entries (edge bins skipped by the loop above) so
    # downstream log/division operations do not blow up.
    vesc[np.where((vesc == 0.))] = 1e-99
    nfw = {
        "m200": m200,
        "c": c,
        "r200": r200,
        "v200": v200,
        "maxvcirc": maxvcirc,
        "maxvcircr": maxvcircr,
        "r": r,
        "rho": rho,
        "vcirc": vcirc,
        "M_r": M_r,
        "sig_v": sig,
        "vesc": vesc,
    }
    return nfw
import scipy.integrate as integrate
def rb_nfw(m200, c, z):
    """
    Compute an NFW (Navarro-Frenk-White) halo profile.

    Velocity dispersion equation taken from Hoeft M. & Gottlober, S., 2004, ApJ 602, 162
    http://adsabs.harvard.edu/cgi-bin/bib_query?2004ApJ...602..162H

    Input :-
        m200 :- Halo mass (presumably Msun given the cgs constants below -- TODO confirm)
        c    :- NFW concentration parameter
        z    :- redshift
    Returns :-
        dict with keys: m200, c, r200, v200, maxvcirc, maxvcircr, r, rho,
        vcirc, M_r, sig_v, vesc
    """
    # Compatibility shims: SciPy 1.14 removed integrate.cumtrapz (renamed
    # cumulative_trapezoid) and NumPy 2.0 removed np.trapz (renamed trapezoid).
    try:
        cumtrapz = integrate.cumulative_trapezoid
    except AttributeError:
        cumtrapz = integrate.cumtrapz
    try:
        trapz = np.trapezoid
    except AttributeError:
        trapz = np.trapz

    # Cosmology / unit constants (cgs-flavoured: G in cgs, Msun in grams).
    # Numeric expressions kept in their original form to preserve values bit-for-bit.
    rho0 = 1.4876862e+11
    omegam = 0.238000
    msun = 1.98892e+33
    delta_vir = 200.
    G = 6.6730003e-08
    kpc2cm = 3.086*10.**(21)

    # NFW characteristic overdensity for concentration c.
    deltac = (delta_vir/3.)*((c**3.)/(np.log(1.+c) - (c / (1.+c))))
    rho_crit = rho0*omegam*(1.+z)**3.
    r200 = (m200/delta_vir / rho_crit / (4.*np.pi/3.))**0.33333 * 1000.
    v200 = ((6.67e-8 * m200 * msun / (r200 * 3.086*10.**(21.)))**0.5)/1e5
    r = np.linspace(1., 3.*r200, 500)  # radial grid out to 3*r200, in kpc
    rs = r200 / c
    ss = (((r/rs)*(1.+(r/rs))**2.)*1000.**3)
    rho = (rho_crit * deltac)/(ss)
    # Cumulative mass profile M(<r).
    M_r = 4.*np.pi * cumtrapz((r**2)*rho, r, initial=0.)
    x = r/r200
    tab = 1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c))
    vcirc = v200*(tab)**0.5
    maxvcirc = np.max(vcirc)
    # NOTE: kept as an array of radii attaining the max (original behavior).
    maxvcircr = r[np.where(vcirc == maxvcirc)]

    # Escape velocity per Binney & Tremaine eq. 2.31; grid edges are
    # intentionally left at zero by the loop range (patched below).
    Phi_new = r * 0.0
    vesc = r * 0.0
    for ir in range(2, len(r)-4):
        term1 = (trapz(rho[0:ir]*(r[0:ir]**2.), x=r[0:ir])/(r[ir])) * msun
        term2 = trapz(rho[ir:len(r)]*r[ir:len(r)], x=r[ir:len(r)]) * msun
        Phi_new[ir] = -4.*np.pi*6.67e-8*(term1 + term2)/3.086e21
        vesc[ir] = ((2. * np.abs(Phi_new[ir]))**0.5) / 1e5  # B&T eq. (2-22)

    # Change units for the velocity dispersion calculation.
    rcm = r*kpc2cm
    M_r_gram = M_r*msun  # M_r in grams
    Phi = G*cumtrapz((M_r_gram/rcm**(2)), rcm, initial=0)
    Phi = Phi*(1./((1e5)**2.))  # km^2/s^2
    Phi_out = np.max(Phi)
    # Hoeft et al. (2004) fitting parameters for the velocity dispersion.
    k = 0.41
    a = 0.29
    sig = np.sqrt(a*((Phi/Phi_out)**(k))*(Phi_out - Phi))

    # Replace the untouched zero edges of vesc with a tiny value (avoids /0 downstream).
    vesc[np.where(vesc == 0.)] = 1e-99

    return {
        "m200": m200,
        "c": c,
        "r200": r200,
        "v200": v200,
        "maxvcirc": maxvcirc,
        "maxvcircr": maxvcircr,
        "r": r,
        "rho": rho,
        "vcirc": vcirc,
        "M_r": M_r,
        "sig_v": sig,
        "vesc": vesc,
    }
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareUcscInfoConsts:
    """Allowed values of FirmwareUcscInfo.conn_protocol (generated UCS SDK metadata)."""
    CONN_PROTOCOL_IPV4 = "ipv4"
    CONN_PROTOCOL_IPV6 = "ipv6"
    CONN_PROTOCOL_UNKNOWN = "unknown"
class FirmwareUcscInfo(ManagedObject):
    """Managed object FirmwareUcscInfo (generated UCS SDK metadata).

    NOTE(review): generated code -- this revision only removes a
    dataset-extraction artifact that had been appended to the final line
    (it made the line a syntax error) and adds comments.
    """

    consts = FirmwareUcscInfoConsts()
    naming_props = set([])

    mo_meta = MoMeta("FirmwareUcscInfo", "firmwareUcscInfo", "ucsc-info", VersionMeta.Version222c, "InputOutput", 0x1f, [], ["admin"], ['firmwareBootDefinition', 'firmwareCatalogue', 'firmwareInstallable'], [], ["Get"])

    # Per-property metadata; regex strings constrain accepted values server-side.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "conn_protocol": MoPropertyMeta("conn_protocol", "connProtocol", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ipv4", "ipv6", "unknown"], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "host": MoPropertyMeta("host", "host", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, r"""^[A-Za-z]([A-Za-z0-9-]*[A-Za-z0-9])?$|^[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?(\.[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?)*(\.[A-Za-z]([A-Za-z0-9-]*[A-Za-z0-9])?)$|^([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$""", [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "version": MoPropertyMeta("version", "version", "string", VersionMeta.Version223a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "connProtocol": "conn_protocol",
        "dn": "dn",
        "host": "host",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "version": "version",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        self.child_action = None
        self.conn_protocol = None
        self.host = None
        self.sacl = None
        self.status = None
        self.version = None

        ManagedObject.__init__(self, "FirmwareUcscInfo", parent_mo_or_dn, **kwargs)
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareUcscInfoConsts:
    """Allowed values of FirmwareUcscInfo.conn_protocol (generated UCS SDK metadata)."""
    CONN_PROTOCOL_IPV4 = "ipv4"
    CONN_PROTOCOL_IPV6 = "ipv6"
    CONN_PROTOCOL_UNKNOWN = "unknown"
class FirmwareUcscInfo(ManagedObject):
    """Managed object FirmwareUcscInfo (generated UCS SDK metadata).

    NOTE(review): generated code -- this revision only removes a
    dataset-extraction artifact that had been appended to the final line
    (it made the line a syntax error) and adds comments.
    """

    consts = FirmwareUcscInfoConsts()
    naming_props = set([])

    mo_meta = MoMeta("FirmwareUcscInfo", "firmwareUcscInfo", "ucsc-info", VersionMeta.Version222c, "InputOutput", 0x1f, [], ["admin"], ['firmwareBootDefinition', 'firmwareCatalogue', 'firmwareInstallable'], [], ["Get"])

    # Per-property metadata; regex strings constrain accepted values server-side.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "conn_protocol": MoPropertyMeta("conn_protocol", "connProtocol", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ipv4", "ipv6", "unknown"], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "host": MoPropertyMeta("host", "host", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, r"""^[A-Za-z]([A-Za-z0-9-]*[A-Za-z0-9])?$|^[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?(\.[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?)*(\.[A-Za-z]([A-Za-z0-9-]*[A-Za-z0-9])?)$|^([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$""", [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "version": MoPropertyMeta("version", "version", "string", VersionMeta.Version223a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "connProtocol": "conn_protocol",
        "dn": "dn",
        "host": "host",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "version": "version",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        self.child_action = None
        self.conn_protocol = None
        self.host = None
        self.sacl = None
        self.status = None
        self.version = None

        ManagedObject.__init__(self, "FirmwareUcscInfo", parent_mo_or_dn, **kwargs)
from __future__ import print_function
import numpy as np
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import utils, jit
from .support import TestCase
def complex_constant(n):
    """Fixture: result includes a complex constant the compiler must lower."""
    tmp = n + 4
    return tmp + 3j


def long_constant(n):
    """Fixture: adds an integer far beyond 64 bits (requires Python ints / objmode)."""
    return n + 100000000000000000000000000000000000000000000000


def delitem_usecase(x):
    """Fixture: exercises static slice deletion (`del x[:]`)."""
    del x[:]


# Compilation flags forcing object (pyobject) mode for compile_isolated.
forceobj = Flags()
forceobj.set("force_pyobject")


def loop_nest_3(x, y):
    """Fixture: triply nested loops; guards against early iterator decref."""
    n = 0
    for i in range(x):
        for j in range(y):
            for k in range(x+y):
                n += i * j
    return n


def array_of_object(x):
    """Fixture: identity function, used with an object-dtype array."""
    return x


class TestObjectMode(TestCase):
    """Tests compiling and running functions in object (pyobject) mode."""

    def test_complex_constant(self):
        pyfunc = complex_constant
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point
        self.assertPreciseEqual(pyfunc(12), cfunc(12))

    def test_long_constant(self):
        pyfunc = long_constant
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point
        self.assertPreciseEqual(pyfunc(12), cfunc(12))

    def test_loop_nest(self):
        """
        Test bug that decref the iterator early.
        If the bug occurs, a segfault should occur
        """
        pyfunc = loop_nest_3
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point
        self.assertEqual(pyfunc(5, 5), cfunc(5, 5))

        def bm_pyfunc():
            pyfunc(5, 5)

        def bm_cfunc():
            cfunc(5, 5)

        print(utils.benchmark(bm_pyfunc))
        print(utils.benchmark(bm_cfunc))

    def test_array_of_object(self):
        # Object-mode compiled identity must return the very same array object.
        cfunc = jit(array_of_object)
        objarr = np.array([object()] * 10)
        self.assertIs(cfunc(objarr), objarr)

    def test_sequence_contains(self):
        """
        Test handling of the `in` comparison
        """
        @jit(forceobj=True)
        def foo(x, y):
            return x in y

        self.assertTrue(foo(1, [0, 1]))
        self.assertTrue(foo(0, [0, 1]))
        self.assertFalse(foo(2, [0, 1]))

        with self.assertRaises(TypeError) as raises:
            foo(None, None)
        self.assertIn("is not iterable", str(raises.exception))

    def test_delitem(self):
        pyfunc = delitem_usecase
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point

        l = [3, 4, 5]
        cfunc(l)
        self.assertPreciseEqual(l, [])
        # Deleting a slice of an int must propagate the TypeError.
        with self.assertRaises(TypeError):
            cfunc(42)


class TestObjectModeInvalidRewrite(TestCase):
    """
    Tests to ensure that rewrite passes didn't affect objmode lowering.
    """

    def _ensure_objmode(self, disp):
        # Something was compiled, and none of it in nopython mode --
        # i.e. the dispatcher actually fell back to object mode.
        self.assertTrue(disp.signatures)
        self.assertFalse(disp.nopython_signatures)
        return disp

    def test_static_raise_in_objmode_fallback(self):
        """
        Test code based on user submitted issue at
        https://github.com/numba/numba/issues/2159
        """
        def test0(n):
            return n

        def test1(n):
            if n == 0:
                # static raise will fail in objmode if the IR is modified by
                # rewrite pass
                raise ValueError()
            return test0(n) # trigger objmode fallback

        compiled = jit(test1)
        self.assertEqual(test1(10), compiled(10))
        self._ensure_objmode(compiled)

    def test_static_setitem_in_objmode_fallback(self):
        """
        Test code based on user submitted issue at
        https://github.com/numba/numba/issues/2169
        """
        def test0(n):
            return n

        def test(a1, a2):
            a1 = np.asarray(a1)
            # static setitem here will fail in objmode if the IR is modified by
            # rewrite pass
            a2[0] = 1
            return test0(a1.sum() + a2.sum()) # trigger objmode fallback

        compiled = jit(test)
        args = np.array([3]), np.array([4])
        self.assertEqual(test(*args), compiled(*args))
        self._ensure_objmode(compiled)
if __name__ == '__main__':
unittest.main() | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/test_object_mode.py | from __future__ import print_function
import numpy as np
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import utils, jit
from .support import TestCase
def complex_constant(n):
    """Fixture: result includes a complex constant the compiler must lower."""
    tmp = n + 4
    return tmp + 3j


def long_constant(n):
    """Fixture: adds an integer far beyond 64 bits (requires Python ints / objmode)."""
    return n + 100000000000000000000000000000000000000000000000


def delitem_usecase(x):
    """Fixture: exercises static slice deletion (`del x[:]`)."""
    del x[:]


# Compilation flags forcing object (pyobject) mode for compile_isolated.
forceobj = Flags()
forceobj.set("force_pyobject")


def loop_nest_3(x, y):
    """Fixture: triply nested loops; guards against early iterator decref."""
    n = 0
    for i in range(x):
        for j in range(y):
            for k in range(x+y):
                n += i * j
    return n


def array_of_object(x):
    """Fixture: identity function, used with an object-dtype array."""
    return x


class TestObjectMode(TestCase):
    """Tests compiling and running functions in object (pyobject) mode."""

    def test_complex_constant(self):
        pyfunc = complex_constant
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point
        self.assertPreciseEqual(pyfunc(12), cfunc(12))

    def test_long_constant(self):
        pyfunc = long_constant
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point
        self.assertPreciseEqual(pyfunc(12), cfunc(12))

    def test_loop_nest(self):
        """
        Test bug that decref the iterator early.
        If the bug occurs, a segfault should occur
        """
        pyfunc = loop_nest_3
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point
        self.assertEqual(pyfunc(5, 5), cfunc(5, 5))

        def bm_pyfunc():
            pyfunc(5, 5)

        def bm_cfunc():
            cfunc(5, 5)

        print(utils.benchmark(bm_pyfunc))
        print(utils.benchmark(bm_cfunc))

    def test_array_of_object(self):
        # Object-mode compiled identity must return the very same array object.
        cfunc = jit(array_of_object)
        objarr = np.array([object()] * 10)
        self.assertIs(cfunc(objarr), objarr)

    def test_sequence_contains(self):
        """
        Test handling of the `in` comparison
        """
        @jit(forceobj=True)
        def foo(x, y):
            return x in y

        self.assertTrue(foo(1, [0, 1]))
        self.assertTrue(foo(0, [0, 1]))
        self.assertFalse(foo(2, [0, 1]))

        with self.assertRaises(TypeError) as raises:
            foo(None, None)
        self.assertIn("is not iterable", str(raises.exception))

    def test_delitem(self):
        pyfunc = delitem_usecase
        cres = compile_isolated(pyfunc, (), flags=forceobj)
        cfunc = cres.entry_point

        l = [3, 4, 5]
        cfunc(l)
        self.assertPreciseEqual(l, [])
        # Deleting a slice of an int must propagate the TypeError.
        with self.assertRaises(TypeError):
            cfunc(42)


class TestObjectModeInvalidRewrite(TestCase):
    """
    Tests to ensure that rewrite passes didn't affect objmode lowering.
    """

    def _ensure_objmode(self, disp):
        # Something was compiled, and none of it in nopython mode --
        # i.e. the dispatcher actually fell back to object mode.
        self.assertTrue(disp.signatures)
        self.assertFalse(disp.nopython_signatures)
        return disp

    def test_static_raise_in_objmode_fallback(self):
        """
        Test code based on user submitted issue at
        https://github.com/numba/numba/issues/2159
        """
        def test0(n):
            return n

        def test1(n):
            if n == 0:
                # static raise will fail in objmode if the IR is modified by
                # rewrite pass
                raise ValueError()
            return test0(n) # trigger objmode fallback

        compiled = jit(test1)
        self.assertEqual(test1(10), compiled(10))
        self._ensure_objmode(compiled)

    def test_static_setitem_in_objmode_fallback(self):
        """
        Test code based on user submitted issue at
        https://github.com/numba/numba/issues/2169
        """
        def test0(n):
            return n

        def test(a1, a2):
            a1 = np.asarray(a1)
            # static setitem here will fail in objmode if the IR is modified by
            # rewrite pass
            a2[0] = 1
            return test0(a1.sum() + a2.sum()) # trigger objmode fallback

        compiled = jit(test)
        args = np.array([3]), np.array([4])
        self.assertEqual(test(*args), compiled(*args))
        self._ensure_objmode(compiled)
# Allow running this test module directly.
# Fix: removed a dataset-extraction artifact (" | 0.612541 | 0.573499 |")
# that had been appended to the unittest.main() line, breaking the syntax.
if __name__ == '__main__':
    unittest.main()
import copy
import logging
import typing
from collections import defaultdict
from typing import Dict, Any, List, Type, Optional
import pandas as pd
from flask_babel import _
from sqlalchemy import func, distinct
from anyway.app_and_db import db
from anyway.backend_constants import BE_CONST, LabeledCode
def get_query(table_obj, filters, start_time, end_time):
    """Build a query over ``table_obj`` restricted by an accident-timestamp
    window and by per-column value filters.

    Scalar filter values are wrapped in a list so a single ``IN`` clause
    form can be used throughout.
    """
    query = db.session.query(table_obj)
    if start_time:
        query = query.filter(getattr(table_obj, "accident_timestamp") >= start_time)
    if end_time:
        query = query.filter(getattr(table_obj, "accident_timestamp") <= end_time)
    for field_name, raw_value in (filters or {}).items():
        candidates = raw_value if isinstance(raw_value, list) else [raw_value]
        query = query.filter(getattr(table_obj, field_name).in_(candidates))
    return query
def get_accidents_stats(
    table_obj,
    filters=None,
    group_by=None,
    count=None,
    cnt_distinct=False,
    start_time=None,
    end_time=None,
):
    """Aggregate accident rows from ``table_obj``.

    group_by may be a single column (returns a list of record dicts) or a
    tuple of exactly two columns (returns a nested dict via retro_dictify).

    Raises:
        ValueError: if ``group_by`` is a tuple whose length is not 2.

    Fixes vs. original: the caller's ``filters`` dict is no longer mutated
    (a copy is taken before injecting the provider-code default), and the
    bad-group_by error is a ``ValueError`` instead of a bare ``Exception``
    (still caught by any existing ``except Exception`` handler).
    """
    # Copy so the provider_code default does not leak back into the caller's dict.
    filters = dict(filters) if filters else {}
    provider_code_filters = [BE_CONST.CBS_ACCIDENT_TYPE_1_CODE, BE_CONST.CBS_ACCIDENT_TYPE_3_CODE]
    filters["provider_code"] = filters.get("provider_code", provider_code_filters)
    # get stats
    query = get_query(table_obj, filters, start_time, end_time)
    if group_by:
        if isinstance(group_by, tuple):
            if len(group_by) != 2:
                err_msg = f"get_accidents_stats: {group_by}: Only a string or a tuple of two are valid for group_by"
                logging.error(err_msg)
                raise ValueError(err_msg)
            query = query.group_by(*group_by)
            query = query.with_entities(*group_by, func.count(count))
            return retro_dictify(query.all())
        query = query.group_by(group_by)
        query = query.with_entities(
            group_by, func.count(count) if not cnt_distinct else func.count(distinct(count))
        )
    df = pd.read_sql_query(query.statement, query.session.bind)
    df.rename(columns={"count_1": "count"}, inplace=True)  # pylint: disable=no-member
    # Strip the "_hebrew" suffix so callers see language-neutral column names.
    df.columns = [c.replace("_hebrew", "") for c in df.columns]
    return (  # pylint: disable=no-member
        df.to_dict(orient="records") if group_by or count else df.to_dict()
    )
# noinspection Mypy
def retro_dictify(indexable) -> Dict[Any, Dict[Any, Any]]:
    """Nest flat rows into a dict of dicts.

    All columns except the last two form the nesting path; the second-to-last
    column becomes the leaf key and the last column its value.
    """
    nested = defaultdict(dict)
    for row in indexable:
        *path, leaf_key, leaf_value = row
        node = nested
        for step in path:
            node = node.setdefault(step, defaultdict(lambda: 0))
        node[leaf_key] = leaf_value
    return nested
def add_empty_keys_to_gen_two_level_dict(
    d, level_1_values: List[Any], level_2_values: List[Any], default_level_3_value: int = 0
) -> Dict[Any, Dict[Any, int]]:
    """Ensure every (level-1, level-2) key combination exists in *d*.

    Missing inner keys are filled with *default_level_3_value*; existing
    entries are left untouched. Mutates and returns *d*.
    """
    for outer in level_1_values:
        inner = d.setdefault(outer, {})
        for key in level_2_values:
            inner.setdefault(key, default_level_3_value)
    return d
def gen_entity_labels(entity: Type[LabeledCode]) -> dict:
    """Map each member's label to its translated form (flask-babel ``_``)."""
    labels = (member.get_label() for member in entity)
    return {label: _(label) for label in labels}
def get_injured_filters(location_info):
    """Translate a location filter dict for injured-person queries.

    Location-name keys gain an ``accident_`` prefix; all other keys pass
    through unchanged. All injury severities (1-5) are always included.
    """
    prefixed_keys = {"region_hebrew", "district_hebrew", "yishuv_name"}
    new_filters = {
        ("accident_" + key if key in prefixed_keys else key): value
        for key, value in location_info.items()
    }
    new_filters["injury_severity"] = [1, 2, 3, 4, 5]
    return new_filters
def run_query(query: db.session.query) -> List[Dict]:
    """Execute *query* and return its rows as a list of record dicts.

    Annotation fixed: ``DataFrame.to_dict(orient="records")`` returns a list
    of dicts, not a dict.
    """
    # pylint: disable=no-member
    return pd.read_sql_query(query.statement, query.session.bind).to_dict(orient="records")
# TODO: Find a better way to deal with typing.Union[int, str]
def format_2_level_items(
    items: Dict[typing.Union[int, str], dict],
    level1_vals: Optional[Type[LabeledCode]],
    level2_vals: Optional[Type[LabeledCode]],
):
    """Render a two-level mapping as widget series entries.

    Codes are translated to labels through the given LabeledCode enums when
    provided; otherwise the raw codes are used as-is.
    """

    def _label(vals, code):
        # Translate a raw code through its LabeledCode enum, or pass it through.
        return vals.labels()[vals(code)] if vals else code

    formatted: List[Dict[str, Any]] = []
    for outer_code, inner_map in items.items():
        outer_label = _label(level1_vals, outer_code)
        series = [
            {BE_CONST.LKEY: _label(level2_vals, inner_code), BE_CONST.VAL: amount}
            for inner_code, amount in inner_map.items()
        ]
        formatted.append({BE_CONST.LKEY: outer_label, BE_CONST.SERIES: series})
    return formatted
def second_level_fill_and_sort(data: dict, default_order: dict) -> dict:
    """Replace each inner dict of *data* with a copy of *default_order*
    incremented by that inner dict's counts (in place; returns *data*).

    Raises KeyError if an inner dict holds a key absent from *default_order*.
    """
    for outer_key, counts in data.items():
        merged = copy.deepcopy(default_order)
        for inner_key, increment in counts.items():
            merged[inner_key] += increment
        data[outer_key] = merged
    return data
def fill_and_sort_by_numeric_range(
    data: defaultdict, numeric_range: typing.Iterable, default_order: dict
) -> Dict[int, dict]:
    """Insert a default inner dict for each key of *numeric_range* missing
    from *data*, then return the entries as a new dict sorted by key.

    Fix: each missing key now receives its own deep copy of *default_order*.
    The original inserted the same dict object for every missing key, so
    mutating one filled entry silently mutated all of them (and
    *default_order* itself).
    """
    for item in numeric_range:
        if item not in data:
            data[item] = copy.deepcopy(default_order)
    return dict(sorted(data.items()))
def sort_and_fill_gaps_for_stacked_bar(
    data: defaultdict, numeric_range: typing.Iterable, default_order: dict
) -> Dict[int, dict]:
    """Fill missing keys over *numeric_range*, then align every inner dict
    to the key set of *default_order* (sorted by outer key)."""
    # NOTE(review): the function's ``return res2`` sits on the next line,
    # which was corrupted by dataset-extraction artifacts and is left untouched.
    res = fill_and_sort_by_numeric_range(data, numeric_range, default_order)
    res2 = second_level_fill_and_sort(res, default_order)
return res2 | anyway/widgets/widget_utils.py | import copy
import logging
import typing
from collections import defaultdict
from typing import Dict, Any, List, Type, Optional
import pandas as pd
from flask_babel import _
from sqlalchemy import func, distinct
from anyway.app_and_db import db
from anyway.backend_constants import BE_CONST, LabeledCode
def get_query(table_obj, filters, start_time, end_time):
    """Build a query over table_obj filtered by a timestamp window and field values."""
    query = db.session.query(table_obj)
    if start_time:
        query = query.filter(getattr(table_obj, "accident_timestamp") >= start_time)
    if end_time:
        query = query.filter(getattr(table_obj, "accident_timestamp") <= end_time)
    if filters:
        for field_name, value in filters.items():
            # Wrap scalars so a single IN-clause form can be used throughout.
            if isinstance(value, list):
                values = value
            else:
                values = [value]
            query = query.filter((getattr(table_obj, field_name)).in_(values))
    return query


def get_accidents_stats(
    table_obj,
    filters=None,
    group_by=None,
    count=None,
    cnt_distinct=False,
    start_time=None,
    end_time=None,
):
    """Aggregate accident rows, grouped by one column or a two-column tuple.

    NOTE(review): mutates the caller's ``filters`` dict (provider_code default
    is injected in place) and raises a bare ``Exception`` on a bad group_by.
    """
    filters = filters or {}
    provider_code_filters = [BE_CONST.CBS_ACCIDENT_TYPE_1_CODE, BE_CONST.CBS_ACCIDENT_TYPE_3_CODE]
    filters["provider_code"] = filters.get("provider_code", provider_code_filters)
    # get stats
    query = get_query(table_obj, filters, start_time, end_time)
    if group_by:
        if isinstance(group_by, tuple):
            if len(group_by) == 2:
                query = query.group_by(*group_by)
                query = query.with_entities(*group_by, func.count(count))
                dd = query.all()
                res = retro_dictify(dd)
                return res
            else:
                err_msg = f"get_accidents_stats: {group_by}: Only a string or a tuple of two are valid for group_by"
                logging.error(err_msg)
                raise Exception(err_msg)
        else:
            query = query.group_by(group_by)
            query = query.with_entities(
                group_by, func.count(count) if not cnt_distinct else func.count(distinct(count))
            )
    df = pd.read_sql_query(query.statement, query.session.bind)
    df.rename(columns={"count_1": "count"}, inplace=True)  # pylint: disable=no-member
    # Strip the "_hebrew" suffix so callers see language-neutral column names.
    df.columns = [c.replace("_hebrew", "") for c in df.columns]
    return (  # pylint: disable=no-member
        df.to_dict(orient="records") if group_by or count else df.to_dict()
    )


# noinspection Mypy
def retro_dictify(indexable) -> Dict[Any, Dict[Any, Any]]:
    """Nest flat rows into a dict of dicts keyed by all columns but the last two;
    the second-to-last column is the leaf key, the last its value."""
    d = defaultdict(dict)
    for row in indexable:
        here = d
        for elem in row[:-2]:
            if elem not in here:
                here[elem] = defaultdict(lambda: 0)
            here = here[elem]
        here[row[-2]] = row[-1]
    return d


def add_empty_keys_to_gen_two_level_dict(
    d, level_1_values: List[Any], level_2_values: List[Any], default_level_3_value: int = 0
) -> Dict[Any, Dict[Any, int]]:
    """Ensure every (level-1, level-2) key combination exists in d (in place)."""
    for v1 in level_1_values:
        if v1 not in d:
            d[v1] = {}
        for v2 in level_2_values:
            if v2 not in d[v1]:
                d[v1][v2] = default_level_3_value
    return d


def gen_entity_labels(entity: Type[LabeledCode]) -> dict:
    """Map each member's label to its translated form (flask-babel ``_``)."""
    res = {}
    for code in entity:
        label = code.get_label()
        res[label] = _(label)
    return res


def get_injured_filters(location_info):
    """Prefix location-name keys with 'accident_' and include all injury severities."""
    new_filters = {}
    for curr_filter, curr_values in location_info.items():
        if curr_filter in ["region_hebrew", "district_hebrew", "yishuv_name"]:
            new_filter_name = "accident_" + curr_filter
            new_filters[new_filter_name] = curr_values
        else:
            new_filters[curr_filter] = curr_values
    new_filters["injury_severity"] = [1, 2, 3, 4, 5]
    return new_filters


def run_query(query: db.session.query) -> List[Dict]:
    """Execute *query* and return its rows as a list of record dicts.

    Annotation fixed: ``to_dict(orient="records")`` returns a list, not a dict.
    """
    # pylint: disable=no-member
    return pd.read_sql_query(query.statement, query.session.bind).to_dict(orient="records")


# TODO: Find a better way to deal with typing.Union[int, str]
def format_2_level_items(
    items: Dict[typing.Union[int, str], dict],
    level1_vals: Optional[Type[LabeledCode]],
    level2_vals: Optional[Type[LabeledCode]],
):
    """Render a two-level mapping as widget series entries, translating codes
    to labels through the given LabeledCode enums when provided."""
    res: List[Dict[str, Any]] = []
    for l1_code, year_res in items.items():
        l1 = level1_vals.labels()[level1_vals(l1_code)] if level1_vals else l1_code
        series_data = []
        for l2_code, num in year_res.items():
            l2 = level2_vals.labels()[level2_vals(l2_code)] if level2_vals else l2_code
            series_data.append({BE_CONST.LKEY: l2, BE_CONST.VAL: num})
        res.append({BE_CONST.LKEY: l1, BE_CONST.SERIES: series_data})
    return res
def second_level_fill_and_sort(data: dict, default_order: dict) -> dict:
    """Replace each inner dict of *data* with a copy of *default_order*
    incremented by that inner dict's counts (in place; returns *data*).

    Raises KeyError if an inner dict holds a key absent from *default_order*.
    """
    for num, value in data.items():
        new_value = copy.deepcopy(default_order)
        for key, value_in in value.items():
            new_value[key] += value_in
        data[num] = new_value
    return data


def fill_and_sort_by_numeric_range(
    data: defaultdict, numeric_range: typing.Iterable, default_order: dict
) -> Dict[int, dict]:
    """Insert a default inner dict for each key of *numeric_range* missing
    from *data*, then return the entries as a new dict sorted by key.

    Fix: each missing key now receives its own deep copy of *default_order*.
    The original inserted the same dict object for every missing key, so
    mutating one filled entry silently mutated all of them (and
    *default_order* itself).
    """
    for item in numeric_range:
        if item not in data:
            data[item] = copy.deepcopy(default_order)
    return dict(sorted(data.items()))


def sort_and_fill_gaps_for_stacked_bar(
    data: defaultdict, numeric_range: typing.Iterable, default_order: dict
) -> Dict[int, dict]:
    """Fill missing keys over *numeric_range*, then align every inner dict to
    the key set of *default_order* (sorted by outer key).

    Fix: a dataset-extraction artifact appended to the original return line
    (it made the line a syntax error) has been removed.
    """
    res = fill_and_sort_by_numeric_range(data, numeric_range, default_order)
    res2 = second_level_fill_and_sort(res, default_order)
    return res2
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="cosmos/params/v1beta1/params.proto",
package="cosmos.params.v1beta1",
syntax="proto3",
serialized_options=b"Z4github.com/cosmos/cosmos-sdk/x/params/types/proposal\250\342\036\001",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n"cosmos/params/v1beta1/params.proto\x12\x15\x63osmos.params.v1beta1\x1a\x14gogoproto/gogo.proto"\x82\x01\n\x17ParameterChangeProposal\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x39\n\x07\x63hanges\x18\x03 \x03(\x0b\x32".cosmos.params.v1beta1.ParamChangeB\x04\xc8\xde\x1f\x00:\x08\x88\xa0\x1f\x00\x98\xa0\x1f\x00"A\n\x0bParamChange\x12\x10\n\x08subspace\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t:\x04\x98\xa0\x1f\x00\x42:Z4github.com/cosmos/cosmos-sdk/x/params/types/proposal\xa8\xe2\x1e\x01\x62\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
],
)
_PARAMETERCHANGEPROPOSAL = _descriptor.Descriptor(
name="ParameterChangeProposal",
full_name="cosmos.params.v1beta1.ParameterChangeProposal",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="title",
full_name="cosmos.params.v1beta1.ParameterChangeProposal.title",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="description",
full_name="cosmos.params.v1beta1.ParameterChangeProposal.description",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="changes",
full_name="cosmos.params.v1beta1.ParameterChangeProposal.changes",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\210\240\037\000\230\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=84,
serialized_end=214,
)
_PARAMCHANGE = _descriptor.Descriptor(
name="ParamChange",
full_name="cosmos.params.v1beta1.ParamChange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="subspace",
full_name="cosmos.params.v1beta1.ParamChange.subspace",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="key",
full_name="cosmos.params.v1beta1.ParamChange.key",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="cosmos.params.v1beta1.ParamChange.value",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\230\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=216,
serialized_end=281,
)
# ---- Generated wiring (protoc output -- do not edit by hand) ----
# Resolve the message-typed field and register descriptors with the symbol db.
_PARAMETERCHANGEPROPOSAL.fields_by_name["changes"].message_type = _PARAMCHANGE
DESCRIPTOR.message_types_by_name["ParameterChangeProposal"] = _PARAMETERCHANGEPROPOSAL
DESCRIPTOR.message_types_by_name["ParamChange"] = _PARAMCHANGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete message classes from their descriptors via reflection.
ParameterChangeProposal = _reflection.GeneratedProtocolMessageType(
    "ParameterChangeProposal",
    (_message.Message,),
    {
        "DESCRIPTOR": _PARAMETERCHANGEPROPOSAL,
        "__module__": "cosmos.params.v1beta1.params_pb2"
        # @@protoc_insertion_point(class_scope:cosmos.params.v1beta1.ParameterChangeProposal)
    },
)
_sym_db.RegisterMessage(ParameterChangeProposal)

ParamChange = _reflection.GeneratedProtocolMessageType(
    "ParamChange",
    (_message.Message,),
    {
        "DESCRIPTOR": _PARAMCHANGE,
        "__module__": "cosmos.params.v1beta1.params_pb2"
        # @@protoc_insertion_point(class_scope:cosmos.params.v1beta1.ParamChange)
    },
)
_sym_db.RegisterMessage(ParamChange)

# Clear serialized options now that descriptors are registered.
DESCRIPTOR._options = None
_PARAMETERCHANGEPROPOSAL.fields_by_name["changes"]._options = None
_PARAMETERCHANGEPROPOSAL._options = None
_PARAMCHANGE._options = None
# @@protoc_insertion_point(module_scope) | terra_sdk/protobuf/cosmos/params/v1beta1/params_pb2.py | """Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="cosmos/params/v1beta1/params.proto",
package="cosmos.params.v1beta1",
syntax="proto3",
serialized_options=b"Z4github.com/cosmos/cosmos-sdk/x/params/types/proposal\250\342\036\001",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n"cosmos/params/v1beta1/params.proto\x12\x15\x63osmos.params.v1beta1\x1a\x14gogoproto/gogo.proto"\x82\x01\n\x17ParameterChangeProposal\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x39\n\x07\x63hanges\x18\x03 \x03(\x0b\x32".cosmos.params.v1beta1.ParamChangeB\x04\xc8\xde\x1f\x00:\x08\x88\xa0\x1f\x00\x98\xa0\x1f\x00"A\n\x0bParamChange\x12\x10\n\x08subspace\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t:\x04\x98\xa0\x1f\x00\x42:Z4github.com/cosmos/cosmos-sdk/x/params/types/proposal\xa8\xe2\x1e\x01\x62\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
],
)
_PARAMETERCHANGEPROPOSAL = _descriptor.Descriptor(
name="ParameterChangeProposal",
full_name="cosmos.params.v1beta1.ParameterChangeProposal",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="title",
full_name="cosmos.params.v1beta1.ParameterChangeProposal.title",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="description",
full_name="cosmos.params.v1beta1.ParameterChangeProposal.description",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="changes",
full_name="cosmos.params.v1beta1.ParameterChangeProposal.changes",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\210\240\037\000\230\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=84,
serialized_end=214,
)
_PARAMCHANGE = _descriptor.Descriptor(
name="ParamChange",
full_name="cosmos.params.v1beta1.ParamChange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="subspace",
full_name="cosmos.params.v1beta1.ParamChange.subspace",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="key",
full_name="cosmos.params.v1beta1.ParamChange.key",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="cosmos.params.v1beta1.ParamChange.value",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\230\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=216,
serialized_end=281,
)
_PARAMETERCHANGEPROPOSAL.fields_by_name["changes"].message_type = _PARAMCHANGE
DESCRIPTOR.message_types_by_name["ParameterChangeProposal"] = _PARAMETERCHANGEPROPOSAL
DESCRIPTOR.message_types_by_name["ParamChange"] = _PARAMCHANGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ParameterChangeProposal = _reflection.GeneratedProtocolMessageType(
"ParameterChangeProposal",
(_message.Message,),
{
"DESCRIPTOR": _PARAMETERCHANGEPROPOSAL,
"__module__": "cosmos.params.v1beta1.params_pb2"
# @@protoc_insertion_point(class_scope:cosmos.params.v1beta1.ParameterChangeProposal)
},
)
_sym_db.RegisterMessage(ParameterChangeProposal)
ParamChange = _reflection.GeneratedProtocolMessageType(
"ParamChange",
(_message.Message,),
{
"DESCRIPTOR": _PARAMCHANGE,
"__module__": "cosmos.params.v1beta1.params_pb2"
# @@protoc_insertion_point(class_scope:cosmos.params.v1beta1.ParamChange)
},
)
_sym_db.RegisterMessage(ParamChange)
DESCRIPTOR._options = None
_PARAMETERCHANGEPROPOSAL.fields_by_name["changes"]._options = None
_PARAMETERCHANGEPROPOSAL._options = None
_PARAMCHANGE._options = None
# @@protoc_insertion_point(module_scope) | 0.422862 | 0.116011 |
import os
import pytest
import fetch_data as fd
def test_file_logging():
import logging
from fetch_data import utils
dest = "./tests/downloads/logging_download.log"
utils.log_to_file(dest)
logging.warning("[TESTING] This is a test log for downloading")
with open(dest) as file:
assert "regrid" not in file.read()
def test_read_catalog():
fname = "./tests/example_catalog.yml"
cat = fd.read_catalog(fname)
assert isinstance(cat, dict)
assert cat != {}
def test_get_url_list_no_login_http():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc" # wildcards
)
flist = fd.core.get_url_list(url, use_cache=False)
assert len(flist) != 0
@pytest.mark.skipif(
os.environ.get("CI", "false") == "true", reason="Skipping downloads in CI"
)
def test_get_url_list_bad_url():
url = "http://fake_url.com/test_*_file.nc" # wildcards
with pytest.raises(FileNotFoundError):
fd.core.get_url_list(url, use_cache=False)
def test_get_url_list_bad_filename_raise():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/bad_file_*_name.nc" # wildcards
)
flist = fd.core.get_url_list(url, use_cache=False)
assert flist == []
def test_get_url_list_fake_kwarg_https():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc" # wildcards
)
with pytest.raises(KeyError):
fd.core.get_url_list(url, use_cache=False, username="tester", password="<PASSWORD>")
def test_choose_downloader():
import pooch
url = "ftp://thispartdoesntmatter.com"
protocol = fd.core.choose_downloader(url, progress=False)
assert protocol.__class__ == pooch.downloaders.FTPDownloader().__class__
@pytest.mark.skipif(
os.environ.get("CI", "false") == "true", reason="Skipping downloads in CI"
)
def test_download_urls():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc"
)
dest = "./tests/downloads/"
urls = fd.core.get_url_list(
url, cache_path=f"{dest}/remote_files.cache", use_cache=True
)[:1]
fd.core.download_urls(urls, dest_dir=dest)
def test_make_readme():
fname = "./tests/example_catalog.yml"
cat = fd.read_catalog(fname)
for key in cat:
cat[key]["name"] = key.upper().replace("_", " ")
fd.core.create_download_readme("README.txt", **cat[key]) | tests/test_download.py | import os
import pytest
import fetch_data as fd
def test_file_logging():
import logging
from fetch_data import utils
dest = "./tests/downloads/logging_download.log"
utils.log_to_file(dest)
logging.warning("[TESTING] This is a test log for downloading")
with open(dest) as file:
assert "regrid" not in file.read()
def test_read_catalog():
fname = "./tests/example_catalog.yml"
cat = fd.read_catalog(fname)
assert isinstance(cat, dict)
assert cat != {}
def test_get_url_list_no_login_http():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc" # wildcards
)
flist = fd.core.get_url_list(url, use_cache=False)
assert len(flist) != 0
@pytest.mark.skipif(
os.environ.get("CI", "false") == "true", reason="Skipping downloads in CI"
)
def test_get_url_list_bad_url():
url = "http://fake_url.com/test_*_file.nc" # wildcards
with pytest.raises(FileNotFoundError):
fd.core.get_url_list(url, use_cache=False)
def test_get_url_list_bad_filename_raise():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/bad_file_*_name.nc" # wildcards
)
flist = fd.core.get_url_list(url, use_cache=False)
assert flist == []
def test_get_url_list_fake_kwarg_https():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc" # wildcards
)
with pytest.raises(KeyError):
fd.core.get_url_list(url, use_cache=False, username="tester", password="<PASSWORD>")
def test_choose_downloader():
import pooch
url = "ftp://thispartdoesntmatter.com"
protocol = fd.core.choose_downloader(url, progress=False)
assert protocol.__class__ == pooch.downloaders.FTPDownloader().__class__
@pytest.mark.skipif(
os.environ.get("CI", "false") == "true", reason="Skipping downloads in CI"
)
def test_download_urls():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc"
)
dest = "./tests/downloads/"
urls = fd.core.get_url_list(
url, cache_path=f"{dest}/remote_files.cache", use_cache=True
)[:1]
fd.core.download_urls(urls, dest_dir=dest)
def test_make_readme():
fname = "./tests/example_catalog.yml"
cat = fd.read_catalog(fname)
for key in cat:
cat[key]["name"] = key.upper().replace("_", " ")
fd.core.create_download_readme("README.txt", **cat[key]) | 0.356895 | 0.34726 |