content
stringlengths 0
894k
| type
stringclasses 2
values |
|---|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from cinder import exception as exc
# Canonical fake UUID shared by these stubs.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
# Cache for generated fake UUIDs; populated by callers elsewhere — not used in this chunk.
FAKE_UUIDS = {}
def stub_volume(id, **kwargs):
    """Build a fake volume dict for tests; keyword overrides are applied last."""
    base = dict(
        id=id,
        user_id='fakeuser',
        project_id='fakeproject',
        host='fakehost',
        size=1,
        availability_zone='fakeaz',
        instance_uuid='fakeuuid',
        attached_host=None,
        mountpoint='/',
        attached_mode='rw',
        status='fakestatus',
        migration_status=None,
        attach_status='attached',
        bootable='false',
        name='vol name',
        display_name='displayname',
        display_description='displaydesc',
        created_at=datetime.datetime(1, 1, 1, 1, 1, 1),
        snapshot_id=None,
        source_volid=None,
        volume_type_id='3e196c20-3c06-11e2-81c1-0800200c9a66',
        volume_metadata=[],
        volume_type={'name': 'vol_type_name'},
        readonly='False',
    )
    base.update(kwargs)
    return base
def stub_volume_create(self, context, size, name, description, snapshot,
                       **param):
    """Fake volume_api.create(): return a stub volume shaped by the arguments."""
    vol = stub_volume('1')
    vol.update(
        size=size,
        display_name=name,
        display_description=description,
        source_volid=None,
        availability_zone=param.get('availability_zone', 'fakeaz'),
    )
    # `snapshot` may be None or a mapping without an 'id' key; both mean "no snapshot".
    try:
        vol['snapshot_id'] = snapshot['id']
    except (KeyError, TypeError):
        vol['snapshot_id'] = None
    return vol
def stub_volume_create_from_image(self, context, size, name, description,
                                  snapshot, volume_type, metadata,
                                  availability_zone):
    """Fake create-from-image: status is 'creating' and the zone is always 'cinder'."""
    overrides = {
        'status': 'creating',
        'size': size,
        'display_name': name,
        'display_description': description,
        'availability_zone': 'cinder',
    }
    vol = stub_volume('1')
    vol.update(overrides)
    return vol
def stub_volume_update(self, context, *args, **param):
    """No-op stand-in for volume_api.update()."""
    return None
def stub_volume_delete(self, context, *args, **param):
    """No-op stand-in for volume_api.delete()."""
    return None
def stub_volume_get(self, context, volume_id):
    """Return the stub volume for *volume_id* (lookup always succeeds)."""
    volume = stub_volume(volume_id)
    return volume
def stub_volume_get_notfound(self, context, volume_id):
    # Always fail the lookup, as if the requested volume does not exist.
    raise exc.NotFound
def stub_volume_get_all(context, search_opts=None):
    """Return three stub volumes, each owned by a different project."""
    owners = ['fake', 'superfake', 'superduperfake']
    return [stub_volume(100 + i, project_id=owner)
            for i, owner in enumerate(owners)]
def stub_volume_get_all_by_project(self, context, search_opts=None):
    # A single-volume listing for the current project; id is always '1'.
    return [stub_volume_get(self, context, '1')]
def stub_snapshot(id, **kwargs):
    """Build a fake snapshot dict for tests; keyword overrides win."""
    snapshot = dict(
        id=id,
        volume_id=12,
        status='available',
        volume_size=100,
        created_at=None,
        display_name='Default name',
        display_description='Default description',
        project_id='fake',
    )
    snapshot.update(kwargs)
    return snapshot
def stub_snapshot_get_all(self):
    """Return three stub snapshots, each owned by a different project."""
    owners = ['fake', 'superfake', 'superduperfake']
    return [stub_snapshot(100 + i, project_id=owner)
            for i, owner in enumerate(owners)]
def stub_snapshot_get_all_by_project(self, context):
    # Single-snapshot listing for the current project; id is always 1.
    return [stub_snapshot(1)]
def stub_snapshot_update(self, context, *args, **param):
    """No-op stand-in for the snapshot update API."""
    return None
def stub_service_get_all_by_topic(context, topic):
    """Return a single enabled service entry pinned to zone1:host1."""
    service = {'availability_zone': "zone1:host1", "disabled": 0}
    return [service]
|
python
|
from baseline.train import create_trainer, register_trainer, register_training_func, Trainer
from baseline.embeddings import register_embeddings
from baseline.reporting import register_reporting, ReportingHook
from baseline.tf.embeddings import TensorFlowEmbeddings
from baseline.tf.optz import optimizer
from baseline.confusion import ConfusionMatrix
from baseline.utils import listify, get_model_file, write_json, color, Colors
from baseline.tf.tfy import embed
import tensorflow as tf
import os
import numpy as np
@register_embeddings(name='cbow')
class CharBoWEmbeddings(TensorFlowEmbeddings):
    """Bag of character embeddings, sum char embeds, so in this case `wsz == dsz`
    """

    @classmethod
    def create_placeholder(cls, name):
        # Int ids with three dynamic dims — presumably [batch, time, chars]; TODO confirm.
        return tf.placeholder(tf.int32, [None, None, None], name=name)

    def __init__(self, name, **kwargs):
        # :param name: embedding name, also used to build the default variable scope
        # Keyword args: vsz (charset size), dsz (embedding dim), finetune,
        # scope, weights (pretrained lookup table), unif (init range).
        super(CharBoWEmbeddings, self).__init__()
        self.vsz = kwargs.get('vsz')  # vocabulary (charset) size
        self.dsz = kwargs.get('dsz')  # embedding dimension
        self.finetune = kwargs.get('finetune', True)
        self.name = name
        self.scope = kwargs.get('scope', '{}/CharBoWLUT'.format(self.name))
        self.weights = kwargs.get('weights')
        if self.weights is None:
            # No pretrained weights supplied: random uniform init in [-unif, unif].
            unif = kwargs.get('unif', 0.1)
            self.weights = np.random.uniform(-unif, unif, (self.vsz, self.dsz))
        self.params = kwargs

    def save_md(self, target):
        # Persist vocab/embedding sizes as JSON metadata at `target`.
        write_json({'vsz': self.get_vsz(), 'dsz': self.get_dsz()}, target)

    def encode(self, x=None):
        # Embed char ids and sum over axis 2 (the per-word character axis),
        # collapsing char embeddings into a single bag-of-chars word vector.
        if x is None:
            x = CharBoWEmbeddings.create_placeholder(self.name)
        self.x = x
        return tf.reduce_sum(embed(x,
                                   self.get_vsz(),
                                   self.get_dsz(),
                                   tf.constant_initializer(self.weights, dtype=tf.float32),
                                   self.finetune,
                                   self.scope), axis=2, keepdims=False)

    def get_vsz(self):
        return self.vsz

    # Warning this function is only initialized AFTER encode
    def get_dsz(self):
        return self.dsz
@register_reporting(name='slack')
class SlackReporting(ReportingHook):
    """Reporting hook that posts Valid/Test metrics to a Slack webhook."""

    def __init__(self, **kwargs):
        super(SlackReporting, self).__init__(**kwargs)
        # Required: the Slack incoming-webhook URL to post to.
        self.webhook = kwargs['webhook']

    def step(self, metrics, tick, phase, tick_type=None, **kwargs):
        """Write results to `slack` (webhook)

        :param metrics: A map of metrics to scores
        :param tick: The time (resolution defined by `tick_type`)
        :param phase: The phase of training (`Train`, `Valid`, `Test`)
        :param tick_type: The resolution of tick (`STEP`, `EPOCH`)
        :return:
        """
        import requests
        # Only eval phases produce a message body; note the POST still
        # happens (with an empty text) for other phases, as before.
        text = ''
        if phase in ['Valid', 'Test']:
            parts = ['%s(%d) [Epoch %d] [%s]' % (os.getlogin(), os.getpid(), tick, phase)]
            for name, score in metrics.items():
                # Loss-like metrics are reported raw; the rest as percentages.
                if name not in ['avg_loss', 'perplexity']:
                    score *= 100.
                parts.append('\t%s=%.3f' % (name, score))
            text = ''.join(parts)
        requests.post(self.webhook, json={"text": text})
@register_training_func('classify', name='test_every_n_epochs')
def train(model, ts, vs, es=None, **kwargs):
    """
    Train a classifier using TensorFlow, running the test set every `test_epochs` epochs
    :param model: The model to train
    :param ts: A training data set
    :param vs: A validation data set
    :param es: A test data set, can be None
    :param kwargs:
        See below

    :Keyword Arguments:
        * *test_epochs* (``int``) -- run the test set every this many epochs. Defaults to 5
        * *do_early_stopping* (``bool``) --
          Stop after evaluation data is no longer improving. Defaults to True
        * *epochs* (``int``) -- how many epochs. Default to 20
        * *outfile* -- Model output file, defaults to classifier-model.pyth
        * *patience* --
           How many epochs where evaluation is no longer improving before we give up
        * *reporting* --
           Callbacks which may be used on reporting updates
        * Additional arguments are supported, see :func:`baseline.tf.optimize` for full list
    :return:
    """
    n = int(kwargs.get('test_epochs', 5))
    do_early_stopping = bool(kwargs.get('do_early_stopping', True))
    epochs = int(kwargs.get('epochs', 20))
    model_file = get_model_file('classify', 'tf', kwargs.get('basedir'))
    if do_early_stopping:
        # NOTE: early_stopping_metric/patience are only bound on this branch;
        # the elif below relies on do_early_stopping being True to reach them.
        early_stopping_metric = kwargs.get('early_stopping_metric', 'acc')
        patience = kwargs.get('patience', epochs)
        print('Doing early stopping on [%s] with patience [%d]' % (early_stopping_metric, patience))
    reporting_fns = listify(kwargs.get('reporting', []))
    print('reporting', reporting_fns)
    trainer = create_trainer(model, **kwargs)
    # TF1 graph-mode setup: table/variable init must run before training.
    tables = tf.tables_initializer()
    model.sess.run(tables)
    model.sess.run(tf.global_variables_initializer())
    model.set_saver(tf.train.Saver())
    max_metric = 0
    last_improved = 0
    for epoch in range(epochs):
        trainer.train(ts, reporting_fns)
        test_metrics = trainer.test(vs, reporting_fns, phase='Valid')
        # Periodic mid-training test pass (skipped on first and last epoch).
        if epoch > 0 and epoch % n == 0 and epoch < epochs - 1:
            print(color('Running test', Colors.GREEN))
            trainer.test(es, reporting_fns, phase='Test')
        if do_early_stopping is False:
            # No early stopping: checkpoint every epoch unconditionally.
            trainer.checkpoint()
            trainer.model.save(model_file)
        elif test_metrics[early_stopping_metric] > max_metric:
            # Validation improved: record it and save the best model so far.
            last_improved = epoch
            max_metric = test_metrics[early_stopping_metric]
            print('New max %.3f' % max_metric)
            trainer.checkpoint()
            trainer.model.save(model_file)
        elif (epoch - last_improved) > patience:
            print(color('Stopping due to persistent failures to improve', Colors.RED))
            break
    if do_early_stopping is True:
        print('Best performance on max_metric %.3f at epoch %d' % (max_metric, last_improved))
    if es is not None:
        # Final evaluation on the best (not the last) checkpoint.
        print(color('Reloading best checkpoint', Colors.GREEN))
        trainer.recover_last_checkpoint()
        trainer.test(es, reporting_fns, phase='Test')
|
python
|
"""Super class of contextual bandit algorithm agent class"""
import numpy as np
class ContextualBanditAlgorithm(object):
    """Base class for contextual bandit agents.

    Args:
        n_features: dimensionality of the context feature vector.

    Attributes:
        iter_num (int): current iteration count.
    """

    def __init__(self, n_features: int):
        self.n_features = n_features
        self.iter_num = 0

    def get_iteration_number(self) -> int:
        """Return the current iteration count."""
        return self.iter_num

    def set_iteration_number(self, t: int) -> None:
        """Set the current iteration count to ``t`` (a positive int)."""
        # Positivity is checked first, then the type (annotations are not
        # enforced at runtime, so validate explicitly); order preserved.
        assert t > 0, "iteration number must be positive. t = {0}".format(t)
        assert isinstance(t, int), "iteration number must be int. t = {0}".format(t)
        self.iter_num = t
if __name__ == '__main__':
    # Library module: nothing to run when executed directly.
    pass
|
python
|
# Generated by Django 2.1.8 on 2019-08-08 23:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the fixed-column footer toggle to FooterSettings.
    # NOTE(review): 'coloumn'/'coloumns' are misspelled in the field name and
    # help text, but this migration has shipped; renaming the field would
    # require a new migration plus a model change, not an edit here.

    dependencies = [
        ('wagtailnhsukfrontendsettings', '0003_footersettings'),
    ]

    operations = [
        migrations.AddField(
            model_name='footersettings',
            name='fixed_coloumn_footer',
            field=models.BooleanField(default=False, help_text='Enable this setting to change way the footer is styled, so links group into coloumns'),
        ),
    ]
|
python
|
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode  # NOTE(review): unused; DataFrame.mode() is used below instead

# code starts here
# `path` is expected to be injected by the hosting platform — not defined in this file.
bank = pd.read_csv(path)
# Split columns by dtype: object -> categorical, numeric -> numerical.
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here

# --------------
# code starts here
# Drop the identifier column; it carries no predictive information.
banks = bank.drop('Loan_ID', axis=1)
print(banks.isnull().sum())
bank_mode = banks.mode()
# Impute every column's missing values with that column's mode. iloc[0] takes
# the first row because DataFrame.mode() can return several rows on ties.
banks.fillna(banks.mode().iloc[0],inplace=True)
print(banks.isnull().sum())
#code ends here

# --------------
# Code starts here
# Mean LoanAmount per (Gender, Married, Self_Employed) group.
avg_loan_amount = pd.pivot_table(
    banks,
    index=['Gender','Married','Self_Employed'],
    values='LoanAmount',
    aggfunc=np.mean)
# code ends here

# --------------
# code starts here
# Approval percentages for self-employed vs non-self-employed applicants,
# both computed against the TOTAL applicant count (not the subgroup size).
loan_approved_se = len(banks[
    (banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')
])
loan_approved_nse = len(banks[
    (banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')
])
total = banks.shape[0]
percentage_se = loan_approved_se/total*100
percentage_nse = loan_approved_nse/total*100
# code ends here

# --------------
# code starts here
# Loan_Amount_Term is in months; convert to years and count terms >= 25 years.
loan_term = banks['Loan_Amount_Term'].apply(lambda x:x/12)
big_loan_term = len(loan_term[loan_term>=25])
# code ends here

# --------------
# code starts here
# Mean ApplicantIncome and Credit_History per Loan_Status group.
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
|
python
|
# Copyright © 2018 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
# !/usr/bin/python
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: vcd_vapp_netcommit
short_description: Ansible Module to manage (create/delete) Networks in vApps in vCloud Director.
version_added: "2.4"
description:
    - "Ansible Module to manage (create/delete) Networks in vApps."
options:
    user:
        description:
            - vCloud Director user name
        required: false
    password:
        description:
            - vCloud Director user password
        required: false
    host:
        description:
            - vCloud Director host address
        required: false
    org:
        description:
            - Organization name on vCloud Director to access
        required: false
    api_version:
        description:
            - Pyvcloud API version
        required: false
    verify_ssl_certs:
        description:
            - whether to use secure connection to vCloud Director host
        required: false
    network:
        description:
            - Network name
        required: true
    vapp:
        description:
            - vApp name
        required: true
    vdc:
        description:
            - VDC name
        required: true
    fence_mode:
        description:
            - Network fence mode
        required: false
    parent_network:
        description:
            - VDC parent network to connect to
        required: false
    ip_scope:
        description:
            - IP scope when no parent_network is defined
        required: false
author:
    - mtaneja@vmware.com
'''

# BUG FIX: the example previously used '=' instead of ':' for the module
# arguments, which is not valid YAML and would fail if copied verbatim.
EXAMPLES = '''
- name: Test with a message
  vcd_vapp_netcommit:
    user: terraform
    password: abcd
    host: csa.sandbox.org
    org: Terraform
    api_version: 30
    verify_ssl_certs: False
    network: "uplink"
    vapp: "vapp1"
    vdc: "vdc1"
    state: "present"
'''

RETURN = '''
msg: success/failure message corresponding to vapp network state
changed: true if resource has been changed else false
'''
from lxml import etree
from ipaddress import ip_network
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.client import E
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import E_OVF
from pyvcloud.vcd.client import FenceMode
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import RelationType
from ansible.module_utils.vcd import VcdAnsibleModule
from pyvcloud.vcd.exceptions import EntityNotFoundException, OperationNotSupportedException
# Valid values for the module's 'state' parameter.
VAPP_NETWORK_STATES = ['present', 'absent']


def vapp_network_argument_spec():
    """Return the Ansible argument spec for vApp network management."""
    return {
        'network': dict(type='str', required=True),
        'vapp': dict(type='str', required=True),
        'vdc': dict(type='str', required=True),
        'fence_mode': dict(type='str', required=False, default=FenceMode.BRIDGED.value),
        'parent_network': dict(type='str', required=False, default=None),
        'ip_scope': dict(type='str', required=False, default=None),
        'state': dict(choices=VAPP_NETWORK_STATES, required=True),
    }
class VappNetwork(VcdAnsibleModule):
    """Ansible module implementation managing (add/delete) networks on a vApp."""

    def __init__(self, **kwargs):
        super(VappNetwork, self).__init__(**kwargs)
        vapp_resource = self.get_resource()
        self.vapp = VApp(self.client, resource=vapp_resource)

    def manage_states(self):
        """Dispatch to add/delete based on the requested 'state' parameter."""
        state = self.params.get('state')
        if state == "present":
            return self.add_network()
        if state == "absent":
            return self.delete_network()

    def get_resource(self):
        """Resolve the target vApp resource via org -> vdc -> vapp lookup."""
        vapp = self.params.get('vapp')
        vdc = self.params.get('vdc')
        org_resource = Org(self.client, resource=self.client.get_org())
        vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))
        vapp_resource_href = vdc_resource.get_resource_href(name=vapp, entity_type=EntityType.VAPP)
        vapp_resource = self.client.get_resource(vapp_resource_href)
        return vapp_resource

    def get_network(self):
        """Return the vApp network named by the 'network' param.

        Raises EntityNotFoundException when no such network exists.
        """
        network_name = self.params.get('network')
        networks = self.vapp.get_all_networks()
        for network in networks:
            # Network names live in the OVF namespace on these elements.
            if network.get('{'+NSMAP['ovf']+'}name') == network_name:
                return network
        raise EntityNotFoundException('Can\'t find the specified vApp network')

    def delete_network(self):
        """Remove the named network from the vApp's NetworkConfigSection.

        Returns a response dict with 'changed' plus 'msg' on success or
        'warnings' when the network was already absent (idempotent delete).
        """
        network_name = self.params.get('network')
        response = dict()
        response['changed'] = False
        try:
            self.get_network()
        except EntityNotFoundException:
            response['warnings'] = 'Vapp Network {} is not present.'.format(network_name)
        else:
            network_config_section = self.vapp.resource.NetworkConfigSection
            for network_config in network_config_section.NetworkConfig:
                if network_config.get('networkName') == network_name:
                    network_config_section.remove(network_config)
            delete_network_task = self.client.put_linked_resource(
                self.vapp.resource.NetworkConfigSection, RelationType.EDIT,
                EntityType.NETWORK_CONFIG_SECTION.value,
                network_config_section)
            self.execute_task(delete_network_task)
            response['msg'] = 'Vapp Network {} has been deleted.'.format(network_name)
            response['changed'] = True
        return response

    def add_network(self):
        """Add the named network to the vApp, bridged to a parent network or
        configured from an explicit IP scope.

        Returns a response dict with 'changed' plus 'msg' on success or
        'warnings' when the network already exists (idempotent add).
        """
        network_name = self.params.get('network')
        fence_mode = self.params.get('fence_mode')
        parent_network = self.params.get('parent_network')
        ip_scope = self.params.get('ip_scope')
        response = dict()
        response['changed'] = False
        try:
            self.get_network()
        except EntityNotFoundException:
            network_config_section = self.vapp.resource.NetworkConfigSection
            config = E.Configuration()
            if parent_network:
                vdc = self.params.get('vdc')
                org_resource = Org(self.client, resource=self.client.get_org())
                vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))
                orgvdc_networks = vdc_resource.list_orgvdc_network_resources(parent_network)
                parent = next((network for network in orgvdc_networks if network.get('name') == parent_network), None)
                if parent:
                    config.append(E.ParentNetwork(href=parent.get('href')))
                else:
                    # BUG FIX: the original combined a '%s' placeholder with
                    # str.format(), so the parent network name was never
                    # interpolated into the error message.
                    raise EntityNotFoundException(
                        'Parent network \'{0}\' does not exist'.format(parent_network))
            elif ip_scope:
                # Derive gateway (first host address) and netmask from the CIDR.
                scope = E.IpScope(
                    E.IsInherited('false'),
                    E.Gateway(str(ip_network(ip_scope, strict=False).network_address+1)),
                    E.Netmask(str(ip_network(ip_scope, strict=False).netmask)))
                config.append(E.IpScopes(scope))
            else:
                # NOTE(review): VappNetworkCreateError is not defined or
                # imported in this module, so this raise surfaces as a
                # NameError; it needs a definition or a pyvcloud exception.
                raise VappNetworkCreateError('Either parent_network or ip_scope must be set')
            config.append(E.FenceMode(fence_mode))
            network_config = E.NetworkConfig(config, networkName=network_name)
            network_config_section.append(network_config)
            add_network_task = self.client.put_linked_resource(
                self.vapp.resource.NetworkConfigSection, RelationType.EDIT,
                EntityType.NETWORK_CONFIG_SECTION.value,
                network_config_section)
            self.execute_task(add_network_task)
            response['msg'] = 'Vapp Network {} has been added'.format(network_name)
            response['changed'] = True
        else:
            response['warnings'] = 'Vapp Network {} is already present.'.format(network_name)
        return response
def main():
    """Module entry point: validate input, run state management, report result."""
    argument_spec = vapp_network_argument_spec()
    response = dict(
        msg=dict(type='str')
    )
    module = VappNetwork(argument_spec=argument_spec, supports_check_mode=True)
    try:
        if not module.params.get('state'):
            raise Exception('Please provide the state for the resource.')
        response = module.manage_states()
        module.exit_json(**response)
    except Exception as error:
        # BUG FIX: pass a serializable string rather than the exception
        # object itself, so fail_json can emit valid JSON output.
        response['msg'] = str(error)
        module.fail_json(**response)
|
python
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import random
import go
import numpy as np
import tensorflow as tf
"""
Allowable symmetries:
identity [12][34]
rot90 [24][13]
rot180 [43][21]
rot270 [31][42]
flip [13][24]
fliprot90 [34][12]
fliprot180 [42][31]
fliprot270 [21][43]
"""
# Maps each symmetry name to the symmetry that undoes it.
INVERSES = {
    'identity': 'identity',
    'rot90': 'rot270',
    'rot180': 'rot180',
    'rot270': 'rot90',
    'flip': 'flip',
    'fliprot90': 'fliprot90',
    'fliprot180': 'fliprot180',
    'fliprot270': 'fliprot270',
}

# Concrete array transforms implementing each symmetry on 2-D planes.
IMPLS = {
    'identity': lambda x: x,
    'rot90': np.rot90,
    'rot180': functools.partial(np.rot90, k=2),
    'rot270': functools.partial(np.rot90, k=3),
    'flip': lambda x: np.rot90(np.fliplr(x)),
    'fliprot90': np.flipud,
    'fliprot180': lambda x: np.rot90(np.flipud(x)),
    'fliprot270': np.fliplr,
}

# Every symmetry must have both an implementation and an inverse.
assert set(IMPLS) == set(INVERSES)

# A symmetry is just a string describing the transformation.
SYMMETRIES = list(INVERSES)


def invert_symmetry(s):
    """Return the symmetry name that undoes *s*."""
    return INVERSES[s]


def apply_symmetry_feat(sym, features):
    """Apply symmetry *sym* to a feature array and return the result."""
    transform = IMPLS[sym]
    return transform(features)
def apply_symmetry_pi(s, pi):
    """Apply symmetry *s* to a policy vector of length go.N * go.N + 1.

    The board moves are reshaped to [N, N], transformed, and flattened
    back; the trailing pass move is position-independent and unchanged.
    """
    pi = np.copy(pi)
    # rotate all moves except for the pass move at end
    pi[:-1] = IMPLS[s](pi[:-1].reshape([go.N, go.N])).ravel()
    return pi
def randomize_symmetries_feat(features):
    """Apply an independent random symmetry to each feature array.

    Returns (symmetries_used, transformed) so policy outputs can later be
    mapped back with invert_symmetries_pi.
    """
    chosen = [random.choice(SYMMETRIES) for _ in features]
    transformed = [apply_symmetry_feat(s, f) for s, f in zip(chosen, features)]
    return chosen, transformed
def invert_symmetries_pi(symmetries, pis):
    """Undo the per-example symmetries on a batch of policy vectors."""
    restored = []
    for s, pi in zip(symmetries, pis):
        restored.append(apply_symmetry_pi(invert_symmetry(s), pi))
    return restored
def rotate_train_nhwc(x, pi):
    """Randomly apply one of the 8 dihedral symmetries to (x, pi), NHWC layout.

    x:  feature tensor of shape [N, N, num_features]
    pi: policy vector of shape [N*N + 1]; the final pass move is invariant.
    Both outputs receive the SAME symmetry because `sym` is drawn once and
    shared by both rotate() calls.
    """
    # Fixed seed makes the augmentation sequence reproducible across runs.
    sym = tf.random_uniform(
        [],
        minval=0,
        maxval=len(SYMMETRIES),
        dtype=tf.int32,
        seed=123)

    def rotate(tensor):
        # Bits 1/2/4 of `sym` independently select the three generators
        # (LR flip, UD flip, transpose), composing to all 8 symmetries.
        # flipLeftRight
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 1) > 0,
            tf.reverse(tensor, axis=[0]),
            tensor)
        # flipUpDown
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 2) > 0,
            tf.reverse(tensor, axis=[1]),
            tensor)
        # flipDiagonal
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 4) > 0,
            tf.transpose(tensor, perm=[1, 0, 2]),
            tensor)
        return tensor

    # TODO(tommadams): use tf.ensure_shape instead of tf.assert_equal.
    squares = go.N * go.N
    assert_shape_pi = tf.assert_equal(pi.shape.as_list(), [squares + 1])
    x_shape = x.shape.as_list()
    assert_shape_x = tf.assert_equal(x_shape, [go.N, go.N, x_shape[2]])
    # Split the policy into board moves and the rotation-invariant pass move.
    pi_move = tf.slice(pi, [0], [squares], name="slice_moves")
    pi_pass = tf.slice(pi, [squares], [1], name="slice_pass")
    # Add a final dim so that x and pi have same shape: [N,N,num_features].
    pi_n_by_n = tf.reshape(pi_move, [go.N, go.N, 1])
    with tf.control_dependencies([assert_shape_x, assert_shape_pi]):
        pi_rot = tf.concat(
            [tf.reshape(rotate(pi_n_by_n), [squares]), pi_pass],
            axis=0)
    return rotate(x), pi_rot
def rotate_train_nchw(x, pi):
    """Randomly apply one of the 8 dihedral symmetries to (x, pi), NCHW layout.

    Same as rotate_train_nhwc except x is [num_features, N, N], so the two
    spatial axes are 1 and 2 and transpose keeps the leading channel axis.
    """
    # Fixed seed makes the augmentation sequence reproducible across runs.
    sym = tf.random_uniform(
        [],
        minval=0,
        maxval=len(SYMMETRIES),
        dtype=tf.int32,
        seed=123)

    def rotate(tensor):
        # Bits 1/2/4 of `sym` select LR flip, UD flip and transpose,
        # composing to all 8 dihedral symmetries.
        # flipLeftRight
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 1) > 0,
            tf.reverse(tensor, axis=[1]),
            tensor)
        # flipUpDown
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 2) > 0,
            tf.reverse(tensor, axis=[2]),
            tensor)
        # flipDiagonal
        tensor = tf.where(
            tf.bitwise.bitwise_and(sym, 4) > 0,
            tf.transpose(tensor, perm=[0, 2, 1]),
            tensor)
        return tensor

    # TODO(tommadams): use tf.ensure_shape instead of tf.assert_equal.
    squares = go.N * go.N
    assert_shape_pi = tf.assert_equal(pi.shape.as_list(), [squares + 1])
    x_shape = x.shape.as_list()
    assert_shape_x = tf.assert_equal(x_shape, [x_shape[0], go.N, go.N])
    # Split the policy into board moves and the rotation-invariant pass move.
    pi_move = tf.slice(pi, [0], [squares], name="slice_moves")
    pi_pass = tf.slice(pi, [squares], [1], name="slice_pass")
    # Add a dim so that x and pi have same shape: [num_features,N,N].
    pi_n_by_n = tf.reshape(pi_move, [1, go.N, go.N])
    with tf.control_dependencies([assert_shape_x, assert_shape_pi]):
        pi_rot = tf.concat(
            [tf.reshape(rotate(pi_n_by_n), [squares]), pi_pass],
            axis=0)
    return rotate(x), pi_rot
def apply_symmetry_dual(X0, y0, v0, num_symmetries=8):
    """Augment one (features, policy, value) example with sampled symmetries.

    Samples `num_symmetries` distinct symmetries (without replacement) and
    stacks the transformed copies; the scalar value v0 is symmetry-invariant
    and simply repeated.

    To transform on the fly instead, tensor ops would be needed, e.g.
    tf.experimental.numpy.rot90(X0, axes=(1, 2)).
    """
    feats, pis, vals = [], [], []
    for sym in random.sample(SYMMETRIES, num_symmetries):
        feats.append(apply_symmetry_feat(sym, X0))
        pis.append(apply_symmetry_pi(sym, y0))
        vals.append(v0)
    return np.stack(feats), np.stack(pis), np.stack(vals)
|
python
|
import asyncio
import unittest
from unittest.mock import ANY
from aiobeanstalk.proto import Client
from aiobeanstalk.packets import Using, Inserted
def btalk_test(fun):
    """Decorator: run a generator-based test against a local beanstalkd.

    Wraps *fun* so it receives a connected Client (localhost:11300) as its
    second argument and is driven to completion on the test's event loop;
    the connection is closed even if the test body raises.

    NOTE(review): asyncio.coroutine was removed in Python 3.11; this module
    predates async/await (tests use `yield from`), so running on modern
    Python would require rewriting the tests themselves, not just this
    decorator.
    """
    fun = asyncio.coroutine(fun)
    def wrapper(self):
        @asyncio.coroutine
        def full_test():
            cli = yield from Client.connect('localhost', 11300, loop=self.loop)
            try:
                yield from fun(self, cli)
            finally:
                # Always release the socket, pass or fail.
                cli.close()
        self.loop.run_until_complete(full_test())
    return wrapper
class TestCase(unittest.TestCase):
    """Integration tests against a beanstalkd server on localhost:11300."""

    def setUp(self):
        # Use a private event loop per test; unsetting the global loop makes
        # any accidental get_event_loop() use fail loudly.
        asyncio.set_event_loop(None)
        self.loop = asyncio.new_event_loop()

    @btalk_test
    def testPut(self, btalk):
        # 'use' selects the tube; the server echoes the tube name back.
        self.assertEqual((yield from btalk.send_command('use', 'test.q1')),
            Using('test.q1'))
        # put(pri=0, delay=0, ttr=30); the job id is server-assigned, hence ANY.
        self.assertEqual((yield from btalk.send_command(
            'put', 0, 0, 30,
            body=b'hello world')),
            Inserted(ANY))
|
python
|
import os.path
# charmap: list of characters, each a list of eight 8-bit row-pattern strings.
charmap = []
# charmapDescription: human-readable label per character, parsed from '-' lines.
charmapDescription = []
if os.path.isfile('charmap.mif'):
    # Parse the .mif file: data lines start with a space and carry one 8-bit
    # row pattern in columns [-10:-2]; every 8 consecutive rows form one
    # character glyph. NOTE(review): 'r+' grants write access but the file
    # is only read — 'r' would suffice.
    charmapFile = open('charmap.mif', 'r+')
    lines = charmapFile.readlines()
    cont = 0
    character = []
    for line in lines:
        if line[0] == " ":
            newLine = line[-10:-2]
            # Flush the previous glyph once 8 rows have been collected.
            if cont % 8 == 0 and cont != 0:
                charmap.append(character[:])
                character.clear()
            character.append(newLine[:])
            cont += 1
        if line[0] == '-':
            # Description lines: keep the text after the ']' marker,
            # dropping the trailing newline.
            newDescription = line[line.index(']')+1:-1]
            charmapDescription.append(newDescription[:])
    # Append the final glyph, which the loop above never flushes.
    charmap.append(character[:])
    charmapFile.close()
else:
charmap = [
#0
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#1
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#2
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#3
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#4
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#5
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#6
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#7
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#8
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#9
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#10
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#11
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#12
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#13
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#14
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#15
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#16
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#17
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#18
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#19
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#20
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#21
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#22
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#23
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#24
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#25
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#26
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#27
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#28
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#29
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#30
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#31
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#32 SPACE
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#33 !
[
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000",
"00011000",
"00000000"
],
#34 "
[
"01100110",
"01100110",
"01100110",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#35 #
[
"01100110",
"01100110",
"11111111",
"01100110",
"11111111",
"01100110",
"01100110",
"00000000"
],
#36 $
[
"00011000",
"00111110",
"01100000",
"00111100",
"00000110",
"01111100",
"00011000",
"00000000"
],
#37 %
[
"01100010",
"01100110",
"00001100",
"00011000",
"00110000",
"01100110",
"01000110",
"00000000"
],
#38 &
[
"00111100",
"01100110",
"00111100",
"00111000",
"01100111",
"01100110",
"00111111",
"00000000"
],
#39 '
[
"00000110",
"00001100",
"00011000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#40 (
[
"00001100",
"00011000",
"00110000",
"00110000",
"00110000",
"00011000",
"00001100",
"00000000"
],
#41 )
[
"00110000",
"00011000",
"00001100",
"00001100",
"00001100",
"00011000",
"00110000",
"00000000"
],
#42 *
[
"00000000",
"01100110",
"00111100",
"11111111",
"00111100",
"01100110",
"00000000",
"00000000"
],
#43 +
[
"00000000",
"00011000",
"00011000",
"01111110",
"00011000",
"00011000",
"00000000",
"00000000"
],
#44 ,
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00011000",
"00011000",
"00110000"
],
#45 -
[
"00000000",
"00000000",
"00000000",
"01111110",
"00000000",
"00000000",
"00000000",
"00000000"
],
#46 .
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00011000",
"00011000",
"00000000"
],
#47 /
[
"00000000",
"00000011",
"00000110",
"00001100",
"00011000",
"00110000",
"01100000",
"00000000"
],
#48 0
[
"00111100",
"01100110",
"01101110",
"01110110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#49 1
[
"00011000",
"00011000",
"00111000",
"00011000",
"00011000",
"00011000",
"01111110",
"00000000"
],
#50 2
[
"00111100",
"01100110",
"00000110",
"00001100",
"00110000",
"01100000",
"01111110",
"00000000"
],
#51 3
[
"00111100",
"01100110",
"00000110",
"00011100",
"00000110",
"01100110",
"00111100",
"00000000"
],
#52 4
[
"00000110",
"00001110",
"00011110",
"01100110",
"01111111",
"00000110",
"00000110",
"00000000"
],
#53 5
[
"01111110",
"01100000",
"01111100",
"00000110",
"00000110",
"01100110",
"00111100",
"00000000"
],
#54 6
[
"00111100",
"01100110",
"01100000",
"01111100",
"01100110",
"01100110",
"00111100",
"00000000"
],
#55 7
[
"01111110",
"01100110",
"00001100",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000"
],
#56 8
[
"00111100",
"01100110",
"01100110",
"00111100",
"01100110",
"01100110",
"00111100",
"00000000"
],
#57 9
[
"00111100",
"01100110",
"01100110",
"00111110",
"00000110",
"01100110",
"00111100",
"00000000"
],
#58 :
[
"00000000",
"00011000",
"00011000",
"00000000",
"00000000",
"00011000",
"00011000",
"00000000"
],
#59 ;
[
"00000000",
"00011000",
"00011000",
"00000000",
"00011000",
"00110000",
"00000000",
"00000000"
],
#60 <
[
"00000000",
"00000110",
"00011000",
"01100000",
"01100000",
"00011000",
"00000110",
"00000000"
],
#61 =
[
"00000000",
"00000000",
"01111110",
"00000000",
"00000000",
"01111110",
"00000000",
"00000000"
],
#62 >
[
"00000000",
"01100000",
"00011000",
"00000110",
"00000110",
"00011000",
"01100000",
"00000000"
],
#63 ?
[
"00111100",
"01000010",
"00000010",
"00000100",
"00001000",
"00000000",
"00001000",
"00000000"
],
#64 @
[
"00111100",
"01100110",
"01101110",
"01101110",
"01100000",
"01100010",
"00111100",
"00000000"
],
#65 A
[
"00011000",
"00111100",
"01100110",
"01111110",
"01100110",
"01100110",
"01100110",
"00000000"
],
#66 B
[
"01111100",
"01100110",
"01100110",
"01111100",
"01100110",
"01100110",
"01111100",
"00000000"
],
#67 C
[
"00111100",
"01100110",
"01100000",
"01100000",
"01100000",
"01100110",
"00111100",
"00000000"
],
#68 D
[
"01111000",
"01101100",
"01100110",
"01100110",
"01100110",
"01101100",
"01111000",
"00000000"
],
#69 E
[
"01111110",
"01100000",
"01100000",
"01111000",
"01100000",
"01100000",
"01111110",
"00000000"
],
#70 F
[
"01111110",
"01100000",
"01100000",
"01111000",
"01100000",
"01100000",
"01100000",
"00000000"
],
#71 G
[
"00111100",
"01100110",
"01100000",
"01101110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#72 H
[
"01100110",
"01100110",
"01100110",
"01111110",
"01100110",
"01100110",
"01100110",
"00000000"
],
#73 I
[
"00111100",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00111100",
"00000000"
],
#74 J
[
"00011110",
"00001100",
"00001100",
"00001100",
"00001100",
"01101100",
"00111000",
"00000000"
],
#75 K
[
"01100110",
"01101100",
"01111000",
"01110000",
"01111000",
"01101100",
"01100110",
"00000000"
],
#76 L
[
"01100000",
"01100000",
"01100000",
"01100000",
"01100000",
"01100000",
"01111110",
"00000000"
],
#77 M
[
"01100011",
"01110111",
"01111111",
"01101011",
"01100011",
"01100011",
"01100011",
"00000000"
],
#78 N
[
"01100110",
"01110110",
"01111110",
"01111110",
"01101110",
"01100110",
"01100110",
"00000000"
],
#79 O
[
"00111100",
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#80 P
[
"01111100",
"01100110",
"01100110",
"01111100",
"01100000",
"01100000",
"01100000",
"00000000"
],
#81 Q
[
"00111100",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00001110",
"00000000"
],
#82 R
[
"00111100",
"01100110",
"01100110",
"01111100",
"01111000",
"01101100",
"01100110",
"00000000"
],
#83 S
[
"00111100",
"01100110",
"01100000",
"00111100",
"00000110",
"01100110",
"00111100",
"00000000"
],
#84 T
[
"01111110",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000"
],
#85 U
[
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#86 V
[
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00011000",
"00000000"
],
#87 W
[
"01100011",
"01100011",
"01100011",
"01101011",
"01111111",
"01110111",
"01100011",
"00000000"
],
#88 X
[
"01100110",
"01100110",
"00111100",
"00011000",
"00111100",
"01100110",
"01100110",
"00000000"
],
#89 Y
[
"01100110",
"01100110",
"01100110",
"00111100",
"00011000",
"00011000",
"00011000",
"00000000"
],
#90 Z
[
"01111110",
"00000110",
"00001100",
"00011000",
"00110000",
"01100000",
"01111110",
"00000000"
],
#91 [
[
"00111100",
"00110000",
"00110000",
"00110000",
"00110000",
"00110000",
"00111100",
"00000000"
],
#92 \
[
"00000000",
"01100000",
"00110000",
"00011000",
"00001100",
"00000110",
"00000011",
"00000000"
],
#93 ]
[
"00111100",
"00001100",
"00001100",
"00001100",
"00001100",
"00001100",
"00111100",
"00000000"
],
#94 ^
[
"00011000",
"00111100",
"01100110",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#95 _
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"01111111",
"00000000"
],
#96 `
[
"00110000",
"00011000",
"00001100",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#97 a
[
"00000000",
"00000000",
"00111100",
"00000110",
"00111110",
"01100110",
"00111110",
"00000000"
],
#98 b
[
"00000000",
"01100000",
"01100000",
"01111100",
"01100110",
"01100110",
"01111100",
"00000000"
],
#99 c
[
"00000000",
"00000000",
"00111110",
"01100000",
"01100000",
"01100000",
"00111110",
"00000000"
],
#100 d
[
"00000000",
"00000110",
"00000110",
"00111110",
"01100110",
"01100110",
"00111110",
"00000000"
],
#101 e
[
"00000000",
"00000000",
"00111100",
"01100110",
"01111110",
"01100000",
"00111110",
"00000000"
],
#102 f
[
"00000000",
"00111100",
"01100000",
"01111000",
"01100000",
"01100000",
"01100000",
"00000000"
],
#103 g
[
"00000000",
"00000000",
"00111100",
"01100110",
"00111110",
"00000110",
"00111100",
"00000000"
],
#104 h
[
"00000000",
"01100000",
"01100000",
"01111100",
"01100110",
"01100110",
"01100110",
"00000000"
],
#105 i
[
"00000000",
"00011000",
"00000000",
"00111000",
"00011000",
"00011000",
"00111100",
"00000000"
],
#106 j
[
"00000000",
"00011000",
"00000000",
"00111000",
"00011000",
"00011000",
"01110000",
"00000000"
],
#107 k
[
"00000000",
"01100000",
"01100110",
"01101100",
"01111000",
"01101100",
"01100110",
"00000000"
],
#108 l
[
"00000000",
"01110000",
"00110000",
"00110000",
"00110000",
"00110000",
"00011100",
"00000000"
],
#109 m
[
"00000000",
"00000000",
"00110110",
"01101011",
"01100011",
"01100011",
"01100011",
"00000000"
],
#110 n
[
"00000000",
"00000000",
"00111100",
"01100110",
"01100110",
"01100110",
"01100110",
"00000000"
],
#111 o
[
"00000000",
"00000000",
"00111100",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#112 p
[
"00000000",
"00000000",
"00111100",
"01100110",
"01111100",
"01100000",
"01100000",
"00000000"
],
#113 q
[
"00000000",
"00000000",
"00111100",
"01100110",
"00111110",
"00000110",
"00000110",
"00000000"
],
#114 r
[
"00000000",
"00000000",
"00111100",
"01100110",
"01100000",
"01100000",
"01100000",
"00000000"
],
#115 s
[
"00000000",
"00000000",
"00111100",
"01100000",
"00111100",
"00000110",
"00111100",
"00000000"
],
#116 t
[
"00000000",
"01100000",
"01100000",
"01111000",
"01100000",
"01100000",
"00111100",
"00000000"
],
#117 u
[
"00000000",
"00000000",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#118 v
[
"00000000",
"00000000",
"01100110",
"01100110",
"01100110",
"00111100",
"00011000",
"00000000"
],
#119 w
[
"00000000",
"00000000",
"01100011",
"01100011",
"01100011",
"01101011",
"00110110",
"00000000"
],
#120 x
[
"00000000",
"00000000",
"01100110",
"00111100",
"00011000",
"00111100",
"01100110",
"00000000"
],
#121 y
[
"00000000",
"00000000",
"01100110",
"00111100",
"00011000",
"00110000",
"01100000",
"00000000"
],
#122 z
[
"00000000",
"00000000",
"01111110",
"00001100",
"00011000",
"00110000",
"01111110",
"00000000"
],
#123 {
[
"00001100",
"00011000",
"00110000",
"11110000",
"00110000",
"00011000",
"00001100",
"00000000"
],
#124 |
[
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000"
],
#125 }
[
"00110000",
"00011000",
"00001100",
"00001111",
"00001100",
"00011000",
"00110000",
"00000000"
],
#126 ~
[
"00000000",
"00000000",
"00000000",
"01110110",
"11011100",
"00000000",
"00000000",
"00000000"
],
#127
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
]
]
# Human-readable label for each of the 128 glyph slots in the bitmap table
# above: slots 0-31 (control codes) are unnamed, slot 32 is SPACE, slots
# 33-126 are the printable ASCII characters themselves, and 127 is ESC.
charmapDescription = (
    [""] * 32
    + ["SPACE"]
    + [chr(code) for code in range(33, 127)]
    + ["ESC"]
)
|
python
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#helper methods to create a Junit file
import xml.etree.ElementTree as ET
import os
from warrior.Framework.Utils.print_Utils import print_debug
from warrior.Framework.Utils import file_Utils
from warrior.WarriorCore.Classes.html_results_class import WarriorHtmlResults
from warrior.WarriorCore.Classes.execution_summary_class import ExecutionSummary
class Junit(object):
    """Build a JUnit-style XML result tree and render it to file/HTML.

    The tree is rooted at a <testsuites> element; testsuites and testcases
    are located by their ``timestamp`` attribute throughout this class.
    """
    def __init__(self, filename, **kwargs):
        """Create the root <testsuites> element for *filename*.

        :param filename: base name used when writing the junit XML file.
        :param kwargs: extra attributes for the root element (counts default to "0").
        """
        self.junit_xslt = "{0}{1}Reporting{1}junit_to_html.xsl".format(os.getenv("WAR_TOOLS_DIR"), os.sep)
        self.root = self.create_element("testsuites", tests="0", suites="0",
                                        **self.init_arg(**kwargs))
        self.filename = filename
        properties = self.create_element("properties")
        self.root.append(properties)
    def init_arg(self, **kwargs):
        """
        Initialize the common attributes for an element: every count
        attribute defaults to "0", then caller-supplied kwargs override.
        """
        default_keys = ["errors", "failures", "skipped", "time", "passes"]
        result = {key: "0" for key in default_keys}
        result.update(kwargs)
        return result
    def create_testsuite(self, location, **kwargs):
        """
        Create a <testsuite> element (with a "location" property) and
        append it to the root.
        """
        testsuite = self.create_element("testsuite", tests="0", **self.init_arg(**kwargs))
        properties = self.create_element("properties")
        testsuite.append(properties)
        properties.append(self.create_element("property", {"name": "location", "value": location}))
        self.root.append(testsuite)
    def create_testcase(self, location, timestamp, ts_timestamp, name,
                        classname="customTestsuite_independant_testcase_execution",
                        tag="testcase", **kwargs):
        """
        Create a testcase element (element name taken from *tag*) under the
        testsuite whose timestamp equals *ts_timestamp*.

        When no testsuite exists yet (testcase executed outside a suite), a
        default non-displayed suite is created first.
        """
        if self.root.find("testsuite") is None:
            self.update_attr("timestamp", timestamp, "pj", "0")
            self.create_testsuite(location=location, name=classname, timestamp=timestamp,
                                  display='False', **self.init_arg(**kwargs))
        for ts in self.root.findall("testsuite"):
            if ts.get("timestamp") == ts_timestamp:
                # create an element with name as in tag
                tc = self.create_element(tag, classname=classname, timestamp=timestamp,
                                         exceptions="0", keywords="0", name=name,
                                         display='True', **self.init_arg(**kwargs))
                ts.append(tc)
                properties = self.create_element("properties")
                tc.append(properties)
    @classmethod
    def create_element(cls, tagname="", attr=None, **kwargs):
        """Create an xml element with the given tag name; attributes come
        from the *attr* dict plus any keyword arguments (all stringified)."""
        if attr is None:
            attr = {}
        elem = ET.Element(tagname)
        for key, val in list(attr.items()):
            elem.set(str(key), str(val))
        for key, val in list(kwargs.items()):
            elem.set(str(key), str(val))
        return elem
    def get_family_with_timestamp(self, timestamp):
        """ Get [case, suite, root] elements based on the case timestamp value
        (None when no case matches). """
        for testsuite in list(self.root):
            for testcase in list(testsuite):
                if testcase.get("timestamp") == timestamp:
                    return [testcase, testsuite, self.root]
    def get_tc_with_timestamp(self, timestamp):
        """ Get the testcase element based on the timestamp value """
        for testsuite in list(self.root):
            for testcase in list(testsuite):
                if testcase.get("timestamp") == timestamp:
                    return testcase
    def get_ts_with_timestamp(self, timestamp):
        """ Get the testsuite element based on the timestamp value """
        for testsuite in list(self.root):
            if testsuite.get("timestamp") == timestamp:
                return testsuite
    def add_keyword_result(self, tc_timestamp, step_num, kw_name, status, kw_timestamp, duration,
                           resultfile, impact, onerror, desc="", info="", tc_name="",
                           tc_resultsdir=""):
        """Form a keyword status dict with the keyword info and delegate to
        add_property to build the keyword element under the testcase."""
        if str(status).lower() == "true":
            status = "PASS"
        elif str(status).lower() == "false":
            status = "FAIL"
        keyword_items = {"type": "keyword", 'display': 'True', "step": step_num,
                         "name": kw_name, "status": status, "timestamp": kw_timestamp,
                         "time": duration, "resultfile": resultfile,
                         "impact": impact, "onerror": onerror, "description": desc,
                         "info": info}
        # if a failing status is encountered, add a "defects" attribute to the
        # keyword tag whose value is the path to the defects file.
        failing_status = ['FAIL', 'EXCEPTION', 'ERROR']
        if str(status).upper() in failing_status:
            defects_dir = os.path.dirname(tc_resultsdir) + os.sep + 'Defects'
            kw_resultfile_nameonly = file_Utils.getNameOnly(os.path.basename(resultfile))
            defects_file = tc_name + "_" + kw_resultfile_nameonly + ".json"
            defects_filepath = defects_dir + os.sep + defects_file
            keyword_items['defects'] = defects_filepath
        self.add_property(name=kw_name, value="KEYWORD_DISCARD", elem_type="kw",
                          timestamp=tc_timestamp, keyword_items=keyword_items)
    def add_testcase_message(self, timestamp, status):
        """ Add a message element for fail/error/skip cases; falls back to the
        suite element when no case matches the timestamp. """
        elem = self.get_tc_with_timestamp(timestamp)
        if elem is None:
            elem = self.get_ts_with_timestamp(timestamp)
        if str(status).lower() == "false":
            elem.append(self.create_element("failure", {"message": "test failure"}))
        elif str(status).lower() == "error":
            elem.append(self.create_element("error", {}))
        elif str(status).lower() == "skipped":
            elem.append(self.create_element("skipped", {}))
    def add_requirement(self, requirement, timestamp):
        """Add a new requirement property to the matching testsuite."""
        self.get_ts_with_timestamp(timestamp).find("properties").append(self.create_element\
            ("property", {"name": "requirement", "value": requirement}))
    def add_property(self, name, value, elem_type, timestamp, **kwargs):
        """Add a new property to a specific element (pj/ts/tc) when called.

        Since keyword steps are logged as properties, elem_type "kw" gets
        special handling: the property attributes come from kwargs["keyword_items"].
        """
        if elem_type == "pj":
            elem = self.root
        elif elem_type == "ts":
            elem = self.get_ts_with_timestamp(timestamp)
        else:
            elem = self.get_tc_with_timestamp(timestamp)
        if elem_type == "kw":
            item = self.create_element("property", kwargs["keyword_items"])
        else:
            item = self.create_element("property", {"name": name, "value": value})
        elem.find("properties").append(item)
    def add_jobid(self, jobid):
        """Add a jobid property element directly under the root."""
        self.root.append(self.create_element("property", {"name": "jobid", "value": jobid}))
    def add_project_location(self, location):
        """Add the project location both inside the root's properties and as a
        direct child property of the root."""
        self.root.find("properties").append(self.create_element(
            "property", {"name": "location", "value": location}))
        self.root.append(self.create_element(
            "property", {"name": "location", "value": location}))
    def update_count(self, attr, value, elem_type, timestamp="0"):
        """
        Increase the value of a count attribute based on
        element type (project, testsuite or testcase) and timestamp.

        Status words (true/false/exception/...) are first mapped to their
        counter attribute names; non-testcase elements count exceptions
        under "errors".
        """
        if elem_type == "pj":
            elem = self.root
        elif elem_type == "ts":
            elem = self.get_ts_with_timestamp(timestamp)
        else:
            elem = self.get_tc_with_timestamp(timestamp)
        attr = str(attr).lower()
        statuses = {"true": "passes", "false": "failures", "exception": "exceptions",
                    "error": "errors", "skip": "skipped"}
        if attr in statuses:
            attr = statuses[attr]
        if elem.tag != "testcase" and attr == "exceptions":
            attr = "errors"
        if elem.get(attr) is not None:
            elem.set(attr, str(int(elem.get(attr)) + int(value)))
    def update_attr(self, attr, value, elem_type, timestamp=None):
        """
        Update the value of an attribute based on
        element type (project, testsuite or testcase) and timestamp.

        Special handling for "status" on a testcase: a failure element is
        appended for fail/exception/error values, and the stored value is
        normalised to PASS/FAIL.
        """
        if elem_type == "pj":
            elem = self.root
        elif elem_type == "ts":
            elem = self.get_ts_with_timestamp(timestamp)
        else:
            elem = self.get_tc_with_timestamp(timestamp)
        if attr == "status":
            status = str(value).lower()
            if elem.tag == "testcase":
                # BUGFIX: this previously compared ``attr`` (always "status"
                # inside this branch) instead of the status *value*, so the
                # failure element was never appended.
                if status == "false":
                    elem.append(self.create_element("failure", {"message": "test failure"}))
                elif status == "exception" or status == "error":
                    elem.append(self.create_element("failure",
                                                    {"message": "errors/exceptions "\
                                                     "encountered during testcase execution"}))
            if status == "true":
                value = "PASS"
            elif status == "false":
                value = "FAIL"
        elem.set(attr, value)
    def _junit_to_html(self, junit_file, print_summary=True):
        """ Convert a junit file to html, reusing one WarriorHtmlResults
        instance per Junit object."""
        if not hasattr(self, 'html_result_obj'):
            self.html_result_obj = WarriorHtmlResults(junit_file)
        self.html_result_obj.write_live_results(junit_file, None, print_summary)
    def remove_html_obj(self):
        """Check for and remove html_result_obj from the junit object;
        use case: parallel execution."""
        if hasattr(self, 'html_result_obj'):
            del self.html_result_obj
    def output_junit(self, path, print_summary=True):
        """Output the junit XML file and its html conversion under *path*,
        printing the execution summary in the console when *print_summary*
        is True (nothing is written otherwise)."""
        if print_summary is True:
            fpath = path + os.sep + self.filename + "_junit.xml"
            tree = ET.ElementTree(self.root)
            tree.write(fpath)
            summary_obj = ExecutionSummary(fpath)
            summary_obj.print_result_in_console(fpath)
            print_debug("\n")
            self._junit_to_html(fpath, print_summary)
    def junit_output(self, path, print_summary=False):
        """Output the junit XML file under *path* and convert it to html
        without printing a console summary."""
        fpath = path + os.sep + self.filename + "_junit.xml"
        tree = ET.ElementTree(self.root)
        tree.write(fpath)
        self._junit_to_html(fpath, print_summary)
|
python
|
from flask import Blueprint, request, jsonify
from werkzeug import check_password_hash
from flask.ext.login import login_user, logout_user
from app.core import db
from app.api_decorators import requires_login, requires_keys
from app.models.user import User
blueprint = Blueprint('api_slash', __name__, url_prefix='/api')
@blueprint.route('/login/', methods=['POST'])
@requires_keys('email', 'password')
def login():
    """Authenticate a user from a JSON email/password payload."""
    payload = request.get_json(force=True)
    errors = []
    account = User.query.filter_by(email=payload['email']).first()
    # Same message for unknown email and wrong password.
    if account is None:
        errors.append('Invalid username/password combination.')
    elif not check_password_hash(account.password, payload['password']):
        errors.append('Invalid username/password combination.')
    if not errors:
        login_user(account, remember=False)
    return jsonify(success=not errors, errors=errors)
@blueprint.route('/logout/', methods=['GET'])
@requires_login
def logout():
    """Log out the current session user and report success as JSON."""
    logout_user()
    return jsonify(success=True)
@blueprint.route('/register/', methods=['POST'])
# Need to add challenge / response captcha stuff in later
# @requires_keys('email', 'password', 'confirm', 'challenge', 'response')
@requires_keys('email', 'name', 'password', 'confirm')
def register():
    """Create a new account from a JSON payload and log the new user in."""
    payload = request.get_json(force=True)
    errors = []
    user_id = None
    # TODO: validate a captcha challenge/response here before the checks below.
    if User.query.filter_by(email=payload['email']).first():
        errors.append('An account already exists with this email.')
    # Need better password requirements later
    if len(payload['password']) < 6:
        errors.append('Password must be at least 6 characters long.')
    if payload['password'] != payload['confirm']:
        errors.append('Passwords do not match.')
    if not errors:
        new_user = User(payload)
        db.session.add(new_user)
        db.session.commit()
        user_id = new_user.id
        login_user(new_user)
    return jsonify(success=not errors, errors=errors, id=user_id)
|
python
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""environment variables template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.base_object
class envvars(hpccm.base_object):
    """Template for setting environment variables."""

    def __init__(self, **kwargs):
        """Initialize the template."""
        super(envvars, self).__init__(**kwargs)

        # When False, environment_step() always returns an empty mapping.
        self.environment = kwargs.get('environment', True)
        self.environment_variables = {}
        # Use only if the runtime environment is incompatible with the
        # non-runtime environment, e.g., PATH contains different
        # values. Otherwise, try to use the filtering options.
        self.runtime_environment_variables = {}

    def environment_step(self, include_only=None, exclude=None, runtime=False):
        """Return the dictionary of environment variables to emit, filtered
        by *include_only* or *exclude* (include_only wins when both given)."""
        source = (self.runtime_environment_variables if runtime
                  else self.environment_variables)
        if not self.environment:
            return {}
        if include_only:
            return {key: val for key, val in source.items() if key in include_only}
        if exclude:
            return {key: val for key, val in source.items() if key not in exclude}
        return source
|
python
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import logging
import os
from google.cloud import firestore
from google.cloud import storage
# API clients
gcs = None
db = None
def analyze(data, context):
    """Function entry point, triggered by creation of an object in a GCS bucket.

    Reads the content of the triggering file, analyses it against the
    MAX_TIME_MEANINGFUL_PAINT threshold, and persists the analysis results
    to a new Firestore document.

    Args:
        data (dict): The trigger event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    metrics = get_gcs_file_contents(data)
    threshold = int(os.environ.get('MAX_TIME_MEANINGFUL_PAINT'))
    result = analyze_metrics(data, metrics, threshold)
    doc = persist(result, data['name'])
    logging.info('Created new Firestore document %s/%s describing analysis of %s',
                 doc.parent.id, doc.id, result['input_file'])
def get_gcs_file_contents(data):
    """Download and return the bytes of the GCS object that fired the trigger."""
    global gcs
    # Lazily create the client so cold starts without GCS access stay cheap.
    if not gcs:
        gcs = storage.Client()
    return gcs.get_bucket(data['bucket']).blob(data['name']).download_as_string()
def persist(analysis_result, document_id):
    """Persist analysis results to the configured Firestore collection and
    return the created document reference."""
    global db
    # Lazily create the client on first use.
    if not db:
        db = firestore.Client()
    collection = db.collection(os.environ.get('METRICS_COLLECTION'))
    # collection.add returns (update_time, document_reference).
    inserted = collection.add(analysis_result, document_id=document_id)
    return inserted[1]
# [START parse-block]
def analyze_metrics(data, metrics, max_time_meaningful_paint):
    """Parse the page metrics and return a dict with details of the operation."""
    calculated = parse_metrics(metrics)
    result = {
        'metrics': calculated,
        'input_file': 'gs://{}/{}'.format(data['bucket'], data['name']),
        'page_url': data['metadata']['pageUrl'],
        'fetch_timestamp': data['timeCreated'],
        'analysis_timestamp': datetime.utcnow().isoformat() + 'Z',
    }
    # Flag pages whose first-meaningful-paint time exceeds the threshold.
    fmp = calculated['FirstMeaningfulPaint']
    if fmp > max_time_meaningful_paint:
        logging.warning('FAILED: page load time (%d) exceeded max threshold (%d)',
                        fmp, max_time_meaningful_paint)
        result['status'] = 'FAIL'
    else:
        result['status'] = 'PASS'
    return result
def parse_metrics(metrics_str):
    """Decode the raw metrics JSON and compute the derived timing values."""
    # The payload is {"metrics": [{"name": ..., "value": ...}, ...]}.
    kv = {m['name']: m['value'] for m in json.loads(metrics_str)['metrics']}
    return {
        'DomContentLoaded': calc_event_time(kv, 'DomContentLoaded'),
        'FirstMeaningfulPaint': calc_event_time(kv, 'FirstMeaningfulPaint'),
        'JSHeapTotalSize': kv['JSHeapTotalSize'],
        'JSHeapUsedSize': kv['JSHeapUsedSize'],
    }
# [END parse-block]
def calc_event_time(metrics_kv, event_name):
    """Return the event's offset from NavigationStart in whole milliseconds."""
    delta = metrics_kv[event_name] - metrics_kv['NavigationStart']
    return int(delta * 1000)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class FaceSearchUserInfo(object):
    """Face-search user record with serialization to/from Alipay dicts."""

    # Serializable field names, shared by to_alipay_dict/from_alipay_dict.
    _FIELDS = ('customuserid', 'merchantid', 'merchantuid', 'score')

    def __init__(self):
        self._customuserid = None
        self._merchantid = None
        self._merchantuid = None
        self._score = None

    @property
    def customuserid(self):
        return self._customuserid

    @customuserid.setter
    def customuserid(self, val):
        self._customuserid = val

    @property
    def merchantid(self):
        return self._merchantid

    @merchantid.setter
    def merchantid(self, val):
        self._merchantid = val

    @property
    def merchantuid(self):
        return self._merchantuid

    @merchantuid.setter
    def merchantuid(self, val):
        self._merchantuid = val

    @property
    def score(self):
        return self._score

    @score.setter
    def score(self, val):
        self._score = val

    def to_alipay_dict(self):
        """Return a dict with the truthy fields, recursing into nested models."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; returns None for an empty/missing dict."""
        if not d:
            return None
        o = FaceSearchUserInfo()
        for field in FaceSearchUserInfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
python
|
import torch
import torch.nn as nn
class SLAF(nn.Module):
    """Learnable polynomial activation: sum of coeff[p] * x**p for p in 0..k-1,
    with each coefficient a learnable scalar initialised to 1."""

    def __init__(self, k=2):
        super().__init__()
        self.k = k
        # One scalar parameter per power term.
        self.coeff = nn.ParameterList(
            [nn.Parameter(torch.tensor(1.0)) for _ in range(k)])

    def forward(self, x):
        """Evaluate the polynomial element-wise on x."""
        return sum(weight * x.pow(power)
                   for power, weight in enumerate(self.coeff))
|
python
|
#from keras.models import Sequential, Model
#from keras.layers import Dense, Dropout, Flatten, Input
#from keras.layers import Conv2D, MaxPooling2D, Reshape, Concatenate
from keras.optimizers import Adam
#import tensorflow as tf
import numpy as np
import sys
import os
import cv2
import keras.backend as K
import math
# Select the ShanghaiTech part (A or B) from the command line.
if len(sys.argv) != 2:
    print('usage: python3 test.py A(or B)')
    exit()
dataset = sys.argv[1]
print('dataset:', dataset)

# Shared directory prefixes for the formatted patches and original test data.
_formatted = './data/formatted_trainval/shanghaitech_part_' + dataset + '_patches_9/'
_original = './data/original/shanghaitech/part_' + dataset + '_final/test_data/'
train_path = _formatted + 'train/'
train_den_path = _formatted + 'train_den/'
val_path = _formatted + 'val/'
val_den_path = _formatted + 'val_den/'
img_path = _original + 'images/'
den_path = _original + 'ground_truth_csv/'
def data_pre_train():
    """Load training patches and their ground-truth density maps.

    For every image in ``train_path``: read it as grayscale, normalise to
    roughly [-1, 1], load the matching CSV density map from
    ``train_den_path`` and downsample it 4x by summing 4x4 blocks (the
    network output is 1/4 resolution and block-summing preserves counts).

    Returns:
        list of [img, den_quarter] pairs (both 2-D numpy arrays).
    """
    print('loading data from dataset ', dataset, '...')
    train_img_names = os.listdir(train_path)
    img_num = len(train_img_names)
    train_data = []
    # NOTE: the original reused ``i`` for the inner block loops, shadowing
    # the image index; enumerate avoids that entirely.
    for idx, name in enumerate(train_img_names):
        if idx % 100 == 0:
            print(idx, '/', img_num)
        img = np.array(cv2.imread(train_path + name, 0))
        img = (img - 127.5) / 128
        # Pass the path directly so np.loadtxt opens AND closes the file
        # (the previous bare open() leaked the handle).
        den = np.loadtxt(train_den_path + name[:-4] + '.csv', delimiter=",")
        # Vectorised 4x4 block sum instead of four nested Python loops.
        h, w = den.shape[0] // 4, den.shape[1] // 4
        den_quarter = den[:h * 4, :w * 4].reshape(h, 4, w, 4).sum(axis=(1, 3))
        train_data.append([img, den_quarter])
    print('load data finished.')
    return train_data
def data_pre_test():
    """Load test images and their ground-truth density maps.

    Test images are addressed by the conventional 'IMG_<n>.jpg' naming, with
    density CSVs of the same stem in ``den_path``. Each density map is
    downsampled 4x by summing 4x4 blocks to match the network output.

    Returns:
        list of [img, den_quarter] pairs (both 2-D numpy arrays).
    """
    print('loading test data from dataset', dataset, '...')
    img_names = os.listdir(img_path)
    img_num = len(img_names)
    data = []
    for i in range(img_num):
        if i % 50 == 0:
            print(i, '/', img_num)
        name = 'IMG_' + str(i + 1) + '.jpg'
        img = np.array(cv2.imread(img_path + name, 0))
        img = (img - 127.5) / 128
        # Pass the path directly so np.loadtxt opens AND closes the file
        # (the previous bare open() leaked the handle).
        den = np.loadtxt(den_path + name[:-4] + '.csv', delimiter=",")
        # Vectorised 4x4 block sum instead of four nested Python loops that
        # also shadowed the outer loop variable ``i``.
        h, w = den.shape[0] // 4, den.shape[1] // 4
        den_quarter = den[:h * 4, :w * 4].reshape(h, 4, w, 4).sum(axis=(1, 3))
        data.append([img, den_quarter])
    print('load data finished.')
    return data
# Materialise the train/test sets; every sample gets a trailing channel axis.
data = data_pre_train()
data_test = data_pre_test()
np.random.shuffle(data)

x_train = np.array([np.reshape(d[0], d[0].shape + (1,)) for d in data])
y_train = np.array([np.reshape(d[1], d[1].shape + (1,)) for d in data])

x_test = np.array([np.reshape(d[0], d[0].shape + (1,)) for d in data_test])
y_test = np.array([np.reshape(d[1], d[1].shape + (1,)) for d in data_test])
def maaae(y_true, y_pred):
    """Absolute error between the summed true and predicted density maps
    (i.e. the head-count error)."""
    count_diff = K.sum(y_true) - K.sum(y_pred)
    return abs(count_diff)
def mssse(y_true, y_pred):
    """Squared error between the summed true and predicted density maps."""
    count_diff = K.sum(y_true) - K.sum(y_pred)
    return count_diff * count_diff
inputs = Input(shape = (None, None, 1))
conv_m = Conv2D(20, (7, 7), padding = 'same', activation = 'relu')(inputs)
conv_m = MaxPooling2D(pool_size = (2, 2))(conv_m)
conv_m = (conv_m)
conv_m = Conv2D(40, (5, 5), padding = 'same', activation = 'relu')(conv_m)
conv_m = MaxPooling2D(pool_size = (2, 2))(conv_m)
conv_m = Conv2D(20, (5, 5), padding = 'same', activation = 'relu')(conv_m)
conv_m = Conv2D(10, (5, 5), padding = 'same', activation = 'relu')(conv_m)
#conv_m = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_m)
conv_s = Conv2D(24, (5, 5), padding = 'same', activation = 'relu')(inputs)
conv_s = MaxPooling2D(pool_size = (2, 2))(conv_s)
conv_s = (conv_s)
conv_s = Conv2D(48, (3, 3), padding = 'same', activation = 'relu')(conv_s)
conv_s = MaxPooling2D(pool_size = (2, 2))(conv_s)
conv_s = Conv2D(24, (3, 3), padding = 'same', activation = 'relu')(conv_s)
conv_s = Conv2D(12, (3, 3), padding = 'same', activation = 'relu')(conv_s)
#conv_s = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_s)
conv_l = Conv2D(16, (9, 9), padding = 'same', activation = 'relu')(inputs)
conv_l = MaxPooling2D(pool_size = (2, 2))(conv_l)
conv_l = (conv_l)
conv_l = Conv2D(32, (7, 7), padding = 'same', activation = 'relu')(conv_l)
conv_l = MaxPooling2D(pool_size = (2, 2))(conv_l)
conv_l = Conv2D(16, (7, 7), padding = 'same', activation = 'relu')(conv_l)
conv_l = Conv2D(8, (7, 7), padding = 'same', activation = 'relu')(conv_l)
#conv_l = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_l)
conv_merge = Concatenate(axis = 3)([conv_m, conv_s, conv_l])
result = Conv2D(1, (1, 1), padding = 'same')(conv_merge)
'''
inputs = Input(shape = (None, None, 1))
conv_m = Conv2D(20, (7, 7), padding = 'same', activation = 'relu')(inputs)
conv_m = MaxPooling2D(pool_size = (2, 2))(conv_m)
conv_m = (conv_m)
conv_m = Conv2D(40, (5, 5), padding = 'same', activation = 'relu')(conv_m)
conv_m = MaxPooling2D(pool_size = (2, 2))(conv_m)
conv_m = Conv2D(20, (5, 5), padding = 'same', activation = 'relu')(conv_m)
conv_m = Conv2D(10, (5, 5), padding = 'same', activation = 'relu')(conv_m)
#conv_m = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_m)
conv_s = Conv2D(24, (5, 5), padding = 'same', activation = 'relu')(inputs)
conv_s = MaxPooling2D(pool_size = (2, 2))(conv_s)
conv_s = (conv_s)
conv_s = Conv2D(48, (3, 3), padding = 'same', activation = 'relu')(conv_s)
conv_s = MaxPooling2D(pool_size = (2, 2))(conv_s)
conv_s = Conv2D(24, (3, 3), padding = 'same', activation = 'relu')(conv_s)
conv_s = Conv2D(12, (3, 3), padding = 'same', activation = 'relu')(conv_s)
#conv_s = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_s)
conv_l = Conv2D(16, (9, 9), padding = 'same', activation = 'relu')(inputs)
conv_l = MaxPooling2D(pool_size = (2, 2))(conv_l)
conv_l = (conv_l)
conv_l = Conv2D(32, (7, 7), padding = 'same', activation = 'relu')(conv_l)
conv_l = MaxPooling2D(pool_size = (2, 2))(conv_l)
conv_l = Conv2D(16, (7, 7), padding = 'same', activation = 'relu')(conv_l)
conv_l = Conv2D(8, (7, 7), padding = 'same', activation = 'relu')(conv_l)
#conv_l = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_l)
conv_merge = Concatenate(axis = 3)([conv_m, conv_s, conv_l])
result = Conv2D(1, (1, 1), padding = 'same')(conv_merge)
'''
model = Model(inputs = inputs, outputs = result)
adam = Adam(lr = 1e-4)
model.compile(loss = 'mse', optimizer = adam, metrics = [maaae, mssse])
# Best validation scores seen so far (initialized to a sentinel high value).
best_mae = 10000
best_mae_mse = 10000
best_mse = 10000
best_mse_mae = 10000
for i in range(200):
    model.fit(x_train, y_train, epochs = 3, batch_size = 1, validation_split = 0.2)
    score = model.evaluate(x_test, y_test, batch_size = 1)
    score[2] = math.sqrt(score[2])  # convert the MSE metric to RMSE
    print(score)
    if score[1] < best_mae:
        # New best MAE: remember both metrics and checkpoint the model.
        best_mae = score[1]
        best_mae_mse = score[2]
        json_string = model.to_json()
        open('model.json', 'w').write(json_string)
        model.save_weights('weights.h5')
    if score[2] < best_mse:
        best_mse = score[2]
        best_mse_mae = score[1]
    print('best mae: ', best_mae, '(', best_mae_mse, ')')
    print('best mse: ', '(', best_mse_mae, ')', best_mse)
|
python
|
import torch
import torchvision
def get_loader(root='.', batch_size=512):
    """Build CIFAR-10 train/test DataLoaders.

    Args:
        root: Directory where the dataset is stored or downloaded to.
        batch_size: Mini-batch size used by both loaders.

    Returns:
        Tuple of (train_loader, test_loader).
    """
    # Map PIL images to tensors normalized to [-1, 1] per channel.
    normalize = torchvision.transforms.Normalize((0.5, 0.5, 0.5),
                                                 (0.5, 0.5, 0.5))
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor(), normalize])
    datasets = {
        split: torchvision.datasets.CIFAR10(root,
                                            train=(split == 'train'),
                                            download=True,
                                            transform=transform)
        for split in ('train', 'test')
    }
    train_loader = torch.utils.data.DataLoader(datasets['train'],
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=8,
                                               drop_last=True)
    test_loader = torch.utils.data.DataLoader(datasets['test'],
                                              batch_size=batch_size,
                                              num_workers=8)
    return (train_loader, test_loader)
|
python
|
# -*- coding: utf-8 -*-
from guillotina.factory import serialize # noqa
from guillotina.factory.app import make_app # noqa
from guillotina.factory.content import ApplicationRoot # noqa
from guillotina.factory.content import Database # noqa
from guillotina.factory import security # noqa
|
python
|
import typing
from kubernetes import client
from kubernetes import config
from kubernetes.client.rest import ApiException
from kuber import definitions
from kuber import versioning
def load_access_config(in_cluster: bool = False, **kwargs):
    """
    Initializes the kubernetes library from either a kube configuration
    file for external access or using mounted configuration data for
    access from within a pod in the cluster.
    :param in_cluster:
        Whether or not to initialize access within the cluster or not. By
        default the access will be loaded from a kube config file for
        external access to a cluster.
    :param kwargs:
        Optional arguments to pass ot the external kube-config-based
        initialization process.
    """
    # External access is the default path; kwargs only apply to it.
    if not in_cluster:
        return config.load_kube_config(**kwargs)
    return config.load_incluster_config()
def get_version_from_cluster(
    fallback: typing.Union["versioning.KubernetesVersion", str] = None
) -> versioning.KubernetesVersion:
    """
    Returns the KubernetesVersion object associated with the configured
    cluster. If the cluster version cannot be determined, the specified
    fallback version will be returned instead. If no fallback is specified
    the earliest (oldest) version available in the kuber library installation
    will be used instead.
    """
    versions = versioning.get_all_versions()
    # Resolve the fallback into a KubernetesVersion object, defaulting to
    # the earliest available version when none is given.
    default = fallback or versions[0]
    if not isinstance(default, versioning.KubernetesVersion):
        default = versioning.get_version_data(fallback)
    try:
        response: client.VersionInfo = client.VersionApi().get_code()
        major = response.major
        # Cloud providers often report minors like "21+"; drop the suffix.
        minor = response.minor.rstrip("+")
    except ApiException:
        return default
    for version in versions:
        if version.major == major and version.minor == minor:
            return version
    return default
def execute(
    action: str,
    resource: "definitions.Resource",
    names: typing.List[str],
    namespace: typing.Optional[str] = None,
    api_client: typing.Optional[client.ApiClient] = None,
    api_args: typing.Optional[typing.Dict[str, typing.Any]] = None,
) -> typing.Optional["definitions.ExecutionResponse"]:
    """
    Executes the specified action on the given resource object using
    the kubernetes API client.
    :param action:
        The CRUD operation to carry out for the given resource.
    :param resource:
        Kuber resource on which to carry out the operation.
    :param names:
        Names of potential kubernetes python client functions that can be
        called to carry out this operation.
    :param namespace:
        Kubernetes namespace in which this execution will take place.
    :param api_client:
        Kubernetes python client API connection to use when carrying out
        the execution.
    :param api_args:
        Keyword arguments to pass through to the kubernetes python client
        execution call.
    :raises ValueError:
        If none of the candidate function ``names`` exist on the API object.
    """
    api = resource.get_resource_api(api_client=api_client)
    # First candidate function name that actually exists on this API object.
    name = next((n for n in names if hasattr(api, n)), None)
    if name is None:
        raise ValueError(
            f"{action.capitalize()} function not found for resource "
            f"{resource.__class__.__name__}"
        )
    func = getattr(api, name)
    func_variables = func.__code__.co_varnames
    # Copy so the caller's dict is never mutated.
    args = (api_args or {}).copy()
    # Only pass a namespace when the client function accepts one.
    ns = namespace or getattr(resource.metadata, "namespace", None)
    if ns and "namespace" in func_variables:
        args["namespace"] = ns
    # Reuse the already-resolved function instead of a redundant second
    # getattr lookup (the original called getattr(api, name) twice).
    return typing.cast(
        typing.Optional[definitions.ExecutionResponse],
        func(**args),
    )
def to_camel_case(source: str) -> str:
    """Converts the specified source string from snake_case to camelCase."""
    first, *others = source.split("_")
    # The leading segment keeps its original case; the rest are capitalized.
    return first + "".join(word.capitalize() for word in others)
def to_kuber_dict(kube_api_entity: typing.Union[typing.Any, typing.Dict]) -> dict:
    """
    Converts a Kubernetes client object, or serialized dictionary of
    configuration values to the kuber representation, which enforces
    camelCase and omits any keys with `None` values.
    :param kube_api_entity:
        Either a kubernetes Python client object or a dictionary that
        contains keys and value for a kubernetes resource configuration.
    """
    if isinstance(kube_api_entity, dict):
        source = kube_api_entity
    else:
        # Kubernetes client objects expose their data via to_dict().
        source = kube_api_entity.to_dict()
    result = {}
    for key, value in source.items():
        if value is None:
            continue  # kuber omits unset fields entirely
        result[to_camel_case(key)] = value
    return result
|
python
|
# coding: utf-8
from abc import ABCMeta
from config.config_loader import logger
from mall_spider.spiders.actions.action import Action
from mall_spider.spiders.actions.context import Context
class DefaultAction(Action):
    """Base spider action that logs contextual details when execution fails."""
    # Py2-style abstract-class marker (kept as in the original).
    __metaclass__ = ABCMeta

    def on_error(self, context, exp):
        """Log the current task/good from the context for diagnostics."""
        task = context.get(Context.KEY_CURRENT_TASK, '')
        good = context.get(Context.KEY_GOOD_DICT, dict())
        task_id, data = (task.id, task.raw_data) if task else (None, None)
        logger.error(u'context key:[%s],action:[%s],task_id:[%s],good:[%s],execute error,data:%s,exception:%s',
                     context.context_key, self.__class__.__name__, task_id, good, data, exp)
|
python
|
import pygame
from Player import PlayerBase
class Player2():
    """Second-player sprite wrapper that loads its image via pygame."""

    def __init__(self, image, speed=None, pos=None):
        """Load the sprite image.

        :param image: Path to the image file to load.
        :param speed: Optional [dx, dy]; defaults to [0, 0].
        :param pos: Optional [x, y]; defaults to [0, 0].
        """
        # Fix: the original used mutable default arguments ([0, 0]), which
        # are shared across all calls; use None sentinels instead.
        if speed is None:
            speed = [0, 0]
        if pos is None:
            pos = [0, 0]
        self.image = pygame.image.load(image)
|
python
|
import theano.tensor as T
class Regularizer(object):
    """Abstract base for parameter regularizers.

    Subclasses must implement ``__call__`` to return a penalty term.
    """

    def __call__(self, **kwargs):
        # Abstract: concrete regularizers provide the penalty computation.
        raise NotImplementedError
class L2Regularizer(Regularizer):
    """Weight-decay penalty: alpha * (sum of squared parameters) / 2."""

    def __call__(self, alpha, params):
        penalty = l2_sqr(params)
        return alpha * penalty / 2.
def l2_sqr(params):
    """Sum of squared elements accumulated over all parameter tensors."""
    total = 0.0
    for param in params:
        total = total + T.sum(param ** 2)
    return total
|
python
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
from __future__ import absolute_import, unicode_literals
# 3rd party imports
import pytest
from six import string_types
# project imports
from restible.url_params import from_string
@pytest.mark.parametrize('value,expected_type', (
    ('123', int),
    ('3.14159', float),
    ('value', string_types),
))
def test_coerces_to_the_right_type(value, expected_type):
    # from_string() should coerce a raw URL parameter to the most specific
    # numeric type, falling back to a string when no coercion applies.
    result = from_string(value)
    assert isinstance(result, expected_type)
|
python
|
from frangiclave.bot.templates.base import make_section, DIVIDER, URL_FORMAT
from frangiclave.compendium.deck import Deck
def make_deck(deck: Deck):
    """Build the Slack message blocks describing a compendium deck."""
    draw_messages = '\n'.join(f'• <https://www.frangiclave.net/element/{dm.element.element_id}/|{dm.element.element_id}>: {dm.message}' for dm in deck.draw_messages)
    # NOTE(review): the original also built `cards` and `default_card`
    # strings that were never included in the returned blocks; that dead
    # code has been removed. If card listings should appear in the output,
    # add them to the section text below.
    return [
        make_section('*Deck: {}*'.format(URL_FORMAT.format('deck', deck.deck_id))),
        DIVIDER,
        make_section(
            f'*_Label:_* {deck.label}\n'
            f'*_Description:_* {deck.description}\n'
            f'*_Draw Messages:_* \n{draw_messages}\n'
        )
    ]
|
python
|
import collections
import logging
import re
import socket
import subprocess
def json_update(d, u):
    """Recursively merge mapping `u` into dict `d` in place and return `d`."""
    for key, value in u.items():
        if isinstance(value, collections.abc.Mapping):
            # Descend into nested mappings instead of overwriting wholesale.
            d[key] = json_update(d.get(key, {}), value)
        else:
            d[key] = value
    return d
def remove_dict_null(d: dict):
    """Remove `None` value in dictionary."""
    cleaned = {}
    for key, value in d.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
def get_ip():
    """Best-effort detection of the primary outbound IPv4 address.

    Connects a UDP socket toward an arbitrary address (no packets are
    actually sent) and reads the local address the OS would use; falls
    back to loopback on error.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('10.255.255.255', 1))  # destination need not be reachable
        return probe.getsockname()[0]
    except socket.error:
        return '127.0.0.1'
    finally:
        probe.close()
def get_device(device: str):
    """Get device (cuda and device order) from device name string.
    Args:
        device: Device name string.
    Returns:
        Tuple[bool, Optional[int]]: A tuple containing flag for CUDA device and CUDA device order. If the CUDA device
            flag is `False`, the CUDA device order is `None`.
    """
    if device == 'cpu':
        return False, None
    # Accept "cuda" (implicitly device 0) or "cuda:<index>".
    matched = re.match(r'^cuda(?::([0-9]+))?$', device)
    if matched is None:
        logging.warning('Wrong device specification, using `cpu`.')
        return False, None
    index = matched.group(1)
    return True, (int(index) if index is not None else 0)
def check_process_running(port: int):
    """Return the PID of a process listening on `port`, or None.

    Shells out to `lsof -t -i:<port>`; requires lsof to be installed.
    """
    args = ['lsof', '-t', f'-i:{port}']
    try:
        # Fix: the original passed both `universal_newlines=True` and its
        # alias `text=True`; one is sufficient.
        output = subprocess.check_output(args, text=True, stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        # lsof exits non-zero when no process matches the port.
        return None
    try:
        # lsof may report several PIDs (one per line); the original crashed
        # with ValueError in that case — return the first PID instead.
        return int(output.split()[0])
    except (IndexError, ValueError):
        return None
|
python
|
# -*- coding: utf-8 -*-
import re
import scrapy
from locations.items import GeojsonPointItem
class GuzmanyGomezSpider(scrapy.Spider):
    """Scrapes Guzman Y Gomez (AU) store locations from the WP JSON API."""
    name = "guzmany_gomez"
    item_attributes = {"brand": "Guzman Y Gomez"}
    allowed_domains = ["guzmanygomez.com.au"]
    start_urls = [
        "https://www.guzmanygomez.com.au/wp-json/wpapi/v2/getall",
    ]

    def parse(self, response):
        # The endpoint returns a JSON array of store records.
        for store in response.json():
            yield GeojsonPointItem(
                ref=store["OrderLink"],
                name=store["Name"],
                addr_full=store["Address1"],
                city=store["City"],
                state=store["State"],
                postcode=store["Postcode"],
                country="AU",
                phone=store["Phone"],
                lat=store["Latitude"],
                lon=store["Longitude"],
            )
|
python
|
import os
import lab_test
def mean(list_a):
    """Return the arithmetic mean of the values in a non-empty sequence."""
    total = sum(list_a)
    return total / len(list_a)
def create_md_file(path, bpp_mine, psnr_mine, ssim_mine, bpp_jpg, psnr_jpg, ssim_jpg):
    """Write a markdown report comparing the model's metrics against JPEG.

    Creates `path` if needed and writes `<path>/res.md` containing summary
    lines followed by a per-image metrics table.
    """
    # Fix: use os.makedirs instead of shelling out to `mkdir -p`.
    os.makedirs(path, exist_ok=True)
    lines = [
        'MyModel: mean bpp is {:.4f}, mean psnr is {:.4f}, mean ssim is {:.4f}\n'.format(
            mean(bpp_mine), mean(psnr_mine), mean(ssim_mine)),
        'JPEG: mean bpp is {:.4f}, mean psnr is {:.4f}, mean ssim is {:.4f}\n'.format(
            mean(bpp_jpg), mean(psnr_jpg), mean(ssim_jpg)),
        '|BPP_Mine |PSNR_Mine |SSIM_Mine |BPP_JPG |PSNR_JPG |SSIM_JPG |\n',
        '|----|----|----|----|-----|----|\n',
    ]
    # Iterate the metric series in lockstep (the original built an unused
    # zip and a row variable that shadowed the builtin `str`).
    for row in zip(bpp_mine, psnr_mine, ssim_mine, bpp_jpg, psnr_jpg, ssim_jpg):
        lines.append('|{:.4f} | {:.4f} | {:.4f} | {:.4f}| {:.4f} | {:.4f} | \n'.format(*row))
    # Fix: the original never closed the file; the with-block flushes and
    # closes it deterministically.
    with open(os.path.join(path, 'res.md'), 'w') as mdfile:
        mdfile.writelines(lines)
def process(model, version, args, run = True):
    """Evaluate `model` and a JPEG baseline on Kodak, then write a report.

    :param model: model handle passed through to lab_test.test_kodak.
    :param version: run tag used to locate res/, codes/ and report/ dirs.
    :param args: parsed CLI args; args.jpg selects the JPEG quality run.
    :param run: when False, skip re-running the tests and only collect metrics.
    """
    if run:
        lab_test.test_kodak(version, model)
        lab_test.test_jpg(int(args.jpg))
    png_path = 'res/{}'.format(version)
    jpg_path = 'jpg_res/{}'.format(args.jpg)
    # Collect per-image metrics for the model and the JPEG baseline.
    bpp_mine = lab_test.get_bpp('codes/{}'.format(version))
    psnr_mine = lab_test.get_psnr(png_path)
    ssim_mine = lab_test.get_ssim(png_path)
    bpp_jpg = lab_test.get_bpp(jpg_path,jpeg=True)
    psnr_jpg = lab_test.get_psnr(jpg_path,jpeg=True)
    ssim_jpg = lab_test.get_ssim(jpg_path,jpeg=True)
    save_path = 'report/{}'.format(version)
    os.system('mkdir -p {}'.format(save_path))
    create_md_file(save_path, bpp_mine, psnr_mine, ssim_mine, bpp_jpg, psnr_jpg, ssim_jpg)
def CABAC_res():
    """Write CABAC.md comparing CABAC-coded sizes against Huffman .npz sizes.

    Reads one size per line from CABAC.txt; the matching Huffman size comes
    from codes/entropy-1/<NN>.npz where NN is the 1-based zero-padded index.
    """
    # Fix: the original leaked both file handles; with-blocks close them.
    with open('CABAC.txt', 'r') as res1:
        size_lines = res1.readlines()
    rows = ['|CABAC(kb) |Huffman(kb) |\n', '|----|----|\n']
    for i, line in enumerate(size_lines, start=1):
        n_id = '{:02d}'.format(i)  # zero-pad: 1 -> "01"
        huffman_size = os.path.getsize('codes/entropy-1/{}.npz'.format(n_id))
        rows.append('|{} |{:d} |\n'.format(line.strip('\n'), huffman_size))
    # Fix: open(..., 'w') creates the file, so the `touch CABAC.md` shell
    # call in the original was redundant and has been removed.
    with open('CABAC.md', 'w') as md_file:
        md_file.writelines(rows)
if __name__ == '__main__':
    # CLI entry point: -m model file, -v run/version tag, -j JPEG quality tag.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-m', required=True, type=str)
    parser.add_argument('--version', '-v', required=True, type=str)
    parser.add_argument('--jpg', '-j', required=True, type=str)
    args = parser.parse_args()
    process(args.model, args.version, args)
|
python
|
#! /usr/bin/env python
#coding: utf-8
######################################################################################
#Script for download and convert to fastq SRA datasets serially. #
#Authors: David Peris UW-Madison, Dept Genetics #
#Usage: python download_SRA_serially.py INPUT OUTPUTFOLDER YES/NO #
# #
#INPUT a SRA accession number or a text file with a list of SRAs #
#OUTPUTFOLDER the folder where your fastq will be saved #
#YES or NO if your input is a list or just an accession number #
######################################################################################
import sys,os
# CLI arguments (see the usage header above).
SRA_files = sys.argv[1]      # SRA accession, or path to a list file
output_folder = sys.argv[2]  # destination directory for the fastq output
list_file = sys.argv[3]      # "YES" if SRA_files is a list file, "NO" otherwise
# Default download location used by the SRA toolkit's prefetch.
downloaded_path = '~/ncbi/public/sra/'
if list_file == "NO":
    # Single accession: wrap it in a one-element list.
    SRA_list = []
    SRA_list.append(SRA_files)
else:
    # Iterating the open file yields one accession per line.
    SRA_list = open(SRA_files)
def prefetch(SRA_file): #It is downloaded into the directory user/ncbi/public/sra/
    """Build the `prefetch` shell command for one SRA accession."""
    return 'prefetch -v ' + SRA_file
def convert_fastq(SRA_file,output_folder):
    """Build the `fastq-dump` command converting a prefetched .sra file."""
    parts = ['fastq-dump --outdir ', output_folder,
             ' --split-files ', downloaded_path, SRA_file, '.sra']
    return ''.join(parts)
# Download each accession and convert it to fastq, one at a time.
for SRA_file in SRA_list:
    SRA_file = SRA_file.strip()  # drop the trailing newline from list-file lines
    os.system(prefetch(SRA_file))
    os.system(convert_fastq(SRA_file,output_folder))
# Fix: the original used the Python-2-only `print` statement; the
# parenthesized single-argument form behaves identically on Python 2 and 3.
print("SRA files downloaded")
|
python
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import itk
from sys import argv, stderr, exit
# Report pipeline progress to the console.
itk.auto_progress(2)
if len(argv) < 3:
    print((
        "Missing Parameters \n Usage: AntiAliasBinaryImageFilter"
        " inputImage outputImage [RMS] [numberOfIterations]"), file=stderr)
    exit(1)
inputFilename = argv[1]
outputFilename = argv[2]
# Smoothing defaults; optionally overridden by the 3rd/4th CLI arguments.
maximumRMSError = 0.01
numberOfIterations = 50
if len(argv) > 3:
    maximumRMSError = float(argv[3])
if len(argv) > 4:
    numberOfIterations = int(argv[4])
# Pipeline types: 8-bit unsigned char for I/O, float for the anti-alias step.
CharPixelType = itk.UC
RealPixelType = itk.F
Dimension = 3
CharImageType = itk.Image[CharPixelType, Dimension]
RealImageType = itk.Image[RealPixelType, Dimension]
ReaderType = itk.ImageFileReader[CharImageType]
WriterType = itk.ImageFileWriter[CharImageType]
CastToRealFilterType = itk.CastImageFilter[CharImageType, RealImageType]
RescaleFilter = itk.RescaleIntensityImageFilter[RealImageType, CharImageType]
antiAliasFilter = itk.AntiAliasBinaryImageFilter[RealImageType, RealImageType]
antiAliasFilter = antiAliasFilter.New()
reader = ReaderType.New()
writer = WriterType.New()
toReal = CastToRealFilterType.New()
rescale = RescaleFilter.New()
reader.SetFileName(inputFilename)
writer.SetFileName(outputFilename)
# Map the float output back to the full 8-bit intensity range.
rescale.SetOutputMinimum(0)
rescale.SetOutputMaximum(255)
# Wire the pipeline: read -> cast to float -> anti-alias -> rescale -> write.
toReal.SetInput(reader.GetOutput())
antiAliasFilter.SetInput(toReal.GetOutput())
antiAliasFilter.SetMaximumRMSError(maximumRMSError)
antiAliasFilter.SetNumberOfIterations(numberOfIterations)
antiAliasFilter.SetNumberOfLayers(2)
rescale.SetInput(antiAliasFilter.GetOutput())
writer.SetInput(rescale.GetOutput())
# Update() executes the whole pipeline and writes the result file.
writer.Update()
|
python
|
from typing import Callable
def test_hello_default(hello: Callable[..., str]) -> None:
assert hello() == "Hello !"
def test_hello_name(hello: Callable[..., str], name: str) -> None:
assert hello(name) == "Hello {0}!".format(name)
|
python
|
# -*- coding: utf-8 -*-
"""
equip.analysis.python
~~~~~~~~~~~~~~~~~~~~~
Python related information for analysis.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
|
python
|
""" XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# MOD INFO
# XFW mod descriptor consumed by the XVM framework loader.
XFW_MOD_INFO = {
    # mandatory
    'VERSION': '0.9.19.0.1',
    'URL': 'http://www.modxvm.com/',
    'UPDATE_URL': 'http://www.modxvm.com/en/download-xvm/',
    'GAME_VERSIONS': ['0.9.19.0.1'],
    # optional
}
#####################################################################
# imports
import traceback
import sys
from math import degrees, pi
import BigWorld
import game
import gui.shared.tooltips.vehicle as tooltips_vehicle
from gun_rotation_shared import calcPitchLimitsFromDesc
from helpers import i18n
from gui import g_htmlTemplates
from gui.shared import g_eventBus
from gui.shared.formatters import text_styles
from gui.shared.tooltips import formatters
from gui.shared.gui_items import GUI_ITEM_TYPE
from gui.Scaleform.locale.MENU import MENU
from gui.shared.items_parameters import formatters as param_formatter
from gui.shared.items_parameters.formatters import measureUnitsForParameter
from gui.shared.items_parameters.params_helper import getParameters as getParameters_helper
from gui.shared.items_parameters.params_helper import idealCrewComparator as idealCrewComparator_helper
from gui.shared.utils.requesters.ItemsRequester import ItemsRequester
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.Scaleform.framework.ToolTip import ToolTip
from gui.Scaleform.daapi.view.battle.shared.consumables_panel import ConsumablesPanel
from gui.Scaleform.daapi.view.meta.ModuleInfoMeta import ModuleInfoMeta
from gui.shared.tooltips.module import ModuleBlockTooltipData
from helpers import dependency
from skeletons.gui.shared import IItemsCache
from xfw import *
import xvm_main.python.config as config
from xvm_main.python.consts import *
from xvm_main.python.logger import *
from xvm_main.python.vehinfo import _getRanges
from xvm_main.python.vehinfo_tiers import getTiers
from xvm_main.python.xvm import l10n
#####################################################################
# globals
shells_vehicles_compatibility = {}  # presumably shell -> compatible vehicles; not populated in this chunk — verify
carousel_tooltips_cache = {}  # vehicle.intCD -> prebuilt tooltip block list
styles_templates = {}  # style name -> font-patched template (see text_styles_getStyle)
toolTipDelayIntervalId = None  # pending BigWorld callback id for a delayed tooltip
weightTooHeavy = False  # NOTE(review): not read/written in this chunk — confirm usage elsewhere
p_replacement = None # will be something like <font size... color...>
#####################################################################
# initialization/finalization
def start():
    """Subscribe tooltip-cache invalidation to XVM config reloads."""
    g_eventBus.addListener(XVM_EVENT.CONFIG_LOADED, tooltips_clear_cache)
    # NOTE(review): this schedules start() again immediately, re-adding the
    # listener on every tick — confirm addListener is idempotent or that
    # this retry loop is intentional.
    BigWorld.callback(0, start)
@registerEvent(game, 'fini')
def fini():
    # Drop the config-reload listener on game shutdown.
    g_eventBus.removeListener(XVM_EVENT.CONFIG_LOADED, tooltips_clear_cache)
#####################################################################
# handlers
# tooltip delay to resolve performance issue
@overrideMethod(ToolTip, 'onCreateComplexTooltip')
def ToolTip_onCreateComplexTooltip(base, self, tooltipId, stateType):
    """Route complex-tooltip creation through the configurable delay."""
    # log('ToolTip_onCreateComplexTooltip')
    _createTooltip(self, lambda:_onCreateComplexTooltip_callback(base, self, tooltipId, stateType))
# tooltip delay to resolve performance issue
# suppress carousel tooltips
@overrideMethod(ToolTip, 'onCreateTypedTooltip')
def ToolTip_onCreateTypedTooltip(base, self, type, *args):
    """Route typed-tooltip creation through the delay; optionally suppress
    hangar carousel vehicle tooltips via config."""
    # log('ToolTip_onCreateTypedTooltip')
    try:
        if type == TOOLTIPS_CONSTANTS.CAROUSEL_VEHICLE and config.get('hangar/carousel/suppressCarouselTooltips'):
            return
    except Exception as ex:
        err(traceback.format_exc())
    _createTooltip(self, lambda:_onCreateTypedTooltip_callback(base, self, type, *args))
# adds delay for tooltip appearance
def _createTooltip(self, func):
    """Hide any pending tooltip and schedule `func` after the configured delay."""
    try:
        global toolTipDelayIntervalId
        self.xvm_hide()  # cancel a previously scheduled tooltip, if any
        tooltipDelay = config.get('tooltips/tooltipsDelay', 0.4)
        toolTipDelayIntervalId = BigWorld.callback(tooltipDelay, func)
    except Exception as ex:
        err(traceback.format_exc())
def _onCreateTypedTooltip_callback(base, self, type, *args):
    """Delayed continuation: actually create the typed tooltip."""
    # log('ToolTip_onCreateTypedTooltip_callback')
    global toolTipDelayIntervalId
    toolTipDelayIntervalId = None  # callback has fired; nothing left to cancel
    base(self, type, *args)
def _onCreateComplexTooltip_callback(base, self, tooltipId, stateType):
    """Delayed continuation: actually create the complex tooltip."""
    # log('_onCreateComplexTooltip_callback')
    global toolTipDelayIntervalId
    toolTipDelayIntervalId = None  # callback has fired; nothing left to cancel
    base(self, tooltipId, stateType)
def _ToolTip_xvm_hide(self):
    """Cancel a scheduled-but-not-yet-shown tooltip, if any."""
    # log('_ToolTip_xvm_hide')
    global toolTipDelayIntervalId
    if toolTipDelayIntervalId is not None:
        BigWorld.cancelCallback(toolTipDelayIntervalId)
        toolTipDelayIntervalId = None
# Attach as a method so _createTooltip can call self.xvm_hide().
ToolTip.xvm_hide = _ToolTip_xvm_hide
#############################
# carousel events
@overrideMethod(tooltips_vehicle.VehicleInfoTooltipData, '_packBlocks')
def VehicleInfoTooltipData_packBlocks(base, self, *args, **kwargs):
    """Drop block groups with no data from the vehicle info tooltip."""
    result = base(self, *args, **kwargs)
    # Keep only blocks that actually contain 'blocksData'.
    result = [item for item in result if item.get('data', {}).get('blocksData')]
    return result
@overrideMethod(tooltips_vehicle.SimplifiedStatsBlockConstructor, 'construct')
def SimplifiedStatsBlockConstructor_construct(base, self):
    """Hide the simplified vehicle parameters block when configured."""
    if config.get('tooltips/hideSimplifiedVehParams'):
        return []
    return base(self)
@overrideMethod(tooltips_vehicle.AdditionalStatsBlockConstructor, 'construct')
def AdditionalStatsBlockConstructor_construct(base, self):
    """Hide the bottom text block of the tooltip when configured."""
    if config.get('tooltips/hideBottomText'):
        return []
    return base(self)
@overrideMethod(text_styles, "_getStyle")
def text_styles_getStyle(base, style, ctx = None):
    """Patch lobby text styles with the user-configured tooltip font.

    Caches the patched template per style name; falls back to the original
    implementation on any error, or when a context is needed but absent.
    """
    if ctx is None:
        ctx = {}
    try:
        if style not in styles_templates:
            template = g_htmlTemplates['html_templates:lobby/textStyle'][style].source
            # Template source may be a plain string or a dict with a 'text' key.
            template_string = template if type(template) is str else template['text']
            # Only rewrite the default 14px $FieldFont styling.
            if "size='14'" in template_string and "face='$FieldFont'" in template_string:
                template_string = template_string \
                    .replace("size='14'", "size='%s'" % config.get('tooltips/fontSize', 14)) \
                    .replace("face='$FieldFont'", "face='%s'" % config.get('tooltips/fontName', '$FieldFont'))
            styles_templates[style] = template_string if type(template) is str else {'text': template_string}
        if type(styles_templates[style]) is str:
            return styles_templates[style]
        else:
            if ctx:
                # Interpolate the caller-supplied context into the template.
                return styles_templates[style]['text'] % ctx
            else:
                return base(style, ctx)
    except Exception as ex:
        err(traceback.format_exc())
        return base(style, ctx)
def tooltip_add_param(self, result, param0, param1):
    """Append a name/value parameter row block to `result`."""
    result.append(formatters.packTextParameterBlockData(name=text_styles.main(param0), value=text_styles.stats(param1), valueWidth=107, padding=formatters.packPadding(left=self.leftPadding, right=self.rightPadding)))
def tooltip_with_units(value, units):
    """Format `value` with `units` rendered in the dimmer 'standard' style."""
    return '%s %s' % (value, text_styles.standard(units))
def getParameterValue(paramName):
    """Localized parameter label followed by its measurement units."""
    return text_styles.main(i18n.makeString(MENU.tank_params(paramName))) + text_styles.standard(measureUnitsForParameter(paramName))
def formatNumber(value):
    """Round to a magnitude-dependent precision and localize the number.

    >99 -> integer, >9 -> one decimal, otherwise two decimals.
    """
    if value > 99:
        rounded = round(value)
    elif value > 9:
        rounded = round(value, 1)
    else:
        rounded = round(value, 2)
    return str(BigWorld.wg_getNiceNumberFormat(rounded))
# replace <h>text1 <p>text2</p></h> with: text1 text_styles.standard(text2)
# replace <h>text1 <p>text2</p></h> with: text1 text_styles.standard(text2)
def replace_p(text):
    """Swap <p>…</p> spans to the 'standard' font style and strip <h> tags."""
    global p_replacement
    if not p_replacement:
        # Derive the opening <font ...> tag by styling an empty string and
        # taking everything up to the first '>'.
        p_replacement = text_styles.standard('').split('>', 1)[0] + '>'
    return text.replace('<p>', p_replacement).replace('</p>', '</font>').replace('<h>', '').replace('</h>', '')
# overriding tooltips for tanks in hangar, configuration in tooltips.xc
@overrideMethod(tooltips_vehicle.CommonStatsBlockConstructor, 'construct')
def CommonStatsBlockConstructor_construct(base, self):
try:
self.leftPadding = -15
vehicle = self.vehicle
cache_result = carousel_tooltips_cache.get(vehicle.intCD)
if cache_result:
return cache_result
result = []
if not config.get('tooltips/hideSimplifiedVehParams'):
result.append(formatters.packTitleDescBlock(text_styles.middleTitle(i18n.makeString(TOOLTIPS.TANKCARUSEL_MAINPROPERTY)), padding=formatters.packPadding(left=0, right=self.rightPadding, bottom=8)))
params = self.configuration.params
veh_descr = vehicle.descriptor
gun = vehicle.gun.descriptor
turret = vehicle.turret.descriptor
comparator = idealCrewComparator_helper(vehicle)
vehicleCommonParams = getParameters_helper(vehicle)
veh_type_inconfig = vehicle.type.replace('AT-SPG', 'TD')
clipGunInfoShown = False
premium_shells = {}
for shell in vehicle.shells:
premium_shells[shell.intCompactDescr] = shell.isPremium
if params:
values = config.get('tooltips/%s' % veh_type_inconfig)
if values and len(values):
params_list = values # overriding parameters
else:
params_list = self.PARAMS.get(vehicle.type, 'default') # original parameters
paramInfo = None
for paramName in params_list:
if paramName is None:
continue
if paramName == 'rateOfFire':
paramName = 'reloadTime'
elif paramName == 'traverseLimits':
paramName = 'gunYawLimits' if 'gunYawLimits' in vehicleCommonParams else 'turretYawLimits'
elif paramName == 'radioRange':
paramName = 'radioDistance'
elif paramName == 'reloadTimeSecs' and vehicle.gun.isClipGun():
paramName = 'clipFireRate'
elif paramName == 'turretRotationSpeed' and not vehicle.hasTurrets:
paramName = 'gunRotationSpeed'
if paramName in vehicleCommonParams:
paramInfo = comparator.getExtendedData(paramName)
if paramName == 'turretArmor' and not vehicle.hasTurrets:
continue
#maxHealth
elif paramName == 'maxHealth':
tooltip_add_param(self, result, i18n.makeString('#menu:vehicleInfo/params/maxHealth'), formatNumber(veh_descr.maxHealth))
#battle tiers
elif paramName == 'battleTiers':
(minTier, maxTier) = getTiers(vehicle.level, vehicle.type, vehicle.name)
tooltip_add_param(self, result, l10n('Battle tiers'), '%s..%s' % (minTier, maxTier))
#explosionRadius
elif paramName == 'explosionRadius':
explosionRadiusMin = 999
explosionRadiusMax = 0
for shot in gun['shots']:
if 'explosionRadius' in shot['shell']:
if shot['shell']['explosionRadius'] < explosionRadiusMin:
explosionRadiusMin = shot['shell']['explosionRadius']
if shot['shell']['explosionRadius'] > explosionRadiusMax:
explosionRadiusMax = shot['shell']['explosionRadius']
if explosionRadiusMax == 0: # no HE
continue
explosionRadius_str = formatNumber(explosionRadiusMin)
if explosionRadiusMin != explosionRadiusMax:
explosionRadius_str += '/%s' % gold_pad(formatNumber(explosionRadiusMax))
tooltip_add_param(self, result, getParameterValue(paramName), explosionRadius_str)
#shellSpeedSummary
elif paramName == 'shellSpeedSummary':
shellSpeedSummary_arr = []
for shot in gun['shots']:
shellSpeed_str = '%g' % round(shot['speed'] * 1.25)
if premium_shells[shot['shell']['compactDescr']]:
shellSpeed_str = gold_pad(shellSpeed_str)
shellSpeedSummary_arr.append(shellSpeed_str)
shellSpeedSummary_str = '/'.join(shellSpeedSummary_arr)
tooltip_add_param(self, result, tooltip_with_units(l10n('shellSpeed'), l10n('(m/sec)')), shellSpeedSummary_str)
#piercingPowerAvg
elif paramName == 'piercingPowerAvg':
piercingPowerAvg = formatNumber(veh_descr.shot['piercingPower'][0])
tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgPiercingPower')), piercingPowerAvg)
#piercingPowerAvgSummary
elif paramName == 'piercingPowerAvgSummary':
piercingPowerAvgSummary_arr = []
for shot in gun['shots']:
piercingPower_str = formatNumber(shot['piercingPower'][0])
if premium_shells[shot['shell']['compactDescr']]:
piercingPower_str = gold_pad(piercingPower_str)
piercingPowerAvgSummary_arr.append(piercingPower_str)
piercingPowerAvgSummary_str = '/'.join(piercingPowerAvgSummary_arr)
tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgPiercingPower')), piercingPowerAvgSummary_str)
#damageAvgSummary
elif paramName == 'damageAvgSummary':
damageAvgSummary_arr = []
for shot in gun['shots']:
damageAvg_str = formatNumber(shot['shell']['damage'][0])
if premium_shells[shot['shell']['compactDescr']]:
damageAvg_str = gold_pad(damageAvg_str)
damageAvgSummary_arr.append(damageAvg_str)
damageAvgSummary_str = '/'.join(damageAvgSummary_arr)
tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgDamage')), damageAvgSummary_str)
#magazine loading
# elif (paramName == 'reloadTimeSecs' or paramName == 'rateOfFire') and vehicle.gun.isClipGun():
# if clipGunInfoShown:
# continue
# (shellsCount, shellReloadingTime) = gun['clip']
# reloadMagazineTime = gun['reloadTime']
# shellReloadingTime_str = formatNumber(shellReloadingTime)
# reloadMagazineTime_str = formatNumber(reloadMagazineTime)
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/shellsCount')), shellsCount)
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/shellReloadingTime')), shellReloadingTime_str)
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/reloadMagazineTime')), reloadMagazineTime_str)
# clipGunInfoShown = True
#rate of fire
# elif paramName == 'rateOfFire' and not vehicle.gun.isClipGun():
# rateOfFire_str = formatNumber(60 / gun['reloadTime'])
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/reloadTime')), rateOfFire_str)
# gun traverse limits
# elif paramName == 'traverseLimits' and gun['turretYawLimits']:
# (traverseMin, traverseMax) = gun['turretYawLimits']
# traverseLimits_str = '%g..+%g' % (round(degrees(traverseMin)), round(degrees(traverseMax)))
# tooltip_add_param(self, result, l10n('traverseLimits'), traverseLimits_str)
# elevation limits (front)
# elif paramName == 'pitchLimits':
# (pitchMax, pitchMin) = calcPitchLimitsFromDesc(0, gun['pitchLimits'])
# pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
# tooltip_add_param(self, result, l10n('pitchLimits'), pitchLimits_str)
# elevation limits (side)
elif paramName == 'pitchLimitsSide':
if gun['turretYawLimits'] and abs(degrees(gun['turretYawLimits'][0])) < 89: continue # can't look aside 90 degrees
(pitchMax, pitchMin) = calcPitchLimitsFromDesc(pi / 2, gun['pitchLimits'])
pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
tooltip_add_param(self, result, l10n('pitchLimitsSide'), pitchLimits_str)
# elevation limits (rear)
elif paramName == 'pitchLimitsRear':
if gun['turretYawLimits']: continue # can't look back
(pitchMax, pitchMin) = calcPitchLimitsFromDesc(pi, gun['pitchLimits'])
pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
tooltip_add_param(self, result, l10n('pitchLimitsRear'), pitchLimits_str)
# shooting range
elif paramName == 'shootingRadius':
viewRange, shellRadius, artiRadius = _getRanges(turret, gun, vehicle.nationName, vehicle.type)
if vehicle.type == 'SPG':
tooltip_add_param(self, result, tooltip_with_units(l10n('shootingRadius'), l10n('(m)')), artiRadius)
elif shellRadius < 707:
tooltip_add_param(self, result, tooltip_with_units(l10n('shootingRadius'), l10n('(m)')), shellRadius)
#reverse max speed
elif paramName == 'speedLimits':
(speedLimitForward, speedLimitReverse) = veh_descr.physics['speedLimits']
speedLimits_str = str(int(speedLimitForward * 3.6)) + '/' + str(int(speedLimitReverse * 3.6))
tooltip_add_param(self, result, getParameterValue(paramName), speedLimits_str)
#turret rotation speed
# elif paramName == 'turretRotationSpeed' or paramName == 'gunRotationSpeed':
# if not vehicle.hasTurrets:
# paramName = 'gunRotationSpeed'
# turretRotationSpeed_str = str(int(degrees(veh_descr.turret['rotationSpeed'])))
# tooltip_add_param(self, result, tooltip_with_units(i18n.makeString('#menu:tank_params/%s' % paramName).rstrip(), i18n.makeString('#menu:tank_params/gps')), turretRotationSpeed_str)
#terrain resistance
elif paramName == 'terrainResistance':
resistances_arr = []
for key in veh_descr.chassis['terrainResistance']:
resistances_arr.append(formatNumber(key))
terrainResistance_str = '/'.join(resistances_arr)
tooltip_add_param(self, result, l10n('terrainResistance'), terrainResistance_str)
#radioRange
# elif paramName == 'radioRange':
# radioRange_str = '%s' % int(vehicle.radio.descriptor['distance'])
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/radioDistance')), radioRange_str)
#gravity
elif paramName == 'gravity':
gravity_str = formatNumber(veh_descr.shot['gravity'])
tooltip_add_param(self, result, l10n('gravity'), gravity_str)
#inner name, for example - ussr:R100_SU122A
elif paramName == 'innerName':
tooltip_add_param(self, result, vehicle.name, '')
#custom text
elif paramName.startswith('TEXT:'):
customtext = paramName[5:]
tooltip_add_param(self, result, l10n(customtext), '')
elif paramInfo is not None and paramName in paramInfo.name:
valueStr = str(param_formatter.formatParameter(paramName, paramInfo.value))
tooltip_add_param(self, result, getParameterValue(paramName), valueStr)
if vehicle.isInInventory:
# optional devices icons, must be in the end
if 'optDevicesIcons' in params_list:
optDevicesIcons_arr = []
for key in vehicle.optDevices:
if key:
imgPath = 'img://gui' + key.icon.lstrip('.')
else:
imgPath = 'img://gui/maps/icons/artefact/empty.png'
optDevicesIcons_arr.append('<img src="%s" height="16" width="16">' % imgPath)
optDevicesIcons_str = ' '.join(optDevicesIcons_arr)
tooltip_add_param(self, result, optDevicesIcons_str, '')
# equipment icons, must be in the end
if 'equipmentIcons' in params_list:
equipmentIcons_arr = []
for key in vehicle.eqs:
if key:
imgPath = 'img://gui' + key.icon.lstrip('.')
else:
imgPath = 'img://gui/maps/icons/artefact/empty.png'
equipmentIcons_arr.append('<img src="%s" height="16" width="16">' % imgPath)
equipmentIcons_str = ' '.join(equipmentIcons_arr)
if config.get('tooltips/combineIcons') and optDevicesIcons_str:
tmp_list = []
tooltip_add_param(self, tmp_list, equipmentIcons_str, '')
result[-1]['data']['name'] += ' ' + tmp_list[0]['data']['name']
else:
tooltip_add_param(self, result, equipmentIcons_str, '')
# crew roles icons, must be in the end
if 'crewRolesIcons' in params_list:
imgPath = 'img://../mods/shared_resources/xvm/res/icons/tooltips/roles'
crewRolesIcons_arr = []
for tankman_role in vehicle.descriptor.type.crewRoles:
crewRolesIcons_arr.append('<img src="%s/%s.png" height="16" width="16">' % (imgPath, tankman_role[0]))
crewRolesIcons_str = ''.join(crewRolesIcons_arr)
tooltip_add_param(self, result, crewRolesIcons_str, '')
if (len(result) > 30) and config.get('tooltips/hideBottomText'): # limitation
result = result[:30]
elif (len(result) > 29) and not config.get('tooltips/hideBottomText'): # limitation
result = result[:29]
carousel_tooltips_cache[vehicle.intCD] = result
return result
except Exception as ex:
err(traceback.format_exc())
return base(self)
# in battle, add tooltip for HE shells - explosion radius
@overrideMethod(ConsumablesPanel, '_ConsumablesPanel__makeShellTooltip')
def ConsumablesPanel__makeShellTooltip(base, self, descriptor, piercingPower):
    """Append the HE explosion radius line to the in-battle shell tooltip."""
    tooltip = base(self, descriptor, piercingPower)
    try:
        if 'explosionRadius' in descriptor:
            label = i18n.makeString('#menu:tank_params/explosionRadius')
            extra = '\n%s: %s{/BODY}' % (label, formatNumber(descriptor['explosionRadius']))
            tooltip = tooltip.replace('{/BODY}', extra)
    except Exception as ex:
        err(traceback.format_exc())
    return tooltip
# show compatible vehicles for shells info window in warehouse and shop
@overrideMethod(ModuleInfoMeta, 'as_setModuleInfoS')
def ModuleInfoMeta_as_setModuleInfoS(base, self, moduleInfo):
    """Extend the shell info window with the list of compatible vehicles."""
    try:
        if moduleInfo.get('type') == 'shell':
            # lazily build the shell -> vehicles map on first use
            if not shells_vehicles_compatibility:
                relate_shells_vehicles()
            vehicles = shells_vehicles_compatibility.get(self.moduleCompactDescr)
            if vehicles is not None:
                moduleInfo['compatible'].append({
                    'type': i18n.makeString('#menu:moduleInfo/compatible/vehicles'),
                    'value': ', '.join(vehicles)})
    except Exception as ex:
        err(traceback.format_exc())
    base(self, moduleInfo)
# # add '#menu:moduleInfo/params/weightTooHeavy' (red 'weight (kg)')
# @overrideMethod(i18n, 'makeString')
# def makeString(base, key, *args, **kwargs):
# if key == '#menu:moduleInfo/params/weightTooHeavy':
# global weightTooHeavy
# if weightTooHeavy is None:
# weightTooHeavy = '<h>%s</h>' % red_pad(strip_html_tags(i18n.makeString('#menu:moduleInfo/params/weight'))) # localized red 'weight (kg)'
# return weightTooHeavy
# return base(key, *args, **kwargs)
##########################################################################
# paint 'weight (kg)' with red if module does not fit due to overweight
@overrideMethod(param_formatter, 'formatModuleParamName')
def formatters_formatModuleParamName(base, paramName):
    """Render a module parameter name, painting 'weight' red when the module is overweight."""
    builder = text_styles.builder()
    overweight = weightTooHeavy and paramName == 'weight'
    name_style = text_styles.error if overweight else text_styles.main
    unit_style = text_styles.error if overweight else text_styles.standard
    builder.addStyledText(name_style, MENU.moduleinfo_params(paramName))
    builder.addStyledText(unit_style, param_formatter.MEASURE_UNITS.get(paramName, ''))
    return builder.render()
@overrideMethod(ModuleBlockTooltipData, '_packBlocks')
def ModuleBlockTooltipData_packBlocks(base, self, *args, **kwargs):
    """Record whether the module is rejected for being too heavy before packing the tooltip."""
    try:
        global weightTooHeavy
        module = self.context.buildItem(*args, **kwargs)
        statusConfig = self.context.getStatusConfiguration(module)
        if statusConfig.vehicle is not None:
            isFit, reason = module.mayInstall(statusConfig.vehicle, statusConfig.slotIdx)
            weightTooHeavy = (not isFit) and reason == 'too heavy'
    except Exception as ex:
        err(traceback.format_exc())
    return base(self, *args, **kwargs)
#####################################################################
# Utility functions
def h1_pad(text):
    """Wrap *text* in <h1> tags."""
    return '<h1>{}</h1>'.format(text)
def gold_pad(text):
    """Wrap *text* in the configured gold font color (default #FFC363)."""
    color = config.get('tooltips/goldColor', '#FFC363')
    return "<font color='%s'>%s</font>" % (color, text)
def red_pad(text):
    """Wrap *text* in a fixed red font tag."""
    return "<font color='#FF0000'>{}</font>".format(text)
# make dict: shells => compatible vehicles
def relate_shells_vehicles():
    """Build shells_vehicles_compatibility: shell compactDescr -> list of vehicle user names."""
    global shells_vehicles_compatibility
    try:
        shells_vehicles_compatibility = {}
        itemsCache = dependency.instance(IItemsCache)
        for vehicle in itemsCache.items.getVehicles().values():
            # skip IGR and training vehicles
            if vehicle.name.find('_IGR') > 0 or vehicle.name.find('_training') > 0:
                continue
            for turrets in vehicle.descriptor.type.turrets:
                for turret in turrets:
                    for gun in turret['guns']:
                        for shot in gun['shots']:
                            shell_id = shot['shell']['compactDescr']
                            names = shells_vehicles_compatibility.setdefault(shell_id, [])
                            if vehicle.userName not in names:
                                names.append(vehicle.userName)
    except Exception as ex:
        err(traceback.format_exc())
        # fall back to an empty map so callers never see a half-built one
        shells_vehicles_compatibility = {}
@registerEvent(ItemsRequester, '_invalidateItems')
def ItemsRequester_invalidateItems(self, itemTypeID, uniqueIDs):
    """Drop cached carousel tooltips for vehicles whose data was invalidated."""
    try:
        if itemTypeID == GUI_ITEM_TYPE.VEHICLE:
            for veh_id in uniqueIDs:
                carousel_tooltips_cache[veh_id] = {}
    except Exception as ex:
        err(traceback.format_exc())
        # on any failure, nuke the whole cache rather than risk stale entries
        carousel_tooltips_cache.clear()
@registerEvent(ItemsRequester, 'clear')
def ItemsRequester_clear(*args, **kwargs):
    # Forward ItemsRequester.clear events to the tooltip cache cleaner.
    tooltips_clear_cache(*args, **kwargs)
def tooltips_clear_cache(*args, **kwargs):
    """Reset all cached tooltip data (carousel tooltips and style templates)."""
    for cache in (carousel_tooltips_cache, styles_templates):
        cache.clear()
|
python
|
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import WindowAggregator
from recipe_config_loading import get_windowing_params
@pytest.fixture
def columns():
    """Column names shared by the windowing test dataframes."""
    return type("COLUMNS", (), {
        "date": "Date",
        "category": "country",
        "aggregation": "value1_avg",
    })
@pytest.fixture
def df(columns):
    """4-row dataframe: two groups of two monthly observations."""
    values = [315.58, 316.39, 316.79, 316.2]
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: ["first", "first", "second", "second"],
        columns.date: pd.date_range("1-1-1959", periods=4, freq="M"),
    })
@pytest.fixture
def long_df(columns):
    """8-row long-format dataframe: two groups of four daily observations."""
    values = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
    days = pd.date_range("1-1-1959", periods=4, freq="D")
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: ["first"] * 4 + ["second"] * 4,
        columns.date: days.append(days),
    })
@pytest.fixture
def long_df_2(columns):
    """6-row dataframe with three groups and a duplicate 'item' identifier column."""
    values = [315.58, 316.39, 316.79, 316.2, 9, 10]
    groups = ["first", "first", "second", "second", "third", "third"]
    month_pair = pd.date_range("1-1-1959", periods=2, freq="M")
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: groups,
        "item": list(groups),
        columns.date: month_pair.append(month_pair).append(month_pair),
    })
@pytest.fixture
def long_df_3(columns):
    """8-row dataframe with four groups and three identical identifier columns."""
    values = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
    groups = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
    month_pair = pd.date_range("1-1-1959", periods=2, freq="M")
    dates = month_pair.append(month_pair).append(month_pair).append(month_pair)
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: groups,
        "item": list(groups),
        "store": list(groups),
        columns.date: dates,
    })
@pytest.fixture
def long_df_4(columns):
    """8-row dataframe whose three identifier columns only partially overlap."""
    values = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
    month_pair = pd.date_range("1-1-2020", periods=2, freq="M")
    dates = month_pair.append(month_pair).append(month_pair).append(month_pair)
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: ["first", "first", "second", "second", "third", "third", "first", "first"],
        "item": ["first", "first", "second", "second", "third", "third", "second", "first"],
        "store": ["first", "first", "second", "second", "third", "third", "third", "fourth"],
        columns.date: dates,
    })
@pytest.fixture
def long_df_numerical(columns):
    """Same shape as long_df but with numerical group identifiers."""
    values = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
    days = pd.date_range("1-1-1959", periods=4, freq="D")
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: [1] * 4 + [2] * 4,
        columns.date: days.append(days),
    })
@pytest.fixture
def recipe_config(columns):
    """Raw recipe settings: causal 3-day left-closed window, retrieve + average."""
    return {
        u'window_type': u'none',
        u'groupby_columns': [u'country'],
        u'closed_option': u'left',
        u'window_unit': u'days',
        u'window_width': 3,
        u'causal_window': True,
        u'datetime_column': u'Date',
        u'advanced_activated': True,
        u'aggregation_types': [u'retrieve', u'average'],
        u'gaussian_std': 1,
    }
@pytest.fixture
def params(recipe_config):
    # Windowing parameters built from the default (causal) recipe config.
    return get_windowing_params(recipe_config)
@pytest.fixture
def params_no_causal(recipe_config):
    # Same as `params` but with a centered (non-causal) window.
    recipe_config["causal_window"] = False
    return get_windowing_params(recipe_config)
class TestWindowingLongFormat:
    """Integration tests for WindowAggregator.compute on long-format (grouped) data."""

    def test_long_format(self, long_df, params, recipe_config, columns):
        # Causal 3-day average per group: the first row of each group has no
        # preceding window, hence NaN.
        window_aggregator = WindowAggregator(params)
        groupby_columns = [columns.category]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(np.round(output_df[columns.aggregation].values, 2),
                                      np.array([np.nan, 315.58, 315.98, 316.25, np.nan, 345., 289.5, 226.33]))
        # Group ordering must be preserved in the output.
        np.testing.assert_array_equal(output_df.country.values,
                                      np.array(['first', 'first', 'first', 'first', 'second', 'second', 'second', 'second']))

    def test_two_identifiers(self, long_df_2, params, recipe_config, columns):
        # Two groupby columns: each (country, item) pair keeps its own dates.
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country", "item"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_2, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(output_df[datetime_column].values,
                                      pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000']))

    def test_three_identifiers(self, long_df_3, params, recipe_config, columns):
        # Three groupby columns with fully aligned identifier values.
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country", "item", "store"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_3, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(output_df[datetime_column].values,
                                      pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000']))

    def test_mix_identifiers(self, long_df_4, params, recipe_config, columns):
        # Identifier columns only partially overlap, producing singleton groups;
        # the expected date order reflects the resulting group ordering.
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country", "item", "store"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_4, datetime_column, groupby_columns=groupby_columns)
        expected_dates = pd.DatetimeIndex(['2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000',
                                           '2020-02-29T00:00:00.000000000', '2020-01-31T00:00:00.000000000',
                                           '2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000',
                                           '2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000'])
        np.testing.assert_array_equal(output_df[datetime_column].values, expected_dates)

    def test_empty_identifiers(self, df, params, recipe_config, columns):
        # [], None and omitted groupby_columns must behave identically.
        window_aggregator = WindowAggregator(params)
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(df, datetime_column, groupby_columns=[])
        assert output_df.shape == (4, 5)
        output_df = window_aggregator.compute(df, datetime_column)
        assert output_df.shape == (4, 5)
        output_df = window_aggregator.compute(df, datetime_column, groupby_columns=None)
        assert output_df.shape == (4, 5)

    def test_long_format_no_causal(self, long_df, params_no_causal, recipe_config, columns):
        # Centered window: both edges of each group lack a full window -> NaN.
        window_aggregator = WindowAggregator(params_no_causal)
        groupby_columns = ["country"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(np.round(output_df[columns.aggregation].values, 2),
                                      np.array([np.nan, 316.25, 316.46, np.nan, np.nan, 226.33, 211., np.nan]))
        np.testing.assert_array_equal(output_df.country.values,
                                      np.array(['first', 'first', 'first', 'first', 'second', 'second', 'second', 'second']))

    def test_long_format_numerical(self, long_df_numerical, params, recipe_config, columns):
        # Numerical group identifiers must survive the windowing round-trip.
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_numerical, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(output_df.country.values, np.array([1, 1, 1, 1, 2, 2, 2, 2]))
|
python
|
#!/usr/bin/env python
import pyinotify
import os, sys
import logging
import json
import thread, threading
import time, datetime
import hashlib
import mimetypes
import traceback
# google stuff
from ServiceProviders.Google import GoogleServiceProvider
from apiclient.http import BatchHttpRequest
from apiclient import errors
#logging stuff
class NotImplementedError(Exception):
    """Generic exception for placeholder (not-yet-implemented) functions.

    NOTE(review): this shadows the builtin ``NotImplementedError`` for the
    rest of the module — consider renaming; confirm no callers rely on it.
    """
class GenericEventHandler(pyinotify.ProcessEvent):
    """
    Logs every possible inotify event type; overloads methods in the
    parent class.

    NOTE(review): relies on a ``self.logger`` attribute that is never set
    in this class — presumably injected by the creator; confirm before use.
    """
    def process_IN_CREATE(self, event):
        # A file or directory was created under a watched path.
        self.logger.info("-> Creating: %s" % event.name)
    def process_IN_DELETE(self, event):
        # A file or directory was removed from a watched path.
        self.logger.info("-> Removing: %s" % event.name)
    def process_default(self, event):
        # Fallback for any event type without a dedicated handler.
        self.logger.info("->Unknown event: %s" % event.maskname)
class GoogleEventHandler(pyinotify.ProcessEvent):
    """
    Mirrors a watched directory tree to Google Drive: directories are
    created on Drive from inotify events, files are uploaded by a
    background walker thread, deletions and modifications are forwarded.

    Fixes in this revision:
    - ``traceback.self.logger.info_exc()`` (an AttributeError) replaced by
      ``traceback.format_exc()`` in ``_save_to_json``.
    - ``googleReturnBody`` initialised before the upload retry loop so a
      total upload failure cannot raise NameError.
    - the Drive ``delete`` retry loops now ``break`` on success instead of
      re-deleting up to 5 times.
    - ``dict.fromkeys(keys, {...})`` replaced: it shared ONE mutable dict
      between every watched path, so all paths saw each other's files.
    - ``process_IN_MODIFY`` no longer raises (with the mutex held) when the
      parent directory is not tracked.
    """
    def __init__(self, options_dict, watch_descriptors):
        """
        options_dict contains all parameters necessary for
        the GoogleServiceProvider.__init__() method.
        watch_descriptors is a list of {path: watch_descriptor} dicts; the
        path mapped to descriptor 1 is treated as the protected root dir.
        """
        self.sp = GoogleServiceProvider(**options_dict)
        self.credentials = self.sp.get_stored_credentials('testid')
        self.service = self.sp.build_service(self.credentials)
        self.http = self.service[0]
        self.service = self.service[1]
        self.options_dict = options_dict
        # descriptor 1 marks the root watched directory
        for key, value in watch_descriptors[0].items():
            if value == 1:
                self.protected_dir = key
        self.descriptors = watch_descriptors
        self.descriptors_dict = {}
        for desc in self.descriptors:
            self.descriptors_dict.update(desc)
        ### logging stuff:
        self.logger = logging.getLogger('main')
        self.logger.setLevel(logging.DEBUG)
        # create console handler and set level to debug
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logging.DEBUG)
        # create formatter
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                            filename=options_dict['LOGFILE'])
        # add formatter to ch
        self.ch.setFormatter(formatter)
        # add ch to logger
        logging.addLevelName(logging.WARNING, "%s" % logging.getLevelName(logging.WARNING))
        logging.addLevelName(logging.ERROR, "%s" % logging.getLevelName(logging.ERROR))
        logging.addLevelName(logging.DEBUG, "%s" % logging.getLevelName(logging.DEBUG))
        logging.addLevelName(logging.INFO, "%s" % logging.getLevelName(logging.INFO))
        # we need this mutex for the files_dict dictionary
        self.mutex = threading.Lock()
        # this is by default the SyncThemAll folder on GoogleDrive
        if self.options_dict['DEFAULT_PARENT'] != 'root':
            self.default_pid = self.sp.query_entity(self.service, "title = '" + self.options_dict['DEFAULT_PARENT'] + "'")[0]['id']
        else:
            self.default_pid = 'root'
        # this will have to be loaded from json
        ## the structure of the json is:
        """
        {
            'folder_pathname' :
                {
                    'files' : {
                        'file1': {'md5sum': None,
                                  'ownId': None,
                                  'parent': None,
                                  'alreadyUploaded': False,
                                  'alreadyUpdated': False,
                                  'upLoadinProgress': False,
                                  'progressBar': int,
                                  'fullpath': None,
                                  'fileBody': {},
                                  'googleBody': {} },
                        ...
                    },
                    'parent': None,
                    'alreadyCreated': False,
                    'alreadyUpdated': False,
                    'grive': {'own_google_id': None, 'parent_google_id': None},
                    'folderBody': {},
                    'googleMetaData': {}
                },
        }
        """
        self.jsonfile = self.options_dict['treefile']
        self.files_dict = {}
        if not os.path.exists(self.jsonfile):
            # BUGFIX: dict.fromkeys(keys, {...}) would make every watched path
            # share ONE dict object; build a fresh entry per path instead.
            for path in self.descriptors_dict.keys():
                self.files_dict[path] = {'files': {},
                                         'grive': {'own_google_id': None, 'parent_google_id': None},
                                         'folderBody': {},
                                         'googleMetaData': {}}
        else:
            with open(self.jsonfile, 'r') as infile:
                try:
                    self.files_dict = json.loads(infile.read())
                except ValueError as e:
                    self.logger.info("Jsonfile %s not found or corrupted!\n Please remove, or stash it." % self.jsonfile)
        self.syncthread = thread.start_new_thread(self._save_to_json, ())
        self.filesyncthread = thread.start_new_thread(self._synchronize_files, ())

    def _save_to_json(self):
        """Background thread: persist files_dict to the JSON tree file every 10s."""
        while True:
            self.logger.info("%s save_to_json()" % datetime.datetime.now())
            try:
                with open(self.jsonfile, 'w') as outfile:
                    # files_dict may be mutated concurrently; dump best-effort
                    try:
                        json.dump(self.files_dict, outfile)
                    except Exception as e:
                        self.logger.info("%s" % e)
            except Exception as e:
                # BUGFIX: was 'traceback.self.logger.info_exc()' which raised
                # AttributeError inside the handler.
                tb = traceback.format_exc()
                t = (datetime.datetime.now(), tb, e)
                self.logger.info("%s" % t)
            time.sleep(10)

    def _synchronize_files(self):
        """Background thread: walk protected_dir and upload new files to Drive."""
        # each thread gets its own service objects (httplib2 is not thread-safe)
        self.file_sp = GoogleServiceProvider(**self.options_dict)
        self.file_credentials = self.file_sp.get_stored_credentials('testid')
        self.file_service = self.file_sp.build_service(self.file_credentials)[1]
        while True:
            for (fullpath, directory, file_list) in os.walk(self.protected_dir):
                try:
                    # only upload from directories we track
                    if fullpath not in self.files_dict.keys():
                        continue
                    for fisier in file_list:
                        fp = os.path.join(fullpath, fisier)
                        self.mutex.acquire()
                        if fisier not in self.files_dict[fullpath]['files']:
                            self.files_dict[fullpath]['files'][fisier] = {
                                'md5sum': hashlib.md5(open(fp).read()).hexdigest(),
                                'ownId': None,
                                'parent': fullpath,
                                'alreadyUploaded': False,
                                'alreadyUpdated': False,
                                'upLoadinProgress': False,
                                'progressBar': 0,
                                'fullpath': fp,
                                'fileBody': {
                                    'title': fisier,
                                    'description': fp,
                                    'mimeType': mimetypes.guess_type(fp)[0] or 'text/plain',
                                    'parents': [
                                        {
                                            "kind": "drive#parentReference",
                                            "id": None,
                                        }
                                    ],
                                },
                                'googleBody': {},
                            }
                        if self.files_dict[fullpath]['files'][fisier]['alreadyUploaded']:
                            self.mutex.release()
                            continue
                        if os.path.getsize(fp) == 0:
                            self.logger.info("%s is 0 bytes in size, skipping" % fp)
                            self.mutex.release()
                            continue
                        # resolve the Drive parent id for the upload body
                        if self.files_dict[fullpath]['grive']['own_google_id']:
                            self.files_dict[fullpath]['files'][fisier]['fileBody']['parents'][0]['id'] = self.files_dict[fullpath]['grive']['own_google_id']
                        if self.files_dict[fullpath]['grive']['own_google_id'] is None and fullpath in self.descriptors[0]:
                            self.files_dict[fullpath]['files'][fisier]['fileBody']['parents'][0]['id'] = self.default_pid
                        self.mutex.release()
                        # BUGFIX: initialise so a total upload failure (all 5
                        # retries raising) cannot hit a NameError below.
                        googleReturnBody = None
                        for retry in range(5):
                            try:
                                self.logger.debug("Uploading file: %s" % fisier)
                                googleReturnBody = self.file_sp.upload_file(fisier,
                                                                            self.files_dict[fullpath]['files'][fisier]['fullpath'],
                                                                            self.file_service,
                                                                            self.files_dict[fullpath]['files'][fisier]['fileBody'])
                                break
                            except Exception as e:
                                self.logger.error("%s" % e)
                                traceback.print_exc()
                        if googleReturnBody:
                            try:
                                self.mutex.acquire()
                                self.files_dict[fullpath]['files'][fisier]['googleBody'] = googleReturnBody
                                self.files_dict[fullpath]['files'][fisier]['ownId'] = googleReturnBody['id']
                                self.files_dict[fullpath]['files'][fisier]['alreadyUploaded'] = True
                                self.logger.info("Successfully uploaded file: %s " % fp)
                                self.mutex.release()
                            except KeyError as e:
                                self.logger.info("File has already been deleted from the filesytem: %s" % e)
                                self.mutex.release()
                                continue
                except IOError as e:
                    self.logger.info("File has already been deleted from the filesystem: %s " % e)
                    self.mutex.release()
                    continue
            time.sleep(self.options_dict['FILE_SYNC_INTERVAL'])

        # NOTE(review): everything below is unreachable (the loop above never
        # exits); the batch callback setup probably belongs in __init__ —
        # kept in place pending confirmation.
        def callb(request_id, response, exception):
            """
            in case something went wrong, attempts to retransmit the batch request ( 5 times )
            """
            t = (request_id, self.batch._requests, exception)
            def upd():
                self.files_dict[response['description']]['alreadyCreated'] = True
                self.files_dict[response['description']]['grive']['own_google_id'] = response['id']
                self.files_dict[response['description']]['googleMetaData'].update(response)
            if exception is not None:
                self.logger.info("Error occured during BatchHttpRequest %s" % (t,))
            else:
                self.mutex.acquire()
                upd()
                self.mutex.release()
        self.batch = BatchHttpRequest(callback=callb)

    def process_IN_CREATE(self, event):
        """
        Triggered by pyinotify when a file or directory is created.
        Directories are created on Drive immediately; plain files are left
        for the _synchronize_files thread to pick up.
        """
        t = {'event.pathname': event.pathname,
             'event.maskname': event.maskname,
             'event.wd': event.wd,
             'event.dir': event.dir}
        self.logger.info("-> Creating: %s" % t)
        parent = os.path.abspath(os.path.join(event.pathname, os.pardir))
        folderbody = {'files': {},
                      'parent': parent,
                      'alreadyCreated': False,
                      'alreadyUpdated': False,
                      'grive': {'own_google_id': None, 'parent_google_id': None},
                      'folderBody': {
                          'title': os.path.basename(event.pathname),
                          'description': event.pathname,
                          'mimeType': 'application/vnd.google-apps.folder',
                          "parents": [{
                              "kind": "drive#parentReference",
                              "id": None,
                          }],
                      },
                      'googleMetaData': {}}
        if event.dir:
            # we populate the structure first
            self.mutex.acquire()
            try:
                if self.files_dict[event.pathname]['alreadyCreated']:
                    self.mutex.release()
                    return 0
            except KeyError as e:
                self.files_dict[event.pathname] = folderbody
            self.mutex.release()
            # let's get the parent id
            if parent != self.protected_dir and parent in self.files_dict.keys():
                pid = self.files_dict[parent]['grive']['own_google_id']
            else:
                pid = None
            if parent == self.protected_dir:
                pid = self.default_pid
            self.mutex.acquire()
            # update structure first
            self.files_dict[event.pathname]['grive']['parent_google_id'] = pid
            self.files_dict[event.pathname]['folderBody']['parents'][0]['id'] = pid
            self.mutex.release()
            self.mutex.acquire()
            own_id = self.sp.create_folder(self.service, self.files_dict[event.pathname]['folderBody'])
            self.mutex.release()
            if own_id:
                self.mutex.acquire()
                t = (own_id['id'], own_id['title'])
                self.logger.info("Acquired own_id and title: %s" % (t,))
                self.files_dict[event.pathname]['grive']['own_google_id'] = own_id['id']
                self.files_dict[event.pathname]['googleMetaData'] = own_id
                self.files_dict[event.pathname]['alreadyCreated'] = True
                self.mutex.release()

    def process_IN_DELETE(self, event):
        """Remove the deleted file/folder from Drive and from files_dict."""
        t = {'event.pathname': event.pathname,
             'event.maskname': event.maskname,
             'event.wd': event.wd,
             'event.dir': event.dir}
        self.logger.info("-> Removing: %s" % t)
        parent = os.path.abspath(os.path.join(event.pathname, os.pardir))
        if event.dir:
            self.mutex.acquire()
            if self.files_dict[event.pathname]['grive']['own_google_id']:
                for retry in range(5):
                    try:
                        self.service.files().delete(fileId=self.files_dict[event.pathname]['grive']['own_google_id']).execute()
                        break  # BUGFIX: stop retrying once the delete succeeds
                    except errors.HttpError as e:
                        self.logger.info("%s" % e)
                        continue
            self.files_dict.pop(event.pathname)
            self.mutex.release()
        else:
            if parent in self.files_dict.keys():
                self.mutex.acquire()
                try:
                    if self.files_dict[parent]['files'][os.path.basename(event.pathname)]['ownId']:
                        for retry in range(5):
                            try:
                                self.service.files().delete(fileId=self.files_dict[parent]['files'][os.path.basename(event.pathname)]['ownId']).execute()
                                break
                            except errors.HttpError as e:
                                self.logger.info("%s" % e)
                                continue
                except KeyError as e:
                    self.mutex.release()
                    return 0  # parent folder has been deleted
                try:
                    self.files_dict[parent]['files'].pop(os.path.basename(event.pathname))
                except KeyError as e:
                    self.mutex.release()
                    return 0
                self.mutex.release()

    def process_IN_MODIFY(self, event):
        """
        Triggered on file modification: re-hash the file and push an
        updated body to Drive when the content changed.
        """
        t = {'event.pathname': event.pathname,
             'event.maskname': event.maskname,
             'event.wd': event.wd,
             'event.dir': event.dir}
        self.logger.info("-> Modified: %s" % t)
        parent = os.path.abspath(os.path.join(event.pathname, os.pardir))
        self.mutex.acquire()
        # BUGFIX: also guard against an untracked parent — previously an
        # uncaught KeyError here left the mutex locked forever.
        if parent not in self.files_dict or event.name not in self.files_dict[parent]['files']:
            self.mutex.release()
            return 0
        try:
            if not event.dir:
                if hashlib.md5(open(event.pathname).read()).hexdigest() != self.files_dict[parent]['files'][event.name]['md5sum']:
                    self.files_dict[parent]['files'][event.name]['md5sum'] = hashlib.md5(open(event.pathname).read()).hexdigest()
                    updated_file = self.sp.update_file(self.service, event.pathname, self.files_dict[parent]['files'][event.name]['ownId'],
                                                       new_body=self.files_dict[parent]['files'][event.name]['fileBody'])
        except (KeyError, IOError) as e:
            self.mutex.release()
            self.logger.info("Modify error: %s" % e)
            return 0
        self.mutex.release()

    def __del__(self):
        # drop references to the Google service objects on teardown
        self.sp = None
        self.credentials = None
        self.service = None
        self.logger.info("Shutting down %s" % self.__class__.__name__)
|
python
|
# CS4120 NLP, Northeastern University 2020
import spacy
from tqdm import tqdm
from spacy.analysis import Token, Doc, Span
from data_management import output_filepath, input_filepath
def main():
    """Parse sample sentences and write their POS-tag/entity-type pairs to a file."""
    nlp = spacy.load("en_core_web_sm")
    parsed_docs = []
    with open(input_filepath("samplesentences.txt")) as infile:
        for line in tqdm(infile, desc="Parsing dataset"):
            # skip blank lines
            if not line.isspace():
                parsed_docs.append(nlp(line))
    # NOTE(review): output goes through input_filepath even though
    # output_filepath is imported — confirm the intended destination.
    with open(input_filepath("training_tags_out.txt"), "w") as outfile:
        for doc in parsed_docs:
            tags = [f"{token.tag_}/{token.ent_type_}" for token in doc]
            outfile.write(" ".join(tags))
            outfile.write("\n")


if __name__ == "__main__":
    main()
|
python
|
# -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
    """Hook that succeeds: records the call and returns its message."""

    def __init__(self, message):
        self.called = False
        self.message = message

    def __call__(self):
        self.called = True
        return self.message
class Fail(object):
    """Hook that fails: records the call and raises TryNext with its message."""

    def __init__(self, message):
        self.called = False
        self.message = message

    def __call__(self):
        self.called = True
        raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
    """Test two failing hooks"""
    first = Fail(u'fail1')
    second = Fail(u'fail2')
    dispatcher = CommandChainDispatcher([(0, first), (10, second)])
    try:
        dispatcher()
    except TryNext as e:
        # The last hook's TryNext propagates out of the dispatcher.
        nt.assert_equal(str(e), u'fail2')
    else:
        assert False, "Expected exception was not raised."
    # Both hooks were attempted in priority order.
    nt.assert_true(first.called)
    nt.assert_true(second.called)
def test_command_chain_dispatcher_fofo():
    """Test a mixture of failing and succeeding hooks."""
    fail1, fail2 = Fail(u'fail1'), Fail(u'fail2')
    okay1, okay2 = Okay(u'okay1'), Okay(u'okay2')
    dp = CommandChainDispatcher([(0, fail1),
                                 (10, fail2),
                                 (15, okay2)])
    dp.add(okay1, 5)
    # okay1 (priority 5) is the first hook that does not raise TryNext.
    nt.assert_equal(dp(), u'okay1')
    nt.assert_true(fail1.called)
    nt.assert_true(okay1.called)
    # Hooks after the first success must never be invoked.
    nt.assert_false(fail2.called)
    nt.assert_false(okay2.called)
def test_command_chain_dispatcher_eq_priority():
    # Regression test: adding a second hook at an already-used priority must
    # not raise (equal-priority tuples would otherwise trigger a comparison
    # of the non-comparable hook objects during sorting).
    okay1 = Okay(u'okay1')
    okay2 = Okay(u'okay2')
    dp = CommandChainDispatcher([(1, okay1)])
    dp.add(okay2, 1)
|
python
|
from datetime import date
import uuid
from typing import Optional, List
from pydantic import BaseModel, Field
def generate_invoice_id():
    """Return a random UUID4 string used as the default invoice identifier."""
    return str(uuid.uuid4())
class InvoiceInfo(BaseModel):
    """Header-level data extracted from a single invoice document."""
    # Defaults to a fresh UUID4 string when not supplied.
    invoice_id: str = Field(default_factory=generate_invoice_id)
    issuer_name: str
    issuer_address: Optional[str]
    recipient_name: Optional[str]
    document_date: Optional[date]
    payment_date: Optional[date]
    due_date: Optional[date]
    currency: Optional[str]
    amount_total: float
    amount_paid: Optional[float]
    amount_tax: Optional[float]
    amount_due: Optional[float]
    amount_sum: Optional[float]
    num_items: Optional[int]

    class Config:
        # pydantic v1: allow construction from ORM objects via from_orm().
        orm_mode = True
class InvoiceItem(BaseModel):
    """A single line item belonging to an invoice."""
    # References InvoiceInfo.invoice_id.
    invoice_id: str
    item_name: str
    sub_total: float
    class Config:
        # Allow construction from ORM objects (pydantic v1 setting).
        orm_mode = True
class Invoice(BaseModel):
    """Aggregate of one invoice header plus all of its line items."""
    info: InvoiceInfo
    items: List[InvoiceItem]
|
python
|
# Generated by Django 3.0.7 on 2020-07-17 15:52
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax length limits / blank-constraints on problem text and SQL fields."""
    dependencies = [
        ('judge', '0010_auto_20200717_1735'),
    ]
    operations = [
        # Raw SQL setup bodies can be long; allow up to 20k chars, blank or NULL.
        migrations.AlterField(
            model_name='problem',
            name='create_sql',
            field=models.CharField(blank=True, max_length=20000, null=True),
        ),
        migrations.AlterField(
            model_name='problem',
            name='insert_sql',
            field=models.CharField(blank=True, max_length=20000, null=True),
        ),
        migrations.AlterField(
            model_name='problem',
            name='text_md',
            field=models.CharField(blank=True, max_length=5000),
        ),
        migrations.AlterField(
            model_name='problem',
            name='title_md',
            field=models.CharField(blank=True, max_length=100),
        ),
        # A solution may be blank in the admin form but, when present, must be
        # at least one character long.
        migrations.AlterField(
            model_name='selectproblem',
            name='solution',
            field=models.CharField(blank=True, max_length=5000, validators=[django.core.validators.MinLengthValidator(1)]),
        ),
    ]
|
python
|
import sys
import numpy as np
import matplotlib.pyplot as plt
# Load timing data files given on the command line:
#   argv[1] -> brute-force timings, argv[2] -> closest-pair timings.
#bf = np.loadtxt('data/times/brute_force.txt')
#cp = np.loadtxt('data/times/closest_pair.txt')
bf = np.loadtxt(sys.argv[1])
cp = np.loadtxt(sys.argv[2])
# Reshape data: one row per problem size (6 sizes); assumes the file length is
# an exact multiple of 6 -- TODO confirm the input format.
bf = bf.reshape(6, len(bf) // 6)
cp = cp.reshape(6, len(cp) // 6)
# Average times per size, skipping column 0 -- presumably a warm-up run; verify.
bf_mean = np.mean(bf[:,1:], axis=1)
cp_mean = np.mean(cp[:,1:], axis=1)
# Problem sizes 10^1 .. 10^6, matching the 6 rows above.
N = np.array([10**i for i in range(1, 7)])
# Measured curves plus O(n^2) and O(n log n) reference slopes (constants chosen
# only to place the reference lines near the data on the log-log plot).
plt.plot(N, bf_mean, "r-x", label="Brute Force")
plt.plot(N, cp_mean, "b-o", label="Closest Pair")
plt.plot(N,1e-8*N**2, "r-.", label=r"$O(n^2)$")
plt.plot(N,1.5e-7*N*np.log(N)/np.log(2), "b-.", label=r"$O(n\log_2n)$")
plt.xscale('log')
plt.yscale('log')
plt.xlabel("N Aircrafts")
plt.ylabel("Time [s]")
plt.grid(True)
plt.legend()
plt.show()
|
python
|
from core.testing import APITestCase
class TestAPITestCase(APITestCase):
    """Smoke test: the project's APITestCase base provides pytest marks and mixer."""
    def test_tests(self):
        # Both attributes are expected to be supplied by core.testing.APITestCase.
        self.assertTrue(hasattr(self, 'pytestmark'))
        self.assertTrue(hasattr(self, 'mixer'))
|
python
|
# NOT FINISHED, barely started
import copy
import time
import random
import math
from typing import List
import jax.numpy as np
from pomdp_py.framework.basics import Action, Agent, POMDP, State, Observation,\
ObservationModel, TransitionModel, GenerativeDistribution, PolicyModel
from pomdp_py.framework.planner import Planner
from pomdp_py.representations.distribution.particles import Particles
from pomdp_py.algorithms.po_uct import VNode, RootVNode, QNode, POUCT, RandomRollout
from pomdp_py.algorithms.pomcp import VNodeParticles, RootVNodeParticles, POMCP
def particle_reinvigoration(particles, num_particles, state_transform_func=None):
    """Note that particles should contain states that have already made
    the transition as a result of the real action. Therefore, they simply
    form part of the reinvigorated particles. At least maintain `num_particles`
    number of particles. If already have more, then it's ok.

    Args:
        particles: a Particles belief to top up (not mutated; deep-copied).
        num_particles (int): minimum number of particles to maintain.
        state_transform_func: optional State -> State used to add artificial
            noise to each duplicated particle.
    Raises:
        ValueError: if `particles` is empty (particle deprivation).
    """
    # If not enough particles, introduce artificial noise to existing particles (reinvigoration)
    new_particles = copy.deepcopy(particles)
    if len(new_particles) == 0:
        raise ValueError("Particle deprivation.")
    if len(new_particles) > num_particles:
        return new_particles
    print("Particle reinvigoration for %d particles" % (num_particles - len(new_particles)))
    while len(new_particles) < num_particles:
        # need to make a copy otherwise the transform affects states in 'particles'
        next_state = copy.deepcopy(particles.random())
        # Add artificial noise
        if state_transform_func is not None:
            next_state = state_transform_func(next_state)
        new_particles.add(next_state)
    return new_particles
def update_particles_belief(
    current_particles,
    real_action,
    real_observation=None,
    observation_model=None,
    transition_model=None,
    blackbox_model=None,
    state_transform_func=None,
):
    """
    update_particles_belief(Particles current_particles,
        Action real_action, Observation real_observation=None,
        ObservationModel observation_model=None,
        TransitionModel transition_model=None,
        BlackboxModel blackbox_model=None,
        state_transform_func=None)
    This is the second case (update particles belief explicitly); Either
    BlackboxModel is not None, or TransitionModel and ObservationModel are not
    None. Note that you DON'T need to call this function if you are using POMCP.
    |TODO: not tested|
    Args:
        state_transform_func (State->State) is used to add artificial noise to
        the reinvigorated particles.
    Returns:
        Particles: the filtered, reinvigorated belief.
    """
    # Bug fix: this accumulator was previously never initialized, so the
    # function raised NameError on its first use.
    filtered_particles = []
    for particle in current_particles.particles:
        # particle represents a state
        if blackbox_model is not None:
            # We're using a blackbox generator; (s',o,r) ~ G(s,a)
            result = blackbox_model.sample(particle, real_action)
            next_state = result[0]
            observation = result[1]
        else:
            # We're using explicit models
            next_state = transition_model.sample(particle, real_action)
            observation = observation_model.sample(next_state, real_action)
        # If observation matches real, then the next_state is accepted
        if observation == real_observation:
            filtered_particles.append(next_state)
    # Particle reinvigoration tops the filtered set back up to the original size.
    return particle_reinvigoration(Particles(filtered_particles), len(current_particles.particles),
                                   state_transform_func=state_transform_func)
def sample_explicit_models(T, O, R, state, action, discount_factor=1.):
    """Draw one generative-model sample from explicit T/O/R models.

    Returns (next_state, observation, reward, nsteps) when an observation
    model `O` is given, otherwise (next_state, reward, nsteps).
    """
    nsteps = 1
    next_state = T.sample(state, action)
    reward = R.sample(state, action, next_state)
    if O is None:
        return next_state, reward, nsteps
    observation = O.sample(next_state, action)
    return next_state, observation, reward, nsteps
class ParticlesJax(Particles):
    # represents a belief / distribution over states
    #
    # NOTE(review): unlike the parent Particles, states live in an index list
    # (`_values`) with a JAX weight vector; most parent methods are not
    # overridden yet, so this class is incomplete (file header says so too).
    def __init__(self, values: List[State], weights: np.ndarray):
        self._values = values # used to convert from integer to State
        self._weights = weights # can be unnormalized, i.e. counts
    def add(self, particle, weight=1):
        # not sure we want to use this API
        # NOTE(review): `.at[particle]` assumes `particle` is already an integer
        # index into `_weights`, not a State -- TODO confirm intended usage.
        self._weights = self._weights.at[particle].add(weight)
        #self._values.index(particle)
        #if isinstance(particle, State)
        #else particle
        #].add(weight)
class PomcpJax(POMCP):
    """POMCP is POUCT + particle belief representation.
    This POMCP version only works for problems
    with action space that can be enumerated.

    NOTE(review): the file header says "NOT FINISHED"; several methods below
    reference names that are never defined in this module (see inline notes),
    so this class cannot run as-is.
    """
    def __init__(self,
                 max_depth=5, planning_time=-1., num_sims=-1,
                 discount_factor=0.9, exploration_const=math.sqrt(2),
                 num_visits_init=0, value_init=0,
                 rollout_policy=RandomRollout(), action_prior=None,
                 show_progress=False, pbar_update_interval=5):
        # NOTE(review): `rollout_policy=RandomRollout()` is a shared mutable
        # default -- every instance without an explicit policy shares one object.
        super().__init__(max_depth=max_depth,
                         planning_time=planning_time,
                         num_sims=num_sims,
                         discount_factor=discount_factor,
                         exploration_const=exploration_const,
                         num_visits_init=num_visits_init,
                         value_init=value_init,
                         rollout_policy=rollout_policy,
                         action_prior=action_prior,
                         show_progress=show_progress,
                         pbar_update_interval=pbar_update_interval)
        # TODO: can remove all when convert to cython
        #self._show_progress = show_progress
    def plan(self, agent):
        """Plan an action; requires the agent's belief to be a ParticlesJax."""
        # Only works if the agent's belief is particles
        if not isinstance(agent.belief, ParticlesJax):
            raise TypeError("Agent's belief is not represented in particles.\n"\
                            "POMCP not usable. Please convert it to particles.")
        return POUCT.plan(self, agent)
    def update(self, agent, real_action, real_observation,
               state_transform_func=None):
        """
        Assume that the agent's history has been updated after taking real_action
        and receiving real_observation.
        `state_transform_func`: Used to add artificial transform to states during
        particle reinvigoration. Signature: s -> s_transformed
        """
        if not isinstance(agent.belief, ParticlesJax):
            raise TypeError("agent's belief is not represented in particles.\n"\
                            "POMCP not usable. Please convert it to particles.")
        if not hasattr(agent, "tree"):
            print("Warning: agent does not have tree. Have you planned yet?")
            return
        if agent.tree[real_action][real_observation] is None:
            # Never anticipated the real_observation. No reinvigoration can happen.
            raise ValueError("Particle deprivation.")
        # Update the tree; Reinvigorate the tree's belief and use it
        # as the updated belief for the agent.
        agent.tree = RootVNodeParticles.from_vnode(agent.tree[real_action][real_observation],
                                                   agent.history)
        tree_belief = agent.tree.belief
        agent.set_belief(particle_reinvigoration(
            tree_belief,
            len(agent.init_belief.particles),
            state_transform_func=state_transform_func))
        # If observation was never encountered in simulation, then tree will be None;
        # particle reinvigoration will occur.
        if agent.tree is not None:
            agent.tree.belief = copy.deepcopy(agent.belief)
    def _search(self):
        # NOTE(review): `stop_by_sims` and `sims_count` are never initialized in
        # this method (POUCT._search defines them as locals); as written this
        # raises NameError. `tqdm` is also not imported in this module.
        if self._show_progress:
            if stop_by_sims:
                total = int(self._num_sims)
            else:
                total = self._planning_time
            pbar = tqdm(total=total)
        start_time = time.time()
        while True:
            ## Note: the tree node with () history will have
            ## the init belief given to the agent.
            state = self._agent.sample_belief()
            self._simulate(state, self._agent.history, self._agent.tree,
                           None, None, 0)
            sims_count +=1
            time_taken = time.time() - start_time
            if self._show_progress and sims_count % self._pbar_update_interval == 0:
                if stop_by_sims:
                    pbar.n = sims_count
                else:
                    pbar.n = time_taken
                pbar.refresh()
            if stop_by_sims:
                if sims_count >= self._num_sims:
                    break
            else:
                if time_taken > self._planning_time:
                    if self._show_progress:
                        pbar.n = self._planning_time
                        pbar.refresh()
                    break
        if self._show_progress:
            pbar.close()
        best_action = self._agent.tree.argmax()
        return best_action, time_taken, sims_count
    def _simulate(self,
                  state, history, root, parent,
                  observation, depth):
        # One MCTS simulation step: expand unseen nodes, otherwise recurse via UCB.
        if depth > self._max_depth:
            return 0
        if root is None:
            if self._agent.tree is None:
                root = self._VNode(agent=self._agent, root=True)
                self._agent.tree = root
                if self._agent.tree.history != self._agent.history:
                    raise ValueError("Unable to plan for the given history.")
            else:
                root = self._VNode()
            if parent is not None:
                parent[observation] = root
            self._expand_vnode(root, history, state=state)
            rollout_reward = self._rollout(state, history, root, depth)
            return rollout_reward
        action = self._ucb(root)
        # NOTE(review): module-level `sample_generative_model` is neither defined
        # nor imported here (pomdp_py.framework.basics provides it) -- NameError.
        next_state, observation, reward, nsteps = sample_generative_model(self._agent, state, action)
        if nsteps == 0:
            # This indicates the provided action didn't lead to transition
            # Perhaps the action is not allowed to be performed for the given state
            # (for example, the state is not in the initiation set of the option,
            # or the state is a terminal state)
            return reward
        total_reward = reward + (self._discount_factor**nsteps)*self._simulate(
            next_state,
            history + ((action, observation),),
            root[action][observation],
            root[action],
            observation,
            depth+nsteps)
        root.num_visits += 1
        root[action].num_visits += 1
        root[action].value = root[action].value + (total_reward - root[action].value) / (root[action].num_visits)
        # POMCP simulate, need to update belief as well
        if depth == 1 and root is not None:
            root.belief.add(state) # belief update happens as simulation goes.
        return total_reward
    def _rollout(self, state, history, root, depth):
        # NOTE(review): `total_discounted_reward` and `discount` are never
        # initialized (should start at 0 and 1.0) -- NameError as written.
        while depth < self._max_depth:
            action = self._rollout_policy.rollout(state, history)
            # NOTE(review): same undefined `sample_generative_model` as _simulate.
            next_state, observation, reward, nsteps = sample_generative_model(self._agent, state, action)
            history = history + ((action, observation),)
            depth += nsteps
            total_discounted_reward += reward * discount
            discount *= (self._discount_factor**nsteps)
            state = next_state
        return total_discounted_reward
    def _ucb(self, root):
        """UCB1"""
        # Unvisited children get +inf so each action is tried at least once.
        best_action, best_value = None, float('-inf')
        for action in root.children:
            if root[action].num_visits == 0:
                val = float('inf')
            else:
                val = root[action].value + \
                    self._exploration_const * math.sqrt(math.log(root.num_visits + 1) / root[action].num_visits)
            if val > best_value:
                best_action = action
                best_value = val
        return best_action
    def _sample_generative_model(self, state, action):
        '''
        (s', o, r) ~ G(s, a)
        '''
        if self._agent.transition_model is None:
            next_state, observation, reward = self._agent.generative_model.sample(state, action)
        else:
            next_state = self._agent.transition_model.sample(state, action)
            observation = self._agent.observation_model.sample(next_state, action)
            reward = self._agent.reward_model.sample(state, action, next_state)
        return next_state, observation, reward
    def _VNode(self, agent=None, root=False, **kwargs):
        """Returns a VNode with default values; The function naming makes it clear
        that this function is about creating a VNode object."""
        if root:
            # agent cannot be None.
            return RootVNodeParticles(self._num_visits_init,
                                      agent.history,
                                      belief=copy.deepcopy(agent.belief))
        else:
            if agent is None:
                return VNodeParticles(self._num_visits_init,
                                      belief=Particles([]))
            else:
                return VNodeParticles(self._num_visits_init,
                                      belief=copy.deepcopy(agent.belief))
|
python
|
# Generated by Django 3.2.3 on 2021-11-11 14:04
from django.db import migrations, models
import django.db.models.deletion
def copy_funding_instruments_from_calls_to_projects(apps, schema_editor):
    """Backfill Project.funding_instrument from each project's call.

    Uses the historical model state via `apps.get_model`, as required for
    Django data migrations.
    """
    Project = apps.get_model('project_core', 'Project')
    for project in Project.objects.all():
        project.funding_instrument = project.call.funding_instrument
        project.save()
class Migration(migrations.Migration):
    """Add funding_instrument FKs to Project and HistoricalProject, then
    backfill them from each project's call."""
    dependencies = [
        ('project_core', '0167_organisation_display_name'),
    ]
    operations = [
        # Historical table: no DB constraint, DO_NOTHING, per django-simple-history convention.
        migrations.AddField(
            model_name='historicalproject',
            name='funding_instrument',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text='Funding instrument to which the call belongs', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='project_core.fundinginstrument'),
        ),
        migrations.AddField(
            model_name='project',
            name='funding_instrument',
            field=models.ForeignKey(blank=True, help_text='Funding instrument to which the call belongs', null=True, on_delete=django.db.models.deletion.PROTECT, to='project_core.fundinginstrument'),
        ),
        migrations.RunPython(
            copy_funding_instruments_from_calls_to_projects,
            # Fix: provide a no-op reverse so the migration can be unapplied;
            # reversing the AddField operations drops the columns anyway.
            migrations.RunPython.noop,
        ),
    ]
|
python
|
import json
import argparse
def contains(splits):
    """Build a 1D binary inclusion map over image ids so lookup is O(1).

    Args:
        splits: list of strings, each either "beg-end" (inclusive range) or a
            single integer like "7".

    Returns:
        (A, MIN, MAX): MIN/MAX are the smallest/largest ids mentioned, and
        A[i] == 1 iff id (MIN + i) is selected.
    """
    MAX = max(int(x.split('-')[-1]) for x in splits)
    MIN = min(int(x.split('-')[0]) for x in splits)
    A = [0] * (MAX - MIN + 1)
    for sp in splits:
        if '-' in sp:
            beg, end = (int(x) for x in sp.split('-'))
        else:
            beg = end = int(sp)
        # Fix: removed a leftover debug print that emitted one line per id.
        for idx in range(beg - MIN, end + 1 - MIN):
            A[idx] = 1
    return A, MIN, MAX
if __name__=='__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('json', help='Path to JSON dataset file')
    ap.add_argument('split', nargs='+', help='Dataset split for splitting')
    ap.add_argument('--out', help='Path to output JSON file', default='cut_dataset.json')
    args = ap.parse_args()
    with open(args.json, 'r') as f:
        obj = json.load(f)
    A, MIN, MAX = contains(args.split)
    # Perf fix: group annotations by image id once, instead of rescanning the
    # whole annotation list for every selected image (was O(images * annotations)).
    anns_by_image = {}
    for ann in obj['annotations']:
        anns_by_image.setdefault(ann['image_id'], []).append(ann)
    imgs, anns = [], []
    for img in obj['images']:
        # Keep an image iff its id falls in the selected split ranges.
        if MIN <= img['id'] <= MAX and A[img['id'] - MIN]:
            imgs.append(img)
            anns.extend(anns_by_image.get(img['id'], []))
    with open(args.out, 'w') as f:
        json.dump({'images': imgs, 'annotations': anns, 'classes': obj['classes'], 'categories': []}, f)
|
python
|
from sklearn.base import BaseEstimator
import numpy as np
from sklearn.base import clone
from .logs.loggers import get_logger
import math
class DeepModel(BaseEstimator):
    """Sequential residual-fitting ensemble.

    Clones `estimator` once per entry in `depths`; each clone is fitted on the
    residuals left by the previous one, and `predict` sums the stage outputs.
    """
    def __init__(self, estimator, depths, n_estimators=100,
                 learning_rate=0.01, verbose=True, logging=None, logging_params=None):
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.verbose = verbose
        self.depths = depths
        self.estimator = estimator
        # Fix: `logging_params={}` was a shared mutable default argument.
        self.logger = get_logger(logging, 'DeepModel', logging_params or {})
    def fit(self, X_train, y_train):
        """Fit one cloned estimator per depth on the running residuals."""
        self.models = []
        self.logger('Training...')
        feed = y_train.copy()
        for depth in self.depths:
            self.logger(f"Depth: {depth}")
            model = clone(self.estimator)
            model.fit(X_train, feed)
            self.models.append(model)
            preds = model.predict(X_train)
            feed -= preds
            # Mean absolute residual after this stage.
            self.logger('%.15f' % np.mean(abs(feed)))
        # scikit-learn convention: fit returns the estimator itself.
        return self
    def predict(self, X_test):
        """Sum the predictions of all fitted stages."""
        preds = np.zeros(X_test.shape[0])
        for model in self.models:
            preds += model.predict(X_test)
        return preds
        # Fix: removed a duplicated, unreachable `return preds` here.
class EarlyStoppingError(Exception):
    """Raised by EarlyStopping when the metric has stagnated beyond patience."""
    pass
class EarlyStopping:
    """Track a best-so-far metric and signal stagnation.

    Each call feeds a new metric value; after more than `patience` consecutive
    calls that fail to improve the best value by at least `threshold`,
    EarlyStoppingError is raised.
    """
    def __init__(self, direction, patience=100, threshold=1e-3):
        maximize = direction == 'maximize'
        self.best = -math.inf if maximize else math.inf
        self.fn = max if maximize else min
        self.count = 0
        self.threshold = threshold
        self.patience = patience
    def __call__(self, value):
        candidate = self.fn(self.best, value)
        if abs(candidate - self.best) < self.threshold:
            # No meaningful improvement this call.
            self.count += 1
            if self.count > self.patience:
                raise EarlyStoppingError()
        else:
            # Improved: reset the stagnation counter and record the new best.
            self.count = 0
            self.best = candidate
|
python
|
################################################################################
# COPYRIGHT(c) 2018 STMicroelectronics #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of STMicroelectronics nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
"""ble_advertising_data_parser
The ble_advertising_data_parser module contains tools to parse the advertising
data coming from Bluetooth devices and recognized by the BlueSTSDK.
"""
# IMPORT
import blue_st_sdk.node
from blue_st_sdk.utils.blue_st_exceptions import InvalidBLEAdvertisingDataException
# CLASSES
class BLEAdvertisingDataParser(object):
    """Parse the advertising data sent by a device that follows the BlueST
    protocol.
    It raises an exception if the advertising data is not valid.
    """
    # Note: the Bluepy library hides the field-type.
    ADVERTISING_DATA_MANUFACTURER_LENGTH_1 = 7
    """Allowed length for the advertising data manufacturer in bytes."""
    ADVERTISING_DATA_MANUFACTURER_LENGTH_2 = 13
    """Allowed length for the advertising data manufacturer in bytes."""
    VERSION_PROTOCOL_SUPPORTED_MIN = 0x01
    """Minimum version protocol supported."""
    VERSION_PROTOCOL_SUPPORTED_MAX = 0x01
    """Maximum version protocol supported."""
    _COMPLETE_LOCAL_NAME = 0x09
    """Code identifier for the complete local name."""
    _TX_POWER = 0x0A
    """Code identifier for the transmission power."""
    _MANUFACTURER_SPECIFIC_DATA = 0xFF
    """Code identifier for the manufacturer data."""
    _NAME_UNKNOWN = 'UNKNOWN'
    """Unknown name."""
    def __init__(self, advertising_data):
        """Constructor.
        Args:
            advertising_data (str): BLE advertising_data.
        Raises:
            :exc:`blue_st_sdk.utils.blue_st_exceptions.InvalidBLEAdvertisingDataException`
            is raised if the advertising data is not well formed.
        """
        # Device name (str).
        self._name = self._NAME_UNKNOWN
        # Device transmission power (int).
        self._tx_power = -1
        # Device MAC address (str).
        self._address = None
        # Bitmask that keeps track of the available features (int).
        self._feature_mask = -1
        # Device identifier (int).
        self._device_id = -1
        # Device Protocol Version (int).
        self._protocol_version = -1
        # Board's type (NodeType).
        self._board_type = None
        # Board in sleeping status (bool).
        self._board_sleeping = None
        # Manufacturer specific data (str).
        self._manufacturer_specific_data = None
        # Getting data.
        # NOTE(review): assumes each element is a (type, description, value)
        # triple (bluepy getScanData() style) -- confirm with the caller.
        for data in advertising_data:
            if data[0] == self._COMPLETE_LOCAL_NAME:
                self._name = data[2].encode('utf-8')
            elif data[0] == self._TX_POWER:
                self._tx_power = data[2]
            elif data[0] == self._MANUFACTURER_SPECIFIC_DATA:
                self._manufacturer_specific_data = data[2]
        # Manufacturer specific data is mandatory under the BlueST protocol.
        if self._manufacturer_specific_data is None:
            raise InvalidBLEAdvertisingDataException(
                ' ' + self._name + ': ' \
                '"Manufacturer specific data" is mandatory: ' \
                'the advertising data does not contain it.'
            )
        try:
            # Parse manufacturer specific data.
            self._parse_manufacturer_specific_data(self._manufacturer_specific_data)
        except InvalidBLEAdvertisingDataException as e:
            raise e
    def _parse_manufacturer_specific_data(self, manufacturer_specific_data):
        """Parse the manufacturer specific data.
        Args:
            manufacturer_specific_data (str): The manufacturer specific data.
        Raises:
            :exc:`blue_st_sdk.utils.blue_st_exceptions.InvalidBLEAdvertisingDataException`
            is raised if the advertising data is not well formed.
        """
        # NOTE(review): str.decode('hex') exists only on Python 2; on Python 3
        # this raises AttributeError (bytes.fromhex / codecs would be needed).
        length = len(manufacturer_specific_data.decode('hex')) + 1 # Adding 1 byte of the field-type, which is hidden by the Bluepy library.
        if length != self.ADVERTISING_DATA_MANUFACTURER_LENGTH_1 and length != self.ADVERTISING_DATA_MANUFACTURER_LENGTH_2:
            raise InvalidBLEAdvertisingDataException(
                ' ' + self._name + ': ' \
                '"Manufacturer specific data" must be of length "' \
                + str(self.ADVERTISING_DATA_MANUFACTURER_LENGTH_1) + '" or "' \
                + str(self.ADVERTISING_DATA_MANUFACTURER_LENGTH_2) + '", not "' + str(length) + '".'
            )
        # Bytes 0-1 (hex chars) hold the protocol version.
        self._protocol_version = int(manufacturer_specific_data[0:2], 16)
        if (self._protocol_version < self.VERSION_PROTOCOL_SUPPORTED_MIN) or \
           (self._protocol_version > self.VERSION_PROTOCOL_SUPPORTED_MAX):
            raise InvalidBLEAdvertisingDataException(
                ' ' + self._name + ': ' \
                'Protocol version "' + str(self._protocol_version) + '" unsupported. ' \
                'Version must be in [' + str(self.VERSION_PROTOCOL_SUPPORTED_MIN) + '..' + str(self.VERSION_PROTOCOL_SUPPORTED_MAX) + '].'
            )
        self._device_id = int(manufacturer_specific_data[2:4], 16)
        # High bit set (>= 0x80, Nucleo boards per _get_node_type) keeps the
        # whole byte; otherwise only the low 5 bits carry the device id.
        self._device_id = self._device_id & 0xFF if self._device_id & 0x80 == 0x80 else self._device_id & 0x1F
        try:
            self._board_type = self._get_node_type(self._device_id)
        except InvalidBLEAdvertisingDataException as e:
            raise e
        self._board_sleeping = self._get_node_sleeping_status(int(manufacturer_specific_data[2:4], 16))
        # Feature bitmask occupies the next 4 bytes (8 hex chars).
        self._feature_mask = int(manufacturer_specific_data[4:12], 16)
        # The longer format additionally carries the MAC address.
        self._address = manufacturer_specific_data[12:24] if length == self.ADVERTISING_DATA_MANUFACTURER_LENGTH_2 else None
    def _get_node_type(self, device_id):
        """Get the node's type.
        Args:
            device_id (int): Device identifier.
        Returns:
            :class:`blue_st_sdk.node.NodeType`: The node's type.
        Raises:
            :exc:`blue_st_sdk.utils.blue_st_exceptions.InvalidBLEAdvertisingDataException`
            is raised if the advertising data is not well formed.
        """
        temp = int(device_id & 0xFF)
        if temp == 0x01:
            return blue_st_sdk.node.NodeType.STEVAL_WESU1
        if temp == 0x02:
            return blue_st_sdk.node.NodeType.SENSOR_TILE
        if temp == 0x03:
            return blue_st_sdk.node.NodeType.BLUE_COIN
        if temp == 0x04:
            return blue_st_sdk.node.NodeType.STEVAL_IDB008VX
        if temp >= 0x80 and temp <= 0xFF:
            return blue_st_sdk.node.NodeType.NUCLEO
        return blue_st_sdk.node.NodeType.GENERIC
    @classmethod
    def _get_node_sleeping_status(self, node_type):
        """Parse the node type field to check whether the board is sleeping.
        Args:
            node_type (int): Node type.
        Returns:
            True if the board is sleeping, False otherwise.
        NOTE(review): declared @classmethod but the first parameter is named
        `self` (it actually receives the class); rename to `cls` when next
        touching this code.
        """
        return ((node_type & 0x80) != 0x80 and ((node_type & 0x40) == 0x40))
    def get_name(self):
        """Get the device name.
        Returns:
            str: The device name.
        """
        return self._name
    def get_tx_power(self):
        """Get the device transmission power in mdb.
        Returns:
            int: The device transmission power in mdb.
        """
        return self._tx_power
    def get_address(self):
        """Get the device MAC address.
        Returns:
            str: The device MAC address.
        """
        return self._address
    def get_protocol_version(self):
        """Get the device protocol version.
        Returns:
            int: The device protocol version.
        """
        return self._protocol_version
    def get_board_type(self):
        """Get the board's type.
        Returns:
            The board's type.
        """
        return self._board_type
    def get_board_sleeping(self):
        """Get the sleeping status.
        Returns:
            True if the board is sleeping, False otherwise.
        """
        return self._board_sleeping
    def get_device_id(self):
        """Get the device identifier.
        Returns:
            int: The device identifier.
        """
        return self._device_id
    def get_feature_mask(self):
        """Get the bitmask that keeps track of the available features.
        Returns:
            The bitmask that keeps track of the available features.
        """
        return self._feature_mask
    def __str__(self):
        """Print the advertising_data.
        Returns:
            str: A string that contains the advertising_data.
        NOTE(review): tx_power, feature_mask and protocol_version are ints, so
        these '+' concatenations raise TypeError at runtime; wrap them in
        str() when fixing.
        """
        return "Name: " + self._name + \
               "\n\tTxPower: " + self._tx_power + \
               "\n\tAddress: " + self._address + \
               "\n\tFeature Mask: " + self._feature_mask + \
               "\n\tProtocol Version: " + self._protocol_version
|
python
|
from django.core.mail import send_mail
from django.shortcuts import render,redirect,reverse
from django.http import HttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import generic
from .models import AgentModel, LeadModel,CategoryModel
from .forms import (
LeadCreationForm,UserCreationForm,AssignAgentForm,
CategoryUpdateForm,
)
from agents.mixin import OrganisorAndLoginRequiredMixin
from django.views.generic import (
TemplateView,CreateView,ListView,
UpdateView,DeleteView,DetailView,
)
class LandingPageView(TemplateView):
    """Public landing page."""
    template_name="leads/index.html"
class SignupView(CreateView):
    """User registration backed by the project's UserCreationForm."""
    template_name="registration/signup.html"
    form_class=UserCreationForm
    def get_success_url(self):
        # Send newly registered users to the login page.
        return reverse("login")
def index(request):
    """Function-based equivalent of LandingPageView."""
    return render(request,"leads/index.html")
class LeadListView(LoginRequiredMixin,ListView):
    """List assigned leads scoped to the requester's organisation; organisors
    additionally get the organisation's unassigned leads in the context."""
    template_name="leads/leads_list.html"
    context_object_name="leads"
    def get_queryset(self):
        user=self.request.user
        # Leads with an agent, scoped to the whole organisation for organisors
        # or to the agent's organisation otherwise.
        if user.is_organisor:
            queryset=LeadModel.objects.filter(
                organisation=user.userprofile,
                agent__isnull=False,
            )
        else:
            queryset=LeadModel.objects.filter(
                organisation=user.agent.organisation,
                agent__isnull=False,
            )
            # Bug fix: this filter previously ran for organisors as well;
            # organisors have no agent profile, so their list was always empty.
            # Agents only see the leads assigned to them.
            queryset=queryset.filter(agent__user=user)
        return queryset
    def get_context_data(self,**kwargs):
        context=super(LeadListView,self).get_context_data(**kwargs)
        user=self.request.user
        if user.is_organisor:
            queryset=LeadModel.objects.filter(
                organisation=user.userprofile,
                agent__isnull=True,
            )
            # Bug fix: context.update previously sat outside this branch and
            # referenced `queryset`, raising NameError for agent users.
            context.update({
                'unassigned_leads':queryset
            })
        return context
#"""
def LeadList(request):
    """Function-based fallback list view: renders every lead, unscoped."""
    return render(
        request,
        "leads/leads_list.html",
        {"leads": LeadModel.objects.all()},
    )
class LeadCreateView(OrganisorAndLoginRequiredMixin,CreateView):
    """Organisor-only form view that creates a lead and emails a notification."""
    template_name="leads/leads_create.html"
    form_class=LeadCreationForm
    def get_success_url(self):
        return reverse("leads:leadlist")
    def form_valid(self,form):
        # Attach the organisor's organisation before the first save.
        lead = form.save(commit=False)
        lead.organisation = self.request.user.userprofile
        lead.save()
        # NOTE(review): hard-coded test addresses -- move to settings before
        # production use.
        send_mail(
            subject="A lead has been created",
            message="Go to the site to see the new lead",
            from_email="test@test.com",
            recipient_list=['test2@test.com']
        )
        return super(LeadCreateView,self).form_valid(form)
def LeadCreate(request):
    """Function-based lead creation; renders a bound form when validation fails.

    Fixes:
    - `if request.POST:` is falsy for an empty POST body; check the HTTP
      method instead.
    - The context previously passed the form *class*, so GET requests and
      failed validations could not render field errors; pass an instance.
    """
    if request.method == "POST":
        form = LeadCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("leadlist")
    else:
        form = LeadCreationForm()
    context={
        # Key kept for template compatibility.
        "LeadCreationForm":form
    }
    return render(request,"leads/leads_create.html",context)
class LeadDetailView(LoginRequiredMixin,DetailView):
    """Show one lead, scoped to the requesting user's organisation."""
    template_name="leads/leads_detail.html"
    context_object_name="lead"
    def get_queryset(self):
        user=self.request.user
        # Leads scoped to the whole organisation for organisors, otherwise to
        # the agent's organisation.
        if user.is_organisor:
            queryset=LeadModel.objects.filter(organisation=user.userprofile)
        else:
            queryset=LeadModel.objects.filter(organisation=user.agent.organisation)
            # Bug fix: this call was spelled `filte` (AttributeError at
            # runtime). It is also scoped to the agent branch so organisors
            # (who have no agent profile) are not filtered out.
            queryset=queryset.filter(agent__user=user)
        return queryset
def LeadDetail(request,pk):
    """Function-based detail page for a single lead (no organisation scoping)."""
    return render(
        request,
        "leads/leads_detail.html",
        {"lead": LeadModel.objects.get(id=pk)},
    )
class LeadUpdateView(OrganisorAndLoginRequiredMixin,UpdateView):
    """Organisor-only edit view for a lead in their own organisation."""
    template_name="leads/leads_update.html"
    form_class=LeadCreationForm
    context_object_name="lead"
    def get_queryset(self):
        user=self.request.user
        # Organisors may only edit leads belonging to their organisation.
        queryset=LeadModel.objects.filter(organisation=user.userprofile)
        return queryset
    def get_success_url(self):
        return reverse("leads:leadlist")
def LeadUpdate(request,pk):
    """Function-based lead update; renders a bound form on validation errors.

    Fix: `if request.POST:` is falsy for an empty POST body; check the HTTP
    method instead.
    """
    lead=LeadModel.objects.get(id=pk)
    if request.method == "POST":
        form=LeadCreationForm(request.POST,instance=lead)
        if form.is_valid():
            form.save()
            return redirect("leadlist")
    else:
        form=LeadCreationForm(instance=lead)
    context={
        "form":form,
        "lead":lead,
    }
    return render(request,"leads/leads_update.html",context)
class LeadDeleteView(LoginRequiredMixin,DeleteView):
    """Confirm-and-delete view for a lead in the organisor's organisation."""
    template_name="leads/leads_delete.html"
    def get_queryset(self):
        user=self.request.user
        # Bug fix: the queryset was built but never returned, so the view
        # failed when Django accessed it.
        return LeadModel.objects.filter(organisation=user.userprofile)
    def get_success_url(self):
        return reverse("leads:leadlist")
def LeadDelete(request,pk):
    """Function-based delete: removes the lead and returns to the list.

    NOTE(review): deletes on GET with no confirmation or ownership check --
    prefer LeadDeleteView.
    """
    lead=LeadModel.objects.get(id=pk)
    lead.delete()
    return redirect("leads:leadlist")
class AssignAgentView(OrganisorAndLoginRequiredMixin,generic.FormView):
    """Organisor-only form to assign one of the organisation's agents to a lead."""
    template_name="leads/assign_agent.html"
    form_class=AssignAgentForm
    def get_form_kwargs(self,**kwargs):
        # Pass the request through so the form can limit agent choices to the
        # requesting organisor's organisation.
        kwargs=super(AssignAgentView,self).get_form_kwargs(**kwargs)
        kwargs.update({
            "request":self.request
        })
        return kwargs
    def form_valid(self, form):
        # Attach the chosen agent to the lead identified by the URL pk.
        agent=form.cleaned_data["agents"]
        lead=LeadModel.objects.get(id=self.kwargs["pk"])
        lead.agent=agent
        lead.save()
        return super(AssignAgentView,self).form_valid(form)
    def get_success_url(self):
        return reverse("leads:leadlist")
class CategoryListView(LoginRequiredMixin,generic.ListView):
    """List lead categories for the requester's organisation, plus a count of
    uncategorised leads."""
    template_name="leads/category_list.html"
    context_object_name="category_list"
    def get_context_data(self, **kwargs):
        context= super(CategoryListView,self).get_context_data(**kwargs)
        user=self.request.user
        # Leads visible to this user, used only for the unassigned count.
        if user.is_organisor:
            queryset=LeadModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=LeadModel.objects.filter(organisation = user.agent.organisation,)
        context.update({
            "unassigned_lead_count":queryset.filter(category__isnull=True).count()
        })
        return context
    def get_queryset(self):
        user=self.request.user
        # Categories scoped to the requester's organisation.
        if user.is_organisor:
            queryset=CategoryModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=CategoryModel.objects.filter(organisation = user.agent.organisation,)
        return queryset
class CategoryDetailView(LoginRequiredMixin,generic.DetailView):
    """Show one category and the leads currently filed under it."""
    template_name="leads/category_detail.html"
    context_object_name="category"
    # The direct relation query from the model achieves the same result:
    # category.leads.all()
    def get_context_data(self, **kwargs):
        context= super(CategoryDetailView,self).get_context_data(**kwargs)
        leads=self.get_object().leads.all()
        context.update({
            "leads":leads
        })
        return context
    def get_queryset(self):
        user=self.request.user
        # Categories scoped to the requester's organisation.
        if user.is_organisor:
            queryset=CategoryModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=CategoryModel.objects.filter(organisation = user.agent.organisation,)
        return queryset
class CategoryUpdateView(LoginRequiredMixin,generic.UpdateView):
    """Update the category assigned to a single lead (not the category itself)."""
    template_name="leads/category_update.html"
    form_class=CategoryUpdateForm
    context_object_name="lead"
    def get_queryset(self):
        user=self.request.user
        # Leads scoped to the requester's organisation.
        if user.is_organisor:
            queryset=LeadModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=LeadModel.objects.filter(organisation = user.agent.organisation,)
        return queryset
    def get_success_url(self):
        # Return to the detail page of the lead that was just re-categorised.
        return reverse("leads:leaddetail",kwargs={"pk":self.get_object().id})
|
python
|
# Copyright 2016 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.services.qos import qos_plugin
from oslo_config import cfg
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
LOG = logging.getLogger(__name__)
class NsxVQosPlugin(qos_plugin.QoSPlugin):
    """Service plugin for VMware NSX-v to implement Neutron's Qos API."""

    supported_extension_aliases = ["qos"]

    def __init__(self):
        LOG.info("Loading VMware NSX-V Qos Service Plugin")
        super(NsxVQosPlugin, self).__init__()
        # QoS support requires the DVS feature set; refuse to load otherwise.
        if cfg.CONF.nsxv.use_dvs_features:
            return
        error = _("Cannot use the NSX-V QoS plugin without "
                  "enabling the dvs features")
        raise nsx_exc.NsxPluginException(err_msg=error)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import logging
logger = logging.getLogger(__name__)
class RestartHandler:
    """Run a command as a subprocess and restart it whenever the observer
    reports events."""

    def __init__(self, observer, command):
        self.observer = observer
        self.command = command

    def run(self):
        """Start the command, then loop forever restarting it on events.

        Blocks indefinitely; each observer event terminates the current
        child process, waits for it to exit, and launches a new one.
        """
        logger.info("Running restart handler")
        child = subprocess.Popen(self.command)
        while True:
            if not self.observer.observe_and_update():
                continue
            logger.info("Restarting the process")
            child.terminate()
            child.wait()
            child = subprocess.Popen(self.command)
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public output types exported by this generated module.
__all__ = [
    'ConnectionAliasAssociation',
    'ConnectionAliasTag',
    'WorkspaceProperties',
    'WorkspaceTag',
]
@pulumi.output_type
class ConnectionAliasAssociation(dict):
    """Generated output type for a WorkSpaces connection-alias association.

    Values are stored under snake_case keys; dict-style access with the
    camelCase wire names still works but emits a warning pointing at the
    corresponding property getter.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map each camelCase wire name to its snake_case property so the
        # warning can tell the caller which getter to use instead.
        suggest = None
        if key == "associatedAccountId":
            suggest = "associated_account_id"
        elif key == "associationStatus":
            suggest = "association_status"
        elif key == "connectionIdentifier":
            suggest = "connection_identifier"
        elif key == "resourceId":
            suggest = "resource_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ConnectionAliasAssociation. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ConnectionAliasAssociation.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ConnectionAliasAssociation.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 associated_account_id: Optional[str] = None,
                 association_status: Optional['ConnectionAliasAssociationAssociationStatus'] = None,
                 connection_identifier: Optional[str] = None,
                 resource_id: Optional[str] = None):
        # Only set the keys that were actually provided.
        if associated_account_id is not None:
            pulumi.set(__self__, "associated_account_id", associated_account_id)
        if association_status is not None:
            pulumi.set(__self__, "association_status", association_status)
        if connection_identifier is not None:
            pulumi.set(__self__, "connection_identifier", connection_identifier)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="associatedAccountId")
    def associated_account_id(self) -> Optional[str]:
        return pulumi.get(self, "associated_account_id")

    @property
    @pulumi.getter(name="associationStatus")
    def association_status(self) -> Optional['ConnectionAliasAssociationAssociationStatus']:
        return pulumi.get(self, "association_status")

    @property
    @pulumi.getter(name="connectionIdentifier")
    def connection_identifier(self) -> Optional[str]:
        return pulumi.get(self, "connection_identifier")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        return pulumi.get(self, "resource_id")
@pulumi.output_type
class ConnectionAliasTag(dict):
    """Generated output type for a key/value tag on a connection alias."""
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class WorkspaceProperties(dict):
    """Generated output type describing a WorkSpace's runtime properties.

    Values are stored under snake_case keys; dict-style access with the
    camelCase wire names still works but emits a warning pointing at the
    corresponding property getter.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map each camelCase wire name to its snake_case property so the
        # warning can tell the caller which getter to use instead.
        suggest = None
        if key == "computeTypeName":
            suggest = "compute_type_name"
        elif key == "rootVolumeSizeGib":
            suggest = "root_volume_size_gib"
        elif key == "runningMode":
            suggest = "running_mode"
        elif key == "runningModeAutoStopTimeoutInMinutes":
            suggest = "running_mode_auto_stop_timeout_in_minutes"
        elif key == "userVolumeSizeGib":
            suggest = "user_volume_size_gib"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceProperties. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceProperties.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceProperties.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 compute_type_name: Optional[str] = None,
                 root_volume_size_gib: Optional[int] = None,
                 running_mode: Optional[str] = None,
                 running_mode_auto_stop_timeout_in_minutes: Optional[int] = None,
                 user_volume_size_gib: Optional[int] = None):
        # Only set the keys that were actually provided.
        if compute_type_name is not None:
            pulumi.set(__self__, "compute_type_name", compute_type_name)
        if root_volume_size_gib is not None:
            pulumi.set(__self__, "root_volume_size_gib", root_volume_size_gib)
        if running_mode is not None:
            pulumi.set(__self__, "running_mode", running_mode)
        if running_mode_auto_stop_timeout_in_minutes is not None:
            pulumi.set(__self__, "running_mode_auto_stop_timeout_in_minutes", running_mode_auto_stop_timeout_in_minutes)
        if user_volume_size_gib is not None:
            pulumi.set(__self__, "user_volume_size_gib", user_volume_size_gib)

    @property
    @pulumi.getter(name="computeTypeName")
    def compute_type_name(self) -> Optional[str]:
        return pulumi.get(self, "compute_type_name")

    @property
    @pulumi.getter(name="rootVolumeSizeGib")
    def root_volume_size_gib(self) -> Optional[int]:
        return pulumi.get(self, "root_volume_size_gib")

    @property
    @pulumi.getter(name="runningMode")
    def running_mode(self) -> Optional[str]:
        return pulumi.get(self, "running_mode")

    @property
    @pulumi.getter(name="runningModeAutoStopTimeoutInMinutes")
    def running_mode_auto_stop_timeout_in_minutes(self) -> Optional[int]:
        return pulumi.get(self, "running_mode_auto_stop_timeout_in_minutes")

    @property
    @pulumi.getter(name="userVolumeSizeGib")
    def user_volume_size_gib(self) -> Optional[int]:
        return pulumi.get(self, "user_volume_size_gib")
@pulumi.output_type
class WorkspaceTag(dict):
    """Generated output type for a key/value tag on a WorkSpace."""
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
|
python
|
import numpy as np
from scipy.constants import mu_0
# TODO: make this to take a vector rather than a single frequency
def rTEfunfwd(nlay, f, lamda, sig, chi, depth, HalfSwitch):
    """
    Compute the Transverse Electric (TE) mode reflection coefficient of a
    layered model, vectorized over the Hankel-transform variable ``lamda``
    (one single for-loop over layers, none over ``lamda``).

    Parameters
    ----------
    nlay : int
        Number of layers.
    f : float
        Frequency (Hz); a single scalar frequency.
    lamda : np.ndarray
        Hankel-transform variable values.
    sig, chi : array-like, length nlay
        Conductivity (S/m) and magnetic susceptibility per layer.
    depth : array-like
        Layer-top depths; ``-np.diff(depth)`` gives layer thicknesses.
    HalfSwitch : bool
        If True, treat the model as a homogeneous half-space (ignore layering).

    Returns
    -------
    np.ndarray of complex, shape ``lamda.shape``
        The reflection coefficient rTE at each ``lamda``.
    """
    # Cleanup vs. original: removed arrays that were zero-initialized and
    # immediately overwritten, the unused M00/M10/M01/M11 lists, and the
    # `== True` comparison. The math is unchanged.
    thick = -np.diff(depth)
    w = 2*np.pi*f

    # Interface matrix M1 between the air half-space (u = lamda) and layer 0.
    utemp0 = lamda
    utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
    const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
    M0sum00 = 0.5*(1+const)
    M0sum10 = 0.5*(1-const)
    M0sum01 = 0.5*(1-const)
    M0sum11 = 0.5*(1+const)

    if HalfSwitch:
        # Homogeneous half-space: the propagator is M1 alone.
        M1sum00 = M0sum00
        M1sum10 = M0sum10
        M1sum01 = M0sum01
        M1sum11 = M0sum11
    else:
        # Preserves the original edge case: with nlay == 1 the loop never
        # runs and the result is 0/0 (NaN), exactly as before.
        M1sum00 = np.zeros(lamda.size, dtype=complex)
        M1sum10 = np.zeros(lamda.size, dtype=complex)
        M1sum01 = np.zeros(lamda.size, dtype=complex)
        M1sum11 = np.zeros(lamda.size, dtype=complex)
        # Accumulate the element-wise 2x2 product M1*M2*...*M(nlay-1).
        for j in range(nlay-1):
            utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j])*sig[j])
            utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j+1])*sig[j+1])
            const = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0)
            h0 = thick[j]
            Mtemp00 = 0.5*(1.+ const)*np.exp(-2.*utemp0*h0)
            Mtemp10 = 0.5*(1.- const)
            Mtemp01 = 0.5*(1.- const)*np.exp(-2.*utemp0*h0)
            Mtemp11 = 0.5*(1.+ const)
            M1sum00 = M0sum00*Mtemp00 + M0sum01*Mtemp10
            M1sum10 = M0sum10*Mtemp00 + M0sum11*Mtemp10
            M1sum01 = M0sum00*Mtemp01 + M0sum01*Mtemp11
            M1sum11 = M0sum10*Mtemp01 + M0sum11*Mtemp11
            M0sum00 = M1sum00
            M0sum10 = M1sum10
            M0sum01 = M1sum01
            M0sum11 = M1sum11

    # Reflection coefficient from the accumulated propagator entries.
    return M1sum01/M1sum11
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
    """
    Element-wise 2x2 matrix multiplication C = A*B, with each matrix given
    as its four entries (possibly arrays):

        A = [a00 a01]   B = [b00 b01]   ->   C = [c00 c01]
            [a10 a11]       [b10 b11]            [c10 c11]

    Returns the tuple (c00, c10, c01, c11).
    """
    return (
        a00 * b00 + a01 * b10,
        a10 * b00 + a11 * b10,
        a00 * b01 + a01 * b11,
        a10 * b01 + a11 * b11,
    )
# TODO: make this to take a vector rather than a single frequency
def rTEfunjac(nlay, f, lamda, sig, chi, depth, HalfSwitch):
    """
    Compute the sensitivity of the Transverse Electric (TE) reflection
    coefficient with respect to each layer conductivity, vectorized over
    the Hankel-transform variable ``lamda`` (single loop over layers only).

    Parameters mirror ``rTEfunfwd``: ``nlay`` layers with conductivities
    ``sig``, susceptibilities ``chi`` and tops at ``depth``, evaluated at
    one scalar frequency ``f``. ``HalfSwitch`` treats the model as a
    homogeneous half-space.

    Returns
    -------
    drTE : complex np.ndarray
        d(rTE)/d(sig[i]) for each layer i, shape (nlay, lamda.size).
        NOTE(review): in the HalfSwitch branch ``drTE`` is rebound to a
        1-D array of shape (lamda.size,) — confirm callers expect this.
    """
    # Initializing arrays
    Mtemp00 = np.zeros(lamda.size, dtype=complex)
    Mtemp10 = np.zeros(lamda.size, dtype=complex)
    Mtemp01 = np.zeros(lamda.size, dtype=complex)
    Mtemp11 = np.zeros(lamda.size, dtype=complex)
    M1sum00 = np.zeros(lamda.size, dtype=complex)
    M1sum10 = np.zeros(lamda.size, dtype=complex)
    M1sum01 = np.zeros(lamda.size, dtype=complex)
    M1sum11 = np.zeros(lamda.size, dtype=complex)
    M0sum00 = np.zeros(lamda.size, dtype=complex)
    M0sum10 = np.zeros(lamda.size, dtype=complex)
    M0sum01 = np.zeros(lamda.size, dtype=complex)
    M0sum11 = np.zeros(lamda.size, dtype=complex)
    dMtemp00 = np.zeros(lamda.size, dtype=complex)
    dMtemp10 = np.zeros(lamda.size, dtype=complex)
    dMtemp01 = np.zeros(lamda.size, dtype=complex)
    dMtemp11 = np.zeros(lamda.size, dtype=complex)
    dj0temp00 = np.zeros(lamda.size, dtype=complex)
    dj0temp10 = np.zeros(lamda.size, dtype=complex)
    dj0temp01 = np.zeros(lamda.size, dtype=complex)
    dj0temp11 = np.zeros(lamda.size, dtype=complex)
    dj1temp00 = np.zeros(lamda.size, dtype=complex)
    dj1temp10 = np.zeros(lamda.size, dtype=complex)
    dj1temp01 = np.zeros(lamda.size, dtype=complex)
    dj1temp11 = np.zeros(lamda.size, dtype=complex)
    # thick[j] = depth[j] - depth[j+1]; same sign convention as rTEfunfwd.
    thick = -np.diff(depth)
    w = 2*np.pi*f
    rTE = np.zeros(lamda.size, dtype=complex)
    drTE = np.zeros((nlay, lamda.size) , dtype=complex)
    utemp0 = np.zeros(lamda.size, dtype=complex)
    utemp1 = np.zeros(lamda.size, dtype=complex)
    const = np.zeros(lamda.size, dtype=complex)
    # Air / first-layer interface: u0 = lamda in the air half-space.
    utemp0 = lamda
    utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
    const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
    # Compute M1
    Mtemp00 = 0.5*(1+const)
    Mtemp10 = 0.5*(1-const)
    Mtemp01 = 0.5*(1-const)
    Mtemp11 = 0.5*(1+const)
    utemp0 = lamda
    utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
    const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
    # Compute dM1du1
    dj0Mtemp00 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    dj0Mtemp10 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    dj0Mtemp01 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    dj0Mtemp11 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    # TODO: for computing Jacobian
    # Per-layer interface matrices (M00..M11) and per-layer derivative
    # products (dJ00..dJ11) accumulated for the chain rule below.
    M00 = []
    M10 = []
    M01 = []
    M11 = []
    dJ00 = []
    dJ10 = []
    dJ01 = []
    dJ11 = []
    M00.append(Mtemp00)
    M01.append(Mtemp01)
    M10.append(Mtemp10)
    M11.append(Mtemp11)
    M0sum00 = Mtemp00.copy()
    M0sum10 = Mtemp10.copy()
    M0sum01 = Mtemp01.copy()
    M0sum11 = Mtemp11.copy()
    if HalfSwitch == True:
        # Half-space: the total propagator is just M1.
        M1sum00 = np.zeros(lamda.size, dtype=complex)
        M1sum10 = np.zeros(lamda.size, dtype=complex)
        M1sum01 = np.zeros(lamda.size, dtype=complex)
        M1sum11 = np.zeros(lamda.size, dtype=complex)
        M1sum00 = M0sum00.copy()
        M1sum10 = M0sum10.copy()
        M1sum01 = M0sum01.copy()
        M1sum11 = M0sum11.copy()
    else:
        # Layered case: accumulate the propagator product and, for each
        # layer j, the derivative of the product w.r.t. sig[j] via the
        # product rule (dMj/dmj * Mj+1  +  Mj * dMj+1/dmj).
        for j in range (nlay-1):
            dJ_10Mtemp00 = np.zeros(lamda.size, dtype=complex)
            dJ_10Mtemp10 = np.zeros(lamda.size, dtype=complex)
            dJ_10Mtemp01 = np.zeros(lamda.size, dtype=complex)
            dJ_10Mtemp11 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp00 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp10 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp01 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp11 = np.zeros(lamda.size, dtype=complex)
            utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j])*sig[j])
            utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j+1])*sig[j+1])
            const = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0)
            h0 = thick[j]
            Mtemp00 = 0.5*(1.+ const)*np.exp(-2.*utemp0*h0)
            Mtemp10 = 0.5*(1.- const)
            Mtemp01 = 0.5*(1.- const)*np.exp(-2.*utemp0*h0)
            Mtemp11 = 0.5*(1.+ const)
            M1sum00, M1sum10, M1sum01, M1sum11 = matmul(
                M0sum00, M0sum10, M0sum01, M0sum11,
                Mtemp00, Mtemp10, Mtemp01, Mtemp11
            )
            M0sum00 = M1sum00
            M0sum10 = M1sum10
            M0sum01 = M1sum01
            M0sum11 = M1sum11
            # TODO: for Computing Jacobian
            # du/dsig for layer j (utemp0 here is u_j).
            dudsig = 0.5*1j*w*mu_0*(1+chi[j])/utemp0
            if j==0:
                const1a = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0**2)
                const1b = const1a*utemp0
                dj1Mtemp00 = -0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1+const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp10 = 0.5*const1a
                dj1Mtemp01 = 0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1-const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp11 = -0.5*const1a
                #Compute dM1dm1*M2
                dJ_10Mtemp00, dJ_10Mtemp10, dJ_10Mtemp01, dJ_10Mtemp11 = matmul(dj0Mtemp00, dj0Mtemp10, dj0Mtemp01, dj0Mtemp11, Mtemp00, Mtemp10, Mtemp01, Mtemp11)
                #Compute M1*dM2dm1
                dJ01Mtemp00, dJ01Mtemp10, dJ01Mtemp01, dJ01Mtemp11 = matmul(M00[j], M10[j], M01[j], M11[j], dj1Mtemp00, dj1Mtemp10, dj1Mtemp01, dj1Mtemp11)
                dJ00.append(dudsig*(dJ_10Mtemp00+dJ01Mtemp00))
                dJ10.append(dudsig*(dJ_10Mtemp10+dJ01Mtemp10))
                dJ01.append(dudsig*(dJ_10Mtemp01+dJ01Mtemp01))
                dJ11.append(dudsig*(dJ_10Mtemp11+dJ01Mtemp11))
            else:
                # Interior layer: sig[j] also enters the previous interface
                # matrix (through u_j), handled via dj0Mtemp*.
                h_1 = thick[j-1]
                utemp_1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j-1])*sig[j-1])
                const0 = mu_0*(1+chi[j-1])/(mu_0*(1+chi[j])*utemp_1)
                dj0Mtemp00 = 0.5*(const0)*np.exp(-2.*utemp_1*h_1)
                dj0Mtemp10 = -0.5*(const0)
                dj0Mtemp01 = -0.5*(const0)*np.exp(-2.*utemp_1*h_1)
                dj0Mtemp11 = 0.5*(const0)
                const1a = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0**2)
                const1b = const1a*utemp0
                dj1Mtemp00 = -0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1+const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp10 = 0.5*const1a
                dj1Mtemp01 = 0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1-const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp11 = -0.5*const1a
                #Compute dMjdmj*Mj+1
                dJ_10Mtemp00, dJ_10Mtemp10, dJ_10Mtemp01, dJ_10Mtemp11 = matmul(dj0Mtemp00, dj0Mtemp10, dj0Mtemp01, dj0Mtemp11, Mtemp00, Mtemp10, Mtemp01, Mtemp11)
                #Compute Mj*dMj+1dmj
                dJ01Mtemp00, dJ01Mtemp10, dJ01Mtemp01, dJ01Mtemp11 = matmul(M00[j], M10[j], M01[j], M11[j], dj1Mtemp00, dj1Mtemp10, dj1Mtemp01, dj1Mtemp11)
                dJ00.append(dudsig*(dJ_10Mtemp00+dJ01Mtemp00))
                dJ10.append(dudsig*(dJ_10Mtemp10+dJ01Mtemp10))
                dJ01.append(dudsig*(dJ_10Mtemp01+dJ01Mtemp01))
                dJ11.append(dudsig*(dJ_10Mtemp11+dJ01Mtemp11))
            M00.append(Mtemp00)
            M01.append(Mtemp01)
            M10.append(Mtemp10)
            M11.append(Mtemp11)
    # rTE = M1sum01/M1sum11
    if HalfSwitch == True:
        # Half-space derivative: single interface, chain rule through u_1.
        utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
        dudsig = 0.5*1j*w*mu_0*(1+chi[0])/utemp0
        dJ1sum00 = np.zeros(lamda.size, dtype=complex)
        dJ1sum10 = np.zeros(lamda.size, dtype=complex)
        dJ1sum01 = np.zeros(lamda.size, dtype=complex)
        dJ1sum11 = np.zeros(lamda.size, dtype=complex)
        dJ1sum00 = dudsig*dj0Mtemp00
        dJ1sum10 = dudsig*dj0Mtemp10
        dJ1sum01 = dudsig*dj0Mtemp01
        dJ1sum11 = dudsig*dj0Mtemp11
        drTE = dJ1sum01/M1sum11 - M1sum01/(M1sum11**2)*dJ1sum11
    else:
        #j = nlay
        # Bottom layer: its conductivity only enters the last interface.
        utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[nlay-1])*sig[nlay-1])
        # NOTE(review): uses chi[j] (leftover loop index, j == nlay-2 here)
        # while utemp0 above uses chi[nlay-1] — confirm which is intended.
        dudsig = 0.5*1j*w*mu_0*(1+chi[j])/utemp0
        h_1 = thick[nlay-2]
        utemp_1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[nlay-2])*sig[nlay-2])
        const0 = mu_0*(1+chi[nlay-2])/(mu_0*(1+chi[nlay-1])*utemp_1)
        dj0Mtemp00 = 0.5*(const0)*np.exp(-2.*utemp_1*h_1)
        dj0Mtemp10 = -0.5*(const0)
        dj0Mtemp01 = -0.5*(const0)*np.exp(-2.*utemp_1*h_1)
        dj0Mtemp11 = 0.5*(const0)
        dJ_10Mtemp00 = dj0Mtemp00
        dJ_10Mtemp10 = dj0Mtemp10
        dJ_10Mtemp01 = dj0Mtemp01
        dJ_10Mtemp11 = dj0Mtemp11
        dJ00.append(dudsig*dJ_10Mtemp00)
        dJ10.append(dudsig*dJ_10Mtemp10)
        dJ01.append(dudsig*dJ_10Mtemp01)
        dJ11.append(dudsig*dJ_10Mtemp11)
        # Assemble d(product)/d(sig[i]) for each layer i by multiplying the
        # stored interface matrices with dJ[i] substituted at position i.
        for i in range (nlay):
            dJ0sum00 = np.zeros(lamda.size, dtype=complex)
            dJ0sum10 = np.zeros(lamda.size, dtype=complex)
            dJ0sum01 = np.zeros(lamda.size, dtype=complex)
            dJ0sum11 = np.zeros(lamda.size, dtype=complex)
            dJ1sum00 = np.zeros(lamda.size, dtype=complex)
            dJ1sum10 = np.zeros(lamda.size, dtype=complex)
            dJ1sum01 = np.zeros(lamda.size, dtype=complex)
            dJ1sum11 = np.zeros(lamda.size, dtype=complex)
            if i==0:
                for j in range (nlay-2):
                    if j==0:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ00[i], dJ10[i], dJ01[i], dJ11[i], M00[j+2], M10[j+2], M01[j+2], M11[j+2]
                        )
                    else:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+2], M10[j+2], M01[j+2], M11[j+2]
                        )
                    dJ0sum00 = dJ1sum00
                    dJ0sum10 = dJ1sum10
                    dJ0sum01 = dJ1sum01
                    dJ0sum11 = dJ1sum11
            elif (i>0) & (i<nlay-1):
                dJ0sum00 = M00[0]
                dJ0sum10 = M10[0]
                dJ0sum01 = M01[0]
                dJ0sum11 = M11[0]
                for j in range (nlay-2):
                    if j==i-1:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, dJ00[i], dJ10[i], dJ01[i], dJ11[i]
                        )
                    elif j < i-1:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+1], M10[j+1], M01[j+1], M11[j+1]
                        )
                    elif j > i-1:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+2], M10[j+2], M01[j+2], M11[j+2]
                        )
                    dJ0sum00 = dJ1sum00
                    dJ0sum10 = dJ1sum10
                    dJ0sum01 = dJ1sum01
                    dJ0sum11 = dJ1sum11
            elif i==nlay-1:
                dJ0sum00 = M00[0]
                dJ0sum10 = M10[0]
                dJ0sum01 = M01[0]
                dJ0sum11 = M11[0]
                for j in range (nlay-1):
                    if j < nlay-2:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+1], M10[j+1], M01[j+1], M11[j+1]
                        )
                    elif j == nlay-2:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, dJ00[i], dJ10[i], dJ01[i], dJ11[i]
                        )
                    dJ0sum00 = dJ1sum00
                    dJ0sum10 = dJ1sum10
                    dJ0sum01 = dJ1sum01
                    dJ0sum11 = dJ1sum11
            # Quotient rule on rTE = P01/P11 with P the propagator product.
            drTE[i, :] = dJ1sum01/M1sum11 - M1sum01/(M1sum11**2)*dJ1sum11
    return drTE
    # return rTE, drTE
# return rTE, drTE
|
python
|
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from . import models
from . import serializers
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """
    def __init__(self, data, **kwargs):
        rendered = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(rendered, **kwargs)
def root(request):
    """Describe the store: its name and API version, as JSON."""
    payload = {"name": "The DataShed Annotation Store.", "version": "0.0.1"}
    return JSONResponse(payload)
@csrf_exempt
def index_create(request):
    """List all annotations (GET) or create a new one (POST).

    Returns 201 with the serialized annotation on successful create,
    403 with the validation errors on invalid input, and 403 for any
    other HTTP method.
    """
    if request.method == "GET":
        annotations = models.Annotation.objects.all()
        serializer = serializers.AnnotationSerializer(annotations, many=True)
        return JSONResponse(serializer.data)
    if request.method == "POST":
        data = JSONParser().parse(request)
        serializer = serializers.AnnotationSerializer(data=data)
        if not serializer.is_valid():
            return HttpResponseForbidden(str(serializer.errors))
        serializer.save()
        # BUG FIX: the original left its intended 303-redirect response as
        # unreachable code after this return.
        # TODO: per the annotation-store protocol this should instead be a
        # 303 redirect to the read_update_delete URL of the new annotation.
        return JSONResponse(serializer.data, status=201)
    return HttpResponseForbidden()
@csrf_exempt
def read_update_delete(request, pk):
    """Retrieve (GET), replace (PUT) or remove (DELETE) one annotation.

    404s when no annotation with ``pk`` exists; 403 with validation
    errors on invalid PUT data; 403 for any other HTTP method.
    """
    if request.method == "GET":
        annotation = get_object_or_404(models.Annotation, pk=pk)
        serializer = serializers.AnnotationSerializer(annotation)
        return JSONResponse(serializer.data, status=200)
    elif request.method == "PUT":
        annotation = get_object_or_404(models.Annotation, pk=pk)
        data = JSONParser().parse(request)
        serializer = serializers.AnnotationSerializer(annotation, data=data)
        if not serializer.is_valid():
            return HttpResponseForbidden(str(serializer.errors))
        serializer.save()
        # BUG FIX: the original left its intended 303-redirect response as
        # unreachable code after this return.
        # TODO: per the annotation-store protocol this should instead be a
        # 303 redirect back to this annotation's URL.
        return JSONResponse(serializer.data, status=200)
    elif request.method == "DELETE":
        annotation = get_object_or_404(models.Annotation, pk=pk)
        annotation.delete()
        return HttpResponse(status=204)
    else:
        return HttpResponseForbidden()
def search(request):
    """Search annotations using the raw query-string parameters (GET only).

    NOTE(security): the query string is passed straight into
    ``QuerySet.filter(**query)``, so a caller can supply arbitrary ORM
    lookups and relation traversals (e.g. ``user__password__startswith``),
    enabling information leaks or expensive queries. Consider whitelisting
    the filterable fields.
    """
    if request.method == "GET":
        query = {k: v for k, v in request.GET.items()}
        annotations = models.Annotation.objects.filter(**query)
        serializer = serializers.AnnotationSerializer(annotations, many=True)
        return JSONResponse({"total": len(serializer.data), "rows": serializer.data})
    else:
        return HttpResponseForbidden()
class DemoView(TemplateView):
    """Serve the static annotator demo page."""
    template_name = "demo.html"
|
python
|
import cv2 as cv
import numpy as np
import math
import time
# Wall-clock start time; used at the bottom of the script to report runtime.
beg=time.time()
def readimg(xmin, xmax, ymin, ymax):
    """Sum the grayscale pixels of the module-global ``img`` over the
    index window [xmin, xmax) x [ymin, ymax) and map the average to a
    small integer bucket.

    NOTE(review): the normalizer counts (max-min+1) samples per axis while
    the loops visit (max-min); 21.25 is presumably ~255/12 to bucket the
    0-255 mean into 0..12 — confirm both against the intended behavior.
    """
    norm = (xmax - xmin + 1) * (ymax - ymin + 1) * 21.25
    total = 0
    for row in range(xmin, xmax):
        for col in range(ymin, ymax):
            total += img[row, col]
    return math.floor(total / norm)
def basicTransform(input):
    """Map a brightness bucket index (0 = darkest, 12 = lightest) to the
    character used to draw it in the ASCII-art output."""
    glyphs = ['鑪','罚','朋','同','团','田','口','厂','十','一','、','。',',']
    return glyphs[input]
def imageTransform(xCharN,yCharN):
    """Convert the module-global grayscale image ``img`` (shape ``size``)
    into a string of characters, sampling roughly xCharN x yCharN cells.

    NOTE(review): xStep is derived from size[1] (image width) but passed to
    readimg as the row extent, while the outer loop walks size[0] with
    yStep — the axis pairing looks swapped; confirm the intended
    row/column convention before changing anything.
    """
    xStep = size[1]/xCharN
    yStep = size[0]/yCharN
    print(xStep,yStep)
    i=0
    j=0
    finalstr=''
    # Walk the image in fractional steps; indices are rounded up with
    # math.ceil when sampling, so neighbouring cells may overlap slightly.
    while i < size[0]:
        while j < size[1] :
            finalstr=finalstr+basicTransform(readimg(math.ceil(i),math.ceil(i+xStep),math.ceil(j),math.ceil(j+yStep)))
            j=j+xStep
        i=i+yStep
        j=0
    return finalstr
def textwrite(name, msg):
    """Write ``msg`` to D:/TestFiles/<name>.txt, overwriting any existing
    file, and print 'Done' on success."""
    file_path = 'D:/TestFiles/'
    full_path = file_path + name + '.txt'
    # BUG FIX: use a context manager so the handle is closed even when the
    # write raises (the original leaked the handle on error).
    with open(full_path, 'w') as file:
        file.write(msg)
    print('Done')
# Convert frames 10000..13595 to character art and write one .txt per frame.
number = 10000
while number <= 13595:
    print(number)
    # `img` and `size` are read as globals by readimg/imageTransform.
    img = cv.imread("D:/[WPF]JJDown/Download/rua/"+str(number)+".jpg", cv.IMREAD_GRAYSCALE)
    size = np.shape(img)
    print(size)
    text = imageTransform(157, 77)
    textwrite(str(number), text)
    number += 1
end = time.time()
# BUG FIX: elapsed time is end - beg; the original computed beg - end and
# printed a negative runtime.
runTime = end - beg
print(runTime)
|
python
|
#!/usr/local/bin/python
import ogr, osr
import datetime
# Python 2 script: benchmark 10000 NAD27 -> state-plane point transforms.
print "Start: ", datetime.datetime.now()
# The whole setup is deliberately rebuilt inside the loop so the timing
# includes object construction, not just the transform itself.
for i in range(10000):
    pointX = -84
    pointY = 38
    # NOTE(review): variable is spelled "ESPG" (EPSG transposed); harmless
    # but worth fixing for readability.
    inputESPG = 4267
    outputEPSG = 2246
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(pointX, pointY)
    inSpatialRef = osr.SpatialReference()
    inSpatialRef.ImportFromEPSG(inputESPG)
    outSpatialRef = osr.SpatialReference()
    # NOTE(review): uses ImportFromEPSGA here but ImportFromEPSG above —
    # confirm the EPSGA (axis-order-aware) variant is intentional.
    outSpatialRef.ImportFromEPSGA(outputEPSG)
    coordTransform = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
    point.Transform(coordTransform)
print "end: ", datetime.datetime.now()
print point.GetX(), point.GetY()
|
python
|
#!/usr/local/bin/python3
from MPL3115A2 import MPL3115A2
from si7021 import Si7021
from pms5003 import PMS5003
from smbus import SMBus
import influxdb_client
from influxdb_client import InfluxDBClient
import time
import logging
# Tag attached to every datapoint written to InfluxDB.
hostname="indoors"
logging.basicConfig(level=logging.DEBUG)
# Sensor handles: MPL3115A2 barometer and Si7021 humidity/temperature on
# I2C bus 1; PMS5003 particulate sensor on the serial port.
mpl = MPL3115A2(1, fetchPressure=False)
si = Si7021(SMBus(1))
pms5003 = PMS5003(device='/dev/ttyAMA0', baudrate=9600, pin_enable=22, pin_reset=27)
# NOTE(security): the InfluxDB token is hard-coded in source; move it to an
# environment variable or config file before sharing/committing this script.
influxdb = InfluxDBClient(url="http://filtr.home.rkas.net:9999", token="dyuhAG11e2qX7dAvsZx9DvmZT8kG006pgyaTnYQ62_I9uwHitjy7PnGW8gLEZctZGCLKbgqcsJKOuJYNfEvGnA==")
influx_write_client = influxdb.write_api()
def readMPL():
    """Sample the MPL3115A2 barometer and return InfluxDB line-protocol
    strings for pressure and temperature."""
    pressure = mpl.pressure
    temp = mpl.temperature
    print("🌬 Pressure is %.2f" % pressure)
    print("🌡 Temp is %.3f°C (%.3f°F)" % (temp, (temp * 1.8 + 32.0)))
    points = [
        f"weather,host={hostname},sensor=MPL3115A2 pressure={pressure}",
        f"weather,host={hostname},sensor=MPL3115A2 temperature={temp}",
    ]
    return points
def readSi():
    """Sample the Si7021 and return line-protocol strings for temperature
    and (when plausible) relative humidity."""
    (humidity, temp) = si.read()
    print("🌡 Temp is %.3f°C (%.3f°F)" % (temp, (temp * 1.8 + 32.0)))
    print("🌫 Relative humidity is %0.2f%%" % humidity)
    points = [f"weather,host={hostname},sensor=Si7021 temperature={temp}"]
    # Filter out undiagnosed spikes of 100% humidity
    if humidity < 100:
        points.append(f"weather,host={hostname},sensor=Si7021 humidity={humidity}")
    return points
def readPMS():
    """Sample the PMS5003 and return line-protocol strings for the
    PM1.0 / PM2.5 / PM10 particulate concentrations (ug/m3)."""
    reading = pms5003.read()
    pm10 = reading.pm_ug_per_m3(1.0)
    pm25 = reading.pm_ug_per_m3(2.5)
    pm100 = reading.pm_ug_per_m3(10)
    print("✨ PM1.0 ug/m3: %d" % pm10)
    print("✨ PM2.5 ug/m3: %d" % pm25)
    print("✨ PM10 ug/m3: %d" % pm100)
    return [
        f"airquality,host={hostname},sensor=PMS5003 pm10={pm10}",
        f"airquality,host={hostname},sensor=PMS5003 pm25={pm25}",
        f"airquality,host={hostname},sensor=PMS5003 pm100={pm100}",
    ]
# Main loop: sample every sensor once a minute and write the batch.
# Each read is best-effort — one failing sensor must not block the others.
while True:
    print("-----")
    datapoints = []
    try:
        datapoints += readMPL()
    except Exception as e:
        print(f"Exception: {e}")
    try:
        datapoints += readSi()
    except Exception as e:
        # BUG FIX: this and the readPMS handler were bare `except:` blocks
        # that printed `e` without ever binding it, raising NameError
        # inside the handler. Bind the exception explicitly.
        print(f"Exception: {e}")
    try:
        datapoints += readPMS()
    except Exception as e:
        print(f"Exception: {e}")
    print("Writing datapoints:\n%s" % ",\n".join(datapoints))
    influx_write_client.write("FWAP", "farkhome", datapoints)
    print("-----")
    time.sleep(60)
|
python
|
#! /usr/bin/env python
#coding=utf8
import os
import sys
if __name__ == '__main__':
    # Python 2 helper: pull, stage everything, commit with the message given
    # as the first CLI argument, and push to origin/master.
    if len(sys.argv) < 2:
        print 'USAGE: commit message'
        sys.exit()
    commit_msg = sys.argv[1]
    # NOTE(security): commit_msg is interpolated into a shell command
    # without quoting/escaping; a message containing ", $() or ; will be
    # executed by the shell. Prefer subprocess with a list argv.
    os.system('git pull origin master')
    os.system('git status')
    os.system('git add ./')
    os.system('git commit * -m "%s"'%commit_msg)
    os.system('git push origin master')
|
python
|
#!/usr/bin/env python
from csv import DictReader
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud
from snli_cooccur import mkdirp_parent
# Rendering defaults for the generated word-cloud image.
DEFAULT_COLOR_NAME = '#1f497d'
DEFAULT_RELATIVE_SCALING = 1.
DEFAULT_WIDTH = 800
DEFAULT_HEIGHT = 400
DEFAULT_MAX_WORDS = 50
# Fraction of the matplotlib color map that word colors are drawn from.
DEFAULT_COLOR_MAP_RANGE = (0., 1.)
def parse_color_map_range(s):
    """Parse 'lo,hi' into a (lo, hi) tuple of floats within [0, 1].

    Raises ValueError when there are not exactly two comma-delimited
    numbers, when lo > hi, or when the bounds fall outside [0, 1].
    """
    bounds = tuple(float(part) for part in s.split(','))
    if len(bounds) != 2:
        raise ValueError('color map range must be two comma-delimited numbers')
    lo, hi = bounds
    if lo > hi:
        raise ValueError('lower bound of color map range must be no greater '
                         'than upper bound')
    if lo < 0 or hi > 1:
        raise ValueError('color map range must be within [0, 1]')
    return bounds
def top_y_csv_to_word_cloud(input_path, query, x, output_path,
                            mask_path=None,
                            color_name=DEFAULT_COLOR_NAME,
                            color_map_name=None,
                            color_map_range=DEFAULT_COLOR_MAP_RANGE,
                            relative_scaling=DEFAULT_RELATIVE_SCALING,
                            background_color_name=None,
                            max_words=DEFAULT_MAX_WORDS,
                            width=DEFAULT_WIDTH,
                            height=DEFAULT_HEIGHT):
    """Render a word cloud PNG from the rows of a top-y CSV file.

    Rows are filtered to those whose 'query' and 'x' columns match the
    given values; each matching row contributes its 'y' word weighted by
    its 'score'. Raises ValueError when no rows match. The image is
    written to ``output_path`` (parent directories are created).
    """
    y_scores = dict()
    with open(input_path) as f:
        reader = DictReader(f)
        for row in reader:
            if row['query'] == query and row['x'] == x:
                y_scores[row['y']] = float(row['score'])
    if not y_scores:
        raise ValueError('found no rows matching query %s and row %s' %
                         (query, x))
    mask = None if mask_path is None else np.array(Image.open(mask_path))
    cmap = None if color_map_name is None else plt.get_cmap(color_map_name)

    def color_func(word, font_size, position, orientation, font_path,
                   random_state):
        # Fixed color unless a color map was requested, in which case a
        # color is drawn at random from the configured sub-range of the map.
        if cmap is None:
            return color_name
        else:
            u = random_state.uniform(*color_map_range)
            (r, g, b, a) = 255 * np.array(cmap(u))
            return 'rgb(%.0f, %.0f, %.0f)' % (r, g, b)

    wordcloud = WordCloud(
        max_words=max_words,
        stopwords=(),
        prefer_horizontal=0.9,
        width=width,
        height=height,
        margin=2,
        relative_scaling=relative_scaling,
        mode='RGBA',
        color_func=color_func,
        background_color=background_color_name,
        mask=mask,
        collocations=False,
        normalize_plurals=False,
        regexp=r'\S+',
    )
    wordcloud.generate_from_frequencies(y_scores)
    image = wordcloud.to_image()
    mkdirp_parent(output_path)
    with open(output_path, 'wb') as f:
        image.save(f, format='png')
def main():
    """CLI entry point: parse arguments and render the word cloud."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description='Generate word cloud from CSV top-y results',
        formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_path', help='path to input CSV file')
    parser.add_argument('query',
                        help='query for which top y will be visualized')
    parser.add_argument('x',
                        help='x for which top y will be visualized '
                             '(must appear in specified query)')
    parser.add_argument('output_path', help='path to output PNG file')
    parser.add_argument('--mask-path', help='path to image mask PNG file')
    parser.add_argument('--background-color-name',
                        help='name of background color (default: transparent)')
    parser.add_argument('--color-name', default=DEFAULT_COLOR_NAME,
                        help='name of text color')
    parser.add_argument('--color-map-name',
                        help='name of color map to select word colors from '
                             '(randomly) (default: use color-name for all '
                             'words)')
    parser.add_argument('--color-map-range', type=parse_color_map_range,
                        default=DEFAULT_COLOR_MAP_RANGE,
                        help='range of color map to use (as two '
                             'comma-delimited floats, a lower bound and an '
                             'upper bound)')
    parser.add_argument('--max-words', type=int, default=DEFAULT_MAX_WORDS,
                        help='number of words to display')
    parser.add_argument('--width', type=int, default=DEFAULT_WIDTH,
                        help='width of image, in pixels')
    parser.add_argument('--height', type=int, default=DEFAULT_HEIGHT,
                        help='height of image, in pixels')
    parser.add_argument('--relative-scaling', type=float,
                        default=DEFAULT_RELATIVE_SCALING,
                        help='degree to which score (rather than rank) is '
                             'used to scale words')
    args = parser.parse_args()
    # BUG FIX: --max-words was parsed but never forwarded, so the flag was
    # silently ignored and the default always used.
    top_y_csv_to_word_cloud(args.input_path, args.query, args.x,
                            args.output_path, mask_path=args.mask_path,
                            background_color_name=args.background_color_name,
                            color_name=args.color_name,
                            color_map_name=args.color_map_name,
                            color_map_range=args.color_map_range,
                            max_words=args.max_words,
                            width=args.width,
                            height=args.height,
                            relative_scaling=args.relative_scaling)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from unittest import TestCase
from flaky import flaky
from polyaxon_schemas.ops.build_job import BuildConfig
from polyaxon_schemas.ops.environments.pods import EnvironmentConfig
from polyaxon_schemas.ops.environments.resources import K8SResourcesConfig, PodResourcesConfig
from polyaxon_schemas.ops.experiment.frameworks import ExperimentFramework
from polyaxon_schemas.ops.group.early_stopping_policies import EarlyStoppingConfig
from polyaxon_schemas.ops.group.hptuning import HPTuningConfig, SearchAlgorithms
from polyaxon_schemas.ops.group.matrix import MatrixConfig
from polyaxon_schemas.ops.logging import LoggingConfig
from polyaxon_schemas.polyaxonfile import PolyaxonFile
from polyaxon_schemas.specs.frameworks import TensorflowSpecification
from polyaxon_schemas.utils import TaskType
class TestPolyaxonfileDeprecation(TestCase):
    def test_simple_file_framework_passes(self):
        """Deprecated simple framework fixture still parses into a valid
        experiment specification with build, run, environment and framework."""
        plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/deprecated/simple_file_framework.yml'))
        spec = plxfile.specification
        spec.apply_context()
        assert spec.version == 1
        assert spec.logging is None
        assert spec.tags is None
        assert spec.build.dockerfile == 'Dockerfile'
        assert spec.run.cmd == 'video_prediction_train --model=DNA --num_masks=1'
        assert spec.environment is not None
        assert spec.environment.resources.gpu.to_dict() == {'requests': 1, 'limits': 1}
        assert spec.environment.outputs.to_dict() == {'jobs': [111], 'experiments': None}
        assert spec.framework is not None
        assert spec.is_experiment is True
def test_deprecated_advanced_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/deprecated/advanced_file.yml'))
spec = plxfile.specification
spec.apply_context()
assert spec.version == 1
assert isinstance(spec.logging, LoggingConfig)
assert spec.is_experiment
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.framework == ExperimentFramework.TENSORFLOW
assert spec.config.tensorflow.n_workers == 5
assert spec.config.tensorflow.n_ps == 10
# check properties for returning worker configs and resources
assert spec.config.tensorflow.worker_resources == {}
assert spec.config.tensorflow.ps_resources == {}
cluster, is_distributed = spec.cluster_def
assert TensorflowSpecification.get_worker_resources(
environment=spec.config.tensorflow,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert TensorflowSpecification.get_ps_resources(
environment=spec.config.tensorflow,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5,
TaskType.PS: 10}, True)
def test_deprecated_notebook_job_with_node_selectors(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/deprecated/notebook_with_custom_environment.yml'))
spec = plxfile.specification
spec.apply_context()
assert spec.version == 1
assert spec.is_notebook
assert spec.is_notebook is True
assert spec.backend is None
assert spec.logging is None
assert sorted(spec.tags) == sorted(['foo', 'bar'])
assert isinstance(spec.build, BuildConfig)
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.artifact_refs == ['outputs1']
assert spec.data_refs == ['data1', 'data2']
assert spec.config_map_refs == ['config_map1', 'config_map2']
node_selector = {'polyaxon.com': 'node_for_notebook_jobs'}
assert spec.environment.node_selector == node_selector
assert spec.node_selector == node_selector
resources = {
'cpu': {'requests': 1, 'limits': 2},
'memory': {'requests': 200, 'limits': 200},
}
assert spec.environment.resources.to_dict() == resources
assert spec.resources.to_dict() == resources
affinity = {
'nodeAffinity': {'requiredDuringSchedulingIgnoredDuringExecution': {}}
}
assert spec.environment.affinity == affinity
assert spec.affinity == affinity
tolerations = [{'key': 'key', 'operator': 'Exists'}]
assert spec.environment.tolerations == tolerations
assert spec.tolerations == tolerations
def test_deprecated_advanced_file_with_custom_configs_and_resources_passes(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/deprecated/advanced_file_with_custom_configs_and_resources.yml'))
spec = plxfile.specification
spec.apply_context()
assert spec.version == 1
assert isinstance(spec.logging, LoggingConfig)
assert spec.is_experiment
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.framework == ExperimentFramework.TENSORFLOW
assert spec.artifact_refs == ['outputs1']
assert spec.data_refs == ['data1', 'data2']
assert spec.config_map_refs == ['config_map1', 'config_map2']
assert spec.config.tensorflow.n_workers == 5
assert spec.config.tensorflow.n_ps == 10
assert isinstance(spec.environment.resources, PodResourcesConfig)
assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
assert spec.environment.resources.cpu.requests == 1
assert spec.environment.resources.cpu.limits == 2
assert spec.config.tensorflow.default_worker_node_selector == {
'foo': True
}
assert spec.config.tensorflow.worker_resources == {}
assert spec.config.tensorflow.worker_affinities == {}
assert isinstance(spec.config.tensorflow.worker_node_selectors[3], dict)
assert spec.config.tensorflow.worker_node_selectors[3] == {
'foo': False
}
assert isinstance(spec.config.tensorflow.worker_tolerations[4], list)
assert spec.config.tensorflow.worker_tolerations[4] == [{
'key': 'key',
'operator': 'Exists',
'effect': 'NoSchedule',
}]
assert isinstance(spec.config.tensorflow.default_ps_resources, PodResourcesConfig)
assert isinstance(spec.config.tensorflow.default_ps_resources.cpu, K8SResourcesConfig)
assert spec.config.tensorflow.default_ps_resources.cpu.requests == 2
assert spec.config.tensorflow.default_ps_resources.cpu.limits == 4
assert spec.config.tensorflow.ps_node_selectors == {}
assert isinstance(spec.config.tensorflow.ps_tolerations[7], list)
assert spec.config.tensorflow.ps_tolerations[7] == [{
'operator': 'Exists'
}]
assert isinstance(spec.config.tensorflow.ps_affinities[7], dict)
assert isinstance(spec.config.tensorflow.ps_resources[9], PodResourcesConfig)
assert isinstance(spec.config.tensorflow.ps_resources[9].memory, K8SResourcesConfig)
assert spec.config.tensorflow.ps_resources[9].memory.requests == 512
assert spec.config.tensorflow.ps_resources[9].memory.limits == 1024
# check that properties for return list of configs and resources is working
cluster, is_distributed = spec.cluster_def
worker_node_selectors = TensorflowSpecification.get_worker_node_selectors(
environment=spec.config.tensorflow,
cluster=cluster,
is_distributed=is_distributed
)
assert len(worker_node_selectors) == spec.config.tensorflow.n_workers
assert set([i['foo'] for i in worker_node_selectors.values()]) == {
spec.config.tensorflow.default_worker_node_selector['foo'],
spec.config.tensorflow.worker_node_selectors[3]['foo']}
assert TensorflowSpecification.get_worker_resources(
environment=spec.config.tensorflow,
cluster=cluster,
is_distributed=is_distributed
) == {}
ps_resources = TensorflowSpecification.get_ps_resources(
environment=spec.config.tensorflow,
cluster=cluster,
is_distributed=is_distributed
)
assert len(ps_resources) == spec.config.tensorflow.n_ps
assert set(ps_resources.values()) == {
spec.config.tensorflow.default_ps_resources,
spec.config.tensorflow.ps_resources[9]}
# Check total resources
assert spec.total_resources == {
'cpu': {'requests': 1 + 2 * 9, 'limits': 2 + 4 * 9},
'memory': {'requests': 512, 'limits': 1024},
}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5,
TaskType.PS: 10}, True)
|
python
|
#!/usr/bin/env python
from typing import NamedTuple
from hummingbot.market.market_base import MarketBase
class ArbitrageMarketPair(NamedTuple):
    """
    Specifies a pair of markets for arbitrage.

    Each leg is a market connector plus the trading pair traded on it and
    that pair's base/quote asset symbols.
    """
    # first leg
    market_1: MarketBase
    market_1_trading_pair: str
    market_1_base_asset: str
    market_1_quote_asset: str
    # second leg
    market_2: MarketBase
    market_2_trading_pair: str
    market_2_base_asset: str
    market_2_quote_asset: str
|
python
|
# Ordered model-parameter names; each name is bound below to its list index
# so the rest of the code can use e.g. Kon_IL13Rec as a parameter index.
param_names = [
    'Kon_IL13Rec',
    'Rec_phosphorylation',
    'pRec_intern',
    'pRec_degradation',
    'Rec_intern',
    'Rec_recycle',
    'JAK2_phosphorylation',
    'pJAK2_dephosphorylation',
    'STAT5_phosphorylation',
    'pSTAT5_dephosphorylation',
    'SOCS3mRNA_production',
    'DecoyR_binding',
    'JAK2_p_inhibition',
    'SOCS3_translation',
    'SOCS3_accumulation',
    'SOCS3_degradation',
    'CD274mRNA_production',
    # sentinel entry: its index equals the number of real parameters
    'len_f_params',
]

# Bind each name at module level to its position.  Assigning through
# globals() replaces the original exec('%s=%d' % ...) call: same effect,
# but without compiling dynamic code (safer and faster).
for idx, name in enumerate(param_names):
    globals()[name] = idx
|
python
|
import datetime
import genshin
async def test_diary(lclient: genshin.Client, genshin_uid: int):
    """Verify the monthly travel diary summary of the logged-in user."""
    diary = await lclient.get_diary()
    expected_uid = lclient.uids[genshin.Game.GENSHIN]
    assert diary.uid == genshin_uid == expected_uid
    assert diary.nickname == "sadru"
    current_month = datetime.datetime.now().month
    assert diary.month == current_month
    assert diary.data.current_mora > 0
async def test_diary_log(lclient: genshin.Client, genshin_uid: int):
    """Verify the paginated diary log and its attached metadata."""
    paginator = lclient.diary_log(limit=10)
    entries = await paginator.flatten()
    assert entries[0].amount > 0
    expected_uid = lclient.uids[genshin.Game.GENSHIN]
    assert paginator.data.uid == genshin_uid == expected_uid
    assert paginator.data.nickname == "sadru"
    assert paginator.data.month == datetime.datetime.now().month
|
python
|
"""
A :class:`~miso.data.dataset_readers.dataset_reader.DatasetReader`
reads a file and converts it to a collection of
:class:`~miso.data.instance.Instance` s.
The various subclasses know how to read specific filetypes
and produce datasets in the formats required by specific models.
"""
# pylint: disable=line-too-long
from .decomp import DecompDatasetReader
|
python
|
import numpy as np
from napari.components import Camera
def test_camera():
    """Test camera defaults and basic attribute round-trips."""
    camera = Camera()
    assert camera.center == (0, 0, 0)
    assert camera.zoom == 1
    assert camera.angles == (0, 0, 90)

    new_center = (10, 20, 30)
    camera.center = new_center
    assert camera.center == new_center
    # changing the center must leave the angles untouched
    assert camera.angles == (0, 0, 90)

    camera.zoom = 200
    assert camera.zoom == 200

    new_angles = (20, 90, 45)
    camera.angles = new_angles
    assert camera.angles == new_angles
def test_calculate_view_direction_3d():
    """Check that view direction is calculated properly from camera angles."""
    expected = (0, 1, 0)
    # the direction depends only on the angles — not on zoom or center
    configurations = [
        dict(center=(0, 0, 0), angles=(90, 0, 0), zoom=1),
        dict(center=(0, 0, 0), angles=(90, 0, 0), zoom=10),
        dict(center=(15, 15, 15), angles=(90, 0, 0), zoom=1),
    ]
    for kwargs in configurations:
        camera = Camera(**kwargs)
        assert np.allclose(camera.view_direction, expected)
def test_calculate_view_direction_nd():
    """Check that nD view direction is calculated properly."""
    camera = Camera(center=(0, 0, 0), angles=(90, 0, 0), zoom=1)

    # 2D: no 3D view direction exists, so None is returned
    direction_2d = camera.calculate_nd_view_direction(
        ndim=2, dims_displayed=[0, 1]
    )
    assert direction_2d is None

    # 3D: a plain 3-vector is returned
    direction_3d = camera.calculate_nd_view_direction(
        ndim=3, dims_displayed=[0, 1, 2]
    )
    assert len(direction_3d) == 3
    assert np.allclose(direction_3d, (0, 1, 0))

    # nD (> 3): the 3D direction is embedded at the displayed dimensions
    direction_nd = camera.calculate_nd_view_direction(
        ndim=5, dims_displayed=[0, 2, 4]
    )
    assert len(direction_nd) == 5
    assert np.allclose(direction_nd[[0, 2, 4]], (0, 1, 0))
|
python
|
class APIError(Exception):
    """
    Simple error handling for HTTP responses.

    Wraps a message and an HTTP status code; known codes are mapped to a
    human-readable phrase in __str__.
    """
    # HTTP status codes this API distinguishes; anything else renders as
    # 'Communication Error'.
    codes = {
        204: 'No Results',
        400: 'Bad Request',
        401: 'Unauthorized',
        402: 'Unauthorized (Payment Required)',
        403: 'Forbidden',
        404: 'Not Found',
        413: 'Too Much Data Given',
        429: 'Too Many Requests (Rate Limiting)',
        500: 'Internal Server Error',
        501: 'Not Implemented',
        503: 'Service Unavailable'
    }

    def __init__(self, msg, code=0):
        # Pass msg to Exception so that args/repr/pickling carry the message
        # (the original called Exception.__init__(self) with no arguments,
        # leaving args empty).
        super(APIError, self).__init__(msg)
        self.msg = msg
        self.code = code

    def __str__(self):
        return "HTTP error code %s: %s (%s)" % (self.code, self.codes.get(self.code, 'Communication Error'), self.msg)
|
python
|
ta = [1, 2, 3]
tb = [9, 8, 7]
# cluster: pair elements of the two lists.  The zip is materialized into a
# list because in Python 3 zip() returns a one-shot iterator — the original
# code printed '<zip object ...>' instead of the pairs.
zipped = list(zip(ta, tb))
print('zip(ta,tb)=', zipped)
# decompose: unzip back into the original sequences (as tuples)
na, nb = zip(*zipped)
print(na, nb)
|
python
|
import os, logging, math
import numpy as np
import torch
import torch.nn as nn
from volsim.base_models import *
from volsim.simulation_dataset import *
from volsim.params import *
class DistanceModel(nn.Module):
    """Learned perceptual distance metric for 3D simulation data.

    Two inputs are passed through a shared convolutional base network; the
    per-layer feature maps are normalized, their element-wise difference is
    weighted by learned per-channel 1x1x1 convolutions, and the spatial mean
    over all layers is accumulated into one scalar distance per input pair.
    """

    def __init__(self, modelParams:Params, useGPU:bool=True):
        super(DistanceModel, self).__init__()
        self.hp = modelParams
        self.useGPU = useGPU

        # --- base feature extractor, selected via the mBase hyperparameter ---
        # mBase encodes "<type>_<layers>_<widthFactor>"; missing or invalid
        # parts fall back to per-type defaults.
        if "multiScale" in self.hp.mBase:
            base = self.hp.mBase.split("_")
            try:
                layers = int(base[1])
            except ValueError:
                layers = 12
            try:
                width = float(base[2])
            except ValueError:
                width = 1
            useSkip = "Skip" in self.hp.mBase
            self.basenet = MultiScaleNet(widthFactor=width, layers=layers,
                                         firstChannels=3, useSkip=useSkip)
        elif "alex" in self.hp.mBase:
            base = self.hp.mBase.split("_")
            try:
                layers = int(base[1])
            except ValueError:
                layers = 5
            try:
                width = float(base[2])
            except ValueError:
                width = 1
            convKernel, maxPoolKernel, firstStride = (12, 4, 4)
            self.basenet = AlexNetLike(widthFactor=width, layers=layers,
                                       convKernel=convKernel,
                                       maxPoolKernel=maxPoolKernel,
                                       firstStride=firstStride)
        else:
            raise ValueError('Unknown base network type.')

        # Per-layer accumulators for the feature-map normalization statistics
        # maintained by updateNorm() / normalizeTensor().
        self.normAcc = []  # running mean accumulator (for normMode max)
        self.normM2 = []   # running sum of squared deviations (for normMode mean)
        for i in range(self.basenet.layers):
            acc = torch.tensor([0.0], requires_grad=False)
            m2 = torch.tensor([0.0], requires_grad=False)
            if self.useGPU:
                acc = acc.cuda()
                m2 = m2.cuda()
            self.normAcc += [acc]
            self.normM2 += [m2]
        self.normCount = [0] * self.basenet.layers  # samples seen per layer (normMode avg)

        # One learned 1x1x1 channel-weighting layer per base-network layer.
        # setattr() keeps the historical attribute names avg0, avg1, ... so
        # checkpoints saved by the original unrolled implementation still
        # load, while also removing its hard-coded 20-layer limit.
        self.avgs = []
        for i in range(self.basenet.layers):
            avg = self.avgLayer(self.basenet.channels[i])
            setattr(self, "avg%d" % i, avg)
            self.avgs += [avg]

        # initialize learned average weight layers to a constant
        for avgLayer in self.avgs:
            for layer in avgLayer:
                if isinstance(layer, nn.Conv3d):
                    layer.weight.data.fill_(self.hp.mLinInit)

        if self.useGPU:
            self.cuda()

    @classmethod
    def load(cls, path:str, useGPU:bool=True):
        """Construct a model from a checkpoint written by save().

        Restores hyperparameters, weights and (when the norm mode uses them)
        the normalization accumulators; the model is returned in eval mode.
        """
        if useGPU:
            print('Loading model from %s' % path)
            loaded = torch.load(path)
        else:
            print('CPU - Loading model from %s' % path)
            loaded = torch.load(path, map_location=torch.device('cpu'))
        params = Params.fromDict(loaded['hyperparams'])
        stateDict = loaded['stateDict']
        model = cls(params, useGPU)
        model.load_state_dict(stateDict)
        model.eval()
        if params.mNormMode != "norm":
            model.normAcc = loaded['normAcc']
            model.normM2 = loaded['normM2']
            model.normCount = loaded['normCount']
        return model

    def forward(self, x:dict) -> torch.Tensor:
        """Compute distances for all sample pairs selected by the input dict.

        x["data"] holds all samples; x["indexA"]/x["indexB"] (sliced by
        idxMin/idxMax) select the pairs to compare.  Returns a tensor of
        shape [batch, pairs].
        """
        full = x["data"].cuda() if self.useGPU else x["data"]
        # only use index of first batch element for entire batch
        idxA = x["indexA"][0, x["idxMin"]:x["idxMax"]].long()
        idxB = x["indexB"][0, x["idxMin"]:x["idxMax"]].long()
        idxA = idxA.cuda() if self.useGPU else idxA
        idxB = idxB.cuda() if self.useGPU else idxB

        dataA = torch.index_select(full, 1, idxA)
        dataB = torch.index_select(full, 1, idxB)
        dataA = dataA.view(-1, full.shape[2], full.shape[3], full.shape[4], full.shape[5])
        dataB = dataB.view(-1, full.shape[2], full.shape[3], full.shape[4], full.shape[5])
        dataA = dataA.permute(0, 4, 1, 2, 3)  # change shape to [batch*sampleSlice,3,128,128,128]
        dataB = dataB.permute(0, 4, 1, 2, 3)

        self.clampWeights()
        outBaseA = self.basenet(dataA)
        outBaseB = self.basenet(dataB)

        result = torch.tensor([[0.0]]).cuda() if self.useGPU else torch.tensor([[0.0]])
        for i in range(len(outBaseA)):
            if i in self.hp.mIgnoreLayers:
                continue
            normalized1 = self.normalizeTensor(outBaseA[i], i)
            normalized2 = self.normalizeTensor(outBaseB[i], i)
            # per-element feature distance in the normalized feature space
            if self.hp.mFeatDist == "L1":
                diff = torch.abs(normalized2 - normalized1)
            elif self.hp.mFeatDist == "L2" or self.hp.mFeatDist == "L2Sqrt":
                diff = (normalized2 - normalized1)**2
            else:
                raise ValueError('Unknown feature distance.')
            # learned channel weighting, then spatial mean, summed over layers
            weightedDiff = self.avgs[i](diff)
            result = result + torch.mean(weightedDiff, dim=[2, 3, 4])
        if self.hp.mFeatDist == "L2Sqrt":
            result = torch.sqrt(result)
        return torch.squeeze(result, dim=1).view(full.shape[0], -1)

    # input two numpy arrays with shape [width, height, depth, channel] or shape
    # [batch, width, height, depth, channel] where channel = 1 or channel = 3
    # and return a distance of shape [1] or [batch]
    # If true, normalize performs a normalization to the models native data range jointly for the full data batch
    # If true, interpolate performs a spatial interpolation to the models native data size jointly for the full data batch
    def computeDistance(self, input1:np.ndarray, input2:np.ndarray, normalize:bool, interpolate:bool) -> np.ndarray:
        """Convenience wrapper around forward() for raw numpy inputs."""
        assert (not self.training), "Distance computation should happen in evaluation mode!"
        assert (input1.shape == input2.shape), "Input shape mismatch!"
        in1 = input1[None, ...] if input1.ndim == 4 else input1
        in2 = input2[None, ...] if input2.ndim == 4 else input2
        data_transform = TransformsInference("single", 3, self.hp)
        if not normalize:
            data_transform.normalize = "none"
        if not interpolate:
            data_transform.outputSize = -1
        data = np.concatenate([in1, in2], axis=0)  # stack along param dimension
        dataDict = {"data": data, "path": None, "distance": None, "indexA": None,
                    "indexB": None, "idxMin": None, "idxMax": None}
        data = data_transform(dataDict)["data"]
        # build a pseudo batch where indexA/indexB pair in1[i] with in2[i]
        nPairs = in1.shape[0]
        distance = torch.from_numpy(np.zeros(nPairs, dtype=np.float32))
        indexA = torch.from_numpy(np.arange(nPairs, dtype=np.int32))
        indexB = torch.from_numpy(np.arange(nPairs, dtype=np.int32) + nPairs)
        path = np.array([""] * nPairs)
        sample = {"data": data[None, ...], "path": path, "distance": distance[None, ...],
                  "indexA": indexA[None, ...], "indexB": indexB[None, ...],
                  "idxMin": 0, "idxMax": nPairs}
        output = self(sample)
        output = output.cpu().detach().view(-1).numpy()
        return output

    # ensures that avg layer weights are greater or equal to zero
    def clampWeights(self):
        for avgLayer in self.avgs:
            for layer in avgLayer:
                if isinstance(layer, nn.Conv3d):
                    layer.weight.data = torch.clamp(layer.weight.data, min=0)

    # 1x1 convolution layer to scale feature maps channel-wise
    def avgLayer(self, channelsIn:int) -> nn.Sequential:
        if self.hp.mLinDropout:
            return nn.Sequential(
                nn.Dropout(),
                nn.Conv3d(channelsIn, 1, 1, stride=1, padding=0, bias=False),
            )
        else:
            return nn.Sequential(
                nn.Conv3d(channelsIn, 1, 1, stride=1, padding=0, bias=False),
            )

    # preprocessing step that updates internal accumulators for feature map normalization
    def updateNorm(self, sample:dict):
        full = sample["data"].cuda() if self.useGPU else sample["data"]
        for i in range(full.shape[1]):  # do not use index here, only iterate over all data once
            data = full[:, i]
            data = data.permute(0, 4, 1, 2, 3)  # change shape to [batch,3,128,128,128]
            self.clampWeights()
            outBase = self.basenet(data)
            for j in range(len(outBase)):
                self.normalizeTensor(outBase[j], j, updateAcc=True)

    # normalizes feature map tensor along channel dimension with different methods
    def normalizeTensor(self, tensorIn:torch.Tensor, layer:int, epsilon:float=1e-10,
                        updateAcc:bool=False) -> torch.Tensor:
        size = tensorIn.size()
        # unit normalize tensor in channel dimension
        if self.hp.mNormMode == "normUnit":
            norm = torch.sqrt(torch.sum(tensorIn**2, dim=1))
            norm = norm.view(size[0], 1, size[2], size[3], size[4])
            return tensorIn / (norm.expand_as(tensorIn) + epsilon)
        elif self.hp.mNormMode == "normMeanLayerGlobal":
            if updateAcc:
                # Welford-style running update of mean (normAcc) and squared
                # deviation (normM2) accumulators for this layer
                self.normCount[layer] = self.normCount[layer] + size[0]
                delta = tensorIn - self.normAcc[layer].expand_as(tensorIn)
                self.normAcc[layer] = self.normAcc[layer] + torch.sum(torch.mean(delta / self.normCount[layer], dim=1), dim=0)
                self.normM2[layer] = self.normM2[layer] + torch.sum(torch.mean(delta * (tensorIn - self.normAcc[layer].expand_as(tensorIn)), dim=1), dim=0)
            # rescale norm accumulators for differently sized inputs
            if size[2] != self.normAcc[layer].shape[0] or size[3] != self.normAcc[layer].shape[1] or size[4] != self.normAcc[layer].shape[2]:
                up = nn.Upsample(size=(size[2], size[3], size[4]), mode="trilinear", align_corners=True)
                normAcc = torch.squeeze(up(torch.unsqueeze(torch.unsqueeze(self.normAcc[layer].detach(), dim=0), dim=0)))
                normM2 = torch.squeeze(up(torch.unsqueeze(torch.unsqueeze(self.normM2[layer].detach(), dim=0), dim=0)))
                mean = normAcc
                mean = mean.view(1, 1, size[2], size[3], size[4])
                std = torch.sqrt(normM2 / (self.normCount[layer] - 1))
                std = std.view(1, 1, size[2], size[3], size[4])
            # directly use norm accumulators for matching input size
            else:
                mean = self.normAcc[layer]
                mean = mean.view(1, 1, size[2], size[3], size[4])
                std = torch.sqrt(self.normM2[layer] / (self.normCount[layer] - 1))
                std = std.view(1, 1, size[2], size[3], size[4])
            normalized = (tensorIn - mean.expand_as(tensorIn)) / (std.expand_as(tensorIn) + epsilon)
            normalized2 = normalized / (math.sqrt(size[1]) - 1)
            return normalized2
        elif self.hp.mNormMode == "normNone":
            return tensorIn
        else:
            raise ValueError('Unknown norm mode.')

    def printModelInfo(self):
        """Print and log the trainable parameter count and model structure."""
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        params = sum([np.prod(p.size()) for p in parameters])
        print("Trainable parameters: %d" % params)
        print(self)
        print("")
        logging.info("Trainable parameters: %d" % params)
        logging.info(self)
        logging.info("")

    def save(self, path:str, override:bool=False, noPrint:bool=False):
        """Write weights, hyperparameters and norm accumulators to path.

        Raises ValueError when the file exists and override is False.
        """
        if not noPrint:
            print('Saving model to %s' % path)
        if not override and os.path.isfile(path):
            raise ValueError("Override warning!")
        else:
            saveDict = {'stateDict': self.state_dict(), 'hyperparams': self.hp.asDict(), }
            if self.hp.mNormMode != "norm":
                saveDict['normAcc'] = self.normAcc
                saveDict['normM2'] = self.normM2
                saveDict['normCount'] = self.normCount
            torch.save(saveDict, path)

    def resume(self, path:str):
        """Restore weights, hyperparameters and norm accumulators in place."""
        if self.useGPU:
            print('Resuming model from %s' % path)
            loaded = torch.load(path)
        else:
            print('CPU - Resuming model from %s' % path)
            loaded = torch.load(path, map_location=torch.device('cpu'))
        self.load_state_dict(loaded['stateDict'])
        self.hp = Params().fromDict(loaded['hyperparams'])
        if self.hp.mNormMode != "norm":
            self.normAcc = loaded['normAcc']
            self.normM2 = loaded['normM2']
            self.normCount = loaded['normCount']
|
python
|
"""Add hostname column to the resources table
Revision ID: 58a12e45663e
Revises: 06ce06e9bb85
Create Date: 2020-10-20 18:24:40.267394
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '58a12e45663e'
down_revision = '06ce06e9bb85'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable 64-char ``hostname`` column to the resources table."""
    # batch_alter_table recreates the table where needed (e.g. on SQLite)
    with op.batch_alter_table('resources') as batch_op:
        batch_op.add_column(sa.Column('hostname', sa.String(length=64), nullable=True))
def downgrade():
    """Remove the ``hostname`` column again (reverses upgrade())."""
    with op.batch_alter_table('resources') as batch_op:
        batch_op.drop_column('hostname')
|
python
|
from cantoolz.module import *
from cantoolz.uds import *
import json
class control_ecu_doors(CANModule):
    name = "Doors trigger for vircar"
    help = """
    This module emulating lock control.
    Init params (example):
    {
        'id_report': {0x91:'Left', 0x92:'Right'},
        'id_command': 0x81,
        'commands': {
            'lock':'1000',
            'unlock':'10ff',
            'init': '00ff',
        },
        'reports': {
            'Locked': '2000',
            'Unlocked': '20ff'
        }
    }
    """
    _active = True

    def do_init(self, params):
        """Store the config dict and register the console commands."""
        self._status2 = params
        self.frames = []   # outbound CAN frames queued for the next write pass
        self._doors = {}   # last reported state per door name
        self._cmdList['status'] = Command("Get doors status", 0, "", self.control_get_status, True)
        self._cmdList['central_lock'] = Command("Lock doors", 0, "", self.control_lock, True)
        self._cmdList['central_unlock'] = Command("Unlock doors", 0, "", self.control_unlock, True)

    def _queue_command(self, command):
        # Build a CAN data frame for the named command (hex payload from the
        # config) and queue it for transmission in do_effect's write pass.
        payload = self._status2['commands'][command]
        self.frames.append(
            CANMessage(self._status2['id_command'], int(len(payload) / 2),
                       bytes.fromhex(payload), False, CANMessage.DataFrame))

    def control_lock(self, flag):
        """Queue the 'lock' command frame."""
        self._queue_command('lock')
        return ""

    def control_unlock(self, flag):
        """Queue the 'unlock' command frame."""
        self._queue_command('unlock')
        return ""

    def control_get_status(self, flag):
        """Return the collected door states as a JSON string."""
        json_string = json.dumps({'status': self._doors})
        return json_string

    # Effect (could be fuzz operation, sniff, filter or whatever)
    def do_effect(self, can_msg, args):
        if args['action'] == 'read' and can_msg.CANData:  # READ
            # match incoming report frames against the configured status codes
            if can_msg.CANFrame.frame_id in self._status2.get('id_report', {}).keys():
                for status, code in self._status2['reports'].items():
                    if can_msg.CANFrame.frame_length == int(len(code) / 2) and code == self.get_hex(can_msg.CANFrame.frame_raw_data):
                        self._doors.update(
                            {self._status2['id_report'][can_msg.CANFrame.frame_id]: status}
                        )
        if args['action'] == 'write' and not can_msg.CANData:
            # emit one queued command frame per write opportunity
            if len(self.frames) > 0:
                can_msg.CANFrame = self.frames.pop(0)
                can_msg.CANData = True
                can_msg.bus = self._bus
        return can_msg
|
python
|
# Under MIT licence, see LICENCE.txt
import random
from typing import List
from Util import Pose, Position
from Util.ai_command import MoveTo
from Util.constant import BALL_RADIUS, ROBOT_RADIUS, POSITION_DEADZONE, ANGLE_TO_HALT
from Util.geometry import compare_angle
from ai.GameDomainObjects.player import Player
from ai.STA.Tactic.tactic import Tactic
from ai.states.game_state import GameState
ORIENTATION_DEADZONE = 0.2
DISTANCE_TO_KICK_REAL = ROBOT_RADIUS * 3.4
DISTANCE_TO_KICK_SIM = ROBOT_RADIUS + BALL_RADIUS
COMMAND_DELAY = 1.5
class GoToRandomPosition(Tactic):
    """Tactic that repeatedly sends the player to random points of a
    discretized grid inside a rectangular zone."""
    def __init__(self, game_state: GameState,
                 player: Player,
                 args: List[str]=None,
                 center_of_zone=Position(0, 0),
                 height_of_zone=800,
                 width_of_zone=800):
        super().__init__(game_state, player, args=args)
        self.current_state = self.main_state
        self.next_state = self.main_state
        self.center_of_zone = center_of_zone
        self.height_of_zone = height_of_zone
        self.width_of_zone = width_of_zone
        self.bottom_left_corner = Position(self.center_of_zone[0] - self.width_of_zone / 2,
                                           self.center_of_zone[1] - self.height_of_zone / 2)
        # pre-compute the grid of candidate destinations
        # (spacing in the same units as Position — presumably mm, TODO confirm)
        self.grid_of_positions = []
        discretisation = 100
        for i in range(int(self.width_of_zone / discretisation)):
            for j in range(int(self.height_of_zone / discretisation)):
                self.grid_of_positions.append(self.bottom_left_corner + Position(discretisation * i,
                                                                                 discretisation * j))
        # pick an initial random destination; orientation is fixed at 0
        self.current_position_index_to_go = random.randint(0, len(self.grid_of_positions) - 1)
        self.current_position_to_go = self.grid_of_positions[self.current_position_index_to_go]
        self.current_angle_to_go = 0 #random.randint(0, 100) * np.pi / 100.
        self.next_pose = Pose(self.current_position_to_go, self.current_angle_to_go)
    def main_state(self):
        # once the target pose is reached, draw a new random grid cell
        if self.check_success():
            self.current_position_index_to_go = random.randint(0, len(self.grid_of_positions) - 1)
            self.current_position_to_go = self.grid_of_positions[self.current_position_index_to_go]
            #self.current_angle_to_go = random.randint(-1, 1) * np.pi / 100.
            self.next_pose = Pose(self.current_position_to_go, self.current_angle_to_go)
        return MoveTo(self.next_pose, cruise_speed=2)
    def check_success(self):
        # target reached when both position and orientation are within tolerance
        distance = (self.player.pose.position - self.next_pose.position).norm
        if distance < POSITION_DEADZONE and compare_angle(self.player.pose.orientation, self.next_pose.orientation, abs_tol=ANGLE_TO_HALT):
            return True
        return False
|
python
|
import factory
import json
from django.test import TestCase, Client
from django.urls import reverse
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from movies.models import Movie
from movies.views import home
from movies.forms import SearchMovieForm
from movie_database.users.models import User
class MovieFactory(factory.DjangoModelFactory):
    """Factory producing (or reusing, keyed by title) a Movie 'Spiderman'."""
    class Meta:
        model = Movie
        django_get_or_create = ('title',)
    title = 'Spiderman'
    # data = json.dumps({'Year': '2001'})
class FavouriteTests(TestCase):
    """View and form tests for the movie home page."""

    def setUp(self):
        self.factory = RequestFactory()
        self.movie = MovieFactory()
        self.client = Client()
        self.user = User.objects.create_user(
            username='jacob',
            email='jacob@gmail.com',
            password='topsecret',
        )

    def test_home_page(self):
        """Logged-in users get the home template with a search form."""
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context['form'], SearchMovieForm)
        self.assertTemplateUsed(response, template_name="pages/home.html")

    def test_home_page_form(self):
        """The search form validates a simple title query."""
        payload = {'title': 'Spiderman'}
        self.assertTrue(SearchMovieForm(data=payload).is_valid())
|
python
|
import spatialfacet
import numpy as np
from matplotlib import pyplot as plt
from shapely.geometry import Polygon, Point
# U and V are the left/right halves of the square [-1,1]^2;
# U_minus is a thin strip along the top edge of U.
U = Polygon([[-1,-1],
             [-1,1],
             [0,1],
             [0,-1],
             [-1,-1]])
V = Polygon([[0,-1],
             [0,1],
             [1,1],
             [1,-1],
             [0,-1]])
U_minus = Polygon([[-1,0.75],
                   [-1,1],
                   [0,1],
                   [0,0.75],
                   [-1,0.75]])
print(U)
# run a keyword query against the sample database
s = spatialfacet.SpatialFacetMiner()
s.add_database("databases/simple","german")
s.query("red blue",1,20,1000)
# retrieve result coordinates (c0, c1), documents and weights,
# plus the string-valued counterparts
c0, c1, docs,wt = s.getSpyData();
v1,values = s.getSpyStringData();
print(c0)
print("="*50)
print(c1)
print("="*50)
print(docs)
print("="*50)
print(wt)
print("="*50)
print(v1)
print("="*50)
print(values)
print("="*50)
# scatter the result points, sized by weight, and save the figure
plt.scatter(c0,c1, s=(15*wt)**2+5)
plt.savefig("test.png")
## facet
def get_facet(c0, c1, U):
    """Return the row ids of points (c0[i], c1[i]) lying within polygon U."""
    hits = []
    for rowid, (x, y) in enumerate(zip(c0, c1)):
        if Point([x, y]).within(U):
            hits.append(rowid)
    return hits
# assign each result point to the named polygon facets it falls into
facet = {
    "U": get_facet(c0,c1,U),
    "V": get_facet(c0,c1,V),
    "U-": get_facet(c0,c1,U_minus),
}
print(facet)
## now propose query terms
out = s.augment("red", [1,2,3], 5)
print(out)
print(s.query_with_data("red green",1,10,1))
print(s.query("red green",1,10,1))
|
python
|
import numpy
def diff(features1, features2):
    """Return the Euclidean (L2) distance between two feature vectors."""
    delta = numpy.asarray(features1) - numpy.asarray(features2)
    return numpy.linalg.norm(delta)
def highOrSober(soberFeatures, highFeatures, queryFeatures):
    """Classify the query as whichever reference it is closer to."""
    sober_distance = diff(soberFeatures, queryFeatures)
    high_distance = diff(highFeatures, queryFeatures)
    return "sober" if sober_distance < high_distance else "high"
|
python
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Distribution metadata for the mias_loader package; handed to setup() below.
config = {
    'description': 'A tiny Python/C library for loading MIAS images from file.',
    'author': 'Samuel Jackson',
    'url': 'http://github.com/samueljackson92/mias-loader',
    'download_url': 'http://github.com/samueljackson92/mias-loader',
    'author_email': 'samueljackson@outlook.com',
    'version': '0.1.0',
    'install_requires': [
        'numpy'
    ],
    'py_modules': ['mias_load'],
    'name': 'mias_loader'
}
setup(**config)
|
python
|
from PIL import Image
import gym
import gym_pacman
import time
# Demo/smoke-test loop: run random-action episodes against the partially
# observable Berkeley Pacman environment forever (stop with Ctrl-C).
env = gym.make('BerkeleyPacmanPO-v0')
env.seed(1)
done = False  # NOTE(review): overwritten below and never read — dead variable
while True:
    done = False
    env.reset()
    i = 0
    while i < 100:  # cap each episode at 100 random steps
        i += 1
        s_, r, done, info = env.step(env.action_space.sample())
        env.render()
    print("Iteration over")
|
python
|
from asyncio import sleep
from requests import get
from main import bot, reg_handler, des_handler, par_handler
async def diss(message, args, origin_text):
    """Fetch a mild (level=min) line from the API and edit it into the message.

    Retries up to 20 times; on total failure shows an error briefly, then
    deletes the message.
    """
    await message.edit("获取中 . . .")
    for _ in range(20):  # retry against a flaky API
        req = get("https://nmsl.shadiao.app/api.php?level=min&from=tntcrafthim")
        if req.status_code == 200:
            await message.edit(req.text, parse_mode='html')
            break
    else:
        # every attempt failed: report, wait, and clean up the message
        await message.edit("出错了呜呜呜 ~ 试了好多好多次都无法访问到 API 服务器 。")
        await sleep(2)
        await message.delete()
async def biss(message, args, origin_text):
await message.edit("获取中 . . .")
status = False
for _ in range(20):
req = get("https://nmsl.shadiao.app/api.php?from=tntcrafthim")
if req.status_code == 200:
res = req.text
await message.edit(res, parse_mode='html')
status = True
break
else:
continue
if status == False:
await message.edit("出错了呜呜呜 ~ 试了好多好多次都无法访问到 API 服务器 。")
await sleep(2)
await message.delete()
reg_handler('diss', diss)
reg_handler('biss', biss)
des_handler('diss', "儒雅随和版祖安语录。")
des_handler('diss', '加带力度版祖安语录。')
par_handler('diss', '')
par_handler('biss', '')
|
python
|
# Generated by Django 2.1 on 2018-10-03 01:54
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Article and Capability models and removes the presentation
    fields (city, country, description, image, slug, state, url) from Client."""
    dependencies = [
        ('unlabel_backend', '0021_auto_20180625_1904'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Article',
        ),
        migrations.DeleteModel(
            name='Capability',
        ),
        migrations.RemoveField(
            model_name='client',
            name='city',
        ),
        migrations.RemoveField(
            model_name='client',
            name='country',
        ),
        migrations.RemoveField(
            model_name='client',
            name='description',
        ),
        migrations.RemoveField(
            model_name='client',
            name='image',
        ),
        migrations.RemoveField(
            model_name='client',
            name='slug',
        ),
        migrations.RemoveField(
            model_name='client',
            name='state',
        ),
        migrations.RemoveField(
            model_name='client',
            name='url',
        ),
    ]
|
python
|
#!/usr/bin/env python
# vim:set ts=2 sw=2 expandtab:
#
# the pre.py script permits the reconstruction to specify the server, user
# name, and password used for connection. This is achieved by setting
# the parameters in a dictionary named jobArgs.
#
# Optional output variable : jobArgs
#
# Default dictionary; only those being changed need to be specified.
#
# jobArgs = { 'machines' : ('ip address 1', 'ip address 2'),
# 'user' : 'reconuser',
# 'keyfile' : None } (Which implies id_recon next to autorec.py)
print("********* Entering pre.py *************")
print("# Objects present at pre.py:")
for x in sorted(locals().keys()):
print("# {0:20} : {1}".format(x, locals()[x]))
print("")
# To override the DEFAULT_MACHINES_LIST for this reconstruction, for example:
#jobArgs = { 'machines' : ('127.0.0.1',) } # Note trailing ',' if only one.
print("********* Exiting pre.py *************")
|
python
|
class Solution(object):
    def findLongestWord(self, s, d):
        """Return the longest word in d that is a subsequence of s; ties are
        broken by smallest lexicographical order, '' if no word matches.

        :type s: str
        :type d: List[str]
        :rtype: str
        """
        # Scan candidates longest-first; once a match is found we only need
        # to examine the remaining words of that same length.
        newD = sorted(d, key=len, reverse=True)
        tempList = []
        for word in newD:
            if tempList and len(word) != len(tempList[-1]):
                break
            if self.scanWord(s, word):
                tempList.append(word)
        return sorted(tempList)[0] if tempList else ""
    def scanWord(self, sIn, s):
        """Return True if s is a subsequence of sIn.

        BUG FIX: the previous version indexed s[i2] unconditionally and
        raised IndexError for an empty s; an empty string is trivially a
        subsequence and now returns True.
        """
        it = iter(sIn)
        # `ch in it` advances the iterator, so characters must appear in order.
        return all(ch in it for ch in s)
# Smoke-test against the LeetCode 524 examples; expected outputs in comments.
a = Solution()
print(a.findLongestWord("abpcplea", [
    "ale", "apple", "monkey", "plea"])) # apple
print(a.findLongestWord("abpcplea", ["a", "b", "c"])) # a
print(a.findLongestWord("apple", ["zxc", "vbn"])) # ""
|
python
|
"""
@author: Andrea Domenico Giuliano
@contact: andreadomenico.giuliano@studenti.unipd.it
@organization: University of Padua
"""
import datetime
import math
from collections import defaultdict
#File contenente le funzioni riguardanti la creazione dei dict rigurandanti i gruppi degli items e degli users
def Nr_items(c):
    """Return the total number of rows in the items table."""
    c.execute('select count(id) as co from items')
    count = 0
    for row in c:
        count = int(row['co'])
    return count
def Jobroles_list(c):
    """Return [user_id, jobrole] int pairs from the us_jobroles table."""
    c.execute('select * from us_jobroles')
    pairs = []
    for row in c:
        pairs.append([int(row['user_id']), int(row['jobrole'])])
    return pairs
def Jobroles_Dist_list(c):
    """Return the distinct jobrole ids as a list of ints."""
    c.execute('select DISTINCT jobrole as jb from us_jobroles')
    return [int(row['jb']) for row in c]
"""
# Versione senza divisione temporale
def Jobroles_d_creation(c,jbr_l,items_upop_score_d):
#print "Inizio calcolo Jobroles";
a = datetime.datetime.now();
jobroles_d = {};
jbr_d_l = Jobroles_Dist_list(c);
items_upop_sc_d = defaultdict(lambda: 0);
if (len(jbr_d_l)>0):
for i in range(0,len(jbr_d_l)):
jobroles_d[jbr_d_l[i]] = [int(0),[],items_upop_sc_d.copy()];
if (len(jbr_l)>0):
for i in range(0,len(jbr_l)):
if jbr_l[i][0] not in jobroles_d[jbr_l[i][1]][1]:
jobroles_d[jbr_l[i][1]][0] += 1;
l = jobroles_d[jbr_l[i][1]][1];
l.append(jbr_l[i][0]);
jobroles_d[jbr_l[i][1]][1] = l;
for key in jobroles_d:
if (jobroles_d[key][0]>0):
jobroles_d[key][0] = float(1.00/jobroles_d[key][0]);
b = datetime.datetime.now();
#print "Fine calcolo Jobroles";
#print (b-a);
return jobroles_d;
"""
def Jobroles_d_creation(c,jbr_l):
    """Build {jobrole_id: [weight, [user_ids], slot_scores]}.

    weight ends up as 1/len(user_ids) (left at 0 for empty groups) and
    slot_scores maps slots 1..6 to defaultdict(float) accumulators
    (presumably time periods — confirm with callers).
    """
    #print "Inizio calcolo Jobroles";
    a = datetime.datetime.now();
    jobroles_d = {};
    jbr_d_l = Jobroles_Dist_list(c);
    #items_upop_sc_d = defaultdict(lambda: 0);
    if (len(jbr_d_l)>0):
        for i in range(0,len(jbr_d_l)):
            # one fresh defaultdict per slot so groups never share state
            p = {};
            for k in range(1,7):
                p[k] = defaultdict(lambda: 0.0);
            jobroles_d[jbr_d_l[i]] = [int(0),[],p];
    if (len(jbr_l)>0):
        for i in range(0,len(jbr_l)):
            # count each user at most once per jobrole group
            if jbr_l[i][0] not in jobroles_d[jbr_l[i][1]][1]:
                jobroles_d[jbr_l[i][1]][0] += 1;
                l = jobroles_d[jbr_l[i][1]][1];
                l.append(jbr_l[i][0]);
                jobroles_d[jbr_l[i][1]][1] = l;
    for key in jobroles_d:
        # convert the member count into the group weight 1/count
        if (jobroles_d[key][0]>0):
            jobroles_d[key][0] = float(1.00/jobroles_d[key][0]);
    b = datetime.datetime.now();
    #print "Fine calcolo Jobroles";
    #print (b-a);
    return jobroles_d;
def Fos_list(c):
    """Return [user_id, fos] int pairs from the us_fos table."""
    c.execute('select * from us_fos')
    pairs = []
    for row in c:
        pairs.append([int(row['user_id']), int(row['fos'])])
    return pairs
def Fos_Dist_list(c):
    """Return the distinct fos (field of study) ids as a list of ints."""
    c.execute('select DISTINCT fos as f from us_fos')
    return [int(row['f']) for row in c]
"""
# Versione senza divisione temporale
def Fos_d_creation(c,fos_l,items_upop_score_d):
#print "Inizio calcolo Fos";
a = datetime.datetime.now();
fos_d = {};
fos_di_l = Fos_Dist_list(c);
items_upop_sc_d = defaultdict(lambda: 0);
if (len(fos_di_l)>0):
for i in range(0,len(fos_di_l)):
fos_d[fos_di_l[i]] = [int(0),[],items_upop_sc_d.copy()];
if (len(fos_l)>0):
for i in range(0,len(fos_l)):
if fos_l[i][0] not in fos_d[fos_l[i][1]][1]:
fos_d[fos_l[i][1]][0] += 1;
l = fos_d[fos_l[i][1]][1];
l.append(fos_l[i][0]);
fos_d[fos_l[i][1]][1] = l;
for key in fos_d:
if (fos_d[key][0]>0):
fos_d[key][0] = float(1.00/fos_d[key][0]);
b = datetime.datetime.now();
#print "Fine calcolo Fos";
#print (b - a);
return fos_d;
"""
def Fos_d_creation(c,fos_l):
    """Build {fos_id: [weight, [user_ids], slot_scores]}.

    Mirrors Jobroles_d_creation: weight ends up as 1/len(user_ids) (0 for
    empty groups) and slot_scores maps slots 1..6 to defaultdict(float)
    accumulators.
    """
    #print "Inizio calcolo Fos";
    a = datetime.datetime.now();
    fos_d = {};
    fos_di_l = Fos_Dist_list(c);
    if (len(fos_di_l)>0):
        for i in range(0,len(fos_di_l)):
            # one fresh defaultdict per slot so groups never share state
            p = {};
            for k in range(1,7):
                p[k] = defaultdict(lambda: 0.0);
            fos_d[fos_di_l[i]] = [int(0),[],p];
    if (len(fos_l)>0):
        for i in range(0,len(fos_l)):
            # count each user at most once per fos group
            if fos_l[i][0] not in fos_d[fos_l[i][1]][1]:
                fos_d[fos_l[i][1]][0] += 1;
                l = fos_d[fos_l[i][1]][1];
                l.append(fos_l[i][0]);
                fos_d[fos_l[i][1]][1] = l;
    for key in fos_d:
        # convert the member count into the group weight 1/count
        if (fos_d[key][0]>0):
            fos_d[key][0] = float(1.00/fos_d[key][0]);
    b = datetime.datetime.now();
    #print "Fine calcolo Fos";
    #print (b - a);
    return fos_d;
def Tag_Not_used(c):
    """Return max(tag)+1: a tag id not used by any existing non-empty tag."""
    c.execute('select max(tag) as t from it_tags where tag != ""')
    unused = 0
    for row in c:
        unused = int(row['t'])
    return unused + 1
def Tags_Dist_list(c,n_u_t):
    """Return the distinct non-empty tag ids, plus the 'not used' id n_u_t."""
    c.execute('select DISTINCT tag as t from it_tags')
    tags = [int(row['t']) for row in c if str(row['t']) != '']
    tags.append(n_u_t)
    return tags
def Tags_list(c,n_u_t):
    """Return [item_id, tag] pairs; empty tags are mapped to the id n_u_t."""
    c.execute('select * from it_tags')
    pairs = []
    for row in c:
        tag = n_u_t if str(row['tag']) == '' else int(row['tag'])
        pairs.append([int(row['item_id']), tag])
    return pairs
def Tags_d_creation(c,tags_l,n_u_t,nr_items):
    """Build {tag_id: [count, [item_ids], idf]} over the it_tags table.

    count is the number of distinct items carrying the tag and idf is
    log(nr_items/count) (left at 0.0 for empty groups). Despite the local
    name nr_users_group, the grouped entities here are item ids.
    """
    #print "Inizio calcolo Tags";
    a = datetime.datetime.now();
    tags_d = {};
    tags_di_l = Tags_Dist_list(c,n_u_t);
    if (len(tags_di_l)>0):
        for i in range(0,len(tags_di_l)):
            tf_idf_sc = float(0.0);
            tags_d[tags_di_l[i]] = [int(0),[],tf_idf_sc];
    if (len(tags_l)>0):
        for i in range(0,len(tags_l)):
            # count each item at most once per tag group
            if tags_l[i][0] not in tags_d[tags_l[i][1]][1]:
                tags_d[tags_l[i][1]][0] += 1;
                l = tags_d[tags_l[i][1]][1];
                l.append(tags_l[i][0]);
                tags_d[tags_l[i][1]][1] = l;
    for tag_id in tags_d:
        nr_users_group = tags_d[tag_id][0];
        if (nr_users_group > 0):
            # inverse document frequency of the tag across all items
            idf = math.log(nr_items/nr_users_group);
            tags_d[tag_id][2] = idf;
    b = datetime.datetime.now();
    #print "Fine calcolo Tags";
    #print (b - a);
    return tags_d;
def Title_Not_used(c):
    """Return max(title)+1: a title id not used by any non-empty title."""
    c.execute('select max(title) as t from it_titles where title != ""')
    unused = 0
    for row in c:
        unused = int(row['t'])
    return unused + 1
def Titles_Dist_list(c,n_u_t):
    """Return the distinct non-empty title ids, plus 0 as the 'no title' id.

    NOTE(review): unlike Tags_Dist_list, the placeholder appended is 0, not
    n_u_t (which is unused here); Titles_list also maps missing titles to 0,
    so the pair is internally consistent — confirm the intent.
    """
    c.execute('select DISTINCT title as t from it_titles')
    titles = [int(row['t']) for row in c if str(row['t']) != '']
    titles.append(0)
    return titles
def Titles_list(c,n_u_t):
    """Return [item_id, title] pairs; empty titles are mapped to 0."""
    c.execute('select * from it_titles')
    pairs = []
    for row in c:
        title = 0 if str(row['title']) == '' else int(row['title'])
        pairs.append([int(row['item_id']), title])
    return pairs
def Titles_d_creation(c,titles_l,n_u_t,nr_items):
    """Build {title_id: [count, [item_ids], idf]} over the it_titles table.

    Mirrors Tags_d_creation: count is the number of distinct items with
    the title and idf is log(nr_items/count) (0.0 for empty groups).
    """
    #print "Inizio calcolo Titles";
    a = datetime.datetime.now();
    titles_d = {};
    t_di_l = Titles_Dist_list(c,n_u_t);
    if (len(t_di_l)>0):
        for i in range(0,len(t_di_l)):
            tf_idf_sc = float(0.0);
            titles_d[t_di_l[i]] = [int(0),[],tf_idf_sc];
    if (len(titles_l)>0):
        for i in range(0,len(titles_l)):
            # count each item at most once per title group
            if titles_l[i][0] not in titles_d[titles_l[i][1]][1]:
                titles_d[titles_l[i][1]][0] += 1;
                l = titles_d[titles_l[i][1]][1];
                l.append(titles_l[i][0]);
                titles_d[titles_l[i][1]][1] = l;
    for title_id in titles_d:
        nr_users_group = titles_d[title_id][0];
        if (nr_users_group > 0):
            # inverse document frequency of the title across all items
            idf = math.log(nr_items / nr_users_group);
            titles_d[title_id][2] = idf;
    b = datetime.datetime.now();
    #print "Fine calcolo Titles";
    #print (b - a);
    return titles_d;
|
python
|
import logging
import os
import shutil
import numpy as np
import torch
from pytorch_metric_learning.utils import common_functions as pml_cf
from sklearn.model_selection import train_test_split
from torchmetrics.functional import accuracy as tmf_accuracy
from ..adapters import Finetuner
from ..containers import Models, Optimizers
from ..datasets import DataloaderCreator, SourceDataset
from ..models import Discriminator
from ..utils import common_functions as c_f
from ..utils.savers import Saver
from .accuracy_validator import AccuracyValidator
from .base_validator import BaseValidator
from .score_history import ScoreHistory
class DeepEmbeddedValidator(BaseValidator):
    """
    Implementation of
    [Towards Accurate Model Selection in Deep Unsupervised Domain Adaptation](http://proceedings.mlr.press/v97/you19a.html)
    """
    def __init__(
        self,
        temp_folder,
        layer="features",
        num_workers=0,
        batch_size=32,
        error_fn=None,
        error_layer="logits",
        framework_cls=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # temp_folder holds checkpoints of the internal domain classifiers
        # trained in get_weights (deleted there after use).
        self.temp_folder = temp_folder
        self.layer = layer
        self.num_workers = num_workers
        self.batch_size = batch_size
        # per-sample loss used for the validation error (unreduced CE by default)
        self.error_fn = c_f.default(
            error_fn, torch.nn.CrossEntropyLoss(reduction="none")
        )
        self.error_layer = error_layer
        self.framework_cls = framework_cls
        if self.framework_cls is None:
            # imported lazily so ignite is only required when actually used
            from ..frameworks.ignite import Ignite
            self.framework_cls = Ignite
        # diagnostics populated by compute_score and exposed via recording
        self.D_accuracy_val = None
        self.D_accuracy_test = None
        self.mean_error = None
        self._DEV_recordable = ["D_accuracy_val", "D_accuracy_test", "mean_error"]
        pml_cf.add_to_recordable_attributes(self, list_of_names=self._DEV_recordable)
    def compute_score(self, src_train, src_val, target_train):
        # silence logging while the internal domain classifiers train
        init_logging_level = c_f.LOGGER.level
        c_f.LOGGER.setLevel(logging.WARNING)
        weights, self.D_accuracy_val, self.D_accuracy_test = get_weights(
            src_train[self.layer],
            src_val[self.layer],
            target_train[self.layer],
            self.num_workers,
            self.batch_size,
            self.temp_folder,
            self.framework_cls,
        )
        error_per_sample = self.error_fn(src_val[self.error_layer], src_val["labels"])
        # DEV risk over the importance-weighted per-sample errors
        output = get_dev_risk(weights, error_per_sample[:, None])
        self.mean_error = torch.mean(error_per_sample).item()
        c_f.LOGGER.setLevel(init_logging_level)
        # negated so that a smaller estimated risk yields a larger score
        return -output
    def extra_repr(self):
        x = super().extra_repr()
        x += f"\n{c_f.extra_repr(self, self._DEV_recordable)}"
        return x
#########################################################################
#### ADAPTED FROM https://github.com/thuml/Deep-Embedded-Validation #####
#########################################################################
def get_dev_risk(weight, error):
    """
    Return the DEV risk estimate (a scalar): the mean importance-weighted
    error, variance-reduced by using the weights as a control variate.

    :param weight: shape [N, 1], the importance weight for N source samples in the validation set
    :param error: shape [N, 1], the error value for each source sample in the validation set
    (typically 0 for correct classification and 1 for wrong classification)
    """
    # Inputs must be non-negative (zero is allowed despite the message wording).
    if torch.any(weight < 0) or torch.any(error < 0):
        raise ValueError("weights and errors must be positive")
    weight = pml_cf.to_numpy(weight)
    error = pml_cf.to_numpy(error)
    N, d = weight.shape
    _N, _d = error.shape
    assert N == _N and d == _d, "dimension mismatch!"
    weighted_error = weight * error
    # off-diagonal entry = covariance between weighted error and weight
    cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1]
    var_w = np.var(weight, ddof=1)
    # control-variate coefficient; +1e-6 guards against zero variance
    eta = -cov / (var_w + 1e-6)
    # the trailing "- eta" uses that importance weights average to 1 in
    # expectation (per the DEV paper referenced on the class above)
    return np.mean(weighted_error) + eta * np.mean(weight) - eta
def get_weights(
    source_feature,
    validation_feature,
    target_feature,
    num_workers,
    batch_size,
    temp_folder,
    framework_cls,
):
    """
    Train source-vs-target domain classifiers over a grid of weight decays,
    keep the most accurate one, and use its predictions on the validation
    features to compute importance weights.

    :param source_feature: shape [N_tr, d], features from training set
    :param validation_feature: shape [N_v, d], features from validation set
    :param target_feature: shape [N_te, d], features from test set
    :return: (weights of shape [N_v, 1], best domain-classifier validation
        accuracy, domain-classifier accuracy on the validation features)
    """
    device = source_feature.device
    source_feature = pml_cf.to_numpy(source_feature)
    validation_feature = pml_cf.to_numpy(validation_feature)
    target_feature = pml_cf.to_numpy(target_feature)
    N_s, d = source_feature.shape
    N_t, _d = target_feature.shape
    source_feature = source_feature.copy()
    target_feature = target_feature.copy()
    # label source samples 1 and target samples 0 for the domain classifier
    all_feature = np.concatenate((source_feature, target_feature))
    all_label = np.asarray([1] * N_s + [0] * N_t, dtype=np.int64)
    (
        feature_for_train,
        feature_for_test,
        label_for_train,
        label_for_test,
    ) = train_test_split(all_feature, all_label, train_size=0.8)
    train_set = SourceDataset(
        pml_cf.EmbeddingDataset(feature_for_train, label_for_train)
    )
    val_set = SourceDataset(pml_cf.EmbeddingDataset(feature_for_test, label_for_test))
    # weight-decay grid to search over; one classifier is trained per decay
    decays = [1e-1, 3e-2, 1e-2, 3e-3, 1e-3, 3e-4, 1e-4, 3e-5, 1e-5]
    val_acc, trainers, savers, folders = [], [], [], []
    epochs = 100
    patience = 2
    for i, decay in enumerate(decays):
        torch.cuda.empty_cache()
        curr_folder = os.path.join(temp_folder, f"DeepEmbeddedValidation{i}")
        # identity trunk + 2-way discriminator head over the raw features
        models = Models(
            {
                "G": torch.nn.Identity(),
                "C": Discriminator(d, h=d, out_size=2).to(device),
            }
        )
        optimizers = Optimizers(
            (torch.optim.Adam, {"lr": 0.001, "weight_decay": decay})
        )
        trainer = Finetuner(models=models, optimizers=optimizers)
        validator = AccuracyValidator(
            torchmetric_kwargs={"average": "macro", "num_classes": 2}
        )
        validator = ScoreHistory(validator)
        saver = Saver(folder=curr_folder)
        trainer = framework_cls(
            trainer, validator=validator, saver=saver, with_pbars=False
        )
        datasets = {"train": train_set, "src_val": val_set}
        bs = int(np.min([len(train_set), len(val_set), batch_size]))
        acc, _ = trainer.run(
            datasets,
            dataloader_creator=DataloaderCreator(
                num_workers=num_workers, batch_size=bs
            ),
            max_epochs=epochs,
            validation_interval=1,
            patience=patience,
        )
        val_acc.append(acc)
        trainers.append(trainer)
        savers.append(saver)
        folders.append(curr_folder)
    torch.cuda.empty_cache()
    # pick the classifier that separated the domains best
    D_accuracy_val = max(val_acc)
    index = val_acc.index(D_accuracy_val)
    # validation features are all source, hence labels of 1
    labels = torch.ones(len(validation_feature), dtype=int)
    validation_set = SourceDataset(pml_cf.EmbeddingDataset(validation_feature, labels))
    trainer, saver = trainers[index], savers[index]
    saver.load_adapter(trainer.adapter, "best")
    bs = min(len(validation_set), batch_size)
    dataloader = torch.utils.data.DataLoader(
        validation_set, num_workers=num_workers, batch_size=bs
    )
    domain_out = trainer.get_all_outputs(dataloader, "val")
    domain_out = domain_out["val"]["preds"]
    # importance weight = P(target)/P(source), rescaled by the size ratio
    weights = (domain_out[:, :1] / domain_out[:, 1:]) * (float(N_s) / N_t)
    # clean up the per-decay checkpoint folders
    [shutil.rmtree(f) for f in folders]
    D_accuracy_test = tmf_accuracy(domain_out, labels.to(domain_out.device)).item()
    return weights, D_accuracy_val, D_accuracy_test
|
python
|
#!/usr/bin/env python
# $Id$
""" Abstract base class for driver classes"""
import exceptions
class DriverError(exceptions.Exception):
    """Exception type raised by Driver implementations (Python 2 style)."""
    def __init__(self, arg):
        exceptions.Exception.__init__(self,arg)
class Driver:
    """Abstract base class for device drivers: every operation raises
    NotImplementedError and must be overridden by concrete subclasses.
    The method names (rewind/skipfm/verify_label) suggest a tape-style
    device — confirm against the concrete drivers."""
    # seconds a subclass should wait after mounting before I/O (0 = none)
    mount_delay = 0
    def fileno(self):
        raise NotImplementedError
    def tell(self):
        raise NotImplementedError
    def open(self, device, mode,retry_count=10):
        raise NotImplementedError
    def flush(self, device):
        raise NotImplementedError
    def close(self):
        raise NotImplementedError
    def rewind(self):
        raise NotImplementedError
    def seek(self, where, eot_ok=0):
        raise NotImplementedError
    def skipfm(self, n):
        raise NotImplementedError
    def get_status(self):
        raise NotImplementedError
    def verify_label(self, volume_label, mode, expected_length=80):
        raise NotImplementedError
    def set_mode(self, density=None, compression=None, blocksize=None):
        raise NotImplementedError
    def rates(self):
        raise NotImplementedError
    def get_cleaning_bit(self):
        # default: report "no cleaning needed"; subclasses may override
        return 0
|
python
|
#
# Copyright (c) 2015-2016 Erik Derr [derr@cs.uni-saarland.de]
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#!/usr/bin/python
#
# Crawler for libraries hosted at mvn central
# Retrieves jar|aar files along with some meta data
import json
import urllib2
import datetime
import os
import errno
import zipfile
import traceback
from retrying import retry # may require "pip install retrying"
## functions ##
def unix2Date(unixTime):
unixTime = int(str(unixTime)[:-3])
return datetime.datetime.fromtimestamp(unixTime).strftime('%d.%m.%Y')
def make_sure_path_exists(path):
    """Create path (and any missing parents); ignore 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
def write_library_description(fileName, libName, category, version, date, comment):
    """Write an XML descriptor for one library version to fileName,
    creating the parent directory if needed."""
    make_sure_path_exists(os.path.dirname(fileName))
    # write lib description in xml format
    with open(fileName, "w") as desc:
        desc.write("<?xml version=\"1.0\"?>\n")
        desc.write("<library>\n")
        desc.write(" <!-- library name -->\n")
        desc.write(" <name>{}</name>\n".format(libName))
        desc.write("\n")
        desc.write(" <!-- Advertising, Analytics, Android, SocialMedia, Cloud, Utilities -->\n")
        desc.write(" <category>{}</category>\n".format(category))
        desc.write("\n")
        desc.write(" <!-- optional: version string -->\n")
        desc.write(" <version>{}</version>\n".format(version))
        desc.write("\n")
        desc.write(" <!-- optional: date (format: DD/MM/YYYY) -->\n")
        desc.write(" <releasedate>{}</releasedate>\n".format(date))
        desc.write("\n")
        desc.write(" <!-- optional: comment -->\n")
        desc.write(" <comment>{}</comment>\n".format(comment))
        desc.write("</library>\n")
# BUG FIX: the previous decorator used the `retry` package's signature
# (tries=/delay=/backoff=), which the `retrying` package imported above does
# not accept — the author's own TODO noted it did not work. `retrying`
# configures via keyword arguments instead.
@retry(
    retry_on_exception=lambda e: isinstance(e, urllib2.URLError),
    stop_max_attempt_number=3,
    wait_fixed=3000,
)
def urlopen_with_retry(URL):
    """Open URL, retrying up to 3 times (3s apart) on URLError."""
    return urllib2.urlopen(URL)
def downloadFile(targetDir, groupid, artefactid, version, filetype):
    """Download one artifact from maven central into targetDir.

    Returns 0 on success, 1 on any error. For "aar" files the contained
    classes.jar is additionally extracted next to the archive."""
    make_sure_path_exists(os.path.dirname(targetDir + "/"))
    # assemble download URL
    baseURL = "http://search.maven.org/remotecontent?filepath="
    artefactid_r = artefactid.replace(".","/")
    groupid_r = groupid.replace(".","/")
    URL = baseURL + groupid_r + "/" + artefactid_r + "/"
    # sometimes it just returns the type "bundle", we then access the jar file
    if filetype == "bundle":
        filetype = "jar"
    fileName = artefactid_r + "-" + version + "." + filetype
    URL = URL + version + "/" + fileName
    # retrieve and save file
    targetFile = targetDir + "/" + fileName
    try:
        libFile = urllib2.urlopen(URL)
        with open(targetFile,'wb') as output:
            output.write(libFile.read())
        # if filetype is aar unzip classes.jar (since WALA currently does not handle aar's directly)
        if filetype == "aar":
            fh = open(targetFile, 'rb')
            z = zipfile.ZipFile(fh)
            for f in z.namelist():
                if f == "classes.jar":
                    z.extract(f, targetDir)
            fh.close()
        return 0
    except urllib2.HTTPError as e:
        print 'HTTPError = ' + str(e.code)
        return 1
    except urllib2.URLError as e:
        print 'URLError = ' + str(e.reason)
        return 1
    except Exception, excp:
        print 'Download failed (' + str(excp) + ')'
        return 1
def updateLibrary(libName, category, comment, groupId, artefactId):
    """Fetch mvn-central metadata for one library and download every version
    not already present under rootDir/category/libName/."""
    # replace all blanks with dash
    libName = libName.replace(" ", "-")
    print " # check library " + libName + " [" + category + "] (g:\"" + groupId + "\" AND a:\"" + artefactId + "\")"
    baseDirName = rootDir + category + "/" + libName + "/"
    dir = os.path.dirname(baseDirName)
    make_sure_path_exists(dir);
    # Assemble mvn central search URL and retrieve meta data
    try:
        mvnSearchURL = "http://search.maven.org/solrsearch/select?q=g:%22" + groupId + "%22+AND+a:%22" + artefactId + "%22&rows=100&core=gav"
        response = urllib2.urlopen(mvnSearchURL)
        data = json.loads(response.read())
    except urllib2.URLError, e:
        print 'URLError = ' + str(e.reason)
        return
    except Exception, excp:
        print 'Could not retrieve meta data for ' + libName + ' [SKIP] (' + str(excp) + ')'
        return
    # DEBUG: pretty print json
    #print json.dumps(data, indent=4, sort_keys=True)
    #print
    numberOfVersions = data["response"]["numFound"]
    print "   - retrieved meta data for " + str(numberOfVersions) + " versions:"
    numberOfUpdates = 0
    if numberOfVersions > 0:
        for version in data["response"]["docs"]:
            # skip lib version if already existing
            if not os.path.isfile(baseDirName + "/" + version["v"] + "/" + libDescriptorFileName):
                numberOfUpdates += 1
                date = unix2Date(version["timestamp"])
                targetDir = baseDirName + version["v"]
                print "    - update version: {}   type: {}   date: {}  target-dir: {}".format(version["v"], version["p"], date, targetDir)
                result = downloadFile(targetDir, groupId, artefactId, version["v"], version["p"])
                if result == 0:
                    # write lib description
                    fileName = targetDir + "/" + "library.xml"
                    write_library_description(fileName, libName, category, version["v"], date, comment)
    if numberOfUpdates == 0:
        print "   -> all versions up-to-date"
## Main functionality ##
inputFile = "libraries-ILC.json"
libDescriptorFileName = "library.xml"
# NOTE(review): Windows-style path mixing separators; "\g" and "\l" are
# literal backslashes here (not escapes) and "gradute" looks like a typo —
# confirm the intended directory before running on another machine.
rootDir = "E:\gradute\libs-ILC/" ### change this directory to your lib-sdks dir ###
print "== mvn central crawler =="
# load iterate over lib json
with open(inputFile) as ifile:
    data = json.load(ifile)
    # update each lib
    for lib in data["libraries"]:
        updateLibrary(lib["name"], lib["category"], lib["comment"], lib["groupid"], lib["artefactid"])
|
python
|
def train_interupter():
    """Poll 'train_interupter.ini' for an interrupt request.

    Flag '0' means keep training (returns False); '1' means interrupt once
    (returns True and resets the flag to '0'); anything else raises."""
    with open('train_interupter.ini', 'r', encoding='utf-8') as f:
        flag = f.read().strip()
    if flag == '0':
        return False
    if flag == '1':
        # consume the interrupt so the next poll resumes normally
        with open('train_interupter.ini', 'w', encoding='utf-8') as f:
            f.write('0')
        return True
    raise ValueError('Wrong flag value.')
|
python
|
# The MIT License (MIT)
# Copyright (c) 2021 Jonah Yolles-Murphy (TG-Techie)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import builtins
from typing import *
__version__ = "0.1.0"
T = TypeVar("T")
Sliceable = Union["Slice[T]", MutableSequence[T]]
class _SliceConstructor(Generic[T]):
    """
    An intermediate constructor that holds the sequence to be sliced and allows for
    a more flexible `.slice(...)` or `.slice[...]` syntax.
    """
    __slots__ = {"_seq"}
    def __init__(self, seq: Sliceable) -> None:
        self._seq = seq
    def __getitem__(self, s: Union[int, builtins.slice]) -> "Slice[T]":
        # allow single item slicing with Slice(...)[n] syntax
        if isinstance(s, int) or hasattr(s, "__index__"):
            index = s.__index__()
            s = builtins.slice(index, index + 1)
        # only contiguous slices are supported
        assert (
            s.step is None
        ), f"slicing cannot be non-contiguous (got `{s.step!r}` for step)"
        seq = self._seq
        start = s.start
        stop = s.stop
        # normalize negative indices by wrapping around the sequence length;
        # the while-loops tolerate values below -len(seq) (multiple wraps)
        if start is None:
            start = 0
        while start < 0:
            start += len(seq)
        if stop is None:
            stop = len(seq)
        while stop < 0:
            stop += len(seq)
        return self(
            start=start,
            length=stop - start,
        )
    def __call__(self, *, length, start) -> "Slice[T]":
        # delegate to Slice with both keywords, which triggers real construction
        return Slice(self._seq, start=start, length=length)
class Slice(Generic[T]):
    """
    A more tradition slice of sequences where the created slice mutates the sliced object.
    When using a Slice to mutate the base Sequence the Slice assumes the base will not change size
    ex:
    ```
    ls = [0, 3, -1, 1, 4]
    slc = Slice(ls)[1:4]
    slc[0] = 1
    slc[2] = 3
    assert ls == [0, 1, -1, 3, 4]
    ```
    By default, slicing Slice object will return whatever slicing the base object would normally be.
    ```
    assert type(slc[0:1]) == list  # evaluates as True
    ```
    If you want a "sub slice" use .slice to make a further slice
    ```
    sub = slc.slice[1:2]
    sub[0] = 2
    assert ls == [0, 1, 2, 3, 4]
    ```
    """
    Self = Union["Slice"]
    _seq: Sliceable
    _start: int
    _length: int
    _constructor: Optional[_SliceConstructor[T]]
    __slots__ = {"_seq", "_start", "_length", "_constructor"}
    def __new__(
        cls: Type[Self],
        seq: Sliceable,
        start=None,
        length=None,
    ):
        # Slice(seq) with no start/length returns a _SliceConstructor so the
        # Slice(seq)[a:b] syntax works; both keywords make a real instance.
        if start is not None and length is not None:
            return super(Slice, cls).__new__(cls)  # type: ignore
        elif start is None and length is None:
            return _SliceConstructor(seq)
        else:
            raise ValueError(
                f"{cls.__name__} cannot be called with only one of start= and length=, "
                f"got only {'start=' if start is not None else 'length='}"
            )
    def __init__(
        self,
        seq: Sliceable,
        *,
        start=None,  # type: ignore
        length=None,  # type: ignore
    ) -> None:
        # sanitize the inputs, as they must be integers
        start = start.__index__()
        length = length.__index__()
        # verify that the given start and length are in bounds
        if not length >= 1:
            raise ValueError(
                f"Slices cannot be created with lengths less than 1, got {length}"
            )
        if not (0 <= start < len(seq)):
            raise ValueError(f"start index out of bounds, got {start}")
        if not ((start + length) <= len(seq)):
            raise ValueError(
                f"slice out of bounds. starting at {start}, a slice of length {length} extends"
                f" past the end of the sliced sequence "
            )
        # if this is slicing a slice, instead driectly slice the original object
        if isinstance(seq, Slice):
            self._seq = seq._seq
            start += seq._start
        else:
            self._seq = seq
        self._start = start
        self._length = length
        # sanitization
        assert hasattr(start, "__index__"), (
            "start must be an integer, " + f"got {start!r}"
        )
        assert hasattr(length, "__index__"), (
            "length must be an integer, " + f"got {length!r}"
        )
        # this will be lazily evaluated later
        self._constructor = None
    @property
    def slice(self) -> _SliceConstructor[T]:
        # lazily create a constructor for sub slices of this slice
        constructor = self._constructor
        if constructor is None:
            self._constructor = constructor = _SliceConstructor(self)
        return constructor
    def _isinited(self) -> bool:
        # True once __init__ has assigned the slice bounds
        return hasattr(self, "_start") and hasattr(self, "_length")
    def __getitem__(self, index: Union[int, builtins.slice]):
        if isinstance(index, int) or hasattr(index, "__index__"):
            return self._get_item(index.__index__())  # type: ignore
        # idk to test for SupportsIndex in 3.6 yet
        elif isinstance(index, slice):
            return self._get_slice(index)
        else:
            raise TypeError(
                f"{type(self).__name__} indices must be integers or slices, "
                f"not {type(index).__name__}"
            )
    def __setitem__(self, index: Union[int, builtins.slice], value: T) -> None:
        # check for slice assignment as it is not yet supported
        # NOTE(review): this slice path crashes when index.start/stop is None
        # and performs no bounds checking against self._length — confirm
        # whether slice assignment is actually meant to be supported.
        if isinstance(index, builtins.slice):
            offset = self._start
            self._seq.__setitem__(
                builtins.slice(
                    index.start + offset,
                    index.stop + offset,
                    index.step,
                ),
                value,
            )
            return
        elif isinstance(index, int) or hasattr(index, "__index__"):
            index = self._bounds_check_and_mod(index)
            self._seq[self._start + index] = value
        else:
            raise NotImplementedError()
    def _get_slice(self, s: builtins.slice) -> MutableSequence[T]:
        # NOTE(review): suspected bugs here — self._start is bounds-checked
        # against this slice's own _length (raises whenever _start >= _length),
        # and s.start/s.stop of None raise TypeError. Probably should be
        # `offset = self._start` with None-handling; confirm before relying on
        # Slice[...] slicing beyond the tested cases.
        offset = self._bounds_check_and_mod(self._start)
        stop = s.stop % self._length
        return self._seq[s.start + offset : stop + offset : s.step]
    def _get_item(self, index: int) -> T:
        # check that the index is in range assuming the base sequence has not changed
        index = self._bounds_check_and_mod(index)
        return self._seq[self._start + index]
    def __len__(self) -> int:
        assert self._isinited()
        return self._length
    def __iter__(self) -> Generator[T, None, None]:
        # yield the viewed elements directly from the base sequence
        seq = self._seq
        for index in range(self._start, self._start + self._length):
            yield seq[index]
        else:
            # NOTE(review): dead code — a for/else `return None` in a
            # generator is the implicit behavior anyway.
            return None
    def __repr__(self) -> str:
        return f"${self._seq[self._start : self._start+self._length]}"
    def _bounds_check_and_mod(self, index: int) -> int:
        # raise for indices past the end; wrap negative indices into range
        if index >= self._length:
            raise IndexError(
                f"Slice index out of range, got [{index}] in slice of length {self._length}"
            )
        elif index < 0:
            index %= self._length
        else:
            pass
        return index
    def sort(self, **kwargs) -> None:
        # sort only the viewed window, writing results back through __setitem__
        for index, value in enumerate(sorted(self, **kwargs)):
            self[index] = value
if __name__ == "__main__":
    # Inline self-tests exercising the public Slice behavior.
    # test basic sicing
    ls = [0, 3, -1, 1, 4]
    slc = Slice(ls)[1:4]
    slc[0] = 1
    slc[2] = 3
    assert ls == [0, 1, -1, 3, 4]
    # test sub-slicing
    sub = slc.slice[1:2]
    sub[0] = 2
    assert ls == [0, 1, 2, 3, 4]
    # test slicing types
    ls = [*range(8)]
    # test default start and stop
    slc = Slice(ls)[:]
    assert [*slc] == ls
    # test negative end
    slc = Slice(ls)[0:-1]
    assert [*slc] == ls[0:-1]
    # test negative start
    slc = Slice(ls)[-8:]
    assert [*slc] == ls[-8:]
    # test slice sorting
    ls = [0, 4, 3, 2, 1, 5]
    slc = Slice(ls)[1:-1]
    assert [*slc] == [4, 3, 2, 1]
    slc.sort()
    assert ls == [0, 1, 2, 3, 4, 5]
|
python
|
import argparse
import logging
from sqlalchemy.orm import Session
from ...db import yield_connection_from_env_ctx
from ..indices import update_installation_default_indices
from ..models import SlackOAuthEvent
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def upgrade_one(
    db_session: Session, bot_installation: SlackOAuthEvent
) -> SlackOAuthEvent:
    """Upgrade a single installation to version 2: create its default
    search indices, bump the version field, and commit."""
    update_installation_default_indices(db_session, bot_installation)
    bot_installation.version = 2
    db_session.add(bot_installation)
    db_session.commit()
    return bot_installation
def main(args: argparse.Namespace) -> None:
    """Upgrade every matching version-1 Slack installation to version 2.

    If args.workspace is set, only that team's installation is considered.
    """
    with yield_connection_from_env_ctx() as db_session:
        query = (
            db_session.query(SlackOAuthEvent)
            .filter(SlackOAuthEvent.version == 1)
            # BUG FIX: `SlackOAuthEvent.deleted is not False` was evaluated by
            # Python (a column object `is not` False is always True), so no SQL
            # filter on `deleted` was emitted at all. isnot() expresses the
            # written condition in SQL. NOTE(review): if the intent was
            # "only non-deleted installations", use .is_(False) instead.
            .filter(SlackOAuthEvent.deleted.isnot(False))
        )
        if args.workspace is not None:
            query = query.filter(SlackOAuthEvent.team_id == args.workspace)
        installations_for_upgrade = query.all()
        for bot_installation in installations_for_upgrade:
            logger.info(
                f"Upgrading installation {bot_installation.id} for team {bot_installation.team_id} "
                f"({bot_installation.team_name}) to version 2"
            )
            upgrade_one(db_session, bot_installation)
        logger.info("Done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Set up default search indices for fresh @bugout slack installations"
    )
    # optional: restrict the upgrade to a single workspace/team id
    parser.add_argument(
        "-w",
        "--workspace",
        required=False,
        type=str,
        default=None,
        help="ID for the bot installation",
    )
    args = parser.parse_args()
    main(args)
|
python
|
from extractors.blockextractor import BlockExtractor
from extractors.characterfactory import CharacterFactory
from extractors.emojiextractor import EmojiExtractor
from extractors.mathcollectionextractor import MathExtractor
from extractors.nerdextractor import NerdExtractor
if __name__ == "__main__":
    # Run every extractor in sequence. The character factory is shared by
    # the extractors that need Unicode character metadata.
    character_factory = CharacterFactory()
    EmojiExtractor().extract()
    BlockExtractor(character_factory).extract()
    MathExtractor(character_factory).extract()
    NerdExtractor().extract()
|
python
|
"""AnimeSuki Media models"""
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.text import slugify
from animesuki.core.models import ArtworkModel
from animesuki.core.utils import DatePrecision
from animesuki.history.models import HistoryModel
class Media(HistoryModel):
    """A single anime, manga or novel entry, including scheduling info."""

    class Type:
        # Top-level media categories.
        ANIME = 1
        MANGA = 2
        NOVEL = 3
        choices = (
            (ANIME, 'Anime'),
            (MANGA, 'Manga'),
            (NOVEL, 'Novel'),
        )

    class SubType:
        # Finer-grained classification, grouped per media type below.
        UNKNOWN = 0
        # Anime
        TV = 1
        OVA = 2
        MOVIE = 3
        WEB = 4
        SPECIAL = 5
        MUSIC = 6
        # Manga
        MANGA = 7
        MANHUA = 8
        MANHWA = 9
        WEB_MANGA = 10
        ONE_SHOT = 11
        DOUJIN = 12
        # Novel
        LIGHT_NOVEL = 13
        WEB_NOVEL = 14
        NOVEL = 15
        # Nested tuples render as grouped <optgroup> sections in Django forms.
        choices = (
            (UNKNOWN, 'Unknown'),
            ('Anime', (
                (TV, 'TV'),
                (OVA, 'OVA'),
                (MOVIE, 'Movie'),
                (WEB, 'Web'),
                (SPECIAL, 'Special'),
                (MUSIC, 'Music'),
            )),
            ('Manga', (
                (MANGA, 'Manga'),
                (MANHUA, 'Manhua'),
                (MANHWA, 'Manhwa'),
                (WEB_MANGA, 'Web Manga'),
                (ONE_SHOT, 'One Shot'),
                (DOUJIN, 'Doujin'),
            )),
            ('Novel', (
                (LIGHT_NOVEL, 'Light Novel'),
                (WEB_NOVEL, 'Web Novel'),
                (NOVEL, 'Novel'),
            ))
        )

    class Status:
        # AUTO means the display status is derived from start/end dates;
        # see get_status() below.
        AUTO = 1
        HIATUS = 2
        CANCELLED = 3
        choices = (
            (AUTO, 'Automatic'),
            (HIATUS, 'On Hiatus'),
            (CANCELLED, 'Cancelled')
        )

    class Season:
        WINTER = 1
        SPRING = 2
        SUMMER = 3
        FALL = 4
        choices = (
            (WINTER, 'Winter'),
            (SPRING, 'Spring'),
            (SUMMER, 'Summer'),
            (FALL, 'Fall')
        )

    title = models.CharField('title', max_length=250, blank=True)
    media_type = models.PositiveSmallIntegerField('type', choices=Type.choices, default=Type.ANIME)
    sub_type = models.PositiveSmallIntegerField('sub Type', choices=SubType.choices, default=SubType.UNKNOWN)
    status = models.PositiveSmallIntegerField('status', choices=Status.choices, default=Status.AUTO)
    is_adult = models.BooleanField('r-18', default=False)
    # episodes/duration apply to anime; volumes/chapters to manga and novels.
    episodes = models.IntegerField('episodes', null=True, blank=True)
    duration = models.IntegerField('duration', null=True, blank=True)
    volumes = models.IntegerField('volumes', null=True, blank=True)
    chapters = models.IntegerField('chapters', null=True, blank=True)
    # Dates carry a precision marker because release dates are often only
    # partially known (e.g. year only) — precision semantics come from
    # animesuki.core.utils.DatePrecision.
    start_date = models.DateField('start date', null=True, blank=True)
    start_precision = models.PositiveSmallIntegerField('precision', choices=DatePrecision.choices,
                                                       default=DatePrecision.FULL)
    end_date = models.DateField('end date', null=True, blank=True)
    end_precision = models.PositiveSmallIntegerField('precision', choices=DatePrecision.choices,
                                                     default=DatePrecision.FULL)
    season_year = models.IntegerField('season year', null=True, blank=True)
    season = models.PositiveSmallIntegerField('season', choices=Season.choices, null=True, blank=True)
    description = models.TextField('description', blank=True)
    synopsis = models.TextField('synopsis', blank=True)
    # Currently selected artwork; SET_NULL so removing artwork keeps the row.
    artwork_active = models.ForeignKey('MediaArtwork', related_name='media_artwork', on_delete=models.SET_NULL,
                                       null=True, blank=True, default=None)

    # Fields whose edits require moderation — presumably consumed by
    # HistoryModel; confirm against animesuki.history.models.
    HISTORY_MODERATE_FIELDS = ('title', 'media_type', 'sub_type', 'is_adult')

    def __str__(self):
        return self.title

    def get_status(self):
        """Return a human-readable status string.

        Explicit statuses (hiatus/cancelled) win; otherwise the status is
        derived from start/end dates relative to today's date.
        """
        if self.status != self.Status.AUTO:
            return self.get_status_display()
        status = {
            self.Type.ANIME: {
                'future': 'Not yet aired',
                'present': 'Currently airing',
                'past': 'Finished'
            },
            self.Type.MANGA: {
                'future': 'Not yet published',
                'present': 'Currently publishing',
                'past': 'Finished'
            },
        }
        # Novels share the manga wording.
        status[self.Type.NOVEL] = status[self.Type.MANGA]
        now = timezone.now().date()
        if self.end_date and self.end_date <= now:
            return status[self.media_type]['past']
        elif not self.start_date or self.start_date > now:
            # A missing start date counts as "not yet started".
            return status[self.media_type]['future']
        else:
            return status[self.media_type]['present']

    def get_absolute_url(self, view='media:detail'):
        """Build a URL of the form /<type-slug>/<pk>/<title-slug>."""
        return reverse(view, args=[slugify(self.get_media_type_display()), self.pk, slugify(self.title)])

    class Meta:
        db_table = 'media'
        verbose_name_plural = 'media'
class MediaArtwork(ArtworkModel):
    """Artwork images attached to a Media entry."""
    # PROTECT: a Media row with artwork cannot be deleted until the
    # artwork rows are removed first.
    media = models.ForeignKey(Media, on_delete=models.PROTECT)
    # Storage subdirectory for this model's files.
    ARTWORK_FOLDER = 'media'
    # (width, height, label) resize variants — presumably consumed by
    # ArtworkModel; confirm in animesuki.core.models.
    ARTWORK_SIZES = ((75, 75, 't75'), (150, 150, 't150'), (225, 225, 't225'), (450, 450, 't450'),
                     (292, 600, '292w'), (352, 800, '352w'), (438, 1000, '438w'),
                     (528, 1200, '528w'), (584, 1200, '584w'), (704, 1400, '704w'))

    def sub_folder(self):
        # Files are grouped per owning media's primary key.
        return self.media.pk

    class Meta:
        db_table = 'media_artwork'
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-12 07:41
from __future__ import unicode_literals
import bluebottle.files.fields
import bluebottle.utils.fields
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
    """Create the BudgetLine, Fundraiser and Reward models and align the
    money fields on Donation and Funding with the EUR-only currency setup.

    NOTE(review): the b'' byte-string defaults/choices are Python 2
    artifacts preserved from the generated migration; do not normalise
    them — migrations must stay byte-stable once applied.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('files', '0001_initial'),
        ('funding', '0007_auto_20190605_1639'),
    ]

    operations = [
        migrations.CreateModel(
            name='BudgetLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(default=b'', max_length=255, verbose_name='description')),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3)),
                ('amount', bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'budget line',
                'verbose_name_plural': 'budget lines',
            },
        ),
        migrations.CreateModel(
            name='Fundraiser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3)),
                ('amount', bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12, verbose_name='amount')),
                ('deadline', models.DateTimeField(null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'fundraiser',
                'verbose_name_plural': 'fundraisers',
            },
        ),
        migrations.CreateModel(
            name='Reward',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3)),
                ('amount', bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12, verbose_name='Amount')),
                ('title', models.CharField(max_length=200, verbose_name='Title')),
                ('description', models.CharField(max_length=500, verbose_name='Description')),
                ('limit', models.IntegerField(blank=True, help_text='How many of this rewards are available', null=True, verbose_name='Limit')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-project__created', 'amount'],
                'verbose_name': 'Gift',
                'verbose_name_plural': 'Gifts',
                'permissions': (('api_read_reward', 'Can view reward through the API'), ('api_add_reward', 'Can add reward through the API'), ('api_change_reward', 'Can change reward through the API'), ('api_delete_reward', 'Can delete reward through the API'), ('api_read_own_reward', 'Can view own reward through the API'), ('api_add_own_reward', 'Can add own reward through the API'), ('api_change_own_reward', 'Can change own reward through the API'), ('api_delete_own_reward', 'Can delete own reward through the API')),
            },
        ),
        migrations.AlterField(
            model_name='donation',
            name='amount',
            field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
        ),
        migrations.AlterField(
            model_name='donation',
            name='amount_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
        migrations.AlterField(
            model_name='funding',
            name='target',
            field=bluebottle.utils.fields.MoneyField(blank=True, currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=None, max_digits=12, null=True),
        ),
        migrations.AlterField(
            model_name='funding',
            name='target_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
        migrations.AddField(
            model_name='reward',
            name='activity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funding.Funding', verbose_name='Activity'),
        ),
        migrations.AddField(
            model_name='fundraiser',
            name='activity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fundraisers', to='funding.Funding', verbose_name='activity'),
        ),
        migrations.AddField(
            model_name='fundraiser',
            name='image',
            field=bluebottle.files.fields.ImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='files.Image'),
        ),
        migrations.AddField(
            model_name='fundraiser',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='funding_fundraisers', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='budgetline',
            name='activity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funding.Funding'),
        ),
    ]
|
python
|
from __future__ import annotations
import subprocess
import sys
def test_same_version():
    """Check the installed package metadata version matches ``cptk.__version__``."""
    proc = subprocess.run(
        [sys.executable, '-m', 'pip', 'show', 'cptk'],
        stdout=subprocess.PIPE,
        check=True,
        encoding='utf8',
    )
    # Parse the "Key: Value" lines emitted by `pip show` into a dict.
    metadata = {}
    for line in proc.stdout.split('\n'):
        key, _, value = line.partition(':')
        metadata[key] = value.strip()
    from cptk import __version__
    assert __version__ == metadata['Version']
|
python
|
# coding=utf-8 ##以utf-8编码储存中文字符
import jieba.analyse
import codecs,sys
import itertools
from work import match
from io import BufferedReader
from work import simplyParticiple
def Synonym():
    """Build a synonym lookup table from ``same_word.txt``.

    Each line of the file lists one group of synonyms separated by
    whitespace; every word in a group is mapped to the group's first
    word (its canonical form). Later lines overwrite earlier ones on
    key collisions, matching the original merge order.

    Returns:
        dict: mapping from each synonym to its canonical word.
    """
    dict1 = {}
    # FIX: the original opened the file and never closed it; use a
    # context manager so the handle is released even on error.
    with codecs.open("same_word.txt", "r", "utf-8") as file:
        lines = file.readlines()
    for line in lines:
        words = line.split()
        for word in words:
            dict1[word] = words[0]
    return dict1
def _synonym(txt):
    """Segment ``txt`` and replace each word with its canonical synonym.

    The text is segmented by ``simplyParticiple.participle`` (words are
    joined by "/"), then every word that appears in the synonym table is
    replaced by its canonical form; unknown words pass through unchanged.
    """
    segmented = simplyParticiple. participle(txt)
    words = segmented.split("/")
    synonyms = Synonym()
    # dict.get collapses the original if/else: fall back to the word itself.
    return [synonyms.get(word, word) for word in words]
def getkeyword(list_A, list_B):
    """Return the items of ``list_A`` that also occur in ``list_B``.

    Order and duplicates follow ``list_A`` (this matches the original
    nested-loop-with-break implementation).
    """
    return [item for item in list_A if item in list_B]
def combination(the_list):
    """Concatenate the string items of ``the_list`` into one string.

    Uses ``str.join``, which is linear, instead of the original repeated
    ``+=`` into an accumulator that also shadowed the builtin ``str``.
    """
    return "".join(the_list)
# list_keyword accumulates the knowledge-point keywords used for matching.
with open("final_keyword.txt", encoding="utf_8") as fp:
    list_keyword = []
    for lines in fp.readlines():
        lines = lines.split()
        # extend is in-place and linear, vs the original quadratic
        # `list_keyword = list_keyword + lines`.
        list_keyword.extend(lines)
# NOTE: the original also called fp.close() — redundant, since the
# `with` statement already closes the file; removed.
def result(txt):
    """Return the knowledge-point keywords found in ``txt``.

    The text is segmented and synonym-normalised by ``_synonym``, then
    intersected with the module-level ``list_keyword`` list.
    """
    matched_keywords = getkeyword(_synonym(txt), list_keyword)
    return matched_keywords
# c = result('进制的转换觉得有点难。')
# print(c)
|
python
|
from .common import (
AskHandler,
CommonHandler,
AskCommutativeHandler,
TautologicalHandler,
test_closed_group,
)
# Public API of this package: re-exported ask handlers from .common.
__all__ = [
    "AskHandler",
    "CommonHandler",
    "AskCommutativeHandler",
    "TautologicalHandler",
    "test_closed_group",
]
|
python
|
from matching_algorithm import matching_algorithm
import json
import copy
class top_trading_cycle(matching_algorithm):
    """Top-trading-cycle matching between two groups of agents.

    Both methods delegate to the base class ``match``; the deep copies
    prevent ``match`` from mutating the original preference data.
    NOTE(review): the final ``False`` flag's meaning is defined by
    ``matching_algorithm.match`` — confirm there.
    """

    def group_1_optimal(self):
        # Matching that favours group 1's preferences.
        return self.match(copy.deepcopy(self.group_1), copy.deepcopy(self.group_2), 'top_trading_cycle', False)

    def group_2_optimal(self):
        # Matching that favours group 2's preferences.
        return self.match(copy.deepcopy(self.group_2), copy.deepcopy(self.group_1), 'top_trading_cycle', False)
def get_top_trading_cycle(file_name):
    """Load preferences from a JSON file and run TTC for both groups.

    Returns:
        tuple: (group_1-optimal matching, group_2-optimal matching).
    """
    with open(file_name) as source:
        preferences = json.load(source)
    algorithm = top_trading_cycle(preferences, ('group_1', 'group_2'))
    return algorithm.group_1_optimal(), algorithm.group_2_optimal()
|
python
|
from setuptools import find_packages, setup
# Packaging configuration for the Skaak chess library.
setup(
    name="Skaak",
    packages=find_packages(include=["skaak"]),
    version="0.12.5",
    description="A Python Chess Library",
    author="George Munyoro",
    license="MIT",
    install_requires=[],  # no runtime dependencies
    setup_requires=["pytest-runner"],
    tests_require=["pytest==6.1.1"],
    test_suite="tests",
)
|
python
|
"""
Ticket numbers usually consist of an even number of digits. A ticket number is considered lucky if the sum of the first
half of the digits is equal to the sum of the second half.
Given a ticket number n, determine if it's lucky or not.
Example
For n = 1230, the output should be
solution(n) = true;
For n = 239017, the output should be
solution(n) = false.
"""
def solution(n):
    """Return True if ticket number ``n`` is lucky.

    A ticket is lucky when the digit sum of the first half of its digits
    equals the digit sum of the second half. For an odd digit count the
    trailing middle digit is ignored (half = len // 2), matching the
    original behaviour.
    """
    digits = [int(d) for d in str(n)]
    half = len(digits) // 2
    # Compare the halves directly — replaces the original's index loops
    # and the verbose if/else returning True/False literals.
    return sum(digits[:half]) == sum(digits[half:2 * half])
print(solution(239017))
|
python
|
import graphene
from ipam import filtersets, models
from netbox.graphql.scalars import BigInt
from netbox.graphql.types import BaseObjectType, OrganizationalObjectType, PrimaryObjectType
# Public GraphQL object types exported by this module.
__all__ = (
    'ASNType',
    'AggregateType',
    'FHRPGroupType',
    'FHRPGroupAssignmentType',
    'IPAddressType',
    'IPRangeType',
    'PrefixType',
    'RIRType',
    'RoleType',
    'RouteTargetType',
    'ServiceType',
    'VLANType',
    'VLANGroupType',
    'VRFType',
)
class ASNType(PrimaryObjectType):
    # Map asn to a custom BigInt scalar — presumably because 4-byte ASNs
    # exceed GraphQL's signed 32-bit Int range; confirm in netbox.graphql.scalars.
    asn = graphene.Field(BigInt)

    class Meta:
        model = models.ASN
        fields = '__all__'
        filterset_class = filtersets.ASNFilterSet
class AggregateType(PrimaryObjectType):
    # GraphQL type for IPAM Aggregate objects.
    class Meta:
        model = models.Aggregate
        fields = '__all__'
        filterset_class = filtersets.AggregateFilterSet


class FHRPGroupType(PrimaryObjectType):
    # GraphQL type for FHRP groups.
    class Meta:
        model = models.FHRPGroup
        fields = '__all__'
        filterset_class = filtersets.FHRPGroupFilterSet

    def resolve_auth_type(self, info):
        # Coerce a falsy auth_type (e.g. empty string) to None so the
        # GraphQL field resolves to null instead of "".
        return self.auth_type or None


class FHRPGroupAssignmentType(BaseObjectType):
    # GraphQL type for FHRP group-to-interface assignments.
    class Meta:
        model = models.FHRPGroupAssignment
        fields = '__all__'
        filterset_class = filtersets.FHRPGroupAssignmentFilterSet
class IPAddressType(PrimaryObjectType):
    # GraphQL type for IP addresses.
    class Meta:
        model = models.IPAddress
        fields = '__all__'
        filterset_class = filtersets.IPAddressFilterSet

    def resolve_role(self, info):
        # Coerce a falsy role (e.g. empty string) to None → GraphQL null.
        return self.role or None


class IPRangeType(PrimaryObjectType):
    # GraphQL type for IP ranges.
    class Meta:
        model = models.IPRange
        fields = '__all__'
        filterset_class = filtersets.IPRangeFilterSet

    def resolve_role(self, info):
        # Coerce a falsy role (e.g. empty string) to None → GraphQL null.
        return self.role or None
class PrefixType(PrimaryObjectType):
    # GraphQL type for prefixes.
    class Meta:
        model = models.Prefix
        fields = '__all__'
        filterset_class = filtersets.PrefixFilterSet


class RIRType(OrganizationalObjectType):
    # GraphQL type for Regional Internet Registries.
    class Meta:
        model = models.RIR
        fields = '__all__'
        filterset_class = filtersets.RIRFilterSet


class RoleType(OrganizationalObjectType):
    # GraphQL type for prefix/VLAN roles.
    class Meta:
        model = models.Role
        fields = '__all__'
        filterset_class = filtersets.RoleFilterSet


class RouteTargetType(PrimaryObjectType):
    # GraphQL type for route targets.
    class Meta:
        model = models.RouteTarget
        fields = '__all__'
        filterset_class = filtersets.RouteTargetFilterSet


class ServiceType(PrimaryObjectType):
    # GraphQL type for services.
    class Meta:
        model = models.Service
        fields = '__all__'
        filterset_class = filtersets.ServiceFilterSet


class VLANType(PrimaryObjectType):
    # GraphQL type for VLANs.
    class Meta:
        model = models.VLAN
        fields = '__all__'
        filterset_class = filtersets.VLANFilterSet


class VLANGroupType(OrganizationalObjectType):
    # GraphQL type for VLAN groups.
    class Meta:
        model = models.VLANGroup
        fields = '__all__'
        filterset_class = filtersets.VLANGroupFilterSet


class VRFType(PrimaryObjectType):
    # GraphQL type for VRFs.
    class Meta:
        model = models.VRF
        fields = '__all__'
        filterset_class = filtersets.VRFFilterSet
|
python
|
from utils import *
import matplotlib.pyplot as plt
# import matplotlib.colors
from sklearn.preprocessing import StandardScaler
from skimage.transform import resize
from PIL import Image
# Research/plotting script: loads precomputed QQE iteration snapshots for the
# face/glasses experiment, scatter-plots both classes per iteration, and
# evaluates the embedding. `os`, `np`, `load_variable` and
# `evaluate_embedding` come from the wildcard import of `utils` —
# TODO confirm against utils.
path_save = "./results/face_glasses_separation2/"
if not os.path.exists(path_save):
    os.makedirs(path_save)
# color_map = matplotlib.colors.hsv_to_rgb(plt.cm.hsv) # plt.cm.bwr #--> plt.cm.brg, plt.cm.hsv
# color_map = plt.cm.bwr
# NOTE(review): absolute Windows paths — this script only runs on the
# author's machine as-is.
path_1 = "C:/Users/benya/Desktop/my_PhD/QQE/codes/4_results/17_face_glasses_transform_inputSpace/run2_good/algorithm_files/class_" + str(0) + "/fuzzy_QQplot/"
X0 = load_variable(name_of_variable="X_matched_initial", path=path_1)
path_1 = "C:/Users/benya/Desktop/my_PhD/QQE/codes/4_results/17_face_glasses_transform_inputSpace/run2_good/algorithm_files/class_" + str(1) + "/fuzzy_QQplot/"
X1 = load_variable(name_of_variable="X_matched_initial", path=path_1)
# Columns are samples: stack both classes side by side and build labels.
X = np.column_stack((X0, X1))
y = [0]*X0.shape[1] + [1]*X1.shape[1]
y = np.asarray(y)
for i, plot_name in enumerate(["X_matched_iteration_0", "X_matched_iteration_20", "X_matched_iteration_30", "X_matched_iteration_10"]):
    # The first three snapshots belong to class 0; the last one to class 1.
    if i <= 2:
        class_index_of_plot = 0
    else:
        class_index_of_plot = 1
    path_1 = "C:/Users/benya/Desktop/my_PhD/QQE/codes/4_results/17_face_glasses_transform_inputSpace/run2_good/algorithm_files/class_" + str(class_index_of_plot) + "/fuzzy_QQplot/iterations_numpy/"
    X_class = load_variable(name_of_variable=plot_name, path=path_1)
    # Iteration 0 keeps the initial coordinates; later iterations replace
    # that class's columns with the snapshot.
    if i != 0:
        X[:, y==class_index_of_plot] = X_class
    # plt.scatter(X[0, :], X[1, :], c=y, cmap=color_map, edgecolors='k')
    markers = ["v", "o"]
    colors = ["r", "b"]
    for class_index in range(2):
        sample_of_this_class = X[:, y == class_index]
        # c = class_index * np.ones((sample_of_this_class.shape[1],))
        plt.scatter(sample_of_this_class[0, :], sample_of_this_class[1, :], s=30, color=colors[class_index], alpha=1.0, marker=markers[class_index])
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    # plt.show()
    plt.savefig(path_save + str(i) + ".png")
    plt.clf()
    plt.close()
    evaluate_embedding(embedding=X.T, labels=y, path_save_accuracy_of_test_data=path_save, k_list=[1, 2, 4, 8, 16], name=str(i))
|
python
|
from flask_apscheduler import APScheduler
from actions import *
from context import *
from config import Config
class Executor:
    """An Executor drives a pipeline composed of a sequence of actions
    that share a single context.
    """
    def __init__(self, config: Config, pipeline_name, pipeline):
        self.config = config
        self.pipeline_name = pipeline_name
        self.pipeline = pipeline
        self.__context = Context()
        from logger import Logger
        # Each Executor has its own log file
        self.logger = Logger("%s.log" % pipeline_name).get_logger()
    def start(self):
        # Placeholder — presumably overridden or scheduled externally;
        # confirm against callers.
        pass
    def get_context(self):
        """Return the context shared by all actions of this pipeline."""
        return self.__context
    def execute(self, args):
        """Run the pipeline once with the given arguments."""
        self.__on_execute(args)
    def __on_execute(self, args):
        """Walk the pipeline's action chain, executing each action in turn.

        Starts at the configured start action and follows each action's
        ``get_next()`` until the chain ends or an action has no config.
        """
        # self.logger.info(self.pipeline_name, self.pipeline)
        action_name = Config.get_start_action_name(self.pipeline)
        while action_name:
            action_config = Config.get_action_config(self.pipeline, action_name)
            if not action_config:
                break
            if 'type' not in action_config:
                print("No action type for ", action_name)
                exit(0)
            # FIX: the original assigned action_type twice on consecutive
            # lines; the duplicate assignment was removed.
            action_type = action_config['type']
            action = BaseAction.create_action(action_type, action_config)
            print(action_name, action)
            action.try_execute(self.get_context())
            action_name = action.get_next()
|
python
|
#!/usr/bin/env python
import os
try:
    import cplex
except ImportError:
    # cplex is an optional (commercial) dependency; fall back to None so
    # this module can still be imported — solver functions will then fail
    # at call time instead of import time.
    cplex = None
import numpy as np
from mapel.voting.metrics.inner_distances import hamming
# FOR SUBELECTIONS
def solve_lp_voter_subelection(election_1, election_2, metric_name='0'):
    """ILP solver for the voter subelection problem.

    Maximizes the number of matched voter pairs (N variables) subject to a
    one-to-one candidate matching (M variables); a voter pair may only be
    matched if, under the candidate matching, the positions of matched
    candidates differ by at most int(metric_name) in the two votes.

    Returns the objective value (number of matched voters), or None if the
    solver raised.
    """
    cp = cplex.Cplex()
    cp.parameters.threads.set(1)
    # OBJECTIVE FUNCTION: maximize the number of matched voter pairs.
    cp.objective.set_sense(cp.objective.sense.maximize)
    objective = []
    names = []
    for v1 in range(election_1.num_voters):
        for v2 in range(election_2.num_voters):
            names.append('N' + str(v1) + '_' + str(v2))
            objective.append(1.)
    cp.variables.add(obj=objective,
                     names=names,
                     types=[cp.variables.type.binary] * election_1.num_voters * election_2.num_voters)
    # FIRST CONSTRAINT FOR VOTERS: each voter of election_1 matched at most once.
    lin_expr = []
    for v1 in range(election_1.num_voters):
        ind = []
        for v2 in range(election_2.num_voters):
            ind.append('N' + str(v1) + '_' + str(v2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_2.num_voters))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['L'] * election_1.num_voters,
                              rhs=[1.0] * election_1.num_voters,
                              names=['C1_' + str(i) for i in range(election_1.num_voters)])
    # SECOND CONSTRAINT FOR VOTERS: each voter of election_2 matched at most once.
    lin_expr = []
    for v2 in range(election_2.num_voters):
        ind = []
        for v1 in range(election_1.num_voters):
            ind.append('N' + str(v1) + '_' + str(v2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_1.num_voters))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['L'] * election_2.num_voters,
                              rhs=[1.0] * election_2.num_voters,
                              names=['C2_' + str(i) for i in range(election_2.num_voters)])
    # ADD VARIABLES FOR CANDIDATES (M variables: candidate matching).
    names = []
    for c1 in range(election_1.num_candidates):
        for c2 in range(election_2.num_candidates):
            names.append('M' + str(c1) + '_' + str(c2))
    cp.variables.add(names=list(names),
                     types=[cp.variables.type.binary] * election_1.num_candidates * election_2.num_candidates)
    # FIRST CONSTRAINT FOR CANDIDATES: each candidate of election_1 matched exactly once.
    lin_expr = []
    for c1 in range(election_1.num_candidates):
        ind = []
        for c2 in range(election_2.num_candidates):
            ind.append('M' + str(c1) + '_' + str(c2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_2.num_candidates))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * election_1.num_candidates,
                              rhs=[1.0] * election_1.num_candidates,
                              names=['C3_' + str(i) for i in range(election_1.num_candidates)])
    # SECOND CONSTRAINT FOR CANDIDATES: each candidate of election_2 matched exactly once.
    lin_expr = []
    for c2 in range(election_2.num_candidates):
        ind = []
        for c1 in range(election_1.num_candidates):
            ind.append('M' + str(c1) + '_' + str(c2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_1.num_candidates))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * election_2.num_candidates,
                              rhs=[1.0] * election_2.num_candidates,
                              names=['C4_' + str(i) for i in range(election_2.num_candidates)])
    # MAIN CONSTRAINT FOR VOTES: a voter pair may be matched (N=1) only if
    # at least num_candidates of the matched candidate pairs are "close
    # enough" (position difference <= int(metric_name)) in the two votes.
    lin_expr = []
    for v1 in range(election_1.num_voters):
        for v2 in range(election_2.num_voters):
            ind = []
            val = []
            for c1 in range(election_1.num_candidates):
                for c2 in range(election_2.num_candidates):
                    ind.append('M' + str(c1) + '_' + str(c2))
                    if abs(election_1.potes[v1][c1] - election_2.potes[v2][c2]) <= int(metric_name):
                        val.append(1.)
                    else:
                        val.append(0.)
            ind.append('N' + str(v1) + '_' + str(v2))
            val.append(-election_1.num_candidates)
            lin_expr.append(cplex.SparsePair(ind=ind, val=val))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['G'] * election_1.num_voters * election_2.num_voters,
                              rhs=[0.0] * election_1.num_voters * election_2.num_voters,
                              names=['C5_' + str(i) for i in
                                     range(election_1.num_voters * election_2.num_voters)])
    # cp.write('new.lp')
    # SOLVE THE ILP
    cp.set_results_stream(None)
    try:
        cp.solve()
    # FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to Exception (cplex raises
    # CplexSolverError and related Exception subclasses).
    except Exception:
        print("Exception raised while solving")
        return
    objective_value = cp.solution.get_objective_value()
    return objective_value
def solve_lp_candidate_subelections(lp_file_name, election_1, election_2):
    """ILP solver for the candidate subelection problem.

    Writes the model in CPLEX LP format to ``lp_file_name`` (M: candidate
    matching, N: voter matching, P: pairwise order-agreement indicators),
    solves it, and returns the number of matched candidates, or None if
    the solver raised.

    NOTE(review): the second P constraint is written with a strict
    inequality (" > -1"), which is unusual in LP format — confirm that the
    cplex reader accepts/intends this.
    """
    # PRECOMPUTING: P[v][u][c1][d1][c2][d2] = 1 iff voter v (election_1)
    # and voter u (election_2) rank the pairs (c1,c2) and (d1,d2) in the
    # same relative order.
    # """
    P = np.zeros([election_1.num_voters, election_2.num_voters, election_1.num_candidates,
                  election_2.num_candidates,
                  election_1.num_candidates, election_2.num_candidates])
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        for d2 in range(election_2.num_candidates):
                            if (election_1.potes[v][c1] > election_1.potes[v][c2] and
                                    election_2.potes[u][d1] >
                                    election_2.potes[u][d2]) or \
                                    (election_1.potes[v][c1] < election_1.potes[v][c2] and
                                     election_2.potes[u][d1] <
                                     election_2.potes[u][d2]):
                                P[v][u][c1][d1][c2][d2] = 1
    # print(P)
    # """
    # CREATE LP FILE
    lp_file = open(lp_file_name, 'w')
    # Objective: maximize the number of matched candidate pairs.
    lp_file.write("Maximize\nobj: ")
    first = True
    for c in range(election_1.num_candidates):
        for d in range(election_2.num_candidates):
            if not first:
                lp_file.write(" + ")
            first = False
            lp_file.write(" M_" + str(c) + "_" + str(d))
    lp_file.write("\n")
    """
    first = True
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            if P[v][u][c1][d1][c2][d2] == 1:
                                if not first:
                                    lp_file.write(" + ")
                                first = False
                                lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                              str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
    lp_file.write("\n")
    """
    lp_file.write("Subject To\n")
    ctr_c = 0
    # FIRST CONSTRAINT FOR VOTERS: each voter of election_1 matched exactly once.
    for v in range(election_1.num_voters):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for u in range(election_2.num_voters):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" N_" + str(v) + "_" + str(u))
        lp_file.write(" = 1" + "\n")
        ctr_c += 1
    # SECOND CONSTRAINT FOR VOTERS: each voter of election_2 matched exactly once.
    for u in range(election_2.num_voters):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for v in range(election_1.num_voters):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" N_" + str(v) + "_" + str(u))
        lp_file.write(" = 1" + "\n")
        ctr_c += 1
    # FIRST CONSTRAINT FOR CANDIDATES: each candidate of election_1 matched at most once.
    for c in range(election_1.num_candidates):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for d in range(election_2.num_candidates):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" M_" + str(c) + "_" + str(d))
        lp_file.write(" <= 1" + "\n")
        ctr_c += 1
    # SECOND CONSTRAINT FOR CANDIDATES: each candidate of election_2 matched at most once.
    for d in range(election_2.num_candidates):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for c in range(election_1.num_candidates):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" M_" + str(c) + "_" + str(d))
        lp_file.write(" <= 1" + "\n")
        ctr_c += 1
    # FIRST CONSTRAINT FOR P: P can be 1 only if N and both M's are 1
    # (the 0.34 coefficients make the sum exceed 1 only when all three are set).
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            # if P[v][u][c1][d1][c2][d2] == 1:
                            lp_file.write("c" + str(ctr_c) + ":")
                            lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
                            lp_file.write(" - 0.34 N_" + str(v) + "_" + str(u))
                            lp_file.write(" - 0.34 M_" + str(c1) + "_" + str(d1))
                            lp_file.write(" - 0.34 M_" + str(c2) + "_" + str(d2))
                            lp_file.write(" <= 0" + "\n")
                            ctr_c += 1
    # SECOND CONSTRAINT FOR P: P must be 1 when N and both M's are 1.
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            # if P[v][u][c1][d1][c2][d2] == 1:
                            lp_file.write("c" + str(ctr_c) + ":")
                            lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
                            # lp_file.write(" + 1")
                            lp_file.write(" - 0.34 N_" + str(v) + "_" + str(u))
                            lp_file.write(" - 0.34 M_" + str(c1) + "_" + str(d1))
                            lp_file.write(" - 0.34 M_" + str(c2) + "_" + str(d2))
                            lp_file.write(" > -1" + "\n")
                            ctr_c += 1
    # THIRD CONSTRAINT FOR P: P is bounded by the precomputed agreement table.
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            # if P[v][u][c1][d1][c2][d2] == 1:
                            lp_file.write("c" + str(ctr_c) + ":")
                            lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
                            lp_file.write(" <= " + str(P[v][u][c1][d1][c2][d2]) + "\n")
                            ctr_c += 1
    """
    # NEW 1
    for c1 in range(election_1.num_candidates):
        for d1 in range(election_2.num_candidates):
            lp_file.write("c" + str(ctr_c) + ":")
            first = True
            for v in range(election_1.num_voters):
                for u in range(election_2.num_voters):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            if P[v][u][c1][d1][c2][d2] == 1:
                                if not first:
                                    lp_file.write(" +")
                                first = False
                                lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                              str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
            lp_file.write(' - ' + str((magic_param-1)*election_1.num_voters) + ' M_' + str(c1) + '_' + str(d1) + ' = 0' + "\n")
            ctr_c += 1
    # NEW 2
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            lp_file.write("c" + str(ctr_c) + ":")
            first = True
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            if P[v][u][c1][d1][c2][d2] == 1:
                                if not first:
                                    lp_file.write(" +")
                                first = False
                                lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                              str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
            lp_file.write(' - ' + str((magic_param-1)*2) + ' N_' + str(v) + '_' + str(u) + ' = 0' + "\n")
            ctr_c += 1
    """
    # Declare every variable binary.
    lp_file.write("Binary\n")
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            # if P[v][u][c1][d1][c2][d2] == 1:
                            lp_file.write("P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(
                                d2) + "\n")
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            lp_file.write("N_" + str(v) + "_" + str(u) + "\n")
    for c in range(election_1.num_candidates):
        for d in range(election_2.num_candidates):
            lp_file.write("M_" + str(c) + "_" + str(d) + "\n")
    lp_file.write("End\n")
    lp_file.close()
    ### SECOND PART: load the LP file and solve it.
    cp_lp = cplex.Cplex(lp_file_name)
    cp_lp.parameters.threads.set(1)
    cp_lp.set_results_stream(None)
    try:
        cp_lp.solve()
    # FIX: was a bare `except:` (also swallows SystemExit/KeyboardInterrupt);
    # narrowed to Exception — cplex raises CplexSolverError subclasses.
    except Exception:
        print("Exception raised during solve")
        return
    ##########################
    ##########################
    result = np.zeros([election_1.num_candidates, election_1.num_candidates])
    for i in range(election_1.num_candidates):
        for j in range(election_1.num_candidates):
            name = 'M_' + str(i) + '_' + str(j)
            result[i][j] = cp_lp.solution.get_values(name)
    # print('M', result)
    """
    result_2 = np.zeros([election_1.num_voters, election_1.num_voters])
    for i in range(election_1.num_voters):
        for j in range(election_1.num_voters):
            election_id = 'N_' + str(i) + '_' + str(j)
            result_2[i][j] = cp_lp.solution.get_values(election_id)
    print('N', result_2)
    total = 0
    for v in range(election_1.num_voters):
        for u in range(election_1.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_1.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_1.num_candidates):
                            if d1 == d2:
                                continue
                            #if P[v][u][c1][d1][c2][d2] == 1:
                            election_id = "P_" + str(v) + "_" + str(u) + "_" + str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2)
                            value = cp_lp.solution.get_values(election_id)
                            #print(value)
                            if value == 1:
                                print(election_id)
                            total += value
    print(total)
    """
    ##########################
    ##########################
    # objective_value = cp_lp.solution.get_objective_value()
    # print('O-V: ', objective_value)
    # print(sum(sum(result)))
    # Number of matched candidates = sum over the M matching matrix.
    return sum(sum(result))
# FOR METRICS
def solve_lp_matching_vector_with_lp(cost_table, length):
    """Solve a min-cost perfect matching between two length-`length` vectors as an ILP.

    Binary variable x_{i*length+j} == 1 iff row i is matched to column j,
    with cost cost_table[i][j]; each row and each column is matched exactly once.

    Parameters:
        cost_table: 2-D indexable of numbers, at least length x length.
        length: size of both sides of the matching.

    Returns:
        The optimal objective value (float), or None if the solver raised.
    """
    cp = cplex.Cplex()
    cp.parameters.threads.set(1)
    # OBJECTIVE FUNCTION: minimize total matching cost.
    cp.objective.set_sense(cp.objective.sense.minimize)
    objective = []
    names = []
    pos = 0
    for i in range(length):
        for j in range(length):
            names.append('x' + str(pos))
            objective.append(cost_table[i][j])
            pos += 1
    cp.variables.add(obj=objective,
                     names=names,
                     types=[cp.variables.type.binary] * length ** 2)
    # FIRST GROUP OF CONSTRAINTS: each row i is matched exactly once.
    lin_expr = []
    for i in range(length):
        ind = []
        for j in range(length):
            pos = i * length + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * length,
                              rhs=[1.0] * length)
    # SECOND GROUP OF CONSTRAINTS: each column j is matched exactly once.
    lin_expr = []
    for j in range(length):
        ind = []
        for i in range(length):
            pos = i * length + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * length,
                              rhs=[1.0] * length)
    # SOLVE THE ILP
    cp.set_results_stream(None)
    try:
        cp.solve()
    # Fix: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit;
    # cplex.CplexSolverError is the expected failure mode here.
    except Exception:
        print("Exception raised while solving")
        return
    objective_value = cp.solution.get_objective_value()
    return objective_value
def solve_lp_matching_interval(cost_table, length_1, length_2):
    """Solve a many-to-many (interval) matching ILP and return the normalized optimum.

    Each of the length_1 rows is assigned exactly length_2 units and each of
    the length_2 columns exactly length_1 units (integer variables), so the
    objective is normalized by precision = length_1 * length_2.

    Returns:
        Normalized optimal objective value (float), or None if the solver raised.
    """
    precision = length_1 * length_2
    c = cplex.Cplex()
    c.parameters.threads.set(1)
    # OBJECTIVE FUNCTION
    c.objective.set_sense(c.objective.sense.minimize)
    c.objective.set_name("Obj")
    objective = []
    names = []
    pos = 0
    for i in range(length_1):
        for j in range(length_2):
            names.append('x' + str(pos))
            objective.append(cost_table[i][j])
            pos += 1
    c.variables.add(obj=objective,
                    names=names,
                    types=[c.variables.type.integer] * precision)
    # FIRST GROUP OF CONSTRAINTS: every row i saturated with length_2 units.
    lin_expr = []
    c_names = []
    for i in range(length_1):
        ind = []
        for j in range(length_2):
            pos = i * length_2 + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length_2))
        c_names.append('c1_' + str(i))
    c.linear_constraints.add(lin_expr=lin_expr,
                             senses=['E'] * length_1,
                             rhs=[length_2] * length_1,
                             names=c_names)
    # SECOND GROUP OF CONSTRAINTS: every column j saturated with length_1 units.
    lin_expr = []
    c_names = []
    for j in range(length_2):
        ind = []
        for i in range(length_1):
            pos = i * length_2 + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length_1))
        c_names.append('c2_' + str(j))
    c.linear_constraints.add(lin_expr=lin_expr,
                             senses=['E'] * length_2,
                             rhs=[length_1] * length_2,
                             names=c_names)
    # Model dumps to the working directory; kept as-is in case downstream
    # tooling reads them, but NOTE(review): looks like leftover debugging.
    c.write('interval.lp')
    c.write('interval.mps')
    # SOLVE THE ILP
    c.set_results_stream(None)
    try:
        c.solve()
    # Fix: was a bare `except:`; cplex.CplexSolverError is the expected error.
    except Exception:
        print("Exception raised while solving")
        return
    result = c.solution.get_objective_value() / precision
    return result
# DODGSON SCORE
def generate_lp_file_dodgson_score(lp_file_name, N=None, e=None, D=None):
    """Write the Dodgson-score ILP to `lp_file_name` in CPLEX LP text format.

    Parameters:
        lp_file_name: output path.
        N: vote-type multiplicities (N[i] = number of voters of type i).
        e: 3-D table; only consecutive differences e[i][j][k] - e[i][j-1][k]
           enter the constraints for candidate k.
        D: per-candidate right-hand sides (deficits to cover).

    Variable y<i>_<j> is the number of voters of type i performing at least
    j swaps; the objective charges j per such variable.

    Fix over the original: the file is opened via a context manager so it is
    always flushed and closed (the original never called close()).
    """
    with open(lp_file_name, 'w') as lp_file:
        # Objective: sum over i, j>=1 of j * y_i_j.
        lp_file.write("Minimize\nobj: ")
        first = True
        for i in range(len(N)):
            for j in range(1, len(D)):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write(str(j) + " y" + str(i) + "_" + str(j))
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        ctr_c = 0
        # y_i_0 equals the number of voters of type i.
        for i in range(len(N)):
            lp_file.write("c" + str(ctr_c) + ":")
            lp_file.write(" y" + str(i) + "_" + str(0) + " = " + str(N[i]) + "\n")
            ctr_c += 1
        # Deficit-covering constraint for each candidate k.
        for k in range(len(D)):
            lp_file.write("c" + str(ctr_c) + ":")
            first = True
            for i in range(len(N)):
                for j in range(1, len(D)):
                    if not first:
                        lp_file.write(" +")
                    first = False
                    lp_file.write(" " + str(e[i][j][k] - e[i][j - 1][k]) + " y" + str(i) + "_" + str(j))
            lp_file.write(" >= " + str(D[k]) + "\n")
            ctr_c += 1
        # Monotonicity: y_i_{j-1} >= y_i_j.
        for i in range(len(N)):
            for j in range(1, len(D)):
                lp_file.write("c" + str(ctr_c) + ":")
                lp_file.write(
                    " y" + str(i) + "_" + str(j - 1) + " - y" + str(i) + "_" + str(j) + " >= 0" + "\n")
                ctr_c += 1
        # Non-negativity; redundant for integer variables (as the original
        # comment noted) but kept so the emitted file is unchanged.
        for i in range(len(N)):
            for j in range(len(D)):
                lp_file.write("c" + str(ctr_c) + ":")
                lp_file.write(" y" + str(i) + "_" + str(j) + " >= 0" + "\n")
                ctr_c += 1
        # Declare every y variable as a general integer.
        lp_file.write("General\n")
        for i in range(len(N)):
            for j in range(len(D)):
                lp_file.write("y" + str(i) + "_" + str(j) + "\n")
                ctr_c += 1
        lp_file.write("End\n")
def solve_lp_dodgson_score(lp_file_name):
    """Solve the Dodgson-score ILP written by generate_lp_file_dodgson_score.

    Returns:
        The optimal objective value, or None if the solver raised.
    """
    cp_lp = cplex.Cplex(lp_file_name)
    cp_lp.parameters.threads.set(1)
    cp_lp.set_results_stream(None)  # silence solver output
    try:
        cp_lp.solve()
    # Fix: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit;
    # cplex.CplexSolverError is the expected failure mode.
    except Exception:
        print("Exception raised during solve")
        return
    return cp_lp.solution.get_objective_value()
# FOR WINNERS - needs update
def generate_lp_file_borda_owa(owa, lp_file_name, params, votes):
    """Write the OWA-Borda committee-selection ILP to `lp_file_name` (CPLEX LP format).

    Parameters:
        owa: OWA weight vector, one weight per committee "order" slot.
        lp_file_name: output path.
        params: dict with 'voters', 'orders' (committee size) and 'candidates'.
        votes: votes[i][k] = candidate at position k of voter i's ranking.

    Variables: binary x<pos> assignment indicators and y<i> committee-membership
    indicators; constraint c0 fixes the committee size.

    Fix over the original: the file is opened via a context manager so it is
    always flushed and closed (the original never called close()).
    """
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Maximize\nobj: ")
        pos = 0
        first = True
        for i in range(params['voters']):
            for j in range(params['orders']):
                for k in range(params['candidates']):
                    # NOTE(review): the " + " separator is suppressed for negative
                    # OWA weights, which would yield a malformed objective;
                    # preserved as-is (weights are presumably non-negative).
                    if not first and owa[j] >= 0.:
                        lp_file.write(" + ")
                    first = False
                    lp_file.write(str(owa[j]) + " x" + str(pos))
                    pos += 1
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # c0: exactly 'orders' committee members.
        lp_file.write("c0:")
        first = True
        for i in range(params['candidates']):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" y" + str(i))
        lp_file.write(' = ' + str(params['orders']) + '\n')
        # Link each x block to the y variables of the top-(j+1) candidates of the vote.
        for i in range(params['voters']):
            for j in range(params['candidates']):
                lp_file.write("c" + str(i * params['candidates'] + j + 1) + ": ")
                pos = i * params['orders'] * params['candidates'] + j
                first = True
                for k in range(params['orders']):
                    if not first:
                        lp_file.write(" +")
                    first = False
                    lp_file.write(" x" + str(pos + params['candidates'] * k))
                for k in range(0, j + 1):
                    lp_file.write(" - y" + str(int(votes[i][k])))
                lp_file.write(" <= 0 \n")
        # All variables are binary.
        lp_file.write("Binary\n")
        for i in range(params['voters'] * params['orders'] * params['candidates']):
            lp_file.write("x" + str(i) + "\n")
        for i in range(params['candidates']):
            lp_file.write("y" + str(i) + "\n")
        lp_file.write("End\n")
def generate_lp_file_bloc_owa(owa, lp_file_name, params, votes, t_bloc):
    """Write the OWA-Bloc committee-selection ILP to `lp_file_name` (CPLEX LP format).

    Same model as generate_lp_file_borda_owa, except that only the x variables
    at candidate position k == t_bloc - 1 carry an objective coefficient
    (Bloc scoring counts only the t-th position).

    Parameters:
        owa: OWA weight vector, one weight per committee "order" slot.
        lp_file_name: output path.
        params: dict with 'voters', 'orders' and 'candidates'.
        votes: votes[i][k] = candidate at position k of voter i's ranking.
        t_bloc: Bloc threshold t.

    Fix over the original: the file is opened via a context manager so it is
    always flushed and closed (the original never called close()).
    """
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Maximize\nobj: ")
        pos = 0
        first = True
        for i in range(params['voters']):
            for j in range(params['orders']):
                for k in range(params['candidates']):
                    # Only position t_bloc-1 contributes an objective term.
                    if not first:
                        if k == t_bloc - 1:
                            lp_file.write(" + ")
                    first = False
                    if k == t_bloc - 1:
                        lp_file.write(str(owa[j]) + " x" + str(pos))
                    pos += 1
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # c0: exactly 'orders' committee members.
        lp_file.write("c0:")
        first = True
        for i in range(params['candidates']):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" y" + str(i))
        lp_file.write(' = ' + str(params['orders']) + '\n')
        # Link each x block to the y variables of the top-(j+1) candidates of the vote.
        for i in range(params['voters']):
            for j in range(params['candidates']):
                lp_file.write("c" + str(i * params['candidates'] + j + 1) + ": ")
                pos = i * params['orders'] * params['candidates'] + j
                first = True
                for k in range(params['orders']):
                    if not first:
                        lp_file.write(" +")
                    first = False
                    lp_file.write(" x" + str(pos + params['candidates'] * k))
                for k in range(0, j + 1):
                    lp_file.write(" - y" + str(int(votes[i][k])))
                lp_file.write(" <= 0 \n")
        # All variables are binary.
        lp_file.write("Binary\n")
        for i in range(params['voters'] * params['orders'] * params['candidates']):
            lp_file.write("x" + str(i) + "\n")
        for i in range(params['candidates']):
            lp_file.write("y" + str(i) + "\n")
        lp_file.write("End\n")
def get_winners_from_lp(tmp_file, params, candidates):
    """Solve a committee-selection ILP file and return the sorted winner list.

    tmp_file   : path of the LP file (written by one of the generate_lp_file_*_owa helpers).
    params     : dict with 'candidates' and 'orders' (committee size).
                 NOTE(review): params['pure'] is unconditionally overwritten to
                 True below (a side effect on the caller's dict), so the
                 `candidates[i]` branch is currently dead and winners are always
                 returned as candidate indices.
    candidates : candidate objects; only used when params['pure'] is False.

    Returns a sorted list of params['orders'] winners, or None if the solver raised.
    """
    cp_lp = cplex.Cplex(tmp_file)
    cp_lp.parameters.threads.set(1)
    cp_lp.set_results_stream(None)  # silence solver output
    try:
        cp_lp.solve()
    except cplex.CplexSolverError:
        print("Exception raised during solve")
        return
    # y_i == 1 in the solution marks candidate i as a committee member.
    result = [0.] * params['candidates']
    for i in range(params['candidates']):
        result[i] = cp_lp.solution.get_values('y' + str(i))
    # print(result)
    params['pure'] = True
    winner_id = 0
    winners = [0.] * params['orders']
    for i in range(params['candidates']):
        if result[i] == 1.:
            if params['pure']:
                winners[winner_id] = i
            else:
                winners[winner_id] = candidates[i]
            winner_id += 1
    winners = sorted(winners)
    return winners
"""
def generate_lp_file_matching_matrix_half(lp_file_name, matrix_1, matrix_2, length):
# [1, 4, 6, 9, 11]
# [1, 5, 6, 9, 11]
print(matrix_1)
print(matrix_2)
lp_file = open(lp_file_name, 'w')
lp_file.write("Minimize\n") # obj: ")
first = True
for k in range(length):
for l in range(length):
for i in range(k+1, length):
for j in range(l+1, length):
if not first:
lp_file.write(" + ")
first = False
weight = abs(matrix_1[k][i] - matrix_2[l][j])#**2
print(weight)
lp_file.write(str(weight) + " P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write("\n")
lp_file.write("Subject To\n")
for k in range(length):
for l in range(length):
for i in range(k+1, length):
for j in range(l+1, length):
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" - " + "M" + "i" + str(i) + "j" + str(j) + " <= 0" + "\n")
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" - " + "M" + "i" + str(k) + "j" + str(l) + " <= 0" + "\n")
for i in range(length):
first = True
for j in range(length):
if not first:
lp_file.write(" + ")
first = False
lp_file.write("M" + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
for j in range(length):
first = True
for i in range(length):
if not first:
lp_file.write(" + ")
first = False
lp_file.write("M" + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
# Not sure about this part #
for k in range(length):
for i in range(k+1, length):
if k == i:
continue
first = True
for l in range(length):
for j in range(l+1, length):
if l == j:
continue
if not first:
lp_file.write(" + ")
first = False
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
# Not sure about this part #
for l in range(length):
for j in range(l+1, length):
if l == j:
continue
first = True
for k in range(length):
for i in range(k+1, length):
if k == i:
continue
if not first:
lp_file.write(" + ")
first = False
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
lp_file.write("Binary\n")
for k in range(length):
for l in range(length):
for i in range(k+1, length):
for j in range(l+1, length):
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j) + "\n")
for i in range(length):
for j in range(length):
lp_file.write("M" + "i" + str(i) + "j" + str(j) + "\n")
lp_file.write("End\n")
"""
def generate_lp_file_matching_matrix(lp_file_name, matrix_1, matrix_2, length, inner_distance):
    """Write the matrix-matching ILP to `lp_file_name` (CPLEX LP format).

    Finds a permutation (binary M variables, Mi<i>j<j> == 1 iff index i of
    matrix_1 maps to index j of matrix_2) minimizing the summed
    inner_distance(matrix_1[k][i], matrix_2[l][j]) over matched pairs; the
    P variables linearize the products M[k][l] * M[i][j].

    Parameters:
        lp_file_name: output path.
        matrix_1, matrix_2: square matrices of size length x length.
        length: matrix dimension.
        inner_distance: callable (a, b) -> numeric weight.

    Fix over the original: the file is opened via a context manager so it is
    always flushed and closed (the original never called close()).
    """
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Minimize\n")
        first = True
        for k in range(length):
            for l in range(length):
                for i in range(length):
                    if i == k:
                        continue
                    for j in range(length):
                        if j == l:
                            continue
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        weight = inner_distance(matrix_1[k][i], matrix_2[l][j])
                        lp_file.write(
                            str(weight) + " P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(
                                j))
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # P_{k,l,i,j} may be 1 only if both M_{i,j} and M_{k,l} are 1.
        for k in range(length):
            for l in range(length):
                for i in range(length):
                    if i == k:
                        continue
                    for j in range(length):
                        if j == l:
                            continue
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "M" + "i" + str(i) + "j" + str(j) + " <= 0" + "\n")
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "M" + "i" + str(k) + "j" + str(l) + " <= 0" + "\n")
        # M is a permutation: each row matched exactly once...
        for i in range(length):
            first = True
            for j in range(length):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        # ...and each column matched exactly once.
        for j in range(length):
            first = True
            for i in range(length):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        # Original comment: "Not sure about this part" — each (k, i) pair, k != i,
        # is covered by exactly one P variable; preserved as-is.
        for k in range(length):
            for i in range(length):
                if k == i:
                    continue
                first = True
                for l in range(length):
                    for j in range(length):
                        if l == j:
                            continue
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        # Symmetric constraint for each (l, j) pair, l != j; also preserved as-is.
        for l in range(length):
            for j in range(length):
                if l == j:
                    continue
                first = True
                for k in range(length):
                    for i in range(length):
                        if k == i:
                            continue
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        # All P and M variables are binary.
        lp_file.write("Binary\n")
        for k in range(length):
            for l in range(length):
                for i in range(length):
                    if i == k:
                        continue
                    for j in range(length):
                        if j == l:
                            continue
                        lp_file.write(
                            "P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j) + "\n")
        for i in range(length):
            for j in range(length):
                lp_file.write("M" + "i" + str(i) + "j" + str(j) + "\n")
        lp_file.write("End\n")
def solve_lp_matrix(lp_file_name, matrix_1, matrix_2, length):
    """Solve the matrix-matching ILP written by generate_lp_file_matching_matrix.

    matrix_1, matrix_2 and length are unused here (kept for caller
    compatibility; historical debug code that inspected the solution used them).

    Returns:
        The optimal objective value, or None if the solver raised.
    """
    cp_lp = cplex.Cplex(lp_file_name)
    cp_lp.set_results_stream(None)  # silence solver output
    cp_lp.parameters.threads.set(1)
    # cp_lp.parameters.mip.tolerances.mipgap = 0.0001
    # cp_lp.parameters.mip.strategy.probe.set(3)
    try:
        cp_lp.solve()
    # Fix: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        print("Exception raised during solve")
        return
    return cp_lp.solution.get_objective_value()
# SPEARMAN - old
def generate_ilp_distance(lp_file_name, votes_1, votes_2, params, metric_name):
    """Write the election-isomorphism ILP (joint voter + candidate matching).

    N variables match voters (Nk<k>l<l>), M variables match candidates
    (Mi<i>j<j>), and P variables linearize their products; the objective sums
    per-pair weights determined by `metric_name`:
        'spearman' : |position difference| of matched candidates,
        'alt'      : squared difference scaled by 1/(1+min position),
        'hamming'  : hamming(vote_1, vote_2),
        otherwise  : 0.

    NOTE(review): for 'alt' the pote_1/pote_2 position vectors are only built
    inside the 'spearman' branch, so 'alt' would raise NameError — preserved
    as-is; confirm against callers.

    Fix over the original: the file is opened via a context manager so it is
    always flushed and closed (the original never called close()).
    """
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Minimize\n")  # obj: ")
        first = True
        for k in range(params['voters']):
            for l in range(params['voters']):
                vote_1 = votes_1[k]
                vote_2 = votes_2[l]
                if metric_name == 'spearman':
                    # Invert the rankings into candidate -> position vectors.
                    pote_1 = [0] * params['candidates']
                    pote_2 = [0] * params['candidates']
                    for i in range(params['candidates']):
                        pote_1[vote_1[i]] = i
                        pote_2[vote_2[i]] = i
                for i in range(params['candidates']):
                    for j in range(params['candidates']):
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        if metric_name == "spearman":
                            weight = abs(pote_1[i] - pote_2[j])
                        elif metric_name == "alt":
                            weight = float(abs(pote_1[i] - pote_2[j]) ** (2)) / float(
                                1. + min(pote_1[i], pote_2[j]))
                        elif metric_name == 'hamming':
                            weight = hamming(vote_1, vote_2)
                        else:
                            weight = 0
                        lp_file.write(
                            str(weight) + " P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(
                                j))
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # P_{k,l,i,j} may be 1 only if both M_{i,j} and N_{k,l} are 1.
        for k in range(params['voters']):
            for l in range(params['voters']):
                for i in range(params['candidates']):
                    for j in range(params['candidates']):
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "M" + "i" + str(i) + "j" + str(j) + " <= 0" + "\n")
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "N" + "k" + str(k) + "l" + str(l) + " <= 0" + "\n")
        # N is a permutation of voters: rows...
        for k in range(params['voters']):
            first = True
            for l in range(params['voters']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("N" + "k" + str(k) + "l" + str(l))
            lp_file.write(" = 1" + "\n")
        # ...and columns each matched exactly once.
        for l in range(params['voters']):
            first = True
            for k in range(params['voters']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("N" + "k" + str(k) + "l" + str(l))
            lp_file.write(" = 1" + "\n")
        # M is a permutation of candidates: rows...
        for i in range(params['candidates']):
            first = True
            for j in range(params['candidates']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        # ...and columns each matched exactly once.
        for j in range(params['candidates']):
            first = True
            for i in range(params['candidates']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        # IMPORTANT: each (voter k, candidate i) pair is covered by exactly one P.
        for k in range(params['voters']):
            for i in range(params['candidates']):
                first = True
                for l in range(params['voters']):
                    for j in range(params['candidates']):
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        # IMPORTANT: symmetric coverage for each (voter l, candidate j) pair.
        for l in range(params['voters']):
            for j in range(params['candidates']):
                first = True
                for k in range(params['voters']):
                    for i in range(params['candidates']):
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        # All P, N and M variables are binary.
        lp_file.write("Binary\n")
        for k in range(params['voters']):
            for l in range(params['voters']):
                for i in range(params['candidates']):
                    for j in range(params['candidates']):
                        lp_file.write(
                            "P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j) + "\n")
        for k in range(params['voters']):
            for l in range(params['voters']):
                lp_file.write("N" + "k" + str(k) + "l" + str(l) + "\n")
        for i in range(params['candidates']):
            for j in range(params['candidates']):
                lp_file.write("M" + "i" + str(i) + "j" + str(j) + "\n")
        lp_file.write("End\n")
def solve_ilp_distance(lp_file_name, votes_1, votes_2, params, metric_name):
    """Solve the isomorphic-distance ILP written by generate_ilp_distance.

    votes_1/votes_2/params/metric_name are unused here; kept for caller
    compatibility. Returns the optimal objective value, or None on solver error.
    """
    solver = cplex.Cplex(lp_file_name)
    solver.set_results_stream(None)
    # solver.parameters.threads.set(1)
    # solver.parameters.timelimit.set(60)
    try:
        solver.solve()
    except cplex.CplexSolverError:
        print("Exception raised during solve")
        return
    return solver.solution.get_objective_value()
def spearman_cost(single_votes_1, single_votes_2, params, perm):
    """Total Spearman (footrule) distance between two votes under relabelling `perm`.

    perm[0]/perm[1] remap candidate ids of vote 1 / vote 2; the distance is the
    sum over candidates of the absolute difference of their positions.
    """
    m = params['candidates']
    positions_1 = [0] * m
    positions_2 = [0] * m
    for rank in range(m):
        positions_1[int(perm[0][single_votes_1[rank]])] = rank
        positions_2[int(perm[1][single_votes_2[rank]])] = rank
    return float(sum(abs(p1 - p2) for p1, p2 in zip(positions_1, positions_2)))
def spearman_cost_per_cand(single_votes_1, single_votes_2, params, perm):
    """Per-candidate Spearman distances between two votes under relabelling `perm`.

    Returns a list of floats, one absolute position difference per candidate.
    """
    m = params['candidates']
    positions_1 = [0] * m
    positions_2 = [0] * m
    for rank in range(m):
        positions_1[int(perm[0][single_votes_1[rank]])] = rank
        positions_2[int(perm[1][single_votes_2[rank]])] = rank
    return [float(abs(p1 - p2)) for p1, p2 in zip(positions_1, positions_2)]
def remove_lp_file(path):
    """Delete a temporary LP file, ignoring filesystem failures.

    Fix: only OSError (e.g. FileNotFoundError, PermissionError) is suppressed;
    the original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        os.remove(path)
    except OSError:
        pass
# ---------------------------------------------------------------------------
# NOTE: everything below is a separate SQLG-generated Airflow DAG file that
# was concatenated onto this module; the stray "|" / "python" markers that
# stood here were extraction residue.
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Author : Jesse Wei
# LastUpdate : 2020/10/04
# Impact : Jobs generated by SQLG
# Message : Humanity towards others, we live by sharing. Fear can hold you prisoner, only hope can set you free.
# from __future__ import print_function
import logging
import re
import airflow
import pendulum
from datetime import datetime, timedelta
from airflow.operators.sensors import ExternalTaskSensor
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.sensors.file_sensor import FileSensor
from airflow import models
from airflow.models import Variable, DagModel, DagBag
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
# For ODP platform
# from acme.operators.sqlg_oracle import OracleOperatorWithTemplatedParams
# from airflow.operators.oracle_operator import OracleOperator
from acme.operators.sqlg_mssql import MsSqlOperatorWithTemplatedParams
from airflow.operators.mssql_operator import MsSqlOperator
# DB_NAME = 'DWH' # for future xDB operator
proj_start_date = pendulum.datetime(2021, 1, 1, tzinfo="Etc/GMT-8")
tmpl_search_path = Variable.get("sql_path")
data_stage_imp_ptn = '_ODS_'
data_stage = []
# list for standard interval order sequence
# Rank of each "standard" schedule interval; used by the branch callables
# below to compare how often two DAGs run (larger value = coarser/less
# frequent schedule). Intervals missing from this map are treated as
# non-standard.
std_interval = {
    '@once' :1,
    '@hourly' :2,
    '0 5 * * *' :3,
    '0 5 * * 0' :4,
    '0 5 1 * *' :5,
    '0 5 1 */3 *' :6,
    '0 5 1 1 *' :7,
}
# function to sync execution for diff frequency
def sqlg_exec_date_fn(dt, context):
    """execution_date_fn for the cross-DAG ExternalTaskSensors in this file.

    Returns the upstream execution date the sensor should poke for: the
    globally planned date from Airflow Variable "sqlg_execution_date" when
    waiting on D_STG_INIT from a standard '@'-preset schedule, otherwise this
    task instance's own execution date.

    NOTE(review): `dt` (the datetime Airflow passes in) is unused; the date is
    derived from `context` instead — presumably intentional, confirm against
    the sensor wiring.
    """
    var_date = Variable.get("sqlg_execution_date")
    ti = context['ti']
    dag = context['dag']
    ti_exec_date = context['execution_date']
    schedule_interval = dag.schedule_interval
    # if wait INIT and standard freq then set as default {{ ds }} # set in planner
    # else use dag own execution date
    # NOTE(review): `schedule_interval[0] == '@'` assumes a string schedule;
    # a None or timedelta schedule_interval would raise TypeError here.
    if ti.task.external_dag_id == 'D_STG_INIT' and schedule_interval[0] == '@':
        exec_date = pendulum.parse(var_date)
    else:
        exec_date = ti_exec_date
    print("sqlg_exec_date_fn::DEBUG:external_dag_id, exec_date:", ti.task.external_dag_id, exec_date)
    return exec_date
# Default task arguments shared by every DAG defined in this file.
args = {
    "owner": "SPA010038",
    'start_date': proj_start_date,
    'provide_context': True
}
# XSLT:loop: declaration: END}
# XSLT:loop: JOB_FLOW_NAME: START{
# Three generated DAGs (ODS / DM / INT layers of the SCM flow). Each flow
# name doubles as its tag list (split on '_'); all three run daily at 05:00
# with a 4-hour run timeout and at most one active run at a time.
job_flow_name = "D_ODS_SCM"
data_stage = job_flow_name.split('_')
tags = data_stage
D_ODS_SCM = airflow.DAG(
    "D_ODS_SCM",
    tags=tags,
    schedule_interval="0 5 * * *",
    dagrun_timeout=timedelta(minutes=60*4),
    template_searchpath=tmpl_search_path,
    default_args=args,
    # start_date=proj_start_date,
    max_active_runs=1
)
job_flow_name = "D_DM_SCM"
data_stage = job_flow_name.split('_')
tags = data_stage
D_DM_SCM = airflow.DAG(
    "D_DM_SCM",
    tags=tags,
    schedule_interval="0 5 * * *",
    dagrun_timeout=timedelta(minutes=60*4),
    template_searchpath=tmpl_search_path,
    default_args=args,
    # start_date=proj_start_date,
    max_active_runs=1
)
job_flow_name = "D_INT_SCM"
data_stage = job_flow_name.split('_')
tags = data_stage
D_INT_SCM = airflow.DAG(
    "D_INT_SCM",
    tags=tags,
    schedule_interval="0 5 * * *",
    dagrun_timeout=timedelta(minutes=60*4),
    template_searchpath=tmpl_search_path,
    default_args=args,
    # start_date=proj_start_date,
    max_active_runs=1
)
# XSLT:loop: JOB_FLOW_NAME: END}
# JOB_TYPE=ODS-MAIN
# Every SQLG-generated job in this file has the same shape: execute stored
# procedure SQLEXT.<task_id>_SP with the run date (Asia/Taipei, yyyymmdd) as
# its only argument, on a fixed pool with a 3-hour timeout. The factory below
# replaces 18 copy-pasted operator blocks; the module-level task names are
# unchanged.
def _make_sp_task(task_id, dag):
    """Build the standard SQLG stored-procedure task `task_id` on `dag`."""
    return MsSqlOperatorWithTemplatedParams(
        auto_commit=True,
        task_id=task_id,
        pool="sql_pool",
        dag=dag,
        # parameters=({":END_DT_CHAR": ...}) was commented out in the generated
        # code; the run date is inlined into the SQL text instead.
        timeout=60 * 60 * 3,
        sql="EXECUTE SQLEXT." + task_id + "_SP " +
            "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
            ";"
    )

# ODS layer
PNL_Revenue_Cost_A = _make_sp_task("PNL_Revenue_Cost_A", D_ODS_SCM)
NRE_Summary = _make_sp_task("NRE_Summary", D_ODS_SCM)
Daily_Revenue_F = _make_sp_task("Daily_Revenue_F", D_ODS_SCM)
RFQ_Master = _make_sp_task("RFQ_Master", D_ODS_SCM)
Inventory_A = _make_sp_task("Inventory_A", D_ODS_SCM)
DOI_Actual = _make_sp_task("DOI_Actual", D_ODS_SCM)
# DM layer
DM_PNL_Revenue_Cost_A = _make_sp_task("DM_PNL_Revenue_Cost_A", D_DM_SCM)
DM_NRE_Summary = _make_sp_task("DM_NRE_Summary", D_DM_SCM)
DM_Daily_Revenue_F = _make_sp_task("DM_Daily_Revenue_F", D_DM_SCM)
DM_RFQ_Master = _make_sp_task("DM_RFQ_Master", D_DM_SCM)
DM_Inventory_A = _make_sp_task("DM_Inventory_A", D_DM_SCM)
DM_DOI_Actual = _make_sp_task("DM_DOI_Actual", D_DM_SCM)
# INT layer
INT_PNL_Revenue_Cost_A = _make_sp_task("INT_PNL_Revenue_Cost_A", D_INT_SCM)
INT_NRE_Summary = _make_sp_task("INT_NRE_Summary", D_INT_SCM)
INT_Daily_Revenue_F = _make_sp_task("INT_Daily_Revenue_F", D_INT_SCM)
INT_RFQ_Master = _make_sp_task("INT_RFQ_Master", D_INT_SCM)
INT_Inventory_A = _make_sp_task("INT_Inventory_A", D_INT_SCM)
INT_DOI_Actual = _make_sp_task("INT_DOI_Actual", D_INT_SCM)
# Monkeypatch the sensor UI colours; this mutates the ExternalTaskSensor
# class itself, so it affects every DAG loaded in this Airflow instance,
# not just the ones in this file.
ExternalTaskSensor.ui_color = 'white'
ExternalTaskSensor.ui_fgcolor = 'blue'
# tmpl_search_path = Variable.get("sql_path")
# XSLT:loop: JOB_FLOW_NAME-and-PRE_JOB: External:START{{
def branch_D_ODS_SCMxD_STG_INIT__SYS_STS_STG(**context):
    """Decide whether this run should wait on D_STG_INIT.SYS_STS_STG.

    Returns the task ids the branch should follow:
    - [proxy, sensor] when both schedules are the same non-standard interval,
      or when the upstream's interval ranks >= ours on the std_interval ladder
      and its latest execution date matches this run's execution date;
    - [proxy] otherwise (the sensor is skipped).
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_STG_INIT")
    # Rank both schedules on the standard-frequency ladder; None => non-standard.
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard interval(s): only wait when both DAGs use the exact same schedule.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG", "D_ODS_SCMxD_STG_INIT__SYS_STS_STG"]
    # Fix: reuse the ranks computed above instead of re-indexing std_interval
    # (both keys are proven present on this path; behavior is identical).
    elif up_sch_interval >= my_sch_interval:
        # Wait only on the upstream run that lines up with our execution date.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG", "D_ODS_SCMxD_STG_INIT__SYS_STS_STG"]
    return ["proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG"]
my_taskid = "BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG"
BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG= BranchPythonOperator(
task_id=my_taskid,
python_callable=branch_D_ODS_SCMxD_STG_INIT__SYS_STS_STG,
dag=D_ODS_SCM,
provide_context=True,
)
my_taskid = "proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG"
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG= DummyOperator(
task_id=my_taskid,
trigger_rule="none_failed_or_skipped",
dag=D_ODS_SCM,
)
# Cross dag sensor
my_taskid = "D_ODS_SCMxD_STG_INIT__SYS_STS_STG"
D_ODS_SCMxD_STG_INIT__SYS_STS_STG= ExternalTaskSensor(
pool = "sensor_pool",
task_id=my_taskid,
external_dag_id="D_STG_INIT",
external_task_id="SYS_STS_STG",
mode="reschedule",
dag=D_ODS_SCM,
check_existence=True,
timeout=60*60*1,
retries=5,
retry_delay=timedelta(minutes=3),
execution_date_fn=sqlg_exec_date_fn
)
BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG)
BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(D_ODS_SCMxD_STG_INIT__SYS_STS_STG)
D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG)
def branch_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A(**context):
    """Return both the proxy and sensor task ids when the upstream D_INT_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A","D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A","D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"
BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A,
    dag=D_DM_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"
proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"
D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_PNL_Revenue_Cost_A",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A)
BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A)
D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A)
def branch_D_DM_SCMxD_INT_SCM__INT_NRE_Summary(**context):
    """Return both the proxy and sensor task ids when the upstream D_INT_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary","D_DM_SCMxD_INT_SCM__INT_NRE_Summary"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary","D_DM_SCMxD_INT_SCM__INT_NRE_Summary"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary"
BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_NRE_Summary,
    dag=D_DM_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary"
proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_NRE_Summary"
D_DM_SCMxD_INT_SCM__INT_NRE_Summary= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_NRE_Summary",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary)
BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(D_DM_SCMxD_INT_SCM__INT_NRE_Summary)
D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary)
def branch_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F(**context):
    """Return both the proxy and sensor task ids when the upstream D_INT_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F","D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F","D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"
BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F,
    dag=D_DM_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"
proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"
D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_Daily_Revenue_F",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F)
BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F)
D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F)
def branch_D_DM_SCMxD_INT_SCM__INT_RFQ_Master(**context):
    """Return both the proxy and sensor task ids when the upstream D_INT_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master","D_DM_SCMxD_INT_SCM__INT_RFQ_Master"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master","D_DM_SCMxD_INT_SCM__INT_RFQ_Master"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master"
BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_RFQ_Master,
    dag=D_DM_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master"
proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_RFQ_Master"
D_DM_SCMxD_INT_SCM__INT_RFQ_Master= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_RFQ_Master",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master)
BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(D_DM_SCMxD_INT_SCM__INT_RFQ_Master)
D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master)
def branch_D_DM_SCMxD_INT_SCM__INT_Inventory_A(**context):
    """Return both the proxy and sensor task ids when the upstream D_INT_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A","D_DM_SCMxD_INT_SCM__INT_Inventory_A"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A","D_DM_SCMxD_INT_SCM__INT_Inventory_A"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A"
BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_Inventory_A,
    dag=D_DM_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A"
proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_Inventory_A"
D_DM_SCMxD_INT_SCM__INT_Inventory_A= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_Inventory_A",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A)
BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(D_DM_SCMxD_INT_SCM__INT_Inventory_A)
D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A)
def branch_D_DM_SCMxD_INT_SCM__INT_DOI_Actual(**context):
    """Return both the proxy and sensor task ids when the upstream D_INT_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual","D_DM_SCMxD_INT_SCM__INT_DOI_Actual"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual","D_DM_SCMxD_INT_SCM__INT_DOI_Actual"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual"
BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_DOI_Actual,
    dag=D_DM_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual"
proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_DOI_Actual"
D_DM_SCMxD_INT_SCM__INT_DOI_Actual= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_DOI_Actual",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual)
BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(D_DM_SCMxD_INT_SCM__INT_DOI_Actual)
D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual)
def branch_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A(**context):
    """Return both the proxy and sensor task ids when the upstream D_ODS_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A","D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A","D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"]
my_taskid = "BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"
BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A,
    dag=D_INT_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"
proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross dag sensor
my_taskid = "D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"
D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_ODS_SCM",
    external_task_id="PNL_Revenue_Cost_A",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A)
BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A)
D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A)
def branch_D_INT_SCMxD_ODS_SCM__NRE_Summary(**context):
    """Return both the proxy and sensor task ids when the upstream D_ODS_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary","D_INT_SCMxD_ODS_SCM__NRE_Summary"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary","D_INT_SCMxD_ODS_SCM__NRE_Summary"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary"]
my_taskid = "BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary"
BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_INT_SCMxD_ODS_SCM__NRE_Summary,
    dag=D_INT_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary"
proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross dag sensor
my_taskid = "D_INT_SCMxD_ODS_SCM__NRE_Summary"
D_INT_SCMxD_ODS_SCM__NRE_Summary= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_ODS_SCM",
    external_task_id="NRE_Summary",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary)
BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(D_INT_SCMxD_ODS_SCM__NRE_Summary)
D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary)
def branch_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F(**context):
    """Return both the proxy and sensor task ids when the upstream D_ODS_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F","D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F","D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"]
my_taskid = "BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"
BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F,
    dag=D_INT_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"
proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross dag sensor
my_taskid = "D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"
D_INT_SCMxD_ODS_SCM__Daily_Revenue_F= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_ODS_SCM",
    external_task_id="Daily_Revenue_F",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F)
BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(D_INT_SCMxD_ODS_SCM__Daily_Revenue_F)
D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F)
def branch_D_INT_SCMxD_ODS_SCM__RFQ_Master(**context):
    """Return both the proxy and sensor task ids when the upstream D_ODS_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master","D_INT_SCMxD_ODS_SCM__RFQ_Master"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master","D_INT_SCMxD_ODS_SCM__RFQ_Master"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master"]
my_taskid = "BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master"
BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_INT_SCMxD_ODS_SCM__RFQ_Master,
    dag=D_INT_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master"
proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross dag sensor
my_taskid = "D_INT_SCMxD_ODS_SCM__RFQ_Master"
D_INT_SCMxD_ODS_SCM__RFQ_Master= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_ODS_SCM",
    external_task_id="RFQ_Master",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master)
BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(D_INT_SCMxD_ODS_SCM__RFQ_Master)
D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master)
def branch_D_INT_SCMxD_ODS_SCM__Inventory_A(**context):
    """Return both the proxy and sensor task ids when the upstream D_ODS_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__Inventory_A","D_INT_SCMxD_ODS_SCM__Inventory_A"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__Inventory_A","D_INT_SCMxD_ODS_SCM__Inventory_A"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__Inventory_A"]
my_taskid = "BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A"
BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_INT_SCMxD_ODS_SCM__Inventory_A,
    dag=D_INT_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_INT_SCMxD_ODS_SCM__Inventory_A"
proxy_D_INT_SCMxD_ODS_SCM__Inventory_A= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross dag sensor
my_taskid = "D_INT_SCMxD_ODS_SCM__Inventory_A"
D_INT_SCMxD_ODS_SCM__Inventory_A= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_ODS_SCM",
    external_task_id="Inventory_A",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Inventory_A)
BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(D_INT_SCMxD_ODS_SCM__Inventory_A)
D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Inventory_A)
def branch_D_INT_SCMxD_ODS_SCM__DOI_Actual(**context):
    """Return both the proxy and sensor task ids when the upstream D_ODS_SCM run should be sensed for this execution date; otherwise only the proxy."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual","D_INT_SCMxD_ODS_SCM__DOI_Actual"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual","D_INT_SCMxD_ODS_SCM__DOI_Actual"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual"]
my_taskid = "BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual"
BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_INT_SCMxD_ODS_SCM__DOI_Actual,
    dag=D_INT_SCM,
    provide_context=True,
)
# Proxy join point: succeeds whether the sensor branch ran or was skipped.
my_taskid = "proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual"
proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross dag sensor
my_taskid = "D_INT_SCMxD_ODS_SCM__DOI_Actual"
D_INT_SCMxD_ODS_SCM__DOI_Actual= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_ODS_SCM",
    external_task_id="DOI_Actual",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual)
BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(D_INT_SCMxD_ODS_SCM__DOI_Actual)
D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual)
# XSLT:loop: JOB_FLOW_NAME-and-PRE_JOB: External: END}}
# XSLT:loop: JOB_FLOW_NAME: START{
# XSLT:loop: Rows-by-JOB_FLOW_NAME: JOB_NAME: START{{
# FLOW: D_ODS_SCM.PNL_Revenue_Cost_A
# D_ODS_SCM jobs start only after the D_STG_INIT/SYS_STS_STG proxy clears.
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(PNL_Revenue_Cost_A)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(NRE_Summary)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(Daily_Revenue_F)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(RFQ_Master)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(Inventory_A)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(DOI_Actual)
# XSLT:loop: Rows-by-JOB_FLOW_NAME: JOB_NAME: START{{
# FLOW: D_DM_SCM.DM_PNL_Revenue_Cost_A
# Each D_DM_SCM job waits on its matching D_INT_SCM proxy.
proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(DM_PNL_Revenue_Cost_A)
proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(DM_NRE_Summary)
proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(DM_Daily_Revenue_F)
proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(DM_RFQ_Master)
proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(DM_Inventory_A)
proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(DM_DOI_Actual)
# XSLT:loop: Rows-by-JOB_FLOW_NAME: JOB_NAME: START{{
# FLOW: D_INT_SCM.INT_PNL_Revenue_Cost_A
# Each D_INT_SCM job waits on its matching D_ODS_SCM proxy.
proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(INT_PNL_Revenue_Cost_A)
proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(INT_NRE_Summary)
proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(INT_Daily_Revenue_F)
proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(INT_RFQ_Master)
proxy_D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(INT_Inventory_A)
proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(INT_DOI_Actual)
|
python
|
from segmentTree import SumSegmentTree, MinSegmentTree
import numpy as np
import matplotlib.pyplot as plt
class RingBuffer(object):
    """Fixed-capacity circular buffer backed by a pre-allocated numpy array.

    Once full, new appends overwrite the oldest entries in insertion order.
    Slots are addressed by their raw position, not by age.
    """

    def __init__(self, maxlen, shape, dtype='int32'):
        self.maxlen = maxlen
        # One contiguous array of `maxlen` slots of the requested shape/dtype.
        self.data = np.zeros((maxlen,) + shape).astype(dtype)
        self.next_idx = 0

    def append(self, v):
        """Write *v* at the current cursor and advance it, wrapping at maxlen."""
        write_at = self.next_idx
        self.data[write_at] = v
        self.next_idx = (write_at + 1) % self.maxlen

    def __getitem__(self, idx):
        """Return the raw slot *idx*; raise KeyError outside [0, maxlen)."""
        if not (0 <= idx < self.maxlen):
            raise KeyError()
        return self.data[idx]
def array_min2d(x):
    """Coerce *x* to a numpy array with at least two dimensions.

    Inputs that are already >= 2-D are returned unchanged (after conversion);
    anything lower-dimensional becomes a column vector of shape (n, 1).
    """
    arr = np.array(x)
    return arr if arr.ndim >= 2 else arr.reshape(-1, 1)
class Buffer(object):
    """Collection of named RingBuffers sharing one write cursor and length.

    `content_shape` maps field name -> per-entry shape; every append must
    supply a value for each field.
    """

    def __init__(self, limit, content_shape):
        self.next_idx = 0
        self.limit = limit
        self.length = 0
        # One RingBuffer per named field, all with the same capacity.
        self.contents = {
            name: RingBuffer(limit, shape=shape)
            for name, shape in content_shape.items()
        }

    def append(self, buffer_item):
        """Append one entry; *buffer_item* must carry every configured field."""
        for name, ring in self.contents.items():
            ring.append(buffer_item[name])
        self.next_idx = (self.next_idx + 1) % self.limit
        # Length grows until the buffer is full, then stays at capacity.
        self.length = min(self.length + 1, self.limit)
class PrioritizedGoalBuffer(Buffer):
    """Goal buffer sampled proportionally to per-slot priority via a sum segment tree.

    Priorities are stored as ``priority ** alpha`` (alpha = 0 gives uniform
    sampling, alpha = 1 fully proportional sampling).
    """

    def __init__(self, limit, alpha):
        """limit: buffer capacity; alpha: priority exponent applied to raw priorities."""
        self.content = {'goal': (1,)}
        self.alpha = alpha
        super(PrioritizedGoalBuffer, self).__init__(limit, self.content)
        # Segment-tree capacity must be a power of two >= limit.
        it_capacity = 1
        while it_capacity < limit:
            it_capacity *= 2
        self._it_sum = SumSegmentTree(it_capacity)
        self._max_priority = 1.0

    def append(self, buffer_item, priority=None):
        """See ReplayBuffer.store_effect"""
        idx = self.next_idx
        super().append(buffer_item)
        # New entries default to the maximum raw priority seen so far.
        if priority is None:
            priority = self._max_priority
        # BUGFIX: apply the alpha exponent here too, consistent with
        # update_priority (previously an explicit priority was stored raw),
        # and keep _max_priority in sync with explicitly supplied priorities.
        self._it_sum[idx] = priority ** self.alpha
        self._max_priority = max(self._max_priority, priority)

    def sample_proportional_idx(self):
        """Draw one slot index with probability proportional to its stored mass."""
        total_mass = self._it_sum.sum()   # renamed from `sum`, which shadowed the builtin
        mass = np.random.random() * total_mass
        return self._it_sum.find_prefixsum_idx(mass)

    def sample(self):
        """Return (idx, {field: value-as-2d-array}) for one prioritized draw."""
        # Draw such that we always have a proceeding element.
        idx = self.sample_proportional_idx()
        result = {}
        for name, ring in self.contents.items():
            result[name] = array_min2d(ring[idx])
        return idx, result

    def update_priority(self, idx, priority):
        """Re-prioritize slot *idx* with a raw priority and track the running maximum."""
        self._it_sum[idx] = priority ** self.alpha
        self._max_priority = max(self._max_priority, priority)
def _demo():
    """Visual sanity check: sampling frequency should track assigned priorities."""
    buffer = PrioritizedGoalBuffer(11, 1)
    samples = np.zeros((100000), dtype=int)

    def draw_and_plot():
        # Draw 100k prioritized samples and plot the frequency of each goal value.
        for j in range(100000):
            _, sample = buffer.sample()
            samples[j] = int(sample['goal'])
        bins = np.bincount(samples)
        plt.plot(range(bins.shape[0]), bins)
        plt.show()

    # Fill past capacity (15 > 11) so the oldest goals are overwritten;
    # priority equals the goal value itself.
    for i in range(15):
        buffer.append({'goal': i}, i)
    draw_and_plot()
    # Boost one goal's priority and confirm its sampling frequency spikes.
    buffer.update_priority(6, 100)
    draw_and_plot()
if __name__ == "__main__":
    _demo()
|
python
|
from .plots import Plot,PlotError,PlotState
from .. import context
from .. import items
from .. import maps
from .. import randmaps
from .. import waypoints
from .. import monsters
from .. import dialogue
from .. import services
from .. import teams
from .. import characters
from .. import namegen
import random
from .. import cutscene
from .. import worlds
# BARDIC_DUNGEON
# This subplot will generate a dungeon of a given type. All these subplots
# should be unique in order to prevent dungeon types from repeating.
# - Generate dungeon
# - Generate connection to previous dungeon
# - Install dungeon
# - Add chapter resources, as appropriate
class BardicCaves( Plot ):
    """Bardic dungeon: a cave-themed dungeon chain for the current chapter."""
    LABEL = "BARDIC_DUNGEON"
    # Name templates; {0} is filled with a randomly generated style name.
    NAME_PATTERNS = ( "Caverns of {0}", "Caves of {0}", "{0} Grotto", "{0} Chasm" )
    DUNGEON_PATTERN = (context.HAB_CAVE,)
    UNIQUE = True
    scope = True
    active = True
    def custom_init( self, nart ):
        """Load dungeon levels, and connect this dungeon to the adventure."""
        # Decide on a good name. Do this first in case we want to generate an antagonist
        # or boss monster to include in the dungeon. The name generator will generate
        # this antagonist, and it will be passed on to the levels of the dungeon.
        self.elements[ "ANTAGONIST" ] = False
        self.dname = self.gen_name()
        # Generate the levels
        self.levels = self.get_dungeon_levels( nart, self.DUNGEON_PATTERN, self.chapter.start_rank, self.chapter.end_rank )
        # Connect all the levels, and name them.
        self.add_sub_plot( nart, "BARDIC_CONNECTION",
            PlotState(elements={"LEVELS":self.levels,"DNAME":self.dname}, rank=self.chapter.start_rank).based_on( self ) )
        # Set the LAST_DUNGEON element, for use by the next dungeon.
        self.register_element( "LAST_DUNGEON", self.levels[-1] )
        return True
    def gen_name( self ):
        """Build the dungeon name from a random pattern and a random style name."""
        return random.choice( self.NAME_PATTERNS ).format( namegen.random_style_name() )
class BardicCrypt( BardicCaves ):
    """Bardic dungeon variant: undead-themed tunnels; only the constants differ."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Crypt of {0}", "Tomb of {0}", "{0} Boneyard", "{0} Catacombs" )
    DUNGEON_PATTERN = (context.HAB_TUNNELS,context.GEN_UNDEAD)
    UNIQUE = True
class AntagonisticForest(BardicCaves):
    """Forest dungeon named after a freshly generated antagonist faction."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ("Forest", "Woods", "Wilds")
    DUNGEON_PATTERN = (context.HAB_FOREST,)
    UNIQUE = True

    def gen_name(self):
        # Create and register the antagonist faction; the dungeon takes its name.
        faction = teams.AntagonistFaction(dungeon_type=self.NAME_PATTERNS)
        antagonist = self.register_element("ANTAGONIST", faction)
        return antagonist.name
class AntagonisticCaves(BardicCaves):
    """Cave dungeon named after a freshly generated antagonist faction."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ("Caves", "Caverns", "Grotto", "Chasm")
    DUNGEON_PATTERN = (context.HAB_CAVE,)
    UNIQUE = True

    def gen_name(self):
        # Create and register the antagonist faction; the dungeon takes its name.
        faction = teams.AntagonistFaction(dungeon_type=self.NAME_PATTERNS)
        antagonist = self.register_element("ANTAGONIST", faction)
        return antagonist.name
class AntagonisticTunnels( BardicCaves ):
    """A tunnel-themed dungeon named after, and owned by, a hostile faction."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Hideout", "Tunnels", "Catacombs" )
    # NOTE(review): the names suggest tunnels, but the pattern uses HAB_CAVE
    # rather than HAB_TUNNELS — confirm this is intentional.
    DUNGEON_PATTERN = (context.HAB_CAVE,)
    UNIQUE = True

    def gen_name( self ):
        # Create an antagonist faction whose name doubles as the dungeon name.
        # Registering it as ANTAGONIST lets later subplots reference it.
        faction = teams.AntagonistFaction( dungeon_type=self.NAME_PATTERNS )
        faction = self.register_element( "ANTAGONIST", faction )
        return faction.name
# BARDIC_CONNECTION
# This subplot will add a connection for the new bardic dungeon from the
# previous one. If no dungeons have yet been added, it will just connect to
# the city scene. Otherwise, it will likely add a boss encounter to the
# previous dungeon and a new set of resources (shops, etc) for the new level.
#
# DUTIES:
# - To activate the chapter
# - To connect the next dungeon to the previous
# - Provide access to needed resources: shops, temple, etc.
# - Provide rumours regarding the previous/current chapter.
class BC_DirectConnection( Plot ):
    """The first dungeon gets directly connected to the LOCALE scene."""
    LABEL = "BARDIC_CONNECTION"
    scope = True
    active = True
    # Defensive default; sibling BARDIC_CONNECTION plots (BC_DwarvenCity,
    # BC_AdvanceAgent) declare _ready as a class attribute, and t_START reads
    # it unguarded. custom_init() arms it.
    _ready = False

    @classmethod
    def matches( self, pstate ):
        """Requires LOCALE to exist, but no LAST_DUNGEON."""
        return pstate.elements.get( "LOCALE" ) and not pstate.elements.get( "LAST_DUNGEON" )

    def custom_init( self, nart ):
        """Install the dungeon directly in the LOCALE scene."""
        self.install_dungeon( nart, self.elements[ "LEVELS" ], self.elements[ "LOCALE" ], self.elements["DNAME"] )
        self._ready = True
        return True

    ### TESTING CUTSCENES HERE- FOR TESTING ONLY
    do_cutscene = False

    def t_START( self, explo ):
        # Print message, activate chapter upon entering city the first time.
        if self._ready:
            self.chapter.activate()
            self._ready = False
            #explo.alert("[PORTENT]")
            # NOTE(review): "[city]" below looks like a grammar token; confirm
            # that explo.alert expands dialogue grammar, otherwise it will be
            # displayed literally.
            explo.alert("They say that a journey of a thousand miles begins with a single step. Today your journey begins as you prepare to leave the city of [city] and begin your adventure.")
        # Dead branch while do_cutscene is False; kept for cutscene testing.
        if self.do_cutscene:
            explo.alert( "You enter a ." )
            cs1=cutscene.Say( "This place stinks of death...", species=(characters.Human,characters.Elf,characters.Fuzzy,characters.Hurthling), children= [
                cutscene.Say( "You say that like it's a bad thing.", job=(characters.Necromancer,) ),
                cutscene.Say( "Yes, it reminds me of my mother's cooking.", species=(characters.Orc,) ),
                cutscene.Say( "The sooner we get this job finished, the sooner we can get out of here.", job=(characters.Warrior,) ),
                ])
            cutscene.roll_cutscene( explo, [cs1,] )
            #self.do_cutscene = False

    def get_dialogue_grammar( self, npc, explo ):
        """Provide rumour text while this chapter is active; returns None otherwise."""
        if self.chapter.active:
            dname = self.elements.get("DNAME")
            mygram = {
                "[RUMOUR]": ["[rumourleadin] there are [monsters] coming from the {}.".format( dname )],
            }
            city = self.elements["LOCALE"]
            anti = self.elements.get( "ANTAGONIST" )
            # ANTAGONIST is False unless an antagonist-flavored dungeon set it.
            if anti:
                mygram["[HOWAREYOU]"] = ["Heavens save {} from the {}.".format(city,anti),]
                mygram["[RUMOUR]"].append( "[rumourleadin] {} lives in fear of the {}.".format( city, anti ) )
            return mygram
class BC_DwarvenCity( Plot ):
    """Connect the next dungeon via a boss lair leading down to a dwarven city."""
    LABEL = "BARDIC_CONNECTION"
    UNIQUE = True
    scope = True
    active = True
    # "{0}" is filled with a generated dwarven word.
    NAME_PATTERNS = ( "{0} Deep", "{0} Halls" )
    # Cleared after the first city-entry message (see t_START).
    _ready = True

    @classmethod
    def matches( self, pstate ):
        """Requires LAST_DUNGEON to exist and to not go up, and the next dungeon to go down."""
        return ( pstate.elements.get( "LAST_DUNGEON" )
            and context.MAP_GOUP not in pstate.elements["LAST_DUNGEON"].desctags
            and context.MAP_GODOWN in pstate.elements["LEVELS"][0].desctags )

    def custom_init( self, nart ):
        """Install the dungeon."""
        # Create the intermediary level: a tunnel scene holding the boss lair.
        interior = maps.Scene( 65,65, sprites={maps.SPRITE_WALL: "terrain_wall_cave.png", maps.SPRITE_GROUND: "terrain_ground_under.png", maps.SPRITE_FLOOR: "terrain_floor_gravel.png" },
            biome=context.HAB_TUNNELS, setting=self.setting, desctags=(context.MAP_DUNGEON,context.MAP_GODOWN) )
        igen = randmaps.SubtleMonkeyTunnelScene( interior )
        self.register_scene( nart, interior, igen, ident="_LAIR" )
        # Create the guardian: an earth-themed fighter/construct boss a few
        # ranks above the chapter rank.
        btype = monsters.choose_monster_type(self.rank,self.rank+2,{(context.DES_EARTH,context.MTY_FIGHTER,context.MTY_CONSTRUCT):True,context.DES_EARTH:context.MAYBE})
        boss = self.register_element( "_BOSS", monsters.generate_boss( btype, self.rank+3 ) )
        interior.name = "{0}'s Lair".format( boss )
        # Connect to previous level.
        self.add_sub_plot( nart, "CONNECT", PlotState( elements={"PREV":self.elements["LAST_DUNGEON"],"NEXT":interior} ).based_on( self ) )
        # Create the goal room. default_reaction=-999 makes the team hostile;
        # respawn=False keeps the boss fight a one-time encounter.
        team = teams.Team(default_reaction=-999, rank=self.rank, strength=150,
            habitat=interior.get_encounter_request(), respawn=False, boss=boss )
        int_goalroom = randmaps.rooms.SharpRoom( tags=(context.GOAL,), parent=interior )
        int_goalroom.contents.append( team )
        int_goalroom.contents.append( boss )
        boss.team = team
        stairs_1 = waypoints.SpiralStairsDown()
        int_goalroom.contents.append( stairs_1 )
        # Create the Dwarven City.
        myscene = maps.Scene( 65, 65,
            sprites={maps.SPRITE_WALL: "terrain_wall_cave.png", maps.SPRITE_GROUND: "terrain_ground_under.png", maps.SPRITE_FLOOR: "terrain_floor_gravel.png"},
            biome=context.HAB_BUILDING, setting=self.setting,
            name=random.choice( self.NAME_PATTERNS ).format( namegen.DWARF.gen_word() ),
            desctags=(context.MAP_DUNGEON,context.DES_CIVILIZED,context.MAP_GODOWN) )
        mymapgen = randmaps.CaveScene( myscene )
        self.register_scene( nart, myscene, mymapgen, ident="LOCALE" )
        castle = self.register_element( "CITY", randmaps.rooms.VillageRoom( width=35,height=35,tags=(context.CIVILIZED,context.ROOM_PUBLIC), parent=myscene ) )
        myroom = randmaps.rooms.FuzzyRoom( tags=(context.ENTRANCE,), parent=castle )
        # strength=0: a non-combat team of friendly townsfolk.
        myteam = teams.Team( strength=0, default_reaction=characters.SAFELY_FRIENDLY)
        castle.contents.append( myteam )
        stairs_2 = waypoints.SpiralStairsUp()
        myroom.contents.append( stairs_2 )
        myroom.contents.append( monsters.generate_npc(species=characters.Dwarf, team=myteam) )
        myroom.contents.append( monsters.generate_npc(species=characters.Dwarf, team=myteam) )
        # Connect the stairs.
        # NOTE(review): move_element presumably records the city scene as being
        # located within the lair level — confirm against Plot.move_element.
        self.move_element( myscene, interior )
        stairs_1.destination = myscene
        stairs_1.otherside = stairs_2
        stairs_2.destination = interior
        stairs_2.otherside = stairs_1
        # Add some city services.
        self.add_sub_plot( nart, "CITY_GENERALSTORE" )
        self.add_sub_plot( nart, "CITY_LIBRARY" )
        self.add_sub_plot( nart, "CITY_INN" )
        self.add_sub_plot( nart, "CITY_TEMPLE" )
        self.add_sub_plot( nart, "CITY_EXTRASHOP" )
        # Install the dungeon in the city.
        self.install_dungeon( nart, self.elements[ "LEVELS" ], self.elements[ "LOCALE" ], self.elements["DNAME"] )
        return True

    def t_START( self, explo ):
        # Print message, activate chapter upon entering city the first time.
        if explo.scene is self.elements["LOCALE"] and self._ready:
            explo.alert( "You step into a bustling dwarven city." )
            self.chapter.activate()
            self._ready = False

    def get_dialogue_grammar( self, npc, explo ):
        # Rumours differ by chapter state: previous chapter still active →
        # foreshadowing; this chapter active → directions. Otherwise returns None.
        dname = self.elements.get("DNAME")
        city = self.elements.get("LOCALE")
        monster = self.elements.get("_BOSS")
        if self.chapter.prev and self.chapter.prev.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] the dwarves of {} protect the world from {}.".format( city, dname ),
                    "[rumourleadin] {} is now under siege from {} the {}.".format( city, monster, monster.monster_name )
                    ],
            }
            return mygram
        elif self.chapter.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] beneath {} lies {}.".format( city, dname )],
            }
            return mygram
class BC_AdvanceAgent( Plot ):
    # Fight an agent of next chapter's ANTAGONIST.
    LABEL = "BARDIC_CONNECTION"
    scope = True
    active = True
    # NOTE(review): _ready appears unused in this class; it may exist only for
    # consistency with sibling BARDIC_CONNECTION plots.
    _ready = True

    @classmethod
    def matches( self, pstate ):
        """Requires LAST_DUNGEON and ANTAGONIST to exist"""
        return ( pstate.elements.get( "LAST_DUNGEON" )
            and pstate.elements.get( "ANTAGONIST" ) )

    def custom_init( self, nart ):
        """Install the dungeon."""
        # Create the intermediary level, claimed by the antagonist faction.
        interior = maps.Scene( 65,65, sprites={maps.SPRITE_WALL: "terrain_wall_darkbrick.png", maps.SPRITE_GROUND: "terrain_ground_under.png", maps.SPRITE_FLOOR: "terrain_floor_tile.png" },
            fac=self.elements["ANTAGONIST"],
            biome=context.HAB_TUNNELS, setting=self.setting, desctags=(context.MAP_DUNGEON,) )
        igen = randmaps.SubtleMonkeyTunnelScene( interior )
        self.register_scene( nart, interior, igen, ident="LOCALE" )
        # Create the goal room, defended by a hostile faction team.
        team = teams.Team(default_reaction=-999, rank=self.rank, strength=50, habitat=interior.get_encounter_request(),
            fac=self.elements["ANTAGONIST"], respawn=False )
        int_goalroom = randmaps.rooms.SharpRoom( tags=(context.GOAL,), parent=interior )
        int_goalroom.contents.append( team )
        # Create the guardian: an upgraded NPC agent of the antagonist.
        boss = self.register_element( "_BOSS", monsters.generate_npc(team=team,upgrade=True,rank=self.rank+3) )
        self.enemy_defeated = False
        interior.name = "{}'s Chamber".format( boss )
        int_goalroom.contents.append( boss )
        # Pad the level with a few random encounters.
        for t in range( random.randint(2,4) ):
            self.add_sub_plot( nart, "ENCOUNTER" )
        # Connect to previous level.
        self.add_sub_plot( nart, "CONNECT", PlotState( elements={"PREV":self.elements["LAST_DUNGEON"],"NEXT":interior} ).based_on( self ) )
        # Add a BARDIC_FRESHSTART to install the dungeon somewhere else.
        sp = self.add_sub_plot( nart, "BARDIC_FRESHSTART" )
        self.register_element( "DESTINATION", sp.elements.get( "LOCALE" ) )
        return True

    def _BOSS_DEATH( self, explo ):
        # Death trigger for the _BOSS element.
        self.enemy_defeated = True

    def t_COMBATOVER( self, explo ):
        # After the agent dies, reveal the next destination and hand off.
        if self.enemy_defeated:
            # Activate the resolution, whatever that is.
            explo.alert( "You discover that {} was carrying a map leading to {}. That should be your next destination.".format(self.elements["_BOSS"],self.elements["DESTINATION"]) )
            explo.alert( "New world map location discovered." )
            self.chapter.activate()
            self.active = False

    def get_dialogue_grammar( self, npc, explo ):
        # Rumours depend on whether the previous chapter is still active.
        dname = self.elements.get("DNAME")
        enemy = self.elements.get("ANTAGONIST")
        # NOTE(review): assumes the previous dungeon level object exposes a
        # .dname attribute — confirm install_dungeon sets it on levels.
        olddname = self.elements["LAST_DUNGEON"].dname
        monster = self.elements.get("_BOSS")
        newloc = self.elements.get("DESTINATION")
        if self.chapter.prev and self.chapter.prev.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] the {} is in league with the {}.".format( olddname, enemy )],
            }
            return mygram
        elif self.chapter.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] the {} is near {}.".format( dname, newloc )],
            }
            return mygram
#
# BARDIC_FRESHSTART
# This subplot opens up a new world map scene in which to place the next dungeon.
# Because of this, it installs the dungeon... normally BARDIC_CONNECTION is
# supposed to do that, but it can pawn off the responsibility to this subplot.
#
# The world map entrance should get activated when the chapter is activated.
# That scene should be stored as element LOCALE, in case the connection needs
# to do anything with it.
#
class BF_ForestVillage( Plot ):
    """A new world map scene, set in a forest."""
    LABEL = "BARDIC_FRESHSTART"
    scope = True
    active = True

    @classmethod
    def matches( self, pstate ):
        """Requires LEVELS[0] to be forest or not MAP_WILDERNESS."""
        dungeon = pstate.elements.get( "LEVELS" )
        return dungeon and ( dungeon[0].biome is context.HAB_FOREST
            or context.MAP_WILDERNESS not in dungeon[0].desctags )

    def custom_init( self, nart ):
        # Add the forest itself; the map grows with rank, capped at 129x129.
        myscene = maps.Scene( min( 95 + self.rank * 3, 129 ), min( 95 + self.rank * 3, 129 ),
            sprites={maps.SPRITE_WALL: "terrain_wall_woodfort.png", maps.SPRITE_GROUND: "terrain_ground_forest.png",
                maps.SPRITE_FLOOR: "terrain_floor_gravel.png" },
            biome=context.HAB_FOREST, setting=self.setting, fac=None,
            desctags=(context.MAP_WILDERNESS,) )
        mymapgen = randmaps.ForestScene( myscene )
        self.register_scene( nart, myscene, mymapgen, ident="LOCALE" )
        # Add a village on the map edge.
        castle = self.register_element( "CITY", randmaps.rooms.VillageRoom( width=35,
            height=35,tags=(context.CIVILIZED,context.ROOM_PUBLIC,context.MAP_ON_EDGE), parent=myscene ) )
        myroom = randmaps.rooms.FuzzyRoom( tags=(context.ENTRANCE,), parent=castle )
        # strength=0: a non-combat team of friendly villagers.
        myteam = teams.Team( strength=0, default_reaction=characters.SAFELY_FRIENDLY)
        castle.contents.append( myteam )
        # The well serves as the world map entrance waypoint.
        myent = waypoints.Well()
        myroom.contents.append( myent )
        myroom.contents.append( monsters.generate_npc(species=characters.Elf, team=myteam) )
        myroom.contents.append( monsters.generate_npc(species=characters.Elf, team=myteam) )
        self.add_sub_plot( nart, "CITY_GENERALSTORE" )
        self.add_sub_plot( nart, "CITY_LIBRARY" )
        self.add_sub_plot( nart, "CITY_INN" )
        self.add_sub_plot( nart, "CITY_EXTRASHOP" )
        # Add world map entrance; starts hidden (False) until the chapter activates.
        self._entrance = self.chapter.world.add_entrance( myscene, myscene.name, worlds.W_VILLAGE, myent, False )
        # Number of wilderness encounters scales with rank.
        for t in range( random.randint(2+min(self.rank//3,6),4+min(self.rank//2,6)) ):
            self.add_sub_plot( nart, "ENCOUNTER" )
        self.add_sub_plot( nart, "SPECIAL_FEATURE" )
        # Install the dungeon here.
        self.install_dungeon( nart, self.elements[ "LEVELS" ], myscene, self.elements["DNAME"] )
        self._ready = True
        return True

    def t_START( self, explo ):
        # When the chapter activates, show the world map entrance, then retire.
        if self.chapter.active:
            self._entrance.visible = True
            self.active = False
# BARDIC_CONCLUSION
# This subplot will feature a big boss battle to take place after the LAST_DUNGEON.
class StraightBardicBalrog( Plot ):
    """Fight a boss encounter."""
    LABEL = "BARDIC_CONCLUSION"
    active = True
    scope = True

    def custom_init( self, nart ):
        """Create the final dungeon, boss encounter, and resolution."""
        # The campaign's final boss, well above the chapter rank.
        btype = monsters.choose_monster_type(self.rank+2,self.rank+4,{context.MTY_BOSS:True,context.MTY_LEADER:context.MAYBE})
        boss = monsters.generate_boss( btype, self.rank+5 )
        #print( "{0} the {1}".format( boss, boss.monster_name ) )
        # NOTE(review): desctags includes MTY_HUMANOID (a monster-type tag) and
        # the sprites dict has no SPRITE_GROUND entry — confirm both are intended.
        interior = maps.Scene( 65,65, sprites={maps.SPRITE_WALL: "terrain_wall_darkbrick.png",
            maps.SPRITE_FLOOR: "terrain_floor_dungeon.png", },
            biome=context.HAB_BUILDING, setting=self.setting, desctags=(context.MAP_DUNGEON,context.MTY_HUMANOID) )
        igen = randmaps.SubtleMonkeyTunnelScene( interior )
        interior.name = "{0}'s Lair".format( boss )
        self.register_scene( nart, interior, igen, ident="_LAIR" )
        # Connect the lair to the final dungeon level.
        self.add_sub_plot( nart, "CONNECT", PlotState( elements={"PREV":self.elements["LAST_DUNGEON"],"NEXT":interior} ).based_on( self ) )
        # A strong, hostile, non-respawning team for the final fight.
        team = teams.Team(default_reaction=-999, rank=self.rank, strength=200,
            habitat=interior.get_encounter_request(), respawn=False, boss=boss )
        int_goalroom = randmaps.rooms.SharpRoom( tags=(context.GOAL,), parent=interior )
        int_goalroom.contents.append( team )
        boss.team = team
        # Register the boss inside the lair room so ENEMY_DEATH fires on its death.
        self.register_element( "_LAIR_ROOM", int_goalroom )
        self.register_element( "ENEMY", boss, "_LAIR_ROOM" )
        self.add_sub_plot( nart, "DUNGEON_ARMORY", PlotState( elements={"LOCALE":interior} ).based_on( self ) )
        self.enemy_defeated = False
        return True

    def ENEMY_DEATH( self, explo ):
        # Death trigger for the ENEMY element.
        self.enemy_defeated = True

    def t_COMBATOVER( self, explo ):
        # Once the boss is dead and combat ends, roll the ending.
        if self.enemy_defeated:
            # Activate the resolution, whatever that is.
            explo.alert( "With {0} defeated, peace soon returns to the land.".format( self.elements["ENEMY"] ) )
            explo.alert( "Thanks for playing Dungeon Monkey Eternal. You can follow development at www.gearheadrpg.com, or via @Pyrro12 on Twitter." )
            self.active = False

    def get_dialogue_grammar( self, npc, explo ):
        # Endgame rumours, available until the boss is defeated.
        if self.active:
            boss = self.elements["ENEMY"]
            mygram = {
                "[HOWAREYOU]": ["Heavens save us from {0}.".format(boss)],
                "[monsters]": ["{0}'s minions".format(boss)],
                "[RUMOUR]": ["[rumourleadin] {0} the {1} is the cause of our problems.".format( boss, boss.monster_name )],
            }
            city = self.elements.get( "LOCALE" )
            if city:
                mygram["[RUMOUR]"].append( "[rumourleadin] {0} the {1} plans to destroy {2}.".format( boss, boss.monster_name,city ) )
            return mygram